diff --git a/.checks-out b/.checks-out
new file mode 100644
index 0000000..3ed2625
--- /dev/null
+++ b/.checks-out
@@ -0,0 +1,108 @@
+approvals:
+# This is the array of approval policies.
+#
+# For each pull request we traverse the array
+# of approval policies until we find the first policy
+# that matches. Note that the approval policy is
+# an ordered list. You should specify your most specific
+# policies first and your least specific policies last.
+#
+# An approval policy will match based
+# on the criteria in the "scope" section. You can
+# match based on the pull request base branch or
+# based on the file paths of the modified files.
+#
+# The most common configuration will have only
+# a single approval policy. The last approval policy
+# is required to have an empty "scope" section.
+# The last policy is the default approval policy. This
+# policy will be applied when none of the other policies
+# have matched.
+[
+  {
+    #
+    # The name is strictly for human consumption.
+    # It is used by the comment section. It is optional.
+    #
+    name: "dev"
+    #
+    # Limit this policy to pull requests that have
+    # base branch 'dev' or 'qa'. The base branch is where the
+    # changes should be applied.
+    #
+    scope:
+    {
+      branches: [ "dev", "qa" ]
+    }
+    #
+    # Require 1 approver from the MAINTAINERS file.
+    # Allow self-approval.
+    #
+    match: "all[count=1,self=true]"
+    #
+    # Apply a git tag after auto-merging the pull request.
+    # This feature is disabled by default.
+    tag:
+    {
+      enable: true
+      template: "{{.Version}}-dev"
+      increment: "patch"
+    }
+  }
+  {
+    name: "master"
+    scope:
+    {
+      branches: [ "master" ]
+    }
+    #
+    # Use this approval pattern instead of the default pattern.
+    #
+    pattern: "(?i)^shipit\\s*(?P<version>\\S*)"
+    #
+    # Require 1 approver from the MAINTAINERS file.
+    # Deny self-approval.
+    #
+    match: "all[count=1,self=false]"
+    tag:
+    {
+      enable: true
+      template: "{{.Version}}"
+      increment: "minor"
+    }
+  }
+  # Disable the service on the remaining branches.
+  # The remaining branches are feature branches
+  # and do not require approval.
+  #
+  {
+    match: "off"
+  }
+]
+#
+# Block a pull request with this pull request comment.
+# This feature is disabled by default.
+#
+antipattern: "(?i)^holdit"
+#
+# Automatically merge the pull request when all status checks pass.
+# This feature is disabled by default.
+#
+merge:
+{
+  enable: true
+}
+#
+# Report the status of the pull request to various comment sinks.
+# This feature is disabled by default.
+# +comment: +{ + enable: true + targets: [ + { + target: github + types: [ "approve", "block", "reset" ] + } + ] +} diff --git a/.drone.yml b/.drone.yml deleted file mode 100644 index f955e4e..0000000 --- a/.drone.yml +++ /dev/null @@ -1,33 +0,0 @@ -workspace: - base: /go - path: src/github.com/lgtmco/lgtm - -pipeline: - test: - image: golang:1.6 - commands: - - make deps - - make gen_assets gen_migration - - make test - - build: - image: golang:1.6 - commands: - - make test_mysql - - make build - when: - event: push - - docker: - repo: lgtm/lgtm - tag: [latest, 1.0.0] - when: - branch: master - event: push - -services: - mysql: - image: mysql:5.6.27 - environment: - - MYSQL_DATABASE=test - - MYSQL_ALLOW_EMPTY_PASSWORD=yes diff --git a/.drone.yml.sig b/.drone.yml.sig deleted file mode 100644 index e28bed2..0000000 --- a/.drone.yml.sig +++ /dev/null @@ -1 +0,0 @@ -eyJhbGciOiJIUzI1NiJ9.d29ya3NwYWNlOgogIGJhc2U6IC9nbwogIHBhdGg6IHNyYy9naXRodWIuY29tL2xndG1jby9sZ3RtCgpwaXBlbGluZToKICB0ZXN0OgogICAgaW1hZ2U6IGdvbGFuZzoxLjYKICAgIGNvbW1hbmRzOgogICAgICAtIG1ha2UgZGVwcwogICAgICAtIG1ha2UgZ2VuX2Fzc2V0cyBnZW5fbWlncmF0aW9uCiAgICAgIC0gbWFrZSB0ZXN0CgogIGJ1aWxkOgogICAgaW1hZ2U6IGdvbGFuZzoxLjYKICAgIGNvbW1hbmRzOgogICAgIC0gbWFrZSB0ZXN0X215c3FsCiAgICAgLSBtYWtlIGJ1aWxkCiAgICB3aGVuOgogICAgICBldmVudDogcHVzaAoKICBkb2NrZXI6CiAgICByZXBvOiBsZ3RtL2xndG0KICAgIHRhZzogW2xhdGVzdCwgMS4wLjBdCiAgICB3aGVuOgogICAgICBicmFuY2g6IG1hc3RlcgogICAgICBldmVudDogcHVzaAoKc2VydmljZXM6CiAgbXlzcWw6CiAgICBpbWFnZTogbXlzcWw6NS42LjI3CiAgICBlbnZpcm9ubWVudDoKICAgICAgLSBNWVNRTF9EQVRBQkFTRT10ZXN0CiAgICAgIC0gTVlTUUxfQUxMT1dfRU1QVFlfUEFTU1dPUkQ9eWVzCg.1qNVh_I2NsM401y0ClSVJITLV0F3-zjPTtL7us8Orm4 \ No newline at end of file diff --git a/.gitignore b/.gitignore index fea4936..ebeaa25 100644 --- a/.gitignore +++ b/.gitignore @@ -1,13 +1,9 @@ -lgtm -*.sqlite -*.db -*.txt -*_gen.go -*.out +/vendor +checks-out +checks-out.sqlite .DS_Store .env -bindata.go - -web/react/node_modules -web/static/files/script.js -#web/static/files/*.css +.idea +.vscode +report.out +report.xml diff --git a/.travis.yml b/.travis.yml new file mode 100644 index 0000000..629b001 --- /dev/null +++ b/.travis.yml @@ -0,0 +1,11 @@ +language: go +go: + - 1.x.x + +install: + - go get -u github.com/golang/dep + - go install github.com/golang/dep/cmd/dep + - export PATH=$GOPATH/bin:$PATH + - dep ensure + +script: make test diff --git a/CHANGELOG.md b/CHANGELOG.md new file mode 100644 index 0000000..3328a99 --- /dev/null +++ b/CHANGELOG.md @@ -0,0 +1,177 @@ +Checks-out is a fork of [LGTM](https://github.com/lgtmco/lgtm). It was forked +at 4cbae5. Checks-out uses [semantic versioning](http://semver.org/). The +Checks-out configuration format is incompatible with the LGTM configuration +format but the legacy format can be parsed. + +# 0.23.0 + +* Improved error message when configuration or MAINTAINERS file is missing. +* Improved logging when GitHub hook sends an empty request body. +* Enabling a repository requires successful validation of the configuration files. +* Bug fix for ignoring GitHub pull request comments created by the service. +* Bug fix for organizations in the user interface. If another admin + of the organization has enabled it, then you will see the organization + enabled. Previously a user only saw the organizations they had + enabled themselves. + +# 0.22.0 + +* Bug fix for case-insensitive team name matching for repositories + using the HJSON format for MAINTAINERS file. +* Log request headers on 500-level errors. 
This will allow us to
+  debug intermittent GitHub status hooks that arrive with an empty request body.
+
+# 0.21.0
+
+* Case-insensitive team name matching. Previously only user names
+  were case-insensitive.
+* Allow users and groups to be specified in anonymous match policy.
+  In "{foo,bar}\[count=2\]", for example, foo and bar can each be either a group
+  or a user name. Previously only usernames were accepted. **Breaking
+  change**: it is now a configuration error to have a user name
+  and a group name with the same name.
+* Eliminate default approval policy. Previously you could
+  omit the approvals list from your configuration file and a default
+  policy would be applied. This behavior can lead to silent
+  misconfiguration of the configuration file. **Breaking change**: it is now
+  a configuration error if no approval policy is specified; you must specify
+  an approval policy explicitly.
+* Add 'uptodate' option to merge configuration. When auto-merge is enabled,
+  the uptodate feature will require the compare branch to have merged in
+  all commits from the base branch before automatic merge is performed.
+  Default value for this option is true.
+
+# 0.20.0
+
+* New admin endpoints to manually deregister GitHub repositories.
+* Allow multiple Slack servers to be used as endpoints.
+
+# 0.13.0
+
+* Use "github-collab \[parameter\]" notation in MAINTAINERS file.
+  The parameter is either the literal "repo-self" or the "org/repo" slug
+  of another repository.
+* A repository named “checks-out-configuration” in your organization
+  is special. The files template.checks-out and template.MAINTAINERS
+  in that repository will be used by any other repository that
+  is missing those files.
+* The user interface has the ability to register or de-register
+  an entire organization. If an organization is registered, then
+  any new repositories created in the organization are automatically
+  added to the service.
+* Approval scope has new parameters 'regexpaths', 'regexbase',
+  and 'regexcompare'. These are regular expression matches against
+  the file paths of the pull request, the base branch name,
+  or the compare branch name, respectively.
+
+# 0.12.0
+
+* Skipped by accident
+
+# 0.11.0
+
+* GitHub Reviews integration.
Existing repositories must de-register and re-register.
+* Bugfix for GitHub API library calls that return an error without an HTTP response
+
+# 0.10.0
+
+* github-org repo-self when used in a personal repository evaluates to the owner
+* Implement "ignoreuimerge" feature to ignore merges from the user interface
+* Allow "comment:" to provide a comment on the git commit when a pull request passes status checks
+* Add shadowed variable checking to 'go vet' Makefile directive
+* Bugfix for users with no user repositories and no group repositories
+* Truncate GitHub status messages that are too long (140 character limit)
+* Fix typo in reading environment variables
+* Change github-team [name] to case insensitive comparison of slugs
+
+# 0.9.4
+
+* Bugfix for users with no user repositories and no group repositories
+
+# 0.9.3
+
+* Truncate GitHub status messages that are too long (140 character limit)
+
+# 0.9.2
+
+* Fix typo in reading environment variables
+
+# 0.9.1
+
+* Change github-team [name] to case insensitive comparison of slugs
+
+# 0.9.0
+
+* Pull request commit status message shows error when configuration file is misconfigured
+* Update dependencies to ensure that go-yaml has Apache 2.0 license
+* Add integration end-to-end testing of checks-out service
+* Bug fix for logging severity level of login errors
+* Improved error reporting when OAuth capabilities have insufficient privileges
+* Remove unused email address column from the database
+* Bug fix to eliminate spurious warning level log message
+* Refactoring of environment variables into one package
+* Add "./checks-out -help" and "./checks-out -env" command-line options
+* Parse legacy .lgtm configuration file format
+* Add button in user interface for a user to deregister from service
+* Ability to use a GitHub team from another organization "github-team {team} {org}"
+* Ability to change the location of the documentation via an environment variable
+* Ability to limit which users can create accounts via environment variables and user/org lists in the database
+* Ability to parse the legacy MAINTAINERS toml file format
+* Bug fix to generate ERROR severity log message when recovering from panic
+
+# 0.8.0
+
+* Http error response codes that are less than 500 use the warning log level.
+* Bug fix. Generate 404 response when repo is not found in database.
+* Add 'authormatch' section to approval policies. Can be used for Contributor License Agreements.
+* Internal user agent blacklist when logging http requests.
+* Update user interface with on/off toggle for activation.
+* Add validation button to user interface.
+* Use case-insensitive matching for GitHub usernames. ([#85](https://github.com/capitalone/checks-out/issues/85))
+* Ability to change commit ranges for disapproval and tag selection. ([#80](https://github.com/capitalone/checks-out/issues/80))
+* 'antititle' optional regex to prevent merges based on pull request title ([#39](https://github.com/capitalone/checks-out/issues/39))
+* Fine-grained notification types as GitHub comments or Slack messages
+* Postgres database backend support
+
+# 0.7.0
+
+* Add new approval policy 'off' to disable service
+* Add optional 'merge' section for each approval scope. ([#66](https://github.com/capitalone/checks-out/issues/66))
+* Simplify disapproval configuration. Add 'antimatch' to configuration.
+* Add 'github-team repo-self' notation.
([#60](https://github.com/capitalone/checks-out/issues/60))
+* Show all errors in response when parsing configuration/MAINTAINERS files
+* When possible collapse multierrors to a single HTTP response code.
+* Pagination for GitHub API requests. Fixes bug for getting team members.
+* Status messages with approval information.
+* Populate name and email address for GitHub orgs and teams. ([#57](https://github.com/capitalone/checks-out/issues/57))
+* Set log level to Info on release build.
+* Set GIN_MODE to release mode on release build.
+
+# 0.6.0
+
+* Add 'antipattern' to configuration. Specifies disapproval regular expression.
+* Added 'range' option to commit section of configuration.
+* Added 'delete' option to merge section of configuration.
+* Remove private repositories from /api/repos endpoint. Add /api/count endpoint that returns a count of all public and private repos.
+
+# 0.5.16
+
+The following is a summary of the changes between the LGTM project
+and this project.
+
+* The configuration file has changed from a .lgtm file in TOML format to
+a configuration file in Human JSON ([HJSON](http://hjson.org/)) format.
+* The MAINTAINERS file can either be specified in the original text
+format or in HJSON format.
+* The default behavior when reading the comments on a pull request has
+been changed to consider only the comments made since the HEAD commit on the
+branch. This behavior can be changed to consider all comments on the
+pull request with a configuration parameter. Fixes [#40](https://github.com/lgtmco/lgtm/issues/40).
+* Add custom approval policies based on a grammar that tests groups of people against specified thresholds. Fixes [#32](https://github.com/lgtmco/lgtm/issues/32), [#35](https://github.com/lgtmco/lgtm/issues/35).
+* Add ability to auto-merge a pull request after status checks have passed.
+* Add ability to tag the base branch after an auto-merge has completed.
+* Add custom approval scopes that can be based on either file paths in the
+repository or against a list of base branches. Fixes [#20](https://github.com/lgtmco/lgtm/issues/20).
+* Added LGTM_SUNLIGHT environment variable. Can be enabled to provide APIs that might have security or privacy concerns when publicly accessible.
+* Improved error messages in API responses for misconfigured git repositories.
diff --git a/Dockerfile b/Dockerfile
deleted file mode 100644
index befcf14..0000000
--- a/Dockerfile
+++ /dev/null
@@ -1,16 +0,0 @@
-# Build the drone executable on a x64 Linux host:
-#
-# go build --ldflags '-extldflags "-static"' -o lgtm
-#
-# Build the docker image:
-#
-# docker build --rm=true -t lgtm/lgtm .
-
-FROM centurylink/ca-certs
-EXPOSE 8000
-
-ENV DATABASE_DRIVER=sqlite3
-ENV DATABASE_DATASOURCE=/var/lib/lgtm/lgtm.sqlite
-
-ADD lgtm /lgtm
-ENTRYPOINT ["/lgtm"]
diff --git a/ENVIRONMENT_VARS.md b/ENVIRONMENT_VARS.md
new file mode 100644
index 0000000..6c839be
--- /dev/null
+++ b/ENVIRONMENT_VARS.md
@@ -0,0 +1,188 @@
+# Environment Variables for Checks-Out
+
+This document contains information about all the environment variables that can or
+must be defined for Checks-Out to function.
+
+## Server configuration
+
+### Server IP Address and Port
+- Format: `SERVER_ADDR="_ip_address_:_port_"`; for the precise format see the documentation for
+golang's `http.ListenAndServe` and `http.ListenAndServeTLS`
+- Default: `:8000`, which means port 8000 on all interfaces
+- Required: No
+
+See https://golang.org/pkg/net/http/#ListenAndServe for detailed documentation on the format.
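+
+For illustration only, here is a minimal Go sketch (not part of Checks-Out, and assuming only the
+`SERVER_ADDR`, `SERVER_CERT`, and `SERVER_KEY` variables described in this document) of how an
+address such as `:8000` is typically passed to `http.ListenAndServe` or `http.ListenAndServeTLS`:
+
+```go
+package main
+
+import (
+	"log"
+	"net/http"
+	"os"
+)
+
+func main() {
+	// SERVER_ADDR follows the net/http convention: "host:port" or ":port".
+	addr := os.Getenv("SERVER_ADDR")
+	if addr == "" {
+		addr = ":8000" // default: port 8000 on all interfaces
+	}
+	cert := os.Getenv("SERVER_CERT")
+	key := os.Getenv("SERVER_KEY")
+	if cert != "" {
+		// Serve HTTPS when a certificate and key are provided (see the SSL sections below).
+		log.Fatal(http.ListenAndServeTLS(addr, cert, key, nil))
+	}
+	log.Fatal(http.ListenAndServe(addr, nil))
+}
+```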
+
+
+### Server SSL Certificate
+- Format: `SERVER_CERT="_full_path_and_filename_of_ssl_server_cert_file_"`
+- Default: None
+- Required: No
+
+If SERVER_CERT is not specified, Checks-Out runs without SSL. See https://golang.org/pkg/net/http/#ListenAndServeTLS
+for detailed documentation on the format.
+
+### Server SSL Key
+- Format: `SERVER_KEY="_full_path_and_filename_of_ssl_server_key_file_"`
+- Default: None
+- Required: _Only if `SERVER_CERT` is also specified_
+
+If SERVER_CERT is specified, then SERVER_KEY must be specified as well. See https://golang.org/pkg/net/http/#ListenAndServeTLS
+for detailed documentation on the format.
+
+## Database configuration
+
+### Database Driver
+- Format: `DB_DRIVER=sqlite3|postgres|mysql`
+- Default: None
+- Required: Yes
+
+### Database Datasource
+- Format: `DB_SOURCE="_db_driver_specific_datasource_spec_"`
+- Default: None
+- Required: Yes
+
+Please refer to the documentation for the sqlite3, lib/pq, and mysql drivers for their respective
+datasource specifications for this environment variable.
+
+## Slack integration
+- Format: `SLACK_TARGET_URL="_slack_integration_url"`
+- Default: None
+- Required: No
+
+If `SLACK_TARGET_URL` is not defined, then nothing is sent to Slack; however, a log message stating
+that Slack is not configured is currently generated every time a Slack-notifiable event happens.
+
+## Github integration
+
+### Email Address To Use for Github
+- Format: `GITHUB_EMAIL="_valid_email_address_"`
+- Default: None
+- Required: Yes
+
+### URL for Github API
+- Format: `GITHUB_URL="_protocol_plus_hostname_plus_path_prefix_of_url_"`
+- Default: `https://github.com`
+- Required: No. The default of `https://github.com` is fine unless you are using your own
+GitHub Enterprise server
+
+### Github OAuth2 Client ID
+- Format: `GITHUB_CLIENT="_your_OAuth2_client_id_"`
+- Default: None
+- Required: Yes. You must supply the github OAuth2 client ID issued by the github server you are
+connecting to (github.com or enterprise-hosted github server)
+
+### Github OAuth2 Secret
+- Format: `GITHUB_SECRET="_your_OAuth2_secret_"`
+- Default: None
+- Required: Yes. You must supply the github OAuth2 secret issued by the github server you are
+connecting to (github.com or enterprise-hosted github server)
+
+### Github Scope To Use
+- Format: `GITHUB_SCOPE="_valid_github_scope_specification_"`. Please see github documentation for
+specifics on the specification format
+- Default: `read:org,repo:status,admin:repo_hook`
+- Required: No
+
+### Github testing-only settings
+
+#### Enable Github Integration Tests
+- Format: `GITHUB_TEST_ENABLE=true|false`. Enables the Github integration tests
+- Default: False
+- Required: No
+
+#### Github Integration Tests OAuth2 Token
+- Format: `GITHUB_TEST_TOKEN=__OAUTH2_TOKEN_TO_USE`. This is an OAuth2 token to be sent to the
+github endpoint for github integration test API requests
+- Default: None
+- Required: _only if GITHUB_TEST_ENABLE is true_
+
+## Logging/Debug
+
+### Debug Logging
+- Format: `DEBUG=true|false`
+- Default: false
+- Required: No
+
+If set to `true`, debug-level messages will be included in the log output.
+
+### Checks-Out Sunlight
+- Format: `CHECKS_OUT_SUNLIGHT=true|false`
+- Default: false
+- Required: No
+
+If set to true, exposes endpoints and data that might not be suitable for a live site.
Specifically,
+the following behaviors are enabled:
+- `/api/repos` endpoint is available that will show all repositories managed under Checks-Out
+- `/version` endpoint is available that will show the Checks-Out version
+- Outputs stack trace information over HTTP when errors happen
+- Puts the `X-CHECKS-OUT-VERSION` HTTP header with the Checks-Out version in every response
+
+### Blacklist User Agents from Logging
+- Format: `BLACKLIST_USER_AGENTS="user_agent_1:user_agent_2:...:user_agent_N"`
+- Default: None
+- Required: No
+
+Specify a colon-separated list of user agent strings that the request-logging middleware
+should skip. This allows suppressing logging of user agents, such as the AWS health checker,
+that are generally operational noise.
+
+### How Frequently To Log Operational Statistics
+- Format: `LOG_STATS_PERIOD=_valid_time.ParseDuration()_string_`
+- Default: 0 (Do not periodically log)
+- Required: No
+
+Specify a time duration in a valid format understood by the
+[time.ParseDuration() function](https://golang.org/pkg/time/#ParseDuration) to periodically log activity
+of Checks-Out such as number of commits, approvers, and disapprovers in the specified time period.
+
+## Caching
+
+### Response caching
+- Format: `CACHE_TTL=_time_specified_in_time.Duration_format_`
+- Default: 15 minutes
+- Required: No
+
+Determines the length of time the gin middleware will cache. Default is 15 minutes.
+
+### Github Repo Artifact Caching
+- Format: `LONG_CACHE_TTL=_time_specified_in_time.Duration_format_`
+- Default: 24 hours
+- Required: No
+
+Determines the length of time Checks-Out will cache github artifacts like user information,
+organization members, and team members in memory before going back to the server. Default
+is 24 hours.
+
+## Account Creation Control
+### Limit User Access
+- Format: `LIMIT_USERS=true|false`
+- Default: false
+- Required: No
+
+If enabled, only users listed in the `limit_users` table in the database are
+allowed to create accounts. Any existing accounts will still function, even
+if the user's name is not in the `limit_users` table.
+
+### Limit Organizational Access
+- Format: `LIMIT_ORGS=true|false`
+- Default: false
+- Required: No
+
+If enabled, only users in the organizations listed in the `limit_orgs` table
+in the database are allowed to create accounts. Any existing accounts will
+still function, even if the user is not in any of the orgs named in the
+`limit_orgs` table.
+
+If both `LIMIT_ORGS` and `LIMIT_USERS` are set to `true` then a user can
+either be explicitly named in `limit_users` or can belong to an org named
+in `limit_orgs`.
+
+## Miscellaneous Properties
+
+### Documentation Location
+- Format: `CHECKS_OUT_DOCS_URL=_URL_for_docs_no_closing_slash_`
+- Default: `https://capitalone.github.com/checks-out/docs`
+- Required: No
+
+Provides the base URL for links to the documentation in the UI.
diff --git a/Gopkg.lock b/Gopkg.lock
new file mode 100644
index 0000000..053d297
--- /dev/null
+++ b/Gopkg.lock
@@ -0,0 +1,241 @@
+# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
+ + +[[projects]] + name = "github.com/Sirupsen/logrus" + packages = ["."] + revision = "ba1b36c82c5e05c4f912a88eab0dcd91a171688f" + version = "v0.11.5" + +[[projects]] + name = "github.com/davecgh/go-spew" + packages = ["spew"] + revision = "346938d642f2ec3594ed81d874461961cd0faa76" + version = "v1.1.0" + +[[projects]] + name = "github.com/dgrijalva/jwt-go" + packages = ["."] + revision = "dbeaa9332f19a944acb5736b4456cfcc02140e29" + version = "v3.1.0" + +[[projects]] + branch = "master" + name = "github.com/elazarl/go-bindata-assetfs" + packages = ["."] + revision = "30f82fa23fd844bd5bb1e5f216db87fd77b5eb43" + +[[projects]] + branch = "master" + name = "github.com/franela/goblin" + packages = ["."] + revision = "b962efd4bb2a58a949a16d3fdcd6ffa068d21b03" + +[[projects]] + branch = "master" + name = "github.com/gin-contrib/sse" + packages = ["."] + revision = "22d885f9ecc78bf4ee5d72b937e4bbcdc58e8cae" + +[[projects]] + name = "github.com/gin-gonic/gin" + packages = [".","binding","render"] + revision = "d459835d2b077e44f7c9b453505ee29881d5d12d" + version = "v1.2" + +[[projects]] + name = "github.com/go-sql-driver/mysql" + packages = ["."] + revision = "a0583e0143b1624142adab07e0e97fe106d99561" + version = "v1.3" + +[[projects]] + branch = "master" + name = "github.com/golang/protobuf" + packages = ["proto"] + revision = "130e6b02ab059e7b717a096f397c5b60111cae74" + +[[projects]] + branch = "master" + name = "github.com/google/go-github" + packages = ["github"] + revision = "464f5a91396a85da262abcac0b0a0aaf5b8f3c02" + +[[projects]] + branch = "master" + name = "github.com/google/go-querystring" + packages = ["query"] + revision = "53e6ce116135b80d037921a7fdd5138cf32d7a8a" + +[[projects]] + branch = "master" + name = "github.com/hashicorp/errwrap" + packages = ["."] + revision = "7554cd9344cec97297fa6649b055a8c98c2a1e55" + +[[projects]] + branch = "master" + name = "github.com/hashicorp/go-version" + packages = ["."] + revision = "fc61389e27c71d120f87031ca8c88a3428f372dd" + +[[projects]] + name = "github.com/hjson/hjson-go" + packages = ["."] + revision = "a2e1a590477be94b7c4cefbaa93e01aed412d26a" + version = "v0.2.3" + +[[projects]] + branch = "master" + name = "github.com/ianschenck/envflag" + packages = ["."] + revision = "9111d830d133f952887a936367fb0211c3134f0d" + +[[projects]] + name = "github.com/joho/godotenv" + packages = [".","autoload"] + revision = "a79fa1e548e2c689c241d10173efd51e5d689d5b" + version = "v1.2.0" + +[[projects]] + branch = "master" + name = "github.com/koding/cache" + packages = ["."] + revision = "e8a81b0b3f20f895153311abde1062894b5912d6" + +[[projects]] + branch = "master" + name = "github.com/lib/pq" + packages = [".","oid"] + revision = "456514e2defec52e0cd37f90ccf17ec8b28295e2" + +[[projects]] + name = "github.com/mattn/go-isatty" + packages = ["."] + revision = "0360b2af4f38e8d38c7fce2a9f4e702702d73a39" + version = "v0.0.3" + +[[projects]] + branch = "master" + name = "github.com/mattn/go-jsonpointer" + packages = ["."] + revision = "a39417dc2a7723d036cddc13e026faed7697395e" + +[[projects]] + name = "github.com/mattn/go-sqlite3" + packages = ["."] + revision = "ca5e3819723d8eeaf170ad510e7da1d6d2e94a08" + version = "v1.2.0" + +[[projects]] + branch = "master" + name = "github.com/mspiegel/go-multierror" + packages = ["."] + revision = "065aa648c994f1f905c52e38e9bb23f4c7848737" + +[[projects]] + name = "github.com/pelletier/go-toml" + packages = ["."] + revision = "16398bac157da96aa88f98a2df640c7f32af1da2" + version = "v1.0.1" + +[[projects]] + name = 
"github.com/pkg/errors" + packages = ["."] + revision = "645ef00459ed84a119197bfb8d8205042c6df63d" + version = "v0.8.0" + +[[projects]] + name = "github.com/pmezard/go-difflib" + packages = ["difflib"] + revision = "792786c7400a136282c1664665ae0a8db921c6c2" + version = "v1.0.0" + +[[projects]] + branch = "master" + name = "github.com/rubenv/sql-migrate" + packages = [".","sqlparse"] + revision = "79fe99e24311fa42469fb2ca23eb3f8f065e6155" + +[[projects]] + branch = "master" + name = "github.com/russross/meddler" + packages = ["."] + revision = "9515f6b01c158e6a98939d1ddfddb8a05544e08d" + +[[projects]] + branch = "master" + name = "github.com/stretchr/objx" + packages = ["."] + revision = "1a9d0bb9f541897e62256577b352fdbc1fb4fd94" + +[[projects]] + name = "github.com/stretchr/testify" + packages = ["assert","mock"] + revision = "69483b4bd14f5845b5a1e55bca19e954e827f1d0" + version = "v1.1.4" + +[[projects]] + branch = "master" + name = "github.com/ugorji/go" + packages = ["codec"] + revision = "bdcc60b419d136a85cdf2e7cbcac34b3f1cd6e57" + +[[projects]] + branch = "master" + name = "golang.org/x/net" + packages = ["context","context/ctxhttp"] + revision = "aabf50738bcdd9b207582cbe796b59ed65d56680" + source = "github.com/golang/net" + +[[projects]] + branch = "master" + name = "golang.org/x/oauth2" + packages = [".","internal"] + revision = "bb50c06baba3d0c76f9d125c0719093e315b5b44" + source = "github.com/golang/oauth2" + +[[projects]] + branch = "master" + name = "golang.org/x/sys" + packages = ["unix"] + revision = "8dbc5d05d6edcc104950cc299a1ce6641235bc86" + source = "github.com/golang/sys" + +[[projects]] + name = "google.golang.org/appengine" + packages = ["internal","internal/base","internal/datastore","internal/log","internal/remote_api","internal/urlfetch","urlfetch"] + revision = "150dc57a1b433e64154302bdc40b6bb8aefa313a" + source = "github.com/golang/appengine" + version = "v1.0.0" + +[[projects]] + name = "gopkg.in/go-playground/validator.v8" + packages = ["."] + revision = "5f1438d3fca68893a817e4a66806cea46a9e4ebf" + version = "v8.18.2" + +[[projects]] + name = "gopkg.in/gorp.v1" + packages = ["."] + revision = "c87af80f3cc5036b55b83d77171e156791085e2e" + version = "v1.7.1" + +[[projects]] + branch = "v2" + name = "gopkg.in/mgo.v2" + packages = [".","bson","internal/json","internal/sasl","internal/scram"] + revision = "3f83fa5005286a7fe593b055f0d7771a7dce4655" + +[[projects]] + branch = "v2" + name = "gopkg.in/yaml.v2" + packages = ["."] + revision = "eb3733d160e74a9c7e442f435eb3bea458e1d19f" + +[solve-meta] + analyzer-name = "dep" + analyzer-version = 1 + inputs-digest = "3fbdbca37fb5208da80d2ce2e1fb73b619ba3f4b52ad8c3cf380a06ed23ff8bf" + solver-name = "gps-cdcl" + solver-version = 1 diff --git a/Gopkg.toml b/Gopkg.toml new file mode 100644 index 0000000..f217802 --- /dev/null +++ b/Gopkg.toml @@ -0,0 +1,111 @@ + +# Gopkg.toml example +# +# Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md +# for detailed Gopkg.toml documentation. 
+# +# required = ["github.com/user/thing/cmd/thing"] +# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"] +# +# [[constraint]] +# name = "github.com/user/project" +# version = "1.0.0" +# +# [[constraint]] +# name = "github.com/user/project2" +# branch = "dev" +# source = "github.com/myfork/project2" +# +# [[override]] +# name = "github.com/x/y" +# version = "2.4.0" + + +[[constraint]] + name = "github.com/Sirupsen/logrus" + version = "0.11.0" + +[[constraint]] + name = "github.com/dgrijalva/jwt-go" + version = "3.0.0" + +[[constraint]] + name = "github.com/elazarl/go-bindata-assetfs" + +[[constraint]] + name = "github.com/franela/goblin" + branch = "master" + +[[constraint]] + name = "github.com/gin-gonic/gin" + version = "1.0.0" + +[[constraint]] + name = "github.com/go-sql-driver/mysql" + version = "1.2.0" + +[[constraint]] + name = "github.com/google/go-github" + +[[constraint]] + name = "github.com/hashicorp/go-version" + +[[constraint]] + name = "github.com/hjson/hjson-go" + version = "0.2.2" + +[[constraint]] + name = "github.com/ianschenck/envflag" + +[[constraint]] + name = "github.com/joho/godotenv" + version = "1.0.0" + +[[constraint]] + name = "github.com/koding/cache" + +[[constraint]] + name = "github.com/lib/pq" + +[[constraint]] + name = "github.com/mattn/go-jsonpointer" + +[[constraint]] + name = "github.com/mattn/go-sqlite3" + version = "1.2.0" + +[[constraint]] + name = "github.com/mspiegel/go-multierror" + +[[constraint]] + name = "github.com/pelletier/go-toml" + +[[constraint]] + name = "github.com/pkg/errors" + version = "0.8.0" + +[[constraint]] + name = "github.com/rubenv/sql-migrate" + +[[constraint]] + name = "github.com/russross/meddler" + +[[constraint]] + name = "github.com/stretchr/testify" + version = "1.1.4" + +[[override]] + name = "golang.org/x/net" + source = "github.com/golang/net" + +[[override]] + name = "golang.org/x/oauth2" + source = "github.com/golang/oauth2" + +[[override]] + name = "golang.org/x/sys" + source = "github.com/golang/sys" + +[[override]] + name = "google.golang.org/appengine" + source = "github.com/golang/appengine" diff --git a/MAINTAINERS b/MAINTAINERS new file mode 100644 index 0000000..ca3abad --- /dev/null +++ b/MAINTAINERS @@ -0,0 +1 @@ +github-org repo-self diff --git a/Makefile b/Makefile index cb6e396..44ac34a 100644 --- a/Makefile +++ b/Makefile @@ -1,30 +1,78 @@ -PACKAGES = $(shell go list ./... | grep -v /vendor/) +# Propagate git tag to version identifier +VERSION_TAG:=$(shell git describe --abbrev=0 --tags) +ifeq ($(VERSION_TAG),) +VERSION_TAG:=noversion +endif +# Propagate git revision number to version identifier +VERSION:=${VERSION_TAG}+$(shell git rev-parse --short HEAD) -all: build +# directories and remove vendor directory +# used for running unit tests +NOVENDOR := $(shell go list -e ./... | grep -v /vendor/) + +# files and remove vendor directory, auto generated files, and mock files +# used for static analysis, code linting, and code formatting +NOVENDOR_FILES := $(shell find . -name "*.go" | grep -v /vendor/ | grep -v /mock/ | grep -v "_gen\.go" ) -deps: - go get -u github.com/jteeuwen/go-bindata/... - go get -u github.com/elazarl/go-bindata-assetfs/... - go get -u github.com/vektra/mockery/... 
+all: build -gen: gen_assets gen_mocks gen_migration +update-deps: + glide update -s -u -v -gen_assets: - go generate github.com/lgtmco/lgtm/web/static - go generate github.com/lgtmco/lgtm/web/template +gen: gen-assets gen-migration -gen_mocks: - go generate github.com/lgtmco/lgtm/store - go generate github.com/lgtmco/lgtm/remote +gen-assets: + go generate github.com/capitalone/checks-out/web/static + go generate github.com/capitalone/checks-out/web/template -gen_migration: - go generate github.com/lgtmco/lgtm/store/migration +gen-migration: + go generate github.com/capitalone/checks-out/store/migration build: - go build --ldflags '-extldflags "-static" -X github.com/lgtmco/lgtm/version.VersionDev=$(CI_BUILD_NUMBER)' -o lgtm + @# static linking sqlite seems to break DNS + go build --ldflags '-X github.com/capitalone/checks-out/version.Version=$(VERSION)' -o checks-out + go test -run ^$$ $(NOVENDOR) > /dev/null + +test: vet + @# use redirect instead of tee to preserve exit code + go test -short -p=1 -cover $(NOVENDOR) -v > report.out; \ + code=$$?; \ + cat report.out; \ + grep -e 'FAIL' report.out; \ + exit $${code} + +test-complete: vet + @# use redirect instead of tee to preserve exit code + GITHUB_TEST_ENABLE=1 go test -short -p=1 -cover $(NOVENDOR) -v > report.out; \ + code=$$?; \ + cat report.out; \ + grep -e 'FAIL' report.out; \ + exit $${code} + +.PHONY: test-cover-html + +test-cover-html: + echo "mode: count" > coverage-all.out + echo "Packages: $(NOVENDOR)" + echo "VERSION: $(VERSION)" + $(foreach pkg,$(NOVENDOR), \ + echo ${pkg}; \ + go test -coverprofile=coverage.out -covermode=count ${pkg}; \ + tail -n +2 coverage.out >> coverage-all.out;) + go tool cover -html=coverage-all.out -o coverage.html + +fmt: + for FILE in $(NOVENDOR_FILES); do go fmt $$FILE; done; + +vet: + @echo 'Running go tool vet -shadow' + @for FILE in $(NOVENDOR_FILES); do go tool vet -shadow $$FILE || exit 1; done; + +lint: + for FILE in $(NOVENDOR_FILES); do golint $$FILE; done; -test: - @for PKG in $(PACKAGES); do go test -v -cover -coverprofile $$GOPATH/src/$$PKG/coverage.out $$PKG; done; +clean: + rm -f checks-out report.out -test_mysql: - DATABASE_DRIVER="mysql" DATABASE_DATASOURCE="root@tcp(127.0.0.1:3306)/test?parseTime=true" go test -v -cover github.com/lgtmco/lgtm/store/datastore +test-mysql: + DB_DRIVER="mysql" DB_SOURCE="root@tcp(127.0.0.1:3306)/test?parseTime=true" go test -v -cover github.com/capitalone/checks-out/store/datastore diff --git a/README.md b/README.md index 40b7327..3a18a3e 100644 --- a/README.md +++ b/README.md @@ -1,36 +1,126 @@ -[![Build Status](http://beta.drone.io/api/badges/lgtmco/lgtm/status.svg)](http://beta.drone.io/lgtmco/lgtm) +# Checks-Out -LGTM is a simple pull request approval system using GitHub protected branches and maintainers files. Pull requests are locked and cannot be merged until the minimum number of approvals are received. Project maintainers can indicate their approval by commenting on the pull request and including LGTM (looks good to me) in their approval text. For more information please see the documentation at https://lgtm.co/docs +Checks-Out is a simple pull request approval system using GitHub +protected branches and maintainers files. Pull requests are locked and cannot be +merged until the minimum number of approvals are received. Project maintainers +can indicate their approval by commenting on the pull request and including +"I approve" in their approval text. Checks-Out also provides integration +with GitHub Reviews. 
An accepted GitHub Review is counted as an approval.
+A GitHub Review that requests additional changes blocks the pull request from merging.
-### Status
-
-LGTM is actively used by thousands of repositories. The lack of commit activity in the Git repository is not an indication that the project is abandoned or inactive. The lack of activity is an indication the author views this project as being feature-complete.
+Read the [online documentation](http://www.capitalone.io/checks-out/) to find out more about Checks-Out.
 ### Development
-LGTM is meant to be extremely simple and focused and is largely considered feature-complete. The author is certainly interested in minor improvements and bug fixes, but is not interested in major enhancements. Feel free to fork the project and extend (and even re-brand) as you see fit.
+Checks-Out is a fork of [LGTM](https://github.com/lgtmco/lgtm). Our git repository
+contains the commit history from the upstream project. We are actively seeking
+contributions from the community. If you'd like to contribute, we recommend
+taking a look at the issues page. You can pick up an open issue and work on it,
+submit a bug, or submit a new feature request for feedback.
-### Setup
+### Features
-Please see our [installation guide](https://lgtm.co/docs/install/) to install the official Docker image.
+Checks-Out has several features that distinguish it from the parent LGTM project.
-### Build
+The most popular feature is the ability to specify multiple approval policies.
+Policies are based around the concept of organizations. An organization is
+a set of project maintainers. Various types of thresholds can be configured
+for organizations and boolean conditions can be used to combine policies.
+Policies can be configured to apply to specific file paths and/or git branches.
+Refer to the customization documentation for more information about policies.
-Clone the repository to your Go workspace:
+Checks-Out has optional support for automatic tagging of merges. Tags can be configured
+based on timestamp or semantic versioning.
+
+Checks-Out has optional support for automatic merging of pull requests when
+all status checks have passed.
+
+Checks-Out has changed the default behavior when new commits are added to a pull
+request. By default only comments that have a later timestamp than the
+latest commit are processed by Checks-Out. There is a configuration property to use
+the original LGTM behavior, which is to consider all comments on a pull request.
+
+### Usage
+
+#### .checks-out file
+
+Each repository managed by Checks-Out must have a .checks-out file in the root of the
+repository. This file provides the configuration that Checks-Out uses for the
+repository. The configuration file is described in detail in the
+customization section of the online documentation.
+
+This repository has a .checks-out file that you can use as an example.
+It is likely that you will only need a simple .checks-out file, so you can use
+the following template:
-```sh
-git clone git://github.com/lgtmco/lgtm.git $GOPATH/src/github.com/lgtmco/lgtm
-cd $GOPATH/src/github.com/lgtmco/lgtm
 ```
+approvals:
+[
+    {
+        match: "all[count=1,self=false]"
+    }
+]
+```
+
+#### MAINTAINERS file
+
+Each repository managed by Checks-Out should have a MAINTAINERS file that specifies
+who is allowed to approve pull requests. The format of the file
+is described in the maintainers section of the online
+documentation.
Here is a sample MAINTAINERS file to get you started: + +``` +github-org repo-self +``` + +### Build + +Checks-Out uses the Go [dep](https://github.com/golang/dep) dependency management tool. +Dependencies are not stored in the repository. Run `dep ensure` to install dependencies. Commands to build from source: ```sh -export GO15VENDOREXPERIMENT=1 - -make deps # Download required dependencies -make gen # Generate code make build # Build the binary ``` -If you are having trouble building this project please reference its .drone.yml file. Everything you need to know about building LGTM is defined in that file. +## Contributors + +We welcome your interest in Capital One’s Open Source Projects (the “Project”). Any Contributor to the Project must accept and sign a CLA indicating agreement to the license terms. Except for the license granted in this CLA to Capital One and to recipients of software distributed by Capital One, You reserve all right, title, and interest in and to your Contributions; this CLA does not impact your rights to use your own contributions for any other purpose. + +[Link to Individual CLA](https://docs.google.com/forms/d/19LpBBjykHPox18vrZvBbZUcK6gQTj7qv1O5hCduAZFU/viewform) + +[Link to Corporate CLA](https://docs.google.com/forms/d/e/1FAIpQLSeAbobIPLCVZD_ccgtMWBDAcN68oqbAJBQyDTSAQ1AkYuCp_g/viewform) + +This project adheres to the Capital One [Open Source Code of Conduct](http://www.capitalone.io/codeofconduct/). By participating, you are expected to honor this code. + +### Contribution Guidelines +We encourage any contributions that align with the intent of this project and add more functionality or languages that other developers can make use of. To contribute to the project, please submit a PR for our review. Before contributing any source code, familiarize yourself with the [Apache License 2.0](LICENSE), which controls the licensing for this project. + +## License + +Checks-Out is available under the Apache License 2.0. + +This distribution has a binary dependency on errwrap, which is available under +the Mozilla Public License 2.0 License. The source code of errwrap can be found at +https://github.com/hashicorp/errwrap. + +This distribution has a binary dependency on go-version, which is available under +the Mozilla Public License 2.0 License. The source code of go-version can be found at +https://github.com/hashicorp/go-version. + +This distribution has a binary dependency on go-multierror, which is available under +the Mozilla Public License 2.0 License. The source code of go-multierror can be found +at https://github.com/mspiegel/go-multierror. + +This distribution has a binary dependency on go-sql-driver/mysql, which is available under +the Mozilla Public License 2.0 License. The source code of go-sql-driver/mysql can be found +at https://github.com/go-sql-driver/mysql + +## FAQ + +1\. How is this different from GitHub Reviews? + +Please use [GitHub Reviews](https://help.github.com/articles/about-pull-request-reviews/) if it meets all your requirements. Some significant features in Checks-Out that are not (yet) in GitHub Reviews are: custom +approval policies, different approval policies for different branches and/or file paths, optional auto-merge +when all status checks have passed, optional auto-tagging of merges. 
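+
+For example, branch-specific policies are expressed directly in the .checks-out file. A
+hypothetical configuration (an illustrative sketch only, not a recommendation) that requires
+two approvers on the master branch and one approver everywhere else might look like this:
+
+```
+approvals:
+[
+    {
+        scope:
+        {
+            branches: [ "master" ]
+        }
+        match: "all[count=2,self=false]"
+    }
+    {
+        match: "all[count=1,self=false]"
+    }
+]
+```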
diff --git a/admin/browse.go b/admin/browse.go new file mode 100644 index 0000000..35c6ed9 --- /dev/null +++ b/admin/browse.go @@ -0,0 +1,52 @@ +/* + +SPDX-Copyright: Copyright (c) Capital One Services, LLC +SPDX-License-Identifier: Apache-2.0 +Copyright 2017 Capital One Services, LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and limitations under the License. + +*/ +package admin + +import ( + "context" + "fmt" + + jsonpointer "github.com/mattn/go-jsonpointer" + "github.com/capitalone/checks-out/envvars" + "github.com/capitalone/checks-out/hjson" + "github.com/capitalone/checks-out/model" + "github.com/capitalone/checks-out/remote" +) + +var configFileName = fmt.Sprintf(".%s", envvars.Env.Branding.Name) + +func GetConfigSubtree(c context.Context, r *model.Repo, u *model.User, path string) interface{} { + rcfile, err := remote.GetContents(c, u, r, configFileName) + if err != nil { + return err.Error() + } + var config map[string]interface{} + err = hjson.Unmarshal(rcfile, &config) + if err != nil { + return err.Error() + } + if !jsonpointer.Has(config, path) { + return nil + } + resp, err2 := jsonpointer.Get(config, path) + if err2 != nil { + return err2.Error() + } + return resp +} diff --git a/api/admin.go b/api/admin.go new file mode 100644 index 0000000..329d02d --- /dev/null +++ b/api/admin.go @@ -0,0 +1,146 @@ +/* + +SPDX-Copyright: Copyright (c) Capital One Services, LLC +SPDX-License-Identifier: Apache-2.0 +Copyright 2017 Capital One Services, LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and limitations under the License. 
+ +*/ +package api + +import ( + "errors" + "fmt" + "net/http" + "time" + + "github.com/capitalone/checks-out/admin" + "github.com/capitalone/checks-out/envvars" + "github.com/capitalone/checks-out/exterror" + "github.com/capitalone/checks-out/remote" + "github.com/capitalone/checks-out/router/middleware/session" + "github.com/capitalone/checks-out/shared/httputil" + "github.com/capitalone/checks-out/store" + + "github.com/Sirupsen/logrus" + "github.com/gin-gonic/gin" +) + +func unauthorizedError() error { + status := http.StatusUnauthorized + msg := "user is not an administrator" + return exterror.Create(status, errors.New(msg)) +} + +func adminMissingError() error { + if envvars.Env.Github.AdminOrg == "" { + if envvars.Env.Monitor.Sunlight { + status := http.StatusUnauthorized + msg := "GITHUB_ADMIN_ORG environment variable not defined" + return exterror.Create(status, errors.New(msg)) + } + return unauthorizedError() + } + return nil +} + +func CheckAdmin(c *gin.Context) { + err := adminMissingError() + if err != nil { + c.Error(err) + return + } + user := session.User(c) + orgs, err := remote.GetOrgs(c, user) + if err != nil { + c.Error(err) + return + } + isAdmin := false + for _, o := range orgs { + if o.Login == envvars.Env.Github.AdminOrg { + isAdmin = true + } + } + if !isAdmin { + c.Error(unauthorizedError()) + return + } +} + +func GetAllConfigurationSubtree(c *gin.Context) { + path := c.Param("path") + divisor := envvars.Env.Github.RequestsHz + if divisor < 1 { + divisor = 1 + } + rate := time.Second / time.Duration(divisor) + throttle := time.Tick(rate) + body := make(map[string]interface{}) + repos, err := store.GetAllRepos(c) + if err != nil { + c.Error(err) + return + } + for _, r := range repos { + <-throttle + u, err := store.GetUser(c, r.UserID) + if err != nil { + body[r.Slug] = err.Error() + } else { + body[r.Slug] = admin.GetConfigSubtree(c, r, u, path) + } + } + IndentedJSON(c, 200, body) +} + +func AdminDeleteRepo(c *gin.Context) { + var ( + owner = c.Param("owner") + name = c.Param("repo") + ) + repo, err := store.GetRepoOwnerName(c, owner, name) + if err != nil { + msg := fmt.Sprintf("Getting repository %s", name) + c.Error(exterror.Append(err, msg)) + return + } + err = store.DeleteRepo(c, repo) + if err != nil { + msg := fmt.Sprintf("Deleting repository %s", name) + c.Error(exterror.Append(err, msg)) + return + } + link := fmt.Sprintf( + "%s/hook", + httputil.GetURL(c.Request), + ) + + user, err := store.GetUser(c, repo.UserID) + if err != nil { + msg := fmt.Sprintf("Deleting repository %s", name) + c.Error(exterror.Append(err, msg)) + return + } + + err = remote.DelHook(c, user, repo, link) + if err != nil { + ext := exterror.Convert(err) + if ext.Status < 500 { + logrus.Warnf("Deleting repository hook for %s. %s", name, err) + } else { + logrus.Errorf("Deleting repository hook for %s. %s", name, err) + } + } + c.String(200, "") +} diff --git a/api/config.go b/api/config.go new file mode 100644 index 0000000..d4ca2dd --- /dev/null +++ b/api/config.go @@ -0,0 +1,56 @@ +/* + +SPDX-Copyright: Copyright (c) Capital One Services, LLC +SPDX-License-Identifier: Apache-2.0 +Copyright 2017 Capital One Services, LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and limitations under the License. + +*/ +package api + +import ( + "fmt" + + "github.com/capitalone/checks-out/envvars" + "github.com/capitalone/checks-out/exterror" + "github.com/capitalone/checks-out/router/middleware/session" + "github.com/capitalone/checks-out/snapshot" + "github.com/capitalone/checks-out/store" + + "github.com/gin-gonic/gin" +) + +var configFileName = fmt.Sprintf(".%s", envvars.Env.Branding.Name) + +// GetConfig gets the parsed configuration file. +func GetConfig(c *gin.Context) { + var ( + owner = c.Param("owner") + name = c.Param("repo") + user = session.User(c) + caps = session.Capability(c) + ) + repo, err := store.GetRepoOwnerName(c, owner, name) + if err != nil { + msg := fmt.Sprintf("Getting repository %s", name) + c.Error(exterror.Append(err, msg)) + return + } + config, err := snapshot.GetConfig(c, user, caps, repo) + if err != nil { + msg := fmt.Sprintf("Getting %s configuration for %s", configFileName, name) + c.Error(exterror.Append(err, msg)) + } else { + IndentedJSON(c, 200, config) + } +} diff --git a/api/json.go b/api/json.go new file mode 100644 index 0000000..bf7bccb --- /dev/null +++ b/api/json.go @@ -0,0 +1,42 @@ +/* + +SPDX-Copyright: Copyright (c) Capital One Services, LLC +SPDX-License-Identifier: Apache-2.0 +Copyright 2017 Capital One Services, LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and limitations under the License. + +*/ +package api + +import ( + "bytes" + "encoding/json" + + log "github.com/Sirupsen/logrus" + "github.com/gin-gonic/gin" +) + +func IndentedJSON(c *gin.Context, code int, obj interface{}) { + var buf bytes.Buffer + e := json.NewEncoder(&buf) + e.SetEscapeHTML(false) + e.SetIndent("", " ") + err := e.Encode(obj) + if err != nil { + log.Errorf("JSON encoding error %+v. %s", obj, err) + c.String(500, "JSON encoding error") + } else { + c.Header("Content-Type", "application/json; charset=utf-8") + c.String(code, string(buf.Bytes())) + } +} diff --git a/api/maintainer.go b/api/maintainer.go index a8e7de9..df3e6ac 100644 --- a/api/maintainer.go +++ b/api/maintainer.go @@ -1,13 +1,31 @@ +/* + +SPDX-Copyright: Copyright (c) Brad Rydzewski, project contributors, Capital One Services, LLC +SPDX-License-Identifier: Apache-2.0 +Copyright 2017 Brad Rydzewski, project contributors, Capital One Services, LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and limitations under the License. + +*/ package api import ( - "github.com/lgtmco/lgtm/cache" - "github.com/lgtmco/lgtm/model" - "github.com/lgtmco/lgtm/remote" - "github.com/lgtmco/lgtm/router/middleware/session" - "github.com/lgtmco/lgtm/store" + "fmt" + + "github.com/capitalone/checks-out/exterror" + "github.com/capitalone/checks-out/router/middleware/session" + "github.com/capitalone/checks-out/snapshot" + "github.com/capitalone/checks-out/store" - log "github.com/Sirupsen/logrus" "github.com/gin-gonic/gin" ) @@ -17,72 +35,26 @@ func GetMaintainer(c *gin.Context) { owner = c.Param("owner") name = c.Param("repo") user = session.User(c) + caps = session.Capability(c) ) repo, err := store.GetRepoOwnerName(c, owner, name) if err != nil { - log.Errorf("Error getting repository %s. %s", name, err) - c.AbortWithStatus(404) + msg := fmt.Sprintf("Getting repository %s", name) + c.Error(exterror.Append(err, msg)) return } - file, err := remote.GetContents(c, user, repo, "MAINTAINERS") - if err != nil { - log.Debugf("no MAINTAINERS file for %s. Checking for team members.", repo.Slug) - members, merr := cache.GetMembers(c, user, repo.Owner) - if merr != nil { - log.Errorf("Error getting repository %s. %s", repo.Slug, err) - log.Errorf("Error getting org members %s. %s", repo.Owner, merr) - c.String(404, "MAINTAINERS file not found. %s", err) - return - } else { - log.Printf("found %v members", len(members)) - for _, member := range members { - file = append(file, member.Login...) - file = append(file, '\n') - } - } - } - - maintainer, err := model.ParseMaintainer(file) + _, maintainer, err := snapshot.GetConfigAndMaintainers(c, user, caps, repo) if err != nil { - log.Errorf("Error parsing MAINTAINERS file for %s. %s", repo.Slug, err) - c.String(500, "Error parsing MAINTAINERS file. %s.", err) + msg := fmt.Sprintf("Getting maintainer file for %s", name) + c.Error(exterror.Append(err, msg)) return } - c.JSON(200, maintainer) -} - -// GetMaintainer gets the MAINTAINER configuration file and returns -// a subset of the file with members belonging to the specified organization. -func GetMaintainerOrg(c *gin.Context) { - var ( - owner = c.Param("owner") - name = c.Param("repo") - team = c.Param("org") - user = session.User(c) - ) - repo, err := store.GetRepoOwnerName(c, owner, name) + _, err = maintainer.PersonToOrg() if err != nil { - log.Errorf("Error getting repository %s. %s", name, err) - c.AbortWithStatus(404) + msg := fmt.Sprintf("Expanding maintainer file for %s", name) + c.Error(exterror.Append(err, msg)) return } - file, err := remote.GetContents(c, user, repo, "MAINTAINERS") - if err != nil { - log.Errorf("Error getting repository %s. %s", repo.Slug, err) - c.String(404, "MAINTAINERS file not found. %s", err) - return - } - maintainer, err := model.ParseMaintainer(file) - if err != nil { - log.Errorf("Error parsing MAINTAINERS file for %s. %s", repo.Slug, err) - c.String(500, "Error parsing MAINTAINERS file. %s.", err) - return - } - subset, err := model.FromOrg(maintainer, team) - if err != nil { - log.Errorf("Error getting subset of MAINTAINERS file for %s/%s. 
%s", repo.Slug, team, err) - c.String(500, "Error getting subset of MAINTAINERS file. %s.", err) - return - } - c.JSON(200, subset) + IndentedJSON(c, 200, maintainer) + } diff --git a/api/orgs.go b/api/orgs.go new file mode 100644 index 0000000..3116ba0 --- /dev/null +++ b/api/orgs.go @@ -0,0 +1,259 @@ +/* + +SPDX-Copyright: Copyright (c) Capital One Services, LLC +SPDX-License-Identifier: Apache-2.0 +Copyright 2017 Capital One Services, LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and limitations under the License. + +*/ +package api + +import ( + "fmt" + "net/http" + + "github.com/capitalone/checks-out/exterror" + "github.com/capitalone/checks-out/model" + "github.com/capitalone/checks-out/remote" + "github.com/capitalone/checks-out/router/middleware/session" + "github.com/capitalone/checks-out/set" + "github.com/capitalone/checks-out/shared/httputil" + "github.com/capitalone/checks-out/shared/token" + "github.com/capitalone/checks-out/store" + + "github.com/Sirupsen/logrus" + "github.com/gin-gonic/gin" + "github.com/pkg/errors" +) + +func collectAllOrgs(c *gin.Context) ([]*model.GitHubOrg, set.Set, error) { + user := session.User(c) + orgs, err := remote.GetOrgs(c, user) + if err != nil { + msg := fmt.Sprintf("Getting organizations for user %s", user.Login) + err = exterror.Append(err, msg) + return nil, nil, err + } + var names []string + for _, org := range orgs { + names = append(names, org.Login) + } + enabled, err := store.GetUserEnabledOrgs(c, names) + if err != nil { + msg := fmt.Sprintf("Getting enabled organizations for user %s", user.Login) + err = exterror.Append(err, msg) + return nil, nil, err + } + enabledNames := set.New() + for _, org := range enabled { + enabledNames.Add(org.Owner) + } + return orgs, enabledNames, nil +} + +// GetOrgs gets the list of user organizations. +func GetOrgs(c *gin.Context) { + user := session.User(c) + orgs, enabled, err := collectAllOrgs(c) + if err != nil { + c.Error(err) + return + } + for k := range orgs { + orgs[k].Enabled = enabled.Contains(orgs[k].Login) + } + orgs = append(orgs, &model.GitHubOrg{ + Login: user.Login, + Avatar: user.Avatar, + }) + IndentedJSON(c, 200, orgs) +} + +// GetEnabledOrgs gets the list of organizations monitored by the service. +func GetEnabledOrgs(c *gin.Context) { + _, enabled, err := collectAllOrgs(c) + if err != nil { + c.Error(err) + return + } + orgs := []*model.GitHubOrg{} + for name := range enabled { + orgs = append(orgs, &model.GitHubOrg{ + Login: name, + Enabled: true, + }) + } + IndentedJSON(c, 200, orgs) +} + +// GetTeams gets the list of teams. +func GetTeams(c *gin.Context) { + user := session.User(c) + owner := c.Param("owner") + teams, err := remote.ListTeams(c, user, owner) + if err != nil { + msg := fmt.Sprintf("Getting organizations for user %s", user.Login) + c.Error(exterror.Append(err, msg)) + return + } + IndentedJSON(c, 200, teams) +} + +// PostRepo activates a new repository. 
+func PostOrg(c *gin.Context) { + var ( + owner = c.Param("owner") + user = session.User(c) + ) + + // verify repo doesn't already exist + if _, err := store.GetOrgName(c, owner); err == nil { + err = errors.Errorf("Unable to activate org %s because it is already active.", owner) + inner := exterror.Create(http.StatusConflict, err) + c.Error(inner) + return + } + + org, err := remote.GetOrg(c, user, owner) + if err != nil { + msg := fmt.Sprintf("Looking for org %s on Github", owner) + c.Error(exterror.Append(err, msg)) + return + } + + org.UserID = user.ID + org.Secret = model.Rand() + + // creates a token to authorize the link callback url + t := token.New(token.HookToken, org.Owner) + sig, err := t.Sign(org.Secret) + if err != nil { + c.Error(exterror.Append(err, "Activating Org")) + return + } + + // create the hook callback url + link := fmt.Sprintf( + "%s/hook?access_token=%s", + httputil.GetURL(c.Request), + sig, + ) + err = remote.SetOrgHook(c, user, org, link) + if err != nil { + c.Error(exterror.Append(err, "Creating org hook")) + return + } + + err = store.CreateOrg(c, org) + if err != nil { + c.Error(exterror.Append(err, "Activating the Org")) + return + } + + repos, err := remote.GetOrgRepos(c, user, owner) + if err != nil { + c.Error(exterror.Append(err, "Getting Org Repos")) + return + } + baseURL := httputil.GetURL(c.Request) + for _, v := range repos { + _, err := TurnOnRepoQuiet(c, user, owner, v.Name, baseURL) + if err != nil { + c.Error(exterror.Append(err, "turning on Repo "+v.Name)) + return + } + } + IndentedJSON(c, 200, org) +} + +// AdminDeleteOrg deletes any org and all repos for that org from the database and unregisters their hooks. +// only should be called by an admin user, as it works across accounts. +func AdminDeleteOrg(c *gin.Context) { + var ( + owner = c.Param("owner") + ) + org, err := store.GetOrgName(c, owner) + // verify repo already exists + if err != nil { + err = errors.Errorf("Unable to deactivate org %s because it is not active.", owner) + inner := exterror.Create(http.StatusConflict, err) + c.Error(inner) + return + } + user, err := store.GetUser(c, org.UserID) + if err != nil { + msg := fmt.Sprintf("Deleting org %s", owner) + c.Error(exterror.Append(err, msg)) + return + } + deleteOrgInner(c, owner, user, org) +} + +// DeleteOrg deletes an org and all repos for that org from the database and unregisters their hooks. +func DeleteOrg(c *gin.Context) { + var ( + owner = c.Param("owner") + user = session.User(c) + ) + org, err := store.GetOrgName(c, owner) + // verify repo already exists + if err != nil { + err = errors.Errorf("Unable to deactivate org %s because it is not active.", owner) + inner := exterror.Create(http.StatusConflict, err) + c.Error(inner) + return + } + deleteOrgInner(c, owner, user, org) +} + +func deleteOrgInner(c *gin.Context, owner string, user *model.User, org *model.OrgDb) { + err := store.DeleteOrg(c, org) + if err != nil { + msg := fmt.Sprintf("Deleting org %s", owner) + c.Error(exterror.Append(err, msg)) + return + } + link := fmt.Sprintf( + "%s/hook", + httputil.GetURL(c.Request), + ) + err = remote.DelOrgHook(c, user, org, link) + if err != nil { + ext := exterror.Convert(err) + if ext.Status < 500 { + logrus.Warnf("Deleting org hook for %s. %s", owner, err) + } else { + logrus.Errorf("Deleting org hook for %s. 
%s", owner, err) + } + } + + repos, err := store.GetReposForOrg(c, owner) + if err != nil { + c.Error(exterror.Append(err, "Getting Org Repos")) + return + } + for _, v := range repos { + repo, err := store.GetRepoOwnerName(c, owner, v.Name) + if err != nil { + msg := fmt.Sprintf("Getting repository %s", v.Name) + c.Error(exterror.Append(err, msg)) + return + } + err = TurnOffRepo(c, user, repo, owner, v.Name, httputil.GetURL(c.Request)) + if err != nil { + c.Error(exterror.Append(err, "turning off Repo "+v.Name)) + return + } + } + c.String(200, "") +} diff --git a/api/orgs_test.go b/api/orgs_test.go new file mode 100644 index 0000000..371cb3d --- /dev/null +++ b/api/orgs_test.go @@ -0,0 +1,139 @@ +/* + +SPDX-Copyright: Copyright (c) Capital One Services, LLC +SPDX-License-Identifier: Apache-2.0 +Copyright 2017 Capital One Services, LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and limitations under the License. + +*/ +package api + +import ( + "context" + "encoding/json" + "io/ioutil" + "net/http" + "net/http/httptest" + "strings" + "testing" + + "github.com/capitalone/checks-out/cache" + "github.com/capitalone/checks-out/model" + "github.com/capitalone/checks-out/remote" + "github.com/capitalone/checks-out/router/middleware" + "github.com/capitalone/checks-out/store" + + "github.com/Sirupsen/logrus" + "github.com/franela/goblin" + "github.com/gin-gonic/gin" + "github.com/pkg/errors" +) + +type mockCache struct { + cache.Cache +} + +func (mc *mockCache) Get(s string) (interface{}, error) { + if s == "orgs:octocat" { + return fakeOrgs, nil + } + return nil, errors.New("Unexpected") +} + +type mockCache2 struct { + cache.Cache +} + +func (mc *mockCache2) Get(s string) (interface{}, error) { + if s == "orgs:octocat" { + return nil, errors.New("Not Found") + } + return nil, errors.New("Unexpected") +} + +type mockRemote2 struct { + remote.Remote +} + +func (mr *mockRemote2) GetOrgs(c context.Context, u *model.User) ([]*model.GitHubOrg, error) { + if u == fakeUser { + return nil, errors.New("Not Found") + } + return nil, errors.New("Unexpected") +} + +type mockStore struct { + store.Store +} + +func (ms *mockStore) GetUserEnabledOrgs(names []string) ([]*model.OrgDb, error) { + return nil, nil +} +func TestOrgs(t *testing.T) { + gin.SetMode(gin.TestMode) + logrus.SetOutput(ioutil.Discard) + + g := goblin.Goblin(t) + + g.Describe("Org endpoint", func() { + g.It("Should return the orgs list", func() { + mc := &mockCache{} + ms := &mockStore{} + + e := gin.New() + e.NoRoute(GetOrgs) + e.Use(func(c *gin.Context) { + c.Set("user", fakeUser) + c.Set("cache", mc) + c.Set("store", ms) + }) + + w := httptest.NewRecorder() + r, _ := http.NewRequest("GET", "/", nil) + e.ServeHTTP(w, r) + + // the user is appended to the orgs list so we retrieve a full list of + // accounts to which the user has access. 
+ orgs := append(fakeOrgs, &model.GitHubOrg{ + Login: fakeUser.Login, + }) + + want, _ := json.MarshalIndent(orgs, "", " ") + got := strings.TrimSpace(w.Body.String()) + g.Assert(got).Equal(string(want)) + g.Assert(w.Code).Equal(200) + }) + + g.It("Should return a 500 error", func() { + r := &mockRemote2{} + mc := &mockCache2{} + + e := gin.New() + e.NoRoute(GetOrgs) + e.Use(middleware.ExtError()) + e.Use(func(c *gin.Context) { + c.Set("user", fakeUser) + c.Set("cache", mc) + c.Set("remote", r) + }) + + w := httptest.NewRecorder() + req, _ := http.NewRequest("GET", "/", nil) + e.ServeHTTP(w, req) + + got := strings.TrimSpace(w.Body.String()) + g.Assert(got).Equal("Getting organizations for user octocat. Not Found") + g.Assert(w.Code).Equal(500) + }) + }) +} diff --git a/api/repos.go b/api/repos.go index 724a29d..12a70d3 100644 --- a/api/repos.go +++ b/api/repos.go @@ -1,30 +1,97 @@ +/* + +SPDX-Copyright: Copyright (c) Brad Rydzewski, project contributors, Capital One Services, LLC +SPDX-License-Identifier: Apache-2.0 +Copyright 2017 Brad Rydzewski, project contributors, Capital One Services, LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and limitations under the License. + +*/ package api import ( + "context" "fmt" + "net/http" - "github.com/lgtmco/lgtm/cache" - "github.com/lgtmco/lgtm/model" - "github.com/lgtmco/lgtm/remote" - "github.com/lgtmco/lgtm/router/middleware/session" - "github.com/lgtmco/lgtm/shared/httputil" - "github.com/lgtmco/lgtm/shared/token" - "github.com/lgtmco/lgtm/store" + "github.com/capitalone/checks-out/exterror" + "github.com/capitalone/checks-out/model" + "github.com/capitalone/checks-out/remote" + "github.com/capitalone/checks-out/router/middleware/session" + "github.com/capitalone/checks-out/shared/httputil" + "github.com/capitalone/checks-out/shared/token" + "github.com/capitalone/checks-out/store" "github.com/Sirupsen/logrus" "github.com/gin-gonic/gin" + "github.com/pkg/errors" ) -// GetRepos gets the active repository list. -func GetRepos(c *gin.Context) { +// GetAllReposCount gets the count of all repositories managed by Meow. +func GetAllReposCount(c *gin.Context) { + repos, err := store.GetAllRepos(c) + if err != nil { + msg := "Getting repository list" + c.Error(exterror.Append(err, msg)) + return + } + c.String(200, "%d", len(repos)) +} + +// GetAllRepos gets all public repositories managed by Meow. +func GetAllRepos(c *gin.Context) { + repos, err := store.GetAllRepos(c) + if err != nil { + msg := "Getting repository list" + c.Error(exterror.Append(err, msg)) + return + } + public := []*model.Repo{} + for _, r := range repos { + if !r.Private { + public = append(public, r) + } + } + IndentedJSON(c, 200, public) +} + +// GetUserRepos gets the user's repository list. +func GetUserRepos(c *gin.Context) { + user := session.User(c) + repos, err := remote.GetUserRepos(c, user) + if err != nil { + msg := "Getting remote repository list" + c.Error(exterror.Append(err, msg)) + return + } + repoResponse(c, repos) +} + +// GetOrgRepos gets the active org repository list for the current user. 
+func GetOrgRepos(c *gin.Context) { + var ( + org = c.Param("org") + ) user := session.User(c) - repos, err := cache.GetRepos(c, user) + repos, err := remote.GetOrgRepos(c, user, org) if err != nil { - logrus.Errorf("Error getting remote repository list. %s", err) - c.String(500, "Error getting remote repository list") + msg := "Getting remote repository list" + c.Error(exterror.Append(err, msg)) return } + repoResponse(c, repos) +} +func repoResponse(c *gin.Context, repos []*model.Repo) { // copy the slice since we are going to mutate it and don't // want any nasty data races if the slice came from the cache. repoc := make([]*model.Repo, len(repos)) @@ -32,20 +99,20 @@ func GetRepos(c *gin.Context) { repom, err := store.GetRepoIntersectMap(c, repos) if err != nil { - logrus.Errorf("Error getting active repository list. %s", err) - c.String(500, "Error getting active repository list") + msg := "Getting active repository list" + c.Error(exterror.Append(err, msg)) return } // merges the slice of active and remote repositories favoring // and swapping in local repository information when possible. for i, repo := range repoc { - repo_, ok := repom[repo.Slug] + r, ok := repom[repo.Slug] if ok { - repoc[i] = repo_ + repoc[i] = r } } - c.JSON(200, repoc) + IndentedJSON(c, 200, repoc) } // GetRepo gets the repository by slug. @@ -56,11 +123,11 @@ func GetRepo(c *gin.Context) { ) repo, err := store.GetRepoOwnerName(c, owner, name) if err != nil { - logrus.Errorf("Error getting repository %s. %s", name, err) - c.String(404, "Error getting repository %s", name) + msg := fmt.Sprintf("Getting repository %s", name) + c.Error(exterror.Append(err, msg)) return } - c.JSON(200, repo) + IndentedJSON(c, 200, repo) } // PostRepo activates a new repository. @@ -69,20 +136,82 @@ func PostRepo(c *gin.Context) { owner = c.Param("owner") name = c.Param("repo") user = session.User(c) + caps = session.Capability(c) ) - // verify repo doesn't already exist - if _, err := store.GetRepoOwnerName(c, owner, name); err == nil { - c.AbortWithStatus(409) - c.String(409, "Error activating a repository that is already active.") + baseURL := httputil.GetURL(c.Request) + repo, err := TurnOnRepoFailFast(c, user, caps, owner, name, baseURL) + if err != nil { + c.Error(err) return } - repo, err := remote.GetRepo(c, user, owner, name) + IndentedJSON(c, 200, repo) + +} + +// DeleteRepo deletes a repository configuration. +func DeleteRepo(c *gin.Context) { + var ( + owner = c.Param("owner") + name = c.Param("repo") + user = session.User(c) + ) + repo, err := store.GetRepoOwnerName(c, owner, name) if err != nil { - c.String(404, "Error finding repository in GitHub. %s") + msg := fmt.Sprintf("Getting repository %s", name) + c.Error(exterror.Append(err, msg)) return } + err = TurnOffRepo(c, user, repo, owner, name, httputil.GetURL(c.Request)) + if err != nil { + c.Error(err) + } else { + c.String(200, "") + } +} + +// TurnOnRepoFailFast enables the repository. +// If already activated then return an error. +// Validate the configuration files. +func TurnOnRepoFailFast(c context.Context, user *model.User, caps *model.Capabilities, owner, name, baseURL string) (*model.Repo, error) { + return enableRepo(c, user, caps, owner, name, baseURL, false, true) +} + +// TurnOnRepoQuiet enables the repository. +// If already activated then do nothing. +// Do not validate the configuration files. 
+func TurnOnRepoQuiet(c context.Context, user *model.User, owner, name, baseURL string) (*model.Repo, error) { + return enableRepo(c, user, nil, owner, name, baseURL, true, false) +} + +func enableRepo(c context.Context, user *model.User, caps *model.Capabilities, owner, name, baseURL string, idempotent, validate bool) (*model.Repo, error) { + var repo *model.Repo + var err error + + // if repo already activated + if _, err = store.GetRepoOwnerName(c, owner, name); err == nil { + if idempotent { + return nil, nil + } + err = errors.Errorf("Unable to activate repository %s/%s because it is already active.", owner, name) + err = exterror.Create(http.StatusConflict, err) + return nil, err + } + + repo, err = remote.GetRepo(c, user, owner, name) + if err != nil { + msg := fmt.Sprintf("Looking for repository %s on Github", name) + return nil, exterror.Append(err, msg) + } + + if validate { + _, err = validateRepo(c, repo, owner, name, user, caps) + if err != nil { + return nil, err + } + } + repo.UserID = user.ID repo.Secret = model.Rand() @@ -90,56 +219,49 @@ func PostRepo(c *gin.Context) { t := token.New(token.HookToken, repo.Slug) sig, err := t.Sign(repo.Secret) if err != nil { - c.String(500, "Error activating repository. %s") - return + return nil, exterror.Append(err, "Activating repository") } // create the hook callback url link := fmt.Sprintf( "%s/hook?access_token=%s", - httputil.GetURL(c.Request), + baseURL, sig, ) err = remote.SetHook(c, user, repo, link) if err != nil { - c.String(500, "Error creating hook. %s", err) - return + return nil, exterror.Append(err, "Creating hook") } err = store.CreateRepo(c, repo) if err != nil { - c.String(500, "Error activating the repository. %s", err) - return + return nil, exterror.Append(err, "Activating the repository") } - c.JSON(200, repo) + + return repo, nil } -// DeleteRepo deletes a repository configuration. -func DeleteRepo(c *gin.Context) { - var ( - owner = c.Param("owner") - name = c.Param("repo") - user = session.User(c) - ) - repo, err := store.GetRepoOwnerName(c, owner, name) +func TurnOffRepo(c context.Context, user *model.User, repo *model.Repo, owner, name, baseURL string) error { + + err := store.DeleteRepo(c, repo) if err != nil { - logrus.Errorf("Error getting repository %s. %s", name, err) - c.AbortWithStatus(404) - return - } - err = store.DeleteRepo(c, repo) - if err != nil { - logrus.Errorf("Error deleting repository %s. %s", name, err) - c.AbortWithStatus(500) - return + msg := fmt.Sprintf("Deleting repository %s", name) + return exterror.Append(err, msg) } + link := fmt.Sprintf( "%s/hook", - httputil.GetURL(c.Request), + baseURL, ) err = remote.DelHook(c, user, repo, link) if err != nil { - logrus.Errorf("Error deleting repository hook for %s. %s", name, err) + ext := exterror.Convert(err) + if ext.Status < 500 { + logrus.Warnf("Deleting repository hook for %s. %s", name, err) + } else { + logrus.Errorf("Deleting repository hook for %s. %s", name, err) + } } - c.String(200, "") + + return nil } diff --git a/api/slack.go b/api/slack.go new file mode 100644 index 0000000..9e2b16a --- /dev/null +++ b/api/slack.go @@ -0,0 +1,137 @@ +/* + +SPDX-Copyright: Copyright (c) Capital One Services, LLC +SPDX-License-Identifier: Apache-2.0 +Copyright 2017 Capital One Services, LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and limitations under the License. + +*/ +package api + +import ( + "github.com/gin-gonic/gin" + "github.com/capitalone/checks-out/store" + "github.com/capitalone/checks-out/exterror" + "fmt" + "encoding/json" + "io/ioutil" + "github.com/capitalone/checks-out/router/middleware/session" +) + +type UrlHolder struct { + Url string `json:"url"` +} + +func RegisterSlackUrl(c *gin.Context) { + registerSlackUrlInner(c, "", 201) +} + +func registerSlackUrlInner(c *gin.Context, user string, successCode int) { + var ( + hostname = c.Param("hostname") + ) + var urlHolder UrlHolder + defer func() { + c.Request.Body.Close() + }() + body, err := ioutil.ReadAll(c.Request.Body) + if err != nil { + msg := fmt.Sprintf("Registering slack URL by user %s for host %s ", user, hostname) + c.Error(exterror.Append(err, msg)) + c.Error(err) + } + err = json.Unmarshal(body, &urlHolder) + if err != nil { + msg := fmt.Sprintf("Registering slack URL by user %s for host %s ", user, hostname) + c.Error(exterror.Append(err, msg)) + c.Error(err) + } + + err = store.AddUpdateSlackUrl(c, hostname, user, urlHolder.Url) + if err != nil { + msg := fmt.Sprintf("Registering slack URL %s by user %s for host %s ", urlHolder.Url, user, hostname) + c.Error(exterror.Append(err, msg)) + c.Error(err) + } else { + c.Status(successCode) + } +} + +func getSlackUrlInner(c *gin.Context, user string) { + var ( + hostname = c.Param("hostname") + ) + + url, err := store.GetSlackUrl(c, hostname, user) + if err != nil { + msg := fmt.Sprintf("Getting slack URL by user %s for host %s ", user, hostname) + c.Error(exterror.Append(err, msg)) + c.Error(err) + } else { + if url == "" { + c.Status(404) + } else { + c.IndentedJSON(200, UrlHolder{ + Url: url, + }) + } + } +} + +func deleteSlackUrlInner(c *gin.Context, user string) { + var ( + hostname = c.Param("hostname") + ) + + err := store.DeleteSlackUrl(c, hostname, user) + if err != nil { + msg := fmt.Sprintf("Deleting slack URL by user %s for host %s ", user, hostname) + c.Error(exterror.Append(err, msg)) + c.Error(err) + } else { + c.Status(204) + } +} + +func GetSlackUrl(c *gin.Context) { + getSlackUrlInner(c, "") +} + +func DeleteSlackUrl(c *gin.Context) { + deleteSlackUrlInner(c, "") +} + +func UpdateSlackUrl(c *gin.Context) { + registerSlackUrlInner(c, "", 200) +} + +func UserRegisterSlackUrl(c *gin.Context) { + user := session.User(c) + registerSlackUrlInner(c, user.Login, 201) +} + +func UserGetSlackUrl(c *gin.Context) { + user := session.User(c) + getSlackUrlInner(c, user.Login) + +} + +func UserDeleteSlackUrl(c *gin.Context) { + user := session.User(c) + deleteSlackUrlInner(c, user.Login) +} + +func UserUpdateSlackUrl(c *gin.Context) { + user := session.User(c) + registerSlackUrlInner(c, user.Login, 200) +} diff --git a/api/stats.go b/api/stats.go new file mode 100644 index 0000000..4aa96f5 --- /dev/null +++ b/api/stats.go @@ -0,0 +1,30 @@ +/* + +SPDX-Copyright: Copyright (c) Capital One Services, LLC +SPDX-License-Identifier: Apache-2.0 +Copyright 2017 Capital One Services, LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and limitations under the License. + +*/ +package api + +import ( + "github.com/capitalone/checks-out/usage" + + "github.com/gin-gonic/gin" +) + +func AdminStats(c *gin.Context) { + stats := usage.GetStats() + IndentedJSON(c, 200, stats) +} diff --git a/api/teams.go b/api/teams.go deleted file mode 100644 index bf67657..0000000 --- a/api/teams.go +++ /dev/null @@ -1,26 +0,0 @@ -package api - -import ( - "github.com/lgtmco/lgtm/cache" - "github.com/lgtmco/lgtm/model" - "github.com/lgtmco/lgtm/router/middleware/session" - - "github.com/Sirupsen/logrus" - "github.com/gin-gonic/gin" -) - -// GetTeams gets the list of user teams. -func GetTeams(c *gin.Context) { - user := session.User(c) - teams, err := cache.GetTeams(c, user) - if err != nil { - logrus.Errorf("Error getting teams for user %s. %s", user.Login, err) - c.String(500, "Error getting team list") - return - } - teams = append(teams, &model.Team{ - Login: user.Login, - Avatar: user.Avatar, - }) - c.JSON(200, teams) -} diff --git a/api/teams_test.go b/api/teams_test.go deleted file mode 100644 index f61779e..0000000 --- a/api/teams_test.go +++ /dev/null @@ -1,79 +0,0 @@ -package api - -import ( - "encoding/json" - "fmt" - "io/ioutil" - "net/http" - "net/http/httptest" - "strings" - "testing" - - "github.com/lgtmco/lgtm/model" - - cache "github.com/lgtmco/lgtm/cache/mock" - remote "github.com/lgtmco/lgtm/remote/mock" - - "github.com/Sirupsen/logrus" - "github.com/franela/goblin" - "github.com/gin-gonic/gin" -) - -func TestTeams(t *testing.T) { - gin.SetMode(gin.TestMode) - logrus.SetOutput(ioutil.Discard) - - g := goblin.Goblin(t) - - g.Describe("Team endpoint", func() { - g.It("Should return the team list", func() { - cache := new(cache.Cache) - cache.On("Get", "teams:octocat").Return(fakeTeams, nil).Once() - - e := gin.New() - e.NoRoute(GetTeams) - e.Use(func(c *gin.Context) { - c.Set("user", fakeUser) - c.Set("cache", cache) - }) - - w := httptest.NewRecorder() - r, _ := http.NewRequest("GET", "/", nil) - e.ServeHTTP(w, r) - - // the user is appended to the team list so we retrieve a full list of - // accounts to which the user has access. 
- teams := append(fakeTeams, &model.Team{ - Login: fakeUser.Login, - }) - - want, _ := json.Marshal(teams) - got := strings.TrimSpace(w.Body.String()) - g.Assert(got).Equal(string(want)) - g.Assert(w.Code).Equal(200) - }) - - g.It("Should return a 500 error", func() { - remote := new(remote.Remote) - cache := new(cache.Cache) - cache.On("Get", "teams:octocat").Return(nil, fmt.Errorf("Not Found")).Once() - remote.On("GetTeams", fakeUser).Return(nil, fmt.Errorf("Not Found")).Once() - - e := gin.New() - e.NoRoute(GetTeams) - e.Use(func(c *gin.Context) { - c.Set("user", fakeUser) - c.Set("cache", cache) - c.Set("remote", remote) - }) - - w := httptest.NewRecorder() - r, _ := http.NewRequest("GET", "/", nil) - e.ServeHTTP(w, r) - - got := strings.TrimSpace(w.Body.String()) - g.Assert(got).Equal("Error getting team list") - g.Assert(w.Code).Equal(500) - }) - }) -} diff --git a/api/user_test.go b/api/user_test.go index b47d681..bb9926d 100644 --- a/api/user_test.go +++ b/api/user_test.go @@ -1,3 +1,21 @@ +/* + +SPDX-Copyright: Copyright (c) Brad Rydzewski, project contributors, Capital One Services, LLC +SPDX-License-Identifier: Apache-2.0 +Copyright 2017 Brad Rydzewski, project contributors, Capital One Services, LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and limitations under the License. + +*/ package api import ( @@ -8,9 +26,9 @@ import ( "strings" "testing" - "github.com/Sirupsen/logrus" - "github.com/lgtmco/lgtm/model" + "github.com/capitalone/checks-out/model" + "github.com/Sirupsen/logrus" "github.com/franela/goblin" "github.com/gin-gonic/gin" ) @@ -34,7 +52,7 @@ func TestUsers(t *testing.T) { r, _ := http.NewRequest("GET", "/", nil) e.ServeHTTP(w, r) - want, _ := json.Marshal(fakeUser) + want, _ := json.MarshalIndent(fakeUser, "", " ") got := strings.TrimSpace(w.Body.String()) g.Assert(got).Equal(string(want)) g.Assert(w.Code).Equal(200) @@ -43,8 +61,8 @@ func TestUsers(t *testing.T) { } var ( - fakeUser = &model.User{Login: "octocat"} - fakeTeams = []*model.Team{ + fakeUser = &model.User{Login: "octocat"} + fakeOrgs = []*model.GitHubOrg{ {Login: "drone"}, {Login: "docker"}, } diff --git a/api/users.go b/api/users.go index 02f3f2b..ff597d7 100644 --- a/api/users.go +++ b/api/users.go @@ -1,12 +1,88 @@ +/* + +SPDX-Copyright: Copyright (c) Brad Rydzewski, project contributors, Capital One Services, LLC +SPDX-License-Identifier: Apache-2.0 +Copyright 2017 Brad Rydzewski, project contributors, Capital One Services, LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and limitations under the License. 
+ +*/ package api import ( - "github.com/gin-gonic/gin" + "database/sql" + "net/http" - "github.com/lgtmco/lgtm/router/middleware/session" + "github.com/capitalone/checks-out/exterror" + "github.com/capitalone/checks-out/remote" + "github.com/capitalone/checks-out/router/middleware/session" + "github.com/capitalone/checks-out/store" + + "github.com/gin-gonic/gin" ) // GetUser gets the currently authenticated user. func GetUser(c *gin.Context) { - c.JSON(200, session.User(c)) + IndentedJSON(c, 200, session.User(c)) +} + +// DeleteUser removes the currently authenticated user +// and all associated repositories from the database. +func DeleteUser(c *gin.Context) { + user := session.User(c) + repos, err := store.GetRepoUserId(c, user.ID) + if err != nil { + c.Error(exterror.Append(err, "Deleting user")) + return + } + for _, repo := range repos { + err = store.DeleteRepo(c, repo) + if err != nil { + c.Error(exterror.Append(err, "Deleting user")) + return + } + } + err = store.DeleteUser(c, user) + if err != nil { + c.Error(exterror.Append(err, "Deleting user")) + return + } + err = remote.RevokeAuthorization(c, user) + if err != nil { + c.Error(exterror.Append(err, "Deleting user")) + return + } + c.String(204, "") +} + +func GetReposForUserLogin(c *gin.Context) { + var ( + login = c.Param("user") + ) + user, err := store.GetUserLogin(c, login) + if err != nil { + if err == sql.ErrNoRows { + err = exterror.Create(http.StatusNotFound, err) + } + c.Error(err) + return + } + repos, err := store.GetRepoUserId(c, user.ID) + if err != nil { + if err == sql.ErrNoRows { + err = exterror.Create(http.StatusNotFound, err) + } + c.Error(err) + return + } + IndentedJSON(c, 200, repos) } diff --git a/api/validate.go b/api/validate.go new file mode 100644 index 0000000..e7fd31d --- /dev/null +++ b/api/validate.go @@ -0,0 +1,123 @@ +/* + +SPDX-Copyright: Copyright (c) Capital One Services, LLC +SPDX-License-Identifier: Apache-2.0 +Copyright 2017 Capital One Services, LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and limitations under the License. + +*/ +package api + +import ( + "context" + "fmt" + + "github.com/capitalone/checks-out/exterror" + "github.com/capitalone/checks-out/hjson" + "github.com/capitalone/checks-out/model" + "github.com/capitalone/checks-out/router/middleware/session" + "github.com/capitalone/checks-out/snapshot" + "github.com/capitalone/checks-out/store" + + "github.com/gin-gonic/gin" +) + +const ( + updateMessage = `You are still using a .lgtm file to configure your project. + If you want to take advantage of the new features in Checks-Out, you will need to upgrade to the new file format first. 
+ To upgrade, create a configuration file with the following content and check it into your default branch: ` +) + +var okMessage = fmt.Sprintf(`Your %s and MAINTAINERS files are valid.`, configFileName) + +func validateRepo(c context.Context, repo *model.Repo, owner string, name string, user *model.User, caps *model.Capabilities) (string, error) { + config, _, err := snapshot.GetConfigAndMaintainers(c, user, caps, repo) + if err != nil { + msg := fmt.Sprintf("Error validating %s", name) + return "", exterror.Append(err, msg) + } + err = snapshot.FixSlackTargets(c, config, user.Login) + if err != nil { + msg := fmt.Sprintf("Error validating %s", name) + return "", exterror.Append(err, msg) + } + if config.IsOld { + //this is a bit of a hack, but we're going to strip out the bits of the config that + //were added to the contents present in the configuration file + config.Approvals[0].Scope = nil + config.Approvals[0].AntiMatch = nil + config.Approvals[0].AuthorMatch = nil + config.Maintainers = model.MaintainersConfig{} + options := hjson.DefaultOptions() + options.OmitEmptyStructs = true + out, err := hjson.MarshalWithOptions(config, options) + if err != nil { + msg := fmt.Sprintf("Error converting .lgtm file for %s", name) + return "", exterror.Append(err, msg) + } + return string(out), nil + } + return "", nil +} + +func validateAndGetFile(c context.Context, owner string, name string, user *model.User, caps *model.Capabilities) (string, error) { + repo, err := store.GetRepoOwnerName(c, owner, name) + if err != nil { + msg := fmt.Sprintf("Getting repository %s", name) + return "", exterror.Append(err, msg) + + } + return validateRepo(c, repo, owner, name, user, caps) +} + +// Validate validates the configuration and MAINTAINER files. +func Validate(c *gin.Context) { + var ( + owner = c.Param("owner") + name = c.Param("repo") + user = session.User(c) + caps = session.Capability(c) + ) + file, err := validateAndGetFile(c, owner, name, user, caps) + if err != nil { + c.Error(err) + return + } + message := okMessage + if file != "" { + message = updateMessage + } + c.JSON(200, map[string]string{ + "message": message, + "file": file, + }) +} + +func Convert(c *gin.Context) { + var ( + owner = c.Param("owner") + name = c.Param("repo") + user = session.User(c) + caps = session.Capability(c) + ) + file, err := validateAndGetFile(c, owner, name, user, caps) + if err != nil { + c.Error(err) + return + } + status := 200 + if len(file) == 0 { + status = 204 + } + c.String(status, file) +} diff --git a/cache/cache.go b/cache/cache.go index 3c4e8ed..9fa0415 100644 --- a/cache/cache.go +++ b/cache/cache.go @@ -1,12 +1,29 @@ -package cache +/* + +SPDX-Copyright: Copyright (c) Brad Rydzewski, project contributors, Capital One Services, LLC +SPDX-License-Identifier: Apache-2.0 +Copyright 2017 Brad Rydzewski, project contributors, Capital One Services, LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 -//go:generate mockery -name Cache -output mock -case=underscore +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and limitations under the License. 
+ +*/ +package cache import ( + "context" "time" "github.com/koding/cache" - "golang.org/x/net/context" + "github.com/capitalone/checks-out/envvars" ) type Cache interface { @@ -33,3 +50,7 @@ func Default() Cache { func NewTTL(t time.Duration) Cache { return cache.NewMemoryWithTTL(t) } + +var ( + Longterm = cache.NewMemoryWithTTL(time.Duration(envvars.Env.Cache.LongCacheTTL)) +) diff --git a/cache/cache_test.go b/cache/cache_test.go index e984da2..1cec762 100644 --- a/cache/cache_test.go +++ b/cache/cache_test.go @@ -1,3 +1,21 @@ +/* + +SPDX-Copyright: Copyright (c) Brad Rydzewski, project contributors, Capital One Services, LLC +SPDX-License-Identifier: Apache-2.0 +Copyright 2017 Brad Rydzewski, project contributors, Capital One Services, LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and limitations under the License. + +*/ package cache import ( diff --git a/cache/context.go b/cache/context.go index 5fe513d..f62126e 100644 --- a/cache/context.go +++ b/cache/context.go @@ -1,7 +1,7 @@ package cache import ( - "golang.org/x/net/context" + "context" ) const key = "cache" diff --git a/cache/helper.go b/cache/helper.go deleted file mode 100644 index 7cee5d1..0000000 --- a/cache/helper.go +++ /dev/null @@ -1,95 +0,0 @@ -package cache - -import ( - "fmt" - - "golang.org/x/net/context" - - "github.com/lgtmco/lgtm/model" - "github.com/lgtmco/lgtm/remote" -) - -// GetRepos returns the list of user repositories from the cache -// associated with the current context. -func GetRepos(c context.Context, user *model.User) ([]*model.Repo, error) { - key := fmt.Sprintf("repos:%s", - user.Login, - ) - // if we fetch from the cache we can return immediately - val, err := FromContext(c).Get(key) - if err == nil { - return val.([]*model.Repo), nil - } - // else we try to grab from the remote system and - // populate our cache. - repos, err := remote.GetRepos(c, user) - if err != nil { - return nil, err - } - FromContext(c).Set(key, repos) - return repos, nil -} - -// GetTeams returns the list of user teams from the cache -// associated with the current context. -func GetTeams(c context.Context, user *model.User) ([]*model.Team, error) { - key := fmt.Sprintf("teams:%s", - user.Login, - ) - // if we fetch from the cache we can return immediately - val, err := FromContext(c).Get(key) - if err == nil { - return val.([]*model.Team), nil - } - // else we try to grab from the remote system and - // populate our cache. - teams, err := remote.GetTeams(c, user) - if err != nil { - return nil, err - } - FromContext(c).Set(key, teams) - return teams, nil -} - -// GetPerm returns the user permissions repositories from the cache -// associated with the current repository. -func GetPerm(c context.Context, user *model.User, owner, name string) (*model.Perm, error) { - key := fmt.Sprintf("perms:%s:%s/%s", - user.Login, - owner, - name, - ) - // if we fetch from the cache we can return immediately - val, err := FromContext(c).Get(key) - if err == nil { - return val.(*model.Perm), nil - } - // else we try to grab from the remote system and - // populate our cache. 
- perm, err := remote.GetPerm(c, user, owner, name) - if err != nil { - return nil, err - } - FromContext(c).Set(key, perm) - return perm, nil -} - -// GetMembers returns the team members from the cache. -func GetMembers(c context.Context, user *model.User, team string) ([]*model.Member, error) { - key := fmt.Sprintf("members:%s", - team, - ) - // if we fetch from the cache we can return immediately - val, err := FromContext(c).Get(key) - if err == nil { - return val.([]*model.Member), nil - } - // else we try to grab from the remote system and - // populate our cache. - members, err := remote.GetMembers(c, user, team) - if err != nil { - return nil, err - } - FromContext(c).Set(key, members) - return members, nil -} diff --git a/cache/helper_test.go b/cache/helper_test.go deleted file mode 100644 index c0478e1..0000000 --- a/cache/helper_test.go +++ /dev/null @@ -1,157 +0,0 @@ -package cache - -import ( - "errors" - "fmt" - "testing" - - "github.com/lgtmco/lgtm/model" - "github.com/lgtmco/lgtm/remote" - "github.com/lgtmco/lgtm/remote/mock" - - "github.com/franela/goblin" - "github.com/gin-gonic/gin" -) - -func TestHelper(t *testing.T) { - - g := goblin.Goblin(t) - - g.Describe("Cache helpers", func() { - - var c *gin.Context - var r *mock.Remote - - g.BeforeEach(func() { - c = new(gin.Context) - ToContext(c, Default()) - - r = new(mock.Remote) - remote.ToContext(c, r) - }) - - g.It("Should get permissions from remote", func() { - r.On("GetPerm", fakeUser, fakeRepo.Owner, fakeRepo.Name).Return(fakePerm, nil).Once() - p, err := GetPerm(c, fakeUser, fakeRepo.Owner, fakeRepo.Name) - g.Assert(p).Equal(fakePerm) - g.Assert(err).Equal(nil) - }) - - g.It("Should get permissions from cache", func() { - key := fmt.Sprintf("perms:%s:%s/%s", - fakeUser.Login, - fakeRepo.Owner, - fakeRepo.Name, - ) - - Set(c, key, fakePerm) - r.On("GetPerm", fakeUser, fakeRepo.Owner, fakeRepo.Name).Return(nil, fakeErr).Once() - p, err := GetPerm(c, fakeUser, fakeRepo.Owner, fakeRepo.Name) - g.Assert(p).Equal(fakePerm) - g.Assert(err).Equal(nil) - }) - - g.It("Should get permissions error", func() { - r.On("GetPerm", fakeUser, fakeRepo.Owner, fakeRepo.Name).Return(nil, fakeErr).Once() - p, err := GetPerm(c, fakeUser, fakeRepo.Owner, fakeRepo.Name) - g.Assert(p == nil).IsTrue() - g.Assert(err).Equal(fakeErr) - }) - - g.It("Should set and get repos", func() { - - r.On("GetRepos", fakeUser).Return(fakeRepos, nil).Once() - p, err := GetRepos(c, fakeUser) - g.Assert(p).Equal(fakeRepos) - g.Assert(err).Equal(nil) - }) - - g.It("Should get repos", func() { - key := fmt.Sprintf("repos:%s", - fakeUser.Login, - ) - - Set(c, key, fakeRepos) - r.On("GetRepos", fakeUser).Return(nil, fakeErr).Once() - p, err := GetRepos(c, fakeUser) - g.Assert(p).Equal(fakeRepos) - g.Assert(err).Equal(nil) - }) - - g.It("Should get repos error", func() { - r.On("GetRepos", fakeUser).Return(nil, fakeErr).Once() - p, err := GetRepos(c, fakeUser) - g.Assert(p == nil).IsTrue() - g.Assert(err).Equal(fakeErr) - }) - - g.It("Should set and get teams", func() { - r.On("GetTeams", fakeUser).Return(fakeTeams, nil).Once() - p, err := GetTeams(c, fakeUser) - g.Assert(p).Equal(fakeTeams) - g.Assert(err).Equal(nil) - }) - - g.It("Should get teams", func() { - key := fmt.Sprintf("teams:%s", - fakeUser.Login, - ) - - Set(c, key, fakeTeams) - r.On("GetTeams", fakeUser).Return(nil, fakeErr).Once() - p, err := GetTeams(c, fakeUser) - g.Assert(p).Equal(fakeTeams) - g.Assert(err).Equal(nil) - }) - - g.It("Should get team error", func() { - r.On("GetTeams", 
fakeUser).Return(nil, fakeErr).Once() - p, err := GetTeams(c, fakeUser) - g.Assert(p == nil).IsTrue() - g.Assert(err).Equal(fakeErr) - }) - - g.It("Should set and get members", func() { - r.On("GetMembers", fakeUser, "drone").Return(fakeMembers, nil).Once() - p, err := GetMembers(c, fakeUser, "drone") - g.Assert(p).Equal(fakeMembers) - g.Assert(err).Equal(nil) - }) - - g.It("Should get members", func() { - key := "members:drone" - - Set(c, key, fakeMembers) - r.On("GetMembers", fakeUser, "drone").Return(nil, fakeErr).Once() - p, err := GetMembers(c, fakeUser, "drone") - g.Assert(p).Equal(fakeMembers) - g.Assert(err).Equal(nil) - }) - - g.It("Should get member error", func() { - r.On("GetMembers", fakeUser, "drone").Return(nil, fakeErr).Once() - p, err := GetMembers(c, fakeUser, "drone") - g.Assert(p == nil).IsTrue() - g.Assert(err).Equal(fakeErr) - }) - }) -} - -var ( - fakeErr = errors.New("Not Found") - fakeUser = &model.User{Login: "octocat"} - fakePerm = &model.Perm{Pull: true, Push: true, Admin: true} - fakeRepo = &model.Repo{Owner: "octocat", Name: "Hello-World"} - fakeRepos = []*model.Repo{ - {Owner: "octocat", Name: "Hello-World"}, - {Owner: "octocat", Name: "hello-world"}, - {Owner: "octocat", Name: "Spoon-Knife"}, - } - fakeTeams = []*model.Team{ - {Login: "drone"}, - {Login: "docker"}, - } - fakeMembers = []*model.Member{ - {Login: "octocat"}, - } -) diff --git a/cache/mock/cache.go b/cache/mock/cache.go deleted file mode 100644 index 3560768..0000000 --- a/cache/mock/cache.go +++ /dev/null @@ -1,41 +0,0 @@ -package mock - -import "github.com/stretchr/testify/mock" - -type Cache struct { - mock.Mock -} - -func (_m *Cache) Get(_a0 string) (interface{}, error) { - ret := _m.Called(_a0) - - var r0 interface{} - if rf, ok := ret.Get(0).(func(string) interface{}); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(interface{}) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(string) error); ok { - r1 = rf(_a0) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} -func (_m *Cache) Set(_a0 string, _a1 interface{}) error { - ret := _m.Called(_a0, _a1) - - var r0 error - if rf, ok := ret.Get(0).(func(string, interface{}) error); ok { - r0 = rf(_a0, _a1) - } else { - r0 = ret.Error(0) - } - - return r0 -} diff --git a/cmd/integration/main.go b/cmd/integration/main.go new file mode 100644 index 0000000..4bb3751 --- /dev/null +++ b/cmd/integration/main.go @@ -0,0 +1,386 @@ +/* + +SPDX-Copyright: Copyright (c) Capital One Services, LLC +SPDX-License-Identifier: Apache-2.0 +Copyright 2017 Capital One Services, LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and limitations under the License. 
+ +*/ +package main + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io/ioutil" + "math/rand" + "net/http" + "net/url" + "os" + "time" + + "github.com/capitalone/checks-out/envvars" + gh "github.com/capitalone/checks-out/remote/github" + + log "github.com/Sirupsen/logrus" + "github.com/google/go-github/github" + "golang.org/x/oauth2" +) + +var ( + configFileName = fmt.Sprintf(".%s", envvars.Env.Branding.Name) + addr = envvars.Env.Server.Addr + githubToken = envvars.Env.Test.GithubToken + serviceToken string + githubAPI string + ctx context.Context +) + +var configText = ` +approvals: +[ + { + name: "master" + match: "universe[count=1,self=true]" + } +] +merge: +{ + enable: true +} +` + +func init() { + ctx = context.Background() + githubParams := gh.Get() + githubAPI = githubParams.API + if len(addr) == 0 { + log.Fatal("SERVER_ADDR environment variable must be defined") + } + if len(githubToken) == 0 { + log.Fatal("GITHUB_TEST_TOKEN environment variable must be defined") + } +} + +func createGitHubClient() *github.Client { + var err error + ts := oauth2.StaticTokenSource( + &oauth2.Token{AccessToken: githubToken}, + ) + tc := oauth2.NewClient(oauth2.NoContext, ts) + client := github.NewClient(tc) + client.BaseURL, err = url.Parse(githubAPI) + if err != nil { + log.Fatalf("Unable to parse url '%s': %s", githubAPI, err.Error()) + } + return client +} + +func randomString(strlen int) string { + rand.Seed(time.Now().UTC().UnixNano()) + const chars = "abcdefghijklmnopqrstuvwxyz0123456789" + result := make([]byte, strlen) + for i := 0; i < strlen; i++ { + result[i] = chars[rand.Intn(len(chars))] + } + return string(result) +} + +func createRepo(client *github.Client) *github.Repository { + name := randomString(16) + repo := &github.Repository{ + Name: github.String(name), + Private: github.Bool(false), + AutoInit: github.Bool(true), + } + repo, _, err := client.Repositories.Create(ctx, "", repo) + if err != nil { + log.Error("Unable to create git repository: ", err) + return nil + } + return repo +} + +func createCommit(client *github.Client, repo *github.Repository, branch *github.Reference, filename string, contents string) *github.Reference { + blob, _, err := client.Git.CreateBlob(ctx, *repo.Owner.Login, *repo.Name, &github.Blob{ + Content: github.String(contents), + Size: github.Int(len(contents)), + Encoding: github.String("utf-8"), + }) + if err != nil { + log.Error("Unable to create blob ", filename, err) + return nil + } + tree, _, err := client.Git.CreateTree(ctx, *repo.Owner.Login, *repo.Name, *branch.Object.SHA, []github.TreeEntry{{ + Path: github.String(filename), + Mode: github.String("100644"), + Type: github.String("blob"), + SHA: blob.SHA, + }}) + if err != nil { + log.Error("Unable to get create tree ", filename, err) + return nil + } + commit, _, err := client.Git.CreateCommit(ctx, *repo.Owner.Login, *repo.Name, &github.Commit{ + Message: github.String(fmt.Sprintf("%s commit", filename)), + Tree: tree, + Parents: []github.Commit{{ + SHA: branch.Object.SHA, + }, + }, + }) + if err != nil { + log.Error("Unable to get create commit ", filename, err) + return nil + } + branch.Object.SHA = commit.SHA + branch, _, err = client.Git.UpdateRef(ctx, *repo.Owner.Login, *repo.Name, branch, false) + if err != nil { + log.Error("Unable to update reference ", filename, err) + return nil + } + return branch +} + +func initServiceToken() bool { + route := fmt.Sprintf("/login?access_token=%s", githubToken) + resp, err := http.Post(addr+route, "", nil) + if err != nil { + 
log.Error("Unable to send POST /login to server ", err) + return false + } + if resp.StatusCode != 200 { + log.Errorf("POST /login to server did not return 200: %v", resp) + return false + } + defer resp.Body.Close() + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + log.Error("Unable to read POST /login bytes from server ", err) + return false + } + var token struct { + Access string `json:"access_token"` + } + err = json.NewDecoder(bytes.NewReader(body)).Decode(&token) + if err != nil { + log.Error("Unable to read POST /login response from server ", err) + return false + } + serviceToken = token.Access + return true +} + +func registerRepo(client *http.Client, repo *github.Repository) bool { + route := fmt.Sprintf("/api/repos/%s/%s", *repo.Owner.Login, *repo.Name) + req, err := http.NewRequest("POST", addr+route, nil) + if err != nil { + log.Error("Unable to create POST /api/repos route to server ", err) + return false + } + req.Header.Add("Authorization", fmt.Sprintf("Bearer %s", serviceToken)) + resp, err := client.Do(req) + if err != nil { + log.Error("Unable to send POST /api/repos to server ", err) + return false + } + if resp.StatusCode != 200 { + log.Errorf("POST /api/repos to server did not return 200: %v", resp) + return false + } + return true +} + +func validateRepo(client *http.Client, repo *github.Repository) bool { + route := fmt.Sprintf("/api/repos/%s/%s/validate", *repo.Owner.Login, *repo.Name) + req, err := http.NewRequest("GET", addr+route, nil) + if err != nil { + log.Error("Unable to create GET /api/repos/validate route to server ", err) + return false + } + req.Header.Add("Authorization", fmt.Sprintf("Bearer %s", serviceToken)) + resp, err := client.Do(req) + if err != nil { + log.Error("Unable to send GET /api/repos/validate to server ", err) + return false + } + if resp.StatusCode != 204 { + log.Errorf("GET /api/repos/validate to server did not return 204: %v", resp) + return false + } + return true +} + +func createPR(client *github.Client, repo *github.Repository) bool { + ref, _, err := client.Git.GetRef(ctx, *repo.Owner.Login, *repo.Name, "refs/heads/master") + if err != nil { + log.Error("Unable to get master head ", err) + return false + } + branch, _, err := client.Git.CreateRef(ctx, *repo.Owner.Login, *repo.Name, &github.Reference{ + Ref: github.String("refs/heads/foobar"), + Object: ref.Object, + }) + if err != nil { + log.Error("Unable to create branch ", err) + return false + } + if createCommit(client, repo, branch, "foobar", "") == nil { + return false + } + _, _, err = client.PullRequests.Create(ctx, *repo.Owner.Login, *repo.Name, &github.NewPullRequest{ + Title: github.String("Adds foobar feature"), + Head: github.String("foobar"), + Base: github.String("master"), + }) + if err != nil { + log.Error("Unable to create pull request ", err) + return false + } + return true +} + +func statusPR(client *http.Client, repo *github.Repository, expect bool) bool { + route := fmt.Sprintf("/api/pr/%s/%s/1/status", *repo.Owner.Login, *repo.Name) + req, err := http.NewRequest("GET", addr+route, nil) + if err != nil { + log.Error("Unable to create GET /api/pr/repos/status route to server ", err) + return false + } + req.Header.Add("Authorization", fmt.Sprintf("Bearer %s", serviceToken)) + resp, err := client.Do(req) + if err != nil { + log.Error("Unable to send GET /api/pr/repos/status to server ", err) + return false + } + if resp.StatusCode != 200 { + log.Errorf("GET /api/pr/repos/status to server did not return 200: %v", resp) + return false + } + defer 
resp.Body.Close() + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + log.Error("Unable to read GET /api/pr/repos/status bytes from server ", err) + return false + } + var status struct { + Approved bool `json:"approved"` + } + err = json.NewDecoder(bytes.NewReader(body)).Decode(&status) + if err != nil { + log.Error("Unable to read GET /api/pr/repos/status response from server ", err) + return false + } + if status.Approved != expect { + log.Errorf("pull request expected %t status received %t", expect, status.Approved) + return false + } + return true +} + +func addComment(client *github.Client, repo *github.Repository) bool { + _, _, err := client.Issues.CreateComment(ctx, *repo.Owner.Login, *repo.Name, 1, &github.IssueComment{ + Body: github.String("I approve"), + }) + if err != nil { + log.Error("Unable to create GitHub comment ", err) + return false + } + return true +} + +func unregisterRepo(webclient *http.Client, repo *github.Repository) { + if len(serviceToken) == 0 { + return + } + route := fmt.Sprintf("/api/repos/%s/%s", *repo.Owner.Login, *repo.Name) + req, err := http.NewRequest("DELETE", addr+route, nil) + if err != nil { + log.Error("Unable to create DELETE /api/repos route to server ", err) + return + } + req.Header.Add("Authorization", fmt.Sprintf("Bearer %s", serviceToken)) + resp, err := webclient.Do(req) + if err != nil { + log.Error("Unable to send DELETE /api/repos to server ", err) + return + } + if resp.StatusCode != 200 { + log.Errorf("DELETE /api/repos to server did not return 200: %v", resp) + return + } +} + +func deleteRepo(ghclient *github.Client, repo *github.Repository) { + _, err := ghclient.Repositories.Delete(ctx, *repo.Owner.Login, *repo.Name) + if err != nil { + log.Error("Unable to delete git repository ", err) + return + } +} + +func cleanupTests(ghclient *github.Client, webclient *http.Client, repo *github.Repository) { + unregisterRepo(webclient, repo) + deleteRepo(ghclient, repo) +} + +func runTests() int { + ghclient := createGitHubClient() + webclient := &http.Client{} + repo := createRepo(ghclient) + if repo == nil { + return 1 + } + defer cleanupTests(ghclient, webclient, repo) + ref, _, err := ghclient.Git.GetRef(ctx, *repo.Owner.Login, *repo.Name, "refs/heads/master") + if err != nil { + log.Error("Unable to get master head ", err) + return 1 + } + ref = createCommit(ghclient, repo, ref, configFileName, configText) + if ref == nil { + return 1 + } + ref = createCommit(ghclient, repo, ref, "MAINTAINERS", "") + if ref == nil { + return 1 + } + if !initServiceToken() { + return 1 + } + if !registerRepo(webclient, repo) { + return 1 + } + if !validateRepo(webclient, repo) { + return 1 + } + if !createPR(ghclient, repo) { + return 1 + } + if !statusPR(webclient, repo, false) { + return 1 + } + if !addComment(ghclient, repo) { + return 1 + } + if !statusPR(webclient, repo, true) { + return 1 + } + log.Info("All tests passed.") + return 0 +} + +func main() { + os.Exit(runTests()) +} diff --git a/docs/.gitkeep b/docs/.gitkeep new file mode 100644 index 0000000..e69de29 diff --git a/docs/api/index.html b/docs/api/index.html new file mode 100644 index 0000000..b40af2b --- /dev/null +++ b/docs/api/index.html @@ -0,0 +1,680 @@ + + + + + REST API · checks-out + + + + + + + + + + + + + + + + +
REST API

The following REST APIs are exposed by checks-out.

Logging in

checks-out relies on Github’s OAuth2 support for authentication. To log in to the REST API, a user must go to /settings/tokens on their Github server (for public Github, this would be https://github.com/settings/tokens) and create a Personal Access Token. The token should have the permissions admin:org_hook, read:org, repo:status. Store this token in a safe place; you cannot get it again from Github.

Endpoint: /login
Method: POST
Query parameters: access_token=PERSONAL_ACCESS_TOKEN
Body: none

Success: returns an AuthToken JSON structure, status = 200
Failure: returns status 401 (unauthorized)
Tokens are currently set to expire after 72 hours.

AuthToken JSON structure

{
    "access_token": "JWT_TOKEN",
    "expires_in": SECONDS_UNTIL_EXPIRY
}
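As an illustration, here is a minimal Go sketch of the login exchange. The endpoint, query parameter, and AuthToken fields come from the description above; the base URL, helper name, and error handling are assumptions made for the example.

package main

import (
	"encoding/json"
	"fmt"
	"log"
	"net/http"
)

// AuthToken mirrors the JSON structure returned by POST /login.
type AuthToken struct {
	AccessToken string `json:"access_token"`
	ExpiresIn   int64  `json:"expires_in"`
}

// login exchanges a GitHub Personal Access Token for a checks-out JWT.
func login(baseURL, personalAccessToken string) (*AuthToken, error) {
	url := fmt.Sprintf("%s/login?access_token=%s", baseURL, personalAccessToken)
	resp, err := http.Post(url, "", nil)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	// The service returns 401 when the personal access token is rejected.
	if resp.StatusCode != http.StatusOK {
		return nil, fmt.Errorf("login failed with status %d", resp.StatusCode)
	}
	var token AuthToken
	if err := json.NewDecoder(resp.Body).Decode(&token); err != nil {
		return nil, err
	}
	return &token, nil
}

func main() {
	// The base URL and token below are placeholders.
	token, err := login("https://checks-out.example.com", "PERSONAL_ACCESS_TOKEN")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("JWT received, expires in %d seconds\n", token.ExpiresIn)
}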
Authorization

After calling the initial login, you will get back a JWT Token. This token must be supplied to all other authenticated requests using one of the following methods:

  • a header with key Authorization and value Bearer JWT_TOKEN
  • a query parameter with key access_token and value JWT_TOKEN
  • a cookie with key user_sess and value JWT_TOKEN

Get Current User Info

Endpoint: /api/user
Method: GET

Success: returns a GetCurrentUser JSON structure

GetCurrentUser JSON Structure

{
    "id": ID,
    "login": "USER_NAME",
    "avatar": "GITHUB_AVATAR_URL_FOR_USER"
}
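A sketch of an authenticated call using the Authorization header method, fetching the current user. The struct fields mirror the GetCurrentUser structure above; the JWT is assumed to come from the login call, and the helper name is illustrative.

package client

import (
	"encoding/json"
	"fmt"
	"net/http"
)

// CurrentUser mirrors the GetCurrentUser JSON structure.
type CurrentUser struct {
	ID     int64  `json:"id"`
	Login  string `json:"login"`
	Avatar string `json:"avatar"`
}

// getCurrentUser calls GET /api/user with the JWT in the Authorization header.
func getCurrentUser(baseURL, jwt string) (*CurrentUser, error) {
	req, err := http.NewRequest("GET", baseURL+"/api/user", nil)
	if err != nil {
		return nil, err
	}
	// Any of the three methods listed above works; the header form is shown here.
	req.Header.Set("Authorization", "Bearer "+jwt)
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return nil, fmt.Errorf("GET /api/user returned status %d", resp.StatusCode)
	}
	var user CurrentUser
	if err := json.NewDecoder(resp.Body).Decode(&user); err != nil {
		return nil, err
	}
	return &user, nil
}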

Delete Current User

This call removes the currently logged-in user from checks-out. If you do this, you will need to recreate your account.

Endpoint: /api/user
Method: DELETE

Success: returns a 204 (deleted) status code

Get Orgs for Current User

Returns all orgs that the current user belongs to.

Endpoint: /api/user/orgs
Method: GET

Success: returns a JSON list of GetOrg JSON structures

GetOrg JSON Structure

    {
        "login": "NAME_OF_ORG",
        "avatar": "GITHUB_AVATAR_URL_FOR_ORG",
        "enabled": true|false,
        "admin": true|false
    },

If the current user is an admin for the org, admin is set to true.
If the org has been enabled in checks-out, enabled is set to true.
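A sketch that lists the calling user's orgs and reports which ones are enabled. The struct mirrors the GetOrg structure above; the bearer-token handling matches the earlier sketches and the helper name is illustrative.

package client

import (
	"encoding/json"
	"fmt"
	"net/http"
)

// Org mirrors the GetOrg JSON structure.
type Org struct {
	Login   string `json:"login"`
	Avatar  string `json:"avatar"`
	Enabled bool   `json:"enabled"`
	Admin   bool   `json:"admin"`
}

// listOrgs calls GET /api/user/orgs and returns the decoded list.
func listOrgs(baseURL, jwt string) ([]Org, error) {
	req, err := http.NewRequest("GET", baseURL+"/api/user/orgs", nil)
	if err != nil {
		return nil, err
	}
	req.Header.Set("Authorization", "Bearer "+jwt)
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return nil, fmt.Errorf("GET /api/user/orgs returned status %d", resp.StatusCode)
	}
	var orgs []Org
	if err := json.NewDecoder(resp.Body).Decode(&orgs); err != nil {
		return nil, err
	}
	return orgs, nil
}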

Get Personal Repos for Current User

Returns all repos in the user’s account (not repos in orgs that the user belongs to)

Endpoint: /api/user/repos
Method: GET

Success: returns a 200 (ok) status code and a JSON list of GetRepo JSON structures

GetRepo JSON Structure

    {
        "id": ID_IN_checks-out,
        "owner": "USER_NAME",
        "name": "REPO_NAME",
        "slug": "USER_NAME/REPO_NAME",
        "link_url": "URL_IN_GITHUB",
        "private": true|false,
        "org": true|false
    },

If id is present, the repo is managed by checks-out.
If the repo is enabled because its org is managed by checks-out, org is set to true.
If the repo is private to the user, private is set to true.
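For Go clients, a struct along these lines (field names taken from the GetRepo structure above) can be used to decode the repo lists returned by this endpoint and the org repo endpoints; the type itself is only a sketch.

package client

// Repo mirrors the GetRepo JSON structure returned in repo lists.
type Repo struct {
	ID      int64  `json:"id"` // present only when the repo is managed by checks-out
	Owner   string `json:"owner"`
	Name    string `json:"name"`
	Slug    string `json:"slug"`
	LinkURL string `json:"link_url"`
	Private bool   `json:"private"` // true when the repo is private to the user
	Org     bool   `json:"org"`     // true when the repo is enabled because its org is managed
}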

Get Repos in Specified Org

+ +

Returns all repos in the specified org

+ +
+
Method: GET
+
+ +

:org is replaced with the name of the org whose repos are being requested.

+ +

Success: returns a JSON list of GetRepo JSON structures

+ +

Get Enabled Orgs for Current User

+ +

Returns all orgs that have been enabled by the current user

+ +

Endpoint: /api/user/orgs/enabled +Method: GET

+ +

Success: return a JSON list of GetOrg JSON structures

+ +

Note: the avatar and admin fields will always be set to "" and false respectively. The enabled field will always be set to true.

+ +

Enable Repo

+ +

Turns on checks-out monitoring for the specified repo.

+ +
+
Body: none
+
:owner is the name of the org or the name of the user, for a personal repo +:repo is the name of the repo
+
+ +


+ +

Success: returns 200 (ok) and a GetRepo JSON structure +Failure: returns 409 (Conflict) if the repo is already enabled +Failure: returns 404 (not found) if the repo does not exist or is not available to the user

+ +

Disable Repo

+ +

Turns off checks-out monitoring for the specified repo

+ +
+
Method: DELETE
+
:owner is the name of the org or the name of the user, for a personal repo +:repo is the name of the repo
+
+ +


+ +

Success: returns 200 (ok) +Failure: returns 409 (Conflict) if the repo is already disabled +Failure: returns 404 (not found) if the repo does not exist or is not available to the user

+ +

Get Enabled Repo

+ +

Returns information on the specified enabled repo

+ +
+
Method: GET
+
:owner is the name of the org or the name of the user, for a personal repo +:repo is the name of the repo
+
+ +


+ +

Success: returns 200 (ok) and a GetRepo JSON structure +Failure: returns 404 (not found) if the repo does not exist or is not available to the user

+ +

Enable Org

+ +

Turns on checks-out monitoring for the specified org. All repos currently in the org are enabled and all repos added to the org in the future will be enabled on creation.

+ +
+
Body: none
+
+ +

:owner is the name of the org

+ +

Success: returns 200 (ok) and an OrgInfo JSON structure +Failure: returns 409 (Conflict) if the org is already enabled +Failure: returns 404 (not found) if the org does not exist, is not available to the user, or if it is the user’s personal repos (owner == user name)

+ +

OrgInfo JSON Structure

+ +
{
+    "id": ID_IN_checks-out,
+    "owner": "ORG_NAME",
+    "link_url": "GITHUB_URL",
+    "private": true|false
+}
+
+ +

If the org is private, private is set to true.

+ +

Disable Org

+ +

Turns off checks-out monitoring for the specified org. Repos currently in the org are no longer monitored and repos added to the org in the future will not be enabled on creation.

+ +
+
Method: DELETE
+
+ +

:owner is the name of the org

+ +

Success: returns 200 (ok) +Failure: returns 409 (Conflict) if the org is already disabled +Failure: returns 404 (not found) if the org does not exist, is not available to the user, or if it is the user’s personal repos (owner == user name)

+ +

Get Config for Repo

+ +

Returns the .lgtm or .checks-out file for the repo

+ +
+
Method: GET
+
:owner is the name of the org or the name of the user, for a personal repo +:repo is the name of the repo
+
+ +


+ +

Success: returns 200 (ok) and a .checks-out config file in JSON +Failure: returns 404 (not found) if the file does not exist or is not available to the user

+ +

Get Maintainers for Repo

+ +

Returns the MAINTAINERS file for the repo

+ +
+
Method: GET
+
:owner is the name of the org or the name of the user, for a personal repo +:repo is the name of the repo
+
+ +


+ +

Success: returns 200 (ok) and a MAINTAINERS config file +Failure: returns 404 (not found) if the file does not exist or is not available to the user

+ +

Validate Config and Maintainers for Repo

+ +

Validates that the .lgtm/.checks-out and MAINTAINERS files are present and valid

+ +
+
Method: GET
+
:owner is the name of the org or the name of the user, for a personal repo +:repo is the name of the repo
+
+ +


+ +

Success: returns 200 (ok) +Failure: returns 404 (not found) if the file does not exist or is not available to the user +Failure: returns 400 (bad request) if the .lgtm/.checks-out or MAINTAINERS file is not valid. Error messages returned as text in body of response.

+ +

Convert .lgtm to .checks-out

+ +

Converts a .lgtm file in a repo into the equivalent .checks-out file

+ +
+
Method: GET
+
:owner is the name of the org or the name of the user, for a personal repo +:repo is the name of the repo
+
+ +


+ +

Success: returns 200 (ok) and a .checks-out config file in JSON +Failure: returns 404 (not found) if the file does not exist or is not available to the user +Failure: returns 400 (bad request) if the .lgtm file is not valid. Error messages returned as text in body of response.

+ +

Get Teams in Org

+ +

Returns the teams defined in an org

+ +
+
Method: GET
+
+ +

:owner is the name of the org

+ +

Success: returns 200 (ok) and a JSON array of strings. Each string is the name of a team in the org +Failure: returns 404 (not found) if the org does not exist, is not available to the user, or is the user’s personal repos (owner == user name)

+ +

Get checks-out Status for Pull Request

+ +

Returns the status of a pull request and the configuration used to process it

+ +
+
Method: GET
+
:owner is the name of the org or the name of the user, for a personal repo +:repo is the name of the repo +:id is the id of the pull request in Github
+
+ +


+ +

Success: returns 200 (ok) and a Status JSON structure +Failure: returns 404 (not found) if the org, repo, or id does not exist or is not available to the user

+ +

Status JSON Structure

+ +
{
+    "approved": true|false,
+    "approvers": ["USER_NAMES_OF_APPROVERS"],
+    "disapprovers": ["USER_NAMES_OF_DISAPPROVERS"],
+    "policy": {
+    },
+    "settings": {
+    }
+}
+
+ +

approved is true if the pull request has been approved by checks-out, false otherwise. +approvers is an array of user names of the people who have approved the pull request. +disapprovers is an array of user names of the people who have disapproved the pull request. +policy is the policy in the .checks-out configuration file that is used for this pull request. +settings is the .checks-out configuration file. All optional sections are filled in with their default values.

+ +
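A hedged Go sketch of decoding the Status structure above, again reusing the getJSON helper from the Authorization section. The policy and settings objects are kept as raw JSON because their shape is the .checks-out configuration described elsewhere, and the exact route is passed in by the caller rather than guessed here.
// PullRequestStatus mirrors the Status JSON structure documented above.
type PullRequestStatus struct {
    Approved     bool            `json:"approved"`
    Approvers    []string        `json:"approvers"`
    Disapprovers []string        `json:"disapprovers"`
    Policy       json.RawMessage `json:"policy"`
    Settings     json.RawMessage `json:"settings"`
}

// prStatus fetches and decodes the pull request status endpoint.
func prStatus(base, route, jwt string) (*PullRequestStatus, error) {
    var status PullRequestStatus
    if err := getJSON(base, route, jwt, &status); err != nil {
        return nil, err
    }
    return &status, nil
}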

User Slack URL Management

+ +

Register new User-Level Slack Target

+ +

Registers a new Slack target for all repos managed by the current user. The slack target’s name is the hostname for the Slack instance. +A User-Level Slack Target will override an Admin-Level Slack Target for the same hostname. It is not visible to any other users in checks-out.

+ +
+
Body: URL JSON Structure
+
+ +

:hostname is the hostname for the slack group

+ +

Success: returns a 201 (created) status code

+ +

URL JSON Structure

+ +

{ + "url":"_URL_FOR_INCOMING_NOTIFICATIONS_WEBHOOKS_" +}

+ +

Get URL for User-Level Slack Target

+ +

Returns the URL for a User-Level Slack target for all repos managed by the current user.

+ +
+
Method: GET
+
+ +

:hostname is the hostname for the slack group

+ +

Success: returns a 200 (ok) status code and a URL JSON structure +Failure: returns a 404 (not found) status code if no URL is registered by the current user for the specified slack target

+ +

Update URL for User-Level Slack Target

+ +

Updates the specified Slack target for all repos managed by the current user.

+ +
+
Body: URL JSON Structure
+
+ +

:hostname is the hostname for the slack group

+ +

Success: returns a 200 (ok) status code

+ +

Delete User-Level Slack Target

+ +

Removes the specified Slack target for all repos managed by the current user. If there is an admin-level Slack target specified for +the same hostname, it will be used instead.

+ +
+
Method: DELETE
+
+ +

:hostname is the hostname for the slack group

+ +

Success: returns a 204 (deleted) status code +Failure: returns a 404 (not found) status code if no URL is registered by the current user for the specified slack target

+ +

Admin

+ +

Get All Enabled Repos

+ +

Returns a list of all repos that have been enabled in checks-out across all users

+ +

Endpoint: /api/repos +Method: GET

+ +

Success: returns a 200 (ok) status code and a JSON list of GetRepo JSON structures

+ +

Get Count of Enabled Repos

+ +

Returns a count of all repos that have been enabled in checks-out across all users

+ +

Endpoint: /api/count +Method: GET

+ +

Success: returns a 200 (ok) status code and a number as text

+ +

Admin Slack URL Management

+ +

Register New Admin-Level Slack Target

+ +

Registers a new Slack target for all repos. The slack target’s name is the hostname for the Slack instance. +A User-Level Slack Target will override an Admin-Level Slack Target for the same hostname.

+ +
+
Body: URL JSON Structure
+
+ +

:hostname is the hostname for the slack group

+ +

Success: returns a 201 (created) status code

+ +

Get URL for Admin-Level Slack Target

+ +

Returns the URL for an Admin-Level Slack target for all repos.

+ +
+
Method: GET
+
+ +

:hostname is the hostname for the slack group

+ +

Success: returns a 200 (ok) status code and a URL JSON structure +Failure: returns a 404 (not found) status code if no URL is registered by the admin for the specified slack target

+ +

Update URL for Admin-Level Slack Target

+ +

Updates the specified Slack target for all repos.

+ +
+
Body: URL JSON Structure
+
+ +

:hostname is the hostname for the slack group

+ +

Success: returns a 200 (ok) status code

+ +

Delete Admin-Level Slack Target

+ +

Removes the specified Slack target for all repos. If there are user-level Slack targets specified for +the same hostname, they will be used instead for those users.

+ +
+
Method: DELETE
+
+ +

:hostname is the hostname for the slack group

+ +

Success: returns a 204 (deleted) status code +Failure: returns a 404 (not found) status code if no URL is registered by the current user for the specified slack target

+
+
+ +
+ + +
+
+ + + \ No newline at end of file diff --git a/docs/approvals/index.html b/docs/approvals/index.html new file mode 100644 index 0000000..65ac460 --- /dev/null +++ b/docs/approvals/index.html @@ -0,0 +1,118 @@ + + + + + Approval Policies · checks-out + + + + + + + + + + + + + + + + +
+ +
+

Approval Policies

+ +

Here are some templates you can use to write the approval policy section +of your .checks-out configuration.

+ +
match: "all[count=1,self=false]"
+
+ +

This policy requires one approval from anyone in the built-in ‘all’ +group. ‘all’ expands to all the people in the MAINTAINERS file. +Populating the MAINTAINERS file with the macro “github-org repo-self” +will expand to everyone within the organization of the GitHub repository. +self=false forbids the author of the pull request from approving their own +request. If you want self-approval then use “all[count=1,self=true]”.

+ +
match: "sharks[count=1,self=false] and jets[count=1,self=false]"
+
+ +

You have defined the groups ‘sharks’ and ‘jets’ in the MAINTAINERS file. +This policy requires one approval from the sharks and one approval +from the jets. If someone belongs to both the sharks and the jets, then +their approval will count for both of the clauses and therefore they +do not require a second reviewer. In general, the teams work best when +they are non-overlapping. But the next example takes advantage of +overlapping groups.

+ +
match: "all[count=2,self=false] and reviewers[count=1,self=false]"
+
+ +

This policy requires two approvals and at least one of the approvals +must be from the group ‘reviewers’ in the MAINTAINERS file. The policy +takes advantage of the fact that reviewers is a subset of all. If +you have only one person in the reviewers group, you want that reviewer +to submit pull requests, and it is OK to have only a single approval +against those pull requests, then use +“all[count=2,self=false] and reviewers[count=1,self=true]” and the +reviewer must self-approve their pull request.

+
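Putting one of these templates into context, here is an illustrative (not prescriptive) approvals section; the branch names and the reviewers group are placeholders that must exist in your MAINTAINERS file, and the last entry has no scope so it acts as the default policy.
approvals:
[
  {
    name: "protected branches"
    scope:
    {
      branches: [ "master", "release" ]
    }
    match: "all[count=2,self=false] and reviewers[count=1,self=false]"
  }
  {
    # Default policy: the last entry must have an empty scope.
    match: "all[count=1,self=false]"
  }
]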
+
+ +
+ + +
+
+ + + \ No newline at end of file diff --git a/docs/branches/index.html b/docs/branches/index.html new file mode 100644 index 0000000..4852495 --- /dev/null +++ b/docs/branches/index.html @@ -0,0 +1,109 @@ + + + + + Branches · checks-out + + + + + + + + + + + + + + + + +
+ +
+

Branches

+ +
+ +

Overview

+ +

Checks-out automatically enables GitHub protected branches for your repositories’ default branch. You can further customize or disable this behavior by navigating to your repository branch settings screen in GitHub.

+ +

protected branches

+ +

GitHub Enterprise

+ +

GitHub Enterprise 2.4 does not have an API for protected branches. As a result you will need to manually configure protected branches after enabling your repository in checks-out. This can be done by navigating to your repository branch settings screen in GitHub Enterprise (pictured above).

+ +

Other Branches

+ +

Checks-out automatically enables itself for the default branch. If you would like to enable checks-out for additional branches you can manually configure additional branches in your repository branch settings screen in GitHub.

+ +

Other Checks

+ +

Checks-out automatically adds itself as a required status check. If you would like to enable additional status checks, such as continuous integration or code coverage, you can further customize this behavior in your repository branch settings screen in GitHub.

+
+
+ +
+ + +
+
+ + + \ No newline at end of file diff --git a/docs/categories/index.xml b/docs/categories/index.xml new file mode 100644 index 0000000..14fbadf --- /dev/null +++ b/docs/categories/index.xml @@ -0,0 +1,14 @@ + + + + Categories on checks-out + http://www.capitalone.io/checks-out/categories/ + Recent content in Categories on checks-out + Hugo -- gohugo.io + en-us + + + + + + \ No newline at end of file diff --git a/docs/customize/index.html b/docs/customize/index.html new file mode 100644 index 0000000..03e6973 --- /dev/null +++ b/docs/customize/index.html @@ -0,0 +1,720 @@ + + + + + Customize · checks-out + + + + + + + + + + + + + + + + +
+ +
+

Customize

+ +
+ +

Introduction

+ +

Place an .checks-out file in the root of your repository to customize your +project’s approval process. This file is in Human JSON +(HJSON) format. JSON is also valid HJSON so +you can write your .checks-out file in JSON if you prefer.

+ +

Each property of the .checks-out file has a default value that is used +when you do not provide a value. This document will explain each +section of the .checks-out file. At the beginning of each section an +example of the configuration will be provided. The example will +be populated with the default values for that section.

+ +

For example here are all the default values of the .checks-out file:

+ +
approvals:
+[
+  {
+    scope:
+    {
+      branches: []
+      paths: []
+    }
+    name: ""
+    match: "all[count=1,self=true]"
+    antimatch: "all[count=1,self=true]"
+    authormatch: "universe[count=1,self=true]"
+    pattern: null
+    antipattern: null
+    antititle: null
+    tag: null
+    merge: null
+    feedback: null
+  }
+]
+pattern: "(?i)^I approve\\s*(version:\\s*(?P<version>\\S+))?\\s*(comment:\\s*(?P<comment>.*\\S))?\\s*"
+antipattern: null
+antititle: null
+commit:
+{
+  range: head
+  antirange: head
+  tagrange: head
+  ignoreuimerge: false
+}
+maintainers:
+{
+  path: MAINTAINERS
+  type: text
+}
+feedback:
+{
+  type: ["comment", "review"]
+}
+merge:
+{
+  enable: false
+  merge: "merge"
+  delete: false
+  uptodate: true
+}
+tag:
+{
+  enable: false
+  algorithm: semver
+  template: "{{.Version}}"
+  increment: "patch"
+  docker: false
+}
+comment:
+{
+  enable: false
+  targets: []
+}
+deploy:
+{
+  enable: false
+  deployment: DEPLOYMENTS
+}
+
+ +

If you do not need to change the default values in a section +of the .checks-out file then you may leave out that section from +the file. Here is a valid .checks-out file where we are changing +the number of approvers from 1 to 2 and the remaining properties +use the default values:

+ +
approvals:
+[
+  {
+    match: "all[count=2]"
+  }
+]
+
+ +

You can inspect the .checks-out file for your repository using the api endpoint +/api/repos/[orgname]/[reponame]/config. The output will have any missing +configuration parameters populated with default values. If the configuration +file cannot be parsed then an error message will be returned.

+ +

Approval Policies

+ +

checks-out has extensive support for different approval policies. Each approval +policy consists of two sections: the approval scope and the approval match. +The scope determines which policy is applied against a pull request. +The match determines when the pull request is allowed to merge. The +approvals section of .checks-out is an ordered list (an array) of policies. +The policies are traversed in the order specified by the configuration file. +The first scope that matches against the pull request is the policy +that is used (see Policy Scope section).

+ +

Each approval policy may optionally declare a tag section. The tag +section is identical in structure to the global tag section. If a policy +has a tag section then its tag section is applied instead of the global +tag section.

+ +

Each approval policy may optionally declare a merge section. The merge +section is identical in structure to the global merge section. If a policy +has a merge section then its merge section is applied instead of the global +merge section.

+ +

Each approval policy may optionally declare a pattern. If a policy has a pattern +then it is applied for regular expression matching instead of the global +pattern. Each approval policy may optionally declare an antipattern. If a policy +has an antipattern then it is applied for regular expression matching instead +of the global antipattern.

+ +

An .checks-out configuration file is required to have an empty scope +in the last policy in the policies array. This ensures that every +pull request has a matching scope.

+ +

If you have several approval policies, then we recommend enabling the GitHub +notification channel and including “open” in the types +field. This will enable checks-out to post a GitHub comment on a pull request +when the request is opened. The comment will describe which approval policy +is being applied to the pull request.

+ +

Policy Scope

+ +
scope:
+{
+  branches: []
+  paths: []
+}
+
+ +

If the ‘branches’ array is non-empty then the policy is limited +to the specified branches. In GitHub terminology this is a list of base +branches. The base branch is where the changes should be applied.

+ +

If the ‘paths’ array is non-empty then the policy is limited +to the specified file paths. Paths uses a simplified form of wildcard +glob expansion. The sequence ** will match zero or more instances +of any character. The sequence * will match zero or more instances +of any character except the directory separator /. All other character +sequences are literal matches.

+ +

For example, to match against all Java source files use the path +expression **.java. To match recursively against all files +in a subdirectory use foo/bar/**.

+ +
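For instance, a sketch of a scope limited to the dev branch and to Java sources (both values are purely illustrative):
scope:
{
  branches: [ "dev" ]
  paths: [ "**.java", "src/main/**" ]
}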

Policy Name

+ +
name: ""
+
+ +

The ‘name’ field of a policy allows you to optionally specify a human-readable +description of the policy. If the name is empty then the policy is +identified by its 1-based offset into the policy array. If the name is +nonempty then no two policies can have the same name.

+ +

Policy Match

+ +
match: "all[count=1,self=true]"
+
+ +

The policy match algorithms are specified with a domain-specific language (DSL). +The DSL is based around the concept of organization approvals. Boolean operators +can be used to combine organizations. Parentheses may be used to define +precedence of operations. There are several predefined organizations: “all”, +“universe”, “us”, and “them”.

+ +

All Match

+ +
match: "all[count=1,self=true]"
+
+ +

This matches against anyone in the MAINTAINERS file. The ‘count’ +field determines the minimum amount of approvals for a pull request +to be accepted. The ‘self’ field determines whether the author +of a pull request is allowed to approve the pull request.

+ +

Universe Match

+ +
match: "universe[count=1,self=true]"
+
+ +

This is a variation of “all” matching where anyone with an account +on this GitHub instance can approve the pull request. The ‘count’ +field determines the minimum amount of approvals for a pull request +to be accepted. The ‘self’ field determines whether the author +of a pull request is allowed to approve the pull request.

+ +

Us Match

+ +
match: "us[count=1,self=true]"
+
+ +

This policy will match to any person that has at least one organization +in common with the author of the pull request. The ‘count’ +field determines the minimum amount of approvals for a pull request +to be accepted. The ‘self’ field determines whether the author +of a pull request is allowed to approve the pull request.

+ +

Them Match

+ +
match: "them[count=1,self=true]"
+
+ +

This policy will match any person who has no organizations +in common with the author of the pull request. The ‘count’ +field determines the minimum amount of approvals for a pull request +to be accepted. The ‘self’ field determines whether the author +of a pull request is allowed to approve the pull request.

+ +

Org Match

+ +
match: "foo[count=1,self=true]"
+
+ +

This policy will match against an organization named “foo” in the +MAINTAINERS file. The ‘count’ field determines the minimum +amount of approvals from this organization for a pull request to be accepted. +The ‘self’ field determines whether the author +of a pull request is allowed to approve the pull request.

+ +

Anonymous Org Match

+ +
match: "{john,jane,sally}[count=1,self=true]"
+
+ +

This policy will match against an anonymous organization composed of the +specified people. Anonymous organizations are ignored by the ‘us’ and ‘them’ +groups defined above. The ‘count’ field determines the minimum amount of +approvals from this anonymous organization for a pull request to be accepted. The ‘self’ field determines +whether the author of a pull request is allowed to approve the pull request.

+ +

And Match

+ +
match: "foo[count=1,self=true] and bar[count=1,self=true]"
+
+ +

This policy allows you to combine two or more policies. All +of the policies must be met in order for this policy to be true.

+ +

Or Match

+ +
match: "foo[count=1,self=true] or bar[count=1,self=true]"
+
+ +

This policy allows you to combine two or more policies. One +or more of the policies must be met in order for this policy +to be true.

+ +

Not Match

+ +
match: "not foo[count=1,self=true]"
+
+ +

This policy allows you to negate another policy.

+ +

True Match

+ +
match: "true"
+
+ +

This policy allows all pull requests. It’s very likely that you +want to use the “off” approval policy instead of this policy.

+ +

False Match

+ +
match: "false"
+
+ +

This policy allows no pull requests.

+ +

Disable Match

+ +
match: "off"
+
+ +

This policy disables the service. Added in version 0.6.10.

+ +

This is identical to the approval policy “true” except that it also adds +a local merge policy to disable automatic merging of branches.

+ +

Atleast Function

+ +
match: "atleast(2, foo[count=3,self=false], bar, baz, {jane,john})"
+
+ +

This function requires at least N of the specified groups to match +successfully, where N is the first parameter to the function.

+ +

Author Function

+ +
match: "author(foo or bar or not {jane,john})"
+
+ +

This function checks to see who is the author for the pull request. It is used to limit an approval policy to a set of authors. The potential author can be specified using an expression built out of the other matching expression terms. Since a pull request can only have a single author, any expression that can only be satisfied by more than one author, such as specifying foo and bar, or foo[count=2] will create a case that cannot match.

+ +

Pattern

+ +
pattern: "(?i)^checks-out\\s*(?P<version>\\S*)"
+
+ +

The regular expression that checks-out matches against the comment on a GitHub +pull request. The version capture group must be specified if +the version section of .checks-out is enable.

+ +

Pattern matching is performed using Go’s regular expressions package. We +recommended testing custom regular expressions in the Go +playground.

+ +

Disapproval

+ +
antipattern: null
+antimatch: "all[count=1,self=true]"
+
+ +

checks-out has optional support for allowing the reviewer to raise a concern. +If an ‘antipattern’ is specified then the disapproval policy is enabled. +The disapproval policy is specified by the ‘antimatch’ parameter. +The antipattern was introduced in version 0.5.17 and the antimatch +was introduced in version 0.6.8.

+ +

Approvals and disapprovals are enforced by the following mechanism. First, +all the comments in the pull request are scanned for the ‘antipattern’ regular +expression. Any matching comments are applied towards the ‘antimatch’ +policy. If the antimatch policy becomes true then the pull request is blocked +from merge. If the antimatch policy is false then the match policy is tested +as usual. If the match policy becomes true then the pull request can be merged.

+ +

If a reviewer enters a disapproval comment followed by an approval comment, +then the approval comment will cancel out the disapproval comment. The +disapproval comment is forgotten but the approval comment also applies +towards the match policy, assuming the reviewer participates in both the +antimatch policy and the match policy.

+ +

The commit range configuration applies the same behavior to +approvals and disapprovals. With the default behavior, “head”, only comments +that occur after the timestamp of the HEAD of the branch are used. Therefore +an objection will be erased after a new commit. Using the behavior “all” the +objection will persist and the reviewer must enter another comment with +the approval string.

+ +

Author Restriction

+ +
authormatch: "universe[count=1,self=true]"
+
+ +

It is possible to restrict the allowed authors with the authormatch approval +policy. The default value allows all users to submit pull requests. Using +the policy “all[count=1,self=true]” will only allow users listed in the +MAINTAINERS file to submit a pull request. Since a pull request can only have a single author, any expression that can only be satisfied by more than one author, such as specifying foo and bar, or foo[count=2] will create a case that cannot match.

+ +

The difference between authormatch and the author function in the match is subtle. author is used to specify that a match rule applies an approval policy to a set of authors. authormatch is used to specify which authors can create pull requests at all.

+ +

Work-In-Progress Pull Requests

+ +
antititle: null
+
+ +

Optionally specify a regular expression that checks-out matches against the title +of a GitHub pull request. If the title matches then the pull request is blocked +from being merged. A common value is “^WIP:”. This feature was added in +version 0.7.6.

+ +

Pattern matching is performed using Go’s regular expressions package. We +recommended testing custom regular expressions in the Go +playground.

+ +

Commit

+ +
commit:
+{
+  range: head
+  antirange: head
+  tagrange: head
+  ignoreuimerge: false
+}
+
+ +

The range options affect the processing of commits on the pull request. The +range fields can have two possible values: “head” or “all”. “head” will use the +comments that occur after the timestamp of the HEAD of the branch. “all” will +use all the comments on that branch. The ‘range’ parameter affects approval +comments, ‘antirange’ affects disapproval comments, and ‘tagrange’ affects +the tagging section. ‘range’ was introduced in version 0.5.17. ‘antirange’ +and ‘tagrange’ were introduced in version 0.7.7.

+ +

‘ignoreuimerge’ will ignore merges from upstream that are made through the +GitHub user interface (by clicking on “update branch”).

+ +

Maintainers

+ +
maintainers:
+{
+  path: MAINTAINERS
+  type: text
+}
+
+ +

The path to the file that specifies the project maintainers. +The type field can be “text”, “hjson”, “toml”, or “legacy”.

+ +

Merge

+ +
merge:
+{
+  enable: false
+  delete: false
+  method: "merge"
+  uptodate: true
+}
+
+ +

checks-out has the ability to automatically merge a pull request when the branch is +mergeable and all status checks (including optional ones) have passed. If +the delete flag is true then the head branch is deleted after a successful +merge. The delete flag was introduced in version 0.5.18.

+ +

If you enable automatic merges in the global merge section and you have +an approval policy that is the policy “true”, then you are +encouraged to declare local merge section for that policy that is set to false. +Otherwise checks-out will automatically merge pull requests on those +branches as soon as they are opened. Instead of using the policy “true”, +use the policy “off” which handles this case automatically.

+ +

The parameter ‘method’ determines the type of merge. It can be “merge”, +“squash” or “rebase”.

+ +

The parameter ‘uptodate’ determines whether the compare branch must +be zero commits behind the base branch before performing an automatic +merge. This parameter is enabled by default. It was introduced in +version 0.21.0.

+ +
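For example, a merge section that squash-merges and deletes the head branch after a successful merge might look like this (an illustration, not a recommendation):
merge:
{
  enable: true
  method: "squash"
  delete: true
  uptodate: true
}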

Tag

+ +
tag:
+{
+  enable: false
+  algorithm: semver
+  template: "{{.Version}}"
+  increment: "patch"
+  docker: false
+}
+
+ +

checks-out has the ability to automatically place a tag on a merge. +There are three tagging algorithms: “semver”, “timestamp-rfc3339”, +and “timestamp-millis”. The ‘template’ field defines the +Golang text/template +that is used to generate the tag. The ‘increment’ field is used +by the semver algorithm and determines which section of the semantic +version to increment when no explicit version is provided +by the approver: ‘major’, ‘minor’, ‘patch’, or ‘none’. +The ‘docker’ field enables stricter validation of the template to comply with +Docker tag requirements.

+ +
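As an illustration, here is a tag section that prefixes semantic versions with a v and bumps the minor version when the approver does not supply one; the template value is our own example, not a requirement:
tag:
{
  enable: true
  algorithm: semver
  template: "v{{.Version}}"
  increment: "minor"
  docker: false
}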

Semantic Versioning

+ +

The semantic versioning standard is described at http://semver.org/.

+ +

Approval comments are used to provide the semantic version for the current pull request. By default, the version is specified after the I approve string:

+ +
I approve 1.0.0
+
+ +

will tag the merge commit with the version 1.0.0.

+ +

If multiple approvers for a pull commit specify a version, the highest version +specified will be used.

+ +

If no approver specifies a version, or if the maximum version specified by an +approver is less than any previously specified version tag, the version will be +set to the highest specified version tag with the patch version incremented.

+ +

Timestamp Versioning

+ +

There are currently two options for timestamps, one based on the RFC 3339 standard and one specifying milliseconds since the epoch (Jan 1, 1970, 12:00:00AM UTC).

+ +

The type “timestamp-rfc3339” will look like this:

+ +
2016-05-16T19.06.26Z
+
+ +

(colons aren’t legal in git tags, so the colons in the RFC format have been replaced by periods.)

+ +

The type “timestamp-millis” specifies the number of milliseconds since the epoch.

+ +

Feedback

+ +
feedback:
+{
+  type: ["comment", "review"]
+}
+
+ +

The feedback section customizes the processing of approval events. The feedback +type determines which kinds of events are processed. “comment” accepts +pull request comments. “review” accepts pull request reviews.

+ +

Comment

+ +
comment:
+{
+  enable: false
+  targets: []  
+}
+
+ +

Allows notifications to be sent to output channels when checks-out performs an +action. Legal values for the “types” field are the following:

+ +
    +
  • “error” Error while processing webhook
  • +
  • “open” Pull request is opened
  • +
  • “close” Pull request is closed without being merged
  • +
  • “accept” Pull request has been closed and merged
  • +
  • “approve” An approval comment has been added
  • +
  • “block” A disapproval comment has been added
  • +
  • “reset” Commit of PR branch has reset previous approvals
  • +
  • “merge” Pull request was auto-merged after all status checks passed
  • +
  • “tag” Branch was tagged after merge
  • +
  • “delete” Branch was auto-deleted after merge
  • +
  • “deploy” Deployment was triggered after merge
  • +
  • “author” Pull request is blocked because author is not approved
  • +
+ +

GitHub Comments

+ +
{
+  target: github
+  pattern: null
+  types: []
+}
+
+ +

If enabled then checks-out will add comments to the pull request. Pattern is an +optional regular expression that allows comments to be only applied to +some pull requests based on the title of the request. The types field +is described above. If types is empty then all types send notifications. +GitHub comment support was introduced in 0.7.9.

+ +

Slack Comments

+ +
{
+  target: foobar.slack.com
+  pattern: null
+  types: []
+  names: []
+}
+
+ +

If enabled then checks-out will write messages to Slack. Pattern is an +optional regular expression that allows comments to be only applied to +some pull requests based on the title of the request. The types field +is described above. If types is empty then all types send notifications. +Names is a list of slack channels or users you want to message. Channels are prefixed with #, +users are prefixed with @. Slack comment support was introduced in 0.7.9.

+ +

The Slack integration allows custom servers to be specified.

+ +

The target field can be the hostname of the Slack target such as “foobar.slack.com”. +If that hostname has been previously registered with the checks-out service then you +will be able to use it. Otherwise you need to register a Slack Webhook with the checks-out service. Use the +/api/user/slack/:hostname endpoint to register the webhook. This is documented +on the API page.

+ +

Deploy

+ +
deploy:
+{
+  enable: false
+  deployment: DEPLOYMENTS
+}
+
+ +

TODO: add documentation

+
+
+ +
+ + +
+
+ + + \ No newline at end of file diff --git a/docs/docs.css b/docs/docs.css new file mode 100644 index 0000000..5d904da --- /dev/null +++ b/docs/docs.css @@ -0,0 +1,114 @@ +@media screen and (min-width: 1201px) { + .container { + width: 70%; + } +} +@media screen and (min-width: 901px) { + .container { + width: 90%; + } +} +@media screen and (max-width: 768px) { + .grid-flex-cell { + display: block; + width:100%; + } +} + +.grid-flex-container { + margin-top:30px; +} +.grid-flex-cell { + border:none; + text-align:left; +} +.grid-flex-cell-1of4 { + min-width:250px; + margin-left:30px; +} +h1 { + font-size:32px; + margin-bottom:0px; +} +h2 { + font-size:24px; + margin:60px 0px 30px 0px; +} +.grid-flex-cell p { + margin-bottom: 30px; +} + +.navbar { + border-bottom: 1px solid #e6eaed !important; +} +.navbar .navbar-brand { + margin-left:0px; +} +.side-nav a { + color: #4d5659 !important; +} + +img[alt="hook success"], +img[alt="hook error"], +img[alt="github registration"], +img[alt="protected branches"] { + border: 1px solid #e6eaed; + border-radius:3px; + max-width:600px; +} + +img[alt="approval complete"], +img[alt="pending approval"] { + max-width:600px; +} + +img[alt="maintainers team"] { + max-width:500px; +} + +pre code { + font-family: "Roboto Mono"; + font-size: 13px; +} + +#TableOfContents { + margin-top:30px; + margin-bottom:-60px; +} +#TableOfContents li a { + display:inline-block; + margin-bottom:5px; +} +#TableOfContents li a:hover { + text-decoration: underline; +} +section h1 { + border-top: 1px solid #e6eaed; + padding-top: 30px; + padding-bottom:30px; + margin-top: 30px; + font-size:28px; +} +blockquote { + background-color: #e3f8ff; + color: #00b0e9; +} +blockquote p { + padding:30px; + font-style: normal; +} +blockquote a { + color: #008dba; +} +blockquote a:hover { + text-decoration: underline; +} +blockquote ~ h1 { + border-top:0px; + margin-top:-20px; +} + +table { + border: 1px solid #e6eaed !important; + margin-bottom: 40px; +} diff --git a/docs/faq/index.html b/docs/faq/index.html new file mode 100644 index 0000000..5b371b4 --- /dev/null +++ b/docs/faq/index.html @@ -0,0 +1,161 @@ + + + + + FAQ · checks-out + + + + + + + + + + + + + + + + +
+ +
+

FAQ

+ +
+ +

Why don’t I see my repository?

+ +

Here are some tips for troubleshooting:

+ +
    +
  • Is your GitHub repository public?
  • +
  • Do you have Admin access to the repository?
  • +
  • Did you grant access to your GitHub organization?
  • +
  • Did you recently create the repository? There may be up to a 15 minute delay due to internal caching.
  • +
  • Does your organization restrict integrations? Learn more.
  • +
+ +

Can I use with GitHub Enterprise?

+ +

Yes. In order to use checks-out with GitHub Enterprise you will need to install and run your own instance. See the documentation.

+ +

GitHub Enterprise 2.4 has partial support for protected branches and requires manual configuration. GitHub Enterprise 2.3 and below are not supported.

+ +

Why are all the configuration files in HJSON format?

+ +

When we began to write complex approval policies in the .checks-out +configuration file, it became apparent that TOML was too verbose for +the nested configuration sections. We looked for a golang text format +serde library that supported the following features: actually parses +whatever format without crashing on corner cases, can defer +unmarshal of specific fields (json.RawMessage), and can write custom +marshal and unmarshal functions.

+ +

The encoding/json library is the only library that met these requirements. +Human JSON is a small amount of syntactic sugar for JSON and adding +support for HJSON was trivial.

+ +

Can I install on my own server?

+ +

Yes. See the install documentation.

+ +

Can I use a central Maintainers file?

+ +

Sort of. You can create a maintainers team in your GitHub organization. See the maintainers documentation.

+ +

Why aren’t approvals being processed?!

+ +

Verify that you have received the correct number of approvals. The default configuration requires a minimum of two approvals. See the documentation to learn more about custom configurations.

+ +

Verify that hooks are being sent correctly. You can see an audit log of all hooks in the Webhooks & Services section of your repository settings.

+ +

Verify the message is being processed successfully. An unsuccessful message will be flagged accordingly in GitHub. Error messages from the service are written to the response body.

+ +

hook error

+ +

Verify the response from a successful hook. The approval settings, approval status, and list of individuals that approved the pull request are included in the payload for auditing purposes.

+ +

hook success

+ +

If the payload indicates the pull request was approved, and this is not reflected in the status you can click the re-deliver button to re-deliver the payload.

+ +

Why can’t I merge my pull request?

+ +

Please remember that checks-out uses GitHub protected branches which prevents merging a branch that has failed to meet all required status checks. The ability to merge a pull request is completely governed by GitHub.

+ +

Please also note that protected branches prevent you from merging a pull request that has fallen behind the target branch. That is to say, if a pull request is merged, all outstanding pull requests will need to re-sync before they can be merged.

+ +

This thing is busted!

+ +

Please be sure you have read the documentation, including this FAQ, and fully understand how GitHub protected branches work. If you are still experiencing issues please contact support and we’ll be happy to help!

+
+
+ +
+ + +
+
+ + + \ No newline at end of file diff --git a/docs/images/app_registration.png b/docs/images/app_registration.png new file mode 100644 index 0000000..53e8070 Binary files /dev/null and b/docs/images/app_registration.png differ diff --git a/docs/images/approval_complete.png b/docs/images/approval_complete.png new file mode 100644 index 0000000..effc27c Binary files /dev/null and b/docs/images/approval_complete.png differ diff --git a/docs/images/hook_error.png b/docs/images/hook_error.png new file mode 100644 index 0000000..4940341 Binary files /dev/null and b/docs/images/hook_error.png differ diff --git a/docs/images/hook_success.png b/docs/images/hook_success.png new file mode 100644 index 0000000..14e963b Binary files /dev/null and b/docs/images/hook_success.png differ diff --git a/docs/images/maintainers_team.png b/docs/images/maintainers_team.png new file mode 100644 index 0000000..7326b14 Binary files /dev/null and b/docs/images/maintainers_team.png differ diff --git a/docs/images/pending_approval.png b/docs/images/pending_approval.png new file mode 100644 index 0000000..38cbd49 Binary files /dev/null and b/docs/images/pending_approval.png differ diff --git a/docs/images/protected_branches.png b/docs/images/protected_branches.png new file mode 100644 index 0000000..9a3d144 Binary files /dev/null and b/docs/images/protected_branches.png differ diff --git a/docs/index.html b/docs/index.html new file mode 100644 index 0000000..d6f04ed --- /dev/null +++ b/docs/index.html @@ -0,0 +1,14 @@ + + + + + + checks-out + + + + + + \ No newline at end of file diff --git a/docs/index.xml b/docs/index.xml new file mode 100644 index 0000000..3bc68ad --- /dev/null +++ b/docs/index.xml @@ -0,0 +1,106 @@ + + + + checks-out + http://www.capitalone.io/checks-out/ + Recent content on checks-out + Hugo -- gohugo.io + en-us + Sat, 05 Dec 2015 16:00:21 -0800 + + + + + + Quick Start + http://www.capitalone.io/checks-out/overview/ + Sat, 05 Dec 2015 16:00:21 -0800 + + http://www.capitalone.io/checks-out/overview/ + Overview Checks-out is a fork of LGTM. +Checks-out will register itself as a required status check. When you enable your repository on public GitHub it is automatically configured to use protected branches. Protected branches prevent pull requests from being merged until required status checks are passing. If you are using Enterprise GitHub then you might be required to manually enable protected branches on the master branch. +You can customize many options using a . + + + + Maintainers + http://www.capitalone.io/checks-out/maintainers/ + Sat, 05 Dec 2015 16:00:21 -0800 + + http://www.capitalone.io/checks-out/maintainers/ + The list of approvers is stored in a MAINTAINERS text file in the root of your repository. The maintainers file can be in either a text or HJSON format. The format is specified in the .checks-out file file and the default format is text. +You can inspect the maintainers file for your repository using the api endpoint /api/repos/[orgname]/[reponame]/maintainers. The output will have any github-org and github-team macros expanded (see below). + + + + Branches + http://www.capitalone.io/checks-out/branches/ + Sat, 05 Dec 2015 16:00:21 -0800 + + http://www.capitalone.io/checks-out/branches/ + Overview Checks-out automatically enables GitHub protected branches for your repositories&rsquo; default branch. You can further customize or disable this behavior by navigating to your repository branch settings screen in GitHub. 
+GitHub Enterprise GitHub Enterprise 2.4 does not have an API for protected branches. As a result you will need to manually configure protected branches after enabling your repository in checks-out. This can be done by navigating to your repository branch settings screen in GitHub Enterprise (pictured above). + + + + Customize + http://www.capitalone.io/checks-out/customize/ + Sat, 05 Dec 2015 16:00:21 -0800 + + http://www.capitalone.io/checks-out/customize/ + Introduction Place an .checks-out file in the root of your repository to customize your project&rsquo;s approval process. This file is in Human JSON (HJSON) format. JSON is also valid HJSON so you can write your .checks-out file in JSON if you prefer. +Each property of the .checks-out file has a default value that is used when you do not provide a value. This document will explain each section of the . + + + + Approval Policies + http://www.capitalone.io/checks-out/approvals/ + Sat, 05 Dec 2015 16:00:21 -0800 + + http://www.capitalone.io/checks-out/approvals/ + Here are some templates you can use to write the approval policy section of your .checks-out configuration. +match: &quot;all[count=1,self=false]&quot; This policy requires one approval from anyone in the built-in &lsquo;all&rsquo; group. &lsquo;all&rsquo; expands to all the people in the MAINTAINERS file. Populating the MAINTAINERS file with the macro &ldquo;github-org repo-self&rdquo; will expand to everyone within the organization of the GitHub repository. self=false forbids the author of the pull request to approve their own request. + + + + Self Hosting + http://www.capitalone.io/checks-out/install/ + Sat, 05 Dec 2015 16:00:21 -0800 + + http://www.capitalone.io/checks-out/install/ + Requirements checks-out ships as a single binary file. If you are planning on integrating with GitHub Enterprise it requires version 2.4 or higher. +Configuration This is a full list of configuration options. Please note that many of these options use default configuration value that should work for the majority of installations. +Environment Variables for checks-out This document contains information about all the environment variables that can or must be defined for checks-out to function. + + + + REST API + http://www.capitalone.io/checks-out/api/ + Wed, 21 Jun 2017 10:56:00 -0400 + + http://www.capitalone.io/checks-out/api/ + REST API The following REST APIs are exposed by checks-out. +Logging in checks-out relies on Github&rsquo;s OAuth2 support for authentication. To log in to the REST API, a user must go to /settings/tokens on their Github server (for public Github, this would be https://github.com/settings/tokens.) and create a Personal Access Token. The token should have the permissions admin:org_hook, read:org, repo:status. Store this token in a safe place; you cannot get it again from Github. + + + + Support + http://www.capitalone.io/checks-out/support/ + Sat, 05 Dec 2015 16:00:21 -0800 + + http://www.capitalone.io/checks-out/support/ + Please be sure to read the faq before engaging the support team. This is a free service run by open source community members and we ask that you are courteous of their time. + + + + FAQ + http://www.capitalone.io/checks-out/faq/ + Sat, 05 Dec 2015 16:00:21 -0800 + + http://www.capitalone.io/checks-out/faq/ + Why don&rsquo;t I see my repository? Here are some tips for troubleshooting: + Is your GitHub repository public? Do you have Admin access to the repository? Did you grant access to your GitHub organization? 
Did you recently create the repository? There may be up to a 15 minute delay due to internal caching. Does your organization restrict integrations? Learn more. Can I use with GitHub Enterprise? Yes, In order to use with GitHub Enterprise you will need to install and run your own instance. + + + + \ No newline at end of file diff --git a/docs/install/index.html b/docs/install/index.html new file mode 100644 index 0000000..de0a609 --- /dev/null +++ b/docs/install/index.html @@ -0,0 +1,530 @@ + + + + + Self Hosting · checks-out + + + + + + + + + + + + + + + + +
+ +
+

Self Hosting

+ +
+ +

Requirements

+ +

checks-out ships as a single binary file. If you are planning on integrating +with GitHub Enterprise it requires version 2.4 or higher.

+ +

Configuration

+ +

This is a full list of configuration options. Please note that many of these +options use default configuration values that should work for the majority of +installations.

+ +

Environment Variables for checks-out

+ +

This document contains information about all the environment variables that can or +must be defined for checks-out to function.

+ +

Server configuration

+ +

Server IP Address and Port

+ +
    +
  • Format: SERVER_ADDR="_ip_address_:_port_" but more specifically see the documentation for golang’s http.ListenAndServe and http.ListenAndServeTLS
  • +
  • Default: :8000, which means port 8000 on all interfaces
  • +
  • Required: No
  • +
+ +

See https://golang.org/pkg/net/http/#ListenAndServe for detailed documentation on the format.

+ +

Server SSL Certificate

+ +
    +
  • Format: SERVER_CERT="_full_path_and_filename_of_ssl_server_cert_file_"
  • +
  • Default: None
  • +
  • Required: No
  • +
+ +

If SERVER_CERT is not specified, checks-out runs without SSL. See https://golang.org/pkg/net/http/#ListenAndServeTLS +for detailed documentation on the format.

+ +

Server SSL Key

+ +
    +
  • Format: SERVER_KEY="_full_path_and_filename_of_ssl_server_key_file_"
  • +
  • Default: None
  • +
  • Required: Only if SERVER_CERT is also specified
  • +
+ +

If SERVER_CERT is specified, then SERVER_KEY must be specified as well. See https://golang.org/pkg/net/http/#ListenAndServeTLS for detailed documentation on the format.

+ +

Database configuration

+ +

Database Driver

+ +
    +
  • Format: DB_DRIVER=sqlite3|postgres|mysql
  • +
  • Default: None
  • +
  • Required: Yes
  • +
+ +

Database Datasource

+ +
    +
  • Format: DB_SOURCE="_db_driver_specific_datasource_spec_"
  • +
  • Default: None
  • +
  • Required: Yes
  • +
+ +

Please refer to the documentation for the sqlite3, lib/pq, and mysql drivers for their respective +datasource specifications for this environment variable.

+ +

Slack integration

+ +
    +
  • Format: SLACK_TARGET_URL="_slack_integration_url"
  • +
  • Default: None
  • +
  • Required: No
  • +
+ +

If the SLACK_TARGET_URL is not defined, then no logging into slack will happen, however it will also +currently cause logging that Slack is not configured to get generated every time a slackable event happens.

+ +

Github integration

+ +

Email Address To Use for Github

+ +
    +
  • Format: GITHUB_EMAIL="_valid_email_address_"
  • +
  • Default: None
  • +
  • Required: Yes
  • +
+ +

Email address for git commits.

+ +

URL for Github API

+ +
    +
  • Format: GITHUB_URL="_protocol_plus_hostname_plus_path_prefix_of_url_"
  • +
  • Default: https://github.com
  • +
  • Required: No, defaults to https://github.com which is fine unless you are using your own enterprise github
  • +
+ +

Github OAuth2 Client ID

+ +
    +
  • Format: GITHUB_CLIENT="_your_OAuth2_client_id_"
  • +
  • Default: None
  • +
  • Required: Yes. You must supply the github OAuth2 client ID issued by the github server you are connecting to (github.com or enterprise-hosted github server)
  • +
+ +

Github OAuth2 Secret

+ +
    +
  • Format: GITHUB_SECRET="_your_OAuth2_secret_"
  • +
  • Default: None
  • +
  • Required: Yes. You must supply the github OAuth2 secret issued by the github server you are connecting to (github.com or enterprise-hosted github server)
  • +
+ +

Github Scope To Use

+ +
    +
  • Format: GITHUB_SCOPE="_valid_github_scope_specification_". Please see github documentation for specifics on the specification format
  • +
  • Default: read:org,repo:status,admin:repo_hook,admin:org_hook
  • +
  • Required: No
  • +
+ +

Permissions granted to checks-out by Github. The minimum +required permissions are the default ones.

+ +

Github rate limiting

+ +
    +
  • Format: GITHUB_BATCH_PER_SECOND=int
  • +
  • Default: 10
  • +
  • Required: no
  • +
+ +

GitHub batch access rate limiter. For certain calls that might happen in rapid succession, this limits how +fast those calls are made to the server. The value is in Hertz; the default value of 10 means that you cannot +send more than 10 calls a second to Github.

+ +

Github testing-only settings

+ +

Enable Github Integration Tests

+ +
    +
  • Format: GITHUB_TEST_ENABLE=true|false
  • +
  • Default: False
  • +
  • Required: No
  • +
+ +

Enable Github integration tests to run

+ +

Github Integration Tests OAuth2 Token

+ +
    +
  • Format: GITHUB_TEST_TOKEN=__OAUTH2_TOKEN_TO_USE
  • +
  • Default: None
  • +
  • Required: Only if GITHUB_TEST_ENABLE is true
  • +
+ +

This is an OAuth2 token to be sent to the github endpoint for github integration test API requests

+ +

Logging/Debug

+ +

Debug Logging

+ +
    +
  • Format: LOG_LEVEL=debug|info|warn|error|fatal|panic
  • +
  • Default: info
  • +
  • Required: No
  • +
+ +

Specifies the default logging level used in checks-out.

+ +

Checks-out Sunlight

+ +
    +
  • Format: CHECKS_OUT_SUNLIGHT=true|false
  • +
  • Default: false
  • +
  • Required: No
  • +
+ +

If set to true, exposes endpoints and data that might not be suitable for a live site. Specifically, +the following behaviors are enabled:

+ +
    +
  • /api/repos endpoint is available that will show all repositories managed under checks-out
  • +
  • /version endpoint is available that will show the checks-out version
  • +
  • Outputs stack trace information over HTTP when errors happen
  • +
  • Puts the X-CHECKS-OUT-VERSION HTTP header with the checks-out version in every response
  • +
+ +

Blacklist User Agents from Logging

+ +
    +
  • Format: BLACKLIST_USER_AGENTS=user_agent_1[:user_agent_2:...:user_agent_N]
  • +
  • Default: false
  • +
  • Required: No
  • +
+ +

Specify a colon-separated list of user agent strings that the request logging middleware +should skip. This allows suppressing logging of user agents, such as the AWS health checker, +that are generally operational noise.

+ +

How Frequently To Log Operational Statistics

+ +
    +
  • Format: LOG_STATS_PERIOD=_valid_time.ParseDuration()_string_
  • +
  • Default: 0 (Do not periodically log)
  • +
  • Required: No
  • +
+ +

Specify a time duration in a valid format understood by the +time.ParseDuration() function to periodically log activity +of checks-out such as number of commits, approvers, and disapprovers in the specified time period.

+ +

Caching

+ +

Response caching

+ +
    +
  • Format: CACHE_TTL=_time_specified_in_time.Duration_format_
  • +
  • Default: 15 minutes
  • +
  • Required: No
  • +
+ +

Determines the length of time the gin middleware will cache responses. Default is 15 minutes.

+ +

Github Repo Artifact Caching

+ +
    +
  • Format: LONG_CACHE_TTL=_time_specified_in_time.Duration_format_
  • +
  • Default: 24 hours
  • +
  • Required: No
  • +
+ +

Determines the length of time checks-out will cache github artifacts like user information, +organization members, and team members in memory before going back to the server. Default +is 24 hours.

+ +

Account Creation Control

+ +

Limit User Access

+ +
    +
  • Format: LIMIT_USERS=true|false
  • +
  • Default: false
  • +
  • Required: No
  • +
+ +

If enabled, only users listed in the limit_users table in the database are +allowed to create accounts. Any existing accounts will still function, even +if the user’s name is not in the limit_users table.

+ +

Limit Organizational Access

+ +
    +
  • Format: LIMIT_ORGS=true|false
  • +
  • Default: false
  • +
  • Required: No
  • +
+ +

If enabled, only users in the organizations listed in the limit_orgs table +in the database are allowed to create accounts. Any existing accounts will +still function, even if the user is not any of the orgs named in the +limit_orgs table.

+ +

If both LIMIT_ORGS and LIMIT_USERS are set to true then a user can +either be explicitly named in limit_users or can belong to an org named +in limit_orgs.

+ +

Miscellaneous Properties

+ +

Documentation Location

+ +
    +
  • Format: CHECKS_OUT_DOCS_URL=_URL_for_docs_no_closing_slash_
  • +
  • Default: https://www.capitalone.io/checks-out
  • +
  • Required: No
  • +
+ +

Provides the base URL for links to the documentation in the UI.

+ +

Slack Integration

+ +
    +
  • Format: SLACK_TARGET_URL=url
  • +
  • Default: none
  • +
  • Required: no
  • +
+ +

Provides the default Slack notification URL. This is the URL that’s used by default for Slack integration when the +target specified in the .checks-out file is “slack”.

+ +
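A sketch with a placeholder Slack incoming-webhook URL; the path segments below are fake and must be replaced with a real webhook:

SLACK_TARGET_URL=https://hooks.slack.com/services/T00000000/B00000000/XXXXXXXXXXXXXXXXXXXXXXXX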

Admin management

+ +
    +
  • Format: GITHUB_ADMIN_ORG=_github_org_name_
  • +
  • Default: none
  • +
  • Required: no
  • +
+ +

Specifies the GitHub organization whose members have admin privileges in checks-out. If this is not specified, there are no admin users in checks-out. Admin users have access to certain REST API endpoints that other users do not.

+ +
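For example, assuming a hypothetical organization named platform-admins:

GITHUB_ADMIN_ORG=platform-admins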

Template Repo Name

+ +
    +
  • Format: ORG_REPOSITORY_NAME=_name_for_template_repo_per_org_
  • +
  • Default: checks-out-configuration
  • +
  • Required: No
  • +
+ +

Org management repo name. This is the name of the repo in an org that is used to hold the default .checks-out and MAINTAINERS files for all repos in the org.

+ +

Legacy Properties

+ +

The following environment variables exist to provide system-wide defaults for repos that are still using a .lgtm file. They are considered obsolete.

+ +
    +
  • Format: CHECKS_OUT_APPROVALS=_number_of_approvals_
  • +
  • Default: 2
  • +
  • Required: No
  • +
+ +

Legacy default number of approvals

+ +
    +
  • Format: CHECKS_OUT_PATTERN=_regex_for_approval_comments_
  • +
  • Default: (?i)LGTM
  • +
  • Required: No
  • +
+ +

Legacy matching pattern

+ +
    +
  • Format: CHECKS_OUT_SELF_APPROVAL_OFF=true|false
  • +
  • Default: false
  • +
  • Required: No
  • +
+ +

Legacy self-approval behavior. Set to true to disable self-approvals.

+ +
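A sketch for a deployment that still serves .lgtm repositories; the first two values are simply the documented defaults, and self-approval is disabled:

CHECKS_OUT_APPROVALS=2
CHECKS_OUT_PATTERN=(?i)LGTM
CHECKS_OUT_SELF_APPROVAL_OFF=true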

Registration

+ +

Register your application with GitHub (or GitHub Enterprise) to create your client id and secret. It is very important that the redirect URL matches your http(s) scheme and hostname exactly, with /login as the path.

+ +

Please use this screenshot for reference:

+ +

(Screenshot: GitHub application registration)

+ +

Reverse Proxies

+ +

If you are running behind a reverse proxy please ensure the X-Forwarded-For and X-Forwarded-Proto headers are configured.

+ +

This is an example nginx configuration:

+ +
location / {
+    proxy_set_header X-Forwarded-For $remote_addr;
+    proxy_set_header X-Forwarded-Proto $scheme;
+    proxy_set_header Host $http_host;
+    proxy_pass http://127.0.0.1:8000;
+}
+
+ +

This is an example caddy server configuration:

+ +
checks-out.mycompany.com {
+        proxy / localhost:8000 {
+                proxy_header X-Forwarded-Proto {scheme}
+                proxy_header X-Forwarded-For {host}
+                proxy_header Host {host}
+        }
+}
+
+ +

Note that when running behind a reverse proxy you should change the recommended port mappings from --publish=80:8000 to something like --publish=8000:8000.

+
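For illustration only, a minimal docker run invocation with the adjusted port mapping; the image name is a placeholder and the environment variables described earlier still need to be supplied (for example with --env or --env-file):

docker run -d \
  --publish=8000:8000 \
  your-registry/checks-out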
+
+ +
+ + +
+
+ + + \ No newline at end of file diff --git a/docs/maintainers/index.html b/docs/maintainers/index.html new file mode 100644 index 0000000..417f8b7 --- /dev/null +++ b/docs/maintainers/index.html @@ -0,0 +1,192 @@ + + + + + Maintainers · checks-out + + + + + + + + + + + + + + + + +
+ +
+

Maintainers

+ +
+ +

The list of approvers is stored in a MAINTAINERS text file in the root of your repository. The maintainers file can be in either a text or HJSON format. The format is specified in the .checks-out file and the default format is text.

+ +

You can inspect the maintainers file for your repository using the API endpoint /api/repos/[orgname]/[reponame]/maintainers. The output will have any github-org and github-team macros expanded (see below). If the maintainers file cannot be parsed then an error message will be returned.

+ +
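For example, with placeholder host, organization, and repository names (any authentication required by your deployment is not shown):

curl https://checks-out.example.com/api/repos/myorg/myrepo/maintainers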

Text format

+ +

The text format can be any of the following types.

+ +

Username, separated by newline:

+ +
foo
+bar
+baz
+
+ +

Username and email address, separated by newline:

+ +
foo <foo@mail.com>
+bar <bar@mail.com>
+baz <baz@mail.com>
+
+ +

Full name, email address, and username, separated by newline:

+ +
Fooshure Jones <foo@mail.com> (@foo)
+Bar None <bar@mail.com> (@bar)
+Bazinga Smith <baz@mail.com> (@baz)
+
+ +

Directives for importing GitHub organizations and GitHub teams. These directives populate both the ‘people’ and ‘org’ fields with the correct values (see below).

+ +
github-org foo # loads organization foo
+github-team bar # loads team bar within the organization of the repository
+github-team bar foo # loads team bar from organization foo
+github-org repo-self # loads the organization of the repository
+github-team repo-self # loads all the teams of the repository
+
+ +

In the examples above the groups are automatically assigned the following names:

+ +
foo
+bar
+foo-bar
+[name of organization]
+[names of teams]
+
+ +

HJSON format

+ +

Human JSON format inspired by the Docker project. Organizations can be specified using this format.

+ +

The github-org and github-team directives can be used in the ‘org’ section but not in the ‘people’ section. These directives will populate the correct values into the ‘people’ section.

+ +
{
+  people:
+  {
+    bob:
+    {
+      name: Bob Bobson
+      email: bob@email.co
+    }
+    fred:
+    {
+      name: Fred Fredson
+      email: fred@email.co
+    }
+    jon:
+    {
+      name: Jon Jonson
+      email: jon@email.co
+    }
+    ralph:
+    {
+      name: Ralph Ralphington
+      email: ralph@email.co
+    }
+    george:
+    {
+      name: George McGeorge
+      email: george@email.co
+    }
+  }
+  org:
+  {
+    cap:
+    {
+      people: [ "bob", "fred", "jon", "github-org cap" ]
+    }
+    iron:
+    {
+      people: [ "ralph", "george", "github-team iron" ]
+    }
+  }
+}
+
+
+
+ +
+ + +
+
+ + + \ No newline at end of file diff --git a/docs/overview/index.html b/docs/overview/index.html new file mode 100644 index 0000000..a67cbb1 --- /dev/null +++ b/docs/overview/index.html @@ -0,0 +1,151 @@ + + + + + Quick Start · checks-out + + + + + + + + + + + + + + + + +
+ +
+

Quick Start

+ +
+ +

Overview

+ +

Checks-out is a fork of LGTM.

+ +

Checks-out will register itself as a required status check. When you enable your repository on public GitHub it is automatically configured to use protected branches. Protected branches prevent pull requests from being merged until required status checks are passing. If you are using Enterprise GitHub then you might be required to manually enable protected branches on the master branch.

+ +

You can customize many options using a .checks-out file in your repository. Please see the customization documentation for more detail.

+ +

Setup

+ +

The simplest way to configure checks-out is to create a .checks-out file in the default branch of your GitHub repository with the contents:

+ +
approvals:
+[
+  {
+    # count determines how many approvals are needed
+    # self determines whether you can approve your own pull requests
+    match: "all[count=1,self=true]"
+  }
+]
+
+ +

Create a MAINTAINERS file in the default branch with the contents:

+ +
github-org repo-self
+
+ +

Log in to the checks-out service (this will provide the bot with credentials to monitor your repository) and enable the service for your repo. Click on the button that says “OFF” to enable the service.

+ +

Organization Setup

+ +

Create a repository named “checks-out-configuration” in your organization. The files template.checks-out and template.MAINTAINERS in the checks-out-configuration repository will be used for any repository that is missing .checks-out or MAINTAINERS files. You can also register an entire GitHub organization using the checks-out user interface. Select the “on/off” toggle next to the name of an organization. Registering an organization will register any existing repositories and will auto-register any new repositories. Added in version 0.12.0.

+ +
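A sketch of the resulting repository layout, holding just the two template files described above:

checks-out-configuration/
    template.checks-out
    template.MAINTAINERS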

Approvers

+ +

Define a list of individuals that can approve pull requests. This list should be stored in a MAINTAINERS file at the root of your repository. Please see the maintainers documentation for supported file formats.

+ +

Please note that checks-out pulls the MAINTAINERS file from your repository’s default branch (typically master). Changes to the MAINTAINERS file are not recognized until they are present in the default branch. Changes made to the MAINTAINERS file in a pull request are therefore not recognized until the pull request is merged.

+ +

Approvals

+ +

Pull requests are locked and cannot be merged until the minimum number of approvals is received. Project maintainers can indicate their approval by commenting on the pull request and including “I approve” in their approval text.

+ +
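The default approval pattern (see the DEFAULT_PATTERN value in envvars.go in this change) is “(?i)^I approve” with optional version and comment fields, so under the default configuration any of the following comments counts as an approval; the version number is illustrative:

I approve
I approve version: 1.2.0
I approve version: 1.2.0 comment: looks good after the CI fix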

The service also provides integration with GitHub Reviews. An accepted GitHub Review is counted as an approval. A GitHub Review that requests additional changes blocks the pull request from merging.

+
+
+ +
+ + +
+
+ + + \ No newline at end of file diff --git a/docs/sitemap.xml b/docs/sitemap.xml new file mode 100644 index 0000000..eac71cd --- /dev/null +++ b/docs/sitemap.xml @@ -0,0 +1,66 @@ + + + + + http://www.capitalone.io/checks-out/overview/ + 2015-12-05T16:00:21-08:00 + + + + http://www.capitalone.io/checks-out/maintainers/ + 2015-12-05T16:00:21-08:00 + + + + http://www.capitalone.io/checks-out/branches/ + 2015-12-05T16:00:21-08:00 + + + + http://www.capitalone.io/checks-out/customize/ + 2015-12-05T16:00:21-08:00 + + + + http://www.capitalone.io/checks-out/approvals/ + 2015-12-05T16:00:21-08:00 + + + + http://www.capitalone.io/checks-out/install/ + 2015-12-05T16:00:21-08:00 + + + + http://www.capitalone.io/checks-out/api/ + 2017-06-21T10:56:00-04:00 + + + + http://www.capitalone.io/checks-out/support/ + 2015-12-05T16:00:21-08:00 + + + + http://www.capitalone.io/checks-out/faq/ + 2015-12-05T16:00:21-08:00 + + + + http://www.capitalone.io/checks-out/categories/ + 0 + + + + http://www.capitalone.io/checks-out/tags/ + 0 + + + + http://www.capitalone.io/checks-out/ + 2015-12-05T16:00:21-08:00 + 0 + + + \ No newline at end of file diff --git a/docs/style.css b/docs/style.css new file mode 100644 index 0000000..902542d --- /dev/null +++ b/docs/style.css @@ -0,0 +1,2936 @@ +html { + font-family: sans-serif; + -ms-text-size-adjust: 100%; + -webkit-text-size-adjust: 100% +} + +body { + margin: 0 +} + +article, +aside, +details, +figcaption, +figure, +footer, +header, +hgroup, +main, +menu, +nav, +section, +summary { + display: block +} + +audio, +canvas, +progress, +video { + display: inline-block; + vertical-align: baseline +} + +[hidden], +template { + display: none +} + +a { + background-color: transparent +} + +a:active, +a:hover { + outline: 0 +} + +abbr[title] { + border-bottom: 1px dotted +} + +b, +strong { + font-weight: bold +} + +dfn { + font-style: italic +} + +h1 { + font-size: 2em; + margin: .67em 0 +} + +mark { + background: #ff0; + color: #000 +} + +small { + font-size: 80% +} + +sub, +sup { + font-size: 75%; + line-height: 0; + position: relative; + vertical-align: baseline +} + +sup { + top: -0.5em +} + +sub { + bottom: -0.25em +} + +img { + border: 0 +} + +figure { + margin: 1em 40px +} + +hr { + -moz-box-sizing: content-box; + box-sizing: content-box; + height: 0 +} + +pre { + overflow: auto +} + +code, +kbd, +pre, +samp { + font-family: monospace, monospace; + font-size: 1em +} + +button, +input, +optgroup, +select, +textarea { + color: inherit; + font: inherit; + margin: 0 +} + +button { + overflow: visible +} + +button, +select { + text-transform: none +} + +button, +html input[type="button"], +input[type="reset"], +input[type="submit"] { + -webkit-appearancebuttoncursor: pointer +} + +button[disabled], +html input[disabled] { + cursor: default +} + +button::-moz-focus-inner, +input::-moz-focus-inner { + border: 0; + padding: 0 +} + +input { + line-height: normal +} + +input[type="checkbox"], +input[type="radio"] { + box-sizing: border-box; + padding: 0 +} + +input[type="number"]::-webkit-inner-spin-button, +input[type="number"]::-webkit-outer-spin-button { + height: auto +} + +input[type="search"] { + -webkit-appearance: textfield; + box-sizing: content-box +} + +input[type="search"]::-webkit-search-cancel-button, +input[type="search"]::-webkit-search-decoration { + -webkit-appearance: none +} + +fieldset { + border: 1px solid #c0c0c0; + margin: 0 2px; + padding: .35em .625em .75em +} + +legend { + border: 0; + padding: 0 +} + +textarea { + overflow: auto +} + +optgroup { + 
font-weight: bold +} + +table { + border-collapse: collapse; + border-spacing: 0 +} + +td, +th { + padding: 0 +} + +html { + font-family: sans-serif; + -ms-text-size-adjust: 100%; + -webkit-text-size-adjust: 100% +} + +body { + margin: 0 +} + +article, +aside, +details, +figcaption, +figure, +footer, +header, +hgroup, +main, +menu, +nav, +section, +summary { + display: block +} + +audio, +canvas, +progress, +video { + display: inline-block; + vertical-align: baseline +} + +audio:not([controls]) { + display: none; + height: 0 +} + +[hidden], +template { + display: none +} + +a { + background-color: transparent +} + +a:active, +a:hover { + outline: 0 +} + +abbr[title] { + border-bottom: 1px dotted +} + +b, +strong { + font-weight: bold +} + +dfn { + font-style: italic +} + +h1 { + font-size: 2em; + margin: .67em 0 +} + +mark { + background: #ff0; + color: #000 +} + +small { + font-size: 80% +} + +sub, +sup { + font-size: 75%; + line-height: 0; + position: relative; + vertical-align: baseline +} + +sup { + top: -0.5em +} + +sub { + bottom: -0.25em +} + +img { + border: 0 +} + +svg:not(:root) { + overflow: hidden +} + +figure { + margin: 1em 40px +} + +hr { + -moz-box-sizing: content-box; + box-sizing: content-box; + height: 0 +} + +pre { + overflow: auto +} + +code, +kbd, +pre, +samp { + font-family: monospace, monospace; + font-size: 1em +} + +button, +input, +optgroup, +select, +textarea { + color: inherit; + font: inherit; + margin: 0 +} + +button { + overflow: visible +} + +button, +select { + text-transform: none +} + +button, +html input[type="button"], +input[type="reset"], +input[type="submit"] { + -webkit-appearance: button; + cursor: pointer +} + +button[disabled], +html input[disabled] { + cursor: default +} + +button::-moz-focus-inner, +input::-moz-focus-inner { + border: 0; + padding: 0 +} + +input { + line-height: normal +} + +input[type="checkbox"], +input[type="radio"] { + -moz-box-sizing: border-box; + box-sizing: border-box; + padding: 0 +} + +input[type="number"]::-webkit-inner-spin-button, +input[type="number"]::-webkit-outer-spin-button { + height: auto +} + +input[type="search"] { + -webkit-appearance: textfield; + -moz-box-sizing: content-box; + box-sizing: content-box +} + +input[type="search"]::-webkit-search-cancel-button, +input[type="search"]::-webkit-search-decoration { + -webkit-appearance: none +} + +fieldset { + border: 1px solid #c0c0c0; + margin: 0 2px; + padding: .35em .625em .75em +} + +legend { + border: 0; + padding: 0 +} + +textarea { + overflow: auto +} + +optgroup { + font-weight: bold +} + +table { + border-collapse: collapse; + border-spacing: 0 +} + +td, +th { + padding: 0 +} + +*, +*:after, +*:before { + -moz-box-sizing: border-box; + box-sizing: border-box +} + +html { + height: 100% +} + +body { + min-height: 100%; + font-family: 'proxima-nova', helvetica neue, helvetica, arial, sans-serif; + font-size: 16px; + line-height: 1.5; + color: #4d5659; + text-rendering: optimizeSpeed +} + +p { + margin-top: 0; + margin-bottom: 30px +} + +::-moz-selection { + background-color: #74ddff; + color: #fff +} + +::selection { + background-color: #74ddff; + color: #fff +} + +a { + color: #00b0e9; + text-decoration: none; + cursor: pointer +} + +a:focus { + outline: none +} + +a:hover { + color: #008dba +} + +.pull-left { + float: left +} + +.pull-right { + float: right +} + +.hide { + display: none +} + +.cf:before, +.cf:after { + content: " "; + display: table +} + +.cf:after { + clear: both +} + +img, +iframe, +video { + max-width: 100% +} + +h1, +h2, +h3, 
+h4, +h5, +h6 { + margin-top: 0; + margin-bottom: 30px; + line-height: 1.25; + font-family: 'aktiv-grotesk', helvetica neue, helvetica, arial, sans-serif +} + +h1, +h2, +h3, +h4, +h5, +h6, +strong { + color: #000 +} + +h1, +h2, +h3, +h4, +h5, +h6 { + font-weight: 300 +} + +h6 { + text-transform: uppercase; + color: #828c8f +} + +abbr { + color: #828c8f; + border-bottom: 1px dotted #b7c0c3; + cursor: help +} + +small, +sup, +sub { + font-size: 12px +} + +mark { + padding: 3px 5px; + background: #ffffb3 +} + +blockquote { + margin: 30px 0; + padding: 0 15px; + color: #828c8f; + font: italic 16px/1.55 Georgia, Times, serif +} + +blockquote p:only-child, +blockquote p:last-child { + margin: 0 +} + +blockquote footer { + margin-top: 15px; + font-size: 16px; + color: #9ba3a5 +} + +blockquote footer:before { + content: '\2014 \00A0' +} + +.blockquote-centered { + text-align: center +} + +.blockquote-medium { + font-size: 22px +} + +.blockquote-large { + font-size: 36px; + line-height: 1.3 +} + +@media screen and (max-width:899px) { + h1 { + font-size: 38px + } + h2 { + font-size: 32px + } + h3 { + font-size: 26px + } + h4 { + font-size: 20px + } + h5 { + font-size: 16px + } + h6 { + font-size: 14px + } + .blockquote-large { + font-size: 32px + } +} + +@media screen and (min-width:900px) { + h1 { + font-size: 42px + } + h2 { + font-size: 36px + } + h3 { + font-size: 30px + } + h4 { + font-size: 24px + } + h5 { + font-size: 18px + } + h6 { + font-size: 16px + } +} + +ul, +ol { + margin: 0 0 30px 0; + padding-left: 30px +} + +ul li>ul, +ol li>ul, +ul li>ol, +ol li>ol { + margin-top: 30px +} + +ul li ul li, +ol li ul li { + list-style: circle +} + +ul li li ul li, +ol li li ul li { + list-style: square +} + +ul li { + list-style: disc +} + +.list-unstyled { + padding-left: 0 +} + +.list-unstyled li { + list-style: none +} + +.list-inline li { + display: inline-block +} + +dl dt { + font-weight: 500; + color: #000 +} + +dl dd { + margin: 0 +} + +.container { + margin: 0 auto +} + +.grid-flex-container { + padding: 0; + list-style: none; + display: -webkit-flex; + display: -ms-flexbox; + display: flex; + -ms-flex-flow: row wrap; + flex-flow: row wrap; + -webkit-flex-flow: row wrap +} + +[class^="grid-flex-cell"] { + margin-bottom: 30px +} + +@media screen and (max-width:600px) { + .container { + width: 90% + } + [class^="grid-flex-cell"] { + -webkit-flex: 0 0 100%; + -ms-flex: 0 0 100%; + flex: 0 0 100% + } +} + +@media screen and (min-width:601px) { + [class^="grid-flex-cell"] { + -webkit-flex: 1; + -ms-flex: 1; + flex: 1; + padding: 15px 0 + } + [class^="grid-flex-cell"]:not(:first-child) { + margin-left: 30px + } + .grid-flex-cell-1of2, + .grid-flex-cell-1of3, + .grid-flex-cell-1of4 { + -webkit-flex: none; + -ms-flex: none; + flex: none + } + .grid-flex-cell-1of2 { + width: 50% + } + .grid-flex-cell-1of3 { + width: 34% + } + .grid-flex-cell-1of4 { + width: 25% + } +} + +@media screen and (min-width:481px) and (max-width:900px) { + .container { + width: 85% + } +} + +@media screen and (min-width:901px) { + .container { + width: 70% + } +} + +select, +.form-input, +.form-checkbox, +.form-radio { + margin-bottom: 15px +} + +legend, +label { + font-weight: 800 +} + +legend { + padding: 0 15px; + font-size: 22px; + color: #000 +} + +fieldset { + border: 1px solid #e6eaed; + border-radius: 3px; + padding: 30px; + margin: 0 0 30px 0 +} + +label { + display: block; + margin-bottom: 5px; + font-size: 12px; + cursor: pointer +} + +select, +.form-input, +.form-radio, +.form-checkbox { + outline: 0 +} + 
+.form-inline { + overflow: hidden +} + +.form-input { + display: block; + padding: 10px; + width: 100%; + border-radius: 3px; + border: 1px solid #e6eaed; + -webkit-appearance: none +} + +.form-input:focus { + transition: all .2s ease-in; + border: 1px solid #74ddff; + background-color: #f8fdff +} + +.form-input:disabled { + background-color: #fafbfb; + cursor: not-allowed +} + +.form-input:required:focus { + border: 1px solid #fec57b; + background-color: #fff9f2 +} + +.form-input.input-invalid { + border: 1px solid #ffc7d8; + color: #ff749d +} + +.form-input.input-invalid:focus { + border: 1px solid #e80044; + background-color: #fff4f7 +} + +.form-input.input-valid { + border: 1px solid #c6ebd3; + color: #8ed6a8 +} + +.form-input.input-valid:focus { + border: 1px solid #43bb6e; + background-color: #f4fbf6 +} + +textarea { + resize: none; + min-height: 90px +} + +input[type=file], +label.radio, +label.checkbox, +select { + cursor: pointer +} + +input[type=file] { + padding: 10px; + background-color: #f8f9fa; + font-size: 12px; + color: #b7c0c3 +} + +select { + border: 1px solid #e6eaed; + height: 40px; + padding: 10px; + width: 100%; + background-color: #fff +} + +select:focus { + border: 1px solid #74ddff +} + +select:focus:-moz-focusring { + color: transparent; + text-shadow: 0 0 0 #000 +} + +.input-radio, +.input-checkbox { + margin: 5px 0; + font-size: 16px; + font-weight: normal +} + +.input-radio.disabled, +.input-checkbox.disabled { + color: #b7c0c3; + cursor: not-allowed +} + +.input-radio input[type=checkbox], +.input-checkbox input[type=checkbox], +.input-radio input[type=radio], +.input-checkbox input[type=radio] { + display: inline-block; + margin-right: 10px +} + +@media screen and (min-width:768px) { + .form-inline.form-no-labels .button { + margin-top: 0 + } + .form-inline .form-element { + float: left; + width: 50% + } + .form-inline .button { + margin-top: 22.5px + } + .form-inline .form-element:not(:first-of-type) { + padding-left: 5px + } + .form-inline .form-input, + .form-inline .input-checkbox, + .form-inline .input-radio, + .form-inline select { + margin: 0 + } +} + +.button { + display: inline-block; + border: none; + border-radius: 3px; + padding-left: 15px; + padding-right: 15px; + text-align: center; + cursor: pointer; + background-color: #00b0e9; + transition-delay: 0; + transition-duration: .3s; + transition-timing-function: ease-in; + transition-property: background, border-color; + -webkit-user-select: none; + -moz-user-select: none; + -ms-user-select: none; + user-select: none; + appearance: none +} + +.button:not([class*='button-outlined']) { + padding-top: 10px; + padding-bottom: 10px +} + +.button:hover { + background-color: #009ed2 +} + +.button:disabled, +.button .button-disabled { + background-color: #74ddff; + cursor: not-allowed +} + +.button:focus { + outline: none +} + +.button, +.button:hover { + color: #fff +} + +.button-small { + padding: 3px 5px; + font-size: 12px +} + +.button-large { + padding: 15px 30px; + font-size: 22px +} + +.button-unstyled, +.button-unstyled:hover { + background-color: transparent +} + +.button-unstyled { + padding: 0; + color: #00b0e9 +} + +.button-unstyled:hover { + color: #008dba +} + +.button-neutral { + background-color: #e6eaed +} + +.button-neutral:hover { + background-color: #b0bcc6 +} + +.button-neutral:disabled, +.button-neutral .button-disabled { + background-color: #e6eaed; + color: #b7c0c3 +} + +.button-neutral:disabled:hover, +.button-neutral .button-disabled:hover { + color: #b7c0c3 +} + 
+.button-neutral, +.button-neutral:hover { + color: #272b2d +} + +.button-approve { + background-color: #43bb6e +} + +.button-approve:hover { + background-color: #369658 +} + +.button-approve:disabled, +.button-approve .button-disabled { + background-color: #c6ebd3 +} + +.button-warn { + background-color: #e80044 +} + +.button-warn:hover { + background-color: #ba0036 +} + +.button-warn:disabled, +.button-warn.button-disabled { + background-color: #ffc7d8 +} + +.button-caution { + background-color: transparent; + color: #e80044; + text-decoration: underline +} + +.button-caution:hover { + background-color: transparent; + color: #ba0036 +} + +.button-caution:disabled, +.button-caution .button-disabled { + color: #b7c0c3 +} + +*[class*='button-outlined'] { + border-width: 2px; + border-style: solid; + padding-top: 8px; + padding-bottom: 8px +} + +*[class*='button-outlined'], +*[class*='button-outlined']:hover, +*[class*='button-outlined']:disabled { + background-color: transparent +} + +.button-outlined { + border-color: #00b0e9; + color: #00b0e9 +} + +.button-outlined:hover { + border-color: #008dba; + color: #008dba +} + +.button-outlined:disabled { + border-color: #74ddff; + color: #74ddff +} + +.button-outlined-neutral { + border-color: #b7c0c3; + color: #828c8f +} + +.button-outlined-neutral:hover { + border-color: #828c8f; + color: #4d5659 +} + +.button-outlined-neutral:disabled, +.button-outlined-neutral.button-disabled { + color: #c5cdcf; + border-color: #e6eaed +} + +.button-outlined-neutral:disabled:hover, +.button-outlined-neutral.button-disabled:hover { + color: #c5cdcf +} + +.button-outlined-approve { + border-color: #43bb6e; + color: #43bb6e +} + +.button-outlined-approve:hover { + border-color: #369658; + color: #369658 +} + +.button-outlined-approve:disabled { + border-color: #c6ebd3; + color: #c6ebd3 +} + +.button-outlined-warn { + border-color: #e80044; + color: #e80044 +} + +.button-outlined-warn:hover { + border-color: #ba0036; + color: #ba0036 +} + +.button-outlined-warn:disabled { + border-color: #ffc7d8; + color: #ffc7d8 +} + +.button-group .button { + border-radius: 0; + margin-left: -1px; + border: 1px solid #008dba +} + +.button-group .button:first-child, +.button-group .button:first-child:only-child { + margin: 0 +} + +.button-group .button:first-child:only-child { + border-radius: 3px +} + +.button-group .button:first-child { + border-radius: 3px 0 0 3px +} + +.button-group .button:last-child { + border-radius: 0 3px 3px 0 +} + +.button-group .button-neutral, +.button-group .button-outlined { + border: 1px solid #b7c0c3 +} + +@media screen and (max-width:480px) { + .button-group .button { + border-radius: none; + margin: 0; + width: 100% + } + .button-group .button:first-of-type { + border-radius: 3px 3px 0 0 + } + .button-group .button:last-of-type { + border-radius: 0 0 3px 3px + } + .button-group .button:not(:last-of-type) { + border-bottom: none + } +} + +code { + font-family: Menlo, Monaco, Consolas, monospace; + font-size: 12px; + padding: 3px 5px +} + +pre { + display: block; + padding: 15px; + margin-bottom: 30px; + max-width: 100%; + line-height: 1.5; + word-break: break-all; + word-wrap: break-word +} + +pre code { + padding: 0; + white-space: pre-wrap +} + +code, +pre { + border-radius: 3px +} + +.code-light { + color: #000; + background: #f5f7f8 +} + +.code-dark { + color: #5ca1b5; + background: #003546 +} + +.dropdown-standalone { + position: relative; + display: inline-block; + cursor: pointer; + padding: 10px; + border-radius: 3px +} + 
+.dropdown-standalone:hover { + border-radius: 3px 3px 0 0 +} + +.dropdown-standalone:hover .dropdown { + display: block; + position: absolute; + top: 100%; + left: 0; + width: 100%; + border-radius: 0 0 3px 3px +} + +.dropdown-standalone:hover .dropdown a { + padding: 10px; + display: block +} + +.dropdown-standalone .dropdown { + display: none +} + +.dropdown-standalone .icon-arrow-down { + position: relative; + top: -2px; + margin-left: 5px +} + +.dropdown-outlined, +.dropdown-outlined .dropdown { + border: 1px solid #e6eaed +} + +.dropdown-standalone.dropdown-outlined .dropdown { + width: 101%; + left: -1px +} + +.dropdown-outlined .dropdown { + background-color: #fff +} + +.dropdown-outlined .dropdown li:not(:last-child) { + border-bottom: 1px solid #e6eaed +} + +.dropdown-coloured, +.dropdown-coloured .dropdown { + background-color: #00b0e9 +} + +.dropdown-coloured .button-unstyled, +.dropdown-coloured .dropdown a { + color: #fff +} + +.dropdown-coloured .icon-arrow-down { + border-top-color: #fff +} + +.dropdown-coloured .dropdown li:not(:last-child) { + border-bottom: 1px solid #009ed2 +} + +.dropdown-coloured .dropdown a:hover { + color: #e3f8ff +} + +.dropdown-neutral, +.dropdown-neutral .dropdown { + background-color: #e6eaed +} + +.dropdown-neutral a { + color: #4d5659 +} + +.dropdown-neutral a:hover, +.dropdown-neutral .button-unstyled:hover { + color: #272b2d +} + +.dropdown-neutral .icon-arrow-down { + border-top-color: #4d5659 +} + +.dropdown-neutral .dropdown li:not(:last-child) { + border-bottom: 1px solid #b7c0c3 +} + +.icon-arrow-left, +.icon-arrow-right, +.icon-arrow-up, +.icon-arrow-down { + display: inline-block; + width: 0; + height: 0; + line-height: 0; + font-size: 0 +} + +.icon-arrow-up, +.icon-arrow-down { + border-left: 5px solid transparent; + border-right: 5px solid transparent +} + +.icon-arrow-left, +.icon-arrow-right { + border-top: 5px solid transparent; + border-bottom: 5px solid transparent +} + +.icon-arrow-left { + border-right: 5px solid #00b0e9 +} + +.icon-arrow-right { + border-left: 5px solid #00b0e9 +} + +.icon-arrow-down { + border-top: 5px solid #00b0e9 +} + +.icon-arrow-up { + border-bottom: 5px solid #00b0e9 +} + +.media-outlined, +img { + border-radius: 3px +} + +figure { + width: auto; + margin: 0 +} + +figure img { + vertical-align: middle +} + +figure figcaption { + color: #828c8f; + font-size: 12px +} + +.media-outlined { + display: inline-block; + padding: 5px; + border: 1px solid #e6eaed +} + +@media screen and (max-width:480px) { + figure { + text-align: center + } + figure img+img { + margin: 5px auto 0 + } +} + +@media screen and (min-width:481px) { + figure img:not(:last-child):not(:only-child) { + margin-right: 5px + } + figure img:not(:only-child) { + margin-top: 5px + } + figure img+figcaption { + margin-bottom: 5px + } + figure img+figcaption:not(:only-child) { + margin-right: 5px + } +} + +.message { + padding: 15px; + position: relative; + background-color: #e3f8ff; + color: #00b0e9 +} + +.message a { + font-weight: bold +} + +.message p:last-of-type { + margin: 0 +} + +.message:after { + border: solid transparent; + content: ''; + height: 0; + width: 0; + position: absolute; + pointer-events: none; + border-color: rgba(255, 255, 255, 0); + border-width: 5px +} + +.message-dismissable { + padding-right: 30px +} + +.message-full-width { + text-align: center +} + +.message-above:after, +.message-below:after { + left: 50%; + margin-left: -6px +} + +.message-below:after { + border-bottom-color: #e3f8ff; + bottom: 100% +} + 
+.message-below.message-error:after { + border-bottom-color: #ffe3eb +} + +.message-below.message-success:after { + border-bottom-color: #e8f7ef +} + +.message-below.message-alert:after { + border-bottom-color: #ffeed7 +} + +.message-above:after { + border-top-color: #e3f8ff; + top: 100% +} + +.message-above.message-error:after { + border-top-color: #ffe3eb +} + +.message-above.message-success:after { + border-top-color: #e8f7ef +} + +.message-above.message-alert:after { + border-top-color: #ffeed7 +} + +.message-error { + background-color: #ffe3eb; + color: #e80044 +} + +.message-error a { + color: #ba0036 +} + +.message-error a:hover { + color: #8b0029 +} + +.message-alert { + background-color: #ffeed7; + color: #f18902 +} + +.message-alert a { + color: #c16e02 +} + +.message-alert a:hover { + color: #915201 +} + +.message-success { + background-color: #e8f7ef; + color: #43bb6e +} + +.message-success a { + color: #369658 +} + +.message-success a:hover { + color: #287042 +} + +.close { + display: block; + position: absolute; + top: 50%; + right: 15px; + margin-top: -15px; + cursor: pointer; + font-size: large; + font-weight: bold +} + +form .message { + padding: 10px +} + +.modal { + position: fixed; + top: 50%; + left: 50%; + border-radius: 3px; + z-index: 20; + -webkit-transform: translate(-50%, -50%); + -ms-transform: translate(-50%, -50%); + transform: translate(-50%, -50%); + background-color: #fff; + -webkit-overflow-scrolling: touch +} + +.modal.modal-no-sections { + padding: 30px; + text-align: center +} + +.modal-title { + margin: 0; + float: left +} + +.modal-head, +.modal-body { + padding: 15px +} + +.modal-head { + border-bottom: 1px solid #e6eaed +} + +.modal-footer { + padding: 15px 0; + text-align: center; + border-top: 1px solid #e6eaed +} + +.modal-footer button, +.modal-footer .button { + min-width: 120px +} + +.modal-footer button+button, +.modal-footer .button+.button { + margin-left: 5px +} + +.modal-close { + float: right; + color: #b7c0c3; + line-height: 1.5; + font-weight: bold; + font-size: 22px +} + +.modal-close:hover { + color: #828c8f +} + +.has-modal:before { + content: '' +} + +.overlay, +.has-modal:before { + position: fixed; + top: 0; + left: 0; + width: 100%; + height: 100%; + z-index: 10; + box-shadow: inset 0 0 100px rgba(0, 0, 0, 0.7); + background-color: rgba(0, 0, 0, 0.4) +} + +@media screen and (max-width:480px) { + .modal { + height: 100%; + width: 100%; + top: 0; + left: 0; + border-radius: 0; + -webkit-transform: none; + -ms-transform: none; + transform: none + } +} + +@media screen and (min-width:481px) and (max-width:768px) { + .modal { + width: 80% + } +} + +@media screen and (min-width:769px) { + .modal { + width: 50% + } +} + +.top-nav { + margin-bottom: 30px +} + +.top-nav ul { + margin: 0 +} + +.top-nav li { + padding: 15px +} + +.top-nav a { + display: inline-block +} + +.top-nav label { + font-weight: normal; + font-size: 16px; + line-height: 1 +} + +.top-nav .menu-toggle { + visibility: hidden; + position: absolute; + left: -100% +} + +.top-nav .icon-arrow-down { + position: relative; + top: -2px; + margin-left: 5px +} + +.top-nav-light { + border-bottom: 1px solid #e6eaed +} + +.top-nav-light label { + color: #00b0e9 +} + +.top-nav-light, +.top-nav-light li { + background-color: #fff +} + +.top-nav-dark label { + color: #438193 +} + +.top-nav-dark a { + color: #438193 +} + +.top-nav-dark a:hover, +.top-nav-dark a.active, +.top-nav-dark label { + color: #85b8c7 +} + +.top-nav-dark, +.top-nav-dark li { + background-color: #003546 +} + 
+.top-nav-dark .icon-arrow-down { + border-top-color: #438193 +} + +.breadcrumbs { + padding: 10px 0 +} + +.breadcrumbs li { + color: #b7c0c3 +} + +.breadcrumbs a { + padding: 0 10px +} + +.pagination li:not(:last-child) { + border-right: 1px solid #e6eaed +} + +.pagination em[class^='icon'] { + top: 1px; + position: relative +} + +.pagination .icon-arrow-right { + border-left: 5px solid #00b0e9 +} + +.pagination .icon-arrow-left { + border-right: 5px solid #00b0e9 +} + +.tabs { + border-bottom: 1px solid #e6eaed +} + +.tabs a { + display: block +} + +.side-nav li:not(:last-child) a { + border-bottom: 1px solid #e6eaed +} + +.unavailable-item { + color: #b7c0c3 +} + +.unavailable-item:not([href]), +.unavailable-item:not([href]):hover { + color: #b7c0c3; + cursor: default; + pointer-events: none +} + +.current-item { + color: #4d5659; + font-weight: 800 +} + +.side-nav a, +.pagination a, +.side-nav li, +.pagination li { + display: block +} + +.side-nav a:hover:not(.unavailable-item), +.pagination a:hover:not(.unavailable-item), +.side-nav .current-item, +.pagination .current-item { + background-color: #fafbfb +} + +.pagination, +.breadcrumbs { + display: inline-block +} + +.pagination li, +.breadcrumbs li { + float: left +} + +.breadcrumbs, +.side-nav, +.pagination { + border-radius: 3px; + border: 1px solid #e6eaed +} + +.side-nav a, +.pagination a { + padding: 10px 15px +} + +.breadcrumbs a, +.pagination a { + font-size: 12px +} + +@media screen and (max-width:480px) { + .tabs a { + position: relative; + padding: 10px + } +} + +@media screen and (min-width:481px) { + .tabs a { + padding: 10px 15px + } + .tabs .current-item { + position: relative; + top: 1px; + border: 1px solid #e6eaed; + border-bottom-color: transparent; + border-radius: 3px 3px 0 0; + background-color: #fff + } +} + +@media screen and (max-width:768px) { + .top-nav ul { + display: none + } + .top-nav ul>li { + padding: 10px + } + .top-nav .icon-arrow-down { + display: none + } + .top-nav label, + .top-nav .menu-toggle:checked~ul, + .top-nav .menu-toggle:checked~ul>li { + display: block + } + .top-nav label { + padding: 15px 15px 15px 0; + margin-bottom: 0; + text-align: right; + cursor: pointer + } + .top-nav .menu-toggle:checked~ul .dropdown { + display: block; + z-index: 1000 + } + .top-nav .menu-toggle:checked~ul .dropdown li { + padding: 5px 15px; + float: left; + border: 0 + } + .top-nav .menu-toggle:checked~ul .dropdown a { + font-size: 12px; + font-weight: normal + } + .top-nav-light .menu-toggle:checked~ul>li:not(:last-child) { + border-bottom: 1px solid #e6eaed + } + .top-nav-dark .menu-toggle:checked~ul>li:not(:last-child) { + border-bottom: 1px solid #1f4753 + } +} + +@media screen and (min-width:769px) { + .top-nav label { + display: none + } + .top-nav .has-dropdown { + position: relative + } + .top-nav .has-dropdown:hover .dropdown { + display: block + } + .top-nav .dropdown { + display: none; + position: absolute; + top: 100%; + left: 0 + } + .top-nav .dropdown li { + display: block + } + .top-nav-light .dropdown { + border-left: 1px solid #e6eaed; + border-right: 1px solid #e6eaed + } + .top-nav-light .dropdown li { + border-bottom: 1px solid #e6eaed + } + .top-nav-dark .dropdown li { + border-bottom: 1px solid #1f4753 + } +} + +.progress, +.meter { + border-radius: 3px; + line-height: 0 +} + +.progress.progress-rounded, +.progress-rounded .meter { + border-radius: 50px +} + +.progress { + border: 1px solid #e6eaed; + background-color: #fdfdfd +} + +.progress .meter { + display: inline-block; + 
background-color: #00b0e9; + height: 100% +} + +.progress-small { + height: 15px +} + +.progress-large { + height: 25px +} + +.progress-success .meter { + background-color: #43bb6e +} + +.progress-warning .meter { + background-color: #f18902 +} + +.progress-error .meter { + background-color: #e80044 +} + +.table-responsive { + overflow-x: auto +} + +table { + width: 100%; + max-width: 100%; + margin-bottom: 30px +} + +td, +th { + text-align: left; + padding: 10px; + border-bottom: 1px solid #e6eaed +} + +thead th { + font-weight: 800; + color: #4d5659 +} + +tr.table-cell-error { + background-color: #ffe3eb; + color: #e80044 +} + +tr.table-cell-error a { + color: #ba0036 +} + +tr.table-cell-error a:hover { + color: #8b0029 +} + +tr.table-cell-success { + background-color: #e8f7ef; + color: #43bb6e +} + +tr.table-cell-success a { + color: #369658 +} + +tr.table-cell-success a:hover { + color: #287042 +} + +tr.table-cell-alert { + background-color: #ffeed7; + color: #f18902 +} + +tr.table-cell-alert a { + color: #c16e02 +} + +tr.table-cell-alert a:hover { + color: #915201 +} + +.table-striped tr:nth-child(even) { + color: #828c8f +} + +.table-outlined { + border: 1px solid #e6eaed; + border-radius: 3px +} + +.table-outlined tr:last-of-type td { + border: none +} + +.table-with-hover tr:hover, +.table-striped tr:nth-child(even) { + background-color: #fafbfb +} + +@media screen and (max-width:767px) { + table td, + table th { + padding: 5px 10px + } +} + +.avatar-radius { + border-radius: 3px +} + +.avatar-rounded { + border-radius: 100% +} + +.avatar-small { + height: 25px; + width: 25px +} + +.avatar-medium { + height: 50px; + width: 50px +} + +.avatar-large { + height: 100px; + width: 100px +} + +@media print { + * { + background: transparent !important; + color: #000 !important; + box-shadow: none !important; + text-shadow: none !important + } + a, + a:visited { + text-decoration: underline + } + a[href]:after { + content: " (" attr(href) ")" + } + abbr[title]:after { + content: " (" attr(title) ")" + } + a[href^="#"]:after, + a[href^="javascript:"]:after { + content: '' + } + pre, + blockquote { + page-break-inside: avoid + } + thead { + display: table-header-group + } + tr, + img { + page-break-inside: avoid + } + img { + max-width: 100% !important + } + p, + h2, + h3 { + orphans: 3; + widows: 3 + } + h2, + h3 { + page-break-after: avoid + } +} + +body { + border-style: solid; + border-color: #f2f4f6 +} + +pre { + color: #000; + background: #f5f7f8 +} + +code { + color: #000; + background: #f5f7f8 +} + +header { + padding-bottom: 30px; + border-bottom: 1px solid #f2f4f6 +} + +header h1 { + margin-bottom: 0 +} + +header p { + font-size: 22px +} + +header .button { + margin: 30px 0 +} + +footer[role='contentinfo'] { + padding: 30px 0; + border-top: 1px solid #f2f4f6; + color: #828c8f; + text-align: center +} + +footer[role='contentinfo'] li:not(:last-of-type):after { + content: '\2219'; + margin: 0 5px 0 10px +} + +footer[role='contentinfo'] p { + margin-bottom: 0 +} + +footer[role='contentinfo'] .logo { + margin-top: 30px; + display: inline-block; + width: 17px; + height: 20px; + background: 
url("data:image/svg+xml;base64,PD94bWwgdmVyc2lvbj0iMS4wIiBlbmNvZGluZz0idXRmLTgiPz4NCjwhLS0gR2VuZXJhdG9yOiBBZG9iZSBJbGx1c3RyYXRvciAxNy4xLjAsIFNWRyBFeHBvcnQgUGx1Zy1JbiAuIFNWRyBWZXJzaW9uOiA2LjAwIEJ1aWxkIDApICAtLT4NCjwhRE9DVFlQRSBzdmcgUFVCTElDICItLy9XM0MvL0RURCBTVkcgMS4xLy9FTiIgImh0dHA6Ly93d3cudzMub3JnL0dyYXBoaWNzL1NWRy8xLjEvRFREL3N2ZzExLmR0ZCI+DQo8c3ZnIHZlcnNpb249IjEuMSIgaWQ9IkxheWVyXzEiIHhtbG5zPSJodHRwOi8vd3d3LnczLm9yZy8yMDAwL3N2ZyIgeG1sbnM6eGxpbms9Imh0dHA6Ly93d3cudzMub3JnLzE5OTkveGxpbmsiIHg9IjBweCIgeT0iMHB4Ig0KCSB2aWV3Qm94PSIxNjIgMjQ2LjEgMjg4IDMzNS42IiBlbmFibGUtYmFja2dyb3VuZD0ibmV3IDE2MiAyNDYuMSAyODggMzM1LjYiIHhtbDpzcGFjZT0icHJlc2VydmUiPg0KPGc+DQoJPGc+DQoJCTxwYXRoIGZpbGw9IiNCN0MwQzMiIGQ9Ik0zMDIuMSwzOTUuMmw1OC4xLDcwLjNsMi0yYzYuMy02LjcsMTIuMS0xMy4zLDE3LjctMjBjNS41LTYuNywyMC0yMiwyNS4yLTI5LjFMNDI4LDQ1Ng0KCQkJYy0zLjksNS41LTkuMSwxMi4xLTE1LjMsMTkuNmMtNi4zLDcuNS0xMy43LDE2LjEtMjIuOCwyNS42bDYwLjEsNzQuM2gtNjAuOWwtMzIuMy0zOS43Yy0yOS41LDMwLjctNjAuNSw0Ni05Mi4zLDQ2DQoJCQljLTI4LjcsMC01My4xLTkuMS03My4xLTI3LjVjLTE5LjYtMTguNC0yOS41LTQxLjItMjkuNS02OC40YzAtMzIuMywxNS4zLTU5LjMsNDYuOC04MC41bDIxLjItMTQuNWMwLjQsMCwwLjgtMC40LDEuNi0xLjINCgkJCWMwLjgtMC40LDEuNi0xLjIsMi44LTIuNGMtMjEuMi0yMi44LTMyLjMtNDUuMi0zMi4zLTY3LjJzNy4xLTM5LjcsMjEuMi01My41YzE0LjUtMTMuNywzMy4zLTIwLjQsNTYuNS0yMC40DQoJCQljMjIuNCwwLDQwLjUsNi43LDU1LjEsMjBjMTQuNSwxMy4zLDIyLDMwLjMsMjIsNTAuM2MwLDE0LjEtMy45LDI2LjctMTEuNywzOC4xQzMzNi43LDM2NS43LDMyMi41LDM3OS41LDMwMi4xLDM5NS4yeg0KCQkJIE0yNjMuMiw0MjIuN2wtMi44LDJjLTIwLDEzLjctMzMuNywyNS4yLTQwLjgsMzMuM2MtNy4xLDguMS0xMC43LDE3LjMtMTAuNywyNi43YzAsMTMuMyw1LjUsMjUuOSwxNi4xLDM2LjkNCgkJCWMxMS4xLDEwLjcsMjMuNiwxNi4xLDM2LjksMTYuMWMxOC44LDAsNDAuNS0xMi4xLDY1LjItMzYuOUwyNjMuMiw0MjIuN3ogTTI3My45LDM2MC45bDMuOS0zLjJjNi43LTUuMSwxMi41LTkuOSwxNi45LTEzLjMNCgkJCWM0LjMtMy45LDcuOS03LjEsOS45LTkuOWM0LjMtNS4xLDYuMy0xMS4zLDYuMy0xOS4yYzAtOC43LTIuOC0xNS4zLTguNy0yMC44cy0xMy4zLTcuOS0yMy4yLTcuOWMtOC43LDAtMTYuMSwyLjgtMjIuNCw4LjMNCgkJCWMtNS45LDUuMS05LjEsMTEuNy05LjEsMjBjMCw5LjUsMy45LDE4LjgsMTEuMywyOC4zbDEyLjEsMTQuNUMyNzEuNSwzNTguMywyNzIuMywzNTkuMywyNzMuOSwzNjAuOXoiLz4NCgk8L2c+DQo8L2c+DQo8L3N2Zz4NCg==") no-repeat 0 0 +} + +section { + padding: 30px 0 +} + +section:not(:last-child) { + border-bottom: 1px solid #f2f4f6 +} + +.sample { + border: 1px solid #e6eaed; + padding: 30px; + border-radius: 3px +} + +.sample *:last-child { + margin-bottom: 0 +} + +.sample h1, +.sample h2, +.sample h3, +.sample h4, +.sample h5, +.sample h6 { + margin-bottom: 10px +} + +.avatar { + margin: 0 30px 30px 0 +} + +.buttons .button-group { + margin-bottom: 10px +} + +.buttons .button-group .button { + margin: 0 0 0 -1px +} + +.buttons .button { + margin: 5px +} + +.top-nav ul { + z-index: 10 +} + +.progress { + margin-bottom: 10px +} + +.progress .meter { + min-width: 50% +} + +.modal-sample { + padding: 15px; + margin-bottom: 30px; + box-shadow: inset 0 0 100px rgba(0, 0, 0, 0.7); + background-color: rgba(0, 0, 0, 0.4) +} + +.modal-sample .modal { + position: relative; + top: 0; + left: 50%; + margin-left: -25%; + -webkit-transform: none; + -ms-transform: none; + transform: none +} + +.message, +.form-inline { + margin-bottom: 10px +} + +.color { + display: inline-block; + padding: 15px; + color: #fff; + text-align: center +} + +.blue1 { + background-color: #001620 +} + +.blue2 { + background-color: #002531 +} + +.blue3 { + background-color: #003546 +} + +.blue4 { + background-color: #1f4753 +} + +.blue5 { + background-color: #438193 +} + +.blue6 { + background-color: #00b0e9 +} + +.blue7 { + background-color: #74ddff +} + +.blue8 { + background-color: #e3f8ff; + color: 
#00b0e9 +} + +.gray1 { + background-color: #272b2d +} + +.gray2 { + background-color: #4d5659 +} + +.gray3 { + background-color: #828c8f +} + +.gray4 { + background-color: #b7c0c3 +} + +.gray5 { + background-color: #e6eaed +} + +.gray4, +.gray5 { + color: #272b2d +} + +.pink1 { + background-color: #ee0788 +} + +.pink2 { + background-color: #fec1d1 +} + +.pink3 { + background-color: #ffedf2 +} + +.pink2, +.pink3 { + color: #ee0788 +} + +.red1 { + background-color: #e80044 +} + +.red2 { + background-color: #ffc7d8 +} + +.red3 { + background-color: #ffe3eb +} + +.red2, +.red3 { + color: #e80044 +} + +.green1 { + background-color: #43bb6e +} + +.green2 { + background-color: #c6ebd3 +} + +.green3 { + background-color: #e8f7ef +} + +.green2, +.green3 { + color: #43bb6e +} + +.orange1 { + background-color: #f18902 +} + +.orange2 { + background-color: #fec57b +} + +.orange3 { + background-color: #ffeed7 +} + +.orange2, +.orange3 { + color: #f18902 +} + +.icon-sample { + margin: 0 5px +} + +[class^="grid-flex-cell"] { + border: 1px solid #e6eaed; + border-radius: 3px; + text-align: center +} + +[class^="grid-flex-cell"] p { + margin: 0 +} + +@media screen and (min-width:769px) { + body { + border-width: 10px + } +} + +@media screen and (max-width:768px) { + body { + border-width: 5px + } + .modal-sample .modal { + width: auto; + left: 0; + margin-left: 0 + } +} + +@media screen and (max-width:480px) { + .button { + display: inline-block; + width: auto + } + .logo { + margin: 30px auto; + display: block; + text-align: center + } + .color { + width: 100% + } +} + +@media screen and (min-width:481px) { + .logo { + margin: 20px; + display: inline-block + } + .color { + width: 250px; + margin-bottom: 5px + } +} + +html, +body, +h1, +h2, +h3, +p { + font-family: "Roboto" +} + +body { + border: none +} + +.hidden { + display: none +} + +.top-nav { + margin-bottom: 60px; + border-bottom: 1px solid #e6eaed; + background-color: white +} + +.top-nav ul { + margin: 0 +} + +.top-nav li { + padding: 15px +} + +.top-nav a { + display: inline-block +} + +.top-nav label { + font-weight: normal; + font-size: 16px; + line-height: 1 +} + +.navbar { + background-color: #FFF; + border-radius: 0; + z-index: 2; + margin-top: 15px; + padding-bottom: 15px; + margin-bottom: 30px; + border-bottom: 1px solid #e6eaed; + background-color: white +} + +.navbar-brand { + width: 133px; + min-width: 133px; + max-width: 133px; + height: 32px; + min-height: 32px; + max-height: 32px; + background-size: 133px; + background-repeat: no-repeat; + background-position: center center; + background-size: 116px; + background-position-x: 0; + display: inline-block; + float: left; + border-radius: 3px; + margin-left: 12px +} + +.navbar img { + width: 32px; + min-width: 32px; + max-width: 32px; + height: 32px; + min-height: 32px; + max-height: 32px; + border-radius: 50% +} + +.navbar-nav.navbar-right { + margin: 0; + list-style: none; + display: inline-block; + float: right +} + +.navbar-nav.navbar-right li { + line-height: 32px; + max-height: 32px; + padding: 0; + display: inline-block; + vertical-align: middle +} + +.navbar.navbar-homepage { + margin-bottom: 0; + border: none +} + +.navbar.navbar-homepage .button { + padding: 0 30px; + text-transform: uppercase; + font-size: 14px; + line-height: 28px; + color: #E48125; + border-color: #E48125 +} + +.footer { + background: #FFF; + padding: 30px 0; + color: #AAA; + text-align: right +} + +.footer a { + color: #666 +} + +.message { + margin-top: 100px +} + +.banner { + height: calc(100vh - 10px); + 
min-height: 500px; + background: #FFF; + display: flex; + align-items: center; + justify-content: center +} + +.banner-text { + text-align: center +} + +.banner-text p { + font-size: 22px; + max-width: 700px; + margin: 40px auto +} + +.banner-text p a { + color: #4d5659 +} + +.banner-text .button { + color: #E48125; + border: 2px solid #E48125; + background: #FFF +} + +.banner-text .button:last-child { + color: #CCC; + border: 2px solid #CCC +} + +.banner-actions { + max-width: 500px; + margin: 0 auto; + display: flex +} + +.banner-actions a { + text-transform: uppercase; + flex: 1 1 50% +} + +.banner-actions a:first-child { + margin-right: 10px +} + +.banner-actions a span { + margin-right: 10px +} + +.ribbon { + background: #565656; + background: #666; + padding: 60px 0; + margin-top: 120px +} + +.ribbon h4 { + color: #FFF; + text-shadow: 0 1px 2px rgba(0, 0, 0, 0.2); + text-align: center; + margin-bottom: 30px; + padding-bottom: 0; + font-size: 32px +} + +.ribbon p { + font-weight: 300; + color: #FFF; + padding-bottom: 0; + text-align: center; + max-width: 400px; + margin: 0 auto; + margin-bottom: 30px; + margin-top: 0; + font-size: 18px +} + +.ribbon .button-outlined { + color: #FFF; + border-color: #FFF; + text-transform: uppercase; + border-radius: 20px; + padding: 5px 40px; + padding: 5px 20px; + border-radius: 3px +} + +.ribbon .button-outlined .octicon { + margin-right: 6px +} + +.ribbon .container { + text-align: center +} + +.ribbon-login { + background: #fff; + margin-top: 10px; + padding-top: 0; + padding-bottom: 0; + margin-bottom: 200px +} + +.ribbon-login .button { + text-transform: uppercase; + border-radius: 30px; + padding: 7px 30px; + font-size: 18px +} + +.pricing { + margin-top: 100px +} + +.pricing .grid-flex-cell { + min-height: 300px +} + +.feature { + padding-top: 60px; + border-top: 1px solid #EEE +} + +.features .grid-flex-container { + background: #FFF; + margin-bottom: 90px +} + +.features .grid-flex-container:last-child { + margin-bottom: 90px +} + +.features .grid-flex-cell { + border: none; + text-align: left +} + +.issues .container { + max-width: 600px !IMPORTANT +} + +.feature { + margin-bottom: 90px +} + +.feature>div:first-child { + text-align: center +} + +.feature>div:first-child p { + font-size: 18px; + max-width: 600px; + margin: 0 auto +} + +.maintainers { + max-width: 600px; + margin: 0 auto; + margin-top: 60px; + -webkit-touch-callout: none; + -webkit-user-select: none; + -khtml-user-select: none; + -moz-user-select: none; + -ms-user-select: none; + user-select: none; + border: 1px solid #e6eaed; + box-shadow: 3px 3px rgba(0, 0, 0, 0.02); + border-radius: 3px +} + +.maintainers * { + cursor: default +} + +.maintainers div { + font-family: "Roboto Mono"; + font-size: 14px; + color: #CCC; + background: #fafafa; + border-bottom: 1px solid #f0f0f0; + text-align: left; + padding: 10px +} + +.maintainers table { + margin-bottom: 0 +} + +.maintainers table td { + border: none; + font-family: "Roboto Mono"; + font-size: 13px; + padding: 5px 15px; + color: #999 +} + +.maintainers table td:first-child { + text-align: right; + width: 50px; + border-right: 1px solid #f0f0f0 +} + +.maintainers table tr:first-child td { + padding-top: 10px +} + +.maintainers table tr:last-child td { + padding-bottom: 10px +} + +.footer { + background: #FFF; + padding: 30px 0; + color: #AAA; + text-align: right +} + +.footer a { + color: #666 +} + +.issues { + padding-left: 10px; + margin: 60px auto; + -webkit-touch-callout: none; + -webkit-user-select: none; + 
-khtml-user-select: none; + -moz-user-select: none; + -ms-user-select: none; + user-select: none +} + +.issues * { + cursor: default +} + +.issues .issue { + display: flex; + margin-bottom: 30px +} + +.issues .issue img { + width: 32px; + height: 32px; + border-radius: 3px; + margin-right: 20px; + opacity: .5 +} + +.issues .issue em { + font-weight: bold; + color: #999; + font-style: normal +} + +.issues .issue b { + font-size: 28px +} + +.issues .issue>div { + flex: 1 1 auto; + box-shadow: 3px 3px rgba(0, 0, 0, 0.02) +} + +.issues .issue>div div:first-child { + color: #CCC; + background: #fafafa; + border: 1px solid #f0f0f0; + line-height: 32px; + padding: 0 10px; + border-top-right-radius: 1px; + border-top-left-radius: 1px; + font-size: 14px +} + +.issues .issue>div div:last-child { + border: 1px solid #f0f0f0; + border-top: none; + border-bottom-right-radius: 1px; + border-bottom-left-radius: 1px; + padding: 10px +} + +.issues .checks { + display: flex; + margin-bottom: 20px +} + +.issues .checks>div:first-child { + width: 32px; + height: 32px; + border-radius: 3px; + margin-right: 20px; + background: #e2cc7a +} + +.issues .checks>div:first-child .octicon { + color: #FFF; + font-size: 28px; + text-align: center; + line-height: 32px; + display: inline-block; + width: 32px +} + +.issues .checks>div:last-child { + border: 1px solid #e2cc7a; + border-radius: 4px; + overflow: hidden; + flex: 1 1 auto; + box-shadow: 3px 3px rgba(0, 0, 0, 0.02); + box-shadow: 3px 3px rgba(226, 204, 122, 0.2) +} + +.issues .checks>div:last-child>div { + padding: 10px 15px +} + +.issues .checks strong { + font-size: 14px; + font-weight: bold; + color: #cea61b +} + +.issues .checks p { + font-size: 14px; + color: #767676; + margin: 0; + margin-bottom: 10px; + padding: 0 +} + +.issues .checks ul { + padding: 0; + margin: 0; + border-top: 1px solid #eee +} + +.issues .checks li { + list-style: none; + padding: 7px 15px; + font-size: 14px; + background: #fafafa; + border-bottom: 1px solid #f0f0f0 +} + +.issues .checks li:last-child { + border-bottom: none +} + +.issues .checks li .dot-green { + margin-right: 5px; + color: #6cc644 +} + +.issues .checks li .dot-yellow { + color: #e2cc7a; + margin-right: 5px +} + +.issues .checks li em { + font-style: normal; + font-weight: bold +} + +.issues .checks-success>div:first-child { + background: #6cc644 +} + +.issues .checks-success>div:last-child { + border: 1px solid #6cc644; + box-shadow: 3px 3px rgba(0, 0, 0, 0.02) +} + +.issues .checks-success>div:last-child .button { + background: #6cc644; + font-size: 14px; + padding: 5px 15px; + margin: 5px +} + +.issues .checks-success strong { + color: #6cc644 +} + +.issues .more { + text-align: center; + color: #DDD +} + +.issues .more .octicon { + font-size: 26px +} \ No newline at end of file diff --git a/docs/support/index.html b/docs/support/index.html new file mode 100644 index 0000000..0aacc5b --- /dev/null +++ b/docs/support/index.html @@ -0,0 +1,84 @@ + + + + + Support · checks-out + + + + + + + + + + + + + + + + +
+ +
+

Support

+ +

Please be sure to read the FAQ before engaging the support team. This is a free service run by open source community members and we ask that you be considerate of their time.

+
+
+ +
+ + +
+
+ + + \ No newline at end of file diff --git a/docs/tags/index.xml b/docs/tags/index.xml new file mode 100644 index 0000000..aa2c5ae --- /dev/null +++ b/docs/tags/index.xml @@ -0,0 +1,14 @@ + + + + Tags on checks-out + http://www.capitalone.io/checks-out/tags/ + Recent content in Tags on checks-out + Hugo -- gohugo.io + en-us + + + + + + \ No newline at end of file diff --git a/envvars/envvars.go b/envvars/envvars.go new file mode 100644 index 0000000..4a15395 --- /dev/null +++ b/envvars/envvars.go @@ -0,0 +1,206 @@ +/* + +SPDX-Copyright: Copyright (c) Capital One Services, LLC +SPDX-License-Identifier: Apache-2.0 +Copyright 2017 Capital One Services, LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and limitations under the License. + +*/ +package envvars + +import ( + "errors" + "fmt" + "strings" + "time" + + "github.com/capitalone/checks-out/set" + + "github.com/ianschenck/envflag" + "github.com/mspiegel/go-multierror" +) + +type EnvValues struct { + // Server configuration + Server struct { + Addr string + Cert string + Key string + } + // Database configuration + Db struct { + Driver string + Datasource string + } + // Default pattern customization + Pattern struct { + Default string + } + // External (user-facing) customization + Branding struct { + Name string + ShortName string + } + // Github integration + Github struct { + Email string + Url string + Client string + Secret string + Scope string + AdminOrg string + RequestsHz int + } + // Slack integration + Slack struct { + TargetUrl string + } + // Logging/debug config + Monitor struct { + LogLevel string + Sunlight bool + UaList string + LogPeriod time.Duration + DocsUrl string + } + // Caching config + Cache struct { + CacheTTL time.Duration + LongCacheTTL time.Duration + } + // Github testing config + Test struct { + GithubToken string + GithubTestEnable bool + } + // Access config + Access struct { + LimitUsers bool + LimitOrgs bool + } + + Old struct { + Approvals int64 + Pattern string + SelfApprovalOff bool + } +} + +var Env EnvValues + +var logLevels = set.New("debug", "info", "warn", "error", "fatal", "panic") + +func init() { + configure() +} + +const ( + //splitting up the patterns because one day we might want to automatically append the fieldPattern when it is missing + fieldPattern = `\s*(version:\s*(?P\S+))?\s*(comment:\s*(?P.*\S))?\s*` + pattern = `(?i)^I approve` + fieldPattern +) + +func configure() { + envflag.StringVar(&Env.Server.Addr, "SERVER_ADDR", ":8000", "Server ip address and port") + envflag.StringVar(&Env.Server.Cert, "SERVER_CERT", "", "Path to SSL certificate") + envflag.StringVar(&Env.Server.Key, "SERVER_KEY", "", "SSL certificate key") + + envflag.StringVar(&Env.Db.Driver, "DB_DRIVER", "", "One of sqlite3|postgres|mysql. Required") + envflag.StringVar(&Env.Db.Datasource, "DB_SOURCE", "", "Database data source. 
Required") + + envflag.StringVar(&Env.Branding.Name, "BRANDING_NAME", "checks-out", "Branding of this service") + envflag.StringVar(&Env.Branding.ShortName, "BRANDING_SHORT_NAME", "checks-out", "Abbreviated branding of this service") + + envflag.StringVar(&Env.Pattern.Default, "DEFAULT_PATTERN", pattern, "Default pattern used for matchers") + + envflag.StringVar(&Env.Github.Email, "GITHUB_EMAIL", "", "Email for git commits. Required") + envflag.StringVar(&Env.Github.Url, "GITHUB_URL", "https://github.com", "Github url") + envflag.StringVar(&Env.Github.Client, "GITHUB_CLIENT", "", "OAuth2 client id. Required") + envflag.StringVar(&Env.Github.Secret, "GITHUB_SECRET", "", "OAuth2 secret. Required") + envflag.StringVar(&Env.Github.Scope, "GITHUB_SCOPE", "read:org,repo:status,admin:repo_hook,admin:org_hook", "Permission scope") + envflag.StringVar(&Env.Github.AdminOrg, "GITHUB_ADMIN_ORG", "", "GitHub organization with admin privileges") + envflag.IntVar(&Env.Github.RequestsHz, "GITHUB_BATCH_PER_SECOND", 10, "GitHub batch access rate limiter") + + envflag.StringVar(&Env.Slack.TargetUrl, "SLACK_TARGET_URL", "", "Slack notification url") + + envflag.StringVar(&Env.Monitor.LogLevel, "LOG_LEVEL", "info", "One of debug|info|warn|error|fatal|panic") + envflag.BoolVar(&Env.Monitor.Sunlight, "CHECKS_OUT_SUNLIGHT", false, "Exposes additional endpoints") + envflag.StringVar(&Env.Monitor.UaList, "BLACKLIST_USER_AGENTS", "", "Skip logging of these agents") + envflag.DurationVar(&Env.Monitor.LogPeriod, "LOG_STATS_PERIOD", 0, "Period logging of statistics") + envflag.StringVar(&Env.Monitor.DocsUrl, "CHECKS_OUT_DOCS_URL", "https://capitalone.github.com/checks-out/docs", "Provides the base URL for links to the documentation.") + + envflag.DurationVar(&Env.Cache.CacheTTL, "CACHE_TTL", time.Minute*15, "Cache length for short lived entries") + envflag.DurationVar(&Env.Cache.LongCacheTTL, "LONG_CACHE_TTL", time.Hour*24, "Cache length for long lived entries") + + envflag.StringVar(&Env.Test.GithubToken, "GITHUB_TEST_TOKEN", "", "GitHub integration test token") + envflag.BoolVar(&Env.Test.GithubTestEnable, "GITHUB_TEST_ENABLE", false, "GitHub integration testing") + + envflag.BoolVar(&Env.Access.LimitUsers, "LIMIT_USERS", false, "Only allow users who are listed in the allowed_users table") + envflag.BoolVar(&Env.Access.LimitOrgs, "LIMIT_ORGS", false, "Only allow users who are members of the orgs listed in the allowed_orgs table") + + envflag.Int64Var(&Env.Old.Approvals, "CHECKS_OUT_APPROVALS", 2, "Legacy default number of approvals") + envflag.StringVar(&Env.Old.Pattern, "CHECKS_OUT_PATTERN", "(?i)LGTM", "Legacy matching pattern") + envflag.BoolVar(&Env.Old.SelfApprovalOff, "CHECKS_OUT_SELF_APPROVAL_OFF", false, "Legacy self-approval behavior") + + envflag.Parse() + + Env.Monitor.LogLevel = strings.ToLower(Env.Monitor.LogLevel) + Env.Github.Url = strings.TrimRight(Env.Github.Url, "/") +} + +func Usage() { + envflag.EnvironmentFlags.PrintDefaults() +} + +func Validate() error { + var errs error + if Env.Db.Driver == "" { + err := errors.New("Missing required environment variable DB_DRIVER") + errs = multierror.Append(errs, err) + } + if Env.Db.Datasource == "" { + err := errors.New("Missing required environment variable DB_SOURCE") + errs = multierror.Append(errs, err) + } + if Env.Github.Email == "" { + err := errors.New("Missing required environment variable GITHUB_EMAIL") + errs = multierror.Append(errs, err) + } + if Env.Github.Client == "" { + err := errors.New("Missing required environment variable 
GITHUB_CLIENT") + errs = multierror.Append(errs, err) + } + if Env.Github.Secret == "" { + err := errors.New("Missing required environment variable GITHUB_SECRET") + errs = multierror.Append(errs, err) + } + if Env.Github.Url == "" { + err := errors.New("Environment variable GITHUB_URL is empty") + errs = multierror.Append(errs, err) + } + if (Env.Server.Cert != "" && Env.Server.Key == "") || (Env.Server.Cert == "" && Env.Server.Key != "") { + err := errors.New("Both server SSL certificate and SSL must be specified for SSL.") + errs = multierror.Append(errs, err) + } + if !logLevels.Contains(Env.Monitor.LogLevel) { + err := fmt.Errorf("Environment variable LOG_LEVEL '%s' must be one of: %s", + Env.Monitor.LogLevel, + "'debug', 'info', 'warn', 'error', 'fatal', 'panic'") + errs = multierror.Append(errs, err) + } + if !strings.HasPrefix(Env.Github.Url, "https://") { + err := errors.New("GITHUB_URL must have prefix 'https://'") + errs = multierror.Append(errs, err) + } + return errs +} diff --git a/envvars/envvars_test.go b/envvars/envvars_test.go new file mode 100644 index 0000000..61d428e --- /dev/null +++ b/envvars/envvars_test.go @@ -0,0 +1,120 @@ +/* + +SPDX-Copyright: Copyright (c) Capital One Services, LLC +SPDX-License-Identifier: Apache-2.0 +Copyright 2017 Capital One Services, LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and limitations under the License. 
+ +*/ +package envvars + +import ( + "flag" + "os" + "testing" + + "github.com/ianschenck/envflag" + "github.com/mspiegel/go-multierror" +) + +var driver, source, email, client, secret string + +func setup() { + envflag.EnvironmentFlags = flag.NewFlagSet("environment", flag.ExitOnError) + driver = os.Getenv("DB_DRIVER") + source = os.Getenv("DB_SOURCE") + email = os.Getenv("GITHUB_EMAIL") + client = os.Getenv("GITHUB_CLIENT") + secret = os.Getenv("GITHUB_SECRET") + + os.Unsetenv("DB_DRIVER") + os.Unsetenv("DB_SOURCE") + os.Unsetenv("GITHUB_EMAIL") + os.Unsetenv("GITHUB_CLIENT") + os.Unsetenv("GITHUB_SECRET") +} + +func required() { + os.Setenv("DB_DRIVER", "sqlite3") + os.Setenv("DB_SOURCE", "checks-out.sqlite") + os.Setenv("GITHUB_EMAIL", "broken@example.com") + os.Setenv("GITHUB_CLIENT", "foo") + os.Setenv("GITHUB_SECRET", "bar") +} + +func teardown() { + envflag.EnvironmentFlags = flag.NewFlagSet("environment", flag.ExitOnError) + os.Setenv("DB_DRIVER", driver) + os.Setenv("DB_SOURCE", source) + os.Setenv("GITHUB_EMAIL", email) + os.Setenv("GITHUB_CLIENT", client) + os.Setenv("GITHUB_SECRET", secret) +} + +func TestRequiredVars(t *testing.T) { + + setup() + configure() + + errs, ok := Validate().(*multierror.Error) + + if !ok { + t.Error("Validation did not return a multierror") + } + if len(errs.Errors) != 5 { + t.Errorf("Expected 5 errors and %d were generated: %s", len(errs.Errors), errs.Error()) + } + + teardown() + configure() +} + +func TestSSL(t *testing.T) { + + setup() + required() + cert := os.Getenv("SERVER_CERT") + key := os.Getenv("SERVER_KEY") + os.Setenv("SERVER_CERT", "foobar") + os.Unsetenv("SERVER_KEY") + configure() + + err := Validate() + + if err.Error() != "Both server SSL certificate and SSL must be specified for SSL." { + t.Error("SSL error not reported ", err.Error()) + } + + teardown() + os.Setenv("SERVER_CERT", cert) + os.Setenv("SERVER_KEY", key) + configure() +} + +func TestLogLevel(t *testing.T) { + setup() + required() + logLevel := os.Getenv("LOG_LEVEL") + os.Setenv("LOG_LEVEL", "foobar") + configure() + + err := Validate() + exp := "Environment variable LOG_LEVEL 'foobar' must be one of: 'debug', 'info', 'warn', 'error', 'fatal', 'panic'" + if err.Error() != exp { + t.Error("Log level error incorrect ", err.Error()) + } + + teardown() + os.Setenv("LOG_LEVEL", logLevel) + configure() +} diff --git a/example.MAINTAINERS b/example.MAINTAINERS new file mode 100644 index 0000000..ca3abad --- /dev/null +++ b/example.MAINTAINERS @@ -0,0 +1 @@ +github-org repo-self diff --git a/example.checks-out b/example.checks-out new file mode 100644 index 0000000..dd16e45 --- /dev/null +++ b/example.checks-out @@ -0,0 +1,8 @@ +approvals: +[ + { + # count determines how many approvals are needed + # self determines whether you can approve your own pull requests + match: "all[count=1,self=false]" + } +] diff --git a/exterror/exterror.go b/exterror/exterror.go new file mode 100644 index 0000000..b0d8363 --- /dev/null +++ b/exterror/exterror.go @@ -0,0 +1,118 @@ +/* + +SPDX-Copyright: Copyright (c) Capital One Services, LLC +SPDX-License-Identifier: Apache-2.0 +Copyright 2017 Capital One Services, LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and limitations under the License. + +*/ +package exterror + +import ( + "errors" + "fmt" + "reflect" + + log "github.com/Sirupsen/logrus" + "github.com/mspiegel/go-multierror" +) + +type ExtError struct { + Status int + Err error +} + +func (e ExtError) Error() string { + return e.Err.Error() +} + +func Create(status int, err error) ExtError { + return ExtError{Status: status, Err: err} +} + +func Append(prev error, head string) error { + prevMsg := prev.Error() + if len(prevMsg) == 0 { + return errors.New(head) + } + newMsg := fmt.Errorf("%s. %s", head, prevMsg) + switch v := prev.(type) { + case ExtError: + return Create(v.Status, newMsg) + case *multierror.Error: + // flatten the multierror to retrieve the http response + ext := Convert(v) + return Create(ext.Status, newMsg) + default: + return newMsg + } +} + +func Convert(err error) ExtError { + switch v := err.(type) { + case ExtError: + log.Debugf("No conversion necessary for ExtError %s", err.Error()) + return v + case *multierror.Error: + log.Debugf("Multierror conversion for %s", err.Error()) + return convertMultiError(v) + default: + log.Errorf("Automatic promotion to 500 response for %s", reflect.TypeOf(err).String()) + return ExtError{Status: 500, Err: err} + } +} + +func allExtError(errs *multierror.Error) bool { + if len(errs.Errors) == 0 { + return false + } + for _, e := range errs.Errors { + if _, ok := e.(ExtError); !ok { + return false + } + } + return true +} + +func allEqualStatus(errs *multierror.Error) bool { + resp := errs.Errors[0].(ExtError).Status + for _, e := range errs.Errors { + if resp != e.(ExtError).Status { + return false + } + } + return true +} + +func allRangeStatus(errs *multierror.Error, low int, high int) bool { + for _, e := range errs.Errors { + status := e.(ExtError).Status + if (status < low) || (status >= high) { + return false + } + } + return true +} + +func convertMultiError(errs *multierror.Error) ExtError { + status := 500 + if !allExtError(errs) { + return ExtError{Status: status, Err: errs} + } + if allEqualStatus(errs) { + status = errs.Errors[0].(ExtError).Status + } else if allRangeStatus(errs, 400, 500) { + status = 400 + } + return ExtError{Status: status, Err: errs} +} diff --git a/exterror/exterror_test.go b/exterror/exterror_test.go new file mode 100644 index 0000000..5c2666a --- /dev/null +++ b/exterror/exterror_test.go @@ -0,0 +1,88 @@ +/* + +SPDX-Copyright: Copyright (c) Capital One Services, LLC +SPDX-License-Identifier: Apache-2.0 +Copyright 2017 Capital One Services, LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and limitations under the License. 
+ +*/ +package exterror + +import ( + "errors" + "strings" + "testing" + + "github.com/mspiegel/go-multierror" +) + +func TestConvert(t *testing.T) { + i := ExtError{Status: 404, Err: errors.New("foobar")} + o := Convert(i) + if o.Status != i.Status { + t.Error("Expected status 404") + } + if o.Err.Error() != i.Err.Error() { + t.Error("Incorrect error message") + } +} + +func TestAppend(t *testing.T) { + prev := ExtError{Status: 404, Err: errors.New("foobar")} + err := Append(prev, "baz") + if prev.Status != err.(ExtError).Status { + t.Error("Expected status 404") + } + if err.Error() != "baz. foobar" { + t.Errorf("Incorrect error message: %s", err.Error()) + } + plain := errors.New("foobar") + err = Append(plain, "baz") + if err.Error() != "baz. foobar" { + t.Errorf("Incorrect error message: %s", err.Error()) + } + var errs error + e1 := Create(400, errors.New("foo")) + e2 := Create(400, errors.New("bar")) + e3 := Create(400, errors.New("baz")) + errs = multierror.Append(errs, e1, e2, e3) + errs = Append(errs, "prefix") + if 400 != errs.(ExtError).Status { + t.Error("Expected status 400") + } + if !strings.HasPrefix(errs.Error(), "prefix.") { + t.Errorf("Incorrect error message: %s", errs.Error()) + } +} + +func TestConvertMultiError(t *testing.T) { + e1 := Create(404, nil) + e2 := Create(401, nil) + e3 := Create(500, nil) + out := convertMultiError(new(multierror.Error)) + if out.Status != 500 { + t.Error("Expected status 500") + } + out = convertMultiError(multierror.Append(nil, e1, e2, e3).(*multierror.Error)) + if out.Status != 500 { + t.Error("Expected status 500") + } + out = convertMultiError(multierror.Append(nil, e1, e2).(*multierror.Error)) + if out.Status != 400 { + t.Error("Expected status 400") + } + out = convertMultiError(multierror.Append(nil, e1, e2, errors.New("foobar")).(*multierror.Error)) + if out.Status != 500 { + t.Error("Expected status 500") + } +} diff --git a/hjson/hjson.go b/hjson/hjson.go new file mode 100644 index 0000000..8a0f338 --- /dev/null +++ b/hjson/hjson.go @@ -0,0 +1,639 @@ +/* + +SPDX-Copyright: Copyright (c) Christian Zangl, Capital One Services, LLC +SPDX-License-Identifier: MIT +Copyright 2016, 2017 Christian Zangl, Capital One Services, LLC + +See below for original copyright information. +*/ +package hjson + +import ( + "encoding/json" + + "bytes" + "errors" + "fmt" + "github.com/hjson/hjson-go" + "math" + "reflect" + "regexp" + "sort" + "strconv" + "strings" +) + +func Unmarshal(data []byte, v interface{}) error { + var container interface{} + err := hjson.Unmarshal(data, &container) + if err != nil { + return err + } + data, err = json.Marshal(container) + if err != nil { + return err + } + return json.Unmarshal(data, &v) +} + +// The code below is copied and modified from the github.com/hjson/hjson-go project. +// The existing code (as of hash 94a8f0bea3436e0721b2e28b85df411d75a399fa, released on 25-Sept-2016) +// does not handle encoding of structs. The code below does. +// I intend to contribute this back to the project, but that will take time. +// Until then, this copy of the code is working well for our needs. 
+ +// Copyright notice for original code: +/* +MIT License + +Copyright (c) 2016 Christian Zangl + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. +*/ + +// EncoderOptions defines options for encoding to Hjson. +type EncoderOptions struct { + // End of line, should be either \n or \r\n + Eol string + // Place braces on the same line + BracesSameLine bool + // Emit braces at the root level + EmitRootBraces bool + // Always place string in quotes + QuoteAlways bool + // Indent string + IndentBy string + // Allow the -0 value (unlike ES6) + AllowMinusZero bool + // Encode unknown values as 'null' + UnknownAsNull bool + // Skip empty structs + OmitEmptyStructs bool +} + +// DefaultOptions returns the default encoding options. +func DefaultOptions() EncoderOptions { + opt := EncoderOptions{} + opt.Eol = "\n" + opt.BracesSameLine = false + opt.EmitRootBraces = true + opt.QuoteAlways = false + opt.IndentBy = " " + opt.AllowMinusZero = false + opt.UnknownAsNull = false + opt.OmitEmptyStructs = false + return opt +} + +type hjsonEncoder struct { + bytes.Buffer // output + EncoderOptions + indent int +} + +var needsEscape, needsQuotes, needsEscapeML, startsWithKeyword, needsEscapeName *regexp.Regexp + +func init() { + // needsEscape tests if the string can be written without escapes + needsEscape = regexp.MustCompile(`[\\\"\x00-\x1f\x7f-\x9f\x{00ad}\x{0600}-\x{0604}\x{070f}\x{17b4}\x{17b5}\x{200c}-\x{200f}\x{2028}-\x{202f}\x{2060}-\x{206f}\x{feff}\x{fff0}-\x{ffff}]`) + // needsQuotes tests if the string can be written as a quoteless string (includes needsEscape but without \\ and \") + needsQuotes = regexp.MustCompile(`^\s|^"|^'''|^#|^/\*|^//|^\{|^\}|^\[|^\]|^:|^,|\s$|[\x00-\x1f\x7f-\x9f\x{00ad}\x{0600}-\x{0604}\x{070f}\x{17b4}\x{17b5}\x{200c}-\x{200f}\x{2028}-\x{202f}\x{2060}-\x{206f}\x{feff}\x{fff0}-\x{ffff}]`) + // needsEscapeML tests if the string can be written as a multiline string (includes needsEscape but without \n, \r, \\ and \") + needsEscapeML = regexp.MustCompile(`'''|[\x00-\x09\x0b\x0c\x0e-\x1f\x7f-\x9f\x{00ad}\x{0600}-\x{0604}\x{070f}\x{17b4}\x{17b5}\x{200c}-\x{200f}\x{2028}-\x{202f}\x{2060}-\x{206f}\x{feff}\x{fff0}-\x{ffff}]`) + // starts with a keyword and optionally is followed by a comment + startsWithKeyword = regexp.MustCompile(`^(true|false|null)\s*((,|\]|\}|#|//|/\*).*)?$`) + needsEscapeName = regexp.MustCompile(`[,\{\[\}\]\s:#"]|//|/\*|'''`) +} + +var meta = map[byte][]byte{ + // table of character substitutions + '\b': []byte("\\b"), + '\t': []byte("\\t"), + '\n': []byte("\\n"), + '\f': []byte("\\f"), 
+ '\r': []byte("\\r"), + '"': []byte("\\\""), + '\\': []byte("\\\\"), +} + +func (e *hjsonEncoder) quoteReplace(text string) string { + return string(needsEscape.ReplaceAllFunc([]byte(text), func(a []byte) []byte { + c := meta[a[0]] + if c != nil { + return c + } + return []byte(fmt.Sprintf("\\u%04x", c)) + })) +} + +func (e *hjsonEncoder) quote(value string, separator string, isRootObject bool) { + + // Check if we can insert this string without quotes + // see hjson syntax (must not parse as true, false, null or number) + + if len(value) == 0 { + e.WriteString(separator + `""`) + } else if e.QuoteAlways || + needsQuotes.MatchString(value) || + startsWithNumber([]byte(value)) || + startsWithKeyword.MatchString(value) { + + // If the string contains no control characters, no quote characters, and no + // backslash characters, then we can safely slap some quotes around it. + // Otherwise we first check if the string can be expressed in multiline + // format or we must replace the offending characters with safe escape + // sequences. + + if !needsEscape.MatchString(value) { + e.WriteString(separator + `"` + value + `"`) + } else if !needsEscapeML.MatchString(value) && !isRootObject { + e.mlString(value, separator) + } else { + e.WriteString(separator + `"` + e.quoteReplace(value) + `"`) + } + } else { + // return without quotes + e.WriteString(separator + value) + } +} + +func (e *hjsonEncoder) mlString(value string, separator string) { + // wrap the string into the ''' (multiline) format + + a := strings.Split(strings.Replace(value, "\r", "", -1), "\n") + + if len(a) == 1 { + // The string contains only a single line. We still use the multiline + // format as it avoids escaping the \ character (e.g. when used in a + // regex). + e.WriteString(separator + "'''") + e.WriteString(a[0]) + } else { + e.writeIndent(e.indent + 1) + e.WriteString("'''") + for _, v := range a { + indent := e.indent + 1 + if len(v) == 0 { + indent = 0 + } + e.writeIndent(indent) + e.WriteString(v) + } + e.writeIndent(e.indent + 1) + } + e.WriteString("'''") +} + +func (e *hjsonEncoder) quoteName(name string) string { + if len(name) == 0 { + return `""` + } + + // Check if we can insert this name without quotes + + if needsEscapeName.MatchString(name) { + if needsEscape.MatchString(name) { + name = e.quoteReplace(name) + } + return `"` + name + `"` + } + // without quotes + return name +} + +type sortAlpha []reflect.Value + +func (s sortAlpha) Len() int { + return len(s) +} +func (s sortAlpha) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} +func (s sortAlpha) Less(i, j int) bool { + return s[i].String() < s[j].String() +} + +func (e *hjsonEncoder) writeIndent(indent int) { + e.WriteString(e.Eol) + for i := 0; i < indent; i++ { + e.WriteString(e.IndentBy) + } +} + +func (e *hjsonEncoder) str(value reflect.Value, noIndent bool, separator string, isRootObject bool) error { + + // Produce a string from value. 
+ + //first check if value implements Marshaler, and use it + m := reflect.TypeOf((*json.Marshaler)(nil)).Elem() + + kind := value.Kind() + + for kind == reflect.Interface || kind == reflect.Ptr { + if value.IsNil() { + e.WriteString(separator) + e.WriteString("null") + return nil + } + if value.Type().Implements(m) { + b, err := value.Interface().(json.Marshaler).MarshalJSON() + if err != nil { + return err + } + e.WriteString(separator) + e.WriteString(string(b)) + return nil + } + value = value.Elem() + kind = value.Kind() + } + + if value.Type().Implements(m) { + b, err := value.Interface().(json.Marshaler).MarshalJSON() + if err != nil { + return err + } + e.WriteString(separator) + e.WriteString(string(b)) + return nil + } + + switch kind { + case reflect.String: + e.quote(value.String(), separator, isRootObject) + + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + e.WriteString(separator) + e.WriteString(strconv.FormatInt(value.Int(), 10)) + + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, + reflect.Uintptr: + e.WriteString(separator) + e.WriteString(strconv.FormatUint(value.Uint(), 10)) + + case reflect.Float32, reflect.Float64: + // JSON numbers must be finite. Encode non-finite numbers as null. + e.WriteString(separator) + number := value.Float() + if math.IsInf(number, 0) || math.IsNaN(number) { + e.WriteString("null") + } else if !e.AllowMinusZero && number == -0 { + e.WriteString("0") + } else { + // find shortest representation ('G' does not work) + val := strconv.FormatFloat(number, 'f', -1, 64) + exp := strconv.FormatFloat(number, 'E', -1, 64) + if len(exp) < len(val) { + val = strings.ToLower(exp) + } + e.WriteString(val) + } + + case reflect.Bool: + e.WriteString(separator) + if value.Bool() { + e.WriteString("true") + } else { + e.WriteString("false") + } + + case reflect.Slice, reflect.Array: + + len := value.Len() + if len == 0 { + e.WriteString(separator) + e.WriteString("[]") + break + } + + indent1 := e.indent + e.indent++ + + if !noIndent && !e.BracesSameLine { + e.writeIndent(indent1) + } else { + e.WriteString(separator) + } + e.WriteString("[") + + // Join all of the element texts together, separated with newlines + for i := 0; i < len; i++ { + e.writeIndent(e.indent) + if err := e.str(value.Index(i), true, "", false); err != nil { + return err + } + } + + e.writeIndent(indent1) + e.WriteString("]") + + e.indent = indent1 + + case reflect.Map: + + len := value.Len() + if len == 0 { + e.WriteString(separator) + e.WriteString("{}") + break + } + + showBraces := !isRootObject || e.EmitRootBraces + indent1 := e.indent + if showBraces { + e.indent++ + } + + if showBraces { + if !noIndent && !e.BracesSameLine { + e.writeIndent(indent1) + } else { + e.WriteString(separator) + } + e.WriteString("{") + } + + keys := value.MapKeys() + sort.Sort(sortAlpha(keys)) + + // Join all of the member texts together, separated with newlines + for i := 0; i < len; i++ { + if i > 0 || showBraces { + e.writeIndent(e.indent) + } + e.WriteString(e.quoteName(keys[i].String())) + e.WriteString(":") + if err := e.str(value.MapIndex(keys[i]), false, " ", false); err != nil { + return err + } + } + + if showBraces { + e.writeIndent(indent1) + e.WriteString("}") + } + + e.indent = indent1 + + case reflect.Struct: + + l := value.NumField() + if l == 0 { + e.WriteString(separator) + e.WriteString("{}") + break + } + + showBraces := !isRootObject || e.EmitRootBraces + indent1 := e.indent + if showBraces { + e.indent++ + } + + if 
showBraces { + if !noIndent && !e.BracesSameLine { + e.writeIndent(indent1) + } else { + e.WriteString(separator) + } + e.WriteString("{") + } + + // Join all of the member texts together, separated with newlines + for i := 0; i < l; i++ { + curStructField := value.Type().Field(i) + curField := value.Field(i) + + name := curStructField.Name + jsonTag, ok := curStructField.Tag.Lookup("json") + omitEmpty := false + if ok { + splits := strings.Split(jsonTag, ",") + if splits[0] == "-" { + continue + } + name = splits[0] + if len(splits) > 1 { + omitEmpty = (splits[1] == "omitempty") + } + } + if omitEmpty && isEmptyValue(curField, e.OmitEmptyStructs) { + continue + } + if i > 0 || showBraces { + e.writeIndent(e.indent) + } + e.WriteString(e.quoteName(name)) + e.WriteString(":") + if err := e.str(curField, false, " ", false); err != nil { + return err + } + } + + if showBraces { + e.writeIndent(indent1) + e.WriteString("}") + } + + e.indent = indent1 + + default: + if e.UnknownAsNull { + // Use null as a placeholder for non-JSON values. + e.WriteString("null") + } else { + return errors.New("Unsupported type " + value.Type().String()) + } + } + return nil +} + +func isEmptyValue(v reflect.Value, omitEmptyStructs bool) bool { + switch v.Kind() { + case reflect.Array, reflect.Map, reflect.Slice, reflect.String: + return v.Len() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Interface, reflect.Ptr: + return v.IsNil() + case reflect.Struct: + if omitEmptyStructs { + //recursively check all the fields + l := v.NumField() + for i := 0; i < l; i++ { + if !isEmptyValue(v.Field(i), omitEmptyStructs) { + return false + } + } + return true + } + } + return false +} + +// Marshal returns the Hjson encoding of v using +// default options. +// +// See MarshalWithOptions. +// +func Marshal(v interface{}) ([]byte, error) { + return MarshalWithOptions(v, DefaultOptions()) +} + +// MarshalWithOptions returns the Hjson encoding of v. +// +// Marshal traverses the value v recursively. +// +// Boolean values encode as JSON booleans. +// +// Floating point, integer, and Number values encode as JSON numbers. +// +// String values encode as Hjson strings (quoteless, multiline or +// JSON). +// +// Array and slice values encode as JSON arrays. +// +// Map values encode as JSON objects. The map's key type must be a +// string. The map keys are sorted and used as JSON object keys. +// +// Pointer values encode as the value pointed to. +// A nil pointer encodes as the null JSON value. +// +// Interface values encode as the value contained in the interface. +// A nil interface value encodes as the null JSON value. +// +// JSON cannot represent cyclic data structures and Marshal does not +// handle them. Passing cyclic structures to Marshal will result in +// an infinite recursion. 
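+//
+// A minimal usage sketch (cfg is assumed to be any JSON-marshalable Go value;
+// error handling abbreviated):
+//
+//	data, err := MarshalWithOptions(cfg, DefaultOptions())
+//	if err != nil {
+//		// handle the encoding error
+//	}
+//	fmt.Println(string(data))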
+// +func MarshalWithOptions(v interface{}, options EncoderOptions) ([]byte, error) { + e := &hjsonEncoder{} + e.indent = 0 + e.Eol = options.Eol + e.BracesSameLine = options.BracesSameLine + e.EmitRootBraces = options.EmitRootBraces + e.QuoteAlways = options.QuoteAlways + e.IndentBy = options.IndentBy + e.OmitEmptyStructs = options.OmitEmptyStructs + + err := e.str(reflect.ValueOf(v), true, "", true) + if err != nil { + return nil, err + } + return e.Bytes(), nil +} + +type parseNumber struct { + data []byte + at int // The index of the current character + ch byte // The current character +} + +func (p *parseNumber) next() bool { + // get the next character. + len := len(p.data) + if p.at < len { + p.ch = p.data[p.at] + p.at++ + return true + } + if p.at == len { + p.at++ + p.ch = 0 + } + return false +} + +func startsWithNumber(text []byte) bool { + if _, err := tryParseNumber(text, true); err == nil { + return true + } + return false +} + +func tryParseNumber(text []byte, stopAtNext bool) (float64, error) { + // Parse a number value. + + p := parseNumber{text, 0, ' '} + leadingZeros := 0 + testLeading := true + p.next() + if p.ch == '-' { + p.next() + } + for p.ch >= '0' && p.ch <= '9' { + if testLeading { + if p.ch == '0' { + leadingZeros++ + } else { + testLeading = false + } + } + p.next() + } + if testLeading { + leadingZeros-- + } // single 0 is allowed + if p.ch == '.' { + for p.next() && p.ch >= '0' && p.ch <= '9' { + } + } + if p.ch == 'e' || p.ch == 'E' { + p.next() + if p.ch == '-' || p.ch == '+' { + p.next() + } + for p.ch >= '0' && p.ch <= '9' { + p.next() + } + } + + end := p.at + + // skip white/to (newline) + for p.ch > 0 && p.ch <= ' ' { + p.next() + } + + if stopAtNext { + // end scan if we find a punctuator character like ,}] or a comment + if p.ch == ',' || p.ch == '}' || p.ch == ']' || + p.ch == '#' || p.ch == '/' && (p.data[p.at] == '/' || p.data[p.at] == '*') { + p.ch = 0 + } + } + + if p.ch > 0 || leadingZeros != 0 { + return 0, errors.New("Invalid number") + } + number, err := strconv.ParseFloat(string(p.data[0:end-1]), 64) + if err != nil { + return 0, err + } + if math.IsInf(number, 0) || math.IsNaN(number) { + return 0, errors.New("Invalid number") + } + return number, nil +} diff --git a/hugo/config.toml b/hugo/config.toml new file mode 100644 index 0000000..aba1727 --- /dev/null +++ b/hugo/config.toml @@ -0,0 +1,4 @@ +baseurl = "http://www.capitalone.io/checks-out/" +languageCode = "en-us" +title = "checks-out" +theme = "default" diff --git a/hugo/content/api.md b/hugo/content/api.md new file mode 100644 index 0000000..28041fe --- /dev/null +++ b/hugo/content/api.md @@ -0,0 +1,456 @@ ++++ +date = "2017-06-21T10:56:00-04:00" +draft = false +title = "REST API" +weight = 7 +menu = "main" ++++ + +# REST API + +The following REST APIs are exposed by checks-out. + +## Logging in + +checks-out relies on Github's OAuth2 support for authentication. To log in to the REST API, a user must go to /settings/tokens on their Github server (for public Github, this +would be .) and create a Personal Access Token. The token should have the permissions admin:org_hook, read:org, repo:status. Store this +token in a safe place; you cannot get it again from Github. + +Endpoint: /login +Method: POST +Query parameters: access_token=PERSONAL_ACCESS_TOKEN +Body: none + +Success: returns an AuthToken JSON structure, status = 200 +Failure: returns status 401 (unauthorized) +Tokens are currently set to expire after 72 hours. 
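+
+For illustration, here is a minimal Go sketch of the login flow and of reusing the
+resulting JWT for an authenticated call (see the Authorization section below). The
+server URL and token values are placeholders and error handling is abbreviated:
+
+```go
+package main
+
+import (
+	"encoding/json"
+	"fmt"
+	"net/http"
+	"net/url"
+)
+
+// AuthToken mirrors the JSON structure shown in the next subsection.
+type AuthToken struct {
+	AccessToken string `json:"access_token"`
+	ExpiresIn   int64  `json:"expires_in"`
+}
+
+func main() {
+	// Placeholders for this sketch: your checks-out server and a GitHub
+	// Personal Access Token with the permissions listed above.
+	base := "https://checks-out.example.com"
+	pat := "PERSONAL_ACCESS_TOKEN"
+
+	// POST /login?access_token=... returns an AuthToken on success.
+	resp, err := http.Post(base+"/login?access_token="+url.QueryEscape(pat), "application/json", nil)
+	if err != nil {
+		panic(err)
+	}
+	defer resp.Body.Close()
+
+	var tok AuthToken
+	if err := json.NewDecoder(resp.Body).Decode(&tok); err != nil {
+		panic(err)
+	}
+
+	// Reuse the JWT as a bearer token on authenticated endpoints.
+	req, _ := http.NewRequest("GET", base+"/api/user", nil)
+	req.Header.Set("Authorization", "Bearer "+tok.AccessToken)
+	userResp, err := http.DefaultClient.Do(req)
+	if err != nil {
+		panic(err)
+	}
+	defer userResp.Body.Close()
+	fmt.Println("status:", userResp.Status)
+}
+```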
+ +### AuthToken JSON structure + +```json +{ + "access_token": "JWT_TOKEN", + "expires_in": SECONDS_UNTIL_EXPIRY +} +``` + +## Authorization + +After calling the initial login, you will get back a JWT Token. This token must be supplied to all other authenticated requests using one of the following methods: + +- a header with key`Authorization` and value `Bearer JWT_TOKEN` +- a query parameter with key `access_token` and value `JWT_TOKEN` +- a cookie with key `user_sess` and value `JWT_TOKEN` + +## Get Current User Info + +Endpoint: /api/user +Method: GET + +Success: returns a GetCurrentUser JSON structure + +### GetCurrentUser JSON Structure + +```json +{ + "id": ID, + "login": "USER_NAME", + "avatar": "GITHUB_AVATAR_URL_FOR_USER" +} +``` + +## Delete Current User + +This call removes the currently logged-in user from checks-out. If you do this, you will need to recreate your account. + +Endpoint: /api/user +Method: DELETE + +Success: returns a 204 (deleted) status code + +## Get Orgs for Current User + +Returns all orgs that the current user belongs to. + +Endpoint: /api/user/orgs +Method: GET + +Success: returns a JSON list of GetOrg JSON structures + +### GetOrg JSON Structure + +```json + { + "login": "NAME_OF_ORG", + "avatar": "GITHUB_AVATAR_URL_FOR_ORG", + "enabled": true|false, + "admin": true|false + }, +``` + +If the current user is an admin for the org, `admin` is set to true. +If the org has been enabled in checks-out, `enabled` is set to true. + +## Get Personal Repos for Current User + +Returns all repos in the user's account (not repos in orgs that the user belongs to) + +Endpoint: /api/user/repos +Method: GET + +Success: returns a 200 (ok) status code and a JSON list of GetRepo JSON structures + +### GetRepo JSON Structure + +```json + { + "id": ID_IN_checks-out, + "owner": "USER_NAME", + "name": "REPO_NAME", + "slug": "USER_NAME/REPO_NAME", + "link_url": URL_IN_GITHUB", + "private": true|false, + "org": true|false + }, +``` + +If `id` is present, the repo is managed by checks-out. +If the repo is enabled because its org is managed by checks-out, `org` is set to `true`. +If the repo is private to the user, `private` is set to `true`. + +## Get Repos in Specified Org + +Returns all repos in the specified org + +Endpoint: /api/user/repos/:org +Method: GET + +:org is replaced with the name of the org whose repos are being requested. + +Success: returns a JSON list of GetRepo JSON structures + +## Get Enabled Orgs for Current User + +Returns all orgs that have been enabled by the current user + +Endpoint: /api/user/orgs/enabled +Method: GET + +Success: return a JSON list of GetOrg JSON structures + +note: the `avatar` and `admin` fields will always be set to `""` and `false` respectively. the `enabled` field will always be set to `true`. + +## Enable Repo + +Turns on checks-out monitoring for the specified repo. 
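+
+For illustration, a hedged Go sketch of calling the endpoint documented immediately
+below; the server URL, owner, repo, and JWT values are placeholders:
+
+```go
+package main
+
+import (
+	"fmt"
+	"net/http"
+)
+
+func main() {
+	// Placeholders for this sketch.
+	base := "https://checks-out.example.com"
+	jwt := "JWT_TOKEN"
+
+	// POST /api/repos/:owner/:repo enables checks-out for one repository.
+	req, _ := http.NewRequest("POST", base+"/api/repos/my-org/my-repo", nil)
+	req.Header.Set("Authorization", "Bearer "+jwt)
+
+	resp, err := http.DefaultClient.Do(req)
+	if err != nil {
+		panic(err)
+	}
+	defer resp.Body.Close()
+
+	// Expect 200 on success, 409 if already enabled, 404 if not visible to the user.
+	fmt.Println("status:", resp.StatusCode)
+}
+```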
+ +Endpoint: /api/repos/:owner/:repo +Method: POST +Body: none + +:owner is the name of the org or the name of the user, for a personal repo +:repo is the name of the repo + +Success: returns 200 (ok) and a GetRepo JSON structure +Failure: returns 409 (Conflict) if the repo is already enabled +Failure: returns 404 (not found) if the repo does not exist or is not available to the user + +## Disable Repo + +Turns off checks-out monitoring for the specified repo + +Endpoint: /api/repos/:owner/:repo +Method: DELETE + +:owner is the name of the org or the name of the user, for a personal repo +:repo is the name of the repo + +Success: returns 200 (ok) +Failure: returns 409 (Conflict) if the repo is already disabled +Failure: returns 404 (not found) if the repo does not exist or is not available to the user + +## Get Enabled Repo + +Returns information on the specified enabled repo + +Endpoint: /api/repos/:owner/:repo +Method: GET + +:owner is the name of the org or the name of the user, for a personal repo +:repo is the name of the repo + +Success: returns 200 (ok) and a GetRepo JSON structure +Failure: returns 404 (not found) if the repo does not exist or is not available to the user + +## Enable Org + +Turns on checks-out monitoring for the specified org. All repos currently in the org are enabled and all repos added to the org in the future will be enabled on creation. + +Endpoint: /api/repos/:owner +Method: POST +Body: none + +:owner is the name of the org + +Success: returns 200 (ok) and an OrgInfo JSON structure +Failure: returns 409 (Conflict) if the org is already enabled +Failure: returns 404 (not found) if the org does not exist, is not available to the user, or if it is the user's personal repos (owner == user name) + +### OrgInfo JSON Structure + +```json +{ + "id": ID_IN_checks-out, + "owner": "ORG_NAME", + "link_url": "GITHUB_URL", + "private": true|false +} +``` + +If the org is private, `private` is set to `true`. + +## Disable Org + +Turns on checks-out monitoring for the specified org. All repos currently in the org are enabled and all repos added to the org in the future will be enabled on creation. 
+ +Endpoint: /api/repos/:owner +Method: DELETE + +:owner is the name of the org + +Success: returns 200 (ok) +Failure: returns 409 (Conflict) if the org is already disabled +Failure: returns 404 (not found) if the org does not exist, is not available to the user, or if it is the user's personal repos (owner == user name) + +## Get Config for Repo + +Returns the .lgtm or .checks-out file for the repo + +Endpoint: /api/repos/:owner/:repo/config +Method: GET + +:owner is the name of the org or the name of the user, for a personal repo +:repo is the name of the repo + +Success: returns 200 (ok) and a .checks-out config file in JSON +Failure: returns 404 (not found) if the file does not exist or is not available to the user + +## Get Maintainers for Repo + +Returns the MAINTAINERS file for the repo + +Endpoint: /api/repos/:owner/:repo/maintainers +Method: GET + +:owner is the name of the org or the name of the user, for a personal repo +:repo is the name of the repo + +Success: returns 200 (ok) and a MAINTAINERS config file +Failure: returns 404 (not found) if the file does not exist or is not available to the user + +## Validate Config and Maintainers for Repo + +Validates that the .lgtm/.checks-out and MAINTAINERS files are present and valid + +Endpoint: /api/repos/:owner/:repo/validate +Method: GET + +:owner is the name of the org or the name of the user, for a personal repo +:repo is the name of the repo + +Success: returns 200 (ok) +Failure: returns 404 (not found) if the file does not exist or is not available to the user +Failure: returns 400 (bad request) if the .lgtm/.checks-out or MAINTAINERS file is not valid. Error messages returned as text in body of response. + +## Convert .lgtm to .checks-out + +Converts a .lgtm file in a repo into the equivalent .checks-out file + +Endpoint: /api/repos/:owner/:repo/lgtm-to-checks-out +Method: GET + +:owner is the name of the org or the name of the user, for a personal repo +:repo is the name of the repo + +Success: returns 200 (ok) and a .checks-out config file in JSON +Failure: returns 404 (not found) if the file does not exist or is not available to the user +Failure: returns 400 (bad request) if the .lgtm file is not valid. Error messages returned as text in body of response. + +## Get Teams in Org + +Returns the teams defined in an org + +Endpoint: /api/teams/:owner +Method: GET + +:owner is the name of the org + +Success: returns 200 (ok) and a JSON array of strings. Each string is the name of a team in the org +Failure: returns 404 (not found) if the org does not exist, is not available to the user, or is the user's personal repos (owner == user name) + +## Get checks-out Status for Pull Request + +Returns the status of a pull request and the configuration used to process it + +Endpoint: /api/pr/:owner/:repo/:id/status +Method: GET + +:owner is the name of the org or the name of the user, for a personal repo +:repo is the name of the repo +:id is the id of the pull request in Github + +Success: returns 200 (ok) and a Status JSON structure +Failure: returns 404 (not found) if the org, repo, or id does not exist or is not available to the user + +### Status JSON Structure + +```json +{ + "approved": true|false, + "approvers": ["USER_NAMES_OF_APPROVERS"], + "disapprovers": ["USER_NAMES_OF_DISAPPROVERS"], + "policy": { + }, + "settings": { + } +} +``` + +`approved` is `true` if the pull request has been approved by checks-out, `false` otherwise. +`approvers` is an array of user names of the people who have approved the pull request. 
+`disapprovers` is an array of user names of the people who have disapproved the pull request. +`policy` is the policy in the .checks-out configuration file that is used for this pull request. +`settings` is the .checks-out configuration file. All optional sections are filled in with their default values. + + +## User Slack URL Management + +### Register new User-Level Slack Target + +Registers a new Slack target for all repos managed by the current user. The slack target's name is the hostname for the Slack instance. +A User-Level Slack Target will override an Admin-Level Slack Target for the same hostname. It is not visible by any other users in checks-out. + +Endpoint: /api/user/slack/:hostname +Method: POST +Body: URL JSON Structure + +:hostname is the hostname for the slack group + +Success: returns a 201 (created) status code + +#### URL JSON Structure + +```json +{ + "url":"_URL_FOR_INCOMING_NOTIFICATIONS_WEBHOOKS_" +}``` + +### Get URL for User-Level Slack Target + +Returns the URL for a User-Level Slack target for all repos managed by the current user. + +Endpoint: /api/user/slack/:hostname +Method: GET + +:hostname is the hostname for the slack group + +Success: returns a 200 (ok) status code and a URL JSON structure +Failure: returns a 404 (not found) status code if no URL is registered by the current user for the specified slack target + +### Update URL for User-Level Slack Target + +Updates the specified Slack target for all repos managed by the current user. + +Endpoint: /api/user/slack/:hostname +Method: PUT +Body: URL JSON Structure + +:hostname is the hostname for the slack group + +Success: returns a 200 (created) status code + +### Delete User-Level Slack Target + +Removes the specified Slack target for all repos managed by the current user. If there is an admin-level Slack target specified for +the same hostname, it will be used instead. + +Endpoint: /api/user/slack/:hostname +Method: DELETE + +:hostname is the hostname for the slack group + +Success: returns a 204 (deleted) status code +Failure: returns a 404 (not found) status code if no URL is registered by the current user for the specified slack target + +## Admin + +### Get All Enabled Repos + +Returns a list of all repos that have been enabled in checks-out across all users + +Endpoint: /api/repos +Method: GET + +Success: returns a 200 (ok) status code and a JSON list of GetRepo JSON structures + + +### Get Count of Enabled Repos + +Returns a count of all repos that have been enabled in checks-out across all users + +Endpoint: /api/count +Method: GET + +Success: returns a 200 (ok) status code and a number as text + +### Admin Slack URL Management + +#### Register New Admin-Level Slack Target + +Registers a new Slack target for all repos. The slack target's name is the hostname for the Slack instance. +A User-Level Slack Target will override an Admin-Level Slack Target for the same hostname. + +Endpoint: /api/admin/slack/:hostname +Method: POST +Body: URL JSON Structure + +:hostname is the hostname for the slack group + +Success: returns a 201 (created) status code + +### Get URL for Admin-Level Slack Target + +Returns the URL for an Admin-Level Slack target for all repos. 
+ +Endpoint: /api/admin/slack/:hostname +Method: GET + +:hostname is the hostname for the slack group + +Success: returns a 200 (ok) status code and a URL JSON structure +Failure: returns a 404 (not found) status code if no URL is registered by the admin for the specified slack target + +### Update URL for Admin-Level Slack Target + +Updates the specified Slack target for all repos. + +Endpoint: /api/admin/slack/:hostname +Method: PUT +Body: URL JSON Structure + +:hostname is the hostname for the slack group + +Success: returns a 200 (created) status code + +### Delete Admin-Level Slack Target + +Removes the specified Slack target for all repos. If there are user-level Slack targets specified for +the same hostname, they will be used instead for those users. + +Endpoint: /api/admin/slack/:hostname +Method: DELETE + +:hostname is the hostname for the slack group + +Success: returns a 204 (deleted) status code +Failure: returns a 404 (not found) status code if no URL is registered by the current user for the specified slack target diff --git a/hugo/content/approvals.md b/hugo/content/approvals.md new file mode 100644 index 0000000..8474ef2 --- /dev/null +++ b/hugo/content/approvals.md @@ -0,0 +1,47 @@ ++++ +date = "2015-12-05T16:00:21-08:00" +draft = false +title = "Approval Policies" +weight = 5 +menu = "main" +toc = true ++++ + +Here are some templates you can use to write the approval policy section +of your .checks-out configuration. + +``` +match: "all[count=1,self=false]" +``` + +This policy requires one approval from anyone in the built-in 'all' +group. 'all' expands to all the people in the MAINTAINERS file. +Populating the MAINTAINERS file with the macro "github-org repo-self" +will expand to everyone within the organization of the GitHub repository. +self=false forbids the author of the pull request to approve their own +request. If you want self-approval then use "all[count=1,self=true]" + +``` +match: "sharks[count=1,self=false] and jets[count=1,self=false]" +``` + +You have defined the groups 'sharks' and 'jets' in the MAINTAINERS file. +This policy requires one approval from the sharks and one approval +from the jets. If someone belongs to both the sharks and the jets, then +their approval will count for both of the clauses and therefore they +do not require a second reviewer. In general, the teams work best when +they are non-overlapping. But the next example takes advantage of +overlapping groups. + +``` +match: "all[count=2,self=false] and reviewers[count=1,self=false]" +``` + +This policy requires two approvals and at least one of the approvals +must be from the group 'reviewers' in the MAINTAINERS file. The policy +takes advantage of the fact that reviewers is a subset of all. If +you only have 1 person in the reviewers group and you want the reviewer +to submit pull requests and it is OK to have only a single approval +against those pull request, then use +"all[count=2,self=false] and reviewers[count=1,self=true]" and the +reviewer must self-approve their pull request. \ No newline at end of file diff --git a/hugo/content/branches.md b/hugo/content/branches.md new file mode 100644 index 0000000..3b365db --- /dev/null +++ b/hugo/content/branches.md @@ -0,0 +1,26 @@ ++++ +date = "2015-12-05T16:00:21-08:00" +draft = false +title = "Branches" +weight = 3 +menu = "main" +toc = true ++++ + +# Overview + +Checks-out automatically enables GitHub protected branches for your repositories' __default branch__. 
You can further customize or disable this behavior by navigating to your repository branch settings screen in GitHub. + +![protected branches](/checks-out/images/protected_branches.png) + +# GitHub Enterprise + +GitHub Enterprise 2.4 does not have an API for protected branches. As a result you will need to manually configure protected branches after enabling your repository in checks-out. This can be done by navigating to your repository branch settings screen in GitHub Enterprise (pictured above). + +# Other Branches + +Checks-out automatically enables itself for the __default__ branch. If you would like to enable checks-out for additional branches you can manually configure additional branches in your repository branch settings screen in GitHub. + +# Other Checks + +Checks-out automatically adds itself as a required status check. If you would like to enable additional status checks, such as continuous integration or code coverage, you can further customize this behavior in your repository branch settings screen in GitHub. diff --git a/hugo/content/customize.md b/hugo/content/customize.md new file mode 100644 index 0000000..c2e8a19 --- /dev/null +++ b/hugo/content/customize.md @@ -0,0 +1,577 @@ ++++ +date = "2015-12-05T16:00:21-08:00" +draft = false +title = "Customize" +weight = 4 +menu = "main" +toc = true ++++ + +# Introduction + +Place an `.checks-out` file in the root of your repository to customize your +project's approval process. This file is in Human JSON +([HJSON](http://hjson.org/)) format. JSON is also valid HJSON so +you can write your .checks-out file in JSON if you prefer. + +Each property of the .checks-out file has a default value that is used +when you do not provide a value. This document will explain each +section of the .checks-out file. At the beginning of each section an +example of the configuration will be provided. The example will +be populated with the default values for that section. + +For example here are all the default values of the .checks-out file: + +``` +approvals: +[ + { + scope: + { + branches: [] + paths: [] + } + name: "" + match: "all[count=1,self=true]" + antimatch: "all[count=1,self=true]" + authormatch: "universe[count=1,self=true]" + pattern: null + antipattern: null + antititle: null + tag: null + merge: null + feedback: null + } +] +pattern: "(?i)^I approve\\s*(version:\\s*(?P\\S+))?\\s*(comment:\\s*(?P.*\\S))?\\s*" +antipattern: null +antititle: null +commit: +{ + range: head + antirange: head + tagrange: head + ignoreuimerge: false +} +maintainers: +{ + path: MAINTAINERS + type: text +} +feedback: +{ + type: ["comment", "review"] +} +merge: +{ + enable: false + merge: "merge" + delete: false + uptodate: true +} +tag: +{ + enable: false + algorithm: semver + template: "{{.Version}}" + increment: "patch" + docker: false +} +comment: +{ + enable: false + targets: [] +} +deploy: +{ + enable: false + deployment: DEPLOYMENTS +} +``` + +If you do not need to change the default values in a section +of the .checks-out then you may leave out that section from +the file. Here is a valid .checks-out file where we are changing +the number of approvers from 1 to 2 and the remaining properties +use the default values: + +``` +approvals: +[ + { + match: "all[count=2]" + } +] +``` + +You can inspect the .checks-out file for your repository using the api endpoint +`/api/repos/[orgname]/[reponame]/config`. The output will have any missing +configuration parameters populated with default values. 
If the configuration +file cannot be parsed then an error message will be returned. + +# Approval Policies + +checks-out has extensive support for different approval policies. Each approval +policy consists of two sections: the approval scope and the approval match. +The scope determines which policy is applied against a pull request. +The match determines when the pull request is allowed to merge. The +approvals section of .checks-out is an ordered list (an array) of policies. +The policies are traversed in the order specified by the configuration file. +The first scope that matches against the pull request is the policy +that is used (see Policy Scope section). + +Each approval policy may optionally declare a tag section. The tag +section is identical in structure to the global tag section. If a policy +has a tag section then its tag section is applied instead of the global +tag section. + +Each approval policy may optionally declare a merge section. The merge +section is identical in structure to the global merge section. If a policy +has a merge section then its merge section is applied instead of the global +merge section. + +Each approval policy may optionally declare a pattern. If a policy has a pattern +then it is is applied for regular expression matching instead of the global +pattern. Each approval policy may optionally declare an antipattern. If a policy +has a antipattern then it is is applied for regular expression matching instead +of the global antipattern. + +An .checks-out configuration file is required to have an empty scope +in the last policy in the policies array. This ensures that every +pull request has a matching scope. + +If you have several approval policies, then we recommend enabling the [GitHub +notification channel](#github-comments) and include "open" in the types +field. This will enable checks-out to post a GitHub comment on a pull request +when the request is opened. The comment will describe which approval policy +is being applied to the pull request. + +## Policy Scope +``` +scope: +{ + branches: [] + paths: [] +} +``` +If the 'branches' array is non-empty then the policy is limited +to the specified branches. In GitHub terminology this is a list of base +branches. The base branch is where the changes should be applied. + +If the 'paths' array is non-empty then the policy is limited +to the specified file paths. Paths uses a simplified form of wildcard +glob expansion. The sequence `**` will match zero or more instances +of any character. The sequence `*` will match zero or more instances +of any character except the directory separator `/`. All other character +sequences are literal matches. + +For example, to match against all java source files use the path +expression `**.java`. To match against recursively against all files +in a subdirectory use `foo/bar/**`. + +## Policy Name +``` +name: "" +``` + +The 'name' field of a policy allows you to optionally specify a human-readable +description of the policy. If the name is empty then the policy is +identified by it's 1-based offset into the policy array. If the name is +nonempty then no two policies can have the same name. + +## Policy Match +``` +match: "all[count=1,self=true]" +``` +The policy match algorithms are specified with a domain-specific language (DSL). +The DSL is based around the concept of organization approvals. Boolean operators +can be used to combine organizations. Parenthesis may be used to defined +precedence of operations. 
There are several predefined organizations: "all", +"universe", "us", and "them". + +### All Match +``` +match: "all[count=1,self=true]" +``` +This matches against anyone in the MAINTAINERS file. The 'count' +field determines the minimum amount of approvals for a pull request +to be accepted. The 'self' field determines whether the author +of a pull request is allowed to approve the pull request. + +### Universe Match +``` +match: "universe[count=1,self=true]" +``` +This is a variation of "all" matching where anyone with an account +on this GitHub instance can approve the pull request. The 'count' +field determines the minimum amount of approvals for a pull request +to be accepted. The 'self' field determines whether the author +of a pull request is allowed to approve the pull request. + +### Us Match +``` +match: "us[count=1,self=true]" +``` +This policy will match to any person that has at least one organization +in common with the author of the pull request. The 'count' +field determines the minimum amount of approvals for a pull request +to be accepted. The 'self' field determines whether the author +of a pull request is allowed to approve the pull request. + +### Them Match +``` +match: "them[count=1,self=true]" +``` +This policy will match to any person that has at least no organizations +in common with the author of the pull request. The 'count' +field determines the minimum amount of approvals for a pull request +to be accepted. The 'self' field determines whether the author +of a pull request is allowed to approve the pull request. + +### Org Match +``` +match: "foo[count=1,self=true]" +``` + +This policy will match against an organization named "foo" in the +MAINTAINERS file. The 'count' field determines the minimum +amount of organizations for a pull request to be accepted. +The 'self' field determines whether the author +of a pull request is allowed to approve the pull request. + +### Anonymous Org Match +``` +match: "{john,jane,sally}[count=1,self=true]" +``` + +This policy will match against an an anonymous organization composed of the +specified people. Anonymous organizations are ignored by the 'us' and 'them' +groups defined above. The 'count' field determines the minimum amount of +organizations for a pull request to be accepted. The 'self' field determines +whether the author of a pull request is allowed to approve the pull request. + +### And Match +``` +match: "foo[count=1,self=true] and bar[count=1,self=true]" +``` +This policy allows you to combine two or more policies. All +of the policies must be met in order for this policy to be true. + +### Or Match +``` +match: "foo[count=1,self=true] or bar[count=1,self=true]" +``` +This policy allows you to combine two or more policies. One +or more of the policies must be met in order for this policy +to be true. + +### Not Match +``` +match: "not foo[count=1,self=true]" +``` +This policy allows you to negate another policy. + +### True Match +``` +match: "true" +``` +This policy allows all pull requests. It's very likely that you +want to use the "off" approval policy instead of this policy. + +### False Match +``` +match: "false" +``` +This policy allows no pull requests. + +### Disable Match +``` +match: "off" +``` +This policy disables the service. Added in version 0.6.10. + +This is identical to the approval policy "true" with the addition that +it adds a local merge policy to disable automatic merging of branches. 
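+
+To tie the boolean operators together, the following sketch parses a configuration
+fragment that combines them, using the service's own hjson package. The 'sharks' and
+'jets' groups are the example groups from the Approval Policies page, and the import
+path assumes the checks-out source tree:
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/capitalone/checks-out/hjson"
+)
+
+// Example policy: two approvals from anyone in MAINTAINERS, plus one
+// approval from either the sharks or the jets group.
+const sample = `
+approvals:
+[
+  {
+    match: "all[count=2,self=false] and (sharks[count=1] or jets[count=1])"
+  }
+]
+`
+
+func main() {
+	var cfg map[string]interface{}
+	if err := hjson.Unmarshal([]byte(sample), &cfg); err != nil {
+		panic(err)
+	}
+	fmt.Printf("%+v\n", cfg)
+}
+```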
+
+### Atleast Function
+```
+match: "atleast(2, foo[count=3,self=false], bar, baz, {jane,john})"
+```
+This function requires at least N of the specified groups to match
+successfully, where N is the first parameter to the function.
+
+### Author Function
+```
+match: "author(foo or bar or not {jane,john})"
+```
+This function checks the author of the pull request. It is used to limit an approval policy to a set of authors. The potential author can be specified using an expression built out of the other matching expression terms. Since a pull request can only have a single author, any expression that can only be satisfied by more than one author, such as `foo and bar` or `foo[count=2]`, will create a case that cannot match.
+
+## Pattern
+```
+pattern: "(?i)^checks-out\\s*(?P<version>\\S*)"
+```
+The regular expression that checks-out matches against the comment on a GitHub
+pull request. The version capture group must be specified if
+the version section of .checks-out is enabled.
+
+Pattern matching is performed using Go's regular expressions package. We
+recommend testing custom regular expressions in the [Go
+playground](http://play.golang.org/p/nQx_jGsLHz).
+
+## Disapproval
+```
+antipattern: null
+antimatch: "all[count=1,self=true]"
+```
+checks-out has optional support for allowing the reviewer to raise a concern.
+If an 'antipattern' is specified then the disapproval policy is enabled.
+The disapproval policy is specified by the 'antimatch' parameter.
+The antipattern was introduced in version 0.5.17 and the antimatch
+was introduced in version 0.6.8.
+
+Approvals and disapprovals are enforced by the following mechanism. First,
+all the comments in the pull request are scanned for the 'antipattern' regular
+expression. Any matching comments are applied towards the 'antimatch'
+policy. If the antimatch policy becomes true then the pull request is blocked
+from merge. If the antimatch policy is false then the match policy is tested
+as usual. If the match policy becomes true then the pull request can be merged.
+
+If a reviewer enters a disapproval comment followed by an approval comment,
+then the approval comment will cancel out the disapproval comment. The
+disapproval comment is forgotten but the approval comment also applies
+towards the match policy, assuming the reviewer participates in both the
+antimatch policy and the match policy.
+
+The [commit range](#commit) configuration applies the same behavior to
+approvals and disapprovals. With the default behavior, "head", only comments
+that occur after the timestamp of the HEAD of the branch are used. Therefore
+an objection will be erased after a new commit. Using the behavior "all" the
+objection will persist and the reviewer must enter another comment with
+the approval string.
+
+## Author Restriction
+```
+authormatch: "universe[count=1,self=true]"
+```
+
+It is possible to restrict the allowed authors with the authormatch approval
+policy. The default value allows all users to submit pull requests. Using
+the policy "all[count=1,self=true]" will only allow users listed in the
+MAINTAINERS file to submit a pull request. Since a pull request can only have a single author, any expression that can only be satisfied by more than one author, such as `foo and bar` or `foo[count=2]`, will create a case that cannot match.
+
+The difference between `authormatch` and the `author` function in the `match` is subtle. `author` is used to specify that a match rule applies an approval policy to a set of authors. `authormatch` is used to specify which authors can create pull requests at all.
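+
+To make the distinction concrete, here is a hedged sketch (the organizations
+`contractors` and `senior` are hypothetical): `authormatch` decides who may
+open pull requests at all, while `author(...)` inside `match` varies the
+approval requirement by author.
+```
+# Only users listed in the MAINTAINERS file may open pull requests.
+authormatch: "all[count=1,self=true]"
+
+approvals:
+[
+  {
+    # Pull requests authored by a contractor need two approvals from the
+    # senior group; everyone else needs one approval from any maintainer.
+    match: "(author(contractors) and senior[count=2,self=false]) or (not author(contractors) and all[count=1,self=false])"
+  }
+]
+```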
+
+## Work-In-Progress Pull Requests
+```
+antititle: null
+```
+Optionally specify a regular expression that checks-out matches against the title
+of a GitHub pull request. If the title matches then the pull request is blocked
+from being merged. A common value is "^WIP:". This feature was added in
+version 0.7.6.
+
+Pattern matching is performed using Go's regular expressions package. We
+recommend testing custom regular expressions in the [Go
+playground](http://play.golang.org/p/nQx_jGsLHz).
+
+## Commit
+```
+commit:
+{
+  range: head
+  antirange: head
+  tagrange: head
+  ignoreuimerge: false
+}
+```
+The range options affect the processing of commits on the pull request. The
+range fields can have two possible values: "head" or "all". "head" will use the
+comments that occur after the timestamp of the HEAD of the branch. "all" will
+use all the comments on that branch. The 'range' parameter affects approval
+comments, 'antirange' affects disapproval comments, and 'tagrange' affects
+the tagging section. 'range' was introduced in version 0.5.17. 'antirange'
+and 'tagrange' were introduced in version 0.7.7.
+
+'ignoreuimerge' will ignore merges from upstream that are made through the
+GitHub user interface (by clicking on "update branch").
+
+## Maintainers
+```
+maintainers:
+{
+  path: MAINTAINERS
+  type: text
+}
+```
+The path to the file that specifies the [project maintainers](../maintainers).
+The type field can be "text", "hjson", "toml", or "legacy".
+
+## Merge
+```
+merge:
+{
+  enable: false
+  delete: false
+  method: "merge"
+  uptodate: true
+}
+```
+checks-out has the ability to automatically merge a pull request when the branch is
+mergeable and all status checks (including optional ones) have passed. If
+the delete flag is true then the head branch is deleted after a successful
+merge. The delete flag was introduced in version 0.5.18.
+
+If you enable automatic merges in the global merge section and you have
+an approval policy whose match is "true", then you are
+encouraged to declare a local merge section for that policy that is set to false.
+Otherwise checks-out will automatically merge pull requests on those
+branches as soon as they are opened. Instead of using the policy "true",
+use the policy "off" which handles this case automatically.
+
+The parameter 'method' determines the type of merge. It can be "merge",
+"squash" or "rebase".
+
+The parameter 'uptodate' determines whether the compare branch must
+be zero commits behind the base branch before performing an automatic
+merge. This parameter is enabled by default. It was introduced in
+version 0.21.0.
+
+## Tag
+```
+tag:
+{
+  enable: false
+  algorithm: semver
+  template: "{{.Version}}"
+  increment: "patch"
+  docker: false
+}
+```
+checks-out has the ability to automatically place a tag on a merge.
+There are three tagging algorithms: "semver", "timestamp-rfc3339",
+and "timestamp-millis". The 'template' field defines the
+Golang [text/template](https://golang.org/pkg/text/template/)
+that is used to generate the tag. The 'increment' field is used
+by the semver algorithm and determines which section of the semantic
+version to increment when no explicit version is provided
+by the approver: 'major', 'minor', 'patch', or 'none'.
+The 'docker' field enables stricter validation of the template to comply with
+Docker tag requirements.
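+
+As a sketch, a tag section that produces Docker-safe semantic version tags with
+a "v" prefix (the prefix is a hypothetical choice, not a requirement) and bumps
+the minor version when approvers do not supply an explicit version:
+```
+tag:
+{
+  enable: true
+  algorithm: semver
+  template: "v{{.Version}}"
+  increment: "minor"
+  docker: true
+}
+```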
+
+### Semantic Versioning
+
+The semantic versioning standard is described at [semver.org](http://semver.org/).
+
+Approval comments are used to provide the semantic version for the current pull request. By default, the version is specified after the "I approve" string:
+
+```
+I approve 1.0.0
+```
+
+This will tag the merge commit with the version 1.0.0.
+
+If multiple approvers for a pull request specify a version, the highest version
+specified will be used.
+
+If no approver specifies a version, or if the maximum version specified by an
+approver is less than any previously specified version tag, the version will be
+set to the highest specified version tag with the patch version incremented.
+
+### Timestamp Versioning
+
+There are currently two options for timestamps, one based on the RFC 3339 standard and one specifying milliseconds since the epoch (Jan 1, 1970, 12:00:00AM UTC).
+
+The type "timestamp-rfc3339" will look like this:
+```
+2016-05-16T19.06.26Z
+```
+(Colons aren't legal in git tags, so the colons in the RFC format have been replaced by periods.)
+
+The type "timestamp-millis" specifies the number of milliseconds since the epoch.
+
+## Feedback
+```
+feedback:
+{
+  type: ["comment", "review"]
+}
+```
+
+The feedback section customizes the processing of approval events. The feedback
+type determines which kinds of events are processed. "comment" accepts
+pull request comments. "review" accepts pull request reviews.
+
+## Comment
+```
+comment:
+{
+  enable: false
+  targets: []
+}
+```
+Allows notifications to be sent to output channels when checks-out performs an
+action. Legal values for the "types" field are the following:
+
+* "error" Error while processing webhook
+* "open" Pull request is opened
+* "close" Pull request is closed without being merged
+* "accept" Pull request has been closed and merged
+* "approve" An approval comment has been added
+* "block" A disapproval comment has been added
+* "reset" Commit of PR branch has reset previous approvals
+* "merge" Pull request was auto-merged after all status checks passed
+* "tag" Branch was tagged after merge
+* "delete" Branch was auto-deleted after merge
+* "deploy" Deployment was triggered after merge
+* "author" Pull request is blocked because author is not approved
+
+### GitHub Comments
+```
+{
+  target: github
+  pattern: null
+  types: []
+}
+```
+
+If enabled then checks-out will add comments to the pull request. Pattern is an
+optional regular expression that allows comments to be only applied to
+some pull requests based on the title of the request. The types field
+is described above. If types is empty then all types send notifications.
+GitHub comment support was introduced in 0.7.9.
+
+### Slack Comments
+```
+{
+  target: foobar.slack.com
+  pattern: null
+  types: []
+  names: []
+}
+```
+
+If enabled then checks-out will write messages to Slack. Pattern is an
+optional regular expression that allows comments to be only applied to
+some pull requests based on the title of the request. The types field
+is described above. If types is empty then all types send notifications.
+Names is a list of Slack channels or users you want to message. Channels are prefixed with #,
+users are prefixed with @. Slack comment support was introduced in 0.7.9.
+
+The Slack integration allows custom servers to be specified.
+
+The target field can be the hostname of the Slack target such as "foobar.slack.com".
+If that hostname has been previously registered with the checks-out service then you
+will be able to use it.
Otherwise you need to register a [Slack Webhook](https://api.slack.com/incoming-webhooks) with the checks-out service. Use the
+/api/user/slack/:hostname endpoint to register the webhook. This is documented
+on the API page.
+
+## Deploy
+```
+deploy:
+{
+  enable: false
+  deployment: DEPLOYMENTS
+}
+```
+TODO: add documentation
diff --git a/hugo/content/faq.md b/hugo/content/faq.md
new file mode 100644
index 0000000..75a9009
--- /dev/null
+++ b/hugo/content/faq.md
@@ -0,0 +1,72 @@
++++
+date = "2015-12-05T16:00:21-08:00"
+draft = false
+title = "FAQ"
+weight = 8
+menu = "main"
+toc = true
++++
+
+# Why don't I see my repository?
+
+Here are some tips for troubleshooting:
+
+* Is your GitHub repository public?
+* Do you have Admin access to the repository?
+* Did you grant access to your GitHub organization?
+* Did you recently create the repository? There may be up to a 15 minute delay due to internal caching.
+* Does your organization restrict integrations? [Learn more](https://github.com/blog/1941-organization-approved-applications).
+
+# Can I use checks-out with GitHub Enterprise?
+
+Yes. In order to use checks-out with GitHub Enterprise you will need to install and run your own instance. See [the documentation](../install).
+
+GitHub Enterprise 2.4 has partial support for protected branches and requires [manual configuration](../branches/#github-enterprise:84e35ebc125ab31a6f85cb9b5e08d8c9). GitHub Enterprise 2.3 and below are not supported.
+
+# Why are all the configuration files in HJSON format?
+
+When we began to write complex approval policies in the .checks-out
+configuration file, it became apparent that TOML was too verbose for
+the nested configuration sections. We looked for a golang text format
+serde library that supported the following features: actually parses
+whatever format without crashing on corner cases, can defer
+unmarshal of specific fields (json.RawMessage), and can write custom
+marshal and unmarshal functions.
+
+The encoding/json library is the only library that met these requirements.
+Human JSON is a small amount of syntactic sugar for JSON and adding
+support for HJSON was trivial.
+
+# Can I install on my own server?
+
+Yes. See the [install documentation](../install).
+
+# Can I use a central Maintainers file?
+
+Sort of. You can create a maintainers team in your GitHub organization. See the [maintainers documentation](../maintainers).
+
+# Why aren't approvals being processed?!
+
+Verify that you have received the correct number of approvals. The default configuration requires a minimum of two approvals. See the [documentation](../customize) to learn more about custom configurations.
+
+Verify that hooks are being sent correctly. You can see an audit log of all hooks in the **Webhooks & Services** section of your repository settings.
+
+Verify the message is being processed successfully. An unsuccessful message will be flagged accordingly in GitHub. Error messages from the service are written to the response body.
+
+![hook error](/checks-out/images/hook_error.png)
+
+Verify the response from a successful hook. The approval settings, approval status, and list of individuals that approved the pull request are included in the payload for auditing purposes.
+
+![hook success](/checks-out/images/hook_success.png)
+
+If the payload indicates the pull request was approved, and this is not reflected in the status, you can click the re-deliver button to re-deliver the payload.
+
+# Why can't I merge my pull request?
+
+Please remember that checks-out uses GitHub [protected branches](https://github.com/blog/2051-protected-branches-and-required-status-checks), which prevent merging a branch that has failed to meet all required status checks. The ability to merge a pull request is completely governed by GitHub.
+
+Please also note that protected branches prevent you from merging a pull request that has fallen behind the target branch. That is to say, if a pull request is merged, all outstanding pull requests will need to re-sync before they can be merged.
+
+# This thing is busted!
+
+Please be sure you have read the documentation, including this FAQ, and fully understand how GitHub [protected branches](https://github.com/blog/2051-protected-branches-and-required-status-checks) work. If you are still experiencing issues please [contact support](../support) and we'll be happy to help!
diff --git a/hugo/content/install.md b/hugo/content/install.md
new file mode 100644
index 0000000..2a7c1d4
--- /dev/null
+++ b/hugo/content/install.md
@@ -0,0 +1,335 @@
++++
+date = "2015-12-05T16:00:21-08:00"
+draft = false
+title = "Self Hosting"
+weight = 6
+menu = "main"
+toc = true
++++
+
+# Requirements
+
+checks-out ships as a single binary file. If you are planning on integrating
+with GitHub Enterprise, it requires version 2.4 or higher.
+
+# Configuration
+
+This is a full list of configuration options. Please note that many of these
+options use default configuration values that should work for the majority of
+installations.
+
+# Environment Variables for checks-out
+
+This document contains information about all the environment variables that can or
+must be defined for checks-out to function.
+
+## Server configuration
+
+### Server IP Address and Port
+
+- Format: `SERVER_ADDR="_ip_address_:_port_"` but more specifically see the documentation for golang's `http.ListenAndServe` and `http.ListenAndServeTLS`
+- Default: `:8000`, which means port 8000 on all interfaces
+- Required: No
+
+See the documentation for golang's `http.ListenAndServe` for details on the address format.
+
+### Server SSL Certificate
+
+- Format: `SERVER_CERT="_full_path_and_filename_of_ssl_server_cert_file_"`
+- Default: None
+- Required: No
+
+If SERVER_CERT is not specified, checks-out runs without SSL. See the
+documentation for golang's `http.ListenAndServeTLS` for details on the format.
+
+### Server SSL Key
+
+- Format: `SERVER_KEY="_full_path_and_filename_of_ssl_server_key_file_"`
+- Default: None
+- Required: _Only if `SERVER_CERT` is also specified_
+
+If SERVER_CERT is specified, then SERVER_KEY must be specified as well. See the documentation for golang's `http.ListenAndServeTLS` for details on the format.
+
+## Database configuration
+
+### Database Driver
+
+- Format: `DB_DRIVER=sqlite3|postgres|mysql`
+- Default: None
+- Required: Yes
+
+### Database Datasource
+
+- Format: `DB_SOURCE="_db_driver_specific_datasource_spec_"`
+- Default: None
+- Required: Yes
+
+Please refer to the documentation for the sqlite3, lib/pq, and mysql drivers for their respective
+datasource specifications for this environment variable.
+
+## Slack integration
+
+- Format: `SLACK_TARGET_URL="_slack_integration_url_"`
+- Default: None
+- Required: No
+
+If `SLACK_TARGET_URL` is not defined, then nothing is posted to Slack; however, a message
+noting that Slack is not configured is currently logged every time a Slack-notifiable event happens.
+
+## Github integration
+
+### Email Address To Use for Github
+
+- Format: `GITHUB_EMAIL="_valid_email_address_"`
+- Default: None
+- Required: Yes
+
+Email address for git commits.
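+
+Pulling together the variables described in this section, a minimal sketch of an
+environment for a local test instance (every value below is a placeholder; the
+OAuth2 client and secret come from the registration step described later on
+this page):
+```
+export SERVER_ADDR=":8000"
+export DB_DRIVER="sqlite3"
+export DB_SOURCE="checks-out.sqlite"
+export GITHUB_EMAIL="bot@example.com"
+export GITHUB_CLIENT="<your-oauth2-client-id>"
+export GITHUB_SECRET="<your-oauth2-secret>"
+```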
+
+### URL for Github API
+
+- Format: `GITHUB_URL="_protocol_plus_hostname_plus_path_prefix_of_url_"`
+- Default: `https://github.com`
+- Required: No, defaults to `https://github.com` which is fine unless you are using your own enterprise github
+
+### Github OAuth2 Client ID
+
+- Format: `GITHUB_CLIENT="_your_OAuth2_client_id_"`
+- Default: None
+- Required: Yes. You must supply the github OAuth2 client ID issued by the github server you are connecting to (github.com or enterprise-hosted github server)
+
+### Github OAuth2 Secret
+
+- Format: `GITHUB_SECRET="_your_OAuth2_secret_"`
+- Default: None
+- Required: Yes. You must supply the github OAuth2 secret issued by the github server you are connecting to (github.com or enterprise-hosted github server)
+
+### Github Scope To Use
+
+- Format: `GITHUB_SCOPE="_valid_github_scope_specification_"`. Please see github documentation for specifics on the specification format
+- Default: `read:org,repo:status,admin:repo_hook,admin:org_hook`
+- Required: No
+
+Permissions granted to checks-out by Github. The minimum
+required permissions are the default ones.
+
+### Github rate limiting
+
+- Format: `GITHUB_BATCH_PER_SECOND=int`
+- Default: 10
+- Required: No
+
+GitHub batch access rate limiter. For certain calls that might happen in rapid succession, this limits how
+fast those calls are made to the server. The value is in Hertz; the default value of 10 means that you cannot
+send more than 10 calls a second to Github.
+
+### Github testing-only settings
+
+#### Enable Github Integration Tests
+
+- Format: `GITHUB_TEST_ENABLE=true|false`
+- Default: False
+- Required: No
+
+Enables the Github integration tests.
+
+#### Github Integration Tests OAuth2 Token
+
+- Format: `GITHUB_TEST_TOKEN=__OAUTH2_TOKEN_TO_USE`
+- Default: None
+- Required: _only if GITHUB_TEST_ENABLE is true_
+
+This is an OAuth2 token to be sent to the github endpoint for github integration test API requests.
+
+## Logging/Debug
+
+### Debug Logging
+
+- Format: `LOG_LEVEL=debug|info|warn|error|fatal|panic`
+- Default: info
+- Required: No
+
+Specifies the default logging level used in checks-out.
+
+### Checks-out Sunlight
+
+- Format: `CHECKS_OUT_SUNLIGHT=true|false`
+- Default: false
+- Required: No
+
+If set to true, exposes endpoints and data that might not be suitable for a live site. Specifically,
+the following behaviors are enabled:
+
+- `/api/repos` endpoint is available that will show all repositories managed under checks-out
+- `/version` endpoint is available that will show the checks-out version
+- Outputs stack trace information over HTTP when errors happen
+- Puts the `X-CHECKS-OUT-VERSION` HTTP header with the checks-out version in every response
+
+### Blacklist User Agents from Logging
+
+- Format: `BLACKLIST_USER_AGENTS=user_agent_1[:user_agent_2:...:user_agent_N]`
+- Default: None
+- Required: No
+
+Specify a colon-separated list of user agent strings that the request-logging middleware
+should skip. This allows suppressing logging of user agents like the health checker from AWS
+that are generally operational noise.
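+
+For example, a sketch that suppresses request logging for two hypothetical
+health-check agents:
+```
+export BLACKLIST_USER_AGENTS="ELB-HealthChecker/2.0:kube-probe/1.21"
+```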
+
+### How Frequently To Log Operational Statistics
+
+- Format: `LOG_STATS_PERIOD=_valid_time.ParseDuration()_string_`
+- Default: 0 (Do not periodically log)
+- Required: No
+
+Specify a time duration in a valid format understood by the
+[time.ParseDuration() method](https://golang.org/pkg/time/#ParseDuration) to periodically log activity
+of checks-out such as number of commits, approvers, and disapprovers in the specified time period.
+
+## Caching
+
+### Response caching
+
+- Format: `CACHE_TTL=_time_specified_in_time.Duration_format_`
+- Default: 15 minutes
+- Required: No
+
+Determines the length of time the gin middleware will cache responses. The default is 15 minutes.
+
+### Github Repo Artifact Caching
+
+- Format: `LONG_CACHE_TTL=_time_specified_in_time.Duration_format_`
+- Default: 24 hours
+- Required: No
+
+Determines the length of time checks-out will cache github artifacts like user information,
+organization members, and team members in memory before going back to the server. The default
+is 24 hours.
+
+## Account Creation Control
+
+### Limit User Access
+
+- Format: `LIMIT_USERS=true|false`
+- Default: false
+- Required: No
+
+If enabled, only users listed in the `limit_users` table in the database are
+allowed to create accounts. Any existing accounts will still function, even
+if the user's name is not in the `limit_users` table.
+
+### Limit Organizational Access
+
+- Format: `LIMIT_ORGS=true|false`
+- Default: false
+- Required: No
+
+If enabled, only users in the organizations listed in the `limit_orgs` table
+in the database are allowed to create accounts. Any existing accounts will
+still function, even if the user is not in any of the orgs named in the
+`limit_orgs` table.
+
+If both `LIMIT_ORGS` and `LIMIT_USERS` are set to `true` then a user can
+either be explicitly named in `limit_users` or can belong to an org named
+in `limit_orgs`.
+
+## Miscellaneous Properties
+
+### Documentation Location
+
+- Format: `CHECKS_OUT_DOCS_URL=_URL_for_docs_no_closing_slash_`
+- Default: `https://www.capitalone.io/checks-out`
+- Required: No
+
+Provides the base URL for links to the documentation in the UI.
+
+### Slack Integration
+
+- Format: `SLACK_TARGET_URL=url`
+- Default: none
+- Required: No
+
+Provides the default Slack notification URL. This is the URL that's used by default for Slack integration when the
+target specified in the .checks-out file is "slack".
+
+### Admin management
+
+- Format: `GITHUB_ADMIN_ORG=_github_org_name_`
+- Default: none
+- Required: No
+
+Specifies the Github organization whose members have admin privileges in checks-out. If this is not specified,
+there are no admin users in checks-out. Admin users have access to certain REST API endpoints that other users
+do not.
+
+### Template Repo Name
+
+- Format: `ORG_REPOSITORY_NAME=_name_for_template_repo_per_org_`
+- Default: checks-out-configuration
+- Required: No
+
+Org management repo name. This is the name of the repo in an org that is used to hold the default .checks-out and
+MAINTAINERS files for all repos in the org.
+
+## Legacy Properties
+
+The following environment variables exist to provide system-wide defaults for repos that are still using a
+.lgtm file. They are considered obsolete.
+
+- Format: `CHECKS_OUT_APPROVALS=_number_of_approvals_`
+- Default: 2
+- Required: No
+
+Legacy default number of approvals.
+
+- Format: `CHECKS_OUT_PATTERN=_regex_for_approval_comments_`
+- Default: (?i)LGTM
+- Required: No
+
+Legacy matching pattern.
+
+- Format: `CHECKS_OUT_SELF_APPROVAL_OFF=true|false`
+- Default: false
+- Required: No
+
+Legacy self-approval behavior. Set to true to disable self-approvals.
+
+# Registration
+
+Register your application with GitHub (or GitHub Enterprise) to create your client
+ID and secret. It is very important that the redirect URL matches your http(s) scheme
+and hostname exactly with `/login` as the path.
+
+Please use this screenshot for reference:
+
+![github registration](/checks-out/images/app_registration.png)
+
+# Reverse Proxies
+
+If you are running behind a reverse proxy please ensure the `X-Forwarded-For`
+and `X-Forwarded-Proto` variables are configured.
+
+This is an example nginx configuration:
+
+```nginx
+location / {
+    proxy_set_header X-Forwarded-For $remote_addr;
+    proxy_set_header X-Forwarded-Proto $scheme;
+    proxy_set_header Host $http_host;
+    proxy_pass http://127.0.0.1:8000;
+}
+```
+
+This is an example caddy server configuration:
+
+```nginx
+checks-out.mycompany.com {
+    proxy / localhost:8000 {
+        proxy_header X-Forwarded-Proto {scheme}
+        proxy_header X-Forwarded-For {host}
+        proxy_header Host {host}
+    }
+}
+```
+
+Note that when running behind a reverse proxy you should change the recommended
+port mappings from `--publish=80:8000` to something like `--publish=8000:8000`.
diff --git a/hugo/content/maintainers.md b/hugo/content/maintainers.md
new file mode 100644
index 0000000..eeb0ee6
--- /dev/null
+++ b/hugo/content/maintainers.md
@@ -0,0 +1,120 @@
++++
+date = "2015-12-05T16:00:21-08:00"
+draft = false
+title = "Maintainers"
+weight = 2
+menu = "main"
++++
+
+The list of approvers is stored in a `MAINTAINERS` text file in the root
+of your repository. The maintainers file can be in either a text or HJSON
+format. The format is specified in the [.checks-out file](../customize)
+and the default format is text.
+
+You can inspect the maintainers file for your repository using the API endpoint
+`/api/repos/[orgname]/[reponame]/maintainers`. The output will have any
+github-org and github-team macros expanded (see below). If the maintainers
+file cannot be parsed then an error message will be returned.
+
+# Text format
+
+The text format can be any of the following types.
+
+Username, separated by newline:
+
+```
+foo
+bar
+baz
+```
+
+Username and email address, separated by newline:
+
+```
+foo <foo@example.com>
+bar <bar@example.com>
+baz <baz@example.com>
+```
+
+FullName, email address and username, separated by newline:
+
+```
+Fooshure Jones <foo@example.com> (@foo)
+Bar None <bar@example.com> (@bar)
+Bazinga Smith <baz@example.com> (@baz)
+```
+
+Directives for importing GitHub organizations and GitHub teams.
+These directives populate both the 'people' and 'org' fields
+with the correct values (see below).
+ +``` +github-org foo # loads organization foo +github-team bar # loads team bar within the organization of the repository +github-team bar foo # loads team bar from organization foo +github-org repo-self # loads the organization of the repository +github-team repo-self # loads all the teams of the repository +``` + +In the examples above the groups are automatically assigned the following names: +``` +foo +bar +foo-bar +[name of organization] +[names of teams] +``` + +# HJSON format + +[Human JSON](http://hjson.org) format inspired by the [Docker +project](https://github.com/docker/opensource/blob/master/MAINTAINERS). +Organizations can be specified using this format. + +The github-org and github-team directives can be used in the 'group' section +but not in the 'people' section. These directives will populate the correct +values into the 'people' section. + +``` +{ + people: + { + bob: + { + name: Bob Bobson + email: bob@email.co + } + fred: + { + name: Fred Fredson + email: fred@email.co + } + jon: + { + name: Jon Jonson + email: jon@email.co + } + ralph: + { + name: Ralph Ralphington + email: ralph@email.co + } + george: + { + name: George McGeorge + email: george@email.co + } + } + org: + { + cap: + { + people: [ "bob", "fred", "jon", "github-org cap" ] + } + iron: + { + people: [ "ralph", "george", "github-team iron" ] + } + } +} +``` diff --git a/hugo/content/overview.md b/hugo/content/overview.md new file mode 100644 index 0000000..b387e36 --- /dev/null +++ b/hugo/content/overview.md @@ -0,0 +1,67 @@ ++++ +date = "2015-12-05T16:00:21-08:00" +draft = false +title = "Quick Start" +weight = 1 +menu = "main" +toc = true ++++ + +# Overview + +Checks-out is a fork of [LGTM](https://github.com/lgtmco/lgtm). + +Checks-out will register itself as a required status check. When you enable your repository on public GitHub it is automatically configured to use [protected branches](https://github.com/blog/2051-protected-branches-and-required-status-checks). Protected branches prevent pull requests from being merged until required status checks are passing. If you are using Enterprise GitHub then you might be required to manually enable protected branches on the master branch. + +You can customize many options using a .checks-out file in your repository. Please see the [customization documentation](../customize) for more detail. + +# Setup + +The simplest way to configure checks-out is to create a .checks-out file the +default branch of your GitHub repository with the contents: + +``` +approvals: +[ + { + # count determines how many approvals are needed + # self determines whether you can approve your own pull requests + match: "all[count=1,self=true]" + } +] +``` + +Create a MAINTAINERS file in the default branch with the contents: + +``` +github-org repo-self +``` + +Login to the checks-out service (this will provide the bot +with credentials to monitor your repository) and enable the service +for your repo. Click on the button that says "OFF" to enable the service. + +## Organization Setup + +Create a repository named "checks-out-configuration" in your organization. +The files template.checks-out and template.MAINTAINERS in the +checks-out-configuration repository will be used for any repository +that is missing .checks-out or MAINTAINERS files. You can also register +an entire GitHub organization using the checks-out user interface. Select +the "on/off" toggle next to the name of an organization. 
Registering +an organization will register any existing repositories and will auto-register +any new repositories. Added in version 0.12.0. + +# Approvers + +Define a list of individuals that can approve pull requests. This list should be store in a MAINTAINERS file to the root of your repository. Please see the [maintainers](../maintainers) documentation for supported file formats. + +**Please note** that checks-out pulls the MAINTAINERS file from your repository default branch (typically master). Changes to the MAINTAINERS file are not recognized until present in the default branch. Changes to the MAINTAINERS file in a pull request are therefore not recognized until merged. + +# Approvals + +Pull requests are locked and cannot be merged until the minimum number of approvals are received. Project maintainers can indicate their approval by commenting on the pull request and including "I approve" in their approval text. + +The service also provides integration with GitHub Reviews. An accepted GitHub +Review is counted as an approval. GitHub Review that requests additional +changes blocks the pull request from merging. diff --git a/hugo/content/support.md b/hugo/content/support.md new file mode 100644 index 0000000..084253b --- /dev/null +++ b/hugo/content/support.md @@ -0,0 +1,9 @@ ++++ +date = "2015-12-05T16:00:21-08:00" +draft = false +title = "Support" +weight = 7 +menu = "main" ++++ + +Please be sure to read the **[faq](../faq)** before engaging the support team. This is a free service run by open source community members and we ask that you are courteous of their time. diff --git a/hugo/layouts/index.html b/hugo/layouts/index.html new file mode 100644 index 0000000..5961655 --- /dev/null +++ b/hugo/layouts/index.html @@ -0,0 +1,13 @@ + + + + + checks-out + + + + + + \ No newline at end of file diff --git a/hugo/static/docs.css b/hugo/static/docs.css new file mode 100644 index 0000000..5d904da --- /dev/null +++ b/hugo/static/docs.css @@ -0,0 +1,114 @@ +@media screen and (min-width: 1201px) { + .container { + width: 70%; + } +} +@media screen and (min-width: 901px) { + .container { + width: 90%; + } +} +@media screen and (max-width: 768px) { + .grid-flex-cell { + display: block; + width:100%; + } +} + +.grid-flex-container { + margin-top:30px; +} +.grid-flex-cell { + border:none; + text-align:left; +} +.grid-flex-cell-1of4 { + min-width:250px; + margin-left:30px; +} +h1 { + font-size:32px; + margin-bottom:0px; +} +h2 { + font-size:24px; + margin:60px 0px 30px 0px; +} +.grid-flex-cell p { + margin-bottom: 30px; +} + +.navbar { + border-bottom: 1px solid #e6eaed !important; +} +.navbar .navbar-brand { + margin-left:0px; +} +.side-nav a { + color: #4d5659 !important; +} + +img[alt="hook success"], +img[alt="hook error"], +img[alt="github registration"], +img[alt="protected branches"] { + border: 1px solid #e6eaed; + border-radius:3px; + max-width:600px; +} + +img[alt="approval complete"], +img[alt="pending approval"] { + max-width:600px; +} + +img[alt="maintainers team"] { + max-width:500px; +} + +pre code { + font-family: "Roboto Mono"; + font-size: 13px; +} + +#TableOfContents { + margin-top:30px; + margin-bottom:-60px; +} +#TableOfContents li a { + display:inline-block; + margin-bottom:5px; +} +#TableOfContents li a:hover { + text-decoration: underline; +} +section h1 { + border-top: 1px solid #e6eaed; + padding-top: 30px; + padding-bottom:30px; + margin-top: 30px; + font-size:28px; +} +blockquote { + background-color: #e3f8ff; + color: #00b0e9; +} +blockquote p { + padding:30px; + 
font-style: normal; +} +blockquote a { + color: #008dba; +} +blockquote a:hover { + text-decoration: underline; +} +blockquote ~ h1 { + border-top:0px; + margin-top:-20px; +} + +table { + border: 1px solid #e6eaed !important; + margin-bottom: 40px; +} diff --git a/hugo/static/images/app_registration.png b/hugo/static/images/app_registration.png new file mode 100644 index 0000000..53e8070 Binary files /dev/null and b/hugo/static/images/app_registration.png differ diff --git a/hugo/static/images/approval_complete.png b/hugo/static/images/approval_complete.png new file mode 100644 index 0000000..effc27c Binary files /dev/null and b/hugo/static/images/approval_complete.png differ diff --git a/hugo/static/images/hook_error.png b/hugo/static/images/hook_error.png new file mode 100644 index 0000000..4940341 Binary files /dev/null and b/hugo/static/images/hook_error.png differ diff --git a/hugo/static/images/hook_success.png b/hugo/static/images/hook_success.png new file mode 100644 index 0000000..14e963b Binary files /dev/null and b/hugo/static/images/hook_success.png differ diff --git a/hugo/static/images/maintainers_team.png b/hugo/static/images/maintainers_team.png new file mode 100644 index 0000000..7326b14 Binary files /dev/null and b/hugo/static/images/maintainers_team.png differ diff --git a/hugo/static/images/pending_approval.png b/hugo/static/images/pending_approval.png new file mode 100644 index 0000000..38cbd49 Binary files /dev/null and b/hugo/static/images/pending_approval.png differ diff --git a/hugo/static/images/protected_branches.png b/hugo/static/images/protected_branches.png new file mode 100644 index 0000000..9a3d144 Binary files /dev/null and b/hugo/static/images/protected_branches.png differ diff --git a/hugo/static/style.css b/hugo/static/style.css new file mode 100644 index 0000000..902542d --- /dev/null +++ b/hugo/static/style.css @@ -0,0 +1,2936 @@ +html { + font-family: sans-serif; + -ms-text-size-adjust: 100%; + -webkit-text-size-adjust: 100% +} + +body { + margin: 0 +} + +article, +aside, +details, +figcaption, +figure, +footer, +header, +hgroup, +main, +menu, +nav, +section, +summary { + display: block +} + +audio, +canvas, +progress, +video { + display: inline-block; + vertical-align: baseline +} + +[hidden], +template { + display: none +} + +a { + background-color: transparent +} + +a:active, +a:hover { + outline: 0 +} + +abbr[title] { + border-bottom: 1px dotted +} + +b, +strong { + font-weight: bold +} + +dfn { + font-style: italic +} + +h1 { + font-size: 2em; + margin: .67em 0 +} + +mark { + background: #ff0; + color: #000 +} + +small { + font-size: 80% +} + +sub, +sup { + font-size: 75%; + line-height: 0; + position: relative; + vertical-align: baseline +} + +sup { + top: -0.5em +} + +sub { + bottom: -0.25em +} + +img { + border: 0 +} + +figure { + margin: 1em 40px +} + +hr { + -moz-box-sizing: content-box; + box-sizing: content-box; + height: 0 +} + +pre { + overflow: auto +} + +code, +kbd, +pre, +samp { + font-family: monospace, monospace; + font-size: 1em +} + +button, +input, +optgroup, +select, +textarea { + color: inherit; + font: inherit; + margin: 0 +} + +button { + overflow: visible +} + +button, +select { + text-transform: none +} + +button, +html input[type="button"], +input[type="reset"], +input[type="submit"] { + -webkit-appearancebuttoncursor: pointer +} + +button[disabled], +html input[disabled] { + cursor: default +} + +button::-moz-focus-inner, +input::-moz-focus-inner { + border: 0; + padding: 0 +} + +input { + line-height: normal +} + 
+input[type="checkbox"], +input[type="radio"] { + box-sizing: border-box; + padding: 0 +} + +input[type="number"]::-webkit-inner-spin-button, +input[type="number"]::-webkit-outer-spin-button { + height: auto +} + +input[type="search"] { + -webkit-appearance: textfield; + box-sizing: content-box +} + +input[type="search"]::-webkit-search-cancel-button, +input[type="search"]::-webkit-search-decoration { + -webkit-appearance: none +} + +fieldset { + border: 1px solid #c0c0c0; + margin: 0 2px; + padding: .35em .625em .75em +} + +legend { + border: 0; + padding: 0 +} + +textarea { + overflow: auto +} + +optgroup { + font-weight: bold +} + +table { + border-collapse: collapse; + border-spacing: 0 +} + +td, +th { + padding: 0 +} + +html { + font-family: sans-serif; + -ms-text-size-adjust: 100%; + -webkit-text-size-adjust: 100% +} + +body { + margin: 0 +} + +article, +aside, +details, +figcaption, +figure, +footer, +header, +hgroup, +main, +menu, +nav, +section, +summary { + display: block +} + +audio, +canvas, +progress, +video { + display: inline-block; + vertical-align: baseline +} + +audio:not([controls]) { + display: none; + height: 0 +} + +[hidden], +template { + display: none +} + +a { + background-color: transparent +} + +a:active, +a:hover { + outline: 0 +} + +abbr[title] { + border-bottom: 1px dotted +} + +b, +strong { + font-weight: bold +} + +dfn { + font-style: italic +} + +h1 { + font-size: 2em; + margin: .67em 0 +} + +mark { + background: #ff0; + color: #000 +} + +small { + font-size: 80% +} + +sub, +sup { + font-size: 75%; + line-height: 0; + position: relative; + vertical-align: baseline +} + +sup { + top: -0.5em +} + +sub { + bottom: -0.25em +} + +img { + border: 0 +} + +svg:not(:root) { + overflow: hidden +} + +figure { + margin: 1em 40px +} + +hr { + -moz-box-sizing: content-box; + box-sizing: content-box; + height: 0 +} + +pre { + overflow: auto +} + +code, +kbd, +pre, +samp { + font-family: monospace, monospace; + font-size: 1em +} + +button, +input, +optgroup, +select, +textarea { + color: inherit; + font: inherit; + margin: 0 +} + +button { + overflow: visible +} + +button, +select { + text-transform: none +} + +button, +html input[type="button"], +input[type="reset"], +input[type="submit"] { + -webkit-appearance: button; + cursor: pointer +} + +button[disabled], +html input[disabled] { + cursor: default +} + +button::-moz-focus-inner, +input::-moz-focus-inner { + border: 0; + padding: 0 +} + +input { + line-height: normal +} + +input[type="checkbox"], +input[type="radio"] { + -moz-box-sizing: border-box; + box-sizing: border-box; + padding: 0 +} + +input[type="number"]::-webkit-inner-spin-button, +input[type="number"]::-webkit-outer-spin-button { + height: auto +} + +input[type="search"] { + -webkit-appearance: textfield; + -moz-box-sizing: content-box; + box-sizing: content-box +} + +input[type="search"]::-webkit-search-cancel-button, +input[type="search"]::-webkit-search-decoration { + -webkit-appearance: none +} + +fieldset { + border: 1px solid #c0c0c0; + margin: 0 2px; + padding: .35em .625em .75em +} + +legend { + border: 0; + padding: 0 +} + +textarea { + overflow: auto +} + +optgroup { + font-weight: bold +} + +table { + border-collapse: collapse; + border-spacing: 0 +} + +td, +th { + padding: 0 +} + +*, +*:after, +*:before { + -moz-box-sizing: border-box; + box-sizing: border-box +} + +html { + height: 100% +} + +body { + min-height: 100%; + font-family: 'proxima-nova', helvetica neue, helvetica, arial, sans-serif; + font-size: 16px; + line-height: 1.5; + color: 
#4d5659; + text-rendering: optimizeSpeed +} + +p { + margin-top: 0; + margin-bottom: 30px +} + +::-moz-selection { + background-color: #74ddff; + color: #fff +} + +::selection { + background-color: #74ddff; + color: #fff +} + +a { + color: #00b0e9; + text-decoration: none; + cursor: pointer +} + +a:focus { + outline: none +} + +a:hover { + color: #008dba +} + +.pull-left { + float: left +} + +.pull-right { + float: right +} + +.hide { + display: none +} + +.cf:before, +.cf:after { + content: " "; + display: table +} + +.cf:after { + clear: both +} + +img, +iframe, +video { + max-width: 100% +} + +h1, +h2, +h3, +h4, +h5, +h6 { + margin-top: 0; + margin-bottom: 30px; + line-height: 1.25; + font-family: 'aktiv-grotesk', helvetica neue, helvetica, arial, sans-serif +} + +h1, +h2, +h3, +h4, +h5, +h6, +strong { + color: #000 +} + +h1, +h2, +h3, +h4, +h5, +h6 { + font-weight: 300 +} + +h6 { + text-transform: uppercase; + color: #828c8f +} + +abbr { + color: #828c8f; + border-bottom: 1px dotted #b7c0c3; + cursor: help +} + +small, +sup, +sub { + font-size: 12px +} + +mark { + padding: 3px 5px; + background: #ffffb3 +} + +blockquote { + margin: 30px 0; + padding: 0 15px; + color: #828c8f; + font: italic 16px/1.55 Georgia, Times, serif +} + +blockquote p:only-child, +blockquote p:last-child { + margin: 0 +} + +blockquote footer { + margin-top: 15px; + font-size: 16px; + color: #9ba3a5 +} + +blockquote footer:before { + content: '\2014 \00A0' +} + +.blockquote-centered { + text-align: center +} + +.blockquote-medium { + font-size: 22px +} + +.blockquote-large { + font-size: 36px; + line-height: 1.3 +} + +@media screen and (max-width:899px) { + h1 { + font-size: 38px + } + h2 { + font-size: 32px + } + h3 { + font-size: 26px + } + h4 { + font-size: 20px + } + h5 { + font-size: 16px + } + h6 { + font-size: 14px + } + .blockquote-large { + font-size: 32px + } +} + +@media screen and (min-width:900px) { + h1 { + font-size: 42px + } + h2 { + font-size: 36px + } + h3 { + font-size: 30px + } + h4 { + font-size: 24px + } + h5 { + font-size: 18px + } + h6 { + font-size: 16px + } +} + +ul, +ol { + margin: 0 0 30px 0; + padding-left: 30px +} + +ul li>ul, +ol li>ul, +ul li>ol, +ol li>ol { + margin-top: 30px +} + +ul li ul li, +ol li ul li { + list-style: circle +} + +ul li li ul li, +ol li li ul li { + list-style: square +} + +ul li { + list-style: disc +} + +.list-unstyled { + padding-left: 0 +} + +.list-unstyled li { + list-style: none +} + +.list-inline li { + display: inline-block +} + +dl dt { + font-weight: 500; + color: #000 +} + +dl dd { + margin: 0 +} + +.container { + margin: 0 auto +} + +.grid-flex-container { + padding: 0; + list-style: none; + display: -webkit-flex; + display: -ms-flexbox; + display: flex; + -ms-flex-flow: row wrap; + flex-flow: row wrap; + -webkit-flex-flow: row wrap +} + +[class^="grid-flex-cell"] { + margin-bottom: 30px +} + +@media screen and (max-width:600px) { + .container { + width: 90% + } + [class^="grid-flex-cell"] { + -webkit-flex: 0 0 100%; + -ms-flex: 0 0 100%; + flex: 0 0 100% + } +} + +@media screen and (min-width:601px) { + [class^="grid-flex-cell"] { + -webkit-flex: 1; + -ms-flex: 1; + flex: 1; + padding: 15px 0 + } + [class^="grid-flex-cell"]:not(:first-child) { + margin-left: 30px + } + .grid-flex-cell-1of2, + .grid-flex-cell-1of3, + .grid-flex-cell-1of4 { + -webkit-flex: none; + -ms-flex: none; + flex: none + } + .grid-flex-cell-1of2 { + width: 50% + } + .grid-flex-cell-1of3 { + width: 34% + } + .grid-flex-cell-1of4 { + width: 25% + } +} + +@media screen and 
(min-width:481px) and (max-width:900px) { + .container { + width: 85% + } +} + +@media screen and (min-width:901px) { + .container { + width: 70% + } +} + +select, +.form-input, +.form-checkbox, +.form-radio { + margin-bottom: 15px +} + +legend, +label { + font-weight: 800 +} + +legend { + padding: 0 15px; + font-size: 22px; + color: #000 +} + +fieldset { + border: 1px solid #e6eaed; + border-radius: 3px; + padding: 30px; + margin: 0 0 30px 0 +} + +label { + display: block; + margin-bottom: 5px; + font-size: 12px; + cursor: pointer +} + +select, +.form-input, +.form-radio, +.form-checkbox { + outline: 0 +} + +.form-inline { + overflow: hidden +} + +.form-input { + display: block; + padding: 10px; + width: 100%; + border-radius: 3px; + border: 1px solid #e6eaed; + -webkit-appearance: none +} + +.form-input:focus { + transition: all .2s ease-in; + border: 1px solid #74ddff; + background-color: #f8fdff +} + +.form-input:disabled { + background-color: #fafbfb; + cursor: not-allowed +} + +.form-input:required:focus { + border: 1px solid #fec57b; + background-color: #fff9f2 +} + +.form-input.input-invalid { + border: 1px solid #ffc7d8; + color: #ff749d +} + +.form-input.input-invalid:focus { + border: 1px solid #e80044; + background-color: #fff4f7 +} + +.form-input.input-valid { + border: 1px solid #c6ebd3; + color: #8ed6a8 +} + +.form-input.input-valid:focus { + border: 1px solid #43bb6e; + background-color: #f4fbf6 +} + +textarea { + resize: none; + min-height: 90px +} + +input[type=file], +label.radio, +label.checkbox, +select { + cursor: pointer +} + +input[type=file] { + padding: 10px; + background-color: #f8f9fa; + font-size: 12px; + color: #b7c0c3 +} + +select { + border: 1px solid #e6eaed; + height: 40px; + padding: 10px; + width: 100%; + background-color: #fff +} + +select:focus { + border: 1px solid #74ddff +} + +select:focus:-moz-focusring { + color: transparent; + text-shadow: 0 0 0 #000 +} + +.input-radio, +.input-checkbox { + margin: 5px 0; + font-size: 16px; + font-weight: normal +} + +.input-radio.disabled, +.input-checkbox.disabled { + color: #b7c0c3; + cursor: not-allowed +} + +.input-radio input[type=checkbox], +.input-checkbox input[type=checkbox], +.input-radio input[type=radio], +.input-checkbox input[type=radio] { + display: inline-block; + margin-right: 10px +} + +@media screen and (min-width:768px) { + .form-inline.form-no-labels .button { + margin-top: 0 + } + .form-inline .form-element { + float: left; + width: 50% + } + .form-inline .button { + margin-top: 22.5px + } + .form-inline .form-element:not(:first-of-type) { + padding-left: 5px + } + .form-inline .form-input, + .form-inline .input-checkbox, + .form-inline .input-radio, + .form-inline select { + margin: 0 + } +} + +.button { + display: inline-block; + border: none; + border-radius: 3px; + padding-left: 15px; + padding-right: 15px; + text-align: center; + cursor: pointer; + background-color: #00b0e9; + transition-delay: 0; + transition-duration: .3s; + transition-timing-function: ease-in; + transition-property: background, border-color; + -webkit-user-select: none; + -moz-user-select: none; + -ms-user-select: none; + user-select: none; + appearance: none +} + +.button:not([class*='button-outlined']) { + padding-top: 10px; + padding-bottom: 10px +} + +.button:hover { + background-color: #009ed2 +} + +.button:disabled, +.button .button-disabled { + background-color: #74ddff; + cursor: not-allowed +} + +.button:focus { + outline: none +} + +.button, +.button:hover { + color: #fff +} + +.button-small { + padding: 
3px 5px; + font-size: 12px +} + +.button-large { + padding: 15px 30px; + font-size: 22px +} + +.button-unstyled, +.button-unstyled:hover { + background-color: transparent +} + +.button-unstyled { + padding: 0; + color: #00b0e9 +} + +.button-unstyled:hover { + color: #008dba +} + +.button-neutral { + background-color: #e6eaed +} + +.button-neutral:hover { + background-color: #b0bcc6 +} + +.button-neutral:disabled, +.button-neutral .button-disabled { + background-color: #e6eaed; + color: #b7c0c3 +} + +.button-neutral:disabled:hover, +.button-neutral .button-disabled:hover { + color: #b7c0c3 +} + +.button-neutral, +.button-neutral:hover { + color: #272b2d +} + +.button-approve { + background-color: #43bb6e +} + +.button-approve:hover { + background-color: #369658 +} + +.button-approve:disabled, +.button-approve .button-disabled { + background-color: #c6ebd3 +} + +.button-warn { + background-color: #e80044 +} + +.button-warn:hover { + background-color: #ba0036 +} + +.button-warn:disabled, +.button-warn.button-disabled { + background-color: #ffc7d8 +} + +.button-caution { + background-color: transparent; + color: #e80044; + text-decoration: underline +} + +.button-caution:hover { + background-color: transparent; + color: #ba0036 +} + +.button-caution:disabled, +.button-caution .button-disabled { + color: #b7c0c3 +} + +*[class*='button-outlined'] { + border-width: 2px; + border-style: solid; + padding-top: 8px; + padding-bottom: 8px +} + +*[class*='button-outlined'], +*[class*='button-outlined']:hover, +*[class*='button-outlined']:disabled { + background-color: transparent +} + +.button-outlined { + border-color: #00b0e9; + color: #00b0e9 +} + +.button-outlined:hover { + border-color: #008dba; + color: #008dba +} + +.button-outlined:disabled { + border-color: #74ddff; + color: #74ddff +} + +.button-outlined-neutral { + border-color: #b7c0c3; + color: #828c8f +} + +.button-outlined-neutral:hover { + border-color: #828c8f; + color: #4d5659 +} + +.button-outlined-neutral:disabled, +.button-outlined-neutral.button-disabled { + color: #c5cdcf; + border-color: #e6eaed +} + +.button-outlined-neutral:disabled:hover, +.button-outlined-neutral.button-disabled:hover { + color: #c5cdcf +} + +.button-outlined-approve { + border-color: #43bb6e; + color: #43bb6e +} + +.button-outlined-approve:hover { + border-color: #369658; + color: #369658 +} + +.button-outlined-approve:disabled { + border-color: #c6ebd3; + color: #c6ebd3 +} + +.button-outlined-warn { + border-color: #e80044; + color: #e80044 +} + +.button-outlined-warn:hover { + border-color: #ba0036; + color: #ba0036 +} + +.button-outlined-warn:disabled { + border-color: #ffc7d8; + color: #ffc7d8 +} + +.button-group .button { + border-radius: 0; + margin-left: -1px; + border: 1px solid #008dba +} + +.button-group .button:first-child, +.button-group .button:first-child:only-child { + margin: 0 +} + +.button-group .button:first-child:only-child { + border-radius: 3px +} + +.button-group .button:first-child { + border-radius: 3px 0 0 3px +} + +.button-group .button:last-child { + border-radius: 0 3px 3px 0 +} + +.button-group .button-neutral, +.button-group .button-outlined { + border: 1px solid #b7c0c3 +} + +@media screen and (max-width:480px) { + .button-group .button { + border-radius: none; + margin: 0; + width: 100% + } + .button-group .button:first-of-type { + border-radius: 3px 3px 0 0 + } + .button-group .button:last-of-type { + border-radius: 0 0 3px 3px + } + .button-group .button:not(:last-of-type) { + border-bottom: none + } +} + +code { + 
font-family: Menlo, Monaco, Consolas, monospace; + font-size: 12px; + padding: 3px 5px +} + +pre { + display: block; + padding: 15px; + margin-bottom: 30px; + max-width: 100%; + line-height: 1.5; + word-break: break-all; + word-wrap: break-word +} + +pre code { + padding: 0; + white-space: pre-wrap +} + +code, +pre { + border-radius: 3px +} + +.code-light { + color: #000; + background: #f5f7f8 +} + +.code-dark { + color: #5ca1b5; + background: #003546 +} + +.dropdown-standalone { + position: relative; + display: inline-block; + cursor: pointer; + padding: 10px; + border-radius: 3px +} + +.dropdown-standalone:hover { + border-radius: 3px 3px 0 0 +} + +.dropdown-standalone:hover .dropdown { + display: block; + position: absolute; + top: 100%; + left: 0; + width: 100%; + border-radius: 0 0 3px 3px +} + +.dropdown-standalone:hover .dropdown a { + padding: 10px; + display: block +} + +.dropdown-standalone .dropdown { + display: none +} + +.dropdown-standalone .icon-arrow-down { + position: relative; + top: -2px; + margin-left: 5px +} + +.dropdown-outlined, +.dropdown-outlined .dropdown { + border: 1px solid #e6eaed +} + +.dropdown-standalone.dropdown-outlined .dropdown { + width: 101%; + left: -1px +} + +.dropdown-outlined .dropdown { + background-color: #fff +} + +.dropdown-outlined .dropdown li:not(:last-child) { + border-bottom: 1px solid #e6eaed +} + +.dropdown-coloured, +.dropdown-coloured .dropdown { + background-color: #00b0e9 +} + +.dropdown-coloured .button-unstyled, +.dropdown-coloured .dropdown a { + color: #fff +} + +.dropdown-coloured .icon-arrow-down { + border-top-color: #fff +} + +.dropdown-coloured .dropdown li:not(:last-child) { + border-bottom: 1px solid #009ed2 +} + +.dropdown-coloured .dropdown a:hover { + color: #e3f8ff +} + +.dropdown-neutral, +.dropdown-neutral .dropdown { + background-color: #e6eaed +} + +.dropdown-neutral a { + color: #4d5659 +} + +.dropdown-neutral a:hover, +.dropdown-neutral .button-unstyled:hover { + color: #272b2d +} + +.dropdown-neutral .icon-arrow-down { + border-top-color: #4d5659 +} + +.dropdown-neutral .dropdown li:not(:last-child) { + border-bottom: 1px solid #b7c0c3 +} + +.icon-arrow-left, +.icon-arrow-right, +.icon-arrow-up, +.icon-arrow-down { + display: inline-block; + width: 0; + height: 0; + line-height: 0; + font-size: 0 +} + +.icon-arrow-up, +.icon-arrow-down { + border-left: 5px solid transparent; + border-right: 5px solid transparent +} + +.icon-arrow-left, +.icon-arrow-right { + border-top: 5px solid transparent; + border-bottom: 5px solid transparent +} + +.icon-arrow-left { + border-right: 5px solid #00b0e9 +} + +.icon-arrow-right { + border-left: 5px solid #00b0e9 +} + +.icon-arrow-down { + border-top: 5px solid #00b0e9 +} + +.icon-arrow-up { + border-bottom: 5px solid #00b0e9 +} + +.media-outlined, +img { + border-radius: 3px +} + +figure { + width: auto; + margin: 0 +} + +figure img { + vertical-align: middle +} + +figure figcaption { + color: #828c8f; + font-size: 12px +} + +.media-outlined { + display: inline-block; + padding: 5px; + border: 1px solid #e6eaed +} + +@media screen and (max-width:480px) { + figure { + text-align: center + } + figure img+img { + margin: 5px auto 0 + } +} + +@media screen and (min-width:481px) { + figure img:not(:last-child):not(:only-child) { + margin-right: 5px + } + figure img:not(:only-child) { + margin-top: 5px + } + figure img+figcaption { + margin-bottom: 5px + } + figure img+figcaption:not(:only-child) { + margin-right: 5px + } +} + +.message { + padding: 15px; + position: relative; + 
background-color: #e3f8ff; + color: #00b0e9 +} + +.message a { + font-weight: bold +} + +.message p:last-of-type { + margin: 0 +} + +.message:after { + border: solid transparent; + content: ''; + height: 0; + width: 0; + position: absolute; + pointer-events: none; + border-color: rgba(255, 255, 255, 0); + border-width: 5px +} + +.message-dismissable { + padding-right: 30px +} + +.message-full-width { + text-align: center +} + +.message-above:after, +.message-below:after { + left: 50%; + margin-left: -6px +} + +.message-below:after { + border-bottom-color: #e3f8ff; + bottom: 100% +} + +.message-below.message-error:after { + border-bottom-color: #ffe3eb +} + +.message-below.message-success:after { + border-bottom-color: #e8f7ef +} + +.message-below.message-alert:after { + border-bottom-color: #ffeed7 +} + +.message-above:after { + border-top-color: #e3f8ff; + top: 100% +} + +.message-above.message-error:after { + border-top-color: #ffe3eb +} + +.message-above.message-success:after { + border-top-color: #e8f7ef +} + +.message-above.message-alert:after { + border-top-color: #ffeed7 +} + +.message-error { + background-color: #ffe3eb; + color: #e80044 +} + +.message-error a { + color: #ba0036 +} + +.message-error a:hover { + color: #8b0029 +} + +.message-alert { + background-color: #ffeed7; + color: #f18902 +} + +.message-alert a { + color: #c16e02 +} + +.message-alert a:hover { + color: #915201 +} + +.message-success { + background-color: #e8f7ef; + color: #43bb6e +} + +.message-success a { + color: #369658 +} + +.message-success a:hover { + color: #287042 +} + +.close { + display: block; + position: absolute; + top: 50%; + right: 15px; + margin-top: -15px; + cursor: pointer; + font-size: large; + font-weight: bold +} + +form .message { + padding: 10px +} + +.modal { + position: fixed; + top: 50%; + left: 50%; + border-radius: 3px; + z-index: 20; + -webkit-transform: translate(-50%, -50%); + -ms-transform: translate(-50%, -50%); + transform: translate(-50%, -50%); + background-color: #fff; + -webkit-overflow-scrolling: touch +} + +.modal.modal-no-sections { + padding: 30px; + text-align: center +} + +.modal-title { + margin: 0; + float: left +} + +.modal-head, +.modal-body { + padding: 15px +} + +.modal-head { + border-bottom: 1px solid #e6eaed +} + +.modal-footer { + padding: 15px 0; + text-align: center; + border-top: 1px solid #e6eaed +} + +.modal-footer button, +.modal-footer .button { + min-width: 120px +} + +.modal-footer button+button, +.modal-footer .button+.button { + margin-left: 5px +} + +.modal-close { + float: right; + color: #b7c0c3; + line-height: 1.5; + font-weight: bold; + font-size: 22px +} + +.modal-close:hover { + color: #828c8f +} + +.has-modal:before { + content: '' +} + +.overlay, +.has-modal:before { + position: fixed; + top: 0; + left: 0; + width: 100%; + height: 100%; + z-index: 10; + box-shadow: inset 0 0 100px rgba(0, 0, 0, 0.7); + background-color: rgba(0, 0, 0, 0.4) +} + +@media screen and (max-width:480px) { + .modal { + height: 100%; + width: 100%; + top: 0; + left: 0; + border-radius: 0; + -webkit-transform: none; + -ms-transform: none; + transform: none + } +} + +@media screen and (min-width:481px) and (max-width:768px) { + .modal { + width: 80% + } +} + +@media screen and (min-width:769px) { + .modal { + width: 50% + } +} + +.top-nav { + margin-bottom: 30px +} + +.top-nav ul { + margin: 0 +} + +.top-nav li { + padding: 15px +} + +.top-nav a { + display: inline-block +} + +.top-nav label { + font-weight: normal; + font-size: 16px; + line-height: 1 +} + 
+.top-nav .menu-toggle { + visibility: hidden; + position: absolute; + left: -100% +} + +.top-nav .icon-arrow-down { + position: relative; + top: -2px; + margin-left: 5px +} + +.top-nav-light { + border-bottom: 1px solid #e6eaed +} + +.top-nav-light label { + color: #00b0e9 +} + +.top-nav-light, +.top-nav-light li { + background-color: #fff +} + +.top-nav-dark label { + color: #438193 +} + +.top-nav-dark a { + color: #438193 +} + +.top-nav-dark a:hover, +.top-nav-dark a.active, +.top-nav-dark label { + color: #85b8c7 +} + +.top-nav-dark, +.top-nav-dark li { + background-color: #003546 +} + +.top-nav-dark .icon-arrow-down { + border-top-color: #438193 +} + +.breadcrumbs { + padding: 10px 0 +} + +.breadcrumbs li { + color: #b7c0c3 +} + +.breadcrumbs a { + padding: 0 10px +} + +.pagination li:not(:last-child) { + border-right: 1px solid #e6eaed +} + +.pagination em[class^='icon'] { + top: 1px; + position: relative +} + +.pagination .icon-arrow-right { + border-left: 5px solid #00b0e9 +} + +.pagination .icon-arrow-left { + border-right: 5px solid #00b0e9 +} + +.tabs { + border-bottom: 1px solid #e6eaed +} + +.tabs a { + display: block +} + +.side-nav li:not(:last-child) a { + border-bottom: 1px solid #e6eaed +} + +.unavailable-item { + color: #b7c0c3 +} + +.unavailable-item:not([href]), +.unavailable-item:not([href]):hover { + color: #b7c0c3; + cursor: default; + pointer-events: none +} + +.current-item { + color: #4d5659; + font-weight: 800 +} + +.side-nav a, +.pagination a, +.side-nav li, +.pagination li { + display: block +} + +.side-nav a:hover:not(.unavailable-item), +.pagination a:hover:not(.unavailable-item), +.side-nav .current-item, +.pagination .current-item { + background-color: #fafbfb +} + +.pagination, +.breadcrumbs { + display: inline-block +} + +.pagination li, +.breadcrumbs li { + float: left +} + +.breadcrumbs, +.side-nav, +.pagination { + border-radius: 3px; + border: 1px solid #e6eaed +} + +.side-nav a, +.pagination a { + padding: 10px 15px +} + +.breadcrumbs a, +.pagination a { + font-size: 12px +} + +@media screen and (max-width:480px) { + .tabs a { + position: relative; + padding: 10px + } +} + +@media screen and (min-width:481px) { + .tabs a { + padding: 10px 15px + } + .tabs .current-item { + position: relative; + top: 1px; + border: 1px solid #e6eaed; + border-bottom-color: transparent; + border-radius: 3px 3px 0 0; + background-color: #fff + } +} + +@media screen and (max-width:768px) { + .top-nav ul { + display: none + } + .top-nav ul>li { + padding: 10px + } + .top-nav .icon-arrow-down { + display: none + } + .top-nav label, + .top-nav .menu-toggle:checked~ul, + .top-nav .menu-toggle:checked~ul>li { + display: block + } + .top-nav label { + padding: 15px 15px 15px 0; + margin-bottom: 0; + text-align: right; + cursor: pointer + } + .top-nav .menu-toggle:checked~ul .dropdown { + display: block; + z-index: 1000 + } + .top-nav .menu-toggle:checked~ul .dropdown li { + padding: 5px 15px; + float: left; + border: 0 + } + .top-nav .menu-toggle:checked~ul .dropdown a { + font-size: 12px; + font-weight: normal + } + .top-nav-light .menu-toggle:checked~ul>li:not(:last-child) { + border-bottom: 1px solid #e6eaed + } + .top-nav-dark .menu-toggle:checked~ul>li:not(:last-child) { + border-bottom: 1px solid #1f4753 + } +} + +@media screen and (min-width:769px) { + .top-nav label { + display: none + } + .top-nav .has-dropdown { + position: relative + } + .top-nav .has-dropdown:hover .dropdown { + display: block + } + .top-nav .dropdown { + display: none; + position: absolute; + 
top: 100%; + left: 0 + } + .top-nav .dropdown li { + display: block + } + .top-nav-light .dropdown { + border-left: 1px solid #e6eaed; + border-right: 1px solid #e6eaed + } + .top-nav-light .dropdown li { + border-bottom: 1px solid #e6eaed + } + .top-nav-dark .dropdown li { + border-bottom: 1px solid #1f4753 + } +} + +.progress, +.meter { + border-radius: 3px; + line-height: 0 +} + +.progress.progress-rounded, +.progress-rounded .meter { + border-radius: 50px +} + +.progress { + border: 1px solid #e6eaed; + background-color: #fdfdfd +} + +.progress .meter { + display: inline-block; + background-color: #00b0e9; + height: 100% +} + +.progress-small { + height: 15px +} + +.progress-large { + height: 25px +} + +.progress-success .meter { + background-color: #43bb6e +} + +.progress-warning .meter { + background-color: #f18902 +} + +.progress-error .meter { + background-color: #e80044 +} + +.table-responsive { + overflow-x: auto +} + +table { + width: 100%; + max-width: 100%; + margin-bottom: 30px +} + +td, +th { + text-align: left; + padding: 10px; + border-bottom: 1px solid #e6eaed +} + +thead th { + font-weight: 800; + color: #4d5659 +} + +tr.table-cell-error { + background-color: #ffe3eb; + color: #e80044 +} + +tr.table-cell-error a { + color: #ba0036 +} + +tr.table-cell-error a:hover { + color: #8b0029 +} + +tr.table-cell-success { + background-color: #e8f7ef; + color: #43bb6e +} + +tr.table-cell-success a { + color: #369658 +} + +tr.table-cell-success a:hover { + color: #287042 +} + +tr.table-cell-alert { + background-color: #ffeed7; + color: #f18902 +} + +tr.table-cell-alert a { + color: #c16e02 +} + +tr.table-cell-alert a:hover { + color: #915201 +} + +.table-striped tr:nth-child(even) { + color: #828c8f +} + +.table-outlined { + border: 1px solid #e6eaed; + border-radius: 3px +} + +.table-outlined tr:last-of-type td { + border: none +} + +.table-with-hover tr:hover, +.table-striped tr:nth-child(even) { + background-color: #fafbfb +} + +@media screen and (max-width:767px) { + table td, + table th { + padding: 5px 10px + } +} + +.avatar-radius { + border-radius: 3px +} + +.avatar-rounded { + border-radius: 100% +} + +.avatar-small { + height: 25px; + width: 25px +} + +.avatar-medium { + height: 50px; + width: 50px +} + +.avatar-large { + height: 100px; + width: 100px +} + +@media print { + * { + background: transparent !important; + color: #000 !important; + box-shadow: none !important; + text-shadow: none !important + } + a, + a:visited { + text-decoration: underline + } + a[href]:after { + content: " (" attr(href) ")" + } + abbr[title]:after { + content: " (" attr(title) ")" + } + a[href^="#"]:after, + a[href^="javascript:"]:after { + content: '' + } + pre, + blockquote { + page-break-inside: avoid + } + thead { + display: table-header-group + } + tr, + img { + page-break-inside: avoid + } + img { + max-width: 100% !important + } + p, + h2, + h3 { + orphans: 3; + widows: 3 + } + h2, + h3 { + page-break-after: avoid + } +} + +body { + border-style: solid; + border-color: #f2f4f6 +} + +pre { + color: #000; + background: #f5f7f8 +} + +code { + color: #000; + background: #f5f7f8 +} + +header { + padding-bottom: 30px; + border-bottom: 1px solid #f2f4f6 +} + +header h1 { + margin-bottom: 0 +} + +header p { + font-size: 22px +} + +header .button { + margin: 30px 0 +} + +footer[role='contentinfo'] { + padding: 30px 0; + border-top: 1px solid #f2f4f6; + color: #828c8f; + text-align: center +} + +footer[role='contentinfo'] li:not(:last-of-type):after { + content: '\2219'; + margin: 0 5px 0 10px 
+} + +footer[role='contentinfo'] p { + margin-bottom: 0 +} + +footer[role='contentinfo'] .logo { + margin-top: 30px; + display: inline-block; + width: 17px; + height: 20px; + background: url("data:image/svg+xml;base64,PD94bWwgdmVyc2lvbj0iMS4wIiBlbmNvZGluZz0idXRmLTgiPz4NCjwhLS0gR2VuZXJhdG9yOiBBZG9iZSBJbGx1c3RyYXRvciAxNy4xLjAsIFNWRyBFeHBvcnQgUGx1Zy1JbiAuIFNWRyBWZXJzaW9uOiA2LjAwIEJ1aWxkIDApICAtLT4NCjwhRE9DVFlQRSBzdmcgUFVCTElDICItLy9XM0MvL0RURCBTVkcgMS4xLy9FTiIgImh0dHA6Ly93d3cudzMub3JnL0dyYXBoaWNzL1NWRy8xLjEvRFREL3N2ZzExLmR0ZCI+DQo8c3ZnIHZlcnNpb249IjEuMSIgaWQ9IkxheWVyXzEiIHhtbG5zPSJodHRwOi8vd3d3LnczLm9yZy8yMDAwL3N2ZyIgeG1sbnM6eGxpbms9Imh0dHA6Ly93d3cudzMub3JnLzE5OTkveGxpbmsiIHg9IjBweCIgeT0iMHB4Ig0KCSB2aWV3Qm94PSIxNjIgMjQ2LjEgMjg4IDMzNS42IiBlbmFibGUtYmFja2dyb3VuZD0ibmV3IDE2MiAyNDYuMSAyODggMzM1LjYiIHhtbDpzcGFjZT0icHJlc2VydmUiPg0KPGc+DQoJPGc+DQoJCTxwYXRoIGZpbGw9IiNCN0MwQzMiIGQ9Ik0zMDIuMSwzOTUuMmw1OC4xLDcwLjNsMi0yYzYuMy02LjcsMTIuMS0xMy4zLDE3LjctMjBjNS41LTYuNywyMC0yMiwyNS4yLTI5LjFMNDI4LDQ1Ng0KCQkJYy0zLjksNS41LTkuMSwxMi4xLTE1LjMsMTkuNmMtNi4zLDcuNS0xMy43LDE2LjEtMjIuOCwyNS42bDYwLjEsNzQuM2gtNjAuOWwtMzIuMy0zOS43Yy0yOS41LDMwLjctNjAuNSw0Ni05Mi4zLDQ2DQoJCQljLTI4LjcsMC01My4xLTkuMS03My4xLTI3LjVjLTE5LjYtMTguNC0yOS41LTQxLjItMjkuNS02OC40YzAtMzIuMywxNS4zLTU5LjMsNDYuOC04MC41bDIxLjItMTQuNWMwLjQsMCwwLjgtMC40LDEuNi0xLjINCgkJCWMwLjgtMC40LDEuNi0xLjIsMi44LTIuNGMtMjEuMi0yMi44LTMyLjMtNDUuMi0zMi4zLTY3LjJzNy4xLTM5LjcsMjEuMi01My41YzE0LjUtMTMuNywzMy4zLTIwLjQsNTYuNS0yMC40DQoJCQljMjIuNCwwLDQwLjUsNi43LDU1LjEsMjBjMTQuNSwxMy4zLDIyLDMwLjMsMjIsNTAuM2MwLDE0LjEtMy45LDI2LjctMTEuNywzOC4xQzMzNi43LDM2NS43LDMyMi41LDM3OS41LDMwMi4xLDM5NS4yeg0KCQkJIE0yNjMuMiw0MjIuN2wtMi44LDJjLTIwLDEzLjctMzMuNywyNS4yLTQwLjgsMzMuM2MtNy4xLDguMS0xMC43LDE3LjMtMTAuNywyNi43YzAsMTMuMyw1LjUsMjUuOSwxNi4xLDM2LjkNCgkJCWMxMS4xLDEwLjcsMjMuNiwxNi4xLDM2LjksMTYuMWMxOC44LDAsNDAuNS0xMi4xLDY1LjItMzYuOUwyNjMuMiw0MjIuN3ogTTI3My45LDM2MC45bDMuOS0zLjJjNi43LTUuMSwxMi41LTkuOSwxNi45LTEzLjMNCgkJCWM0LjMtMy45LDcuOS03LjEsOS45LTkuOWM0LjMtNS4xLDYuMy0xMS4zLDYuMy0xOS4yYzAtOC43LTIuOC0xNS4zLTguNy0yMC44cy0xMy4zLTcuOS0yMy4yLTcuOWMtOC43LDAtMTYuMSwyLjgtMjIuNCw4LjMNCgkJCWMtNS45LDUuMS05LjEsMTEuNy05LjEsMjBjMCw5LjUsMy45LDE4LjgsMTEuMywyOC4zbDEyLjEsMTQuNUMyNzEuNSwzNTguMywyNzIuMywzNTkuMywyNzMuOSwzNjAuOXoiLz4NCgk8L2c+DQo8L2c+DQo8L3N2Zz4NCg==") no-repeat 0 0 +} + +section { + padding: 30px 0 +} + +section:not(:last-child) { + border-bottom: 1px solid #f2f4f6 +} + +.sample { + border: 1px solid #e6eaed; + padding: 30px; + border-radius: 3px +} + +.sample *:last-child { + margin-bottom: 0 +} + +.sample h1, +.sample h2, +.sample h3, +.sample h4, +.sample h5, +.sample h6 { + margin-bottom: 10px +} + +.avatar { + margin: 0 30px 30px 0 +} + +.buttons .button-group { + margin-bottom: 10px +} + +.buttons .button-group .button { + margin: 0 0 0 -1px +} + +.buttons .button { + margin: 5px +} + +.top-nav ul { + z-index: 10 +} + +.progress { + margin-bottom: 10px +} + +.progress .meter { + min-width: 50% +} + +.modal-sample { + padding: 15px; + margin-bottom: 30px; + box-shadow: inset 0 0 100px rgba(0, 0, 0, 0.7); + background-color: rgba(0, 0, 0, 0.4) +} + +.modal-sample .modal { + position: relative; + top: 0; + left: 50%; + margin-left: -25%; + -webkit-transform: none; + -ms-transform: none; + transform: none +} + +.message, +.form-inline { + margin-bottom: 10px +} + +.color { + display: inline-block; + padding: 15px; + color: #fff; + text-align: center +} + +.blue1 { + background-color: #001620 +} + +.blue2 { + background-color: #002531 +} + +.blue3 { + background-color: #003546 +} + +.blue4 { + background-color: 
#1f4753 +} + +.blue5 { + background-color: #438193 +} + +.blue6 { + background-color: #00b0e9 +} + +.blue7 { + background-color: #74ddff +} + +.blue8 { + background-color: #e3f8ff; + color: #00b0e9 +} + +.gray1 { + background-color: #272b2d +} + +.gray2 { + background-color: #4d5659 +} + +.gray3 { + background-color: #828c8f +} + +.gray4 { + background-color: #b7c0c3 +} + +.gray5 { + background-color: #e6eaed +} + +.gray4, +.gray5 { + color: #272b2d +} + +.pink1 { + background-color: #ee0788 +} + +.pink2 { + background-color: #fec1d1 +} + +.pink3 { + background-color: #ffedf2 +} + +.pink2, +.pink3 { + color: #ee0788 +} + +.red1 { + background-color: #e80044 +} + +.red2 { + background-color: #ffc7d8 +} + +.red3 { + background-color: #ffe3eb +} + +.red2, +.red3 { + color: #e80044 +} + +.green1 { + background-color: #43bb6e +} + +.green2 { + background-color: #c6ebd3 +} + +.green3 { + background-color: #e8f7ef +} + +.green2, +.green3 { + color: #43bb6e +} + +.orange1 { + background-color: #f18902 +} + +.orange2 { + background-color: #fec57b +} + +.orange3 { + background-color: #ffeed7 +} + +.orange2, +.orange3 { + color: #f18902 +} + +.icon-sample { + margin: 0 5px +} + +[class^="grid-flex-cell"] { + border: 1px solid #e6eaed; + border-radius: 3px; + text-align: center +} + +[class^="grid-flex-cell"] p { + margin: 0 +} + +@media screen and (min-width:769px) { + body { + border-width: 10px + } +} + +@media screen and (max-width:768px) { + body { + border-width: 5px + } + .modal-sample .modal { + width: auto; + left: 0; + margin-left: 0 + } +} + +@media screen and (max-width:480px) { + .button { + display: inline-block; + width: auto + } + .logo { + margin: 30px auto; + display: block; + text-align: center + } + .color { + width: 100% + } +} + +@media screen and (min-width:481px) { + .logo { + margin: 20px; + display: inline-block + } + .color { + width: 250px; + margin-bottom: 5px + } +} + +html, +body, +h1, +h2, +h3, +p { + font-family: "Roboto" +} + +body { + border: none +} + +.hidden { + display: none +} + +.top-nav { + margin-bottom: 60px; + border-bottom: 1px solid #e6eaed; + background-color: white +} + +.top-nav ul { + margin: 0 +} + +.top-nav li { + padding: 15px +} + +.top-nav a { + display: inline-block +} + +.top-nav label { + font-weight: normal; + font-size: 16px; + line-height: 1 +} + +.navbar { + background-color: #FFF; + border-radius: 0; + z-index: 2; + margin-top: 15px; + padding-bottom: 15px; + margin-bottom: 30px; + border-bottom: 1px solid #e6eaed; + background-color: white +} + +.navbar-brand { + width: 133px; + min-width: 133px; + max-width: 133px; + height: 32px; + min-height: 32px; + max-height: 32px; + background-size: 133px; + background-repeat: no-repeat; + background-position: center center; + background-size: 116px; + background-position-x: 0; + display: inline-block; + float: left; + border-radius: 3px; + margin-left: 12px +} + +.navbar img { + width: 32px; + min-width: 32px; + max-width: 32px; + height: 32px; + min-height: 32px; + max-height: 32px; + border-radius: 50% +} + +.navbar-nav.navbar-right { + margin: 0; + list-style: none; + display: inline-block; + float: right +} + +.navbar-nav.navbar-right li { + line-height: 32px; + max-height: 32px; + padding: 0; + display: inline-block; + vertical-align: middle +} + +.navbar.navbar-homepage { + margin-bottom: 0; + border: none +} + +.navbar.navbar-homepage .button { + padding: 0 30px; + text-transform: uppercase; + font-size: 14px; + line-height: 28px; + color: #E48125; + border-color: #E48125 +} + +.footer { + 
background: #FFF; + padding: 30px 0; + color: #AAA; + text-align: right +} + +.footer a { + color: #666 +} + +.message { + margin-top: 100px +} + +.banner { + height: calc(100vh - 10px); + min-height: 500px; + background: #FFF; + display: flex; + align-items: center; + justify-content: center +} + +.banner-text { + text-align: center +} + +.banner-text p { + font-size: 22px; + max-width: 700px; + margin: 40px auto +} + +.banner-text p a { + color: #4d5659 +} + +.banner-text .button { + color: #E48125; + border: 2px solid #E48125; + background: #FFF +} + +.banner-text .button:last-child { + color: #CCC; + border: 2px solid #CCC +} + +.banner-actions { + max-width: 500px; + margin: 0 auto; + display: flex +} + +.banner-actions a { + text-transform: uppercase; + flex: 1 1 50% +} + +.banner-actions a:first-child { + margin-right: 10px +} + +.banner-actions a span { + margin-right: 10px +} + +.ribbon { + background: #565656; + background: #666; + padding: 60px 0; + margin-top: 120px +} + +.ribbon h4 { + color: #FFF; + text-shadow: 0 1px 2px rgba(0, 0, 0, 0.2); + text-align: center; + margin-bottom: 30px; + padding-bottom: 0; + font-size: 32px +} + +.ribbon p { + font-weight: 300; + color: #FFF; + padding-bottom: 0; + text-align: center; + max-width: 400px; + margin: 0 auto; + margin-bottom: 30px; + margin-top: 0; + font-size: 18px +} + +.ribbon .button-outlined { + color: #FFF; + border-color: #FFF; + text-transform: uppercase; + border-radius: 20px; + padding: 5px 40px; + padding: 5px 20px; + border-radius: 3px +} + +.ribbon .button-outlined .octicon { + margin-right: 6px +} + +.ribbon .container { + text-align: center +} + +.ribbon-login { + background: #fff; + margin-top: 10px; + padding-top: 0; + padding-bottom: 0; + margin-bottom: 200px +} + +.ribbon-login .button { + text-transform: uppercase; + border-radius: 30px; + padding: 7px 30px; + font-size: 18px +} + +.pricing { + margin-top: 100px +} + +.pricing .grid-flex-cell { + min-height: 300px +} + +.feature { + padding-top: 60px; + border-top: 1px solid #EEE +} + +.features .grid-flex-container { + background: #FFF; + margin-bottom: 90px +} + +.features .grid-flex-container:last-child { + margin-bottom: 90px +} + +.features .grid-flex-cell { + border: none; + text-align: left +} + +.issues .container { + max-width: 600px !IMPORTANT +} + +.feature { + margin-bottom: 90px +} + +.feature>div:first-child { + text-align: center +} + +.feature>div:first-child p { + font-size: 18px; + max-width: 600px; + margin: 0 auto +} + +.maintainers { + max-width: 600px; + margin: 0 auto; + margin-top: 60px; + -webkit-touch-callout: none; + -webkit-user-select: none; + -khtml-user-select: none; + -moz-user-select: none; + -ms-user-select: none; + user-select: none; + border: 1px solid #e6eaed; + box-shadow: 3px 3px rgba(0, 0, 0, 0.02); + border-radius: 3px +} + +.maintainers * { + cursor: default +} + +.maintainers div { + font-family: "Roboto Mono"; + font-size: 14px; + color: #CCC; + background: #fafafa; + border-bottom: 1px solid #f0f0f0; + text-align: left; + padding: 10px +} + +.maintainers table { + margin-bottom: 0 +} + +.maintainers table td { + border: none; + font-family: "Roboto Mono"; + font-size: 13px; + padding: 5px 15px; + color: #999 +} + +.maintainers table td:first-child { + text-align: right; + width: 50px; + border-right: 1px solid #f0f0f0 +} + +.maintainers table tr:first-child td { + padding-top: 10px +} + +.maintainers table tr:last-child td { + padding-bottom: 10px +} + +.footer { + background: #FFF; + padding: 30px 0; + color: #AAA; 
+ text-align: right +} + +.footer a { + color: #666 +} + +.issues { + padding-left: 10px; + margin: 60px auto; + -webkit-touch-callout: none; + -webkit-user-select: none; + -khtml-user-select: none; + -moz-user-select: none; + -ms-user-select: none; + user-select: none +} + +.issues * { + cursor: default +} + +.issues .issue { + display: flex; + margin-bottom: 30px +} + +.issues .issue img { + width: 32px; + height: 32px; + border-radius: 3px; + margin-right: 20px; + opacity: .5 +} + +.issues .issue em { + font-weight: bold; + color: #999; + font-style: normal +} + +.issues .issue b { + font-size: 28px +} + +.issues .issue>div { + flex: 1 1 auto; + box-shadow: 3px 3px rgba(0, 0, 0, 0.02) +} + +.issues .issue>div div:first-child { + color: #CCC; + background: #fafafa; + border: 1px solid #f0f0f0; + line-height: 32px; + padding: 0 10px; + border-top-right-radius: 1px; + border-top-left-radius: 1px; + font-size: 14px +} + +.issues .issue>div div:last-child { + border: 1px solid #f0f0f0; + border-top: none; + border-bottom-right-radius: 1px; + border-bottom-left-radius: 1px; + padding: 10px +} + +.issues .checks { + display: flex; + margin-bottom: 20px +} + +.issues .checks>div:first-child { + width: 32px; + height: 32px; + border-radius: 3px; + margin-right: 20px; + background: #e2cc7a +} + +.issues .checks>div:first-child .octicon { + color: #FFF; + font-size: 28px; + text-align: center; + line-height: 32px; + display: inline-block; + width: 32px +} + +.issues .checks>div:last-child { + border: 1px solid #e2cc7a; + border-radius: 4px; + overflow: hidden; + flex: 1 1 auto; + box-shadow: 3px 3px rgba(0, 0, 0, 0.02); + box-shadow: 3px 3px rgba(226, 204, 122, 0.2) +} + +.issues .checks>div:last-child>div { + padding: 10px 15px +} + +.issues .checks strong { + font-size: 14px; + font-weight: bold; + color: #cea61b +} + +.issues .checks p { + font-size: 14px; + color: #767676; + margin: 0; + margin-bottom: 10px; + padding: 0 +} + +.issues .checks ul { + padding: 0; + margin: 0; + border-top: 1px solid #eee +} + +.issues .checks li { + list-style: none; + padding: 7px 15px; + font-size: 14px; + background: #fafafa; + border-bottom: 1px solid #f0f0f0 +} + +.issues .checks li:last-child { + border-bottom: none +} + +.issues .checks li .dot-green { + margin-right: 5px; + color: #6cc644 +} + +.issues .checks li .dot-yellow { + color: #e2cc7a; + margin-right: 5px +} + +.issues .checks li em { + font-style: normal; + font-weight: bold +} + +.issues .checks-success>div:first-child { + background: #6cc644 +} + +.issues .checks-success>div:last-child { + border: 1px solid #6cc644; + box-shadow: 3px 3px rgba(0, 0, 0, 0.02) +} + +.issues .checks-success>div:last-child .button { + background: #6cc644; + font-size: 14px; + padding: 5px 15px; + margin: 5px +} + +.issues .checks-success strong { + color: #6cc644 +} + +.issues .more { + text-align: center; + color: #DDD +} + +.issues .more .octicon { + font-size: 26px +} \ No newline at end of file diff --git a/hugo/themes/default/layouts/_default/single.html b/hugo/themes/default/layouts/_default/single.html new file mode 100644 index 0000000..9cbc0aa --- /dev/null +++ b/hugo/themes/default/layouts/_default/single.html @@ -0,0 +1,51 @@ + + + + + {{ .Title }} · {{ .Site.Title }} + + + + + + + + + + + + + + + + +
+ +
+

{{ .Title }}

+ {{ if .Params.toc }} {{ .TableOfContents }} {{ end }} +
{{ .Content }}
+
+ +
+ {{ $currentPage := . }} + +
+
+ + + \ No newline at end of file diff --git a/hugo/themes/default/static/.gitkeep b/hugo/themes/default/static/.gitkeep new file mode 100644 index 0000000..e69de29 diff --git a/logstats/logstats.go b/logstats/logstats.go new file mode 100644 index 0000000..9561683 --- /dev/null +++ b/logstats/logstats.go @@ -0,0 +1,109 @@ +/* + +SPDX-Copyright: Copyright (c) Capital One Services, LLC +SPDX-License-Identifier: Apache-2.0 +Copyright 2017 Capital One Services, LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and limitations under the License. + +*/ +package logstats + +import ( + "sync" + "time" + + "github.com/capitalone/checks-out/envvars" + "github.com/capitalone/checks-out/model" + "github.com/capitalone/checks-out/set" + "github.com/capitalone/checks-out/store/datastore" + + log "github.com/Sirupsen/logrus" +) + +var ( + lock = sync.Mutex{} + pr = set.Empty() + approvers = set.Empty() + disapprovers = set.Empty() +) + +func RecordPR(id string) { + lock.Lock() + pr.Add(id) + lock.Unlock() +} + +func RecordApprover(id string) { + lock.Lock() + approvers.Add(id) + lock.Unlock() +} + +func RecordDisapprover(id string) { + lock.Lock() + disapprovers.Add(id) + lock.Unlock() +} + +func resetStats() { + pr = set.Empty() + approvers = set.Empty() + disapprovers = set.Empty() +} + +func usersAndOrgs(repos []*model.Repo) (set.Set, set.Set) { + users := set.Empty() + orgs := set.Empty() + for _, repo := range repos { + if repo.Org { + orgs.Add(repo.Owner) + } else { + users.Add(repo.Owner) + } + } + return users, orgs +} + +func writeLog() { + repos, err := datastore.Get().GetAllRepos() + if err != nil { + log.Error("Periodic logging unable to fetch repository list", err) + } else { + users, orgs := usersAndOrgs(repos) + log.Infof("Monitoring %d repositories", len(repos)) + log.Infof("Monitoring %d users", len(users)) + log.Infof("Monitoring %d organizations", len(orgs)) + } + log.Infof("Accepted %d pull requests in last period", len(pr)) + log.Infof("Accepted %d approvers in last period", len(approvers)) + log.Infof("Accepted %d disapprovers in last period", len(disapprovers)) +} + +func logTask() { + period := envvars.Env.Monitor.LogPeriod + if period == 0 { + return + } + t := time.NewTicker(period) + for { + lock.Lock() + writeLog() + resetStats() + lock.Unlock() + <-t.C + } +} + +func Start() { + go logTask() +} diff --git a/main.go b/main.go index e63e57c..4982c40 100644 --- a/main.go +++ b/main.go @@ -1,50 +1,110 @@ +/* + +SPDX-Copyright: Copyright (c) Brad Rydzewski, project contributors, Capital One Services, LLC +SPDX-License-Identifier: Apache-2.0 +Copyright 2017 Brad Rydzewski, project contributors, Capital One Services, LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and limitations under the License. + +*/ package main import ( + "flag" + "fmt" "net/http" "time" - "github.com/lgtmco/lgtm/router" - "github.com/lgtmco/lgtm/router/middleware" + "github.com/capitalone/checks-out/envvars" + "github.com/capitalone/checks-out/logstats" + "github.com/capitalone/checks-out/migration" + _ "github.com/capitalone/checks-out/notifier/github" + _ "github.com/capitalone/checks-out/notifier/slack" + "github.com/capitalone/checks-out/remote" + "github.com/capitalone/checks-out/router" + "github.com/capitalone/checks-out/store/datastore" + "github.com/capitalone/checks-out/version" "github.com/Sirupsen/logrus" - "github.com/gin-gonic/contrib/ginrus" - "github.com/ianschenck/envflag" _ "github.com/joho/godotenv/autoload" ) -var ( - addr = envflag.String("SERVER_ADDR", ":8000", "") - cert = envflag.String("SERVER_CERT", "", "") - key = envflag.String("SERVER_KEY", "", "") +func setLogLevel(level string) { + switch level { + case "panic": + logrus.SetLevel(logrus.PanicLevel) + case "fatal": + logrus.SetLevel(logrus.FatalLevel) + case "error": + logrus.SetLevel(logrus.ErrorLevel) + case "warn": + logrus.SetLevel(logrus.WarnLevel) + case "info": + logrus.SetLevel(logrus.InfoLevel) + case "debug": + logrus.SetLevel(logrus.DebugLevel) + default: + logrus.Fatal("Unrecognized log level ", level) + } +} - debug = envflag.Bool("DEBUG", false, "") -) +func startService() { -func main() { - envflag.Parse() + err := envvars.Validate() + if err != nil { + logrus.Fatal(err) + } - if *debug { - logrus.SetLevel(logrus.DebugLevel) - } else { - logrus.SetLevel(logrus.WarnLevel) + setLogLevel(envvars.Env.Monitor.LogLevel) + + logstats.Start() + + r := remote.Get() + ds := datastore.Get() + + err = migration.Migrate(r, ds) + + if err != nil { + logrus.Fatal(err) } - handler := router.Load( - ginrus.Ginrus(logrus.StandardLogger(), time.RFC3339, true), - middleware.Version, - middleware.Store(), - middleware.Remote(), - middleware.Cache(), - ) + handler := router.Load() + + logrus.Infof("Starting %s service on %s", envvars.Env.Branding.ShortName, time.Now().Format(time.RFC1123)) - if *cert != "" { + if envvars.Env.Server.Cert != "" { logrus.Fatal( - http.ListenAndServeTLS(*addr, *cert, *key, handler), + http.ListenAndServeTLS(envvars.Env.Server.Addr, envvars.Env.Server.Cert, envvars.Env.Server.Key, handler), ) } else { logrus.Fatal( - http.ListenAndServe(*addr, handler), + http.ListenAndServe(envvars.Env.Server.Addr, handler), ) } + +} + +func main() { + ver := flag.Bool("version", false, "print version") + env := flag.Bool("env", false, "print environment variables") + help := flag.Bool("help", false, "print help information") + flag.Parse() + if *help { + flag.PrintDefaults() + } else if *ver { + fmt.Println(version.Version) + } else if *env { + envvars.Usage() + } else { + startService() + } } diff --git a/matcher/language_test.go b/matcher/language_test.go new file mode 100644 index 0000000..9bef6b9 --- /dev/null +++ b/matcher/language_test.go @@ -0,0 +1,71 @@ +/* + +SPDX-Copyright: Copyright (c) Capital One Services, LLC +SPDX-License-Identifier: Apache-2.0 +Copyright 2017 Capital One Services, LLC + +Licensed under the Apache 
License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and limitations under the License. + +*/ +package matcher + +import ( + "github.com/stretchr/testify/assert" + "testing" +) + +func TestBuildParseTree(t *testing.T) { + input := "a and b or (us and them) or anyone and not d or f[self=true,count=10] and nof(a,b,c[self=false,count=2] and (d or e),1)" + tokens := BuildTokens(input) + _, err := BuildParseTree(tokens) + assert.Nil(t, err) +} + +func TestBuildParseTreeSmall(t *testing.T) { + input := "a and b or c" + tokens := BuildTokens(input) + _, err := BuildParseTree(tokens) + assert.Nil(t, err) +} + +func TestBuildParseTreeParens(t *testing.T) { + input := "(a and b) or not (c and (d or e))" + tokens := BuildTokens(input) + _, err := BuildParseTree(tokens) + assert.Nil(t, err) +} + +func TestBuildParseTreeInvalid(t *testing.T) { + vals := []string{ + "(a and b) or not (c and (d or e)))", + ")", + "(a and )", + "( and a)", + } + for _, input := range vals { + tokens := BuildTokens(input) + _, err := BuildParseTree(tokens) + assert.NotNil(t, err) + } +} + +func TestInvalidErrorMessages(t *testing.T) { + tokens := BuildTokens("foo[and]") + _, err := BuildParseTree(tokens) + assert.Equal(t, err.Error(), "invalid 'and' at position 5") + tokens = BuildTokens("foo[a=1") + _, err = BuildParseTree(tokens) + assert.Equal(t, err.Error(), "missing ']' at position 7") + tokens = BuildTokens("foo[a,b,c]") + _, err = BuildParseTree(tokens) + assert.Equal(t, err.Error(), "invalid ',' at position 6") +} diff --git a/matcher/parser.go b/matcher/parser.go new file mode 100644 index 0000000..db31b4a --- /dev/null +++ b/matcher/parser.go @@ -0,0 +1,481 @@ +/* + +SPDX-Copyright: Copyright (c) Capital One Services, LLC +SPDX-License-Identifier: Apache-2.0 +Copyright 2017 Capital One Services, LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and limitations under the License. 
+ +*/ +package matcher + +import ( + "fmt" + + "github.com/pkg/errors" +) + +type parseKind int + +const ( + _ parseKind = iota + PARSE_NOUN + PARSE_ANONYMOUS + PARSE_JOINER + PARSE_FUNCTION +) + +type ParseToken interface { + fmt.Stringer + kind() parseKind + parent() ParseToken + setParent(ParseToken) + isPopulated() bool +} + +type JoinerKind int + +const ( + _ JoinerKind = iota + JOINER_AND + JOINER_OR + JOINER_NOT +) + +type childToken struct { + p ParseToken +} + +func (ct *childToken) parent() ParseToken { + return ct.p +} + +func (ct *childToken) setParent(p ParseToken) { + ct.p = p +} + +type NounParseToken struct { + Name string + Attributes map[string]string + *childToken +} + +func (*NounParseToken) kind() parseKind { + return PARSE_NOUN +} + +func (np *NounParseToken) String() string { + label := np.Name + return fmt.Sprintf("%s %v", label, np.Attributes) +} + +func (*NounParseToken) isPopulated() bool { + return true +} + +type AnonymousParseToken struct { + Members []string + Attributes map[string]string + *childToken +} + +func (*AnonymousParseToken) kind() parseKind { + return PARSE_ANONYMOUS +} + +func (np *AnonymousParseToken) String() string { + return fmt.Sprintf("%v %v", np.Members, np.Attributes) +} + +func (*AnonymousParseToken) isPopulated() bool { + return true +} + +type FunctionParseToken struct { + Name string + Parameters []ParseToken + *childToken +} + +func (*FunctionParseToken) isPopulated() bool { + return true +} + +func (*FunctionParseToken) kind() parseKind { + return PARSE_FUNCTION +} + +func (fp *FunctionParseToken) String() string { + label := fp.Name + return fmt.Sprintf("%s %v", label, fp.Parameters) +} + +type AndOrParseToken struct { + JKind JoinerKind + Left ParseToken + Right ParseToken + *childToken +} + +func (*AndOrParseToken) kind() parseKind { + return PARSE_JOINER +} + +func (aop *AndOrParseToken) String() string { + l := "{INVALID}" + if aop.Left != nil { + l = aop.Left.String() + } + r := "{INVALID}" + if aop.Right != nil { + r = aop.Right.String() + } + j := "AND" + if aop.JKind == JOINER_OR { + j = "OR" + } + return fmt.Sprintf("%s %s %s", l, j, r) +} + +func (aop *AndOrParseToken) isPopulated() bool { + return aop.Left != nil && aop.Right != nil +} + +type NotParseToken struct { + Child ParseToken + *childToken +} + +func (*NotParseToken) kind() parseKind { + return PARSE_JOINER +} + +func (np *NotParseToken) String() string { + if np.Child == nil { + return "NOT {MISSING}" + } + return fmt.Sprintf("NOT %s", np.Child.String()) +} + +func (np *NotParseToken) isPopulated() bool { + return np.Child != nil +} + +type funcState int + +const ( + _ funcState = iota + FUNC_OUT + FUNC_IN +) + +/* +Valid grammar: + +NOUN := NAME | US | THEM | ANY +PO_NAME := NOUN ATTRIBUTE_CLAUSE? +ATTRIBUTE_CLAUSE := LBRACKET (NAME EQUAL NAME COMMA)* NAME EQUAL NAME RBRACKET +NOT_STMT := NOT? 
PO_NAME +CLAUSE := NOT_STMT ((AND NOT_STMT) | (OR NOT_STMT))* + +*/ +type parseTreeBuilder struct { + root ParseToken + lastToken ParseToken +} + +func BuildParseTree(tokens []token) (ParseToken, error) { + ptb := &parseTreeBuilder{} + pos, err := ptb.buildParseTreeInner(tokens, FUNC_OUT) + if err == nil && pos != len(tokens) { + return nil, errors.New("premature end of tokens") + } + return ptb.root, err +} + +func (ptb *parseTreeBuilder) addJoiner(kind JoinerKind) { + pt := &AndOrParseToken{JKind: kind, childToken: &childToken{}} + pt.setParent(ptb.lastToken.parent()) + if ptb.root == ptb.lastToken { + ptb.root = pt + } + switch lt := ptb.lastToken.parent().(type) { + case *AndOrParseToken: + lt.Right = pt + case *NotParseToken: + lt.Child = pt + } + ptb.lastToken.setParent(pt) + pt.Left = ptb.lastToken + ptb.lastToken = pt +} + +func (ptb *parseTreeBuilder) addToTree(pt ParseToken) { + if ptb.root == nil { + ptb.root = pt + } else { + switch lt := ptb.lastToken.(type) { + case *AndOrParseToken: + lt.Right = pt + case *NotParseToken: + lt.Child = pt + default: + panic(fmt.Sprintf("can't add a child to token %+v", ptb.lastToken)) + } + } + pt.setParent(ptb.lastToken) + ptb.lastToken = pt +} + +func (ptb *parseTreeBuilder) addNoun(name string) { + pt := &NounParseToken{Name: name, childToken: &childToken{}} + ptb.addToTree(pt) +} + +func (ptb *parseTreeBuilder) addAnonymous(members []string) { + pt := &AnonymousParseToken{Members: members, Attributes: map[string]string{}, childToken: &childToken{}} + ptb.addToTree(pt) +} + +func (ptb *parseTreeBuilder) addFunc(name string) { + pt := &FunctionParseToken{Name: name, Parameters: []ParseToken{}, childToken: &childToken{}} + ptb.addToTree(pt) +} + +type attribState int + +const ( + _ attribState = iota + NONE + IN_ATTRIB + IN_NAME + IN_EQ + IN_VAL + IN_COMMA + IN_SET + IN_SET_VAL + IN_SET_COMMA + OUT_SET +) + +func makeError(t token) error { + return errors.Errorf("invalid '%s' at position %d", t.name, t.pos) +} + +func (ptb *parseTreeBuilder) handleAttributes(tokens []token, start int) (map[string]string, int, error) { + if len(tokens) == 0 { + return nil, 0, errors.Errorf("missing ']' to close '[' found at position %d", start) + } + attribs := map[string]string{} + var curAttribName string + inAttrib := IN_ATTRIB + for i := 0; i < len(tokens); i++ { + v := tokens[i] + switch v.value { + case TOKEN_NAME: + switch inAttrib { + case IN_ATTRIB, IN_COMMA: + curAttribName = v.name + inAttrib = IN_NAME + case IN_EQ: + attribs[curAttribName] = v.name + inAttrib = IN_VAL + default: + return nil, 0, makeError(v) + } + + case TOKEN_RBRACKET: + if inAttrib == IN_VAL { + return attribs, i + 1, nil + } + return nil, 0, makeError(v) + + case TOKEN_EQUAL: + if inAttrib == IN_NAME { + inAttrib = IN_EQ + } else { + return nil, 0, makeError(v) + } + + case TOKEN_COMMA: + if inAttrib == IN_VAL { + inAttrib = IN_COMMA + } else { + return nil, 0, makeError(v) + } + default: + return nil, 0, makeError(v) + } + } + return nil, 0, errors.Errorf("missing ']' at position %d", + tokens[len(tokens)-1].pos) +} + +func (ptb *parseTreeBuilder) handleSetMembership(tokens []token, start int) ([]string, int, error) { + var members []string + if len(tokens) == 0 { + return nil, 0, errors.Errorf("missing '}' to close '{' found at position %d", start) + } else if tokens[0].value == TOKEN_RBRACE { + return members, 1, nil + } + for i := 0; i < len(tokens)-1; i++ { + t := tokens[i] + if t.value != TOKEN_NAME { + return nil, 0, makeError(t) + } + members = append(members, t.name) + 
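+ // For reference, a few concrete expressions accepted by the grammar documented
+ // above BuildParseTree; these are drawn verbatim from the inputs exercised in
+ // this package's parser tests, so no new names or attributes are introduced here:
+ //
+ //   a and b or c
+ //   (a and b) or not (c and (d or e))
+ //   a and b[count=1,self=false]
+ //   a(x[self=false],y(3),(z and b(2,3)),1)
+ //   a and b or (us and them) or anyone and not d or f[self=true,count=10] and nof(a,b,c[self=false,count=2] and (d or e),1)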
next := tokens[i+1].value + if next == TOKEN_RBRACE { + return members, i + 2, nil + } else if next == TOKEN_COMMA { + i++ + } else { + return nil, 0, errors.Errorf("missing ',' at position %d", tokens[i+1].pos) + } + } + return nil, 0, errors.Errorf("missing '}' at position %d", + tokens[len(tokens)-1].pos) +} + +func (ptb *parseTreeBuilder) handleFunctionArguments(tokens []token) ([]ParseToken, int, error) { + done := false + subPos := 0 + params := []ParseToken{} + for !done { + ptbInner := &parseTreeBuilder{} + newPos, err := ptbInner.buildParseTreeInner(tokens[subPos:], FUNC_IN) + if err != nil { + return nil, 0, err + } + params = append(params, ptbInner.root) + subPos += newPos + switch tokens[subPos].value { + case TOKEN_COMMA: + subPos++ + continue //loop again + case TOKEN_RPAREN: + done = true //at end of parameters + default: + return nil, 0, makeError(tokens[subPos]) + } + } + + //subPos should be pointing to an RParen, so skip over it and continue + return params, subPos + 1, nil +} + +func (ptb *parseTreeBuilder) beginAttributes() bool { + last := ptb.lastToken + if last == nil { + return false + } + if last.kind() == PARSE_NOUN && len(last.(*NounParseToken).Attributes) == 0 { + return true + } + if last.kind() == PARSE_ANONYMOUS && len(last.(*AnonymousParseToken).Attributes) == 0 { + return true + } + return false +} + +func (ptb *parseTreeBuilder) buildParseTreeInner(tokens []token, inFunc funcState) (int, error) { + for pos := 0; pos < len(tokens); pos++ { + v := tokens[pos] + switch v.value { + case TOKEN_LBRACE: + members, newPos, err := ptb.handleSetMembership(tokens[pos+1:], v.pos) + if err != nil { + return 0, err + } + ptb.addAnonymous(members) + pos += newPos + case TOKEN_NAME: + if ptb.lastToken != nil && ptb.lastToken.kind() != PARSE_JOINER { + return 0, errors.Errorf("invalid noun %s at position %d", v.name, v.pos) + } + //figure out if this is a noun or a function by peeking at the next token + if pos < len(tokens)-1 && tokens[pos+1].value == TOKEN_LPAREN { + ptb.addFunc(v.name) + } else { + ptb.addNoun(v.name) + } + case TOKEN_LBRACKET: + if !ptb.beginAttributes() { + return 0, makeError(v) + } + attribs, newPos, err := ptb.handleAttributes(tokens[pos+1:], v.pos) + if err != nil { + return 0, err + } + switch lt := ptb.lastToken.(type) { + case *NounParseToken: + lt.Attributes = attribs + case *AnonymousParseToken: + lt.Attributes = attribs + default: + //this will trigger a panic but I'll know the type + ptb.lastToken.(*NounParseToken).Attributes = attribs + } + pos += newPos + case TOKEN_COMMA: + if inFunc == FUNC_IN { + return pos, nil + } + return 0, makeError(v) + case TOKEN_AND: + if ptb.lastToken == nil { + return 0, makeError(v) + } + ptb.addJoiner(JOINER_AND) + case TOKEN_OR: + if ptb.lastToken == nil { + return 0, makeError(v) + } + ptb.addJoiner(JOINER_OR) + case TOKEN_NOT: + if ptb.lastToken != nil && ptb.lastToken.kind() != PARSE_JOINER { + return 0, makeError(v) + } + pt := &NotParseToken{childToken: &childToken{}} + ptb.addToTree(pt) + case TOKEN_LPAREN: + if ptb.lastToken == nil || ptb.lastToken.kind() == PARSE_JOINER { + //grouping parens, recurse (kinda) + ptbInner := &parseTreeBuilder{} + newPos, err := ptbInner.buildParseTreeInner(tokens[pos+1:], FUNC_OUT) + if err != nil { + return 0, err + } + ptb.addToTree(ptbInner.root) + pos += newPos + 1 + } else if ptb.lastToken != nil && ptb.lastToken.kind() == PARSE_FUNCTION { + //function parameters + params, newPos, err := ptb.handleFunctionArguments(tokens[pos+1:]) + if err != nil { + return 0, 
err + } + ptb.lastToken.(*FunctionParseToken).Parameters = params + pos += newPos + } else { + return 0, makeError(v) + } + case TOKEN_RPAREN: + if ptb.lastToken == nil || !ptb.lastToken.isPopulated() { + return 0, makeError(v) + } + return pos, nil + default: + return 0, makeError(v) + } + } + return len(tokens), nil +} diff --git a/matcher/parser_test.go b/matcher/parser_test.go new file mode 100644 index 0000000..82a2b09 --- /dev/null +++ b/matcher/parser_test.go @@ -0,0 +1,346 @@ +/* + +SPDX-Copyright: Copyright (c) Capital One Services, LLC +SPDX-License-Identifier: Apache-2.0 +Copyright 2017 Capital One Services, LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and limitations under the License. + +*/ +package matcher + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestBasics(t *testing.T) { + tokens := BuildTokens("a and b") + root, err := BuildParseTree(tokens) + assert.Nil(t, err) + //should have a tree with a root of andornode and a left of noun node a with no attribs and a right of noun node b with no attribs + andNode, ok := root.(*AndOrParseToken) + assert.True(t, ok) + assert.Equal(t, andNode.JKind, JOINER_AND) + aNode, ok := andNode.Left.(*NounParseToken) + assert.True(t, ok) + assert.Equal(t, aNode.Name, "a") + assert.Equal(t, 0, len(aNode.Attributes)) + + bNode, ok := andNode.Right.(*NounParseToken) + assert.True(t, ok) + assert.Equal(t, bNode.Name, "b") + assert.Equal(t, 0, len(bNode.Attributes)) +} + +func TestAnonymousGroup(t *testing.T) { + tokens := BuildTokens("{}") + root, err := BuildParseTree(tokens) + assert.Nil(t, err) + node, ok := root.(*AnonymousParseToken) + assert.True(t, ok) + assert.Equal(t, 0, len(node.Members)) + tokens = BuildTokens("{a, b,c }") + root, err = BuildParseTree(tokens) + assert.Nil(t, err) + node, ok = root.(*AnonymousParseToken) + assert.True(t, ok) + assert.Equal(t, 3, len(node.Members)) +} + +func TestAttributes(t *testing.T) { + tokens := BuildTokens("a and b[count=1,self=false]") + root, err := BuildParseTree(tokens) + assert.Nil(t, err) + checkTestAttributes(root, t) + + tokens = BuildTokens("a and b[self=false,count=1]") + root, err = BuildParseTree(tokens) + assert.Nil(t, err) + checkTestAttributes(root, t) +} + +func checkTestAttributes(root ParseToken, t *testing.T) { + //should have a tree with a root of andornode and a left of noun node a with no attribs and a right of noun node b with expected attribs + andNode, ok := root.(*AndOrParseToken) + assert.True(t, ok) + assert.Equal(t, andNode.JKind, JOINER_AND) + aNode, ok := andNode.Left.(*NounParseToken) + assert.True(t, ok) + assert.Equal(t, aNode.Name, "a") + assert.Equal(t, 0, len(aNode.Attributes)) + + bNode, ok := andNode.Right.(*NounParseToken) + assert.True(t, ok) + assert.Equal(t, bNode.Name, "b") + assert.Equal(t, 2, len(bNode.Attributes)) + assert.Equal(t, "1", bNode.Attributes["count"]) + assert.Equal(t, "false", bNode.Attributes["self"]) +} + +func TestNestedSimple(t *testing.T) { + tokens := BuildTokens("(a and b)") + root, err := BuildParseTree(tokens) + assert.Nil(t, err) + 
checkNestedSimple(root, t) + + tokens = BuildTokens("((a) and b)") + root, err = BuildParseTree(tokens) + assert.Nil(t, err) + checkNestedSimple(root, t) + + tokens = BuildTokens("(a and (b))") + root, err = BuildParseTree(tokens) + assert.Nil(t, err) + checkNestedSimple(root, t) +} + +func checkNestedSimple(root ParseToken, t *testing.T) { + //should have a tree with a root of andornode and a left of noun node a with no attribs and a right of noun node b with no attribs + andNode, ok := root.(*AndOrParseToken) + assert.True(t, ok) + assert.Equal(t, andNode.JKind, JOINER_AND) + aNode, ok := andNode.Left.(*NounParseToken) + assert.True(t, ok) + assert.Equal(t, aNode.Name, "a") + assert.Equal(t, 0, len(aNode.Attributes)) + + bNode, ok := andNode.Right.(*NounParseToken) + assert.True(t, ok) + assert.Equal(t, bNode.Name, "b") + assert.Equal(t, 0, len(bNode.Attributes)) +} + +func TestNestedComplex(t *testing.T) { + tokens := BuildTokens("a and b or c") + root, err := BuildParseTree(tokens) + assert.Nil(t, err) + checkNestedComplex1(root, t) + + tokens = BuildTokens("(a and b) or c") + root, err = BuildParseTree(tokens) + assert.Nil(t, err) + checkNestedComplex2(root, t) + + tokens = BuildTokens("a and (b or c)") + root, err = BuildParseTree(tokens) + assert.Nil(t, err) + checkNestedComplex1(root, t) +} + +func checkNestedComplex1(root ParseToken, t *testing.T) { + /* + and + / \ + a or + / \ + b c + */ + andNode, ok := root.(*AndOrParseToken) + assert.True(t, ok) + assert.Equal(t, andNode.JKind, JOINER_AND) + + aNode, ok := andNode.Left.(*NounParseToken) + assert.True(t, ok) + assert.Equal(t, aNode.Name, "a") + assert.Equal(t, 0, len(aNode.Attributes)) + + orNode, ok := andNode.Right.(*AndOrParseToken) + assert.True(t, ok) + assert.Equal(t, orNode.JKind, JOINER_OR) + + bNode, ok := orNode.Left.(*NounParseToken) + assert.True(t, ok) + assert.Equal(t, bNode.Name, "b") + assert.Equal(t, 0, len(bNode.Attributes)) + + cNode, ok := orNode.Right.(*NounParseToken) + assert.True(t, ok) + assert.Equal(t, cNode.Name, "c") + assert.Equal(t, 0, len(cNode.Attributes)) +} + +func checkNestedComplex2(root ParseToken, t *testing.T) { + /* + or + / \ + and c + / \ + a b + */ + orNode, ok := root.(*AndOrParseToken) + assert.True(t, ok) + assert.Equal(t, orNode.JKind, JOINER_OR) + + andNode, ok := orNode.Left.(*AndOrParseToken) + assert.True(t, ok) + assert.Equal(t, andNode.JKind, JOINER_AND) + + aNode, ok := andNode.Left.(*NounParseToken) + assert.True(t, ok) + assert.Equal(t, aNode.Name, "a") + assert.Equal(t, 0, len(aNode.Attributes)) + + bNode, ok := andNode.Right.(*NounParseToken) + assert.True(t, ok) + assert.Equal(t, bNode.Name, "b") + assert.Equal(t, 0, len(bNode.Attributes)) + + cNode, ok := orNode.Right.(*NounParseToken) + assert.True(t, ok) + assert.Equal(t, cNode.Name, "c") + assert.Equal(t, 0, len(cNode.Attributes)) +} + +func TestNot(t *testing.T) { + tokens := BuildTokens("not a") + root, err := BuildParseTree(tokens) + assert.Nil(t, err) + checkNot1(root, t) + + tokens = BuildTokens("not not a") + root, err = BuildParseTree(tokens) + assert.Nil(t, err) + checkNot2(root, t) + + tokens = BuildTokens("b and not not a") + root, err = BuildParseTree(tokens) + assert.Nil(t, err) + checkNot3(root, t) +} + +func checkNot1(root ParseToken, t *testing.T) { + notNode, ok := root.(*NotParseToken) + assert.True(t, ok) + + aNode, ok := notNode.Child.(*NounParseToken) + assert.True(t, ok) + assert.Equal(t, aNode.Name, "a") + assert.Equal(t, 0, len(aNode.Attributes)) +} + +func checkNot2(root ParseToken, t 
*testing.T) { + notNode, ok := root.(*NotParseToken) + assert.True(t, ok) + + notNode2, ok := notNode.Child.(*NotParseToken) + assert.True(t, ok) + + aNode, ok := notNode2.Child.(*NounParseToken) + assert.True(t, ok) + assert.Equal(t, aNode.Name, "a") + assert.Equal(t, 0, len(aNode.Attributes)) +} + +func checkNot3(root ParseToken, t *testing.T) { + andNode, ok := root.(*AndOrParseToken) + assert.True(t, ok) + assert.Equal(t, andNode.JKind, JOINER_AND) + + bNode, ok := andNode.Left.(*NounParseToken) + assert.True(t, ok) + assert.Equal(t, bNode.Name, "b") + assert.Equal(t, 0, len(bNode.Attributes)) + + notNode, ok := andNode.Right.(*NotParseToken) + assert.True(t, ok) + + notNode2, ok := notNode.Child.(*NotParseToken) + assert.True(t, ok) + + aNode, ok := notNode2.Child.(*NounParseToken) + assert.True(t, ok) + assert.Equal(t, aNode.Name, "a") + assert.Equal(t, 0, len(aNode.Attributes)) +} + +func TestFunctionsSimple(t *testing.T) { + tokens := BuildTokens("a(x,y,z,1)") + root, err := BuildParseTree(tokens) + assert.Nil(t, err) + checkFunctionsSimple1(root, t) +} + +func checkFunctionsSimple1(root ParseToken, t *testing.T) { + aNode, ok := root.(*FunctionParseToken) + assert.True(t, ok) + assert.Equal(t, aNode.Name, "a") + params := aNode.Parameters + assert.Equal(t, 4, len(params)) + if len(params) == 4 { + xNode, ok := params[0].(*NounParseToken) + assert.True(t, ok) + assert.Equal(t, "x", xNode.Name) + + yNode, ok := params[1].(*NounParseToken) + assert.True(t, ok) + assert.Equal(t, "y", yNode.Name) + + zNode, ok := params[2].(*NounParseToken) + assert.True(t, ok) + assert.Equal(t, "z", zNode.Name) + + oneNode, ok := params[3].(*NounParseToken) + assert.True(t, ok) + assert.Equal(t, "1", oneNode.Name) + } +} + +func TestFunctionsNested(t *testing.T) { + tokens := BuildTokens("a(x[self=false],y(3),(z and b(2,3)),1)") + root, err := BuildParseTree(tokens) + assert.Nil(t, err) + checkFunctionsNested(root, t) +} + +func checkFunctionsNested(root ParseToken, t *testing.T) { + aNode, ok := root.(*FunctionParseToken) + assert.True(t, ok) + assert.Equal(t, aNode.Name, "a") + params := aNode.Parameters + assert.Equal(t, 4, len(params)) + + if len(params) == 4 { + xNode, ok := params[0].(*NounParseToken) + assert.True(t, ok) + assert.Equal(t, "x", xNode.Name) + assert.Equal(t, 1, len(xNode.Attributes)) + assert.Equal(t, "false", xNode.Attributes["self"]) + + yNode, ok := params[1].(*FunctionParseToken) + assert.True(t, ok) + assert.Equal(t, "y", yNode.Name) + assert.Equal(t, 1, len(yNode.Parameters)) + assert.Equal(t, "3", yNode.Parameters[0].(*NounParseToken).Name) + + andNode, ok := params[2].(*AndOrParseToken) + assert.True(t, ok) + assert.Equal(t, andNode.JKind, JOINER_AND) + + zNode, ok := andNode.Left.(*NounParseToken) + assert.True(t, ok) + assert.Equal(t, "z", zNode.Name) + + bNode, ok := andNode.Right.(*FunctionParseToken) + assert.True(t, ok) + assert.Equal(t, "b", bNode.Name) + assert.Equal(t, 2, len(bNode.Parameters)) + + assert.Equal(t, "2", bNode.Parameters[0].(*NounParseToken).Name) + assert.Equal(t, "3", bNode.Parameters[1].(*NounParseToken).Name) + + oneNode, ok := params[3].(*NounParseToken) + assert.True(t, ok) + assert.Equal(t, "1", oneNode.Name) + } +} diff --git a/matcher/scanner.go b/matcher/scanner.go new file mode 100644 index 0000000..91a269f --- /dev/null +++ b/matcher/scanner.go @@ -0,0 +1,120 @@ +/* + +SPDX-Copyright: Copyright (c) Capital One Services, LLC +SPDX-License-Identifier: Apache-2.0 +Copyright 2017 Capital One Services, LLC + +Licensed under the Apache 
License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and limitations under the License. + +*/ +package matcher + +/* +language parse tokens: + +NAME - refers to: a person_name, an org_name, a special org name (us, them, any), an attribute name, an attribute value, a number, true, or false +LBRACKET - [ for specifying attributes +RBRACKET - ] for specifying attributes +LPAREN - ( for grouping terms +RPAREN - ) for grouping terms +EQUAL - = for assigning attribute value to an attribute name +COMMA - , for separating items in attribute or parameter lists +AND - logical and +OR - logical or +NOT - logical not +LBRACE - { for specifying anonymous groups +RBRACE - } for specifying anonymous groups +*/ + +type token struct { + name string + value tokenType + pos int +} + +type tokenType int + +const ( + TOKEN_INVALID tokenType = iota + TOKEN_NAME + TOKEN_LBRACKET + TOKEN_RBRACKET + TOKEN_EQUAL + TOKEN_LPAREN + TOKEN_RPAREN + TOKEN_COMMA + TOKEN_AND + TOKEN_OR + TOKEN_NOT + TOKEN_LBRACE + TOKEN_RBRACE +) + +func getTokenType(curString string) tokenType { + switch curString { + case "and": + return TOKEN_AND + case "or": + return TOKEN_OR + case "not": + return TOKEN_NOT + default: + return TOKEN_NAME + } +} + +func BuildTokens(in string) []token { + var curString []rune + var curPos int + out := []token{} + f := func(t token) { + if len(curString) > 0 { + cur := string(curString) + out = append(out, token{cur, getTokenType(cur), curPos}) + curString = curString[:0] + curPos = 0 + } + if len(t.name) > 0 { + out = append(out, t) + } + } + for i, v := range in { + pos := i + 1 + switch v { + case '[': + f(token{"[", TOKEN_LBRACKET, pos}) + case ']': + f(token{"]", TOKEN_RBRACKET, pos}) + case '=': + f(token{"=", TOKEN_EQUAL, pos}) + case '(': + f(token{"(", TOKEN_LPAREN, pos}) + case ')': + f(token{")", TOKEN_RPAREN, pos}) + case '{': + f(token{"{", TOKEN_LBRACE, pos}) + case '}': + f(token{"}", TOKEN_RBRACE, pos}) + case ',': + f(token{",", TOKEN_COMMA, pos}) + case ' ', '\n', '\t', '\r': + f(token{"", 0, 0}) + default: + if curPos == 0 { + curPos = pos + } + curString = append(curString, v) + } + } + f(token{"", 0, 0}) + return out +} diff --git a/migration/migration.go b/migration/migration.go new file mode 100644 index 0000000..c22d53a --- /dev/null +++ b/migration/migration.go @@ -0,0 +1,95 @@ +/* + +SPDX-Copyright: Copyright (c) Capital One Services, LLC +SPDX-License-Identifier: Apache-2.0 +Copyright 2017 Capital One Services, LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and limitations under the License. 
+ +*/ +package migration + +import ( + "context" + + "github.com/capitalone/checks-out/envvars" + "github.com/capitalone/checks-out/remote" + "github.com/capitalone/checks-out/store" + + log "github.com/Sirupsen/logrus" + "github.com/mspiegel/go-multierror" +) + +// Migrate performs any (store x remote) operations necessary +// after a database migration. +func Migrate(r remote.Remote, s store.Store) error { + var errs error + ids := s.GetMigrations() + if ids.Contains("002_org.sql") { + errs = multierror.Append(errs, insertRepoOrgs(r, s)) + } + if ids.Contains("005_oauth_scope.sql") { + errs = multierror.Append(errs, insertDefaultScope(s)) + } + return errs +} + +func insertDefaultScope(s store.Store) error { + log.Info("Applying 005_oauth_scope.sql migrations") + count := 0 + users, err := s.GetAllUsers() + if err != nil { + return err + } + for _, user := range users { + user.Scopes = envvars.Env.Github.Scope + err = s.UpdateUser(user) + if err != nil { + log.Warnf("Unable to update user %s: %s", user.Login, err) + continue + } + count++ + } + log.Infof("Applied 005_oauth_scope.sql migrations to %d out of a total %d users", count, len(users)) + return nil +} + +func insertRepoOrgs(r remote.Remote, s store.Store) error { + log.Info("Applying 002_org.sql migrations") + count := 0 + repos, err := s.GetAllRepos() + if err != nil { + return err + } + for _, repo := range repos { + model, err := s.GetUser(repo.UserID) + if err != nil { + log.Warnf("Unable to retrieve user %s (id %d) for repo %s: %s", + repo.Owner, repo.UserID, repo.Slug, err) + continue + } + update, err := r.GetRepo(context.Background(), model, repo.Owner, repo.Name) + if err != nil { + log.Warnf("Unable to retrieve repo %s: %s", repo.Slug, err) + continue + } + repo.Org = update.Org + err = s.UpdateRepo(repo) + if err != nil { + log.Warnf("Unable to update repo %s: %s", repo.Slug, err) + continue + } + count++ + } + log.Infof("Applied 002_org.sql migrations to %d out of a total %d repos", count, len(repos)) + return nil +} diff --git a/model/approval.go b/model/approval.go new file mode 100644 index 0000000..5db23a5 --- /dev/null +++ b/model/approval.go @@ -0,0 +1,453 @@ +/* + +SPDX-Copyright: Copyright (c) Capital One Services, LLC +SPDX-License-Identifier: Apache-2.0 +Copyright 2017 Capital One Services, LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and limitations under the License. 
+ +*/ +package model + +import ( + "encoding/json" + "fmt" + "regexp" + "sort" + "strings" + "time" + + "github.com/capitalone/checks-out/set" + "github.com/capitalone/checks-out/strings/lowercase" + "github.com/capitalone/checks-out/strings/miniglob" + "github.com/capitalone/checks-out/strings/rxserde" + + "github.com/pkg/errors" +) + +type ApprovalRequest struct { + Config *Config + Maintainer *MaintainerSnapshot + PullRequest *PullRequest + Repository *Repo + Issues []*Issue + ApprovalComments []Feedback + DisapprovalComments []Feedback + Files []CommitFile +} + +type Feedback interface { + IsApproval(req *ApprovalRequest) bool + IsDisapproval(req *ApprovalRequest) bool + GetAuthor() lowercase.String + GetBody() string + GetSubmittedAt() time.Time +} + +// Processor updates bookkeeping for comment tracking. +type Processor func(Feedback, ApprovalOp) + +// MatchAction decides whether to invoke the Processor. +type MatchAction func(*ApprovalRequest, Feedback, set.Set, Processor) + +func Approve(request *ApprovalRequest, policy *ApprovalPolicy, p Processor) (bool, error) { + authorRequest := *request + authorComment := Comment{Author: request.PullRequest.Author} + authorRequest.ApprovalComments = []Feedback{&authorComment} + + if titleAction(&authorRequest, &authorComment, p) { + return false, nil + } + + inner, err := policy.AuthorMatch.Match(&authorRequest, p, authorLimitAction, authorRequest.ApprovalComments) + if err != nil { + return false, err + } + if !inner { + return false, nil + } + + inner, err = policy.AntiMatch.Match(request, p, antiMatch, request.DisapprovalComments) + if err != nil { + return false, err + } + if inner { + return false, nil + } + + return policy.Match.Match(request, p, approvalAction, request.ApprovalComments) +} + +// ApprovalPolicy combines an approval scope (that determines +// when the policy can be applied) and an approval match +// (that determines whether the match is successful) +type ApprovalPolicy struct { + // Name is used to describe this policy to humans. + // Optional but if specified it must be unique. + Name string `json:"name,omitempty"` + // Position is the 1-based index of this policy in the approval array. + Position int `json:"-"` + // ApprovalScope determines when the policy can be applied + Scope *ApprovalScope `json:"scope,omitempty"` + // Match is a JSON object that stores the approval match. + // Every approval policy needs a Match. + Match MatcherHolder `json:"match"` + // AntiMatch is a JSON object that stores the disapproval match. + AntiMatch *MatcherHolder `json:"antimatch,omitempty"` + // AuthorMatch is a JSON object that stores the author match. + AuthorMatch *MatcherHolder `json:"authormatch,omitempty"` + // Tag is an optional tag section to be used for this + // approval scope. If this field is empty then the + // global tag section is used. + Tag *TagConfig `json:"tag,omitempty"` + // Merge is an optional merge section to be used for this + // approval scope. If this field is empty then the + // global merge section is used. + Merge *MergeConfig `json:"merge,omitempty"` + // Pattern is an optional regular expression to be used for this + // approval scope. If this field is empty then the + // global pattern is used. + Pattern *rxserde.RegexSerde `json:"pattern,omitempty"` + // AntiPattern is an optional regular expression to be used for this + // approval scope. If this field is empty then the + // global antipattern is used.
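+ // For example (illustrative only), a policy-level setting such as + // antipattern: "(?i)^holdit" would register matching comments as + // disapprovals for this policy; see Comment.IsDisapproval.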
+ AntiPattern *rxserde.RegexSerde `json:"antipattern,omitempty"` + // AntiTitle is an optional regular expression to be used for this + // approval scope. If this field is empty then the + // global antititle is used. + AntiTitle *rxserde.RegexSerde `json:"antititle,omitempty"` + // Feedback is an optional feedback section to be used for this + // approval scope. If this field is empty then the + // global feedback section is used. + Feedback *FeedbackConfig `json:"feedback,omitempty"` +} + +// ApprovalScope determines when the policy can be applied +type ApprovalScope struct { + Paths []miniglob.MiniGlob `json:"paths,omitempty"` + Branches set.Set `json:"branches,omitempty"` + PathRegexp []rxserde.RegexSerde `json:"regexpaths,omitempty"` + BaseRegexp []rxserde.RegexSerde `json:"regexbase,omitempty"` + CompareRegexp []rxserde.RegexSerde `json:"regexcompare,omitempty"` +} + +// MatcherHolder stores a Matcher. +// JSON marshal and unmarshal are implemented +// on this struct. +type MatcherHolder struct { + Matcher +} + +// Matcher determines whether the match is successful. +type Matcher interface { + Match(req *ApprovalRequest, proc Processor, a MatchAction, feedback []Feedback) (bool, error) + GetType() string + Validate(m *MaintainerSnapshot) error +} + +type ChangePolicy interface { + ChangePolicy(policy *ApprovalPolicy) +} + +type CommonMatch struct { + // minimum number of approvals required + Approvals int `json:"approvals"` + // if true then author can self-approve request + Self bool `json:"self"` +} + +// UniverseMatch accepts the request when the number +// of people who have approved is greater than or +// equal to the threshold. Approvals are not restricted +// to project maintainers. +type UniverseMatch struct { + CommonMatch +} + +func (match UniverseMatch) MarshalJSON() ([]byte, error) { + s := fmt.Sprintf(`"universe[count=%d,self=%v]"`, match.Approvals, match.Self) + return []byte(s), nil +} + +// MaintainerMatch accepts the request when the number +// of people who have approved is greater than or +// equal to the threshold. Approvals are restricted +// to project maintainers. +type MaintainerMatch struct { + CommonMatch +} + +func (match MaintainerMatch) MarshalJSON() ([]byte, error) { + s := fmt.Sprintf(`"all[count=%d,self=%v]"`, match.Approvals, match.Self) + return []byte(s), nil +} + +// AnonymousMatch accepts the request when the number +// of people who have approved is greater than or +// equal to the threshold. +type AnonymousMatch struct { + CommonMatch + // restrict the approvers to this set + Entities set.LowerSet `json:"entities"` +} + +func (match AnonymousMatch) MarshalJSON() ([]byte, error) { + p := match.Entities.Keys().ToStringSlice() + sort.Strings(p) //make sure they are always in the same order so we can test + s := fmt.Sprintf(`"{%s}[count=%d,self=%v]"`, strings.Join(p, ","), match.Approvals, match.Self) + return []byte(s), nil +} + +// EntityMatch accepts the request when the +// group or person meets the minimum number of approvals. +type EntityMatch struct { + CommonMatch + // name of the group or person to match + Entity lowercase.String `json:"entity"` +} + +func (match EntityMatch) MarshalJSON() ([]byte, error) { + s := fmt.Sprintf(`"%s[count=%d,self=%v]"`, match.Entity, match.Approvals, match.Self) + return []byte(s), nil +} + +// UsMatch is restricted to maintainers who share +// a group with the author of the pull request.
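+// For example (illustrative only), a policy of "us[count=2,self=false]", +// in the syntax emitted by MarshalJSON below, would require two approvals +// from maintainers in one of the author's groups and deny self-approval.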
+type UsMatch struct { + CommonMatch +} + +func (match UsMatch) MarshalJSON() ([]byte, error) { + s := fmt.Sprintf(`"us[count=%d,self=%v]"`, match.Approvals, match.Self) + return []byte(s), nil +} + +// ThemMatch is restricted to maintainers who +// do not share a group with the author of the +// pull request. +type ThemMatch struct { + CommonMatch +} + +func (match ThemMatch) MarshalJSON() ([]byte, error) { + s := fmt.Sprintf(`"them[count=%d,self=%v]"`, match.Approvals, match.Self) + return []byte(s), nil +} + +type AtLeastMatch struct { + // minimum number of approvals required + Approvals int `json:"approvals"` + Choose []MatcherHolder `json:"choose"` +} + +func (match AtLeastMatch) MarshalJSON() ([]byte, error) { + c := make([]string, 0, len(match.Choose)) + for _, v := range match.Choose { + b, e := json.Marshal(v) + if e != nil { + return nil, e + } + c = append(c, string(b[1:len(b)-1])) + } + s := fmt.Sprintf(`"atleast(%d,%s)"`, match.Approvals, strings.Join(c, ",")) + return []byte(s), nil +} + +type AuthorMatch struct { + Inner MatcherHolder `json:"inner"` +} + +func (match AuthorMatch) MarshalJSON() ([]byte, error) { + b, e := json.Marshal(match.Inner) + if e != nil { + return nil, e + } + c := string(b[1 : len(b)-1]) + s := fmt.Sprintf(`"author(%s)"`, c) + return []byte(s), nil +} + +type IssueAuthorMatch struct{} + +func (match IssueAuthorMatch) MarshalJSON() ([]byte, error) { + return []byte("issue-author"), nil +} + +// AndMatch performs a boolean 'and' operation on +// two or more Matchers. +type AndMatch struct { + And []MatcherHolder `json:"and"` +} + +func (match AndMatch) MarshalJSON() ([]byte, error) { + c := make([]string, 0, len(match.And)) + for _, v := range match.And { + b, e := json.Marshal(v) + if e != nil { + return nil, e + } + c = append(c, string(b[1:len(b)-1])) + } + s := strings.Join(c, " and ") + return []byte(fmt.Sprintf(`"%s"`, s)), nil +} + +// OrMatch performs a boolean 'or' operation on +// two or more Matchers. +type OrMatch struct { + Or []MatcherHolder `json:"or"` +} + +func (match OrMatch) MarshalJSON() ([]byte, error) { + c := make([]string, 0, len(match.Or)) + for _, v := range match.Or { + b, e := json.Marshal(v) + if e != nil { + return nil, e + } + c = append(c, string(b[1:len(b)-1])) + } + s := strings.Join(c, " or ") + return []byte(fmt.Sprintf(`"%s"`, s)), nil +} + +// NotMatch performs a boolean 'not' operation on +// a matcher. +type NotMatch struct { + Not MatcherHolder `json:"not"` +} + +func (match NotMatch) MarshalJSON() ([]byte, error) { + b, e := json.Marshal(match.Not) + if e != nil { + return nil, e + } + return []byte(fmt.Sprintf(`"not %s"`, string(b[1:len(b)-1]))), nil +} + +// TrueMatch always returns true. +type TrueMatch struct{} + +func (match TrueMatch) MarshalJSON() ([]byte, error) { + return []byte(`"true"`), nil +} + +// FalseMatch always returns false. +type FalseMatch struct{} + +func (match FalseMatch) MarshalJSON() ([]byte, error) { + return []byte(`"false"`), nil +} + +// DisableMatch always returns true and disables service. +type DisableMatch struct{} + +func (match DisableMatch) MarshalJSON() ([]byte, error) { + return []byte(`"off"`), nil +} + +func (match MatcherHolder) MarshalJSON() ([]byte, error) { + return json.Marshal(match.Matcher) +} + +func (match *MatcherHolder) UnmarshalJSON(data []byte) error { + var text string + err := json.Unmarshal(data, &text) + if err != nil { + return err + } + m, err := GenerateMatcher(text) + if err != nil { + return errors.Errorf("Unable to parse approval policy '%s'. 
%s", + text, err.Error()) + } + match.Matcher = m + return nil +} + +func DefaultApprovalPolicy() *ApprovalPolicy { + a := new(ApprovalPolicy) + a.AntiMatch = new(MatcherHolder) + a.AuthorMatch = new(MatcherHolder) + a.Scope = DefaultApprovalScope() + a.Match.Matcher = DefaultMatcher() + a.AntiMatch.Matcher = DefaultMatcher() + a.AuthorMatch.Matcher = DefaultUniverseMatch() + return a +} + +func DefaultApprovalScope() *ApprovalScope { + return &ApprovalScope{} +} + +func DefaultMatcher() Matcher { + return DefaultMaintainerMatch() +} + +func DefaultMaintainerMatch() *MaintainerMatch { + m := new(MaintainerMatch) + m.Approvals = 1 + m.Self = true + return m +} + +func DefaultUniverseMatch() *UniverseMatch { + m := new(UniverseMatch) + m.Approvals = 1 + m.Self = true + return m +} + +func DefaultAnonymousMatch() *AnonymousMatch { + m := new(AnonymousMatch) + m.Approvals = 1 + m.Self = true + return m +} + +func DefaultEntityMatch() *EntityMatch { + m := new(EntityMatch) + m.Approvals = 1 + m.Self = true + return m +} + +func DefaultUsMatch() *UsMatch { + m := new(UsMatch) + m.Approvals = 1 + m.Self = true + return m +} + +func DefaultThemMatch() *ThemMatch { + m := new(ThemMatch) + m.Approvals = 1 + m.Self = true + return m +} + +// IsTitleMatch returns true if the text matches the +// antititle regular expression. +func (req *ApprovalRequest) IsTitleMatch() bool { + var regExp *regexp.Regexp + policy := FindApprovalPolicy(req) + if policy.AntiTitle != nil { + regExp = policy.AntiTitle.Regex + } else if req.Config.AntiTitle != nil { + regExp = req.Config.AntiTitle.Regex + } + if regExp == nil { + // title matching is optional + return false + } + return regExp.MatchString(req.PullRequest.Title) +} diff --git a/model/approval_op.go b/model/approval_op.go new file mode 100644 index 0000000..ca4e1f9 --- /dev/null +++ b/model/approval_op.go @@ -0,0 +1,29 @@ +/* + +SPDX-Copyright: Copyright (c) Capital One Services, LLC +SPDX-License-Identifier: Apache-2.0 +Copyright 2017 Capital One Services, LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and limitations under the License. + +*/ +package model + +type ApprovalOp int + +const ( + Approval ApprovalOp = iota + DisapprovalInsert + DisapprovalRemove + ValidAuthor + ValidTitle +) diff --git a/model/approval_test.go b/model/approval_test.go new file mode 100644 index 0000000..ea4c141 --- /dev/null +++ b/model/approval_test.go @@ -0,0 +1,219 @@ +/* + +SPDX-Copyright: Copyright (c) Capital One Services, LLC +SPDX-License-Identifier: Apache-2.0 +Copyright 2017 Capital One Services, LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and limitations under the License. 
+ +*/ +package model + +import ( + "encoding/json" + "reflect" + "testing" + + "github.com/capitalone/checks-out/hjson" + "github.com/capitalone/checks-out/set" +) + +var peopleApproval = `"{Foo,bar,baz,foo}"` + +var atLeastApproval = `"atleast(10, a, b, c or d)"` + +var nestedApproval = ` +approvals: [ +{ + match: "{Foo,bar,baz,foo} and a" +} +] +` + +func TestAnonymousMatchUnmarshal(t *testing.T) { + var container MatcherHolder + err := hjson.Unmarshal([]byte(peopleApproval), &container) + if err != nil { + t.Fatal("Error parsing anonymous approval match", err) + } + o1 := container.Matcher.(*AnonymousMatch).Entities + e1 := set.NewLowerFromString("foo", "bar", "baz") + if !reflect.DeepEqual(o1, e1) { + t.Error("Anonymous set generated incorrectly", o1) + } +} + +func TestAtLeastMatcherUnmarshal(t *testing.T) { + var container MatcherHolder + err := hjson.Unmarshal([]byte(atLeastApproval), &container) + if err != nil { + t.Fatal("Error parsing atleast approval match", err) + } + s := container.Matcher.(*AtLeastMatch) + if len(s.Choose) != 3 { + t.Error("Atleast list generated incorrectly", s.Choose) + } + if s.Approvals != 10 { + t.Error("Approval count parsed incorrectly", s.Approvals) + } +} + +var implicitMatcher = ` +approvals: [ + { + scope: { + branches: [ "master" ] + } + } + { + scope: { + branches: [] + } + } +] +` + +func TestImplicitApproval(t *testing.T) { + c, err := ParseConfig([]byte(implicitMatcher), AllowAll()) + if err != nil { + t.Fatalf("Error parsing configuration file: %+v", err) + } + t0 := reflect.TypeOf(c.Approvals[0].Match.Matcher).String() + t1 := reflect.TypeOf(c.Approvals[1].Match.Matcher).String() + if t0 != "*model.MaintainerMatch" { + t.Errorf("First approval match is not maintainers: %s", t0) + } + if t1 != "*model.MaintainerMatch" { + t.Errorf("Second approval match is not maintainers: %s", t1) + } +} + +func TestMarshal(t *testing.T) { + m := MatcherHolder{Matcher: DefaultMatcher()} + text, err := json.Marshal(m) + if err != nil { + t.Fatal("Error marshaling match policy", err) + } + var mjson string + err = json.Unmarshal(text, &mjson) + if err != nil { + t.Fatal("Error unmarshaling match policy", err) + } + if mjson != "all[count=1,self=true]" { + t.Errorf("Unexpected marshaling, got %s, expected all[count=1,self=true]", mjson) + } +} + +func TestOffMatch(t *testing.T) { + a := ` + { + "match": "off" + } + ` + var ap ApprovalPolicy + + err := json.Unmarshal([]byte(a), &ap) + if err != nil { + t.Fatal("Error unmarshalling approval policy", err) + } + + setupPolicyDefaults(0, &ap) + + if _, ok := ap.Match.Matcher.(*DisableMatch); !ok { + t.Fatalf("Expected DisableMatch, got %+v", ap.Match.Matcher) + } + if ap.AntiMatch.Matcher == nil { + t.Fatal("Expected non-nil anti-matcher") + } + if _, ok := ap.AntiMatch.Matcher.(*FalseMatch); !ok { + t.Fatalf("Expected FalseMatch for anti-matcher, got %+v", ap.AntiMatch.Matcher) + } + if ap.Merge == nil { + t.Fatal("Expected non-nil merge rule") + } + if ap.Tag == nil { + t.Fatal("Expected non-nil tag rule") + } +} + +func TestTrueMatchNils(t *testing.T) { + a := ` + { + "match": "true" + } + ` + var ap ApprovalPolicy + + err := json.Unmarshal([]byte(a), &ap) + if err != nil { + t.Fatal("Error unmarshalling approval policy", err) + } + + setupPolicyDefaults(0, &ap) + + if _, ok := ap.Match.Matcher.(*TrueMatch); !ok { + t.Fatalf("Expected TrueMatch, got %+v", ap.Match.Matcher) + } + if ap.Merge != nil { + t.Fatal("Expected nil merge rule") + } + if ap.Tag != nil { + t.Fatal("Expected nil tag rule") + } +} + +func 
TestRoundTripMatcher(t *testing.T) { + m := ` + { + "match": "atleast(2, foo[count=3,self=true],all[count=2,self=false] or not universe and baz[count=5], true or false and fred[self=true])" + } + ` + var ap ApprovalPolicy + + err := json.Unmarshal([]byte(m), &ap) + if err != nil { + t.Fatal("Error unmarshalling approval policy", err) + } + roundTrip, err := json.Marshal(ap) + if err != nil { + t.Fatal("Error marshaling approval policy", err) + } + expected := `{"match":"atleast(2,foo[count=3,self=true],all[count=2,self=false] or not universe[count=1,self=true] and baz[count=5,self=true],true or false and fred[count=1,self=true])"}` + if string(roundTrip) != expected { + t.Fatalf("Error round-tripping approval policy. Expected '%s', got '%s'", expected, string(roundTrip)) + } +} + +func TestAnonymousGroupRoundTripping(t *testing.T) { + vals := map[string]string{ + `{"match": "{bob,jane}"}`: `{"match":"{bob,jane}[count=1,self=true]"}`, + `{"match": "{bob,jane}[count=2,self=false]"}`: `{"match":"{bob,jane}[count=2,self=false]"}`, + `{"match": "atleast(1,{bob,jane}[count=2,self=false])"}`: `{"match":"atleast(1,{bob,jane}[count=2,self=false])"}`, + `{"match": "atleast(1,{bob,jane})"}`: `{"match":"atleast(1,{bob,jane}[count=1,self=true])"}`, + } + for k, v := range vals { + var ap ApprovalPolicy + + err := json.Unmarshal([]byte(k), &ap) + if err != nil { + t.Fatal("Error unmarshalling approval policy", err) + } + roundTrip, err := json.Marshal(ap) + if err != nil { + t.Fatal("Error marshaling approval policy", err) + } + if string(roundTrip) != v { + t.Errorf("Error round-tripping approval policy. Expected '%s', got '%s'", v, string(roundTrip)) + } + } + +} diff --git a/model/approval_validate.go b/model/approval_validate.go new file mode 100644 index 0000000..e61229d --- /dev/null +++ b/model/approval_validate.go @@ -0,0 +1,182 @@ +/* + +SPDX-Copyright: Copyright (c) Capital One Services, LLC +SPDX-License-Identifier: Apache-2.0 +Copyright 2017 Capital One Services, LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and limitations under the License. 
+ +*/ +package model + +import ( + "fmt" + + "github.com/capitalone/checks-out/set" + + multierror "github.com/mspiegel/go-multierror" + "github.com/pkg/errors" +) + +func validateApprovals(approvals []*ApprovalPolicy) error { + var errs error + if len(approvals) == 0 { + return errors.New("No approval policies specified") + } + tailScope := approvals[len(approvals)-1].Scope + if len(tailScope.Paths) > 0 { + errs = multierror.Append(errs, errors.New("Final scope must have no paths")) + } + if len(tailScope.Branches) > 0 { + errs = multierror.Append(errs, errors.New("Final scope must have no branches")) + } + if len(tailScope.PathRegexp) > 0 { + errs = multierror.Append(errs, errors.New("Final scope must have no path regexp")) + } + if len(tailScope.BaseRegexp) > 0 { + errs = multierror.Append(errs, errors.New("Final scope must have no base branch regular expressions")) + } + if len(tailScope.CompareRegexp) > 0 { + errs = multierror.Append(errs, errors.New("Final scope must have no compare branch regular expressions")) + } + for _, approval := range approvals { + errs = multierror.Append(errs, approval.Validate()) + } + errs = validateNames(approvals, errs) + return errs +} + +func (a *ApprovalPolicy) Validate() error { + var errs error + if a.Tag != nil { + errs = multierror.Append(errs, a.Tag.Compile()) + } + if len(a.Scope.Paths) > 0 && len(a.Scope.PathRegexp) > 0 { + err := errors.New("'paths' and 'regexpaths' cannot be used together") + errs = multierror.Append(errs, err) + } + if len(a.Scope.Branches) > 0 && len(a.Scope.BaseRegexp) > 0 { + err := errors.New("'branches' and 'regexbase' cannot be used together") + errs = multierror.Append(errs, err) + } + return errs +} + +func validateNames(approvals []*ApprovalPolicy, errs error) error { + names := set.Empty() + dupls := set.Empty() + for _, approval := range approvals { + name := approval.Name + if len(name) > 0 { + if names.Contains(name) { + if !dupls.Contains(name) { + err := fmt.Errorf("The approval scope name '%s' is used more than once", name) + errs = multierror.Append(errs, err) + dupls.Add(name) + } + } else { + names.Add(name) + } + } + } + return errs +} + +func (match CommonMatch) Validate(_ *MaintainerSnapshot) error { + if match.Approvals <= 0 { + return errors.New("approval count must be positive") + } + return nil +} + +func (match *EntityMatch) Validate(m *MaintainerSnapshot) error { + var errs error + errs = multierror.Append(errs, match.CommonMatch.Validate(m)) + ent := match.Entity.String() + _, org := m.Org[ent] + _, person := m.People[ent] + if !org && !person { + err := fmt.Errorf("%s must be either org or person", ent) + errs = multierror.Append(errs, err) + } + return errs +} + +func (match *AnonymousMatch) Validate(m *MaintainerSnapshot) error { + var errs error + errs = multierror.Append(errs, match.CommonMatch.Validate(m)) + ents := match.Entities + for e := range ents { + ent := e.String() + _, org := m.Org[ent] + _, person := m.People[ent] + if !org && !person { + err := fmt.Errorf("%s must be either org or person", ent) + errs = multierror.Append(errs, err) + } + } + return errs +} + +func (match *DisableMatch) Validate(_ *MaintainerSnapshot) error { + return nil +} + +func (match *TrueMatch) Validate(_ *MaintainerSnapshot) error { + return nil +} + +func (match *FalseMatch) Validate(_ *MaintainerSnapshot) error { + return nil +} + +func (match *AndMatch) Validate(m *MaintainerSnapshot) error { + var errs error + for _, a := range match.And { + err := a.Validate(m) + errs = multierror.Append(errs, err) 
+ } + return errs +} + +func (match *OrMatch) Validate(m *MaintainerSnapshot) error { + var errs error + for _, o := range match.Or { + err := o.Validate(m) + errs = multierror.Append(errs, err) + } + return errs +} + +func (match *NotMatch) Validate(m *MaintainerSnapshot) error { + return match.Not.Validate(m) +} + +func (match *AtLeastMatch) Validate(m *MaintainerSnapshot) error { + var errs error + if match.Approvals <= 0 { + errs = errors.New("approval count must be positive") + } + for _, c := range match.Choose { + err := c.Validate(m) + errs = multierror.Append(errs, err) + } + return errs +} + +func (match *AuthorMatch) Validate(m *MaintainerSnapshot) error { + return match.Inner.Validate(m) +} + +func (match *IssueAuthorMatch) Validate(_ *MaintainerSnapshot) error { + return nil +} diff --git a/model/audit.go b/model/audit.go new file mode 100644 index 0000000..309ad2b --- /dev/null +++ b/model/audit.go @@ -0,0 +1,38 @@ +/* + +SPDX-Copyright: Copyright (c) Capital One Services, LLC +SPDX-License-Identifier: Apache-2.0 +Copyright 2017 Capital One Services, LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and limitations under the License. + +*/ +package model + +import ( + "github.com/capitalone/checks-out/set" +) + +type AuditConfig struct { + // enable auditing of commits. Default is false. + Enable bool `json:"enable"` + // which branches to audit + Branches set.Set `json:"branches"` +} + +func DefaultAudit() AuditConfig { + branches := set.New("master") + return AuditConfig{ + Enable: false, + Branches: branches, + } +} diff --git a/model/branch.go b/model/branch.go new file mode 100644 index 0000000..d992119 --- /dev/null +++ b/model/branch.go @@ -0,0 +1,43 @@ +/* + +SPDX-Copyright: Copyright (c) Capital One Services, LLC +SPDX-License-Identifier: Apache-2.0 +Copyright 2017 Capital One Services, LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and limitations under the License. 
+ +*/ +package model + +type PullRequest struct { + Issue + Branch Branch + Body string +} + +type Branch struct { + CompareName string + CompareSHA string + CompareOwner string + Mergeable bool + Merged bool + MergeCommitSHA string + BaseName string + BaseSHA string +} + +type BranchCompare struct { + Status string + AheadBy int + BehindBy int + TotalCommits int +} diff --git a/model/capabilities.go b/model/capabilities.go new file mode 100644 index 0000000..ae1c38d --- /dev/null +++ b/model/capabilities.go @@ -0,0 +1,92 @@ +/* + +SPDX-Copyright: Copyright (c) Capital One Services, LLC +SPDX-License-Identifier: Apache-2.0 +Copyright 2017 Capital One Services, LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and limitations under the License. + +*/ +package model + +import ( + "errors" + + "github.com/capitalone/checks-out/set" + + "github.com/mspiegel/go-multierror" +) + +type Capabilities struct { + Org struct { + Read bool + } + Repo struct { + Tag bool + Merge bool + DeleteBranch bool + CommitStatus bool + PRWriteComment bool + DeploymentStatus bool + } +} + +func AllowAll() *Capabilities { + caps := new(Capabilities) + caps.Org.Read = true + caps.Repo.Tag = true + caps.Repo.Merge = true + caps.Repo.DeleteBranch = true + caps.Repo.CommitStatus = true + caps.Repo.PRWriteComment = true + caps.Repo.DeploymentStatus = true + return caps +} + +func validateCapabilities(c *Config, caps *Capabilities) error { + var errs error + errMsgs := set.Empty() + if !caps.Repo.CommitStatus { + errMsgs.Add("commit status OAuth scope is required") + } + if c.Tag.Enable && !caps.Repo.Tag { + errMsgs.Add("unable to git tag with provided OAuth scopes") + } + if c.Merge.Enable && !caps.Repo.Merge { + errMsgs.Add("unable to git merge with provided OAuth scopes") + } + if c.Merge.Enable && c.Merge.Delete && !caps.Repo.DeleteBranch { + errMsgs.Add("unable to delete branch with provided OAuth scopes") + } + for _, policy := range c.Approvals { + if policy.Tag != nil && policy.Tag.Enable && !caps.Repo.Tag { + errMsgs.Add("unable to git tag with provided OAuth scopes") + } + if policy.Merge != nil && policy.Merge.Enable && !caps.Repo.Merge { + errMsgs.Add("unable to git merge with provided OAuth scopes") + } + if policy.Merge != nil && policy.Merge.Enable && policy.Merge.Delete && !caps.Repo.DeleteBranch { + errMsgs.Add("unable to delete branch with provided OAuth scopes") + } + } + if c.Comment.Enable { + for _, target := range c.Comment.Targets { + if target.Target == Github.String() && !caps.Repo.PRWriteComment { + errMsgs.Add("unable to add PR comment with provided OAuth scopes") + } + } + } + for msg := range errMsgs { + errs = multierror.Append(errs, errors.New(msg)) + } + return errs +} diff --git a/model/comment.go b/model/comment.go index 00f1d19..f33b540 100644 --- a/model/comment.go +++ b/model/comment.go @@ -1,6 +1,89 @@ +/* + +SPDX-Copyright: Copyright (c) Brad Rydzewski, project contributors, Capital One Services, LLC +SPDX-License-Identifier: Apache-2.0 +Copyright 2017 Brad Rydzewski, project contributors, Capital One Services, LLC + +Licensed under the 
Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and limitations under the License. + +*/ package model +import ( + "fmt" + "regexp" + "time" + + "github.com/capitalone/checks-out/envvars" + "github.com/capitalone/checks-out/strings/lowercase" +) + +var CommentPrefix = fmt.Sprintf("Message from %s --", envvars.Env.Branding.ShortName) + type Comment struct { - Author string - Body string + Author lowercase.String + Body string + SubmittedAt time.Time +} + +// IsApproval returns true if the comment body matches the regular +// expression pattern. +func (c *Comment) IsApproval(req *ApprovalRequest) bool { + var regExp *regexp.Regexp + policy := FindApprovalPolicy(req) + if policy.Pattern != nil { + regExp = policy.Pattern.Regex + } else { + regExp = req.Config.Pattern.Regex + } + if regExp == nil { + // this should never happen + return false + } + return regExp.MatchString(c.Body) +} + +// IsDisapproval returns true if the comment body matches the +// antipattern regular expression. +func (c *Comment) IsDisapproval(req *ApprovalRequest) bool { + var regExp *regexp.Regexp + policy := FindApprovalPolicy(req) + if policy.AntiPattern != nil { + regExp = policy.AntiPattern.Regex + } else if req.Config.AntiPattern != nil { + regExp = req.Config.AntiPattern.Regex + } + if regExp == nil { + // disapproval matching is optional + return false + } + return regExp.MatchString(c.Body) +} + +func (c *Comment) GetAuthor() lowercase.String { + return c.Author +} + +func (c *Comment) GetBody() string { + return c.Body +} + +func (c *Comment) GetSubmittedAt() time.Time { + return c.SubmittedAt +} + +func (c *Comment) String() string { + if c == nil { + return "nil" + } + return fmt.Sprintf("{Author: %s, Comment: %s}", c.Author, c.Body) } diff --git a/model/commentsupport.go b/model/commentsupport.go new file mode 100644 index 0000000..f69ef16 --- /dev/null +++ b/model/commentsupport.go @@ -0,0 +1,186 @@ +/* + +SPDX-Copyright: Copyright (c) Capital One Services, LLC +SPDX-License-Identifier: Apache-2.0 +Copyright 2017 Capital One Services, LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and limitations under the License. 
+ +*/ +package model + +import ( + "encoding/json" + "fmt" +) + +type CommentMessage int + +const ( + _ CommentMessage = iota + //error while processing webhook + CommentError + //pr is opened + CommentOpen + //pr is closed without being merged + CommentClose + //pr has been closed and merged + CommentAccept + //an approval comment has been added to the PR + CommentApprove + //a blocking comment has been added to the PR + CommentBlock + //new push on PR branch has reset previous approvals + CommentReset + //a merge via the user interface has been ignored + CommentPushIgnore + //pr was auto-merged after all status checks passed + CommentMerge + //repo was tagged after merge + CommentTag + //branch for pr was auto-deleted after merge + CommentDelete + //deployment was triggered after merge + CommentDeployment + //pull request blocked because of author + CommentAuthor +) + +// CommentMessage enum maps. +var ( + strMapCommentMessage = map[string]CommentMessage{ + "error": CommentError, + "open": CommentOpen, + "close": CommentClose, + "accept": CommentAccept, + "approve": CommentApprove, + "block": CommentBlock, + "reset": CommentReset, + "push-ignore": CommentPushIgnore, + "merge": CommentMerge, + "tag": CommentTag, + "delete": CommentDelete, + "deploy": CommentDeployment, + "author": CommentAuthor, + } + + intMapCommentMessage = map[CommentMessage]string{ + CommentError: "error", + CommentOpen: "open", + CommentClose: "close", + CommentAccept: "accept", + CommentApprove: "approve", + CommentBlock: "block", + CommentReset: "reset", + CommentPushIgnore: "push-ignore", + CommentMerge: "merge", + CommentTag: "tag", + CommentDelete: "delete", + CommentDeployment: "deploy", + CommentAuthor: "author", + } +) + +// Known says whether or not this value is a known enum value. +func (s CommentMessage) Known() bool { + _, ok := intMapCommentMessage[s] + return ok +} + +// String is for the standard stringer interface. +func (s CommentMessage) String() string { + return intMapCommentMessage[s] +} + +// UnmarshalJSON satisfies the json.Unmarshaler +func (s *CommentMessage) UnmarshalJSON(data []byte) error { + str := "" + err := json.Unmarshal(data, &str) + if err != nil { + return err + } + var ok bool + *s, ok = strMapCommentMessage[str] + if !ok { + return fmt.Errorf("Unknown CommentMessage enum value: %s", str) + } + return nil +} + +// MarshalJSON satisfies the json.Marshaler +func (s CommentMessage) MarshalJSON() ([]byte, error) { + if !s.Known() { + return nil, fmt.Errorf("Unknown CommentMessage enum value: %d", int(s)) + } + name := intMapCommentMessage[s] + return json.Marshal(name) +} + +type CommentTarget int + +const ( + _ CommentTarget = iota + Github + Slack +) + +// CommentTarget enum maps. +var ( + strMapCommentTarget = map[string]CommentTarget{ + "github": Github, + "slack": Slack, + } + + intMapCommentTarget = map[CommentTarget]string{ + Github: "github", + Slack: "slack", + } +) + +func ToCommentTarget(s string) CommentTarget { + return strMapCommentTarget[s] +} + +// Known says whether or not this value is a known enum value. +func (s CommentTarget) Known() bool { + _, ok := intMapCommentTarget[s] + return ok +} + +// String is for the standard stringer interface. 
+func (s CommentTarget) String() string { + return intMapCommentTarget[s] +} + +// UnmarshalJSON satisfies the json.Unmarshaler +func (s *CommentTarget) UnmarshalJSON(data []byte) error { + str := "" + err := json.Unmarshal(data, &str) + if err != nil { + return err + } + var ok bool + *s, ok = strMapCommentTarget[str] + if !ok { + return fmt.Errorf("Unknown CommentTarget enum value: %s", str) + } + return nil +} + +// MarshalJSON satisfies the json.Marshaler +func (s CommentTarget) MarshalJSON() ([]byte, error) { + if !s.Known() { + return nil, fmt.Errorf("Unknown CommentTarget enum value: %d", int(s)) + } + name := intMapCommentTarget[s] + return json.Marshal(name) +} diff --git a/model/commitfile.go b/model/commitfile.go new file mode 100644 index 0000000..338a37a --- /dev/null +++ b/model/commitfile.go @@ -0,0 +1,23 @@ +/* + +SPDX-Copyright: Copyright (c) Capital One Services, LLC +SPDX-License-Identifier: Apache-2.0 +Copyright 2017 Capital One Services, LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and limitations under the License. + +*/ +package model + +type CommitFile struct { + Filename string +} diff --git a/model/commitrange.go b/model/commitrange.go new file mode 100644 index 0000000..fda9cd7 --- /dev/null +++ b/model/commitrange.go @@ -0,0 +1,27 @@ +//go:generate enumer -type=CommitRange +/* + +SPDX-Copyright: Copyright (c) Capital One Services, LLC +SPDX-License-Identifier: Apache-2.0 +Copyright 2017 Capital One Services, LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and limitations under the License. + +*/ +package model + +type CommitRange int + +const ( + Head CommitRange = iota + All +) diff --git a/model/commitrange_enumer.go b/model/commitrange_enumer.go new file mode 100644 index 0000000..97d6586 --- /dev/null +++ b/model/commitrange_enumer.go @@ -0,0 +1,76 @@ +// Code generated by "enumer -type=CommitRange"; DO NOT EDIT +/* + +SPDX-Copyright: Copyright (c) Capital One Services, LLC +SPDX-License-Identifier: Apache-2.0 +Copyright 2017 Capital One Services, LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and limitations under the License. + +*/ +package model + +import ( + "encoding/json" + "fmt" +) + +// CommitRange is an enum. 
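+// Its values are Head ("head") and All ("all"); CommitConfig applies it to the +// range, antirange, and tagrange settings, e.g. commit: { range: "head" } (illustrative only).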
+// It is serialized as a string in JSON and as an integer in SQL. + +// CommitRange enum maps. +var ( + strMapCommitRange = map[string]CommitRange{ + "all": All, + "head": Head, + } + + intMapCommitRange = map[CommitRange]string{ + All: "all", + Head: "head", + } +) + +// Known says whether or not this value is a known enum value. +func (s CommitRange) Known() bool { + _, ok := intMapCommitRange[s] + return ok +} + +// String is for the standard stringer interface. +func (s CommitRange) String() string { + return intMapCommitRange[s] +} + +// UnmarshalJSON satisfies the json.Unmarshaler +func (s *CommitRange) UnmarshalJSON(data []byte) error { + str := "" + err := json.Unmarshal(data, &str) + if err != nil { + return err + } + var ok bool + *s, ok = strMapCommitRange[str] + if !ok { + return fmt.Errorf("Unknown CommitRange enum value: %s", str) + } + return nil +} + +// MarshalJSON satisfies the json.Marshaler +func (s CommitRange) MarshalJSON() ([]byte, error) { + if !s.Known() { + return nil, fmt.Errorf("Unknown CommitRange enum value: %d", int(s)) + } + name := intMapCommitRange[s] + return json.Marshal(name) +} diff --git a/model/commitstatus.go b/model/commitstatus.go new file mode 100644 index 0000000..b3473f0 --- /dev/null +++ b/model/commitstatus.go @@ -0,0 +1,42 @@ +/* + +SPDX-Copyright: Copyright (c) Capital One Services, LLC +SPDX-License-Identifier: Apache-2.0 +Copyright 2017 Capital One Services, LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and limitations under the License. + +*/ +package model + +import ( + "fmt" + + "github.com/capitalone/checks-out/envvars" +) + +// ServiceName is the status context posted to remote +var ServiceName = fmt.Sprintf("approvals/%s", envvars.Env.Branding.Name) + +// AuditName is the prefix of the audit context posted to remote +var AuditName = fmt.Sprintf("audit/%s", envvars.Env.Branding.Name) + +type CombinedStatus struct { + State string + Statuses map[string]CommitStatus +} + +type CommitStatus struct { + Context string + State string + Description string +} diff --git a/model/config.go b/model/config.go index bf6d63e..1715b57 100644 --- a/model/config.go +++ b/model/config.go @@ -1,63 +1,241 @@ +/* + +SPDX-Copyright: Copyright (c) Brad Rydzewski, project contributors, Capital One Services, LLC +SPDX-License-Identifier: Apache-2.0 +Copyright 2017 Brad Rydzewski, project contributors, Capital One Services, LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and limitations under the License. 
+ +*/ package model import ( "regexp" - "github.com/BurntSushi/toml" - "github.com/ianschenck/envflag" + "github.com/capitalone/checks-out/envvars" + "github.com/capitalone/checks-out/hjson" + "github.com/capitalone/checks-out/strings/rxserde" + + "github.com/mspiegel/go-multierror" + "github.com/pelletier/go-toml" ) type Config struct { - Approvals int `json:"approvals" toml:"approvals"` - Pattern string `json:"pattern" toml:"pattern"` - Team string `json:"team" toml:"team"` - SelfApprovalOff bool `json:"self_approval_off" toml:"self_approval_off"` + Approvals []*ApprovalPolicy `json:"approvals"` + Pattern rxserde.RegexSerde `json:"pattern"` + AntiPattern *rxserde.RegexSerde `json:"antipattern,omitempty"` + AntiTitle *rxserde.RegexSerde `json:"antititle,omitempty"` + Commit CommitConfig `json:"commit,omitempty"` + Maintainers MaintainersConfig `json:"maintainers,omitempty"` + Merge MergeConfig `json:"merge,omitempty"` + Feedback FeedbackConfig `json:"feedback,omitempty"` + Tag TagConfig `json:"tag,omitempty"` + Comment CommentConfig `json:"comment,omitempty"` + Deployment DeployConfig `json:"deploy,omitempty"` + Audit AuditConfig `json:"audit,omitempty"` + IsOld bool `json:"-"` +} - re *regexp.Regexp +type CommitConfig struct { + Range CommitRange `json:"range"` + AntiRange CommitRange `json:"antirange"` + TagRange CommitRange `json:"tagrange"` + IgnoreUIMerge bool `json:"ignoreuimerge,omitempty"` } -var ( - approvals = envflag.Int("LGTM_APPROVALS", 2, "") - pattern = envflag.String("LGTM_PATTERN", "(?i)LGTM", "") - team = envflag.String("LGTM_TEAM", "MAINTAINERS", "") - selfApprovalOff = envflag.Bool("LGTM_SELF_APPROVAL_OFF", false, "") -) +type MaintainersConfig struct { + Path string `json:"path"` + Type string `json:"type"` +} + +type MergeConfig struct { + Enable bool `json:"enable"` + UpToDate bool `json:"uptodate"` + Method string `json:"method"` + Delete bool `json:"delete"` +} -// ParseConfig parses a projects .lgtm file -func ParseConfig(data []byte) (*Config, error) { - return ParseConfigStr(string(data)) +type FeedbackConfig struct { + Types []FeedbackType `json:"types,omitempty"` } -// ParseConfigStr parses a projects .lgtm file in string format. 
-func ParseConfigStr(data string) (*Config, error) { +type CommentConfig struct { + Enable bool `json:"enable"` + Targets []TargetConfig `json:"targets"` +} + +type TargetConfig struct { + Target string `json:"target"` + Pattern *rxserde.RegexSerde `json:"pattern"` + Types []CommentMessage `json:"types"` + Names []string `json:"names"` + Url string `json:"-"` +} + +type DeployConfig struct { + Enable bool `json:"enable"` + Path string `json:"path"` + DeploymentMap DeploymentConfigs `json:"-"` +} + +const ( + maintainers = "MAINTAINERS" + maintType = "text" + deployment = "DEPLOYMENTS" +) + +func DefaultConfig() *Config { c := new(Config) - _, err := toml.Decode(data, c) + c.Approvals = nil + c.Pattern = rxserde.RegexSerde{Regex: regexp.MustCompile(envvars.Env.Pattern.Default)} + c.Maintainers.Path = maintainers + c.Maintainers.Type = maintType + c.Deployment.Path = deployment + c.Feedback = DefaultFeedback() + c.Merge = DefaultMerge() + c.Tag = DefaultTag() + c.Audit = DefaultAudit() + _ = c.Tag.Compile() + return c +} + +func NonEmptyConfig() *Config { + c := DefaultConfig() + c.Approvals = []*ApprovalPolicy{DefaultApprovalPolicy()} + c.Approvals[0].Position = 1 + return c +} + +func (c *Config) Validate(caps *Capabilities) error { + var errs error + errs = multierror.Append(errs, validateCapabilities(c, caps)) + errs = multierror.Append(errs, validateApprovals(c.Approvals)) + errs = multierror.Append(errs, validateMaintainerConfig(&c.Maintainers)) + return errs +} + +func (c *Config) GetFeedbackConfig(policy *ApprovalPolicy) *FeedbackConfig { + if policy.Feedback == nil { + return &c.Feedback + } + return policy.Feedback +} + +func (c *Config) GetMergeConfig(policy *ApprovalPolicy) *MergeConfig { + if policy.Merge == nil { + return &c.Merge + } + return policy.Merge +} + +// ParseConfig parses a project configuration file. 
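+// A minimal illustrative input, mirroring TestParseSimpleConfig, is: +// +// approvals: [ { match: "all[count=1,self=false]" } ] +// merge: { enable: true } +//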
+func ParseConfig(data []byte, caps *Capabilities) (*Config, error) { + c := DefaultConfig() + err := hjson.Unmarshal(data, c) if err != nil { return nil, err } - if c.Approvals == 0 { - c.Approvals = *approvals + var errs error + errs = multierror.Append(errs, c.Tag.Compile()) + if errs != nil { + return nil, errs + } + for i, policy := range c.Approvals { + setupPolicyDefaults(i, policy) + } + errs = multierror.Append(errs, c.Validate(caps)) + if errs != nil { + return nil, errs + } + return c, nil +} + +func setupPolicyDefaults(i int, policy *ApprovalPolicy) { + if policy.AuthorMatch == nil { + policy.AuthorMatch = new(MatcherHolder) } - if len(c.Pattern) == 0 { - c.Pattern = *pattern + if policy.AntiMatch == nil { + policy.AntiMatch = new(MatcherHolder) } - if len(c.Team) == 0 { - c.Team = *team + if policy.Scope == nil { + policy.Scope = DefaultApprovalScope() } - if c.SelfApprovalOff == false { - c.SelfApprovalOff = *selfApprovalOff + if policy.Match.Matcher == nil { + policy.Match.Matcher = DefaultMatcher() } + if policy.AntiMatch.Matcher == nil { + policy.AntiMatch.Matcher = DefaultMatcher() + } + if policy.AuthorMatch.Matcher == nil { + policy.AuthorMatch.Matcher = DefaultUniverseMatch() + } + if c, ok := policy.Match.Matcher.(ChangePolicy); ok { + c.ChangePolicy(policy) + } + policy.Position = i + 1 +} - c.re, err = regexp.Compile(c.Pattern) - return c, err +// ParseOldConfig parses a projects .lgtm file +func ParseOldConfig(data []byte) (*Config, error) { + c, err := parseOldConfigStr(string(data)) + if err != nil { + return nil, err + } + // convert to a current config structure + // we don't map Team because it wasn't publically supported before + + regex, err := regexp.Compile(c.Get("pattern").(string)) + if err != nil { + return nil, err + } + + c2 := NonEmptyConfig() + c2.Pattern = rxserde.RegexSerde{Regex: regex} + mm := &MaintainerMatch{ + CommonMatch{ + Approvals: int(c.Get("approvals").(int64)), + Self: !c.Get("self_approval_off").(bool), + }, + } + c2.Approvals[0].Match.Matcher = mm + //clear out unused fields + c2.Tag = TagConfig{} + c2.Deployment = DeployConfig{} + c2.IsOld = true + c2.Maintainers.Type = "legacy" + return c2, nil } -// IsMatch returns true if the text matches the regular -// epxression pattern. -func (c *Config) IsMatch(text string) bool { - if c.re == nil { - // this should never happen - return false +//all of these variables are for old file format support +var ( + oldApprovals = envvars.Env.Old.Approvals + oldPattern = envvars.Env.Old.Pattern + oldSelfApprovalOff = envvars.Env.Old.SelfApprovalOff +) + +// parseOldConfigStr parses a projects .lgtm file in string format. +func parseOldConfigStr(data string) (*toml.Tree, error) { + c, err := toml.Load(data) + if err != nil { + return nil, err + } + if !c.Has("approvals") { + c.Set("approvals", oldApprovals) + } + if !c.Has("pattern") { + c.Set("pattern", oldPattern) + } + if !c.Has("self_approval_off") { + c.Set("self_approval_off", oldSelfApprovalOff) } - return c.re.MatchString(text) + return c, nil } diff --git a/model/config_test.go b/model/config_test.go new file mode 100644 index 0000000..97843bf --- /dev/null +++ b/model/config_test.go @@ -0,0 +1,250 @@ +/* + +SPDX-Copyright: Copyright (c) Capital One Services, LLC +SPDX-License-Identifier: Apache-2.0 +Copyright 2017 Capital One Services, LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and limitations under the License. + +*/ +package model + +import ( + "fmt" + "regexp" + "testing" + + "github.com/capitalone/checks-out/hjson" + + "github.com/stretchr/testify/assert" +) + +func TestParseEmptyConfig(t *testing.T) { + input := []byte{} + _, err := ParseConfig(input, AllowAll()) + if err == nil { + t.Fatal("Empty config did not generate error", err) + } +} + +func TestParseDefaultRegex(t *testing.T) { + c := DefaultConfig() + matcher := c.Pattern.Regex + assert.Equal(t, `(?i)^I approve\s*(version:\s*(?P<version>\S+))?\s*(comment:\s*(?P<comment>.*\S))?\s*`, matcher.String()) + names := matcher.SubexpNames() + fmt.Println(names) + assert.Equal(t, 5, len(names)) + assert.Equal(t, "version", names[2]) + assert.Equal(t, "comment", names[4]) + assert.Nil(t, matcher.FindStringSubmatch("AYUP")) + doRegEx(matcher, t, "I approve", []string{"I approve", "", ""}) + doRegEx(matcher, t, "I approve 1.2", []string{"I approve ", "", ""}) + doRegEx(matcher, t, "I approve version:", []string{"I approve ", "", ""}) + doRegEx(matcher, t, "I approve comment:", []string{"I approve ", "", ""}) + doRegEx(matcher, t, "I approve 1.2 hi there", []string{"I approve ", "", ""}) + doRegEx(matcher, t, "I approve version:1.2", []string{"I approve version:1.2", "1.2", ""}) + doRegEx(matcher, t, "I approve version: 1.2 comment: hi there", []string{"I approve version: 1.2 comment: hi there", "1.2", "hi there"}) + doRegEx(matcher, t, "I approve comment: hi there", []string{"I approve comment: hi there", "", "hi there"}) + doRegEx(matcher, t, "I approve comment:hi there ", []string{"I approve comment:hi there ", "", "hi there"}) +} + +func doRegEx(matcher *regexp.Regexp, t *testing.T, input string, values []string) { + matches := matcher.FindStringSubmatch(input) + fmt.Println(matches) + assert.Equal(t, 5, len(matches)) + assert.Equal(t, values[0], matches[0]) + assert.Equal(t, values[1], matches[2]) + assert.Equal(t, values[2], matches[4]) +} + +func TestParseSimpleConfig(t *testing.T) { + input := []byte(` + approvals: + [ + { + match: "all[count=1,self=false]" + } + ] + merge: + { + enable: true + } + tag: + { + enable: true + } + `) + config, err := ParseConfig(input, AllowAll()) + if err != nil { + t.Fatal("Unable to parse empty configuration", err) + } + assert.Equal(t, true, config.Tag.Enable) + assert.Equal(t, "semver", config.Tag.Alg) + assert.Equal(t, 1, len(config.Approvals)) + assert.Equal(t, 1, config.Approvals[0].Position) + s, err := config.Tag.GenerateTag(TemplateTag{Version: "0.5.0"}) + assert.Nil(t, err) + assert.Equal(t, "0.5.0", s) +} + +func TestParseComplicatedConfig(t *testing.T) { + input := []byte(` +approvals: +[ + { + name: "dev" + scope: + { + branches: [ "dev" ] + } + match: "all[count=1,self=true]" + tag: + { + enable: true + template: "{{.Version}}-dev" + increment: "patch" + } + } + { + name: "master" + scope: + { + branches: [ "master" ] + } + pattern: "(?i)^shipit\\s*(?P<version>\\S*)" + match: "all[count=1,self=false]" + tag: + { + enable: true + template: "{{.Version}}" + increment: "none" + } + } + # Disable checks-out on remaining branches + { + match: "off" + } +] +antipattern: "(?i)^holdit" +merge: +{ + enable: true +} +comment: +{ +
enable: true + targets: [ + { + target: github + types: [ "open" ] + } + { + target: slack + names: [ "#feed" ] + } + ] +} + `) + _, err := ParseConfig(input, AllowAll()) + if err != nil { + t.Fatal("Unable to parse empty configuration", err) + } +} + +func TestOldConfig(t *testing.T) { + input := []byte(` + approvals = 3 + pattern = "LOOKGOOD" + self_approval_off = true + `) + config, err := ParseOldConfig(input) + if err != nil { + t.Fatal("Expected valid file, got", err) + } + mm, ok := config.Approvals[0].Match.Matcher.(*MaintainerMatch) + assert.True(t, ok, "Expected MaintainerMatch") + assert.Equal(t, false, mm.Self) + assert.Equal(t, 3, mm.Approvals) + assert.Equal(t, "LOOKGOOD", config.Pattern.Regex.String()) +} + +func TestOldConfigDefault(t *testing.T) { + input := []byte(` + `) + config, err := ParseOldConfig(input) + if err != nil { + t.Fatal("Expected valid file, got", err) + } + mm, ok := config.Approvals[0].Match.Matcher.(*MaintainerMatch) + assert.True(t, ok, "Expected MaintainerMatch") + assert.Equal(t, true, mm.Self) + assert.Equal(t, 2, mm.Approvals) + assert.Equal(t, "(?i)LGTM", config.Pattern.Regex.String()) +} + +func TestOldConfigConvert(t *testing.T) { + input := []byte(` + approvals = 3 + pattern = "LOOKGOOD" + self_approval_off = true + `) + config, err := ParseOldConfig(input) + if err != nil { + t.Fatal("Expected valid file, got", err) + } + + options := hjson.DefaultOptions() + options.OmitEmptyStructs = true + out, err := hjson.MarshalWithOptions(config, options) + expected := `{ + approvals: + [ + { + scope: + { + } + match: "all[count=3,self=false]" + antimatch: "all[count=1,self=true]" + authormatch: "universe[count=1,self=true]" + } + ] + pattern: "LOOKGOOD" + maintainers: + { + path: MAINTAINERS + type: legacy + } + merge: + { + enable: false + uptodate: true + method: merge + delete: false + } + feedback: + { + types: + [ + "comment" + "review" + ] + } + audit: + { + enable: false + branches: ["master"] + } +}` + if expected != string(out) { + t.Fatalf("Expected\n%v\ngot\n%v\n", expected, string(out)) + } +} diff --git a/model/deployment.go b/model/deployment.go new file mode 100644 index 0000000..5475b0a --- /dev/null +++ b/model/deployment.go @@ -0,0 +1,46 @@ +/* + +SPDX-Copyright: Copyright (c) Capital One Services, LLC +SPDX-License-Identifier: Apache-2.0 +Copyright 2017 Capital One Services, LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and limitations under the License. 
+ +*/ +package model + +import ( + "errors" + + "github.com/capitalone/checks-out/hjson" +) + +type DeploymentConfigs map[string]DeploymentConfig + +type DeploymentConfig struct { + Tasks []string `json:"tasks"` + Environment *string `json:"env"` +} + +type DeploymentInfo struct { + Ref string + Task string + Environment string +} + +func (c *Config) LoadDeploymentMap(deployData []byte) error { + if len(deployData) == 0 { + return errors.New("No content in deployment map") + } + err := hjson.Unmarshal(deployData, &c.Deployment.DeploymentMap) + return err +} diff --git a/model/deployment_test.go b/model/deployment_test.go new file mode 100644 index 0000000..b3f5780 --- /dev/null +++ b/model/deployment_test.go @@ -0,0 +1,101 @@ +/* + +SPDX-Copyright: Copyright (c) Capital One Services, LLC +SPDX-License-Identifier: Apache-2.0 +Copyright 2017 Capital One Services, LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and limitations under the License. + +*/ +package model + +import ( + "testing" +) + +var deployFile = ` +{ + deploy: + { + tasks: [ "t1", "t2", "t3" ] + env: qa + } + master: + { + tasks: [ "t1" ] + env: prod + } + preprod: + { + env: preprod + } + preprod2: + { + tasks: [ "t5" ] + } +} +` + +func TestDeploymentHJSON(t *testing.T) { + c := DefaultConfig() + err := c.LoadDeploymentMap([]byte(deployFile)) + d := c.Deployment.DeploymentMap + if err != nil { + panic(err) + } + if len(d) != 4 { + t.Fatalf("Should have 4 entries in d, only have %d", len(d)) + } + deploy, ok := d["deploy"] + if !ok { + t.Fatalf("should have an entry for deploy") + } + if *deploy.Environment != "qa" { + t.Fatalf("should have qa for environment, had %s", *deploy.Environment) + } + if len(deploy.Tasks) != 3 { + t.Fatalf("should have had 3 entries in tasks, had %d", len(deploy.Tasks)) + } + + master, ok := d["master"] + if !ok { + t.Fatalf("should have an entry for master") + } + if *master.Environment != "prod" { + t.Fatalf("should have prod for environment, had %s", *master.Environment) + } + if len(master.Tasks) != 1 { + t.Fatalf("should have had 1 entry in tasks, had %d", len(master.Tasks)) + } + + preprod, ok := d["preprod"] + if !ok { + t.Fatalf("should have an entry for preprod") + } + if *preprod.Environment != "preprod" { + t.Fatalf("should have prod for environment, had %s", *preprod.Environment) + } + if len(preprod.Tasks) != 0 { + t.Fatalf("should have had 0 entries in tasks, had %d", len(preprod.Tasks)) + } + + preprod2, ok := d["preprod2"] + if !ok { + t.Fatalf("should have an entry for preprod2") + } + if preprod2.Environment != nil { + t.Fatalf("should have no environment, had %s", *preprod2.Environment) + } + if len(preprod2.Tasks) != 1 { + t.Fatalf("should have had 1 entry in tasks, had %d", len(preprod2.Tasks)) + } +} diff --git a/model/feedback.go b/model/feedback.go new file mode 100644 index 0000000..990d293 --- /dev/null +++ b/model/feedback.go @@ -0,0 +1,48 @@ +//go:generate enumer -type=FeedbackType +/* + +SPDX-Copyright: Copyright (c) Capital One Services, LLC +SPDX-License-Identifier: Apache-2.0 +Copyright 2017 Capital One Services, LLC + 
+Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and limitations under the License. + +*/ +package model + +import "encoding/json" + +func DefaultFeedback() FeedbackConfig { + return FeedbackConfig{ + Types: []FeedbackType{CommentType, ReviewType}, + } +} + +// Used to avoid recursion in UnmarshalJSON +type shadowFeedbackConfig FeedbackConfig + +func (c *FeedbackConfig) UnmarshalJSON(text []byte) error { + dummy := shadowFeedbackConfig(DefaultFeedback()) + err := json.Unmarshal(text, &dummy) + if err != nil { + return err + } + *c = FeedbackConfig(dummy) + return nil +} + +type FeedbackType int + +const ( + CommentType FeedbackType = iota + ReviewType +) diff --git a/model/feedback_enumer.go b/model/feedback_enumer.go new file mode 100644 index 0000000..829a3e1 --- /dev/null +++ b/model/feedback_enumer.go @@ -0,0 +1,76 @@ +// Code generated by "enumer -type=FeedbackType"; DO NOT EDIT +/* + +SPDX-Copyright: Copyright (c) Capital One Services, LLC +SPDX-License-Identifier: Apache-2.0 +Copyright 2017 Capital One Services, LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and limitations under the License. + +*/ +package model + +import ( + "encoding/json" + "fmt" +) + +// FeedbackType is an enum. +// It is serialized as a string in JSON and as an integer in SQL. + +// FeedbackType enum maps. +var ( + strMapFeedbackType = map[string]FeedbackType{ + "comment": CommentType, + "review": ReviewType, + } + + intMapFeedbackType = map[FeedbackType]string{ + CommentType: "comment", + ReviewType: "review", + } +) + +// Known says whether or not this value is a known enum value. +func (s FeedbackType) Known() bool { + _, ok := intMapFeedbackType[s] + return ok +} + +// String is for the standard stringer interface. 
+func (s FeedbackType) String() string { + return intMapFeedbackType[s] +} + +// UnmarshalJSON satisfies the json.Unmarshaler +func (s *FeedbackType) UnmarshalJSON(data []byte) error { + str := "" + err := json.Unmarshal(data, &str) + if err != nil { + return err + } + var ok bool + *s, ok = strMapFeedbackType[str] + if !ok { + return fmt.Errorf("Unknown FeedbackType enum value: %s", str) + } + return nil +} + +// MarshalJSON satisfies the json.Marshaler +func (s FeedbackType) MarshalJSON() ([]byte, error) { + if !s.Known() { + return nil, fmt.Errorf("Unknown FeedbackType enum value: %d", int(s)) + } + name := intMapFeedbackType[s] + return json.Marshal(name) +} diff --git a/model/hook.go b/model/hook.go deleted file mode 100644 index 2a171f9..0000000 --- a/model/hook.go +++ /dev/null @@ -1,7 +0,0 @@ -package model - -type Hook struct { - Repo *Repo - Issue *Issue - Comment *Comment -} diff --git a/model/issue.go b/model/issue.go index 1044081..8a851ca 100644 --- a/model/issue.go +++ b/model/issue.go @@ -1,7 +1,29 @@ +/* + +SPDX-Copyright: Copyright (c) Brad Rydzewski, project contributors, Capital One Services, LLC +SPDX-License-Identifier: Apache-2.0 +Copyright 2017 Brad Rydzewski, project contributors, Capital One Services, LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and limitations under the License. + +*/ package model +import ( + "github.com/capitalone/checks-out/strings/lowercase" +) + type Issue struct { Number int Title string - Author string + Author lowercase.String } diff --git a/model/maintainer.go b/model/maintainer.go index b3e9cc8..2859ad3 100644 --- a/model/maintainer.go +++ b/model/maintainer.go @@ -1,13 +1,27 @@ +/* + +SPDX-Copyright: Copyright (c) Brad Rydzewski, project contributors, Capital One Services, LLC +SPDX-License-Identifier: Apache-2.0 +Copyright 2017 Brad Rydzewski, project contributors, Capital One Services, LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and limitations under the License. + +*/ package model import ( - "bufio" - "bytes" "fmt" - "regexp" - "strings" - "github.com/BurntSushi/toml" + "github.com/capitalone/checks-out/set" ) // Person represets an individual in the MAINTAINERS file. @@ -18,173 +32,30 @@ type Person struct { } // Org represents a group, team or subset of users. -type Org struct { - People []string `json:"people" toml:"people"` -} - -// Maintainer represents a MAINTAINERS file. -type Maintainer struct { - People map[string]*Person `json:"people" toml:"people"` - Org map[string]*Org `json:"org" toml:"org"` -} - -// ParseMaintainer parses a projects MAINTAINERS file and returns -// the list of maintainers. 
-func ParseMaintainer(data []byte) (*Maintainer, error) { - return ParseMaintainerStr(string(data)) -} - -// ParseMaintainerStr parses a projects MAINTAINERS file in string -// format and returns the list of maintainers. -func ParseMaintainerStr(data string) (*Maintainer, error) { - m, err := parseMaintainerToml(data) - if err != nil { - m, err = parseMaintainerText(data) - if err != nil { - return nil, err - } - } - return m, nil -} - -// FromOrg returns a new Maintainer file with a subset of people -// that are part of the specified org. -func FromOrg(from *Maintainer, name string) (*Maintainer, error) { - m := new(Maintainer) - m.Org = map[string]*Org{} - m.People = map[string]*Person{} - var members []string - - switch { - case from.Org == nil: - return nil, fmt.Errorf("No organization section") - case from.People == nil: - return nil, fmt.Errorf("No people section") - case len(from.People) == 0: - return nil, fmt.Errorf("No people section") - } - org, ok := from.Org[name] - if !ok { - return nil, fmt.Errorf("No organization section for %s", name) - } - - for _, login := range org.People { - person, ok := from.People[login] - if !ok { - continue - } - m.People[login] = person - members = append(members, person.Login) - } - m.Org["core"] = &Org{members} - return m, nil -} - -func parseMaintainerToml(data string) (*Maintainer, error) { - m := new(Maintainer) - _, err := toml.Decode(data, m) - if err != nil { - return nil, err - } - if m.People == nil { - return nil, fmt.Errorf("Invalid Toml format. Missing people section.") - } - // if the person is defined in the file, but the Login field is - // empty, we can use the map key as the Login value. This is mainly - // here to support Docker projects, which use GitHub instead of Login - for k, v := range m.People { - if len(v.Login) == 0 { - v.Login = k - } - } - return m, nil +type Org interface { + GetPeople() (set.Set, error) } -func parseMaintainerText(data string) (*Maintainer, error) { - m := new(Maintainer) - m.People = map[string]*Person{} - - buf := bytes.NewBufferString(data) - reader := bufio.NewReader(buf) - for { - line, _, err := reader.ReadLine() - if err != nil { - break - } - - item := parseln(string(line)) - if len(item) == 0 { - continue - } - - person := parseLogin(item) - if person == nil { - person = parseLoginMeta(item) - } - if person == nil { - person = parseLoginEmail(item) - } - if person == nil { - return nil, fmt.Errorf("Invalid file format.") - } - - m.People[person.Login] = person - } - return m, nil +type OrgSerde struct { + People set.Set `json:"people" toml:"people"` } -func parseln(s string) string { - if s == "" || string(s[0]) == "#" { - return "" - } - index := strings.Index(s, " #") - if index > -1 { - s = strings.TrimSpace(s[0:index]) - } - return s +// Maintainer represents a MAINTAINERS file. +type Maintainer struct { + RawPeople map[string]*Person `json:"people" toml:"people"` + RawOrg map[string]*OrgSerde `json:"org" toml:"org"` } -// regular expression determines if a line in the maintainers -// file only has the single GitHub username and no other metadata. -var reLogin = regexp.MustCompile("^\\w[\\w-]+$") - -// regular expression determines if a line in the maintainers -// file has the username and metadata. -var reLoginMeta = regexp.MustCompile("(.+) <(.+)> \\(@(.+)\\)") +var MaintTypes = set.New("text", "hjson", "toml", "legacy") -// regular expression determines if a line in the maintainers -// file has the username and email. 
-var reLoginEmail = regexp.MustCompile("(.+) <(.+)>") - -func parseLoginMeta(line string) *Person { - matches := reLoginMeta.FindStringSubmatch(line) - if len(matches) != 4 { - return nil - } - return &Person{ - Name: strings.TrimSpace(matches[1]), - Email: strings.TrimSpace(matches[2]), - Login: strings.TrimSpace(matches[3]), +func validateMaintainerConfig(c *MaintainersConfig) error { + if !MaintTypes.Contains(c.Type) { + return fmt.Errorf("%s is not one of the permitted MAINTAINER types %s", + c.Type, MaintTypes.Keys()) } + return nil } -func parseLoginEmail(line string) *Person { - matches := reLoginEmail.FindStringSubmatch(line) - if len(matches) != 3 { - return nil - } - return &Person{ - Login: strings.TrimSpace(matches[1]), - Email: strings.TrimSpace(matches[2]), - } -} - -func parseLogin(line string) *Person { - line = strings.TrimSpace(line) - if !reLogin.MatchString(line) { - return nil - } - return &Person{ - Login: line, - } +func (o *OrgSerde) GetPeople() (set.Set, error) { + return o.People, nil } diff --git a/model/maintainer_test.go b/model/maintainer_test.go deleted file mode 100644 index af8ee62..0000000 --- a/model/maintainer_test.go +++ /dev/null @@ -1,76 +0,0 @@ -package model - -import "testing" - -func TestParseMaintainer(t *testing.T) { - var files = []string{maintainerFile, maintainerFileEmail, maintainerFileSimple, maintainerFileMixed, maintainerFileToml} - for _, file := range files { - parsed, err := ParseMaintainerStr(file) - if err != nil { - t.Error(err) - return - } - if len(parsed.People) != len(people) { - t.Errorf("Wanted %d maintainers, got %d", len(people), len(parsed.People)) - return - } - for _, want := range people { - got, ok := parsed.People[want.Login] - if !ok { - t.Errorf("Wanted user %s in file", want.Login) - } else if want.Login != got.Login { - t.Errorf("Wanted login %s, got %s", want.Login, got.Login) - } - } - } -} - -var people = []Person{ - {Login: "bradrydzewski"}, - {Login: "mattnorris"}, -} - -var maintainerFile = ` -Brad Rydzewski (@bradrydzewski) -Matt Norris (@mattnorris) -` - -var maintainerFileEmail = ` -bradrydzewski -mattnorris -` - -// simple format with usernames only. includes -// spaces and comments. -var maintainerFileSimple = ` -bradrydzewski -mattnorris` - -// simple format with usernames only. includes -// spaces and comments. -var maintainerFileMixed = ` -bradrydzewski -Matt Norris (@mattnorris) -` - -// advanced toml format for the maintainers file. -var maintainerFileToml = ` -[org] - [org.core] - people = [ - "mattnorris", - "bradrydzewski", - ] - -[people] - - [people.bradrydzewski] - name = "Brad Rydzewski" - email = "brad.rydzewski@mail.com" - login = "bradrydzewski" - - [people.mattnorris] - name = "Matt Norris" - email = "matt.norris@mail.com" - login = "mattnorris" -` diff --git a/model/matcher_translator.go b/model/matcher_translator.go new file mode 100644 index 0000000..2366c8b --- /dev/null +++ b/model/matcher_translator.go @@ -0,0 +1,253 @@ +/* + +SPDX-Copyright: Copyright (c) Capital One Services, LLC +SPDX-License-Identifier: Apache-2.0 +Copyright 2017 Capital One Services, LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and limitations under the License. + +*/ +package model + +import ( + "strconv" + + "github.com/capitalone/checks-out/matcher" + "github.com/capitalone/checks-out/set" + "github.com/capitalone/checks-out/strings/lowercase" + + "github.com/mspiegel/go-multierror" + "github.com/pkg/errors" +) + +func GenerateMatcher(input string) (Matcher, error) { + tokens := matcher.BuildTokens(input) + parseTree, err := matcher.BuildParseTree(tokens) + if err != nil { + return nil, err + } + + return walkTree(parseTree) +} + +func buildAnonymousMatcher(pt *matcher.AnonymousParseToken) (Matcher, error) { + m := DefaultAnonymousMatch() + m.Entities = set.NewLowerFromString(pt.Members...) + err := parseCommonMatch(&m.CommonMatch, pt.Attributes) + return m, err +} + +func buildNounMatcher(pt *matcher.NounParseToken) (Matcher, error) { + // figure out if this is a: + // org name + // special org name (us, them, anyone, people) + // boolean (true, false) + var m Matcher + var err error + switch pt.Name { + case "off": + if len(pt.Attributes) > 0 { + return nil, errors.New("Attributes are not allowed for off") + } + m = &DisableMatch{} + case "true": + if len(pt.Attributes) > 0 { + return nil, errors.New("Attributes are not allowed for true") + } + m = &TrueMatch{} + case "false": + if len(pt.Attributes) > 0 { + return nil, errors.New("Attributes are not allowed for false") + } + m = &FalseMatch{} + case "issue-author": + m = &IssueAuthorMatch{} + case "all": + d := DefaultMaintainerMatch() + m = d + err = parseCommonMatch(&d.CommonMatch, pt.Attributes) + case "universe": + d := DefaultUniverseMatch() + m = d + err = parseCommonMatch(&d.CommonMatch, pt.Attributes) + case "us": + d := DefaultUsMatch() + m = d + err = parseCommonMatch(&d.CommonMatch, pt.Attributes) + case "them": + d := DefaultThemMatch() + m = d + err = parseCommonMatch(&d.CommonMatch, pt.Attributes) + default: + d := DefaultEntityMatch() + d.Entity = lowercase.Create(pt.Name) + m = d + err = parseCommonMatch(&d.CommonMatch, pt.Attributes) + } + if err != nil { + return nil, err + } + return m, nil +} + +func buildAndMatcher(pt *matcher.AndOrParseToken) (Matcher, error) { + cm := &AndMatch{} + child, err := walkTree(pt.Left) + if err != nil { + return nil, err + } + cm.And = append(cm.And, MatcherHolder{Matcher: child}) + child, err = walkTree(pt.Right) + if err != nil { + return nil, err + } + cm.And = append(cm.And, MatcherHolder{Matcher: child}) + return cm, nil +} + +func buildOrMatcher(pt *matcher.AndOrParseToken) (Matcher, error) { + cm := &OrMatch{} + child, err := walkTree(pt.Left) + if err != nil { + return nil, err + } + cm.Or = append(cm.Or, MatcherHolder{Matcher: child}) + child, err = walkTree(pt.Right) + if err != nil { + return nil, err + } + cm.Or = append(cm.Or, MatcherHolder{Matcher: child}) + return cm, nil +} + +func buildAndOrMatcher(pt *matcher.AndOrParseToken) (Matcher, error) { + if pt.JKind == matcher.JOINER_AND { + return buildAndMatcher(pt) + } else if pt.JKind == matcher.JOINER_OR { + return buildOrMatcher(pt) + } else { + return nil, errors.Errorf("Unknown operator %v", pt) + } +} + +func buildNotMatcher(pt *matcher.NotParseToken) (Matcher, error) { + m := 
&NotMatch{} + child, err := walkTree(pt.Child) + if err != nil { + return nil, err + } + m.Not = MatcherHolder{Matcher: child} + return m, nil +} + +func buildFuncMatcher(pt *matcher.FunctionParseToken) (Matcher, error) { + switch pt.Name { + case "atleast": + return buildAtLeastMatcher(pt) + case "author": + return buildAuthorMatcher(pt) + default: + return nil, errors.Errorf("Unknown function '%s'", pt.Name) + } +} + +func getSymbol(pt matcher.ParseToken) (string, error) { + switch noun := pt.(type) { + case *matcher.NounParseToken: + return noun.Name, nil + default: + return "", errors.Errorf("Unable to convert %v to a symbol", pt) + } +} + +func buildAtLeastMatcher(pt *matcher.FunctionParseToken) (Matcher, error) { + var errs error + m := &AtLeastMatch{} + if len(pt.Parameters) == 0 { + return nil, errors.New("atleast() function must have at least one argument") + } + sym, err := getSymbol(pt.Parameters[0]) + if err != nil { + return nil, err + } + count, valid := strconv.Atoi(sym) + if valid != nil { + return nil, errors.Errorf("atleast() function first argument expected number, observed %s", pt.Parameters[0]) + } + m.Approvals = count + for _, e := range pt.Parameters[1:] { + c, err := walkTree(e) + if err != nil { + errs = multierror.Append(errs, err) + } else { + m.Choose = append(m.Choose, MatcherHolder{Matcher: c}) + } + } + return m, errs +} + +func buildAuthorMatcher(pt *matcher.FunctionParseToken) (Matcher, error) { + m := &AuthorMatch{} + if len(pt.Parameters) != 1 { + return nil, errors.New("author() function must have one argument") + } + inner, err := walkTree(pt.Parameters[0]) + if err != nil { + return nil, err + } + m.Inner = MatcherHolder{Matcher: inner} + return m, nil +} + +func parseCommonMatch(m *CommonMatch, attributes map[string]string) error { + maxAllowed := 0 + if approvals, ok := attributes["count"]; ok { + count, valid := strconv.Atoi(approvals) + if valid != nil { + return errors.Errorf("Expected number, found %s for count attribute on %+v", approvals, m) + } + m.Approvals = count + maxAllowed++ + } + if self, ok := attributes["self"]; ok { + s, valid := strconv.ParseBool(self) + if valid != nil { + return errors.Errorf("Expected true or false, found %s for self attribute on %+v", self, m) + } + m.Self = s + maxAllowed++ + } + if len(attributes) > maxAllowed { + return errors.Errorf("Unexpected attributes found on %+v", m) + } + return nil +} + +func walkTree(root matcher.ParseToken) (Matcher, error) { + if root == nil { + return &TrueMatch{}, nil + } + switch pt := root.(type) { + case *matcher.NounParseToken: + return buildNounMatcher(pt) + case *matcher.AnonymousParseToken: + return buildAnonymousMatcher(pt) + case *matcher.AndOrParseToken: + return buildAndOrMatcher(pt) + case *matcher.NotParseToken: + return buildNotMatcher(pt) + case *matcher.FunctionParseToken: + return buildFuncMatcher(pt) + default: + return nil, errors.Errorf("Unknown parse token type: %+v", root) + } +} diff --git a/model/matchpolicy.go b/model/matchpolicy.go new file mode 100644 index 0000000..cf0d013 --- /dev/null +++ b/model/matchpolicy.go @@ -0,0 +1,332 @@ +/* + +SPDX-Copyright: Copyright (c) Capital One Services, LLC +SPDX-License-Identifier: Apache-2.0 +Copyright 2017 Capital One Services, LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and limitations under the License. + +*/ +package model + +import ( + "github.com/capitalone/checks-out/set" +) + +func titleAction(req *ApprovalRequest, f Feedback, proc Processor) bool { + forbid := req.IsTitleMatch() + if !forbid { + proc(f, ValidTitle) + } + return forbid +} + +func authorLimitAction(req *ApprovalRequest, f Feedback, people set.Set, proc Processor) { + people.Add(f.GetAuthor().String()) + proc(f, ValidAuthor) +} + +func authorPolicyAction(req *ApprovalRequest, f Feedback, people set.Set, proc Processor) { + people.Add(f.GetAuthor().String()) +} + +func approvalAction(req *ApprovalRequest, f Feedback, people set.Set, proc Processor) { + author := f.GetAuthor().String() + if f.IsApproval(req) && !people.Contains(author) { + people.Add(author) + proc(f, Approval) + } +} + +func antiMatch(req *ApprovalRequest, f Feedback, people set.Set, proc Processor) { + author := f.GetAuthor().String() + match := f.IsApproval(req) + antiMatch := f.IsDisapproval(req) + if match && antiMatch { + // do nothing if comment indicates approval and disapproval + return + } else if antiMatch && !people.Contains(author) { + people.Add(author) + proc(f, DisapprovalInsert) + } else if match && people.Contains(author) { + people.Remove(author) + proc(f, DisapprovalRemove) + } +} + +func doMatch(candidates set.Set, self bool, min int, req *ApprovalRequest, + proc Processor, action MatchAction, feedback []Feedback) (bool, error) { + + participants := set.Empty() + + for _, f := range feedback { + author := f.GetAuthor().String() + // cannot approve your own pull request + if !self && author == req.PullRequest.Author.String() { + continue + } + // the user must a member of the candidate set + if _, ok := candidates[author]; !ok { + continue + } + action(req, f, participants, proc) + } + return len(participants) >= min, nil +} + +func (match *UniverseMatch) Match(req *ApprovalRequest, proc Processor, a MatchAction, feedback []Feedback) (bool, error) { + candidates := set.Empty() + for _, f := range feedback { + candidates.Add(f.GetAuthor().String()) + } + return doMatch(candidates, match.Self, match.Approvals, req, proc, a, feedback) +} + +func (match *MaintainerMatch) Match(req *ApprovalRequest, proc Processor, a MatchAction, feedback []Feedback) (bool, error) { + candidates := set.Empty() + for k := range req.Maintainer.People { + candidates.Add(k) + } + return doMatch(candidates, match.Self, match.Approvals, req, proc, a, feedback) +} + +func (match *AnonymousMatch) Match(req *ApprovalRequest, proc Processor, a MatchAction, feedback []Feedback) (bool, error) { + candidates := set.Empty() + for entity := range match.Entities { + ent := entity.String() + if org, ok := req.Maintainer.Org[ent]; ok { + people, err := org.GetPeople() + if err != nil { + return false, err + } + candidates.AddAll(people) + } else { + candidates.Add(ent) + } + } + return doMatch(candidates, match.Self, match.Approvals, req, proc, a, feedback) +} + +func (match *EntityMatch) Match(req *ApprovalRequest, proc Processor, a MatchAction, feedback []Feedback) (bool, error) { + candidates := set.Empty() + ent := match.Entity.String() + if org, ok := req.Maintainer.Org[ent]; 
ok { + people, err := org.GetPeople() + if err != nil { + return false, err + } + candidates.AddAll(people) + } else { + candidates.Add(ent) + } + return doMatch(candidates, match.Self, match.Approvals, req, proc, a, feedback) +} + +func (match *UsMatch) Match(req *ApprovalRequest, proc Processor, a MatchAction, feedback []Feedback) (bool, error) { + candidates := set.Empty() + mapping, err := req.Maintainer.PersonToOrg() + if err != nil { + return false, err + } + if orgs, ok := mapping[req.PullRequest.Author.String()]; ok { + for name := range orgs { + if org, ok := req.Maintainer.Org[name]; ok { + people, err := org.GetPeople() + if err != nil { + return false, err + } + candidates.AddAll(people) + } + } + } + return doMatch(candidates, match.Self, match.Approvals, req, proc, a, feedback) +} + +func (match *ThemMatch) Match(req *ApprovalRequest, proc Processor, a MatchAction, feedback []Feedback) (bool, error) { + us := set.Empty() + mapping, err := req.Maintainer.PersonToOrg() + if err != nil { + return false, err + } + if orgs, ok := mapping[req.PullRequest.Author.String()]; ok { + for name := range orgs { + if org, ok := req.Maintainer.Org[name]; ok { + people, err := org.GetPeople() + if err != nil { + return false, err + } + us.AddAll(people) + } + } + } + all := set.Empty() + for k := range req.Maintainer.People { + all.Add(k) + } + candidates := all.Difference(us) + return doMatch(candidates, match.Self, match.Approvals, req, proc, a, feedback) +} + +func (match *AtLeastMatch) Match(req *ApprovalRequest, proc Processor, a MatchAction, feedback []Feedback) (bool, error) { + if len(match.Choose) == 0 { + return false, nil + } + count := 0 + for _, m := range match.Choose { + inner, err := m.Match(req, proc, a, feedback) + if err != nil { + return false, err + } + if inner { + count++ + } + } + return count >= match.Approvals, nil +} + +func (match *AuthorMatch) Match(req *ApprovalRequest, proc Processor, _ MatchAction, feedback []Feedback) (bool, error) { + authorReq := *req + authorComment := Comment{Author: req.PullRequest.Author} + authorReq.ApprovalComments = []Feedback{&authorComment} + return match.Inner.Match(&authorReq, proc, authorPolicyAction, authorReq.ApprovalComments) +} + +func (match *IssueAuthorMatch) Match(req *ApprovalRequest, proc Processor, a MatchAction, feedback []Feedback) (bool, error) { + candidates := set.Empty() + self := req.PullRequest.Author + for _, issue := range req.Issues { + author := issue.Author + if self != author { + candidates.Add(author.String()) + } + } + return doMatch(candidates, false, len(candidates), req, proc, a, feedback) +} + +func (match *AndMatch) Match(req *ApprovalRequest, proc Processor, a MatchAction, feedback []Feedback) (bool, error) { + if len(match.And) == 0 { + return false, nil + } + result := true + for _, m := range match.And { + inner, err := m.Match(req, proc, a, feedback) + if err != nil { + return false, err + } + result = inner && result + } + return result, nil +} + +func (match *OrMatch) Match(req *ApprovalRequest, proc Processor, a MatchAction, feedback []Feedback) (bool, error) { + if len(match.Or) == 0 { + return false, nil + } + result := false + for _, m := range match.Or { + inner, err := m.Match(req, proc, a, feedback) + if err != nil { + return false, err + } + result = inner || result + } + return result, nil +} + +func (match *NotMatch) Match(req *ApprovalRequest, proc Processor, a MatchAction, feedback []Feedback) (bool, error) { + inner, err := match.Not.Match(req, proc, a, feedback) + return !inner, 
err +} + +func (match *TrueMatch) Match(req *ApprovalRequest, proc Processor, a MatchAction, feedback []Feedback) (bool, error) { + return true, nil +} + +func (match *FalseMatch) Match(req *ApprovalRequest, proc Processor, a MatchAction, feedback []Feedback) (bool, error) { + return false, nil +} + +func (match *DisableMatch) Match(req *ApprovalRequest, proc Processor, a MatchAction, feedback []Feedback) (bool, error) { + return true, nil +} + +func (match *DisableMatch) ChangePolicy(policy *ApprovalPolicy) { + if policy.Merge == nil { + m := DefaultMerge() + policy.Merge = &m + } + if policy.Tag == nil { + tag := DefaultTag() + policy.Tag = &tag + } + policy.AntiMatch.Matcher = &FalseMatch{} +} + +func (match *MaintainerMatch) GetType() string { + return "all" +} + +func (match *UniverseMatch) GetType() string { + return "universe" +} + +func (match *UsMatch) GetType() string { + return "us" +} + +func (match *ThemMatch) GetType() string { + return "them" +} + +func (match *AnonymousMatch) GetType() string { + return "anonymous" +} + +func (match *EntityMatch) GetType() string { + return "entity" +} + +func (match *NotMatch) GetType() string { + return "not" +} + +func (match *AndMatch) GetType() string { + return "and" +} + +func (match *OrMatch) GetType() string { + return "or" +} + +func (match *AtLeastMatch) GetType() string { + return "atleast" +} + +func (match *AuthorMatch) GetType() string { + return "author" +} + +func (match *IssueAuthorMatch) GetType() string { + return "issue-author" +} + +func (match *TrueMatch) GetType() string { + return "true" +} + +func (match *FalseMatch) GetType() string { + return "false" +} + +func (match *DisableMatch) GetType() string { + return "off" +} diff --git a/model/matchpolicy_test.go b/model/matchpolicy_test.go new file mode 100644 index 0000000..708629b --- /dev/null +++ b/model/matchpolicy_test.go @@ -0,0 +1,705 @@ +/* + +SPDX-Copyright: Copyright (c) Capital One Services, LLC +SPDX-License-Identifier: Apache-2.0 +Copyright 2017 Capital One Services, LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and limitations under the License. 
+ +*/ +package model + +import ( + "reflect" + "regexp" + "testing" + + "github.com/capitalone/checks-out/set" + "github.com/capitalone/checks-out/strings/lowercase" + "github.com/capitalone/checks-out/strings/rxserde" +) + +func createRequest() *ApprovalRequest { + request := &ApprovalRequest{} + request.Maintainer = &MaintainerSnapshot{} + request.PullRequest = &PullRequest{} + request.Maintainer.People = map[string]*Person{ + "alice": &Person{Name: "alice"}, + "bob": &Person{Name: "bob"}, + "carol": &Person{Name: "carol"}, + "dan": &Person{Name: "dan"}, + } + request.Maintainer.Org = map[string]Org{ + "guelph": &OrgSerde{People: map[string]bool{"alice": true, "bob": true}}, + "ghibelline": &OrgSerde{People: map[string]bool{"carol": true, "dan": true}}, + } + request.PullRequest.Author = lowercase.Create("alice") + request.PullRequest.Branch.BaseName = "master" + request.ApprovalComments = []Feedback{ + &Comment{Author: lowercase.Create("Alice"), Body: "I approve"}, + &Comment{Author: lowercase.Create("Bob"), Body: "I approve"}, + &Comment{Author: lowercase.Create("bob"), Body: "I approve"}, + &Comment{Author: lowercase.Create("CAROL"), Body: "I approve"}, + &Comment{Author: lowercase.Create("mystery"), Body: "mystery"}, + &Review{Author: lowercase.Create("dan"), State: lowercase.Create("approved")}, + &Comment{Author: lowercase.Create("foobar"), Body: "I approve"}, + } + request.DisapprovalComments = request.ApprovalComments + request.Config = NonEmptyConfig() + request.Issues = []*Issue{ + &Issue{Author: lowercase.Create("Alice")}, + &Issue{Author: lowercase.Create("Bob")}, + } + return request +} + +func TestAnyoneMatch(t *testing.T) { + request := createRequest() + universe := UniverseMatch{} + universe.Approvals = 5 + universe.Self = true + match := MatcherHolder{&universe} + request.Config.Approvals[0].Match = match + approvers := set.Empty() + policy := FindApprovalPolicy(request) + success, _ := Approve(request, policy, func(f Feedback, op ApprovalOp) { + if op == Approval { + approvers.Add(f.GetAuthor().String()) + } + }) + if success == false { + t.Error("match failure") + } + if !reflect.DeepEqual(approvers, set.New("alice", "bob", "carol", "dan", "foobar")) { + t.Error("Approvers set generated incorrectly", approvers) + } +} + +func TestAuthorPolicy(t *testing.T) { + request := createRequest() + m := AnonymousMatch{} + m.Approvals = 1 + m.Self = true + m.Entities = set.NewLowerFromString("alice", "bob", "foo", "bar") + author := AuthorMatch{} + author.Inner = MatcherHolder{Matcher: &m} + request.Config.Approvals[0].Match = MatcherHolder{Matcher: &author} + approvers := set.Empty() + policy := FindApprovalPolicy(request) + success, _ := Approve(request, policy, func(f Feedback, op ApprovalOp) { + if op == Approval { + approvers.Add(f.GetAuthor().String()) + } + }) + if success == false { + t.Error("match failure") + } + if len(approvers) != 0 { + t.Error("approvers set should be empty") + } + m = AnonymousMatch{} + m.Approvals = 1 + m.Self = true + m.Entities = set.NewLowerFromString("bob", "foo", "bar") + author = AuthorMatch{} + author.Inner = MatcherHolder{Matcher: &m} + request.Config.Approvals[0].Match = MatcherHolder{Matcher: &author} + approvers = set.Empty() + policy = FindApprovalPolicy(request) + success, _ = Approve(request, policy, func(f Feedback, op ApprovalOp) { + if op == Approval { + approvers.Add(f.GetAuthor().String()) + } + }) + if success == true { + t.Error("match success") + } + if len(approvers) != 0 { + t.Error("approvers set should be empty") + } +} + 
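+// TestAnonymousMatch checks that an anonymous entity list approves only when
+// enough of the listed logins have actually approved the pull request.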
+func TestAnonymousMatch(t *testing.T) { + request := createRequest() + m := AnonymousMatch{} + m.Approvals = 4 + m.Self = true + m.Entities = set.NewLowerFromString("foo", "bar", "baz") + match := MatcherHolder{&m} + request.Config.Approvals[0].Match = match + approvers := set.Empty() + policy := FindApprovalPolicy(request) + success, _ := Approve(request, policy, func(f Feedback, op ApprovalOp) { + if op == Approval { + approvers.Add(f.GetAuthor().String()) + } + }) + if success == true { + t.Error("match did not fail") + } + m.Entities = set.NewLowerFromString("alice", "bob", "carol", "dan", "foo", "bar") + request.Config.Approvals[0].Match = match + approvers = set.Empty() + policy = FindApprovalPolicy(request) + success, _ = Approve(request, policy, func(f Feedback, op ApprovalOp) { + if op == Approval { + approvers.Add(f.GetAuthor().String()) + } + }) + if success == false { + t.Error("match did not succeed") + } + if !reflect.DeepEqual(approvers, set.New("alice", "bob", "carol", "dan")) { + t.Error("Approvers set generated incorrectly", approvers) + } +} + +func TestAnonymousUserAndGroupMatch(t *testing.T) { + request := createRequest() + m := AnonymousMatch{} + m.Approvals = 4 + m.Self = true + m.Entities = set.NewLowerFromString("guelph", "alice") + match := MatcherHolder{&m} + request.Config.Approvals[0].Match = match + approvers := set.Empty() + policy := FindApprovalPolicy(request) + success, _ := Approve(request, policy, func(f Feedback, op ApprovalOp) { + if op == Approval { + approvers.Add(f.GetAuthor().String()) + } + }) + if success == true { + t.Error("match did not fail") + } + m.Entities = set.NewLowerFromString("guelph", "alice", "carol", "dan", "foo", "bar") + request.Config.Approvals[0].Match = match + approvers = set.Empty() + policy = FindApprovalPolicy(request) + success, _ = Approve(request, policy, func(f Feedback, op ApprovalOp) { + if op == Approval { + approvers.Add(f.GetAuthor().String()) + } + }) + if success == false { + t.Error("match did not succeed") + } + if !reflect.DeepEqual(approvers, set.New("alice", "bob", "carol", "dan")) { + t.Error("Approvers set generated incorrectly", approvers) + } +} + +func TestIssueAuthorMatch(t *testing.T) { + request := createRequest() + m := IssueAuthorMatch{} + request.Config.Approvals[0].Match = MatcherHolder{&m} + approvers := set.Empty() + policy := FindApprovalPolicy(request) + success, _ := Approve(request, policy, func(f Feedback, op ApprovalOp) { + if op == Approval { + approvers.Add(f.GetAuthor().String()) + } + }) + if success == false { + t.Error("match did not succeed") + } + request.Issues = append(request.Issues, &Issue{Author: lowercase.Create("david")}) + approvers = set.Empty() + policy = FindApprovalPolicy(request) + success, _ = Approve(request, policy, func(f Feedback, op ApprovalOp) { + if op == Approval { + approvers.Add(f.GetAuthor().String()) + } + }) + if success == true { + t.Error("match did not fail") + } +} + +func TestAuthorMatch(t *testing.T) { + request := createRequest() + m := MaintainerMatch{} + a := AnonymousMatch{} + a.Approvals = 1 + a.Entities = set.NewLowerFromString("bob") + a.Self = true + m.Approvals = 4 + m.Self = true + request.Config.Approvals[0].Match = MatcherHolder{&m} + request.Config.Approvals[0].AuthorMatch = &MatcherHolder{&a} + approvers := set.Empty() + author := false + policy := FindApprovalPolicy(request) + success, _ := Approve(request, policy, func(f Feedback, op ApprovalOp) { + if op == Approval { + approvers.Add(f.GetAuthor().String()) + } else if op == 
ValidAuthor { + author = true + } + }) + if success == true { + t.Error("match did not fail") + } + if author == true { + t.Error("author match did not fail") + } + a.Entities = set.NewLowerFromString("alice", "bob") + approvers = set.Empty() + author = false + policy = FindApprovalPolicy(request) + success, _ = Approve(request, policy, func(f Feedback, op ApprovalOp) { + if op == Approval { + approvers.Add(f.GetAuthor().String()) + } else if op == ValidAuthor { + author = true + } + }) + if success == false { + t.Error("match did not succeed") + } + if author == false { + t.Error("author match did not succeed") + } +} + +func TestMaintainersMatch(t *testing.T) { + request := createRequest() + m := MaintainerMatch{} + m.Approvals = 5 + m.Self = true + match := MatcherHolder{&m} + request.Config.Approvals[0].Match = match + approvers := set.Empty() + policy := FindApprovalPolicy(request) + success, _ := Approve(request, policy, func(f Feedback, op ApprovalOp) { + if op == Approval { + approvers.Add(f.GetAuthor().String()) + } + }) + if success == true { + t.Error("match did not fail") + } + m.Approvals = 4 + m.Self = true + match = MatcherHolder{&m} + request.Config.Approvals[0].Match = match + approvers = set.Empty() + policy = FindApprovalPolicy(request) + success, _ = Approve(request, policy, func(f Feedback, op ApprovalOp) { + if op == Approval { + approvers.Add(f.GetAuthor().String()) + } + }) + if success == false { + t.Error("match did not succeed") + } + if !reflect.DeepEqual(approvers, set.New("alice", "bob", "carol", "dan")) { + t.Error("Approvers set generated incorrectly", approvers) + } +} + +func TestEntityGroupMatch(t *testing.T) { + request := createRequest() + m := EntityMatch{} + m.Entity = lowercase.Create("guelph") + m.Approvals = 2 + m.Self = false + match := MatcherHolder{&m} + request.Config.Approvals[0].Match = match + approvers := set.Empty() + policy := FindApprovalPolicy(request) + success, _ := Approve(request, policy, func(f Feedback, op ApprovalOp) { + if op == Approval { + approvers.Add(f.GetAuthor().String()) + } + }) + if success == true { + t.Error("match did not fail") + } + m.Approvals = 2 + m.Self = true + match = MatcherHolder{&m} + request.Config.Approvals[0].Match = match + approvers = set.Empty() + policy = FindApprovalPolicy(request) + success, _ = Approve(request, policy, func(f Feedback, op ApprovalOp) { + if op == Approval { + approvers.Add(f.GetAuthor().String()) + } + }) + if success == false { + t.Error("match did not succeed") + } + if !reflect.DeepEqual(approvers, set.New("alice", "bob")) { + t.Error("Approvers set generated incorrectly", approvers) + } +} + +func TestEntityIndividualMatch(t *testing.T) { + request := createRequest() + m := EntityMatch{} + m.Entity = lowercase.Create("alice") + m.Approvals = 1 + m.Self = false + match := MatcherHolder{&m} + request.Config.Approvals[0].Match = match + approvers := set.Empty() + policy := FindApprovalPolicy(request) + success, _ := Approve(request, policy, func(f Feedback, op ApprovalOp) { + if op == Approval { + approvers.Add(f.GetAuthor().String()) + } + }) + if success == true { + t.Error("match did not fail") + } + m.Approvals = 1 + m.Self = true + match = MatcherHolder{&m} + request.Config.Approvals[0].Match = match + approvers = set.Empty() + policy = FindApprovalPolicy(request) + success, _ = Approve(request, policy, func(f Feedback, op ApprovalOp) { + if op == Approval { + approvers.Add(f.GetAuthor().String()) + } + }) + if success == false { + t.Error("match did not succeed") + } + if 
!reflect.DeepEqual(approvers, set.New("alice")) { + t.Error("Approvers set generated incorrectly", approvers) + } +} + +func TestUsMatch(t *testing.T) { + request := createRequest() + m := UsMatch{} + m.Approvals = 3 + m.Self = true + match := MatcherHolder{&m} + request.Config.Approvals[0].Match = match + approvers := set.Empty() + policy := FindApprovalPolicy(request) + success, _ := Approve(request, policy, func(f Feedback, op ApprovalOp) { + if op == Approval { + approvers.Add(f.GetAuthor().String()) + } + }) + if success == true { + t.Error("match did not fail") + } + m.Approvals = 2 + m.Self = true + match = MatcherHolder{&m} + request.Config.Approvals[0].Match = match + approvers = set.Empty() + policy = FindApprovalPolicy(request) + success, _ = Approve(request, policy, func(f Feedback, op ApprovalOp) { + if op == Approval { + approvers.Add(f.GetAuthor().String()) + } + }) + if success == false { + t.Error("match did not succeed") + } + if !reflect.DeepEqual(approvers, set.New("alice", "bob")) { + t.Error("Approvers set generated incorrectly", approvers) + } +} + +func TestThemMatch(t *testing.T) { + request := createRequest() + m := ThemMatch{} + m.Approvals = 3 + m.Self = true + match := MatcherHolder{&m} + request.Config.Approvals[0].Match = match + approvers := set.Empty() + policy := FindApprovalPolicy(request) + success, _ := Approve(request, policy, func(f Feedback, op ApprovalOp) { + if op == Approval { + approvers.Add(f.GetAuthor().String()) + } + }) + if success == true { + t.Error("match did not fail") + } + m.Approvals = 2 + m.Self = true + match = MatcherHolder{&m} + request.Config.Approvals[0].Match = match + approvers = set.Empty() + policy = FindApprovalPolicy(request) + success, _ = Approve(request, policy, func(f Feedback, op ApprovalOp) { + if op == Approval { + approvers.Add(f.GetAuthor().String()) + } + }) + if success == false { + t.Error("match did not succeed") + } + if !reflect.DeepEqual(approvers, set.New("carol", "dan")) { + t.Error("Approvers set generated incorrectly", approvers) + } +} + +func TestTrueMatch(t *testing.T) { + request := createRequest() + match := MatcherHolder{&TrueMatch{}} + request.Config.Approvals[0].Match = match + policy := FindApprovalPolicy(request) + success, _ := Approve(request, policy, func(f Feedback, _ ApprovalOp) {}) + if success == false { + t.Error("match did not succeed") + } +} + +func TestDisableMatch(t *testing.T) { + request := createRequest() + match := MatcherHolder{&DisableMatch{}} + request.Config.Approvals[0].Match = match + policy := FindApprovalPolicy(request) + success, _ := Approve(request, policy, func(f Feedback, _ ApprovalOp) {}) + if success == false { + t.Error("match did not succeed") + } +} + +func TestFalseMatch(t *testing.T) { + request := createRequest() + match := MatcherHolder{&FalseMatch{}} + request.Config.Approvals[0].Match = match + policy := FindApprovalPolicy(request) + success, _ := Approve(request, policy, func(f Feedback, _ ApprovalOp) {}) + if success == true { + t.Error("match did not fail") + } +} + +func TestNotMatch(t *testing.T) { + request := createRequest() + match := MatcherHolder{&NotMatch{Not: MatcherHolder{&FalseMatch{}}}} + request.Config.Approvals[0].Match = match + policy := FindApprovalPolicy(request) + success, _ := Approve(request, policy, func(f Feedback, _ ApprovalOp) {}) + if success == false { + t.Error("match did not succeed") + } +} + +func TestAndMatch(t *testing.T) { + request := createRequest() + match := MatcherHolder{ + &AndMatch{And: []MatcherHolder{ + 
{&FalseMatch{}}, + {&TrueMatch{}}}}} + request.Config.Approvals[0].Match = match + policy := FindApprovalPolicy(request) + success, _ := Approve(request, policy, func(f Feedback, _ ApprovalOp) {}) + if success == true { + t.Error("match did not fail") + } + match = MatcherHolder{ + &AndMatch{And: []MatcherHolder{ + {&TrueMatch{}}, + {&TrueMatch{}}}}} + request.Config.Approvals[0].Match = match + policy = FindApprovalPolicy(request) + success, _ = Approve(request, policy, func(f Feedback, _ ApprovalOp) {}) + if success == false { + t.Error("match did not succeed") + } +} + +func TestOrMatch(t *testing.T) { + request := createRequest() + match := MatcherHolder{ + &OrMatch{Or: []MatcherHolder{ + {&FalseMatch{}}, + {&FalseMatch{}}}}} + request.Config.Approvals[0].Match = match + policy := FindApprovalPolicy(request) + success, _ := Approve(request, policy, func(f Feedback, _ ApprovalOp) {}) + if success == true { + t.Error("match did not fail") + } + match = MatcherHolder{ + &OrMatch{Or: []MatcherHolder{ + MatcherHolder{&FalseMatch{}}, + MatcherHolder{&TrueMatch{}}}}} + request.Config.Approvals[0].Match = match + policy = FindApprovalPolicy(request) + success, _ = Approve(request, policy, func(f Feedback, _ ApprovalOp) {}) + if success == false { + t.Error("match did not succeed") + } +} + +func TestDisapprovalComments(t *testing.T) { + request := createRequest() + request.ApprovalComments[3] = &Comment{Author: lowercase.Create("carol"), Body: "NONONO"} + request.Config.AntiPattern = &rxserde.RegexSerde{Regex: regexp.MustCompile("NONONO")} + universe := MaintainerMatch{} + universe.Approvals = 4 + universe.Self = true + match := MatcherHolder{&universe} + request.Config.Approvals[0].Match = match + request.Config.Approvals[0].AntiMatch = &match + approvers := set.Empty() + disapprovers := set.Empty() + + //copy of closure from web/approval.go + approverFunc := func(f Feedback, op ApprovalOp) { + author := f.GetAuthor().String() + switch op { + case Approval: + approvers.Add(author) + case DisapprovalInsert: + disapprovers.Add(author) + case DisapprovalRemove: + disapprovers.Remove(author) + case ValidAuthor: + // do nothing + case ValidTitle: + // do nothing + default: + t.Fatalf("Unknown approval operation %d", op) + } + } + policy := FindApprovalPolicy(request) + success, _ := Approve(request, policy, approverFunc) + if success == true { + t.Error("match did not fail") + } + if !reflect.DeepEqual(approvers, set.New("alice", "bob", "dan")) { + t.Error("Approvers set generated incorrectly", approvers) + } + if !reflect.DeepEqual(disapprovers, set.New("carol")) { + t.Error("disapprovers set generated incorrectly", disapprovers) + } + request.ApprovalComments = append(request.ApprovalComments, &Comment{Author: lowercase.Create("carol"), Body: "I approve"}) + request.DisapprovalComments = request.ApprovalComments + policy = FindApprovalPolicy(request) + success, _ = Approve(request, policy, approverFunc) + if success == false { + t.Error("match did not succeed") + } + if !reflect.DeepEqual(approvers, set.New("alice", "bob", "carol", "dan")) { + t.Error("Approvers set generated incorrectly", approvers) + } + if !reflect.DeepEqual(disapprovers, set.New()) { + t.Error("disapprovers set generated incorrectly", disapprovers) + } +} + +func TestDisapprovalReview(t *testing.T) { + request := createRequest() + request.ApprovalComments[3] = &Review{Author: lowercase.Create("carol"), State: lowercase.Create("changes_requested")} + universe := MaintainerMatch{} + universe.Approvals = 4 + universe.Self = 
true + match := MatcherHolder{&universe} + request.Config.Approvals[0].Match = match + request.Config.Approvals[0].AntiMatch = &match + approvers := set.Empty() + disapprovers := set.Empty() + + //copy of closure from web/approval.go + approverFunc := func(f Feedback, op ApprovalOp) { + author := f.GetAuthor().String() + switch op { + case Approval: + approvers.Add(author) + case DisapprovalInsert: + disapprovers.Add(author) + case DisapprovalRemove: + disapprovers.Remove(author) + case ValidAuthor: + // do nothing + case ValidTitle: + // do nothing + default: + t.Fatalf("Unknown approval operation %d", op) + } + } + policy := FindApprovalPolicy(request) + success, _ := Approve(request, policy, approverFunc) + if success == true { + t.Error("match did not fail") + } + if !reflect.DeepEqual(approvers, set.New("alice", "bob", "dan")) { + t.Error("Approvers set generated incorrectly", approvers) + } + if !reflect.DeepEqual(disapprovers, set.New("carol")) { + t.Error("disapprovers set generated incorrectly", disapprovers) + } + request.ApprovalComments = append(request.ApprovalComments, &Comment{Author: lowercase.Create("carol"), Body: "I approve"}) + request.DisapprovalComments = request.ApprovalComments + policy = FindApprovalPolicy(request) + success, _ = Approve(request, policy, approverFunc) + if success == false { + t.Error("match did not succeed") + } + if !reflect.DeepEqual(approvers, set.New("alice", "bob", "carol", "dan")) { + t.Error("Approvers set generated incorrectly", approvers) + } + if !reflect.DeepEqual(disapprovers, set.New()) { + t.Error("disapprovers set generated incorrectly", disapprovers) + } +} + +func TestTitleMatch(t *testing.T) { + request := createRequest() + request.PullRequest = &PullRequest{} + request.PullRequest.Title = "WIP: An experiment" + request.Config.AntiTitle = &rxserde.RegexSerde{Regex: regexp.MustCompile("^WIP")} + m := AnonymousMatch{} + m.Approvals = 4 + m.Self = true + m.Entities = set.NewLowerFromString("foo", "bar", "baz") + match := MatcherHolder{&m} + m.Entities = set.NewLowerFromString("alice", "bob", "carol", "dan", "foo", "bar") + request.Config.Approvals[0].Match = match + approvers := set.Empty() + validTitle := false + policy := FindApprovalPolicy(request) + success, _ := Approve(request, policy, func(f Feedback, op ApprovalOp) { + if op == Approval { + approvers.Add(f.GetAuthor().String()) + } else if op == ValidTitle { + validTitle = true + } + }) + if success == true { + t.Error("match did not fail") + } + if validTitle == true { + t.Error("validTitle did not fail") + } + request.PullRequest.Title = "An experiment" + validTitle = false + policy = FindApprovalPolicy(request) + success, _ = Approve(request, policy, func(f Feedback, op ApprovalOp) { + if op == Approval { + approvers.Add(f.GetAuthor().String()) + } else if op == ValidTitle { + validTitle = true + } + }) + if success == false { + t.Error("match did not succeed") + } + if validTitle == false { + t.Error("validTitle did not succeed") + } +} diff --git a/model/merge.go b/model/merge.go new file mode 100644 index 0000000..b10ebc8 --- /dev/null +++ b/model/merge.go @@ -0,0 +1,43 @@ +/* + +SPDX-Copyright: Copyright (c) Capital One Services, LLC +SPDX-License-Identifier: Apache-2.0 +Copyright 2017 Capital One Services, LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and limitations under the License. + +*/ +package model + +import "encoding/json" + +func DefaultMerge() MergeConfig { + return MergeConfig{ + Enable: false, + UpToDate: true, + Method: "merge", + Delete: false, + } +} + +// Used to avoid recursion in UnmarshalJSON +type shadowMergeConfig MergeConfig + +func (m *MergeConfig) UnmarshalJSON(text []byte) error { + dummy := shadowMergeConfig(DefaultMerge()) + err := json.Unmarshal(text, &dummy) + if err != nil { + return err + } + *m = MergeConfig(dummy) + return nil +} diff --git a/model/org.go b/model/org.go new file mode 100644 index 0000000..fd237e7 --- /dev/null +++ b/model/org.go @@ -0,0 +1,26 @@ +/* + +SPDX-Copyright: Copyright (c) Capital One Services, LLC +SPDX-License-Identifier: Apache-2.0 +Copyright 2017 Capital One Services, LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and limitations under the License. + +*/ +package model + +type GitHubOrg struct { + Login string `json:"login"` + Avatar string `json:"avatar"` + Enabled bool `json:"enabled"` + Admin bool `json:"admin"` +} diff --git a/model/repo.go b/model/repo.go index b4c618f..ce1cc77 100644 --- a/model/repo.go +++ b/model/repo.go @@ -1,3 +1,21 @@ +/* + +SPDX-Copyright: Copyright (c) Brad Rydzewski, project contributors, Capital One Services, LLC +SPDX-License-Identifier: Apache-2.0 +Copyright 2017 Brad Rydzewski, project contributors, Capital One Services, LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and limitations under the License. 
+ +*/ package model type Repo struct { @@ -9,6 +27,7 @@ type Repo struct { Link string `json:"link_url" meddler:"repo_link"` Private bool `json:"private" meddler:"repo_private"` Secret string `json:"-" meddler:"repo_secret"` + Org bool `json:"org" meddler:"repo_org"` } type Perm struct { @@ -16,3 +35,13 @@ type Perm struct { Push bool Admin bool } + + +type OrgDb struct { + ID int64 `json:"id,omitempty" meddler:"org_id,pk"` + UserID int64 `json:"-" meddler:"org_user_id"` + Owner string `json:"owner" meddler:"org_owner"` + Link string `json:"link_url" meddler:"org_link"` + Private bool `json:"private" meddler:"org_private"` + Secret string `json:"-" meddler:"org_secret"` +} diff --git a/model/review.go b/model/review.go new file mode 100644 index 0000000..3343b1a --- /dev/null +++ b/model/review.go @@ -0,0 +1,55 @@ +/* + +SPDX-Copyright: Copyright (c) Capital One Services, LLC +SPDX-License-Identifier: Apache-2.0 +Copyright 2017 Capital One Services, LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and limitations under the License. + +*/ +package model + +import ( + "time" + + "github.com/capitalone/checks-out/strings/lowercase" +) + +type Review struct { + ID int + Author lowercase.String + Body string + SubmittedAt time.Time + State lowercase.String +} + +// IsApproval returns true if the review has been approved +func (r *Review) IsApproval(req *ApprovalRequest) bool { + return r.State.String() == "approved" +} + +// IsDisapproval returns true if changes have been requested +func (r *Review) IsDisapproval(req *ApprovalRequest) bool { + return r.State.String() == "changes_requested" +} + +func (r *Review) GetAuthor() lowercase.String { + return r.Author +} + +func (r *Review) GetBody() string { + return r.Body +} + +func (r *Review) GetSubmittedAt() time.Time { + return r.SubmittedAt +} diff --git a/model/scopepolicy.go b/model/scopepolicy.go new file mode 100644 index 0000000..4ce4fa7 --- /dev/null +++ b/model/scopepolicy.go @@ -0,0 +1,110 @@ +/* + +SPDX-Copyright: Copyright (c) Capital One Services, LLC +SPDX-License-Identifier: Apache-2.0 +Copyright 2017 Capital One Services, LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and limitations under the License. 
+ +*/ +package model + +import ( + "github.com/capitalone/checks-out/strings/miniglob" + "github.com/capitalone/checks-out/strings/rxserde" + + log "github.com/Sirupsen/logrus" +) + +func fileMatch(globs []miniglob.MiniGlob, filename string) bool { + for _, g := range globs { + if g.Regex.MatchString(filename) { + return true + } + } + return false +} + +func matchesRegexp(exprs []rxserde.RegexSerde, candidate string) bool { + for _, expr := range exprs { + if expr.Regex.MatchString(candidate) { + return true + } + } + return false +} + +// O(n^2) algorithm. Consider rewriting if performance becomes an issue +func matchesPaths(globs []miniglob.MiniGlob, files []CommitFile) bool { + if len(files) == 0 { + return false + } + for _, f := range files { + if !fileMatch(globs, f.Filename) { + return false + } + } + return true +} + +// O(n^2) algorithm. Consider rewriting if performance becomes an issue +func matchesPathsRegexp(exprs []rxserde.RegexSerde, files []CommitFile) bool { + if len(files) == 0 { + return false + } + for _, f := range files { + if !matchesRegexp(exprs, f.Filename) { + return false + } + } + return true +} + +func matchesScope(branch *Branch, scope *ApprovalScope, files []CommitFile) bool { + paths := true + branches := true + pathRegexp := true + baseRegexp := true + compareRegexp := true + if len(scope.Paths) > 0 { + paths = matchesPaths(scope.Paths, files) + } + if len(scope.Branches) > 0 { + branches = scope.Branches.Contains(branch.BaseName) + } + if len(scope.PathRegexp) > 0 { + pathRegexp = matchesPathsRegexp(scope.PathRegexp, files) + } + if len(scope.BaseRegexp) > 0 { + baseRegexp = matchesRegexp(scope.BaseRegexp, branch.BaseName) + } + if len(scope.CompareRegexp) > 0 { + compareRegexp = matchesRegexp(scope.CompareRegexp, branch.CompareName) + } + return paths && branches && pathRegexp && baseRegexp && compareRegexp +} + +var internalErrorPolicy = ApprovalPolicy{ + Scope: &ApprovalScope{}, + Match: MatcherHolder{&FalseMatch{}}, +} + +func FindApprovalPolicy(req *ApprovalRequest) *ApprovalPolicy { + for _, approval := range req.Config.Approvals { + if matchesScope(&req.PullRequest.Branch, approval.Scope, req.Files) { + return approval + } + } + log.Warnf("Internal error. repo %s does not have a default scope.", + req.Repository.Name) + return &internalErrorPolicy +} diff --git a/model/scopepolicy_test.go b/model/scopepolicy_test.go new file mode 100644 index 0000000..4285fac --- /dev/null +++ b/model/scopepolicy_test.go @@ -0,0 +1,133 @@ +/* + +SPDX-Copyright: Copyright (c) Capital One Services, LLC +SPDX-License-Identifier: Apache-2.0 +Copyright 2017 Capital One Services, LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and limitations under the License. 
+ +*/ +package model + +import ( + "regexp" + "testing" + + "github.com/capitalone/checks-out/strings/miniglob" + "github.com/capitalone/checks-out/strings/rxserde" + + "github.com/capitalone/checks-out/set" +) + +func TestMatchesBranch(t *testing.T) { + req := createRequest() + emptyScope := ApprovalScope{} + if !matchesScope(&req.PullRequest.Branch, &emptyScope, []CommitFile{}) { + t.Error("Empty scope should match against request") + } + masterScope := ApprovalScope{Branches: set.New("master")} + if !matchesScope(&req.PullRequest.Branch, &masterScope, []CommitFile{}) { + t.Error("Master scope should match against request") + } + foobarScope := ApprovalScope{Branches: set.New("foobar")} + if matchesScope(&req.PullRequest.Branch, &foobarScope, []CommitFile{}) { + t.Error("Foobar scope should not match against request") + } +} + +func TestMatchesPaths(t *testing.T) { + var files []CommitFile + files = append(files, CommitFile{ + Filename: "a", + }) + files = append(files, CommitFile{ + Filename: "b", + }) + files = append(files, CommitFile{ + Filename: "foo/bar", + }) + globs := []miniglob.MiniGlob{ + miniglob.MustCreate("a"), + miniglob.MustCreate("b"), + miniglob.MustCreate("foo/*"), + } + if !matchesPaths(globs, files) { + t.Error("Path policy did not match") + } + globs = []miniglob.MiniGlob{ + miniglob.MustCreate("a"), + miniglob.MustCreate("b"), + miniglob.MustCreate("/**r/"), + } + if !matchesPaths(globs, files) { + t.Error("Path policy did not match") + } + globs = []miniglob.MiniGlob{ + miniglob.MustCreate("a"), + miniglob.MustCreate("foo/*"), + } + if matchesPaths(globs, files) { + t.Error("Path policy should not match") + } + globs = []miniglob.MiniGlob{ + miniglob.MustCreate("a"), + miniglob.MustCreate("b"), + miniglob.MustCreate("foo/bar/baz"), + } + if matchesPaths(globs, files) { + t.Error("Path policy should not match") + } +} + +func TestMatchesPathsRegexp(t *testing.T) { + var files []CommitFile + files = append(files, CommitFile{ + Filename: "a", + }) + files = append(files, CommitFile{ + Filename: "b", + }) + files = append(files, CommitFile{ + Filename: "foo/bar", + }) + globs := []rxserde.RegexSerde{ + rxserde.RegexSerde{Regex: regexp.MustCompile("^a$")}, + rxserde.RegexSerde{Regex: regexp.MustCompile("^b$")}, + rxserde.RegexSerde{Regex: regexp.MustCompile("foo/.*")}, + } + if !matchesPathsRegexp(globs, files) { + t.Error("Path policy did not match") + } + globs = []rxserde.RegexSerde{ + rxserde.RegexSerde{Regex: regexp.MustCompile("^a$")}, + rxserde.RegexSerde{Regex: regexp.MustCompile("^b$")}, + rxserde.RegexSerde{Regex: regexp.MustCompile("bar")}, + } + if !matchesPathsRegexp(globs, files) { + t.Error("Path policy did not match") + } + globs = []rxserde.RegexSerde{ + rxserde.RegexSerde{Regex: regexp.MustCompile("^a$")}, + rxserde.RegexSerde{Regex: regexp.MustCompile("foo/.*")}, + } + if matchesPathsRegexp(globs, files) { + t.Error("Path policy should not match") + } + globs = []rxserde.RegexSerde{ + rxserde.RegexSerde{Regex: regexp.MustCompile("^a$")}, + rxserde.RegexSerde{Regex: regexp.MustCompile("^b$")}, + rxserde.RegexSerde{Regex: regexp.MustCompile("foo/bar/baz")}, + } + if matchesPathsRegexp(globs, files) { + t.Error("Path policy should not match") + } +} diff --git a/model/semver.go b/model/semver.go new file mode 100644 index 0000000..954fcc8 --- /dev/null +++ b/model/semver.go @@ -0,0 +1,29 @@ +//go:generate enumer -type=Semver +/* + +SPDX-Copyright: Copyright (c) Capital One Services, LLC +SPDX-License-Identifier: Apache-2.0 +Copyright 2017 Capital One 
Services, LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and limitations under the License. + +*/ +package model + +type Semver int + +const ( + Patch Semver = iota + Major + Minor + None +) diff --git a/model/semver_enumer.go b/model/semver_enumer.go new file mode 100644 index 0000000..7039385 --- /dev/null +++ b/model/semver_enumer.go @@ -0,0 +1,80 @@ +// Code generated by "enumer -type=Semver"; DO NOT EDIT +/* + +SPDX-Copyright: Copyright (c) Capital One Services, LLC +SPDX-License-Identifier: Apache-2.0 +Copyright 2017 Capital One Services, LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and limitations under the License. + +*/ +package model + +import ( + "encoding/json" + "fmt" +) + +// Semver is an enum. +// It is serialized as a string in JSON and as an integer in SQL. + +// Semver enum maps. +var ( + strMapSemver = map[string]Semver{ + "major": Major, + "minor": Minor, + "patch": Patch, + "none": None, + } + + intMapSemver = map[Semver]string{ + Major: "major", + Minor: "minor", + Patch: "patch", + None: "none", + } +) + +// Known says whether or not this value is a known enum value. +func (s Semver) Known() bool { + _, ok := intMapSemver[s] + return ok +} + +// String is for the standard stringer interface. +func (s Semver) String() string { + return intMapSemver[s] +} + +// UnmarshalJSON satisfies the json.Unmarshaler +func (s *Semver) UnmarshalJSON(data []byte) error { + str := "" + err := json.Unmarshal(data, &str) + if err != nil { + return err + } + var ok bool + *s, ok = strMapSemver[str] + if !ok { + return fmt.Errorf("Unknown Semver enum value: %s", str) + } + return nil +} + +// MarshalJSON satisfies the json.Marshaler +func (s Semver) MarshalJSON() ([]byte, error) { + if !s.Known() { + return nil, fmt.Errorf("Unknown Semver enum value: %d", int(s)) + } + name := intMapSemver[s] + return json.Marshal(name) +} diff --git a/model/snapshot.go b/model/snapshot.go new file mode 100644 index 0000000..ed47d42 --- /dev/null +++ b/model/snapshot.go @@ -0,0 +1,49 @@ +/* + +SPDX-Copyright: Copyright (c) Capital One Services, LLC +SPDX-License-Identifier: Apache-2.0 +Copyright 2017 Capital One Services, LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and limitations under the License. + +*/ +package model + +import "github.com/capitalone/checks-out/set" + +type MaintainerSnapshot struct { + People map[string]*Person + Org map[string]Org +} + +func (m *MaintainerSnapshot) PersonToOrg() (map[string]set.Set, error) { + mapping := make(map[string]set.Set) + for k, v := range m.Org { + //value is name of person in the org + people, err := v.GetPeople() + if err != nil { + return nil, err + } + for name := range people { + if _, ok := m.People[name]; !ok { + continue + } + orgs, ok := mapping[name] + if !ok { + orgs = set.Empty() + mapping[name] = orgs + } + orgs.Add(k) + } + } + return mapping, nil +} diff --git a/model/tag.go b/model/tag.go new file mode 100644 index 0000000..d859d7d --- /dev/null +++ b/model/tag.go @@ -0,0 +1,139 @@ +/* + +SPDX-Copyright: Copyright (c) Capital One Services, LLC +SPDX-License-Identifier: Apache-2.0 +Copyright 2017 Capital One Services, LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and limitations under the License. + +*/ +package model + +import ( + "bytes" + "encoding/json" + "fmt" + "regexp" + "text/template" +) + +type Tag string + +type TagConfig struct { + // enable automatic tagging of merges. Default is false. + Enable bool `json:"enable"` + // algorithm for generating tag name. Allowed values are + // "semver", "timestamp-rfc3339", and "timestamp-millis". + // Default is "semver". + Alg string `json:"algorithm"` + // golang text/template for producing tag name. + // Container struct is TemplateTag. + // Default template is "{{.Version}}" + TemplateRaw string `json:"template"` + Template *template.Template `json:"-"` + // If true then perform stricter validation + // of templates to comply with Docker tag requirements + Docker bool `json:"docker"` + // Version increment policy for the "semver" algorithm. + // Allowed values are "major", "minor", "patch", and "none". + // Default is "patch". + Increment Semver `json:"increment"` +} + +type TemplateTag struct { + Version string +} + +func DefaultTag() TagConfig { + return TagConfig{ + Enable: false, + Alg: "semver", + TemplateRaw: "{{.Version}}", + Docker: false, + Increment: Patch, + } +} + +var dockerRegex = regexp.MustCompile(`^[A-Za-z0-9_.\-]+$`) + +var illegalRegex = regexp.MustCompile(`[[:cntrl:]]|[ ~^:?*\\\]]`) + +// Used to avoid recursion in UnmarshalJSON +type shadowTagConfig TagConfig + +func (t *TagConfig) UnmarshalJSON(text []byte) error { + dummy := shadowTagConfig(DefaultTag()) + err := json.Unmarshal(text, &dummy) + if err != nil { + return err + } + *t = TagConfig(dummy) + return nil +} + +func checkRefFormat(t *TagConfig, tag string) error { + ills := illegalRegex.FindString(tag) + if len(ills) > 0 { + err := fmt.Errorf(`Illegal template tag %s: + cannot have the illegal character %s. 
Illegal characters are + ASCII control character, space, tilde, caret, colon, + question-mark, asterisk, open bracket, + and blackslash`, t.TemplateRaw, ills) + return err + } + return nil +} + +func (t *TagConfig) execute(body interface{}) ([]byte, error) { + var buffer bytes.Buffer + err := t.Template.Execute(&buffer, body) + if err != nil { + return nil, err + } + return buffer.Bytes(), nil +} + +func (t *TagConfig) Compile() error { + err := t.build() + if err != nil { + return err + } + return t.validate() +} + +func (t *TagConfig) build() error { + var err error + t.Template, err = template.New("tag").Parse(t.TemplateRaw) + return err +} + +func (t *TagConfig) validate() error { + buffer, err := t.execute(TemplateTag{}) + if err != nil { + return err + } + tpl := string(buffer) + if t.Docker && !dockerRegex.MatchString(tpl) { + err := fmt.Errorf(`Illegal template tag %s with Docker validation enabled: + only [A-Za-z0-9_.-] characters are allowed`, t.TemplateRaw) + return err + } + return checkRefFormat(t, tpl) +} + +func (t *TagConfig) GenerateTag(body TemplateTag) (string, error) { + buffer, err := t.execute(body) + if err != nil { + return "", err + } + return string(buffer), nil +} diff --git a/model/tag_test.go b/model/tag_test.go new file mode 100644 index 0000000..1107fcb --- /dev/null +++ b/model/tag_test.go @@ -0,0 +1,41 @@ +/* + +SPDX-Copyright: Copyright (c) Capital One Services, LLC +SPDX-License-Identifier: Apache-2.0 +Copyright 2017 Capital One Services, LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and limitations under the License. + +*/ +package model + +import ( + "testing" +) + +func TestCheckRefFormat(t *testing.T) { + cfg := TagConfig{TemplateRaw: "hello world"} + if cfg.Compile() == nil { + t.Error("space character should not pass validation") + } + cfg = TagConfig{TemplateRaw: "{{ .Version }}"} + if cfg.Compile() != nil { + t.Error("space character in template should pass validation") + } +} + +func TestTagValidateDocker(t *testing.T) { + cfg := TagConfig{TemplateRaw: "hello+world", Docker: true} + if cfg.Compile() == nil { + t.Error("+ character should not pass validation") + } +} diff --git a/model/team.go b/model/team.go deleted file mode 100644 index 561f927..0000000 --- a/model/team.go +++ /dev/null @@ -1,10 +0,0 @@ -package model - -type Team struct { - Login string `json:"login"` - Avatar string `json:"avatar"` -} - -type Member struct { - Login string `json:"login"` -} diff --git a/model/user.go b/model/user.go index 475f86f..355e1f5 100644 --- a/model/user.go +++ b/model/user.go @@ -1,10 +1,28 @@ +/* + +SPDX-Copyright: Copyright (c) Brad Rydzewski, project contributors, Capital One Services, LLC +SPDX-License-Identifier: Apache-2.0 +Copyright 2017 Brad Rydzewski, project contributors, Capital One Services, LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and limitations under the License. + +*/ package model type User struct { ID int64 `json:"id" meddler:"user_id,pk"` Login string `json:"login" meddler:"user_login"` - Email string `json:"email" meddler:"user_email"` Token string `json:"-" meddler:"user_token"` Avatar string `json:"avatar" meddler:"user_avatar"` Secret string `json:"-" meddler:"user_secret"` + Scopes string `json:"-" meddler:"user_scopes"` } diff --git a/notifier/context.go b/notifier/context.go index 6817186..26d3558 100644 --- a/notifier/context.go +++ b/notifier/context.go @@ -1,6 +1,24 @@ +/* + +SPDX-Copyright: Copyright (c) Brad Rydzewski, project contributors, Capital One Services, LLC +SPDX-License-Identifier: Apache-2.0 +Copyright 2017 Brad Rydzewski, project contributors, Capital One Services, LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and limitations under the License. + +*/ package notifier -import "golang.org/x/net/context" +import "context" const key = "sender" diff --git a/notifier/github/github.go b/notifier/github/github.go index e7bb0b5..de0f2ad 100644 --- a/notifier/github/github.go +++ b/notifier/github/github.go @@ -1,4 +1,60 @@ +/* + +SPDX-Copyright: Copyright (c) Brad Rydzewski, project contributors, Capital One Services, LLC +SPDX-License-Identifier: Apache-2.0 +Copyright 2017 Brad Rydzewski, project contributors, Capital One Services, LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and limitations under the License. + +*/ package github -// TODO(bradrydzewski) ability to notify users of a new pull request requiring -// approval by commenting on the GitHub pull request. 
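+// MySender below is the GitHub implementation of notifier.Sender. It is
+// registered in init() under model.Github and delivers a message by posting
+// a comment on the pull request through remote.WriteComment. Delivery is
+// skipped when no pull request number is available or when the token lacks
+// the capability to write pull request comments.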
+import ( + "context" + "fmt" + + log "github.com/Sirupsen/logrus" + "github.com/capitalone/checks-out/model" + "github.com/capitalone/checks-out/notifier" + "github.com/capitalone/checks-out/remote" + "github.com/capitalone/checks-out/web" +) + +type MySender struct{} + +func (ms *MySender) Prefix(mw notifier.MessageWrapper) string { + return fmt.Sprintf("Pull Request %s in repo %s: ", mw.MessageHeader.PrName, mw.MessageHeader.Slug) +} + +func (ms *MySender) Send(c context.Context, header notifier.MessageHeader, message string, names []string, url string) { + if header.PrNumber <= 0 { + return + } + repo, user, caps, err := web.GetRepoAndUser(c, header.Slug) + if err != nil { + log.Warnf("Error retrieving GitHub information: %v", err) + return + } + if !caps.Repo.PRWriteComment { + return + } + + //ignore names + err = remote.WriteComment(c, user, repo, header.PrNumber, message) + if err != nil { + log.Warnf("Error sending GitHub notification: %v", err) + } +} + +func init() { + notifier.Register(model.Github, &MySender{}) +} diff --git a/notifier/hipchat/hipchat.go b/notifier/hipchat/hipchat.go deleted file mode 100644 index 0b995f4..0000000 --- a/notifier/hipchat/hipchat.go +++ /dev/null @@ -1,4 +0,0 @@ -package hipchat - -// TODO(bradrydzewski) ability to notify users of a new pull request requiring -// approval by sending a notification to a hipchat room. diff --git a/notifier/mock/sender.go b/notifier/mock/sender.go deleted file mode 100644 index 3eab0b7..0000000 --- a/notifier/mock/sender.go +++ /dev/null @@ -1,21 +0,0 @@ -package mock - -import "github.com/lgtmco/lgtm/notifier" -import "github.com/stretchr/testify/mock" - -type Sender struct { - mock.Mock -} - -func (_m *Sender) Send(_a0 *notifier.Notification) error { - ret := _m.Called(_a0) - - var r0 error - if rf, ok := ret.Get(0).(func(*notifier.Notification) error); ok { - r0 = rf(_a0) - } else { - r0 = ret.Error(0) - } - - return r0 -} diff --git a/notifier/notifier.go b/notifier/notifier.go new file mode 100644 index 0000000..ebcf5bb --- /dev/null +++ b/notifier/notifier.go @@ -0,0 +1,133 @@ +/* + +SPDX-Copyright: Copyright (c) Capital One Services, LLC +SPDX-License-Identifier: Apache-2.0 +Copyright 2017 Capital One Services, LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and limitations under the License. + +*/ +package notifier + +import ( + "context" + "fmt" + "strings" + + log "github.com/Sirupsen/logrus" + "github.com/capitalone/checks-out/model" +) + +var ( + senders = map[model.CommentTarget]Sender{} +) + +type Sender interface { + Send(c context.Context, header MessageHeader, message string, names []string, url string) + Prefix(mw MessageWrapper) string +} + +type MessageWrapper struct { + MessageHeader + Messages []MessageInfo +} + +func (mw *MessageWrapper) Merge(in MessageWrapper) { + if mw.PrNumber != in.PrNumber || mw.PrName != in.PrName { + log.Warnf("Attempted to merge together incompatible message wrappers -- shouldn't ever happen, skipping: %v, %v", *mw, in) + return + } + mw.Messages = append(mw.Messages, in.Messages...) 
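+	// Headers were verified identical above, so merging reduces to
+	// concatenating the two message lists.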
+} + +type MessageHeader struct { + PrName string + PrNumber int + Slug string +} + +type MessageInfo struct { + Message string + Type model.CommentMessage +} + +// Register is called by the init methods of the notification +// systems at startup. If we have something bad for registering +// messaging endpoints then we should not start the service up. +func Register(name model.CommentTarget, sender Sender) { + if _, ok := senders[name]; ok { + panic("Duplicate Sender name " + name.String()) + } + senders[name] = sender +} + +func BuildErrorMessage(prName string, prNumber int, slug string, message string) MessageWrapper { + return MessageWrapper{ + MessageHeader: MessageHeader{ + PrName: prName, + PrNumber: prNumber, + Slug: slug, + }, + Messages: []MessageInfo{ + { + Message: message, + Type: model.CommentError, + }, + }, + } +} + +func SendErrorMessage(c context.Context, config *model.Config, prName string, prNumber int, slug string, message string) { + SendMessage(c, config, BuildErrorMessage(prName, prNumber, slug, message)) +} + +func hasMessageType(mi MessageInfo, tc model.TargetConfig) bool { + if tc.Types == nil { + return true + } + for _, curType := range tc.Types { + if curType == mi.Type { + return true + } + } + return false +} + +func SendMessage(c context.Context, config *model.Config, mw MessageWrapper) { + if !config.Comment.Enable { + return + } + for _, v := range config.Comment.Targets { + sender, ok := senders[model.ToCommentTarget(v.Target)] + if !ok { + log.Warnf("Unregistered sender %s; skipping", v.Target) + continue + } + //check if the filter applies + if v.Pattern != nil && !v.Pattern.Regex.MatchString(mw.PrName) { + continue + } + var messages []string + for _, mi := range mw.Messages { + //check if the message type matches + if hasMessageType(mi, v) { + messages = append(messages, mi.Message) + } + } + if len(messages) == 0 { + continue + } + message := strings.Join(messages, "\n") + message = fmt.Sprintf("%s%s", sender.Prefix(mw), message) + sender.Send(c, mw.MessageHeader, message, v.Names, v.Url) + } +} diff --git a/notifier/sender.go b/notifier/sender.go deleted file mode 100644 index b718a32..0000000 --- a/notifier/sender.go +++ /dev/null @@ -1,18 +0,0 @@ -package notifier - -import "golang.org/x/net/context" - -//go:generate mockery -name Sender -output mock -case=underscore - -// Sender defines a notification provider that is capable of sending out -// notifications to a list of maintainers or reviewers. An example provider -// might be a Slack or GitHub bot. -type Sender interface { - Send(*Notification) error -} - -// Send sends a notification to the list of maintainers indicating a commit is -// ready for their review and possible approval. -func Send(c context.Context, n *Notification) error { - return FromContext(c).Send(n) -} diff --git a/notifier/slack/slack.go b/notifier/slack/slack.go index c052411..4a61275 100644 --- a/notifier/slack/slack.go +++ b/notifier/slack/slack.go @@ -1,4 +1,108 @@ +/* + +SPDX-Copyright: Copyright (c) Brad Rydzewski, project contributors, Capital One Services, LLC +SPDX-License-Identifier: Apache-2.0 +Copyright 2017 Brad Rydzewski, project contributors, Capital One Services, LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and limitations under the License. + +*/ package slack -// TODO(bradrydzewski) ability to notify users of a new pull request requiring -// approval by sending a notification to a slack room. +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "net/http" + "strings" + + "github.com/capitalone/checks-out/envvars" + "github.com/capitalone/checks-out/model" + "github.com/capitalone/checks-out/notifier" + + log "github.com/Sirupsen/logrus" +) + +var ( + githubUrl = envvars.Env.Github.Url + httpClient = &http.Client{} +) + +func init() { + notifier.Register(model.Slack, &MySender{}) +} + +type MySender struct{} + +//https://github.com/capitalone/checks-out/pull/205 +func (ms *MySender) Prefix(mw notifier.MessageWrapper) string { + // + return fmt.Sprintf("<%s/%s/pull/%d|Pull Request %s> in <%s/%s|repo %s>: ", + githubUrl, + mw.MessageHeader.Slug, + mw.MessageHeader.PrNumber, + mw.MessageHeader.PrName, + githubUrl, + mw.MessageHeader.Slug, + mw.MessageHeader.Slug) +} + +func (ms *MySender) Send(c context.Context, header notifier.MessageHeader, message string, names []string, url string) { + if url == "" { + log.Warn("Error sending to Slack: SLACK_TARGET_URL is not configured") + return + } + + baseURL := c.Value("BASE_URL") + iconURL := "" + if baseURL != nil { + iconURL = baseURL.(string) + "/static/images/meowser.png" + } + + d := struct { + Channel string `json:"channel"` + Text string `json:"text"` + Username string `json:"username"` + IconURL string `json:"icon_url"` + }{ + Text: message, + Username: "Meowser", + IconURL: iconURL, + } + for _, name := range names { + d.Channel = name + output, err := toJson(d) + if err != nil { + log.Warnf("Unable to convert notification to JSON: %v", err) + continue + } + go func(msg string) { + //write to the slack channel + _, err := httpClient.Post(url, "application/json", strings.NewReader(msg)) + if err != nil { + log.Warnf("Error while writing %s to %s: %v", msg, url, err) + } + }(output) + } +} + +func toJson(s interface{}) (string, error) { + var b bytes.Buffer + e := json.NewEncoder(&b) + e.SetEscapeHTML(false) + err := e.Encode(s) + if err != nil { + return "", err + } + return b.String(), nil +} diff --git a/remote/cache.go b/remote/cache.go new file mode 100644 index 0000000..ddfd681 --- /dev/null +++ b/remote/cache.go @@ -0,0 +1,143 @@ +/* + +SPDX-Copyright: Copyright (c) Capital One Services, LLC +SPDX-License-Identifier: Apache-2.0 +Copyright 2017 Capital One Services, LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and limitations under the License. 
+ +*/ +package remote + +import ( + "context" + "fmt" + + "github.com/capitalone/checks-out/cache" + "github.com/capitalone/checks-out/model" + "github.com/capitalone/checks-out/set" +) + +// GetOrgs returns the list of user organizations from the cache +// associated with the current context. +func GetOrgs(c context.Context, user *model.User) ([]*model.GitHubOrg, error) { + key := fmt.Sprintf("orgs:%s", + user.Login, + ) + // if we fetch from the cache we can return immediately + val, err := cache.Get(c, key) + if err == nil { + return val.([]*model.GitHubOrg), nil + } + // else we try to grab from the remote system and + // populate our cache. + orgs, err := FromContext(c).GetOrgs(c, user) + if err != nil { + return nil, err + } + cache.Set(c, key, orgs) + return orgs, nil +} + +// ListTeams returns the list of team names from the cache +// associated with the current context. +func ListTeams(c context.Context, user *model.User, org string) (set.Set, error) { + key := fmt.Sprintf("teams:%s", org) + // if we fetch from the cache we can return immediately + val, err := cache.Get(c, key) + if err == nil { + return val.(set.Set), nil + } + // else we try to grab from the remote system and + // populate our cache. + teams, err := FromContext(c).ListTeams(c, user, org) + if err != nil { + return nil, err + } + cache.Set(c, key, teams) + return teams, nil +} + +// GetPerm returns the user permissions repositories from the cache +// associated with the current repository. +func GetPerm(c context.Context, user *model.User, owner, name string) (*model.Perm, error) { + key := fmt.Sprintf("perms:%s:%s/%s", + user.Login, + owner, + name, + ) + // if we fetch from the cache we can return immediately + val, err := cache.Get(c, key) + if err == nil { + return val.(*model.Perm), nil + } + // else we try to grab from the remote system and + // populate our cache. + perm, err := FromContext(c).GetPerm(c, user, owner, name) + if err != nil { + return nil, err + } + cache.Set(c, key, perm) + return perm, nil +} + +func getPeople(c context.Context, user *model.User, ids set.Set) ([]*model.Person, error) { + var people []*model.Person + for login := range ids { + key := fmt.Sprintf("people:%s", login) + val, err := cache.Longterm.Get(key) + if err == nil { + people = append(people, val.(*model.Person)) + } else { + p, err := GetPerson(c, user, login) + if err != nil { + return nil, err + } + err = cache.Longterm.Set(key, p) + if err != nil { + return nil, err + } + people = append(people, p) + } + } + return people, nil +} + +// GetCollaborators gets an collaborators member list from the remote system. +// Looks for information about each person in the long term cache. +func GetCollaborators(c context.Context, user *model.User, owner, name string) ([]*model.Person, error) { + users, err := FromContext(c).GetCollaborators(c, user, owner, name) + if err != nil { + return nil, err + } + return getPeople(c, user, users) +} + +// GetOrgMembers gets an organization member list from the remote system. +// Looks for information about each person in the long term cache. +func GetOrgMembers(c context.Context, user *model.User, org string) ([]*model.Person, error) { + users, err := FromContext(c).GetOrgMembers(c, user, org) + if err != nil { + return nil, err + } + return getPeople(c, user, users) +} + +// GetTeamMembers gets an repo's team members list from the remote system. +// Looks for information about each person in the long term cache. 
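+// Like GetCollaborators and GetOrgMembers above, it resolves the returned
+// logins to Person records via getPeople, which consults cache.Longterm
+// before falling back to GetPerson and caching the result.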
+func GetTeamMembers(c context.Context, user *model.User, org string, team string) ([]*model.Person, error) { + users, err := FromContext(c).GetTeamMembers(c, user, org, team) + if err != nil { + return nil, err + } + return getPeople(c, user, users) +} diff --git a/remote/cache_test.go b/remote/cache_test.go new file mode 100644 index 0000000..4e6afb9 --- /dev/null +++ b/remote/cache_test.go @@ -0,0 +1,146 @@ +/* + +SPDX-Copyright: Copyright (c) Capital One Services, LLC +SPDX-License-Identifier: Apache-2.0 +Copyright 2017 Capital One Services, LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and limitations under the License. + +*/ +package remote + +import ( + "context" + "errors" + "fmt" + "testing" + + "github.com/capitalone/checks-out/cache" + "github.com/capitalone/checks-out/model" + + "github.com/franela/goblin" + "github.com/gin-gonic/gin" + "github.com/stretchr/testify/mock" +) + +type MockRemote struct { + Remote + mock.Mock +} + +func (mr *MockRemote) GetPerm(c context.Context, u *model.User, owner string, name string) (*model.Perm, error) { + args := mr.Called(u, owner, name) + perm, _ := args.Get(0).(*model.Perm) + return perm, args.Error(1) +} + +func (mr *MockRemote) GetRepos(c context.Context, u *model.User) ([]*model.Repo, error) { + args := mr.Called(u) + repos, _ := args.Get(0).([]*model.Repo) + return repos, args.Error(1) +} + +func (mr *MockRemote) GetOrgs(c context.Context, u *model.User) ([]*model.GitHubOrg, error) { + args := mr.Called(u) + orgs, _ := args.Get(0).([]*model.GitHubOrg) + return orgs, args.Error(1) +} + +func TestHelper(t *testing.T) { + + g := goblin.Goblin(t) + + g.Describe("Cache helpers", func() { + + var c *gin.Context + var r *MockRemote + + g.BeforeEach(func() { + c = new(gin.Context) + cache.ToContext(c, cache.Default()) + + r = &MockRemote{} + ToContext(c, r) + }) + + g.It("Should get permissions from remote", func() { + r.On("GetPerm", fakeUser, fakeRepo.Owner, fakeRepo.Name).Return(fakePerm, nil).Once() + p, err := GetPerm(c, fakeUser, fakeRepo.Owner, fakeRepo.Name) + g.Assert(p).Equal(fakePerm) + g.Assert(err).Equal(nil) + }) + + g.It("Should get permissions from cache", func() { + key := fmt.Sprintf("perms:%s:%s/%s", + fakeUser.Login, + fakeRepo.Owner, + fakeRepo.Name, + ) + + cache.Set(c, key, fakePerm) + r.On("GetPerm", fakeUser, fakeRepo.Owner, fakeRepo.Name).Return(nil, fakeErr).Once() + p, err := GetPerm(c, fakeUser, fakeRepo.Owner, fakeRepo.Name) + g.Assert(p).Equal(fakePerm) + g.Assert(err).Equal(nil) + }) + + g.It("Should get permissions error", func() { + r.On("GetPerm", fakeUser, fakeRepo.Owner, fakeRepo.Name).Return(nil, fakeErr).Once() + p, err := GetPerm(c, fakeUser, fakeRepo.Owner, fakeRepo.Name) + g.Assert(p == nil).IsTrue() + g.Assert(err).Equal(fakeErr) + }) + + g.It("Should set and get orgs", func() { + r.On("GetOrgs", fakeUser).Return(fakeOrgs, nil).Once() + p, err := GetOrgs(c, fakeUser) + g.Assert(p).Equal(fakeOrgs) + g.Assert(err).Equal(nil) + }) + + g.It("Should get orgs", func() { + key := fmt.Sprintf("orgs:%s", + fakeUser.Login, + ) + + cache.Set(c, key, fakeOrgs) + 
r.On("GetOrgs", fakeUser).Return(nil, fakeErr).Once() + p, err := GetOrgs(c, fakeUser) + g.Assert(p).Equal(fakeOrgs) + g.Assert(err).Equal(nil) + }) + + g.It("Should get org error", func() { + r.On("GetOrgs", fakeUser).Return(nil, fakeErr).Once() + p, err := GetOrgs(c, fakeUser) + g.Assert(p == nil).IsTrue() + g.Assert(err).Equal(fakeErr) + }) + + }) +} + +var ( + fakeErr = errors.New("Not Found") + fakeUser = &model.User{Login: "octocat"} + fakePerm = &model.Perm{Pull: true, Push: true, Admin: true} + fakeRepo = &model.Repo{Owner: "octocat", Name: "Hello-World"} + fakeRepos = []*model.Repo{ + {Owner: "octocat", Name: "Hello-World"}, + {Owner: "octocat", Name: "hello-world"}, + {Owner: "octocat", Name: "Spoon-Knife"}, + } + fakeOrgs = []*model.GitHubOrg{ + {Login: "drone"}, + {Login: "docker"}, + } +) diff --git a/remote/context.go b/remote/context.go index 4b818ab..499fcd0 100644 --- a/remote/context.go +++ b/remote/context.go @@ -1,6 +1,24 @@ +/* + +SPDX-Copyright: Copyright (c) Brad Rydzewski, project contributors, Capital One Services, LLC +SPDX-License-Identifier: Apache-2.0 +Copyright 2017 Brad Rydzewski, project contributors, Capital One Services, LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and limitations under the License. + +*/ package remote -import "golang.org/x/net/context" +import "context" const key = "remote" diff --git a/remote/github/client.go b/remote/github/client.go index 7b5692c..b0b4627 100644 --- a/remote/github/client.go +++ b/remote/github/client.go @@ -1,3 +1,21 @@ +/* + +SPDX-Copyright: Copyright (c) Brad Rydzewski, project contributors, Capital One Services, LLC +SPDX-License-Identifier: Apache-2.0 +Copyright 2017 Brad Rydzewski, project contributors, Capital One Services, LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and limitations under the License. + +*/ package github import ( @@ -18,7 +36,7 @@ const ( pathRepos = "%sapi/user/repos" pathRepo = "%sapi/repos/%s" pathConf = "%sapi/repos/%s/maintainers" - pathBranch = "%srepos/%s/%s/branches/%s" + pathBranch = "%srepos/%s/%s/branches/%s/protection/required_status_checks/contexts" ) type Client struct { @@ -35,8 +53,8 @@ func NewClient(uri string) *Client { // authenticates all outbound requests with the given token. func NewClientToken(uri, token string) *Client { config := new(oauth2.Config) - auther := config.Client(oauth2.NoContext, &oauth2.Token{AccessToken: token}) - return &Client{auther, uri} + author := config.Client(oauth2.NoContext, &oauth2.Token{AccessToken: token}) + return &Client{author, uri} } // SetClient sets the default http client. 
This should be @@ -46,18 +64,6 @@ func (c *Client) SetClient(client *http.Client) { c.client = client } -func (c *Client) Branch(owner, name, branch string) (*Branch, error) { - out := new(Branch) - uri := fmt.Sprintf(pathBranch, c.base, owner, name, branch) - err := c.get(uri, out) - return out, err -} - -func (c *Client) BranchProtect(owner, name, branch string, in *Branch) error { - uri := fmt.Sprintf(pathBranch, c.base, owner, name, branch) - return c.patch(uri, in, nil) -} - // // http request helper functions // @@ -83,8 +89,8 @@ func (c *Client) patch(rawurl string, in, out interface{}) error { } // helper function for making an http DELETE request. -func (c *Client) delete(rawurl string) error { - return c.do(rawurl, "DELETE", nil, nil) +func (c *Client) delete(rawurl string, in, out interface{}) error { + return c.do(rawurl, "DELETE", in, nil) } // helper function to make an http request @@ -117,7 +123,7 @@ func (c *Client) stream(rawurl, method string, in, out interface{}) (io.ReadClos var buf io.ReadWriter if in != nil { buf = new(bytes.Buffer) - err := json.NewEncoder(buf).Encode(in) + err = json.NewEncoder(buf).Encode(in) if err != nil { return nil, err } diff --git a/remote/github/github.go b/remote/github/github.go index 65e3abe..75a0086 100644 --- a/remote/github/github.go +++ b/remote/github/github.go @@ -1,32 +1,105 @@ +/* + +SPDX-Copyright: Copyright (c) Brad Rydzewski, project contributors, Capital One Services, LLC +SPDX-License-Identifier: Apache-2.0 +Copyright 2017 Brad Rydzewski, project contributors, Capital One Services, LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and limitations under the License. 
+ +*/ package github import ( - "encoding/json" + "context" + "errors" "fmt" "net/http" + "sort" "strings" "time" + "github.com/capitalone/checks-out/envvars" + "github.com/capitalone/checks-out/exterror" + "github.com/capitalone/checks-out/model" + "github.com/capitalone/checks-out/set" + "github.com/capitalone/checks-out/shared/httputil" + "github.com/capitalone/checks-out/strings/lowercase" + log "github.com/Sirupsen/logrus" "github.com/google/go-github/github" - "github.com/lgtmco/lgtm/model" - "github.com/lgtmco/lgtm/shared/httputil" + multierror "github.com/mspiegel/go-multierror" "golang.org/x/oauth2" ) -// name of the status message posted to GitHub -const context = "approvals/lgtm" +const ( + DefaultURL = "https://github.com" + DefaultAPI = "https://api.github.com/" + DefaultScope = "read:org,repo:status" +) + +func createErrorFallback(resp *github.Response, err error, fallback int) error { + if resp != nil { + return exterror.Create(resp.StatusCode, err) + } + return exterror.Create(fallback, err) +} + +func createError(resp *github.Response, err error) error { + return createErrorFallback(resp, err, http.StatusInternalServerError) +} type Github struct { URL string API string Client string Secret string - Scopes []string } -func (g *Github) GetUser(res http.ResponseWriter, req *http.Request) (*model.User, error) { +func Get() *Github { + remote := &Github{ + API: DefaultAPI, + URL: envvars.Env.Github.Url, + Client: envvars.Env.Github.Client, + Secret: envvars.Env.Github.Secret, + } + if remote.URL != DefaultURL { + remote.URL = strings.TrimSuffix(remote.URL, "/") + remote.API = remote.URL + "/api/v3/" + } + return remote +} + +func (g *Github) Capabilities(ctx context.Context, u *model.User) (*model.Capabilities, error) { + var errs error + s := set.New(strings.Split(u.Scopes, ",")...) + caps := new(model.Capabilities) + caps.Org.Read = s.Contains("read:org") || s.Contains("write:org") || s.Contains("admin:org") + caps.Repo.CommitStatus = s.Contains("repo:status") || s.Contains("repo") || s.Contains("public_repo") + caps.Repo.DeploymentStatus = s.Contains("repo_deployment") || s.Contains("repo") || s.Contains("public_repo") + caps.Repo.DeleteBranch = s.Contains("repo") || s.Contains("public_repo") + caps.Repo.Merge = s.Contains("repo") || s.Contains("public_repo") + caps.Repo.Tag = s.Contains("repo") || s.Contains("public_repo") + caps.Repo.PRWriteComment = s.Contains("repo") || s.Contains("public_repo") + if !caps.Repo.CommitStatus { + errs = multierror.Append(errs, errors.New("commit status OAuth scope is required")) + } + if errs != nil { + return nil, exterror.Create(http.StatusUnauthorized, errs) + } + return caps, nil +} +func (g *Github) GetUser(ctx context.Context, res http.ResponseWriter, req *http.Request) (*model.User, error) { + scopes := envvars.Env.Github.Scope var config = &oauth2.Config{ ClientID: g.Client, ClientSecret: g.Secret, @@ -35,7 +108,7 @@ func (g *Github) GetUser(res http.ResponseWriter, req *http.Request) (*model.Use AuthURL: fmt.Sprintf("%s/login/oauth/authorize", g.URL), TokenURL: fmt.Sprintf("%s/login/oauth/access_token", g.URL), }, - Scopes: g.Scopes, + Scopes: strings.Split(scopes, ","), } // get the oauth code from the incoming request. if no code is present @@ -50,100 +123,291 @@ func (g *Github) GetUser(res http.ResponseWriter, req *http.Request) (*model.Use // exchanges the oauth2 code for an access token token, err := config.Exchange(oauth2.NoContext, code) if err != nil { - return nil, fmt.Errorf("Error exchanging token. 
%s", err) + err = fmt.Errorf("Exchanging token. %s", err) + return nil, exterror.Create(http.StatusBadRequest, err) } // get the currently authenticated user details for the access token - client := setupClient(g.API, token.AccessToken) - user, _, err := client.Users.Get("") + client := anonymousClient(ctx, g.API, token.AccessToken) + user, resp, err := client.Users.Get(ctx, "") if err != nil { - return nil, fmt.Errorf("Error fetching user. %s", err) + err = fmt.Errorf("Fetching user. %s", err) + return nil, createError(resp, err) + } + + // get the subset of requested scopes granted to the access token + scopes, err = g.GetScopes(ctx, token.AccessToken) + if err != nil { + err = fmt.Errorf("Fetching user. %s", err) + return nil, createError(resp, err) } return &model.User{ - Login: *user.Login, + Login: user.GetLogin(), Token: token.AccessToken, - Avatar: *user.AvatarURL, + Avatar: user.GetAvatarURL(), + Scopes: scopes, }, nil } -func (g *Github) GetUserToken(token string) (string, error) { - client := setupClient(g.API, token) - user, _, err := client.Users.Get("") +func (g *Github) GetUserToken(ctx context.Context, token string) (string, error) { + client := anonymousClient(ctx, g.API, token) + user, resp, err := client.Users.Get(ctx, "") + if err != nil { + err = fmt.Errorf("Fetching user. %s", err) + return "", createError(resp, err) + } + return user.GetLogin(), nil +} + +func (g *Github) GetScopes(ctx context.Context, token string) (string, error) { + client := basicAuthClient(g.API, g.Client, g.Secret) + auth, resp, err := client.Authorizations.Check(ctx, g.Client, token) if err != nil { - return "", fmt.Errorf("Error fetching user. %s", err) + err = fmt.Errorf("Checking authorization. %s", err) + return "", createError(resp, err) + } + scopes := set.Empty() + for _, scope := range auth.Scopes { + scopes.Add(string(scope)) } - return *user.Login, nil + return scopes.Print(","), nil } -func (g *Github) GetTeams(user *model.User) ([]*model.Team, error) { - client := setupClient(g.API, user.Token) - orgs, _, err := client.Organizations.List("", &github.ListOptions{PerPage: 100}) +func (g *Github) RevokeAuthorization(ctx context.Context, user *model.User) error { + client := basicAuthClient(g.API, g.Client, g.Secret) + resp, err := client.Authorizations.Revoke(ctx, g.Client, user.Token) if err != nil { - return nil, fmt.Errorf("Error fetching teams. %s", err) + err = fmt.Errorf("Revoking authorization. %s", err) + return createError(resp, err) } - teams := []*model.Team{} + return nil +} + +func (g *Github) GetOrgs(ctx context.Context, user *model.User) ([]*model.GitHubOrg, error) { + client := setupClient(ctx, g.API, user) + return getOrgs(ctx, client) +} + +func getOrgs(ctx context.Context, client *github.Client) ([]*model.GitHubOrg, error) { + var orgs []*github.Organization + resp, err := buildCompleteList(func(opts *github.ListOptions) (*github.Response, error) { + newOrgs, resp, err := client.Organizations.List(ctx, "", opts) + orgs = append(orgs, newOrgs...) + return resp, err + }) + if err != nil { + err = fmt.Errorf("Fetching orgs. 
%s", err) + return nil, createError(resp, err) + } + res := []*model.GitHubOrg{} for _, org := range orgs { - team := model.Team{ - Login: *org.Login, - Avatar: *org.AvatarURL, + o := model.GitHubOrg{ + Login: org.GetLogin(), + Avatar: org.GetAvatarURL(), + Admin: orgIsAdmin(ctx, client, org.GetLogin()), } - teams = append(teams, &team) + res = append(res, &o) + } + sort.Slice(res, func(i, j int) bool { + return strings.ToLower(res[i].Login) < strings.ToLower(res[j].Login) + }) + return res, nil +} + +func orgIsAdmin(ctx context.Context, client *github.Client, owner string) bool { + m, err := getOrgPerm(ctx, client, owner) + if err != nil { + //don't care, just return false + return false + } + return m.Admin +} + +func (g *Github) GetPerson(ctx context.Context, user *model.User, login string) (*model.Person, error) { + client := setupClient(ctx, g.API, user) + return getPerson(ctx, client, login) +} + +func getPerson(ctx context.Context, client *github.Client, login string) (*model.Person, error) { + user, resp, err := client.Users.Get(ctx, login) + if err != nil { + err = fmt.Errorf("Accessing information for user %s. %s", login, err) + return nil, createError(resp, err) + } + return &model.Person{ + Login: login, + Name: user.GetName(), + Email: user.GetEmail(), + }, nil +} + +func (g *Github) ListTeams(ctx context.Context, user *model.User, org string) (set.Set, error) { + client := setupClient(ctx, g.API, user) + resp, err := getTeams(ctx, client, org) + if err != nil { + return nil, err + } + teams := set.Empty() + for _, t := range resp { + teams.Add(t.GetSlug()) } return teams, nil } -func (g *Github) GetMembers(user *model.User, team string) ([]*model.Member, error) { - client := setupClient(g.API, user.Token) - teams, _, err := client.Organizations.ListTeams(team, &github.ListOptions{PerPage: 100}) +func getTeams(ctx context.Context, client *github.Client, org string) ([]*github.Team, error) { + var teams []*github.Team + resp, err := buildCompleteList(func(opts *github.ListOptions) (*github.Response, error) { + newTeams, response, err := client.Organizations.ListTeams(ctx, org, opts) + teams = append(teams, newTeams...) + return response, err + }) if err != nil { - return nil, fmt.Errorf("Error accessing team list. %s", err) + err = fmt.Errorf("Accessing teams for organization %s. %s", org, err) + return nil, createError(resp, err) } - var id int - for _, team := range teams { - if strings.ToLower(*team.Name) == "maintainers" { - id = *team.ID + return teams, nil +} + +func (g *Github) GetTeamMembers(ctx context.Context, user *model.User, org string, team string) (set.Set, error) { + client := setupClient(ctx, g.API, user) + return getTeamMembers(ctx, client, org, team) +} + +func getTeamMembers(ctx context.Context, client *github.Client, org string, team string) (set.Set, error) { + teams, err := getTeams(ctx, client, org) + if err != nil { + return nil, err + } + var id *int + for _, t := range teams { + if strings.EqualFold(t.GetSlug(), team) { + id = t.ID break } } - if id == 0 { - return nil, fmt.Errorf("Error finding approvers team. 
%s", err) + if id == nil { + err = fmt.Errorf("Team %s not found for organization %s.", team, org) + return nil, exterror.Create(http.StatusNotFound, err) } - opts := github.OrganizationListTeamMembersOptions{} - opts.PerPage = 100 - teammates, _, err := client.Organizations.ListTeamMembers(id, &opts) + topts := github.OrganizationListTeamMembersOptions{} + var teammates []*github.User + resp, err := buildCompleteList(func(opts *github.ListOptions) (*github.Response, error) { + topts.ListOptions = *opts + newTmates, resp2, err2 := client.Organizations.ListTeamMembers(ctx, *id, &topts) + teammates = append(teammates, newTmates...) + return resp2, err2 + }) if err != nil { - return nil, fmt.Errorf("Error fetching team members. %s", err) + err = fmt.Errorf("Fetching team %s members for organization %s. %s", team, org, err) + return nil, createError(resp, err) } - var members []*model.Member - for _, teammate := range teammates { - members = append(members, &model.Member{ - Login: *teammate.Login, - }) + names := set.Empty() + for _, u := range teammates { + names.Add(*u.Login) } - return members, nil + return names, nil } -func (g *Github) GetRepo(user *model.User, owner, name string) (*model.Repo, error) { - client := setupClient(g.API, user.Token) - repo_, _, err := client.Repositories.Get(owner, name) +func (g *Github) GetOrgMembers(ctx context.Context, user *model.User, org string) (set.Set, error) { + client := setupClient(ctx, g.API, user) + return getOrgMembers(ctx, client, org) +} + +func getOrgMembers(ctx context.Context, client *github.Client, org string) (set.Set, error) { + var orgmembers []*github.User + lmOptions := github.ListMembersOptions{} + resp, err := buildCompleteList(func(opts *github.ListOptions) (*github.Response, error) { + lmOptions.ListOptions = *opts + newOrgs, resp, err := client.Organizations.ListMembers(ctx, org, &lmOptions) + orgmembers = append(orgmembers, newOrgs...) + return resp, err + }) if err != nil { - return nil, fmt.Errorf("Error fetching repository. %s", err) + err = fmt.Errorf("Accessing organization %s. %s", org, err) + return nil, createError(resp, err) + } + names := set.Empty() + for _, u := range orgmembers { + names.Add(u.GetLogin()) + } + return names, nil +} + +func (g *Github) GetCollaborators(ctx context.Context, user *model.User, owner, name string) (set.Set, error) { + client := setupClient(ctx, g.API, user) + return getCollaborators(ctx, client, owner, name) +} + +func getCollaborators(ctx context.Context, client *github.Client, owner, name string) (set.Set, error) { + var collab []*github.User + lcOptions := github.ListCollaboratorsOptions{} + resp, err := buildCompleteList(func(opts *github.ListOptions) (*github.Response, error) { + lcOptions.ListOptions = *opts + next, resp, err := client.Repositories.ListCollaborators(ctx, owner, name, &lcOptions) + collab = append(collab, next...) + return resp, err + }) + if err != nil { + err = fmt.Errorf("Accessing collaborators for %s/%s. 
%s", owner, name, err) + return nil, createError(resp, err) + } + names := set.Empty() + for _, u := range collab { + names.Add(u.GetLogin()) + } + return names, nil +} + +func (g *Github) GetRepo(ctx context.Context, user *model.User, owner, name string) (*model.Repo, error) { + client := setupClient(ctx, g.API, user) + return getRepo(ctx, client, owner, name) +} + +func getRepo(ctx context.Context, client *github.Client, owner, name string) (*model.Repo, error) { + repo, resp, err := client.Repositories.Get(ctx, owner, name) + if err != nil { + err = fmt.Errorf("Fetching repository. %s", err) + return nil, createError(resp, err) } return &model.Repo{ Owner: owner, Name: name, - Slug: *repo_.FullName, - Link: *repo_.HTMLURL, - Private: *repo_.Private, + Slug: repo.GetFullName(), + Link: repo.GetHTMLURL(), + Private: repo.GetPrivate(), + Org: repo.Organization != nil, + }, nil +} + +func (g *Github) GetOrg(ctx context.Context, user *model.User, owner string) (*model.OrgDb, error) { + client := setupClient(ctx, g.API, user) + return getOrg(ctx, client, owner) +} + +func getOrg(ctx context.Context, client *github.Client, owner string) (*model.OrgDb, error) { + org, resp, err := client.Organizations.Get(ctx, owner) + if err != nil { + err = fmt.Errorf("Fetching org %s. %s", owner, err) + return nil, createError(resp, err) + } + return &model.OrgDb{ + Owner: owner, + Link: org.GetHTMLURL(), + Private: false, }, nil } -func (g *Github) GetPerm(user *model.User, owner, name string) (*model.Perm, error) { - client := setupClient(g.API, user.Token) - repo, _, err := client.Repositories.Get(owner, name) +func (g *Github) GetPerm(ctx context.Context, user *model.User, owner, name string) (*model.Perm, error) { + client := setupClient(ctx, g.API, user) + return getPerm(ctx, client, owner, name) +} + +func getPerm(ctx context.Context, client *github.Client, owner, name string) (*model.Perm, error) { + repo, resp, err := client.Repositories.Get(ctx, owner, name) if err != nil { - return nil, fmt.Errorf("Error fetching repository. %s", err) + err = fmt.Errorf("Fetching repository. %s", err) + return nil, createError(resp, err) } m := &model.Perm{} m.Admin = (*repo.Permissions)["admin"] @@ -152,9 +416,47 @@ func (g *Github) GetPerm(user *model.User, owner, name string) (*model.Perm, err return m, nil } -func (g *Github) GetRepos(u *model.User) ([]*model.Repo, error) { - client := setupClient(g.API, u.Token) - all, err := GetUserRepos(client) +func (g *Github) GetOrgPerm(ctx context.Context, user *model.User, owner string) (*model.Perm, error) { + client := setupClient(ctx, g.API, user) + return getOrgPerm(ctx, client, owner) +} + +func getOrgPerm(ctx context.Context, client *github.Client, owner string) (*model.Perm, error) { + perms, resp, err := client.Organizations.GetOrgMembership(ctx, "", owner) + if err != nil { + err = fmt.Errorf("Fetching org permission. 
%s", err) + return nil, createError(resp, err) + } + m := &model.Perm{} + m.Admin = (*perms.Role == "admin") + return m, nil +} + +func (g *Github) GetUserRepos(ctx context.Context, u *model.User) ([]*model.Repo, error) { + client := setupClient(ctx, g.API, u) + all, err := getUserRepos(ctx, client, u.Login) + if err != nil { + return nil, err + } + + repos := []*model.Repo{} + for _, repo := range all { + repos = append(repos, &model.Repo{ + Owner: repo.Owner.GetLogin(), + Name: repo.GetName(), + Slug: repo.GetFullName(), + Link: repo.GetHTMLURL(), + Private: repo.GetPrivate(), + Org: repo.Organization != nil, + }) + } + + return repos, nil +} + +func (g *Github) GetOrgRepos(ctx context.Context, u *model.User, owner string) ([]*model.Repo, error) { + client := setupClient(ctx, g.API, u) + all, err := getOrgRepos(ctx, client, owner) if err != nil { return nil, err } @@ -166,127 +468,518 @@ func (g *Github) GetRepos(u *model.User) ([]*model.Repo, error) { continue } repos = append(repos, &model.Repo{ - Owner: *repo.Owner.Login, - Name: *repo.Name, - Slug: *repo.FullName, - Link: *repo.HTMLURL, - Private: *repo.Private, + Owner: repo.Owner.GetLogin(), + Name: repo.GetName(), + Slug: repo.GetFullName(), + Link: repo.GetHTMLURL(), + Private: repo.GetPrivate(), + Org: repo.Organization != nil, }) } return repos, nil } -func (g *Github) SetHook(user *model.User, repo *model.Repo, link string) error { - client := setupClient(g.API, user.Token) +func (g *Github) SetHook(ctx context.Context, user *model.User, repo *model.Repo, link string) error { + client := setupClient(ctx, g.API, user) + return g.setHook(ctx, client, user, repo, link) +} + +// createProtectionRequest takes the GitHub protection status returned by +// GET /repos/:owner/:repo/branches/:branch/protection and converts it into +// a format suitable for PUT /repos/:owner/:repo/branches/:branch/protection +// (GitHub please fix your API) +func createProtectionRequest(input *github.Protection) *github.ProtectionRequest { + output := &github.ProtectionRequest{ + RequiredStatusChecks: nil, + EnforceAdmins: false, + RequiredPullRequestReviews: nil, + Restrictions: nil, + } + if input == nil { + return output + } + output.RequiredStatusChecks = input.RequiredStatusChecks + if input.EnforceAdmins != nil { + output.EnforceAdmins = input.EnforceAdmins.Enabled + } + if input.RequiredPullRequestReviews != nil { + inReviews := input.RequiredPullRequestReviews + outReviews := &github.PullRequestReviewsEnforcementRequest{ + DismissalRestrictionsRequest: nil, + DismissStaleReviews: inReviews.DismissStaleReviews, + RequireCodeOwnerReviews: inReviews.RequireCodeOwnerReviews, + } + inDismissal := inReviews.DismissalRestrictions + if len(inDismissal.Users) > 0 || len(inDismissal.Teams) > 0 { + outDismissal := &github.DismissalRestrictionsRequest{ + Users: []string{}, + Teams: []string{}, + } + for _, user := range inDismissal.Users { + outDismissal.Users = append(outDismissal.Users, user.GetLogin()) + } + for _, team := range inDismissal.Teams { + outDismissal.Teams = append(outDismissal.Teams, team.GetSlug()) + } + outReviews.DismissalRestrictionsRequest = outDismissal + } + output.RequiredPullRequestReviews = outReviews + } + if input.Restrictions != nil { + inRestrict := input.Restrictions + outRestrict := &github.BranchRestrictionsRequest{ + Users: []string{}, + Teams: []string{}, + } + for _, user := range inRestrict.Users { + outRestrict.Users = append(outRestrict.Users, user.GetLogin()) + } + for _, team := range inRestrict.Teams { + 
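The branch-protection API is asymmetric: GET /repos/:owner/:repo/branches/:branch/protection returns a github.Protection whose members are full objects, while the PUT endpoint expects a github.ProtectionRequest built from plain booleans, logins, and team slugs. createProtectionRequest performs that conversion so the addBranchProtection and removeBranchProtection helpers that follow can do a read-modify-write of the required status check contexts without disturbing the rest of the protection settings. A hedged sketch of a read-only variant assembled from the same pieces (the helper name is hypothetical, not part of this change):

    func hasServiceContext(ctx context.Context, client *github.Client, owner, repo, branch string) (bool, error) {
        protect, resp, err := client.Repositories.GetBranchProtection(ctx, owner, repo, branch)
        if err != nil {
            if resp != nil && resp.StatusCode == http.StatusNotFound {
                return false, nil // branch is not protected at all
            }
            return false, createError(resp, err)
        }
        if protect.RequiredStatusChecks == nil {
            return false, nil
        }
        for _, c := range protect.RequiredStatusChecks.Contexts {
            if c == model.ServiceName {
                return true, nil // this service is already a required check
            }
        }
        return false, nil
    }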
outRestrict.Teams = append(outRestrict.Teams, team.GetSlug()) + } + output.Restrictions = outRestrict + } + return output +} + +func addBranchProtection(ctx context.Context, client *github.Client, owner, repo, branch string) error { + protect, resp, err := client.Repositories.GetBranchProtection(ctx, owner, repo, branch) + if err != nil && resp.StatusCode != http.StatusNotFound { + return createError(resp, err) + } + preq := createProtectionRequest(protect) + if preq.RequiredStatusChecks == nil { + preq.RequiredStatusChecks = &github.RequiredStatusChecks{ + Strict: true, + Contexts: []string{}, + } + } + for _, ctx := range preq.RequiredStatusChecks.Contexts { + if ctx == model.ServiceName { + return nil + } + } + preq.RequiredStatusChecks.Contexts = append(preq.RequiredStatusChecks.Contexts, model.ServiceName) + _, resp, err = client.Repositories.UpdateBranchProtection(ctx, owner, repo, branch, preq) + if err != nil { + return createError(resp, err) + } + return nil +} - repo_, _, err := client.Repositories.Get(repo.Owner, repo.Name) +func removeBranchProtection(ctx context.Context, client *github.Client, owner, repo, branch string) error { + protect, resp, err := client.Repositories.GetBranchProtection(ctx, owner, repo, branch) + if resp.StatusCode == http.StatusNotFound { + return nil + } if err != nil { - return err + return createError(resp, err) + } + preq := createProtectionRequest(protect) + if preq.RequiredStatusChecks == nil { + return nil } + success := false + ctxs := preq.RequiredStatusChecks.Contexts + for i, ctx := range ctxs { + if ctx == model.ServiceName { + success = true + ctxs[i] = ctxs[len(ctxs)-1] + ctxs = ctxs[:len(ctxs)-1] + break + } + } + if !success { + return nil + } + preq.RequiredStatusChecks.Contexts = ctxs + _, resp, err = client.Repositories.UpdateBranchProtection(ctx, owner, repo, branch, preq) + if err != nil { + return createError(resp, err) + } + return nil +} - old, err := GetHook(client, repo.Owner, repo.Name, link) +func (g *Github) setHook(ctx context.Context, client *github.Client, user *model.User, repo *model.Repo, link string) error { + old, err := getHook(ctx, client, repo.Owner, repo.Name, link) if err == nil && old != nil { - client.Repositories.DeleteHook(repo.Owner, repo.Name, *old.ID) + client.Repositories.DeleteHook(ctx, repo.Owner, repo.Name, old.GetID()) } - _, err = CreateHook(client, repo.Owner, repo.Name, link) + _, err = createHook(ctx, client, repo.Owner, repo.Name, link) if err != nil { - log.Debugf("Error creating the webhook at %s. %s", link, err) + log.Debugf("Creating the webhook at %s. %s", link, err) return err } - in := new(Branch) - in.Protection.Enabled = true - in.Protection.Checks.Enforcement = "non_admins" - in.Protection.Checks.Contexts = []string{context} + r, resp, err := client.Repositories.Get(ctx, repo.Owner, repo.Name) + if err != nil { + return createError(resp, err) + } - client_ := NewClientToken(g.API, user.Token) - err = client_.BranchProtect(repo.Owner, repo.Name, *repo_.DefaultBranch, in) + _, resp, err = client.Repositories.GetBranch(ctx, repo.Owner, repo.Name, r.GetDefaultBranch()) + if resp.StatusCode == http.StatusNotFound { + return nil + } if err != nil { - if g.URL == "https://github.com" { - return err - } - log.Warnf("Error configuring protected branch for %s/%s@%s. 
%s", repo.Owner, repo.Name, *repo_.DefaultBranch, err) + return createError(resp, err) } - return nil + + return addBranchProtection(ctx, client, repo.Owner, repo.Name, r.GetDefaultBranch()) } -func (g *Github) DelHook(user *model.User, repo *model.Repo, link string) error { - client := setupClient(g.API, user.Token) +func (g *Github) DelHook(ctx context.Context, user *model.User, repo *model.Repo, link string) error { + client := setupClient(ctx, g.API, user) + return g.delHook(ctx, client, user, repo, link) +} - hook, err := GetHook(client, repo.Owner, repo.Name, link) +func (g *Github) delHook(ctx context.Context, client *github.Client, user *model.User, repo *model.Repo, link string) error { + hook, err := getHook(ctx, client, repo.Owner, repo.Name, link) if err != nil { return err } else if hook == nil { return nil } - _, err = client.Repositories.DeleteHook(repo.Owner, repo.Name, *hook.ID) + resp, err := client.Repositories.DeleteHook(ctx, repo.Owner, repo.Name, hook.GetID()) if err != nil { - return err + return createError(resp, err) } - repo_, _, err := client.Repositories.Get(repo.Owner, repo.Name) + r, resp, err := client.Repositories.Get(ctx, repo.Owner, repo.Name) if err != nil { + return createError(resp, err) + } + + _, resp, err = client.Repositories.GetBranch(ctx, repo.Owner, repo.Name, r.GetDefaultBranch()) + if resp.StatusCode == http.StatusNotFound { + return nil + } + if err != nil { + return createError(resp, err) + } + + return removeBranchProtection(ctx, client, repo.Owner, repo.Name, r.GetDefaultBranch()) +} + +func (g *Github) SetOrgHook(ctx context.Context, user *model.User, org *model.OrgDb, link string) error { + client := setupClient(ctx, g.API, user) + + old, err := getOrgHook(ctx, client, org.Owner, link) + if err == nil && old != nil { + client.Organizations.DeleteHook(ctx, org.Owner, old.GetID()) + } + + _, err = createOrgHook(ctx, client, org.Owner, link) + if err != nil { + log.Debugf("Creating the webhook at %s. 
%s", link, err) return err } - client_ := NewClientToken(g.API, user.Token) - branch, _ := client_.Branch(repo.Owner, repo.Name, *repo_.DefaultBranch) - if len(branch.Protection.Checks.Contexts) == 0 { + return nil +} + +func (g *Github) DelOrgHook(ctx context.Context, user *model.User, org *model.OrgDb, link string) error { + client := setupClient(ctx, g.API, user) + + hook, err := getOrgHook(ctx, client, org.Owner, link) + if err != nil { + return err + } else if hook == nil { return nil } + resp, err := client.Organizations.DeleteHook(ctx, org.Owner, hook.GetID()) + if err != nil { + return createError(resp, err) + } + + return nil +} + +// buildOtherContextSlice returns all contexts besides the one for this service +func buildOtherContextSlice(branch *Branch) []string { checks := []string{} for _, check := range branch.Protection.Checks.Contexts { - if check != context { + if check != model.ServiceName { checks = append(checks, check) } } - branch.Protection.Checks.Contexts = checks - return client_.BranchProtect(repo.Owner, repo.Name, *repo_.DefaultBranch, branch) + return checks +} + +func (g *Github) GetAllComments(ctx context.Context, u *model.User, r *model.Repo, num int) ([]*model.Comment, error) { + client := setupClient(ctx, g.API, u) + return getAllComments(ctx, client, r, num) +} + +func getAllComments(ctx context.Context, client *github.Client, r *model.Repo, num int) ([]*model.Comment, error) { + lcOpts := github.IssueListCommentsOptions{Direction: "desc", Sort: "created"} + var comm []*github.IssueComment + resp, err := buildCompleteList(func(opts *github.ListOptions) (*github.Response, error) { + lcOpts.ListOptions = *opts + newCom, resp, err := client.Issues.ListComments(ctx, r.Owner, r.Name, num, &lcOpts) + comm = append(comm, newCom...) 
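Every list endpoint in this file is paginated the same way: the single-page go-github call is wrapped in a closure that appends into a slice captured from the enclosing scope, and buildCompleteList (added in remote/github/paging.go as part of this change) keeps invoking the closure, advancing opts.Page to the response's NextPage until no pages remain. A minimal sketch of the pattern, assuming it sits in this package next to buildCompleteList and createError (the function name is hypothetical):

    func listAllComments(ctx context.Context, client *github.Client, owner, name string, num int) ([]*github.IssueComment, error) {
        lcOpts := github.IssueListCommentsOptions{Direction: "desc", Sort: "created"}
        var all []*github.IssueComment
        resp, err := buildCompleteList(func(opts *github.ListOptions) (*github.Response, error) {
            lcOpts.ListOptions = *opts
            page, resp, err := client.Issues.ListComments(ctx, owner, name, num, &lcOpts)
            all = append(all, page...)
            return resp, err
        })
        if err != nil {
            return nil, createError(resp, err)
        }
        return all, nil
    }

The closure owns the endpoint-specific options and result slice, so the same helper can drive comments, reviews, teams, members, collaborators, files, and tags without any generic machinery.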
+ return resp, err + }) + if err != nil { + return nil, createError(resp, err) + } + comments := []*model.Comment{} + for _, comment := range comm { + comments = append(comments, &model.Comment{ + Author: lowercase.Create(*comment.User.Login), + Body: comment.GetBody(), + SubmittedAt: comment.GetCreatedAt(), + }) + } + return comments, nil +} + +func (g *Github) IsHeadUIMerge(ctx context.Context, u *model.User, r *model.Repo, num int) (bool, error) { + client := setupClient(ctx, g.API, u) + pr, resp, err := client.PullRequests.Get(ctx, r.Owner, r.Name, num) + if err != nil { + return false, createError(resp, err) + } + commit, resp, err := client.Repositories.GetCommit(ctx, r.Owner, r.Name, *pr.Head.SHA) + if err != nil { + return false, createError(resp, err) + } + return isCommitUIMerge(commit), nil +} + +func (g *Github) GetCommentsSinceHead(ctx context.Context, u *model.User, r *model.Repo, num int, noUIMerge bool) ([]*model.Comment, error) { + client := setupClient(ctx, g.API, u) + return getCommentsSinceHead(ctx, client, r, num, noUIMerge) } -func (g *Github) GetComments(u *model.User, r *model.Repo, num int) ([]*model.Comment, error) { - client := setupClient(g.API, u.Token) +func getHead(ctx context.Context, client *github.Client, r *model.Repo, num int, noUIMerge bool) (*github.RepositoryCommit, error) { + pr, resp, err := client.PullRequests.Get(ctx, r.Owner, r.Name, num) + if err != nil { + return nil, createError(resp, err) + } + commit, resp, err := client.Repositories.GetCommit(ctx, r.Owner, r.Name, *pr.Head.SHA) + if err != nil { + return nil, createError(resp, err) + } + if noUIMerge { + commit, err = ignoreUIMerge(ctx, client, r, pr, commit) + if err != nil { + return nil, err + } + } + return commit, nil +} - opts := github.IssueListCommentsOptions{Direction: "desc", Sort: "created"} - opts.PerPage = 100 - comments_, _, err := client.Issues.ListComments(r.Owner, r.Name, num, &opts) +func getCommentsSinceHead(ctx context.Context, client *github.Client, r *model.Repo, num int, noUIMerge bool) ([]*model.Comment, error) { + commit, err := getHead(ctx, client, r, num, noUIMerge) if err != nil { return nil, err } + lcOpts := github.IssueListCommentsOptions{ + Direction: "desc", + Sort: "created", + Since: commit.Commit.Committer.GetDate()} + var comm []*github.IssueComment + resp, err := buildCompleteList(func(opts *github.ListOptions) (*github.Response, error) { + lcOpts.ListOptions = *opts + newCom, resp2, err2 := client.Issues.ListComments(ctx, r.Owner, r.Name, num, &lcOpts) + comm = append(comm, newCom...) 
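GetCommentsSinceHead narrows the comment stream to feedback posted after the pull request's current head commit, so pushing a new commit implicitly discards earlier sign-offs; GitHub filters the comments server-side via the Since option, while reviews are filtered client-side by SubmittedAt in getReviewsSinceHead. A hedged sketch of how a caller might tally approvals from that stream, with g being the Github remote, the surrounding variables assumed, and the regular expression purely illustrative (the real matching logic lives outside this file):

    approvalRe := regexp.MustCompile(`(?i)^approve`) // illustrative pattern only
    comments, err := g.GetCommentsSinceHead(ctx, user, repo, prNum, true)
    if err != nil {
        return err
    }
    count := 0
    for _, c := range comments {
        if approvalRe.MatchString(c.Body) {
            count++
        }
    }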
+ return resp2, err2 + }) + + if err != nil { + return nil, createError(resp, err) + } comments := []*model.Comment{} - for _, comment := range comments_ { + for _, comment := range comm { comments = append(comments, &model.Comment{ - Author: *comment.User.Login, - Body: *comment.Body, + Author: lowercase.Create(*comment.User.Login), + Body: comment.GetBody(), }) } return comments, nil } -func (g *Github) GetContents(u *model.User, r *model.Repo, path string) ([]byte, error) { - client := setupClient(g.API, u.Token) - content, _, _, err := client.Repositories.GetContents(r.Owner, r.Name, path, nil) +func (g *Github) GetAllReviews(ctx context.Context, u *model.User, r *model.Repo, num int) ([]*model.Review, error) { + client := setupClient(ctx, g.API, u) + return getAllReviews(ctx, client, r, num) +} + +func getAllReviews(ctx context.Context, client *github.Client, r *model.Repo, num int) ([]*model.Review, error) { + var extReviews []*github.PullRequestReview + resp, err := buildCompleteList(func(opts *github.ListOptions) (*github.Response, error) { + newReviews, resp2, err2 := client.PullRequests.ListReviews(ctx, r.Owner, r.Name, num, opts) + extReviews = append(extReviews, newReviews...) + return resp2, err2 + }) + if err != nil { + return nil, createError(resp, err) + } + // error checking to handle https://github.com/google/go-github/issues/540 + if resp.StatusCode != http.StatusOK { + err = fmt.Errorf("Unable to retrieve PR reviews for %s %d", r.Slug, num) + return nil, createError(resp, err) + } + reviews := []*model.Review{} + for i, rev := range extReviews { + if rev == nil || rev.User == nil { + log.Errorf("Found a nil rev or nil rev.User in the github.PullRequestReview element %d for review %d in repo %+v.", i, num, *r) + continue + } + reviews = append(reviews, &model.Review{ + ID: rev.GetID(), + Author: lowercase.Create(rev.User.GetLogin()), + State: lowercase.Create(rev.GetState()), + Body: rev.GetBody(), + SubmittedAt: rev.GetSubmittedAt(), + }) + } + return reviews, nil +} + +func (g *Github) GetReviewsSinceHead(ctx context.Context, u *model.User, r *model.Repo, num int, noUIMerge bool) ([]*model.Review, error) { + client := setupClient(ctx, g.API, u) + return getReviewsSinceHead(ctx, client, r, num, noUIMerge) +} + +func getReviewsSinceHead(ctx context.Context, client *github.Client, r *model.Repo, num int, noUIMerge bool) ([]*model.Review, error) { + commit, err := getHead(ctx, client, r, num, noUIMerge) if err != nil { return nil, err } - return content.Decode() + all, err2 := getAllReviews(ctx, client, r, num) + if err2 != nil { + return nil, err2 + } + reviews := []*model.Review{} + for _, rev := range all { + if rev.SubmittedAt.After(commit.Commit.Committer.GetDate()) { + reviews = append(reviews, rev) + } + } + return reviews, nil } -func (g *Github) SetStatus(u *model.User, r *model.Repo, num int, ok bool) error { - client := setupClient(g.API, u.Token) +func (g *Github) CreateURLCompare(c context.Context, u *model.User, r *model.Repo, sha1, sha2 string) string { + return fmt.Sprintf("%s/%s/%s/compare/%s...%s", g.URL, r.Owner, r.Name, sha1, sha2) +} - pr, _, err := client.PullRequests.Get(r.Owner, r.Name, num) +func (g *Github) GetCommits(ctx context.Context, u *model.User, r *model.Repo, sha string, page, perPage int) ([]string, int, error) { + client := setupClient(ctx, g.API, u) + return getCommits(ctx, client, r, sha, page, perPage) +} + +func getCommits(ctx context.Context, client *github.Client, r *model.Repo, sha string, page, perPage int) ([]string, int, 
error) { + var commits []string + opt := github.CommitsListOptions{ + SHA: sha, + ListOptions: github.ListOptions{ + Page: page, + PerPage: perPage, + }, + } + lst, resp, err := client.Repositories.ListCommits(ctx, r.Owner, r.Name, &opt) if err != nil { - return err + return nil, 0, createError(resp, err) + } + for _, val := range lst { + commits = append(commits, val.GetSHA()) } + return commits, resp.NextPage, nil +} - status := "pending" - desc := "this commit is pending approval" - if ok { - status = "success" - desc = "this commit looks good" +func (g *Github) GetContents(ctx context.Context, u *model.User, r *model.Repo, path string) ([]byte, error) { + client := setupClient(ctx, g.API, u) + return getContents(ctx, client, r, path) +} + +func getContents(ctx context.Context, client *github.Client, r *model.Repo, path string) ([]byte, error) { + content, _, resp, err := client.Repositories.GetContents(ctx, r.Owner, r.Name, path, nil) + if err != nil { + return nil, createError(resp, err) + } + body, err := content.GetContent() + if err != nil { + return nil, err + } + return []byte(body), nil +} + +func (g *Github) GetStatus(ctx context.Context, u *model.User, r *model.Repo, sha string) (model.CombinedStatus, error) { + client := setupClient(ctx, g.API, u) + return getStatus(ctx, client, r, sha) +} + +func getStatus(ctx context.Context, client *github.Client, r *model.Repo, sha string) (model.CombinedStatus, error) { + result := model.CombinedStatus{} + status, resp, err := client.Repositories.GetCombinedStatus(ctx, r.Owner, r.Name, sha, nil) + if err != nil { + return result, createError(resp, err) + } + result.State = status.GetState() + result.Statuses = make(map[string]model.CommitStatus) + for _, s := range status.Statuses { + result.Statuses[s.GetContext()] = model.CommitStatus{ + Context: s.GetContext(), + Description: s.GetDescription(), + State: s.GetState(), + } + } + return result, nil +} + +func getRequiredStatusChecks(ctx context.Context, client *github.Client, r *model.Repo, branch string) ([]string, error) { + content, resp, err := client.Repositories.GetRequiredStatusChecks(ctx, r.Owner, r.Name, branch) + if err != nil { + if resp.StatusCode == http.StatusNotFound { + return nil, nil + } + return nil, createError(resp, err) + } + return content.Contexts, nil +} + +func (g *Github) HasRequiredStatus(ctx context.Context, u *model.User, r *model.Repo, branch, sha string) (bool, error) { + client := setupClient(ctx, g.API, u) + return hasRequiredStatus(ctx, client, r, branch, sha) +} + +func hasRequiredStatus(ctx context.Context, client *github.Client, r *model.Repo, branch, sha string) (bool, error) { + status, err := getStatus(ctx, client, r, sha) + if err != nil { + return false, err + } + if status.State != "success" { + return false, nil + } + log.Debug("overall status is success -- checking to see if all status checks returned success") + required, err := getRequiredStatusChecks(ctx, client, r, branch) + if err != nil { + return false, err + } + for _, r := range required { + if _, ok := status.Statuses[r]; !ok { + return false, nil + } + } + for _, s := range status.Statuses { + if s.State != "success" { + return false, nil + } + } + return true, nil +} + +func (g *Github) SetStatus(ctx context.Context, u *model.User, r *model.Repo, sha, context, status, desc string) error { + client := setupClient(ctx, g.API, u) + return setStatus(ctx, client, r, sha, context, status, desc) +} + +func setStatus(ctx context.Context, client *github.Client, r *model.Repo, sha, 
context, status, desc string) error { + // An undocumented feature of GitHub statuses is a 140 character limit + if len(desc) > 135 { + desc = desc[:135] + "..." } data := github.RepoStatus{ @@ -295,38 +988,373 @@ func (g *Github) SetStatus(u *model.User, r *model.Repo, num int, ok bool) error Description: github.String(desc), } - _, _, err = client.Repositories.CreateStatus(r.Owner, r.Name, *pr.Head.SHA, &data) - return err + _, resp, err := client.Repositories.CreateStatus(ctx, r.Owner, r.Name, sha, &data) + if err != nil { + return createError(resp, err) + } + return nil } -func (g *Github) GetHook(r *http.Request) (*model.Hook, error) { +func (g *Github) CreateEmptyCommit(ctx context.Context, u *model.User, r *model.Repo, sha, msg string) (string, error) { + client := setupClient(ctx, g.API, u) + return createEmptyCommit(ctx, client, r, sha, msg) +} - // only process comment hooks - if r.Header.Get("X-Github-Event") != "issue_comment" { - return nil, nil +func createEmptyCommit(ctx context.Context, client *github.Client, r *model.Repo, sha, msg string) (string, error) { + prev, resp, err := client.Git.GetCommit(ctx, r.Owner, r.Name, sha) + if err != nil { + return "", createError(resp, err) } + commit, resp, err := client.Git.CreateCommit(ctx, r.Owner, r.Name, &github.Commit{ + Message: github.String(msg), + Tree: prev.Tree, + Parents: []github.Commit{{ + SHA: github.String(sha), + }}, + }) + if err != nil { + return "", createError(resp, err) + } + return commit.GetSHA(), nil +} - data := commentHook{} - err := json.NewDecoder(r.Body).Decode(&data) +func (g *Github) CreateReference(ctx context.Context, u *model.User, r *model.Repo, sha, name string) (string, error) { + client := setupClient(ctx, g.API, u) + return createReference(ctx, client, r, sha, name) +} + +func createReference(ctx context.Context, client *github.Client, r *model.Repo, sha, name string) (string, error) { + ref, resp, err := client.Git.CreateRef(ctx, r.Owner, r.Name, &github.Reference{ + Ref: github.String(name), + Object: &github.GitObject{ + Type: github.String("commit"), + SHA: github.String(sha), + }, + }) if err != nil { - return nil, err + return "", createError(resp, err) } + return ref.Object.GetSHA(), nil +} - if len(data.Issue.PullRequest.Link) == 0 { - return nil, nil +func (g *Github) CreatePR(ctx context.Context, u *model.User, r *model.Repo, title, head, base, body string) (int, error) { + client := setupClient(ctx, g.API, u) + return createPR(ctx, client, r, title, head, base, body) +} + +func createPR(ctx context.Context, client *github.Client, r *model.Repo, title, head, base, body string) (int, error) { + pr := github.NewPullRequest{ + Title: github.String(title), + Head: github.String(head), + Base: github.String(base), + Body: github.String(body), + } + pullRequest, resp, err := client.PullRequests.Create(ctx, r.Owner, r.Name, &pr) + if err != nil { + return 0, createError(resp, err) + } + return pullRequest.GetNumber(), nil +} + +func (g *Github) GetPullRequest(ctx context.Context, u *model.User, r *model.Repo, number int) (model.PullRequest, error) { + client := setupClient(ctx, g.API, u) + return getPullRequest(ctx, client, r, number) +} + +func getPullRequest(ctx context.Context, client *github.Client, r *model.Repo, number int) (model.PullRequest, error) { + pr, _, err := doGetPullRequest(ctx, client, r, number) + return pr, err +} + +func (g *Github) GetPullRequestFiles(ctx context.Context, u *model.User, r *model.Repo, number int) ([]model.CommitFile, error) { + client := 
setupClient(ctx, g.API, u) + return getPullRequestFiles(ctx, client, r, number) +} + +func getPullRequestFiles(ctx context.Context, client *github.Client, r *model.Repo, number int) ([]model.CommitFile, error) { + var files []*github.CommitFile + resp, err := buildCompleteList(func(opts *github.ListOptions) (*github.Response, error) { + newFiles, resp, err := client.PullRequests.ListFiles(ctx, r.Owner, r.Name, number, opts) + files = append(files, newFiles...) + return resp, err + }) + if err != nil { + return nil, createError(resp, err) + } + res := []model.CommitFile{} + for _, f := range files { + if f.Filename == nil { + log.Warnf("Repo %s pr %d has a modified file with no filename: %s", + r.Name, number, f.String()) + continue + } + res = append(res, model.CommitFile{Filename: f.GetFilename()}) + } + return res, nil +} + +func (g *Github) GetPullRequestsForCommit(ctx context.Context, u *model.User, r *model.Repo, sha *string) ([]model.PullRequest, error) { + client := setupClient(ctx, g.API, u) + return getPullRequestsForCommit(ctx, client, r, sha) +} + +func getPullRequestsForCommit(ctx context.Context, client *github.Client, r *model.Repo, sha *string) ([]model.PullRequest, error) { + log.Debug("sha == ", *sha) + issues, resp, err := client.Search.Issues(ctx, fmt.Sprintf("%s&type=pr", *sha), &github.SearchOptions{ + TextMatch: false, + }) + if err != nil { + return nil, createError(resp, err) + } + out := []model.PullRequest{} + for _, v := range issues.Issues { + log.Debugf("got pull request %v", v) + if v.State != nil && *v.State == "closed" { + log.Debugf("skipping pull request %s because it's closed", *v.Title) + continue + } + pr, prHeadSha, err := doGetPullRequest(ctx, client, r, *v.Number) + + if err != nil { + return nil, err + } + + if *prHeadSha != *sha { + log.Debugf("Pull Request %d has sha %s at head, not sha %s, so not a pull request for this commit", pr.Number, *prHeadSha, *sha) + continue + } + + out = append(out, pr) + + } + return out, nil +} + +func (g *Github) GetIssue(ctx context.Context, u *model.User, r *model.Repo, number int) (model.Issue, error) { + client := setupClient(ctx, g.API, u) + return getIssue(ctx, client, r, number) +} + +func getIssue(ctx context.Context, client *github.Client, r *model.Repo, number int) (model.Issue, error) { + issue, resp, err := client.Issues.Get(ctx, r.Owner, r.Name, number) + if err != nil { + return model.Issue{}, exterror.Create(resp.StatusCode, err) } + result := model.Issue{ + Number: number, + Title: issue.GetTitle(), + Author: lowercase.Create(issue.User.GetLogin()), + } + return result, nil +} - hook := new(model.Hook) - hook.Issue = new(model.Issue) - hook.Issue.Number = data.Issue.Number - hook.Issue.Author = data.Issue.User.Login - hook.Repo = new(model.Repo) - hook.Repo.Owner = data.Repository.Owner.Login - hook.Repo.Name = data.Repository.Name - hook.Repo.Slug = data.Repository.FullName - hook.Comment = new(model.Comment) - hook.Comment.Body = data.Comment.Body - hook.Comment.Author = data.Comment.User.Login +func (g *Github) MergePR(ctx context.Context, u *model.User, r *model.Repo, pullRequest model.PullRequest, approvers []*model.Person, message string, mergeMethod string) (*string, error) { + client := setupClient(ctx, g.API, u) + return mergePR(ctx, client, r, pullRequest, approvers, message, mergeMethod) +} - return hook, nil +func mergePR(ctx context.Context, client *github.Client, r *model.Repo, pullRequest model.PullRequest, approvers []*model.Person, message string, mergeMethod string) (*string, error) 
{ + log.Debugf("incoming message: %v", message) + msg := message + if len(msg) > 0 { + msg += "\n" + } + msg += fmt.Sprintf("Merged by %s\n", envvars.Env.Branding.ShortName) + if len(approvers) > 0 { + apps := "Approved by:\n" + for _, v := range approvers { + //Brad Rydzewski (@bradrydzewski) + if len(v.Name) > 0 { + apps += fmt.Sprintf("%s", v.Name) + } + if len(v.Email) > 0 { + apps += fmt.Sprintf(" <%s>", v.Email) + } + if len(v.Login) > 0 { + apps += fmt.Sprintf(" (@%s)", v.Login) + } + apps += "\n" + } + msg += apps + } + log.Debugf("Constructed message: %v", msg) + options := github.PullRequestOptions{MergeMethod: "merge"} + options.MergeMethod = mergeMethod + result, resp, err := client.PullRequests.Merge(ctx, r.Owner, r.Name, pullRequest.Number, msg, &options) + if err != nil { + return nil, createError(resp, err) + } + + if !(*result.Merged) { + return nil, errors.New(*result.Message) + } + return result.SHA, nil +} + +func (g *Github) CompareBranches(ctx context.Context, u *model.User, repo *model.Repo, base string, head string, owner string) (model.BranchCompare, error) { + client := setupClient(ctx, g.API, u) + return compareBranches(ctx, client, repo, base, head, owner) +} + +func compareBranches(ctx context.Context, client *github.Client, repo *model.Repo, base string, head string, owner string) (model.BranchCompare, error) { + var result model.BranchCompare + if repo.Owner != owner { + head = owner + ":" + head + } + res, resp, err := client.Repositories.CompareCommits(ctx, repo.Owner, repo.Name, base, head) + if err != nil { + return result, createError(resp, err) + } + result.AheadBy = res.GetAheadBy() + result.BehindBy = res.GetBehindBy() + result.Status = res.GetStatus() + result.TotalCommits = res.GetTotalCommits() + return result, nil +} + +func (g *Github) DeleteBranch(ctx context.Context, u *model.User, repo *model.Repo, name string) error { + client := setupClient(ctx, g.API, u) + return deleteBranch(ctx, client, repo, name) +} + +func deleteBranch(ctx context.Context, client *github.Client, repo *model.Repo, name string) error { + resp, err := client.Git.DeleteRef(ctx, repo.Owner, repo.Name, "refs/heads/"+name) + if err != nil { + err = fmt.Errorf("Deleting branch %s/%s/%s. %s", repo.Owner, repo.Name, name, err) + return createError(resp, err) + } + return nil +} + +func (g *Github) ListTags(ctx context.Context, u *model.User, r *model.Repo) ([]model.Tag, error) { + client := setupClient(ctx, g.API, u) + return listTags(ctx, client, r) +} + +func listTags(ctx context.Context, client *github.Client, r *model.Repo) ([]model.Tag, error) { + var tags []*github.RepositoryTag + resp, err := buildCompleteList(func(opts *github.ListOptions) (*github.Response, error) { + newTags, resp, err := client.Repositories.ListTags(ctx, r.Owner, r.Name, opts) + tags = append(tags, newTags...) 
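mergePR appends the "Merged by" and "Approved by" attribution to whatever base message the caller passes in; the merge method, tag name, and branch cleanup are likewise left to the caller. A hedged sketch of how MergePR, Tag, and DeleteBranch compose into a merge-then-tag-then-cleanup sequence (the tag value and merge method are illustrative, and the surrounding variables are assumed):

    sha, err := g.MergePR(ctx, user, repo, pr, approvers, "", "squash")
    if err != nil {
        return err
    }
    tag := "1.2.3" // illustrative tag name
    if err := g.Tag(ctx, user, repo, &tag, sha); err != nil {
        return err
    }
    return g.DeleteBranch(ctx, user, repo, pr.Branch.CompareName)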
+ return resp, err + }) + + if err != nil { + return nil, createError(resp, err) + } + out := make([]model.Tag, len(tags)) + for k, v := range tags { + out[k] = model.Tag(*v.Name) + } + return out, nil +} + +func (g *Github) Tag(ctx context.Context, u *model.User, r *model.Repo, tag *string, sha *string) error { + client := setupClient(ctx, g.API, u) + return doTag(ctx, client, r, tag, sha) +} + +func doTag(ctx context.Context, client *github.Client, r *model.Repo, tag *string, sha *string) error { + t := time.Now() + gittag, resp, err := client.Git.CreateTag(ctx, r.Owner, r.Name, &github.Tag{ + Tag: tag, + SHA: sha, + Message: github.String(fmt.Sprintf("Tagged by %s", envvars.Env.Branding.ShortName)), + Tagger: &github.CommitAuthor{ + Date: &t, + Name: github.String(envvars.Env.Branding.ShortName), + Email: github.String(envvars.Env.Github.Email), + }, + Object: &github.GitObject{ + SHA: sha, + Type: github.String("commit"), + }, + }) + + if err != nil { + return createError(resp, err) + } + _, resp, err = client.Git.CreateRef(ctx, r.Owner, r.Name, &github.Reference{ + Ref: github.String("refs/tags/" + *tag), + Object: &github.GitObject{ + SHA: gittag.SHA, + }, + }) + + if err != nil { + return createError(resp, err) + } + + return nil +} + +func (g *Github) WriteComment(ctx context.Context, u *model.User, r *model.Repo, num int, message string) error { + client := setupClient(ctx, g.API, u) + return writeComment(ctx, client, r, num, message) +} + +func writeComment(ctx context.Context, client *github.Client, r *model.Repo, num int, message string) error { + emsg := model.CommentPrefix + " " + message + _, resp, err := client.Issues.CreateComment(ctx, r.Owner, r.Name, num, &github.IssueComment{ + Body: github.String(emsg), + }) + if err != nil { + return createError(resp, err) + } + return nil +} + +func (g *Github) ScheduleDeployment(ctx context.Context, u *model.User, r *model.Repo, d model.DeploymentInfo) error { + client := setupClient(ctx, g.API, u) + return scheduleDeployment(ctx, client, r, d) +} + +func scheduleDeployment(ctx context.Context, client *github.Client, r *model.Repo, d model.DeploymentInfo) error { + _, resp, err := client.Repositories.CreateDeployment(ctx, r.Owner, r.Name, &github.DeploymentRequest{ + Ref: github.String(d.Ref), + Task: github.String(d.Task), + Environment: github.String(d.Environment), + }) + if err != nil { + return createError(resp, err) + } + return nil +} + +func doGetPullRequest(ctx context.Context, client *github.Client, r *model.Repo, number int) (model.PullRequest, *string, error) { + pr, resp, err := client.PullRequests.Get(ctx, r.Owner, r.Name, number) + if err != nil { + return model.PullRequest{}, nil, createError(resp, err) + } + + log.Debug("current issue ==", number) + log.Debug("current pr ==", *pr) + sha := pr.Head.SHA + + mergeable := true + if pr.Mergeable != nil { + mergeable = *pr.Mergeable + } + + result := model.PullRequest{ + Issue: model.Issue{ + Number: number, + Title: pr.GetTitle(), + Author: lowercase.Create(pr.User.GetLogin()), + }, + // head branch contains what you like to be applied + // base branch contains where changes should be applied + Branch: model.Branch{ + CompareName: pr.Head.GetRef(), + CompareSHA: pr.Head.GetSHA(), + CompareOwner: pr.Head.User.GetLogin(), + Mergeable: mergeable, + Merged: pr.GetMerged(), + MergeCommitSHA: pr.GetMergeCommitSHA(), + BaseName: pr.Base.GetRef(), + BaseSHA: pr.Base.GetSHA(), + }, + Body: pr.GetBody(), + } + return result, sha, nil } diff --git 
a/remote/github/github_integration_test.go b/remote/github/github_integration_test.go new file mode 100644 index 0000000..a8b7e68 --- /dev/null +++ b/remote/github/github_integration_test.go @@ -0,0 +1,449 @@ +/* + +SPDX-Copyright: Copyright (c) Capital One Services, LLC +SPDX-License-Identifier: Apache-2.0 +Copyright 2017 Capital One Services, LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and limitations under the License. + +*/ +package github + +import ( + "context" + "fmt" + "math/rand" + "net/url" + "testing" + "time" + + "github.com/google/go-github/github" + "github.com/capitalone/checks-out/envvars" + "github.com/capitalone/checks-out/exterror" + "github.com/capitalone/checks-out/model" + "golang.org/x/oauth2" +) + +func createTestClient(t *testing.T) *github.Client { + var err error + githubParams := Get() + urlstring := githubParams.API + ts := oauth2.StaticTokenSource( + &oauth2.Token{AccessToken: envvars.Env.Test.GithubToken}, + ) + tc := oauth2.NewClient(oauth2.NoContext, ts) + client := github.NewClient(tc) + client.BaseURL, err = url.Parse(urlstring) + if err != nil { + t.Fatalf("Unable to parse url '%s': %s", urlstring, err.Error()) + } + return client +} + +func TestGitHubClient(t *testing.T) { + if !envvars.Env.Test.GithubTestEnable { + t.Log("Skipping GitHub integration tests") + return + } + if len(envvars.Env.Test.GithubToken) == 0 { + t.Fatal("GITHUB_TEST_TOKEN environment variable must be defined") + } + if len(envvars.Env.Github.Email) == 0 { + t.Fatal("GITHUB_EMAIL environment variable must be defined") + } + client := createTestClient(t) + repo := createRepo(t, client) + branch := createTestBranch(t, client, repo) + branch = createTestCommit(t, client, repo, branch, "foo") + pr := createTestPR(t, client, repo) + createPRComment(t, client, repo, pr, "this is a comment") + branch = createTestCommit(t, client, repo, branch, "bar") + commit := getCommit(t, client, repo, branch) + createPRComment(t, client, repo, pr, "this is another comment") + createPRReview(t, client, repo, pr, "this is a review") + t.Run("GetRepo", func(t *testing.T) { testGetRepo(t, client, repo) }) + t.Run("GetComments", func(t *testing.T) { testGetComments(t, client, repo, pr) }) + t.Run("GetReviews", func(t *testing.T) { testGetReviews(t, client, repo, pr) }) + t.Run("GetOrgs", func(t *testing.T) { testGetOrgs(t, client) }) + t.Run("GetModelRepo", func(t *testing.T) { testGetModelRepo(t, client, repo) }) + t.Run("GetRepos", func(t *testing.T) { testGetUserRepos(t, client, repo) }) + t.Run("GetContents", func(t *testing.T) { testGetContents(t, client, repo) }) + t.Run("GetPullRequest", func(t *testing.T) { testGetPullRequest(t, client, repo, pr) }) + t.Run("CompareBranches", func(t *testing.T) { testCompareBranches(t, client, repo, pr) }) + t.Run("GetPullRequestFiles", func(t *testing.T) { testGetPullRequestFiles(t, client, repo, pr) }) + t.Run("GetAllComments", func(t *testing.T) { testGetAllComments(t, client, repo, pr) }) + t.Run("Tags", func(t *testing.T) { testTags(t, client, repo, branch) }) + t.Run("GetPullRequestForCommit", func(t 
*testing.T) { testGetPullRequestForCommit(t, client, repo, commit) }) + t.Run("MergeAndDelete", func(t *testing.T) { testMergeAndDelete(t, client, repo, pr) }) + deleteRepo(t, client, repo) +} + +func TestGitHubMergeSquash(t *testing.T) { + if !envvars.Env.Test.GithubTestEnable { + t.Log("Skipping GitHub integration tests") + return + } + if len(envvars.Env.Test.GithubToken) == 0 { + t.Fatal("GITHUB_TEST_TOKEN environment variable must be defined") + } + client := createTestClient(t) + repo := createRepo(t, client) + branch := createTestBranch(t, client, repo) + branch = createTestCommit(t, client, repo, branch, "foo") + pr := createTestPR(t, client, repo) + createPRComment(t, client, repo, pr, "this is a comment") + branch = createTestCommit(t, client, repo, branch, "bar") + createPRComment(t, client, repo, pr, "this is another comment") + createPRReview(t, client, repo, pr, "this is a review") + t.Run("SquashAndDelete", func(t *testing.T) { testSquashAndDelete(t, client, repo, pr) }) + deleteRepo(t, client, repo) +} + +func TestGitHubMergeRebase(t *testing.T) { + if !envvars.Env.Test.GithubTestEnable { + t.Log("Skipping GitHub integration tests") + return + } + if len(envvars.Env.Test.GithubToken) == 0 { + t.Fatal("GITHUB_TEST_TOKEN environment variable must be defined") + } + client := createTestClient(t) + repo := createRepo(t, client) + branch := createTestBranch(t, client, repo) + branch = createTestCommit(t, client, repo, branch, "foo") + pr := createTestPR(t, client, repo) + createPRComment(t, client, repo, pr, "this is a comment") + branch = createTestCommit(t, client, repo, branch, "bar") + createPRComment(t, client, repo, pr, "this is another comment") + createPRReview(t, client, repo, pr, "this is a review") + t.Run("RebaseAndDelete", func(t *testing.T) { testRebaseAndDelete(t, client, repo, pr) }) + deleteRepo(t, client, repo) +} + +func testGetRepo(t *testing.T, client *github.Client, repo *github.Repository) { + ctx := context.Background() + _, _, err := client.Repositories.Get(ctx, *repo.Owner.Login, *repo.Name) + if err != nil { + t.Error("Unable to get repository", err) + } +} + +func testGetComments(t *testing.T, client *github.Client, repo *github.Repository, pr *github.PullRequest) { + ctx := context.Background() + opts := github.IssueListCommentsOptions{Direction: "desc", Sort: "created"} + opts.PerPage = 100 + comm, _, err := client.Issues.ListComments(ctx, *repo.Owner.Login, *repo.Name, *pr.Number, &opts) + if err != nil { + t.Error("Unable to get comments", err) + } + if len(comm) != 2 { + t.Error("Did not find 2 comments", len(comm)) + } +} + +func testGetReviews(t *testing.T, client *github.Client, repo *github.Repository, pr *github.PullRequest) { + ctx := context.Background() + reviews, _, err := client.PullRequests.ListReviews(ctx, *repo.Owner.Login, *repo.Name, *pr.Number, nil) + if err != nil { + t.Error("Unable to get pull request reviews", err) + } + if len(reviews) != 1 { + t.Error("Did not find 1 pull request review", len(reviews)) + } +} + +func testGetOrgs(t *testing.T, client *github.Client) { + ctx := context.Background() + orgs, err := getOrgs(ctx, client) + if err != nil { + t.Error("Unable to get github organizations", err) + } + if orgs == nil { + t.Error("organizations must be non-nil value") + } +} + +func testGetModelRepo(t *testing.T, client *github.Client, repo *github.Repository) { + ctx := context.Background() + res, err := getRepo(ctx, client, *repo.Owner.Login, *repo.Name) + if err != nil { + t.Error("Unable to get repository", err) + } 
+ if res.Name != *repo.Name { + t.Error("Repository name is incorrect", res.Name) + } +} + +func testGetUserRepos(t *testing.T, client *github.Client, repo *github.Repository) { + ctx := context.Background() + repos, err := getUserRepos(ctx, client, repo.Owner.GetLogin()) + if err != nil { + t.Error("Unable to get user's repositories", err) + } + if repos == nil { + t.Error("repos must be non-nil value") + } +} + +func testGetContents(t *testing.T, client *github.Client, repo *github.Repository) { + ctx := context.Background() + r := &model.Repo{Owner: *repo.Owner.Login, Name: *repo.Name} + res, err := getContents(ctx, client, r, "README.md") + if err != nil { + t.Error("Unable to get README.md", err) + } + if len(res) == 0 { + t.Error("Unable to get README.md contents", res) + } + res, err = getContents(ctx, client, r, "foobar.md") + if err == nil { + t.Error("Failed to produce error message") + } + if err.(exterror.ExtError).Status != 404 { + t.Error("Error status is not a 404") + } +} + +func testGetPullRequest(t *testing.T, client *github.Client, repo *github.Repository, pr *github.PullRequest) { + ctx := context.Background() + r := &model.Repo{Owner: *repo.Owner.Login, Name: *repo.Name} + res, err := getPullRequest(ctx, client, r, *pr.Number) + if err != nil { + t.Error("Unable to get pull request", err) + } + if res.Number != *pr.Number { + t.Error("Pull request number is incorrect", res.Number) + } +} + +func testCompareBranches(t *testing.T, client *github.Client, repo *github.Repository, pr *github.PullRequest) { + ctx := context.Background() + r := &model.Repo{Owner: *repo.Owner.Login, Name: *repo.Name} + res, err := compareBranches(ctx, client, r, pr.Base.GetRef(), pr.Head.GetRef(), pr.Head.User.GetLogin()) + if err != nil { + t.Error("Unable to compare branches", err) + } + if res.AheadBy != 2 { + t.Error("Ahead by number is incorrect", res.AheadBy) + } +} + +func testGetPullRequestFiles(t *testing.T, client *github.Client, repo *github.Repository, pr *github.PullRequest) { + ctx := context.Background() + r := &model.Repo{Owner: *repo.Owner.Login, Name: *repo.Name} + res, err := getPullRequestFiles(ctx, client, r, *pr.Number) + if err != nil { + t.Error("Unable to get pull request files", err) + } + if len(res) == 0 { + t.Error("Length of pull request files should be nonzero") + } +} + +func testGetAllComments(t *testing.T, client *github.Client, repo *github.Repository, pr *github.PullRequest) { + ctx := context.Background() + r := &model.Repo{Owner: *repo.Owner.Login, Name: *repo.Name} + res, err := getAllComments(ctx, client, r, *pr.Number) + if err != nil { + t.Error("Unable to get all comments", err) + } + if len(res) != 2 { + t.Error("Unable to find two comments", len(res)) + } +} + +func testGetPullRequestForCommit(t *testing.T, client *github.Client, repo *github.Repository, commit *github.Commit) { + ctx := context.Background() + r := &model.Repo{Owner: *repo.Owner.Login, Name: *repo.Name} + _, err := getPullRequestsForCommit(ctx, client, r, commit.SHA) + if err != nil { + t.Error("Unable to get pull requests for commit", err) + } +} + +func testMergeAndDelete(t *testing.T, client *github.Client, repo *github.Repository, pr *github.PullRequest) { + testMergeAndDeleteInner(t, client, repo, pr, "merge") +} + +func testSquashAndDelete(t *testing.T, client *github.Client, repo *github.Repository, pr *github.PullRequest) { + testMergeAndDeleteInner(t, client, repo, pr, "squash") +} + +func testRebaseAndDelete(t *testing.T, client *github.Client, repo *github.Repository, pr 
*github.PullRequest) { + testMergeAndDeleteInner(t, client, repo, pr, "rebase") +} + +func testMergeAndDeleteInner(t *testing.T, client *github.Client, repo *github.Repository, pr *github.PullRequest, mergeMethod string) { + ctx := context.Background() + r := &model.Repo{Owner: *repo.Owner.Login, Name: *repo.Name} + p := model.PullRequest{Issue: model.Issue{Number: *pr.Number}} + latest, _, err := client.PullRequests.Get(ctx, r.Owner, r.Name, p.Number) + if err != nil { + t.Error("Unable to get latest status of pull request", err) + } + if latest.Mergeable != nil && *latest.Mergeable { + _, err = mergePR(ctx, client, r, p, []*model.Person{}, "", "") + if err != nil && err.(exterror.ExtError).Status != 405 { + t.Error("Unable to merge pull request", err) + } + } + err = deleteBranch(ctx, client, r, "foobar") + if err != nil { + t.Error("Unable to delete branch", err) + } +} + +func testTags(t *testing.T, client *github.Client, repo *github.Repository, branch *github.Reference) { + ctx := context.Background() + r := &model.Repo{Owner: *repo.Owner.Login, Name: *repo.Name} + tags, err := listTags(ctx, client, r) + if err != nil { + t.Error("Unable to list tags", err) + } + if len(tags) != 0 { + t.Error("Expected zero tags", len(tags)) + } + err = doTag(ctx, client, r, github.String("foo"), branch.Object.SHA) + if err != nil { + t.Error("Unable to create tags", err) + } +} + +func createRepo(t *testing.T, client *github.Client) *github.Repository { + ctx := context.Background() + name := randomString(16) + repo := &github.Repository{ + Name: github.String(name), + Private: github.Bool(false), + AutoInit: github.Bool(true), + } + repo, _, err := client.Repositories.Create(ctx, "", repo) + if err != nil { + t.Fatal("Unable to create git repository", err) + } + return repo +} + +func createTestBranch(t *testing.T, client *github.Client, repo *github.Repository) *github.Reference { + ctx := context.Background() + ref, _, err := client.Git.GetRef(ctx, *repo.Owner.Login, *repo.Name, "refs/heads/master") + if err != nil { + t.Fatal("Unable to get master head", err) + } + ref, _, err = client.Git.CreateRef(ctx, *repo.Owner.Login, *repo.Name, &github.Reference{ + Ref: github.String("refs/heads/foobar"), + Object: ref.Object, + }) + if err != nil { + t.Fatal("Unable to create branch", err) + } + return ref +} + +func createTestCommit(t *testing.T, client *github.Client, repo *github.Repository, branch *github.Reference, filename string) *github.Reference { + ctx := context.Background() + blob, _, err := client.Git.CreateBlob(ctx, *repo.Owner.Login, *repo.Name, &github.Blob{ + Content: github.String(""), + Size: github.Int(0), + Encoding: github.String("utf-8"), + }) + if err != nil { + t.Fatal("Unable to get create blob", filename, err) + } + tree, _, err := client.Git.CreateTree(ctx, *repo.Owner.Login, *repo.Name, *branch.Object.SHA, []github.TreeEntry{{ + Path: github.String(filename), + Mode: github.String("100644"), + Type: github.String("blob"), + SHA: blob.SHA, + }}) + if err != nil { + t.Fatal("Unable to get create tree", filename, err) + } + commit, _, err := client.Git.CreateCommit(ctx, *repo.Owner.Login, *repo.Name, &github.Commit{ + Message: github.String(fmt.Sprintf("%s commit", filename)), + Tree: tree, + Parents: []github.Commit{{ + SHA: branch.Object.SHA, + }, + }, + }) + if err != nil { + t.Fatal("Unable to get create commit", filename, err) + } + branch.Object.SHA = commit.SHA + branch, _, err = client.Git.UpdateRef(ctx, *repo.Owner.Login, *repo.Name, branch, false) + if err != nil { + 
t.Fatal("Unable to update reference", filename, err) + } + return branch +} + +func getCommit(t *testing.T, client *github.Client, repo *github.Repository, branch *github.Reference) *github.Commit { + ctx := context.Background() + commit, _, err := client.Git.GetCommit(ctx, *repo.Owner.Login, *repo.Name, *branch.Object.SHA) + if err != nil { + t.Fatal("Unable to get commit", *branch.Object.SHA, err) + } + return commit +} + +func createTestPR(t *testing.T, client *github.Client, repo *github.Repository) *github.PullRequest { + ctx := context.Background() + pr, _, err := client.PullRequests.Create(ctx, *repo.Owner.Login, *repo.Name, &github.NewPullRequest{ + Title: github.String("Adds foobar feature"), + Head: github.String("foobar"), + Base: github.String("master"), + }) + if err != nil { + t.Fatal("Unable to create pull request", err) + } + return pr +} + +func createPRComment(t *testing.T, client *github.Client, + repo *github.Repository, pr *github.PullRequest, comment string) { + ctx := context.Background() + r := &model.Repo{Owner: *repo.Owner.Login, Name: *repo.Name} + err := writeComment(ctx, client, r, *pr.Number, comment) + if err != nil { + t.Error("Unable to add comment", comment, err) + } +} + +func createPRReview(t *testing.T, client *github.Client, + repo *github.Repository, pr *github.PullRequest, comment string) { + ctx := context.Background() + review := github.PullRequestReviewRequest{} + review.Body = &comment + _, resp, err := client.PullRequests.CreateReview(ctx, *repo.Owner.Login, *repo.Name, *pr.Number, &review) + if err != nil { + t.Error("Unable to add pull request review", err, resp) + } +} + +func deleteRepo(t *testing.T, client *github.Client, repo *github.Repository) { + ctx := context.Background() + _, err := client.Repositories.Delete(ctx, *repo.Owner.Login, *repo.Name) + if err != nil { + t.Error("Unable to delete git repository", err) + } +} + +func randomString(strlen int) string { + rand.Seed(time.Now().UTC().UnixNano()) + const chars = "abcdefghijklmnopqrstuvwxyz0123456789" + result := make([]byte, strlen) + for i := 0; i < strlen; i++ { + result[i] = chars[rand.Intn(len(chars))] + } + return string(result) +} diff --git a/remote/github/paging.go b/remote/github/paging.go new file mode 100644 index 0000000..fdee22e --- /dev/null +++ b/remote/github/paging.go @@ -0,0 +1,37 @@ +/* + +SPDX-Copyright: Copyright (c) Capital One Services, LLC +SPDX-License-Identifier: Apache-2.0 +Copyright 2017 Capital One Services, LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and limitations under the License. 
+ +*/ +package github + +import ( + "github.com/google/go-github/github" +) + +func buildCompleteList(process func(opts *github.ListOptions) (*github.Response, error)) (*github.Response, error) { + var response *github.Response + var err error + opts := &github.ListOptions{} + for { + response, err = process(opts) + if err != nil || response.NextPage == 0 { + break + } + opts.Page = response.NextPage + } + return response, err +} diff --git a/remote/github/types.go b/remote/github/types.go index 4f990e1..a9ec910 100644 --- a/remote/github/types.go +++ b/remote/github/types.go @@ -1,11 +1,22 @@ -package github +/* -type Error struct { - Message string `json:"message"` -} +SPDX-Copyright: Copyright (c) Brad Rydzewski, project contributors, Capital One Services, LLC +SPDX-License-Identifier: Apache-2.0 +Copyright 2017 Brad Rydzewski, project contributors, Capital One Services, LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at -func (e Error) Error() string { return e.Message } -func (e Error) String() string { return e.Message } + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and limitations under the License. + +*/ +package github type Branch struct { Protection struct { @@ -16,37 +27,3 @@ type Branch struct { } `json:"required_status_checks"` } `json:"protection"` } - -// commentHook represents a subset of the issue_comment payload. -type commentHook struct { - Issue struct { - Link string `json:"html_url"` - Number int `json:"number"` - User struct { - Login string `json:"login"` - } `json:"user"` - - PullRequest struct { - Link string `json:"html_url"` - } `json:"pull_request"` - } `json:"issue"` - - Comment struct { - Body string `json:"body"` - User struct { - Login string `json:"login"` - } `json:"user"` - } `json:"comment"` - - Repository struct { - Name string `json:"name"` - FullName string `json:"full_name"` - Desc string `json:"description"` - Private bool `json:"private"` - Owner struct { - Login string `json:"login"` - Type string `json:"type"` - Avatar string `json:"avatar_url"` - } `json:"owner"` - } `json:"repository"` -} diff --git a/remote/github/utils.go b/remote/github/utils.go index d81823c..d4b2787 100644 --- a/remote/github/utils.go +++ b/remote/github/utils.go @@ -1,33 +1,135 @@ +/* + +SPDX-Copyright: Copyright (c) Brad Rydzewski, project contributors, Capital One Services, LLC +SPDX-License-Identifier: Apache-2.0 +Copyright 2017 Brad Rydzewski, project contributors, Capital One Services, LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and limitations under the License. 
+ +*/ package github import ( - "fmt" + "context" + "net/http" "net/url" + "runtime" + "strings" + + "github.com/capitalone/checks-out/exterror" + "github.com/capitalone/checks-out/model" + "github.com/capitalone/checks-out/usage" "github.com/google/go-github/github" "golang.org/x/oauth2" ) -func setupClient(rawurl, accessToken string) *github.Client { +const ( + gitHubSubstring = "github.com/google/go-github/" + gitHubCaller = "github.com/google/go-github/github.(*Client).Do" +) + +type UserTransport struct { + Login string + Event string + // Transport is the underlying HTTP transport to use when making requests. + // It will default to http.DefaultTransport if nil. + Transport http.RoundTripper +} + +// RoundTrip implements the RoundTripper interface. +func (t *UserTransport) RoundTrip(req *http.Request) (*http.Response, error) { + caller := t.caller() + usage.RecordApiRequest(t.Login, t.Event, caller) + return t.transport().RoundTrip(req) +} + +func locateParent(frames *runtime.Frames) bool { + for { + frame, more := frames.Next() + if strings.HasSuffix(frame.Function, gitHubCaller) { + return true + } + if !more { + return false + } + } +} + +func (t *UserTransport) caller() string { + pc := make([]uintptr, 16) + n := runtime.Callers(2, pc) + pc = pc[:n] + frames := runtime.CallersFrames(pc) + success := locateParent(frames) + if success { + frame, _ := frames.Next() + idx := strings.Index(frame.Function, gitHubSubstring) + if idx >= 0 { + return frame.Function[idx+len(gitHubSubstring):] + } + return frame.Function + } + return "" +} + +func (t *UserTransport) transport() http.RoundTripper { + if t.Transport != nil { + return t.Transport + } + return http.DefaultTransport +} + +func setupClient(ctx context.Context, rawurl string, user *model.User) *github.Client { + return createClient(ctx, rawurl, user.Token, user.Login) +} + +func anonymousClient(ctx context.Context, rawurl, accessToken string) *github.Client { + return createClient(ctx, rawurl, accessToken, "") +} + +func createClient(ctx context.Context, rawurl, accessToken, login string) *github.Client { token := oauth2.Token{AccessToken: accessToken} source := oauth2.StaticTokenSource(&token) - client := oauth2.NewClient(oauth2.NoContext, source) - github := github.NewClient(client) - github.BaseURL, _ = url.Parse(rawurl) - return github + client := oauth2.NewClient(context.Background(), source) + client.Transport = &UserTransport{ + Login: login, + Event: usage.GetEventFromContext(ctx), + Transport: client.Transport} + g := github.NewClient(client) + g.BaseURL, _ = url.Parse(rawurl) + return g } -// GetHook is a heper function that retrieves a hook by +func basicAuthClient(rawurl, id, secret string) *github.Client { + transport := github.BasicAuthTransport{ + Username: id, + Password: secret, + } + g := github.NewClient(transport.Client()) + g.BaseURL, _ = url.Parse(rawurl) + return g +} + +// getHook is a helper function that retrieves a hook by // hostname. To do this, it will retrieve a list of all hooks // and iterate through the list. 
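The UserTransport defined above attributes every outgoing GitHub API call to a login and a webhook event, using the call stack to recover which go-github method issued the request. createClient is the real wiring; the sketch below only illustrates that any http.Client can be wrapped the same way, and it would also have to live in this package. The login and event values are invented:

// Hedged sketch: requests made through this client are recorded by
// usage.RecordApiRequest, attributed to the given login and event.
func newInstrumentedClient() *github.Client {
	httpClient := &http.Client{
		Transport: &UserTransport{
			Login: "octocat",       // illustrative login
			Event: "issue_comment", // illustrative event name
		},
	}
	return github.NewClient(httpClient)
}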
-func GetHook(client *github.Client, owner, name, rawurl string) (*github.Hook, error) { - hooks, _, err := client.Repositories.ListHooks(owner, name, nil) +func getHook(ctx context.Context, client *github.Client, owner, name, rawurl string) (*github.Hook, error) { + hooks, resp, err := client.Repositories.ListHooks(ctx, owner, name, nil) if err != nil { - return nil, err + return nil, exterror.Create(resp.StatusCode, err) } newurl, err := url.Parse(rawurl) if err != nil { - fmt.Println("error parsing new hook url", rawurl, err) return nil, err } for _, hook := range hooks { @@ -37,72 +139,110 @@ func GetHook(client *github.Client, owner, name, rawurl string) (*github.Hook, e } oldurl, err := url.Parse(hookurl) if err != nil { - fmt.Println("error parsing old hook url", hookurl, err) continue } if newurl.Host == oldurl.Host { - return &hook, nil + return hook, nil } } return nil, nil } -func DeleteHook(client *github.Client, owner, name, url string) error { - hook, err := GetHook(client, owner, name, url) +// getOrgHook is a helper function that retrieves a hook by +// hostname. To do this, it will retrieve a list of all hooks +// and iterate through the list. +func getOrgHook(ctx context.Context, client *github.Client, owner, rawurl string) (*github.Hook, error) { + hooks, resp, err := client.Organizations.ListHooks(ctx, owner, nil) if err != nil { - return err + return nil, exterror.Create(resp.StatusCode, err) } - if hook == nil { - return nil + newurl, err := url.Parse(rawurl) + if err != nil { + return nil, err + } + for _, hook := range hooks { + hookurl, ok := hook.Config["url"].(string) + if !ok { + continue + } + oldurl, err := url.Parse(hookurl) + if err != nil { + continue + } + if newurl.Host == oldurl.Host { + return hook, nil + } } - _, err = client.Repositories.DeleteHook(owner, name, *hook.ID) - return err + return nil, nil } -// CreateHook is a heper function that creates a post-commit hook +// createHook is a helper function that creates a post-commit hook // for the specified repository. -func CreateHook(client *github.Client, owner, name, url string) (*github.Hook, error) { +func createHook(ctx context.Context, client *github.Client, owner, name, url string) (*github.Hook, error) { var hook = new(github.Hook) hook.Name = github.String("web") - hook.Events = []string{"issue_comment"} + hook.Events = []string{"issue_comment", "status", "pull_request", "pull_request_review"} hook.Config = map[string]interface{}{} hook.Config["url"] = url hook.Config["content_type"] = "json" - created, _, err := client.Repositories.CreateHook(owner, name, hook) + created, resp, err := client.Repositories.CreateHook(ctx, owner, name, hook) + if err != nil { + err = exterror.Create(resp.StatusCode, err) + } return created, err } -// GetFile is a heper function that retrieves a file from -// GitHub and returns its contents in byte array format. -func GetFile(client *github.Client, owner, name, path, ref string) ([]byte, error) { - var opts = new(github.RepositoryContentGetOptions) - opts.Ref = ref - content, _, _, err := client.Repositories.GetContents(owner, name, path, opts) +// createOrgHook is a helper function that creates a post-commit hook +// for the specified Organization. 
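getHook and createHook are building blocks for webhook setup. The remote implementation presumably combines them roughly as below so that registration stays idempotent; ensureHook is an illustrative name, not a function introduced by this change:

// Register the repository webhook only if no hook for this host exists yet.
func ensureHook(ctx context.Context, client *github.Client, owner, name, url string) error {
	hook, err := getHook(ctx, client, owner, name, url)
	if err != nil {
		return err
	}
	if hook != nil {
		// A hook pointing at this host is already registered.
		return nil
	}
	_, err = createHook(ctx, client, owner, name, url)
	return err
}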
+func createOrgHook(ctx context.Context, client *github.Client, owner, url string) (*github.Hook, error) { + var hook = new(github.Hook) + hook.Name = github.String("web") + hook.Events = []string{"repository"} + hook.Config = map[string]interface{}{} + hook.Config["url"] = url + hook.Config["content_type"] = "json" + created, resp, err := client.Organizations.CreateHook(ctx, owner, hook) if err != nil { - return nil, err + err = exterror.Create(resp.StatusCode, err) } - return content.Decode() + return created, err } -// GetUserRepos is a helper function that returns a list of +// getUserRepos is a helper function that returns a list of // all user repositories. Paginated results are aggregated into // a single list. -func GetUserRepos(client *github.Client) ([]github.Repository, error) { - var repos []github.Repository - var opts = github.RepositoryListOptions{} - opts.PerPage = 100 - opts.Page = 1 - - // loop through user repository list - for opts.Page > 0 { - list, resp, err := client.Repositories.List("", &opts) - if err != nil { - return nil, err - } - repos = append(repos, list...) +func getUserRepos(ctx context.Context, client *github.Client, user string) ([]*github.Repository, error) { + var repos []*github.Repository + var loOpts = github.RepositoryListOptions{} + resp, err := buildCompleteList(func(opts *github.ListOptions) (*github.Response, error) { + loOpts.ListOptions = *opts + next, resp, err := client.Repositories.List(ctx, user, &loOpts) + repos = append(repos, next...) + return resp, err + }) - // increment the next page to retrieve - opts.Page = resp.NextPage + if err != nil { + return nil, exterror.Create(resp.StatusCode, err) + } + + return repos, nil +} + +// getOrgRepos is a helper function that returns a list of +// all user repositories. Paginated results are aggregated into +// a single list. +func getOrgRepos(ctx context.Context, client *github.Client, org string) ([]*github.Repository, error) { + var repos []*github.Repository + var loOpts = github.RepositoryListByOrgOptions{} + resp, err := buildCompleteList(func(opts *github.ListOptions) (*github.Response, error) { + loOpts.ListOptions = *opts + next, resp, err := client.Repositories.ListByOrg(ctx, org, &loOpts) + repos = append(repos, next...) + return resp, err + }) + + if err != nil { + return nil, exterror.Create(resp.StatusCode, err) } return repos, nil diff --git a/remote/github/walk.go b/remote/github/walk.go new file mode 100644 index 0000000..f7e5c29 --- /dev/null +++ b/remote/github/walk.go @@ -0,0 +1,94 @@ +/* + +SPDX-Copyright: Copyright (c) Capital One Services, LLC +SPDX-License-Identifier: Apache-2.0 +Copyright 2017 Capital One Services, LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and limitations under the License. 
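The walk.go file that follows skips merge commits created through the GitHub web UI when looking for the commit that actually carries the reviewed changes. A rough illustration of the check it is built around, with fabricated example data:

// A commit produced by the GitHub merge button has two parents and a committer
// named "GitHub", so isCommitUIMerge (defined below) reports true for it.
func exampleUIMerge() bool {
	commit := &github.RepositoryCommit{
		Parents: []github.Commit{
			{SHA: github.String("aaaaaaa")},
			{SHA: github.String("bbbbbbb")},
		},
		Commit: &github.Commit{
			Committer: &github.CommitAuthor{Name: github.String("GitHub")},
		},
	}
	return isCommitUIMerge(commit)
}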
+ +*/ +package github + +import ( + "context" + + "github.com/capitalone/checks-out/exterror" + "github.com/capitalone/checks-out/model" + "github.com/capitalone/checks-out/set" + + "github.com/google/go-github/github" +) + +var systemAccounts = set.New("GitHub", "GitHub Enterprise") + +func isCommitUIMerge(commit *github.RepositoryCommit) bool { + return len(commit.Parents) == 2 && systemAccounts.Contains(*commit.Commit.Committer.Name) +} + +// ignoreUIMerge will find the first commit that is not a merge +// created through the user interface. +func ignoreUIMerge(ctx context.Context, client *github.Client, r *model.Repo, pr *github.PullRequest, + commit *github.RepositoryCommit) (*github.RepositoryCommit, error) { + + // Test the current commit before fetching commits from the base branch + if !isCommitUIMerge(commit) { + return commit, nil + } + + // Fetch commits from the base branch that have occurred since + // the pull request was opened. These are the commits to ignore. + lcOpts := github.CommitsListOptions{SHA: *pr.Base.Ref, Since: *pr.CreatedAt} + commits := set.Empty() + resp, err := buildCompleteList(func(opts *github.ListOptions) (*github.Response, error) { + lcOpts.ListOptions = *opts + next, resp, err := client.Repositories.ListCommits(ctx, r.Owner, r.Name, &lcOpts) + for _, c := range next { + commits.Add(*c.SHA) + } + return resp, err + }) + if err != nil { + return nil, exterror.Create(resp.StatusCode, err) + } + return followCommit(ctx, client, r, commit, commits) +} + +// followCommit walks up the merge commits and selects the +// first commit that is not a merge commit. Commits from +// the base branch are ignored. +func followCommit(ctx context.Context, client *github.Client, r *model.Repo, commit *github.RepositoryCommit, + baseref set.Set) (*github.RepositoryCommit, error) { + + if !isCommitUIMerge(commit) { + return commit, nil + } + left := &commit.Parents[0] + right := &commit.Parents[1] + if baseref.Contains(*left.SHA) && baseref.Contains(*right.SHA) { + return commit, nil + } + if !baseref.Contains(*left.SHA) && !baseref.Contains(*right.SHA) { + return commit, nil + } + var target *github.Commit + if baseref.Contains(*left.SHA) { + target = right + } + if baseref.Contains(*right.SHA) { + target = left + } + commit, resp, err := client.Repositories.GetCommit(ctx, r.Owner, r.Name, *target.SHA) + if err != nil { + return nil, exterror.Create(resp.StatusCode, err) + } + return followCommit(ctx, client, r, commit, baseref) +} diff --git a/remote/mock/remote.go b/remote/mock/remote.go deleted file mode 100644 index 98d361f..0000000 --- a/remote/mock/remote.go +++ /dev/null @@ -1,255 +0,0 @@ -package mock - -import "github.com/stretchr/testify/mock" - -import "net/http" -import "github.com/lgtmco/lgtm/model" - -type Remote struct { - mock.Mock -} - -func (_m *Remote) GetUser(_a0 http.ResponseWriter, _a1 *http.Request) (*model.User, error) { - ret := _m.Called(_a0, _a1) - - var r0 *model.User - if rf, ok := ret.Get(0).(func(http.ResponseWriter, *http.Request) *model.User); ok { - r0 = rf(_a0, _a1) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*model.User) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(http.ResponseWriter, *http.Request) error); ok { - r1 = rf(_a0, _a1) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} -func (_m *Remote) GetUserToken(_a0 string) (string, error) { - ret := _m.Called(_a0) - - var r0 string - if rf, ok := ret.Get(0).(func(string) string); ok { - r0 = rf(_a0) - } else { - r0 = ret.Get(0).(string) - } - - var r1 error 
- if rf, ok := ret.Get(1).(func(string) error); ok { - r1 = rf(_a0) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} -func (_m *Remote) GetTeams(_a0 *model.User) ([]*model.Team, error) { - ret := _m.Called(_a0) - - var r0 []*model.Team - if rf, ok := ret.Get(0).(func(*model.User) []*model.Team); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).([]*model.Team) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(*model.User) error); ok { - r1 = rf(_a0) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} -func (_m *Remote) GetMembers(_a0 *model.User, _a1 string) ([]*model.Member, error) { - ret := _m.Called(_a0, _a1) - - var r0 []*model.Member - if rf, ok := ret.Get(0).(func(*model.User, string) []*model.Member); ok { - r0 = rf(_a0, _a1) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).([]*model.Member) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(*model.User, string) error); ok { - r1 = rf(_a0, _a1) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} -func (_m *Remote) GetRepo(_a0 *model.User, _a1 string, _a2 string) (*model.Repo, error) { - ret := _m.Called(_a0, _a1, _a2) - - var r0 *model.Repo - if rf, ok := ret.Get(0).(func(*model.User, string, string) *model.Repo); ok { - r0 = rf(_a0, _a1, _a2) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*model.Repo) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(*model.User, string, string) error); ok { - r1 = rf(_a0, _a1, _a2) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} -func (_m *Remote) GetPerm(_a0 *model.User, _a1 string, _a2 string) (*model.Perm, error) { - ret := _m.Called(_a0, _a1, _a2) - - var r0 *model.Perm - if rf, ok := ret.Get(0).(func(*model.User, string, string) *model.Perm); ok { - r0 = rf(_a0, _a1, _a2) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*model.Perm) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(*model.User, string, string) error); ok { - r1 = rf(_a0, _a1, _a2) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} -func (_m *Remote) GetRepos(_a0 *model.User) ([]*model.Repo, error) { - ret := _m.Called(_a0) - - var r0 []*model.Repo - if rf, ok := ret.Get(0).(func(*model.User) []*model.Repo); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).([]*model.Repo) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(*model.User) error); ok { - r1 = rf(_a0) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} -func (_m *Remote) SetHook(_a0 *model.User, _a1 *model.Repo, _a2 string) error { - ret := _m.Called(_a0, _a1, _a2) - - var r0 error - if rf, ok := ret.Get(0).(func(*model.User, *model.Repo, string) error); ok { - r0 = rf(_a0, _a1, _a2) - } else { - r0 = ret.Error(0) - } - - return r0 -} -func (_m *Remote) DelHook(_a0 *model.User, _a1 *model.Repo, _a2 string) error { - ret := _m.Called(_a0, _a1, _a2) - - var r0 error - if rf, ok := ret.Get(0).(func(*model.User, *model.Repo, string) error); ok { - r0 = rf(_a0, _a1, _a2) - } else { - r0 = ret.Error(0) - } - - return r0 -} -func (_m *Remote) GetComments(_a0 *model.User, _a1 *model.Repo, _a2 int) ([]*model.Comment, error) { - ret := _m.Called(_a0, _a1, _a2) - - var r0 []*model.Comment - if rf, ok := ret.Get(0).(func(*model.User, *model.Repo, int) []*model.Comment); ok { - r0 = rf(_a0, _a1, _a2) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).([]*model.Comment) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(*model.User, *model.Repo, int) error); ok { - r1 = rf(_a0, _a1, _a2) - } else { - r1 = ret.Error(1) - } - - return r0, r1 
-} -func (_m *Remote) GetContents(_a0 *model.User, _a1 *model.Repo, _a2 string) ([]byte, error) { - ret := _m.Called(_a0, _a1, _a2) - - var r0 []byte - if rf, ok := ret.Get(0).(func(*model.User, *model.Repo, string) []byte); ok { - r0 = rf(_a0, _a1, _a2) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).([]byte) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(*model.User, *model.Repo, string) error); ok { - r1 = rf(_a0, _a1, _a2) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} -func (_m *Remote) SetStatus(_a0 *model.User, _a1 *model.Repo, _a2 int, _a3 bool) error { - ret := _m.Called(_a0, _a1, _a2, _a3) - - var r0 error - if rf, ok := ret.Get(0).(func(*model.User, *model.Repo, int, bool) error); ok { - r0 = rf(_a0, _a1, _a2, _a3) - } else { - r0 = ret.Error(0) - } - - return r0 -} -func (_m *Remote) GetHook(r *http.Request) (*model.Hook, error) { - ret := _m.Called(r) - - var r0 *model.Hook - if rf, ok := ret.Get(0).(func(*http.Request) *model.Hook); ok { - r0 = rf(r) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*model.Hook) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(*http.Request) error); ok { - r1 = rf(r) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} diff --git a/remote/remote.go b/remote/remote.go index 77a0d8b..d8787d5 100644 --- a/remote/remote.go +++ b/remote/remote.go @@ -1,118 +1,358 @@ -package remote +/* + +SPDX-Copyright: Copyright (c) Brad Rydzewski, project contributors, Capital One Services, LLC +SPDX-License-Identifier: Apache-2.0 +Copyright 2017 Brad Rydzewski, project contributors, Capital One Services, LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at -//go:generate mockery -name Remote -output mock -case=underscore + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and limitations under the License. + +*/ +package remote import ( + "context" "net/http" + "sync" - "github.com/lgtmco/lgtm/model" - "golang.org/x/net/context" + "github.com/capitalone/checks-out/model" + "github.com/capitalone/checks-out/remote/github" + "github.com/capitalone/checks-out/set" ) type Remote interface { + // Capabilities generates the user's capabilities + Capabilities(context.Context, *model.User) (*model.Capabilities, error) + // GetUser authenticates a user with the remote system. - GetUser(http.ResponseWriter, *http.Request) (*model.User, error) + GetUser(context.Context, http.ResponseWriter, *http.Request) (*model.User, error) // GetUserToken authenticates a user with the remote system using // the remote systems OAuth token. - GetUserToken(string) (string, error) + GetUserToken(context.Context, string) (string, error) + + // RevokeAuthorization revokes the OAuth token + RevokeAuthorization(c context.Context, user *model.User) error + + // GetPerson retrieves metadata information about a user with the remote system. + GetPerson(c context.Context, user *model.User, login string) (*model.Person, error) + + // GetOrgs gets a organization list from the remote system. 
+ GetOrgs(context.Context, *model.User) ([]*model.GitHubOrg, error) + + // ListTeams gets the repo's list of team names + ListTeams(c context.Context, user *model.User, org string) (set.Set, error) + + // GetOrgMembers gets an organization member list from the remote system. + GetOrgMembers(context.Context, *model.User, string) (set.Set, error) - // GetTeams gets a team list from the remote system. - GetTeams(*model.User) ([]*model.Team, error) + // GetTeamMembers gets an org's team members list from the remote system. + GetTeamMembers(c context.Context, user *model.User, org string, team string) (set.Set, error) - // GetMembers gets a team member list from the remote system. - GetMembers(*model.User, string) ([]*model.Member, error) + // GetCollaborators gets a collaborators list from the remote system. + GetCollaborators(context.Context, *model.User, string, string) (set.Set, error) // GetRepo gets a repository from the remote system. - GetRepo(*model.User, string, string) (*model.Repo, error) + GetRepo(context.Context, *model.User, string, string) (*model.Repo, error) // GetPerm gets a repository permission from the remote system. - GetPerm(*model.User, string, string) (*model.Perm, error) - - // GetRepo gets a repository list from the remote system. - GetRepos(*model.User) ([]*model.Repo, error) + GetPerm(context.Context, *model.User, string, string) (*model.Perm, error) // SetHook adds a webhook to the remote repository. - SetHook(*model.User, *model.Repo, string) error + SetHook(context.Context, *model.User, *model.Repo, string) error // DelHook deletes a webhook from the remote repository. - DelHook(*model.User, *model.Repo, string) error + DelHook(context.Context, *model.User, *model.Repo, string) error + + // GetAllComments gets pull request comments from the remote system. + GetAllComments(context.Context, *model.User, *model.Repo, int) ([]*model.Comment, error) + + // GetCommentsSinceHead gets pull request comments from the remote system since the head commit was committed. + GetCommentsSinceHead(context.Context, *model.User, *model.Repo, int, bool) ([]*model.Comment, error) + + // GetAllReviews gets pull request reviews from the remote system. + GetAllReviews(context.Context, *model.User, *model.Repo, int) ([]*model.Review, error) + + // GetReviewsSinceHead gets pull request reviews from the remote system since the head commit was committed. + GetReviewsSinceHead(context.Context, *model.User, *model.Repo, int, bool) ([]*model.Review, error) + + // IsHeadUIMerge tests whether the HEAD of the pull request is a user interface merge. + IsHeadUIMerge(c context.Context, u *model.User, r *model.Repo, num int) (bool, error) - // GetComments gets pull request comments from the remote system. - GetComments(*model.User, *model.Repo, int) ([]*model.Comment, error) + // CreateCompareURL creates a URL that prepares a diff of two commits + CreateURLCompare(c context.Context, u *model.User, r *model.Repo, sha1, sha2 string) string + + // GetCommits gets one page of git commits + GetCommits(context.Context, *model.User, *model.Repo, string, int, int) ([]string, int, error) // GetContents gets the file contents from the remote system. - GetContents(*model.User, *model.Repo, string) ([]byte, error) + GetContents(context.Context, *model.User, *model.Repo, string) ([]byte, error) + + // GetStatus gets the commit statuses in the remote system. 
+ GetStatus(c context.Context, u *model.User, r *model.Repo, sha string) (model.CombinedStatus, error) + + // HasRequiredStatus tests whether the required commit statuses are passing. + HasRequiredStatus(c context.Context, u *model.User, r *model.Repo, branch, sha string) (bool, error) + + // SetStatus adds or updates the commit status in the remote system. + SetStatus(c context.Context, u *model.User, r *model.Repo, sha, context, status, desc string) error + + // CreateEmptyCommit creates an empty commit from the provided parent sha. + CreateEmptyCommit(c context.Context, u *model.User, r *model.Repo, sha, msg string) (string, error) + + // CreateReference creates a reference pointing to the provided commit + CreateReference(ctx context.Context, u *model.User, r *model.Repo, sha, name string) (string, error) - // SetStatus adds or updates the pull request status in the remote system. - SetStatus(*model.User, *model.Repo, int, bool) error + // CreatePR creates a new pull request + CreatePR(c context.Context, u *model.User, r *model.Repo, title, head, base, body string) (int, error) - // GetHook gets the hook from the http Request. - GetHook(r *http.Request) (*model.Hook, error) + // MergePR merges the named pull request from the remote system + MergePR(c context.Context, u *model.User, r *model.Repo, pullRequest model.PullRequest, approvers []*model.Person, message string, mergeMethod string) (*string, error) + + // CompareBranches compares two branches for changes + CompareBranches(c context.Context, u *model.User, repo *model.Repo, base string, head string, owner string) (model.BranchCompare, error) + + // DeleteBranch deletes a branch with the given reference + DeleteBranch(c context.Context, u *model.User, repo *model.Repo, ref string) error + + // GetMaxExistingTag finds the highest tag across all tags + ListTags(c context.Context, u *model.User, r *model.Repo) ([]model.Tag, error) + + // Tag applies a tag with the specified string to the specified sha + Tag(c context.Context, u *model.User, r *model.Repo, tag *string, sha *string) error + + // GetPullRequest returns the pull request associated with a pull request number + GetPullRequest(c context.Context, u *model.User, r *model.Repo, number int) (model.PullRequest, error) + + // GetPullRequestFiles returns the changed files associated with a pull request number + GetPullRequestFiles(c context.Context, u *model.User, r *model.Repo, number int) ([]model.CommitFile, error) + + // GetPullRequestsForCommit returns all pull requests associated with a commit SHA + GetPullRequestsForCommit(c context.Context, u *model.User, r *model.Repo, sha *string) ([]model.PullRequest, error) + + // GetIssue returns the issue associated with a issue number + GetIssue(c context.Context, u *model.User, r *model.Repo, number int) (model.Issue, error) + + // WriteComment puts a new comment into the PR + WriteComment(c context.Context, u *model.User, r *model.Repo, num int, message string) error + + ScheduleDeployment(c context.Context, u *model.User, r *model.Repo, d model.DeploymentInfo) error + + GetOrgPerm(c context.Context, user *model.User, owner string) (*model.Perm, error) + + // SetOrgHook adds a webhook to the remote organization. + SetOrgHook(context.Context, *model.User, *model.OrgDb, string) error + + // DelOrgHook deletes a webhook from the remote organization. 
+ DelOrgHook(context.Context, *model.User, *model.OrgDb, string) error + + GetOrg(c context.Context, user *model.User, owner string) (*model.OrgDb, error) + + // GetOrgRepos gets the organization repository list from the remote system. + GetOrgRepos(c context.Context, u *model.User, owner string) ([]*model.Repo, error) + + // GetUserRepos gets the user repository list from the remote system. + GetUserRepos(context.Context, *model.User) ([]*model.Repo, error) +} + +// Capabilities generates the user's capabilities +func Capabilities(c context.Context, u *model.User) (*model.Capabilities, error) { + return FromContext(c).Capabilities(c, u) } // GetUser authenticates a user with the remote system. func GetUser(c context.Context, w http.ResponseWriter, r *http.Request) (*model.User, error) { - return FromContext(c).GetUser(w, r) + return FromContext(c).GetUser(c, w, r) } // GetUserToken authenticates a user with the remote system using // the remote systems OAuth token. func GetUserToken(c context.Context, token string) (string, error) { - return FromContext(c).GetUserToken(token) + return FromContext(c).GetUserToken(c, token) } -// GetTeams gets a team list from the remote system. -func GetTeams(c context.Context, u *model.User) ([]*model.Team, error) { - return FromContext(c).GetTeams(u) +// RevokeAuthorization revokes the OAuth token +func RevokeAuthorization(c context.Context, user *model.User) error { + return FromContext(c).RevokeAuthorization(c, user) } -// GetMembers gets a team members list from the remote system. -func GetMembers(c context.Context, u *model.User, team string) ([]*model.Member, error) { - return FromContext(c).GetMembers(u, team) +// GetPerson retrieves metadata information about a user with the remote system. +func GetPerson(c context.Context, user *model.User, login string) (*model.Person, error) { + return FromContext(c).GetPerson(c, user, login) } // GetRepo gets a repository from the remote system. func GetRepo(c context.Context, u *model.User, owner, name string) (*model.Repo, error) { - return FromContext(c).GetRepo(u, owner, name) + return FromContext(c).GetRepo(c, u, owner, name) +} + +// GetAllComments gets pull request comments from the remote system. +func GetAllComments(c context.Context, u *model.User, r *model.Repo, num int) ([]*model.Comment, error) { + return FromContext(c).GetAllComments(c, u, r, num) +} + +// IsHeadUIMerge tests whether the HEAD of the pull request is a user interface merge. +func IsHeadUIMerge(c context.Context, u *model.User, r *model.Repo, num int) (bool, error) { + return FromContext(c).IsHeadUIMerge(c, u, r, num) +} + +// GetCommentsSinceHead gets pull request comments from the remote system since the head commit was committed +func GetCommentsSinceHead(c context.Context, u *model.User, r *model.Repo, num int, noUIMerge bool) ([]*model.Comment, error) { + return FromContext(c).GetCommentsSinceHead(c, u, r, num, noUIMerge) } -// GetPerm gets a repository permission from the remote system. -func GetPerm(c context.Context, u *model.User, owner, name string) (*model.Perm, error) { - return FromContext(c).GetPerm(u, owner, name) +// GetAllReviews gets pull request reviews from the remote system. +func GetAllReviews(c context.Context, u *model.User, r *model.Repo, num int) ([]*model.Review, error) { + return FromContext(c).GetAllReviews(c, u, r, num) } -// GetRepos gets a repository list from the remote system. 
-func GetRepos(c context.Context, u *model.User) ([]*model.Repo, error) { - return FromContext(c).GetRepos(u) +// GetReviewsSinceHead gets pull request reviews from the remote system since the head commit was committed +func GetReviewsSinceHead(c context.Context, u *model.User, r *model.Repo, num int, noUIMerge bool) ([]*model.Review, error) { + return FromContext(c).GetReviewsSinceHead(c, u, r, num, noUIMerge) } -// GetComments gets pull request comments from the remote system. -func GetComments(c context.Context, u *model.User, r *model.Repo, num int) ([]*model.Comment, error) { - return FromContext(c).GetComments(u, r, num) +// CreateURLCompare creates a URL that prepares a diff of two commits +func CreateURLCompare(c context.Context, u *model.User, r *model.Repo, sha1, sha2 string) string { + return FromContext(c).CreateURLCompare(c, u, r, sha1, sha2) +} + +// GetCommits gets one page of git commits +func GetCommits(c context.Context, u *model.User, r *model.Repo, sha string, page, perPage int) ([]string, int, error) { + return FromContext(c).GetCommits(c, u, r, sha, page, perPage) } // GetContents gets the file contents from the remote system. func GetContents(c context.Context, u *model.User, r *model.Repo, path string) ([]byte, error) { - return FromContext(c).GetContents(u, r, path) + return FromContext(c).GetContents(c, u, r, path) } // SetHook adds a webhook to the remote repository. func SetHook(c context.Context, u *model.User, r *model.Repo, hook string) error { - return FromContext(c).SetHook(u, r, hook) + return FromContext(c).SetHook(c, u, r, hook) } // DelHook deletes a webhook from the remote repository. func DelHook(c context.Context, u *model.User, r *model.Repo, hook string) error { - return FromContext(c).DelHook(u, r, hook) + return FromContext(c).DelHook(c, u, r, hook) +} + +// GetStatus gets the commit statuses in the remote system. +func GetStatus(c context.Context, u *model.User, r *model.Repo, sha string) (model.CombinedStatus, error) { + return FromContext(c).GetStatus(c, u, r, sha) +} + +// SetStatus adds or updates the commit status in the remote system. +func SetStatus(c context.Context, u *model.User, r *model.Repo, sha, context, status, desc string) error { + return FromContext(c).SetStatus(c, u, r, sha, context, status, desc) +} + +// HasRequiredStatus tests whether the required commit statuses are passing. +func HasRequiredStatus(c context.Context, u *model.User, r *model.Repo, branch, sha string) (bool, error) { + return FromContext(c).HasRequiredStatus(c, u, r, branch, sha) +} + +// CreateEmptyCommit creates an empty commit from the provided parent sha. 
+func CreateEmptyCommit(c context.Context, u *model.User, r *model.Repo, sha, msg string) (string, error) { + return FromContext(c).CreateEmptyCommit(c, u, r, sha, msg) +} + +// CreateReference creates a reference pointing to the provided commit +func CreateReference(c context.Context, u *model.User, r *model.Repo, sha, name string) (string, error) { + return FromContext(c).CreateReference(c, u, r, sha, name) +} + +// CreatePR creates a new pull request +func CreatePR(c context.Context, u *model.User, r *model.Repo, title, head, base, body string) (int, error) { + return FromContext(c).CreatePR(c, u, r, title, head, base, body) +} + +func MergePR(c context.Context, u *model.User, r *model.Repo, pullRequest model.PullRequest, approvers []*model.Person, message string, mergeMethod string) (*string, error) { + return FromContext(c).MergePR(c, u, r, pullRequest, approvers, message, mergeMethod) +} + +func CompareBranches(c context.Context, u *model.User, r *model.Repo, ref1 string, ref2 string, owner string) (model.BranchCompare, error) { + return FromContext(c).CompareBranches(c, u, r, ref1, ref2, owner) +} + +func DeleteBranch(c context.Context, u *model.User, repo *model.Repo, ref string) error { + return FromContext(c).DeleteBranch(c, u, repo, ref) +} + +func ListTags(c context.Context, u *model.User, r *model.Repo) ([]model.Tag, error) { + return FromContext(c).ListTags(c, u, r) +} + +func Tag(c context.Context, u *model.User, r *model.Repo, tag *string, sha *string) error { + return FromContext(c).Tag(c, u, r, tag, sha) } -// SetStatus adds or updates the pull request status in the remote system. -func SetStatus(c context.Context, u *model.User, r *model.Repo, num int, ok bool) error { - return FromContext(c).SetStatus(u, r, num, ok) +func GetPullRequest(c context.Context, u *model.User, r *model.Repo, number int) (model.PullRequest, error) { + return FromContext(c).GetPullRequest(c, u, r, number) } -// GetHook gets the hook from the http Request. -func GetHook(c context.Context, r *http.Request) (*model.Hook, error) { - return FromContext(c).GetHook(r) +func GetPullRequestFiles(c context.Context, u *model.User, r *model.Repo, number int) ([]model.CommitFile, error) { + return FromContext(c).GetPullRequestFiles(c, u, r, number) +} + +func GetPullRequestsForCommit(c context.Context, u *model.User, r *model.Repo, sha *string) ([]model.PullRequest, error) { + return FromContext(c).GetPullRequestsForCommit(c, u, r, sha) +} + +// GetIssue returns the issue associated with a issue number +func GetIssue(c context.Context, u *model.User, r *model.Repo, number int) (model.Issue, error) { + return FromContext(c).GetIssue(c, u, r, number) +} + +func WriteComment(c context.Context, u *model.User, r *model.Repo, num int, message string) error { + return FromContext(c).WriteComment(c, u, r, num, message) +} + +func ScheduleDeployment(c context.Context, u *model.User, r *model.Repo, d model.DeploymentInfo) error { + return FromContext(c).ScheduleDeployment(c, u, r, d) +} + +func GetOrgPerm(c context.Context, user *model.User, owner string) (*model.Perm, error) { + return FromContext(c).GetOrgPerm(c, user, owner) +} + +// SetOrgHook adds a webhook to the remote organization. +func SetOrgHook(c context.Context, u *model.User, o *model.OrgDb, hook string) error { + return FromContext(c).SetOrgHook(c, u, o, hook) +} + +// DelOrgHook deletes a webhook from the remote organization. 
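These package-level helpers all follow the same delegation pattern: the Remote middleware stores the implementation in the request context with ToContext, and each helper retrieves it through FromContext before forwarding the call. A hypothetical gin handler, with made-up owner and repository names:

// showRepo is illustrative only; *gin.Context satisfies context.Context here,
// which the access middleware in this same change already relies on.
func showRepo(c *gin.Context) {
	user := session.User(c)
	repo, err := remote.GetRepo(c, user, "octocat", "hello-world")
	if err != nil {
		c.Error(err)
		return
	}
	c.JSON(200, repo)
}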
+func DelOrgHook(c context.Context, u *model.User, o *model.OrgDb, hook string) error {
+	return FromContext(c).DelOrgHook(c, u, o, hook)
+}
+
+func GetOrg(c context.Context, user *model.User, owner string) (*model.OrgDb, error) {
+	return FromContext(c).GetOrg(c, user, owner)
+}
+
+func GetUserRepos(c context.Context, user *model.User) ([]*model.Repo, error) {
+	return FromContext(c).GetUserRepos(c, user)
+}
+
+func GetOrgRepos(c context.Context, user *model.User, owner string) ([]*model.Repo, error) {
+	return FromContext(c).GetOrgRepos(c, user, owner)
+}
+
+var once sync.Once
+var cachedRemote Remote
+
+//todo use some more dynamic way to register which git repository we are interacting with. For now,
+//todo we only have github support, but might add support for other repo types at some point.
+func Get() Remote {
+	once.Do(func() {
+		cachedRemote = github.Get()
+	})
+	return cachedRemote
 }
diff --git a/router/middleware/access/access.go b/router/middleware/access/access.go
index 79ebcd5..5d560c2 100644
--- a/router/middleware/access/access.go
+++ b/router/middleware/access/access.go
@@ -1,13 +1,54 @@
+/*
+
+SPDX-Copyright: Copyright (c) Brad Rydzewski, project contributors, Capital One Services, LLC
+SPDX-License-Identifier: Apache-2.0
+Copyright 2017 Brad Rydzewski, project contributors, Capital One Services, LLC
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and limitations under the License.
+
+*/
 package access
 
 import (
-	"github.com/lgtmco/lgtm/cache"
-	"github.com/lgtmco/lgtm/router/middleware/session"
+	"github.com/capitalone/checks-out/remote"
+	"github.com/capitalone/checks-out/router/middleware/session"
 
 	log "github.com/Sirupsen/logrus"
 	"github.com/gin-gonic/gin"
 )
 
+func OwnerAdmin(c *gin.Context) {
+	var (
+		owner = c.Param("owner")
+		user  = session.User(c)
+	)
+
+	perm, err := remote.GetOrgPerm(c, user, owner)
+	if err != nil {
+		log.Warnf("Cannot find org %s. %s", owner, err)
+		c.String(404, "Not Found")
+		c.Abort()
+		return
+	}
+	if !perm.Admin {
+		log.Warnf("User %s does not have Admin access to org %s", user.Login, owner)
+		c.String(403, "Insufficient privileges")
+		c.Abort()
+		return
+	}
+	log.Debugf("User %s granted Admin access to org %s", user.Login, owner)
+	c.Next()
+}
+
 func RepoAdmin(c *gin.Context) {
 	var (
 		owner = c.Param("owner")
@@ -15,15 +56,15 @@ func RepoAdmin(c *gin.Context) {
 		user  = session.User(c)
 	)
 
-	perm, err := cache.GetPerm(c, user, owner, name)
+	perm, err := remote.GetPerm(c, user, owner, name)
 	if err != nil {
-		log.Errorf("Cannot find repository %s/%s. %s", owner, name, err)
+		log.Warnf("Cannot find repository %s/%s. 
%s", owner, name, err) c.String(404, "Not Found") c.Abort() return } if !perm.Admin { - log.Errorf("User %s does not have Admin access to repository %s/%s", user.Login, owner, name) + log.Warnf("User %s does not have Admin access to repository %s/%s", user.Login, owner, name) c.String(403, "Insufficient privileges") c.Abort() return @@ -39,15 +80,15 @@ func RepoPull(c *gin.Context) { user = session.User(c) ) - perm, err := cache.GetPerm(c, user, owner, name) + perm, err := remote.GetPerm(c, user, owner, name) if err != nil { - log.Errorf("Cannot find repository %s/%s. %s", owner, name, err) + log.Warnf("Cannot find repository %s/%s. %s", owner, name, err) c.String(404, "Not Found") c.Abort() return } if !perm.Pull { - log.Errorf("User %s does not have Pull access to repository %s/%s", user.Login, owner, name) + log.Warnf("User %s does not have Pull access to repository %s/%s", user.Login, owner, name) c.String(404, "Not Found") c.Abort() return diff --git a/router/middleware/cache.go b/router/middleware/cache.go index 4876051..c251632 100644 --- a/router/middleware/cache.go +++ b/router/middleware/cache.go @@ -1,22 +1,35 @@ +/* + +SPDX-Copyright: Copyright (c) Brad Rydzewski, project contributors, Capital One Services, LLC +SPDX-License-Identifier: Apache-2.0 +Copyright 2017 Brad Rydzewski, project contributors, Capital One Services, LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and limitations under the License. + +*/ package middleware import ( "time" - "github.com/lgtmco/lgtm/cache" - "github.com/gin-gonic/gin" - "github.com/ianschenck/envflag" -) - -var ( - ttl = envflag.Duration("CACHE_TTL", time.Minute*15, "") + "github.com/capitalone/checks-out/cache" + "github.com/capitalone/checks-out/envvars" ) func Cache() gin.HandlerFunc { - cache_ := cache.NewTTL(*ttl) + cache_ := cache.NewTTL(time.Duration(envvars.Env.Cache.CacheTTL)) return func(c *gin.Context) { - c.Set("cache", cache_) + cache.ToContext(c, cache_) c.Next() } } diff --git a/router/middleware/exterror.go b/router/middleware/exterror.go new file mode 100644 index 0000000..087f5b4 --- /dev/null +++ b/router/middleware/exterror.go @@ -0,0 +1,60 @@ +/* + +SPDX-Copyright: Copyright (c) Capital One Services, LLC +SPDX-License-Identifier: Apache-2.0 +Copyright 2017 Capital One Services, LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and limitations under the License. 
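The ExtError middleware introduced below drains gin's error list after the handlers run, logs client errors as warnings and server errors together with the request headers, and writes the HTTP response. A hedged sketch of a handler feeding it; the handler and message are invented for the example:

// Attach a status-carrying error to the context and let ExtError respond.
func getWidget(c *gin.Context) {
	id := c.Param("id")
	if id == "" {
		c.Error(exterror.Create(400, errors.New("missing widget id")))
		return
	}
	c.String(200, "widget %s", id)
}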
+ +*/ +package middleware + +import ( + "fmt" + + "github.com/capitalone/checks-out/exterror" + + log "github.com/Sirupsen/logrus" + "github.com/gin-gonic/gin" +) + +func ExtError() gin.HandlerFunc { + return func(c *gin.Context) { + c.Next() + errs := c.Errors + if len(errs) == 1 { + logAndRespond(c, errs[0].Err) + } else if len(errs) > 1 { + err := fmt.Errorf("Multiple errors: %s", errs.String()) + err = exterror.ExtError{Status: 500, Err: err} + logAndRespond(c, err) + } + } +} + +func emitLog(c *gin.Context, e exterror.ExtError) { + msg := e.Err.Error() + if e.Status < 500 { + log.Warn(msg) + } else { + log.Error(msg) + for k, v := range c.Request.Header { + log.Errorf("%s: %v", k, v) + } + } +} + +func logAndRespond(c *gin.Context, e error) { + err := exterror.Convert(e) + emitLog(c, err) + c.String(err.Status, err.Error()) +} diff --git a/router/middleware/logrequests.go b/router/middleware/logrequests.go new file mode 100644 index 0000000..06fb668 --- /dev/null +++ b/router/middleware/logrequests.go @@ -0,0 +1,80 @@ +/* + +SPDX-Copyright: Copyright (c) Capital One Services, LLC +SPDX-License-Identifier: Apache-2.0 +Copyright 2017 Capital One Services, LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and limitations under the License. + +*/ +package middleware + +import ( + "strings" + "time" + + "github.com/capitalone/checks-out/envvars" + "github.com/capitalone/checks-out/exterror" + + "github.com/Sirupsen/logrus" + "github.com/gin-gonic/gin" +) + +func Ginrus(logger *logrus.Logger, timeFormat string, utc bool) gin.HandlerFunc { + uaslice := strings.Split(envvars.Env.Monitor.UaList, ":") + if len(uaslice) == 1 && len(uaslice[0]) == 0 { + uaslice = nil + } + return func(c *gin.Context) { + start := time.Now() + // some evil middlewares modify this values + path := c.Request.URL.Path + c.Next() + + userAgent := c.Request.UserAgent() + + for _, v := range uaslice { + if strings.Contains(userAgent, v) { + return + } + } + end := time.Now() + latency := end.Sub(start) + if utc { + end = end.UTC() + } + + entry := logger.WithFields(logrus.Fields{ + "status": c.Writer.Status(), + "method": c.Request.Method, + "path": path, + "ip": c.ClientIP(), + "latency": latency, + "user-agent": userAgent, + "time": end.Format(timeFormat), + }) + + if len(c.Errors) > 1 { + // Append error field if this is an erroneous request. + entry.Error(c.Errors.String()) + } else if len(c.Errors) == 1 { + err := c.Errors[0].Err + if exterror.Convert(err).Status < 500 { + entry.Warn(err.Error()) + } else { + entry.Error(err.Error()) + } + } else { + entry.Info() + } + } +} diff --git a/vendor/github.com/gin-gonic/gin/recovery.go b/router/middleware/recovery.go similarity index 59% rename from vendor/github.com/gin-gonic/gin/recovery.go rename to router/middleware/recovery.go index c502f35..0451d56 100644 --- a/vendor/github.com/gin-gonic/gin/recovery.go +++ b/router/middleware/recovery.go @@ -1,8 +1,22 @@ -// Copyright 2014 Manu Martinez-Almeida. All rights reserved. -// Use of this source code is governed by a MIT style -// license that can be found in the LICENSE file. 
+/* -package gin +SPDX-Copyright: Copyright (c) Brad Rydzewski, project contributors, Capital One Services, LLC +SPDX-License-Identifier: Apache-2.0 +Copyright 2017 Brad Rydzewski, project contributors, Capital One Services, LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and limitations under the License. + +*/ +package middleware import ( "bytes" @@ -12,6 +26,9 @@ import ( "log" "net/http/httputil" "runtime" + + "github.com/Sirupsen/logrus" + "github.com/gin-gonic/gin" ) var ( @@ -19,27 +36,30 @@ var ( centerDot = []byte("·") dot = []byte(".") slash = []byte("/") + reset = string([]byte{27, 91, 48, 109}) ) // Recovery returns a middleware that recovers from any panics and writes a 500 if there was one. -func Recovery() HandlerFunc { - return RecoveryWithWriter(DefaultErrorWriter) +func Recovery(sunlight bool) gin.HandlerFunc { + return RecoveryWithWriter(gin.DefaultErrorWriter, sunlight) } -func RecoveryWithWriter(out io.Writer) HandlerFunc { - var logger *log.Logger - if out != nil { - logger = log.New(out, "\n\n\x1b[31m", log.LstdFlags) - } - return func(c *Context) { +func RecoveryWithWriter(out io.Writer, sunlight bool) gin.HandlerFunc { + logger := log.New(out, "\n\n\x1b[31m", log.LstdFlags) + return func(c *gin.Context) { defer func() { if err := recover(); err != nil { - if logger != nil { - stack := stack(3) - httprequest, _ := httputil.DumpRequest(c.Request, false) - logger.Printf("[Recovery] panic recovered:\n%s\n%s\n%s%s", string(httprequest), err, stack, reset) + stack := stack(3) + logrus.Error("Recovering from panic. Preserving stack trace.") + httprequest, _ := httputil.DumpRequest(c.Request, false) + logger.Printf("[Recovery] panic recovered:\n%s\n%s\n%s%s", string(httprequest), err, stack, reset) + if sunlight { + c.String(500, "[Recovery] panic recovered: %s\n%s", + err, + stack) + } else { + c.String(500, "Internal server error") } - c.AbortWithStatus(500) } }() c.Next() diff --git a/router/middleware/remote.go b/router/middleware/remote.go index 43667ea..b5e5c22 100644 --- a/router/middleware/remote.go +++ b/router/middleware/remote.go @@ -1,41 +1,32 @@ -package middleware +/* -import ( - "strings" +SPDX-Copyright: Copyright (c) Brad Rydzewski, project contributors, Capital One Services, LLC +SPDX-License-Identifier: Apache-2.0 +Copyright 2017 Brad Rydzewski, project contributors, Capital One Services, LLC - "github.com/lgtmco/lgtm/remote/github" +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - "github.com/gin-gonic/gin" - "github.com/ianschenck/envflag" -) + http://www.apache.org/licenses/LICENSE-2.0 -const ( - DefaultURL = "https://github.com" - DefaultAPI = "https://api.github.com/" - DefaultScope = "user:email,read:org,public_repo" -) +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and limitations under the License. + +*/ +package middleware + +import ( + "github.com/capitalone/checks-out/remote" -var ( - server = envflag.String("GITHUB_URL", DefaultURL, "") - client = envflag.String("GITHUB_CLIENT", "", "") - secret = envflag.String("GITHUB_SECRET", "", "") - scope = envflag.String("GITHUB_SCOPE", DefaultScope, "") + "github.com/gin-gonic/gin" ) func Remote() gin.HandlerFunc { - remote := &github.Github{ - API: DefaultAPI, - URL: *server, - Client: *client, - Secret: *secret, - Scopes: strings.Split(*scope, ","), - } - if remote.URL != DefaultURL { - remote.URL = strings.TrimSuffix(remote.URL, "/") - remote.API = remote.URL + "/api/v3/" - } return func(c *gin.Context) { - c.Set("remote", remote) + remote.ToContext(c, remote.Get()) c.Next() } } diff --git a/router/middleware/session/capabilities.go b/router/middleware/session/capabilities.go new file mode 100644 index 0000000..28835a7 --- /dev/null +++ b/router/middleware/session/capabilities.go @@ -0,0 +1,53 @@ +/* + +SPDX-Copyright: Copyright (c) Capital One Services, LLC +SPDX-License-Identifier: Apache-2.0 +Copyright 2017 Capital One Services, LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and limitations under the License. + +*/ +package session + +import ( + "github.com/gin-gonic/gin" + "github.com/capitalone/checks-out/exterror" + "github.com/capitalone/checks-out/model" + "github.com/capitalone/checks-out/remote" +) + +func Capability(c *gin.Context) *model.Capabilities { + v, ok := c.Get("capabilities") + if !ok { + return nil + } + u, ok := v.(*model.Capabilities) + if !ok { + return nil + } + return u +} + +func SetCapability(c *gin.Context) { + user := User(c) + if user != nil { + caps, err := remote.Capabilities(c, user) + if err != nil { + err2 := exterror.Convert(err) + c.String(err2.Status, err2.Err.Error()) + c.Abort() + return + } + c.Set("capabilities", caps) + } + c.Next() +} diff --git a/router/middleware/session/session.go b/router/middleware/session/user.go similarity index 50% rename from router/middleware/session/session.go rename to router/middleware/session/user.go index ced7f78..4dc3fd6 100644 --- a/router/middleware/session/session.go +++ b/router/middleware/session/user.go @@ -1,11 +1,29 @@ +/* + +SPDX-Copyright: Copyright (c) Brad Rydzewski, project contributors, Capital One Services, LLC +SPDX-License-Identifier: Apache-2.0 +Copyright 2017 Brad Rydzewski, project contributors, Capital One Services, LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and limitations under the License. 
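The renamed session/user.go below keeps the old behavior: SetUser resolves the current user from the session or the CSRF-checked token, and UserMust aborts with a 401 when no user is present. A small wiring sketch that mirrors the routes registered in router.go later in this diff:

// SetUser runs on every request; UserMust guards authenticated endpoints.
func exampleRoutes() *gin.Engine {
	e := gin.New()
	e.Use(session.SetUser)
	e.GET("/api/user", session.UserMust, api.GetUser)
	return e
}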
+ +*/ package session import ( "net/http" - "github.com/lgtmco/lgtm/model" - "github.com/lgtmco/lgtm/shared/token" - "github.com/lgtmco/lgtm/store" + "github.com/capitalone/checks-out/model" + "github.com/capitalone/checks-out/shared/token" + "github.com/capitalone/checks-out/store" "github.com/gin-gonic/gin" ) @@ -26,8 +44,9 @@ func UserMust(c *gin.Context) { user := User(c) switch { case user == nil: - c.AbortWithStatus(http.StatusUnauthorized) - // c.HTML(http.StatusUnauthorized, "401.html", gin.H{}) + c.String(http.StatusUnauthorized, + "You must be logged in and authorized to use this endpoint") + c.Abort() default: c.Next() } @@ -57,7 +76,9 @@ func SetUser(c *gin.Context) { // if csrf token validation fails, exit immediately // with a not authorized error. if err != nil { - c.AbortWithStatus(http.StatusUnauthorized) + c.String(http.StatusUnauthorized, + "You must be logged in and authorized to use this endpoint") + c.Abort() return } } diff --git a/router/middleware/store.go b/router/middleware/store.go index fcd504d..4176339 100644 --- a/router/middleware/store.go +++ b/router/middleware/store.go @@ -1,21 +1,40 @@ +/* + +SPDX-Copyright: Copyright (c) Brad Rydzewski, project contributors, Capital One Services, LLC +SPDX-License-Identifier: Apache-2.0 +Copyright 2017 Brad Rydzewski, project contributors, Capital One Services, LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and limitations under the License. + +*/ package middleware import ( - "github.com/lgtmco/lgtm/store/datastore" + "fmt" "github.com/gin-gonic/gin" - "github.com/ianschenck/envflag" -) - -var ( - driver = envflag.String("DATABASE_DRIVER", "sqlite3", "") - datasource = envflag.String("DATABASE_DATASOURCE", "lgtm.sqlite", "") + "github.com/capitalone/checks-out/store" + "github.com/capitalone/checks-out/store/datastore" ) func Store() gin.HandlerFunc { - store := datastore.New(*driver, *datasource) return func(c *gin.Context) { - c.Set("store", store) + store.ToContext(c, datastore.Get()) c.Next() } } + +func BaseURL(c *gin.Context) { + url := c.Request.URL + c.Set("BASE_URL", fmt.Sprintf("%s://%s", url.Scheme, url.Host)) + c.Next() +} diff --git a/router/middleware/version.go b/router/middleware/version.go index 2b0025a..bafba80 100644 --- a/router/middleware/version.go +++ b/router/middleware/version.go @@ -1,14 +1,32 @@ +/* + +SPDX-Copyright: Copyright (c) Brad Rydzewski, project contributors, Capital One Services, LLC +SPDX-License-Identifier: Apache-2.0 +Copyright 2017 Brad Rydzewski, project contributors, Capital One Services, LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and limitations under the License. 
+ +*/ package middleware import ( - "github.com/lgtmco/lgtm/version" + "github.com/capitalone/checks-out/version" "github.com/gin-gonic/gin" ) -// Version is a middleware function that appends the LGTM version information +// Version is a middleware function that appends version information // to the HTTP response. This is intended for debugging and troubleshooting. func Version(c *gin.Context) { - c.Header("X-LGTM-VERSION", version.Version) + c.Header("X-CHECKS-OUT-VERSION", version.Version) c.Next() } diff --git a/router/router.go b/router/router.go index 9813ee7..3457fdc 100644 --- a/router/router.go +++ b/router/router.go @@ -1,21 +1,45 @@ +/* + +SPDX-Copyright: Copyright (c) Brad Rydzewski, project contributors, Capital One Services, LLC +SPDX-License-Identifier: Apache-2.0 +Copyright 2017 Brad Rydzewski, project contributors, Capital One Services, LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and limitations under the License. + +*/ package router import ( "net/http" + "time" + "github.com/Sirupsen/logrus" + "github.com/capitalone/checks-out/api" + "github.com/capitalone/checks-out/envvars" + "github.com/capitalone/checks-out/router/middleware" + "github.com/capitalone/checks-out/router/middleware/access" + "github.com/capitalone/checks-out/router/middleware/header" + "github.com/capitalone/checks-out/router/middleware/session" + "github.com/capitalone/checks-out/web" + "github.com/capitalone/checks-out/web/static" + "github.com/capitalone/checks-out/web/template" "github.com/gin-gonic/gin" - "github.com/lgtmco/lgtm/api" - "github.com/lgtmco/lgtm/router/middleware/access" - "github.com/lgtmco/lgtm/router/middleware/header" - "github.com/lgtmco/lgtm/router/middleware/session" - "github.com/lgtmco/lgtm/web" - "github.com/lgtmco/lgtm/web/static" - "github.com/lgtmco/lgtm/web/template" ) -func Load(middleware ...gin.HandlerFunc) http.Handler { +// Load creates a new HTTP handler +func Load() http.Handler { e := gin.New() - e.Use(gin.Recovery()) + sunlight := envvars.Env.Monitor.Sunlight + e.Use(middleware.Recovery(sunlight)) e.SetHTMLTemplate(template.Template()) e.StaticFS("/static", static.FileSystem()) @@ -23,19 +47,63 @@ func Load(middleware ...gin.HandlerFunc) http.Handler { e.Use(header.NoCache) e.Use(header.Options) e.Use(header.Secure) - e.Use(middleware...) 
+ e.Use(middleware.Ginrus(logrus.StandardLogger(), time.RFC3339, true)) + e.Use(middleware.Store()) + e.Use(middleware.Remote()) + e.Use(middleware.Cache()) + e.Use(middleware.ExtError()) + if sunlight { + e.Use(middleware.Version) + } e.Use(session.SetUser) + e.Use(session.SetCapability) + e.Use(middleware.BaseURL) + + adminGroup := e.Group("/admin", session.UserMust, api.CheckAdmin) + adminGroup.GET("config/subtree/*path", api.GetAllConfigurationSubtree) + adminGroup.POST("slack/:hostname", api.RegisterSlackUrl) + adminGroup.GET("slack/:hostname", api.GetSlackUrl) + adminGroup.DELETE("slack/:hostname", api.DeleteSlackUrl) + adminGroup.PUT("slack/:hostname", api.UpdateSlackUrl) + adminGroup.DELETE("repos/:owner", api.AdminDeleteOrg) + adminGroup.DELETE("repos/:owner/:repo", api.AdminDeleteRepo) + adminGroup.GET("user/:user/repos", api.GetReposForUserLogin) + adminGroup.GET("stats", api.AdminStats) e.GET("/api/user", session.UserMust, api.GetUser) - e.GET("/api/user/teams", session.UserMust, api.GetTeams) - e.GET("/api/user/repos", session.UserMust, api.GetRepos) + e.DELETE("/api/user", session.UserMust, api.DeleteUser) + e.GET("/api/user/orgs", session.UserMust, api.GetOrgs) + e.GET("/api/user/repos", session.UserMust, api.GetUserRepos) + e.GET("/api/user/repos/:org", session.UserMust, api.GetOrgRepos) + e.GET("/api/user/orgs/enabled", session.UserMust, api.GetEnabledOrgs) + e.GET("/api/repos/:owner/:repo", session.UserMust, access.RepoPull, api.GetRepo) e.POST("/api/repos/:owner/:repo", session.UserMust, access.RepoAdmin, api.PostRepo) e.DELETE("/api/repos/:owner/:repo", session.UserMust, access.RepoAdmin, api.DeleteRepo) + e.GET("/api/repos/:owner/:repo/config", session.UserMust, access.RepoPull, api.GetConfig) e.GET("/api/repos/:owner/:repo/maintainers", session.UserMust, access.RepoPull, api.GetMaintainer) - e.GET("/api/repos/:owner/:repo/maintainers/:org", session.UserMust, access.RepoPull, api.GetMaintainerOrg) + e.GET("/api/repos/:owner/:repo/validate", session.UserMust, access.RepoPull, api.Validate) + e.GET("/api/repos/:owner/:repo/lgtm-to-checks-out", session.UserMust, access.RepoPull, api.Convert) + + e.GET("/api/teams/:owner", session.UserMust, api.GetTeams) + + e.GET("/api/pr/:owner/:repo/:id/status", session.UserMust, access.RepoPull, web.ApprovalStatus) + + e.POST("/api/repos/:owner", session.UserMust, access.OwnerAdmin, api.PostOrg) + e.DELETE("/api/repos/:owner", session.UserMust, access.OwnerAdmin, api.DeleteOrg) + + e.POST("/api/user/slack/:hostname", session.UserMust, api.UserRegisterSlackUrl) + e.GET("/api/user/slack/:hostname", session.UserMust, api.UserGetSlackUrl) + e.DELETE("/api/user/slack/:hostname", session.UserMust, api.UserDeleteSlackUrl) + e.PUT("/api/user/slack/:hostname", session.UserMust, api.UserUpdateSlackUrl) + + if sunlight { + e.GET("/api/repos", api.GetAllRepos) + e.GET("/version", web.Version) + } + e.GET("/api/count", api.GetAllReposCount) - e.POST("/hook", web.Hook) + e.POST("/hook", web.ProcessHook) e.GET("/login", web.Login) e.POST("/login", web.LoginToken) e.GET("/logout", web.Logout) diff --git a/set/lowerset.go b/set/lowerset.go new file mode 100644 index 0000000..5895967 --- /dev/null +++ b/set/lowerset.go @@ -0,0 +1,176 @@ +/* + +SPDX-Copyright: Copyright (c) Capital One Services, LLC +SPDX-License-Identifier: Apache-2.0 +Copyright 2017 Capital One Services, LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and limitations under the License. + +*/ +package set + +import ( + "encoding/json" + "sort" + "github.com/capitalone/checks-out/strings/lowercase" + "strings" +) + +// LowerSet is a collection of unique lowercase strings +type LowerSet map[lowercase.String]bool + +// Empty creates an empty lowerset +func EmptyLower() LowerSet { + return make(map[lowercase.String]bool) +} + +// AddAll inserts all elements from a lowerset into the lowerset +func (s LowerSet) AddAll(input LowerSet) { + for k := range input { + s.Add(k) + } +} + +// AddAll inserts all elements from a set into the lowerset +func (s LowerSet) AddAllSet(input Set) { + for k := range input { + s.Add(lowercase.Create(k)) + } +} + +// Add inserts an element into the lowerset +func (s LowerSet) Add(key lowercase.String) { + s[key] = true +} + +func (s LowerSet) AddString(key string) { + s[lowercase.Create(key)] = true +} + +// Remove deletes an element from the lowerset +func (s LowerSet) Remove(key lowercase.String) { + delete(s, key) +} + +func (s LowerSet) RemoveString(key string) { + delete(s, lowercase.Create(key)) +} + +// Contains tests whether an element is a member of the lowerset +func (s LowerSet) Contains(key lowercase.String) bool { + _, ok := s[key] + return ok +} + +// ContainsString tests whether an element is a member of the lowerset +func (s LowerSet) ContainsString(key string) bool { + _, ok := s[lowercase.Create(key)] + return ok +} + +// Intersection returns all elements in common between this lowerset and the lowerset passed in +func (s LowerSet) Intersection(other LowerSet) LowerSet { + var small, large LowerSet + if len(s) < len(other) { + small, large = s, other + } else { + small, large = other, s + } + res := EmptyLower() + for k := range small { + if large.Contains(k) { + res.Add(k) + } + } + return res +} + +// Difference returns all elements that are in this lowerset but not in the passed-in lowerset +func (s LowerSet) Difference(other LowerSet) LowerSet { + res := EmptyLower() + for k := range s { + if !other.Contains(k) { + res.Add(k) + } + } + return res +} + +func (s LowerSet) Print(sep string) string { + keys := s.KeysSorted(func(s1,s2 lowercase.String) bool { + return s1.String() < s2.String() + }) + return strings.Join(keys.ToStringSlice(), sep) +} + +// NewLower creates a new lowerset with the provided values +func NewLower(keys ...lowercase.String) LowerSet { + set := EmptyLower() + for _, k := range keys { + set.Add(k) + } + return set +} + +// NewLowerFromString creates a new lowerset with the provided values +func NewLowerFromString(keys ...string) LowerSet { + set := EmptyLower() + for _, k := range keys { + set.Add(lowercase.Create(k)) + } + return set +} + +func (s LowerSet) Keys() lowercase.Slice { + l := len(s) + if l == 0 { + return nil + } + lst := make(lowercase.Slice, 0, l) + for k := range s { + lst = append(lst, k) + } + return lst +} + +func (s LowerSet) KeysSorted(f func (s1, s2 lowercase.String) bool) lowercase.Slice { + lst := s.Keys() + sort.Slice(lst, func(i, j int) bool { + return f(lst[i], lst[j]) + }) + return lst +} + +func (s LowerSet) MarshalJSON() ([]byte, error) { + keys := make([]string, 0, len(s)) + for k 
:= range s { + keys = append(keys, k.String()) + } + return json.Marshal(keys) +} + +func (s *LowerSet) UnmarshalJSON(data []byte) error { + var keys []string + err := json.Unmarshal(data, &keys) + if err != nil { + return err + } + *s = EmptyLower() + for _, k := range keys { + s.AddString(k) + } + return nil +} + +func (s LowerSet) ToSet() Set { + return New(s.Keys().ToStringSlice()...) +} \ No newline at end of file diff --git a/set/lowerset_test.go b/set/lowerset_test.go new file mode 100644 index 0000000..448ff6e --- /dev/null +++ b/set/lowerset_test.go @@ -0,0 +1,130 @@ +/* + +SPDX-Copyright: Copyright (c) Capital One Services, LLC +SPDX-License-Identifier: Apache-2.0 +Copyright 2017 Capital One Services, LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and limitations under the License. + +*/ +package set + +import ( + "encoding/json" + "reflect" + "testing" + "github.com/capitalone/checks-out/strings/lowercase" +) + +func TestAddAllLower(t *testing.T) { + s := EmptyLower() + s.AddAll(NewLowerFromString("A", "B", "C")) + if !s.Contains(lowercase.Create("a")) { + t.Error("Set is missing value 'a'") + } + if !s.Contains(lowercase.Create("b")) { + t.Error("Set is missing value 'b'") + } + // We pass in upper-case, it matches lowercase + if !s.ContainsString("C") { + t.Error("Set is missing value 'C'") + } + if !s.ContainsString("c") { + t.Error("Set is missing value 'c'") + } +} + +func TestAddContainsLower(t *testing.T) { + s := EmptyLower() + s.AddString("Foo") + s.AddString("Bar") + s.AddString("foo") + if !s.ContainsString("foo") { + t.Error("Set is missing value 'foo'") + } + if !s.ContainsString("bar") { + t.Error("Set is missing value 'bar'") + } + if s.ContainsString("baz") { + t.Error("Set is not missing value 'baz'") + } +} + +func TestNewLower(t *testing.T) { + s := NewLowerFromString("Foo", "Bar", "foo") + if !s.ContainsString("foo") { + t.Error("Set is missing value 'foo'") + } + if !s.ContainsString("bar") { + t.Error("Set is missing value 'bar'") + } + if s.ContainsString("baz") { + t.Error("Set is not missing value 'baz'") + } +} + +func TestKeysLower(t *testing.T) { + s := NewLowerFromString("Foo", "Bar", "baz") + keys := s.Keys() + if len(keys) != 3 { + t.Error("Set keys list has incorrect size", keys) + } +} + +func TestPrintLower(t *testing.T) { + x := NewLower() + y := NewLowerFromString("a", "b") + res1 := x.Print(",") + res2 := y.Print(",") + if res1 != "" { + t.Error("Set print incorrect", res1) + } + if res2 != "a,b" { + t.Error("Set print incorrect", res2) + } +} + +func TestIntersectionLower(t *testing.T) { + a := NewLowerFromString("a", "B", "c") + b := NewLowerFromString("b", "C", "y", "z") + obs := a.Intersection(b) + exp := NewLowerFromString("b", "c") + if !reflect.DeepEqual(exp, obs) { + t.Error("Set difference incorrect", obs) + } + obs = b.Intersection(a) + if !reflect.DeepEqual(exp, obs) { + t.Error("Set difference incorrect", obs) + } +} + +func TestJSONLower(t *testing.T) { + var out Set + in := NewLowerFromString("fOO", "Bar") + text, err := json.Marshal(in) + if err != nil { + t.Fatal("Error marshaling 
set", err) + } + err = json.Unmarshal(text, &out) + if err != nil { + t.Fatal("Error unmarshaling set", err) + } + if !out.Contains("foo") { + t.Error("Set is missing value 'foo'") + } + if !out.Contains("bar") { + t.Error("Set is missing value 'bar'") + } + if out.Contains("baz") { + t.Error("Set is not missing value 'baz'") + } +} diff --git a/set/set.go b/set/set.go new file mode 100644 index 0000000..17ac915 --- /dev/null +++ b/set/set.go @@ -0,0 +1,145 @@ +/* + +SPDX-Copyright: Copyright (c) Capital One Services, LLC +SPDX-License-Identifier: Apache-2.0 +Copyright 2017 Capital One Services, LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and limitations under the License. + +*/ +package set + +import ( + "encoding/json" + "sort" +) + +// Set is a collection of unique strings +type Set map[string]bool + +// Empty creates an empty set +func Empty() Set { + return make(map[string]bool) +} + +// AddAll inserts all element into the set +func (s Set) AddAll(input Set) { + for k := range input { + s.Add(k) + } +} + +// Add inserts an element into the set +func (s Set) Add(key string) { + s[key] = true +} + +// Remove deletes an element from the set +func (s Set) Remove(key string) { + delete(s, key) +} + +// Contains tests whether an element is a member of the set +func (s Set) Contains(key string) bool { + _, ok := s[key] + return ok +} + +func (s Set) Intersection(other Set) Set { + var small, large Set + if len(s) < len(other) { + small, large = s, other + } else { + small, large = other, s + } + res := Empty() + for k := range small { + if large.Contains(k) { + res.Add(k) + } + } + return res +} + +func (s Set) Difference(other Set) Set { + res := Empty() + for k := range s { + if !other.Contains(k) { + res.Add(k) + } + } + return res +} + +func (s Set) Print(sep string) string { + res := "" + keys := s.KeysSorted(func(s1,s2 string) bool { + return s1 < s2 + }) + for i, k := range keys { + res += k + if i < len(keys)-1 { + res += sep + } + } + return res +} + +// New creates a new set with the provided values +func New(keys ...string) Set { + set := Empty() + for _, k := range keys { + set.Add(k) + } + return set +} + +func (s Set) Keys() []string { + l := len(s) + if l == 0 { + return nil + } + lst := make([]string, 0, l) + for k := range s { + lst = append(lst, k) + } + return lst +} + +func (s Set) KeysSorted(f func (s1, s2 string) bool) []string { + lst := s.Keys() + sort.Slice(lst, func(i, j int) bool { + return f(lst[i], lst[j]) + }) + return lst +} + +func (s Set) MarshalJSON() ([]byte, error) { + keys := make([]string, 0, len(s)) + for k := range s { + keys = append(keys, k) + } + return json.Marshal(keys) +} + +func (s *Set) UnmarshalJSON(data []byte) error { + var keys []string + err := json.Unmarshal(data, &keys) + if err != nil { + return err + } + *s = Empty() + for _, k := range keys { + s.Add(k) + } + return nil +} diff --git a/set/set_test.go b/set/set_test.go new file mode 100644 index 0000000..539cba3 --- /dev/null +++ b/set/set_test.go @@ -0,0 +1,111 @@ +/* + +SPDX-Copyright: Copyright (c) Capital One Services, LLC 
+SPDX-License-Identifier: Apache-2.0 +Copyright 2017 Capital One Services, LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and limitations under the License. + +*/ +package set + +import ( + "encoding/json" + "reflect" + "testing" +) + +func TestAddContains(t *testing.T) { + s := Empty() + s.Add("foo") + s.Add("bar") + s.Add("foo") + if !s.Contains("foo") { + t.Error("Set is missing value 'foo'") + } + if !s.Contains("bar") { + t.Error("Set is missing value 'bar'") + } + if s.Contains("baz") { + t.Error("Set is not missing value 'baz'") + } +} + +func TestNew(t *testing.T) { + s := New("foo", "bar", "foo") + if !s.Contains("foo") { + t.Error("Set is missing value 'foo'") + } + if !s.Contains("bar") { + t.Error("Set is missing value 'bar'") + } + if s.Contains("baz") { + t.Error("Set is not missing value 'baz'") + } +} + +func TestKeys(t *testing.T) { + s := New("foo", "bar", "baz") + keys := s.Keys() + if len(keys) != 3 { + t.Error("Set keys list has incorrect size", keys) + } +} + +func TestPrint(t *testing.T) { + x := New() + y := New("a", "b") + res1 := x.Print(",") + res2 := y.Print(",") + if res1 != "" { + t.Error("Set print incorrect", res1) + } + if res2 != "a,b" { + t.Error("Set print incorrect", res2) + } +} + +func TestIntersection(t *testing.T) { + a := New("a", "b", "c") + b := New("b", "c", "y", "z") + obs := a.Intersection(b) + exp := New("b", "c") + if !reflect.DeepEqual(exp, obs) { + t.Error("Set difference incorrect", obs) + } + obs = b.Intersection(a) + if !reflect.DeepEqual(exp, obs) { + t.Error("Set difference incorrect", obs) + } +} + +func TestJSON(t *testing.T) { + var out Set + in := New("foo", "bar") + text, err := json.Marshal(in) + if err != nil { + t.Fatal("Error marshaling set", err) + } + err = json.Unmarshal(text, &out) + if err != nil { + t.Fatal("Error unmarshaling set", err) + } + if !out.Contains("foo") { + t.Error("Set is missing value 'foo'") + } + if !out.Contains("bar") { + t.Error("Set is missing value 'bar'") + } + if out.Contains("baz") { + t.Error("Set is not missing value 'baz'") + } +} diff --git a/shared/token/token.go b/shared/token/token.go index 78ac04f..be44d53 100644 --- a/shared/token/token.go +++ b/shared/token/token.go @@ -1,3 +1,21 @@ +/* + +SPDX-Copyright: Copyright (c) Brad Rydzewski, project contributors, Capital One Services, LLC +SPDX-License-Identifier: Apache-2.0 +Copyright 2017 Brad Rydzewski, project contributors, Capital One Services, LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and limitations under the License. 
+ +*/ package token import ( @@ -91,10 +109,11 @@ func (t *Token) Sign(secret string) (string, error) { // with an expiration date. func (t *Token) SignExpires(secret string, exp int64) (string, error) { token := jwt.New(jwt.SigningMethodHS256) - token.Claims["type"] = t.Kind - token.Claims["text"] = t.Text + claims := token.Claims.(jwt.MapClaims) + claims["type"] = t.Kind + claims["text"] = t.Text if exp > 0 { - token.Claims["exp"] = float64(exp) + claims["exp"] = float64(exp) } return token.SignedString([]byte(secret)) } @@ -108,7 +127,8 @@ func keyFunc(token *Token, fn SecretFunc) jwt.Keyfunc { // extract the token kind and cast to // the expected type. - kindv, ok := t.Claims["type"] + claims := t.Claims.(jwt.MapClaims) + kindv, ok := claims["type"] if !ok { return nil, jwt.ValidationError{} } @@ -116,7 +136,7 @@ func keyFunc(token *Token, fn SecretFunc) jwt.Keyfunc { // extract the token value and cast to // exepected type. - textv, ok := t.Claims["text"] + textv, ok := claims["text"] if !ok { return nil, jwt.ValidationError{} } diff --git a/snapshot/errors.go b/snapshot/errors.go new file mode 100644 index 0000000..d314fc8 --- /dev/null +++ b/snapshot/errors.go @@ -0,0 +1,29 @@ +/* + +SPDX-Copyright: Copyright (c) Capital One Services, LLC +SPDX-License-Identifier: Apache-2.0 +Copyright 2017 Capital One Services, LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and limitations under the License. + +*/ +package snapshot + +import ( + "net/http" + + "github.com/capitalone/checks-out/exterror" +) + +func badRequest(err error) error { + return exterror.Create(http.StatusBadRequest, err) +} diff --git a/snapshot/maintainer.go b/snapshot/maintainer.go new file mode 100644 index 0000000..e0e889e --- /dev/null +++ b/snapshot/maintainer.go @@ -0,0 +1,57 @@ +/* + +SPDX-Copyright: Copyright (c) Capital One Services, LLC +SPDX-License-Identifier: Apache-2.0 +Copyright 2017 Capital One Services, LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and limitations under the License. + +*/ +package snapshot + +import ( + "context" + "fmt" + + "github.com/capitalone/checks-out/model" + "github.com/capitalone/checks-out/set" +) + +const SelfRepo = "repo-self" +const SelfTeam = "repo-self" + +var ReservedOrgs = set.New("all", "us", "them", "universe") + +// ParseMaintainer parses a projects MAINTAINERS file and returns +// the list of maintainers. 
+func ParseMaintainer(c context.Context, user *model.User, data []byte, r *model.Repo, typ string) (*model.Maintainer, error) { + switch typ { + case "text": + return parseMaintainerText(c, user, data, r) + case "hjson": + return parseMaintainerHJSON(data) + case "toml": + return parseMaintainerToml(data) + case "legacy": + //try to do toml, then do text -- only for .lgtm files + m, err := parseMaintainerToml(data) + if err != nil { + m, err = parseMaintainerText(c, user, data, r) + } + return m, err + + default: + err := fmt.Errorf("%s is not one of the permitted MAINTAINER types %s", + typ, model.MaintTypes.Keys()) + return nil, badRequest(err) + } +} diff --git a/snapshot/maintainer_hjson.go b/snapshot/maintainer_hjson.go new file mode 100644 index 0000000..fd9da2f --- /dev/null +++ b/snapshot/maintainer_hjson.go @@ -0,0 +1,63 @@ +/* + +SPDX-Copyright: Copyright (c) Capital One Services, LLC +SPDX-License-Identifier: Apache-2.0 +Copyright 2017 Capital One Services, LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and limitations under the License. + +*/ +package snapshot + +import ( + "errors" + "fmt" + + "github.com/capitalone/checks-out/hjson" + "github.com/capitalone/checks-out/model" + + "github.com/mspiegel/go-multierror" +) + +func parseMaintainerHJSON(data []byte) (*model.Maintainer, error) { + m := new(model.Maintainer) + err := hjson.Unmarshal(data, m) + if err != nil { + return nil, badRequest(err) + } + if m.RawPeople == nil { + err = errors.New("Invalid HJSON format. Missing people section.") + return nil, badRequest(err) + } + err = validateMaintainerHJSON(m) + return m, err +} + +func validateMaintainerHJSON(m *model.Maintainer) error { + var errs error + for k, v := range m.RawPeople { + if len(v.Login) == 0 { + // Populate the login field if it is missing. + v.Login = k + } else if v.Login != k { + err := fmt.Errorf("Mismatched key %s and login field %s", k, v.Login) + errs = multierror.Append(errs, badRequest(err)) + } + } + for k := range m.RawOrg { + if ReservedOrgs.Contains(k) { + err := fmt.Errorf("The organization name %s is a reserved name", k) + errs = multierror.Append(errs, badRequest(err)) + } + } + return errs +} diff --git a/snapshot/maintainer_test.go b/snapshot/maintainer_test.go new file mode 100644 index 0000000..059ecb3 --- /dev/null +++ b/snapshot/maintainer_test.go @@ -0,0 +1,262 @@ +/* + +SPDX-Copyright: Copyright (c) Capital One Services, LLC +SPDX-License-Identifier: Apache-2.0 +Copyright 2017 Capital One Services, LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and limitations under the License. 
+ +*/ +package snapshot + +import ( + "testing" + + "github.com/capitalone/checks-out/cache" + "github.com/capitalone/checks-out/model" + "github.com/capitalone/checks-out/set" + + "github.com/gin-gonic/gin" +) + +func parseMaintainerInternal(m *model.Maintainer, t *testing.T) { + if len(m.RawPeople) != len(people) { + t.Fatalf("Wanted %d maintainers, got %d", len(people), len(m.RawPeople)) + } + for _, want := range people { + got, ok := m.RawPeople[want.Login] + if !ok { + t.Errorf("Wanted user %s in file", want.Login) + } else if want.Login != got.Login { + t.Errorf("Wanted login %s, got %s", want.Login, got.Login) + } + } +} + +func TestParseMaintainerText(t *testing.T) { + var files = []string{maintainerFile, maintainerFileEmail, maintainerFileSimple, maintainerFileMixed} + for _, file := range files { + parsed, err := ParseMaintainer(nil, nil, []byte(file), nil, "text") + if err != nil { + t.Fatal(err) + } + parseMaintainerInternal(parsed, t) + } +} + +func TestParseMaintainerHJSON(t *testing.T) { + var files = []string{maintainerFileHJSON} + for _, file := range files { + parsed, err := ParseMaintainer(nil, nil, []byte(file), nil, "hjson") + if err != nil { + t.Fatal(err) + } + parseMaintainerInternal(parsed, t) + } +} + +var people = []model.Person{ + {Login: "bradrydzewski"}, + {Login: "mattnorris"}, +} + +var maintainerFile = ` +Brad Rydzewski (@bradrydzewski) +Matt Norris (@mattnorris) +` + +var maintainerFileEmail = ` +bradrydzewski +mattnorris +` + +// simple format with usernames only. includes +// spaces and comments. +var maintainerFileSimple = ` +bradrydzewski +mattnorris` + +// simple format with usernames only. includes +// spaces and comments. +var maintainerFileMixed = ` +bradrydzewski +Matt Norris (@mattnorris) +` + +// advanced hjson format for the maintainers file. 
+var maintainerFileHJSON = ` +{ + people: + { + bradrydzewski: + { + name: Brad Rydzewski + email: brad.rydzewski@mail.com + login: bradrydzewski + } + mattnorris: + { + name: Matt Norris + email: matt.norris@mail.com + login: mattnorris + } + } + org: + { + core: + { + people: + [ + mattnorris + bradrydzewski + ] + } + } +} +` + +var teamOrgFile = ` +github-org illuminati +github-team a-team org1 +github-team b-team +` + +var teamRepoSelf = ` +github-team repo-self +` + +var tomlFile = ` +[people] +[people.person1] +[people.person2] +login = "person2" +[people.person3] +login = "person3" +[people.person4] +login = "person4" +[people.person5] +login = "person5" +[people.person6] +login = "person6" + +[org] +[org.org1] +people = ["person2","person4","person6"] + +[org.org2] +people = ["person1","person3","person5"] +` + +func TestParseToml(t *testing.T) { + parsed, err := parseMaintainerToml([]byte(tomlFile)) + if err != nil { + t.Fatal(err) + } + validateToml(parsed, t) +} + +func validateToml(parsed *model.Maintainer, t *testing.T) { + if len(parsed.RawPeople) != 6 { + t.Errorf("Expected 6 people, got %d", len(parsed.RawPeople)) + } + p1, ok := parsed.RawPeople["person1"] + if !ok { + t.Fatal("Missing person1") + } + if p1.Login != "person1" { + t.Errorf("Person1 login field is incorrect: %s", p1.Login) + } + if len(parsed.RawOrg) != 2 { + t.Errorf("Expected 2 orgs, got %d", len(parsed.RawOrg)) + } + o1, ok := parsed.RawOrg["org1"] + if !ok { + t.Fatal("Missing org1") + } + o1Peeps := set.New("person2", "person4", "person6") + if len(o1.People.Intersection(o1Peeps)) != 3 { + t.Errorf("Expected %v in org1, had %v", o1Peeps, o1.People) + } + + o2, ok := parsed.RawOrg["org2"] + if !ok { + t.Fatal("Missing org2") + } + o2Peeps := set.New("person1", "person3", "person5") + if len(o2.People.Intersection(o2Peeps)) != 3 { + t.Errorf("Expected %v in org2, had %v", o2Peeps, o2.People) + } +} + +func TestParseLegacy(t *testing.T) { + m, err := ParseMaintainer(nil, nil, []byte(tomlFile), nil, "legacy") + if err != nil { + t.Fatal(err) + } + validateToml(m, t) + m2, err := ParseMaintainer(nil, nil, []byte(maintainerFile), nil, "legacy") + if err != nil { + t.Fatal(err) + } + parseMaintainerInternal(m2, t) +} + +func TestParseTeamAndOrg(t *testing.T) { + parsed, err := parseMaintainerText(nil, nil, []byte(teamOrgFile), nil) + if err != nil { + t.Fatal(err) + } + org := parsed.RawOrg["illuminati"] + if org == nil { + t.Error("The illuminati is missing") + } + if !org.People.Contains("github-org illuminati") { + t.Error("The illuminati is missing its members") + } + team := parsed.RawOrg["org1-a-team"] + if team == nil { + t.Error("The a-team is missing") + } + if !team.People.Contains("github-team a-team org1") { + t.Error("The a-team is missing its members") + } + team = parsed.RawOrg["b-team"] + if team == nil { + t.Error("The b-team is missing") + } + if !team.People.Contains("github-team b-team") { + t.Error("The b-team is missing its members") + } +} + +func TestParseTeamRepoSelf(t *testing.T) { + c := &gin.Context{} + csh := cache.Default() + csh.Set("teams:foo", set.New()) + cache.ToContext(c, csh) + repo := &model.Repo{ + Owner: "foo", + Name: "bar", + Org: true, + } + parsed, err := parseMaintainerText(c, nil, []byte(teamRepoSelf), repo) + if err != nil { + t.Fatal(err) + } + org := parsed.RawOrg["_"] + if org == nil { + t.Error("The repo-self org is missing") + } + if !org.People.Contains("github-org repo-self") { + t.Error("The repo-self org is missing its members") + } +} diff --git 
a/snapshot/maintainer_text.go b/snapshot/maintainer_text.go new file mode 100644 index 0000000..d48428f --- /dev/null +++ b/snapshot/maintainer_text.go @@ -0,0 +1,233 @@ +/* + +SPDX-Copyright: Copyright (c) Capital One Services, LLC +SPDX-License-Identifier: Apache-2.0 +Copyright 2017 Capital One Services, LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and limitations under the License. + +*/ +package snapshot + +import ( + "bufio" + "bytes" + "context" + "fmt" + "regexp" + "strings" + + "github.com/capitalone/checks-out/model" + "github.com/capitalone/checks-out/remote" +) + +func parseMaintainerText(c context.Context, user *model.User, data []byte, r *model.Repo) (*model.Maintainer, error) { + m := new(model.Maintainer) + m.RawPeople = map[string]*model.Person{} + m.RawOrg = map[string]*model.OrgSerde{} + + buf := bytes.NewBuffer(data) + reader := bufio.NewReader(buf) + for { + line, _, err := reader.ReadLine() + if err != nil { + break + } + + item := parseln(string(line)) + if len(item) == 0 { + continue + } + + if name := ParseOrgName(item); len(name) > 0 { + if name == SelfRepo { + name = r.Owner + } + err := addOrg(m, item, name) + if err != nil { + return nil, err + } + continue + } + if collab := ParseCollabName(item); len(collab) > 0 { + if collab == SelfRepo { + collab = r.Slug + } + collab += "-collaborators" + err := addOrg(m, item, collab) + if err != nil { + return nil, err + } + continue + } + if team, org0 := ParseTeamName(item); len(team) > 0 { + if team == SelfTeam { + org := org0 + if org0 == "" { + if !r.Org { + err := fmt.Errorf("Cannot expand GitHub teams for user repository %s/%s", + r.Owner, r.Name) + return nil, badRequest(err) + } + err := addOrg(m, "github-org repo-self", "_") + if err != nil { + return nil, err + } + org = r.Owner + } + teams, err := remote.ListTeams(c, user, org) + if err != nil { + return nil, err + } + for t := range teams { + i := fmt.Sprintf("github-team %s", t) + name := t + if org0 != "" { + i = fmt.Sprintf("github-team %s %s", t, org) + name = org + "-" + t + } + err := addOrg(m, i, name) + if err != nil { + return nil, err + } + } + } else { + name := team + if org0 != "" { + name = org0 + "-" + team + } + err := addOrg(m, item, name) + if err != nil { + return nil, err + } + } + continue + } + + person := parseLogin(item) + if person == nil { + person = parseLoginMeta(item) + } + if person == nil { + person = parseLoginEmail(item) + } + if person == nil { + err := fmt.Errorf("Unable to parse line: %s", item) + return nil, badRequest(err) + } + m.RawPeople[person.Login] = person + } + return m, nil +} + +func addOrg(m *model.Maintainer, item string, name string) error { + name = strings.ToLower(name) + o := &model.OrgSerde{People: map[string]bool{item: true}} + if _, ok := m.RawOrg[name]; ok { + err := fmt.Errorf("Duplicate organization detected %s", name) + return badRequest(err) + } + m.RawOrg[name] = o + return nil +} + +func parseln(s string) string { + if s == "" || string(s[0]) == "#" { + return "" + } + index := strings.Index(s, " #") + if index > -1 { + s = 
strings.TrimSpace(s[0:index]) + } + return s +} + +// regular expression determines if a line in the maintainers +// is a reference to an organization. +var reOrg = regexp.MustCompile(`^github-org (\S+)`) + +// regular expression determines if a line in the maintainers +// is a reference to collaborators list. +var reCollab = regexp.MustCompile(`^github-collab (\S+)`) + +// regular expression determines if a line in the maintainers +// is a reference to a team. +var reTeam = regexp.MustCompile(`^github-team (\S+)\s*(\S*)`) + +// regular expression determines if a line in the maintainers +// file only has the single GitHub username and no other metadata. +var reLogin = regexp.MustCompile(`^\S+$`) + +// regular expression determines if a line in the maintainers +// file has the username and metadata. +var reLoginMeta = regexp.MustCompile(`(.+) <(.+)> \(@(.+)\)`) + +// regular expression determines if a line in the maintainers +// file has the username and email. +var reLoginEmail = regexp.MustCompile(`(.+) <(.+)>`) + +func parseLoginMeta(line string) *model.Person { + matches := reLoginMeta.FindStringSubmatch(line) + if len(matches) != 4 { + return nil + } + return &model.Person{ + Name: strings.TrimSpace(matches[1]), + Email: strings.TrimSpace(matches[2]), + Login: strings.TrimSpace(matches[3]), + } +} + +func parseLoginEmail(line string) *model.Person { + matches := reLoginEmail.FindStringSubmatch(line) + if len(matches) != 3 { + return nil + } + return &model.Person{ + Login: strings.TrimSpace(matches[1]), + Email: strings.TrimSpace(matches[2]), + } +} + +func parseLogin(line string) *model.Person { + line = strings.TrimSpace(line) + if !reLogin.MatchString(line) { + return nil + } + return &model.Person{ + Login: line, + } +} + +func ParseOrgName(line string) string { + matches := reOrg.FindStringSubmatch(line) + if len(matches) != 2 { + return "" + } + return matches[1] +} + +func ParseCollabName(line string) string { + matches := reCollab.FindStringSubmatch(line) + if len(matches) != 2 { + return "" + } + return matches[1] +} + +func ParseTeamName(line string) (string, string) { + matches := reTeam.FindStringSubmatch(line) + if len(matches) != 3 { + return "", "" + } + return matches[1], matches[2] +} diff --git a/snapshot/maintainer_toml.go b/snapshot/maintainer_toml.go new file mode 100644 index 0000000..0d14191 --- /dev/null +++ b/snapshot/maintainer_toml.go @@ -0,0 +1,133 @@ +/* + +SPDX-Copyright: Copyright (c) Capital One Services, LLC +SPDX-License-Identifier: Apache-2.0 +Copyright 2017 Capital One Services, LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and limitations under the License. 
+ +*/ +package snapshot + +import ( + "errors" + "fmt" + + "github.com/capitalone/checks-out/model" + "github.com/capitalone/checks-out/set" + + "github.com/pelletier/go-toml" +) + +func parseMaintainerToml(data []byte) (*model.Maintainer, error) { + tree, err := toml.Load(string(data)) + if err != nil { + return nil, badRequest(err) + } + + m := new(model.Maintainer) + + if tree.Has("people") { + m.RawPeople, err = buildRawPeople(tree) + if err != nil { + return nil, badRequest(err) + } + } + + if tree.Has("org") { + m.RawOrg, err = buildRawOrg(tree) + if err != nil { + return nil, badRequest(err) + } + } + + if m.RawPeople == nil { + err = errors.New("Invalid Toml format. Missing people section.") + return nil, badRequest(err) + } + + err = validateMaintainerHJSON(m) + return m, err +} + +func buildRawOrg(tree *toml.Tree) (map[string]*model.OrgSerde, error) { + rawOrg := map[string]*model.OrgSerde{} + o, ok := tree.Get("org").(*toml.Tree) + if !ok { + return nil, errors.New("Invalid Toml format. Org section is invalid.") + } + for _, v := range o.Keys() { + org, ok := o.Get(v).(*toml.Tree) + if !ok { + return nil, fmt.Errorf("Invalid toml format. Org %s is invalid.", v) + } + curOrg := &model.OrgSerde{} + pSlice, ok := org.Get("people").([]interface{}) + if !ok { + return nil, fmt.Errorf("Invalid toml format. Org %s people isn't a slice of strings", v) + } + curOrg.People = set.Empty() + for _, v2 := range pSlice { + if p, ok := v2.(string); ok { + curOrg.People.Add(p) + } else { + return nil, fmt.Errorf("Invalid toml format. Org %s people had invalid person %v", v, v2) + } + } + rawOrg[v] = curOrg + } + return rawOrg, nil +} + +func buildRawPeople(tree *toml.Tree) (map[string]*model.Person, error) { + rawPeople := map[string]*model.Person{} + p, ok := tree.Get("people").(*toml.Tree) + if !ok { + return nil, errors.New("Invalid Toml format. People section is invalid.") + } + for _, k := range p.Keys() { + person, ok := p.Get(k).(*toml.Tree) + if !ok { + return nil, fmt.Errorf("Invalid toml format. Person %s is invalid.", k) + } + curPerson := &model.Person{} + // Populate the login field if it is missing. + str, err := extractString("login", k, person) + if err != nil { + return nil, err + } + curPerson.Login = str + + str, err = extractString("name", "", person) + if err != nil { + return nil, err + } + curPerson.Name = str + + str, err = extractString("email", "", person) + if err != nil { + return nil, err + } + curPerson.Email = str + + rawPeople[k] = curPerson + } + return rawPeople, nil +} + +func extractString(key string, def string, person *toml.Tree) (string, error) { + if str, ok := person.GetDefault(key, def).(string); !ok { + return "", fmt.Errorf("Invalid toml format. Invalid field %s", key) + } else { + return str, nil + } +} diff --git a/snapshot/snapshot.go b/snapshot/snapshot.go new file mode 100644 index 0000000..7a34228 --- /dev/null +++ b/snapshot/snapshot.go @@ -0,0 +1,351 @@ +/* + +SPDX-Copyright: Copyright (c) Capital One Services, LLC +SPDX-License-Identifier: Apache-2.0 +Copyright 2017 Capital One Services, LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and limitations under the License. + +*/ +package snapshot + +import ( + "context" + "errors" + "fmt" + "net/http" + "strings" + + "github.com/capitalone/checks-out/envvars" + "github.com/capitalone/checks-out/exterror" + "github.com/capitalone/checks-out/model" + "github.com/capitalone/checks-out/remote" + "github.com/capitalone/checks-out/set" + "github.com/capitalone/checks-out/store" + + "github.com/mspiegel/go-multierror" +) + +var ConfigTemplateName = fmt.Sprintf("template.%s", envvars.Env.Branding.Name) + +var configFileName = fmt.Sprintf(".%s", envvars.Env.Branding.Name) + +var orgRepoName = fmt.Sprintf("%s-configuration", envvars.Env.Branding.Name) + +const MaintainersTemplateName = "template.MAINTAINERS" + +type OrgLazyLoad struct { + Input set.Set `json:"input"` + People set.Set `json:"people"` + Err error `json:"error"` + Init bool `json:"-"` + Ctx context.Context `json:"-"` + User *model.User `json:"-"` + Caps *model.Capabilities `json:"-"` + Repo *model.Repo `json:"-"` +} + +func findConfig(c context.Context, user *model.User, caps *model.Capabilities, repo *model.Repo) (*model.Config, error) { + var rcfile []byte + var exterr, err error + // look for configuration file in current repository + rcfile, exterr = remote.GetContents(c, user, repo, configFileName) + if exterr == nil { + return model.ParseConfig(rcfile, caps) + } + // look for legacy file + rcfile, err = remote.GetContents(c, user, repo, ".lgtm") + if err == nil { + return model.ParseOldConfig(rcfile) + } + // look for template configuration file in org repository + if !repo.Org || len(orgRepoName) == 0 { + return nil, exterr + } + orgRepo := *repo + orgRepo.Name = orgRepoName + _, err = remote.GetRepo(c, user, orgRepo.Owner, orgRepo.Name) + if err != nil { + ext, ok := err.(exterror.ExtError) + if ok && ext.Status == http.StatusNotFound { + return nil, exterr + } + return nil, multierror.Append(exterr, err) + } + rcfile, err = remote.GetContents(c, user, &orgRepo, ConfigTemplateName) + if err == nil { + return model.ParseConfig(rcfile, caps) + } + return nil, multierror.Append(exterr, err) +} + +func GetConfig(c context.Context, user *model.User, caps *model.Capabilities, repo *model.Repo) (*model.Config, error) { + config, err := findConfig(c, user, caps, repo) + if err != nil { + err = badRequest(err) + return nil, exterror.Append(err, fmt.Sprintf("Parsing %s file", configFileName)) + } + if config.Deployment.Enable { + var deployFile []byte + deployFile, err = remote.GetContents(c, user, repo, config.Deployment.Path) + if err != nil { + msg := fmt.Sprintf("%s file not found", config.Deployment.Path) + return nil, exterror.Append(err, msg) + } + config.LoadDeploymentMap(deployFile) + } + return config, nil +} + +func GetConfigAndMaintainers(c context.Context, user *model.User, caps *model.Capabilities, repo *model.Repo) (*model.Config, *model.MaintainerSnapshot, error) { + config, err := GetConfig(c, user, caps, repo) + if err != nil { + return nil, nil, err + } + snapshot, err := createSnapshot(c, user, caps, repo, config) + if err != nil { + return nil, nil, err + } + return config, snapshot, err +} + +func FixSlackTargets(c context.Context, config *model.Config, user string) error { + for k, v := range config.Comment.Targets { + // we're potentially going to modify the curTarget + curTarget := &config.Comment.Targets[k] + //skip over the github targets + if v.Target == model.Github.String() { + continue + } + if v.Target == model.Slack.String() 
{ + //set URL to the default slack URL if no override URL was specified + if v.Url == "" { + curTarget.Url = envvars.Env.Slack.TargetUrl + } + continue + } + // for everything else try to find the url for it + // if found, change target to slack and set the url + url, err := store.GetSlackUrl(c, v.Target, user) + if err != nil { + return err + } + if url == "" { + url, err = store.GetSlackUrl(c, v.Target, "") + } + if err != nil { + return err + } + if url == "" { + return errors.New("No URL found for slack host " + v.Target) + } + curTarget.Target = model.Slack.String() + curTarget.Url = url + } + return nil +} + +func findMaintainers(c context.Context, user *model.User, repo *model.Repo, path string) ([]byte, error) { + file, err := remote.GetContents(c, user, repo, path) + if err == nil { + return file, nil + } + if !repo.Org || len(orgRepoName) == 0 { + return nil, exterror.Append(err, fmt.Sprintf("%s file not found", path)) + } + orgRepo := *repo + orgRepo.Name = orgRepoName + file, err = remote.GetContents(c, user, &orgRepo, MaintainersTemplateName) + if err == nil { + return file, nil + } + return nil, exterror.Append(err, fmt.Sprintf("%s file not found", path)) +} + +func createSnapshot(c context.Context, user *model.User, caps *model.Capabilities, repo *model.Repo, config *model.Config) (*model.MaintainerSnapshot, error) { + file, err := findMaintainers(c, user, repo, config.Maintainers.Path) + if err != nil { + return nil, err + } + maintainer, err := ParseMaintainer(c, user, file, repo, config.Maintainers.Type) + if err != nil { + msg := fmt.Sprintf("Parsing maintainers file with %s format", + config.Maintainers.Type) + return nil, exterror.Append(err, msg) + } + snapshot, err := maintainerToSnapshot(c, user, caps, repo, maintainer) + if err != nil { + return nil, err + } + err = validateSnapshot(config, snapshot) + return snapshot, err +} + +// orgLazyLoadCandidate returns true if this github team is eligible +// for lazy expansion. The special name "_" is introduced +// when the "github-team repo-self" is found in the MAINTAINERS +// file. If "github-team repo-self" is not found then we must use eager +// evaluation of the GitHub teams in order to populate the +// model.MaintainerSnapshot.People field. 
+func orgLazyLoadCandidate(people set.Set, orgs map[string]*model.OrgSerde) bool { + if _, ok := orgs["_"]; !ok { + return false + } + if len(people) != 1 { + return false + } + team, _ := ParseTeamName(people.Keys()[0]) + return len(team) > 0 +} + +func maintainerToSnapshot(c context.Context, u *model.User, caps *model.Capabilities, r *model.Repo, m *model.Maintainer) (*model.MaintainerSnapshot, error) { + var errs error + s := new(model.MaintainerSnapshot) + s.People = map[string]*model.Person{} + s.Org = map[string]model.Org{} + for k, v := range m.RawPeople { + k = strings.ToLower(k) + s.People[k] = v + } + for k, v := range m.RawOrg { + k = strings.ToLower(k) + if s.People[k] != nil { + msg := fmt.Errorf("%s cannot be both a team and a person", k) + errs = multierror.Append(errs, badRequest(msg)) + } + if orgLazyLoadCandidate(v.People, m.RawOrg) { + s.Org[k] = &OrgLazyLoad{ + Input: v.People, + Init: false, + Ctx: c, + User: u, + Caps: caps, + Repo: r, + } + } else { + org := set.Empty() + s.Org[k] = &model.OrgSerde{ + People: org, + } + for login := range v.People { + lst, err := memberExpansion(c, u, caps, r, login) + if err != nil { + msg := fmt.Sprintf("Attempting to expand %s", login) + errs = multierror.Append(errs, exterror.Append(err, msg)) + } else { + if lst != nil { + addToPeople(lst, s) + addToOrg(lst, org) + } else { + org.Add(strings.ToLower(login)) + } + } + } + } + } + if errs != nil { + return nil, errs + } + return s, nil +} + +func memberExpansion(c context.Context, u *model.User, caps *model.Capabilities, r *model.Repo, input string) ([]*model.Person, error) { + + org := ParseOrgName(input) + if org != "" { + if org == SelfRepo { + if !r.Org { + p, err := remote.GetPerson(c, u, u.Login) + if err != nil { + return nil, exterror.Append(err, "Cannot fetch information about repository owner.") + } + return []*model.Person{p}, nil + } + if !caps.Org.Read { + err := errors.New("Cannot read GitHub organizations with provided OAuth scopes") + return nil, badRequest(err) + } + return remote.GetOrgMembers(c, u, r.Owner) + } + return remote.GetOrgMembers(c, u, org) + } + collab := ParseCollabName(input) + if collab != "" { + if collab == SelfRepo { + return remote.GetCollaborators(c, u, r.Owner, r.Name) + } + pieces := strings.Split(collab, "/") + if len(pieces) != 2 { + err := fmt.Errorf("%s is not a repository slug", collab) + return nil, badRequest(err) + } + return remote.GetCollaborators(c, u, pieces[0], pieces[1]) + } + team, org := ParseTeamName(input) + if team != "" { + if org == "" && !r.Org { + err := fmt.Errorf("Cannot expand GitHub teams for user repository %s/%s", + r.Owner, r.Name) + return nil, badRequest(err) + } + if !caps.Org.Read { + err := errors.New("Cannot read GitHub teams with provided OAuth scopes") + return nil, badRequest(err) + } + if org == "" { + org = r.Owner + } + return remote.GetTeamMembers(c, u, org, team) + } + return nil, nil +} + +func addToPeople(lst []*model.Person, ms *model.MaintainerSnapshot) { + for _, m := range lst { + login := strings.ToLower(m.Login) + if _, ok := ms.People[login]; !ok { + ms.People[login] = m + } + } +} + +func addToOrg(lst []*model.Person, org set.Set) { + for _, m := range lst { + login := strings.ToLower(m.Login) + org.Add(login) + } +} + +func (o *OrgLazyLoad) GetPeople() (set.Set, error) { + var errs error + if o.Init { + return o.People, o.Err + } + org := set.Empty() + for login := range o.Input { + lst, err := memberExpansion(o.Ctx, o.User, o.Caps, o.Repo, login) + if err != nil { + msg := 
fmt.Sprintf("Attempting to expand %s", login) + errs = multierror.Append(errs, exterror.Append(err, msg)) + } else { + if lst != nil { + addToOrg(lst, org) + } else { + org.Add(strings.ToLower(login)) + } + } + } + o.People = org + o.Err = errs + o.Init = true + return o.People, o.Err +} diff --git a/snapshot/snapshot_test.go b/snapshot/snapshot_test.go new file mode 100644 index 0000000..385713a --- /dev/null +++ b/snapshot/snapshot_test.go @@ -0,0 +1,299 @@ +/* + +SPDX-Copyright: Copyright (c) Capital One Services, LLC +SPDX-License-Identifier: Apache-2.0 +Copyright 2017 Capital One Services, LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and limitations under the License. + +*/ +package snapshot + +import ( + "context" + "testing" + + "github.com/capitalone/checks-out/cache" + "github.com/capitalone/checks-out/envvars" + "github.com/capitalone/checks-out/model" + "github.com/capitalone/checks-out/remote" + "github.com/capitalone/checks-out/set" + "github.com/capitalone/checks-out/store" + "github.com/capitalone/checks-out/strings/lowercase" + + "github.com/gin-gonic/gin" +) + +func init() { + cache.Longterm.Set("people:Foo", &model.Person{Login: "Foo"}) + cache.Longterm.Set("people:Bar", &model.Person{Login: "Bar"}) + cache.Longterm.Set("people:Baz", &model.Person{Login: "Baz"}) +} + +type mockRemote struct { + remote.Remote +} + +func (m *mockRemote) GetOrgMembers(c context.Context, user *model.User, org string) (set.Set, error) { + switch org { + case "a": + return map[string]bool{ + "Foo": true, + }, nil + case "org": + return map[string]bool{ + "Foo": true, + "Bar": true, + }, nil + default: + return map[string]bool{}, nil + } +} + +func (m *mockRemote) GetTeamMembers(c context.Context, user *model.User, org string, team string) (set.Set, error) { + return map[string]bool{ + "Bar": true, + "Baz": true, + }, nil +} + +func TestMaintainerToSnapshot(t *testing.T) { + c := &gin.Context{} + u := &model.User{} + caps := model.AllowAll() + r := &model.Repo{Org: true} + m := &model.Maintainer{ + RawPeople: map[string]*model.Person{ + "Foo": &model.Person{Login: "Foo", Name: "Mr. Foo"}, + }, + RawOrg: map[string]*model.OrgSerde{ + "G1": &model.OrgSerde{People: set.New("github-org a")}, + "G2": &model.OrgSerde{People: set.New("github-org org")}, + "G3": &model.OrgSerde{People: set.New("github-team team")}, + }, + } + remote.ToContext(c, &mockRemote{}) + s, err := maintainerToSnapshot(c, u, caps, r, m) + if err != nil { + t.Error("Error converting maintainer to snapshot", err) + } + if len(s.People) != 3 { + t.Error("people not populated") + } + if _, ok := s.Org["G1"]; ok { + t.Error("group G1 was not converted to lowercase") + } + g1, err := s.Org["g1"].GetPeople() + if err != nil { + t.Error(err) + } + if g1.Contains("Foo") { + t.Error("user Foo was not converted to lowercase") + } + if !g1.Contains("foo") { + t.Error("user foo is missing from group g1") + } + if s.People["foo"].Name != "Mr. 
Foo" { + t.Error("foo user was overridden", s.People["foo"]) + } +} + +func TestMaintainerToSnapshotOrgSelf(t *testing.T) { + c := &gin.Context{} + u := &model.User{} + caps := model.AllowAll() + r := &model.Repo{Owner: "a", Org: true} + m := &model.Maintainer{ + RawOrg: map[string]*model.OrgSerde{ + "g1": &model.OrgSerde{People: set.New("github-org repo-self")}, + }, + } + remote.ToContext(c, &mockRemote{}) + s, err := maintainerToSnapshot(c, u, caps, r, m) + if err != nil { + t.Error("Error converting maintainer to snapshot", err) + } + if _, ok := s.People["foo"]; !ok { + t.Error("people not populated") + } + if _, ok := s.People["bar"]; ok { + t.Error("bar is populated") + } + g1, err := s.Org["g1"].GetPeople() + if err != nil { + t.Error(err) + } + if !g1.Contains("foo") { + t.Error("org contents not populated") + } +} + +func TestSnapshotValidateApproval(t *testing.T) { + c := &gin.Context{} + u := &model.User{} + caps := model.AllowAll() + r := &model.Repo{Owner: "a", Org: true} + m := &model.Maintainer{ + RawOrg: map[string]*model.OrgSerde{ + "g1": &model.OrgSerde{People: set.New("github-org repo-self")}, + }, + } + remote.ToContext(c, &mockRemote{}) + s, err := maintainerToSnapshot(c, u, caps, r, m) + if err != nil { + t.Error("Error converting maintainer to snapshot", err) + } + config := model.NonEmptyConfig() + m1 := model.DefaultEntityMatch() + m1.Entity = lowercase.Create("foobar") + config.Approvals[0].Match = model.MatcherHolder{Matcher: m1} + err = validateSnapshot(config, s) + if err == nil { + t.Error("Expected error validating snapshot") + } + m2 := model.DefaultAnonymousMatch() + m2.Entities = set.NewLower(lowercase.Create("foobar")) + config.Approvals[0].Match = model.MatcherHolder{Matcher: m2} + err = validateSnapshot(config, s) + if err == nil { + t.Error("Expected error validating snapshot") + } +} + +func TestAddMembers(t *testing.T) { + c := model.OrgSerde{People: set.New("foo")} + m := model.MaintainerSnapshot{ + People: map[string]*model.Person{ + "foo": &model.Person{Login: "foo", Name: "Mr. Foo"}, + }, + Org: map[string]model.Org{ + "c": &c, + }, + } + lst := []*model.Person{ + &model.Person{Login: "foo"}, + &model.Person{Login: "bar"}, + &model.Person{Login: "baz"}, + } + addToPeople(lst, &m) + addToOrg(lst, c.People) + if len(m.People) != 3 { + t.Error("Persons not added to snapshot", m.People) + } + if len(c.People) != 3 { + t.Error("Persons not added to organization", c.People) + } + if m.People["foo"].Name != "Mr. 
Foo" { + t.Error("foo user was overridden", m.People["foo"]) + } +} + +func TestPopulatePersonToOrg(t *testing.T) { + m := model.MaintainerSnapshot{ + People: map[string]*model.Person{ + "foo": &model.Person{Login: "foo"}, + "bar": &model.Person{Login: "bar"}, + "baz": &model.Person{Login: "baz"}, + }, + Org: map[string]model.Org{ + "a": &model.OrgSerde{People: set.New("foo")}, + "b": &model.OrgSerde{People: set.New("foo", "bar")}, + "c": &model.OrgSerde{People: set.New("foo", "bar", "baz", "quux")}, + }, + } + mapping, err := m.PersonToOrg() + if err != nil { + t.Error(err) + } + if len(mapping) != 3 { + t.Errorf("foo was not mapped to the correct organizations: %v", + mapping["foo"]) + } + if len(mapping["bar"]) != 2 { + t.Errorf("bar was not mapped to the correct organizations: %v", + mapping["bar"]) + } + if len(mapping["baz"]) != 1 { + t.Errorf("baz was not mapped to the correct organizations: %v", + mapping["baz"]) + } + if len(mapping["quux"]) != 0 { + t.Errorf("quux was not mapped to the correct organizations: %v", + mapping["quux"]) + } +} + +type mockStore struct { + store.Store +} + +func (ms mockStore) GetSlackUrl(hostname string, user string) (string, error) { + if user == "" && hostname == "floopy-server.slack.com" { + return "http://this-is-the-url.com", nil + } + if user == "jon" && hostname == "bloopy-server.slack.com" { + return "http://this-is-the-other-url.com", nil + } + return "", nil +} + +func TestFixSlackTargets(t *testing.T) { + envvars.Env.Slack.TargetUrl = "http://standard-url.com" + c := store.AddToContext(context.Background(), mockStore{}) + config := &model.Config{ + Comment: model.CommentConfig{ + Enable: true, + Targets: []model.TargetConfig{ + { + Target: "floopy-server.slack.com", + Names: []string{"#channel1"}, + }, + { + Target: "github", + }, + { + Target: "slack", + Names: []string{"#channelZ"}, + }, + { + Target: "bloopy-server.slack.com", + Names: []string{"#channel2"}, + }, + }, + }, + } + err := FixSlackTargets(c, config, "jon") + if err != nil { + t.Errorf("Unexpected error %v", err) + } + if config.Comment.Targets[0].Target != model.Slack.String() { + t.Errorf("Unexpected target %s", config.Comment.Targets[0].Target) + } + if config.Comment.Targets[0].Url != "http://this-is-the-url.com" { + t.Errorf("Unexpected url %s", config.Comment.Targets[0].Url) + } + if config.Comment.Targets[1].Target != model.Github.String() { + t.Errorf("Unexpected target %s", config.Comment.Targets[0].Target) + } + if config.Comment.Targets[2].Target != model.Slack.String() { + t.Errorf("Unexpected target %s", config.Comment.Targets[0].Target) + } + if config.Comment.Targets[2].Url != "http://standard-url.com" { + t.Errorf("Unexpected url %s", config.Comment.Targets[0].Url) + } + if config.Comment.Targets[3].Target != model.Slack.String() { + t.Errorf("Unexpected target %s", config.Comment.Targets[0].Target) + } + if config.Comment.Targets[3].Url != "http://this-is-the-other-url.com" { + t.Errorf("Unexpected url %s", config.Comment.Targets[0].Url) + } +} diff --git a/snapshot/validate.go b/snapshot/validate.go new file mode 100644 index 0000000..d7144a5 --- /dev/null +++ b/snapshot/validate.go @@ -0,0 +1,42 @@ +/* + +SPDX-Copyright: Copyright (c) Capital One Services, LLC +SPDX-License-Identifier: Apache-2.0 +Copyright 2017 Capital One Services, LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and limitations under the License. + +*/ +package snapshot + +import ( + "github.com/capitalone/checks-out/model" + + multierror "github.com/mspiegel/go-multierror" +) + +func validateSnapshot(config *model.Config, snapshot *model.MaintainerSnapshot) error { + var errs error + for _, approval := range config.Approvals { + err := approval.Match.Validate(snapshot) + errs = multierror.Append(errs, err) + if approval.AntiMatch != nil { + err := approval.AntiMatch.Validate(snapshot) + errs = multierror.Append(errs, err) + } + if approval.AuthorMatch != nil { + err := approval.AuthorMatch.Validate(snapshot) + errs = multierror.Append(errs, err) + } + } + return errs +} diff --git a/store/context.go b/store/context.go index 06d075c..bd31721 100644 --- a/store/context.go +++ b/store/context.go @@ -1,8 +1,24 @@ +/* + +SPDX-Copyright: Copyright (c) Brad Rydzewski, project contributors, Capital One Services, LLC +SPDX-License-Identifier: Apache-2.0 +Copyright 2017 Brad Rydzewski, project contributors, Capital One Services, LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and limitations under the License. + +*/ package store -import ( - "golang.org/x/net/context" -) +import "context" const key = "store" @@ -20,3 +36,6 @@ func FromContext(c context.Context) Store { func ToContext(c Setter, store Store) { c.Set(key, store) } +func AddToContext(c context.Context, store Store) context.Context { + return context.WithValue(c, key, store) +} \ No newline at end of file diff --git a/store/datastore/datastore.go b/store/datastore/datastore.go index 4fbecdf..6ab7649 100644 --- a/store/datastore/datastore.go +++ b/store/datastore/datastore.go @@ -1,79 +1,96 @@ +/* + +SPDX-Copyright: Copyright (c) Brad Rydzewski, project contributors, Capital One Services, LLC +SPDX-License-Identifier: Apache-2.0 +Copyright 2017 Brad Rydzewski, project contributors, Capital One Services, LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and limitations under the License. 
+ +*/ package datastore import ( "database/sql" - "os" + "sync" "time" - "github.com/lgtmco/lgtm/store" - "github.com/lgtmco/lgtm/store/migration" + "github.com/capitalone/checks-out/envvars" + "github.com/capitalone/checks-out/set" + "github.com/capitalone/checks-out/store" + "github.com/capitalone/checks-out/store/migration" "github.com/Sirupsen/logrus" + // bindings for meddler _ "github.com/go-sql-driver/mysql" + // bindings for meddler _ "github.com/mattn/go-sqlite3" - "github.com/rubenv/sql-migrate" + // bindings for meddler + _ "github.com/lib/pq" + migrate "github.com/rubenv/sql-migrate" "github.com/russross/meddler" ) type datastore struct { *sql.DB + Migrations set.Set + curDB string } -// New creates a database connection for the given driver and datasource +var once sync.Once +var cachedStore store.Store + +func Get() store.Store { + once.Do(func() { + cachedStore = create(envvars.Env.Db.Driver, envvars.Env.Db.Datasource) + }) + return cachedStore +} + +// creates a database connection for the given driver and datasource // and returns a new Store. -func New(driver, config string) store.Store { - db := Open(driver, config) - return From(db) +func create(driver, config string) store.Store { + db, migrations := Open(driver, config) + return From(db, migrations, driver) } // From returns a Store using an existing database connection. -func From(db *sql.DB) store.Store { - return &datastore{db} +func From(db *sql.DB, migrations set.Set, driver string) store.Store { + return &datastore{db, migrations, driver} } // Open opens a new database connection with the specified // driver and connection string and returns a store. -func Open(driver, config string) *sql.DB { +func Open(driver, config string) (*sql.DB, set.Set) { db, err := sql.Open(driver, config) if err != nil { logrus.Errorln(err) logrus.Fatalln("database connection failed") } - if driver == "mysql" { - // per issue https://github.com/go-sql-driver/mysql/issues/257 - db.SetMaxIdleConns(0) - } setupMeddler(driver) logrus.Debugf("Driver %s", driver) logrus.Debugf("Data Source %s", config) - if err := pingDatabase(db); err != nil { + if err = pingDatabase(db); err != nil { logrus.Errorln(err) logrus.Fatalln("database ping attempts failed") } - - if err := setupDatabase(driver, db); err != nil { + ids, err := setupDatabase(driver, db) + if err != nil { logrus.Errorln(err) logrus.Fatalln("migration failed") } - return db -} - -// OpenTest opens a new database connection for testing purposes. -// The database driver and connection string are provided by -// environment variables, with fallback to in-memory sqlite. -func openTest() *sql.DB { - var ( - driver = "sqlite3" - config = ":memory:" - ) - if os.Getenv("DATABASE_DRIVER") != "" { - driver = os.Getenv("DATABASE_DRIVER") - config = os.Getenv("DATABASE_DATASOURCE") - } - return Open(driver, config) + return db, ids } // helper function to ping the database with backoff to ensure @@ -85,7 +102,7 @@ func pingDatabase(db *sql.DB) (err error) { if err == nil { return } - logrus.Infof("database ping failed. retry in 1s") + logrus.Infof("database ping failed. retry in 1s. %s", err) time.Sleep(time.Second) } return @@ -93,14 +110,25 @@ func pingDatabase(db *sql.DB) (err error) { // helper function to setup the databsae by performing // automated database migration steps. 
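// In the version added below, setupDatabase also returns the set of migration
// ids applied during the call: the pending ids are collected from
// migrate.PlanMigration before migrate.Exec runs, so callers can tell which
// migrations were just applied.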
-func setupDatabase(driver string, db *sql.DB) error { +func setupDatabase(driver string, db *sql.DB) (set.Set, error) { var migrations = &migrate.AssetMigrationSource{ Asset: migration.Asset, AssetDir: migration.AssetDir, Dir: driver, } - _, err := migrate.Exec(db, driver, migrations, migrate.Up) - return err + todo, _, err := migrate.PlanMigration(db, driver, migrations, migrate.Up, 0) + if err != nil { + return nil, err + } + _, err = migrate.Exec(db, driver, migrations, migrate.Up) + if err != nil { + return nil, err + } + done := set.Empty() + for _, m := range todo { + done.Add(m.Id) + } + return done, nil } // helper function to setup the meddler default driver @@ -115,3 +143,9 @@ func setupMeddler(driver string) { meddler.Default = meddler.PostgreSQL } } + +const ( + POSTGRES = "postgres" + MYSQL = "mysql" + SQLITE = "sqlite3" +) diff --git a/store/datastore/datastore_test.go b/store/datastore/datastore_test.go new file mode 100644 index 0000000..80bf73e --- /dev/null +++ b/store/datastore/datastore_test.go @@ -0,0 +1,41 @@ +/* + +SPDX-Copyright: Copyright (c) Capital One Services, LLC +SPDX-License-Identifier: Apache-2.0 +Copyright 2017 Capital One Services, LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and limitations under the License. + +*/ +package datastore + +import ( + "database/sql" + "os" + "github.com/capitalone/checks-out/set" +) + +// OpenTest opens a new database connection for testing purposes. +// The database driver and connection string are provided by +// environment variables, with fallback to in-memory sqlite. +func openTest() (*sql.DB, set.Set, string) { + var ( + driver = "sqlite3" + config = ":memory:" + ) + if os.Getenv("TEST_DB_DRIVER") != "" && os.Getenv("TEST_DB_SOURCE") != "" { + driver = os.Getenv("TEST_DB_DRIVER") + config = os.Getenv("TEST_DB_SOURCE") + } + db, migrations := Open(driver, config) + return db, migrations, driver +} diff --git a/store/datastore/repos.go b/store/datastore/repos.go index 52dae2e..da1cfbe 100644 --- a/store/datastore/repos.go +++ b/store/datastore/repos.go @@ -1,14 +1,40 @@ +/* + +SPDX-Copyright: Copyright (c) Brad Rydzewski, project contributors, Capital One Services, LLC +SPDX-License-Identifier: Apache-2.0 +Copyright 2017 Brad Rydzewski, project contributors, Capital One Services, LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and limitations under the License. 
+ +*/ package datastore import ( + "database/sql" "fmt" + "net/http" "strings" - "github.com/lgtmco/lgtm/model" + "github.com/capitalone/checks-out/exterror" + "github.com/capitalone/checks-out/model" + "github.com/capitalone/checks-out/set" "github.com/russross/meddler" ) +func (db *datastore) GetMigrations() set.Set { + return db.Migrations +} + func (db *datastore) GetRepo(id int64) (*model.Repo, error) { var repo = new(model.Repo) var err = meddler.Load(db, repoTable, repo, id) @@ -17,21 +43,67 @@ func (db *datastore) GetRepo(id int64) (*model.Repo, error) { func (db *datastore) GetRepoSlug(slug string) (*model.Repo, error) { var repo = new(model.Repo) - var err = meddler.QueryRow(db, repo, repoSlugQuery, slug) + var err = meddler.QueryRow(db, repo, repoSlugQuery[db.curDB], slug) + if err == sql.ErrNoRows { + return repo, exterror.Create(http.StatusNotFound, err) + } return repo, err } func (db *datastore) GetRepoMulti(slug ...string) ([]*model.Repo, error) { var repos = []*model.Repo{} - var instr, params = toList(slug) - var stmt = fmt.Sprintf(repoListQuery, instr) + if len(slug) == 0 { + return repos, nil + } + var instr, params = db.toList(slug) + var stmt = fmt.Sprintf(repoSlugsQuery, instr) var err = meddler.QueryAll(db, &repos, stmt, params...) return repos, err } -func (db *datastore) GetRepoOwner(owner string) ([]*model.Repo, error) { +func (db *datastore) GetAllRepos() ([]*model.Repo, error) { + var repos = []*model.Repo{} + var err = meddler.QueryAll(db, &repos, repoListQuery) + return repos, err +} + +func (db *datastore) GetUserEnabledOrgs(names []string) ([]*model.OrgDb, error) { + var orgs = []*model.OrgDb{} + var instr, params = db.toList(names) + var stmt = fmt.Sprintf(userOrgListQuery, instr) + var err = meddler.QueryAll(db, &orgs, stmt, params...) + return orgs, err +} + +func (db *datastore) GetOrgByName(owner string) (*model.OrgDb, error) { + var org = new(model.OrgDb) + var err = meddler.QueryRow(db, org, orgNameQuery[db.curDB], owner) + if err == sql.ErrNoRows { + return org, exterror.Create(http.StatusNotFound, err) + } + return org, err +} + +// CreateOrg creates a new auto-enrolling org. +func (db *datastore) CreateOrg(org *model.OrgDb) error { + return meddler.Insert(db, orgTable, org) +} + +// DeleteOrg deletes an auto-enrolling org. +func (db *datastore) DeleteOrg(org *model.OrgDb) error { + var _, err = db.Exec(orgDeleteStmt[db.curDB], org.ID) + return err +} + +func (db *datastore) GetReposForOrg(owner string) ([]*model.Repo, error) { var repos = []*model.Repo{} - var err = meddler.QueryAll(db, &repos, repoOwnerQuery, owner) + var err = meddler.QueryAll(db, &repos, repoOwnerQuery[db.curDB], owner) + return repos, err +} + +func (db *datastore) GetRepoUserId(id int64) ([]*model.Repo, error) { + var repos = []*model.Repo{} + var err = meddler.QueryAll(db, &repos, repoUserIdQuery[db.curDB], id) return repos, err } @@ -44,48 +116,157 @@ func (db *datastore) UpdateRepo(repo *model.Repo) error { } func (db *datastore) DeleteRepo(repo *model.Repo) error { - var _, err = db.Exec(repoDeleteStmt, repo.ID) + var _, err = db.Exec(repoDeleteStmt[db.curDB], repo.ID) return err } -func toList(items []string) (string, []interface{}) { +func (db *datastore) toList(items []string) (string, []interface{}) { var size = len(items) if size > 990 { size = 990 items = items[:990] } - var qs = make([]string, size, size) - var in = make([]interface{}, size, size) + var qs []string + var out []interface{} for i, item := range items { - qs[i] = "?" 
- in[i] = item + switch db.curDB { + case POSTGRES: + qs = append(qs, fmt.Sprintf("$%d", i+1)) + default: + qs = append(qs, "?") + } + out = append(out, item) } - return strings.Join(qs, ","), in + return strings.Join(qs, ","), out } -const repoTable = "repos" +const ( + repoTable = "repos" + orgTable = "orgs" +) -const repoSlugQuery = ` -SELECT * -FROM repos -WHERE repo_slug = ? -LIMIT 1; -` +var repoSlugQuery = map[string]string{ + POSTGRES: ` + SELECT * + FROM repos + WHERE repo_slug = $1 + LIMIT 1; + `, + MYSQL: ` + SELECT * + FROM repos + WHERE repo_slug = ? + LIMIT 1; + `, + SQLITE: ` + SELECT * + FROM repos + WHERE repo_slug = ? + LIMIT 1; + `, +} -const repoOwnerQuery = ` +const repoSlugsQuery = ` SELECT * FROM repos -WHERE repo_owner = ? +WHERE repo_slug IN (%s) +ORDER BY repo_slug ` +var repoUserIdQuery = map[string]string{ + POSTGRES: ` + SELECT * + FROM repos + WHERE repo_user_id = $1 + `, + MYSQL: ` + SELECT * + FROM repos + WHERE repo_user_id = ? + `, + SQLITE: ` + SELECT * + FROM repos + WHERE repo_user_id = ? + `, +} + +var repoOwnerQuery = map[string]string{ + POSTGRES: ` + SELECT * + FROM repos + WHERE repo_owner = $1 + `, + MYSQL: ` + SELECT * + FROM repos + WHERE repo_owner = ? + `, + SQLITE: ` + SELECT * + FROM repos + WHERE repo_owner = ? + `, +} + const repoListQuery = ` SELECT * FROM repos -WHERE repo_slug IN (%s) -ORDER BY repo_slug ` -const repoDeleteStmt = ` -DELETE FROM repos -WHERE repo_id = ? +var orgNameQuery = map[string]string{ + POSTGRES: ` + SELECT * + FROM orgs + WHERE org_owner = $1 + LIMIT 1; + `, + MYSQL: ` + SELECT * + FROM orgs + WHERE org_owner = ? + LIMIT 1; + `, + SQLITE: ` + SELECT * + FROM orgs + WHERE org_owner = ? + LIMIT 1; + `, +} + +const userOrgListQuery = ` +SELECT * +FROM orgs +WHERE org_owner IN (%s) ` + +var repoDeleteStmt = map[string]string{ + POSTGRES: ` + DELETE FROM repos + WHERE repo_id = $1 + `, + MYSQL: ` + DELETE FROM repos + WHERE repo_id = ? + `, + SQLITE: ` + DELETE FROM repos + WHERE repo_id = ? + `, +} + +var orgDeleteStmt = map[string]string{ + POSTGRES: ` + DELETE FROM orgs + WHERE org_id = $1 + `, + MYSQL: ` + DELETE FROM orgs + WHERE org_id = ? + `, + SQLITE: ` + DELETE FROM orgs + WHERE org_id = ? + `, +} diff --git a/store/datastore/repos_test.go b/store/datastore/repos_test.go index bf97569..b32fd69 100644 --- a/store/datastore/repos_test.go +++ b/store/datastore/repos_test.go @@ -1,17 +1,36 @@ +/* + +SPDX-Copyright: Copyright (c) Brad Rydzewski, project contributors, Capital One Services, LLC +SPDX-License-Identifier: Apache-2.0 +Copyright 2017 Brad Rydzewski, project contributors, Capital One Services, LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and limitations under the License. 
+ +*/ package datastore import ( "testing" + "github.com/capitalone/checks-out/model" + "github.com/franela/goblin" - "github.com/lgtmco/lgtm/model" ) func Test_repostore(t *testing.T) { - db := openTest() + db, ids, driver := openTest() defer db.Close() - s := From(db) + s := From(db, ids, driver) g := goblin.Goblin(t) g.Describe("Repo", func() { @@ -152,3 +171,85 @@ func Test_repostore(t *testing.T) { }) }) } + +func Test_orgstore(t *testing.T) { + db, ids, driver := openTest() + defer db.Close() + + s := From(db, ids, driver) + g := goblin.Goblin(t) + g.Describe("Org", func() { + + // before each test be sure to purge the package + // table data from the database. + g.BeforeEach(func() { + db.Exec("DELETE FROM orgs") + db.Exec("DELETE FROM users") + }) + + g.It("Should Add an Org and get by name", func() { + org := model.OrgDb{ + UserID: 1, + Owner: "testorg", + } + err1 := s.CreateOrg(&org) + getorg, err3 := s.GetOrgByName(org.Owner) + g.Assert(err1 == nil).IsTrue() + g.Assert(err3 == nil).IsTrue() + g.Assert(org.ID).Equal(getorg.ID) + }) + + g.It("Should Get a Multiple Orgs", func() { + org1 := &model.OrgDb{ + UserID: 1, + Owner: "foo", + } + org2 := &model.OrgDb{ + UserID: 2, + Owner: "octocat", + } + org3 := &model.OrgDb{ + UserID: 2, + Owner: "bar", + } + s.CreateOrg(org1) + s.CreateOrg(org2) + s.CreateOrg(org3) + + repos, err := s.GetUserEnabledOrgs([]string{"octocat", "bar"}) + g.Assert(err == nil).IsTrue() + g.Assert(len(repos)).Equal(2) + g.Assert(repos[0].Owner == "octocat" || repos[0].Owner == "bar").IsTrue("Expected to be octocat or bar") + g.Assert(repos[1].Owner == "octocat" || repos[1].Owner == "bar").IsTrue("Expected to be octocat or bar") + }) + + g.It("Should Delete an Org", func() { + org := model.OrgDb{ + UserID: 1, + Owner: "testorg", + } + s.CreateOrg(&org) + _, err1 := s.GetOrgByName(org.Owner) + err2 := s.DeleteOrg(&org) + _, err3 := s.GetOrgByName(org.Owner) + g.Assert(err1 == nil).IsTrue() + g.Assert(err2 == nil).IsTrue() + g.Assert(err3 == nil).IsFalse() + }) + + g.It("Should Enforce Unique Owners", func() { + org1 := model.OrgDb{ + UserID: 1, + Owner: "foo", + } + org2 := model.OrgDb{ + UserID: 2, + Owner: "foo", + } + err1 := s.CreateOrg(&org1) + err2 := s.CreateOrg(&org2) + g.Assert(err1 == nil).IsTrue() + g.Assert(err2 == nil).IsFalse() + }) + }) +} diff --git a/store/datastore/slack.go b/store/datastore/slack.go new file mode 100644 index 0000000..6ca7174 --- /dev/null +++ b/store/datastore/slack.go @@ -0,0 +1,112 @@ +/* + +SPDX-Copyright: Copyright (c) Capital One Services, LLC +SPDX-License-Identifier: Apache-2.0 +Copyright 2017 Capital One Services, LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and limitations under the License. 
+ +*/ +package datastore + +// Returns the slack URL for the specified hostname and user +// if the user string is blank, the default (admin-level) hostname is returned +// if no hostname is found, an empty string is returned +func (db *datastore) GetSlackUrl(hostname string, user string) (string, error) { + rows, err := db.Query(getSlackStmt[db.curDB], hostname, user) + if err != nil { + return "", err + } + defer rows.Close() + if !rows.Next() { + return "", nil + } + url := new(string) + err = rows.Scan(url) + + if url == nil { + *url = "" + } + return *url, err +} + +// Stores or updates the slack URL for the specified hostname and user +// if the user string is blank, the default (admin-level) hostname is stored or updated +func (db *datastore) AddUpdateSlackUrl(hostname string, user string, url string) error { + query := upsertSlackStmt[db.curDB] + var values []interface{} + switch db.curDB { + case POSTGRES: + values = []interface{}{hostname, user, url, url, hostname, user} + case MYSQL: + values = []interface{}{hostname, user, url, url} + case SQLITE: + values = []interface{}{hostname, user, url} + } + _, err := db.Exec(query, values...) + return err +} + +// Deletes the slack URL for the specified hostname and user +// if the user string is blank, the default (admin-level) hostname is deleted +func (db *datastore) DeleteSlackUrl(hostname string, user string) error { + _, err := db.Exec(deleteSlackStmt[db.curDB], hostname, user) + return err +} + +var upsertSlackStmt = map[string]string{ + POSTGRES: ` + INSERT into slack_urls(host_name, user, url) + VALUES ($1, $2, $3) + ON CONFLICT DO UPDATE + SET url = $4 WHERE host_name = $5 and user = $6 + `, + MYSQL: ` + INSERT into slack_urls(host_name, user, url) + VALUES (?, ?, ?) + ON DUPLICATE KEY UPDATE url=? + `, + SQLITE: ` + INSERT OR UPDATE into slack_urls(host_name, user, url) + VALUES (?, ?, ?) + `, +} + +var getSlackStmt = map[string]string{ + POSTGRES: ` + SELECT url FROM slack_urls + WHERE host_name = $1 and user = $2 + `, + MYSQL: ` + SELECT url FROM slack_urls + WHERE host_name = ? and user = ? + `, + SQLITE: ` + SELECT url FROM slack_urls + WHERE host_name = ? and user = ? + `, +} + +var deleteSlackStmt = map[string]string{ + POSTGRES: ` + DELETE FROM slack_urls + WHERE host_name = $1 and user = $2 + `, + MYSQL: ` + DELETE FROM slack_urls + WHERE host_name = ? and user = ? + `, + SQLITE: ` + DELETE FROM slack_urls + WHERE host_name = ? and user = ? + `, +} diff --git a/store/datastore/users.go b/store/datastore/users.go index 3de3ab8..e06cbac 100644 --- a/store/datastore/users.go +++ b/store/datastore/users.go @@ -1,7 +1,25 @@ +/* + +SPDX-Copyright: Copyright (c) Brad Rydzewski, project contributors, Capital One Services, LLC +SPDX-License-Identifier: Apache-2.0 +Copyright 2017 Brad Rydzewski, project contributors, Capital One Services, LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and limitations under the License. 
+ +*/ package datastore import ( - "github.com/lgtmco/lgtm/model" + "github.com/capitalone/checks-out/model" "github.com/russross/meddler" ) @@ -14,7 +32,7 @@ func (db *datastore) GetUser(id int64) (*model.User, error) { func (db *datastore) GetUserLogin(login string) (*model.User, error) { var usr = new(model.User) - var err = meddler.QueryRow(db, usr, userLoginQuery, login) + var err = meddler.QueryRow(db, usr, userLoginQuery[db.curDB], login) return usr, err } @@ -27,23 +45,69 @@ func (db *datastore) UpdateUser(user *model.User) error { } func (db *datastore) DeleteUser(user *model.User) error { - var _, err = db.Exec(userDeleteStmt, user.ID) + var _, err = db.Exec(userDeleteStmt[db.curDB], user.ID) return err } +func (db *datastore) GetAllUsers() ([]*model.User, error) { + var repos = []*model.User{} + var err = meddler.QueryAll(db, &repos, userListQuery) + return repos, err +} + +func (db *datastore) CheckValidUser(login string) (bool, error) { + rows, err := db.Query(limitUserQuery[db.curDB], login) + if err != nil { + return false, err + } + defer rows.Close() + return rows.Next(), nil +} + +func (db *datastore) GetValidOrgs() ([]string, error) { + out := []string{} + rows, err := db.Query(limitOrgQuery) + if err != nil { + return nil, err + } + defer rows.Close() + for rows.Next() { + var org string + err := rows.Scan(&org) + if err != nil { + return nil, err + } + out = append(out, org) + } + return out, nil +} + const userTable = "users" -const userLoginQuery = ` -SELECT * -FROM users -WHERE user_login=? -LIMIT 1 -` +var userLoginQuery = map[string]string{ + POSTGRES: ` + SELECT * + FROM users + WHERE user_login=$1 + LIMIT 1 + `, + MYSQL: ` + SELECT * + FROM users + WHERE user_login= ? + LIMIT 1 + `, + SQLITE: ` + SELECT * + FROM users + WHERE user_login= ? + LIMIT 1 + `, +} const userListQuery = ` SELECT * FROM users -ORDER BY user_login ASC ` const userCountQuery = ` @@ -51,7 +115,37 @@ SELECT count(1) FROM users ` -const userDeleteStmt = ` -DELETE FROM users -WHERE user_id=? +var userDeleteStmt = map[string]string{ + POSTGRES: ` + DELETE FROM users + WHERE user_id=$1 + `, + MYSQL: ` + DELETE FROM users + WHERE user_id=? + `, + SQLITE: ` + DELETE FROM users + WHERE user_id=? + `, +} + +var limitUserQuery = map[string]string{ + POSTGRES: ` + SELECT 1 AS exist FROM limit_users + WHERE login=$1 + `, + MYSQL: ` + SELECT 1 AS exist FROM limit_users + WHERE login=? + `, + SQLITE: ` + SELECT 1 AS exist FROM limit_users + WHERE login=? + `, +} + +const limitOrgQuery = ` +SELECT org +FROM limit_orgs ` diff --git a/store/datastore/users_test.go b/store/datastore/users_test.go index 1cf1f65..8f41419 100644 --- a/store/datastore/users_test.go +++ b/store/datastore/users_test.go @@ -1,16 +1,95 @@ +/* + +SPDX-Copyright: Copyright (c) Brad Rydzewski, project contributors, Capital One Services, LLC +SPDX-License-Identifier: Apache-2.0 +Copyright 2017 Brad Rydzewski, project contributors, Capital One Services, LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and limitations under the License. 
+ +*/ package datastore import ( - "testing" - "github.com/franela/goblin" - "github.com/lgtmco/lgtm/model" + "github.com/capitalone/checks-out/model" + "testing" ) +func TestDatastore_GetValidOrgs(t *testing.T) { + db, ids, driver := openTest() + defer db.Close() + s := From(db, ids, driver) + + //put in some sample values + db.Exec("insert into limit_orgs values ('beatles'), ('stones'), ('ledzep')") + vals, err := s.GetValidOrgs() + if err != nil { + t.Errorf("Error getting valid orgs: %v", err) + } + if len(vals) != 3 { + t.Errorf("Expected 3 values, got %d", len(vals)) + } + db.Exec("delete from limit_orgs") + vals, err = s.GetValidOrgs() + if err != nil { + t.Errorf("Error getting valid orgs: %v", err) + } + if len(vals) != 0 { + t.Errorf("Expected 0 values, got %d", len(vals)) + } +} + +func TestDatastore_CheckUserValid(t *testing.T) { + db, ids, driver := openTest() + defer db.Close() + s := From(db, ids, driver) + + //put in some sample values + db.Exec("insert into limit_users values ('john'), ('paul'), ('george'), ('ringo')") + found, err := s.CheckValidUser("john") + if err != nil { + t.Errorf("Error checking valid user: %v", err) + } + if !found { + t.Error("Expected john to be found, he wasn't.") + } + found, err = s.CheckValidUser("frank") + if err != nil { + t.Errorf("Error checking valid user: %v", err) + } + if found { + t.Error("Expected frank to not be found, he was.") + } + db.Exec("delete from limit_users") + found, err = s.CheckValidUser("john") + if err != nil { + t.Errorf("Error checking valid user: %v", err) + } + if found { + t.Error("Expected john to not be found, he was.") + } + found, err = s.CheckValidUser("frank") + if err != nil { + t.Errorf("Error checking valid user: %v", err) + } + if found { + t.Error("Expected frank to not be found, he was.") + } +} + func Test_userstore(t *testing.T) { - db := openTest() + db, ids, driver := openTest() defer db.Close() - s := From(db) + s := From(db, ids, driver) g := goblin.Goblin(t) g.Describe("User", func() { @@ -24,7 +103,6 @@ func Test_userstore(t *testing.T) { g.It("Should Update a User", func() { user := model.User{ Login: "joe", - Email: "foo@bar.com", Token: "e42080dddf012c718e476da161d21ad5", } err1 := s.CreateUser(&user) @@ -39,7 +117,6 @@ func Test_userstore(t *testing.T) { g.It("Should Add a new User", func() { user := model.User{ Login: "joe", - Email: "foo@bar.com", Token: "e42080dddf012c718e476da161d21ad5", } err := s.CreateUser(&user) @@ -52,7 +129,6 @@ func Test_userstore(t *testing.T) { Login: "joe", Token: "f0b461ca586c27872b43a0685cbc2847", Secret: "976f22a5eef7caacb7e678d6c52f49b1", - Email: "foo@bar.com", Avatar: "b9015b0857e16ac4d94a0ffd9a0b79c8", } @@ -63,14 +139,12 @@ func Test_userstore(t *testing.T) { g.Assert(user.Login).Equal(getuser.Login) g.Assert(user.Token).Equal(getuser.Token) g.Assert(user.Secret).Equal(getuser.Secret) - g.Assert(user.Email).Equal(getuser.Email) g.Assert(user.Avatar).Equal(getuser.Avatar) }) g.It("Should Get a User By Login", func() { user := model.User{ Login: "joe", - Email: "foo@bar.com", Token: "e42080dddf012c718e476da161d21ad5", } s.CreateUser(&user) @@ -83,12 +157,10 @@ func Test_userstore(t *testing.T) { g.It("Should Enforce Unique User Login", func() { user1 := model.User{ Login: "joe", - Email: "foo@bar.com", Token: "e42080dddf012c718e476da161d21ad5", } user2 := model.User{ Login: "joe", - Email: "foo@bar.com", Token: "ab20g0ddaf012c744e136da16aa21ad9", } err1 := s.CreateUser(&user1) @@ -100,7 +172,6 @@ func Test_userstore(t *testing.T) { g.It("Should Del a 
User", func() { user := model.User{ Login: "joe", - Email: "foo@bar.com", Token: "e42080dddf012c718e476da161d21ad5", } s.CreateUser(&user) diff --git a/store/migration/migration.go b/store/migration/migration.go index 7ab3c98..7d98274 100644 --- a/store/migration/migration.go +++ b/store/migration/migration.go @@ -1,3 +1,3 @@ package migration -//go:generate go-bindata -pkg migration -o migration_gen.go sqlite3/ mysql/ +//go:generate go-bindata -pkg migration -o migration_gen.go sqlite3/ mysql/ postgres/ diff --git a/store/migration/migration_gen.go b/store/migration/migration_gen.go new file mode 100644 index 0000000..70a98c9 --- /dev/null +++ b/store/migration/migration_gen.go @@ -0,0 +1,701 @@ +// Code generated by go-bindata. +// sources: +// sqlite3/001_init.sql +// sqlite3/002_org.sql +// sqlite3/003_drop_emails.sql +// sqlite3/004_limit_users.sql +// sqlite3/005_oauth_scope.sql +// sqlite3/006_add_orgs_table.sql +// sqlite3/007_add_slack_urls.sql +// mysql/001_init.sql +// mysql/002_org.sql +// mysql/003_drop_emails.sql +// mysql/004_limit_users.sql +// mysql/005_oauth_scope.sql +// mysql/006_add_orgs_table.sql +// mysql/007_add_slack_urls.sql +// postgres/001_init.sql +// postgres/002_org.sql +// postgres/003_drop_emails.sql +// postgres/004_limit_users.sql +// postgres/005_oauth_scope.sql +// postgres/006_add_orgs_table.sql +// postgres/007_add_slack_urls.sql +// DO NOT EDIT! + +package migration + +import ( + "bytes" + "compress/gzip" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "strings" + "time" +) + +func bindataRead(data []byte, name string) ([]byte, error) { + gz, err := gzip.NewReader(bytes.NewBuffer(data)) + if err != nil { + return nil, fmt.Errorf("Read %q: %v", name, err) + } + + var buf bytes.Buffer + _, err = io.Copy(&buf, gz) + clErr := gz.Close() + + if err != nil { + return nil, fmt.Errorf("Read %q: %v", name, err) + } + if clErr != nil { + return nil, err + } + + return buf.Bytes(), nil +} + +type asset struct { + bytes []byte + info os.FileInfo +} + +type bindataFileInfo struct { + name string + size int64 + mode os.FileMode + modTime time.Time +} + +func (fi bindataFileInfo) Name() string { + return fi.name +} +func (fi bindataFileInfo) Size() int64 { + return fi.size +} +func (fi bindataFileInfo) Mode() os.FileMode { + return fi.mode +} +func (fi bindataFileInfo) ModTime() time.Time { + return fi.modTime +} +func (fi bindataFileInfo) IsDir() bool { + return false +} +func (fi bindataFileInfo) Sys() interface{} { + return nil +} + +var _sqlite3001_initSql = 
[]byte("\x1f\x8b\x08\x00\x00\x09\x6e\x88\x00\xff\x8c\x92\xc1\x8e\x82\x30\x10\x86\xef\x7d\x8a\x39\x6a\x56\x9f\x80\x13\xca\xec\xa6\x59\x6d\xdd\x5a\x12\x3c\x99\x66\xb7\x21\x8d\x40\x49\x41\xdd\xc7\xdf\xd0\x14\x94\x1e\x36\x72\x6a\xbe\xf0\x4f\xe7\x9b\xce\x7a\x0d\x6f\xb5\x29\x9d\xea\x35\xe4\x2d\x21\x5b\x81\xa9\x44\x90\xe9\x66\x87\x40\xdf\x81\x71\x09\x58\xd0\xa3\x3c\xc2\xb5\xd3\xae\x83\x05\xf1\x87\xb3\xf9\x01\xff\x51\x26\xf1\x03\x05\x1c\x04\xdd\xa7\xe2\x04\x9f\x78\x82\x34\x97\x9c\xb2\xad\xc0\x3d\x32\x49\x56\xfe\xff\xca\x96\xa6\x01\x00\x89\xc5\x88\x7a\x7b\xd1\x11\xd2\xb5\x32\xd5\x1c\xa9\x9b\xea\x95\x9b\xa1\x4e\x7f\x3b\xdd\x07\x44\x56\x39\xa3\x5f\x39\x2e\x1e\xd7\x2c\xc9\x32\xf9\x57\xc5\xe9\xd6\x7a\x95\xe1\x30\xa9\xbc\xe2\xe2\x03\xd3\x00\x42\x20\x60\x7b\x6f\xb4\x83\xa9\x7b\xcf\x1a\x55\x6b\x88\x58\x57\x5d\xcb\x98\x55\xa6\xb9\xc4\xac\x75\xe6\x36\xbc\x0b\x6c\x38\xdf\x61\xca\xc6\x78\xb0\x8f\xf4\xa7\xd2\x33\x7b\xca\x32\x2c\x22\x7b\xf3\x7b\x9e\xf5\xcb\xd9\x38\x90\x07\x5e\x26\xaf\x54\x18\x07\x11\x55\x08\x78\x68\xe3\x79\xbf\x32\x7b\x6f\x08\xc9\x04\x3f\x84\x47\xf1\x99\xe4\x99\xf8\x1d\x4b\xc8\x5f\x00\x00\x00\xff\xff\x54\x85\x0e\xca\x96\x02\x00\x00") + +func sqlite3001_initSqlBytes() ([]byte, error) { + return bindataRead( + _sqlite3001_initSql, + "sqlite3/001_init.sql", + ) +} + +func sqlite3001_initSql() (*asset, error) { + bytes, err := sqlite3001_initSqlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "sqlite3/001_init.sql", size: 662, mode: os.FileMode(420), modTime: time.Unix(1481147727, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _sqlite3002_orgSql = []byte("\x1f\x8b\x08\x00\x00\x09\x6e\x88\x00\xff\xd2\xd5\x55\xd0\xce\xcd\x4c\x2f\x4a\x2c\x49\x55\x08\x2d\xe0\xe2\x72\xf4\x09\x71\x0d\x52\x08\x71\x74\xf2\x71\x55\x28\x4a\x2d\xc8\x2f\x56\x70\x74\x71\x51\x70\xf6\xf7\x09\xf5\xf5\x03\x0b\xc4\xe7\x17\xa5\x2b\x38\xf9\xfb\xfb\xb8\x3a\xfa\x29\xb8\xb8\xba\x39\x86\xfa\x84\x28\xb8\x39\xfa\x04\xbb\x5a\x73\x01\x02\x00\x00\xff\xff\x4d\x6f\x52\x6d\x4d\x00\x00\x00") + +func sqlite3002_orgSqlBytes() ([]byte, error) { + return bindataRead( + _sqlite3002_orgSql, + "sqlite3/002_org.sql", + ) +} + +func sqlite3002_orgSql() (*asset, error) { + bytes, err := sqlite3002_orgSqlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "sqlite3/002_org.sql", size: 77, mode: os.FileMode(420), modTime: time.Unix(1481147727, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _sqlite3003_drop_emailsSql = []byte("\x1f\x8b\x08\x00\x00\x09\x6e\x88\x00\xff\x6c\x90\xc1\x4e\xc3\x30\x10\x44\xef\xfb\x15\x7b\x6b\x2b\xd2\x2f\xc8\xc9\x24\x1b\x64\x91\xd8\x65\xb3\x96\xda\x53\x15\x81\x55\x45\x34\x4d\x95\x04\xf8\x7d\x84\x63\x28\x41\xf8\x34\x7e\xda\xf5\x8c\x67\xbb\xc5\xbb\xae\x3d\x0d\xcd\xe4\xd1\x5d\x01\x54\x29\xc4\x28\xea\xbe\x24\x7c\x1b\xfd\x30\x22\x93\x51\x15\xa1\x58\x9c\x7c\x77\x3d\x06\x98\x02\x64\x4c\x4a\x28\x4e\xea\x02\x8d\x15\xa4\xbd\xae\xa5\x8e\x7b\x6b\x08\xe2\xd8\xbe\x60\x38\xda\x08\x3d\x10\xe3\x8e\x75\xa5\xf8\x80\x8f\x74\x40\xe5\xc4\x6a\x93\x31\x55\x64\x04\x92\x30\x7f\xee\x4f\xed\x05\x11\x85\xf6\xdf\x68\xea\x5f\xfd\x1f\xd4\xbc\x37\x53\x33\x2c\xd0\xe8\x9f\x07\x3f\x45\x04\x89\x33\xfa\xc9\xd1\xfa\xf6\xe6\x06\x36\x29\x80\x36\x35\xb1\x7c\xa5\xb1\x73\x50\xa8\xa9\xa4\x4c\x7e\xd2\x26\x78\x5b\x89\x3a\xf8\x47\x3d\x1b\xc7\xcb\x6c\x09\x05\xdb\x0a\x96\xf5\xe4\x6c\x77\xb1\x9c\x05\xff\xdd\x77\xde\x7f\x5c\xfe\x6b\x5c\xe5\x39\x66\xb6\x74\x95\x99\x5d\x7c\xd7\xb4\xe7\xf0\x2d\xcc\xa9\x50\xae\x14\x5c\xad\xd2\xcf\x00\x00\x00\xff\xff\x71\xe7\x23\x15\xba\x01\x00\x00") 
+ +func sqlite3003_drop_emailsSqlBytes() ([]byte, error) { + return bindataRead( + _sqlite3003_drop_emailsSql, + "sqlite3/003_drop_emails.sql", + ) +} + +func sqlite3003_drop_emailsSql() (*asset, error) { + bytes, err := sqlite3003_drop_emailsSqlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "sqlite3/003_drop_emails.sql", size: 442, mode: os.FileMode(420), modTime: time.Unix(1483464365, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _sqlite3004_limit_usersSql = []byte("\x1f\x8b\x08\x00\x00\x09\x6e\x88\x00\xff\x8c\xce\xb1\x0a\x83\x30\x10\xc6\xf1\xfd\x9e\xe2\x1b\x95\xea\x13\x64\xb2\xf5\x0a\x81\xa2\xad\x5e\xc0\xad\x74\x90\x10\x50\x53\xa2\xa5\xaf\x5f\x1a\x1d\x1c\x3a\xf4\xc6\x3b\xf8\xdd\x3f\xcf\x71\x18\x9d\x0d\x8f\xa5\x87\x79\x12\x9d\x1a\x2e\x84\x21\xc5\xf1\xc2\xd0\x67\x54\xb5\x80\x3b\xdd\x4a\x8b\xc1\x8d\x6e\xb9\xbf\xe6\x3e\xcc\x48\x08\x83\xb7\x6e\x42\x1c\xe1\x4e\x28\x33\x95\xbe\x19\x4e\xe2\x3e\xa5\x54\xfd\x81\xf9\x60\xa3\x05\x1f\xec\xaa\x00\x19\x36\xc8\x07\xbb\x32\xfb\xc6\xd2\xbf\x27\xa2\xb2\xa9\xaf\x1b\xbb\xab\x52\x3f\x0e\xdf\x0f\x8a\x3e\x01\x00\x00\xff\xff\x23\xb0\x80\x6b\xe6\x00\x00\x00") + +func sqlite3004_limit_usersSqlBytes() ([]byte, error) { + return bindataRead( + _sqlite3004_limit_usersSql, + "sqlite3/004_limit_users.sql", + ) +} + +func sqlite3004_limit_usersSql() (*asset, error) { + bytes, err := sqlite3004_limit_usersSqlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "sqlite3/004_limit_users.sql", size: 230, mode: os.FileMode(420), modTime: time.Unix(1483464365, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _sqlite3005_oauth_scopeSql = []byte("\x1f\x8b\x08\x00\x00\x09\x6e\x88\x00\xff\xd2\xd5\x55\xd0\xce\xcd\x4c\x2f\x4a\x2c\x49\x55\x08\x2d\xe0\xe2\x72\xf4\x09\x71\x0d\x52\x08\x71\x74\xf2\x71\x55\x28\x2d\x4e\x2d\x2a\x56\x70\x74\x71\x51\x70\xf6\xf7\x09\xf5\xf5\x03\x0b\xc4\x17\x27\xe7\x17\xa4\x16\x2b\x84\xb8\x46\x84\x28\xb8\xb8\xba\x39\x86\xfa\x84\x28\xa8\xab\x5b\x73\x01\x02\x00\x00\xff\xff\x49\xec\x90\x98\x4a\x00\x00\x00") + +func sqlite3005_oauth_scopeSqlBytes() ([]byte, error) { + return bindataRead( + _sqlite3005_oauth_scopeSql, + "sqlite3/005_oauth_scope.sql", + ) +} + +func sqlite3005_oauth_scopeSql() (*asset, error) { + bytes, err := sqlite3005_oauth_scopeSqlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "sqlite3/005_oauth_scope.sql", size: 74, mode: os.FileMode(420), modTime: time.Unix(1495121623, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _sqlite3006_add_orgs_tableSql = 
[]byte("\x1f\x8b\x08\x00\x00\x09\x6e\x88\x00\xff\x8c\x90\xc1\x4e\xc3\x30\x10\x44\xef\xfe\x8a\x39\x26\x82\x4a\x50\xd1\x53\x4e\x6e\xb2\x80\x45\x6b\x97\xad\x83\xda\x53\x85\xc0\xaa\x2c\x20\xa9\x9c\x42\xf9\x7c\x64\xe4\xa4\x05\x2e\xf8\x64\xcd\xe8\xed\xee\xcc\x68\x84\xb3\x37\xbf\x0d\x8f\x7b\x87\x7a\x27\x44\xc9\x24\x2d\xc1\xca\xe9\x8c\xa0\xae\xa1\x8d\x05\xad\xd4\xd2\x2e\xd1\x86\x6d\x87\x4c\x20\x7e\x36\xfe\x19\xdf\x4f\x69\x4b\x37\xc4\x58\xb0\x9a\x4b\x5e\xe3\x8e\xd6\x90\xb5\x35\x4a\x97\x4c\x73\xd2\xf6\x3c\x01\xef\x9d\x0b\x91\x4a\x40\x2f\xb7\x87\xc6\x05\x00\x0f\x92\xcb\x5b\xc9\xd9\x78\x32\xc9\x7b\xef\xd5\x37\x2f\x38\xf1\x2e\x2f\xc6\x57\x83\xb9\x0b\xfe\x23\x1e\x3d\x35\x66\x46\x52\xf7\x72\xe7\x9e\x82\xdb\xff\x9d\x57\x6b\x75\x5f\x13\xb2\x61\x67\x2e\xf2\x62\x88\xab\x74\x45\xab\x5f\x71\xfd\xe7\xe6\x78\xa0\xd1\x29\xfe\x91\x2f\xfe\x01\xf7\xa1\x7f\xe0\x49\x8c\xeb\x4f\xdb\xaf\xda\x43\x23\x44\xc5\x66\x91\xda\x8f\x44\x21\xbe\x02\x00\x00\xff\xff\x7b\x6c\xa2\x6a\xa1\x01\x00\x00") + +func sqlite3006_add_orgs_tableSqlBytes() ([]byte, error) { + return bindataRead( + _sqlite3006_add_orgs_tableSql, + "sqlite3/006_add_orgs_table.sql", + ) +} + +func sqlite3006_add_orgs_tableSql() (*asset, error) { + bytes, err := sqlite3006_add_orgs_tableSqlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "sqlite3/006_add_orgs_table.sql", size: 417, mode: os.FileMode(420), modTime: time.Unix(1496351647, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _sqlite3007_add_slack_urlsSql = []byte("\x1f\x8b\x08\x00\x00\x09\x6e\x88\x00\xff\x6c\x8f\x31\x4e\xc5\x40\x0c\x44\x7b\x9f\x62\xca\xff\xc5\xcf\x09\x52\x81\x42\x87\x04\x42\x50\x47\x4b\x30\xc1\xca\xae\x37\x78\xbd\x22\xb9\x3d\x4a\x0a\x94\x22\x9d\xf5\xac\x79\x9a\x69\x1a\xdc\x25\x19\x2d\x38\xe3\x7d\x26\x1a\x8c\xb7\xd3\xc3\x47\x64\xc8\x17\x34\x3b\x78\x91\xe2\x05\x25\x86\x61\xea\xab\xc5\x72\x21\x40\x3e\x21\xea\x3c\xb2\x61\x36\x49\xc1\x56\x4c\xbc\x22\x54\xcf\xa2\x83\x71\x62\xf5\x1b\x01\xdf\xb9\x78\xaf\x21\x31\x9c\x17\xdf\x7d\x5a\x63\xdc\x5e\xb5\xb0\x9d\x50\x8b\x27\x50\xe5\xa7\xf2\xe5\x5f\x76\xdb\xc3\x57\xba\xb6\x44\xc7\x09\x5d\xfe\x55\xa2\xee\xf5\xf9\x05\x6f\xf7\x0f\x4f\x8f\x87\xd2\x2d\xfd\x05\x00\x00\xff\xff\x7c\x1e\xe0\x1f\xec\x00\x00\x00") + +func sqlite3007_add_slack_urlsSqlBytes() ([]byte, error) { + return bindataRead( + _sqlite3007_add_slack_urlsSql, + "sqlite3/007_add_slack_urls.sql", + ) +} + +func sqlite3007_add_slack_urlsSql() (*asset, error) { + bytes, err := sqlite3007_add_slack_urlsSqlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "sqlite3/007_add_slack_urls.sql", size: 236, mode: os.FileMode(420), modTime: time.Unix(1497971578, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _mysql001_initSql = 
[]byte("\x1f\x8b\x08\x00\x00\x09\x6e\x88\x00\xff\x8c\x92\x41\x6f\x82\x30\x18\x86\xef\xfd\x15\xdf\x11\xb2\x99\x6c\x66\x9e\x38\x55\xf9\xb6\x35\xd3\xe2\x6a\x59\xf4\x64\x9a\xad\x31\x8d\x08\xa6\xa0\xfe\xfd\x85\x5a\x81\x6d\xb2\xc8\xa9\xe9\xc3\x5b\x78\x9f\x7e\x83\x01\xdc\xed\xcc\xc6\xaa\x4a\x43\xba\x27\x64\x22\x90\x4a\x04\x49\xc7\x53\x04\xf6\x0c\x3c\x91\x80\x4b\xb6\x90\x0b\x38\x94\xda\x96\x10\x10\xb7\x58\x9b\x2f\x70\x0f\xe3\x12\x5f\x50\xc0\x5c\xb0\x19\x15\x2b\x78\xc3\x15\xd0\x54\x26\x6b\xc6\x27\x02\x67\xc8\x25\xb9\x77\x81\xac\xd8\x98\x1c\x00\x3e\xa8\x98\xbc\x52\x11\x0c\x47\xa3\xd0\xa3\xaa\xd8\xea\x1e\xa4\x77\xca\x64\xd7\x91\x3a\xaa\x4a\xd9\x16\x3d\x3e\x0c\x9f\x2e\xac\xd4\x9f\x56\x57\xbf\x63\x29\x67\xef\x29\x06\xed\xef\x84\x24\x8c\xfe\xed\x6c\xf5\xbe\x70\x9d\xeb\x45\xd3\xf9\xa6\xd2\x2e\xd1\xa8\xf2\x09\xbf\x5d\x9c\x72\x6d\xe1\x4f\x2d\xc7\x72\xb5\xd3\xd0\xc3\xca\xec\xb0\xe9\x63\x99\xc9\xb7\x3f\x98\xf7\xe1\xe0\xde\x9a\x63\x7d\xc5\x30\x4e\x92\x29\x52\x7e\x39\xcf\x6b\xba\xee\xa9\xf9\xe4\x59\x13\x9d\x4a\x14\xde\xd2\xd9\x0b\x8d\x63\x60\x3c\xc6\x25\x04\x6d\xad\x30\xba\xe1\x4d\xef\xa5\x3e\xb6\x3b\x81\x71\x71\xca\x09\x89\x45\x32\xef\xa6\xa3\xee\x8e\x9b\xc2\x88\x7c\x07\x00\x00\xff\xff\x77\x0d\xa2\x03\xb8\x02\x00\x00") + +func mysql001_initSqlBytes() ([]byte, error) { + return bindataRead( + _mysql001_initSql, + "mysql/001_init.sql", + ) +} + +func mysql001_initSql() (*asset, error) { + bytes, err := mysql001_initSqlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "mysql/001_init.sql", size: 696, mode: os.FileMode(420), modTime: time.Unix(1481147727, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _mysql002_orgSql = []byte("\x1f\x8b\x08\x00\x00\x09\x6e\x88\x00\xff\xd2\xd5\x55\xd0\xce\xcd\x4c\x2f\x4a\x2c\x49\x55\x08\x2d\xe0\xe2\x72\xf4\x09\x71\x0d\x52\x08\x71\x74\xf2\x71\x55\x28\x4a\x2d\xc8\x2f\x56\x70\x74\x71\x51\x70\xf6\xf7\x09\xf5\xf5\x03\x0b\xc4\xe7\x17\xa5\x2b\x38\xf9\xfb\xfb\xb8\x3a\xfa\x29\xb8\xb8\xba\x39\x86\xfa\x84\x28\xb8\x39\xfa\x04\xbb\x5a\x73\x71\x21\x9b\xe6\x92\x5f\x9e\x87\xcd\x3c\x97\x20\xff\x00\x74\x03\xad\x01\x01\x00\x00\xff\xff\x25\x2b\x07\x70\x87\x00\x00\x00") + +func mysql002_orgSqlBytes() ([]byte, error) { + return bindataRead( + _mysql002_orgSql, + "mysql/002_org.sql", + ) +} + +func mysql002_orgSql() (*asset, error) { + bytes, err := mysql002_orgSqlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "mysql/002_org.sql", size: 135, mode: os.FileMode(420), modTime: time.Unix(1481147727, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _mysql003_drop_emailsSql = []byte("\x1f\x8b\x08\x00\x00\x09\x6e\x88\x00\xff\xd2\xd5\x55\xd0\xce\xcd\x4c\x2f\x4a\x2c\x49\x55\x08\x2d\xe0\xe2\x72\xf4\x09\x71\x0d\x52\x08\x71\x74\xf2\x71\x55\x28\x2d\x4e\x2d\x2a\x56\x70\x09\xf2\x0f\x50\x70\xf6\xf7\x09\xf5\xf5\x03\x8b\xc4\xa7\xe6\x26\x66\xe6\x58\x73\x71\x21\x6b\x75\xc9\x2f\xcf\xc3\xa6\xd9\xd1\xc5\x05\x53\xaf\x42\x98\x63\x90\xb3\x87\x63\x90\x86\x91\xa9\xa9\xa6\x82\x8b\xab\x9b\x63\xa8\x4f\x88\x82\xba\xba\x35\x20\x00\x00\xff\xff\xe3\x75\x50\xf8\x8d\x00\x00\x00") + +func mysql003_drop_emailsSqlBytes() ([]byte, error) { + return bindataRead( + _mysql003_drop_emailsSql, + "mysql/003_drop_emails.sql", + ) +} + +func mysql003_drop_emailsSql() (*asset, error) { + bytes, err := mysql003_drop_emailsSqlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "mysql/003_drop_emails.sql", size: 141, mode: os.FileMode(420), modTime: time.Unix(1483464365, 0)} + a := &asset{bytes: bytes, info: info} + 
return a, nil +} + +var _mysql004_limit_usersSql = []byte("\x1f\x8b\x08\x00\x00\x09\x6e\x88\x00\xff\xd2\xd5\x55\xd0\xce\xcd\x4c\x2f\x4a\x2c\x49\x55\x08\x2d\xe0\xe2\x72\x0e\x72\x75\x0c\x71\x55\x08\x71\x74\xf2\x71\x55\xf0\x74\x53\xf0\xf3\x0f\x51\x70\x8d\xf0\x0c\x0e\x09\x56\xc8\xc9\xcc\xcd\x2c\x89\x2f\x2d\x4e\x2d\x2a\x56\xd0\xe0\x52\x50\xc8\xc9\x4f\xcf\xcc\x53\x00\x83\x30\xc7\x20\x67\x0f\xc7\x20\x0d\x23\x53\x53\x4d\xb0\x16\xbf\x50\x1f\x1f\x2e\x9d\x50\x3f\xcf\xc0\x50\x57\x0d\xb0\x42\x4d\x2e\x4d\x6b\x22\x8c\xcf\x2f\x4a\x87\x98\x9e\x5f\x94\x8e\xc3\x58\x05\x05\x1d\x05\xa8\xc9\xf9\x45\xe9\x10\x73\x91\xbd\xe1\x92\x5f\x9e\xc7\xc5\xe5\x12\xe4\x1f\x00\xb5\x07\xc9\xe1\xd6\x58\x24\x40\x56\x5a\x73\x01\x02\x00\x00\xff\xff\x21\x06\x25\x0f\x09\x01\x00\x00") + +func mysql004_limit_usersSqlBytes() ([]byte, error) { + return bindataRead( + _mysql004_limit_usersSql, + "mysql/004_limit_users.sql", + ) +} + +func mysql004_limit_usersSql() (*asset, error) { + bytes, err := mysql004_limit_usersSqlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "mysql/004_limit_users.sql", size: 265, mode: os.FileMode(420), modTime: time.Unix(1483464365, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _mysql005_oauth_scopeSql = []byte("\x1f\x8b\x08\x00\x00\x09\x6e\x88\x00\xff\xd2\xd5\x55\xd0\xce\xcd\x4c\x2f\x4a\x2c\x49\x55\x08\x2d\xe0\xe2\x72\xf4\x09\x71\x0d\x52\x08\x71\x74\xf2\x71\x55\x28\x2d\x4e\x2d\x2a\x56\x70\x74\x71\x51\x70\xf6\xf7\x09\xf5\xf5\x03\x0b\xc4\x17\x27\xe7\x17\xa4\x16\x2b\x84\x39\x06\x39\x7b\x38\x06\x69\x18\x99\x9a\x6a\x2a\xb8\xb8\xba\x39\x86\xfa\x84\x28\xa8\xab\x5b\x73\x71\x21\x1b\xe9\x92\x5f\x9e\x87\xcd\x50\x97\x20\xff\x00\x2c\xa6\x5a\x03\x02\x00\x00\xff\xff\x8c\x08\x6c\xbb\x8f\x00\x00\x00") + +func mysql005_oauth_scopeSqlBytes() ([]byte, error) { + return bindataRead( + _mysql005_oauth_scopeSql, + "mysql/005_oauth_scope.sql", + ) +} + +func mysql005_oauth_scopeSql() (*asset, error) { + bytes, err := mysql005_oauth_scopeSqlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "mysql/005_oauth_scope.sql", size: 143, mode: os.FileMode(420), modTime: time.Unix(1495121623, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _mysql006_add_orgs_tableSql = []byte("\x1f\x8b\x08\x00\x00\x09\x6e\x88\x00\xff\x84\x90\xc1\x4e\xc3\x30\x10\x44\xef\xfb\x15\x73\x4c\x04\x95\xa0\xa2\xa7\x9c\xdc\x64\x01\x8b\xd4\x2e\x5b\x07\xb5\xa7\x0a\x81\x55\x45\x40\x52\x39\x85\xfe\x3e\x72\x49\xaa\x1c\x90\xea\x93\xb5\x33\x6f\xec\xd9\xc9\x04\x57\x5f\xf5\x2e\xbc\x1e\x3c\xaa\x3d\x51\x2e\xac\x1c\xc3\xa9\x79\xc9\xd0\xf7\x30\xd6\x81\xd7\x7a\xe5\x56\x68\xc3\xae\x43\x42\x40\xbc\x6d\xeb\x77\xfc\x1d\x6d\x1c\x3f\xb0\x60\x29\x7a\xa1\x64\x83\x27\xde\x40\x55\xce\x6e\xb5\xc9\x85\x17\x6c\x1c\x01\xd7\x11\xf9\xee\x7c\x38\x71\x3d\x32\xcc\xdb\x63\xe3\x43\x8c\x7a\x51\x92\x3f\x2a\x49\xa6\xb3\x59\x3a\x88\x9f\x75\xf3\x81\xb1\x78\x7b\x33\xbd\x3b\xab\xfb\x50\xff\xc4\xaf\x63\x6e\x6d\xc9\xca\x0c\xf3\xce\xbf\x05\x7f\xf8\x27\xb2\x32\xfa\xb9\xe2\xe4\xfc\x6c\x4a\x69\x46\xa4\x4a\xc7\xd2\x97\x3e\xd5\x54\x45\x01\x6d\x0a\x5e\x63\x64\xcd\x2e\xfa\xfa\x86\x31\x72\xbc\xd8\xa2\x3d\x36\x44\x85\xd8\xe5\x88\xcd\xe8\x37\x00\x00\xff\xff\x43\x18\x08\xd4\x7c\x01\x00\x00") + +func mysql006_add_orgs_tableSqlBytes() ([]byte, error) { + return bindataRead( + _mysql006_add_orgs_tableSql, + "mysql/006_add_orgs_table.sql", + ) +} + +func mysql006_add_orgs_tableSql() (*asset, error) { + bytes, err := mysql006_add_orgs_tableSqlBytes() + if err != nil { + return nil, err + } + + info := 
bindataFileInfo{name: "mysql/006_add_orgs_table.sql", size: 380, mode: os.FileMode(420), modTime: time.Unix(1496351647, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _mysql007_add_slack_urlsSql = []byte("\x1f\x8b\x08\x00\x00\x09\x6e\x88\x00\xff\x7c\x8f\x41\x4b\xc3\x40\x10\x46\xef\xf3\x2b\xbe\x63\x82\x2d\x68\xb1\xa7\x9e\xd6\x36\xa0\xa0\xad\x2c\xa9\xd7\xb0\xd6\x31\x2e\xd9\x6c\xe2\xec\x2e\x9a\x7f\x2f\xc9\x21\x04\x0f\xbd\x0d\xbc\xe1\x7d\xbc\xf5\x1a\x37\xad\xad\xc5\x44\xc6\xb9\x27\xba\x08\x8f\x67\x34\xef\x8e\x61\x3f\xe1\xbb\x08\xfe\xb5\x21\x06\x04\x67\x2e\x4d\x95\xc4\x85\x8c\x00\xfb\x01\xeb\x23\xd7\x2c\xe8\xc5\xb6\x46\x06\x34\x3c\x40\x9d\xcb\x53\xf5\x74\xdc\xeb\xe2\xa5\x38\x96\x2b\x02\xbe\xba\x10\x2b\x6f\x5a\xc6\x9b\xd2\xfb\x47\xa5\xb3\xcd\x76\x9b\x4f\x62\x9f\x9c\x1b\x5f\x52\x60\xb9\x42\xc5\xcd\xf0\xee\x76\x73\xff\x8f\x7a\xfb\x9d\x38\x9b\x57\x56\x93\x2d\xa7\x7c\x47\xb4\x8c\x3b\x74\x3f\x9e\xe8\xa0\x4f\xaf\x28\xd5\xc3\x73\xb1\xc8\xd9\xd1\x5f\x00\x00\x00\xff\xff\x5f\x31\x16\x1b\x06\x01\x00\x00") + +func mysql007_add_slack_urlsSqlBytes() ([]byte, error) { + return bindataRead( + _mysql007_add_slack_urlsSql, + "mysql/007_add_slack_urls.sql", + ) +} + +func mysql007_add_slack_urlsSql() (*asset, error) { + bytes, err := mysql007_add_slack_urlsSqlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "mysql/007_add_slack_urls.sql", size: 262, mode: os.FileMode(420), modTime: time.Unix(1497972070, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _postgres001_initSql = []byte("\x1f\x8b\x08\x00\x00\x09\x6e\x88\x00\xff\x8c\x92\xdf\x6a\x83\x30\x14\x87\xef\xf3\x14\xe7\xb2\xb2\xf6\x09\xbc\xd2\x79\x56\xc2\x5c\xec\x62\x04\x7b\x55\xc2\x16\x24\xd4\x7f\x44\xdb\xee\xf1\x87\x21\xda\x1a\xd8\xa8\x57\xe1\x23\xe7\xf8\xfd\x4e\xce\x6e\x07\x2f\x8d\xae\x8c\x1c\x15\x14\x3d\x21\xaf\x1c\x23\x81\x20\xa2\x38\x45\xa0\x6f\xc0\x32\x01\x58\xd2\x5c\xe4\x70\x19\x94\x19\x60\x43\xec\xe1\xa4\xbf\xc1\x7e\x31\xdd\xe7\xc8\x69\x94\xc2\x81\xd3\x8f\x88\x1f\xe1\x1d\x8f\x64\x6b\xef\xd4\x5d\xa5\x5b\x00\x10\x58\x0a\x87\xc6\xee\xac\x3c\xa4\x1a\xa9\xeb\x35\x92\x57\x39\x4a\xb3\x42\x83\xfa\x32\x6a\x74\x88\x6c\x0b\x46\x3f\x0b\xdc\xdc\x7f\x13\x90\x20\xfc\x57\xdf\xa8\xbe\xb3\xfa\xd3\x61\xd1\xff\xcb\xdf\x5e\x5a\x82\x52\x26\x70\x8f\xdc\xe1\xee\xd6\x2a\x03\x8b\xb1\x65\xad\x6c\x14\x78\x6c\xa8\x2f\x95\xcf\x6a\xdd\x9e\x7d\xd6\x1b\x7d\x9d\xe6\x0f\x71\x96\xa5\x18\xb1\xb9\xdc\x25\xf6\x22\x2f\xad\x57\x89\x29\x4b\xb0\xf4\x12\xeb\x9f\xd3\xca\x37\x63\xf3\x10\xee\x38\x08\x9f\xe9\x30\x0f\xc2\xeb\xe0\xf0\xa4\xf1\xb8\x47\x49\x77\x6b\x09\x49\x78\x76\x70\x0f\x61\x6b\xc2\x47\x62\x77\x29\x24\xbf\x01\x00\x00\xff\xff\x1e\xfd\x38\xa0\x7e\x02\x00\x00") + +func postgres001_initSqlBytes() ([]byte, error) { + return bindataRead( + _postgres001_initSql, + "postgres/001_init.sql", + ) +} + +func postgres001_initSql() (*asset, error) { + bytes, err := postgres001_initSqlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "postgres/001_init.sql", size: 638, mode: os.FileMode(420), modTime: time.Unix(1481147727, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _postgres002_orgSql = 
[]byte("\x1f\x8b\x08\x00\x00\x09\x6e\x88\x00\xff\xd2\xd5\x55\xd0\xce\xcd\x4c\x2f\x4a\x2c\x49\x55\x08\x2d\xe0\xe2\x72\xf4\x09\x71\x0d\x52\x08\x71\x74\xf2\x71\x55\x28\x4a\x2d\xc8\x2f\x56\x70\x74\x71\x51\x70\xf6\xf7\x09\xf5\xf5\x03\x0b\xc4\xe7\x17\xa5\x2b\x38\xf9\xfb\xfb\xb8\x3a\xfa\x29\xb8\xb8\xba\x39\x86\xfa\x84\x28\xb8\x39\xfa\x04\xbb\x5a\x73\x71\x21\x9b\xe6\x92\x5f\x9e\x87\xcd\x3c\x97\x20\xff\x00\x74\x03\xad\x01\x01\x00\x00\xff\xff\x25\x2b\x07\x70\x87\x00\x00\x00") + +func postgres002_orgSqlBytes() ([]byte, error) { + return bindataRead( + _postgres002_orgSql, + "postgres/002_org.sql", + ) +} + +func postgres002_orgSql() (*asset, error) { + bytes, err := postgres002_orgSqlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "postgres/002_org.sql", size: 135, mode: os.FileMode(420), modTime: time.Unix(1483464365, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _postgres003_drop_emailsSql = []byte("\x1f\x8b\x08\x00\x00\x09\x6e\x88\x00\xff\xd2\xd5\x55\xd0\xce\xcd\x4c\x2f\x4a\x2c\x49\x55\x08\x2d\xe0\xe2\x72\xf4\x09\x71\x0d\x52\x08\x71\x74\xf2\x71\x55\x28\x2d\x4e\x2d\x2a\x56\x70\x09\xf2\x0f\x50\x70\xf6\xf7\x09\xf5\xf5\x03\x8b\xc4\xa7\xe6\x26\x66\xe6\x58\x73\x71\x21\x6b\x75\xc9\x2f\xcf\xc3\xa6\xd9\xd1\xc5\x05\x53\xaf\x42\x98\x63\x90\xb3\x87\x63\x90\x86\x91\xa9\xa9\xa6\x82\x8b\xab\x9b\x63\xa8\x4f\x88\x82\xba\xba\x35\x20\x00\x00\xff\xff\xe3\x75\x50\xf8\x8d\x00\x00\x00") + +func postgres003_drop_emailsSqlBytes() ([]byte, error) { + return bindataRead( + _postgres003_drop_emailsSql, + "postgres/003_drop_emails.sql", + ) +} + +func postgres003_drop_emailsSql() (*asset, error) { + bytes, err := postgres003_drop_emailsSqlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "postgres/003_drop_emails.sql", size: 141, mode: os.FileMode(420), modTime: time.Unix(1483464365, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _postgres004_limit_usersSql = []byte("\x1f\x8b\x08\x00\x00\x09\x6e\x88\x00\xff\x8c\xce\xc1\x0a\x82\x40\x10\xc6\xf1\xfb\x3c\xc5\x77\x54\xca\x27\xf0\x64\x39\xc1\x42\x68\xe9\x2c\x78\x8b\x0e\xb2\x2c\xa8\x1b\xa3\xd1\xeb\x47\x2a\xe1\xa1\x43\x73\xfd\xe0\x37\xff\x24\xc1\xae\xf7\x4e\xef\x53\x0b\xfb\x20\x3a\x56\x9c\x09\x43\xb2\xc3\x99\x61\x4e\x28\x4a\x01\x37\xa6\x96\x1a\x9d\xef\xfd\x74\x7b\x8e\xad\x8e\x88\x08\xe8\x82\xf3\x03\xe6\x13\x6e\x84\x80\xbd\x2d\xcc\xd5\x72\x34\x2f\x31\xc5\xe9\x1f\x5e\x50\xb7\x70\x41\xdd\xd7\xc1\x0a\x05\x75\x0b\xb3\xcd\xcc\xc3\x6b\x20\xca\xab\xf2\xb2\xb2\x9b\xb0\xf4\xc7\xf0\xf9\x90\xd2\x3b\x00\x00\xff\xff\x2f\x48\xf5\xe6\xe9\x00\x00\x00") + +func postgres004_limit_usersSqlBytes() ([]byte, error) { + return bindataRead( + _postgres004_limit_usersSql, + "postgres/004_limit_users.sql", + ) +} + +func postgres004_limit_usersSql() (*asset, error) { + bytes, err := postgres004_limit_usersSqlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "postgres/004_limit_users.sql", size: 233, mode: os.FileMode(420), modTime: time.Unix(1483464365, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _postgres005_oauth_scopeSql = 
[]byte("\x1f\x8b\x08\x00\x00\x09\x6e\x88\x00\xff\xd2\xd5\x55\xd0\xce\xcd\x4c\x2f\x4a\x2c\x49\x55\x08\x2d\xe0\xe2\x72\xf4\x09\x71\x0d\x52\x08\x71\x74\xf2\x71\x55\x28\x2d\x4e\x2d\x2a\x56\x70\x74\x71\x51\x70\xf6\xf7\x09\xf5\xf5\x03\x0b\xc4\x17\x27\xe7\x17\xa4\x16\x2b\x84\x39\x06\x39\x7b\x38\x06\x69\x18\x99\x9a\x6a\x2a\xb8\xb8\xba\x39\x86\xfa\x84\x28\xa8\xab\x5b\x73\x71\x21\x1b\xe9\x92\x5f\x9e\x87\xcd\x50\x97\x20\xff\x00\x2c\xa6\x5a\x03\x02\x00\x00\xff\xff\x8c\x08\x6c\xbb\x8f\x00\x00\x00") + +func postgres005_oauth_scopeSqlBytes() ([]byte, error) { + return bindataRead( + _postgres005_oauth_scopeSql, + "postgres/005_oauth_scope.sql", + ) +} + +func postgres005_oauth_scopeSql() (*asset, error) { + bytes, err := postgres005_oauth_scopeSqlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "postgres/005_oauth_scope.sql", size: 143, mode: os.FileMode(420), modTime: time.Unix(1495121623, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _postgres006_add_orgs_tableSql = []byte("\x1f\x8b\x08\x00\x00\x09\x6e\x88\x00\xff\x8c\x50\xb1\x4e\xc3\x30\x14\xdc\xfd\x15\x37\x26\x82\x4a\x50\xd1\x29\x93\xd3\x3c\x8a\x45\xb0\x8b\xe3\xa0\x76\xaa\x10\x58\x95\x05\x24\x95\x53\x08\x9f\x8f\x0c\x89\x09\x88\x01\x4f\xd6\xdd\xbb\xf7\xee\x6e\x36\xc3\xc9\x8b\xdb\xfb\xfb\xa3\x45\x7d\x60\x6c\xa9\x89\x1b\x82\xe1\x79\x49\x10\x97\x90\xca\x80\x36\xa2\x32\x15\x5a\xbf\xef\x90\x30\x20\xfc\x76\xee\x11\x5f\x2f\x17\xab\x8a\xb4\xe0\x25\xd6\x5a\xdc\x70\xbd\xc5\x35\x6d\x19\x70\x1a\xa6\x5e\x3b\xeb\x3f\x47\x85\x34\xb4\x22\x3d\xe2\x6d\xdf\x58\x1f\xd4\x77\x5c\x2f\xaf\xb8\x4e\xe6\x8b\x45\x3a\x92\xcf\xae\x79\xc2\x94\x3c\x3f\x9b\x5f\x44\xf6\xe0\xdd\x5b\x70\x8b\x5c\xa9\x92\xb8\x1c\xf1\xce\x3e\x78\x7b\xfc\x63\x65\x2d\xc5\x6d\x4d\x49\x3c\x9b\xb2\x34\x8b\x41\x85\x2c\x68\xf3\x2b\xa8\x7b\xdf\x4d\x3d\x2a\x39\x44\xff\xde\x90\xfd\x43\x1e\xa3\xff\xd0\x0f\x68\x70\x30\xad\xbe\x68\xfb\x86\xb1\x42\xab\xf5\x50\x7d\x50\x64\xec\x23\x00\x00\xff\xff\xd2\x89\x99\x08\x9e\x01\x00\x00") + +func postgres006_add_orgs_tableSqlBytes() ([]byte, error) { + return bindataRead( + _postgres006_add_orgs_tableSql, + "postgres/006_add_orgs_table.sql", + ) +} + +func postgres006_add_orgs_tableSql() (*asset, error) { + bytes, err := postgres006_add_orgs_tableSqlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "postgres/006_add_orgs_table.sql", size: 414, mode: os.FileMode(420), modTime: time.Unix(1496351647, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _postgres007_add_slack_urlsSql = []byte("\x1f\x8b\x08\x00\x00\x09\x6e\x88\x00\xff\x6c\x8e\xbd\x8a\x83\x40\x14\x46\xfb\xfb\x14\x5f\xa9\xac\x3e\x81\x95\xa2\x2c\xb2\x2e\x2b\xb3\x49\x61\x25\x13\x73\x93\x0c\x19\xc7\x64\x7e\x88\x8f\x1f\xb4\x08\x16\x76\x97\x73\xf9\x0e\x27\x4d\xf1\x35\xaa\xab\x95\x9e\x71\x7c\x10\x0d\x96\x97\xd3\xcb\x93\x66\xa8\x0b\xcc\xe4\xc1\xb3\x72\xde\xc1\x69\x39\xdc\xfb\x60\xb5\x8b\x08\x50\x67\x14\xf5\xf7\x7f\x25\xea\xbc\x41\x2b\xea\xdf\x5c\x74\xf8\xa9\xba\x84\x80\xdb\xe4\x7c\x6f\xe4\xc8\xf0\x3c\xfb\xd5\x61\x82\xd6\xcb\x2b\x38\xb6\x3b\xd4\xea\x1d\x68\xd4\x33\x70\xf4\x91\x25\xeb\x38\xa6\x38\x23\xda\x66\x97\xd3\xcb\x10\x95\xe2\xaf\xc5\x21\x2f\x9a\x6a\x13\x9a\xd1\x3b\x00\x00\xff\xff\x35\x28\x1e\xb2\xe0\x00\x00\x00") + +func postgres007_add_slack_urlsSqlBytes() ([]byte, error) { + return bindataRead( + _postgres007_add_slack_urlsSql, + "postgres/007_add_slack_urls.sql", + ) +} + +func postgres007_add_slack_urlsSql() (*asset, error) { + bytes, err := postgres007_add_slack_urlsSqlBytes() + if err != nil { + return nil, err + 
} + + info := bindataFileInfo{name: "postgres/007_add_slack_urls.sql", size: 224, mode: os.FileMode(420), modTime: time.Unix(1497971713, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +// Asset loads and returns the asset for the given name. +// It returns an error if the asset could not be found or +// could not be loaded. +func Asset(name string) ([]byte, error) { + cannonicalName := strings.Replace(name, "\\", "/", -1) + if f, ok := _bindata[cannonicalName]; ok { + a, err := f() + if err != nil { + return nil, fmt.Errorf("Asset %s can't read by error: %v", name, err) + } + return a.bytes, nil + } + return nil, fmt.Errorf("Asset %s not found", name) +} + +// MustAsset is like Asset but panics when Asset would return an error. +// It simplifies safe initialization of global variables. +func MustAsset(name string) []byte { + a, err := Asset(name) + if err != nil { + panic("asset: Asset(" + name + "): " + err.Error()) + } + + return a +} + +// AssetInfo loads and returns the asset info for the given name. +// It returns an error if the asset could not be found or +// could not be loaded. +func AssetInfo(name string) (os.FileInfo, error) { + cannonicalName := strings.Replace(name, "\\", "/", -1) + if f, ok := _bindata[cannonicalName]; ok { + a, err := f() + if err != nil { + return nil, fmt.Errorf("AssetInfo %s can't read by error: %v", name, err) + } + return a.info, nil + } + return nil, fmt.Errorf("AssetInfo %s not found", name) +} + +// AssetNames returns the names of the assets. +func AssetNames() []string { + names := make([]string, 0, len(_bindata)) + for name := range _bindata { + names = append(names, name) + } + return names +} + +// _bindata is a table, holding each asset generator, mapped to its name. +var _bindata = map[string]func() (*asset, error){ + "sqlite3/001_init.sql": sqlite3001_initSql, + "sqlite3/002_org.sql": sqlite3002_orgSql, + "sqlite3/003_drop_emails.sql": sqlite3003_drop_emailsSql, + "sqlite3/004_limit_users.sql": sqlite3004_limit_usersSql, + "sqlite3/005_oauth_scope.sql": sqlite3005_oauth_scopeSql, + "sqlite3/006_add_orgs_table.sql": sqlite3006_add_orgs_tableSql, + "sqlite3/007_add_slack_urls.sql": sqlite3007_add_slack_urlsSql, + "mysql/001_init.sql": mysql001_initSql, + "mysql/002_org.sql": mysql002_orgSql, + "mysql/003_drop_emails.sql": mysql003_drop_emailsSql, + "mysql/004_limit_users.sql": mysql004_limit_usersSql, + "mysql/005_oauth_scope.sql": mysql005_oauth_scopeSql, + "mysql/006_add_orgs_table.sql": mysql006_add_orgs_tableSql, + "mysql/007_add_slack_urls.sql": mysql007_add_slack_urlsSql, + "postgres/001_init.sql": postgres001_initSql, + "postgres/002_org.sql": postgres002_orgSql, + "postgres/003_drop_emails.sql": postgres003_drop_emailsSql, + "postgres/004_limit_users.sql": postgres004_limit_usersSql, + "postgres/005_oauth_scope.sql": postgres005_oauth_scopeSql, + "postgres/006_add_orgs_table.sql": postgres006_add_orgs_tableSql, + "postgres/007_add_slack_urls.sql": postgres007_add_slack_urlsSql, +} + +// AssetDir returns the file names below a certain +// directory embedded in the file by go-bindata. +// For example if you run go-bindata on data/... and data contains the +// following hierarchy: +// data/ +// foo.txt +// img/ +// a.png +// b.png +// then AssetDir("data") would return []string{"foo.txt", "img"} +// AssetDir("data/img") would return []string{"a.png", "b.png"} +// AssetDir("foo.txt") and AssetDir("notexist") would return an error +// AssetDir("") will return []string{"data"}. 
+func AssetDir(name string) ([]string, error) { + node := _bintree + if len(name) != 0 { + cannonicalName := strings.Replace(name, "\\", "/", -1) + pathList := strings.Split(cannonicalName, "/") + for _, p := range pathList { + node = node.Children[p] + if node == nil { + return nil, fmt.Errorf("Asset %s not found", name) + } + } + } + if node.Func != nil { + return nil, fmt.Errorf("Asset %s not found", name) + } + rv := make([]string, 0, len(node.Children)) + for childName := range node.Children { + rv = append(rv, childName) + } + return rv, nil +} + +type bintree struct { + Func func() (*asset, error) + Children map[string]*bintree +} +var _bintree = &bintree{nil, map[string]*bintree{ + "mysql": &bintree{nil, map[string]*bintree{ + "001_init.sql": &bintree{mysql001_initSql, map[string]*bintree{}}, + "002_org.sql": &bintree{mysql002_orgSql, map[string]*bintree{}}, + "003_drop_emails.sql": &bintree{mysql003_drop_emailsSql, map[string]*bintree{}}, + "004_limit_users.sql": &bintree{mysql004_limit_usersSql, map[string]*bintree{}}, + "005_oauth_scope.sql": &bintree{mysql005_oauth_scopeSql, map[string]*bintree{}}, + "006_add_orgs_table.sql": &bintree{mysql006_add_orgs_tableSql, map[string]*bintree{}}, + "007_add_slack_urls.sql": &bintree{mysql007_add_slack_urlsSql, map[string]*bintree{}}, + }}, + "postgres": &bintree{nil, map[string]*bintree{ + "001_init.sql": &bintree{postgres001_initSql, map[string]*bintree{}}, + "002_org.sql": &bintree{postgres002_orgSql, map[string]*bintree{}}, + "003_drop_emails.sql": &bintree{postgres003_drop_emailsSql, map[string]*bintree{}}, + "004_limit_users.sql": &bintree{postgres004_limit_usersSql, map[string]*bintree{}}, + "005_oauth_scope.sql": &bintree{postgres005_oauth_scopeSql, map[string]*bintree{}}, + "006_add_orgs_table.sql": &bintree{postgres006_add_orgs_tableSql, map[string]*bintree{}}, + "007_add_slack_urls.sql": &bintree{postgres007_add_slack_urlsSql, map[string]*bintree{}}, + }}, + "sqlite3": &bintree{nil, map[string]*bintree{ + "001_init.sql": &bintree{sqlite3001_initSql, map[string]*bintree{}}, + "002_org.sql": &bintree{sqlite3002_orgSql, map[string]*bintree{}}, + "003_drop_emails.sql": &bintree{sqlite3003_drop_emailsSql, map[string]*bintree{}}, + "004_limit_users.sql": &bintree{sqlite3004_limit_usersSql, map[string]*bintree{}}, + "005_oauth_scope.sql": &bintree{sqlite3005_oauth_scopeSql, map[string]*bintree{}}, + "006_add_orgs_table.sql": &bintree{sqlite3006_add_orgs_tableSql, map[string]*bintree{}}, + "007_add_slack_urls.sql": &bintree{sqlite3007_add_slack_urlsSql, map[string]*bintree{}}, + }}, +}} + +// RestoreAsset restores an asset under the given directory +func RestoreAsset(dir, name string) error { + data, err := Asset(name) + if err != nil { + return err + } + info, err := AssetInfo(name) + if err != nil { + return err + } + err = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755)) + if err != nil { + return err + } + err = ioutil.WriteFile(_filePath(dir, name), data, info.Mode()) + if err != nil { + return err + } + err = os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime()) + if err != nil { + return err + } + return nil +} + +// RestoreAssets restores an asset under the given directory recursively +func RestoreAssets(dir, name string) error { + children, err := AssetDir(name) + // File + if err != nil { + return RestoreAsset(dir, name) + } + // Dir + for _, child := range children { + err = RestoreAssets(dir, filepath.Join(name, child)) + if err != nil { + return err + } + } + return nil +} + +func _filePath(dir, 
name string) string { + cannonicalName := strings.Replace(name, "\\", "/", -1) + return filepath.Join(append([]string{dir}, strings.Split(cannonicalName, "/")...)...) +} + diff --git a/store/migration/mysql/1_init.sql b/store/migration/mysql/001_init.sql similarity index 84% rename from store/migration/mysql/1_init.sql rename to store/migration/mysql/001_init.sql index 010c8bb..795a407 100644 --- a/store/migration/mysql/1_init.sql +++ b/store/migration/mysql/001_init.sql @@ -7,7 +7,6 @@ CREATE TABLE IF NOT EXISTS users ( ,user_email VARCHAR(255) ,user_avatar VARCHAR(1024) ,user_secret VARCHAR(255) - ,UNIQUE(user_login) ); @@ -20,12 +19,11 @@ CREATE TABLE IF NOT EXISTS repos ( ,repo_link VARCHAR(1024) ,repo_private BOOLEAN ,repo_secret VARCHAR(255) - ,UNIQUE(repo_slug) ); -CREATE INDEX ix_repo_owner ON repos (repo_owner); -CREATE INDEX ix_repo_user_id ON repos (repo_user_id); +ALTER TABLE repos ADD INDEX (repo_owner); +ALTER TABLE repos ADD INDEX (repo_user_id); -- +migrate Down diff --git a/store/migration/mysql/002_org.sql b/store/migration/mysql/002_org.sql new file mode 100644 index 0000000..adc998c --- /dev/null +++ b/store/migration/mysql/002_org.sql @@ -0,0 +1,7 @@ +-- +migrate Up + +ALTER TABLE repos ADD COLUMN repo_org BOOLEAN DEFAULT FALSE; + +-- +migrate Down + +ALTER TABLE repos DROP COLUMN repo_org; \ No newline at end of file diff --git a/store/migration/mysql/003_drop_emails.sql b/store/migration/mysql/003_drop_emails.sql new file mode 100644 index 0000000..34c17e6 --- /dev/null +++ b/store/migration/mysql/003_drop_emails.sql @@ -0,0 +1,7 @@ +-- +migrate Up + +ALTER TABLE users DROP COLUMN user_email; + +-- +migrate Down + +ALTER TABLE users ADD COLUMN user_email VARCHAR(255) DEFAULT ''; \ No newline at end of file diff --git a/store/migration/mysql/004_limit_users.sql b/store/migration/mysql/004_limit_users.sql new file mode 100644 index 0000000..eb23caf --- /dev/null +++ b/store/migration/mysql/004_limit_users.sql @@ -0,0 +1,17 @@ +-- +migrate Up + +CREATE TABLE IF NOT EXISTS limit_users ( + login VARCHAR(255) NOT NULL +,UNIQUE(login) +); + +CREATE TABLE IF NOT EXISTS limit_orgs ( + org VARCHAR(255) NOT NULL + , UNIQUE(org) +); + +-- +migrate Down + +DROP TABLE limit_users; + +DROP TABLE limit_orgs; diff --git a/store/migration/mysql/005_oauth_scope.sql b/store/migration/mysql/005_oauth_scope.sql new file mode 100644 index 0000000..2cce342 --- /dev/null +++ b/store/migration/mysql/005_oauth_scope.sql @@ -0,0 +1,7 @@ +-- +migrate Up + +ALTER TABLE users ADD COLUMN user_scopes VARCHAR(255) DEFAULT ''; + +-- +migrate Down + +ALTER TABLE users DROP COLUMN user_scopes; \ No newline at end of file diff --git a/store/migration/mysql/006_add_orgs_table.sql b/store/migration/mysql/006_add_orgs_table.sql new file mode 100644 index 0000000..ea0b5d3 --- /dev/null +++ b/store/migration/mysql/006_add_orgs_table.sql @@ -0,0 +1,18 @@ +-- +migrate Up + +CREATE TABLE IF NOT EXISTS orgs ( + org_id INTEGER PRIMARY KEY AUTO_INCREMENT + ,org_user_id INTEGER + ,org_owner VARCHAR(255) + ,org_link VARCHAR(1024) + ,org_private BOOLEAN + ,org_secret VARCHAR(255) + ,UNIQUE(org_owner) +); + +ALTER TABLE orgs ADD INDEX (org_owner); +ALTER TABLE orgs ADD INDEX (org_user_id); + +-- +migrate Down + +DROP TABLE orgs; diff --git a/store/migration/mysql/007_add_slack_urls.sql b/store/migration/mysql/007_add_slack_urls.sql new file mode 100644 index 0000000..c9e168c --- /dev/null +++ b/store/migration/mysql/007_add_slack_urls.sql @@ -0,0 +1,13 @@ +-- +migrate Up + +create table if not exists slack_urls( + id 
integer primary key AUTO_INCREMENT, + host_name VARCHAR(255) not null, + user VARCHAR(255) not null, + url VARCHAR(1024) not null, + unique(host_name, user) +); + +-- +migrate Down + +DROP TABLE slack_urls; diff --git a/store/migration/postgres/001_init.sql b/store/migration/postgres/001_init.sql new file mode 100644 index 0000000..55f6b71 --- /dev/null +++ b/store/migration/postgres/001_init.sql @@ -0,0 +1,33 @@ +-- +migrate Up + +CREATE TABLE IF NOT EXISTS users ( + user_id BIGSERIAL PRIMARY KEY +,user_login TEXT +,user_token TEXT +,user_email TEXT +,user_avatar TEXT +,user_secret TEXT + +,UNIQUE(user_login) +); + +CREATE TABLE IF NOT EXISTS repos ( + repo_id BIGSERIAL PRIMARY KEY +,repo_user_id INTEGER +,repo_owner TEXT +,repo_name TEXT +,repo_slug TEXT +,repo_link TEXT +,repo_private BOOLEAN +,repo_secret TEXT + +,UNIQUE(repo_slug) +); + +CREATE INDEX IF NOT EXISTS ix_repo_owner ON repos (repo_owner); +CREATE INDEX IF NOT EXISTS ix_repo_user_id ON repos (repo_user_id); + +-- +migrate Down + +DROP TABLE repos; +DROP TABLE users; diff --git a/store/migration/postgres/002_org.sql b/store/migration/postgres/002_org.sql new file mode 100644 index 0000000..adc998c --- /dev/null +++ b/store/migration/postgres/002_org.sql @@ -0,0 +1,7 @@ +-- +migrate Up + +ALTER TABLE repos ADD COLUMN repo_org BOOLEAN DEFAULT FALSE; + +-- +migrate Down + +ALTER TABLE repos DROP COLUMN repo_org; \ No newline at end of file diff --git a/store/migration/postgres/003_drop_emails.sql b/store/migration/postgres/003_drop_emails.sql new file mode 100644 index 0000000..34c17e6 --- /dev/null +++ b/store/migration/postgres/003_drop_emails.sql @@ -0,0 +1,7 @@ +-- +migrate Up + +ALTER TABLE users DROP COLUMN user_email; + +-- +migrate Down + +ALTER TABLE users ADD COLUMN user_email VARCHAR(255) DEFAULT ''; \ No newline at end of file diff --git a/store/migration/postgres/004_limit_users.sql b/store/migration/postgres/004_limit_users.sql new file mode 100644 index 0000000..be2cc10 --- /dev/null +++ b/store/migration/postgres/004_limit_users.sql @@ -0,0 +1,17 @@ +-- +migrate Up + +CREATE TABLE IF NOT EXISTS limit_users ( + login TEXT + ,UNIQUE(login) +); + +CREATE TABLE IF NOT EXISTS limit_orgs ( + org TEXT + , UNIQUE(org) +); + +-- +migrate Down + +DROP TABLE limit_users; + +DROP TABLE limit_orgs; diff --git a/store/migration/postgres/005_oauth_scope.sql b/store/migration/postgres/005_oauth_scope.sql new file mode 100644 index 0000000..2cce342 --- /dev/null +++ b/store/migration/postgres/005_oauth_scope.sql @@ -0,0 +1,7 @@ +-- +migrate Up + +ALTER TABLE users ADD COLUMN user_scopes VARCHAR(255) DEFAULT ''; + +-- +migrate Down + +ALTER TABLE users DROP COLUMN user_scopes; \ No newline at end of file diff --git a/store/migration/postgres/006_add_orgs_table.sql b/store/migration/postgres/006_add_orgs_table.sql new file mode 100644 index 0000000..f1e3499 --- /dev/null +++ b/store/migration/postgres/006_add_orgs_table.sql @@ -0,0 +1,18 @@ +-- +migrate Up + +CREATE TABLE IF NOT EXISTS orgs ( + org_id BIGSERIAL PRIMARY KEY + ,org_user_id INTEGER + ,org_owner VARCHAR(255) + ,org_link VARCHAR(1024) + ,org_private BOOLEAN + ,org_secret VARCHAR(255) + ,UNIQUE(org_owner) +); + +CREATE INDEX IF NOT EXISTS ix_org_owner ON orgs (org_owner); +CREATE INDEX IF NOT EXISTS ix_org_user_id ON orgs (org_user_id); + +-- +migrate Down + +DROP TABLE orgs; diff --git a/store/migration/postgres/007_add_slack_urls.sql b/store/migration/postgres/007_add_slack_urls.sql new file mode 100644 index 0000000..b2c83ca --- /dev/null +++ 
b/store/migration/postgres/007_add_slack_urls.sql @@ -0,0 +1,13 @@ +-- +migrate Up + +create table if not exists slack_urls( + id BIGSERIAL PRIMARY KEY, + host_name text not null, + user text not null, + url text not null, + unique(host_name, user) +); + +-- +migrate Down + +DROP TABLE slack_urls; diff --git a/store/migration/sqlite3/1_init.sql b/store/migration/sqlite3/001_init.sql similarity index 100% rename from store/migration/sqlite3/1_init.sql rename to store/migration/sqlite3/001_init.sql diff --git a/store/migration/sqlite3/002_org.sql b/store/migration/sqlite3/002_org.sql new file mode 100644 index 0000000..4676be5 --- /dev/null +++ b/store/migration/sqlite3/002_org.sql @@ -0,0 +1,3 @@ +-- +migrate Up + +ALTER TABLE repos ADD COLUMN repo_org BOOLEAN DEFAULT FALSE; diff --git a/store/migration/sqlite3/003_drop_emails.sql b/store/migration/sqlite3/003_drop_emails.sql new file mode 100644 index 0000000..7d519a6 --- /dev/null +++ b/store/migration/sqlite3/003_drop_emails.sql @@ -0,0 +1,25 @@ +-- +migrate Up + +ALTER TABLE users RENAME TO temp_users; + +CREATE TABLE IF NOT EXISTS users ( + user_id INTEGER PRIMARY KEY AUTOINCREMENT +,user_login TEXT +,user_token TEXT +,user_avatar TEXT +,user_secret TEXT + +,UNIQUE(user_login) +); + +INSERT INTO users +SELECT + user_id, user_login, user_token, user_avatar, user_secret +FROM + temp_users; + +DROP TABLE temp_users; + +-- +migrate Down + +ALTER TABLE users ADD COLUMN user_email TEXT DEFAULT ''; \ No newline at end of file diff --git a/store/migration/sqlite3/004_limit_users.sql b/store/migration/sqlite3/004_limit_users.sql new file mode 100644 index 0000000..027af61 --- /dev/null +++ b/store/migration/sqlite3/004_limit_users.sql @@ -0,0 +1,17 @@ +-- +migrate Up + +CREATE TABLE IF NOT EXISTS limit_users ( + login TEXT +,UNIQUE(login) +); + +CREATE TABLE IF NOT EXISTS limit_orgs ( + org TEXT + , UNIQUE(org) +); + +-- +migrate Down + +DROP TABLE limit_users; + +DROP TABLE limit_orgs; diff --git a/store/migration/sqlite3/005_oauth_scope.sql b/store/migration/sqlite3/005_oauth_scope.sql new file mode 100644 index 0000000..5d7746d --- /dev/null +++ b/store/migration/sqlite3/005_oauth_scope.sql @@ -0,0 +1,3 @@ +-- +migrate Up + +ALTER TABLE users ADD COLUMN user_scopes TEXT DEFAULT ''; diff --git a/store/migration/sqlite3/006_add_orgs_table.sql b/store/migration/sqlite3/006_add_orgs_table.sql new file mode 100644 index 0000000..e7b485c --- /dev/null +++ b/store/migration/sqlite3/006_add_orgs_table.sql @@ -0,0 +1,18 @@ +-- +migrate Up + +CREATE TABLE IF NOT EXISTS orgs ( + org_id INTEGER PRIMARY KEY AUTOINCREMENT, + org_user_id INTEGER, + org_owner VARCHAR(255), + org_link VARCHAR(1024), + org_private BOOLEAN, + org_secret VARCHAR(255), + UNIQUE (org_owner) +); + +CREATE INDEX IF NOT EXISTS ix_org_owner ON orgs (org_owner); +CREATE INDEX IF NOT EXISTS ix_org_user_id ON orgs (org_user_id); + +-- +migrate Down + +DROP TABLE orgs; diff --git a/store/migration/sqlite3/007_add_slack_urls.sql b/store/migration/sqlite3/007_add_slack_urls.sql new file mode 100644 index 0000000..8b21dfa --- /dev/null +++ b/store/migration/sqlite3/007_add_slack_urls.sql @@ -0,0 +1,13 @@ +-- +migrate Up + +create table if not exists slack_urls( + id integer primary key autoincrement, + host_name text not null, + user text not null, + url text not null, + unique(host_name, user) +); + +-- +migrate Down + +DROP TABLE slack_urls; diff --git a/store/mock/store.go b/store/mock/store.go deleted file mode 100644 index 506f50e..0000000 --- a/store/mock/store.go +++ /dev/null @@ 
-1,208 +0,0 @@ -package mock - -import "github.com/stretchr/testify/mock" - -import "github.com/lgtmco/lgtm/model" - -type Store struct { - mock.Mock -} - -func (_m *Store) GetUser(_a0 int64) (*model.User, error) { - ret := _m.Called(_a0) - - var r0 *model.User - if rf, ok := ret.Get(0).(func(int64) *model.User); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*model.User) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(int64) error); ok { - r1 = rf(_a0) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} -func (_m *Store) GetUserLogin(_a0 string) (*model.User, error) { - ret := _m.Called(_a0) - - var r0 *model.User - if rf, ok := ret.Get(0).(func(string) *model.User); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*model.User) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(string) error); ok { - r1 = rf(_a0) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} -func (_m *Store) CreateUser(_a0 *model.User) error { - ret := _m.Called(_a0) - - var r0 error - if rf, ok := ret.Get(0).(func(*model.User) error); ok { - r0 = rf(_a0) - } else { - r0 = ret.Error(0) - } - - return r0 -} -func (_m *Store) UpdateUser(_a0 *model.User) error { - ret := _m.Called(_a0) - - var r0 error - if rf, ok := ret.Get(0).(func(*model.User) error); ok { - r0 = rf(_a0) - } else { - r0 = ret.Error(0) - } - - return r0 -} -func (_m *Store) DeleteUser(_a0 *model.User) error { - ret := _m.Called(_a0) - - var r0 error - if rf, ok := ret.Get(0).(func(*model.User) error); ok { - r0 = rf(_a0) - } else { - r0 = ret.Error(0) - } - - return r0 -} -func (_m *Store) GetRepo(_a0 int64) (*model.Repo, error) { - ret := _m.Called(_a0) - - var r0 *model.Repo - if rf, ok := ret.Get(0).(func(int64) *model.Repo); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*model.Repo) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(int64) error); ok { - r1 = rf(_a0) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} -func (_m *Store) GetRepoSlug(_a0 string) (*model.Repo, error) { - ret := _m.Called(_a0) - - var r0 *model.Repo - if rf, ok := ret.Get(0).(func(string) *model.Repo); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*model.Repo) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(string) error); ok { - r1 = rf(_a0) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} -func (_m *Store) GetRepoMulti(_a0 ...string) ([]*model.Repo, error) { - ret := _m.Called(_a0) - - var r0 []*model.Repo - if rf, ok := ret.Get(0).(func(...string) []*model.Repo); ok { - r0 = rf(_a0...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).([]*model.Repo) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(...string) error); ok { - r1 = rf(_a0...) 
- } else { - r1 = ret.Error(1) - } - - return r0, r1 -} -func (_m *Store) GetRepoOwner(_a0 string) ([]*model.Repo, error) { - ret := _m.Called(_a0) - - var r0 []*model.Repo - if rf, ok := ret.Get(0).(func(string) []*model.Repo); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).([]*model.Repo) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(string) error); ok { - r1 = rf(_a0) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} -func (_m *Store) CreateRepo(_a0 *model.Repo) error { - ret := _m.Called(_a0) - - var r0 error - if rf, ok := ret.Get(0).(func(*model.Repo) error); ok { - r0 = rf(_a0) - } else { - r0 = ret.Error(0) - } - - return r0 -} -func (_m *Store) UpdateRepo(_a0 *model.Repo) error { - ret := _m.Called(_a0) - - var r0 error - if rf, ok := ret.Get(0).(func(*model.Repo) error); ok { - r0 = rf(_a0) - } else { - r0 = ret.Error(0) - } - - return r0 -} -func (_m *Store) DeleteRepo(_a0 *model.Repo) error { - ret := _m.Called(_a0) - - var r0 error - if rf, ok := ret.Get(0).(func(*model.Repo) error); ok { - r0 = rf(_a0) - } else { - r0 = ret.Error(0) - } - - return r0 -} diff --git a/store/store.go b/store/store.go index 8a5c284..af6f397 100644 --- a/store/store.go +++ b/store/store.go @@ -1,18 +1,38 @@ +/* + +SPDX-Copyright: Copyright (c) Brad Rydzewski, project contributors, Capital One Services, LLC +SPDX-License-Identifier: Apache-2.0 +Copyright 2017 Brad Rydzewski, project contributors, Capital One Services, LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and limitations under the License. + +*/ package store import ( + "context" "path" - "github.com/lgtmco/lgtm/model" - - "golang.org/x/net/context" + "github.com/capitalone/checks-out/cache" + "github.com/capitalone/checks-out/model" + "github.com/capitalone/checks-out/set" ) -//go:generate mockery -name Store -output mock -case=underscore - // Store defines a data storage abstraction for managing structured data // in the system. type Store interface { + // GetMigrations gets the set of migrations that were applied when service started + GetMigrations() set.Set + // GetUser gets a user by unique ID. GetUser(int64) (*model.User, error) @@ -28,6 +48,9 @@ type Store interface { // DeleteUser deletes a user account. DeleteUser(*model.User) error + // GetAllUsers gets a list of all users. + GetAllUsers() ([]*model.User, error) + // GetRepo gets a repo by unique ID. GetRepo(int64) (*model.Repo, error) @@ -37,8 +60,11 @@ type Store interface { // GetRepoMulti gets a list of multiple repos by their full name. GetRepoMulti(...string) ([]*model.Repo, error) - // GetRepoOwner gets a list by owner. - GetRepoOwner(string) ([]*model.Repo, error) + // GetAllRepos gets a list of all repositories. + GetAllRepos() ([]*model.Repo, error) + + // GetRepoUserId gets a list by user unique ID. + GetRepoUserId(int64) ([]*model.Repo, error) // CreateRepo creates a new repository. CreateRepo(*model.Repo) error @@ -48,6 +74,40 @@ type Store interface { // DeleteRepo deletes a user repository. 
DeleteRepo(*model.Repo) error + + // CheckValidUser returns true if the user exists in the valid_user table + CheckValidUser(string) (bool, error) + + // GetValidOrgs returns a list of the entries in the valid_orgs table + GetValidOrgs() ([]string, error) + + // GetUserEnabledOrgs the auto-enrolling orgs with the specified names + GetUserEnabledOrgs(names []string) ([]*model.OrgDb, error) + + // Returns the auto-enrolling org with the specified name + GetOrgByName(owner string) (*model.OrgDb, error) + + // CreateOrg creates a new auto-enrolling org. + CreateOrg(org *model.OrgDb) error + + // DeleteOrg deletes an auto-enrolling org. + DeleteOrg(org *model.OrgDb) error + + // GetReposForOrg returns all repos in the specified org + GetReposForOrg(owner string) ([]*model.Repo, error) + + // Returns the slack URL for the specified hostname and user + // if the user string is blank, the default (admin-level) hostname is returned + // if no hostname is found, an empty string is returned + GetSlackUrl(hostname string, user string) (string, error) + + // Stores or updates the slack URL for the specified hostname and user + // if the user string is blank, the default (admin-level) hostname is stored or updated + AddUpdateSlackUrl(hostname string, user string, url string) error + + // Deletes the slack URL for the specified hostname and user + // if the user string is blank, the default (admin-level) hostname is deleted + DeleteSlackUrl(hostname string, user string) error } // GetUser gets a user by unique ID. @@ -75,6 +135,11 @@ func DeleteUser(c context.Context, user *model.User) error { return FromContext(c).DeleteUser(user) } +// GetAllUsers gets a list of all users. +func GetAllUsers(c context.Context) ([]*model.User, error) { + return FromContext(c).GetAllUsers() +} + // GetRepo gets a repo by unique ID. func GetRepo(c context.Context, id int64) (*model.Repo, error) { return FromContext(c).GetRepo(id) @@ -85,19 +150,24 @@ func GetRepoSlug(c context.Context, slug string) (*model.Repo, error) { return FromContext(c).GetRepoSlug(slug) } -// GetRepoOwnerName gets a repo by its owner and name. -func GetRepoOwnerName(c context.Context, owner, name string) (*model.Repo, error) { - return GetRepoSlug(c, path.Join(owner, name)) -} - // GetRepoMulti gets a list of multiple repos by their full name. func GetRepoMulti(c context.Context, slug ...string) ([]*model.Repo, error) { return FromContext(c).GetRepoMulti(slug...) } -// GetRepoOwner gets a repo list by account. -func GetRepoOwner(c context.Context, owner string) ([]*model.Repo, error) { - return FromContext(c).GetRepoOwner(owner) +// GetAllRepos gets a list of all repositories. +func GetAllRepos(c context.Context) ([]*model.Repo, error) { + return FromContext(c).GetAllRepos() +} + +// GetRepoOwnerName gets a repo by its owner and name. +func GetRepoOwnerName(c context.Context, owner, name string) (*model.Repo, error) { + return GetRepoSlug(c, path.Join(owner, name)) +} + +// GetRepoUserId gets a repo list by unique user ID. +func GetRepoUserId(c context.Context, id int64) ([]*model.Repo, error) { + return FromContext(c).GetRepoUserId(id) } // GetRepoIntersect gets a repo list by account login. 
@@ -116,11 +186,11 @@ func GetRepoIntersectMap(c context.Context, repos []*model.Repo) (map[string]*mo if err != nil { return nil, err } - set := make(map[string]*model.Repo, len(repos)) + repoSet := make(map[string]*model.Repo, len(repos)) for _, repo := range repos { - set[repo.Slug] = repo + repoSet[repo.Slug] = repo } - return set, nil + return repoSet, nil } // CreateRepo creates a new repository. @@ -137,3 +207,57 @@ func UpdateRepo(c context.Context, repo *model.Repo) error { func DeleteRepo(c context.Context, repo *model.Repo) error { return FromContext(c).DeleteRepo(repo) } + +// CheckValidUser returns true if the user exists in the valid_user table +func CheckValidUser(c context.Context, login string) (bool, error) { + return FromContext(c).CheckValidUser(login) +} + +func GetValidOrgs(c context.Context) (set.Set, error) { + //error is of no use on Get, nil coming back is what matters + orgs, _ := cache.Get(c, "valid_orgs") + if orgs == nil { + orgsSlice, err := FromContext(c).GetValidOrgs() + if err != nil { + return nil, err + } + orgs = set.New(orgsSlice...) + cache.Set(c, "valid_orgs", orgs) + } + return orgs.(set.Set), nil +} + +func GetUserEnabledOrgs(c context.Context, names []string) ([]*model.OrgDb, error) { + return FromContext(c).GetUserEnabledOrgs(names) +} + +func GetOrgName(c context.Context, owner string) (*model.OrgDb, error) { + return FromContext(c).GetOrgByName(owner) +} + +// GetReposForOrg returns all repos in the specified org +func GetReposForOrg(c context.Context, owner string) ([]*model.Repo, error) { + return FromContext(c).GetReposForOrg(owner) +} + +// CreateOrg creates a new auto-enrolling org. +func CreateOrg(c context.Context, org *model.OrgDb) error { + return FromContext(c).CreateOrg(org) +} + +// DeleteOrg deletes an auto-enrolling org +func DeleteOrg(c context.Context, org *model.OrgDb) error { + return FromContext(c).DeleteOrg(org) +} + +func GetSlackUrl(c context.Context, hostname string, user string) (string, error) { + return FromContext(c).GetSlackUrl(hostname, user) +} + +func AddUpdateSlackUrl(c context.Context, hostname string, user string, url string) error { + return FromContext(c).AddUpdateSlackUrl(hostname, user, url) +} + +func DeleteSlackUrl(c context.Context, hostname string, user string) error { + return FromContext(c).DeleteSlackUrl(hostname, user) +} diff --git a/store/store_test.go b/store/store_test.go new file mode 100644 index 0000000..c190b1f --- /dev/null +++ b/store/store_test.go @@ -0,0 +1,81 @@ +/* + +SPDX-Copyright: Copyright (c) Capital One Services, LLC +SPDX-License-Identifier: Apache-2.0 +Copyright 2017 Capital One Services, LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and limitations under the License. 
+ +*/ +package store + +import ( + "context" + "testing" + "time" + + "github.com/capitalone/checks-out/cache" +) + +type mockStore struct { + Store + callCount int +} + +func (ms *mockStore) GetValidOrgs() ([]string, error) { + ms.callCount++ + return []string{"beatles", "stones", "ledzep"}, nil +} + +func TestGetValidOrgs(t *testing.T) { + s := &mockStore{} + + c := context.Background() + c = context.WithValue(c, "store", s) + c = context.WithValue(c, "cache", cache.NewTTL(40*time.Millisecond)) + + orgs, err := GetValidOrgs(c) + validHelper := func() { + if err != nil { + t.Fatalf("Expected no error, got %v", err) + } + if orgs == nil { + t.Fatal("Expected non-nil Set returned") + } + if !orgs.Contains("beatles") { + t.Error("Should have contained beatles") + } + if !orgs.Contains("stones") { + t.Error("Should have contained stones") + } + if !orgs.Contains("ledzep") { + t.Error("Should have contained ledzep") + } + } + validHelper() + if s.callCount != 1 { + t.Errorf("Expected 1 call to store, got %d", s.callCount) + } + //test caching is doing its job + orgs, err = GetValidOrgs(c) + validHelper() + if s.callCount != 1 { + t.Errorf("Expected 1 call to store, got %d", s.callCount) + } + //sleep to let cache expire + time.Sleep(50 * time.Millisecond) + orgs, err = GetValidOrgs(c) + validHelper() + if s.callCount != 2 { + t.Errorf("Expected 2 calls to store, got %d", s.callCount) + } +} diff --git a/strings/lowercase/lowercase.go b/strings/lowercase/lowercase.go new file mode 100644 index 0000000..77e83a4 --- /dev/null +++ b/strings/lowercase/lowercase.go @@ -0,0 +1,53 @@ +/* + +SPDX-Copyright: Copyright (c) Capital One Services, LLC +SPDX-License-Identifier: Apache-2.0 +Copyright 2017 Capital One Services, LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and limitations under the License. + +*/ +package lowercase + +import ( + "strings" +) + +type String struct { + val string +} + +func Create(s string) String { + return String{strings.ToLower(s)} +} + +func (l String) String() string { + return l.val +} + +type Slice []String + +func CreateSlice(ss ...string) Slice { + var out Slice + for _, v := range ss { + out = append(out, Create(v)) + } + return out +} + +func (ls Slice) ToStringSlice() []string { + var out []string + for _, v := range ls { + out = append(out, v.String()) + } + return out +} diff --git a/strings/lowercase/lowercase_test.go b/strings/lowercase/lowercase_test.go new file mode 100644 index 0000000..5d50360 --- /dev/null +++ b/strings/lowercase/lowercase_test.go @@ -0,0 +1,44 @@ +/* + +SPDX-Copyright: Copyright (c) Capital One Services, LLC +SPDX-License-Identifier: Apache-2.0 +Copyright 2017 Capital One Services, LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and limitations under the License. + +*/ +package lowercase + +import ( + "testing" +) + +func TestCreate(t *testing.T) { + l := Create("Foobar") + if l.val != "foobar" { + t.Error("Lowercase value generated incorrectly") + } +} + +func TestLowercase(t *testing.T) { + l := Create("Foobar") + if l.String() != "foobar" { + t.Error("Lowercase value retrieved incorrectly") + } +} + +func TestEmpty(t *testing.T) { + var l String + if l.String() != "" { + t.Error("empty lowercase not handled correctly") + } +} \ No newline at end of file diff --git a/strings/miniglob/miniglob.go b/strings/miniglob/miniglob.go new file mode 100644 index 0000000..512faec --- /dev/null +++ b/strings/miniglob/miniglob.go @@ -0,0 +1,100 @@ +/* + +SPDX-Copyright: Copyright (c) Capital One Services, LLC +SPDX-License-Identifier: Apache-2.0 +Copyright 2017 Capital One Services, LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and limitations under the License. + +*/ +package miniglob + +import ( + "bytes" + "encoding/json" + "regexp" + "strings" +) + +type MiniGlob struct { + Regex *regexp.Regexp + Text string +} + +func Pattern(text string) string { + var buffer bytes.Buffer + buffer.WriteString("^") + text = strings.TrimSpace(text) + text = strings.TrimPrefix(text, "/") + text = strings.TrimSuffix(text, "/") + outer := strings.Split(text, "**") + for i, sec1 := range outer { + inner := strings.Split(sec1, "*") + for j, sec2 := range inner { + buffer.WriteString(regexp.QuoteMeta(sec2)) + if j < len(inner)-1 { + buffer.WriteString("[^/]*") + } + } + if i < len(outer)-1 { + buffer.WriteString(".*") + } + } + buffer.WriteString("$") + return buffer.String() +} + +func Compile(text string) (*regexp.Regexp, error) { + return regexp.Compile(Pattern(text)) +} + +func Create(text string) (MiniGlob, error) { + regex, err := Compile(text) + if err != nil { + return MiniGlob{}, err + } + return MiniGlob{Text: text, Regex: regex}, nil +} + +func MustCreate(text string) MiniGlob { + glob, err := Create(text) + if err != nil { + panic(err) + } + return glob +} + +func (rs MiniGlob) MarshalJSON() ([]byte, error) { + var buf bytes.Buffer + e := json.NewEncoder(&buf) + e.SetEscapeHTML(false) + err := e.Encode(rs.Text) + if err != nil { + return nil, err + } + // golang json encoder adds a newline for each value + return bytes.TrimSpace(buf.Bytes()), nil +} + +func (rs *MiniGlob) UnmarshalJSON(text []byte) error { + var pat string + err := json.Unmarshal(text, &pat) + if err != nil { + return err + } + r, err := Create(pat) + if err != nil { + return err + } + *rs = r + return nil +} diff --git a/strings/miniglob/miniglob_test.go b/strings/miniglob/miniglob_test.go new file mode 100644 index 0000000..8dc0245 --- /dev/null +++ 
b/strings/miniglob/miniglob_test.go @@ -0,0 +1,62 @@ +/* + +SPDX-Copyright: Copyright (c) Capital One Services, LLC +SPDX-License-Identifier: Apache-2.0 +Copyright 2017 Capital One Services, LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and limitations under the License. + +*/ +package miniglob + +import ( + "encoding/json" + "testing" +) + +func testPattern(t *testing.T, in string, out string) { + obs := Pattern(in) + if obs != out { + t.Errorf("Pattern compilation of %s expected %s and observed %s", + in, out, obs) + } +} + +func TestPattern(t *testing.T) { + testPattern(t, "", "^$") + testPattern(t, "foo", "^foo$") + testPattern(t, "/foo/bar/", "^foo/bar$") + testPattern(t, "*.java", "^[^/]*\\.java$") + testPattern(t, "**.java", "^.*\\.java$") + testPattern(t, "**/*.java", "^.*/[^/]*\\.java$") +} + +func TestSerde(t *testing.T) { + var body MiniGlob + err := json.Unmarshal([]byte("\" /hello*world/ \""), &body) + if err != nil { + t.Fatal("Unmarshal failure", err) + } + if body.Text != " /hello*world/ " { + t.Errorf("Unmarshal text context not successful: %s", body.Text) + } + if body.Regex.String() != "^hello[^/]*world$" { + t.Errorf("Unmarshal regex context not successful: %s", body.Regex.String()) + } + out, err := json.Marshal(body) + if err != nil { + t.Fatal("Marshal failure", err) + } + if string(out) != "\" /hello*world/ \"" { + t.Errorf("Marshal context not successful: %s", string(out)) + } +} diff --git a/strings/rxserde/regexp.go b/strings/rxserde/regexp.go new file mode 100644 index 0000000..e01bfd9 --- /dev/null +++ b/strings/rxserde/regexp.go @@ -0,0 +1,55 @@ +/* + +SPDX-Copyright: Copyright (c) Capital One Services, LLC +SPDX-License-Identifier: Apache-2.0 +Copyright 2017 Capital One Services, LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and limitations under the License. 
+ +*/ +package rxserde + +import ( + "bytes" + "encoding/json" + "regexp" +) + +type RegexSerde struct { + Regex *regexp.Regexp +} + +func (rs RegexSerde) MarshalJSON() ([]byte, error) { + var buf bytes.Buffer + e := json.NewEncoder(&buf) + e.SetEscapeHTML(false) + err := e.Encode(rs.Regex.String()) + if err != nil { + return nil, err + } + // golang json encoder adds a newline for each value + return bytes.TrimSpace(buf.Bytes()), nil +} + +func (rs *RegexSerde) UnmarshalJSON(text []byte) error { + var pat string + err := json.Unmarshal(text, &pat) + if err != nil { + return err + } + r, err := regexp.Compile(pat) + if err != nil { + return err + } + rs.Regex = r + return nil +} diff --git a/strings/rxserde/regexp_test.go b/strings/rxserde/regexp_test.go new file mode 100644 index 0000000..798df75 --- /dev/null +++ b/strings/rxserde/regexp_test.go @@ -0,0 +1,74 @@ +/* + +SPDX-Copyright: Copyright (c) Capital One Services, LLC +SPDX-License-Identifier: Apache-2.0 +Copyright 2017 Capital One Services, LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and limitations under the License. + +*/ +package rxserde + +import ( + "bytes" + "encoding/json" + "regexp" + "testing" +) + +type Testinner struct { + Pattern RegexSerde +} + +const text = `{ + "pattern": "abcdef" +}` + +const expected = `"(?i)^I approve\\s*(?P\\S*)"` + +func TestMarshalDirect(t *testing.T) { + r := RegexSerde{regexp.MustCompile(`(?i)^I approve\s*(?P\S*)`)} + b, err := r.MarshalJSON() + if err != nil { + t.Fatal("Unable to marshal regex serde", err) + } + s := string(b) + if s != `"(?i)^I approve\\s*(?P\\S*)"` { + t.Error("marshal regex serde did not yield expected result", s) + } +} + +func TestMarshalRegexSerde(t *testing.T) { + r := RegexSerde{regexp.MustCompile(`(?i)^I approve\s*(?P\S*)`)} + var buf bytes.Buffer + e := json.NewEncoder(&buf) + e.SetEscapeHTML(false) + err := e.Encode(r) + if err != nil { + t.Fatal("Unable to marshal regex serde", err) + } + s := string(bytes.TrimSpace(buf.Bytes())) + if s != `"(?i)^I approve\\s*(?P\\S*)"` { + t.Error("marshal regex serde did not yield expected result", s) + } +} + +func TestInnerRegexPattern(t *testing.T) { + var result Testinner + err := json.Unmarshal([]byte(text), &result) + if err != nil { + t.Fatal("Unable to unmarshal test struct", err) + } + if result.Pattern.Regex.String() != "abcdef" { + t.Error("Unable to read inner pattern", result) + } +} diff --git a/swagger.yaml b/swagger.yaml new file mode 100644 index 0000000..539be6c --- /dev/null +++ b/swagger.yaml @@ -0,0 +1,154 @@ +# Example YAML to get you started quickly. +# Be aware that YAML has indentation based scoping. +# Code completion support is available so start typing for available options. 
+swagger: '2.0' + +# This is your document metadata +info: + version: "0.7.0" + title: Checks-Out +securityDefinitions: + CookieSession: + type: apiKey + in: header + name: user_sess + HeaderSession: + type: apiKey + in: header + name: Authorization + QuerySession: + type: apiKey + in: query + name: access_token +# Describe your paths here +paths: + /api/count: + # This is a HTTP operation + get: + # Describe this verb here. Note: you can use markdown + description: | + Number of public and private repositories. + responses: + # Response code + 200: + description: count + # A schema describing your response object. + # Use JSON Schema format + schema: + type: number + /api/repos: + # This is a HTTP operation + get: + # Describe this verb here. Note: you can use markdown + description: | + Metadata about the public GitHub repositories + registered with this service. This endpoint is + enabled if the CHECKS_OUT_SUNLIGHT environment variable + is set. + produces: + - application/json + responses: + # Response code + 200: + description: repositories + # A schema describing your response object. + # Use JSON Schema format + schema: + type: array + description: Repos + items: + description: Repo + properties: + id: + type: number + owner: + type: string + name: + type: string + slug: + type: string + link_url: + type: string + private: + type: boolean + + /api/repos/{owner}/{repo}/config: + get: + # Describe this verb here. Note: you can use markdown + description: | + Parses repository configuration file + and returns a complete configuration with + default sections populated. + parameters: + - name: owner + in: path + required: true + description: user or organization + type: string + - name: repo + in: path + required: true + description: repository name + type: string + security: + - CookieSession: [ ] + - HeaderSession: [ ] + - QuerySession: [ ] + produces: + - application/json + responses: + # Response code + 200: + description: configuration + # A schema describing your response object. + # Use JSON Schema format + schema: + description: ChecksOutConfig + type: object + /api/repos/{owner}/{repo}/maintainers: + get: + # Describe this verb here. Note: you can use markdown + description: | + Parses repository MAINTAINERS configuration file + and returns the populated group information. + parameters: + - name: owner + in: path + required: true + description: user or organization + type: string + - name: repo + in: path + required: true + description: repository name + type: string + security: + - CookieSession: [ ] + - HeaderSession: [ ] + - QuerySession: [ ] + produces: + - application/json + responses: + # Response code + 200: + description: maintainers + # A schema describing your response object. + # Use JSON Schema format + schema: + description: Maintainers + type: object + /version: + # This is a HTTP operation + get: + # Describe this verb here. Note: you can use markdown + description: | + Version information. This endpoint is enabled if the + CHECKS_OUT_SUNLIGHT environment variable is set. + responses: + # Response code + 200: + description: version + # A schema describing your response object. 
+ # Use JSON Schema format + schema: + type: string \ No newline at end of file diff --git a/usage/usage.go b/usage/usage.go new file mode 100644 index 0000000..2d1ad9b --- /dev/null +++ b/usage/usage.go @@ -0,0 +1,138 @@ +/* + +SPDX-Copyright: Copyright (c) Capital One Services, LLC +SPDX-License-Identifier: Apache-2.0 +Copyright 2017 Capital One Services, LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and limitations under the License. + +*/ +package usage + +import ( + "context" + "sync" + "time" + + log "github.com/Sirupsen/logrus" +) + +type usageType int + +const ( + event usageType = iota +) + +func AddEventToContext(c context.Context, e string) context.Context { + return context.WithValue(c, event, e) +} + +func GetEventFromContext(c context.Context) string { + e, ok := c.Value(event).(string) + if !ok { + return "" + } + return e +} + +var lock = sync.Mutex{} + +type Usage struct { + Users map[string]int `json:"users"` + HookIn map[string]int `json:"hook_in"` + HookOut map[string]int `json:"hook_out"` + RemoteReq map[string]int `json:"remote"` +} + +var data Usage + +func init() { + data = createUsage() +} + +func createUsage() Usage { + return Usage{ + Users: make(map[string]int), + HookIn: make(map[string]int), + HookOut: make(map[string]int), + RemoteReq: make(map[string]int), + } +} + +func RecordIncomingWebHook(event string) { + lock.Lock() + data.HookIn[event]++ + lock.Unlock() +} + +func RecordApiRequest(user string, event string, req string) { + lock.Lock() + data.Users[user]++ + data.HookOut[event]++ + data.RemoteReq[req]++ + lock.Unlock() +} + +func copyMap(dst, src map[string]int) { + for k, v := range src { + dst[k] = v + } +} + +func GetStats() Usage { + stats := createUsage() + lock.Lock() + copyMap(stats.Users, data.Users) + copyMap(stats.HookIn, data.HookIn) + copyMap(stats.HookOut, data.HookOut) + copyMap(stats.RemoteReq, data.RemoteReq) + lock.Unlock() + return stats +} + +func writeLog() { + log.Info("Usage statistics for the past hour") + for k, v := range data.Users { + log.Infof("User %s : %d api requests", k, v) + } + for k, v := range data.HookIn { + log.Infof("Hook %s : %d incoming requests", k, v) + } + for k, v := range data.HookOut { + log.Infof("Hook %s : %d outgoing requests", k, v) + } + for k, v := range data.RemoteReq { + log.Infof("Remote request %s : %d requests", k, v) + } +} + +func resetStats() { + data = createUsage() +} + +func usageTask() { + wait := 60 - time.Now().Minute() + timer := time.NewTimer(time.Duration(wait) * time.Minute) + <-timer.C + ticker := time.NewTicker(time.Hour) + for { + lock.Lock() + writeLog() + resetStats() + lock.Unlock() + <-ticker.C + } +} + +func Start() { + go usageTask() +} diff --git a/usage/usage_test.go b/usage/usage_test.go new file mode 100644 index 0000000..5391c20 --- /dev/null +++ b/usage/usage_test.go @@ -0,0 +1,38 @@ +/* + +SPDX-Copyright: Copyright (c) Capital One Services, LLC +SPDX-License-Identifier: Apache-2.0 +Copyright 2017 Capital One Services, LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file 
except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and limitations under the License. + +*/ +package usage + +import ( + "testing" + "context" +) + +func TestContextUnique(t *testing.T) { + c := context.Background() + c2 := AddEventToContext(c, "Hello") + i := 0 + v := c2.Value(i) + if v != nil { + t.Error("Should not be able to get value without opaque type") + } + v2 := GetEventFromContext(c2) + if v2 != "Hello" { + t.Errorf("Expected Hello, got %v",v2) + } +} \ No newline at end of file diff --git a/vendor/github.com/BurntSushi/toml/COMPATIBLE b/vendor/github.com/BurntSushi/toml/COMPATIBLE deleted file mode 100644 index 21e0938..0000000 --- a/vendor/github.com/BurntSushi/toml/COMPATIBLE +++ /dev/null @@ -1,3 +0,0 @@ -Compatible with TOML version -[v0.2.0](https://github.com/mojombo/toml/blob/master/versions/toml-v0.2.0.md) - diff --git a/vendor/github.com/BurntSushi/toml/COPYING b/vendor/github.com/BurntSushi/toml/COPYING deleted file mode 100644 index 5a8e332..0000000 --- a/vendor/github.com/BurntSushi/toml/COPYING +++ /dev/null @@ -1,14 +0,0 @@ - DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE - Version 2, December 2004 - - Copyright (C) 2004 Sam Hocevar - - Everyone is permitted to copy and distribute verbatim or modified - copies of this license document, and changing it is allowed as long - as the name is changed. - - DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE - TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION - - 0. You just DO WHAT THE FUCK YOU WANT TO. - diff --git a/vendor/github.com/BurntSushi/toml/Makefile b/vendor/github.com/BurntSushi/toml/Makefile deleted file mode 100644 index 3600848..0000000 --- a/vendor/github.com/BurntSushi/toml/Makefile +++ /dev/null @@ -1,19 +0,0 @@ -install: - go install ./... - -test: install - go test -v - toml-test toml-test-decoder - toml-test -encoder toml-test-encoder - -fmt: - gofmt -w *.go */*.go - colcheck *.go */*.go - -tags: - find ./ -name '*.go' -print0 | xargs -0 gotags > TAGS - -push: - git push origin master - git push github master - diff --git a/vendor/github.com/BurntSushi/toml/README.md b/vendor/github.com/BurntSushi/toml/README.md deleted file mode 100644 index 5a5df63..0000000 --- a/vendor/github.com/BurntSushi/toml/README.md +++ /dev/null @@ -1,220 +0,0 @@ -## TOML parser and encoder for Go with reflection - -TOML stands for Tom's Obvious, Minimal Language. This Go package provides a -reflection interface similar to Go's standard library `json` and `xml` -packages. This package also supports the `encoding.TextUnmarshaler` and -`encoding.TextMarshaler` interfaces so that you can define custom data -representations. (There is an example of this below.) 
- -Spec: https://github.com/mojombo/toml - -Compatible with TOML version -[v0.2.0](https://github.com/toml-lang/toml/blob/master/versions/en/toml-v0.2.0.md) - -Documentation: http://godoc.org/github.com/BurntSushi/toml - -Installation: - -```bash -go get github.com/BurntSushi/toml -``` - -Try the toml validator: - -```bash -go get github.com/BurntSushi/toml/cmd/tomlv -tomlv some-toml-file.toml -``` - -[![Build status](https://api.travis-ci.org/BurntSushi/toml.png)](https://travis-ci.org/BurntSushi/toml) - - -### Testing - -This package passes all tests in -[toml-test](https://github.com/BurntSushi/toml-test) for both the decoder -and the encoder. - -### Examples - -This package works similarly to how the Go standard library handles `XML` -and `JSON`. Namely, data is loaded into Go values via reflection. - -For the simplest example, consider some TOML file as just a list of keys -and values: - -```toml -Age = 25 -Cats = [ "Cauchy", "Plato" ] -Pi = 3.14 -Perfection = [ 6, 28, 496, 8128 ] -DOB = 1987-07-05T05:45:00Z -``` - -Which could be defined in Go as: - -```go -type Config struct { - Age int - Cats []string - Pi float64 - Perfection []int - DOB time.Time // requires `import time` -} -``` - -And then decoded with: - -```go -var conf Config -if _, err := toml.Decode(tomlData, &conf); err != nil { - // handle error -} -``` - -You can also use struct tags if your struct field name doesn't map to a TOML -key value directly: - -```toml -some_key_NAME = "wat" -``` - -```go -type TOML struct { - ObscureKey string `toml:"some_key_NAME"` -} -``` - -### Using the `encoding.TextUnmarshaler` interface - -Here's an example that automatically parses duration strings into -`time.Duration` values: - -```toml -[[song]] -name = "Thunder Road" -duration = "4m49s" - -[[song]] -name = "Stairway to Heaven" -duration = "8m03s" -``` - -Which can be decoded with: - -```go -type song struct { - Name string - Duration duration -} -type songs struct { - Song []song -} -var favorites songs -if _, err := toml.Decode(blob, &favorites); err != nil { - log.Fatal(err) -} - -for _, s := range favorites.Song { - fmt.Printf("%s (%s)\n", s.Name, s.Duration) -} -``` - -And you'll also need a `duration` type that satisfies the -`encoding.TextUnmarshaler` interface: - -```go -type duration struct { - time.Duration -} - -func (d *duration) UnmarshalText(text []byte) error { - var err error - d.Duration, err = time.ParseDuration(string(text)) - return err -} -``` - -### More complex usage - -Here's an example of how to load the example from the official spec page: - -```toml -# This is a TOML document. Boom. - -title = "TOML Example" - -[owner] -name = "Tom Preston-Werner" -organization = "GitHub" -bio = "GitHub Cofounder & CEO\nLikes tater tots and beer." -dob = 1979-05-27T07:32:00Z # First class dates? Why not? - -[database] -server = "192.168.1.1" -ports = [ 8001, 8001, 8002 ] -connection_max = 5000 -enabled = true - -[servers] - - # You can indent as you please. Tabs or spaces. TOML don't care. 
- [servers.alpha] - ip = "10.0.0.1" - dc = "eqdc10" - - [servers.beta] - ip = "10.0.0.2" - dc = "eqdc10" - -[clients] -data = [ ["gamma", "delta"], [1, 2] ] # just an update to make sure parsers support it - -# Line breaks are OK when inside arrays -hosts = [ - "alpha", - "omega" -] -``` - -And the corresponding Go types are: - -```go -type tomlConfig struct { - Title string - Owner ownerInfo - DB database `toml:"database"` - Servers map[string]server - Clients clients -} - -type ownerInfo struct { - Name string - Org string `toml:"organization"` - Bio string - DOB time.Time -} - -type database struct { - Server string - Ports []int - ConnMax int `toml:"connection_max"` - Enabled bool -} - -type server struct { - IP string - DC string -} - -type clients struct { - Data [][]interface{} - Hosts []string -} -``` - -Note that a case insensitive match will be tried if an exact match can't be -found. - -A working example of the above can be found in `_examples/example.{go,toml}`. - diff --git a/vendor/github.com/BurntSushi/toml/decode.go b/vendor/github.com/BurntSushi/toml/decode.go deleted file mode 100644 index 6c7d398..0000000 --- a/vendor/github.com/BurntSushi/toml/decode.go +++ /dev/null @@ -1,492 +0,0 @@ -package toml - -import ( - "fmt" - "io" - "io/ioutil" - "math" - "reflect" - "strings" - "time" -) - -var e = fmt.Errorf - -// Unmarshaler is the interface implemented by objects that can unmarshal a -// TOML description of themselves. -type Unmarshaler interface { - UnmarshalTOML(interface{}) error -} - -// Unmarshal decodes the contents of `p` in TOML format into a pointer `v`. -func Unmarshal(p []byte, v interface{}) error { - _, err := Decode(string(p), v) - return err -} - -// Primitive is a TOML value that hasn't been decoded into a Go value. -// When using the various `Decode*` functions, the type `Primitive` may -// be given to any value, and its decoding will be delayed. -// -// A `Primitive` value can be decoded using the `PrimitiveDecode` function. -// -// The underlying representation of a `Primitive` value is subject to change. -// Do not rely on it. -// -// N.B. Primitive values are still parsed, so using them will only avoid -// the overhead of reflection. They can be useful when you don't know the -// exact type of TOML data until run time. -type Primitive struct { - undecoded interface{} - context Key -} - -// DEPRECATED! -// -// Use MetaData.PrimitiveDecode instead. -func PrimitiveDecode(primValue Primitive, v interface{}) error { - md := MetaData{decoded: make(map[string]bool)} - return md.unify(primValue.undecoded, rvalue(v)) -} - -// PrimitiveDecode is just like the other `Decode*` functions, except it -// decodes a TOML value that has already been parsed. Valid primitive values -// can *only* be obtained from values filled by the decoder functions, -// including this method. (i.e., `v` may contain more `Primitive` -// values.) -// -// Meta data for primitive values is included in the meta data returned by -// the `Decode*` functions with one exception: keys returned by the Undecoded -// method will only reflect keys that were decoded. Namely, any keys hidden -// behind a Primitive will be considered undecoded. Executing this method will -// update the undecoded keys in the meta data. (See the example.) 
-func (md *MetaData) PrimitiveDecode(primValue Primitive, v interface{}) error { - md.context = primValue.context - defer func() { md.context = nil }() - return md.unify(primValue.undecoded, rvalue(v)) -} - -// Decode will decode the contents of `data` in TOML format into a pointer -// `v`. -// -// TOML hashes correspond to Go structs or maps. (Dealer's choice. They can be -// used interchangeably.) -// -// TOML arrays of tables correspond to either a slice of structs or a slice -// of maps. -// -// TOML datetimes correspond to Go `time.Time` values. -// -// All other TOML types (float, string, int, bool and array) correspond -// to the obvious Go types. -// -// An exception to the above rules is if a type implements the -// encoding.TextUnmarshaler interface. In this case, any primitive TOML value -// (floats, strings, integers, booleans and datetimes) will be converted to -// a byte string and given to the value's UnmarshalText method. See the -// Unmarshaler example for a demonstration with time duration strings. -// -// Key mapping -// -// TOML keys can map to either keys in a Go map or field names in a Go -// struct. The special `toml` struct tag may be used to map TOML keys to -// struct fields that don't match the key name exactly. (See the example.) -// A case insensitive match to struct names will be tried if an exact match -// can't be found. -// -// The mapping between TOML values and Go values is loose. That is, there -// may exist TOML values that cannot be placed into your representation, and -// there may be parts of your representation that do not correspond to -// TOML values. This loose mapping can be made stricter by using the IsDefined -// and/or Undecoded methods on the MetaData returned. -// -// This decoder will not handle cyclic types. If a cyclic type is passed, -// `Decode` will not terminate. -func Decode(data string, v interface{}) (MetaData, error) { - p, err := parse(data) - if err != nil { - return MetaData{}, err - } - md := MetaData{ - p.mapping, p.types, p.ordered, - make(map[string]bool, len(p.ordered)), nil, - } - return md, md.unify(p.mapping, rvalue(v)) -} - -// DecodeFile is just like Decode, except it will automatically read the -// contents of the file at `fpath` and decode it for you. -func DecodeFile(fpath string, v interface{}) (MetaData, error) { - bs, err := ioutil.ReadFile(fpath) - if err != nil { - return MetaData{}, err - } - return Decode(string(bs), v) -} - -// DecodeReader is just like Decode, except it will consume all bytes -// from the reader and decode it for you. -func DecodeReader(r io.Reader, v interface{}) (MetaData, error) { - bs, err := ioutil.ReadAll(r) - if err != nil { - return MetaData{}, err - } - return Decode(string(bs), v) -} - -// unify performs a sort of type unification based on the structure of `rv`, -// which is the client representation. -// -// Any type mismatch produces an error. Finding a type that we don't know -// how to handle produces an unsupported type error. -func (md *MetaData) unify(data interface{}, rv reflect.Value) error { - - // Special case. Look for a `Primitive` value. - if rv.Type() == reflect.TypeOf((*Primitive)(nil)).Elem() { - // Save the undecoded data and the key context into the primitive - // value. - context := make(Key, len(md.context)) - copy(context, md.context) - rv.Set(reflect.ValueOf(Primitive{ - undecoded: data, - context: context, - })) - return nil - } - - // Special case. Unmarshaler Interface support. 
- if rv.CanAddr() { - if v, ok := rv.Addr().Interface().(Unmarshaler); ok { - return v.UnmarshalTOML(data) - } - } - - // Special case. Handle time.Time values specifically. - // TODO: Remove this code when we decide to drop support for Go 1.1. - // This isn't necessary in Go 1.2 because time.Time satisfies the encoding - // interfaces. - if rv.Type().AssignableTo(rvalue(time.Time{}).Type()) { - return md.unifyDatetime(data, rv) - } - - // Special case. Look for a value satisfying the TextUnmarshaler interface. - if v, ok := rv.Interface().(TextUnmarshaler); ok { - return md.unifyText(data, v) - } - // BUG(burntsushi) - // The behavior here is incorrect whenever a Go type satisfies the - // encoding.TextUnmarshaler interface but also corresponds to a TOML - // hash or array. In particular, the unmarshaler should only be applied - // to primitive TOML values. But at this point, it will be applied to - // all kinds of values and produce an incorrect error whenever those values - // are hashes or arrays (including arrays of tables). - - k := rv.Kind() - - // laziness - if k >= reflect.Int && k <= reflect.Uint64 { - return md.unifyInt(data, rv) - } - switch k { - case reflect.Ptr: - elem := reflect.New(rv.Type().Elem()) - err := md.unify(data, reflect.Indirect(elem)) - if err != nil { - return err - } - rv.Set(elem) - return nil - case reflect.Struct: - return md.unifyStruct(data, rv) - case reflect.Map: - return md.unifyMap(data, rv) - case reflect.Array: - return md.unifyArray(data, rv) - case reflect.Slice: - return md.unifySlice(data, rv) - case reflect.String: - return md.unifyString(data, rv) - case reflect.Bool: - return md.unifyBool(data, rv) - case reflect.Interface: - // we only support empty interfaces. - if rv.NumMethod() > 0 { - return e("Unsupported type '%s'.", rv.Kind()) - } - return md.unifyAnything(data, rv) - case reflect.Float32: - fallthrough - case reflect.Float64: - return md.unifyFloat64(data, rv) - } - return e("Unsupported type '%s'.", rv.Kind()) -} - -func (md *MetaData) unifyStruct(mapping interface{}, rv reflect.Value) error { - tmap, ok := mapping.(map[string]interface{}) - if !ok { - return mismatch(rv, "map", mapping) - } - - for key, datum := range tmap { - var f *field - fields := cachedTypeFields(rv.Type()) - for i := range fields { - ff := &fields[i] - if ff.name == key { - f = ff - break - } - if f == nil && strings.EqualFold(ff.name, key) { - f = ff - } - } - if f != nil { - subv := rv - for _, i := range f.index { - subv = indirect(subv.Field(i)) - } - if isUnifiable(subv) { - md.decoded[md.context.add(key).String()] = true - md.context = append(md.context, key) - if err := md.unify(datum, subv); err != nil { - return e("Type mismatch for '%s.%s': %s", - rv.Type().String(), f.name, err) - } - md.context = md.context[0 : len(md.context)-1] - } else if f.name != "" { - // Bad user! No soup for you! 
- return e("Field '%s.%s' is unexported, and therefore cannot "+ - "be loaded with reflection.", rv.Type().String(), f.name) - } - } - } - return nil -} - -func (md *MetaData) unifyMap(mapping interface{}, rv reflect.Value) error { - tmap, ok := mapping.(map[string]interface{}) - if !ok { - return badtype("map", mapping) - } - if rv.IsNil() { - rv.Set(reflect.MakeMap(rv.Type())) - } - for k, v := range tmap { - md.decoded[md.context.add(k).String()] = true - md.context = append(md.context, k) - - rvkey := indirect(reflect.New(rv.Type().Key())) - rvval := reflect.Indirect(reflect.New(rv.Type().Elem())) - if err := md.unify(v, rvval); err != nil { - return err - } - md.context = md.context[0 : len(md.context)-1] - - rvkey.SetString(k) - rv.SetMapIndex(rvkey, rvval) - } - return nil -} - -func (md *MetaData) unifyArray(data interface{}, rv reflect.Value) error { - datav := reflect.ValueOf(data) - if datav.Kind() != reflect.Slice { - return badtype("slice", data) - } - sliceLen := datav.Len() - if sliceLen != rv.Len() { - return e("expected array length %d; got TOML array of length %d", - rv.Len(), sliceLen) - } - return md.unifySliceArray(datav, rv) -} - -func (md *MetaData) unifySlice(data interface{}, rv reflect.Value) error { - datav := reflect.ValueOf(data) - if datav.Kind() != reflect.Slice { - return badtype("slice", data) - } - sliceLen := datav.Len() - if rv.IsNil() { - rv.Set(reflect.MakeSlice(rv.Type(), sliceLen, sliceLen)) - } - return md.unifySliceArray(datav, rv) -} - -func (md *MetaData) unifySliceArray(data, rv reflect.Value) error { - sliceLen := data.Len() - for i := 0; i < sliceLen; i++ { - v := data.Index(i).Interface() - sliceval := indirect(rv.Index(i)) - if err := md.unify(v, sliceval); err != nil { - return err - } - } - return nil -} - -func (md *MetaData) unifyDatetime(data interface{}, rv reflect.Value) error { - if _, ok := data.(time.Time); ok { - rv.Set(reflect.ValueOf(data)) - return nil - } - return badtype("time.Time", data) -} - -func (md *MetaData) unifyString(data interface{}, rv reflect.Value) error { - if s, ok := data.(string); ok { - rv.SetString(s) - return nil - } - return badtype("string", data) -} - -func (md *MetaData) unifyFloat64(data interface{}, rv reflect.Value) error { - if num, ok := data.(float64); ok { - switch rv.Kind() { - case reflect.Float32: - fallthrough - case reflect.Float64: - rv.SetFloat(num) - default: - panic("bug") - } - return nil - } - return badtype("float", data) -} - -func (md *MetaData) unifyInt(data interface{}, rv reflect.Value) error { - if num, ok := data.(int64); ok { - if rv.Kind() >= reflect.Int && rv.Kind() <= reflect.Int64 { - switch rv.Kind() { - case reflect.Int, reflect.Int64: - // No bounds checking necessary. - case reflect.Int8: - if num < math.MinInt8 || num > math.MaxInt8 { - return e("Value '%d' is out of range for int8.", num) - } - case reflect.Int16: - if num < math.MinInt16 || num > math.MaxInt16 { - return e("Value '%d' is out of range for int16.", num) - } - case reflect.Int32: - if num < math.MinInt32 || num > math.MaxInt32 { - return e("Value '%d' is out of range for int32.", num) - } - } - rv.SetInt(num) - } else if rv.Kind() >= reflect.Uint && rv.Kind() <= reflect.Uint64 { - unum := uint64(num) - switch rv.Kind() { - case reflect.Uint, reflect.Uint64: - // No bounds checking necessary. 
- case reflect.Uint8: - if num < 0 || unum > math.MaxUint8 { - return e("Value '%d' is out of range for uint8.", num) - } - case reflect.Uint16: - if num < 0 || unum > math.MaxUint16 { - return e("Value '%d' is out of range for uint16.", num) - } - case reflect.Uint32: - if num < 0 || unum > math.MaxUint32 { - return e("Value '%d' is out of range for uint32.", num) - } - } - rv.SetUint(unum) - } else { - panic("unreachable") - } - return nil - } - return badtype("integer", data) -} - -func (md *MetaData) unifyBool(data interface{}, rv reflect.Value) error { - if b, ok := data.(bool); ok { - rv.SetBool(b) - return nil - } - return badtype("boolean", data) -} - -func (md *MetaData) unifyAnything(data interface{}, rv reflect.Value) error { - rv.Set(reflect.ValueOf(data)) - return nil -} - -func (md *MetaData) unifyText(data interface{}, v TextUnmarshaler) error { - var s string - switch sdata := data.(type) { - case TextMarshaler: - text, err := sdata.MarshalText() - if err != nil { - return err - } - s = string(text) - case fmt.Stringer: - s = sdata.String() - case string: - s = sdata - case bool: - s = fmt.Sprintf("%v", sdata) - case int64: - s = fmt.Sprintf("%d", sdata) - case float64: - s = fmt.Sprintf("%f", sdata) - default: - return badtype("primitive (string-like)", data) - } - if err := v.UnmarshalText([]byte(s)); err != nil { - return err - } - return nil -} - -// rvalue returns a reflect.Value of `v`. All pointers are resolved. -func rvalue(v interface{}) reflect.Value { - return indirect(reflect.ValueOf(v)) -} - -// indirect returns the value pointed to by a pointer. -// Pointers are followed until the value is not a pointer. -// New values are allocated for each nil pointer. -// -// An exception to this rule is if the value satisfies an interface of -// interest to us (like encoding.TextUnmarshaler). -func indirect(v reflect.Value) reflect.Value { - if v.Kind() != reflect.Ptr { - if v.CanAddr() { - pv := v.Addr() - if _, ok := pv.Interface().(TextUnmarshaler); ok { - return pv - } - } - return v - } - if v.IsNil() { - v.Set(reflect.New(v.Type().Elem())) - } - return indirect(reflect.Indirect(v)) -} - -func isUnifiable(rv reflect.Value) bool { - if rv.CanSet() { - return true - } - if _, ok := rv.Interface().(TextUnmarshaler); ok { - return true - } - return false -} - -func badtype(expected string, data interface{}) error { - return e("Expected %s but found '%T'.", expected, data) -} - -func mismatch(user reflect.Value, expected string, data interface{}) error { - return e("Type mismatch for %s. Expected %s but found '%T'.", - user.Type().String(), expected, data) -} diff --git a/vendor/github.com/BurntSushi/toml/decode_meta.go b/vendor/github.com/BurntSushi/toml/decode_meta.go deleted file mode 100644 index ef6f545..0000000 --- a/vendor/github.com/BurntSushi/toml/decode_meta.go +++ /dev/null @@ -1,122 +0,0 @@ -package toml - -import "strings" - -// MetaData allows access to meta information about TOML data that may not -// be inferrable via reflection. In particular, whether a key has been defined -// and the TOML type of a key. -type MetaData struct { - mapping map[string]interface{} - types map[string]tomlType - keys []Key - decoded map[string]bool - context Key // Used only during decoding. -} - -// IsDefined returns true if the key given exists in the TOML data. The key -// should be specified hierarchially. e.g., -// -// // access the TOML key 'a.b.c' -// IsDefined("a", "b", "c") -// -// IsDefined will return false if an empty key given. Keys are case sensitive. 
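As a rough illustration of the `MetaData` queries documented here, a small sketch using `IsDefined`, `Type`, and `Undecoded` (the TOML blob and the `owner` struct are made up for the example) could be:

```go
package main

import (
	"fmt"
	"log"

	"github.com/BurntSushi/toml"
)

const blob = `
[owner]
name = "Tom"
age  = 38
`

type owner struct {
	Name string
}

func main() {
	var conf struct{ Owner owner }
	md, err := toml.Decode(blob, &conf)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(md.IsDefined("owner", "name"))  // true
	fmt.Println(md.IsDefined("owner", "email")) // false
	fmt.Println(md.Type("owner", "age"))        // TOML type of owner.age
	// owner.age has no corresponding struct field, so it stays undecoded.
	fmt.Println(md.Undecoded())
}
```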
-func (md *MetaData) IsDefined(key ...string) bool { - if len(key) == 0 { - return false - } - - var hash map[string]interface{} - var ok bool - var hashOrVal interface{} = md.mapping - for _, k := range key { - if hash, ok = hashOrVal.(map[string]interface{}); !ok { - return false - } - if hashOrVal, ok = hash[k]; !ok { - return false - } - } - return true -} - -// Type returns a string representation of the type of the key specified. -// -// Type will return the empty string if given an empty key or a key that -// does not exist. Keys are case sensitive. -func (md *MetaData) Type(key ...string) string { - fullkey := strings.Join(key, ".") - if typ, ok := md.types[fullkey]; ok { - return typ.typeString() - } - return "" -} - -// Key is the type of any TOML key, including key groups. Use (MetaData).Keys -// to get values of this type. -type Key []string - -func (k Key) String() string { - return strings.Join(k, ".") -} - -func (k Key) maybeQuotedAll() string { - var ss []string - for i := range k { - ss = append(ss, k.maybeQuoted(i)) - } - return strings.Join(ss, ".") -} - -func (k Key) maybeQuoted(i int) string { - quote := false - for _, c := range k[i] { - if !isBareKeyChar(c) { - quote = true - break - } - } - if quote { - return "\"" + strings.Replace(k[i], "\"", "\\\"", -1) + "\"" - } else { - return k[i] - } -} - -func (k Key) add(piece string) Key { - newKey := make(Key, len(k)+1) - copy(newKey, k) - newKey[len(k)] = piece - return newKey -} - -// Keys returns a slice of every key in the TOML data, including key groups. -// Each key is itself a slice, where the first element is the top of the -// hierarchy and the last is the most specific. -// -// The list will have the same order as the keys appeared in the TOML data. -// -// All keys returned are non-empty. -func (md *MetaData) Keys() []Key { - return md.keys -} - -// Undecoded returns all keys that have not been decoded in the order in which -// they appear in the original TOML document. -// -// This includes keys that haven't been decoded because of a Primitive value. -// Once the Primitive value is decoded, the keys will be considered decoded. -// -// Also note that decoding into an empty interface will result in no decoding, -// and so no keys will be considered decoded. -// -// In this sense, the Undecoded keys correspond to keys in the TOML document -// that do not have a concrete type in your representation. -func (md *MetaData) Undecoded() []Key { - undecoded := make([]Key, 0, len(md.keys)) - for _, key := range md.keys { - if !md.decoded[key.String()] { - undecoded = append(undecoded, key) - } - } - return undecoded -} diff --git a/vendor/github.com/BurntSushi/toml/doc.go b/vendor/github.com/BurntSushi/toml/doc.go deleted file mode 100644 index fe26800..0000000 --- a/vendor/github.com/BurntSushi/toml/doc.go +++ /dev/null @@ -1,27 +0,0 @@ -/* -Package toml provides facilities for decoding and encoding TOML configuration -files via reflection. There is also support for delaying decoding with -the Primitive type, and querying the set of keys in a TOML document with the -MetaData type. - -The specification implemented: https://github.com/mojombo/toml - -The sub-command github.com/BurntSushi/toml/cmd/tomlv can be used to verify -whether a file is a valid TOML document. It can also be used to print the -type of each key in a TOML document. - -Testing - -There are two important types of tests used for this package. The first is -contained inside '*_test.go' files and uses the standard Go unit testing -framework. 
These tests are primarily devoted to holistically testing the -decoder and encoder. - -The second type of testing is used to verify the implementation's adherence -to the TOML specification. These tests have been factored into their own -project: https://github.com/BurntSushi/toml-test - -The reason the tests are in a separate project is so that they can be used by -any implementation of TOML. Namely, it is language agnostic. -*/ -package toml diff --git a/vendor/github.com/BurntSushi/toml/encode.go b/vendor/github.com/BurntSushi/toml/encode.go deleted file mode 100644 index c7e227c..0000000 --- a/vendor/github.com/BurntSushi/toml/encode.go +++ /dev/null @@ -1,551 +0,0 @@ -package toml - -import ( - "bufio" - "errors" - "fmt" - "io" - "reflect" - "sort" - "strconv" - "strings" - "time" -) - -type tomlEncodeError struct{ error } - -var ( - errArrayMixedElementTypes = errors.New( - "can't encode array with mixed element types") - errArrayNilElement = errors.New( - "can't encode array with nil element") - errNonString = errors.New( - "can't encode a map with non-string key type") - errAnonNonStruct = errors.New( - "can't encode an anonymous field that is not a struct") - errArrayNoTable = errors.New( - "TOML array element can't contain a table") - errNoKey = errors.New( - "top-level values must be a Go map or struct") - errAnything = errors.New("") // used in testing -) - -var quotedReplacer = strings.NewReplacer( - "\t", "\\t", - "\n", "\\n", - "\r", "\\r", - "\"", "\\\"", - "\\", "\\\\", -) - -// Encoder controls the encoding of Go values to a TOML document to some -// io.Writer. -// -// The indentation level can be controlled with the Indent field. -type Encoder struct { - // A single indentation level. By default it is two spaces. - Indent string - - // hasWritten is whether we have written any output to w yet. - hasWritten bool - w *bufio.Writer -} - -// NewEncoder returns a TOML encoder that encodes Go values to the io.Writer -// given. By default, a single indentation level is 2 spaces. -func NewEncoder(w io.Writer) *Encoder { - return &Encoder{ - w: bufio.NewWriter(w), - Indent: " ", - } -} - -// Encode writes a TOML representation of the Go value to the underlying -// io.Writer. If the value given cannot be encoded to a valid TOML document, -// then an error is returned. -// -// The mapping between Go values and TOML values should be precisely the same -// as for the Decode* functions. Similarly, the TextMarshaler interface is -// supported by encoding the resulting bytes as strings. (If you want to write -// arbitrary binary data then you will need to use something like base64 since -// TOML does not have any binary types.) -// -// When encoding TOML hashes (i.e., Go maps or structs), keys without any -// sub-hashes are encoded first. -// -// If a Go map is encoded, then its keys are sorted alphabetically for -// deterministic output. More control over this behavior may be provided if -// there is demand for it. -// -// Encoding Go values without a corresponding TOML representation---like map -// types with non-string keys---will cause an error to be returned. Similarly -// for mixed arrays/slices, arrays/slices with nil elements, embedded -// non-struct types and nested slices containing maps or structs. -// (e.g., [][]map[string]string is not allowed but []map[string]string is OK -// and so is []map[string][]string.) 
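To show the encoder side described in the comment above, here is a minimal sketch using `toml.NewEncoder` and `Encoder.Encode` (the `config` and `database` types are illustrative; only the encoder API itself comes from this file):

```go
package main

import (
	"log"
	"os"

	"github.com/BurntSushi/toml"
)

type database struct {
	Server  string
	Ports   []int
	ConnMax int `toml:"connection_max"`
}

type config struct {
	Title string
	DB    database `toml:"database"`
}

func main() {
	cfg := config{
		Title: "example",
		DB: database{
			Server:  "192.168.1.1",
			Ports:   []int{8001, 8002},
			ConnMax: 5000,
		},
	}
	// NewEncoder writes TOML to the given io.Writer; the Indent field
	// controls how nested tables are indented.
	enc := toml.NewEncoder(os.Stdout)
	if err := enc.Encode(cfg); err != nil {
		log.Fatal(err)
	}
}
```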
-func (enc *Encoder) Encode(v interface{}) error { - rv := eindirect(reflect.ValueOf(v)) - if err := enc.safeEncode(Key([]string{}), rv); err != nil { - return err - } - return enc.w.Flush() -} - -func (enc *Encoder) safeEncode(key Key, rv reflect.Value) (err error) { - defer func() { - if r := recover(); r != nil { - if terr, ok := r.(tomlEncodeError); ok { - err = terr.error - return - } - panic(r) - } - }() - enc.encode(key, rv) - return nil -} - -func (enc *Encoder) encode(key Key, rv reflect.Value) { - // Special case. Time needs to be in ISO8601 format. - // Special case. If we can marshal the type to text, then we used that. - // Basically, this prevents the encoder for handling these types as - // generic structs (or whatever the underlying type of a TextMarshaler is). - switch rv.Interface().(type) { - case time.Time, TextMarshaler: - enc.keyEqElement(key, rv) - return - } - - k := rv.Kind() - switch k { - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, - reflect.Int64, - reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, - reflect.Uint64, - reflect.Float32, reflect.Float64, reflect.String, reflect.Bool: - enc.keyEqElement(key, rv) - case reflect.Array, reflect.Slice: - if typeEqual(tomlArrayHash, tomlTypeOfGo(rv)) { - enc.eArrayOfTables(key, rv) - } else { - enc.keyEqElement(key, rv) - } - case reflect.Interface: - if rv.IsNil() { - return - } - enc.encode(key, rv.Elem()) - case reflect.Map: - if rv.IsNil() { - return - } - enc.eTable(key, rv) - case reflect.Ptr: - if rv.IsNil() { - return - } - enc.encode(key, rv.Elem()) - case reflect.Struct: - enc.eTable(key, rv) - default: - panic(e("Unsupported type for key '%s': %s", key, k)) - } -} - -// eElement encodes any value that can be an array element (primitives and -// arrays). -func (enc *Encoder) eElement(rv reflect.Value) { - switch v := rv.Interface().(type) { - case time.Time: - // Special case time.Time as a primitive. Has to come before - // TextMarshaler below because time.Time implements - // encoding.TextMarshaler, but we need to always use UTC. - enc.wf(v.In(time.FixedZone("UTC", 0)).Format("2006-01-02T15:04:05Z")) - return - case TextMarshaler: - // Special case. Use text marshaler if it's available for this value. - if s, err := v.MarshalText(); err != nil { - encPanic(err) - } else { - enc.writeQuoted(string(s)) - } - return - } - switch rv.Kind() { - case reflect.Bool: - enc.wf(strconv.FormatBool(rv.Bool())) - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, - reflect.Int64: - enc.wf(strconv.FormatInt(rv.Int(), 10)) - case reflect.Uint, reflect.Uint8, reflect.Uint16, - reflect.Uint32, reflect.Uint64: - enc.wf(strconv.FormatUint(rv.Uint(), 10)) - case reflect.Float32: - enc.wf(floatAddDecimal(strconv.FormatFloat(rv.Float(), 'f', -1, 32))) - case reflect.Float64: - enc.wf(floatAddDecimal(strconv.FormatFloat(rv.Float(), 'f', -1, 64))) - case reflect.Array, reflect.Slice: - enc.eArrayOrSliceElement(rv) - case reflect.Interface: - enc.eElement(rv.Elem()) - case reflect.String: - enc.writeQuoted(rv.String()) - default: - panic(e("Unexpected primitive type: %s", rv.Kind())) - } -} - -// By the TOML spec, all floats must have a decimal with at least one -// number on either side. 
-func floatAddDecimal(fstr string) string { - if !strings.Contains(fstr, ".") { - return fstr + ".0" - } - return fstr -} - -func (enc *Encoder) writeQuoted(s string) { - enc.wf("\"%s\"", quotedReplacer.Replace(s)) -} - -func (enc *Encoder) eArrayOrSliceElement(rv reflect.Value) { - length := rv.Len() - enc.wf("[") - for i := 0; i < length; i++ { - elem := rv.Index(i) - enc.eElement(elem) - if i != length-1 { - enc.wf(", ") - } - } - enc.wf("]") -} - -func (enc *Encoder) eArrayOfTables(key Key, rv reflect.Value) { - if len(key) == 0 { - encPanic(errNoKey) - } - for i := 0; i < rv.Len(); i++ { - trv := rv.Index(i) - if isNil(trv) { - continue - } - panicIfInvalidKey(key) - enc.newline() - enc.wf("%s[[%s]]", enc.indentStr(key), key.maybeQuotedAll()) - enc.newline() - enc.eMapOrStruct(key, trv) - } -} - -func (enc *Encoder) eTable(key Key, rv reflect.Value) { - panicIfInvalidKey(key) - if len(key) == 1 { - // Output an extra new line between top-level tables. - // (The newline isn't written if nothing else has been written though.) - enc.newline() - } - if len(key) > 0 { - enc.wf("%s[%s]", enc.indentStr(key), key.maybeQuotedAll()) - enc.newline() - } - enc.eMapOrStruct(key, rv) -} - -func (enc *Encoder) eMapOrStruct(key Key, rv reflect.Value) { - switch rv := eindirect(rv); rv.Kind() { - case reflect.Map: - enc.eMap(key, rv) - case reflect.Struct: - enc.eStruct(key, rv) - default: - panic("eTable: unhandled reflect.Value Kind: " + rv.Kind().String()) - } -} - -func (enc *Encoder) eMap(key Key, rv reflect.Value) { - rt := rv.Type() - if rt.Key().Kind() != reflect.String { - encPanic(errNonString) - } - - // Sort keys so that we have deterministic output. And write keys directly - // underneath this key first, before writing sub-structs or sub-maps. - var mapKeysDirect, mapKeysSub []string - for _, mapKey := range rv.MapKeys() { - k := mapKey.String() - if typeIsHash(tomlTypeOfGo(rv.MapIndex(mapKey))) { - mapKeysSub = append(mapKeysSub, k) - } else { - mapKeysDirect = append(mapKeysDirect, k) - } - } - - var writeMapKeys = func(mapKeys []string) { - sort.Strings(mapKeys) - for _, mapKey := range mapKeys { - mrv := rv.MapIndex(reflect.ValueOf(mapKey)) - if isNil(mrv) { - // Don't write anything for nil fields. - continue - } - enc.encode(key.add(mapKey), mrv) - } - } - writeMapKeys(mapKeysDirect) - writeMapKeys(mapKeysSub) -} - -func (enc *Encoder) eStruct(key Key, rv reflect.Value) { - // Write keys for fields directly under this key first, because if we write - // a field that creates a new table, then all keys under it will be in that - // table (not the one we're writing here). - rt := rv.Type() - var fieldsDirect, fieldsSub [][]int - var addFields func(rt reflect.Type, rv reflect.Value, start []int) - addFields = func(rt reflect.Type, rv reflect.Value, start []int) { - for i := 0; i < rt.NumField(); i++ { - f := rt.Field(i) - // skip unexporded fields - if f.PkgPath != "" { - continue - } - frv := rv.Field(i) - if f.Anonymous { - frv := eindirect(frv) - t := frv.Type() - if t.Kind() != reflect.Struct { - encPanic(errAnonNonStruct) - } - addFields(t, frv, f.Index) - } else if typeIsHash(tomlTypeOfGo(frv)) { - fieldsSub = append(fieldsSub, append(start, f.Index...)) - } else { - fieldsDirect = append(fieldsDirect, append(start, f.Index...)) - } - } - } - addFields(rt, rv, nil) - - var writeFields = func(fields [][]int) { - for _, fieldIndex := range fields { - sft := rt.FieldByIndex(fieldIndex) - sf := rv.FieldByIndex(fieldIndex) - if isNil(sf) { - // Don't write anything for nil fields. 
- continue - } - - keyName := sft.Tag.Get("toml") - if keyName == "-" { - continue - } - if keyName == "" { - keyName = sft.Name - } - - keyName, opts := getOptions(keyName) - if _, ok := opts["omitempty"]; ok && isEmpty(sf) { - continue - } else if _, ok := opts["omitzero"]; ok && isZero(sf) { - continue - } - - enc.encode(key.add(keyName), sf) - } - } - writeFields(fieldsDirect) - writeFields(fieldsSub) -} - -// tomlTypeName returns the TOML type name of the Go value's type. It is -// used to determine whether the types of array elements are mixed (which is -// forbidden). If the Go value is nil, then it is illegal for it to be an array -// element, and valueIsNil is returned as true. - -// Returns the TOML type of a Go value. The type may be `nil`, which means -// no concrete TOML type could be found. -func tomlTypeOfGo(rv reflect.Value) tomlType { - if isNil(rv) || !rv.IsValid() { - return nil - } - switch rv.Kind() { - case reflect.Bool: - return tomlBool - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, - reflect.Int64, - reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, - reflect.Uint64: - return tomlInteger - case reflect.Float32, reflect.Float64: - return tomlFloat - case reflect.Array, reflect.Slice: - if typeEqual(tomlHash, tomlArrayType(rv)) { - return tomlArrayHash - } else { - return tomlArray - } - case reflect.Ptr, reflect.Interface: - return tomlTypeOfGo(rv.Elem()) - case reflect.String: - return tomlString - case reflect.Map: - return tomlHash - case reflect.Struct: - switch rv.Interface().(type) { - case time.Time: - return tomlDatetime - case TextMarshaler: - return tomlString - default: - return tomlHash - } - default: - panic("unexpected reflect.Kind: " + rv.Kind().String()) - } -} - -// tomlArrayType returns the element type of a TOML array. The type returned -// may be nil if it cannot be determined (e.g., a nil slice or a zero length -// slize). This function may also panic if it finds a type that cannot be -// expressed in TOML (such as nil elements, heterogeneous arrays or directly -// nested arrays of tables). -func tomlArrayType(rv reflect.Value) tomlType { - if isNil(rv) || !rv.IsValid() || rv.Len() == 0 { - return nil - } - firstType := tomlTypeOfGo(rv.Index(0)) - if firstType == nil { - encPanic(errArrayNilElement) - } - - rvlen := rv.Len() - for i := 1; i < rvlen; i++ { - elem := rv.Index(i) - switch elemType := tomlTypeOfGo(elem); { - case elemType == nil: - encPanic(errArrayNilElement) - case !typeEqual(firstType, elemType): - encPanic(errArrayMixedElementTypes) - } - } - // If we have a nested array, then we must make sure that the nested - // array contains ONLY primitives. - // This checks arbitrarily nested arrays. 
- if typeEqual(firstType, tomlArray) || typeEqual(firstType, tomlArrayHash) { - nest := tomlArrayType(eindirect(rv.Index(0))) - if typeEqual(nest, tomlHash) || typeEqual(nest, tomlArrayHash) { - encPanic(errArrayNoTable) - } - } - return firstType -} - -func getOptions(keyName string) (string, map[string]struct{}) { - opts := make(map[string]struct{}) - ss := strings.Split(keyName, ",") - name := ss[0] - if len(ss) > 1 { - for _, opt := range ss { - opts[opt] = struct{}{} - } - } - - return name, opts -} - -func isZero(rv reflect.Value) bool { - switch rv.Kind() { - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - if rv.Int() == 0 { - return true - } - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: - if rv.Uint() == 0 { - return true - } - case reflect.Float32, reflect.Float64: - if rv.Float() == 0.0 { - return true - } - } - - return false -} - -func isEmpty(rv reflect.Value) bool { - switch rv.Kind() { - case reflect.String: - if len(strings.TrimSpace(rv.String())) == 0 { - return true - } - case reflect.Array, reflect.Slice, reflect.Map: - if rv.Len() == 0 { - return true - } - } - - return false -} - -func (enc *Encoder) newline() { - if enc.hasWritten { - enc.wf("\n") - } -} - -func (enc *Encoder) keyEqElement(key Key, val reflect.Value) { - if len(key) == 0 { - encPanic(errNoKey) - } - panicIfInvalidKey(key) - enc.wf("%s%s = ", enc.indentStr(key), key.maybeQuoted(len(key)-1)) - enc.eElement(val) - enc.newline() -} - -func (enc *Encoder) wf(format string, v ...interface{}) { - if _, err := fmt.Fprintf(enc.w, format, v...); err != nil { - encPanic(err) - } - enc.hasWritten = true -} - -func (enc *Encoder) indentStr(key Key) string { - return strings.Repeat(enc.Indent, len(key)-1) -} - -func encPanic(err error) { - panic(tomlEncodeError{err}) -} - -func eindirect(v reflect.Value) reflect.Value { - switch v.Kind() { - case reflect.Ptr, reflect.Interface: - return eindirect(v.Elem()) - default: - return v - } -} - -func isNil(rv reflect.Value) bool { - switch rv.Kind() { - case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: - return rv.IsNil() - default: - return false - } -} - -func panicIfInvalidKey(key Key) { - for _, k := range key { - if len(k) == 0 { - encPanic(e("Key '%s' is not a valid table name. Key names "+ - "cannot be empty.", key.maybeQuotedAll())) - } - } -} - -func isValidKeyName(s string) bool { - return len(s) != 0 -} diff --git a/vendor/github.com/BurntSushi/toml/encoding_types.go b/vendor/github.com/BurntSushi/toml/encoding_types.go deleted file mode 100644 index d36e1dd..0000000 --- a/vendor/github.com/BurntSushi/toml/encoding_types.go +++ /dev/null @@ -1,19 +0,0 @@ -// +build go1.2 - -package toml - -// In order to support Go 1.1, we define our own TextMarshaler and -// TextUnmarshaler types. For Go 1.2+, we just alias them with the -// standard library interfaces. - -import ( - "encoding" -) - -// TextMarshaler is a synonym for encoding.TextMarshaler. It is defined here -// so that Go 1.1 can be supported. -type TextMarshaler encoding.TextMarshaler - -// TextUnmarshaler is a synonym for encoding.TextUnmarshaler. It is defined -// here so that Go 1.1 can be supported. 
-type TextUnmarshaler encoding.TextUnmarshaler diff --git a/vendor/github.com/BurntSushi/toml/encoding_types_1.1.go b/vendor/github.com/BurntSushi/toml/encoding_types_1.1.go deleted file mode 100644 index e8d503d..0000000 --- a/vendor/github.com/BurntSushi/toml/encoding_types_1.1.go +++ /dev/null @@ -1,18 +0,0 @@ -// +build !go1.2 - -package toml - -// These interfaces were introduced in Go 1.2, so we add them manually when -// compiling for Go 1.1. - -// TextMarshaler is a synonym for encoding.TextMarshaler. It is defined here -// so that Go 1.1 can be supported. -type TextMarshaler interface { - MarshalText() (text []byte, err error) -} - -// TextUnmarshaler is a synonym for encoding.TextUnmarshaler. It is defined -// here so that Go 1.1 can be supported. -type TextUnmarshaler interface { - UnmarshalText(text []byte) error -} diff --git a/vendor/github.com/BurntSushi/toml/lex.go b/vendor/github.com/BurntSushi/toml/lex.go deleted file mode 100644 index 2191228..0000000 --- a/vendor/github.com/BurntSushi/toml/lex.go +++ /dev/null @@ -1,874 +0,0 @@ -package toml - -import ( - "fmt" - "strings" - "unicode/utf8" -) - -type itemType int - -const ( - itemError itemType = iota - itemNIL // used in the parser to indicate no type - itemEOF - itemText - itemString - itemRawString - itemMultilineString - itemRawMultilineString - itemBool - itemInteger - itemFloat - itemDatetime - itemArray // the start of an array - itemArrayEnd - itemTableStart - itemTableEnd - itemArrayTableStart - itemArrayTableEnd - itemKeyStart - itemCommentStart -) - -const ( - eof = 0 - tableStart = '[' - tableEnd = ']' - arrayTableStart = '[' - arrayTableEnd = ']' - tableSep = '.' - keySep = '=' - arrayStart = '[' - arrayEnd = ']' - arrayValTerm = ',' - commentStart = '#' - stringStart = '"' - stringEnd = '"' - rawStringStart = '\'' - rawStringEnd = '\'' -) - -type stateFn func(lx *lexer) stateFn - -type lexer struct { - input string - start int - pos int - width int - line int - state stateFn - items chan item - - // A stack of state functions used to maintain context. - // The idea is to reuse parts of the state machine in various places. - // For example, values can appear at the top level or within arbitrarily - // nested arrays. The last state on the stack is used after a value has - // been lexed. Similarly for comments. 
- stack []stateFn -} - -type item struct { - typ itemType - val string - line int -} - -func (lx *lexer) nextItem() item { - for { - select { - case item := <-lx.items: - return item - default: - lx.state = lx.state(lx) - } - } -} - -func lex(input string) *lexer { - lx := &lexer{ - input: input + "\n", - state: lexTop, - line: 1, - items: make(chan item, 10), - stack: make([]stateFn, 0, 10), - } - return lx -} - -func (lx *lexer) push(state stateFn) { - lx.stack = append(lx.stack, state) -} - -func (lx *lexer) pop() stateFn { - if len(lx.stack) == 0 { - return lx.errorf("BUG in lexer: no states to pop.") - } - last := lx.stack[len(lx.stack)-1] - lx.stack = lx.stack[0 : len(lx.stack)-1] - return last -} - -func (lx *lexer) current() string { - return lx.input[lx.start:lx.pos] -} - -func (lx *lexer) emit(typ itemType) { - lx.items <- item{typ, lx.current(), lx.line} - lx.start = lx.pos -} - -func (lx *lexer) emitTrim(typ itemType) { - lx.items <- item{typ, strings.TrimSpace(lx.current()), lx.line} - lx.start = lx.pos -} - -func (lx *lexer) next() (r rune) { - if lx.pos >= len(lx.input) { - lx.width = 0 - return eof - } - - if lx.input[lx.pos] == '\n' { - lx.line++ - } - r, lx.width = utf8.DecodeRuneInString(lx.input[lx.pos:]) - lx.pos += lx.width - return r -} - -// ignore skips over the pending input before this point. -func (lx *lexer) ignore() { - lx.start = lx.pos -} - -// backup steps back one rune. Can be called only once per call of next. -func (lx *lexer) backup() { - lx.pos -= lx.width - if lx.pos < len(lx.input) && lx.input[lx.pos] == '\n' { - lx.line-- - } -} - -// accept consumes the next rune if it's equal to `valid`. -func (lx *lexer) accept(valid rune) bool { - if lx.next() == valid { - return true - } - lx.backup() - return false -} - -// peek returns but does not consume the next rune in the input. -func (lx *lexer) peek() rune { - r := lx.next() - lx.backup() - return r -} - -// errorf stops all lexing by emitting an error and returning `nil`. -// Note that any value that is a character is escaped if it's a special -// character (new lines, tabs, etc.). -func (lx *lexer) errorf(format string, values ...interface{}) stateFn { - lx.items <- item{ - itemError, - fmt.Sprintf(format, values...), - lx.line, - } - return nil -} - -// lexTop consumes elements at the top level of TOML data. -func lexTop(lx *lexer) stateFn { - r := lx.next() - if isWhitespace(r) || isNL(r) { - return lexSkip(lx, lexTop) - } - - switch r { - case commentStart: - lx.push(lexTop) - return lexCommentStart - case tableStart: - return lexTableStart - case eof: - if lx.pos > lx.start { - return lx.errorf("Unexpected EOF.") - } - lx.emit(itemEOF) - return nil - } - - // At this point, the only valid item can be a key, so we back up - // and let the key lexer do the rest. - lx.backup() - lx.push(lexTopEnd) - return lexKeyStart -} - -// lexTopEnd is entered whenever a top-level item has been consumed. (A value -// or a table.) It must see only whitespace, and will turn back to lexTop -// upon a new line. If it sees EOF, it will quit the lexer successfully. -func lexTopEnd(lx *lexer) stateFn { - r := lx.next() - switch { - case r == commentStart: - // a comment will read to a new line for us. 
- lx.push(lexTop) - return lexCommentStart - case isWhitespace(r): - return lexTopEnd - case isNL(r): - lx.ignore() - return lexTop - case r == eof: - lx.ignore() - return lexTop - } - return lx.errorf("Expected a top-level item to end with a new line, "+ - "comment or EOF, but got %q instead.", r) -} - -// lexTable lexes the beginning of a table. Namely, it makes sure that -// it starts with a character other than '.' and ']'. -// It assumes that '[' has already been consumed. -// It also handles the case that this is an item in an array of tables. -// e.g., '[[name]]'. -func lexTableStart(lx *lexer) stateFn { - if lx.peek() == arrayTableStart { - lx.next() - lx.emit(itemArrayTableStart) - lx.push(lexArrayTableEnd) - } else { - lx.emit(itemTableStart) - lx.push(lexTableEnd) - } - return lexTableNameStart -} - -func lexTableEnd(lx *lexer) stateFn { - lx.emit(itemTableEnd) - return lexTopEnd -} - -func lexArrayTableEnd(lx *lexer) stateFn { - if r := lx.next(); r != arrayTableEnd { - return lx.errorf("Expected end of table array name delimiter %q, "+ - "but got %q instead.", arrayTableEnd, r) - } - lx.emit(itemArrayTableEnd) - return lexTopEnd -} - -func lexTableNameStart(lx *lexer) stateFn { - switch r := lx.peek(); { - case r == tableEnd || r == eof: - return lx.errorf("Unexpected end of table name. (Table names cannot " + - "be empty.)") - case r == tableSep: - return lx.errorf("Unexpected table separator. (Table names cannot " + - "be empty.)") - case r == stringStart || r == rawStringStart: - lx.ignore() - lx.push(lexTableNameEnd) - return lexValue // reuse string lexing - case isWhitespace(r): - return lexTableNameStart - default: - return lexBareTableName - } -} - -// lexTableName lexes the name of a table. It assumes that at least one -// valid character for the table has already been read. -func lexBareTableName(lx *lexer) stateFn { - switch r := lx.next(); { - case isBareKeyChar(r): - return lexBareTableName - case r == tableSep || r == tableEnd: - lx.backup() - lx.emitTrim(itemText) - return lexTableNameEnd - default: - return lx.errorf("Bare keys cannot contain %q.", r) - } -} - -// lexTableNameEnd reads the end of a piece of a table name, optionally -// consuming whitespace. -func lexTableNameEnd(lx *lexer) stateFn { - switch r := lx.next(); { - case isWhitespace(r): - return lexTableNameEnd - case r == tableSep: - lx.ignore() - return lexTableNameStart - case r == tableEnd: - return lx.pop() - default: - return lx.errorf("Expected '.' or ']' to end table name, but got %q "+ - "instead.", r) - } -} - -// lexKeyStart consumes a key name up until the first non-whitespace character. -// lexKeyStart will ignore whitespace. -func lexKeyStart(lx *lexer) stateFn { - r := lx.peek() - switch { - case r == keySep: - return lx.errorf("Unexpected key separator %q.", keySep) - case isWhitespace(r) || isNL(r): - lx.next() - return lexSkip(lx, lexKeyStart) - case r == stringStart || r == rawStringStart: - lx.ignore() - lx.emit(itemKeyStart) - lx.push(lexKeyEnd) - return lexValue // reuse string lexing - default: - lx.ignore() - lx.emit(itemKeyStart) - return lexBareKey - } -} - -// lexBareKey consumes the text of a bare key. Assumes that the first character -// (which is not whitespace) has not yet been consumed. 
-func lexBareKey(lx *lexer) stateFn { - switch r := lx.next(); { - case isBareKeyChar(r): - return lexBareKey - case isWhitespace(r): - lx.emitTrim(itemText) - return lexKeyEnd - case r == keySep: - lx.backup() - lx.emitTrim(itemText) - return lexKeyEnd - default: - return lx.errorf("Bare keys cannot contain %q.", r) - } -} - -// lexKeyEnd consumes the end of a key and trims whitespace (up to the key -// separator). -func lexKeyEnd(lx *lexer) stateFn { - switch r := lx.next(); { - case r == keySep: - return lexSkip(lx, lexValue) - case isWhitespace(r): - return lexSkip(lx, lexKeyEnd) - default: - return lx.errorf("Expected key separator %q, but got %q instead.", - keySep, r) - } -} - -// lexValue starts the consumption of a value anywhere a value is expected. -// lexValue will ignore whitespace. -// After a value is lexed, the last state on the next is popped and returned. -func lexValue(lx *lexer) stateFn { - // We allow whitespace to precede a value, but NOT new lines. - // In array syntax, the array states are responsible for ignoring new - // lines. - r := lx.next() - if isWhitespace(r) { - return lexSkip(lx, lexValue) - } - - switch { - case r == arrayStart: - lx.ignore() - lx.emit(itemArray) - return lexArrayValue - case r == stringStart: - if lx.accept(stringStart) { - if lx.accept(stringStart) { - lx.ignore() // Ignore """ - return lexMultilineString - } - lx.backup() - } - lx.ignore() // ignore the '"' - return lexString - case r == rawStringStart: - if lx.accept(rawStringStart) { - if lx.accept(rawStringStart) { - lx.ignore() // Ignore """ - return lexMultilineRawString - } - lx.backup() - } - lx.ignore() // ignore the "'" - return lexRawString - case r == 't': - return lexTrue - case r == 'f': - return lexFalse - case r == '-': - return lexNumberStart - case isDigit(r): - lx.backup() // avoid an extra state and use the same as above - return lexNumberOrDateStart - case r == '.': // special error case, be kind to users - return lx.errorf("Floats must start with a digit, not '.'.") - } - return lx.errorf("Expected value but found %q instead.", r) -} - -// lexArrayValue consumes one value in an array. It assumes that '[' or ',' -// have already been consumed. All whitespace and new lines are ignored. -func lexArrayValue(lx *lexer) stateFn { - r := lx.next() - switch { - case isWhitespace(r) || isNL(r): - return lexSkip(lx, lexArrayValue) - case r == commentStart: - lx.push(lexArrayValue) - return lexCommentStart - case r == arrayValTerm: - return lx.errorf("Unexpected array value terminator %q.", - arrayValTerm) - case r == arrayEnd: - return lexArrayEnd - } - - lx.backup() - lx.push(lexArrayValueEnd) - return lexValue -} - -// lexArrayValueEnd consumes the cruft between values of an array. Namely, -// it ignores whitespace and expects either a ',' or a ']'. -func lexArrayValueEnd(lx *lexer) stateFn { - r := lx.next() - switch { - case isWhitespace(r) || isNL(r): - return lexSkip(lx, lexArrayValueEnd) - case r == commentStart: - lx.push(lexArrayValueEnd) - return lexCommentStart - case r == arrayValTerm: - lx.ignore() - return lexArrayValue // move on to the next value - case r == arrayEnd: - return lexArrayEnd - } - return lx.errorf("Expected an array value terminator %q or an array "+ - "terminator %q, but got %q instead.", arrayValTerm, arrayEnd, r) -} - -// lexArrayEnd finishes the lexing of an array. It assumes that a ']' has -// just been consumed. 
-func lexArrayEnd(lx *lexer) stateFn { - lx.ignore() - lx.emit(itemArrayEnd) - return lx.pop() -} - -// lexString consumes the inner contents of a string. It assumes that the -// beginning '"' has already been consumed and ignored. -func lexString(lx *lexer) stateFn { - r := lx.next() - switch { - case isNL(r): - return lx.errorf("Strings cannot contain new lines.") - case r == '\\': - lx.push(lexString) - return lexStringEscape - case r == stringEnd: - lx.backup() - lx.emit(itemString) - lx.next() - lx.ignore() - return lx.pop() - } - return lexString -} - -// lexMultilineString consumes the inner contents of a string. It assumes that -// the beginning '"""' has already been consumed and ignored. -func lexMultilineString(lx *lexer) stateFn { - r := lx.next() - switch { - case r == '\\': - return lexMultilineStringEscape - case r == stringEnd: - if lx.accept(stringEnd) { - if lx.accept(stringEnd) { - lx.backup() - lx.backup() - lx.backup() - lx.emit(itemMultilineString) - lx.next() - lx.next() - lx.next() - lx.ignore() - return lx.pop() - } - lx.backup() - } - } - return lexMultilineString -} - -// lexRawString consumes a raw string. Nothing can be escaped in such a string. -// It assumes that the beginning "'" has already been consumed and ignored. -func lexRawString(lx *lexer) stateFn { - r := lx.next() - switch { - case isNL(r): - return lx.errorf("Strings cannot contain new lines.") - case r == rawStringEnd: - lx.backup() - lx.emit(itemRawString) - lx.next() - lx.ignore() - return lx.pop() - } - return lexRawString -} - -// lexMultilineRawString consumes a raw string. Nothing can be escaped in such -// a string. It assumes that the beginning "'" has already been consumed and -// ignored. -func lexMultilineRawString(lx *lexer) stateFn { - r := lx.next() - switch { - case r == rawStringEnd: - if lx.accept(rawStringEnd) { - if lx.accept(rawStringEnd) { - lx.backup() - lx.backup() - lx.backup() - lx.emit(itemRawMultilineString) - lx.next() - lx.next() - lx.next() - lx.ignore() - return lx.pop() - } - lx.backup() - } - } - return lexMultilineRawString -} - -// lexMultilineStringEscape consumes an escaped character. It assumes that the -// preceding '\\' has already been consumed. -func lexMultilineStringEscape(lx *lexer) stateFn { - // Handle the special case first: - if isNL(lx.next()) { - lx.next() - return lexMultilineString - } else { - lx.backup() - lx.push(lexMultilineString) - return lexStringEscape(lx) - } -} - -func lexStringEscape(lx *lexer) stateFn { - r := lx.next() - switch r { - case 'b': - fallthrough - case 't': - fallthrough - case 'n': - fallthrough - case 'f': - fallthrough - case 'r': - fallthrough - case '"': - fallthrough - case '\\': - return lx.pop() - case 'u': - return lexShortUnicodeEscape - case 'U': - return lexLongUnicodeEscape - } - return lx.errorf("Invalid escape character %q. 
Only the following "+ - "escape characters are allowed: "+ - "\\b, \\t, \\n, \\f, \\r, \\\", \\/, \\\\, "+ - "\\uXXXX and \\UXXXXXXXX.", r) -} - -func lexShortUnicodeEscape(lx *lexer) stateFn { - var r rune - for i := 0; i < 4; i++ { - r = lx.next() - if !isHexadecimal(r) { - return lx.errorf("Expected four hexadecimal digits after '\\u', "+ - "but got '%s' instead.", lx.current()) - } - } - return lx.pop() -} - -func lexLongUnicodeEscape(lx *lexer) stateFn { - var r rune - for i := 0; i < 8; i++ { - r = lx.next() - if !isHexadecimal(r) { - return lx.errorf("Expected eight hexadecimal digits after '\\U', "+ - "but got '%s' instead.", lx.current()) - } - } - return lx.pop() -} - -// lexNumberOrDateStart consumes either a (positive) integer, float or -// datetime. It assumes that NO negative sign has been consumed. -func lexNumberOrDateStart(lx *lexer) stateFn { - r := lx.next() - if !isDigit(r) { - if r == '.' { - return lx.errorf("Floats must start with a digit, not '.'.") - } else { - return lx.errorf("Expected a digit but got %q.", r) - } - } - return lexNumberOrDate -} - -// lexNumberOrDate consumes either a (positive) integer, float or datetime. -func lexNumberOrDate(lx *lexer) stateFn { - r := lx.next() - switch { - case r == '-': - if lx.pos-lx.start != 5 { - return lx.errorf("All ISO8601 dates must be in full Zulu form.") - } - return lexDateAfterYear - case isDigit(r): - return lexNumberOrDate - case r == '.': - return lexFloatStart - } - - lx.backup() - lx.emit(itemInteger) - return lx.pop() -} - -// lexDateAfterYear consumes a full Zulu Datetime in ISO8601 format. -// It assumes that "YYYY-" has already been consumed. -func lexDateAfterYear(lx *lexer) stateFn { - formats := []rune{ - // digits are '0'. - // everything else is direct equality. - '0', '0', '-', '0', '0', - 'T', - '0', '0', ':', '0', '0', ':', '0', '0', - 'Z', - } - for _, f := range formats { - r := lx.next() - if f == '0' { - if !isDigit(r) { - return lx.errorf("Expected digit in ISO8601 datetime, "+ - "but found %q instead.", r) - } - } else if f != r { - return lx.errorf("Expected %q in ISO8601 datetime, "+ - "but found %q instead.", f, r) - } - } - lx.emit(itemDatetime) - return lx.pop() -} - -// lexNumberStart consumes either an integer or a float. It assumes that -// a negative sign has already been read, but that *no* digits have been -// consumed. lexNumberStart will move to the appropriate integer or float -// states. -func lexNumberStart(lx *lexer) stateFn { - // we MUST see a digit. Even floats have to start with a digit. - r := lx.next() - if !isDigit(r) { - if r == '.' { - return lx.errorf("Floats must start with a digit, not '.'.") - } else { - return lx.errorf("Expected a digit but got %q.", r) - } - } - return lexNumber -} - -// lexNumber consumes an integer or a float after seeing the first digit. -func lexNumber(lx *lexer) stateFn { - r := lx.next() - switch { - case isDigit(r): - return lexNumber - case r == '.': - return lexFloatStart - } - - lx.backup() - lx.emit(itemInteger) - return lx.pop() -} - -// lexFloatStart starts the consumption of digits of a float after a '.'. -// Namely, at least one digit is required. -func lexFloatStart(lx *lexer) stateFn { - r := lx.next() - if !isDigit(r) { - return lx.errorf("Floats must have a digit after the '.', but got "+ - "%q instead.", r) - } - return lexFloat -} - -// lexFloat consumes the digits of a float after a '.'. -// Assumes that one digit has been consumed after a '.' already. 
-func lexFloat(lx *lexer) stateFn { - r := lx.next() - if isDigit(r) { - return lexFloat - } - - lx.backup() - lx.emit(itemFloat) - return lx.pop() -} - -// lexConst consumes the s[1:] in s. It assumes that s[0] has already been -// consumed. -func lexConst(lx *lexer, s string) stateFn { - for i := range s[1:] { - if r := lx.next(); r != rune(s[i+1]) { - return lx.errorf("Expected %q, but found %q instead.", s[:i+1], - s[:i]+string(r)) - } - } - return nil -} - -// lexTrue consumes the "rue" in "true". It assumes that 't' has already -// been consumed. -func lexTrue(lx *lexer) stateFn { - if fn := lexConst(lx, "true"); fn != nil { - return fn - } - lx.emit(itemBool) - return lx.pop() -} - -// lexFalse consumes the "alse" in "false". It assumes that 'f' has already -// been consumed. -func lexFalse(lx *lexer) stateFn { - if fn := lexConst(lx, "false"); fn != nil { - return fn - } - lx.emit(itemBool) - return lx.pop() -} - -// lexCommentStart begins the lexing of a comment. It will emit -// itemCommentStart and consume no characters, passing control to lexComment. -func lexCommentStart(lx *lexer) stateFn { - lx.ignore() - lx.emit(itemCommentStart) - return lexComment -} - -// lexComment lexes an entire comment. It assumes that '#' has been consumed. -// It will consume *up to* the first new line character, and pass control -// back to the last state on the stack. -func lexComment(lx *lexer) stateFn { - r := lx.peek() - if isNL(r) || r == eof { - lx.emit(itemText) - return lx.pop() - } - lx.next() - return lexComment -} - -// lexSkip ignores all slurped input and moves on to the next state. -func lexSkip(lx *lexer, nextState stateFn) stateFn { - return func(lx *lexer) stateFn { - lx.ignore() - return nextState - } -} - -// isWhitespace returns true if `r` is a whitespace character according -// to the spec. 
-func isWhitespace(r rune) bool { - return r == '\t' || r == ' ' -} - -func isNL(r rune) bool { - return r == '\n' || r == '\r' -} - -func isDigit(r rune) bool { - return r >= '0' && r <= '9' -} - -func isHexadecimal(r rune) bool { - return (r >= '0' && r <= '9') || - (r >= 'a' && r <= 'f') || - (r >= 'A' && r <= 'F') -} - -func isBareKeyChar(r rune) bool { - return (r >= 'A' && r <= 'Z') || - (r >= 'a' && r <= 'z') || - (r >= '0' && r <= '9') || - r == '_' || - r == '-' -} - -func (itype itemType) String() string { - switch itype { - case itemError: - return "Error" - case itemNIL: - return "NIL" - case itemEOF: - return "EOF" - case itemText: - return "Text" - case itemString: - return "String" - case itemRawString: - return "String" - case itemMultilineString: - return "String" - case itemRawMultilineString: - return "String" - case itemBool: - return "Bool" - case itemInteger: - return "Integer" - case itemFloat: - return "Float" - case itemDatetime: - return "DateTime" - case itemTableStart: - return "TableStart" - case itemTableEnd: - return "TableEnd" - case itemKeyStart: - return "KeyStart" - case itemArray: - return "Array" - case itemArrayEnd: - return "ArrayEnd" - case itemCommentStart: - return "CommentStart" - } - panic(fmt.Sprintf("BUG: Unknown type '%d'.", int(itype))) -} - -func (item item) String() string { - return fmt.Sprintf("(%s, %s)", item.typ.String(), item.val) -} diff --git a/vendor/github.com/BurntSushi/toml/parse.go b/vendor/github.com/BurntSushi/toml/parse.go deleted file mode 100644 index c6069be..0000000 --- a/vendor/github.com/BurntSushi/toml/parse.go +++ /dev/null @@ -1,498 +0,0 @@ -package toml - -import ( - "fmt" - "log" - "strconv" - "strings" - "time" - "unicode" - "unicode/utf8" -) - -type parser struct { - mapping map[string]interface{} - types map[string]tomlType - lx *lexer - - // A list of keys in the order that they appear in the TOML data. - ordered []Key - - // the full key for the current hash in scope - context Key - - // the base key name for everything except hashes - currentKey string - - // rough approximation of line number - approxLine int - - // A map of 'key.group.names' to whether they were created implicitly. 
- implicits map[string]bool -} - -type parseError string - -func (pe parseError) Error() string { - return string(pe) -} - -func parse(data string) (p *parser, err error) { - defer func() { - if r := recover(); r != nil { - var ok bool - if err, ok = r.(parseError); ok { - return - } - panic(r) - } - }() - - p = &parser{ - mapping: make(map[string]interface{}), - types: make(map[string]tomlType), - lx: lex(data), - ordered: make([]Key, 0), - implicits: make(map[string]bool), - } - for { - item := p.next() - if item.typ == itemEOF { - break - } - p.topLevel(item) - } - - return p, nil -} - -func (p *parser) panicf(format string, v ...interface{}) { - msg := fmt.Sprintf("Near line %d (last key parsed '%s'): %s", - p.approxLine, p.current(), fmt.Sprintf(format, v...)) - panic(parseError(msg)) -} - -func (p *parser) next() item { - it := p.lx.nextItem() - if it.typ == itemError { - p.panicf("%s", it.val) - } - return it -} - -func (p *parser) bug(format string, v ...interface{}) { - log.Fatalf("BUG: %s\n\n", fmt.Sprintf(format, v...)) -} - -func (p *parser) expect(typ itemType) item { - it := p.next() - p.assertEqual(typ, it.typ) - return it -} - -func (p *parser) assertEqual(expected, got itemType) { - if expected != got { - p.bug("Expected '%s' but got '%s'.", expected, got) - } -} - -func (p *parser) topLevel(item item) { - switch item.typ { - case itemCommentStart: - p.approxLine = item.line - p.expect(itemText) - case itemTableStart: - kg := p.next() - p.approxLine = kg.line - - var key Key - for ; kg.typ != itemTableEnd && kg.typ != itemEOF; kg = p.next() { - key = append(key, p.keyString(kg)) - } - p.assertEqual(itemTableEnd, kg.typ) - - p.establishContext(key, false) - p.setType("", tomlHash) - p.ordered = append(p.ordered, key) - case itemArrayTableStart: - kg := p.next() - p.approxLine = kg.line - - var key Key - for ; kg.typ != itemArrayTableEnd && kg.typ != itemEOF; kg = p.next() { - key = append(key, p.keyString(kg)) - } - p.assertEqual(itemArrayTableEnd, kg.typ) - - p.establishContext(key, true) - p.setType("", tomlArrayHash) - p.ordered = append(p.ordered, key) - case itemKeyStart: - kname := p.next() - p.approxLine = kname.line - p.currentKey = p.keyString(kname) - - val, typ := p.value(p.next()) - p.setValue(p.currentKey, val) - p.setType(p.currentKey, typ) - p.ordered = append(p.ordered, p.context.add(p.currentKey)) - p.currentKey = "" - default: - p.bug("Unexpected type at top level: %s", item.typ) - } -} - -// Gets a string for a key (or part of a key in a table name). -func (p *parser) keyString(it item) string { - switch it.typ { - case itemText: - return it.val - case itemString, itemMultilineString, - itemRawString, itemRawMultilineString: - s, _ := p.value(it) - return s.(string) - default: - p.bug("Unexpected key type: %s", it.typ) - panic("unreachable") - } -} - -// value translates an expected value from the lexer into a Go value wrapped -// as an empty interface. 
-func (p *parser) value(it item) (interface{}, tomlType) { - switch it.typ { - case itemString: - return p.replaceEscapes(it.val), p.typeOfPrimitive(it) - case itemMultilineString: - trimmed := stripFirstNewline(stripEscapedWhitespace(it.val)) - return p.replaceEscapes(trimmed), p.typeOfPrimitive(it) - case itemRawString: - return it.val, p.typeOfPrimitive(it) - case itemRawMultilineString: - return stripFirstNewline(it.val), p.typeOfPrimitive(it) - case itemBool: - switch it.val { - case "true": - return true, p.typeOfPrimitive(it) - case "false": - return false, p.typeOfPrimitive(it) - } - p.bug("Expected boolean value, but got '%s'.", it.val) - case itemInteger: - num, err := strconv.ParseInt(it.val, 10, 64) - if err != nil { - // See comment below for floats describing why we make a - // distinction between a bug and a user error. - if e, ok := err.(*strconv.NumError); ok && - e.Err == strconv.ErrRange { - - p.panicf("Integer '%s' is out of the range of 64-bit "+ - "signed integers.", it.val) - } else { - p.bug("Expected integer value, but got '%s'.", it.val) - } - } - return num, p.typeOfPrimitive(it) - case itemFloat: - num, err := strconv.ParseFloat(it.val, 64) - if err != nil { - // Distinguish float values. Normally, it'd be a bug if the lexer - // provides an invalid float, but it's possible that the float is - // out of range of valid values (which the lexer cannot determine). - // So mark the former as a bug but the latter as a legitimate user - // error. - // - // This is also true for integers. - if e, ok := err.(*strconv.NumError); ok && - e.Err == strconv.ErrRange { - - p.panicf("Float '%s' is out of the range of 64-bit "+ - "IEEE-754 floating-point numbers.", it.val) - } else { - p.bug("Expected float value, but got '%s'.", it.val) - } - } - return num, p.typeOfPrimitive(it) - case itemDatetime: - t, err := time.Parse("2006-01-02T15:04:05Z", it.val) - if err != nil { - p.bug("Expected Zulu formatted DateTime, but got '%s'.", it.val) - } - return t, p.typeOfPrimitive(it) - case itemArray: - array := make([]interface{}, 0) - types := make([]tomlType, 0) - - for it = p.next(); it.typ != itemArrayEnd; it = p.next() { - if it.typ == itemCommentStart { - p.expect(itemText) - continue - } - - val, typ := p.value(it) - array = append(array, val) - types = append(types, typ) - } - return array, p.typeOfArray(types) - } - p.bug("Unexpected value type: %s", it.typ) - panic("unreachable") -} - -// establishContext sets the current context of the parser, -// where the context is either a hash or an array of hashes. Which one is -// set depends on the value of the `array` parameter. -// -// Establishing the context also makes sure that the key isn't a duplicate, and -// will create implicit hashes automatically. -func (p *parser) establishContext(key Key, array bool) { - var ok bool - - // Always start at the top level and drill down for our context. - hashContext := p.mapping - keyContext := make(Key, 0) - - // We only need implicit hashes for key[0:-1] - for _, k := range key[0 : len(key)-1] { - _, ok = hashContext[k] - keyContext = append(keyContext, k) - - // No key? Make an implicit hash and move on. - if !ok { - p.addImplicit(keyContext) - hashContext[k] = make(map[string]interface{}) - } - - // If the hash context is actually an array of tables, then set - // the hash context to the last element in that array. - // - // Otherwise, it better be a table, since this MUST be a key group (by - // virtue of it not being the last element in a key). 
- switch t := hashContext[k].(type) { - case []map[string]interface{}: - hashContext = t[len(t)-1] - case map[string]interface{}: - hashContext = t - default: - p.panicf("Key '%s' was already created as a hash.", keyContext) - } - } - - p.context = keyContext - if array { - // If this is the first element for this array, then allocate a new - // list of tables for it. - k := key[len(key)-1] - if _, ok := hashContext[k]; !ok { - hashContext[k] = make([]map[string]interface{}, 0, 5) - } - - // Add a new table. But make sure the key hasn't already been used - // for something else. - if hash, ok := hashContext[k].([]map[string]interface{}); ok { - hashContext[k] = append(hash, make(map[string]interface{})) - } else { - p.panicf("Key '%s' was already created and cannot be used as "+ - "an array.", keyContext) - } - } else { - p.setValue(key[len(key)-1], make(map[string]interface{})) - } - p.context = append(p.context, key[len(key)-1]) -} - -// setValue sets the given key to the given value in the current context. -// It will make sure that the key hasn't already been defined, account for -// implicit key groups. -func (p *parser) setValue(key string, value interface{}) { - var tmpHash interface{} - var ok bool - - hash := p.mapping - keyContext := make(Key, 0) - for _, k := range p.context { - keyContext = append(keyContext, k) - if tmpHash, ok = hash[k]; !ok { - p.bug("Context for key '%s' has not been established.", keyContext) - } - switch t := tmpHash.(type) { - case []map[string]interface{}: - // The context is a table of hashes. Pick the most recent table - // defined as the current hash. - hash = t[len(t)-1] - case map[string]interface{}: - hash = t - default: - p.bug("Expected hash to have type 'map[string]interface{}', but "+ - "it has '%T' instead.", tmpHash) - } - } - keyContext = append(keyContext, key) - - if _, ok := hash[key]; ok { - // Typically, if the given key has already been set, then we have - // to raise an error since duplicate keys are disallowed. However, - // it's possible that a key was previously defined implicitly. In this - // case, it is allowed to be redefined concretely. (See the - // `tests/valid/implicit-and-explicit-after.toml` test in `toml-test`.) - // - // But we have to make sure to stop marking it as an implicit. (So that - // another redefinition provokes an error.) - // - // Note that since it has already been defined (as a hash), we don't - // want to overwrite it. So our business is done. - if p.isImplicit(keyContext) { - p.removeImplicit(keyContext) - return - } - - // Otherwise, we have a concrete key trying to override a previous - // key, which is *always* wrong. - p.panicf("Key '%s' has already been defined.", keyContext) - } - hash[key] = value -} - -// setType sets the type of a particular value at a given key. -// It should be called immediately AFTER setValue. -// -// Note that if `key` is empty, then the type given will be applied to the -// current context (which is either a table or an array of tables). -func (p *parser) setType(key string, typ tomlType) { - keyContext := make(Key, 0, len(p.context)+1) - for _, k := range p.context { - keyContext = append(keyContext, k) - } - if len(key) > 0 { // allow type setting for hashes - keyContext = append(keyContext, key) - } - p.types[keyContext.String()] = typ -} - -// addImplicit sets the given Key as having been created implicitly. 
-func (p *parser) addImplicit(key Key) { - p.implicits[key.String()] = true -} - -// removeImplicit stops tagging the given key as having been implicitly -// created. -func (p *parser) removeImplicit(key Key) { - p.implicits[key.String()] = false -} - -// isImplicit returns true if the key group pointed to by the key was created -// implicitly. -func (p *parser) isImplicit(key Key) bool { - return p.implicits[key.String()] -} - -// current returns the full key name of the current context. -func (p *parser) current() string { - if len(p.currentKey) == 0 { - return p.context.String() - } - if len(p.context) == 0 { - return p.currentKey - } - return fmt.Sprintf("%s.%s", p.context, p.currentKey) -} - -func stripFirstNewline(s string) string { - if len(s) == 0 || s[0] != '\n' { - return s - } - return s[1:len(s)] -} - -func stripEscapedWhitespace(s string) string { - esc := strings.Split(s, "\\\n") - if len(esc) > 1 { - for i := 1; i < len(esc); i++ { - esc[i] = strings.TrimLeftFunc(esc[i], unicode.IsSpace) - } - } - return strings.Join(esc, "") -} - -func (p *parser) replaceEscapes(str string) string { - var replaced []rune - s := []byte(str) - r := 0 - for r < len(s) { - if s[r] != '\\' { - c, size := utf8.DecodeRune(s[r:]) - r += size - replaced = append(replaced, c) - continue - } - r += 1 - if r >= len(s) { - p.bug("Escape sequence at end of string.") - return "" - } - switch s[r] { - default: - p.bug("Expected valid escape code after \\, but got %q.", s[r]) - return "" - case 'b': - replaced = append(replaced, rune(0x0008)) - r += 1 - case 't': - replaced = append(replaced, rune(0x0009)) - r += 1 - case 'n': - replaced = append(replaced, rune(0x000A)) - r += 1 - case 'f': - replaced = append(replaced, rune(0x000C)) - r += 1 - case 'r': - replaced = append(replaced, rune(0x000D)) - r += 1 - case '"': - replaced = append(replaced, rune(0x0022)) - r += 1 - case '\\': - replaced = append(replaced, rune(0x005C)) - r += 1 - case 'u': - // At this point, we know we have a Unicode escape of the form - // `uXXXX` at [r, r+5). (Because the lexer guarantees this - // for us.) - escaped := p.asciiEscapeToUnicode(s[r+1 : r+5]) - replaced = append(replaced, escaped) - r += 5 - case 'U': - // At this point, we know we have a Unicode escape of the form - // `uXXXX` at [r, r+9). (Because the lexer guarantees this - // for us.) - escaped := p.asciiEscapeToUnicode(s[r+1 : r+9]) - replaced = append(replaced, escaped) - r += 9 - } - } - return string(replaced) -} - -func (p *parser) asciiEscapeToUnicode(bs []byte) rune { - s := string(bs) - hex, err := strconv.ParseUint(strings.ToLower(s), 16, 32) - if err != nil { - p.bug("Could not parse '%s' as a hexadecimal number, but the "+ - "lexer claims it's OK: %s", s, err) - } - - // BUG(burntsushi) - // I honestly don't understand how this works. I can't seem - // to find a way to make this fail. I figured this would fail on invalid - // UTF-8 characters like U+DCFF, but it doesn't. 
- if !utf8.ValidString(string(rune(hex))) { - p.panicf("Escaped character '\\u%s' is not valid UTF-8.", s) - } - return rune(hex) -} - -func isStringType(ty itemType) bool { - return ty == itemString || ty == itemMultilineString || - ty == itemRawString || ty == itemRawMultilineString -} diff --git a/vendor/github.com/BurntSushi/toml/session.vim b/vendor/github.com/BurntSushi/toml/session.vim deleted file mode 100644 index 562164b..0000000 --- a/vendor/github.com/BurntSushi/toml/session.vim +++ /dev/null @@ -1 +0,0 @@ -au BufWritePost *.go silent!make tags > /dev/null 2>&1 diff --git a/vendor/github.com/BurntSushi/toml/type_check.go b/vendor/github.com/BurntSushi/toml/type_check.go deleted file mode 100644 index c73f8af..0000000 --- a/vendor/github.com/BurntSushi/toml/type_check.go +++ /dev/null @@ -1,91 +0,0 @@ -package toml - -// tomlType represents any Go type that corresponds to a TOML type. -// While the first draft of the TOML spec has a simplistic type system that -// probably doesn't need this level of sophistication, we seem to be militating -// toward adding real composite types. -type tomlType interface { - typeString() string -} - -// typeEqual accepts any two types and returns true if they are equal. -func typeEqual(t1, t2 tomlType) bool { - if t1 == nil || t2 == nil { - return false - } - return t1.typeString() == t2.typeString() -} - -func typeIsHash(t tomlType) bool { - return typeEqual(t, tomlHash) || typeEqual(t, tomlArrayHash) -} - -type tomlBaseType string - -func (btype tomlBaseType) typeString() string { - return string(btype) -} - -func (btype tomlBaseType) String() string { - return btype.typeString() -} - -var ( - tomlInteger tomlBaseType = "Integer" - tomlFloat tomlBaseType = "Float" - tomlDatetime tomlBaseType = "Datetime" - tomlString tomlBaseType = "String" - tomlBool tomlBaseType = "Bool" - tomlArray tomlBaseType = "Array" - tomlHash tomlBaseType = "Hash" - tomlArrayHash tomlBaseType = "ArrayHash" -) - -// typeOfPrimitive returns a tomlType of any primitive value in TOML. -// Primitive values are: Integer, Float, Datetime, String and Bool. -// -// Passing a lexer item other than the following will cause a BUG message -// to occur: itemString, itemBool, itemInteger, itemFloat, itemDatetime. -func (p *parser) typeOfPrimitive(lexItem item) tomlType { - switch lexItem.typ { - case itemInteger: - return tomlInteger - case itemFloat: - return tomlFloat - case itemDatetime: - return tomlDatetime - case itemString: - return tomlString - case itemMultilineString: - return tomlString - case itemRawString: - return tomlString - case itemRawMultilineString: - return tomlString - case itemBool: - return tomlBool - } - p.bug("Cannot infer primitive type of lex item '%s'.", lexItem) - panic("unreachable") -} - -// typeOfArray returns a tomlType for an array given a list of types of its -// values. -// -// In the current spec, if an array is homogeneous, then its type is always -// "Array". If the array is not homogeneous, an error is generated. -func (p *parser) typeOfArray(types []tomlType) tomlType { - // Empty arrays are cool. 
- if len(types) == 0 { - return tomlArray - } - - theType := types[0] - for _, t := range types[1:] { - if !typeEqual(theType, t) { - p.panicf("Array contains values of type '%s' and '%s', but "+ - "arrays must be homogeneous.", theType, t) - } - } - return tomlArray -} diff --git a/vendor/github.com/BurntSushi/toml/type_fields.go b/vendor/github.com/BurntSushi/toml/type_fields.go deleted file mode 100644 index 7592f87..0000000 --- a/vendor/github.com/BurntSushi/toml/type_fields.go +++ /dev/null @@ -1,241 +0,0 @@ -package toml - -// Struct field handling is adapted from code in encoding/json: -// -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the Go distribution. - -import ( - "reflect" - "sort" - "sync" -) - -// A field represents a single field found in a struct. -type field struct { - name string // the name of the field (`toml` tag included) - tag bool // whether field has a `toml` tag - index []int // represents the depth of an anonymous field - typ reflect.Type // the type of the field -} - -// byName sorts field by name, breaking ties with depth, -// then breaking ties with "name came from toml tag", then -// breaking ties with index sequence. -type byName []field - -func (x byName) Len() int { return len(x) } - -func (x byName) Swap(i, j int) { x[i], x[j] = x[j], x[i] } - -func (x byName) Less(i, j int) bool { - if x[i].name != x[j].name { - return x[i].name < x[j].name - } - if len(x[i].index) != len(x[j].index) { - return len(x[i].index) < len(x[j].index) - } - if x[i].tag != x[j].tag { - return x[i].tag - } - return byIndex(x).Less(i, j) -} - -// byIndex sorts field by index sequence. -type byIndex []field - -func (x byIndex) Len() int { return len(x) } - -func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] } - -func (x byIndex) Less(i, j int) bool { - for k, xik := range x[i].index { - if k >= len(x[j].index) { - return false - } - if xik != x[j].index[k] { - return xik < x[j].index[k] - } - } - return len(x[i].index) < len(x[j].index) -} - -// typeFields returns a list of fields that TOML should recognize for the given -// type. The algorithm is breadth-first search over the set of structs to -// include - the top struct and then any reachable anonymous structs. -func typeFields(t reflect.Type) []field { - // Anonymous fields to explore at the current level and the next. - current := []field{} - next := []field{{typ: t}} - - // Count of queued names for current level and the next. - count := map[reflect.Type]int{} - nextCount := map[reflect.Type]int{} - - // Types already visited at an earlier level. - visited := map[reflect.Type]bool{} - - // Fields found. - var fields []field - - for len(next) > 0 { - current, next = next, current[:0] - count, nextCount = nextCount, map[reflect.Type]int{} - - for _, f := range current { - if visited[f.typ] { - continue - } - visited[f.typ] = true - - // Scan f.typ for fields to include. - for i := 0; i < f.typ.NumField(); i++ { - sf := f.typ.Field(i) - if sf.PkgPath != "" { // unexported - continue - } - name := sf.Tag.Get("toml") - if name == "-" { - continue - } - index := make([]int, len(f.index)+1) - copy(index, f.index) - index[len(f.index)] = i - - ft := sf.Type - if ft.Name() == "" && ft.Kind() == reflect.Ptr { - // Follow pointer. - ft = ft.Elem() - } - - // Record found field and index sequence. 
- if name != "" || !sf.Anonymous || ft.Kind() != reflect.Struct { - tagged := name != "" - if name == "" { - name = sf.Name - } - fields = append(fields, field{name, tagged, index, ft}) - if count[f.typ] > 1 { - // If there were multiple instances, add a second, - // so that the annihilation code will see a duplicate. - // It only cares about the distinction between 1 or 2, - // so don't bother generating any more copies. - fields = append(fields, fields[len(fields)-1]) - } - continue - } - - // Record new anonymous struct to explore in next round. - nextCount[ft]++ - if nextCount[ft] == 1 { - f := field{name: ft.Name(), index: index, typ: ft} - next = append(next, f) - } - } - } - } - - sort.Sort(byName(fields)) - - // Delete all fields that are hidden by the Go rules for embedded fields, - // except that fields with TOML tags are promoted. - - // The fields are sorted in primary order of name, secondary order - // of field index length. Loop over names; for each name, delete - // hidden fields by choosing the one dominant field that survives. - out := fields[:0] - for advance, i := 0, 0; i < len(fields); i += advance { - // One iteration per name. - // Find the sequence of fields with the name of this first field. - fi := fields[i] - name := fi.name - for advance = 1; i+advance < len(fields); advance++ { - fj := fields[i+advance] - if fj.name != name { - break - } - } - if advance == 1 { // Only one field with this name - out = append(out, fi) - continue - } - dominant, ok := dominantField(fields[i : i+advance]) - if ok { - out = append(out, dominant) - } - } - - fields = out - sort.Sort(byIndex(fields)) - - return fields -} - -// dominantField looks through the fields, all of which are known to -// have the same name, to find the single field that dominates the -// others using Go's embedding rules, modified by the presence of -// TOML tags. If there are multiple top-level fields, the boolean -// will be false: This condition is an error in Go and we skip all -// the fields. -func dominantField(fields []field) (field, bool) { - // The fields are sorted in increasing index-length order. The winner - // must therefore be one with the shortest index length. Drop all - // longer entries, which is easy: just truncate the slice. - length := len(fields[0].index) - tagged := -1 // Index of first tagged field. - for i, f := range fields { - if len(f.index) > length { - fields = fields[:i] - break - } - if f.tag { - if tagged >= 0 { - // Multiple tagged fields at the same level: conflict. - // Return no field. - return field{}, false - } - tagged = i - } - } - if tagged >= 0 { - return fields[tagged], true - } - // All remaining fields have the same length. If there's more than one, - // we have a conflict (two fields named "X" at the same level) and we - // return no field. - if len(fields) > 1 { - return field{}, false - } - return fields[0], true -} - -var fieldCache struct { - sync.RWMutex - m map[reflect.Type][]field -} - -// cachedTypeFields is like typeFields but uses a cache to avoid repeated work. -func cachedTypeFields(t reflect.Type) []field { - fieldCache.RLock() - f := fieldCache.m[t] - fieldCache.RUnlock() - if f != nil { - return f - } - - // Compute fields without lock. - // Might duplicate effort but won't hold other computations back. 
- f = typeFields(t) - if f == nil { - f = []field{} - } - - fieldCache.Lock() - if fieldCache.m == nil { - fieldCache.m = map[reflect.Type][]field{} - } - fieldCache.m[t] = f - fieldCache.Unlock() - return f -} diff --git a/vendor/github.com/Sirupsen/logrus/LICENSE b/vendor/github.com/Sirupsen/logrus/LICENSE deleted file mode 100644 index f090cb4..0000000 --- a/vendor/github.com/Sirupsen/logrus/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2014 Simon Eskildsen - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. diff --git a/vendor/github.com/Sirupsen/logrus/README.md b/vendor/github.com/Sirupsen/logrus/README.md deleted file mode 100644 index e755e7c..0000000 --- a/vendor/github.com/Sirupsen/logrus/README.md +++ /dev/null @@ -1,377 +0,0 @@ -# Logrus :walrus: [![Build Status](https://travis-ci.org/Sirupsen/logrus.svg?branch=master)](https://travis-ci.org/Sirupsen/logrus) [![godoc reference](https://godoc.org/github.com/Sirupsen/logrus?status.png)][godoc] - -Logrus is a structured logger for Go (golang), completely API compatible with -the standard library logger. [Godoc][godoc]. **Please note the Logrus API is not -yet stable (pre 1.0). Logrus itself is completely stable and has been used in -many large deployments. 
The core API is unlikely to change much but please -version control your Logrus to make sure you aren't fetching latest `master` on -every build.** - -Nicely color-coded in development (when a TTY is attached, otherwise just -plain text): - -![Colored](http://i.imgur.com/PY7qMwd.png) - -With `log.Formatter = new(logrus.JSONFormatter)`, for easy parsing by logstash -or Splunk: - -```json -{"animal":"walrus","level":"info","msg":"A group of walrus emerges from the -ocean","size":10,"time":"2014-03-10 19:57:38.562264131 -0400 EDT"} - -{"level":"warning","msg":"The group's number increased tremendously!", -"number":122,"omg":true,"time":"2014-03-10 19:57:38.562471297 -0400 EDT"} - -{"animal":"walrus","level":"info","msg":"A giant walrus appears!", -"size":10,"time":"2014-03-10 19:57:38.562500591 -0400 EDT"} - -{"animal":"walrus","level":"info","msg":"Tremendously sized cow enters the ocean.", -"size":9,"time":"2014-03-10 19:57:38.562527896 -0400 EDT"} - -{"level":"fatal","msg":"The ice breaks!","number":100,"omg":true, -"time":"2014-03-10 19:57:38.562543128 -0400 EDT"} -``` - -With the default `log.Formatter = new(logrus.TextFormatter)` when a TTY is not -attached, the output is compatible with the -[logfmt](http://godoc.org/github.com/kr/logfmt) format: - -```text -time="2014-04-20 15:36:23.830442383 -0400 EDT" level="info" msg="A group of walrus emerges from the ocean" animal="walrus" size=10 -time="2014-04-20 15:36:23.830584199 -0400 EDT" level="warning" msg="The group's number increased tremendously!" omg=true number=122 -time="2014-04-20 15:36:23.830596521 -0400 EDT" level="info" msg="A giant walrus appears!" animal="walrus" size=10 -time="2014-04-20 15:36:23.830611837 -0400 EDT" level="info" msg="Tremendously sized cow enters the ocean." animal="walrus" size=9 -time="2014-04-20 15:36:23.830626464 -0400 EDT" level="fatal" msg="The ice breaks!" omg=true number=100 -``` - -#### Example - -The simplest way to use Logrus is simply the package-level exported logger: - -```go -package main - -import ( - log "github.com/Sirupsen/logrus" -) - -func main() { - log.WithFields(log.Fields{ - "animal": "walrus", - }).Info("A walrus appears") -} -``` - -Note that it's completely api-compatible with the stdlib logger, so you can -replace your `log` imports everywhere with `log "github.com/Sirupsen/logrus"` -and you'll now have the flexibility of Logrus. You can customize it all you -want: - -```go -package main - -import ( - "os" - log "github.com/Sirupsen/logrus" - "github.com/Sirupsen/logrus/hooks/airbrake" -) - -func init() { - // Log as JSON instead of the default ASCII formatter. - log.SetFormatter(&log.JSONFormatter{}) - - // Use the Airbrake hook to report errors that have Error severity or above to - // an exception tracker. You can create custom hooks, see the Hooks section. - log.AddHook(&logrus_airbrake.AirbrakeHook{}) - - // Output to stderr instead of stdout, could also be a file. - log.SetOutput(os.Stderr) - - // Only log the warning severity or above. 
- log.SetLevel(log.WarnLevel) -} - -func main() { - log.WithFields(log.Fields{ - "animal": "walrus", - "size": 10, - }).Info("A group of walrus emerges from the ocean") - - log.WithFields(log.Fields{ - "omg": true, - "number": 122, - }).Warn("The group's number increased tremendously!") - - log.WithFields(log.Fields{ - "omg": true, - "number": 100, - }).Fatal("The ice breaks!") -} -``` - -For more advanced usage such as logging to multiple locations from the same -application, you can also create an instance of the `logrus` Logger: - -```go -package main - -import ( - "github.com/Sirupsen/logrus" -) - -// Create a new instance of the logger. You can have any number of instances. -var log = logrus.New() - -func main() { - // The API for setting attributes is a little different than the package level - // exported logger. See Godoc. - log.Out = os.Stderr - - log.WithFields(logrus.Fields{ - "animal": "walrus", - "size": 10, - }).Info("A group of walrus emerges from the ocean") -} -``` - -#### Fields - -Logrus encourages careful, structured logging though logging fields instead of -long, unparseable error messages. For example, instead of: `log.Fatalf("Failed -to send event %s to topic %s with key %d")`, you should log the much more -discoverable: - -```go -log.WithFields(log.Fields{ - "event": event, - "topic": topic, - "key": key, -}).Fatal("Failed to send event") -``` - -We've found this API forces you to think about logging in a way that produces -much more useful logging messages. We've been in countless situations where just -a single added field to a log statement that was already there would've saved us -hours. The `WithFields` call is optional. - -In general, with Logrus using any of the `printf`-family functions should be -seen as a hint you should add a field, however, you can still use the -`printf`-family functions with Logrus. - -#### Hooks - -You can add hooks for logging levels. For example to send errors to an exception -tracking service on `Error`, `Fatal` and `Panic`, info to StatsD or log to -multiple places simultaneously, e.g. syslog. - -```go -// Not the real implementation of the Airbrake hook. Just a simple sample. -import ( - log "github.com/Sirupsen/logrus" -) - -func init() { - log.AddHook(new(AirbrakeHook)) -} - -type AirbrakeHook struct{} - -// `Fire()` takes the entry that the hook is fired for. `entry.Data[]` contains -// the fields for the entry. See the Fields section of the README. -func (hook *AirbrakeHook) Fire(entry *logrus.Entry) error { - err := airbrake.Notify(entry.Data["error"].(error)) - if err != nil { - log.WithFields(log.Fields{ - "source": "airbrake", - "endpoint": airbrake.Endpoint, - }).Info("Failed to send error to Airbrake") - } - - return nil -} - -// `Levels()` returns a slice of `Levels` the hook is fired for. -func (hook *AirbrakeHook) Levels() []log.Level { - return []log.Level{ - log.ErrorLevel, - log.FatalLevel, - log.PanicLevel, - } -} -``` - -Logrus comes with built-in hooks. 
Add those, or your custom hook, in `init`: - -```go -import ( - log "github.com/Sirupsen/logrus" - "github.com/Sirupsen/logrus/hooks/airbrake" - "github.com/Sirupsen/logrus/hooks/syslog" - "log/syslog" -) - -func init() { - log.AddHook(new(logrus_airbrake.AirbrakeHook)) - - hook, err := logrus_syslog.NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "") - if err != nil { - log.Error("Unable to connect to local syslog daemon") - } else { - log.AddHook(hook) - } -} -``` - -* [`github.com/Sirupsen/logrus/hooks/airbrake`](https://github.com/Sirupsen/logrus/blob/master/hooks/airbrake/airbrake.go) - Send errors to an exception tracking service compatible with the Airbrake API. - Uses [`airbrake-go`](https://github.com/tobi/airbrake-go) behind the scenes. - -* [`github.com/Sirupsen/logrus/hooks/papertrail`](https://github.com/Sirupsen/logrus/blob/master/hooks/papertrail/papertrail.go) - Send errors to the Papertrail hosted logging service via UDP. - -* [`github.com/Sirupsen/logrus/hooks/syslog`](https://github.com/Sirupsen/logrus/blob/master/hooks/syslog/syslog.go) - Send errors to remote syslog server. - Uses standard library `log/syslog` behind the scenes. - -* [`github.com/nubo/hiprus`](https://github.com/nubo/hiprus) - Send errors to a channel in hipchat. - -* [`github.com/sebest/logrusly`](https://github.com/sebest/logrusly) - Send logs to Loggly (https://www.loggly.com/) - -* [`github.com/johntdyer/slackrus`](https://github.com/johntdyer/slackrus) - Hook for Slack chat. - -* [`github.com/wercker/journalhook`](https://github.com/wercker/journalhook). - Hook for logging to `systemd-journald`. - -#### Level logging - -Logrus has six logging levels: Debug, Info, Warning, Error, Fatal and Panic. - -```go -log.Debug("Useful debugging information.") -log.Info("Something noteworthy happened!") -log.Warn("You should probably take a look at this.") -log.Error("Something failed but I'm not quitting.") -// Calls os.Exit(1) after logging -log.Fatal("Bye.") -// Calls panic() after logging -log.Panic("I'm bailing.") -``` - -You can set the logging level on a `Logger`, then it will only log entries with -that severity or anything above it: - -```go -// Will log anything that is info or above (warn, error, fatal, panic). Default. -log.SetLevel(log.InfoLevel) -``` - -It may be useful to set `log.Level = logrus.DebugLevel` in a debug or verbose -environment if your application has that. - -#### Entries - -Besides the fields added with `WithField` or `WithFields` some fields are -automatically added to all logging events: - -1. `time`. The timestamp when the entry was created. -2. `msg`. The logging message passed to `{Info,Warn,Error,Fatal,Panic}` after - the `AddFields` call. E.g. `Failed to send event.` -3. `level`. The logging level. E.g. `info`. - -#### Environments - -Logrus has no notion of environment. - -If you wish for hooks and formatters to only be used in specific environments, -you should handle that yourself. For example, if your application has a global -variable `Environment`, which is a string representation of the environment you -could do: - -```go -import ( - log "github.com/Sirupsen/logrus" -) - -init() { - // do something here to set environment depending on an environment variable - // or command-line flag - if Environment == "production" { - log.SetFormatter(logrus.JSONFormatter) - } else { - // The TextFormatter is default, you don't actually have to do this. 
- log.SetFormatter(logrus.TextFormatter) - } -} -``` - -This configuration is how `logrus` was intended to be used, but JSON in -production is mostly only useful if you do log aggregation with tools like -Splunk or Logstash. - -#### Formatters - -The built-in logging formatters are: - -* `logrus.TextFormatter`. Logs the event in colors if stdout is a tty, otherwise - without colors. - * *Note:* to force colored output when there is no TTY, set the `ForceColors` - field to `true`. To force no colored output even if there is a TTY set the - `DisableColors` field to `true` -* `logrus.JSONFormatter`. Logs fields as JSON. - -Third party logging formatters: - -* [`zalgo`](https://github.com/aybabtme/logzalgo): invoking the P͉̫o̳̼̊w̖͈̰͎e̬͔̭͂r͚̼̹̲ ̫͓͉̳͈ō̠͕͖̚f̝͍̠ ͕̲̞͖͑Z̖̫̤̫ͪa͉̬͈̗l͖͎g̳̥o̰̥̅!̣͔̲̻͊̄ ̙̘̦̹̦. - -You can define your formatter by implementing the `Formatter` interface, -requiring a `Format` method. `Format` takes an `*Entry`. `entry.Data` is a -`Fields` type (`map[string]interface{}`) with all your fields as well as the -default ones (see Entries section above): - -```go -type MyJSONFormatter struct { -} - -log.SetFormatter(new(MyJSONFormatter)) - -func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) { - // Note this doesn't include Time, Level and Message which are available on - // the Entry. Consult `godoc` on information about those fields or read the - // source of the official loggers. - serialized, err := json.Marshal(entry.Data) - if err != nil { - return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err) - } - return append(serialized, '\n'), nil -} -``` - -#### Logger as an `io.Writer` - -Logrus can be transormed into an `io.Writer`. That writer is the end of an `io.Pipe` and it is your responsibility to close it. - -```go -w := logger.Writer() -defer w.Close() - -srv := http.Server{ - // create a stdlib log.Logger that writes to - // logrus.Logger. - ErrorLog: log.New(w, "", 0), -} -``` - -Each line written to that writer will be printed the usual way, using formatters -and hooks. The level for those entries is `info`. - -#### Rotation - -Log rotation is not provided with Logrus. Log rotation should be done by an -external program (like `logrotate(8)`) that can compress and delete old log -entries. It should not be a feature of the application-level logger. - - -[godoc]: https://godoc.org/github.com/Sirupsen/logrus diff --git a/vendor/github.com/Sirupsen/logrus/entry.go b/vendor/github.com/Sirupsen/logrus/entry.go deleted file mode 100644 index 17fe6f7..0000000 --- a/vendor/github.com/Sirupsen/logrus/entry.go +++ /dev/null @@ -1,252 +0,0 @@ -package logrus - -import ( - "bytes" - "fmt" - "io" - "os" - "time" -) - -// An entry is the final or intermediate Logrus logging entry. It contains all -// the fields passed with WithField{,s}. It's finally logged when Debug, Info, -// Warn, Error, Fatal or Panic is called on it. These objects can be reused and -// passed around as much as you wish to avoid field duplication. -type Entry struct { - Logger *Logger - - // Contains all the fields set by the user. 
- Data Fields - - // Time at which the log entry was created - Time time.Time - - // Level the log entry was logged at: Debug, Info, Warn, Error, Fatal or Panic - Level Level - - // Message passed to Debug, Info, Warn, Error, Fatal or Panic - Message string -} - -func NewEntry(logger *Logger) *Entry { - return &Entry{ - Logger: logger, - // Default is three fields, give a little extra room - Data: make(Fields, 5), - } -} - -// Returns a reader for the entry, which is a proxy to the formatter. -func (entry *Entry) Reader() (*bytes.Buffer, error) { - serialized, err := entry.Logger.Formatter.Format(entry) - return bytes.NewBuffer(serialized), err -} - -// Returns the string representation from the reader and ultimately the -// formatter. -func (entry *Entry) String() (string, error) { - reader, err := entry.Reader() - if err != nil { - return "", err - } - - return reader.String(), err -} - -// Add a single field to the Entry. -func (entry *Entry) WithField(key string, value interface{}) *Entry { - return entry.WithFields(Fields{key: value}) -} - -// Add a map of fields to the Entry. -func (entry *Entry) WithFields(fields Fields) *Entry { - data := Fields{} - for k, v := range entry.Data { - data[k] = v - } - for k, v := range fields { - data[k] = v - } - return &Entry{Logger: entry.Logger, Data: data} -} - -func (entry *Entry) log(level Level, msg string) { - entry.Time = time.Now() - entry.Level = level - entry.Message = msg - - if err := entry.Logger.Hooks.Fire(level, entry); err != nil { - entry.Logger.mu.Lock() - fmt.Fprintf(os.Stderr, "Failed to fire hook: %v\n", err) - entry.Logger.mu.Unlock() - } - - reader, err := entry.Reader() - if err != nil { - entry.Logger.mu.Lock() - fmt.Fprintf(os.Stderr, "Failed to obtain reader, %v\n", err) - entry.Logger.mu.Unlock() - } - - entry.Logger.mu.Lock() - defer entry.Logger.mu.Unlock() - - _, err = io.Copy(entry.Logger.Out, reader) - if err != nil { - fmt.Fprintf(os.Stderr, "Failed to write to log, %v\n", err) - } - - // To avoid Entry#log() returning a value that only would make sense for - // panic() to use in Entry#Panic(), we avoid the allocation by checking - // directly here. - if level <= PanicLevel { - panic(entry) - } -} - -func (entry *Entry) Debug(args ...interface{}) { - if entry.Logger.Level >= DebugLevel { - entry.log(DebugLevel, fmt.Sprint(args...)) - } -} - -func (entry *Entry) Print(args ...interface{}) { - entry.Info(args...) -} - -func (entry *Entry) Info(args ...interface{}) { - if entry.Logger.Level >= InfoLevel { - entry.log(InfoLevel, fmt.Sprint(args...)) - } -} - -func (entry *Entry) Warn(args ...interface{}) { - if entry.Logger.Level >= WarnLevel { - entry.log(WarnLevel, fmt.Sprint(args...)) - } -} - -func (entry *Entry) Warning(args ...interface{}) { - entry.Warn(args...) 
-} - -func (entry *Entry) Error(args ...interface{}) { - if entry.Logger.Level >= ErrorLevel { - entry.log(ErrorLevel, fmt.Sprint(args...)) - } -} - -func (entry *Entry) Fatal(args ...interface{}) { - if entry.Logger.Level >= FatalLevel { - entry.log(FatalLevel, fmt.Sprint(args...)) - } - os.Exit(1) -} - -func (entry *Entry) Panic(args ...interface{}) { - if entry.Logger.Level >= PanicLevel { - entry.log(PanicLevel, fmt.Sprint(args...)) - } - panic(fmt.Sprint(args...)) -} - -// Entry Printf family functions - -func (entry *Entry) Debugf(format string, args ...interface{}) { - if entry.Logger.Level >= DebugLevel { - entry.Debug(fmt.Sprintf(format, args...)) - } -} - -func (entry *Entry) Infof(format string, args ...interface{}) { - if entry.Logger.Level >= InfoLevel { - entry.Info(fmt.Sprintf(format, args...)) - } -} - -func (entry *Entry) Printf(format string, args ...interface{}) { - entry.Infof(format, args...) -} - -func (entry *Entry) Warnf(format string, args ...interface{}) { - if entry.Logger.Level >= WarnLevel { - entry.Warn(fmt.Sprintf(format, args...)) - } -} - -func (entry *Entry) Warningf(format string, args ...interface{}) { - entry.Warnf(format, args...) -} - -func (entry *Entry) Errorf(format string, args ...interface{}) { - if entry.Logger.Level >= ErrorLevel { - entry.Error(fmt.Sprintf(format, args...)) - } -} - -func (entry *Entry) Fatalf(format string, args ...interface{}) { - if entry.Logger.Level >= FatalLevel { - entry.Fatal(fmt.Sprintf(format, args...)) - } -} - -func (entry *Entry) Panicf(format string, args ...interface{}) { - if entry.Logger.Level >= PanicLevel { - entry.Panic(fmt.Sprintf(format, args...)) - } -} - -// Entry Println family functions - -func (entry *Entry) Debugln(args ...interface{}) { - if entry.Logger.Level >= DebugLevel { - entry.Debug(entry.sprintlnn(args...)) - } -} - -func (entry *Entry) Infoln(args ...interface{}) { - if entry.Logger.Level >= InfoLevel { - entry.Info(entry.sprintlnn(args...)) - } -} - -func (entry *Entry) Println(args ...interface{}) { - entry.Infoln(args...) -} - -func (entry *Entry) Warnln(args ...interface{}) { - if entry.Logger.Level >= WarnLevel { - entry.Warn(entry.sprintlnn(args...)) - } -} - -func (entry *Entry) Warningln(args ...interface{}) { - entry.Warnln(args...) -} - -func (entry *Entry) Errorln(args ...interface{}) { - if entry.Logger.Level >= ErrorLevel { - entry.Error(entry.sprintlnn(args...)) - } -} - -func (entry *Entry) Fatalln(args ...interface{}) { - if entry.Logger.Level >= FatalLevel { - entry.Fatal(entry.sprintlnn(args...)) - } -} - -func (entry *Entry) Panicln(args ...interface{}) { - if entry.Logger.Level >= PanicLevel { - entry.Panic(entry.sprintlnn(args...)) - } -} - -// Sprintlnn => Sprint no newline. This is to get the behavior of how -// fmt.Sprintln where spaces are always added between operands, regardless of -// their type. Instead of vendoring the Sprintln implementation to spare a -// string allocation, we do the simplest thing. -func (entry *Entry) sprintlnn(args ...interface{}) string { - msg := fmt.Sprintln(args...) 
- return msg[:len(msg)-1] -} diff --git a/vendor/github.com/Sirupsen/logrus/exported.go b/vendor/github.com/Sirupsen/logrus/exported.go deleted file mode 100644 index a67e1b8..0000000 --- a/vendor/github.com/Sirupsen/logrus/exported.go +++ /dev/null @@ -1,188 +0,0 @@ -package logrus - -import ( - "io" -) - -var ( - // std is the name of the standard logger in stdlib `log` - std = New() -) - -func StandardLogger() *Logger { - return std -} - -// SetOutput sets the standard logger output. -func SetOutput(out io.Writer) { - std.mu.Lock() - defer std.mu.Unlock() - std.Out = out -} - -// SetFormatter sets the standard logger formatter. -func SetFormatter(formatter Formatter) { - std.mu.Lock() - defer std.mu.Unlock() - std.Formatter = formatter -} - -// SetLevel sets the standard logger level. -func SetLevel(level Level) { - std.mu.Lock() - defer std.mu.Unlock() - std.Level = level -} - -// GetLevel returns the standard logger level. -func GetLevel() Level { - std.mu.Lock() - defer std.mu.Unlock() - return std.Level -} - -// AddHook adds a hook to the standard logger hooks. -func AddHook(hook Hook) { - std.mu.Lock() - defer std.mu.Unlock() - std.Hooks.Add(hook) -} - -// WithField creates an entry from the standard logger and adds a field to -// it. If you want multiple fields, use `WithFields`. -// -// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal -// or Panic on the Entry it returns. -func WithField(key string, value interface{}) *Entry { - return std.WithField(key, value) -} - -// WithFields creates an entry from the standard logger and adds multiple -// fields to it. This is simply a helper for `WithField`, invoking it -// once for each field. -// -// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal -// or Panic on the Entry it returns. -func WithFields(fields Fields) *Entry { - return std.WithFields(fields) -} - -// Debug logs a message at level Debug on the standard logger. -func Debug(args ...interface{}) { - std.Debug(args...) -} - -// Print logs a message at level Info on the standard logger. -func Print(args ...interface{}) { - std.Print(args...) -} - -// Info logs a message at level Info on the standard logger. -func Info(args ...interface{}) { - std.Info(args...) -} - -// Warn logs a message at level Warn on the standard logger. -func Warn(args ...interface{}) { - std.Warn(args...) -} - -// Warning logs a message at level Warn on the standard logger. -func Warning(args ...interface{}) { - std.Warning(args...) -} - -// Error logs a message at level Error on the standard logger. -func Error(args ...interface{}) { - std.Error(args...) -} - -// Panic logs a message at level Panic on the standard logger. -func Panic(args ...interface{}) { - std.Panic(args...) -} - -// Fatal logs a message at level Fatal on the standard logger. -func Fatal(args ...interface{}) { - std.Fatal(args...) -} - -// Debugf logs a message at level Debug on the standard logger. -func Debugf(format string, args ...interface{}) { - std.Debugf(format, args...) -} - -// Printf logs a message at level Info on the standard logger. -func Printf(format string, args ...interface{}) { - std.Printf(format, args...) -} - -// Infof logs a message at level Info on the standard logger. -func Infof(format string, args ...interface{}) { - std.Infof(format, args...) -} - -// Warnf logs a message at level Warn on the standard logger. -func Warnf(format string, args ...interface{}) { - std.Warnf(format, args...) -} - -// Warningf logs a message at level Warn on the standard logger. 
-func Warningf(format string, args ...interface{}) { - std.Warningf(format, args...) -} - -// Errorf logs a message at level Error on the standard logger. -func Errorf(format string, args ...interface{}) { - std.Errorf(format, args...) -} - -// Panicf logs a message at level Panic on the standard logger. -func Panicf(format string, args ...interface{}) { - std.Panicf(format, args...) -} - -// Fatalf logs a message at level Fatal on the standard logger. -func Fatalf(format string, args ...interface{}) { - std.Fatalf(format, args...) -} - -// Debugln logs a message at level Debug on the standard logger. -func Debugln(args ...interface{}) { - std.Debugln(args...) -} - -// Println logs a message at level Info on the standard logger. -func Println(args ...interface{}) { - std.Println(args...) -} - -// Infoln logs a message at level Info on the standard logger. -func Infoln(args ...interface{}) { - std.Infoln(args...) -} - -// Warnln logs a message at level Warn on the standard logger. -func Warnln(args ...interface{}) { - std.Warnln(args...) -} - -// Warningln logs a message at level Warn on the standard logger. -func Warningln(args ...interface{}) { - std.Warningln(args...) -} - -// Errorln logs a message at level Error on the standard logger. -func Errorln(args ...interface{}) { - std.Errorln(args...) -} - -// Panicln logs a message at level Panic on the standard logger. -func Panicln(args ...interface{}) { - std.Panicln(args...) -} - -// Fatalln logs a message at level Fatal on the standard logger. -func Fatalln(args ...interface{}) { - std.Fatalln(args...) -} diff --git a/vendor/github.com/Sirupsen/logrus/formatter.go b/vendor/github.com/Sirupsen/logrus/formatter.go deleted file mode 100644 index 038ce9f..0000000 --- a/vendor/github.com/Sirupsen/logrus/formatter.go +++ /dev/null @@ -1,44 +0,0 @@ -package logrus - -// The Formatter interface is used to implement a custom Formatter. It takes an -// `Entry`. It exposes all the fields, including the default ones: -// -// * `entry.Data["msg"]`. The message passed from Info, Warn, Error .. -// * `entry.Data["time"]`. The timestamp. -// * `entry.Data["level"]. The level the entry was logged at. -// -// Any additional fields added with `WithField` or `WithFields` are also in -// `entry.Data`. Format is expected to return an array of bytes which are then -// logged to `logger.Out`. -type Formatter interface { - Format(*Entry) ([]byte, error) -} - -// This is to not silently overwrite `time`, `msg` and `level` fields when -// dumping it. If this code wasn't there doing: -// -// logrus.WithField("level", 1).Info("hello") -// -// Would just silently drop the user provided level. Instead with this code -// it'll logged as: -// -// {"level": "info", "fields.level": 1, "msg": "hello", "time": "..."} -// -// It's not exported because it's still using Data in an opinionated way. It's to -// avoid code duplication between the two default formatters. 
-func prefixFieldClashes(data Fields) { - _, ok := data["time"] - if ok { - data["fields.time"] = data["time"] - } - - _, ok = data["msg"] - if ok { - data["fields.msg"] = data["msg"] - } - - _, ok = data["level"] - if ok { - data["fields.level"] = data["level"] - } -} diff --git a/vendor/github.com/Sirupsen/logrus/hooks.go b/vendor/github.com/Sirupsen/logrus/hooks.go deleted file mode 100644 index 0da2b36..0000000 --- a/vendor/github.com/Sirupsen/logrus/hooks.go +++ /dev/null @@ -1,34 +0,0 @@ -package logrus - -// A hook to be fired when logging on the logging levels returned from -// `Levels()` on your implementation of the interface. Note that this is not -// fired in a goroutine or a channel with workers, you should handle such -// functionality yourself if your call is non-blocking and you don't wish for -// the logging calls for levels returned from `Levels()` to block. -type Hook interface { - Levels() []Level - Fire(*Entry) error -} - -// Internal type for storing the hooks on a logger instance. -type levelHooks map[Level][]Hook - -// Add a hook to an instance of logger. This is called with -// `log.Hooks.Add(new(MyHook))` where `MyHook` implements the `Hook` interface. -func (hooks levelHooks) Add(hook Hook) { - for _, level := range hook.Levels() { - hooks[level] = append(hooks[level], hook) - } -} - -// Fire all the hooks for the passed level. Used by `entry.log` to fire -// appropriate hooks for a log entry. -func (hooks levelHooks) Fire(level Level, entry *Entry) error { - for _, hook := range hooks[level] { - if err := hook.Fire(entry); err != nil { - return err - } - } - - return nil -} diff --git a/vendor/github.com/Sirupsen/logrus/json_formatter.go b/vendor/github.com/Sirupsen/logrus/json_formatter.go deleted file mode 100644 index 0e38a61..0000000 --- a/vendor/github.com/Sirupsen/logrus/json_formatter.go +++ /dev/null @@ -1,32 +0,0 @@ -package logrus - -import ( - "encoding/json" - "fmt" - "time" -) - -type JSONFormatter struct{} - -func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) { - data := make(Fields, len(entry.Data)+3) - for k, v := range entry.Data { - // Otherwise errors are ignored by `encoding/json` - // https://github.com/Sirupsen/logrus/issues/137 - if err, ok := v.(error); ok { - data[k] = err.Error() - } else { - data[k] = v - } - } - prefixFieldClashes(data) - data["time"] = entry.Time.Format(time.RFC3339) - data["msg"] = entry.Message - data["level"] = entry.Level.String() - - serialized, err := json.Marshal(data) - if err != nil { - return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err) - } - return append(serialized, '\n'), nil -} diff --git a/vendor/github.com/Sirupsen/logrus/logger.go b/vendor/github.com/Sirupsen/logrus/logger.go deleted file mode 100644 index b392e54..0000000 --- a/vendor/github.com/Sirupsen/logrus/logger.go +++ /dev/null @@ -1,161 +0,0 @@ -package logrus - -import ( - "io" - "os" - "sync" -) - -type Logger struct { - // The logs are `io.Copy`'d to this in a mutex. It's common to set this to a - // file, or leave it default which is `os.Stdout`. You can also set this to - // something more adventorous, such as logging to Kafka. - Out io.Writer - // Hooks for the logger instance. These allow firing events based on logging - // levels and log entries. For example, to send errors to an error tracking - // service, log to StatsD or dump the core on fatal errors. - Hooks levelHooks - // All log entries pass through the formatter before logged to Out. 
The - // included formatters are `TextFormatter` and `JSONFormatter` for which - // TextFormatter is the default. In development (when a TTY is attached) it - // logs with colors, but to a file it wouldn't. You can easily implement your - // own that implements the `Formatter` interface, see the `README` or included - // formatters for examples. - Formatter Formatter - // The logging level the logger should log at. This is typically (and defaults - // to) `logrus.Info`, which allows Info(), Warn(), Error() and Fatal() to be - // logged. `logrus.Debug` is useful in - Level Level - // Used to sync writing to the log. - mu sync.Mutex -} - -// Creates a new logger. Configuration should be set by changing `Formatter`, -// `Out` and `Hooks` directly on the default logger instance. You can also just -// instantiate your own: -// -// var log = &Logger{ -// Out: os.Stderr, -// Formatter: new(JSONFormatter), -// Hooks: make(levelHooks), -// Level: logrus.DebugLevel, -// } -// -// It's recommended to make this a global instance called `log`. -func New() *Logger { - return &Logger{ - Out: os.Stdout, - Formatter: new(TextFormatter), - Hooks: make(levelHooks), - Level: InfoLevel, - } -} - -// Adds a field to the log entry, note that you it doesn't log until you call -// Debug, Print, Info, Warn, Fatal or Panic. It only creates a log entry. -// Ff you want multiple fields, use `WithFields`. -func (logger *Logger) WithField(key string, value interface{}) *Entry { - return NewEntry(logger).WithField(key, value) -} - -// Adds a struct of fields to the log entry. All it does is call `WithField` for -// each `Field`. -func (logger *Logger) WithFields(fields Fields) *Entry { - return NewEntry(logger).WithFields(fields) -} - -func (logger *Logger) Debugf(format string, args ...interface{}) { - NewEntry(logger).Debugf(format, args...) -} - -func (logger *Logger) Infof(format string, args ...interface{}) { - NewEntry(logger).Infof(format, args...) -} - -func (logger *Logger) Printf(format string, args ...interface{}) { - NewEntry(logger).Printf(format, args...) -} - -func (logger *Logger) Warnf(format string, args ...interface{}) { - NewEntry(logger).Warnf(format, args...) -} - -func (logger *Logger) Warningf(format string, args ...interface{}) { - NewEntry(logger).Warnf(format, args...) -} - -func (logger *Logger) Errorf(format string, args ...interface{}) { - NewEntry(logger).Errorf(format, args...) -} - -func (logger *Logger) Fatalf(format string, args ...interface{}) { - NewEntry(logger).Fatalf(format, args...) -} - -func (logger *Logger) Panicf(format string, args ...interface{}) { - NewEntry(logger).Panicf(format, args...) -} - -func (logger *Logger) Debug(args ...interface{}) { - NewEntry(logger).Debug(args...) -} - -func (logger *Logger) Info(args ...interface{}) { - NewEntry(logger).Info(args...) -} - -func (logger *Logger) Print(args ...interface{}) { - NewEntry(logger).Info(args...) -} - -func (logger *Logger) Warn(args ...interface{}) { - NewEntry(logger).Warn(args...) -} - -func (logger *Logger) Warning(args ...interface{}) { - NewEntry(logger).Warn(args...) -} - -func (logger *Logger) Error(args ...interface{}) { - NewEntry(logger).Error(args...) -} - -func (logger *Logger) Fatal(args ...interface{}) { - NewEntry(logger).Fatal(args...) -} - -func (logger *Logger) Panic(args ...interface{}) { - NewEntry(logger).Panic(args...) -} - -func (logger *Logger) Debugln(args ...interface{}) { - NewEntry(logger).Debugln(args...) 
-} - -func (logger *Logger) Infoln(args ...interface{}) { - NewEntry(logger).Infoln(args...) -} - -func (logger *Logger) Println(args ...interface{}) { - NewEntry(logger).Println(args...) -} - -func (logger *Logger) Warnln(args ...interface{}) { - NewEntry(logger).Warnln(args...) -} - -func (logger *Logger) Warningln(args ...interface{}) { - NewEntry(logger).Warnln(args...) -} - -func (logger *Logger) Errorln(args ...interface{}) { - NewEntry(logger).Errorln(args...) -} - -func (logger *Logger) Fatalln(args ...interface{}) { - NewEntry(logger).Fatalln(args...) -} - -func (logger *Logger) Panicln(args ...interface{}) { - NewEntry(logger).Panicln(args...) -} diff --git a/vendor/github.com/Sirupsen/logrus/logrus.go b/vendor/github.com/Sirupsen/logrus/logrus.go deleted file mode 100644 index 43ee12e..0000000 --- a/vendor/github.com/Sirupsen/logrus/logrus.go +++ /dev/null @@ -1,94 +0,0 @@ -package logrus - -import ( - "fmt" - "log" -) - -// Fields type, used to pass to `WithFields`. -type Fields map[string]interface{} - -// Level type -type Level uint8 - -// Convert the Level to a string. E.g. PanicLevel becomes "panic". -func (level Level) String() string { - switch level { - case DebugLevel: - return "debug" - case InfoLevel: - return "info" - case WarnLevel: - return "warning" - case ErrorLevel: - return "error" - case FatalLevel: - return "fatal" - case PanicLevel: - return "panic" - } - - return "unknown" -} - -// ParseLevel takes a string level and returns the Logrus log level constant. -func ParseLevel(lvl string) (Level, error) { - switch lvl { - case "panic": - return PanicLevel, nil - case "fatal": - return FatalLevel, nil - case "error": - return ErrorLevel, nil - case "warn", "warning": - return WarnLevel, nil - case "info": - return InfoLevel, nil - case "debug": - return DebugLevel, nil - } - - var l Level - return l, fmt.Errorf("not a valid logrus Level: %q", lvl) -} - -// These are the different logging levels. You can set the logging level to log -// on your instance of logger, obtained with `logrus.New()`. -const ( - // PanicLevel level, highest level of severity. Logs and then calls panic with the - // message passed to Debug, Info, ... - PanicLevel Level = iota - // FatalLevel level. Logs and then calls `os.Exit(1)`. It will exit even if the - // logging level is set to Panic. - FatalLevel - // ErrorLevel level. Logs. Used for errors that should definitely be noted. - // Commonly used for hooks to send errors to an error tracking service. - ErrorLevel - // WarnLevel level. Non-critical entries that deserve eyes. - WarnLevel - // InfoLevel level. General operational entries about what's going on inside the - // application. - InfoLevel - // DebugLevel level. Usually only enabled when debugging. Very verbose logging. - DebugLevel -) - -// Won't compile if StdLogger can't be realized by a log.Logger -var _ StdLogger = &log.Logger{} - -// StdLogger is what your logrus-enabled library should take, that way -// it'll accept a stdlib logger and a logrus logger. There's no standard -// interface, this is the closest we get, unfortunately. 
-type StdLogger interface { - Print(...interface{}) - Printf(string, ...interface{}) - Println(...interface{}) - - Fatal(...interface{}) - Fatalf(string, ...interface{}) - Fatalln(...interface{}) - - Panic(...interface{}) - Panicf(string, ...interface{}) - Panicln(...interface{}) -} diff --git a/vendor/github.com/Sirupsen/logrus/terminal_darwin.go b/vendor/github.com/Sirupsen/logrus/terminal_darwin.go deleted file mode 100644 index 8fe02a4..0000000 --- a/vendor/github.com/Sirupsen/logrus/terminal_darwin.go +++ /dev/null @@ -1,12 +0,0 @@ -// Based on ssh/terminal: -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package logrus - -import "syscall" - -const ioctlReadTermios = syscall.TIOCGETA - -type Termios syscall.Termios diff --git a/vendor/github.com/Sirupsen/logrus/terminal_freebsd.go b/vendor/github.com/Sirupsen/logrus/terminal_freebsd.go deleted file mode 100644 index 0428ee5..0000000 --- a/vendor/github.com/Sirupsen/logrus/terminal_freebsd.go +++ /dev/null @@ -1,20 +0,0 @@ -/* - Go 1.2 doesn't include Termios for FreeBSD. This should be added in 1.3 and this could be merged with terminal_darwin. -*/ -package logrus - -import ( - "syscall" -) - -const ioctlReadTermios = syscall.TIOCGETA - -type Termios struct { - Iflag uint32 - Oflag uint32 - Cflag uint32 - Lflag uint32 - Cc [20]uint8 - Ispeed uint32 - Ospeed uint32 -} diff --git a/vendor/github.com/Sirupsen/logrus/terminal_linux.go b/vendor/github.com/Sirupsen/logrus/terminal_linux.go deleted file mode 100644 index a2c0b40..0000000 --- a/vendor/github.com/Sirupsen/logrus/terminal_linux.go +++ /dev/null @@ -1,12 +0,0 @@ -// Based on ssh/terminal: -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package logrus - -import "syscall" - -const ioctlReadTermios = syscall.TCGETS - -type Termios syscall.Termios diff --git a/vendor/github.com/Sirupsen/logrus/terminal_notwindows.go b/vendor/github.com/Sirupsen/logrus/terminal_notwindows.go deleted file mode 100644 index b8bebc1..0000000 --- a/vendor/github.com/Sirupsen/logrus/terminal_notwindows.go +++ /dev/null @@ -1,21 +0,0 @@ -// Based on ssh/terminal: -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build linux darwin freebsd openbsd - -package logrus - -import ( - "syscall" - "unsafe" -) - -// IsTerminal returns true if the given file descriptor is a terminal. 
-func IsTerminal() bool { - fd := syscall.Stdout - var termios Termios - _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0) - return err == 0 -} diff --git a/vendor/github.com/Sirupsen/logrus/terminal_openbsd.go b/vendor/github.com/Sirupsen/logrus/terminal_openbsd.go deleted file mode 100644 index d238bfa..0000000 --- a/vendor/github.com/Sirupsen/logrus/terminal_openbsd.go +++ /dev/null @@ -1,8 +0,0 @@ - -package logrus - -import "syscall" - -const ioctlReadTermios = syscall.TIOCGETA - -type Termios syscall.Termios diff --git a/vendor/github.com/Sirupsen/logrus/terminal_windows.go b/vendor/github.com/Sirupsen/logrus/terminal_windows.go deleted file mode 100644 index 2e09f6f..0000000 --- a/vendor/github.com/Sirupsen/logrus/terminal_windows.go +++ /dev/null @@ -1,27 +0,0 @@ -// Based on ssh/terminal: -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build windows - -package logrus - -import ( - "syscall" - "unsafe" -) - -var kernel32 = syscall.NewLazyDLL("kernel32.dll") - -var ( - procGetConsoleMode = kernel32.NewProc("GetConsoleMode") -) - -// IsTerminal returns true if the given file descriptor is a terminal. -func IsTerminal() bool { - fd := syscall.Stdout - var st uint32 - r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0) - return r != 0 && e == 0 -} diff --git a/vendor/github.com/Sirupsen/logrus/text_formatter.go b/vendor/github.com/Sirupsen/logrus/text_formatter.go deleted file mode 100644 index 71dcb66..0000000 --- a/vendor/github.com/Sirupsen/logrus/text_formatter.go +++ /dev/null @@ -1,145 +0,0 @@ -package logrus - -import ( - "bytes" - "fmt" - "regexp" - "sort" - "strings" - "time" -) - -const ( - nocolor = 0 - red = 31 - green = 32 - yellow = 33 - blue = 34 - gray = 37 -) - -var ( - baseTimestamp time.Time - isTerminal bool - noQuoteNeeded *regexp.Regexp -) - -func init() { - baseTimestamp = time.Now() - isTerminal = IsTerminal() -} - -func miniTS() int { - return int(time.Since(baseTimestamp) / time.Second) -} - -type TextFormatter struct { - // Set to true to bypass checking for a TTY before outputting colors. - ForceColors bool - - // Force disabling colors. - DisableColors bool - - // Disable timestamp logging. useful when output is redirected to logging - // system that already adds timestamps. - DisableTimestamp bool - - // Enable logging the full timestamp when a TTY is attached instead of just - // the time passed since beginning of execution. - FullTimestamp bool - - // The fields are sorted by default for a consistent output. For applications - // that log extremely frequently and don't use the JSON formatter this may not - // be desired. 
- DisableSorting bool -} - -func (f *TextFormatter) Format(entry *Entry) ([]byte, error) { - var keys []string = make([]string, 0, len(entry.Data)) - for k := range entry.Data { - keys = append(keys, k) - } - - if !f.DisableSorting { - sort.Strings(keys) - } - - b := &bytes.Buffer{} - - prefixFieldClashes(entry.Data) - - isColored := (f.ForceColors || isTerminal) && !f.DisableColors - - if isColored { - f.printColored(b, entry, keys) - } else { - if !f.DisableTimestamp { - f.appendKeyValue(b, "time", entry.Time.Format(time.RFC3339)) - } - f.appendKeyValue(b, "level", entry.Level.String()) - f.appendKeyValue(b, "msg", entry.Message) - for _, key := range keys { - f.appendKeyValue(b, key, entry.Data[key]) - } - } - - b.WriteByte('\n') - return b.Bytes(), nil -} - -func (f *TextFormatter) printColored(b *bytes.Buffer, entry *Entry, keys []string) { - var levelColor int - switch entry.Level { - case DebugLevel: - levelColor = gray - case WarnLevel: - levelColor = yellow - case ErrorLevel, FatalLevel, PanicLevel: - levelColor = red - default: - levelColor = blue - } - - levelText := strings.ToUpper(entry.Level.String())[0:4] - - if !f.FullTimestamp { - fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%04d] %-44s ", levelColor, levelText, miniTS(), entry.Message) - } else { - fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%s] %-44s ", levelColor, levelText, entry.Time.Format(time.RFC3339), entry.Message) - } - for _, k := range keys { - v := entry.Data[k] - fmt.Fprintf(b, " \x1b[%dm%s\x1b[0m=%v", levelColor, k, v) - } -} - -func needsQuoting(text string) bool { - for _, ch := range text { - if !((ch >= 'a' && ch <= 'z') || - (ch >= 'A' && ch <= 'Z') || - (ch >= '0' && ch <= '9') || - ch == '-' || ch == '.') { - return false - } - } - return true -} - -func (f *TextFormatter) appendKeyValue(b *bytes.Buffer, key, value interface{}) { - switch value.(type) { - case string: - if needsQuoting(value.(string)) { - fmt.Fprintf(b, "%v=%s ", key, value) - } else { - fmt.Fprintf(b, "%v=%q ", key, value) - } - case error: - if needsQuoting(value.(error).Error()) { - fmt.Fprintf(b, "%v=%s ", key, value) - } else { - fmt.Fprintf(b, "%v=%q ", key, value) - } - default: - fmt.Fprintf(b, "%v=%v ", key, value) - } -} diff --git a/vendor/github.com/Sirupsen/logrus/writer.go b/vendor/github.com/Sirupsen/logrus/writer.go deleted file mode 100644 index 90d3e01..0000000 --- a/vendor/github.com/Sirupsen/logrus/writer.go +++ /dev/null @@ -1,31 +0,0 @@ -package logrus - -import ( - "bufio" - "io" - "runtime" -) - -func (logger *Logger) Writer() (*io.PipeWriter) { - reader, writer := io.Pipe() - - go logger.writerScanner(reader) - runtime.SetFinalizer(writer, writerFinalizer) - - return writer -} - -func (logger *Logger) writerScanner(reader *io.PipeReader) { - scanner := bufio.NewScanner(reader) - for scanner.Scan() { - logger.Print(scanner.Text()) - } - if err := scanner.Err(); err != nil { - logger.Errorf("Error while reading from Writer: %s", err) - } - reader.Close() -} - -func writerFinalizer(writer *io.PipeWriter) { - writer.Close() -} diff --git a/vendor/github.com/dgrijalva/jwt-go/LICENSE b/vendor/github.com/dgrijalva/jwt-go/LICENSE deleted file mode 100644 index df83a9c..0000000 --- a/vendor/github.com/dgrijalva/jwt-go/LICENSE +++ /dev/null @@ -1,8 +0,0 @@ -Copyright (c) 2012 Dave Grijalva - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, 
modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - diff --git a/vendor/github.com/dgrijalva/jwt-go/README.md b/vendor/github.com/dgrijalva/jwt-go/README.md deleted file mode 100644 index f8321b0..0000000 --- a/vendor/github.com/dgrijalva/jwt-go/README.md +++ /dev/null @@ -1,59 +0,0 @@ -A [go](http://www.golang.org) (or 'golang' for search engine friendliness) implementation of [JSON Web Tokens](http://self-issued.info/docs/draft-jones-json-web-token.html) - -**NOTICE:** A vulnerability in JWT was [recently published](https://auth0.com/blog/2015/03/31/critical-vulnerabilities-in-json-web-token-libraries/). As this library doesn't force users to validate the `alg` is what they expected, it's possible your usage is effected. There will be an update soon to remedy this, and it will likey require backwards-incompatible changes to the API. In the short term, please make sure your implementation verifies the `alg` is what you expect. - -## What the heck is a JWT? - -In short, it's a signed JSON object that does something useful (for example, authentication). It's commonly used for `Bearer` tokens in Oauth 2. A token is made of three parts, separated by `.`'s. The first two parts are JSON objects, that have been [base64url](http://tools.ietf.org/html/rfc4648) encoded. The last part is the signature, encoded the same way. - -The first part is called the header. It contains the necessary information for verifying the last part, the signature. For example, which encryption method was used for signing and what key was used. - -The part in the middle is the interesting bit. It's called the Claims and contains the actual stuff you care about. Refer to [the RFC](http://self-issued.info/docs/draft-jones-json-web-token.html) for information about reserved keys and the proper way to add your own. - -## What's in the box? - -This library supports the parsing and verification as well as the generation and signing of JWTs. Current supported signing algorithms are RSA256 and HMAC SHA256, though hooks are present for adding your own. - -## Parse and Verify - -Parsing and verifying tokens is pretty straight forward. You pass in the token and a function for looking up the key. This is done as a callback since you may need to parse the token to find out what signing method and key was used. 
- -```go - token, err := jwt.Parse(myToken, func(token *jwt.Token) (interface{}, error) { - // Don't forget to validate the alg is what you expect: - if _, ok := t.Method.(*jwt.SigningMethodRSA); !ok { - return nil, fmt.Errorf("Unexpected signing method: %v", t.Header["alg"]) - } - return myLookupKey(token.Header["kid"]) - }) - - if err == nil && token.Valid { - deliverGoodness("!") - } else { - deliverUtterRejection(":(") - } -``` - -## Create a token - -```go - // Create the token - token := jwt.New(jwt.SigningMethodHS256) - // Set some claims - token.Claims["foo"] = "bar" - token.Claims["exp"] = time.Now().Add(time.Hour * 72).Unix() - // Sign and get the complete encoded token as a string - tokenString, err := token.SignedString(mySigningKey) -``` - -## Project Status & Versioning - -This library is considered production ready. Feedback and feature requests are appreciated. The API should be considered stable. There should be very few backwards-incompatible changes outside of major version updates (and only with good reason). - -This project uses [Semantic Versioning 2.0.0](http://semver.org). Accepted pull requests will land on `master`. Periodically, versions will be tagged from `master`. You can find all the releases on [the project releases page](https://github.com/dgrijalva/jwt-go/releases). - -## More - -Documentation can be found [on godoc.org](http://godoc.org/github.com/dgrijalva/jwt-go). - -The command line utility included in this project (cmd/jwt) provides a straightforward example of token creation and parsing as well as a useful tool for debugging your own integration. For a more http centric example, see [this gist](https://gist.github.com/cryptix/45c33ecf0ae54828e63b). diff --git a/vendor/github.com/dgrijalva/jwt-go/VERSION_HISTORY.md b/vendor/github.com/dgrijalva/jwt-go/VERSION_HISTORY.md deleted file mode 100644 index 972722e..0000000 --- a/vendor/github.com/dgrijalva/jwt-go/VERSION_HISTORY.md +++ /dev/null @@ -1,54 +0,0 @@ -## `jwt-go` Version History - -#### 2.2.0 - -* Gracefully handle a `nil` `Keyfunc` being passed to `Parse`. Result will now be the parsed token and an error, instead of a panic. - -#### 2.1.0 - -Backwards compatible API change that was missed in 2.0.0. - -* The `SignedString` method on `Token` now takes `interface{}` instead of `[]byte` - -#### 2.0.0 - -There were two major reasons for breaking backwards compatibility with this update. The first was a refactor required to expand the width of the RSA and HMAC-SHA signing implementations. There will likely be no required code changes to support this change. - -The second update, while unfortunately requiring a small change in integration, is required to open up this library to other signing methods. Not all keys used for all signing methods have a single standard on-disk representation. Requiring `[]byte` as the type for all keys proved too limiting. Additionally, this implementation allows for pre-parsed tokens to be reused, which might matter in an application that parses a high volume of tokens with a small set of keys. Backwards compatibilty has been maintained for passing `[]byte` to the RSA signing methods, but they will also accept `*rsa.PublicKey` and `*rsa.PrivateKey`. - -It is likely the only integration change required here will be to change `func(t *jwt.Token) ([]byte, error)` to `func(t *jwt.Token) (interface{}, error)` when calling `Parse`. 
- -* **Compatibility Breaking Changes** - * `SigningMethodHS256` is now `*SigningMethodHMAC` instead of `type struct` - * `SigningMethodRS256` is now `*SigningMethodRSA` instead of `type struct` - * `KeyFunc` now returns `interface{}` instead of `[]byte` - * `SigningMethod.Sign` now takes `interface{}` instead of `[]byte` for the key - * `SigningMethod.Verify` now takes `interface{}` instead of `[]byte` for the key -* Renamed type `SigningMethodHS256` to `SigningMethodHMAC`. Specific sizes are now just instances of this type. - * Added public package global `SigningMethodHS256` - * Added public package global `SigningMethodHS384` - * Added public package global `SigningMethodHS512` -* Renamed type `SigningMethodRS256` to `SigningMethodRSA`. Specific sizes are now just instances of this type. - * Added public package global `SigningMethodRS256` - * Added public package global `SigningMethodRS384` - * Added public package global `SigningMethodRS512` -* Moved sample private key for HMAC tests from an inline value to a file on disk. Value is unchanged. -* Refactored the RSA implementation to be easier to read -* Exposed helper methods `ParseRSAPrivateKeyFromPEM` and `ParseRSAPublicKeyFromPEM` - -#### 1.0.2 - -* Fixed bug in parsing public keys from certificates -* Added more tests around the parsing of keys for RS256 -* Code refactoring in RS256 implementation. No functional changes - -#### 1.0.1 - -* Fixed panic if RS256 signing method was passed an invalid key - -#### 1.0.0 - -* First versioned release -* API stabilized -* Supports creating, signing, parsing, and validating JWT tokens -* Supports RS256 and HS256 signing methods \ No newline at end of file diff --git a/vendor/github.com/dgrijalva/jwt-go/doc.go b/vendor/github.com/dgrijalva/jwt-go/doc.go deleted file mode 100644 index a86dc1a..0000000 --- a/vendor/github.com/dgrijalva/jwt-go/doc.go +++ /dev/null @@ -1,4 +0,0 @@ -// Package jwt is a Go implementation of JSON Web Tokens: http://self-issued.info/docs/draft-jones-json-web-token.html -// -// See README.md for more info. 
-package jwt diff --git a/vendor/github.com/dgrijalva/jwt-go/hmac.go b/vendor/github.com/dgrijalva/jwt-go/hmac.go deleted file mode 100644 index 402ff08..0000000 --- a/vendor/github.com/dgrijalva/jwt-go/hmac.go +++ /dev/null @@ -1,84 +0,0 @@ -package jwt - -import ( - "crypto" - "crypto/hmac" - "errors" -) - -// Implements the HMAC-SHA family of signing methods signing methods -type SigningMethodHMAC struct { - Name string - Hash crypto.Hash -} - -// Specific instances for HS256 and company -var ( - SigningMethodHS256 *SigningMethodHMAC - SigningMethodHS384 *SigningMethodHMAC - SigningMethodHS512 *SigningMethodHMAC - ErrSignatureInvalid = errors.New("signature is invalid") -) - -func init() { - // HS256 - SigningMethodHS256 = &SigningMethodHMAC{"HS256", crypto.SHA256} - RegisterSigningMethod(SigningMethodHS256.Alg(), func() SigningMethod { - return SigningMethodHS256 - }) - - // HS384 - SigningMethodHS384 = &SigningMethodHMAC{"HS384", crypto.SHA384} - RegisterSigningMethod(SigningMethodHS384.Alg(), func() SigningMethod { - return SigningMethodHS384 - }) - - // HS512 - SigningMethodHS512 = &SigningMethodHMAC{"HS512", crypto.SHA512} - RegisterSigningMethod(SigningMethodHS512.Alg(), func() SigningMethod { - return SigningMethodHS512 - }) -} - -func (m *SigningMethodHMAC) Alg() string { - return m.Name -} - -func (m *SigningMethodHMAC) Verify(signingString, signature string, key interface{}) error { - if keyBytes, ok := key.([]byte); ok { - var sig []byte - var err error - if sig, err = DecodeSegment(signature); err == nil { - if !m.Hash.Available() { - return ErrHashUnavailable - } - - hasher := hmac.New(m.Hash.New, keyBytes) - hasher.Write([]byte(signingString)) - - if !hmac.Equal(sig, hasher.Sum(nil)) { - err = ErrSignatureInvalid - } - } - return err - } - - return ErrInvalidKey -} - -// Implements the Sign method from SigningMethod for this signing method. -// Key must be []byte -func (m *SigningMethodHMAC) Sign(signingString string, key interface{}) (string, error) { - if keyBytes, ok := key.([]byte); ok { - if !m.Hash.Available() { - return "", ErrHashUnavailable - } - - hasher := hmac.New(m.Hash.New, keyBytes) - hasher.Write([]byte(signingString)) - - return EncodeSegment(hasher.Sum(nil)), nil - } - - return "", ErrInvalidKey -} diff --git a/vendor/github.com/dgrijalva/jwt-go/jwt.go b/vendor/github.com/dgrijalva/jwt-go/jwt.go deleted file mode 100644 index f7b4322..0000000 --- a/vendor/github.com/dgrijalva/jwt-go/jwt.go +++ /dev/null @@ -1,237 +0,0 @@ -package jwt - -import ( - "encoding/base64" - "encoding/json" - "errors" - "net/http" - "strings" - "time" -) - -// TimeFunc provides the current time when parsing token to validate "exp" claim (expiration time). -// You can override it to use another time value. This is useful for testing or if your -// server uses a different time zone than your tokens. -var TimeFunc = time.Now - -// Parse methods use this callback function to supply -// the key for verification. The function receives the parsed, -// but unverified Token. This allows you to use propries in the -// Header of the token (such as `kid`) to identify which key to use. -type Keyfunc func(*Token) (interface{}, error) - -// Error constants -var ( - ErrInvalidKey = errors.New("key is invalid or of invalid type") - ErrHashUnavailable = errors.New("the requested hash function is unavailable") - ErrNoTokenInRequest = errors.New("no token present in request") -) - -// A JWT Token. Different fields will be used depending on whether you're -// creating or parsing/verifying a token. 
-type Token struct { - Raw string // The raw token. Populated when you Parse a token - Method SigningMethod // The signing method used or to be used - Header map[string]interface{} // The first segment of the token - Claims map[string]interface{} // The second segment of the token - Signature string // The third segment of the token. Populated when you Parse a token - Valid bool // Is the token valid? Populated when you Parse/Verify a token -} - -// Create a new Token. Takes a signing method -func New(method SigningMethod) *Token { - return &Token{ - Header: map[string]interface{}{ - "typ": "JWT", - "alg": method.Alg(), - }, - Claims: make(map[string]interface{}), - Method: method, - } -} - -// Get the complete, signed token -func (t *Token) SignedString(key interface{}) (string, error) { - var sig, sstr string - var err error - if sstr, err = t.SigningString(); err != nil { - return "", err - } - if sig, err = t.Method.Sign(sstr, key); err != nil { - return "", err - } - return strings.Join([]string{sstr, sig}, "."), nil -} - -// Generate the signing string. This is the -// most expensive part of the whole deal. Unless you -// need this for something special, just go straight for -// the SignedString. -func (t *Token) SigningString() (string, error) { - var err error - parts := make([]string, 2) - for i, _ := range parts { - var source map[string]interface{} - if i == 0 { - source = t.Header - } else { - source = t.Claims - } - - var jsonValue []byte - if jsonValue, err = json.Marshal(source); err != nil { - return "", err - } - - parts[i] = EncodeSegment(jsonValue) - } - return strings.Join(parts, "."), nil -} - -// Parse, validate, and return a token. -// keyFunc will receive the parsed token and should return the key for validating. -// If everything is kosher, err will be nil -func Parse(tokenString string, keyFunc Keyfunc) (*Token, error) { - parts := strings.Split(tokenString, ".") - if len(parts) != 3 { - return nil, &ValidationError{err: "token contains an invalid number of segments", Errors: ValidationErrorMalformed} - } - - var err error - token := &Token{Raw: tokenString} - // parse Header - var headerBytes []byte - if headerBytes, err = DecodeSegment(parts[0]); err != nil { - return token, &ValidationError{err: err.Error(), Errors: ValidationErrorMalformed} - } - if err = json.Unmarshal(headerBytes, &token.Header); err != nil { - return token, &ValidationError{err: err.Error(), Errors: ValidationErrorMalformed} - } - - // parse Claims - var claimBytes []byte - if claimBytes, err = DecodeSegment(parts[1]); err != nil { - return token, &ValidationError{err: err.Error(), Errors: ValidationErrorMalformed} - } - if err = json.Unmarshal(claimBytes, &token.Claims); err != nil { - return token, &ValidationError{err: err.Error(), Errors: ValidationErrorMalformed} - } - - // Lookup signature method - if method, ok := token.Header["alg"].(string); ok { - if token.Method = GetSigningMethod(method); token.Method == nil { - return token, &ValidationError{err: "signing method (alg) is unavailable.", Errors: ValidationErrorUnverifiable} - } - } else { - return token, &ValidationError{err: "signing method (alg) is unspecified.", Errors: ValidationErrorUnverifiable} - } - - // Lookup key - var key interface{} - if keyFunc == nil { - // keyFunc was not provided. 
short circuiting validation - return token, &ValidationError{err: "no Keyfunc was provided.", Errors: ValidationErrorUnverifiable} - } - if key, err = keyFunc(token); err != nil { - // keyFunc returned an error - return token, &ValidationError{err: err.Error(), Errors: ValidationErrorUnverifiable} - } - - // Check expiration times - vErr := &ValidationError{} - now := TimeFunc().Unix() - if exp, ok := token.Claims["exp"].(float64); ok { - if now > int64(exp) { - vErr.err = "token is expired" - vErr.Errors |= ValidationErrorExpired - } - } - if nbf, ok := token.Claims["nbf"].(float64); ok { - if now < int64(nbf) { - vErr.err = "token is not valid yet" - vErr.Errors |= ValidationErrorNotValidYet - } - } - - // Perform validation - if err = token.Method.Verify(strings.Join(parts[0:2], "."), parts[2], key); err != nil { - vErr.err = err.Error() - vErr.Errors |= ValidationErrorSignatureInvalid - } - - if vErr.valid() { - token.Valid = true - return token, nil - } - - return token, vErr -} - -// The errors that might occur when parsing and validating a token -const ( - ValidationErrorMalformed uint32 = 1 << iota // Token is malformed - ValidationErrorUnverifiable // Token could not be verified because of signing problems - ValidationErrorSignatureInvalid // Signature validation failed - ValidationErrorExpired // Exp validation failed - ValidationErrorNotValidYet // NBF validation failed -) - -// The error from Parse if token is not valid -type ValidationError struct { - err string - Errors uint32 // bitfield. see ValidationError... constants -} - -// Validation error is an error type -func (e ValidationError) Error() string { - if e.err == "" { - return "token is invalid" - } - return e.err -} - -// No errors -func (e *ValidationError) valid() bool { - if e.Errors > 0 { - return false - } - return true -} - -// Try to find the token in an http.Request. -// This method will call ParseMultipartForm if there's no token in the header. -// Currently, it looks in the Authorization header as well as -// looking for an 'access_token' request parameter in req.Form. 
-func ParseFromRequest(req *http.Request, keyFunc Keyfunc) (token *Token, err error) { - - // Look for an Authorization header - if ah := req.Header.Get("Authorization"); ah != "" { - // Should be a bearer token - if len(ah) > 6 && strings.ToUpper(ah[0:6]) == "BEARER" { - return Parse(ah[7:], keyFunc) - } - } - - // Look for "access_token" parameter - req.ParseMultipartForm(10e6) - if tokStr := req.Form.Get("access_token"); tokStr != "" { - return Parse(tokStr, keyFunc) - } - - return nil, ErrNoTokenInRequest - -} - -// Encode JWT specific base64url encoding with padding stripped -func EncodeSegment(seg []byte) string { - return strings.TrimRight(base64.URLEncoding.EncodeToString(seg), "=") -} - -// Decode JWT specific base64url encoding with padding stripped -func DecodeSegment(seg string) ([]byte, error) { - if l := len(seg) % 4; l > 0 { - seg += strings.Repeat("=", 4-l) - } - - return base64.URLEncoding.DecodeString(seg) -} diff --git a/vendor/github.com/dgrijalva/jwt-go/rsa.go b/vendor/github.com/dgrijalva/jwt-go/rsa.go deleted file mode 100644 index cddffce..0000000 --- a/vendor/github.com/dgrijalva/jwt-go/rsa.go +++ /dev/null @@ -1,114 +0,0 @@ -package jwt - -import ( - "crypto" - "crypto/rand" - "crypto/rsa" -) - -// Implements the RSA family of signing methods signing methods -type SigningMethodRSA struct { - Name string - Hash crypto.Hash -} - -// Specific instances for RS256 and company -var ( - SigningMethodRS256 *SigningMethodRSA - SigningMethodRS384 *SigningMethodRSA - SigningMethodRS512 *SigningMethodRSA -) - -func init() { - // RS256 - SigningMethodRS256 = &SigningMethodRSA{"RS256", crypto.SHA256} - RegisterSigningMethod(SigningMethodRS256.Alg(), func() SigningMethod { - return SigningMethodRS256 - }) - - // RS384 - SigningMethodRS384 = &SigningMethodRSA{"RS384", crypto.SHA384} - RegisterSigningMethod(SigningMethodRS384.Alg(), func() SigningMethod { - return SigningMethodRS384 - }) - - // RS512 - SigningMethodRS512 = &SigningMethodRSA{"RS512", crypto.SHA512} - RegisterSigningMethod(SigningMethodRS512.Alg(), func() SigningMethod { - return SigningMethodRS512 - }) -} - -func (m *SigningMethodRSA) Alg() string { - return m.Name -} - -// Implements the Verify method from SigningMethod -// For this signing method, must be either a PEM encoded PKCS1 or PKCS8 RSA public key as -// []byte, or an rsa.PublicKey structure. -func (m *SigningMethodRSA) Verify(signingString, signature string, key interface{}) error { - var err error - - // Decode the signature - var sig []byte - if sig, err = DecodeSegment(signature); err != nil { - return err - } - - var rsaKey *rsa.PublicKey - - switch k := key.(type) { - case []byte: - if rsaKey, err = ParseRSAPublicKeyFromPEM(k); err != nil { - return err - } - case *rsa.PublicKey: - rsaKey = k - default: - return ErrInvalidKey - } - - // Create hasher - if !m.Hash.Available() { - return ErrHashUnavailable - } - hasher := m.Hash.New() - hasher.Write([]byte(signingString)) - - // Verify the signature - return rsa.VerifyPKCS1v15(rsaKey, m.Hash, hasher.Sum(nil), sig) -} - -// Implements the Sign method from SigningMethod -// For this signing method, must be either a PEM encoded PKCS1 or PKCS8 RSA private key as -// []byte, or an rsa.PrivateKey structure. 
-func (m *SigningMethodRSA) Sign(signingString string, key interface{}) (string, error) { - var err error - var rsaKey *rsa.PrivateKey - - switch k := key.(type) { - case []byte: - if rsaKey, err = ParseRSAPrivateKeyFromPEM(k); err != nil { - return "", err - } - case *rsa.PrivateKey: - rsaKey = k - default: - return "", ErrInvalidKey - } - - // Create the hasher - if !m.Hash.Available() { - return "", ErrHashUnavailable - } - - hasher := m.Hash.New() - hasher.Write([]byte(signingString)) - - // Sign the string and return the encoded bytes - if sigBytes, err := rsa.SignPKCS1v15(rand.Reader, rsaKey, m.Hash, hasher.Sum(nil)); err == nil { - return EncodeSegment(sigBytes), nil - } else { - return "", err - } -} diff --git a/vendor/github.com/dgrijalva/jwt-go/rsa_utils.go b/vendor/github.com/dgrijalva/jwt-go/rsa_utils.go deleted file mode 100644 index 6f3b6ff..0000000 --- a/vendor/github.com/dgrijalva/jwt-go/rsa_utils.go +++ /dev/null @@ -1,68 +0,0 @@ -package jwt - -import ( - "crypto/rsa" - "crypto/x509" - "encoding/pem" - "errors" -) - -var ( - ErrKeyMustBePEMEncoded = errors.New("Invalid Key: Key must be PEM encoded PKCS1 or PKCS8 private key") - ErrNotRSAPrivateKey = errors.New("Key is not a valid RSA private key") -) - -// Parse PEM encoded PKCS1 or PKCS8 private key -func ParseRSAPrivateKeyFromPEM(key []byte) (*rsa.PrivateKey, error) { - var err error - - // Parse PEM block - var block *pem.Block - if block, _ = pem.Decode(key); block == nil { - return nil, ErrKeyMustBePEMEncoded - } - - var parsedKey interface{} - if parsedKey, err = x509.ParsePKCS1PrivateKey(block.Bytes); err != nil { - if parsedKey, err = x509.ParsePKCS8PrivateKey(block.Bytes); err != nil { - return nil, err - } - } - - var pkey *rsa.PrivateKey - var ok bool - if pkey, ok = parsedKey.(*rsa.PrivateKey); !ok { - return nil, ErrNotRSAPrivateKey - } - - return pkey, nil -} - -// Parse PEM encoded PKCS1 or PKCS8 public key -func ParseRSAPublicKeyFromPEM(key []byte) (*rsa.PublicKey, error) { - var err error - - // Parse PEM block - var block *pem.Block - if block, _ = pem.Decode(key); block == nil { - return nil, ErrKeyMustBePEMEncoded - } - - // Parse the key - var parsedKey interface{} - if parsedKey, err = x509.ParsePKIXPublicKey(block.Bytes); err != nil { - if cert, err := x509.ParseCertificate(block.Bytes); err == nil { - parsedKey = cert.PublicKey - } else { - return nil, err - } - } - - var pkey *rsa.PublicKey - var ok bool - if pkey, ok = parsedKey.(*rsa.PublicKey); !ok { - return nil, ErrNotRSAPrivateKey - } - - return pkey, nil -} diff --git a/vendor/github.com/dgrijalva/jwt-go/signing_method.go b/vendor/github.com/dgrijalva/jwt-go/signing_method.go deleted file mode 100644 index 109dd0f..0000000 --- a/vendor/github.com/dgrijalva/jwt-go/signing_method.go +++ /dev/null @@ -1,24 +0,0 @@ -package jwt - -var signingMethods = map[string]func() SigningMethod{} - -// Signing method -type SigningMethod interface { - Verify(signingString, signature string, key interface{}) error - Sign(signingString string, key interface{}) (string, error) - Alg() string -} - -// Register the "alg" name and a factory function for signing method. 
-// This is typically done during init() in the method's implementation -func RegisterSigningMethod(alg string, f func() SigningMethod) { - signingMethods[alg] = f -} - -// Get a signing method from an "alg" string -func GetSigningMethod(alg string) (method SigningMethod) { - if methodF, ok := signingMethods[alg]; ok { - method = methodF() - } - return -} diff --git a/vendor/github.com/elazarl/go-bindata-assetfs/LICENSE b/vendor/github.com/elazarl/go-bindata-assetfs/LICENSE deleted file mode 100644 index 5782c72..0000000 --- a/vendor/github.com/elazarl/go-bindata-assetfs/LICENSE +++ /dev/null @@ -1,23 +0,0 @@ -Copyright (c) 2014, Elazar Leibovich -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - -* Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. - -* Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE -FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/elazarl/go-bindata-assetfs/README.md b/vendor/github.com/elazarl/go-bindata-assetfs/README.md deleted file mode 100644 index 795d3d3..0000000 --- a/vendor/github.com/elazarl/go-bindata-assetfs/README.md +++ /dev/null @@ -1,46 +0,0 @@ -# go-bindata-assetfs - -Serve embedded files from [jteeuwen/go-bindata](https://github.com/jteeuwen/go-bindata) with `net/http`. - -[GoDoc](http://godoc.org/github.com/elazarl/go-bindata-assetfs) - -### Installation - -Install with - - $ go get github.com/jteeuwen/go-bindata/... - $ go get github.com/elazarl/go-bindata-assetfs/... - -### Creating embedded data - -Usage is identical to [jteeuwen/go-bindata](https://github.com/jteeuwen/go-bindata) usage, -instead of running `go-bindata` run `go-bindata-assetfs`. - -The tool will create a `bindata_assetfs.go` file, which contains the embedded data. - -A typical use case is - - $ go-bindata-assetfs data/... - -### Using assetFS in your code - -The generated file provides an `assetFS()` function that returns a `http.Filesystem` -wrapping the embedded files. What you usually want to do is: - - http.Handle("/", http.FileServer(assetFS())) - -This would run an HTTP server serving the embedded files. - -## Without running binary tool - -You can always just run the `go-bindata` tool, and then - -use - - import "github.com/elazarl/go-bindata-assetfs" - ... - http.Handle("/", - http.FileServer( - &assetfs.AssetFS{Asset: Asset, AssetDir: AssetDir, Prefix: "data"})) - -to serve files embedded from the `data` directory. 
diff --git a/vendor/github.com/elazarl/go-bindata-assetfs/assetfs.go b/vendor/github.com/elazarl/go-bindata-assetfs/assetfs.go deleted file mode 100644 index 69d58f8..0000000 --- a/vendor/github.com/elazarl/go-bindata-assetfs/assetfs.go +++ /dev/null @@ -1,147 +0,0 @@ -package assetfs - -import ( - "bytes" - "errors" - "io" - "io/ioutil" - "net/http" - "os" - "path" - "path/filepath" - "time" -) - -var ( - fileTimestamp = time.Now() -) - -// FakeFile implements os.FileInfo interface for a given path and size -type FakeFile struct { - // Path is the path of this file - Path string - // Dir marks of the path is a directory - Dir bool - // Len is the length of the fake file, zero if it is a directory - Len int64 -} - -func (f *FakeFile) Name() string { - _, name := filepath.Split(f.Path) - return name -} - -func (f *FakeFile) Mode() os.FileMode { - mode := os.FileMode(0644) - if f.Dir { - return mode | os.ModeDir - } - return mode -} - -func (f *FakeFile) ModTime() time.Time { - return fileTimestamp -} - -func (f *FakeFile) Size() int64 { - return f.Len -} - -func (f *FakeFile) IsDir() bool { - return f.Mode().IsDir() -} - -func (f *FakeFile) Sys() interface{} { - return nil -} - -// AssetFile implements http.File interface for a no-directory file with content -type AssetFile struct { - *bytes.Reader - io.Closer - FakeFile -} - -func NewAssetFile(name string, content []byte) *AssetFile { - return &AssetFile{ - bytes.NewReader(content), - ioutil.NopCloser(nil), - FakeFile{name, false, int64(len(content))}} -} - -func (f *AssetFile) Readdir(count int) ([]os.FileInfo, error) { - return nil, errors.New("not a directory") -} - -func (f *AssetFile) Size() int64 { - return f.FakeFile.Size() -} - -func (f *AssetFile) Stat() (os.FileInfo, error) { - return f, nil -} - -// AssetDirectory implements http.File interface for a directory -type AssetDirectory struct { - AssetFile - ChildrenRead int - Children []os.FileInfo -} - -func NewAssetDirectory(name string, children []string, fs *AssetFS) *AssetDirectory { - fileinfos := make([]os.FileInfo, 0, len(children)) - for _, child := range children { - _, err := fs.AssetDir(filepath.Join(name, child)) - fileinfos = append(fileinfos, &FakeFile{child, err == nil, 0}) - } - return &AssetDirectory{ - AssetFile{ - bytes.NewReader(nil), - ioutil.NopCloser(nil), - FakeFile{name, true, 0}, - }, - 0, - fileinfos} -} - -func (f *AssetDirectory) Readdir(count int) ([]os.FileInfo, error) { - if count <= 0 { - return f.Children, nil - } - if f.ChildrenRead+count > len(f.Children) { - count = len(f.Children) - f.ChildrenRead - } - rv := f.Children[f.ChildrenRead : f.ChildrenRead+count] - f.ChildrenRead += count - return rv, nil -} - -func (f *AssetDirectory) Stat() (os.FileInfo, error) { - return f, nil -} - -// AssetFS implements http.FileSystem, allowing -// embedded files to be served from net/http package. 
-type AssetFS struct { - // Asset should return content of file in path if exists - Asset func(path string) ([]byte, error) - // AssetDir should return list of files in the path - AssetDir func(path string) ([]string, error) - // Prefix would be prepended to http requests - Prefix string -} - -func (fs *AssetFS) Open(name string) (http.File, error) { - name = path.Join(fs.Prefix, name) - if len(name) > 0 && name[0] == '/' { - name = name[1:] - } - if children, err := fs.AssetDir(name); err == nil { - return NewAssetDirectory(name, children, fs), nil - } - b, err := fs.Asset(name) - if err != nil { - return nil, err - } - return NewAssetFile(name, b), nil -} diff --git a/vendor/github.com/elazarl/go-bindata-assetfs/doc.go b/vendor/github.com/elazarl/go-bindata-assetfs/doc.go deleted file mode 100644 index a664249..0000000 --- a/vendor/github.com/elazarl/go-bindata-assetfs/doc.go +++ /dev/null @@ -1,13 +0,0 @@ -// assetfs allows packages to serve static content embedded -// with the go-bindata tool with the standard net/http package. -// -// See https://github.com/jteeuwen/go-bindata for more information -// about embedding binary data with go-bindata. -// -// Usage example, after running -// $ go-bindata data/... -// use: -// http.Handle("/", -// http.FileServer( -// &assetfs.AssetFS{Asset: Asset, AssetDir: AssetDir, Prefix: "data"})) -package assetfs diff --git a/vendor/github.com/franela/goblin/LICENSE b/vendor/github.com/franela/goblin/LICENSE deleted file mode 100644 index b2d6522..0000000 --- a/vendor/github.com/franela/goblin/LICENSE +++ /dev/null @@ -1,19 +0,0 @@ -Copyright (c) 2013 Marcos Lilljedahl and Jonathan Leibiusky - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. 
diff --git a/vendor/github.com/franela/goblin/Makefile b/vendor/github.com/franela/goblin/Makefile deleted file mode 100644 index 66763dc..0000000 --- a/vendor/github.com/franela/goblin/Makefile +++ /dev/null @@ -1,3 +0,0 @@ -export GOPATH=$(shell pwd) -test: - go test -v diff --git a/vendor/github.com/franela/goblin/README.md b/vendor/github.com/franela/goblin/README.md deleted file mode 100644 index 2972b15..0000000 --- a/vendor/github.com/franela/goblin/README.md +++ /dev/null @@ -1,132 +0,0 @@ -[![Build Status](https://travis-ci.org/franela/goblin.png?branch=master)](https://travis-ci.org/franela/goblin) -Goblin -====== - -![](https://github.com/marcosnils/goblin/blob/master/goblin_logo.jpg?raw=true) - -A [Mocha](http://visionmedia.github.io/mocha/) like BDD testing framework for Go - -No extensive documentation nor complicated steps to get it running - -Run tests as usual with `go test` - -Colorful reports and beautiful syntax - - -Why Goblin? ------------ - -Inspired by the flexibility and simplicity of Node BDD and frustrated by the -rigorousness of Go way of testing, we wanted to bring a new tool to -write self-describing and comprehensive code. - - - -What do I get with it? ----------------------- - -- Preserve the exact same syntax and behaviour as Node's Mocha -- Nest as many `Describe` and `It` blocks as you want -- Use `Before`, `BeforeEach`, `After` and `AfterEach` for setup and teardown your tests -- No need to remember confusing parameters in `Describe` and `It` blocks -- Use a declarative and expressive language to write your tests -- Plug different assertion libraries ([Gomega](https://github.com/onsi/gomega) supported so far) -- Skip your tests the same way as you would do in Mocha -- Automatic terminal support for colored outputs -- Two line setup is all you need to get up running - - - -How do I use it? ----------------- - -Since ```go test``` is not currently extensive, you will have to hook Goblin to it. You do that by -adding a single test method in your test file. All your goblin tests will be implemented inside this function. - -```go -package foobar - -import ( - "testing" - . "github.com/franela/goblin" -) - -func Test(t *testing.T) { - g := Goblin(t) - g.Describe("Numbers", func() { - g.It("Should add two numbers ", func() { - g.Assert(1+1).Equal(2) - }) - g.It("Should match equal numbers", func() { - g.Assert(2).Equal(4) - }) - g.It("Should substract two numbers") - }) -} -``` - -Ouput will be something like: - -![](https://github.com/marcosnils/goblin/blob/master/goblin_output.png?raw=true) - -Nice and easy, right? - -Can I do asynchronous tests? ----------------------------- - -Yes! Goblin will help you to test asynchronous things, like goroutines, etc. You just need to add a ```done``` parameter to the handler function of your ```It```. This handler function should be called when your test passes. - -```go - ... - g.Describe("Numbers", func() { - g.It("Should add two numbers asynchronously", func(done Done) { - go func() { - g.Assert(1+1).Equal(2) - done() - }() - }) - }) - ... -``` - -Goblin will wait for the ```done``` call, a ```Fail``` call or any false assertion. - -How do I use it with Gomega? ----------------------------- - -Gomega is a nice assertion framework. But it doesn't provide a nice way to hook it to testing frameworks. It should just panic instead of requiring a fail function. There is an issue about that [here](https://github.com/onsi/gomega/issues/5). 
-While this is being discussed and hopefully fixed, the way to use Gomega with Goblin is: - -```go -package foobar - -import ( - "testing" - . "github.com/franela/goblin" - . "github.com/onsi/gomega" -) - -func Test(t *testing.T) { - g := Goblin(t) - - //special hook for gomega - RegisterFailHandler(func(m string, _ ...int) { g.Fail(m) }) - - g.Describe("lala", func() { - g.It("lslslslsls", func() { - Expect(1).To(Equal(10)) - }) - }) -} -``` - -TODO: ------ - -We do have a couple of [issues](https://github.com/franela/goblin/issues) pending we'll be addressing soon. But feel free to -contribute and send us PRs (with tests please :smile:). - -Contributions: ------------- - -Special thanks to [Leandro Reox](https://github.com/leandroreox) (Leitan) for the goblin logo. diff --git a/vendor/github.com/franela/goblin/assertions.go b/vendor/github.com/franela/goblin/assertions.go deleted file mode 100644 index c493358..0000000 --- a/vendor/github.com/franela/goblin/assertions.go +++ /dev/null @@ -1,59 +0,0 @@ -package goblin - -import ( - "fmt" - "reflect" - "strings" -) - -type Assertion struct { - src interface{} - fail func(interface{}) -} - -func objectsAreEqual(a, b interface{}) bool { - if reflect.TypeOf(a) != reflect.TypeOf(b) { - return false - } - - if reflect.DeepEqual(a, b) { - return true - } - - if fmt.Sprintf("%#v", a) == fmt.Sprintf("%#v", b) { - return true - } - - return false -} - -func formatMessages(messages ...string) string { - if len(messages) > 0 { - return ", " + strings.Join(messages, " ") - } - return "" -} - -func (a *Assertion) Eql(dst interface{}) { - a.Equal(dst) -} - -func (a *Assertion) Equal(dst interface{}) { - if !objectsAreEqual(a.src, dst) { - a.fail(fmt.Sprintf("%v %s %v", a.src, "does not equal", dst)) - } -} - -func (a *Assertion) IsTrue(messages ...string) { - if !objectsAreEqual(a.src, true) { - message := fmt.Sprintf("%v %s%s", a.src, "expected false to be truthy", formatMessages(messages...)) - a.fail(message) - } -} - -func (a *Assertion) IsFalse(messages ...string) { - if !objectsAreEqual(a.src, false) { - message := fmt.Sprintf("%v %s%s", a.src, "expected true to be falsey", formatMessages(messages...)) - a.fail(message) - } -} diff --git a/vendor/github.com/franela/goblin/goblin.go b/vendor/github.com/franela/goblin/goblin.go deleted file mode 100644 index 891e8b0..0000000 --- a/vendor/github.com/franela/goblin/goblin.go +++ /dev/null @@ -1,280 +0,0 @@ -package goblin - -import ( - "flag" - "fmt" - "runtime" - "testing" - "time" -) - -type Done func(error ...interface{}) - -type Runnable interface { - run(*G) bool -} - -func (g *G) Describe(name string, h func()) { - d := &Describe{name: name, h: h, parent: g.parent} - - if d.parent != nil { - d.parent.children = append(d.parent.children, Runnable(d)) - } - - g.parent = d - - h() - - g.parent = d.parent - - if g.parent == nil { - g.reporter.begin() - if d.run(g) { - g.t.Fail() - } - g.reporter.end() - } -} - -type Describe struct { - name string - h func() - children []Runnable - befores []func() - afters []func() - afterEach []func() - beforeEach []func() - hasTests bool - parent *Describe -} - -func (d *Describe) runBeforeEach() { - if d.parent != nil { - d.parent.runBeforeEach() - } - - for _, b := range d.beforeEach { - b() - } -} - -func (d *Describe) runAfterEach() { - - if d.parent != nil { - d.parent.runAfterEach() - } - - for _, a := range d.afterEach { - a() - } -} - -func (d *Describe) run(g *G) bool { - g.reporter.beginDescribe(d.name) - - failed := "" - - if d.hasTests { - for _, b := 
range d.befores { - b() - } - } - - for _, r := range d.children { - if r.run(g) { - failed = "true" - } - } - - if d.hasTests { - for _, a := range d.afters { - a() - } - } - - g.reporter.endDescribe() - - return failed != "" -} - -type Failure struct { - stack []string - testName string - message string -} - -type It struct { - h interface{} - name string - parent *Describe - failure *Failure - reporter Reporter - isAsync bool -} - -func (it *It) run(g *G) bool { - g.currentIt = it - - if it.h == nil { - g.reporter.itIsPending(it.name) - return false - } - //TODO: should handle errors for beforeEach - it.parent.runBeforeEach() - - runIt(g, it.h) - - it.parent.runAfterEach() - - failed := false - if it.failure != nil { - failed = true - } - - if failed { - g.reporter.itFailed(it.name) - g.reporter.failure(it.failure) - } else { - g.reporter.itPassed(it.name) - } - return failed -} - -func (it *It) failed(msg string, stack []string) { - it.failure = &Failure{stack: stack, message: msg, testName: it.parent.name + " " + it.name} -} - -var timeout *time.Duration -var isTty *bool - -func init() { - //Flag parsing - timeout = flag.Duration("goblin.timeout", 5*time.Second, "Sets default timeouts for all tests") - isTty = flag.Bool("goblin.tty", true, "Sets the default output format (color / monochrome)") - flag.Parse() -} - -func Goblin(t *testing.T, arguments ...string) *G { - var gobtimeout = timeout - if arguments != nil { - //Programatic flags - var args = flag.NewFlagSet("Goblin arguments", flag.ContinueOnError) - gobtimeout = args.Duration("goblin.timeout", 5*time.Second, "Sets timeouts for tests") - args.Parse(arguments) - } - g := &G{t: t, timeout: *gobtimeout} - var fancy TextFancier - if *isTty { - fancy = &TerminalFancier{} - } else { - fancy = &Monochrome{} - } - - g.reporter = Reporter(&DetailedReporter{fancy: fancy}) - return g -} - -func runIt(g *G, h interface{}) { - defer timeTrack(time.Now(), g) - g.timedOut = false - g.shouldContinue = make(chan bool) - if call, ok := h.(func()); ok { - // the test is synchronous - go func() { call(); g.shouldContinue <- true }() - } else if call, ok := h.(func(Done)); ok { - doneCalled := 0 - go func() { - call(func(msg ...interface{}) { - if len(msg) > 0 { - g.Fail(msg) - } else { - doneCalled++ - if doneCalled > 1 { - g.Fail("Done called multiple times") - } - g.shouldContinue <- true - } - }) - }() - } else { - panic("Not implemented.") - } - select { - case <-g.shouldContinue: - case <-time.After(g.timeout): - fmt.Println("Timedout") - //Set to nil as it shouldn't continue - g.shouldContinue = nil - g.timedOut = true - g.Fail("Test exceeded " + fmt.Sprintf("%s", g.timeout)) - } -} - -type G struct { - t *testing.T - parent *Describe - currentIt *It - timeout time.Duration - reporter Reporter - timedOut bool - shouldContinue chan bool -} - -func (g *G) SetReporter(r Reporter) { - g.reporter = r -} - -func (g *G) It(name string, h ...interface{}) { - it := &It{name: name, parent: g.parent, reporter: g.reporter} - notifyParents(g.parent) - if len(h) > 0 { - it.h = h[0] - } - g.parent.children = append(g.parent.children, Runnable(it)) -} - -func notifyParents(d *Describe) { - d.hasTests = true - if d.parent != nil { - notifyParents(d.parent) - } -} - -func (g *G) Before(h func()) { - g.parent.befores = append(g.parent.befores, h) -} - -func (g *G) BeforeEach(h func()) { - g.parent.beforeEach = append(g.parent.beforeEach, h) -} - -func (g *G) After(h func()) { - g.parent.afters = append(g.parent.afters, h) -} - -func (g *G) AfterEach(h func()) 
{ - g.parent.afterEach = append(g.parent.afterEach, h) -} - -func (g *G) Assert(src interface{}) *Assertion { - return &Assertion{src: src, fail: g.Fail} -} - -func timeTrack(start time.Time, g *G) { - g.reporter.itTook(time.Since(start)) -} - -func (g *G) Fail(error interface{}) { - //Skips 7 stacks due to the functions between the stack and the test - stack := ResolveStack(4) - message := fmt.Sprintf("%v", error) - g.currentIt.failed(message, stack) - if g.shouldContinue != nil { - g.shouldContinue <- true - } - - if !g.timedOut { - //Stop test function execution - runtime.Goexit() - } -} diff --git a/vendor/github.com/franela/goblin/goblin_logo.jpg b/vendor/github.com/franela/goblin/goblin_logo.jpg deleted file mode 100644 index 44534f2..0000000 Binary files a/vendor/github.com/franela/goblin/goblin_logo.jpg and /dev/null differ diff --git a/vendor/github.com/franela/goblin/goblin_output.png b/vendor/github.com/franela/goblin/goblin_output.png deleted file mode 100644 index ce9396e..0000000 Binary files a/vendor/github.com/franela/goblin/goblin_output.png and /dev/null differ diff --git a/vendor/github.com/franela/goblin/mono_reporter.go b/vendor/github.com/franela/goblin/mono_reporter.go deleted file mode 100644 index 04d6e5e..0000000 --- a/vendor/github.com/franela/goblin/mono_reporter.go +++ /dev/null @@ -1,26 +0,0 @@ -package goblin - -import () - -type Monochrome struct { -} - -func (self *Monochrome) Red(text string) string { - return "!" + text -} - -func (self *Monochrome) Gray(text string) string { - return text -} - -func (self *Monochrome) Cyan(text string) string { - return text -} - -func (self *Monochrome) WithCheck(text string) string { - return ">>>" + text -} - -func (self *Monochrome) Green(text string) string { - return text -} diff --git a/vendor/github.com/franela/goblin/reporting.go b/vendor/github.com/franela/goblin/reporting.go deleted file mode 100644 index 1d67d66..0000000 --- a/vendor/github.com/franela/goblin/reporting.go +++ /dev/null @@ -1,137 +0,0 @@ -package goblin - -import ( - "fmt" - "strconv" - "strings" - "time" -) - -type Reporter interface { - beginDescribe(string) - endDescribe() - begin() - end() - failure(*Failure) - itTook(time.Duration) - itFailed(string) - itPassed(string) - itIsPending(string) -} - -type TextFancier interface { - Red(text string) string - Gray(text string) string - Cyan(text string) string - Green(text string) string - WithCheck(text string) string -} - -type DetailedReporter struct { - level, failed, passed, pending int - failures []*Failure - executionTime, totalExecutionTime time.Duration - fancy TextFancier -} - -func (r *DetailedReporter) SetTextFancier(f TextFancier) { - r.fancy = f -} - -type TerminalFancier struct { -} - -func (self *TerminalFancier) Red(text string) string { - return "\033[31m" + text + "\033[0m" -} - -func (self *TerminalFancier) Gray(text string) string { - return "\033[90m" + text + "\033[0m" -} - -func (self *TerminalFancier) Cyan(text string) string { - return "\033[36m" + text + "\033[0m" -} - -func (self *TerminalFancier) Green(text string) string { - return "\033[32m" + text + "\033[0m" -} - -func (self *TerminalFancier) WithCheck(text string) string { - return "\033[32m\u2713\033[0m " + text -} - -func (r *DetailedReporter) getSpace() string { - return strings.Repeat(" ", (r.level+1)*2) -} - -func (r *DetailedReporter) failure(failure *Failure) { - r.failures = append(r.failures, failure) -} - -func (r *DetailedReporter) print(text string) { - fmt.Printf("%v%v\n", r.getSpace(), text) -} - 
-func (r *DetailedReporter) printWithCheck(text string) { - fmt.Printf("%v%v\n", r.getSpace(), r.fancy.WithCheck(text)) -} - -func (r *DetailedReporter) beginDescribe(name string) { - fmt.Println("") - r.print(name) - r.level++ -} - -func (r *DetailedReporter) endDescribe() { - r.level-- -} - -func (r *DetailedReporter) itTook(duration time.Duration) { - r.executionTime = duration - r.totalExecutionTime += duration -} - -func (r *DetailedReporter) itFailed(name string) { - r.failed++ - r.print(r.fancy.Red(strconv.Itoa(r.failed) + ") " + name)) -} - -func (r *DetailedReporter) itPassed(name string) { - r.passed++ - r.printWithCheck(r.fancy.Gray(name)) -} - -func (r *DetailedReporter) itIsPending(name string) { - r.pending++ - r.print(r.fancy.Cyan("- " + name)) -} - -func (r *DetailedReporter) begin() { -} - -func (r *DetailedReporter) end() { - comp := fmt.Sprintf("%d tests complete", r.passed) - t := fmt.Sprintf("(%d ms)", r.totalExecutionTime/time.Millisecond) - - //fmt.Printf("\n\n \033[32m%d tests complete\033[0m \033[90m(%d ms)\033[0m\n", r.passed, r.totalExecutionTime/time.Millisecond) - fmt.Printf("\n\n %v %v\n", r.fancy.Green(comp), r.fancy.Gray(t)) - - if r.pending > 0 { - pend := fmt.Sprintf("%d test(s) pending", r.pending) - fmt.Printf(" %v\n\n", r.fancy.Cyan(pend)) - } - - if len(r.failures) > 0 { - fmt.Printf("%s \n\n", r.fancy.Red(fmt.Sprintf(" %d tests failed:", len(r.failures)))) - - } - - for i, failure := range r.failures { - fmt.Printf(" %d) %s:\n\n", i+1, failure.testName) - fmt.Printf(" %s\n", r.fancy.Red(failure.message)) - for _, stackItem := range failure.stack { - fmt.Printf(" %s\n", r.fancy.Gray(stackItem)) - } - } -} diff --git a/vendor/github.com/franela/goblin/resolver.go b/vendor/github.com/franela/goblin/resolver.go deleted file mode 100644 index 125fcec..0000000 --- a/vendor/github.com/franela/goblin/resolver.go +++ /dev/null @@ -1,21 +0,0 @@ -package goblin - -import ( - "runtime/debug" - "strings" -) - -func ResolveStack(skip int) []string { - return cleanStack(debug.Stack(), skip) -} - -func cleanStack(stack []byte, skip int) []string { - arrayStack := strings.Split(string(stack), "\n") - var finalStack []string - for i := skip; i < len(arrayStack); i++ { - if strings.Contains(arrayStack[i], ".go") { - finalStack = append(finalStack, arrayStack[i]) - } - } - return finalStack -} diff --git a/vendor/github.com/gin-gonic/contrib/ginrus/ginrus.go b/vendor/github.com/gin-gonic/contrib/ginrus/ginrus.go deleted file mode 100644 index 464efec..0000000 --- a/vendor/github.com/gin-gonic/contrib/ginrus/ginrus.go +++ /dev/null @@ -1,51 +0,0 @@ -// Package ginrus provides log handling using logrus package. -// -// Based on github.com/stephenmuss/ginerus but adds more options. -package ginrus - -import ( - "time" - - "github.com/Sirupsen/logrus" - "github.com/gin-gonic/gin" -) - -// Ginrus returns a gin.HandlerFunc (middleware) that logs requests using logrus. -// -// Requests with errors are logged using logrus.Error(). -// Requests without errors are logged using logrus.Info(). -// -// It receives: -// 1. A time package format string (e.g. time.RFC3339). -// 2. A boolean stating whether to use UTC time zone or local. 
-func Ginrus(logger *logrus.Logger, timeFormat string, utc bool) gin.HandlerFunc { - return func(c *gin.Context) { - start := time.Now() - // some evil middlewares modify this values - path := c.Request.URL.Path - c.Next() - - end := time.Now() - latency := end.Sub(start) - if utc { - end = end.UTC() - } - - entry := logger.WithFields(logrus.Fields{ - "status": c.Writer.Status(), - "method": c.Request.Method, - "path": path, - "ip": c.ClientIP(), - "latency": latency, - "user-agent": c.Request.UserAgent(), - "time": end.Format(timeFormat), - }) - - if len(c.Errors) > 0 { - // Append error field if this is an erroneous request. - entry.Error(c.Errors.String()) - } else { - entry.Info() - } - } -} diff --git a/vendor/github.com/gin-gonic/gin/AUTHORS.md b/vendor/github.com/gin-gonic/gin/AUTHORS.md deleted file mode 100644 index 2feaf46..0000000 --- a/vendor/github.com/gin-gonic/gin/AUTHORS.md +++ /dev/null @@ -1,229 +0,0 @@ -List of all the awesome people working to make Gin the best Web Framework in Go. - - - -##gin 0.x series authors - -**Maintainer:** Manu Martinez-Almeida (@manucorporat), Javier Provecho (@javierprovecho) - -People and companies, who have contributed, in alphabetical order. - -**@858806258 (杰哥)** -- Fix typo in example - - -**@achedeuzot (Klemen Sever)** -- Fix newline debug printing - - -**@adammck (Adam Mckaig)** -- Add MIT license - - -**@AlexanderChen1989 (Alexander)** -- Typos in README - - -**@alexanderdidenko (Aleksandr Didenko)** -- Add support multipart/form-data - - -**@alexandernyquist (Alexander Nyquist)** -- Using template.Must to fix multiple return issue -- ★ Added support for OPTIONS verb -- ★ Setting response headers before calling WriteHeader -- Improved documentation for model binding -- ★ Added Content.Redirect() -- ★ Added tons of Unit tests - - -**@austinheap (Austin Heap)** -- Added travis CI integration - - -**@andredublin (Andre Dublin)** -- Fix typo in comment - - -**@bredov (Ludwig Valda Vasquez)** -- Fix html templating in debug mode - - -**@bluele (Jun Kimura)** -- Fixes code examples in README - - -**@chad-russell** -- ★ Support for serializing gin.H into XML - - -**@dickeyxxx (Jeff Dickey)** -- Typos in README -- Add example about serving static files - - -**@donileo (Adonis)** -- Add NoMethod handler - - -**@dutchcoders (DutchCoders)** -- ★ Fix security bug that allows client to spoof ip -- Fix typo. r.HTMLTemplates -> SetHTMLTemplate - - -**@el3ctro- (Joshua Loper)** -- Fix typo in example - - -**@ethankan (Ethan Kan)** -- Unsigned integers in binding - - -**(Evgeny Persienko)** -- Validate sub structures - - -**@frankbille (Frank Bille)** -- Add support for HTTP Realm Auth - - -**@fmd (Fareed Dudhia)** -- Fix typo. 
SetHTTPTemplate -> SetHTMLTemplate - - -**@ironiridis (Christopher Harrington)** -- Remove old reference - - -**@jammie-stackhouse (Jamie Stackhouse)** -- Add more shortcuts for router methods - - -**@jasonrhansen** -- Fix spelling and grammar errors in documentation - - -**@JasonSoft (Jason Lee)** -- Fix typo in comment - - -**@joiggama (Ignacio Galindo)** -- Add utf-8 charset header on renders - - -**@julienschmidt (Julien Schmidt)** -- gofmt the code examples - - -**@kelcecil (Kel Cecil)** -- Fix readme typo - - -**@kyledinh (Kyle Dinh)** -- Adds RunTLS() - - -**@LinusU (Linus Unnebäck)** -- Small fixes in README - - -**@loongmxbt (Saint Asky)** -- Fix typo in example - - -**@lucas-clemente (Lucas Clemente)** -- ★ work around path.Join removing trailing slashes from routes - - -**@mattn (Yasuhiro Matsumoto)** -- Improve color logger - - -**@mdigger (Dmitry Sedykh)** -- Fixes Form binding when content-type is x-www-form-urlencoded -- No repeat call c.Writer.Status() in gin.Logger -- Fixes Content-Type for json render - - -**@mirzac (Mirza Ceric)** -- Fix debug printing - - -**@mopemope (Yutaka Matsubara)** -- ★ Adds Godep support (Dependencies Manager) -- Fix variadic parameter in the flexible render API -- Fix Corrupted plain render -- Add Pluggable View Renderer Example - - -**@msemenistyi (Mykyta Semenistyi)** -- update Readme.md. Add code to String method - - -**@msoedov (Sasha Myasoedov)** -- ★ Adds tons of unit tests. - - -**@ngerakines (Nick Gerakines)** -- ★ Improves API, c.GET() doesn't panic -- Adds MustGet() method - - -**@r8k (Rajiv Kilaparti)** -- Fix Port usage in README. - - -**@rayrod2030 (Ray Rodriguez)** -- Fix typo in example - - -**@rns** -- Fix typo in example - - -**@RobAWilkinson (Robert Wilkinson)** -- Add example of forms and params - - -**@rogierlommers (Rogier Lommers)** -- Add updated static serve example - - -**@se77en (Damon Zhao)** -- Improve color logging - - -**@silasb (Silas Baronda)** -- Fixing quotes in README - - -**@SkuliOskarsson (Skuli Oskarsson)** -- Fixes some texts in README II - - -**@slimmy (Jimmy Pettersson)** -- Added messages for required bindings - - -**@smira (Andrey Smirnov)** -- Add support for ignored/unexported fields in binding - - -**@superalsrk (SRK.Lyu)** -- Update httprouter godeps - - -**@tebeka (Miki Tebeka)** -- Use net/http constants instead of numeric values - - -**@techjanitor** -- Update context.go reserved IPs - - -**@yosssi (Keiji Yoshida)** -- Fix link in README - - -**@yuyabee** -- Fixed README \ No newline at end of file diff --git a/vendor/github.com/gin-gonic/gin/BENCHMARKS.md b/vendor/github.com/gin-gonic/gin/BENCHMARKS.md deleted file mode 100644 index 181f75b..0000000 --- a/vendor/github.com/gin-gonic/gin/BENCHMARKS.md +++ /dev/null @@ -1,298 +0,0 @@ -**Machine:** intel i7 ivy bridge quad-core. 8GB RAM. 
-**Date:** June 4th, 2015 -[https://github.com/gin-gonic/go-http-routing-benchmark](https://github.com/gin-gonic/go-http-routing-benchmark) - -``` -BenchmarkAce_Param 5000000 372 ns/op 32 B/op 1 allocs/op -BenchmarkBear_Param 1000000 1165 ns/op 424 B/op 5 allocs/op -BenchmarkBeego_Param 1000000 2440 ns/op 720 B/op 10 allocs/op -BenchmarkBone_Param 1000000 1067 ns/op 384 B/op 3 allocs/op -BenchmarkDenco_Param 5000000 240 ns/op 32 B/op 1 allocs/op -BenchmarkEcho_Param 10000000 130 ns/op 0 B/op 0 allocs/op -BenchmarkGin_Param 10000000 133 ns/op 0 B/op 0 allocs/op -BenchmarkGocraftWeb_Param 1000000 1826 ns/op 656 B/op 9 allocs/op -BenchmarkGoji_Param 2000000 957 ns/op 336 B/op 2 allocs/op -BenchmarkGoJsonRest_Param 1000000 2021 ns/op 657 B/op 14 allocs/op -BenchmarkGoRestful_Param 200000 8825 ns/op 2496 B/op 31 allocs/op -BenchmarkGorillaMux_Param 500000 3340 ns/op 784 B/op 9 allocs/op -BenchmarkHttpRouter_Param 10000000 152 ns/op 32 B/op 1 allocs/op -BenchmarkHttpTreeMux_Param 2000000 717 ns/op 336 B/op 2 allocs/op -BenchmarkKocha_Param 3000000 423 ns/op 56 B/op 3 allocs/op -BenchmarkMacaron_Param 1000000 3410 ns/op 1104 B/op 11 allocs/op -BenchmarkMartini_Param 200000 7101 ns/op 1152 B/op 12 allocs/op -BenchmarkPat_Param 1000000 2040 ns/op 656 B/op 14 allocs/op -BenchmarkPossum_Param 1000000 2048 ns/op 624 B/op 7 allocs/op -BenchmarkR2router_Param 1000000 1144 ns/op 432 B/op 6 allocs/op -BenchmarkRevel_Param 200000 6725 ns/op 1672 B/op 28 allocs/op -BenchmarkRivet_Param 1000000 1121 ns/op 464 B/op 5 allocs/op -BenchmarkTango_Param 1000000 1479 ns/op 256 B/op 10 allocs/op -BenchmarkTigerTonic_Param 1000000 3393 ns/op 992 B/op 19 allocs/op -BenchmarkTraffic_Param 300000 5525 ns/op 1984 B/op 23 allocs/op -BenchmarkVulcan_Param 2000000 924 ns/op 98 B/op 3 allocs/op -BenchmarkZeus_Param 1000000 1084 ns/op 368 B/op 3 allocs/op -BenchmarkAce_Param5 3000000 614 ns/op 160 B/op 1 allocs/op -BenchmarkBear_Param5 1000000 1617 ns/op 469 B/op 5 allocs/op -BenchmarkBeego_Param5 1000000 3373 ns/op 992 B/op 13 allocs/op -BenchmarkBone_Param5 1000000 1478 ns/op 432 B/op 3 allocs/op -BenchmarkDenco_Param5 3000000 570 ns/op 160 B/op 1 allocs/op -BenchmarkEcho_Param5 5000000 256 ns/op 0 B/op 0 allocs/op -BenchmarkGin_Param5 10000000 222 ns/op 0 B/op 0 allocs/op -BenchmarkGocraftWeb_Param5 1000000 2789 ns/op 928 B/op 12 allocs/op -BenchmarkGoji_Param5 1000000 1287 ns/op 336 B/op 2 allocs/op -BenchmarkGoJsonRest_Param5 1000000 3670 ns/op 1105 B/op 17 allocs/op -BenchmarkGoRestful_Param5 200000 10756 ns/op 2672 B/op 31 allocs/op -BenchmarkGorillaMux_Param5 300000 5543 ns/op 912 B/op 9 allocs/op -BenchmarkHttpRouter_Param5 5000000 403 ns/op 160 B/op 1 allocs/op -BenchmarkHttpTreeMux_Param5 1000000 1089 ns/op 336 B/op 2 allocs/op -BenchmarkKocha_Param5 1000000 1682 ns/op 440 B/op 10 allocs/op -BenchmarkMacaron_Param5 300000 4596 ns/op 1376 B/op 14 allocs/op -BenchmarkMartini_Param5 100000 15703 ns/op 1280 B/op 12 allocs/op -BenchmarkPat_Param5 300000 5320 ns/op 1008 B/op 42 allocs/op -BenchmarkPossum_Param5 1000000 2155 ns/op 624 B/op 7 allocs/op -BenchmarkR2router_Param5 1000000 1559 ns/op 432 B/op 6 allocs/op -BenchmarkRevel_Param5 200000 8184 ns/op 2024 B/op 35 allocs/op -BenchmarkRivet_Param5 1000000 1914 ns/op 528 B/op 9 allocs/op -BenchmarkTango_Param5 1000000 3280 ns/op 944 B/op 18 allocs/op -BenchmarkTigerTonic_Param5 200000 11638 ns/op 2519 B/op 53 allocs/op -BenchmarkTraffic_Param5 200000 8941 ns/op 2280 B/op 31 allocs/op -BenchmarkVulcan_Param5 1000000 1279 ns/op 98 B/op 3 allocs/op -BenchmarkZeus_Param5 
1000000 1574 ns/op 416 B/op 3 allocs/op -BenchmarkAce_Param20 1000000 1528 ns/op 640 B/op 1 allocs/op -BenchmarkBear_Param20 300000 4906 ns/op 1633 B/op 5 allocs/op -BenchmarkBeego_Param20 200000 10529 ns/op 3868 B/op 17 allocs/op -BenchmarkBone_Param20 300000 7362 ns/op 2539 B/op 5 allocs/op -BenchmarkDenco_Param20 1000000 1884 ns/op 640 B/op 1 allocs/op -BenchmarkEcho_Param20 2000000 689 ns/op 0 B/op 0 allocs/op -BenchmarkGin_Param20 3000000 545 ns/op 0 B/op 0 allocs/op -BenchmarkGocraftWeb_Param20 200000 9437 ns/op 3804 B/op 16 allocs/op -BenchmarkGoji_Param20 500000 3987 ns/op 1246 B/op 2 allocs/op -BenchmarkGoJsonRest_Param20 100000 12799 ns/op 4492 B/op 21 allocs/op -BenchmarkGoRestful_Param20 100000 19451 ns/op 5244 B/op 33 allocs/op -BenchmarkGorillaMux_Param20 100000 12456 ns/op 3275 B/op 11 allocs/op -BenchmarkHttpRouter_Param20 1000000 1333 ns/op 640 B/op 1 allocs/op -BenchmarkHttpTreeMux_Param20 300000 6490 ns/op 2187 B/op 4 allocs/op -BenchmarkKocha_Param20 300000 5335 ns/op 1808 B/op 27 allocs/op -BenchmarkMacaron_Param20 200000 11325 ns/op 4252 B/op 18 allocs/op -BenchmarkMartini_Param20 20000 64419 ns/op 3644 B/op 14 allocs/op -BenchmarkPat_Param20 50000 24672 ns/op 4888 B/op 151 allocs/op -BenchmarkPossum_Param20 1000000 2085 ns/op 624 B/op 7 allocs/op -BenchmarkR2router_Param20 300000 6809 ns/op 2283 B/op 8 allocs/op -BenchmarkRevel_Param20 100000 16600 ns/op 5551 B/op 54 allocs/op -BenchmarkRivet_Param20 200000 8428 ns/op 2620 B/op 26 allocs/op -BenchmarkTango_Param20 100000 16302 ns/op 8224 B/op 48 allocs/op -BenchmarkTigerTonic_Param20 30000 46828 ns/op 10538 B/op 178 allocs/op -BenchmarkTraffic_Param20 50000 28871 ns/op 7998 B/op 66 allocs/op -BenchmarkVulcan_Param20 1000000 2267 ns/op 98 B/op 3 allocs/op -BenchmarkZeus_Param20 300000 6828 ns/op 2507 B/op 5 allocs/op -BenchmarkAce_ParamWrite 3000000 502 ns/op 40 B/op 2 allocs/op -BenchmarkBear_ParamWrite 1000000 1303 ns/op 424 B/op 5 allocs/op -BenchmarkBeego_ParamWrite 1000000 2489 ns/op 728 B/op 11 allocs/op -BenchmarkBone_ParamWrite 1000000 1181 ns/op 384 B/op 3 allocs/op -BenchmarkDenco_ParamWrite 5000000 315 ns/op 32 B/op 1 allocs/op -BenchmarkEcho_ParamWrite 10000000 237 ns/op 8 B/op 1 allocs/op -BenchmarkGin_ParamWrite 5000000 336 ns/op 0 B/op 0 allocs/op -BenchmarkGocraftWeb_ParamWrite 1000000 2079 ns/op 664 B/op 10 allocs/op -BenchmarkGoji_ParamWrite 1000000 1092 ns/op 336 B/op 2 allocs/op -BenchmarkGoJsonRest_ParamWrite 1000000 3329 ns/op 1136 B/op 19 allocs/op -BenchmarkGoRestful_ParamWrite 200000 9273 ns/op 2504 B/op 32 allocs/op -BenchmarkGorillaMux_ParamWrite 500000 3919 ns/op 792 B/op 10 allocs/op -BenchmarkHttpRouter_ParamWrite 10000000 223 ns/op 32 B/op 1 allocs/op -BenchmarkHttpTreeMux_ParamWrite 2000000 788 ns/op 336 B/op 2 allocs/op -BenchmarkKocha_ParamWrite 3000000 549 ns/op 56 B/op 3 allocs/op -BenchmarkMacaron_ParamWrite 500000 4558 ns/op 1216 B/op 16 allocs/op -BenchmarkMartini_ParamWrite 200000 8850 ns/op 1256 B/op 16 allocs/op -BenchmarkPat_ParamWrite 500000 3679 ns/op 1088 B/op 19 allocs/op -BenchmarkPossum_ParamWrite 1000000 2114 ns/op 624 B/op 7 allocs/op -BenchmarkR2router_ParamWrite 1000000 1320 ns/op 432 B/op 6 allocs/op -BenchmarkRevel_ParamWrite 200000 8048 ns/op 2128 B/op 33 allocs/op -BenchmarkRivet_ParamWrite 1000000 1393 ns/op 472 B/op 6 allocs/op -BenchmarkTango_ParamWrite 2000000 819 ns/op 136 B/op 5 allocs/op -BenchmarkTigerTonic_ParamWrite 300000 5860 ns/op 1440 B/op 25 allocs/op -BenchmarkTraffic_ParamWrite 200000 7429 ns/op 2400 B/op 27 allocs/op -BenchmarkVulcan_ParamWrite 
2000000 972 ns/op 98 B/op 3 allocs/op -BenchmarkZeus_ParamWrite 1000000 1226 ns/op 368 B/op 3 allocs/op -BenchmarkAce_GithubStatic 5000000 294 ns/op 0 B/op 0 allocs/op -BenchmarkBear_GithubStatic 3000000 575 ns/op 88 B/op 3 allocs/op -BenchmarkBeego_GithubStatic 1000000 1561 ns/op 368 B/op 7 allocs/op -BenchmarkBone_GithubStatic 200000 12301 ns/op 2880 B/op 60 allocs/op -BenchmarkDenco_GithubStatic 20000000 74.6 ns/op 0 B/op 0 allocs/op -BenchmarkEcho_GithubStatic 10000000 176 ns/op 0 B/op 0 allocs/op -BenchmarkGin_GithubStatic 10000000 159 ns/op 0 B/op 0 allocs/op -BenchmarkGocraftWeb_GithubStatic 1000000 1116 ns/op 304 B/op 6 allocs/op -BenchmarkGoji_GithubStatic 5000000 413 ns/op 0 B/op 0 allocs/op -BenchmarkGoRestful_GithubStatic 30000 55200 ns/op 3520 B/op 36 allocs/op -BenchmarkGoJsonRest_GithubStatic 1000000 1504 ns/op 337 B/op 12 allocs/op -BenchmarkGorillaMux_GithubStatic 100000 23620 ns/op 464 B/op 8 allocs/op -BenchmarkHttpRouter_GithubStatic 20000000 78.3 ns/op 0 B/op 0 allocs/op -BenchmarkHttpTreeMux_GithubStatic 20000000 84.9 ns/op 0 B/op 0 allocs/op -BenchmarkKocha_GithubStatic 20000000 111 ns/op 0 B/op 0 allocs/op -BenchmarkMacaron_GithubStatic 1000000 2686 ns/op 752 B/op 8 allocs/op -BenchmarkMartini_GithubStatic 100000 22244 ns/op 832 B/op 11 allocs/op -BenchmarkPat_GithubStatic 100000 13278 ns/op 3648 B/op 76 allocs/op -BenchmarkPossum_GithubStatic 1000000 1429 ns/op 480 B/op 4 allocs/op -BenchmarkR2router_GithubStatic 2000000 726 ns/op 144 B/op 5 allocs/op -BenchmarkRevel_GithubStatic 300000 6271 ns/op 1288 B/op 25 allocs/op -BenchmarkRivet_GithubStatic 3000000 474 ns/op 112 B/op 2 allocs/op -BenchmarkTango_GithubStatic 1000000 1842 ns/op 256 B/op 10 allocs/op -BenchmarkTigerTonic_GithubStatic 5000000 361 ns/op 48 B/op 1 allocs/op -BenchmarkTraffic_GithubStatic 30000 47197 ns/op 18920 B/op 149 allocs/op -BenchmarkVulcan_GithubStatic 1000000 1415 ns/op 98 B/op 3 allocs/op -BenchmarkZeus_GithubStatic 1000000 2522 ns/op 512 B/op 11 allocs/op -BenchmarkAce_GithubParam 3000000 578 ns/op 96 B/op 1 allocs/op -BenchmarkBear_GithubParam 1000000 1592 ns/op 464 B/op 5 allocs/op -BenchmarkBeego_GithubParam 1000000 2891 ns/op 784 B/op 11 allocs/op -BenchmarkBone_GithubParam 300000 6440 ns/op 1456 B/op 16 allocs/op -BenchmarkDenco_GithubParam 3000000 514 ns/op 128 B/op 1 allocs/op -BenchmarkEcho_GithubParam 5000000 292 ns/op 0 B/op 0 allocs/op -BenchmarkGin_GithubParam 10000000 242 ns/op 0 B/op 0 allocs/op -BenchmarkGocraftWeb_GithubParam 1000000 2343 ns/op 720 B/op 10 allocs/op -BenchmarkGoji_GithubParam 1000000 1566 ns/op 336 B/op 2 allocs/op -BenchmarkGoJsonRest_GithubParam 1000000 2828 ns/op 721 B/op 15 allocs/op -BenchmarkGoRestful_GithubParam 10000 177711 ns/op 2816 B/op 35 allocs/op -BenchmarkGorillaMux_GithubParam 100000 13591 ns/op 816 B/op 9 allocs/op -BenchmarkHttpRouter_GithubParam 5000000 352 ns/op 96 B/op 1 allocs/op -BenchmarkHttpTreeMux_GithubParam 2000000 973 ns/op 336 B/op 2 allocs/op -BenchmarkKocha_GithubParam 2000000 889 ns/op 128 B/op 5 allocs/op -BenchmarkMacaron_GithubParam 500000 4047 ns/op 1168 B/op 12 allocs/op -BenchmarkMartini_GithubParam 50000 28982 ns/op 1184 B/op 12 allocs/op -BenchmarkPat_GithubParam 200000 8747 ns/op 2480 B/op 56 allocs/op -BenchmarkPossum_GithubParam 1000000 2158 ns/op 624 B/op 7 allocs/op -BenchmarkR2router_GithubParam 1000000 1352 ns/op 432 B/op 6 allocs/op -BenchmarkRevel_GithubParam 200000 7673 ns/op 1784 B/op 30 allocs/op -BenchmarkRivet_GithubParam 1000000 1573 ns/op 480 B/op 6 allocs/op -BenchmarkTango_GithubParam 1000000 
2418 ns/op 480 B/op 13 allocs/op -BenchmarkTigerTonic_GithubParam 300000 6048 ns/op 1440 B/op 28 allocs/op -BenchmarkTraffic_GithubParam 100000 20143 ns/op 6024 B/op 55 allocs/op -BenchmarkVulcan_GithubParam 1000000 2224 ns/op 98 B/op 3 allocs/op -BenchmarkZeus_GithubParam 500000 4156 ns/op 1312 B/op 12 allocs/op -BenchmarkAce_GithubAll 10000 109482 ns/op 13792 B/op 167 allocs/op -BenchmarkBear_GithubAll 10000 287490 ns/op 79952 B/op 943 allocs/op -BenchmarkBeego_GithubAll 3000 562184 ns/op 146272 B/op 2092 allocs/op -BenchmarkBone_GithubAll 500 2578716 ns/op 648016 B/op 8119 allocs/op -BenchmarkDenco_GithubAll 20000 94955 ns/op 20224 B/op 167 allocs/op -BenchmarkEcho_GithubAll 30000 58705 ns/op 0 B/op 0 allocs/op -BenchmarkGin_GithubAll 30000 50991 ns/op 0 B/op 0 allocs/op -BenchmarkGocraftWeb_GithubAll 5000 449648 ns/op 133280 B/op 1889 allocs/op -BenchmarkGoji_GithubAll 2000 689748 ns/op 56113 B/op 334 allocs/op -BenchmarkGoJsonRest_GithubAll 5000 537769 ns/op 135995 B/op 2940 allocs/op -BenchmarkGoRestful_GithubAll 100 18410628 ns/op 797236 B/op 7725 allocs/op -BenchmarkGorillaMux_GithubAll 200 8036360 ns/op 153137 B/op 1791 allocs/op -BenchmarkHttpRouter_GithubAll 20000 63506 ns/op 13792 B/op 167 allocs/op -BenchmarkHttpTreeMux_GithubAll 10000 165927 ns/op 56112 B/op 334 allocs/op -BenchmarkKocha_GithubAll 10000 171362 ns/op 23304 B/op 843 allocs/op -BenchmarkMacaron_GithubAll 2000 817008 ns/op 224960 B/op 2315 allocs/op -BenchmarkMartini_GithubAll 100 12609209 ns/op 237952 B/op 2686 allocs/op -BenchmarkPat_GithubAll 300 4830398 ns/op 1504101 B/op 32222 allocs/op -BenchmarkPossum_GithubAll 10000 301716 ns/op 97440 B/op 812 allocs/op -BenchmarkR2router_GithubAll 10000 270691 ns/op 77328 B/op 1182 allocs/op -BenchmarkRevel_GithubAll 1000 1491919 ns/op 345553 B/op 5918 allocs/op -BenchmarkRivet_GithubAll 10000 283860 ns/op 84272 B/op 1079 allocs/op -BenchmarkTango_GithubAll 5000 473821 ns/op 87078 B/op 2470 allocs/op -BenchmarkTigerTonic_GithubAll 2000 1120131 ns/op 241088 B/op 6052 allocs/op -BenchmarkTraffic_GithubAll 200 8708979 ns/op 2664762 B/op 22390 allocs/op -BenchmarkVulcan_GithubAll 5000 353392 ns/op 19894 B/op 609 allocs/op -BenchmarkZeus_GithubAll 2000 944234 ns/op 300688 B/op 2648 allocs/op -BenchmarkAce_GPlusStatic 5000000 251 ns/op 0 B/op 0 allocs/op -BenchmarkBear_GPlusStatic 3000000 415 ns/op 72 B/op 3 allocs/op -BenchmarkBeego_GPlusStatic 1000000 1416 ns/op 352 B/op 7 allocs/op -BenchmarkBone_GPlusStatic 10000000 192 ns/op 32 B/op 1 allocs/op -BenchmarkDenco_GPlusStatic 30000000 47.6 ns/op 0 B/op 0 allocs/op -BenchmarkEcho_GPlusStatic 10000000 131 ns/op 0 B/op 0 allocs/op -BenchmarkGin_GPlusStatic 10000000 131 ns/op 0 B/op 0 allocs/op -BenchmarkGocraftWeb_GPlusStatic 1000000 1035 ns/op 288 B/op 6 allocs/op -BenchmarkGoji_GPlusStatic 5000000 304 ns/op 0 B/op 0 allocs/op -BenchmarkGoJsonRest_GPlusStatic 1000000 1286 ns/op 337 B/op 12 allocs/op -BenchmarkGoRestful_GPlusStatic 200000 9649 ns/op 2160 B/op 30 allocs/op -BenchmarkGorillaMux_GPlusStatic 1000000 2346 ns/op 464 B/op 8 allocs/op -BenchmarkHttpRouter_GPlusStatic 30000000 42.7 ns/op 0 B/op 0 allocs/op -BenchmarkHttpTreeMux_GPlusStatic 30000000 49.5 ns/op 0 B/op 0 allocs/op -BenchmarkKocha_GPlusStatic 20000000 74.8 ns/op 0 B/op 0 allocs/op -BenchmarkMacaron_GPlusStatic 1000000 2520 ns/op 736 B/op 8 allocs/op -BenchmarkMartini_GPlusStatic 300000 5310 ns/op 832 B/op 11 allocs/op -BenchmarkPat_GPlusStatic 5000000 398 ns/op 96 B/op 2 allocs/op -BenchmarkPossum_GPlusStatic 1000000 1434 ns/op 480 B/op 4 allocs/op 
-BenchmarkR2router_GPlusStatic 2000000 646 ns/op 144 B/op 5 allocs/op -BenchmarkRevel_GPlusStatic 300000 6172 ns/op 1272 B/op 25 allocs/op -BenchmarkRivet_GPlusStatic 3000000 444 ns/op 112 B/op 2 allocs/op -BenchmarkTango_GPlusStatic 1000000 1400 ns/op 208 B/op 10 allocs/op -BenchmarkTigerTonic_GPlusStatic 10000000 213 ns/op 32 B/op 1 allocs/op -BenchmarkTraffic_GPlusStatic 1000000 3091 ns/op 1208 B/op 16 allocs/op -BenchmarkVulcan_GPlusStatic 2000000 863 ns/op 98 B/op 3 allocs/op -BenchmarkZeus_GPlusStatic 10000000 237 ns/op 16 B/op 1 allocs/op -BenchmarkAce_GPlusParam 3000000 435 ns/op 64 B/op 1 allocs/op -BenchmarkBear_GPlusParam 1000000 1205 ns/op 448 B/op 5 allocs/op -BenchmarkBeego_GPlusParam 1000000 2494 ns/op 720 B/op 10 allocs/op -BenchmarkBone_GPlusParam 1000000 1126 ns/op 384 B/op 3 allocs/op -BenchmarkDenco_GPlusParam 5000000 325 ns/op 64 B/op 1 allocs/op -BenchmarkEcho_GPlusParam 10000000 168 ns/op 0 B/op 0 allocs/op -BenchmarkGin_GPlusParam 10000000 170 ns/op 0 B/op 0 allocs/op -BenchmarkGocraftWeb_GPlusParam 1000000 1895 ns/op 656 B/op 9 allocs/op -BenchmarkGoji_GPlusParam 1000000 1071 ns/op 336 B/op 2 allocs/op -BenchmarkGoJsonRest_GPlusParam 1000000 2282 ns/op 657 B/op 14 allocs/op -BenchmarkGoRestful_GPlusParam 100000 19400 ns/op 2560 B/op 33 allocs/op -BenchmarkGorillaMux_GPlusParam 500000 5001 ns/op 784 B/op 9 allocs/op -BenchmarkHttpRouter_GPlusParam 10000000 240 ns/op 64 B/op 1 allocs/op -BenchmarkHttpTreeMux_GPlusParam 2000000 797 ns/op 336 B/op 2 allocs/op -BenchmarkKocha_GPlusParam 3000000 505 ns/op 56 B/op 3 allocs/op -BenchmarkMacaron_GPlusParam 1000000 3668 ns/op 1104 B/op 11 allocs/op -BenchmarkMartini_GPlusParam 200000 10672 ns/op 1152 B/op 12 allocs/op -BenchmarkPat_GPlusParam 1000000 2376 ns/op 704 B/op 14 allocs/op -BenchmarkPossum_GPlusParam 1000000 2090 ns/op 624 B/op 7 allocs/op -BenchmarkR2router_GPlusParam 1000000 1233 ns/op 432 B/op 6 allocs/op -BenchmarkRevel_GPlusParam 200000 6778 ns/op 1704 B/op 28 allocs/op -BenchmarkRivet_GPlusParam 1000000 1279 ns/op 464 B/op 5 allocs/op -BenchmarkTango_GPlusParam 1000000 1981 ns/op 272 B/op 10 allocs/op -BenchmarkTigerTonic_GPlusParam 500000 3893 ns/op 1064 B/op 19 allocs/op -BenchmarkTraffic_GPlusParam 200000 6585 ns/op 2000 B/op 23 allocs/op -BenchmarkVulcan_GPlusParam 1000000 1233 ns/op 98 B/op 3 allocs/op -BenchmarkZeus_GPlusParam 1000000 1350 ns/op 368 B/op 3 allocs/op -BenchmarkAce_GPlus2Params 3000000 512 ns/op 64 B/op 1 allocs/op -BenchmarkBear_GPlus2Params 1000000 1564 ns/op 464 B/op 5 allocs/op -BenchmarkBeego_GPlus2Params 1000000 3043 ns/op 784 B/op 11 allocs/op -BenchmarkBone_GPlus2Params 1000000 3152 ns/op 736 B/op 7 allocs/op -BenchmarkDenco_GPlus2Params 3000000 431 ns/op 64 B/op 1 allocs/op -BenchmarkEcho_GPlus2Params 5000000 247 ns/op 0 B/op 0 allocs/op -BenchmarkGin_GPlus2Params 10000000 219 ns/op 0 B/op 0 allocs/op -BenchmarkGocraftWeb_GPlus2Params 1000000 2363 ns/op 720 B/op 10 allocs/op -BenchmarkGoji_GPlus2Params 1000000 1540 ns/op 336 B/op 2 allocs/op -BenchmarkGoJsonRest_GPlus2Params 1000000 2872 ns/op 721 B/op 15 allocs/op -BenchmarkGoRestful_GPlus2Params 100000 23030 ns/op 2720 B/op 35 allocs/op -BenchmarkGorillaMux_GPlus2Params 200000 10516 ns/op 816 B/op 9 allocs/op -BenchmarkHttpRouter_GPlus2Params 5000000 273 ns/op 64 B/op 1 allocs/op -BenchmarkHttpTreeMux_GPlus2Params 2000000 939 ns/op 336 B/op 2 allocs/op -BenchmarkKocha_GPlus2Params 2000000 844 ns/op 128 B/op 5 allocs/op -BenchmarkMacaron_GPlus2Params 500000 3914 ns/op 1168 B/op 12 allocs/op -BenchmarkMartini_GPlus2Params 50000 
35759 ns/op 1280 B/op 16 allocs/op -BenchmarkPat_GPlus2Params 200000 7089 ns/op 2304 B/op 41 allocs/op -BenchmarkPossum_GPlus2Params 1000000 2093 ns/op 624 B/op 7 allocs/op -BenchmarkR2router_GPlus2Params 1000000 1320 ns/op 432 B/op 6 allocs/op -BenchmarkRevel_GPlus2Params 200000 7351 ns/op 1800 B/op 30 allocs/op -BenchmarkRivet_GPlus2Params 1000000 1485 ns/op 480 B/op 6 allocs/op -BenchmarkTango_GPlus2Params 1000000 2111 ns/op 448 B/op 12 allocs/op -BenchmarkTigerTonic_GPlus2Params 300000 6271 ns/op 1528 B/op 28 allocs/op -BenchmarkTraffic_GPlus2Params 100000 14886 ns/op 3312 B/op 34 allocs/op -BenchmarkVulcan_GPlus2Params 1000000 1883 ns/op 98 B/op 3 allocs/op -BenchmarkZeus_GPlus2Params 1000000 2686 ns/op 784 B/op 6 allocs/op -BenchmarkAce_GPlusAll 300000 5912 ns/op 640 B/op 11 allocs/op -BenchmarkBear_GPlusAll 100000 16448 ns/op 5072 B/op 61 allocs/op -BenchmarkBeego_GPlusAll 50000 32916 ns/op 8976 B/op 129 allocs/op -BenchmarkBone_GPlusAll 50000 25836 ns/op 6992 B/op 76 allocs/op -BenchmarkDenco_GPlusAll 500000 4462 ns/op 672 B/op 11 allocs/op -BenchmarkEcho_GPlusAll 500000 2806 ns/op 0 B/op 0 allocs/op -BenchmarkGin_GPlusAll 500000 2579 ns/op 0 B/op 0 allocs/op -BenchmarkGocraftWeb_GPlusAll 50000 25223 ns/op 8144 B/op 116 allocs/op -BenchmarkGoji_GPlusAll 100000 14237 ns/op 3696 B/op 22 allocs/op -BenchmarkGoJsonRest_GPlusAll 50000 29227 ns/op 8221 B/op 183 allocs/op -BenchmarkGoRestful_GPlusAll 10000 203144 ns/op 36064 B/op 441 allocs/op -BenchmarkGorillaMux_GPlusAll 20000 80906 ns/op 9712 B/op 115 allocs/op -BenchmarkHttpRouter_GPlusAll 500000 3040 ns/op 640 B/op 11 allocs/op -BenchmarkHttpTreeMux_GPlusAll 200000 9627 ns/op 3696 B/op 22 allocs/op -BenchmarkKocha_GPlusAll 200000 8108 ns/op 976 B/op 43 allocs/op -BenchmarkMacaron_GPlusAll 30000 48083 ns/op 13968 B/op 142 allocs/op -BenchmarkMartini_GPlusAll 10000 196978 ns/op 15072 B/op 178 allocs/op -BenchmarkPat_GPlusAll 30000 58865 ns/op 16880 B/op 343 allocs/op -BenchmarkPossum_GPlusAll 100000 19685 ns/op 6240 B/op 52 allocs/op -BenchmarkR2router_GPlusAll 100000 16251 ns/op 5040 B/op 76 allocs/op -BenchmarkRevel_GPlusAll 20000 93489 ns/op 21656 B/op 368 allocs/op -BenchmarkRivet_GPlusAll 100000 16907 ns/op 5408 B/op 64 allocs/op -``` \ No newline at end of file diff --git a/vendor/github.com/gin-gonic/gin/CHANGELOG.md b/vendor/github.com/gin-gonic/gin/CHANGELOG.md deleted file mode 100644 index 82f1bea..0000000 --- a/vendor/github.com/gin-gonic/gin/CHANGELOG.md +++ /dev/null @@ -1,150 +0,0 @@ -#CHANGELOG - -###Gin 1.0rc2 (...) - -- [PERFORMANCE] Fast path for writing Content-Type. -- [PERFORMANCE] Much faster 404 routing -- [PERFORMANCE] Allocation optimizations -- [PERFORMANCE] Faster root tree lookup -- [PERFORMANCE] Zero overhead, String() and JSON() rendering. -- [PERFORMANCE] Faster ClientIP parsing -- [PERFORMANCE] Much faster SSE implementation -- [NEW] Benchmarks suite -- [NEW] Bind validation can be disabled and replaced with custom validators. -- [NEW] More flexible HTML render -- [NEW] Multipart and PostForm bindings -- [NEW] Adds method to return all the registered routes -- [NEW] Context.HandlerName() returns the main handler's name -- [NEW] Adds Error.IsType() helper -- [FIX] Binding multipart form -- [FIX] Integration tests -- [FIX] Crash when binding non struct object in Context. 
-- [FIX] RunTLS() implementation -- [FIX] Logger() unit tests -- [FIX] Adds SetHTMLTemplate() warning -- [FIX] Context.IsAborted() -- [FIX] More unit tests -- [FIX] JSON, XML, HTML renders accept custom content-types -- [FIX] gin.AbortIndex is unexported -- [FIX] Better approach to avoid directory listing in StaticFS() -- [FIX] Context.ClientIP() always returns the IP with trimmed spaces. -- [FIX] Better warning when running in debug mode. -- [FIX] Google App Engine integration. debugPrint does not use os.Stdout -- [FIX] Fixes integer overflow in error type -- [FIX] Error implements the json.Marshaller interface -- [FIX] MIT license in every file - - -###Gin 1.0rc1 (May 22, 2015) - -- [PERFORMANCE] Zero allocation router -- [PERFORMANCE] Faster JSON, XML and text rendering -- [PERFORMANCE] Custom hand optimized HttpRouter for Gin -- [PERFORMANCE] Misc code optimizations. Inlining, tail call optimizations -- [NEW] Built-in support for golang.org/x/net/context -- [NEW] Any(path, handler). Create a route that matches any path -- [NEW] Refactored rendering pipeline (faster and static typeded) -- [NEW] Refactored errors API -- [NEW] IndentedJSON() prints pretty JSON -- [NEW] Added gin.DefaultWriter -- [NEW] UNIX socket support -- [NEW] RouterGroup.BasePath is exposed -- [NEW] JSON validation using go-validate-yourself (very powerful options) -- [NEW] Completed suite of unit tests -- [NEW] HTTP streaming with c.Stream() -- [NEW] StaticFile() creates a router for serving just one file. -- [NEW] StaticFS() has an option to disable directory listing. -- [NEW] StaticFS() for serving static files through virtual filesystems -- [NEW] Server-Sent Events native support -- [NEW] WrapF() and WrapH() helpers for wrapping http.HandlerFunc and http.Handler -- [NEW] Added LoggerWithWriter() middleware -- [NEW] Added RecoveryWithWriter() middleware -- [NEW] Added DefaultPostFormValue() -- [NEW] Added DefaultFormValue() -- [NEW] Added DefaultParamValue() -- [FIX] BasicAuth() when using custom realm -- [FIX] Bug when serving static files in nested routing group -- [FIX] Redirect using built-in http.Redirect() -- [FIX] Logger when printing the requested path -- [FIX] Documentation typos -- [FIX] Context.Engine renamed to Context.engine -- [FIX] Better debugging messages -- [FIX] ErrorLogger -- [FIX] Debug HTTP render -- [FIX] Refactored binding and render modules -- [FIX] Refactored Context initialization -- [FIX] Refactored BasicAuth() -- [FIX] NoMethod/NoRoute handlers -- [FIX] Hijacking http -- [FIX] Better support for Google App Engine (using log instead of fmt) - - -###Gin 0.6 (Mar 9, 2015) - -- [NEW] Support multipart/form-data -- [NEW] NoMethod handler -- [NEW] Validate sub structures -- [NEW] Support for HTTP Realm Auth -- [FIX] Unsigned integers in binding -- [FIX] Improve color logger - - -###Gin 0.5 (Feb 7, 2015) - -- [NEW] Content Negotiation -- [FIX] Solved security bug that allow a client to spoof ip -- [FIX] Fix unexported/ignored fields in binding - - -###Gin 0.4 (Aug 21, 2014) - -- [NEW] Development mode -- [NEW] Unit tests -- [NEW] Add Content.Redirect() -- [FIX] Deferring WriteHeader() -- [FIX] Improved documentation for model binding - - -###Gin 0.3 (Jul 18, 2014) - -- [PERFORMANCE] Normal log and error log are printed in the same call. -- [PERFORMANCE] Improve performance of NoRouter() -- [PERFORMANCE] Improve context's memory locality, reduce CPU cache faults. 
-- [NEW] Flexible rendering API -- [NEW] Add Context.File() -- [NEW] Add shorcut RunTLS() for http.ListenAndServeTLS -- [FIX] Rename NotFound404() to NoRoute() -- [FIX] Errors in context are purged -- [FIX] Adds HEAD method in Static file serving -- [FIX] Refactors Static() file serving -- [FIX] Using keyed initialization to fix app-engine integration -- [FIX] Can't unmarshal JSON array, #63 -- [FIX] Renaming Context.Req to Context.Request -- [FIX] Check application/x-www-form-urlencoded when parsing form - - -###Gin 0.2b (Jul 08, 2014) -- [PERFORMANCE] Using sync.Pool to allocatio/gc overhead -- [NEW] Travis CI integration -- [NEW] Completely new logger -- [NEW] New API for serving static files. gin.Static() -- [NEW] gin.H() can be serialized into XML -- [NEW] Typed errors. Errors can be typed. Internet/external/custom. -- [NEW] Support for Godeps -- [NEW] Travis/Godocs badges in README -- [NEW] New Bind() and BindWith() methods for parsing request body. -- [NEW] Add Content.Copy() -- [NEW] Add context.LastError() -- [NEW] Add shorcut for OPTIONS HTTP method -- [FIX] Tons of README fixes -- [FIX] Header is written before body -- [FIX] BasicAuth() and changes API a little bit -- [FIX] Recovery() middleware only prints panics -- [FIX] Context.Get() does not panic anymore. Use MustGet() instead. -- [FIX] Multiple http.WriteHeader() in NotFound handlers -- [FIX] Engine.Run() panics if http server can't be setted up -- [FIX] Crash when route path doesn't start with '/' -- [FIX] Do not update header when status code is negative -- [FIX] Setting response headers before calling WriteHeader in context.String() -- [FIX] Add MIT license -- [FIX] Changes behaviour of ErrorLogger() and Logger() diff --git a/vendor/github.com/gin-gonic/gin/LICENSE b/vendor/github.com/gin-gonic/gin/LICENSE deleted file mode 100644 index 1ff7f37..0000000 --- a/vendor/github.com/gin-gonic/gin/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2014 Manuel Martínez-Almeida - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. 
diff --git a/vendor/github.com/gin-gonic/gin/README.md b/vendor/github.com/gin-gonic/gin/README.md deleted file mode 100644 index 99a6d0b..0000000 --- a/vendor/github.com/gin-gonic/gin/README.md +++ /dev/null @@ -1,710 +0,0 @@ - -#Gin Web Framework - -[![Build Status](https://travis-ci.org/gin-gonic/gin.svg)](https://travis-ci.org/gin-gonic/gin) -[![Coverage Status](https://coveralls.io/repos/gin-gonic/gin/badge.svg?branch=master)](https://coveralls.io/r/gin-gonic/gin?branch=master) -[![Go Report Card](https://goreportcard.com/badge/github.com/gin-gonic/gin)](https://goreportcard.com/report/github.com/gin-gonic/gin) -[![GoDoc](https://godoc.org/github.com/gin-gonic/gin?status.svg)](https://godoc.org/github.com/gin-gonic/gin) -[![Join the chat at https://gitter.im/gin-gonic/gin](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/gin-gonic/gin?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) - -Gin is a web framework written in Go (Golang). It features a martini-like API with much better performance, up to 40 times faster thanks to [httprouter](https://github.com/julienschmidt/httprouter). If you need performance and good productivity, you will love Gin. - - - -![Gin console logger](https://gin-gonic.github.io/gin/other/console.png) - -```sh -$ cat test.go -``` -```go -package main - -import "github.com/gin-gonic/gin" - -func main() { - r := gin.Default() - r.GET("/ping", func(c *gin.Context) { - c.JSON(200, gin.H{ - "message": "pong", - }) - }) - r.Run() // listen and server on 0.0.0.0:8080 -} -``` - -## Benchmarks - -Gin uses a custom version of [HttpRouter](https://github.com/julienschmidt/httprouter) - -[See all benchmarks](/BENCHMARKS.md) - - -Benchmark name | (1) | (2) | (3) | (4) ---------------------------------|----------:|----------:|----------:|------: -BenchmarkAce_GithubAll | 10000 | 109482 | 13792 | 167 -BenchmarkBear_GithubAll | 10000 | 287490 | 79952 | 943 -BenchmarkBeego_GithubAll | 3000 | 562184 | 146272 | 2092 -BenchmarkBone_GithubAll | 500 | 2578716 | 648016 | 8119 -BenchmarkDenco_GithubAll | 20000 | 94955 | 20224 | 167 -BenchmarkEcho_GithubAll | 30000 | 58705 | 0 | 0 -**BenchmarkGin_GithubAll** | **30000** | **50991** | **0** | **0** -BenchmarkGocraftWeb_GithubAll | 5000 | 449648 | 133280 | 1889 -BenchmarkGoji_GithubAll | 2000 | 689748 | 56113 | 334 -BenchmarkGoJsonRest_GithubAll | 5000 | 537769 | 135995 | 2940 -BenchmarkGoRestful_GithubAll | 100 | 18410628 | 797236 | 7725 -BenchmarkGorillaMux_GithubAll | 200 | 8036360 | 153137 | 1791 -BenchmarkHttpRouter_GithubAll | 20000 | 63506 | 13792 | 167 -BenchmarkHttpTreeMux_GithubAll | 10000 | 165927 | 56112 | 334 -BenchmarkKocha_GithubAll | 10000 | 171362 | 23304 | 843 -BenchmarkMacaron_GithubAll | 2000 | 817008 | 224960 | 2315 -BenchmarkMartini_GithubAll | 100 | 12609209 | 237952 | 2686 -BenchmarkPat_GithubAll | 300 | 4830398 | 1504101 | 32222 -BenchmarkPossum_GithubAll | 10000 | 301716 | 97440 | 812 -BenchmarkR2router_GithubAll | 10000 | 270691 | 77328 | 1182 -BenchmarkRevel_GithubAll | 1000 | 1491919 | 345553 | 5918 -BenchmarkRivet_GithubAll | 10000 | 283860 | 84272 | 1079 -BenchmarkTango_GithubAll | 5000 | 473821 | 87078 | 2470 -BenchmarkTigerTonic_GithubAll | 2000 | 1120131 | 241088 | 6052 -BenchmarkTraffic_GithubAll | 200 | 8708979 | 2664762 | 22390 -BenchmarkVulcan_GithubAll | 5000 | 353392 | 19894 | 609 -BenchmarkZeus_GithubAll | 2000 | 944234 | 300688 | 2648 - -(1): Total Repetitions -(2): Single Repetition Duration (ns/op) -(3): Heap Memory (B/op) -(4): Average Allocations per 
Repetition (allocs/op) - -##Gin v1. stable - -- [x] Zero allocation router. -- [x] Still the fastest http router and framework. From routing to writing. -- [x] Complete suite of unit tests -- [x] Battle tested -- [x] API frozen, new releases will not break your code. - - -## Start using it -1. Download and install it: - - ```sh - $ go get github.com/gin-gonic/gin - ``` - -2. Import it in your code: - - ```go - import "github.com/gin-gonic/gin" - ``` - -3. (Optional) Import `net/http`. This is required for example if using constants such as `http.StatusOK`. - - ```go - import "net/http" - ``` - -##API Examples - -#### Using GET, POST, PUT, PATCH, DELETE and OPTIONS - -```go -func main() { - // Creates a gin router with default middleware: - // logger and recovery (crash-free) middleware - router := gin.Default() - - router.GET("/someGet", getting) - router.POST("/somePost", posting) - router.PUT("/somePut", putting) - router.DELETE("/someDelete", deleting) - router.PATCH("/somePatch", patching) - router.HEAD("/someHead", head) - router.OPTIONS("/someOptions", options) - - // By default it serves on :8080 unless a - // PORT environment variable was defined. - router.Run() - // router.Run(":3000") for a hard coded port -} -``` - -#### Parameters in path - -```go -func main() { - router := gin.Default() - - // This handler will match /user/john but will not match neither /user/ or /user - router.GET("/user/:name", func(c *gin.Context) { - name := c.Param("name") - c.String(http.StatusOK, "Hello %s", name) - }) - - // However, this one will match /user/john/ and also /user/john/send - // If no other routers match /user/john, it will redirect to /user/john/ - router.GET("/user/:name/*action", func(c *gin.Context) { - name := c.Param("name") - action := c.Param("action") - message := name + " is " + action - c.String(http.StatusOK, message) - }) - - router.Run(":8080") -} -``` - -#### Querystring parameters -```go -func main() { - router := gin.Default() - - // Query string parameters are parsed using the existing underlying request object. - // The request responds to a url matching: /welcome?firstname=Jane&lastname=Doe - router.GET("/welcome", func(c *gin.Context) { - firstname := c.DefaultQuery("firstname", "Guest") - lastname := c.Query("lastname") // shortcut for c.Request.URL.Query().Get("lastname") - - c.String(http.StatusOK, "Hello %s %s", firstname, lastname) - }) - router.Run(":8080") -} -``` - -### Multipart/Urlencoded Form - -```go -func main() { - router := gin.Default() - - router.POST("/form_post", func(c *gin.Context) { - message := c.PostForm("message") - nick := c.DefaultPostForm("nick", "anonymous") - - c.JSON(200, gin.H{ - "status": "posted", - "message": message, - "nick": nick, - }) - }) - router.Run(":8080") -} -``` - -### Another example: query + post form - -``` -POST /post?id=1234&page=1 HTTP/1.1 -Content-Type: application/x-www-form-urlencoded - -name=manu&message=this_is_great -``` - -```go -func main() { - router := gin.Default() - - router.POST("/post", func(c *gin.Context) { - - id := c.Query("id") - page := c.DefaultQuery("page", "0") - name := c.PostForm("name") - message := c.PostForm("message") - - fmt.Printf("id: %s; page: %s; name: %s; message: %s", id, page, name, message) - }) - router.Run(":8080") -} -``` - -``` -id: 1234; page: 1; name: manu; message: this_is_great -``` - -### Another example: upload file - -References issue [#548](https://github.com/gin-gonic/gin/issues/548). 
- -```go -func main() { - router := gin.Default() - - router.POST("/upload", func(c *gin.Context) { - - file, header , err := c.Request.FormFile("upload") - filename := header.Filename - fmt.Println(header.Filename) - out, err := os.Create("./tmp/"+filename+".png") - if err != nil { - log.Fatal(err) - } - defer out.Close() - _, err = io.Copy(out, file) - if err != nil { - log.Fatal(err) - } - }) - router.Run(":8080") -} -``` - -#### Grouping routes -```go -func main() { - router := gin.Default() - - // Simple group: v1 - v1 := router.Group("/v1") - { - v1.POST("/login", loginEndpoint) - v1.POST("/submit", submitEndpoint) - v1.POST("/read", readEndpoint) - } - - // Simple group: v2 - v2 := router.Group("/v2") - { - v2.POST("/login", loginEndpoint) - v2.POST("/submit", submitEndpoint) - v2.POST("/read", readEndpoint) - } - - router.Run(":8080") -} -``` - - -#### Blank Gin without middleware by default - -Use - -```go -r := gin.New() -``` -instead of - -```go -r := gin.Default() -``` - - -#### Using middleware -```go -func main() { - // Creates a router without any middleware by default - r := gin.New() - - // Global middleware - r.Use(gin.Logger()) - r.Use(gin.Recovery()) - - // Per route middleware, you can add as many as you desire. - r.GET("/benchmark", MyBenchLogger(), benchEndpoint) - - // Authorization group - // authorized := r.Group("/", AuthRequired()) - // exactly the same as: - authorized := r.Group("/") - // per group middleware! in this case we use the custom created - // AuthRequired() middleware just in the "authorized" group. - authorized.Use(AuthRequired()) - { - authorized.POST("/login", loginEndpoint) - authorized.POST("/submit", submitEndpoint) - authorized.POST("/read", readEndpoint) - - // nested group - testing := authorized.Group("testing") - testing.GET("/analytics", analyticsEndpoint) - } - - // Listen and server on 0.0.0.0:8080 - r.Run(":8080") -} -``` - -#### Model binding and validation - -To bind a request body into a type, use model binding. We currently support binding of JSON, XML and standard form values (foo=bar&boo=baz). - -Note that you need to set the corresponding binding tag on all fields you want to bind. For example, when binding from JSON, set `json:"fieldname"`. - -When using the Bind-method, Gin tries to infer the binder depending on the Content-Type header. If you are sure what you are binding, you can use BindWith. - -You can also specify that specific fields are required. If a field is decorated with `binding:"required"` and has a empty value when binding, the current request will fail with an error. - -```go -// Binding from JSON -type Login struct { - User string `form:"user" json:"user" binding:"required"` - Password string `form:"password" json:"password" binding:"required"` -} - -func main() { - router := gin.Default() - - // Example for binding JSON ({"user": "manu", "password": "123"}) - router.POST("/loginJSON", func(c *gin.Context) { - var json Login - if c.BindJSON(&json) == nil { - if json.User == "manu" && json.Password == "123" { - c.JSON(http.StatusOK, gin.H{"status": "you are logged in"}) - } else { - c.JSON(http.StatusUnauthorized, gin.H{"status": "unauthorized"}) - } - } - }) - - // Example for binding a HTML form (user=manu&password=123) - router.POST("/loginForm", func(c *gin.Context) { - var form Login - // This will infer what binder to use depending on the content-type header. 
- if c.Bind(&form) == nil { - if form.User == "manu" && form.Password == "123" { - c.JSON(http.StatusOK, gin.H{"status": "you are logged in"}) - } else { - c.JSON(http.StatusUnauthorized, gin.H{"status": "unauthorized"}) - } - } - }) - - // Listen and server on 0.0.0.0:8080 - router.Run(":8080") -} -``` - - -###Multipart/Urlencoded binding -```go -package main - -import ( - "github.com/gin-gonic/gin" - "github.com/gin-gonic/gin/binding" -) - -type LoginForm struct { - User string `form:"user" binding:"required"` - Password string `form:"password" binding:"required"` -} - -func main() { - router := gin.Default() - router.POST("/login", func(c *gin.Context) { - // you can bind multipart form with explicit binding declaration: - // c.BindWith(&form, binding.Form) - // or you can simply use autobinding with Bind method: - var form LoginForm - // in this case proper binding will be automatically selected - if c.Bind(&form) == nil { - if form.User == "user" && form.Password == "password" { - c.JSON(200, gin.H{"status": "you are logged in"}) - } else { - c.JSON(401, gin.H{"status": "unauthorized"}) - } - } - }) - router.Run(":8080") -} -``` - -Test it with: -```sh -$ curl -v --form user=user --form password=password http://localhost:8080/login -``` - - -#### XML and JSON rendering - -```go -func main() { - r := gin.Default() - - // gin.H is a shortcut for map[string]interface{} - r.GET("/someJSON", func(c *gin.Context) { - c.JSON(http.StatusOK, gin.H{"message": "hey", "status": http.StatusOK}) - }) - - r.GET("/moreJSON", func(c *gin.Context) { - // You also can use a struct - var msg struct { - Name string `json:"user"` - Message string - Number int - } - msg.Name = "Lena" - msg.Message = "hey" - msg.Number = 123 - // Note that msg.Name becomes "user" in the JSON - // Will output : {"user": "Lena", "Message": "hey", "Number": 123} - c.JSON(http.StatusOK, msg) - }) - - r.GET("/someXML", func(c *gin.Context) { - c.XML(http.StatusOK, gin.H{"message": "hey", "status": http.StatusOK}) - }) - - // Listen and server on 0.0.0.0:8080 - r.Run(":8080") -} -``` - -####Serving static files - -```go -func main() { - router := gin.Default() - router.Static("/assets", "./assets") - router.StaticFS("/more_static", http.Dir("my_file_system")) - router.StaticFile("/favicon.ico", "./resources/favicon.ico") - - // Listen and server on 0.0.0.0:8080 - router.Run(":8080") -} -``` - -####HTML rendering - -Using LoadHTMLTemplates() - -```go -func main() { - router := gin.Default() - router.LoadHTMLGlob("templates/*") - //router.LoadHTMLFiles("templates/template1.html", "templates/template2.html") - router.GET("/index", func(c *gin.Context) { - c.HTML(http.StatusOK, "index.tmpl", gin.H{ - "title": "Main website", - }) - }) - router.Run(":8080") -} -``` -templates/index.tmpl -```html - -

-<html>
-	<h1>
-		{{ .title }}
-	</h1>
-</html>
- -``` - -Using templates with same name in different directories - -```go -func main() { - router := gin.Default() - router.LoadHTMLGlob("templates/**/*") - router.GET("/posts/index", func(c *gin.Context) { - c.HTML(http.StatusOK, "posts/index.tmpl", gin.H{ - "title": "Posts", - }) - }) - router.GET("/users/index", func(c *gin.Context) { - c.HTML(http.StatusOK, "users/index.tmpl", gin.H{ - "title": "Users", - }) - }) - router.Run(":8080") -} -``` -templates/posts/index.tmpl -```html -{{ define "posts/index.tmpl" }} -

-<html><h1>
-	{{ .title }}
-</h1>
-<p>Using posts/index.tmpl</p>
-</html>
- -{{ end }} -``` -templates/users/index.tmpl -```html -{{ define "users/index.tmpl" }} -

-<html><h1>
-	{{ .title }}
-</h1>
-<p>Using users/index.tmpl</p>
-</html>
- -{{ end }} -``` - -You can also use your own html template render - -```go -import "html/template" - -func main() { - router := gin.Default() - html := template.Must(template.ParseFiles("file1", "file2")) - router.SetHTMLTemplate(html) - router.Run(":8080") -} -``` - - -#### Redirects - -Issuing a HTTP redirect is easy: - -```go -r.GET("/test", func(c *gin.Context) { - c.Redirect(http.StatusMovedPermanently, "http://www.google.com/") -}) -``` -Both internal and external locations are supported. - - -#### Custom Middleware - -```go -func Logger() gin.HandlerFunc { - return func(c *gin.Context) { - t := time.Now() - - // Set example variable - c.Set("example", "12345") - - // before request - - c.Next() - - // after request - latency := time.Since(t) - log.Print(latency) - - // access the status we are sending - status := c.Writer.Status() - log.Println(status) - } -} - -func main() { - r := gin.New() - r.Use(Logger()) - - r.GET("/test", func(c *gin.Context) { - example := c.MustGet("example").(string) - - // it would print: "12345" - log.Println(example) - }) - - // Listen and server on 0.0.0.0:8080 - r.Run(":8080") -} -``` - -#### Using BasicAuth() middleware -```go -// simulate some private data -var secrets = gin.H{ - "foo": gin.H{"email": "foo@bar.com", "phone": "123433"}, - "austin": gin.H{"email": "austin@example.com", "phone": "666"}, - "lena": gin.H{"email": "lena@guapa.com", "phone": "523443"}, -} - -func main() { - r := gin.Default() - - // Group using gin.BasicAuth() middleware - // gin.Accounts is a shortcut for map[string]string - authorized := r.Group("/admin", gin.BasicAuth(gin.Accounts{ - "foo": "bar", - "austin": "1234", - "lena": "hello2", - "manu": "4321", - })) - - // /admin/secrets endpoint - // hit "localhost:8080/admin/secrets - authorized.GET("/secrets", func(c *gin.Context) { - // get user, it was set by the BasicAuth middleware - user := c.MustGet(gin.AuthUserKey).(string) - if secret, ok := secrets[user]; ok { - c.JSON(http.StatusOK, gin.H{"user": user, "secret": secret}) - } else { - c.JSON(http.StatusOK, gin.H{"user": user, "secret": "NO SECRET :("}) - } - }) - - // Listen and server on 0.0.0.0:8080 - r.Run(":8080") -} -``` - - -#### Goroutines inside a middleware -When starting inside a middleware or handler, you **SHOULD NOT** use the original context inside it, you have to use a read-only copy. - -```go -func main() { - r := gin.Default() - - r.GET("/long_async", func(c *gin.Context) { - // create copy to be used inside the goroutine - cCp := c.Copy() - go func() { - // simulate a long task with time.Sleep(). 5 seconds - time.Sleep(5 * time.Second) - - // note that you are using the copied context "cCp", IMPORTANT - log.Println("Done! in path " + cCp.Request.URL.Path) - }() - }) - - r.GET("/long_sync", func(c *gin.Context) { - // simulate a long task with time.Sleep(). 5 seconds - time.Sleep(5 * time.Second) - - // since we are NOT using a goroutine, we do not have to copy the context - log.Println("Done! 
in path " + c.Request.URL.Path) - }) - - // Listen and server on 0.0.0.0:8080 - r.Run(":8080") -} -``` - -#### Custom HTTP configuration - -Use `http.ListenAndServe()` directly, like this: - -```go -func main() { - router := gin.Default() - http.ListenAndServe(":8080", router) -} -``` -or - -```go -func main() { - router := gin.Default() - - s := &http.Server{ - Addr: ":8080", - Handler: router, - ReadTimeout: 10 * time.Second, - WriteTimeout: 10 * time.Second, - MaxHeaderBytes: 1 << 20, - } - s.ListenAndServe() -} -``` - -#### Graceful restart or stop - -Do you want to graceful restart or stop your web server? -There are some ways this can be done. - -We can use [fvbock/endless](https://github.com/fvbock/endless) to replace the default `ListenAndServe`. Refer issue [#296](https://github.com/gin-gonic/gin/issues/296) for more details. - -```go -router := gin.Default() -router.GET("/", handler) -// [...] -endless.ListenAndServe(":4242", router) -``` - -An alternative to endless: - -* [manners](https://github.com/braintree/manners): A polite Go HTTP server that shuts down gracefully. diff --git a/vendor/github.com/gin-gonic/gin/auth.go b/vendor/github.com/gin-gonic/gin/auth.go deleted file mode 100644 index 125e659..0000000 --- a/vendor/github.com/gin-gonic/gin/auth.go +++ /dev/null @@ -1,92 +0,0 @@ -// Copyright 2014 Manu Martinez-Almeida. All rights reserved. -// Use of this source code is governed by a MIT style -// license that can be found in the LICENSE file. - -package gin - -import ( - "crypto/subtle" - "encoding/base64" - "strconv" -) - -const AuthUserKey = "user" - -type ( - Accounts map[string]string - authPair struct { - Value string - User string - } - authPairs []authPair -) - -func (a authPairs) searchCredential(authValue string) (string, bool) { - if len(authValue) == 0 { - return "", false - } - for _, pair := range a { - if pair.Value == authValue { - return pair.User, true - } - } - return "", false -} - -// BasicAuthForRealm returns a Basic HTTP Authorization middleware. It takes as arguments a map[string]string where -// the key is the user name and the value is the password, as well as the name of the Realm. -// If the realm is empty, "Authorization Required" will be used by default. -// (see http://tools.ietf.org/html/rfc2617#section-1.2) -func BasicAuthForRealm(accounts Accounts, realm string) HandlerFunc { - if realm == "" { - realm = "Authorization Required" - } - realm = "Basic realm=" + strconv.Quote(realm) - pairs := processAccounts(accounts) - return func(c *Context) { - // Search user in the slice of allowed credentials - user, found := pairs.searchCredential(c.Request.Header.Get("Authorization")) - if !found { - // Credentials doesn't match, we return 401 and abort handlers chain. - c.Header("WWW-Authenticate", realm) - c.AbortWithStatus(401) - } else { - // The user credentials was found, set user's id to key AuthUserKey in this context, the userId can be read later using - // c.MustGet(gin.AuthUserKey) - c.Set(AuthUserKey, user) - } - } -} - -// BasicAuth returns a Basic HTTP Authorization middleware. It takes as argument a map[string]string where -// the key is the user name and the value is the password. 
-func BasicAuth(accounts Accounts) HandlerFunc { - return BasicAuthForRealm(accounts, "") -} - -func processAccounts(accounts Accounts) authPairs { - assert1(len(accounts) > 0, "Empty list of authorized credentials") - pairs := make(authPairs, 0, len(accounts)) - for user, password := range accounts { - assert1(len(user) > 0, "User can not be empty") - value := authorizationHeader(user, password) - pairs = append(pairs, authPair{ - Value: value, - User: user, - }) - } - return pairs -} - -func authorizationHeader(user, password string) string { - base := user + ":" + password - return "Basic " + base64.StdEncoding.EncodeToString([]byte(base)) -} - -func secureCompare(given, actual string) bool { - if subtle.ConstantTimeEq(int32(len(given)), int32(len(actual))) == 1 { - return subtle.ConstantTimeCompare([]byte(given), []byte(actual)) == 1 - } - /* Securely compare actual to itself to keep constant time, but always return false */ - return subtle.ConstantTimeCompare([]byte(actual), []byte(actual)) == 1 && false -} diff --git a/vendor/github.com/gin-gonic/gin/binding/binding.go b/vendor/github.com/gin-gonic/gin/binding/binding.go deleted file mode 100644 index dc7397f..0000000 --- a/vendor/github.com/gin-gonic/gin/binding/binding.go +++ /dev/null @@ -1,67 +0,0 @@ -// Copyright 2014 Manu Martinez-Almeida. All rights reserved. -// Use of this source code is governed by a MIT style -// license that can be found in the LICENSE file. - -package binding - -import "net/http" - -const ( - MIMEJSON = "application/json" - MIMEHTML = "text/html" - MIMEXML = "application/xml" - MIMEXML2 = "text/xml" - MIMEPlain = "text/plain" - MIMEPOSTForm = "application/x-www-form-urlencoded" - MIMEMultipartPOSTForm = "multipart/form-data" - MIMEPROTOBUF = "application/x-protobuf" -) - -type Binding interface { - Name() string - Bind(*http.Request, interface{}) error -} - -type StructValidator interface { - // ValidateStruct can receive any kind of type and it should never panic, even if the configuration is not right. - // If the received type is not a struct, any validation should be skipped and nil must be returned. - // If the received type is a struct or pointer to a struct, the validation should be performed. - // If the struct is not valid or the validation itself fails, a descriptive error should be returned. - // Otherwise nil must be returned. 
- ValidateStruct(interface{}) error -} - -var Validator StructValidator = &defaultValidator{} - -var ( - JSON = jsonBinding{} - XML = xmlBinding{} - Form = formBinding{} - FormPost = formPostBinding{} - FormMultipart = formMultipartBinding{} - ProtoBuf = protobufBinding{} -) - -func Default(method, contentType string) Binding { - if method == "GET" { - return Form - } else { - switch contentType { - case MIMEJSON: - return JSON - case MIMEXML, MIMEXML2: - return XML - case MIMEPROTOBUF: - return ProtoBuf - default: //case MIMEPOSTForm, MIMEMultipartPOSTForm: - return Form - } - } -} - -func validate(obj interface{}) error { - if Validator == nil { - return nil - } - return Validator.ValidateStruct(obj) -} diff --git a/vendor/github.com/gin-gonic/gin/binding/default_validator.go b/vendor/github.com/gin-gonic/gin/binding/default_validator.go deleted file mode 100644 index 760728b..0000000 --- a/vendor/github.com/gin-gonic/gin/binding/default_validator.go +++ /dev/null @@ -1,41 +0,0 @@ -package binding - -import ( - "reflect" - "sync" - - "gopkg.in/go-playground/validator.v8" -) - -type defaultValidator struct { - once sync.Once - validate *validator.Validate -} - -var _ StructValidator = &defaultValidator{} - -func (v *defaultValidator) ValidateStruct(obj interface{}) error { - if kindOfData(obj) == reflect.Struct { - v.lazyinit() - if err := v.validate.Struct(obj); err != nil { - return error(err) - } - } - return nil -} - -func (v *defaultValidator) lazyinit() { - v.once.Do(func() { - config := &validator.Config{TagName: "binding"} - v.validate = validator.New(config) - }) -} - -func kindOfData(data interface{}) reflect.Kind { - value := reflect.ValueOf(data) - valueType := value.Kind() - if valueType == reflect.Ptr { - valueType = value.Elem().Kind() - } - return valueType -} diff --git a/vendor/github.com/gin-gonic/gin/binding/form.go b/vendor/github.com/gin-gonic/gin/binding/form.go deleted file mode 100644 index 557333e..0000000 --- a/vendor/github.com/gin-gonic/gin/binding/form.go +++ /dev/null @@ -1,54 +0,0 @@ -// Copyright 2014 Manu Martinez-Almeida. All rights reserved. -// Use of this source code is governed by a MIT style -// license that can be found in the LICENSE file. 
- -package binding - -import "net/http" - -type formBinding struct{} -type formPostBinding struct{} -type formMultipartBinding struct{} - -func (formBinding) Name() string { - return "form" -} - -func (formBinding) Bind(req *http.Request, obj interface{}) error { - if err := req.ParseForm(); err != nil { - return err - } - req.ParseMultipartForm(32 << 10) // 32 MB - if err := mapForm(obj, req.Form); err != nil { - return err - } - return validate(obj) -} - -func (formPostBinding) Name() string { - return "form-urlencoded" -} - -func (formPostBinding) Bind(req *http.Request, obj interface{}) error { - if err := req.ParseForm(); err != nil { - return err - } - if err := mapForm(obj, req.PostForm); err != nil { - return err - } - return validate(obj) -} - -func (formMultipartBinding) Name() string { - return "multipart/form-data" -} - -func (formMultipartBinding) Bind(req *http.Request, obj interface{}) error { - if err := req.ParseMultipartForm(32 << 10); err != nil { - return err - } - if err := mapForm(obj, req.MultipartForm.Value); err != nil { - return err - } - return validate(obj) -} diff --git a/vendor/github.com/gin-gonic/gin/binding/form_mapping.go b/vendor/github.com/gin-gonic/gin/binding/form_mapping.go deleted file mode 100644 index 07c8375..0000000 --- a/vendor/github.com/gin-gonic/gin/binding/form_mapping.go +++ /dev/null @@ -1,150 +0,0 @@ -// Copyright 2014 Manu Martinez-Almeida. All rights reserved. -// Use of this source code is governed by a MIT style -// license that can be found in the LICENSE file. - -package binding - -import ( - "errors" - "reflect" - "strconv" -) - -func mapForm(ptr interface{}, form map[string][]string) error { - typ := reflect.TypeOf(ptr).Elem() - val := reflect.ValueOf(ptr).Elem() - for i := 0; i < typ.NumField(); i++ { - typeField := typ.Field(i) - structField := val.Field(i) - if !structField.CanSet() { - continue - } - - structFieldKind := structField.Kind() - inputFieldName := typeField.Tag.Get("form") - if inputFieldName == "" { - inputFieldName = typeField.Name - - // if "form" tag is nil, we inspect if the field is a struct. 
- // this would not make sense for JSON parsing but it does for a form - // since data is flatten - if structFieldKind == reflect.Struct { - err := mapForm(structField.Addr().Interface(), form) - if err != nil { - return err - } - continue - } - } - inputValue, exists := form[inputFieldName] - if !exists { - continue - } - - numElems := len(inputValue) - if structFieldKind == reflect.Slice && numElems > 0 { - sliceOf := structField.Type().Elem().Kind() - slice := reflect.MakeSlice(structField.Type(), numElems, numElems) - for i := 0; i < numElems; i++ { - if err := setWithProperType(sliceOf, inputValue[i], slice.Index(i)); err != nil { - return err - } - } - val.Field(i).Set(slice) - } else { - if err := setWithProperType(typeField.Type.Kind(), inputValue[0], structField); err != nil { - return err - } - } - } - return nil -} - -func setWithProperType(valueKind reflect.Kind, val string, structField reflect.Value) error { - switch valueKind { - case reflect.Int: - return setIntField(val, 0, structField) - case reflect.Int8: - return setIntField(val, 8, structField) - case reflect.Int16: - return setIntField(val, 16, structField) - case reflect.Int32: - return setIntField(val, 32, structField) - case reflect.Int64: - return setIntField(val, 64, structField) - case reflect.Uint: - return setUintField(val, 0, structField) - case reflect.Uint8: - return setUintField(val, 8, structField) - case reflect.Uint16: - return setUintField(val, 16, structField) - case reflect.Uint32: - return setUintField(val, 32, structField) - case reflect.Uint64: - return setUintField(val, 64, structField) - case reflect.Bool: - return setBoolField(val, structField) - case reflect.Float32: - return setFloatField(val, 32, structField) - case reflect.Float64: - return setFloatField(val, 64, structField) - case reflect.String: - structField.SetString(val) - default: - return errors.New("Unknown type") - } - return nil -} - -func setIntField(val string, bitSize int, field reflect.Value) error { - if val == "" { - val = "0" - } - intVal, err := strconv.ParseInt(val, 10, bitSize) - if err == nil { - field.SetInt(intVal) - } - return err -} - -func setUintField(val string, bitSize int, field reflect.Value) error { - if val == "" { - val = "0" - } - uintVal, err := strconv.ParseUint(val, 10, bitSize) - if err == nil { - field.SetUint(uintVal) - } - return err -} - -func setBoolField(val string, field reflect.Value) error { - if val == "" { - val = "false" - } - boolVal, err := strconv.ParseBool(val) - if err == nil { - field.SetBool(boolVal) - } - return nil -} - -func setFloatField(val string, bitSize int, field reflect.Value) error { - if val == "" { - val = "0.0" - } - floatVal, err := strconv.ParseFloat(val, bitSize) - if err == nil { - field.SetFloat(floatVal) - } - return err -} - -// Don't pass in pointers to bind to. Can lead to bugs. See: -// https://github.com/codegangsta/martini-contrib/issues/40 -// https://github.com/codegangsta/martini-contrib/pull/34#issuecomment-29683659 -func ensureNotPointer(obj interface{}) { - if reflect.TypeOf(obj).Kind() == reflect.Ptr { - panic("Pointers are not accepted as binding models") - } -} diff --git a/vendor/github.com/gin-gonic/gin/binding/json.go b/vendor/github.com/gin-gonic/gin/binding/json.go deleted file mode 100644 index 6e53244..0000000 --- a/vendor/github.com/gin-gonic/gin/binding/json.go +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright 2014 Manu Martinez-Almeida. All rights reserved. 
-// Use of this source code is governed by a MIT style -// license that can be found in the LICENSE file. - -package binding - -import ( - "encoding/json" - - "net/http" -) - -type jsonBinding struct{} - -func (jsonBinding) Name() string { - return "json" -} - -func (jsonBinding) Bind(req *http.Request, obj interface{}) error { - decoder := json.NewDecoder(req.Body) - if err := decoder.Decode(obj); err != nil { - return err - } - return validate(obj) -} diff --git a/vendor/github.com/gin-gonic/gin/binding/protobuf.go b/vendor/github.com/gin-gonic/gin/binding/protobuf.go deleted file mode 100644 index 9f95622..0000000 --- a/vendor/github.com/gin-gonic/gin/binding/protobuf.go +++ /dev/null @@ -1,35 +0,0 @@ -// Copyright 2014 Manu Martinez-Almeida. All rights reserved. -// Use of this source code is governed by a MIT style -// license that can be found in the LICENSE file. - -package binding - -import ( - "github.com/golang/protobuf/proto" - - "io/ioutil" - "net/http" -) - -type protobufBinding struct{} - -func (protobufBinding) Name() string { - return "protobuf" -} - -func (protobufBinding) Bind(req *http.Request, obj interface{}) error { - - buf, err := ioutil.ReadAll(req.Body) - if err != nil { - return err - } - - if err = proto.Unmarshal(buf, obj.(proto.Message)); err != nil { - return err - } - - //Here it's same to return validate(obj), but util now we cann't add `binding:""` to the struct - //which automatically generate by gen-proto - return nil - //return validate(obj) -} diff --git a/vendor/github.com/gin-gonic/gin/binding/xml.go b/vendor/github.com/gin-gonic/gin/binding/xml.go deleted file mode 100644 index f84a6b7..0000000 --- a/vendor/github.com/gin-gonic/gin/binding/xml.go +++ /dev/null @@ -1,24 +0,0 @@ -// Copyright 2014 Manu Martinez-Almeida. All rights reserved. -// Use of this source code is governed by a MIT style -// license that can be found in the LICENSE file. - -package binding - -import ( - "encoding/xml" - "net/http" -) - -type xmlBinding struct{} - -func (xmlBinding) Name() string { - return "xml" -} - -func (xmlBinding) Bind(req *http.Request, obj interface{}) error { - decoder := xml.NewDecoder(req.Body) - if err := decoder.Decode(obj); err != nil { - return err - } - return validate(obj) -} diff --git a/vendor/github.com/gin-gonic/gin/context.go b/vendor/github.com/gin-gonic/gin/context.go deleted file mode 100644 index 5d3b6a4..0000000 --- a/vendor/github.com/gin-gonic/gin/context.go +++ /dev/null @@ -1,568 +0,0 @@ -// Copyright 2014 Manu Martinez-Almeida. All rights reserved. -// Use of this source code is governed by a MIT style -// license that can be found in the LICENSE file. - -package gin - -import ( - "errors" - "io" - "math" - "net" - "net/http" - "net/url" - "strings" - "time" - - "github.com/gin-gonic/gin/binding" - "github.com/gin-gonic/gin/render" - "github.com/manucorporat/sse" - "golang.org/x/net/context" -) - -// Content-Type MIME of the most common data formats -const ( - MIMEJSON = binding.MIMEJSON - MIMEHTML = binding.MIMEHTML - MIMEXML = binding.MIMEXML - MIMEXML2 = binding.MIMEXML2 - MIMEPlain = binding.MIMEPlain - MIMEPOSTForm = binding.MIMEPOSTForm - MIMEMultipartPOSTForm = binding.MIMEMultipartPOSTForm -) - -const abortIndex int8 = math.MaxInt8 / 2 - -// Context is the most important part of gin. It allows us to pass variables between middleware, -// manage the flow, validate the JSON of a request and render a JSON response for example. 
-type Context struct { - writermem responseWriter - Request *http.Request - Writer ResponseWriter - - Params Params - handlers HandlersChain - index int8 - - engine *Engine - Keys map[string]interface{} - Errors errorMsgs - Accepted []string -} - -var _ context.Context = &Context{} - -/************************************/ -/********** CONTEXT CREATION ********/ -/************************************/ - -func (c *Context) reset() { - c.Writer = &c.writermem - c.Params = c.Params[0:0] - c.handlers = nil - c.index = -1 - c.Keys = nil - c.Errors = c.Errors[0:0] - c.Accepted = nil -} - -// Copy returns a copy of the current context that can be safely used outside the request's scope. -// This have to be used then the context has to be passed to a goroutine. -func (c *Context) Copy() *Context { - var cp = *c - cp.writermem.ResponseWriter = nil - cp.Writer = &cp.writermem - cp.index = abortIndex - cp.handlers = nil - return &cp -} - -// HandlerName returns the main handler's name. For example if the handler is "handleGetUsers()", this -// function will return "main.handleGetUsers" -func (c *Context) HandlerName() string { - return nameOfFunction(c.handlers.Last()) -} - -/************************************/ -/*********** FLOW CONTROL ***********/ -/************************************/ - -// Next should be used only inside middleware. -// It executes the pending handlers in the chain inside the calling handler. -// See example in github. -func (c *Context) Next() { - c.index++ - s := int8(len(c.handlers)) - for ; c.index < s; c.index++ { - c.handlers[c.index](c) - } -} - -// IsAborted returns true if the current context was aborted. -func (c *Context) IsAborted() bool { - return c.index >= abortIndex -} - -// Abort prevents pending handlers from being called. Note that this will not stop the current handler. -// Let's say you have an authorization middleware that validates that the current request is authorized. If the -// authorization fails (ex: the password does not match), call Abort to ensure the remaining handlers -// for this request are not called. -func (c *Context) Abort() { - c.index = abortIndex -} - -// AbortWithStatus calls `Abort()` and writes the headers with the specified status code. -// For example, a failed attempt to authentificate a request could use: context.AbortWithStatus(401). -func (c *Context) AbortWithStatus(code int) { - c.Status(code) - c.Writer.WriteHeaderNow() - c.Abort() -} - -// AbortWithError calls `AbortWithStatus()` and `Error()` internally. This method stops the chain, writes the status code and -// pushes the specified error to `c.Errors`. -// See Context.Error() for more details. -func (c *Context) AbortWithError(code int, err error) *Error { - c.AbortWithStatus(code) - return c.Error(err) -} - -/************************************/ -/********* ERROR MANAGEMENT *********/ -/************************************/ - -// Attaches an error to the current context. The error is pushed to a list of errors. -// It's a good idea to call Error for each error that occurred during the resolution of a request. -// A middleware can be used to collect all the errors -// and push them to a database together, print a log, or append it in the HTTP response. 
-func (c *Context) Error(err error) *Error { - var parsedError *Error - switch err.(type) { - case *Error: - parsedError = err.(*Error) - default: - parsedError = &Error{ - Err: err, - Type: ErrorTypePrivate, - } - } - c.Errors = append(c.Errors, parsedError) - return parsedError -} - -/************************************/ -/******** METADATA MANAGEMENT********/ -/************************************/ - -// Set is used to store a new key/value pair exclusivelly for this context. -// It also lazy initializes c.Keys if it was not used previously. -func (c *Context) Set(key string, value interface{}) { - if c.Keys == nil { - c.Keys = make(map[string]interface{}) - } - c.Keys[key] = value -} - -// Get returns the value for the given key, ie: (value, true). -// If the value does not exists it returns (nil, false) -func (c *Context) Get(key string) (value interface{}, exists bool) { - if c.Keys != nil { - value, exists = c.Keys[key] - } - return -} - -// MustGet returns the value for the given key if it exists, otherwise it panics. -func (c *Context) MustGet(key string) interface{} { - if value, exists := c.Get(key); exists { - return value - } - panic("Key \"" + key + "\" does not exist") -} - -/************************************/ -/************ INPUT DATA ************/ -/************************************/ - -// Param returns the value of the URL param. -// It is a shortcut for c.Params.ByName(key) -// router.GET("/user/:id", func(c *gin.Context) { -// // a GET request to /user/john -// id := c.Param("id") // id == "john" -// }) -func (c *Context) Param(key string) string { - return c.Params.ByName(key) -} - -// Query returns the keyed url query value if it exists, -// othewise it returns an empty string `("")`. -// It is shortcut for `c.Request.URL.Query().Get(key)` -// GET /path?id=1234&name=Manu&value= -// c.Query("id") == "1234" -// c.Query("name") == "Manu" -// c.Query("value") == "" -// c.Query("wtf") == "" -func (c *Context) Query(key string) string { - value, _ := c.GetQuery(key) - return value -} - -// DefaultQuery returns the keyed url query value if it exists, -// othewise it returns the specified defaultValue string. -// See: Query() and GetQuery() for further information. -// GET /?name=Manu&lastname= -// c.DefaultQuery("name", "unknown") == "Manu" -// c.DefaultQuery("id", "none") == "none" -// c.DefaultQuery("lastname", "none") == "" -func (c *Context) DefaultQuery(key, defaultValue string) string { - if value, ok := c.GetQuery(key); ok { - return value - } - return defaultValue -} - -// GetQuery is like Query(), it returns the keyed url query value -// if it exists `(value, true)` (even when the value is an empty string), -// othewise it returns `("", false)`. -// It is shortcut for `c.Request.URL.Query().Get(key)` -// GET /?name=Manu&lastname= -// ("Manu", true) == c.GetQuery("name") -// ("", false) == c.GetQuery("id") -// ("", true) == c.GetQuery("lastname") -func (c *Context) GetQuery(key string) (string, bool) { - req := c.Request - if values, ok := req.URL.Query()[key]; ok && len(values) > 0 { - return values[0], true - } - return "", false -} - -// PostForm returns the specified key from a POST urlencoded form or multipart form -// when it exists, otherwise it returns an empty string `("")`. -func (c *Context) PostForm(key string) string { - value, _ := c.GetPostForm(key) - return value -} - -// DefaultPostForm returns the specified key from a POST urlencoded form or multipart form -// when it exists, otherwise it returns the specified defaultValue string. 
-// See: PostForm() and GetPostForm() for further information. -func (c *Context) DefaultPostForm(key, defaultValue string) string { - if value, ok := c.GetPostForm(key); ok { - return value - } - return defaultValue -} - -// GetPostForm is like PostForm(key). It returns the specified key from a POST urlencoded -// form or multipart form when it exists `(value, true)` (even when the value is an empty string), -// otherwise it returns ("", false). -// For example, during a PATCH request to update the user's email: -// email=mail@example.com --> ("mail@example.com", true) := GetPostForm("email") // set email to "mail@example.com" -// email= --> ("", true) := GetPostForm("email") // set email to "" -// --> ("", false) := GetPostForm("email") // do nothing with email -func (c *Context) GetPostForm(key string) (string, bool) { - req := c.Request - req.ParseMultipartForm(32 << 20) // 32 MB - if values := req.PostForm[key]; len(values) > 0 { - return values[0], true - } - if req.MultipartForm != nil && req.MultipartForm.File != nil { - if values := req.MultipartForm.Value[key]; len(values) > 0 { - return values[0], true - } - } - return "", false -} - -// Bind checks the Content-Type to select a binding engine automatically, -// Depending the "Content-Type" header different bindings are used: -// "application/json" --> JSON binding -// "application/xml" --> XML binding -// otherwise --> returns an error -// It parses the request's body as JSON if Content-Type == "application/json" using JSON or XML as a JSON input. -// It decodes the json payload into the struct specified as a pointer. -// Like ParseBody() but this method also writes a 400 error if the json is not valid. -func (c *Context) Bind(obj interface{}) error { - b := binding.Default(c.Request.Method, c.ContentType()) - return c.BindWith(obj, b) -} - -// BindJSON is a shortcut for c.BindWith(obj, binding.JSON) -func (c *Context) BindJSON(obj interface{}) error { - return c.BindWith(obj, binding.JSON) -} - -// BindWith binds the passed struct pointer using the specified binding engine. -// See the binding package. -func (c *Context) BindWith(obj interface{}, b binding.Binding) error { - if err := b.Bind(c.Request, obj); err != nil { - c.AbortWithError(400, err).SetType(ErrorTypeBind) - return err - } - return nil -} - -// ClientIP implements a best effort algorithm to return the real client IP, it parses -// X-Real-IP and X-Forwarded-For in order to work properly with reverse-proxies such us: nginx or haproxy. -func (c *Context) ClientIP() string { - if c.engine.ForwardedByClientIP { - clientIP := strings.TrimSpace(c.requestHeader("X-Real-Ip")) - if len(clientIP) > 0 { - return clientIP - } - clientIP = c.requestHeader("X-Forwarded-For") - if index := strings.IndexByte(clientIP, ','); index >= 0 { - clientIP = clientIP[0:index] - } - clientIP = strings.TrimSpace(clientIP) - if len(clientIP) > 0 { - return clientIP - } - } - if ip, _, err := net.SplitHostPort(strings.TrimSpace(c.Request.RemoteAddr)); err == nil { - return ip - } - return "" -} - -// ContentType returns the Content-Type header of the request. 
-func (c *Context) ContentType() string { - return filterFlags(c.requestHeader("Content-Type")) -} - -func (c *Context) requestHeader(key string) string { - if values, _ := c.Request.Header[key]; len(values) > 0 { - return values[0] - } - return "" -} - -/************************************/ -/******** RESPONSE RENDERING ********/ -/************************************/ - -func (c *Context) Status(code int) { - c.writermem.WriteHeader(code) -} - -// Header is a intelligent shortcut for c.Writer.Header().Set(key, value) -// It writes a header in the response. -// If value == "", this method removes the header `c.Writer.Header().Del(key)` -func (c *Context) Header(key, value string) { - if len(value) == 0 { - c.Writer.Header().Del(key) - } else { - c.Writer.Header().Set(key, value) - } -} - -func (c *Context) SetCookie( - name string, - value string, - maxAge int, - path string, - domain string, - secure bool, - httpOnly bool, -) { - if path == "" { - path = "/" - } - http.SetCookie(c.Writer, &http.Cookie{ - Name: name, - Value: url.QueryEscape(value), - MaxAge: maxAge, - Path: path, - Domain: domain, - Secure: secure, - HttpOnly: httpOnly, - }) -} - -func (c *Context) Cookie(name string) (string, error) { - cookie, err := c.Request.Cookie(name) - if err != nil { - return "", err - } - val, _ := url.QueryUnescape(cookie.Value) - return val, nil -} - -func (c *Context) Render(code int, r render.Render) { - c.Status(code) - if err := r.Render(c.Writer); err != nil { - panic(err) - } -} - -// HTML renders the HTTP template specified by its file name. -// It also updates the HTTP code and sets the Content-Type as "text/html". -// See http://golang.org/doc/articles/wiki/ -func (c *Context) HTML(code int, name string, obj interface{}) { - instance := c.engine.HTMLRender.Instance(name, obj) - c.Render(code, instance) -} - -// IndentedJSON serializes the given struct as pretty JSON (indented + endlines) into the response body. -// It also sets the Content-Type as "application/json". -// WARNING: we recommend to use this only for development propuses since printing pretty JSON is -// more CPU and bandwidth consuming. Use Context.JSON() instead. -func (c *Context) IndentedJSON(code int, obj interface{}) { - c.Render(code, render.IndentedJSON{Data: obj}) -} - -// JSON serializes the given struct as JSON into the response body. -// It also sets the Content-Type as "application/json". -func (c *Context) JSON(code int, obj interface{}) { - c.Status(code) - if err := render.WriteJSON(c.Writer, obj); err != nil { - panic(err) - } -} - -// XML serializes the given struct as XML into the response body. -// It also sets the Content-Type as "application/xml". -func (c *Context) XML(code int, obj interface{}) { - c.Render(code, render.XML{Data: obj}) -} - -// YAML serializes the given struct as YAML into the response body. -func (c *Context) YAML(code int, obj interface{}) { - c.Render(code, render.YAML{Data: obj}) -} - -// String writes the given string into the response body. -func (c *Context) String(code int, format string, values ...interface{}) { - c.Status(code) - render.WriteString(c.Writer, format, values) -} - -// Redirect returns a HTTP redirect to the specific location. -func (c *Context) Redirect(code int, location string) { - c.Render(-1, render.Redirect{ - Code: code, - Location: location, - Request: c.Request, - }) -} - -// Data writes some data into the body stream and updates the HTTP code. 
-func (c *Context) Data(code int, contentType string, data []byte) { - c.Render(code, render.Data{ - ContentType: contentType, - Data: data, - }) -} - -// File writes the specified file into the body stream in a efficient way. -func (c *Context) File(filepath string) { - http.ServeFile(c.Writer, c.Request, filepath) -} - -// SSEvent writes a Server-Sent Event into the body stream. -func (c *Context) SSEvent(name string, message interface{}) { - c.Render(-1, sse.Event{ - Event: name, - Data: message, - }) -} - -func (c *Context) Stream(step func(w io.Writer) bool) { - w := c.Writer - clientGone := w.CloseNotify() - for { - select { - case <-clientGone: - return - default: - keepOpen := step(w) - w.Flush() - if !keepOpen { - return - } - } - } -} - -/************************************/ -/******** CONTENT NEGOTIATION *******/ -/************************************/ - -type Negotiate struct { - Offered []string - HTMLName string - HTMLData interface{} - JSONData interface{} - XMLData interface{} - Data interface{} -} - -func (c *Context) Negotiate(code int, config Negotiate) { - switch c.NegotiateFormat(config.Offered...) { - case binding.MIMEJSON: - data := chooseData(config.JSONData, config.Data) - c.JSON(code, data) - - case binding.MIMEHTML: - data := chooseData(config.HTMLData, config.Data) - c.HTML(code, config.HTMLName, data) - - case binding.MIMEXML: - data := chooseData(config.XMLData, config.Data) - c.XML(code, data) - - default: - c.AbortWithError(http.StatusNotAcceptable, errors.New("the accepted formats are not offered by the server")) - } -} - -func (c *Context) NegotiateFormat(offered ...string) string { - assert1(len(offered) > 0, "you must provide at least one offer") - - if c.Accepted == nil { - c.Accepted = parseAccept(c.requestHeader("Accept")) - } - if len(c.Accepted) == 0 { - return offered[0] - } - for _, accepted := range c.Accepted { - for _, offert := range offered { - if accepted == offert { - return offert - } - } - } - return "" -} - -func (c *Context) SetAccepted(formats ...string) { - c.Accepted = formats -} - -/************************************/ -/***** GOLANG.ORG/X/NET/CONTEXT *****/ -/************************************/ - -func (c *Context) Deadline() (deadline time.Time, ok bool) { - return -} - -func (c *Context) Done() <-chan struct{} { - return nil -} - -func (c *Context) Err() error { - return nil -} - -func (c *Context) Value(key interface{}) interface{} { - if key == 0 { - return c.Request - } - if keyAsString, ok := key.(string); ok { - val, _ := c.Get(keyAsString) - return val - } - return nil -} diff --git a/vendor/github.com/gin-gonic/gin/debug.go b/vendor/github.com/gin-gonic/gin/debug.go deleted file mode 100644 index a121591..0000000 --- a/vendor/github.com/gin-gonic/gin/debug.go +++ /dev/null @@ -1,71 +0,0 @@ -// Copyright 2014 Manu Martinez-Almeida. All rights reserved. -// Use of this source code is governed by a MIT style -// license that can be found in the LICENSE file. - -package gin - -import ( - "bytes" - "html/template" - "log" -) - -func init() { - log.SetFlags(0) -} - -// IsDebugging returns true if the framework is running in debug mode. -// Use SetMode(gin.Release) to switch to disable the debug mode. 
-func IsDebugging() bool { - return ginMode == debugCode -} - -func debugPrintRoute(httpMethod, absolutePath string, handlers HandlersChain) { - if IsDebugging() { - nuHandlers := len(handlers) - handlerName := nameOfFunction(handlers.Last()) - debugPrint("%-6s %-25s --> %s (%d handlers)\n", httpMethod, absolutePath, handlerName, nuHandlers) - } -} - -func debugPrintLoadTemplate(tmpl *template.Template) { - if IsDebugging() { - var buf bytes.Buffer - for _, tmpl := range tmpl.Templates() { - buf.WriteString("\t- ") - buf.WriteString(tmpl.Name()) - buf.WriteString("\n") - } - debugPrint("Loaded HTML Templates (%d): \n%s\n", len(tmpl.Templates()), buf.String()) - } -} - -func debugPrint(format string, values ...interface{}) { - if IsDebugging() { - log.Printf("[GIN-debug] "+format, values...) - } -} - -func debugPrintWARNINGNew() { - debugPrint(`[WARNING] Running in "debug" mode. Switch to "release" mode in production. - - using env: export GIN_MODE=release - - using code: gin.SetMode(gin.ReleaseMode) - -`) -} - -func debugPrintWARNINGSetHTMLTemplate() { - debugPrint(`[WARNING] Since SetHTMLTemplate() is NOT thread-safe. It should only be called -at initialization. ie. before any route is registered or the router is listening in a socket: - - router := gin.Default() - router.SetHTMLTemplate(template) // << good place - -`) -} - -func debugPrintError(err error) { - if err != nil { - debugPrint("[ERROR] %v\n", err) - } -} diff --git a/vendor/github.com/gin-gonic/gin/deprecated.go b/vendor/github.com/gin-gonic/gin/deprecated.go deleted file mode 100644 index 0488a9b..0000000 --- a/vendor/github.com/gin-gonic/gin/deprecated.go +++ /dev/null @@ -1,12 +0,0 @@ -// Copyright 2014 Manu Martinez-Almeida. All rights reserved. -// Use of this source code is governed by a MIT style -// license that can be found in the LICENSE file. - -package gin - -import "log" - -func (c *Context) GetCookie(name string) (string, error) { - log.Println("GetCookie() method is deprecated. Use Cookie() instead.") - return c.Cookie(name) -} diff --git a/vendor/github.com/gin-gonic/gin/errors.go b/vendor/github.com/gin-gonic/gin/errors.go deleted file mode 100644 index 7716bfa..0000000 --- a/vendor/github.com/gin-gonic/gin/errors.go +++ /dev/null @@ -1,159 +0,0 @@ -// Copyright 2014 Manu Martinez-Almeida. All rights reserved. -// Use of this source code is governed by a MIT style -// license that can be found in the LICENSE file. 
- -package gin - -import ( - "bytes" - "encoding/json" - "fmt" - "reflect" -) - -type ErrorType uint64 - -const ( - ErrorTypeBind ErrorType = 1 << 63 // used when c.Bind() fails - ErrorTypeRender ErrorType = 1 << 62 // used when c.Render() fails - ErrorTypePrivate ErrorType = 1 << 0 - ErrorTypePublic ErrorType = 1 << 1 - - ErrorTypeAny ErrorType = 1<<64 - 1 - ErrorTypeNu = 2 -) - -type ( - Error struct { - Err error - Type ErrorType - Meta interface{} - } - - errorMsgs []*Error -) - -var _ error = &Error{} - -func (msg *Error) SetType(flags ErrorType) *Error { - msg.Type = flags - return msg -} - -func (msg *Error) SetMeta(data interface{}) *Error { - msg.Meta = data - return msg -} - -func (msg *Error) JSON() interface{} { - json := H{} - if msg.Meta != nil { - value := reflect.ValueOf(msg.Meta) - switch value.Kind() { - case reflect.Struct: - return msg.Meta - case reflect.Map: - for _, key := range value.MapKeys() { - json[key.String()] = value.MapIndex(key).Interface() - } - default: - json["meta"] = msg.Meta - } - } - if _, ok := json["error"]; !ok { - json["error"] = msg.Error() - } - return json -} - -// MarshalJSON implements the json.Marshaller interface -func (msg *Error) MarshalJSON() ([]byte, error) { - return json.Marshal(msg.JSON()) -} - -// Implements the error interface -func (msg *Error) Error() string { - return msg.Err.Error() -} - -func (msg *Error) IsType(flags ErrorType) bool { - return (msg.Type & flags) > 0 -} - -// Returns a readonly copy filterd the byte. -// ie ByType(gin.ErrorTypePublic) returns a slice of errors with type=ErrorTypePublic -func (a errorMsgs) ByType(typ ErrorType) errorMsgs { - if len(a) == 0 { - return nil - } - if typ == ErrorTypeAny { - return a - } - var result errorMsgs - for _, msg := range a { - if msg.IsType(typ) { - result = append(result, msg) - } - } - return result -} - -// Returns the last error in the slice. It returns nil if the array is empty. -// Shortcut for errors[len(errors)-1] -func (a errorMsgs) Last() *Error { - length := len(a) - if length > 0 { - return a[length-1] - } - return nil -} - -// Returns an array will all the error messages. 
-// Example: -// c.Error(errors.New("first")) -// c.Error(errors.New("second")) -// c.Error(errors.New("third")) -// c.Errors.Errors() // == []string{"first", "second", "third"} -func (a errorMsgs) Errors() []string { - if len(a) == 0 { - return nil - } - errorStrings := make([]string, len(a)) - for i, err := range a { - errorStrings[i] = err.Error() - } - return errorStrings -} - -func (a errorMsgs) JSON() interface{} { - switch len(a) { - case 0: - return nil - case 1: - return a.Last().JSON() - default: - json := make([]interface{}, len(a)) - for i, err := range a { - json[i] = err.JSON() - } - return json - } -} - -func (a errorMsgs) MarshalJSON() ([]byte, error) { - return json.Marshal(a.JSON()) -} - -func (a errorMsgs) String() string { - if len(a) == 0 { - return "" - } - var buffer bytes.Buffer - for i, msg := range a { - fmt.Fprintf(&buffer, "Error #%02d: %s\n", (i + 1), msg.Err) - if msg.Meta != nil { - fmt.Fprintf(&buffer, " Meta: %v\n", msg.Meta) - } - } - return buffer.String() -} diff --git a/vendor/github.com/gin-gonic/gin/fs.go b/vendor/github.com/gin-gonic/gin/fs.go deleted file mode 100644 index 6af3ded..0000000 --- a/vendor/github.com/gin-gonic/gin/fs.go +++ /dev/null @@ -1,42 +0,0 @@ -package gin - -import ( - "net/http" - "os" -) - -type ( - onlyfilesFS struct { - fs http.FileSystem - } - neuteredReaddirFile struct { - http.File - } -) - -// Dir returns a http.Filesystem that can be used by http.FileServer(). It is used interally -// in router.Static(). -// if listDirectory == true, then it works the same as http.Dir() otherwise it returns -// a filesystem that prevents http.FileServer() to list the directory files. -func Dir(root string, listDirectory bool) http.FileSystem { - fs := http.Dir(root) - if listDirectory { - return fs - } - return &onlyfilesFS{fs} -} - -// Conforms to http.Filesystem -func (fs onlyfilesFS) Open(name string) (http.File, error) { - f, err := fs.fs.Open(name) - if err != nil { - return nil, err - } - return neuteredReaddirFile{f}, nil -} - -// Overrides the http.File default implementation -func (f neuteredReaddirFile) Readdir(count int) ([]os.FileInfo, error) { - // this disables directory listing - return nil, nil -} diff --git a/vendor/github.com/gin-gonic/gin/gin.go b/vendor/github.com/gin-gonic/gin/gin.go deleted file mode 100644 index 60f4ab2..0000000 --- a/vendor/github.com/gin-gonic/gin/gin.go +++ /dev/null @@ -1,370 +0,0 @@ -// Copyright 2014 Manu Martinez-Almeida. All rights reserved. -// Use of this source code is governed by a MIT style -// license that can be found in the LICENSE file. - -package gin - -import ( - "html/template" - "net" - "net/http" - "os" - "sync" - - "github.com/gin-gonic/gin/render" -) - -// Version is Framework's version -const Version = "v1.0rc2" - -var default404Body = []byte("404 page not found") -var default405Body = []byte("405 method not allowed") - -type HandlerFunc func(*Context) -type HandlersChain []HandlerFunc - -// Last returns the last handler in the chain. ie. the last handler is the main own. -func (c HandlersChain) Last() HandlerFunc { - length := len(c) - if length > 0 { - return c[length-1] - } - return nil -} - -type ( - RoutesInfo []RouteInfo - RouteInfo struct { - Method string - Path string - Handler string - } - - // Engine is the framework's instance, it contains the muxer, middleware and configuration settings. 
- // Create an instance of Engine, by using New() or Default() - Engine struct { - RouterGroup - HTMLRender render.HTMLRender - allNoRoute HandlersChain - allNoMethod HandlersChain - noRoute HandlersChain - noMethod HandlersChain - pool sync.Pool - trees methodTrees - - // Enables automatic redirection if the current route can't be matched but a - // handler for the path with (without) the trailing slash exists. - // For example if /foo/ is requested but a route only exists for /foo, the - // client is redirected to /foo with http status code 301 for GET requests - // and 307 for all other request methods. - RedirectTrailingSlash bool - - // If enabled, the router tries to fix the current request path, if no - // handle is registered for it. - // First superfluous path elements like ../ or // are removed. - // Afterwards the router does a case-insensitive lookup of the cleaned path. - // If a handle can be found for this route, the router makes a redirection - // to the corrected path with status code 301 for GET requests and 307 for - // all other request methods. - // For example /FOO and /..//Foo could be redirected to /foo. - // RedirectTrailingSlash is independent of this option. - RedirectFixedPath bool - - // If enabled, the router checks if another method is allowed for the - // current route, if the current request can not be routed. - // If this is the case, the request is answered with 'Method Not Allowed' - // and HTTP status code 405. - // If no other Method is allowed, the request is delegated to the NotFound - // handler. - HandleMethodNotAllowed bool - ForwardedByClientIP bool - } -) - -var _ IRouter = &Engine{} - -// New returns a new blank Engine instance without any middleware attached. -// By default the configuration is: -// - RedirectTrailingSlash: true -// - RedirectFixedPath: false -// - HandleMethodNotAllowed: false -// - ForwardedByClientIP: true -func New() *Engine { - debugPrintWARNINGNew() - engine := &Engine{ - RouterGroup: RouterGroup{ - Handlers: nil, - basePath: "/", - root: true, - }, - RedirectTrailingSlash: true, - RedirectFixedPath: false, - HandleMethodNotAllowed: false, - ForwardedByClientIP: true, - trees: make(methodTrees, 0, 9), - } - engine.RouterGroup.engine = engine - engine.pool.New = func() interface{} { - return engine.allocateContext() - } - return engine -} - -// Default returns an Engine instance with the Logger and Recovery middleware already attached. -func Default() *Engine { - engine := New() - engine.Use(Logger(), Recovery()) - return engine -} - -func (engine *Engine) allocateContext() *Context { - return &Context{engine: engine} -} - -func (engine *Engine) LoadHTMLGlob(pattern string) { - if IsDebugging() { - debugPrintLoadTemplate(template.Must(template.ParseGlob(pattern))) - engine.HTMLRender = render.HTMLDebug{Glob: pattern} - } else { - templ := template.Must(template.ParseGlob(pattern)) - engine.SetHTMLTemplate(templ) - } -} - -func (engine *Engine) LoadHTMLFiles(files ...string) { - if IsDebugging() { - engine.HTMLRender = render.HTMLDebug{Files: files} - } else { - templ := template.Must(template.ParseFiles(files...)) - engine.SetHTMLTemplate(templ) - } -} - -func (engine *Engine) SetHTMLTemplate(templ *template.Template) { - if len(engine.trees) > 0 { - debugPrintWARNINGSetHTMLTemplate() - } - engine.HTMLRender = render.HTMLProduction{Template: templ} -} - -// NoRoute adds handlers for NoRoute. It return a 404 code by default. 
-func (engine *Engine) NoRoute(handlers ...HandlerFunc) { - engine.noRoute = handlers - engine.rebuild404Handlers() -} - -// NoMethod sets the handlers called when... TODO -func (engine *Engine) NoMethod(handlers ...HandlerFunc) { - engine.noMethod = handlers - engine.rebuild405Handlers() -} - -// Use attachs a global middleware to the router. ie. the middleware attached though Use() will be -// included in the handlers chain for every single request. Even 404, 405, static files... -// For example, this is the right place for a logger or error management middleware. -func (engine *Engine) Use(middleware ...HandlerFunc) IRoutes { - engine.RouterGroup.Use(middleware...) - engine.rebuild404Handlers() - engine.rebuild405Handlers() - return engine -} - -func (engine *Engine) rebuild404Handlers() { - engine.allNoRoute = engine.combineHandlers(engine.noRoute) -} - -func (engine *Engine) rebuild405Handlers() { - engine.allNoMethod = engine.combineHandlers(engine.noMethod) -} - -func (engine *Engine) addRoute(method, path string, handlers HandlersChain) { - assert1(path[0] == '/', "path must begin with '/'") - assert1(len(method) > 0, "HTTP method can not be empty") - assert1(len(handlers) > 0, "there must be at least one handler") - - debugPrintRoute(method, path, handlers) - root := engine.trees.get(method) - if root == nil { - root = new(node) - engine.trees = append(engine.trees, methodTree{method: method, root: root}) - } - root.addRoute(path, handlers) -} - -// Routes returns a slice of registered routes, including some useful information, such as: -// the http method, path and the handler name. -func (engine *Engine) Routes() (routes RoutesInfo) { - for _, tree := range engine.trees { - routes = iterate("", tree.method, routes, tree.root) - } - return routes -} - -func iterate(path, method string, routes RoutesInfo, root *node) RoutesInfo { - path += root.path - if len(root.handlers) > 0 { - routes = append(routes, RouteInfo{ - Method: method, - Path: path, - Handler: nameOfFunction(root.handlers.Last()), - }) - } - for _, child := range root.children { - routes = iterate(path, method, routes, child) - } - return routes -} - -// Run attaches the router to a http.Server and starts listening and serving HTTP requests. -// It is a shortcut for http.ListenAndServe(addr, router) -// Note: this method will block the calling goroutine indefinitely unless an error happens. -func (engine *Engine) Run(addr ...string) (err error) { - defer func() { debugPrintError(err) }() - - address := resolveAddress(addr) - debugPrint("Listening and serving HTTP on %s\n", address) - err = http.ListenAndServe(address, engine) - return -} - -// RunTLS attaches the router to a http.Server and starts listening and serving HTTPS (secure) requests. -// It is a shortcut for http.ListenAndServeTLS(addr, certFile, keyFile, router) -// Note: this method will block the calling goroutine indefinitely unless an error happens. -func (engine *Engine) RunTLS(addr string, certFile string, keyFile string) (err error) { - debugPrint("Listening and serving HTTPS on %s\n", addr) - defer func() { debugPrintError(err) }() - - err = http.ListenAndServeTLS(addr, certFile, keyFile, engine) - return -} - -// RunUnix attaches the router to a http.Server and starts listening and serving HTTP requests -// through the specified unix socket (ie. a file). -// Note: this method will block the calling goroutine indefinitely unless an error happens. 
-func (engine *Engine) RunUnix(file string) (err error) { - debugPrint("Listening and serving HTTP on unix:/%s", file) - defer func() { debugPrintError(err) }() - - os.Remove(file) - listener, err := net.Listen("unix", file) - if err != nil { - return - } - defer listener.Close() - err = http.Serve(listener, engine) - return -} - -// Conforms to the http.Handler interface. -func (engine *Engine) ServeHTTP(w http.ResponseWriter, req *http.Request) { - c := engine.pool.Get().(*Context) - c.writermem.reset(w) - c.Request = req - c.reset() - - engine.handleHTTPRequest(c) - - engine.pool.Put(c) -} - -func (engine *Engine) handleHTTPRequest(context *Context) { - httpMethod := context.Request.Method - path := context.Request.URL.Path - - // Find root of the tree for the given HTTP method - t := engine.trees - for i, tl := 0, len(t); i < tl; i++ { - if t[i].method == httpMethod { - root := t[i].root - // Find route in tree - handlers, params, tsr := root.getValue(path, context.Params) - if handlers != nil { - context.handlers = handlers - context.Params = params - context.Next() - context.writermem.WriteHeaderNow() - return - - } else if httpMethod != "CONNECT" && path != "/" { - if tsr && engine.RedirectTrailingSlash { - redirectTrailingSlash(context) - return - } - if engine.RedirectFixedPath && redirectFixedPath(context, root, engine.RedirectFixedPath) { - return - } - } - break - } - } - - // TODO: unit test - if engine.HandleMethodNotAllowed { - for _, tree := range engine.trees { - if tree.method != httpMethod { - if handlers, _, _ := tree.root.getValue(path, nil); handlers != nil { - context.handlers = engine.allNoMethod - serveError(context, 405, default405Body) - return - } - } - } - } - context.handlers = engine.allNoRoute - serveError(context, 404, default404Body) -} - -var mimePlain = []string{MIMEPlain} - -func serveError(c *Context, code int, defaultMessage []byte) { - c.writermem.status = code - c.Next() - if !c.writermem.Written() { - if c.writermem.Status() == code { - c.writermem.Header()["Content-Type"] = mimePlain - c.Writer.Write(defaultMessage) - } else { - c.writermem.WriteHeaderNow() - } - } -} - -func redirectTrailingSlash(c *Context) { - req := c.Request - path := req.URL.Path - code := 301 // Permanent redirect, request with GET method - if req.Method != "GET" { - code = 307 - } - - if len(path) > 1 && path[len(path)-1] == '/' { - req.URL.Path = path[:len(path)-1] - } else { - req.URL.Path = path + "/" - } - debugPrint("redirecting request %d: %s --> %s", code, path, req.URL.String()) - http.Redirect(c.Writer, req, req.URL.String(), code) - c.writermem.WriteHeaderNow() -} - -func redirectFixedPath(c *Context, root *node, trailingSlash bool) bool { - req := c.Request - path := req.URL.Path - - fixedPath, found := root.findCaseInsensitivePath( - cleanPath(path), - trailingSlash, - ) - if found { - code := 301 // Permanent redirect, request with GET method - if req.Method != "GET" { - code = 307 - } - req.URL.Path = string(fixedPath) - debugPrint("redirecting request %d: %s --> %s", code, path, req.URL.String()) - http.Redirect(c.Writer, req, req.URL.String(), code) - c.writermem.WriteHeaderNow() - return true - } - return false -} diff --git a/vendor/github.com/gin-gonic/gin/logger.go b/vendor/github.com/gin-gonic/gin/logger.go deleted file mode 100644 index d56bc62..0000000 --- a/vendor/github.com/gin-gonic/gin/logger.go +++ /dev/null @@ -1,123 +0,0 @@ -// Copyright 2014 Manu Martinez-Almeida. All rights reserved. 
-// Use of this source code is governed by a MIT style -// license that can be found in the LICENSE file. - -package gin - -import ( - "fmt" - "io" - "time" -) - -var ( - green = string([]byte{27, 91, 57, 55, 59, 52, 50, 109}) - white = string([]byte{27, 91, 57, 48, 59, 52, 55, 109}) - yellow = string([]byte{27, 91, 57, 55, 59, 52, 51, 109}) - red = string([]byte{27, 91, 57, 55, 59, 52, 49, 109}) - blue = string([]byte{27, 91, 57, 55, 59, 52, 52, 109}) - magenta = string([]byte{27, 91, 57, 55, 59, 52, 53, 109}) - cyan = string([]byte{27, 91, 57, 55, 59, 52, 54, 109}) - reset = string([]byte{27, 91, 48, 109}) -) - -func ErrorLogger() HandlerFunc { - return ErrorLoggerT(ErrorTypeAny) -} - -func ErrorLoggerT(typ ErrorType) HandlerFunc { - return func(c *Context) { - c.Next() - errors := c.Errors.ByType(typ) - if len(errors) > 0 { - c.JSON(-1, errors) - } - } -} - -// Logger instances a Logger middleware that will write the logs to gin.DefaultWriter -// By default gin.DefaultWriter = os.Stdout -func Logger() HandlerFunc { - return LoggerWithWriter(DefaultWriter) -} - -// LoggerWithWriter instance a Logger middleware with the specified writter buffer. -// Example: os.Stdout, a file opened in write mode, a socket... -func LoggerWithWriter(out io.Writer, notlogged ...string) HandlerFunc { - var skip map[string]struct{} - - if length := len(notlogged); length > 0 { - skip = make(map[string]struct{}, length) - - for _, path := range notlogged { - skip[path] = struct{}{} - } - } - - return func(c *Context) { - // Start timer - start := time.Now() - path := c.Request.URL.Path - - // Process request - c.Next() - - // Log only when path is not being skipped - if _, ok := skip[path]; !ok { - // Stop timer - end := time.Now() - latency := end.Sub(start) - - clientIP := c.ClientIP() - method := c.Request.Method - statusCode := c.Writer.Status() - statusColor := colorForStatus(statusCode) - methodColor := colorForMethod(method) - comment := c.Errors.ByType(ErrorTypePrivate).String() - - fmt.Fprintf(out, "[GIN] %v |%s %3d %s| %13v | %s |%s %s %-7s %s\n%s", - end.Format("2006/01/02 - 15:04:05"), - statusColor, statusCode, reset, - latency, - clientIP, - methodColor, reset, method, - path, - comment, - ) - } - } -} - -func colorForStatus(code int) string { - switch { - case code >= 200 && code < 300: - return green - case code >= 300 && code < 400: - return white - case code >= 400 && code < 500: - return yellow - default: - return red - } -} - -func colorForMethod(method string) string { - switch method { - case "GET": - return blue - case "POST": - return cyan - case "PUT": - return yellow - case "DELETE": - return red - case "PATCH": - return green - case "HEAD": - return magenta - case "OPTIONS": - return white - default: - return reset - } -} diff --git a/vendor/github.com/gin-gonic/gin/logo.jpg b/vendor/github.com/gin-gonic/gin/logo.jpg deleted file mode 100644 index bb51852..0000000 Binary files a/vendor/github.com/gin-gonic/gin/logo.jpg and /dev/null differ diff --git a/vendor/github.com/gin-gonic/gin/mode.go b/vendor/github.com/gin-gonic/gin/mode.go deleted file mode 100644 index fcb8f8f..0000000 --- a/vendor/github.com/gin-gonic/gin/mode.go +++ /dev/null @@ -1,68 +0,0 @@ -// Copyright 2014 Manu Martinez-Almeida. All rights reserved. -// Use of this source code is governed by a MIT style -// license that can be found in the LICENSE file. 
- -package gin - -import ( - "os" - - "github.com/gin-gonic/gin/binding" -) - -const ENV_GIN_MODE = "GIN_MODE" - -const ( - DebugMode string = "debug" - ReleaseMode string = "release" - TestMode string = "test" -) -const ( - debugCode = iota - releaseCode = iota - testCode = iota -) - -// DefaultWriter is the default io.Writer used the Gin for debug output and -// middleware output like Logger() or Recovery(). -// Note that both Logger and Recovery provides custom ways to configure their -// output io.Writer. -// To support coloring in Windows use: -// import "github.com/mattn/go-colorable" -// gin.DefaultWriter = colorable.NewColorableStdout() -var DefaultWriter = os.Stdout -var DefaultErrorWriter = os.Stderr - -var ginMode = debugCode -var modeName = DebugMode - -func init() { - mode := os.Getenv(ENV_GIN_MODE) - if len(mode) == 0 { - SetMode(DebugMode) - } else { - SetMode(mode) - } -} - -func SetMode(value string) { - switch value { - case DebugMode: - ginMode = debugCode - case ReleaseMode: - ginMode = releaseCode - case TestMode: - ginMode = testCode - default: - panic("gin mode unknown: " + value) - } - modeName = value -} - -func DisableBindValidation() { - binding.Validator = nil -} - -func Mode() string { - return modeName -} diff --git a/vendor/github.com/gin-gonic/gin/path.go b/vendor/github.com/gin-gonic/gin/path.go deleted file mode 100644 index 43cdd04..0000000 --- a/vendor/github.com/gin-gonic/gin/path.go +++ /dev/null @@ -1,123 +0,0 @@ -// Copyright 2013 Julien Schmidt. All rights reserved. -// Based on the path package, Copyright 2009 The Go Authors. -// Use of this source code is governed by a BSD-style license that can be found -// in the LICENSE file. - -package gin - -// CleanPath is the URL version of path.Clean, it returns a canonical URL path -// for p, eliminating . and .. elements. -// -// The following rules are applied iteratively until no further processing can -// be done: -// 1. Replace multiple slashes with a single slash. -// 2. Eliminate each . path name element (the current directory). -// 3. Eliminate each inner .. path name element (the parent directory) -// along with the non-.. element that precedes it. -// 4. Eliminate .. elements that begin a rooted path: -// that is, replace "/.." by "/" at the beginning of a path. -// -// If the result of this process is an empty string, "/" is returned -func cleanPath(p string) string { - // Turn empty string into "/" - if p == "" { - return "/" - } - - n := len(p) - var buf []byte - - // Invariants: - // reading from path; r is index of next byte to process. - // writing to buf; w is index of next byte to write. - - // path must start with '/' - r := 1 - w := 1 - - if p[0] != '/' { - r = 0 - buf = make([]byte, n+1) - buf[0] = '/' - } - - trailing := n > 2 && p[n-1] == '/' - - // A bit more clunky without a 'lazybuf' like the path package, but the loop - // gets completely inlined (bufApp). So in contrast to the path package this - // loop has no expensive function calls (except 1x make) - - for r < n { - switch { - case p[r] == '/': - // empty path element, trailing slash is added after the end - r++ - - case p[r] == '.' && r+1 == n: - trailing = true - r++ - - case p[r] == '.' && p[r+1] == '/': - // . element - r++ - - case p[r] == '.' && p[r+1] == '.' && (r+2 == n || p[r+2] == '/'): - // .. 
element: remove to last / - r += 2 - - if w > 1 { - // can backtrack - w-- - - if buf == nil { - for w > 1 && p[w] != '/' { - w-- - } - } else { - for w > 1 && buf[w] != '/' { - w-- - } - } - } - - default: - // real path element. - // add slash if needed - if w > 1 { - bufApp(&buf, p, w, '/') - w++ - } - - // copy element - for r < n && p[r] != '/' { - bufApp(&buf, p, w, p[r]) - w++ - r++ - } - } - } - - // re-append trailing slash - if trailing && w > 1 { - bufApp(&buf, p, w, '/') - w++ - } - - if buf == nil { - return p[:w] - } - return string(buf[:w]) -} - -// internal helper to lazily create a buffer if necessary -func bufApp(buf *[]byte, s string, w int, c byte) { - if *buf == nil { - if s[w] == c { - return - } - - *buf = make([]byte, len(s)) - copy(*buf, s[:w]) - } - (*buf)[w] = c -} diff --git a/vendor/github.com/gin-gonic/gin/render/data.go b/vendor/github.com/gin-gonic/gin/render/data.go deleted file mode 100644 index efa75d5..0000000 --- a/vendor/github.com/gin-gonic/gin/render/data.go +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright 2014 Manu Martinez-Almeida. All rights reserved. -// Use of this source code is governed by a MIT style -// license that can be found in the LICENSE file. - -package render - -import "net/http" - -type Data struct { - ContentType string - Data []byte -} - -func (r Data) Render(w http.ResponseWriter) error { - if len(r.ContentType) > 0 { - w.Header()["Content-Type"] = []string{r.ContentType} - } - w.Write(r.Data) - return nil -} diff --git a/vendor/github.com/gin-gonic/gin/render/html.go b/vendor/github.com/gin-gonic/gin/render/html.go deleted file mode 100644 index 8bfb23a..0000000 --- a/vendor/github.com/gin-gonic/gin/render/html.go +++ /dev/null @@ -1,66 +0,0 @@ -// Copyright 2014 Manu Martinez-Almeida. All rights reserved. -// Use of this source code is governed by a MIT style -// license that can be found in the LICENSE file. - -package render - -import ( - "html/template" - "net/http" -) - -type ( - HTMLRender interface { - Instance(string, interface{}) Render - } - - HTMLProduction struct { - Template *template.Template - } - - HTMLDebug struct { - Files []string - Glob string - } - - HTML struct { - Template *template.Template - Name string - Data interface{} - } -) - -var htmlContentType = []string{"text/html; charset=utf-8"} - -func (r HTMLProduction) Instance(name string, data interface{}) Render { - return HTML{ - Template: r.Template, - Name: name, - Data: data, - } -} - -func (r HTMLDebug) Instance(name string, data interface{}) Render { - return HTML{ - Template: r.loadTemplate(), - Name: name, - Data: data, - } -} -func (r HTMLDebug) loadTemplate() *template.Template { - if len(r.Files) > 0 { - return template.Must(template.ParseFiles(r.Files...)) - } - if len(r.Glob) > 0 { - return template.Must(template.ParseGlob(r.Glob)) - } - panic("the HTML debug render was created without files or glob pattern") -} - -func (r HTML) Render(w http.ResponseWriter) error { - writeContentType(w, htmlContentType) - if len(r.Name) == 0 { - return r.Template.Execute(w, r.Data) - } - return r.Template.ExecuteTemplate(w, r.Name, r.Data) -} diff --git a/vendor/github.com/gin-gonic/gin/render/json.go b/vendor/github.com/gin-gonic/gin/render/json.go deleted file mode 100644 index 32e6058..0000000 --- a/vendor/github.com/gin-gonic/gin/render/json.go +++ /dev/null @@ -1,41 +0,0 @@ -// Copyright 2014 Manu Martinez-Almeida. All rights reserved. -// Use of this source code is governed by a MIT style -// license that can be found in the LICENSE file. 
- -package render - -import ( - "encoding/json" - "net/http" -) - -type ( - JSON struct { - Data interface{} - } - - IndentedJSON struct { - Data interface{} - } -) - -var jsonContentType = []string{"application/json; charset=utf-8"} - -func (r JSON) Render(w http.ResponseWriter) error { - return WriteJSON(w, r.Data) -} - -func (r IndentedJSON) Render(w http.ResponseWriter) error { - writeContentType(w, jsonContentType) - jsonBytes, err := json.MarshalIndent(r.Data, "", " ") - if err != nil { - return err - } - w.Write(jsonBytes) - return nil -} - -func WriteJSON(w http.ResponseWriter, obj interface{}) error { - writeContentType(w, jsonContentType) - return json.NewEncoder(w).Encode(obj) -} diff --git a/vendor/github.com/gin-gonic/gin/render/redirect.go b/vendor/github.com/gin-gonic/gin/render/redirect.go deleted file mode 100644 index bd48d7d..0000000 --- a/vendor/github.com/gin-gonic/gin/render/redirect.go +++ /dev/null @@ -1,24 +0,0 @@ -// Copyright 2014 Manu Martinez-Almeida. All rights reserved. -// Use of this source code is governed by a MIT style -// license that can be found in the LICENSE file. - -package render - -import ( - "fmt" - "net/http" -) - -type Redirect struct { - Code int - Request *http.Request - Location string -} - -func (r Redirect) Render(w http.ResponseWriter) error { - if (r.Code < 300 || r.Code > 308) && r.Code != 201 { - panic(fmt.Sprintf("Cannot redirect with status code %d", r.Code)) - } - http.Redirect(w, r.Request, r.Location, r.Code) - return nil -} diff --git a/vendor/github.com/gin-gonic/gin/render/render.go b/vendor/github.com/gin-gonic/gin/render/render.go deleted file mode 100644 index 3808666..0000000 --- a/vendor/github.com/gin-gonic/gin/render/render.go +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright 2014 Manu Martinez-Almeida. All rights reserved. -// Use of this source code is governed by a MIT style -// license that can be found in the LICENSE file. - -package render - -import "net/http" - -type Render interface { - Render(http.ResponseWriter) error -} - -var ( - _ Render = JSON{} - _ Render = IndentedJSON{} - _ Render = XML{} - _ Render = String{} - _ Render = Redirect{} - _ Render = Data{} - _ Render = HTML{} - _ HTMLRender = HTMLDebug{} - _ HTMLRender = HTMLProduction{} - _ Render = YAML{} -) - -func writeContentType(w http.ResponseWriter, value []string) { - header := w.Header() - if val := header["Content-Type"]; len(val) == 0 { - header["Content-Type"] = value - } -} diff --git a/vendor/github.com/gin-gonic/gin/render/text.go b/vendor/github.com/gin-gonic/gin/render/text.go deleted file mode 100644 index 5a9e280..0000000 --- a/vendor/github.com/gin-gonic/gin/render/text.go +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright 2014 Manu Martinez-Almeida. All rights reserved. -// Use of this source code is governed by a MIT style -// license that can be found in the LICENSE file. - -package render - -import ( - "fmt" - "io" - "net/http" -) - -type String struct { - Format string - Data []interface{} -} - -var plainContentType = []string{"text/plain; charset=utf-8"} - -func (r String) Render(w http.ResponseWriter) error { - WriteString(w, r.Format, r.Data) - return nil -} - -func WriteString(w http.ResponseWriter, format string, data []interface{}) { - writeContentType(w, plainContentType) - - if len(data) > 0 { - fmt.Fprintf(w, format, data...) 
- } else { - io.WriteString(w, format) - } -} diff --git a/vendor/github.com/gin-gonic/gin/render/xml.go b/vendor/github.com/gin-gonic/gin/render/xml.go deleted file mode 100644 index be22e6f..0000000 --- a/vendor/github.com/gin-gonic/gin/render/xml.go +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright 2014 Manu Martinez-Almeida. All rights reserved. -// Use of this source code is governed by a MIT style -// license that can be found in the LICENSE file. - -package render - -import ( - "encoding/xml" - "net/http" -) - -type XML struct { - Data interface{} -} - -var xmlContentType = []string{"application/xml; charset=utf-8"} - -func (r XML) Render(w http.ResponseWriter) error { - writeContentType(w, xmlContentType) - return xml.NewEncoder(w).Encode(r.Data) -} diff --git a/vendor/github.com/gin-gonic/gin/render/yaml.go b/vendor/github.com/gin-gonic/gin/render/yaml.go deleted file mode 100644 index 46937d8..0000000 --- a/vendor/github.com/gin-gonic/gin/render/yaml.go +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright 2014 Manu Martinez-Almeida. All rights reserved. -// Use of this source code is governed by a MIT style -// license that can be found in the LICENSE file. - -package render - -import ( - "net/http" - - "gopkg.in/yaml.v2" -) - -type YAML struct { - Data interface{} -} - -var yamlContentType = []string{"application/x-yaml; charset=utf-8"} - -func (r YAML) Render(w http.ResponseWriter) error { - writeContentType(w, yamlContentType) - - bytes, err := yaml.Marshal(r.Data) - if err != nil { - return err - } - - w.Write(bytes) - return nil -} diff --git a/vendor/github.com/gin-gonic/gin/response_writer.go b/vendor/github.com/gin-gonic/gin/response_writer.go deleted file mode 100644 index fcbe230..0000000 --- a/vendor/github.com/gin-gonic/gin/response_writer.go +++ /dev/null @@ -1,116 +0,0 @@ -// Copyright 2014 Manu Martinez-Almeida. All rights reserved. -// Use of this source code is governed by a MIT style -// license that can be found in the LICENSE file. - -package gin - -import ( - "bufio" - "io" - "net" - "net/http" -) - -const ( - noWritten = -1 - defaultStatus = 200 -) - -type ( - ResponseWriter interface { - http.ResponseWriter - http.Hijacker - http.Flusher - http.CloseNotifier - - // Returns the HTTP response status code of the current request. - Status() int - - // Returns the number of bytes already written into the response http body. - // See Written() - Size() int - - // Writes the string into the response body. - WriteString(string) (int, error) - - // Returns true if the response body was already written. - Written() bool - - // Forces to write the http header (status code + headers). - WriteHeaderNow() - } - - responseWriter struct { - http.ResponseWriter - size int - status int - } -) - -var _ ResponseWriter = &responseWriter{} - -func (w *responseWriter) reset(writer http.ResponseWriter) { - w.ResponseWriter = writer - w.size = noWritten - w.status = defaultStatus -} - -func (w *responseWriter) WriteHeader(code int) { - if code > 0 && w.status != code { - if w.Written() { - debugPrint("[WARNING] Headers were already written. 
Wanted to override status code %d with %d", w.status, code) - } - w.status = code - } -} - -func (w *responseWriter) WriteHeaderNow() { - if !w.Written() { - w.size = 0 - w.ResponseWriter.WriteHeader(w.status) - } -} - -func (w *responseWriter) Write(data []byte) (n int, err error) { - w.WriteHeaderNow() - n, err = w.ResponseWriter.Write(data) - w.size += n - return -} - -func (w *responseWriter) WriteString(s string) (n int, err error) { - w.WriteHeaderNow() - n, err = io.WriteString(w.ResponseWriter, s) - w.size += n - return -} - -func (w *responseWriter) Status() int { - return w.status -} - -func (w *responseWriter) Size() int { - return w.size -} - -func (w *responseWriter) Written() bool { - return w.size != noWritten -} - -// Implements the http.Hijacker interface -func (w *responseWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) { - if w.size < 0 { - w.size = 0 - } - return w.ResponseWriter.(http.Hijacker).Hijack() -} - -// Implements the http.CloseNotify interface -func (w *responseWriter) CloseNotify() <-chan bool { - return w.ResponseWriter.(http.CloseNotifier).CloseNotify() -} - -// Implements the http.Flush interface -func (w *responseWriter) Flush() { - w.ResponseWriter.(http.Flusher).Flush() -} diff --git a/vendor/github.com/gin-gonic/gin/routergroup.go b/vendor/github.com/gin-gonic/gin/routergroup.go deleted file mode 100644 index f22729b..0000000 --- a/vendor/github.com/gin-gonic/gin/routergroup.go +++ /dev/null @@ -1,215 +0,0 @@ -// Copyright 2014 Manu Martinez-Almeida. All rights reserved. -// Use of this source code is governed by a MIT style -// license that can be found in the LICENSE file. - -package gin - -import ( - "net/http" - "path" - "regexp" - "strings" -) - -type ( - IRouter interface { - IRoutes - Group(string, ...HandlerFunc) *RouterGroup - } - - IRoutes interface { - Use(...HandlerFunc) IRoutes - - Handle(string, string, ...HandlerFunc) IRoutes - Any(string, ...HandlerFunc) IRoutes - GET(string, ...HandlerFunc) IRoutes - POST(string, ...HandlerFunc) IRoutes - DELETE(string, ...HandlerFunc) IRoutes - PATCH(string, ...HandlerFunc) IRoutes - PUT(string, ...HandlerFunc) IRoutes - OPTIONS(string, ...HandlerFunc) IRoutes - HEAD(string, ...HandlerFunc) IRoutes - - StaticFile(string, string) IRoutes - Static(string, string) IRoutes - StaticFS(string, http.FileSystem) IRoutes - } - - // RouterGroup is used internally to configure router, a RouterGroup is associated with a prefix - // and an array of handlers (middleware) - RouterGroup struct { - Handlers HandlersChain - basePath string - engine *Engine - root bool - } -) - -var _ IRouter = &RouterGroup{} - -// Use adds middleware to the group, see example code in github. -func (group *RouterGroup) Use(middleware ...HandlerFunc) IRoutes { - group.Handlers = append(group.Handlers, middleware...) - return group.returnObj() -} - -// Group creates a new router group. You should add all the routes that have common middlwares or the same path prefix. -// For example, all the routes that use a common middlware for authorization could be grouped. 
-func (group *RouterGroup) Group(relativePath string, handlers ...HandlerFunc) *RouterGroup { - return &RouterGroup{ - Handlers: group.combineHandlers(handlers), - basePath: group.calculateAbsolutePath(relativePath), - engine: group.engine, - } -} - -func (group *RouterGroup) BasePath() string { - return group.basePath -} - -func (group *RouterGroup) handle(httpMethod, relativePath string, handlers HandlersChain) IRoutes { - absolutePath := group.calculateAbsolutePath(relativePath) - handlers = group.combineHandlers(handlers) - group.engine.addRoute(httpMethod, absolutePath, handlers) - return group.returnObj() -} - -// Handle registers a new request handle and middleware with the given path and method. -// The last handler should be the real handler, the other ones should be middleware that can and should be shared among different routes. -// See the example code in github. -// -// For GET, POST, PUT, PATCH and DELETE requests the respective shortcut -// functions can be used. -// -// This function is intended for bulk loading and to allow the usage of less -// frequently used, non-standardized or custom methods (e.g. for internal -// communication with a proxy). -func (group *RouterGroup) Handle(httpMethod, relativePath string, handlers ...HandlerFunc) IRoutes { - if matches, err := regexp.MatchString("^[A-Z]+$", httpMethod); !matches || err != nil { - panic("http method " + httpMethod + " is not valid") - } - return group.handle(httpMethod, relativePath, handlers) -} - -// POST is a shortcut for router.Handle("POST", path, handle) -func (group *RouterGroup) POST(relativePath string, handlers ...HandlerFunc) IRoutes { - return group.handle("POST", relativePath, handlers) -} - -// GET is a shortcut for router.Handle("GET", path, handle) -func (group *RouterGroup) GET(relativePath string, handlers ...HandlerFunc) IRoutes { - return group.handle("GET", relativePath, handlers) -} - -// DELETE is a shortcut for router.Handle("DELETE", path, handle) -func (group *RouterGroup) DELETE(relativePath string, handlers ...HandlerFunc) IRoutes { - return group.handle("DELETE", relativePath, handlers) -} - -// PATCH is a shortcut for router.Handle("PATCH", path, handle) -func (group *RouterGroup) PATCH(relativePath string, handlers ...HandlerFunc) IRoutes { - return group.handle("PATCH", relativePath, handlers) -} - -// PUT is a shortcut for router.Handle("PUT", path, handle) -func (group *RouterGroup) PUT(relativePath string, handlers ...HandlerFunc) IRoutes { - return group.handle("PUT", relativePath, handlers) -} - -// OPTIONS is a shortcut for router.Handle("OPTIONS", path, handle) -func (group *RouterGroup) OPTIONS(relativePath string, handlers ...HandlerFunc) IRoutes { - return group.handle("OPTIONS", relativePath, handlers) -} - -// HEAD is a shortcut for router.Handle("HEAD", path, handle) -func (group *RouterGroup) HEAD(relativePath string, handlers ...HandlerFunc) IRoutes { - return group.handle("HEAD", relativePath, handlers) -} - -// Any registers a route that matches all the HTTP methods. 
-// GET, POST, PUT, PATCH, HEAD, OPTIONS, DELETE, CONNECT, TRACE -func (group *RouterGroup) Any(relativePath string, handlers ...HandlerFunc) IRoutes { - group.handle("GET", relativePath, handlers) - group.handle("POST", relativePath, handlers) - group.handle("PUT", relativePath, handlers) - group.handle("PATCH", relativePath, handlers) - group.handle("HEAD", relativePath, handlers) - group.handle("OPTIONS", relativePath, handlers) - group.handle("DELETE", relativePath, handlers) - group.handle("CONNECT", relativePath, handlers) - group.handle("TRACE", relativePath, handlers) - return group.returnObj() -} - -// StaticFile registers a single route in order to server a single file of the local filesystem. -// router.StaticFile("favicon.ico", "./resources/favicon.ico") -func (group *RouterGroup) StaticFile(relativePath, filepath string) IRoutes { - if strings.Contains(relativePath, ":") || strings.Contains(relativePath, "*") { - panic("URL parameters can not be used when serving a static file") - } - handler := func(c *Context) { - c.File(filepath) - } - group.GET(relativePath, handler) - group.HEAD(relativePath, handler) - return group.returnObj() -} - -// Static serves files from the given file system root. -// Internally a http.FileServer is used, therefore http.NotFound is used instead -// of the Router's NotFound handler. -// To use the operating system's file system implementation, -// use : -// router.Static("/static", "/var/www") -func (group *RouterGroup) Static(relativePath, root string) IRoutes { - return group.StaticFS(relativePath, Dir(root, false)) -} - -// StaticFS works just like `Static()` but a custom `http.FileSystem` can be used instead. -// Gin by default user: gin.Dir() -func (group *RouterGroup) StaticFS(relativePath string, fs http.FileSystem) IRoutes { - if strings.Contains(relativePath, ":") || strings.Contains(relativePath, "*") { - panic("URL parameters can not be used when serving a static folder") - } - handler := group.createStaticHandler(relativePath, fs) - urlPattern := path.Join(relativePath, "/*filepath") - - // Register GET and HEAD handlers - group.GET(urlPattern, handler) - group.HEAD(urlPattern, handler) - return group.returnObj() -} - -func (group *RouterGroup) createStaticHandler(relativePath string, fs http.FileSystem) HandlerFunc { - absolutePath := group.calculateAbsolutePath(relativePath) - fileServer := http.StripPrefix(absolutePath, http.FileServer(fs)) - _, nolisting := fs.(*onlyfilesFS) - return func(c *Context) { - if nolisting { - c.Writer.WriteHeader(404) - } - fileServer.ServeHTTP(c.Writer, c.Request) - } -} - -func (group *RouterGroup) combineHandlers(handlers HandlersChain) HandlersChain { - finalSize := len(group.Handlers) + len(handlers) - if finalSize >= int(abortIndex) { - panic("too many handlers") - } - mergedHandlers := make(HandlersChain, finalSize) - copy(mergedHandlers, group.Handlers) - copy(mergedHandlers[len(group.Handlers):], handlers) - return mergedHandlers -} - -func (group *RouterGroup) calculateAbsolutePath(relativePath string) string { - return joinPaths(group.basePath, relativePath) -} - -func (group *RouterGroup) returnObj() IRoutes { - if group.root { - return group.engine - } - return group -} diff --git a/vendor/github.com/gin-gonic/gin/tree.go b/vendor/github.com/gin-gonic/gin/tree.go deleted file mode 100644 index 4f1da27..0000000 --- a/vendor/github.com/gin-gonic/gin/tree.go +++ /dev/null @@ -1,605 +0,0 @@ -// Copyright 2013 Julien Schmidt. All rights reserved. 
-// Use of this source code is governed by a BSD-style license that can be found -// in the LICENSE file. - -package gin - -import ( - "strings" - "unicode" -) - -// Param is a single URL parameter, consisting of a key and a value. -type Param struct { - Key string - Value string -} - -// Params is a Param-slice, as returned by the router. -// The slice is ordered, the first URL parameter is also the first slice value. -// It is therefore safe to read values by the index. -type Params []Param - -// Get returns the value of the first Param which key matches the given name. -// If no matching Param is found, an empty string is returned. -func (ps Params) Get(name string) (string, bool) { - for _, entry := range ps { - if entry.Key == name { - return entry.Value, true - } - } - return "", false -} - -// ByName returns the value of the first Param which key matches the given name. -// If no matching Param is found, an empty string is returned. -func (ps Params) ByName(name string) (va string) { - va, _ = ps.Get(name) - return -} - -type methodTree struct { - method string - root *node -} - -type methodTrees []methodTree - -func (trees methodTrees) get(method string) *node { - for _, tree := range trees { - if tree.method == method { - return tree.root - } - } - return nil -} - -func min(a, b int) int { - if a <= b { - return a - } - return b -} - -func countParams(path string) uint8 { - var n uint - for i := 0; i < len(path); i++ { - if path[i] != ':' && path[i] != '*' { - continue - } - n++ - } - if n >= 255 { - return 255 - } - return uint8(n) -} - -type nodeType uint8 - -const ( - static nodeType = iota // default - root - param - catchAll -) - -type node struct { - path string - wildChild bool - nType nodeType - maxParams uint8 - indices string - children []*node - handlers HandlersChain - priority uint32 -} - -// increments priority of the given child and reorders if necessary -func (n *node) incrementChildPrio(pos int) int { - n.children[pos].priority++ - prio := n.children[pos].priority - - // adjust position (move to front) - newPos := pos - for newPos > 0 && n.children[newPos-1].priority < prio { - // swap node positions - tmpN := n.children[newPos-1] - n.children[newPos-1] = n.children[newPos] - n.children[newPos] = tmpN - - newPos-- - } - - // build new index char string - if newPos != pos { - n.indices = n.indices[:newPos] + // unchanged prefix, might be empty - n.indices[pos:pos+1] + // the index char we move - n.indices[newPos:pos] + n.indices[pos+1:] // rest without char at 'pos' - } - - return newPos -} - -// addRoute adds a node with the given handle to the path. -// Not concurrency-safe! -func (n *node) addRoute(path string, handlers HandlersChain) { - fullPath := path - n.priority++ - numParams := countParams(path) - - // non-empty tree - if len(n.path) > 0 || len(n.children) > 0 { - walk: - for { - // Update maxParams of the current node - if numParams > n.maxParams { - n.maxParams = numParams - } - - // Find the longest common prefix. - // This also implies that the common prefix contains no ':' or '*' - // since the existing key can't contain those chars. 
- i := 0 - max := min(len(path), len(n.path)) - for i < max && path[i] == n.path[i] { - i++ - } - - // Split edge - if i < len(n.path) { - child := node{ - path: n.path[i:], - wildChild: n.wildChild, - indices: n.indices, - children: n.children, - handlers: n.handlers, - priority: n.priority - 1, - } - - // Update maxParams (max of all children) - for i := range child.children { - if child.children[i].maxParams > child.maxParams { - child.maxParams = child.children[i].maxParams - } - } - - n.children = []*node{&child} - // []byte for proper unicode char conversion, see #65 - n.indices = string([]byte{n.path[i]}) - n.path = path[:i] - n.handlers = nil - n.wildChild = false - } - - // Make new node a child of this node - if i < len(path) { - path = path[i:] - - if n.wildChild { - n = n.children[0] - n.priority++ - - // Update maxParams of the child node - if numParams > n.maxParams { - n.maxParams = numParams - } - numParams-- - - // Check if the wildcard matches - if len(path) >= len(n.path) && n.path == path[:len(n.path)] { - // check for longer wildcard, e.g. :name and :names - if len(n.path) >= len(path) || path[len(n.path)] == '/' { - continue walk - } - } - - panic("path segment '" + path + - "' conflicts with existing wildcard '" + n.path + - "' in path '" + fullPath + "'") - } - - c := path[0] - - // slash after param - if n.nType == param && c == '/' && len(n.children) == 1 { - n = n.children[0] - n.priority++ - continue walk - } - - // Check if a child with the next path byte exists - for i := 0; i < len(n.indices); i++ { - if c == n.indices[i] { - i = n.incrementChildPrio(i) - n = n.children[i] - continue walk - } - } - - // Otherwise insert it - if c != ':' && c != '*' { - // []byte for proper unicode char conversion, see #65 - n.indices += string([]byte{c}) - child := &node{ - maxParams: numParams, - } - n.children = append(n.children, child) - n.incrementChildPrio(len(n.indices) - 1) - n = child - } - n.insertChild(numParams, path, fullPath, handlers) - return - - } else if i == len(path) { // Make node a (in-path) leaf - if n.handlers != nil { - panic("handlers are already registered for path ''" + fullPath + "'") - } - n.handlers = handlers - } - return - } - } else { // Empty tree - n.insertChild(numParams, path, fullPath, handlers) - n.nType = root - } -} - -func (n *node) insertChild(numParams uint8, path string, fullPath string, handlers HandlersChain) { - var offset int // already handled bytes of the path - - // find prefix until first wildcard (beginning with ':'' or '*'') - for i, max := 0, len(path); numParams > 0; i++ { - c := path[i] - if c != ':' && c != '*' { - continue - } - - // find wildcard end (either '/' or path end) - end := i + 1 - for end < max && path[end] != '/' { - switch path[end] { - // the wildcard name must not contain ':' and '*' - case ':', '*': - panic("only one wildcard per path segment is allowed, has: '" + - path[i:] + "' in path '" + fullPath + "'") - default: - end++ - } - } - - // check if this Node existing children which would be - // unreachable if we insert the wildcard here - if len(n.children) > 0 { - panic("wildcard route '" + path[i:end] + - "' conflicts with existing children in path '" + fullPath + "'") - } - - // check if the wildcard has a name - if end-i < 2 { - panic("wildcards must be named with a non-empty name in path '" + fullPath + "'") - } - - if c == ':' { // param - // split path at the beginning of the wildcard - if i > 0 { - n.path = path[offset:i] - offset = i - } - - child := &node{ - nType: param, - maxParams: 
numParams, - } - n.children = []*node{child} - n.wildChild = true - n = child - n.priority++ - numParams-- - - // if the path doesn't end with the wildcard, then there - // will be another non-wildcard subpath starting with '/' - if end < max { - n.path = path[offset:end] - offset = end - - child := &node{ - maxParams: numParams, - priority: 1, - } - n.children = []*node{child} - n = child - } - - } else { // catchAll - if end != max || numParams > 1 { - panic("catch-all routes are only allowed at the end of the path in path '" + fullPath + "'") - } - - if len(n.path) > 0 && n.path[len(n.path)-1] == '/' { - panic("catch-all conflicts with existing handle for the path segment root in path '" + fullPath + "'") - } - - // currently fixed width 1 for '/' - i-- - if path[i] != '/' { - panic("no / before catch-all in path '" + fullPath + "'") - } - - n.path = path[offset:i] - - // first node: catchAll node with empty path - child := &node{ - wildChild: true, - nType: catchAll, - maxParams: 1, - } - n.children = []*node{child} - n.indices = string(path[i]) - n = child - n.priority++ - - // second node: node holding the variable - child = &node{ - path: path[i:], - nType: catchAll, - maxParams: 1, - handlers: handlers, - priority: 1, - } - n.children = []*node{child} - - return - } - } - - // insert remaining path part and handle to the leaf - n.path = path[offset:] - n.handlers = handlers -} - -// Returns the handle registered with the given path (key). The values of -// wildcards are saved to a map. -// If no handle can be found, a TSR (trailing slash redirect) recommendation is -// made if a handle exists with an extra (without the) trailing slash for the -// given path. -func (n *node) getValue(path string, po Params) (handlers HandlersChain, p Params, tsr bool) { - p = po -walk: // Outer loop for walking the tree - for { - if len(path) > len(n.path) { - if path[:len(n.path)] == n.path { - path = path[len(n.path):] - // If this node does not have a wildcard (param or catchAll) - // child, we can just look up the next child node and continue - // to walk down the tree - if !n.wildChild { - c := path[0] - for i := 0; i < len(n.indices); i++ { - if c == n.indices[i] { - n = n.children[i] - continue walk - } - } - - // Nothing found. - // We can recommend to redirect to the same URL without a - // trailing slash if a leaf exists for that path. - tsr = (path == "/" && n.handlers != nil) - return - } - - // handle wildcard child - n = n.children[0] - switch n.nType { - case param: - // find param end (either '/' or path end) - end := 0 - for end < len(path) && path[end] != '/' { - end++ - } - - // save param value - if cap(p) < int(n.maxParams) { - p = make(Params, 0, n.maxParams) - } - i := len(p) - p = p[:i+1] // expand slice within preallocated capacity - p[i].Key = n.path[1:] - p[i].Value = path[:end] - - // we need to go deeper! - if end < len(path) { - if len(n.children) > 0 { - path = path[end:] - n = n.children[0] - continue walk - } - - // ... but we can't - tsr = (len(path) == end+1) - return - } - - if handlers = n.handlers; handlers != nil { - return - } else if len(n.children) == 1 { - // No handle found. 
Check if a handle for this path + a - // trailing slash exists for TSR recommendation - n = n.children[0] - tsr = (n.path == "/" && n.handlers != nil) - } - - return - - case catchAll: - // save param value - if cap(p) < int(n.maxParams) { - p = make(Params, 0, n.maxParams) - } - i := len(p) - p = p[:i+1] // expand slice within preallocated capacity - p[i].Key = n.path[2:] - p[i].Value = path - - handlers = n.handlers - return - - default: - panic("invalid node type") - } - } - } else if path == n.path { - // We should have reached the node containing the handle. - // Check if this node has a handle registered. - if handlers = n.handlers; handlers != nil { - return - } - - if path == "/" && n.wildChild && n.nType != root { - tsr = true - return - } - - // No handle found. Check if a handle for this path + a - // trailing slash exists for trailing slash recommendation - for i := 0; i < len(n.indices); i++ { - if n.indices[i] == '/' { - n = n.children[i] - tsr = (len(n.path) == 1 && n.handlers != nil) || - (n.nType == catchAll && n.children[0].handlers != nil) - return - } - } - - return - } - - // Nothing found. We can recommend to redirect to the same URL with an - // extra trailing slash if a leaf exists for that path - tsr = (path == "/") || - (len(n.path) == len(path)+1 && n.path[len(path)] == '/' && - path == n.path[:len(n.path)-1] && n.handlers != nil) - return - } -} - -// Makes a case-insensitive lookup of the given path and tries to find a handler. -// It can optionally also fix trailing slashes. -// It returns the case-corrected path and a bool indicating whether the lookup -// was successful. -func (n *node) findCaseInsensitivePath(path string, fixTrailingSlash bool) (ciPath []byte, found bool) { - ciPath = make([]byte, 0, len(path)+1) // preallocate enough memory - - // Outer loop for walking the tree - for len(path) >= len(n.path) && strings.ToLower(path[:len(n.path)]) == strings.ToLower(n.path) { - path = path[len(n.path):] - ciPath = append(ciPath, n.path...) - - if len(path) > 0 { - // If this node does not have a wildcard (param or catchAll) child, - // we can just look up the next child node and continue to walk down - // the tree - if !n.wildChild { - r := unicode.ToLower(rune(path[0])) - for i, index := range n.indices { - // must use recursive approach since both index and - // ToLower(index) could exist. We must check both. - if r == unicode.ToLower(index) { - out, found := n.children[i].findCaseInsensitivePath(path, fixTrailingSlash) - if found { - return append(ciPath, out...), true - } - } - } - - // Nothing found. We can recommend to redirect to the same URL - // without a trailing slash if a leaf exists for that path - found = (fixTrailingSlash && path == "/" && n.handlers != nil) - return - } - - n = n.children[0] - switch n.nType { - case param: - // find param end (either '/' or path end) - k := 0 - for k < len(path) && path[k] != '/' { - k++ - } - - // add param value to case insensitive path - ciPath = append(ciPath, path[:k]...) - - // we need to go deeper! - if k < len(path) { - if len(n.children) > 0 { - path = path[k:] - n = n.children[0] - continue - } - - // ... but we can't - if fixTrailingSlash && len(path) == k+1 { - return ciPath, true - } - return - } - - if n.handlers != nil { - return ciPath, true - } else if fixTrailingSlash && len(n.children) == 1 { - // No handle found. 
Check if a handle for this path + a - // trailing slash exists - n = n.children[0] - if n.path == "/" && n.handlers != nil { - return append(ciPath, '/'), true - } - } - return - - case catchAll: - return append(ciPath, path...), true - - default: - panic("invalid node type") - } - } else { - // We should have reached the node containing the handle. - // Check if this node has a handle registered. - if n.handlers != nil { - return ciPath, true - } - - // No handle found. - // Try to fix the path by adding a trailing slash - if fixTrailingSlash { - for i := 0; i < len(n.indices); i++ { - if n.indices[i] == '/' { - n = n.children[i] - if (len(n.path) == 1 && n.handlers != nil) || - (n.nType == catchAll && n.children[0].handlers != nil) { - return append(ciPath, '/'), true - } - return - } - } - } - return - } - } - - // Nothing found. - // Try to fix the path by adding / removing a trailing slash - if fixTrailingSlash { - if path == "/" { - return ciPath, true - } - if len(path)+1 == len(n.path) && n.path[len(path)] == '/' && - strings.ToLower(path) == strings.ToLower(n.path[:len(path)]) && - n.handlers != nil { - return append(ciPath, n.path...), true - } - } - return -} diff --git a/vendor/github.com/gin-gonic/gin/utils.go b/vendor/github.com/gin-gonic/gin/utils.go deleted file mode 100644 index 18064fb..0000000 --- a/vendor/github.com/gin-gonic/gin/utils.go +++ /dev/null @@ -1,154 +0,0 @@ -// Copyright 2014 Manu Martinez-Almeida. All rights reserved. -// Use of this source code is governed by a MIT style -// license that can be found in the LICENSE file. - -package gin - -import ( - "encoding/xml" - "net/http" - "os" - "path" - "reflect" - "runtime" - "strings" -) - -const BindKey = "_gin-gonic/gin/bindkey" - -func Bind(val interface{}) HandlerFunc { - value := reflect.ValueOf(val) - if value.Kind() == reflect.Ptr { - panic(`Bind struct can not be a pointer. 
Example: - Use: gin.Bind(Struct{}) instead of gin.Bind(&Struct{}) -`) - } - typ := value.Type() - - return func(c *Context) { - obj := reflect.New(typ).Interface() - if c.Bind(obj) == nil { - c.Set(BindKey, obj) - } - } -} - -func WrapF(f http.HandlerFunc) HandlerFunc { - return func(c *Context) { - f(c.Writer, c.Request) - } -} - -func WrapH(h http.Handler) HandlerFunc { - return func(c *Context) { - h.ServeHTTP(c.Writer, c.Request) - } -} - -type H map[string]interface{} - -// MarshalXML allows type H to be used with xml.Marshal -func (h H) MarshalXML(e *xml.Encoder, start xml.StartElement) error { - start.Name = xml.Name{ - Space: "", - Local: "map", - } - if err := e.EncodeToken(start); err != nil { - return err - } - for key, value := range h { - elem := xml.StartElement{ - Name: xml.Name{Space: "", Local: key}, - Attr: []xml.Attr{}, - } - if err := e.EncodeElement(value, elem); err != nil { - return err - } - } - if err := e.EncodeToken(xml.EndElement{Name: start.Name}); err != nil { - return err - } - return nil -} - -func assert1(guard bool, text string) { - if !guard { - panic(text) - } -} - -func filterFlags(content string) string { - for i, char := range content { - if char == ' ' || char == ';' { - return content[:i] - } - } - return content -} - -func chooseData(custom, wildcard interface{}) interface{} { - if custom == nil { - if wildcard == nil { - panic("negotiation config is invalid") - } - return wildcard - } - return custom -} - -func parseAccept(acceptHeader string) []string { - parts := strings.Split(acceptHeader, ",") - out := make([]string, 0, len(parts)) - for _, part := range parts { - index := strings.IndexByte(part, ';') - if index >= 0 { - part = part[0:index] - } - part = strings.TrimSpace(part) - if len(part) > 0 { - out = append(out, part) - } - } - return out -} - -func lastChar(str string) uint8 { - size := len(str) - if size == 0 { - panic("The length of the string can't be 0") - } - return str[size-1] -} - -func nameOfFunction(f interface{}) string { - return runtime.FuncForPC(reflect.ValueOf(f).Pointer()).Name() -} - -func joinPaths(absolutePath, relativePath string) string { - if len(relativePath) == 0 { - return absolutePath - } - - finalPath := path.Join(absolutePath, relativePath) - appendSlash := lastChar(relativePath) == '/' && lastChar(finalPath) != '/' - if appendSlash { - return finalPath + "/" - } - return finalPath -} - -func resolveAddress(addr []string) string { - switch len(addr) { - case 0: - if port := os.Getenv("PORT"); len(port) > 0 { - debugPrint("Environment variable PORT=\"%s\"", port) - return ":" + port - } - debugPrint("Environment variable PORT is undefined. Using port :8080 by default") - return ":8080" - case 1: - return addr[0] - default: - panic("too much parameters") - } -} diff --git a/vendor/github.com/gin-gonic/gin/wercker.yml b/vendor/github.com/gin-gonic/gin/wercker.yml deleted file mode 100644 index 3ab8084..0000000 --- a/vendor/github.com/gin-gonic/gin/wercker.yml +++ /dev/null @@ -1 +0,0 @@ -box: wercker/default \ No newline at end of file diff --git a/vendor/github.com/go-sql-driver/mysql/AUTHORS b/vendor/github.com/go-sql-driver/mysql/AUTHORS deleted file mode 100644 index 6dd0167..0000000 --- a/vendor/github.com/go-sql-driver/mysql/AUTHORS +++ /dev/null @@ -1,46 +0,0 @@ -# This is the official list of Go-MySQL-Driver authors for copyright purposes. - -# If you are submitting a patch, please add your name or the name of the -# organization which holds the copyright to this list in alphabetical order. 
- -# Names should be added to this file as -# Name -# The email address is not required for organizations. -# Please keep the list sorted. - - -# Individual Persons - -Aaron Hopkins -Arne Hormann -Carlos Nieto -Chris Moos -DisposaBoy -Frederick Mayle -Gustavo Kristic -Hanno Braun -Henri Yandell -Hirotaka Yamamoto -INADA Naoki -James Harr -Jian Zhen -Joshua Prunier -Julien Schmidt -Kamil Dziedzic -Leonardo YongUk Kim -Lucas Liu -Luke Scott -Michael Woolnough -Nicola Peduzzi -Runrioter Wung -Soroush Pour -Stan Putrya -Xiaobing Jiang -Xiuming Chen -Julien Lefevre - -# Organizations - -Barracuda Networks, Inc. -Google Inc. -Stripe Inc. diff --git a/vendor/github.com/go-sql-driver/mysql/CHANGELOG.md b/vendor/github.com/go-sql-driver/mysql/CHANGELOG.md deleted file mode 100644 index 161ad0f..0000000 --- a/vendor/github.com/go-sql-driver/mysql/CHANGELOG.md +++ /dev/null @@ -1,92 +0,0 @@ -## HEAD - -Changes: - - - Go 1.1 is no longer supported - - Use decimals field from MySQL to format time types (#249) - - Buffer optimizations (#269) - - TLS ServerName defaults to the host (#283) - -Bugfixes: - - - Enable microsecond resolution on TIME, DATETIME and TIMESTAMP (#249) - - Fixed handling of queries without columns and rows (#255) - - Fixed a panic when SetKeepAlive() failed (#298) - -New Features: - - Support for returning table alias on Columns() (#289) - - Placeholder interpolation, can be actived with the DSN parameter `interpolateParams=true` (#309, #318) - - -## Version 1.2 (2014-06-03) - -Changes: - - - We switched back to a "rolling release". `go get` installs the current master branch again - - Version v1 of the driver will not be maintained anymore. Go 1.0 is no longer supported by this driver - - Exported errors to allow easy checking from application code - - Enabled TCP Keepalives on TCP connections - - Optimized INFILE handling (better buffer size calculation, lazy init, ...) - - The DSN parser also checks for a missing separating slash - - Faster binary date / datetime to string formatting - - Also exported the MySQLWarning type - - mysqlConn.Close returns the first error encountered instead of ignoring all errors - - writePacket() automatically writes the packet size to the header - - readPacket() uses an iterative approach instead of the recursive approach to merge splitted packets - -New Features: - - - `RegisterDial` allows the usage of a custom dial function to establish the network connection - - Setting the connection collation is possible with the `collation` DSN parameter. This parameter should be preferred over the `charset` parameter - - Logging of critical errors is configurable with `SetLogger` - - Google CloudSQL support - -Bugfixes: - - - Allow more than 32 parameters in prepared statements - - Various old_password fixes - - Fixed TestConcurrent test to pass Go's race detection - - Fixed appendLengthEncodedInteger for large numbers - - Renamed readLengthEnodedString to readLengthEncodedString and skipLengthEnodedString to skipLengthEncodedString (fixed typo) - - -## Version 1.1 (2013-11-02) - -Changes: - - - Go-MySQL-Driver now requires Go 1.1 - - Connections now use the collation `utf8_general_ci` by default. Adding `&charset=UTF8` to the DSN should not be necessary anymore - - Made closing rows and connections error tolerant. This allows for example deferring rows.Close() without checking for errors - - `[]byte(nil)` is now treated as a NULL value. Before, it was treated like an empty string / `[]byte("")` - - DSN parameter values must now be url.QueryEscape'ed. 
This allows text values to contain special characters, such as '&'. - - Use the IO buffer also for writing. This results in zero allocations (by the driver) for most queries - - Optimized the buffer for reading - - stmt.Query now caches column metadata - - New Logo - - Changed the copyright header to include all contributors - - Improved the LOAD INFILE documentation - - The driver struct is now exported to make the driver directly accessible - - Refactored the driver tests - - Added more benchmarks and moved all to a separate file - - Other small refactoring - -New Features: - - - Added *old_passwords* support: Required in some cases, but must be enabled by adding `allowOldPasswords=true` to the DSN since it is insecure - - Added a `clientFoundRows` parameter: Return the number of matching rows instead of the number of rows changed on UPDATEs - - Added TLS/SSL support: Use a TLS/SSL encrypted connection to the server. Custom TLS configs can be registered and used - -Bugfixes: - - - Fixed MySQL 4.1 support: MySQL 4.1 sends packets with lengths which differ from the specification - - Convert to DB timezone when inserting `time.Time` - - Splitted packets (more than 16MB) are now merged correctly - - Fixed false positive `io.EOF` errors when the data was fully read - - Avoid panics on reuse of closed connections - - Fixed empty string producing false nil values - - Fixed sign byte for positive TIME fields - - -## Version 1.0 (2013-05-14) - -Initial Release diff --git a/vendor/github.com/go-sql-driver/mysql/CONTRIBUTING.md b/vendor/github.com/go-sql-driver/mysql/CONTRIBUTING.md deleted file mode 100644 index f87c198..0000000 --- a/vendor/github.com/go-sql-driver/mysql/CONTRIBUTING.md +++ /dev/null @@ -1,40 +0,0 @@ -# Contributing Guidelines - -## Reporting Issues - -Before creating a new Issue, please check first if a similar Issue [already exists](https://github.com/go-sql-driver/mysql/issues?state=open) or was [recently closed](https://github.com/go-sql-driver/mysql/issues?direction=desc&page=1&sort=updated&state=closed). - -Please provide the following minimum information: -* Your Go-MySQL-Driver version (or git SHA) -* Your Go version (run `go version` in your console) -* A detailed issue description -* Error Log if present -* If possible, a short example - - -## Contributing Code - -By contributing to this project, you share your code under the Mozilla Public License 2, as specified in the LICENSE file. -Don't forget to add yourself to the AUTHORS file. - -### Pull Requests Checklist - -Please check the following points before submitting your pull request: -- [x] Code compiles correctly -- [x] Created tests, if possible -- [x] All tests pass -- [x] Extended the README / documentation, if necessary -- [x] Added yourself to the AUTHORS file - -### Code Review - -Everyone is invited to review and comment on pull requests. -If it looks fine to you, comment with "LGTM" (Looks good to me). - -If changes are required, notice the reviewers with "PTAL" (Please take another look) after committing the fixes. - -Before merging the Pull Request, at least one [team member](https://github.com/go-sql-driver?tab=members) must have commented with "LGTM". - -## Development Ideas - -If you are looking for ideas for code contributions, please check our [Development Ideas](https://github.com/go-sql-driver/mysql/wiki/Development-Ideas) Wiki page. 
diff --git a/vendor/github.com/go-sql-driver/mysql/LICENSE b/vendor/github.com/go-sql-driver/mysql/LICENSE deleted file mode 100644 index 14e2f77..0000000 --- a/vendor/github.com/go-sql-driver/mysql/LICENSE +++ /dev/null @@ -1,373 +0,0 @@ -Mozilla Public License Version 2.0 -================================== - -1. Definitions --------------- - -1.1. "Contributor" - means each individual or legal entity that creates, contributes to - the creation of, or owns Covered Software. - -1.2. "Contributor Version" - means the combination of the Contributions of others (if any) used - by a Contributor and that particular Contributor's Contribution. - -1.3. "Contribution" - means Covered Software of a particular Contributor. - -1.4. "Covered Software" - means Source Code Form to which the initial Contributor has attached - the notice in Exhibit A, the Executable Form of such Source Code - Form, and Modifications of such Source Code Form, in each case - including portions thereof. - -1.5. "Incompatible With Secondary Licenses" - means - - (a) that the initial Contributor has attached the notice described - in Exhibit B to the Covered Software; or - - (b) that the Covered Software was made available under the terms of - version 1.1 or earlier of the License, but not also under the - terms of a Secondary License. - -1.6. "Executable Form" - means any form of the work other than Source Code Form. - -1.7. "Larger Work" - means a work that combines Covered Software with other material, in - a separate file or files, that is not Covered Software. - -1.8. "License" - means this document. - -1.9. "Licensable" - means having the right to grant, to the maximum extent possible, - whether at the time of the initial grant or subsequently, any and - all of the rights conveyed by this License. - -1.10. "Modifications" - means any of the following: - - (a) any file in Source Code Form that results from an addition to, - deletion from, or modification of the contents of Covered - Software; or - - (b) any new file in Source Code Form that contains any Covered - Software. - -1.11. "Patent Claims" of a Contributor - means any patent claim(s), including without limitation, method, - process, and apparatus claims, in any patent Licensable by such - Contributor that would be infringed, but for the grant of the - License, by the making, using, selling, offering for sale, having - made, import, or transfer of either its Contributions or its - Contributor Version. - -1.12. "Secondary License" - means either the GNU General Public License, Version 2.0, the GNU - Lesser General Public License, Version 2.1, the GNU Affero General - Public License, Version 3.0, or any later versions of those - licenses. - -1.13. "Source Code Form" - means the form of the work preferred for making modifications. - -1.14. "You" (or "Your") - means an individual or a legal entity exercising rights under this - License. For legal entities, "You" includes any entity that - controls, is controlled by, or is under common control with You. For - purposes of this definition, "control" means (a) the power, direct - or indirect, to cause the direction or management of such entity, - whether by contract or otherwise, or (b) ownership of more than - fifty percent (50%) of the outstanding shares or beneficial - ownership of such entity. - -2. License Grants and Conditions --------------------------------- - -2.1. 
Grants - -Each Contributor hereby grants You a world-wide, royalty-free, -non-exclusive license: - -(a) under intellectual property rights (other than patent or trademark) - Licensable by such Contributor to use, reproduce, make available, - modify, display, perform, distribute, and otherwise exploit its - Contributions, either on an unmodified basis, with Modifications, or - as part of a Larger Work; and - -(b) under Patent Claims of such Contributor to make, use, sell, offer - for sale, have made, import, and otherwise transfer either its - Contributions or its Contributor Version. - -2.2. Effective Date - -The licenses granted in Section 2.1 with respect to any Contribution -become effective for each Contribution on the date the Contributor first -distributes such Contribution. - -2.3. Limitations on Grant Scope - -The licenses granted in this Section 2 are the only rights granted under -this License. No additional rights or licenses will be implied from the -distribution or licensing of Covered Software under this License. -Notwithstanding Section 2.1(b) above, no patent license is granted by a -Contributor: - -(a) for any code that a Contributor has removed from Covered Software; - or - -(b) for infringements caused by: (i) Your and any other third party's - modifications of Covered Software, or (ii) the combination of its - Contributions with other software (except as part of its Contributor - Version); or - -(c) under Patent Claims infringed by Covered Software in the absence of - its Contributions. - -This License does not grant any rights in the trademarks, service marks, -or logos of any Contributor (except as may be necessary to comply with -the notice requirements in Section 3.4). - -2.4. Subsequent Licenses - -No Contributor makes additional grants as a result of Your choice to -distribute the Covered Software under a subsequent version of this -License (see Section 10.2) or under the terms of a Secondary License (if -permitted under the terms of Section 3.3). - -2.5. Representation - -Each Contributor represents that the Contributor believes its -Contributions are its original creation(s) or it has sufficient rights -to grant the rights to its Contributions conveyed by this License. - -2.6. Fair Use - -This License is not intended to limit any rights You have under -applicable copyright doctrines of fair use, fair dealing, or other -equivalents. - -2.7. Conditions - -Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted -in Section 2.1. - -3. Responsibilities -------------------- - -3.1. Distribution of Source Form - -All distribution of Covered Software in Source Code Form, including any -Modifications that You create or to which You contribute, must be under -the terms of this License. You must inform recipients that the Source -Code Form of the Covered Software is governed by the terms of this -License, and how they can obtain a copy of this License. You may not -attempt to alter or restrict the recipients' rights in the Source Code -Form. - -3.2. 
Distribution of Executable Form - -If You distribute Covered Software in Executable Form then: - -(a) such Covered Software must also be made available in Source Code - Form, as described in Section 3.1, and You must inform recipients of - the Executable Form how they can obtain a copy of such Source Code - Form by reasonable means in a timely manner, at a charge no more - than the cost of distribution to the recipient; and - -(b) You may distribute such Executable Form under the terms of this - License, or sublicense it under different terms, provided that the - license for the Executable Form does not attempt to limit or alter - the recipients' rights in the Source Code Form under this License. - -3.3. Distribution of a Larger Work - -You may create and distribute a Larger Work under terms of Your choice, -provided that You also comply with the requirements of this License for -the Covered Software. If the Larger Work is a combination of Covered -Software with a work governed by one or more Secondary Licenses, and the -Covered Software is not Incompatible With Secondary Licenses, this -License permits You to additionally distribute such Covered Software -under the terms of such Secondary License(s), so that the recipient of -the Larger Work may, at their option, further distribute the Covered -Software under the terms of either this License or such Secondary -License(s). - -3.4. Notices - -You may not remove or alter the substance of any license notices -(including copyright notices, patent notices, disclaimers of warranty, -or limitations of liability) contained within the Source Code Form of -the Covered Software, except that You may alter any license notices to -the extent required to remedy known factual inaccuracies. - -3.5. Application of Additional Terms - -You may choose to offer, and to charge a fee for, warranty, support, -indemnity or liability obligations to one or more recipients of Covered -Software. However, You may do so only on Your own behalf, and not on -behalf of any Contributor. You must make it absolutely clear that any -such warranty, support, indemnity, or liability obligation is offered by -You alone, and You hereby agree to indemnify every Contributor for any -liability incurred by such Contributor as a result of warranty, support, -indemnity or liability terms You offer. You may include additional -disclaimers of warranty and limitations of liability specific to any -jurisdiction. - -4. Inability to Comply Due to Statute or Regulation ---------------------------------------------------- - -If it is impossible for You to comply with any of the terms of this -License with respect to some or all of the Covered Software due to -statute, judicial order, or regulation then You must: (a) comply with -the terms of this License to the maximum extent possible; and (b) -describe the limitations and the code they affect. Such description must -be placed in a text file included with all distributions of the Covered -Software under this License. Except to the extent prohibited by statute -or regulation, such description must be sufficiently detailed for a -recipient of ordinary skill to be able to understand it. - -5. Termination --------------- - -5.1. The rights granted under this License will terminate automatically -if You fail to comply with any of its terms. 
However, if You become -compliant, then the rights granted under this License from a particular -Contributor are reinstated (a) provisionally, unless and until such -Contributor explicitly and finally terminates Your grants, and (b) on an -ongoing basis, if such Contributor fails to notify You of the -non-compliance by some reasonable means prior to 60 days after You have -come back into compliance. Moreover, Your grants from a particular -Contributor are reinstated on an ongoing basis if such Contributor -notifies You of the non-compliance by some reasonable means, this is the -first time You have received notice of non-compliance with this License -from such Contributor, and You become compliant prior to 30 days after -Your receipt of the notice. - -5.2. If You initiate litigation against any entity by asserting a patent -infringement claim (excluding declaratory judgment actions, -counter-claims, and cross-claims) alleging that a Contributor Version -directly or indirectly infringes any patent, then the rights granted to -You by any and all Contributors for the Covered Software under Section -2.1 of this License shall terminate. - -5.3. In the event of termination under Sections 5.1 or 5.2 above, all -end user license agreements (excluding distributors and resellers) which -have been validly granted by You or Your distributors under this License -prior to termination shall survive termination. - -************************************************************************ -* * -* 6. Disclaimer of Warranty * -* ------------------------- * -* * -* Covered Software is provided under this License on an "as is" * -* basis, without warranty of any kind, either expressed, implied, or * -* statutory, including, without limitation, warranties that the * -* Covered Software is free of defects, merchantable, fit for a * -* particular purpose or non-infringing. The entire risk as to the * -* quality and performance of the Covered Software is with You. * -* Should any Covered Software prove defective in any respect, You * -* (not any Contributor) assume the cost of any necessary servicing, * -* repair, or correction. This disclaimer of warranty constitutes an * -* essential part of this License. No use of any Covered Software is * -* authorized under this License except under this disclaimer. * -* * -************************************************************************ - -************************************************************************ -* * -* 7. Limitation of Liability * -* -------------------------- * -* * -* Under no circumstances and under no legal theory, whether tort * -* (including negligence), contract, or otherwise, shall any * -* Contributor, or anyone who distributes Covered Software as * -* permitted above, be liable to You for any direct, indirect, * -* special, incidental, or consequential damages of any character * -* including, without limitation, damages for lost profits, loss of * -* goodwill, work stoppage, computer failure or malfunction, or any * -* and all other commercial damages or losses, even if such party * -* shall have been informed of the possibility of such damages. This * -* limitation of liability shall not apply to liability for death or * -* personal injury resulting from such party's negligence to the * -* extent applicable law prohibits such limitation. Some * -* jurisdictions do not allow the exclusion or limitation of * -* incidental or consequential damages, so this exclusion and * -* limitation may not apply to You. 
* -* * -************************************************************************ - -8. Litigation -------------- - -Any litigation relating to this License may be brought only in the -courts of a jurisdiction where the defendant maintains its principal -place of business and such litigation shall be governed by laws of that -jurisdiction, without reference to its conflict-of-law provisions. -Nothing in this Section shall prevent a party's ability to bring -cross-claims or counter-claims. - -9. Miscellaneous ----------------- - -This License represents the complete agreement concerning the subject -matter hereof. If any provision of this License is held to be -unenforceable, such provision shall be reformed only to the extent -necessary to make it enforceable. Any law or regulation which provides -that the language of a contract shall be construed against the drafter -shall not be used to construe this License against a Contributor. - -10. Versions of the License ---------------------------- - -10.1. New Versions - -Mozilla Foundation is the license steward. Except as provided in Section -10.3, no one other than the license steward has the right to modify or -publish new versions of this License. Each version will be given a -distinguishing version number. - -10.2. Effect of New Versions - -You may distribute the Covered Software under the terms of the version -of the License under which You originally received the Covered Software, -or under the terms of any subsequent version published by the license -steward. - -10.3. Modified Versions - -If you create software not governed by this License, and you want to -create a new license for such software, you may create and use a -modified version of this License if you rename the license and remove -any references to the name of the license steward (except to note that -such modified license differs from this License). - -10.4. Distributing Source Code Form that is Incompatible With Secondary -Licenses - -If You choose to distribute Source Code Form that is Incompatible With -Secondary Licenses under the terms of this version of the License, the -notice described in Exhibit B of this License must be attached. - -Exhibit A - Source Code Form License Notice -------------------------------------------- - - This Source Code Form is subject to the terms of the Mozilla Public - License, v. 2.0. If a copy of the MPL was not distributed with this - file, You can obtain one at http://mozilla.org/MPL/2.0/. - -If it is not possible or desirable to put the notice in a particular -file, then You may include the notice in a location (such as a LICENSE -file in a relevant directory) where a recipient would be likely to look -for such a notice. - -You may add additional accurate notices of copyright ownership. - -Exhibit B - "Incompatible With Secondary Licenses" Notice ---------------------------------------------------------- - - This Source Code Form is "Incompatible With Secondary Licenses", as - defined by the Mozilla Public License, v. 2.0. 
diff --git a/vendor/github.com/go-sql-driver/mysql/README.md b/vendor/github.com/go-sql-driver/mysql/README.md deleted file mode 100644 index 706b7ef..0000000 --- a/vendor/github.com/go-sql-driver/mysql/README.md +++ /dev/null @@ -1,386 +0,0 @@ -# Go-MySQL-Driver - -A MySQL-Driver for Go's [database/sql](http://golang.org/pkg/database/sql) package - -![Go-MySQL-Driver logo](https://raw.github.com/wiki/go-sql-driver/mysql/gomysql_m.png "Golang Gopher holding the MySQL Dolphin") - -**Latest stable Release:** [Version 1.2 (June 03, 2014)](https://github.com/go-sql-driver/mysql/releases) - -[![Build Status](https://travis-ci.org/go-sql-driver/mysql.png?branch=master)](https://travis-ci.org/go-sql-driver/mysql) - ---------------------------------------- - * [Features](#features) - * [Requirements](#requirements) - * [Installation](#installation) - * [Usage](#usage) - * [DSN (Data Source Name)](#dsn-data-source-name) - * [Password](#password) - * [Protocol](#protocol) - * [Address](#address) - * [Parameters](#parameters) - * [Examples](#examples) - * [LOAD DATA LOCAL INFILE support](#load-data-local-infile-support) - * [time.Time support](#timetime-support) - * [Unicode support](#unicode-support) - * [Testing / Development](#testing--development) - * [License](#license) - ---------------------------------------- - -## Features - * Lightweight and [fast](https://github.com/go-sql-driver/sql-benchmark "golang MySQL-Driver performance") - * Native Go implementation. No C-bindings, just pure Go - * Connections over TCP/IPv4, TCP/IPv6, Unix domain sockets or [custom protocols](http://godoc.org/github.com/go-sql-driver/mysql#DialFunc) - * Automatic handling of broken connections - * Automatic Connection Pooling *(by database/sql package)* - * Supports queries larger than 16MB - * Full [`sql.RawBytes`](http://golang.org/pkg/database/sql/#RawBytes) support. - * Intelligent `LONG DATA` handling in prepared statements - * Secure `LOAD DATA LOCAL INFILE` support with file Whitelisting and `io.Reader` support - * Optional `time.Time` parsing - * Optional placeholder interpolation - -## Requirements - * Go 1.2 or higher - * MySQL (4.1+), MariaDB, Percona Server, Google CloudSQL or Sphinx (2.2.3+) - ---------------------------------------- - -## Installation -Simple install the package to your [$GOPATH](http://code.google.com/p/go-wiki/wiki/GOPATH "GOPATH") with the [go tool](http://golang.org/cmd/go/ "go command") from shell: -```bash -$ go get github.com/go-sql-driver/mysql -``` -Make sure [Git is installed](http://git-scm.com/downloads) on your machine and in your system's `PATH`. - -## Usage -_Go MySQL Driver_ is an implementation of Go's `database/sql/driver` interface. You only need to import the driver and can use the full [`database/sql`](http://golang.org/pkg/database/sql) API then. - -Use `mysql` as `driverName` and a valid [DSN](#dsn-data-source-name) as `dataSourceName`: -```go -import "database/sql" -import _ "github.com/go-sql-driver/mysql" - -db, err := sql.Open("mysql", "user:password@/dbname") -``` - -[Examples are available in our Wiki](https://github.com/go-sql-driver/mysql/wiki/Examples "Go-MySQL-Driver Examples"). - - -### DSN (Data Source Name) - -The Data Source Name has a common format, like e.g. 
[PEAR DB](http://pear.php.net/manual/en/package.database.db.intro-dsn.php) uses it, but without a type prefix (optional parts marked by square brackets): -``` -[username[:password]@][protocol[(address)]]/dbname[?param1=value1&...&paramN=valueN] -``` - -A DSN in its fullest form: -``` -username:password@protocol(address)/dbname?param=value -``` - -Except for the database name, all values are optional. So the minimal DSN is: -``` -/dbname -``` - -If you do not want to preselect a database, leave `dbname` empty: -``` -/ -``` -This has the same effect as an empty DSN string: -``` - -``` - -#### Password -Passwords can consist of any character. Escaping is **not** necessary. - -#### Protocol -See [net.Dial](http://golang.org/pkg/net/#Dial) for more information about which networks are available. -In general you should use a Unix domain socket if available and TCP otherwise for best performance. - -#### Address -For TCP and UDP networks, addresses have the form `host:port`. -If `host` is a literal IPv6 address, it must be enclosed in square brackets. -The functions [net.JoinHostPort](http://golang.org/pkg/net/#JoinHostPort) and [net.SplitHostPort](http://golang.org/pkg/net/#SplitHostPort) manipulate addresses in this form. - -For Unix domain sockets the address is the absolute path to the MySQL server socket, e.g. `/var/run/mysqld/mysqld.sock` or `/tmp/mysql.sock`. - -#### Parameters -*Parameters are case-sensitive!* - -Notice that any of `true`, `TRUE`, `True` or `1` is accepted to stand for a true boolean value. Not surprisingly, false can be specified as any of: `false`, `FALSE`, `False` or `0`. - -##### `allowAllFiles` - -``` -Type: bool -Valid Values: true, false -Default: false -``` - -`allowAllFiles=true` disables the file whitelist for `LOAD DATA LOCAL INFILE` and allows *all* files. -[*Might be insecure!*](http://dev.mysql.com/doc/refman/5.7/en/load-data-local.html) - -##### `allowCleartextPasswords` - -``` -Type: bool -Valid Values: true, false -Default: false -``` - -`allowCleartextPasswords=true` allows using the [cleartext client side plugin](http://dev.mysql.com/doc/en/cleartext-authentication-plugin.html) if required by an account, such as one defined with the [PAM authentication plugin](http://dev.mysql.com/doc/en/pam-authentication-plugin.html). Sending passwords in clear text may be a security problem in some configurations. To avoid problems if there is any possibility that the password would be intercepted, clients should connect to MySQL Server using a method that protects the password. Possibilities include [TLS / SSL](#tls), IPsec, or a private network. - -##### `allowOldPasswords` - -``` -Type: bool -Valid Values: true, false -Default: false -``` -`allowOldPasswords=true` allows the usage of the insecure old password method. This should be avoided, but is necessary in some cases. See also [the old_passwords wiki page](https://github.com/go-sql-driver/mysql/wiki/old_passwords). - -##### `charset` - -``` -Type: string -Valid Values: <name> -Default: none -``` - -Sets the charset used for client-server interaction (`"SET NAMES <value>"`). If multiple charsets are set (separated by a comma), the following charset is used if setting the charset fails. This enables, for example, support for `utf8mb4` ([introduced in MySQL 5.5.3](http://dev.mysql.com/doc/refman/5.5/en/charset-unicode-utf8mb4.html)) with fallback to `utf8` for older servers (`charset=utf8mb4,utf8`). - -Usage of the `charset` parameter is discouraged because it issues additional queries to the server.
-Unless you need the fallback behavior, please use `collation` instead. - -##### `collation` - -``` -Type: string -Valid Values: <name> -Default: utf8_general_ci -``` - -Sets the collation used for client-server interaction on connection. In contrast to `charset`, `collation` does not issue additional queries. If the specified collation is unavailable on the target server, the connection will fail. - -A list of valid collations for a server is retrievable with `SHOW COLLATION`. - -##### `clientFoundRows` - -``` -Type: bool -Valid Values: true, false -Default: false -``` - -`clientFoundRows=true` causes an UPDATE to return the number of matching rows instead of the number of rows changed. - -##### `columnsWithAlias` - -``` -Type: bool -Valid Values: true, false -Default: false -``` - -When `columnsWithAlias` is true, calls to `sql.Rows.Columns()` will return the table alias and the column name separated by a dot. For example: - -``` -SELECT u.id FROM users as u -``` - -will return `u.id` instead of just `id` if `columnsWithAlias=true`. - -##### `interpolateParams` - -``` -Type: bool -Valid Values: true, false -Default: false -``` - -If `interpolateParams` is true, placeholders (`?`) in calls to `db.Query()` and `db.Exec()` are interpolated into a single query string with the given parameters. This reduces the number of roundtrips, since with `interpolateParams=false` the driver has to prepare a statement, execute it with the given parameters and close the statement again. - -*This cannot be used together with the multibyte encodings BIG5, CP932, GB2312, GBK or SJIS. These are blacklisted as they may [introduce a SQL injection vulnerability](http://stackoverflow.com/a/12118602/3430118)!* - -##### `loc` - -``` -Type: string -Valid Values: <escaped name> -Default: UTC -``` - -Sets the location for time.Time values (when using `parseTime=true`). *"Local"* sets the system's location. See [time.LoadLocation](http://golang.org/pkg/time/#LoadLocation) for details. - -Note that this sets the location for time.Time values but does not change MySQL's [time_zone setting](https://dev.mysql.com/doc/refman/5.5/en/time-zone-support.html). For that see the [time_zone system variable](#system-variables), which can also be set as a DSN parameter. - -Please keep in mind that param values must be [url.QueryEscape](http://golang.org/pkg/net/url/#QueryEscape)'ed. Alternatively you can manually replace the `/` with `%2F`. For example `US/Pacific` would be `loc=US%2FPacific`. - - -##### `parseTime` - -``` -Type: bool -Valid Values: true, false -Default: false -``` - -`parseTime=true` changes the output type of `DATE` and `DATETIME` values to `time.Time` instead of `[]byte` / `string`. - - -##### `strict` - -``` -Type: bool -Valid Values: true, false -Default: false -``` - -`strict=true` enables the strict mode in which MySQL warnings are treated as errors. - -By default MySQL also treats notes as warnings. Use [`sql_notes=false`](http://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_sql_notes) to ignore notes. See the [examples](#examples) for a DSN example. - - -##### `timeout` - -``` -Type: decimal number -Default: OS default -``` - -*Driver* side connection timeout. The value must be a string of decimal numbers, each with an optional fraction and a unit suffix ( *"ms"*, *"s"*, *"m"*, *"h"* ), such as *"30s"*, *"0.5m"* or *"1m30s"*. To set a server side timeout, use the parameter [`wait_timeout`](http://dev.mysql.com/doc/refman/5.6/en/server-system-variables.html#sysvar_wait_timeout).
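A minimal sketch of a connection that combines several of the parameters above (`parseTime`, `loc`, `timeout`, `collation`), assuming placeholder credentials, database name, and a local server:

```go
package main

import (
	"database/sql"
	"log"
	"net/url"
	"time"

	_ "github.com/go-sql-driver/mysql"
)

func main() {
	// loc values must be URL-escaped, as noted above.
	loc := url.QueryEscape("US/Pacific")

	// Placeholder credentials, host and database name.
	dsn := "user:password@tcp(127.0.0.1:3306)/dbname" +
		"?parseTime=true&loc=" + loc +
		"&timeout=30s&collation=utf8mb4_unicode_ci"

	db, err := sql.Open("mysql", dsn)
	if err != nil {
		log.Fatal(err) // only a malformed DSN fails here; connections are opened lazily
	}
	defer db.Close()

	// With parseTime=true, DATETIME values scan directly into time.Time.
	var now time.Time
	if err := db.QueryRow("SELECT NOW()").Scan(&now); err != nil {
		log.Fatal(err)
	}
	log.Println("server time:", now)
}
```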
- - -##### `tls` - -``` -Type: bool / string -Valid Values: true, false, skip-verify, <name> -Default: false -``` - -`tls=true` enables TLS / SSL encrypted connection to the server. Use `skip-verify` if you want to use a self-signed or invalid certificate (server side). Use a custom value registered with [`mysql.RegisterTLSConfig`](http://godoc.org/github.com/go-sql-driver/mysql#RegisterTLSConfig). - - -##### System Variables - -All other parameters are interpreted as system variables: - * `autocommit`: `"SET autocommit=<value>"` - * [`time_zone`](https://dev.mysql.com/doc/refman/5.5/en/time-zone-support.html): `"SET time_zone=<value>"` - * [`tx_isolation`](https://dev.mysql.com/doc/refman/5.5/en/server-system-variables.html#sysvar_tx_isolation): `"SET tx_isolation=<value>"` - * `param`: `"SET <param>=<value>"` - -*The values must be [url.QueryEscape](http://golang.org/pkg/net/url/#QueryEscape)'ed!* - -#### Examples -``` -user@unix(/path/to/socket)/dbname -``` - -``` -root:pw@unix(/tmp/mysql.sock)/myDatabase?loc=Local -``` - -``` -user:password@tcp(localhost:5555)/dbname?tls=skip-verify&autocommit=true -``` - -Use the [strict mode](#strict) but ignore notes: -``` -user:password@/dbname?strict=true&sql_notes=false -``` - -TCP via IPv6: -``` -user:password@tcp([de:ad:be:ef::ca:fe]:80)/dbname?timeout=90s&collation=utf8mb4_unicode_ci -``` - -TCP on a remote host, e.g. Amazon RDS: -``` -id:password@tcp(your-amazonaws-uri.com:3306)/dbname -``` - -Google Cloud SQL on App Engine: -``` -user@cloudsql(project-id:instance-name)/dbname -``` - -TCP using default port (3306) on localhost: -``` -user:password@tcp/dbname?charset=utf8mb4,utf8&sys_var=esc%40ped -``` - -Use the default protocol (tcp) and host (localhost:3306): -``` -user:password@/dbname -``` - -No Database preselected: -``` -user:password@/ -``` - -### `LOAD DATA LOCAL INFILE` support -For this feature you need direct access to the package. Therefore you must change the import path (no `_`): -```go -import "github.com/go-sql-driver/mysql" -``` - -Files must be whitelisted by registering them with `mysql.RegisterLocalFile(filepath)` (recommended) or the whitelist check must be deactivated by using the DSN parameter `allowAllFiles=true` ([*Might be insecure!*](http://dev.mysql.com/doc/refman/5.7/en/load-data-local.html)). - -To use an `io.Reader`, a handler function which returns an `io.Reader` or `io.ReadCloser` must be registered with `mysql.RegisterReaderHandler(name, handler)`. The Reader is then available with the filepath `Reader::<name>`. Choose different names for different handlers and call `DeregisterReaderHandler` when you no longer need it. - -See the [godoc of Go-MySQL-Driver](http://godoc.org/github.com/go-sql-driver/mysql "golang mysql driver documentation") for details. - - -### `time.Time` support -The default internal output type of MySQL `DATE` and `DATETIME` values is `[]byte`, which allows you to scan the value into a `[]byte`, `string` or `sql.RawBytes` variable in your program. - -However, many want to scan MySQL `DATE` and `DATETIME` values into `time.Time` variables, which is the logical equivalent in Go to `DATE` and `DATETIME` in MySQL. You can do that by changing the internal output type from `[]byte` to `time.Time` with the DSN parameter `parseTime=true`. You can set the default [`time.Time` location](http://golang.org/pkg/time/#Location) with the `loc` DSN parameter. - -**Caution:** As of Go 1.1, this makes `time.Time` the only variable type you can scan `DATE` and `DATETIME` values into.
This breaks for example [`sql.RawBytes` support](https://github.com/go-sql-driver/mysql/wiki/Examples#rawbytes). - -Alternatively you can use the [`NullTime`](http://godoc.org/github.com/go-sql-driver/mysql#NullTime) type as the scan destination, which works with both `time.Time` and `string` / `[]byte`. - - -### Unicode support -Since version 1.1 Go-MySQL-Driver automatically uses the collation `utf8_general_ci` by default. - -Other collations / charsets can be set using the [`collation`](#collation) DSN parameter. - -Version 1.0 of the driver recommended adding `&charset=utf8` (alias for `SET NAMES utf8`) to the DSN to enable proper UTF-8 support. This is not necessary anymore. The [`collation`](#collation) parameter should be preferred to set another collation / charset than the default. - -See http://dev.mysql.com/doc/refman/5.7/en/charset-unicode.html for more details on MySQL's Unicode support. - - -## Testing / Development -To run the driver tests you may need to adjust the configuration. See the [Testing Wiki-Page](https://github.com/go-sql-driver/mysql/wiki/Testing "Testing") for details. - -Go-MySQL-Driver is not feature-complete yet. Your help is very appreciated. -If you want to contribute, you can work on an [open issue](https://github.com/go-sql-driver/mysql/issues?state=open) or review a [pull request](https://github.com/go-sql-driver/mysql/pulls). - -See the [Contribution Guidelines](https://github.com/go-sql-driver/mysql/blob/master/CONTRIBUTING.md) for details. - ---------------------------------------- - -## License -Go-MySQL-Driver is licensed under the [Mozilla Public License Version 2.0](https://raw.github.com/go-sql-driver/mysql/master/LICENSE) - -Mozilla summarizes the license scope as follows: -> MPL: The copyleft applies to any files containing MPLed code. - - -That means: - * You can **use** the **unchanged** source code both in private and commercially - * When distributing, you **must publish** the source code of any **changed files** licensed under the MPL 2.0 under a) the MPL 2.0 itself or b) a compatible license (e.g. GPL 3.0 or Apache License 2.0) - * You **needn't publish** the source code of your library as long as the files licensed under the MPL 2.0 are **unchanged** - -Please read the [MPL 2.0 FAQ](http://www.mozilla.org/MPL/2.0/FAQ.html) if you have further questions regarding the license. - -You can read the full terms here: [LICENSE](https://raw.github.com/go-sql-driver/mysql/master/LICENSE) - -![Go Gopher and MySQL Dolphin](https://raw.github.com/wiki/go-sql-driver/mysql/go-mysql-driver_m.jpg "Golang Gopher transporting the MySQL Dolphin in a wheelbarrow") - diff --git a/vendor/github.com/go-sql-driver/mysql/appengine.go b/vendor/github.com/go-sql-driver/mysql/appengine.go deleted file mode 100644 index 565614e..0000000 --- a/vendor/github.com/go-sql-driver/mysql/appengine.go +++ /dev/null @@ -1,19 +0,0 @@ -// Go MySQL Driver - A MySQL-Driver for Go's database/sql package -// -// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved. -// -// This Source Code Form is subject to the terms of the Mozilla Public -// License, v. 2.0. If a copy of the MPL was not distributed with this file, -// You can obtain one at http://mozilla.org/MPL/2.0/. 
- -// +build appengine - -package mysql - -import ( - "appengine/cloudsql" -) - -func init() { - RegisterDial("cloudsql", cloudsql.Dial) -} diff --git a/vendor/github.com/go-sql-driver/mysql/buffer.go b/vendor/github.com/go-sql-driver/mysql/buffer.go deleted file mode 100644 index 509ce89..0000000 --- a/vendor/github.com/go-sql-driver/mysql/buffer.go +++ /dev/null @@ -1,136 +0,0 @@ -// Go MySQL Driver - A MySQL-Driver for Go's database/sql package -// -// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved. -// -// This Source Code Form is subject to the terms of the Mozilla Public -// License, v. 2.0. If a copy of the MPL was not distributed with this file, -// You can obtain one at http://mozilla.org/MPL/2.0/. - -package mysql - -import "io" - -const defaultBufSize = 4096 - -// A buffer which is used for both reading and writing. -// This is possible since communication on each connection is synchronous. -// In other words, we can't write and read simultaneously on the same connection. -// The buffer is similar to bufio.Reader / Writer but zero-copy-ish -// Also highly optimized for this particular use case. -type buffer struct { - buf []byte - rd io.Reader - idx int - length int -} - -func newBuffer(rd io.Reader) buffer { - var b [defaultBufSize]byte - return buffer{ - buf: b[:], - rd: rd, - } -} - -// fill reads into the buffer until at least _need_ bytes are in it -func (b *buffer) fill(need int) error { - n := b.length - - // move existing data to the beginning - if n > 0 && b.idx > 0 { - copy(b.buf[0:n], b.buf[b.idx:]) - } - - // grow buffer if necessary - // TODO: let the buffer shrink again at some point - // Maybe keep the org buf slice and swap back? - if need > len(b.buf) { - // Round up to the next multiple of the default size - newBuf := make([]byte, ((need/defaultBufSize)+1)*defaultBufSize) - copy(newBuf, b.buf) - b.buf = newBuf - } - - b.idx = 0 - - for { - nn, err := b.rd.Read(b.buf[n:]) - n += nn - - switch err { - case nil: - if n < need { - continue - } - b.length = n - return nil - - case io.EOF: - if n >= need { - b.length = n - return nil - } - return io.ErrUnexpectedEOF - - default: - return err - } - } -} - -// returns next N bytes from buffer. -// The returned slice is only guaranteed to be valid until the next read -func (b *buffer) readNext(need int) ([]byte, error) { - if b.length < need { - // refill - if err := b.fill(need); err != nil { - return nil, err - } - } - - offset := b.idx - b.idx += need - b.length -= need - return b.buf[offset:b.idx], nil -} - -// returns a buffer with the requested size. -// If possible, a slice from the existing buffer is returned. -// Otherwise a bigger buffer is made. -// Only one buffer (total) can be used at a time. -func (b *buffer) takeBuffer(length int) []byte { - if b.length > 0 { - return nil - } - - // test (cheap) general case first - if length <= defaultBufSize || length <= cap(b.buf) { - return b.buf[:length] - } - - if length < maxPacketSize { - b.buf = make([]byte, length) - return b.buf - } - return make([]byte, length) -} - -// shortcut which can be used if the requested buffer is guaranteed to be -// smaller than defaultBufSize -// Only one buffer (total) can be used at a time. -func (b *buffer) takeSmallBuffer(length int) []byte { - if b.length == 0 { - return b.buf[:length] - } - return nil -} - -// takeCompleteBuffer returns the complete existing buffer. -// This can be used if the necessary buffer size is unknown. -// Only one buffer (total) can be used at a time. 
-func (b *buffer) takeCompleteBuffer() []byte { - if b.length == 0 { - return b.buf - } - return nil -} diff --git a/vendor/github.com/go-sql-driver/mysql/collations.go b/vendor/github.com/go-sql-driver/mysql/collations.go deleted file mode 100644 index 6c1d613..0000000 --- a/vendor/github.com/go-sql-driver/mysql/collations.go +++ /dev/null @@ -1,250 +0,0 @@ -// Go MySQL Driver - A MySQL-Driver for Go's database/sql package -// -// Copyright 2014 The Go-MySQL-Driver Authors. All rights reserved. -// -// This Source Code Form is subject to the terms of the Mozilla Public -// License, v. 2.0. If a copy of the MPL was not distributed with this file, -// You can obtain one at http://mozilla.org/MPL/2.0/. - -package mysql - -const defaultCollation byte = 33 // utf8_general_ci - -// A list of available collations mapped to the internal ID. -// To update this map use the following MySQL query: -// SELECT COLLATION_NAME, ID FROM information_schema.COLLATIONS -var collations = map[string]byte{ - "big5_chinese_ci": 1, - "latin2_czech_cs": 2, - "dec8_swedish_ci": 3, - "cp850_general_ci": 4, - "latin1_german1_ci": 5, - "hp8_english_ci": 6, - "koi8r_general_ci": 7, - "latin1_swedish_ci": 8, - "latin2_general_ci": 9, - "swe7_swedish_ci": 10, - "ascii_general_ci": 11, - "ujis_japanese_ci": 12, - "sjis_japanese_ci": 13, - "cp1251_bulgarian_ci": 14, - "latin1_danish_ci": 15, - "hebrew_general_ci": 16, - "tis620_thai_ci": 18, - "euckr_korean_ci": 19, - "latin7_estonian_cs": 20, - "latin2_hungarian_ci": 21, - "koi8u_general_ci": 22, - "cp1251_ukrainian_ci": 23, - "gb2312_chinese_ci": 24, - "greek_general_ci": 25, - "cp1250_general_ci": 26, - "latin2_croatian_ci": 27, - "gbk_chinese_ci": 28, - "cp1257_lithuanian_ci": 29, - "latin5_turkish_ci": 30, - "latin1_german2_ci": 31, - "armscii8_general_ci": 32, - "utf8_general_ci": 33, - "cp1250_czech_cs": 34, - "ucs2_general_ci": 35, - "cp866_general_ci": 36, - "keybcs2_general_ci": 37, - "macce_general_ci": 38, - "macroman_general_ci": 39, - "cp852_general_ci": 40, - "latin7_general_ci": 41, - "latin7_general_cs": 42, - "macce_bin": 43, - "cp1250_croatian_ci": 44, - "utf8mb4_general_ci": 45, - "utf8mb4_bin": 46, - "latin1_bin": 47, - "latin1_general_ci": 48, - "latin1_general_cs": 49, - "cp1251_bin": 50, - "cp1251_general_ci": 51, - "cp1251_general_cs": 52, - "macroman_bin": 53, - "utf16_general_ci": 54, - "utf16_bin": 55, - "utf16le_general_ci": 56, - "cp1256_general_ci": 57, - "cp1257_bin": 58, - "cp1257_general_ci": 59, - "utf32_general_ci": 60, - "utf32_bin": 61, - "utf16le_bin": 62, - "binary": 63, - "armscii8_bin": 64, - "ascii_bin": 65, - "cp1250_bin": 66, - "cp1256_bin": 67, - "cp866_bin": 68, - "dec8_bin": 69, - "greek_bin": 70, - "hebrew_bin": 71, - "hp8_bin": 72, - "keybcs2_bin": 73, - "koi8r_bin": 74, - "koi8u_bin": 75, - "latin2_bin": 77, - "latin5_bin": 78, - "latin7_bin": 79, - "cp850_bin": 80, - "cp852_bin": 81, - "swe7_bin": 82, - "utf8_bin": 83, - "big5_bin": 84, - "euckr_bin": 85, - "gb2312_bin": 86, - "gbk_bin": 87, - "sjis_bin": 88, - "tis620_bin": 89, - "ucs2_bin": 90, - "ujis_bin": 91, - "geostd8_general_ci": 92, - "geostd8_bin": 93, - "latin1_spanish_ci": 94, - "cp932_japanese_ci": 95, - "cp932_bin": 96, - "eucjpms_japanese_ci": 97, - "eucjpms_bin": 98, - "cp1250_polish_ci": 99, - "utf16_unicode_ci": 101, - "utf16_icelandic_ci": 102, - "utf16_latvian_ci": 103, - "utf16_romanian_ci": 104, - "utf16_slovenian_ci": 105, - "utf16_polish_ci": 106, - "utf16_estonian_ci": 107, - "utf16_spanish_ci": 108, - "utf16_swedish_ci": 109, - 
"utf16_turkish_ci": 110, - "utf16_czech_ci": 111, - "utf16_danish_ci": 112, - "utf16_lithuanian_ci": 113, - "utf16_slovak_ci": 114, - "utf16_spanish2_ci": 115, - "utf16_roman_ci": 116, - "utf16_persian_ci": 117, - "utf16_esperanto_ci": 118, - "utf16_hungarian_ci": 119, - "utf16_sinhala_ci": 120, - "utf16_german2_ci": 121, - "utf16_croatian_ci": 122, - "utf16_unicode_520_ci": 123, - "utf16_vietnamese_ci": 124, - "ucs2_unicode_ci": 128, - "ucs2_icelandic_ci": 129, - "ucs2_latvian_ci": 130, - "ucs2_romanian_ci": 131, - "ucs2_slovenian_ci": 132, - "ucs2_polish_ci": 133, - "ucs2_estonian_ci": 134, - "ucs2_spanish_ci": 135, - "ucs2_swedish_ci": 136, - "ucs2_turkish_ci": 137, - "ucs2_czech_ci": 138, - "ucs2_danish_ci": 139, - "ucs2_lithuanian_ci": 140, - "ucs2_slovak_ci": 141, - "ucs2_spanish2_ci": 142, - "ucs2_roman_ci": 143, - "ucs2_persian_ci": 144, - "ucs2_esperanto_ci": 145, - "ucs2_hungarian_ci": 146, - "ucs2_sinhala_ci": 147, - "ucs2_german2_ci": 148, - "ucs2_croatian_ci": 149, - "ucs2_unicode_520_ci": 150, - "ucs2_vietnamese_ci": 151, - "ucs2_general_mysql500_ci": 159, - "utf32_unicode_ci": 160, - "utf32_icelandic_ci": 161, - "utf32_latvian_ci": 162, - "utf32_romanian_ci": 163, - "utf32_slovenian_ci": 164, - "utf32_polish_ci": 165, - "utf32_estonian_ci": 166, - "utf32_spanish_ci": 167, - "utf32_swedish_ci": 168, - "utf32_turkish_ci": 169, - "utf32_czech_ci": 170, - "utf32_danish_ci": 171, - "utf32_lithuanian_ci": 172, - "utf32_slovak_ci": 173, - "utf32_spanish2_ci": 174, - "utf32_roman_ci": 175, - "utf32_persian_ci": 176, - "utf32_esperanto_ci": 177, - "utf32_hungarian_ci": 178, - "utf32_sinhala_ci": 179, - "utf32_german2_ci": 180, - "utf32_croatian_ci": 181, - "utf32_unicode_520_ci": 182, - "utf32_vietnamese_ci": 183, - "utf8_unicode_ci": 192, - "utf8_icelandic_ci": 193, - "utf8_latvian_ci": 194, - "utf8_romanian_ci": 195, - "utf8_slovenian_ci": 196, - "utf8_polish_ci": 197, - "utf8_estonian_ci": 198, - "utf8_spanish_ci": 199, - "utf8_swedish_ci": 200, - "utf8_turkish_ci": 201, - "utf8_czech_ci": 202, - "utf8_danish_ci": 203, - "utf8_lithuanian_ci": 204, - "utf8_slovak_ci": 205, - "utf8_spanish2_ci": 206, - "utf8_roman_ci": 207, - "utf8_persian_ci": 208, - "utf8_esperanto_ci": 209, - "utf8_hungarian_ci": 210, - "utf8_sinhala_ci": 211, - "utf8_german2_ci": 212, - "utf8_croatian_ci": 213, - "utf8_unicode_520_ci": 214, - "utf8_vietnamese_ci": 215, - "utf8_general_mysql500_ci": 223, - "utf8mb4_unicode_ci": 224, - "utf8mb4_icelandic_ci": 225, - "utf8mb4_latvian_ci": 226, - "utf8mb4_romanian_ci": 227, - "utf8mb4_slovenian_ci": 228, - "utf8mb4_polish_ci": 229, - "utf8mb4_estonian_ci": 230, - "utf8mb4_spanish_ci": 231, - "utf8mb4_swedish_ci": 232, - "utf8mb4_turkish_ci": 233, - "utf8mb4_czech_ci": 234, - "utf8mb4_danish_ci": 235, - "utf8mb4_lithuanian_ci": 236, - "utf8mb4_slovak_ci": 237, - "utf8mb4_spanish2_ci": 238, - "utf8mb4_roman_ci": 239, - "utf8mb4_persian_ci": 240, - "utf8mb4_esperanto_ci": 241, - "utf8mb4_hungarian_ci": 242, - "utf8mb4_sinhala_ci": 243, - "utf8mb4_german2_ci": 244, - "utf8mb4_croatian_ci": 245, - "utf8mb4_unicode_520_ci": 246, - "utf8mb4_vietnamese_ci": 247, -} - -// A blacklist of collations which is unsafe to interpolate parameters. -// These multibyte encodings may contains 0x5c (`\`) in their trailing bytes. 
-var unsafeCollations = map[byte]bool{ - 1: true, // big5_chinese_ci - 13: true, // sjis_japanese_ci - 28: true, // gbk_chinese_ci - 84: true, // big5_bin - 86: true, // gb2312_bin - 87: true, // gbk_bin - 88: true, // sjis_bin - 95: true, // cp932_japanese_ci - 96: true, // cp932_bin -} diff --git a/vendor/github.com/go-sql-driver/mysql/connection.go b/vendor/github.com/go-sql-driver/mysql/connection.go deleted file mode 100644 index 72ed09d..0000000 --- a/vendor/github.com/go-sql-driver/mysql/connection.go +++ /dev/null @@ -1,403 +0,0 @@ -// Go MySQL Driver - A MySQL-Driver for Go's database/sql package -// -// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved. -// -// This Source Code Form is subject to the terms of the Mozilla Public -// License, v. 2.0. If a copy of the MPL was not distributed with this file, -// You can obtain one at http://mozilla.org/MPL/2.0/. - -package mysql - -import ( - "crypto/tls" - "database/sql/driver" - "errors" - "net" - "strconv" - "strings" - "time" -) - -type mysqlConn struct { - buf buffer - netConn net.Conn - affectedRows uint64 - insertId uint64 - cfg *config - maxPacketAllowed int - maxWriteSize int - flags clientFlag - status statusFlag - sequence uint8 - parseTime bool - strict bool -} - -type config struct { - user string - passwd string - net string - addr string - dbname string - params map[string]string - loc *time.Location - tls *tls.Config - timeout time.Duration - collation uint8 - allowAllFiles bool - allowOldPasswords bool - allowCleartextPasswords bool - clientFoundRows bool - columnsWithAlias bool - interpolateParams bool -} - -// Handles parameters set in DSN after the connection is established -func (mc *mysqlConn) handleParams() (err error) { - for param, val := range mc.cfg.params { - switch param { - // Charset - case "charset": - charsets := strings.Split(val, ",") - for i := range charsets { - // ignore errors here - a charset may not exist - err = mc.exec("SET NAMES " + charsets[i]) - if err == nil { - break - } - } - if err != nil { - return - } - - // time.Time parsing - case "parseTime": - var isBool bool - mc.parseTime, isBool = readBool(val) - if !isBool { - return errors.New("Invalid Bool value: " + val) - } - - // Strict mode - case "strict": - var isBool bool - mc.strict, isBool = readBool(val) - if !isBool { - return errors.New("Invalid Bool value: " + val) - } - - // Compression - case "compress": - err = errors.New("Compression not implemented yet") - return - - // System Vars - default: - err = mc.exec("SET " + param + "=" + val + "") - if err != nil { - return - } - } - } - - return -} - -func (mc *mysqlConn) Begin() (driver.Tx, error) { - if mc.netConn == nil { - errLog.Print(ErrInvalidConn) - return nil, driver.ErrBadConn - } - err := mc.exec("START TRANSACTION") - if err == nil { - return &mysqlTx{mc}, err - } - - return nil, err -} - -func (mc *mysqlConn) Close() (err error) { - // Makes Close idempotent - if mc.netConn != nil { - err = mc.writeCommandPacket(comQuit) - if err == nil { - err = mc.netConn.Close() - } else { - mc.netConn.Close() - } - mc.netConn = nil - } - - mc.cfg = nil - mc.buf.rd = nil - - return -} - -func (mc *mysqlConn) Prepare(query string) (driver.Stmt, error) { - if mc.netConn == nil { - errLog.Print(ErrInvalidConn) - return nil, driver.ErrBadConn - } - // Send command - err := mc.writeCommandPacketStr(comStmtPrepare, query) - if err != nil { - return nil, err - } - - stmt := &mysqlStmt{ - mc: mc, - } - - // Read Result - columnCount, err := stmt.readPrepareResultPacket() - 
if err == nil { - if stmt.paramCount > 0 { - if err = mc.readUntilEOF(); err != nil { - return nil, err - } - } - - if columnCount > 0 { - err = mc.readUntilEOF() - } - } - - return stmt, err -} - -func (mc *mysqlConn) interpolateParams(query string, args []driver.Value) (string, error) { - buf := mc.buf.takeCompleteBuffer() - if buf == nil { - // can not take the buffer. Something must be wrong with the connection - errLog.Print(ErrBusyBuffer) - return "", driver.ErrBadConn - } - buf = buf[:0] - argPos := 0 - - for i := 0; i < len(query); i++ { - q := strings.IndexByte(query[i:], '?') - if q == -1 { - buf = append(buf, query[i:]...) - break - } - buf = append(buf, query[i:i+q]...) - i += q - - arg := args[argPos] - argPos++ - - if arg == nil { - buf = append(buf, "NULL"...) - continue - } - - switch v := arg.(type) { - case int64: - buf = strconv.AppendInt(buf, v, 10) - case float64: - buf = strconv.AppendFloat(buf, v, 'g', -1, 64) - case bool: - if v { - buf = append(buf, '1') - } else { - buf = append(buf, '0') - } - case time.Time: - if v.IsZero() { - buf = append(buf, "'0000-00-00'"...) - } else { - v := v.In(mc.cfg.loc) - v = v.Add(time.Nanosecond * 500) // To round under microsecond - year := v.Year() - year100 := year / 100 - year1 := year % 100 - month := v.Month() - day := v.Day() - hour := v.Hour() - minute := v.Minute() - second := v.Second() - micro := v.Nanosecond() / 1000 - - buf = append(buf, []byte{ - '\'', - digits10[year100], digits01[year100], - digits10[year1], digits01[year1], - '-', - digits10[month], digits01[month], - '-', - digits10[day], digits01[day], - ' ', - digits10[hour], digits01[hour], - ':', - digits10[minute], digits01[minute], - ':', - digits10[second], digits01[second], - }...) - - if micro != 0 { - micro10000 := micro / 10000 - micro100 := micro / 100 % 100 - micro1 := micro % 100 - buf = append(buf, []byte{ - '.', - digits10[micro10000], digits01[micro10000], - digits10[micro100], digits01[micro100], - digits10[micro1], digits01[micro1], - }...) - } - buf = append(buf, '\'') - } - case []byte: - if v == nil { - buf = append(buf, "NULL"...) - } else { - buf = append(buf, "_binary'"...) 
- if mc.status&statusNoBackslashEscapes == 0 { - buf = escapeBytesBackslash(buf, v) - } else { - buf = escapeBytesQuotes(buf, v) - } - buf = append(buf, '\'') - } - case string: - buf = append(buf, '\'') - if mc.status&statusNoBackslashEscapes == 0 { - buf = escapeStringBackslash(buf, v) - } else { - buf = escapeStringQuotes(buf, v) - } - buf = append(buf, '\'') - default: - return "", driver.ErrSkip - } - - if len(buf)+4 > mc.maxPacketAllowed { - return "", driver.ErrSkip - } - } - if argPos != len(args) { - return "", driver.ErrSkip - } - return string(buf), nil -} - -func (mc *mysqlConn) Exec(query string, args []driver.Value) (driver.Result, error) { - if mc.netConn == nil { - errLog.Print(ErrInvalidConn) - return nil, driver.ErrBadConn - } - if len(args) != 0 { - if !mc.cfg.interpolateParams { - return nil, driver.ErrSkip - } - // try to interpolate the parameters to save extra roundtrips for preparing and closing a statement - prepared, err := mc.interpolateParams(query, args) - if err != nil { - return nil, err - } - query = prepared - args = nil - } - mc.affectedRows = 0 - mc.insertId = 0 - - err := mc.exec(query) - if err == nil { - return &mysqlResult{ - affectedRows: int64(mc.affectedRows), - insertId: int64(mc.insertId), - }, err - } - return nil, err -} - -// Internal function to execute commands -func (mc *mysqlConn) exec(query string) error { - // Send command - err := mc.writeCommandPacketStr(comQuery, query) - if err != nil { - return err - } - - // Read Result - resLen, err := mc.readResultSetHeaderPacket() - if err == nil && resLen > 0 { - if err = mc.readUntilEOF(); err != nil { - return err - } - - err = mc.readUntilEOF() - } - - return err -} - -func (mc *mysqlConn) Query(query string, args []driver.Value) (driver.Rows, error) { - if mc.netConn == nil { - errLog.Print(ErrInvalidConn) - return nil, driver.ErrBadConn - } - if len(args) != 0 { - if !mc.cfg.interpolateParams { - return nil, driver.ErrSkip - } - // try client-side prepare to reduce roundtrip - prepared, err := mc.interpolateParams(query, args) - if err != nil { - return nil, err - } - query = prepared - args = nil - } - // Send command - err := mc.writeCommandPacketStr(comQuery, query) - if err == nil { - // Read Result - var resLen int - resLen, err = mc.readResultSetHeaderPacket() - if err == nil { - rows := new(textRows) - rows.mc = mc - - if resLen == 0 { - // no columns, no more data - return emptyRows{}, nil - } - // Columns - rows.columns, err = mc.readColumns(resLen) - return rows, err - } - } - return nil, err -} - -// Gets the value of the given MySQL System Variable -// The returned byte slice is only valid until the next read -func (mc *mysqlConn) getSystemVar(name string) ([]byte, error) { - // Send command - if err := mc.writeCommandPacketStr(comQuery, "SELECT @@"+name); err != nil { - return nil, err - } - - // Read Result - resLen, err := mc.readResultSetHeaderPacket() - if err == nil { - rows := new(textRows) - rows.mc = mc - - if resLen > 0 { - // Columns - if err := mc.readUntilEOF(); err != nil { - return nil, err - } - } - - dest := make([]driver.Value, resLen) - if err = rows.readRow(dest); err == nil { - return dest[0].([]byte), mc.readUntilEOF() - } - } - return nil, err -} diff --git a/vendor/github.com/go-sql-driver/mysql/const.go b/vendor/github.com/go-sql-driver/mysql/const.go deleted file mode 100644 index dddc129..0000000 --- a/vendor/github.com/go-sql-driver/mysql/const.go +++ /dev/null @@ -1,162 +0,0 @@ -// Go MySQL Driver - A MySQL-Driver for Go's database/sql package -// 
-// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved. -// -// This Source Code Form is subject to the terms of the Mozilla Public -// License, v. 2.0. If a copy of the MPL was not distributed with this file, -// You can obtain one at http://mozilla.org/MPL/2.0/. - -package mysql - -const ( - minProtocolVersion byte = 10 - maxPacketSize = 1<<24 - 1 - timeFormat = "2006-01-02 15:04:05.999999" -) - -// MySQL constants documentation: -// http://dev.mysql.com/doc/internals/en/client-server-protocol.html - -const ( - iOK byte = 0x00 - iLocalInFile byte = 0xfb - iEOF byte = 0xfe - iERR byte = 0xff -) - -// https://dev.mysql.com/doc/internals/en/capability-flags.html#packet-Protocol::CapabilityFlags -type clientFlag uint32 - -const ( - clientLongPassword clientFlag = 1 << iota - clientFoundRows - clientLongFlag - clientConnectWithDB - clientNoSchema - clientCompress - clientODBC - clientLocalFiles - clientIgnoreSpace - clientProtocol41 - clientInteractive - clientSSL - clientIgnoreSIGPIPE - clientTransactions - clientReserved - clientSecureConn - clientMultiStatements - clientMultiResults - clientPSMultiResults - clientPluginAuth - clientConnectAttrs - clientPluginAuthLenEncClientData - clientCanHandleExpiredPasswords - clientSessionTrack - clientDeprecateEOF -) - -const ( - comQuit byte = iota + 1 - comInitDB - comQuery - comFieldList - comCreateDB - comDropDB - comRefresh - comShutdown - comStatistics - comProcessInfo - comConnect - comProcessKill - comDebug - comPing - comTime - comDelayedInsert - comChangeUser - comBinlogDump - comTableDump - comConnectOut - comRegisterSlave - comStmtPrepare - comStmtExecute - comStmtSendLongData - comStmtClose - comStmtReset - comSetOption - comStmtFetch -) - -// https://dev.mysql.com/doc/internals/en/com-query-response.html#packet-Protocol::ColumnType -const ( - fieldTypeDecimal byte = iota - fieldTypeTiny - fieldTypeShort - fieldTypeLong - fieldTypeFloat - fieldTypeDouble - fieldTypeNULL - fieldTypeTimestamp - fieldTypeLongLong - fieldTypeInt24 - fieldTypeDate - fieldTypeTime - fieldTypeDateTime - fieldTypeYear - fieldTypeNewDate - fieldTypeVarChar - fieldTypeBit -) -const ( - fieldTypeNewDecimal byte = iota + 0xf6 - fieldTypeEnum - fieldTypeSet - fieldTypeTinyBLOB - fieldTypeMediumBLOB - fieldTypeLongBLOB - fieldTypeBLOB - fieldTypeVarString - fieldTypeString - fieldTypeGeometry -) - -type fieldFlag uint16 - -const ( - flagNotNULL fieldFlag = 1 << iota - flagPriKey - flagUniqueKey - flagMultipleKey - flagBLOB - flagUnsigned - flagZeroFill - flagBinary - flagEnum - flagAutoIncrement - flagTimestamp - flagSet - flagUnknown1 - flagUnknown2 - flagUnknown3 - flagUnknown4 -) - -// http://dev.mysql.com/doc/internals/en/status-flags.html -type statusFlag uint16 - -const ( - statusInTrans statusFlag = 1 << iota - statusInAutocommit - statusReserved // Not in documentation - statusMoreResultsExists - statusNoGoodIndexUsed - statusNoIndexUsed - statusCursorExists - statusLastRowSent - statusDbDropped - statusNoBackslashEscapes - statusMetadataChanged - statusQueryWasSlow - statusPsOutParams - statusInTransReadonly - statusSessionStateChanged -) diff --git a/vendor/github.com/go-sql-driver/mysql/driver.go b/vendor/github.com/go-sql-driver/mysql/driver.go deleted file mode 100644 index d310624..0000000 --- a/vendor/github.com/go-sql-driver/mysql/driver.go +++ /dev/null @@ -1,149 +0,0 @@ -// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved. -// -// This Source Code Form is subject to the terms of the Mozilla Public -// License, v. 2.0. 
If a copy of the MPL was not distributed with this file, -// You can obtain one at http://mozilla.org/MPL/2.0/. - -// Go MySQL Driver - A MySQL-Driver for Go's database/sql package -// -// The driver should be used via the database/sql package: -// -// import "database/sql" -// import _ "github.com/go-sql-driver/mysql" -// -// db, err := sql.Open("mysql", "user:password@/dbname") -// -// See https://github.com/go-sql-driver/mysql#usage for details -package mysql - -import ( - "database/sql" - "database/sql/driver" - "net" -) - -// This struct is exported to make the driver directly accessible. -// In general the driver is used via the database/sql package. -type MySQLDriver struct{} - -// DialFunc is a function which can be used to establish the network connection. -// Custom dial functions must be registered with RegisterDial -type DialFunc func(addr string) (net.Conn, error) - -var dials map[string]DialFunc - -// RegisterDial registers a custom dial function. It can then be used by the -// network address mynet(addr), where mynet is the registered new network. -// addr is passed as a parameter to the dial function. -func RegisterDial(net string, dial DialFunc) { - if dials == nil { - dials = make(map[string]DialFunc) - } - dials[net] = dial -} - -// Open new Connection. -// See https://github.com/go-sql-driver/mysql#dsn-data-source-name for how -// the DSN string is formated -func (d MySQLDriver) Open(dsn string) (driver.Conn, error) { - var err error - - // New mysqlConn - mc := &mysqlConn{ - maxPacketAllowed: maxPacketSize, - maxWriteSize: maxPacketSize - 1, - } - mc.cfg, err = parseDSN(dsn) - if err != nil { - return nil, err - } - - // Connect to Server - if dial, ok := dials[mc.cfg.net]; ok { - mc.netConn, err = dial(mc.cfg.addr) - } else { - nd := net.Dialer{Timeout: mc.cfg.timeout} - mc.netConn, err = nd.Dial(mc.cfg.net, mc.cfg.addr) - } - if err != nil { - return nil, err - } - - // Enable TCP Keepalives on TCP connections - if tc, ok := mc.netConn.(*net.TCPConn); ok { - if err := tc.SetKeepAlive(true); err != nil { - // Don't send COM_QUIT before handshake. 
- mc.netConn.Close() - mc.netConn = nil - return nil, err - } - } - - mc.buf = newBuffer(mc.netConn) - - // Reading Handshake Initialization Packet - cipher, err := mc.readInitPacket() - if err != nil { - mc.Close() - return nil, err - } - - // Send Client Authentication Packet - if err = mc.writeAuthPacket(cipher); err != nil { - mc.Close() - return nil, err - } - - // Read Result Packet - err = mc.readResultOK() - if err != nil { - // Retry with old authentication method, if allowed - if mc.cfg != nil && mc.cfg.allowOldPasswords && err == ErrOldPassword { - if err = mc.writeOldAuthPacket(cipher); err != nil { - mc.Close() - return nil, err - } - if err = mc.readResultOK(); err != nil { - mc.Close() - return nil, err - } - } else if mc.cfg != nil && mc.cfg.allowCleartextPasswords && err == ErrCleartextPassword { - if err = mc.writeClearAuthPacket(); err != nil { - mc.Close() - return nil, err - } - if err = mc.readResultOK(); err != nil { - mc.Close() - return nil, err - } - } else { - mc.Close() - return nil, err - } - - } - - // Get max allowed packet size - maxap, err := mc.getSystemVar("max_allowed_packet") - if err != nil { - mc.Close() - return nil, err - } - mc.maxPacketAllowed = stringToInt(maxap) - 1 - if mc.maxPacketAllowed < maxPacketSize { - mc.maxWriteSize = mc.maxPacketAllowed - } - - // Handle DSN Params - err = mc.handleParams() - if err != nil { - mc.Close() - return nil, err - } - - return mc, nil -} - -func init() { - sql.Register("mysql", &MySQLDriver{}) -} diff --git a/vendor/github.com/go-sql-driver/mysql/errors.go b/vendor/github.com/go-sql-driver/mysql/errors.go deleted file mode 100644 index 44cf30d..0000000 --- a/vendor/github.com/go-sql-driver/mysql/errors.go +++ /dev/null @@ -1,131 +0,0 @@ -// Go MySQL Driver - A MySQL-Driver for Go's database/sql package -// -// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved. -// -// This Source Code Form is subject to the terms of the Mozilla Public -// License, v. 2.0. If a copy of the MPL was not distributed with this file, -// You can obtain one at http://mozilla.org/MPL/2.0/. - -package mysql - -import ( - "database/sql/driver" - "errors" - "fmt" - "io" - "log" - "os" -) - -// Various errors the driver might return. Can change between driver versions. -var ( - ErrInvalidConn = errors.New("Invalid Connection") - ErrMalformPkt = errors.New("Malformed Packet") - ErrNoTLS = errors.New("TLS encryption requested but server does not support TLS") - ErrOldPassword = errors.New("This user requires old password authentication. If you still want to use it, please add 'allowOldPasswords=1' to your DSN. See also https://github.com/go-sql-driver/mysql/wiki/old_passwords") - ErrCleartextPassword = errors.New("This user requires clear text authentication. If you still want to use it, please add 'allowCleartextPasswords=1' to your DSN.") - ErrUnknownPlugin = errors.New("The authentication plugin is not supported.") - ErrOldProtocol = errors.New("MySQL-Server does not support required Protocol 41+") - ErrPktSync = errors.New("Commands out of sync. You can't run this command now") - ErrPktSyncMul = errors.New("Commands out of sync. Did you run multiple statements at once?") - ErrPktTooLarge = errors.New("Packet for query is too large. You can change this value on the server by adjusting the 'max_allowed_packet' variable.") - ErrBusyBuffer = errors.New("Busy buffer") -) - -var errLog Logger = log.New(os.Stderr, "[MySQL] ", log.Ldate|log.Ltime|log.Lshortfile) - -// Logger is used to log critical error messages. 
-type Logger interface { - Print(v ...interface{}) -} - -// SetLogger is used to set the logger for critical errors. -// The initial logger is os.Stderr. -func SetLogger(logger Logger) error { - if logger == nil { - return errors.New("logger is nil") - } - errLog = logger - return nil -} - -// MySQLError is an error type which represents a single MySQL error -type MySQLError struct { - Number uint16 - Message string -} - -func (me *MySQLError) Error() string { - return fmt.Sprintf("Error %d: %s", me.Number, me.Message) -} - -// MySQLWarnings is an error type which represents a group of one or more MySQL -// warnings -type MySQLWarnings []MySQLWarning - -func (mws MySQLWarnings) Error() string { - var msg string - for i, warning := range mws { - if i > 0 { - msg += "\r\n" - } - msg += fmt.Sprintf( - "%s %s: %s", - warning.Level, - warning.Code, - warning.Message, - ) - } - return msg -} - -// MySQLWarning is an error type which represents a single MySQL warning. -// Warnings are returned in groups only. See MySQLWarnings -type MySQLWarning struct { - Level string - Code string - Message string -} - -func (mc *mysqlConn) getWarnings() (err error) { - rows, err := mc.Query("SHOW WARNINGS", nil) - if err != nil { - return - } - - var warnings = MySQLWarnings{} - var values = make([]driver.Value, 3) - - for { - err = rows.Next(values) - switch err { - case nil: - warning := MySQLWarning{} - - if raw, ok := values[0].([]byte); ok { - warning.Level = string(raw) - } else { - warning.Level = fmt.Sprintf("%s", values[0]) - } - if raw, ok := values[1].([]byte); ok { - warning.Code = string(raw) - } else { - warning.Code = fmt.Sprintf("%s", values[1]) - } - if raw, ok := values[2].([]byte); ok { - warning.Message = string(raw) - } else { - warning.Message = fmt.Sprintf("%s", values[0]) - } - - warnings = append(warnings, warning) - - case io.EOF: - return warnings - - default: - rows.Close() - return - } - } -} diff --git a/vendor/github.com/go-sql-driver/mysql/infile.go b/vendor/github.com/go-sql-driver/mysql/infile.go deleted file mode 100644 index 84c53a9..0000000 --- a/vendor/github.com/go-sql-driver/mysql/infile.go +++ /dev/null @@ -1,182 +0,0 @@ -// Go MySQL Driver - A MySQL-Driver for Go's database/sql package -// -// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved. -// -// This Source Code Form is subject to the terms of the Mozilla Public -// License, v. 2.0. If a copy of the MPL was not distributed with this file, -// You can obtain one at http://mozilla.org/MPL/2.0/. - -package mysql - -import ( - "fmt" - "io" - "os" - "strings" - "sync" -) - -var ( - fileRegister map[string]bool - fileRegisterLock sync.RWMutex - readerRegister map[string]func() io.Reader - readerRegisterLock sync.RWMutex -) - -// RegisterLocalFile adds the given file to the file whitelist, -// so that it can be used by "LOAD DATA LOCAL INFILE ". -// Alternatively you can allow the use of all local files with -// the DSN parameter 'allowAllFiles=true' -// -// filePath := "/home/gopher/data.csv" -// mysql.RegisterLocalFile(filePath) -// err := db.Exec("LOAD DATA LOCAL INFILE '" + filePath + "' INTO TABLE foo") -// if err != nil { -// ... -// -func RegisterLocalFile(filePath string) { - fileRegisterLock.Lock() - // lazy map init - if fileRegister == nil { - fileRegister = make(map[string]bool) - } - - fileRegister[strings.Trim(filePath, `"`)] = true - fileRegisterLock.Unlock() -} - -// DeregisterLocalFile removes the given filepath from the whitelist. 
-func DeregisterLocalFile(filePath string) { - fileRegisterLock.Lock() - delete(fileRegister, strings.Trim(filePath, `"`)) - fileRegisterLock.Unlock() -} - -// RegisterReaderHandler registers a handler function which is used -// to receive a io.Reader. -// The Reader can be used by "LOAD DATA LOCAL INFILE Reader::". -// If the handler returns a io.ReadCloser Close() is called when the -// request is finished. -// -// mysql.RegisterReaderHandler("data", func() io.Reader { -// var csvReader io.Reader // Some Reader that returns CSV data -// ... // Open Reader here -// return csvReader -// }) -// err := db.Exec("LOAD DATA LOCAL INFILE 'Reader::data' INTO TABLE foo") -// if err != nil { -// ... -// -func RegisterReaderHandler(name string, handler func() io.Reader) { - readerRegisterLock.Lock() - // lazy map init - if readerRegister == nil { - readerRegister = make(map[string]func() io.Reader) - } - - readerRegister[name] = handler - readerRegisterLock.Unlock() -} - -// DeregisterReaderHandler removes the ReaderHandler function with -// the given name from the registry. -func DeregisterReaderHandler(name string) { - readerRegisterLock.Lock() - delete(readerRegister, name) - readerRegisterLock.Unlock() -} - -func deferredClose(err *error, closer io.Closer) { - closeErr := closer.Close() - if *err == nil { - *err = closeErr - } -} - -func (mc *mysqlConn) handleInFileRequest(name string) (err error) { - var rdr io.Reader - var data []byte - - if idx := strings.Index(name, "Reader::"); idx == 0 || (idx > 0 && name[idx-1] == '/') { // io.Reader - // The server might return an an absolute path. See issue #355. - name = name[idx+8:] - - readerRegisterLock.RLock() - handler, inMap := readerRegister[name] - readerRegisterLock.RUnlock() - - if inMap { - rdr = handler() - if rdr != nil { - data = make([]byte, 4+mc.maxWriteSize) - - if cl, ok := rdr.(io.Closer); ok { - defer deferredClose(&err, cl) - } - } else { - err = fmt.Errorf("Reader '%s' is ", name) - } - } else { - err = fmt.Errorf("Reader '%s' is not registered", name) - } - } else { // File - name = strings.Trim(name, `"`) - fileRegisterLock.RLock() - fr := fileRegister[name] - fileRegisterLock.RUnlock() - if mc.cfg.allowAllFiles || fr { - var file *os.File - var fi os.FileInfo - - if file, err = os.Open(name); err == nil { - defer deferredClose(&err, file) - - // get file size - if fi, err = file.Stat(); err == nil { - rdr = file - if fileSize := int(fi.Size()); fileSize <= mc.maxWriteSize { - data = make([]byte, 4+fileSize) - } else if fileSize <= mc.maxPacketAllowed { - data = make([]byte, 4+mc.maxWriteSize) - } else { - err = fmt.Errorf("Local File '%s' too large: Size: %d, Max: %d", name, fileSize, mc.maxPacketAllowed) - } - } - } - } else { - err = fmt.Errorf("Local File '%s' is not registered. 
Use the DSN parameter 'allowAllFiles=true' to allow all files", name) - } - } - - // send content packets - if err == nil { - var n int - for err == nil { - n, err = rdr.Read(data[4:]) - if n > 0 { - if ioErr := mc.writePacket(data[:4+n]); ioErr != nil { - return ioErr - } - } - } - if err == io.EOF { - err = nil - } - } - - // send empty packet (termination) - if data == nil { - data = make([]byte, 4) - } - if ioErr := mc.writePacket(data[:4]); ioErr != nil { - return ioErr - } - - // read OK packet - if err == nil { - return mc.readResultOK() - } else { - mc.readPacket() - } - return err -} diff --git a/vendor/github.com/go-sql-driver/mysql/packets.go b/vendor/github.com/go-sql-driver/mysql/packets.go deleted file mode 100644 index 76cb7c8..0000000 --- a/vendor/github.com/go-sql-driver/mysql/packets.go +++ /dev/null @@ -1,1182 +0,0 @@ -// Go MySQL Driver - A MySQL-Driver for Go's database/sql package -// -// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved. -// -// This Source Code Form is subject to the terms of the Mozilla Public -// License, v. 2.0. If a copy of the MPL was not distributed with this file, -// You can obtain one at http://mozilla.org/MPL/2.0/. - -package mysql - -import ( - "bytes" - "crypto/tls" - "database/sql/driver" - "encoding/binary" - "fmt" - "io" - "math" - "time" -) - -// Packets documentation: -// http://dev.mysql.com/doc/internals/en/client-server-protocol.html - -// Read packet to buffer 'data' -func (mc *mysqlConn) readPacket() ([]byte, error) { - var payload []byte - for { - // Read packet header - data, err := mc.buf.readNext(4) - if err != nil { - errLog.Print(err) - mc.Close() - return nil, driver.ErrBadConn - } - - // Packet Length [24 bit] - pktLen := int(uint32(data[0]) | uint32(data[1])<<8 | uint32(data[2])<<16) - - if pktLen < 1 { - errLog.Print(ErrMalformPkt) - mc.Close() - return nil, driver.ErrBadConn - } - - // Check Packet Sync [8 bit] - if data[3] != mc.sequence { - if data[3] > mc.sequence { - return nil, ErrPktSyncMul - } else { - return nil, ErrPktSync - } - } - mc.sequence++ - - // Read packet body [pktLen bytes] - data, err = mc.buf.readNext(pktLen) - if err != nil { - errLog.Print(err) - mc.Close() - return nil, driver.ErrBadConn - } - - isLastPacket := (pktLen < maxPacketSize) - - // Zero allocations for non-splitting packets - if isLastPacket && payload == nil { - return data, nil - } - - payload = append(payload, data...) 
- - if isLastPacket { - return payload, nil - } - } -} - -// Write packet buffer 'data' -func (mc *mysqlConn) writePacket(data []byte) error { - pktLen := len(data) - 4 - - if pktLen > mc.maxPacketAllowed { - return ErrPktTooLarge - } - - for { - var size int - if pktLen >= maxPacketSize { - data[0] = 0xff - data[1] = 0xff - data[2] = 0xff - size = maxPacketSize - } else { - data[0] = byte(pktLen) - data[1] = byte(pktLen >> 8) - data[2] = byte(pktLen >> 16) - size = pktLen - } - data[3] = mc.sequence - - // Write packet - n, err := mc.netConn.Write(data[:4+size]) - if err == nil && n == 4+size { - mc.sequence++ - if size != maxPacketSize { - return nil - } - pktLen -= size - data = data[size:] - continue - } - - // Handle error - if err == nil { // n != len(data) - errLog.Print(ErrMalformPkt) - } else { - errLog.Print(err) - } - return driver.ErrBadConn - } -} - -/****************************************************************************** -* Initialisation Process * -******************************************************************************/ - -// Handshake Initialization Packet -// http://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::Handshake -func (mc *mysqlConn) readInitPacket() ([]byte, error) { - data, err := mc.readPacket() - if err != nil { - return nil, err - } - - if data[0] == iERR { - return nil, mc.handleErrorPacket(data) - } - - // protocol version [1 byte] - if data[0] < minProtocolVersion { - return nil, fmt.Errorf( - "Unsupported MySQL Protocol Version %d. Protocol Version %d or higher is required", - data[0], - minProtocolVersion, - ) - } - - // server version [null terminated string] - // connection id [4 bytes] - pos := 1 + bytes.IndexByte(data[1:], 0x00) + 1 + 4 - - // first part of the password cipher [8 bytes] - cipher := data[pos : pos+8] - - // (filler) always 0x00 [1 byte] - pos += 8 + 1 - - // capability flags (lower 2 bytes) [2 bytes] - mc.flags = clientFlag(binary.LittleEndian.Uint16(data[pos : pos+2])) - if mc.flags&clientProtocol41 == 0 { - return nil, ErrOldProtocol - } - if mc.flags&clientSSL == 0 && mc.cfg.tls != nil { - return nil, ErrNoTLS - } - pos += 2 - - if len(data) > pos { - // character set [1 byte] - // status flags [2 bytes] - // capability flags (upper 2 bytes) [2 bytes] - // length of auth-plugin-data [1 byte] - // reserved (all [00]) [10 bytes] - pos += 1 + 2 + 2 + 1 + 10 - - // second part of the password cipher [mininum 13 bytes], - // where len=MAX(13, length of auth-plugin-data - 8) - // - // The web documentation is ambiguous about the length. However, - // according to mysql-5.7/sql/auth/sql_authentication.cc line 538, - // the 13th byte is "\0 byte, terminating the second part of - // a scramble". So the second part of the password cipher is - // a NULL terminated string that's at least 13 bytes with the - // last byte being NULL. - // - // The official Python library uses the fixed length 12 - // which seems to work but technically could have a hidden bug. - cipher = append(cipher, data[pos:pos+12]...) 
- - // TODO: Verify string termination - // EOF if version (>= 5.5.7 and < 5.5.10) or (>= 5.6.0 and < 5.6.2) - // \NUL otherwise - // - //if data[len(data)-1] == 0 { - // return - //} - //return ErrMalformPkt - - // make a memory safe copy of the cipher slice - var b [20]byte - copy(b[:], cipher) - return b[:], nil - } - - // make a memory safe copy of the cipher slice - var b [8]byte - copy(b[:], cipher) - return b[:], nil -} - -// Client Authentication Packet -// http://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::HandshakeResponse -func (mc *mysqlConn) writeAuthPacket(cipher []byte) error { - // Adjust client flags based on server support - clientFlags := clientProtocol41 | - clientSecureConn | - clientLongPassword | - clientTransactions | - clientLocalFiles | - clientPluginAuth | - mc.flags&clientLongFlag - - if mc.cfg.clientFoundRows { - clientFlags |= clientFoundRows - } - - // To enable TLS / SSL - if mc.cfg.tls != nil { - clientFlags |= clientSSL - } - - // User Password - scrambleBuff := scramblePassword(cipher, []byte(mc.cfg.passwd)) - - pktLen := 4 + 4 + 1 + 23 + len(mc.cfg.user) + 1 + 1 + len(scrambleBuff) + 21 + 1 - - // To specify a db name - if n := len(mc.cfg.dbname); n > 0 { - clientFlags |= clientConnectWithDB - pktLen += n + 1 - } - - // Calculate packet length and get buffer with that size - data := mc.buf.takeSmallBuffer(pktLen + 4) - if data == nil { - // can not take the buffer. Something must be wrong with the connection - errLog.Print(ErrBusyBuffer) - return driver.ErrBadConn - } - - // ClientFlags [32 bit] - data[4] = byte(clientFlags) - data[5] = byte(clientFlags >> 8) - data[6] = byte(clientFlags >> 16) - data[7] = byte(clientFlags >> 24) - - // MaxPacketSize [32 bit] (none) - data[8] = 0x00 - data[9] = 0x00 - data[10] = 0x00 - data[11] = 0x00 - - // Charset [1 byte] - data[12] = mc.cfg.collation - - // SSL Connection Request Packet - // http://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::SSLRequest - if mc.cfg.tls != nil { - // Send TLS / SSL request packet - if err := mc.writePacket(data[:(4+4+1+23)+4]); err != nil { - return err - } - - // Switch to TLS - tlsConn := tls.Client(mc.netConn, mc.cfg.tls) - if err := tlsConn.Handshake(); err != nil { - return err - } - mc.netConn = tlsConn - mc.buf.rd = tlsConn - } - - // Filler [23 bytes] (all 0x00) - pos := 13 - for ; pos < 13+23; pos++ { - data[pos] = 0 - } - - // User [null terminated string] - if len(mc.cfg.user) > 0 { - pos += copy(data[pos:], mc.cfg.user) - } - data[pos] = 0x00 - pos++ - - // ScrambleBuffer [length encoded integer] - data[pos] = byte(len(scrambleBuff)) - pos += 1 + copy(data[pos+1:], scrambleBuff) - - // Databasename [null terminated string] - if len(mc.cfg.dbname) > 0 { - pos += copy(data[pos:], mc.cfg.dbname) - data[pos] = 0x00 - pos++ - } - - // Assume native client during response - pos += copy(data[pos:], "mysql_native_password") - data[pos] = 0x00 - - // Send Auth packet - return mc.writePacket(data) -} - -// Client old authentication packet -// http://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::AuthSwitchResponse -func (mc *mysqlConn) writeOldAuthPacket(cipher []byte) error { - // User password - scrambleBuff := scrambleOldPassword(cipher, []byte(mc.cfg.passwd)) - - // Calculate the packet length and add a tailing 0 - pktLen := len(scrambleBuff) + 1 - data := mc.buf.takeSmallBuffer(4 + pktLen) - if data == nil { - // can not take the buffer. 
Something must be wrong with the connection - errLog.Print(ErrBusyBuffer) - return driver.ErrBadConn - } - - // Add the scrambled password [null terminated string] - copy(data[4:], scrambleBuff) - data[4+pktLen-1] = 0x00 - - return mc.writePacket(data) -} - -// Client clear text authentication packet -// http://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::AuthSwitchResponse -func (mc *mysqlConn) writeClearAuthPacket() error { - // Calculate the packet length and add a tailing 0 - pktLen := len(mc.cfg.passwd) + 1 - data := mc.buf.takeSmallBuffer(4 + pktLen) - if data == nil { - // can not take the buffer. Something must be wrong with the connection - errLog.Print(ErrBusyBuffer) - return driver.ErrBadConn - } - - // Add the clear password [null terminated string] - copy(data[4:], mc.cfg.passwd) - data[4+pktLen-1] = 0x00 - - return mc.writePacket(data) -} - -/****************************************************************************** -* Command Packets * -******************************************************************************/ - -func (mc *mysqlConn) writeCommandPacket(command byte) error { - // Reset Packet Sequence - mc.sequence = 0 - - data := mc.buf.takeSmallBuffer(4 + 1) - if data == nil { - // can not take the buffer. Something must be wrong with the connection - errLog.Print(ErrBusyBuffer) - return driver.ErrBadConn - } - - // Add command byte - data[4] = command - - // Send CMD packet - return mc.writePacket(data) -} - -func (mc *mysqlConn) writeCommandPacketStr(command byte, arg string) error { - // Reset Packet Sequence - mc.sequence = 0 - - pktLen := 1 + len(arg) - data := mc.buf.takeBuffer(pktLen + 4) - if data == nil { - // can not take the buffer. Something must be wrong with the connection - errLog.Print(ErrBusyBuffer) - return driver.ErrBadConn - } - - // Add command byte - data[4] = command - - // Add arg - copy(data[5:], arg) - - // Send CMD packet - return mc.writePacket(data) -} - -func (mc *mysqlConn) writeCommandPacketUint32(command byte, arg uint32) error { - // Reset Packet Sequence - mc.sequence = 0 - - data := mc.buf.takeSmallBuffer(4 + 1 + 4) - if data == nil { - // can not take the buffer. 
Something must be wrong with the connection - errLog.Print(ErrBusyBuffer) - return driver.ErrBadConn - } - - // Add command byte - data[4] = command - - // Add arg [32 bit] - data[5] = byte(arg) - data[6] = byte(arg >> 8) - data[7] = byte(arg >> 16) - data[8] = byte(arg >> 24) - - // Send CMD packet - return mc.writePacket(data) -} - -/****************************************************************************** -* Result Packets * -******************************************************************************/ - -// Returns error if Packet is not an 'Result OK'-Packet -func (mc *mysqlConn) readResultOK() error { - data, err := mc.readPacket() - if err == nil { - // packet indicator - switch data[0] { - - case iOK: - return mc.handleOkPacket(data) - - case iEOF: - if len(data) > 1 { - plugin := string(data[1:bytes.IndexByte(data, 0x00)]) - if plugin == "mysql_old_password" { - // using old_passwords - return ErrOldPassword - } else if plugin == "mysql_clear_password" { - // using clear text password - return ErrCleartextPassword - } else { - return ErrUnknownPlugin - } - } else { - return ErrOldPassword - } - - default: // Error otherwise - return mc.handleErrorPacket(data) - } - } - return err -} - -// Result Set Header Packet -// http://dev.mysql.com/doc/internals/en/com-query-response.html#packet-ProtocolText::Resultset -func (mc *mysqlConn) readResultSetHeaderPacket() (int, error) { - data, err := mc.readPacket() - if err == nil { - switch data[0] { - - case iOK: - return 0, mc.handleOkPacket(data) - - case iERR: - return 0, mc.handleErrorPacket(data) - - case iLocalInFile: - return 0, mc.handleInFileRequest(string(data[1:])) - } - - // column count - num, _, n := readLengthEncodedInteger(data) - if n-len(data) == 0 { - return int(num), nil - } - - return 0, ErrMalformPkt - } - return 0, err -} - -// Error Packet -// http://dev.mysql.com/doc/internals/en/generic-response-packets.html#packet-ERR_Packet -func (mc *mysqlConn) handleErrorPacket(data []byte) error { - if data[0] != iERR { - return ErrMalformPkt - } - - // 0xff [1 byte] - - // Error Number [16 bit uint] - errno := binary.LittleEndian.Uint16(data[1:3]) - - pos := 3 - - // SQL State [optional: # + 5bytes string] - if data[3] == 0x23 { - //sqlstate := string(data[4 : 4+5]) - pos = 9 - } - - // Error Message [string] - return &MySQLError{ - Number: errno, - Message: string(data[pos:]), - } -} - -// Ok Packet -// http://dev.mysql.com/doc/internals/en/generic-response-packets.html#packet-OK_Packet -func (mc *mysqlConn) handleOkPacket(data []byte) error { - var n, m int - - // 0x00 [1 byte] - - // Affected rows [Length Coded Binary] - mc.affectedRows, _, n = readLengthEncodedInteger(data[1:]) - - // Insert id [Length Coded Binary] - mc.insertId, _, m = readLengthEncodedInteger(data[1+n:]) - - // server_status [2 bytes] - mc.status = statusFlag(data[1+n+m]) | statusFlag(data[1+n+m+1])<<8 - - // warning count [2 bytes] - if !mc.strict { - return nil - } else { - pos := 1 + n + m + 2 - if binary.LittleEndian.Uint16(data[pos:pos+2]) > 0 { - return mc.getWarnings() - } - return nil - } -} - -// Read Packets as Field Packets until EOF-Packet or an Error appears -// http://dev.mysql.com/doc/internals/en/com-query-response.html#packet-Protocol::ColumnDefinition41 -func (mc *mysqlConn) readColumns(count int) ([]mysqlField, error) { - columns := make([]mysqlField, count) - - for i := 0; ; i++ { - data, err := mc.readPacket() - if err != nil { - return nil, err - } - - // EOF Packet - if data[0] == iEOF && (len(data) == 5 || len(data) == 1) 
{ - if i == count { - return columns, nil - } - return nil, fmt.Errorf("ColumnsCount mismatch n:%d len:%d", count, len(columns)) - } - - // Catalog - pos, err := skipLengthEncodedString(data) - if err != nil { - return nil, err - } - - // Database [len coded string] - n, err := skipLengthEncodedString(data[pos:]) - if err != nil { - return nil, err - } - pos += n - - // Table [len coded string] - if mc.cfg.columnsWithAlias { - tableName, _, n, err := readLengthEncodedString(data[pos:]) - if err != nil { - return nil, err - } - pos += n - columns[i].tableName = string(tableName) - } else { - n, err = skipLengthEncodedString(data[pos:]) - if err != nil { - return nil, err - } - pos += n - } - - // Original table [len coded string] - n, err = skipLengthEncodedString(data[pos:]) - if err != nil { - return nil, err - } - pos += n - - // Name [len coded string] - name, _, n, err := readLengthEncodedString(data[pos:]) - if err != nil { - return nil, err - } - columns[i].name = string(name) - pos += n - - // Original name [len coded string] - n, err = skipLengthEncodedString(data[pos:]) - if err != nil { - return nil, err - } - - // Filler [uint8] - // Charset [charset, collation uint8] - // Length [uint32] - pos += n + 1 + 2 + 4 - - // Field type [uint8] - columns[i].fieldType = data[pos] - pos++ - - // Flags [uint16] - columns[i].flags = fieldFlag(binary.LittleEndian.Uint16(data[pos : pos+2])) - pos += 2 - - // Decimals [uint8] - columns[i].decimals = data[pos] - //pos++ - - // Default value [len coded binary] - //if pos < len(data) { - // defaultVal, _, err = bytesToLengthCodedBinary(data[pos:]) - //} - } -} - -// Read Packets as Field Packets until EOF-Packet or an Error appears -// http://dev.mysql.com/doc/internals/en/com-query-response.html#packet-ProtocolText::ResultsetRow -func (rows *textRows) readRow(dest []driver.Value) error { - mc := rows.mc - - data, err := mc.readPacket() - if err != nil { - return err - } - - // EOF Packet - if data[0] == iEOF && len(data) == 5 { - rows.mc = nil - return io.EOF - } - if data[0] == iERR { - rows.mc = nil - return mc.handleErrorPacket(data) - } - - // RowSet Packet - var n int - var isNull bool - pos := 0 - - for i := range dest { - // Read bytes and convert to string - dest[i], isNull, n, err = readLengthEncodedString(data[pos:]) - pos += n - if err == nil { - if !isNull { - if !mc.parseTime { - continue - } else { - switch rows.columns[i].fieldType { - case fieldTypeTimestamp, fieldTypeDateTime, - fieldTypeDate, fieldTypeNewDate: - dest[i], err = parseDateTime( - string(dest[i].([]byte)), - mc.cfg.loc, - ) - if err == nil { - continue - } - default: - continue - } - } - - } else { - dest[i] = nil - continue - } - } - return err // err != nil - } - - return nil -} - -// Reads Packets until EOF-Packet or an Error appears. 
Returns count of Packets read -func (mc *mysqlConn) readUntilEOF() error { - for { - data, err := mc.readPacket() - - // No Err and no EOF Packet - if err == nil && data[0] != iEOF { - continue - } - return err // Err or EOF - } -} - -/****************************************************************************** -* Prepared Statements * -******************************************************************************/ - -// Prepare Result Packets -// http://dev.mysql.com/doc/internals/en/com-stmt-prepare-response.html -func (stmt *mysqlStmt) readPrepareResultPacket() (uint16, error) { - data, err := stmt.mc.readPacket() - if err == nil { - // packet indicator [1 byte] - if data[0] != iOK { - return 0, stmt.mc.handleErrorPacket(data) - } - - // statement id [4 bytes] - stmt.id = binary.LittleEndian.Uint32(data[1:5]) - - // Column count [16 bit uint] - columnCount := binary.LittleEndian.Uint16(data[5:7]) - - // Param count [16 bit uint] - stmt.paramCount = int(binary.LittleEndian.Uint16(data[7:9])) - - // Reserved [8 bit] - - // Warning count [16 bit uint] - if !stmt.mc.strict { - return columnCount, nil - } else { - // Check for warnings count > 0, only available in MySQL > 4.1 - if len(data) >= 12 && binary.LittleEndian.Uint16(data[10:12]) > 0 { - return columnCount, stmt.mc.getWarnings() - } - return columnCount, nil - } - } - return 0, err -} - -// http://dev.mysql.com/doc/internals/en/com-stmt-send-long-data.html -func (stmt *mysqlStmt) writeCommandLongData(paramID int, arg []byte) error { - maxLen := stmt.mc.maxPacketAllowed - 1 - pktLen := maxLen - - // After the header (bytes 0-3) follows before the data: - // 1 byte command - // 4 bytes stmtID - // 2 bytes paramID - const dataOffset = 1 + 4 + 2 - - // Can not use the write buffer since - // a) the buffer is too small - // b) it is in use - data := make([]byte, 4+1+4+2+len(arg)) - - copy(data[4+dataOffset:], arg) - - for argLen := len(arg); argLen > 0; argLen -= pktLen - dataOffset { - if dataOffset+argLen < maxLen { - pktLen = dataOffset + argLen - } - - stmt.mc.sequence = 0 - // Add command byte [1 byte] - data[4] = comStmtSendLongData - - // Add stmtID [32 bit] - data[5] = byte(stmt.id) - data[6] = byte(stmt.id >> 8) - data[7] = byte(stmt.id >> 16) - data[8] = byte(stmt.id >> 24) - - // Add paramID [16 bit] - data[9] = byte(paramID) - data[10] = byte(paramID >> 8) - - // Send CMD packet - err := stmt.mc.writePacket(data[:4+pktLen]) - if err == nil { - data = data[pktLen-dataOffset:] - continue - } - return err - - } - - // Reset Packet Sequence - stmt.mc.sequence = 0 - return nil -} - -// Execute Prepared Statement -// http://dev.mysql.com/doc/internals/en/com-stmt-execute.html -func (stmt *mysqlStmt) writeExecutePacket(args []driver.Value) error { - if len(args) != stmt.paramCount { - return fmt.Errorf( - "Arguments count mismatch (Got: %d Has: %d)", - len(args), - stmt.paramCount, - ) - } - - const minPktLen = 4 + 1 + 4 + 1 + 4 - mc := stmt.mc - - // Reset packet-sequence - mc.sequence = 0 - - var data []byte - - if len(args) == 0 { - data = mc.buf.takeBuffer(minPktLen) - } else { - data = mc.buf.takeCompleteBuffer() - } - if data == nil { - // can not take the buffer. 
Something must be wrong with the connection - errLog.Print(ErrBusyBuffer) - return driver.ErrBadConn - } - - // command [1 byte] - data[4] = comStmtExecute - - // statement_id [4 bytes] - data[5] = byte(stmt.id) - data[6] = byte(stmt.id >> 8) - data[7] = byte(stmt.id >> 16) - data[8] = byte(stmt.id >> 24) - - // flags (0: CURSOR_TYPE_NO_CURSOR) [1 byte] - data[9] = 0x00 - - // iteration_count (uint32(1)) [4 bytes] - data[10] = 0x01 - data[11] = 0x00 - data[12] = 0x00 - data[13] = 0x00 - - if len(args) > 0 { - pos := minPktLen - - var nullMask []byte - if maskLen, typesLen := (len(args)+7)/8, 1+2*len(args); pos+maskLen+typesLen >= len(data) { - // buffer has to be extended but we don't know by how much so - // we depend on append after all data with known sizes fit. - // We stop at that because we deal with a lot of columns here - // which makes the required allocation size hard to guess. - tmp := make([]byte, pos+maskLen+typesLen) - copy(tmp[:pos], data[:pos]) - data = tmp - nullMask = data[pos : pos+maskLen] - pos += maskLen - } else { - nullMask = data[pos : pos+maskLen] - for i := 0; i < maskLen; i++ { - nullMask[i] = 0 - } - pos += maskLen - } - - // newParameterBoundFlag 1 [1 byte] - data[pos] = 0x01 - pos++ - - // type of each parameter [len(args)*2 bytes] - paramTypes := data[pos:] - pos += len(args) * 2 - - // value of each parameter [n bytes] - paramValues := data[pos:pos] - valuesCap := cap(paramValues) - - for i, arg := range args { - // build NULL-bitmap - if arg == nil { - nullMask[i/8] |= 1 << (uint(i) & 7) - paramTypes[i+i] = fieldTypeNULL - paramTypes[i+i+1] = 0x00 - continue - } - - // cache types and values - switch v := arg.(type) { - case int64: - paramTypes[i+i] = fieldTypeLongLong - paramTypes[i+i+1] = 0x00 - - if cap(paramValues)-len(paramValues)-8 >= 0 { - paramValues = paramValues[:len(paramValues)+8] - binary.LittleEndian.PutUint64( - paramValues[len(paramValues)-8:], - uint64(v), - ) - } else { - paramValues = append(paramValues, - uint64ToBytes(uint64(v))..., - ) - } - - case float64: - paramTypes[i+i] = fieldTypeDouble - paramTypes[i+i+1] = 0x00 - - if cap(paramValues)-len(paramValues)-8 >= 0 { - paramValues = paramValues[:len(paramValues)+8] - binary.LittleEndian.PutUint64( - paramValues[len(paramValues)-8:], - math.Float64bits(v), - ) - } else { - paramValues = append(paramValues, - uint64ToBytes(math.Float64bits(v))..., - ) - } - - case bool: - paramTypes[i+i] = fieldTypeTiny - paramTypes[i+i+1] = 0x00 - - if v { - paramValues = append(paramValues, 0x01) - } else { - paramValues = append(paramValues, 0x00) - } - - case []byte: - // Common case (non-nil value) first - if v != nil { - paramTypes[i+i] = fieldTypeString - paramTypes[i+i+1] = 0x00 - - if len(v) < mc.maxPacketAllowed-pos-len(paramValues)-(len(args)-(i+1))*64 { - paramValues = appendLengthEncodedInteger(paramValues, - uint64(len(v)), - ) - paramValues = append(paramValues, v...) - } else { - if err := stmt.writeCommandLongData(i, v); err != nil { - return err - } - } - continue - } - - // Handle []byte(nil) as a NULL value - nullMask[i/8] |= 1 << (uint(i) & 7) - paramTypes[i+i] = fieldTypeNULL - paramTypes[i+i+1] = 0x00 - - case string: - paramTypes[i+i] = fieldTypeString - paramTypes[i+i+1] = 0x00 - - if len(v) < mc.maxPacketAllowed-pos-len(paramValues)-(len(args)-(i+1))*64 { - paramValues = appendLengthEncodedInteger(paramValues, - uint64(len(v)), - ) - paramValues = append(paramValues, v...) 
- } else { - if err := stmt.writeCommandLongData(i, []byte(v)); err != nil { - return err - } - } - - case time.Time: - paramTypes[i+i] = fieldTypeString - paramTypes[i+i+1] = 0x00 - - var val []byte - if v.IsZero() { - val = []byte("0000-00-00") - } else { - val = []byte(v.In(mc.cfg.loc).Format(timeFormat)) - } - - paramValues = appendLengthEncodedInteger(paramValues, - uint64(len(val)), - ) - paramValues = append(paramValues, val...) - - default: - return fmt.Errorf("Can't convert type: %T", arg) - } - } - - // Check if param values exceeded the available buffer - // In that case we must build the data packet with the new values buffer - if valuesCap != cap(paramValues) { - data = append(data[:pos], paramValues...) - mc.buf.buf = data - } - - pos += len(paramValues) - data = data[:pos] - } - - return mc.writePacket(data) -} - -// http://dev.mysql.com/doc/internals/en/binary-protocol-resultset-row.html -func (rows *binaryRows) readRow(dest []driver.Value) error { - data, err := rows.mc.readPacket() - if err != nil { - return err - } - - // packet indicator [1 byte] - if data[0] != iOK { - rows.mc = nil - // EOF Packet - if data[0] == iEOF && len(data) == 5 { - return io.EOF - } - - // Error otherwise - return rows.mc.handleErrorPacket(data) - } - - // NULL-bitmap, [(column-count + 7 + 2) / 8 bytes] - pos := 1 + (len(dest)+7+2)>>3 - nullMask := data[1:pos] - - for i := range dest { - // Field is NULL - // (byte >> bit-pos) % 2 == 1 - if ((nullMask[(i+2)>>3] >> uint((i+2)&7)) & 1) == 1 { - dest[i] = nil - continue - } - - // Convert to byte-coded string - switch rows.columns[i].fieldType { - case fieldTypeNULL: - dest[i] = nil - continue - - // Numeric Types - case fieldTypeTiny: - if rows.columns[i].flags&flagUnsigned != 0 { - dest[i] = int64(data[pos]) - } else { - dest[i] = int64(int8(data[pos])) - } - pos++ - continue - - case fieldTypeShort, fieldTypeYear: - if rows.columns[i].flags&flagUnsigned != 0 { - dest[i] = int64(binary.LittleEndian.Uint16(data[pos : pos+2])) - } else { - dest[i] = int64(int16(binary.LittleEndian.Uint16(data[pos : pos+2]))) - } - pos += 2 - continue - - case fieldTypeInt24, fieldTypeLong: - if rows.columns[i].flags&flagUnsigned != 0 { - dest[i] = int64(binary.LittleEndian.Uint32(data[pos : pos+4])) - } else { - dest[i] = int64(int32(binary.LittleEndian.Uint32(data[pos : pos+4]))) - } - pos += 4 - continue - - case fieldTypeLongLong: - if rows.columns[i].flags&flagUnsigned != 0 { - val := binary.LittleEndian.Uint64(data[pos : pos+8]) - if val > math.MaxInt64 { - dest[i] = uint64ToString(val) - } else { - dest[i] = int64(val) - } - } else { - dest[i] = int64(binary.LittleEndian.Uint64(data[pos : pos+8])) - } - pos += 8 - continue - - case fieldTypeFloat: - dest[i] = float64(math.Float32frombits(binary.LittleEndian.Uint32(data[pos : pos+4]))) - pos += 4 - continue - - case fieldTypeDouble: - dest[i] = math.Float64frombits(binary.LittleEndian.Uint64(data[pos : pos+8])) - pos += 8 - continue - - // Length coded Binary Strings - case fieldTypeDecimal, fieldTypeNewDecimal, fieldTypeVarChar, - fieldTypeBit, fieldTypeEnum, fieldTypeSet, fieldTypeTinyBLOB, - fieldTypeMediumBLOB, fieldTypeLongBLOB, fieldTypeBLOB, - fieldTypeVarString, fieldTypeString, fieldTypeGeometry: - var isNull bool - var n int - dest[i], isNull, n, err = readLengthEncodedString(data[pos:]) - pos += n - if err == nil { - if !isNull { - continue - } else { - dest[i] = nil - continue - } - } - return err - - case - fieldTypeDate, fieldTypeNewDate, // Date YYYY-MM-DD - fieldTypeTime, // Time 
[-][H]HH:MM:SS[.fractal] - fieldTypeTimestamp, fieldTypeDateTime: // Timestamp YYYY-MM-DD HH:MM:SS[.fractal] - - num, isNull, n := readLengthEncodedInteger(data[pos:]) - pos += n - - switch { - case isNull: - dest[i] = nil - continue - case rows.columns[i].fieldType == fieldTypeTime: - // database/sql does not support an equivalent to TIME, return a string - var dstlen uint8 - switch decimals := rows.columns[i].decimals; decimals { - case 0x00, 0x1f: - dstlen = 8 - case 1, 2, 3, 4, 5, 6: - dstlen = 8 + 1 + decimals - default: - return fmt.Errorf( - "MySQL protocol error, illegal decimals value %d", - rows.columns[i].decimals, - ) - } - dest[i], err = formatBinaryDateTime(data[pos:pos+int(num)], dstlen, true) - case rows.mc.parseTime: - dest[i], err = parseBinaryDateTime(num, data[pos:], rows.mc.cfg.loc) - default: - var dstlen uint8 - if rows.columns[i].fieldType == fieldTypeDate { - dstlen = 10 - } else { - switch decimals := rows.columns[i].decimals; decimals { - case 0x00, 0x1f: - dstlen = 19 - case 1, 2, 3, 4, 5, 6: - dstlen = 19 + 1 + decimals - default: - return fmt.Errorf( - "MySQL protocol error, illegal decimals value %d", - rows.columns[i].decimals, - ) - } - } - dest[i], err = formatBinaryDateTime(data[pos:pos+int(num)], dstlen, false) - } - - if err == nil { - pos += int(num) - continue - } else { - return err - } - - // Please report if this happens! - default: - return fmt.Errorf("Unknown FieldType %d", rows.columns[i].fieldType) - } - } - - return nil -} diff --git a/vendor/github.com/go-sql-driver/mysql/result.go b/vendor/github.com/go-sql-driver/mysql/result.go deleted file mode 100644 index c6438d0..0000000 --- a/vendor/github.com/go-sql-driver/mysql/result.go +++ /dev/null @@ -1,22 +0,0 @@ -// Go MySQL Driver - A MySQL-Driver for Go's database/sql package -// -// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved. -// -// This Source Code Form is subject to the terms of the Mozilla Public -// License, v. 2.0. If a copy of the MPL was not distributed with this file, -// You can obtain one at http://mozilla.org/MPL/2.0/. - -package mysql - -type mysqlResult struct { - affectedRows int64 - insertId int64 -} - -func (res *mysqlResult) LastInsertId() (int64, error) { - return res.insertId, nil -} - -func (res *mysqlResult) RowsAffected() (int64, error) { - return res.affectedRows, nil -} diff --git a/vendor/github.com/go-sql-driver/mysql/rows.go b/vendor/github.com/go-sql-driver/mysql/rows.go deleted file mode 100644 index ba606e1..0000000 --- a/vendor/github.com/go-sql-driver/mysql/rows.go +++ /dev/null @@ -1,106 +0,0 @@ -// Go MySQL Driver - A MySQL-Driver for Go's database/sql package -// -// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved. -// -// This Source Code Form is subject to the terms of the Mozilla Public -// License, v. 2.0. If a copy of the MPL was not distributed with this file, -// You can obtain one at http://mozilla.org/MPL/2.0/. 
- -package mysql - -import ( - "database/sql/driver" - "io" -) - -type mysqlField struct { - tableName string - name string - flags fieldFlag - fieldType byte - decimals byte -} - -type mysqlRows struct { - mc *mysqlConn - columns []mysqlField -} - -type binaryRows struct { - mysqlRows -} - -type textRows struct { - mysqlRows -} - -type emptyRows struct{} - -func (rows *mysqlRows) Columns() []string { - columns := make([]string, len(rows.columns)) - if rows.mc.cfg.columnsWithAlias { - for i := range columns { - if tableName := rows.columns[i].tableName; len(tableName) > 0 { - columns[i] = tableName + "." + rows.columns[i].name - } else { - columns[i] = rows.columns[i].name - } - } - } else { - for i := range columns { - columns[i] = rows.columns[i].name - } - } - return columns -} - -func (rows *mysqlRows) Close() error { - mc := rows.mc - if mc == nil { - return nil - } - if mc.netConn == nil { - return ErrInvalidConn - } - - // Remove unread packets from stream - err := mc.readUntilEOF() - rows.mc = nil - return err -} - -func (rows *binaryRows) Next(dest []driver.Value) error { - if mc := rows.mc; mc != nil { - if mc.netConn == nil { - return ErrInvalidConn - } - - // Fetch next row from stream - return rows.readRow(dest) - } - return io.EOF -} - -func (rows *textRows) Next(dest []driver.Value) error { - if mc := rows.mc; mc != nil { - if mc.netConn == nil { - return ErrInvalidConn - } - - // Fetch next row from stream - return rows.readRow(dest) - } - return io.EOF -} - -func (rows emptyRows) Columns() []string { - return nil -} - -func (rows emptyRows) Close() error { - return nil -} - -func (rows emptyRows) Next(dest []driver.Value) error { - return io.EOF -} diff --git a/vendor/github.com/go-sql-driver/mysql/statement.go b/vendor/github.com/go-sql-driver/mysql/statement.go deleted file mode 100644 index 6e869b3..0000000 --- a/vendor/github.com/go-sql-driver/mysql/statement.go +++ /dev/null @@ -1,150 +0,0 @@ -// Go MySQL Driver - A MySQL-Driver for Go's database/sql package -// -// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved. -// -// This Source Code Form is subject to the terms of the Mozilla Public -// License, v. 2.0. If a copy of the MPL was not distributed with this file, -// You can obtain one at http://mozilla.org/MPL/2.0/. 
- -package mysql - -import ( - "database/sql/driver" - "fmt" - "reflect" - "strconv" -) - -type mysqlStmt struct { - mc *mysqlConn - id uint32 - paramCount int - columns []mysqlField // cached from the first query -} - -func (stmt *mysqlStmt) Close() error { - if stmt.mc == nil || stmt.mc.netConn == nil { - errLog.Print(ErrInvalidConn) - return driver.ErrBadConn - } - - err := stmt.mc.writeCommandPacketUint32(comStmtClose, stmt.id) - stmt.mc = nil - return err -} - -func (stmt *mysqlStmt) NumInput() int { - return stmt.paramCount -} - -func (stmt *mysqlStmt) ColumnConverter(idx int) driver.ValueConverter { - return converter{} -} - -func (stmt *mysqlStmt) Exec(args []driver.Value) (driver.Result, error) { - if stmt.mc.netConn == nil { - errLog.Print(ErrInvalidConn) - return nil, driver.ErrBadConn - } - // Send command - err := stmt.writeExecutePacket(args) - if err != nil { - return nil, err - } - - mc := stmt.mc - - mc.affectedRows = 0 - mc.insertId = 0 - - // Read Result - resLen, err := mc.readResultSetHeaderPacket() - if err == nil { - if resLen > 0 { - // Columns - err = mc.readUntilEOF() - if err != nil { - return nil, err - } - - // Rows - err = mc.readUntilEOF() - } - if err == nil { - return &mysqlResult{ - affectedRows: int64(mc.affectedRows), - insertId: int64(mc.insertId), - }, nil - } - } - - return nil, err -} - -func (stmt *mysqlStmt) Query(args []driver.Value) (driver.Rows, error) { - if stmt.mc.netConn == nil { - errLog.Print(ErrInvalidConn) - return nil, driver.ErrBadConn - } - // Send command - err := stmt.writeExecutePacket(args) - if err != nil { - return nil, err - } - - mc := stmt.mc - - // Read Result - resLen, err := mc.readResultSetHeaderPacket() - if err != nil { - return nil, err - } - - rows := new(binaryRows) - rows.mc = mc - - if resLen > 0 { - // Columns - // If not cached, read them and cache them - if stmt.columns == nil { - rows.columns, err = mc.readColumns(resLen) - stmt.columns = rows.columns - } else { - rows.columns = stmt.columns - err = mc.readUntilEOF() - } - } - - return rows, err -} - -type converter struct{} - -func (c converter) ConvertValue(v interface{}) (driver.Value, error) { - if driver.IsValue(v) { - return v, nil - } - - rv := reflect.ValueOf(v) - switch rv.Kind() { - case reflect.Ptr: - // indirect pointers - if rv.IsNil() { - return nil, nil - } - return c.ConvertValue(rv.Elem().Interface()) - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return rv.Int(), nil - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32: - return int64(rv.Uint()), nil - case reflect.Uint64: - u64 := rv.Uint() - if u64 >= 1<<63 { - return strconv.FormatUint(u64, 10), nil - } - return int64(u64), nil - case reflect.Float32, reflect.Float64: - return rv.Float(), nil - } - return nil, fmt.Errorf("unsupported type %T, a %s", v, rv.Kind()) -} diff --git a/vendor/github.com/go-sql-driver/mysql/transaction.go b/vendor/github.com/go-sql-driver/mysql/transaction.go deleted file mode 100644 index 33c749b..0000000 --- a/vendor/github.com/go-sql-driver/mysql/transaction.go +++ /dev/null @@ -1,31 +0,0 @@ -// Go MySQL Driver - A MySQL-Driver for Go's database/sql package -// -// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved. -// -// This Source Code Form is subject to the terms of the Mozilla Public -// License, v. 2.0. If a copy of the MPL was not distributed with this file, -// You can obtain one at http://mozilla.org/MPL/2.0/. 
- -package mysql - -type mysqlTx struct { - mc *mysqlConn -} - -func (tx *mysqlTx) Commit() (err error) { - if tx.mc == nil || tx.mc.netConn == nil { - return ErrInvalidConn - } - err = tx.mc.exec("COMMIT") - tx.mc = nil - return -} - -func (tx *mysqlTx) Rollback() (err error) { - if tx.mc == nil || tx.mc.netConn == nil { - return ErrInvalidConn - } - err = tx.mc.exec("ROLLBACK") - tx.mc = nil - return -} diff --git a/vendor/github.com/go-sql-driver/mysql/utils.go b/vendor/github.com/go-sql-driver/mysql/utils.go deleted file mode 100644 index 6a26ad1..0000000 --- a/vendor/github.com/go-sql-driver/mysql/utils.go +++ /dev/null @@ -1,973 +0,0 @@ -// Go MySQL Driver - A MySQL-Driver for Go's database/sql package -// -// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved. -// -// This Source Code Form is subject to the terms of the Mozilla Public -// License, v. 2.0. If a copy of the MPL was not distributed with this file, -// You can obtain one at http://mozilla.org/MPL/2.0/. - -package mysql - -import ( - "crypto/sha1" - "crypto/tls" - "database/sql/driver" - "encoding/binary" - "errors" - "fmt" - "io" - "net" - "net/url" - "strings" - "time" -) - -var ( - tlsConfigRegister map[string]*tls.Config // Register for custom tls.Configs - - errInvalidDSNUnescaped = errors.New("Invalid DSN: Did you forget to escape a param value?") - errInvalidDSNAddr = errors.New("Invalid DSN: Network Address not terminated (missing closing brace)") - errInvalidDSNNoSlash = errors.New("Invalid DSN: Missing the slash separating the database name") - errInvalidDSNUnsafeCollation = errors.New("Invalid DSN: interpolateParams can be used with ascii, latin1, utf8 and utf8mb4 charset") -) - -func init() { - tlsConfigRegister = make(map[string]*tls.Config) -} - -// RegisterTLSConfig registers a custom tls.Config to be used with sql.Open. -// Use the key as a value in the DSN where tls=value. -// -// rootCertPool := x509.NewCertPool() -// pem, err := ioutil.ReadFile("/path/ca-cert.pem") -// if err != nil { -// log.Fatal(err) -// } -// if ok := rootCertPool.AppendCertsFromPEM(pem); !ok { -// log.Fatal("Failed to append PEM.") -// } -// clientCert := make([]tls.Certificate, 0, 1) -// certs, err := tls.LoadX509KeyPair("/path/client-cert.pem", "/path/client-key.pem") -// if err != nil { -// log.Fatal(err) -// } -// clientCert = append(clientCert, certs) -// mysql.RegisterTLSConfig("custom", &tls.Config{ -// RootCAs: rootCertPool, -// Certificates: clientCert, -// }) -// db, err := sql.Open("mysql", "user@tcp(localhost:3306)/test?tls=custom") -// -func RegisterTLSConfig(key string, config *tls.Config) error { - if _, isBool := readBool(key); isBool || strings.ToLower(key) == "skip-verify" { - return fmt.Errorf("Key '%s' is reserved", key) - } - - tlsConfigRegister[key] = config - return nil -} - -// DeregisterTLSConfig removes the tls.Config associated with key. 
-func DeregisterTLSConfig(key string) { - delete(tlsConfigRegister, key) -} - -// parseDSN parses the DSN string to a config -func parseDSN(dsn string) (cfg *config, err error) { - // New config with some default values - cfg = &config{ - loc: time.UTC, - collation: defaultCollation, - } - - // [user[:password]@][net[(addr)]]/dbname[?param1=value1&paramN=valueN] - // Find the last '/' (since the password or the net addr might contain a '/') - foundSlash := false - for i := len(dsn) - 1; i >= 0; i-- { - if dsn[i] == '/' { - foundSlash = true - var j, k int - - // left part is empty if i <= 0 - if i > 0 { - // [username[:password]@][protocol[(address)]] - // Find the last '@' in dsn[:i] - for j = i; j >= 0; j-- { - if dsn[j] == '@' { - // username[:password] - // Find the first ':' in dsn[:j] - for k = 0; k < j; k++ { - if dsn[k] == ':' { - cfg.passwd = dsn[k+1 : j] - break - } - } - cfg.user = dsn[:k] - - break - } - } - - // [protocol[(address)]] - // Find the first '(' in dsn[j+1:i] - for k = j + 1; k < i; k++ { - if dsn[k] == '(' { - // dsn[i-1] must be == ')' if an address is specified - if dsn[i-1] != ')' { - if strings.ContainsRune(dsn[k+1:i], ')') { - return nil, errInvalidDSNUnescaped - } - return nil, errInvalidDSNAddr - } - cfg.addr = dsn[k+1 : i-1] - break - } - } - cfg.net = dsn[j+1 : k] - } - - // dbname[?param1=value1&...&paramN=valueN] - // Find the first '?' in dsn[i+1:] - for j = i + 1; j < len(dsn); j++ { - if dsn[j] == '?' { - if err = parseDSNParams(cfg, dsn[j+1:]); err != nil { - return - } - break - } - } - cfg.dbname = dsn[i+1 : j] - - break - } - } - - if !foundSlash && len(dsn) > 0 { - return nil, errInvalidDSNNoSlash - } - - if cfg.interpolateParams && unsafeCollations[cfg.collation] { - return nil, errInvalidDSNUnsafeCollation - } - - // Set default network if empty - if cfg.net == "" { - cfg.net = "tcp" - } - - // Set default address if empty - if cfg.addr == "" { - switch cfg.net { - case "tcp": - cfg.addr = "127.0.0.1:3306" - case "unix": - cfg.addr = "/tmp/mysql.sock" - default: - return nil, errors.New("Default addr for network '" + cfg.net + "' unknown") - } - - } - - return -} - -// parseDSNParams parses the DSN "query string" -// Values must be url.QueryEscape'ed -func parseDSNParams(cfg *config, params string) (err error) { - for _, v := range strings.Split(params, "&") { - param := strings.SplitN(v, "=", 2) - if len(param) != 2 { - continue - } - - // cfg params - switch value := param[1]; param[0] { - - // Enable client side placeholder substitution - case "interpolateParams": - var isBool bool - cfg.interpolateParams, isBool = readBool(value) - if !isBool { - return fmt.Errorf("Invalid Bool value: %s", value) - } - - // Disable INFILE whitelist / enable all files - case "allowAllFiles": - var isBool bool - cfg.allowAllFiles, isBool = readBool(value) - if !isBool { - return fmt.Errorf("Invalid Bool value: %s", value) - } - - // Use cleartext authentication mode (MySQL 5.5.10+) - case "allowCleartextPasswords": - var isBool bool - cfg.allowCleartextPasswords, isBool = readBool(value) - if !isBool { - return fmt.Errorf("Invalid Bool value: %s", value) - } - - // Use old authentication mode (pre MySQL 4.1) - case "allowOldPasswords": - var isBool bool - cfg.allowOldPasswords, isBool = readBool(value) - if !isBool { - return fmt.Errorf("Invalid Bool value: %s", value) - } - - // Switch "rowsAffected" mode - case "clientFoundRows": - var isBool bool - cfg.clientFoundRows, isBool = readBool(value) - if !isBool { - return fmt.Errorf("Invalid Bool value: %s", value) -
} - - // Collation - case "collation": - collation, ok := collations[value] - if !ok { - // Note possibility for false negatives: - // could be triggered although the collation is valid if the - // collations map does not contain entries the server supports. - err = errors.New("unknown collation") - return - } - cfg.collation = collation - break - - case "columnsWithAlias": - var isBool bool - cfg.columnsWithAlias, isBool = readBool(value) - if !isBool { - return fmt.Errorf("Invalid Bool value: %s", value) - } - - // Time Location - case "loc": - if value, err = url.QueryUnescape(value); err != nil { - return - } - cfg.loc, err = time.LoadLocation(value) - if err != nil { - return - } - - // Dial Timeout - case "timeout": - cfg.timeout, err = time.ParseDuration(value) - if err != nil { - return - } - - // TLS-Encryption - case "tls": - boolValue, isBool := readBool(value) - if isBool { - if boolValue { - cfg.tls = &tls.Config{} - } - } else { - if strings.ToLower(value) == "skip-verify" { - cfg.tls = &tls.Config{InsecureSkipVerify: true} - } else if tlsConfig, ok := tlsConfigRegister[value]; ok { - if len(tlsConfig.ServerName) == 0 && !tlsConfig.InsecureSkipVerify { - host, _, err := net.SplitHostPort(cfg.addr) - if err == nil { - tlsConfig.ServerName = host - } - } - - cfg.tls = tlsConfig - } else { - return fmt.Errorf("Invalid value / unknown config name: %s", value) - } - } - - default: - // lazy init - if cfg.params == nil { - cfg.params = make(map[string]string) - } - - if cfg.params[param[0]], err = url.QueryUnescape(value); err != nil { - return - } - } - } - - return -} - -// Returns the bool value of the input. -// The 2nd return value indicates if the input was a valid bool value -func readBool(input string) (value bool, valid bool) { - switch input { - case "1", "true", "TRUE", "True": - return true, true - case "0", "false", "FALSE", "False": - return false, true - } - - // Not a valid bool value - return -} - -/****************************************************************************** -* Authentication * -******************************************************************************/ - -// Encrypt password using 4.1+ method -func scramblePassword(scramble, password []byte) []byte { - if len(password) == 0 { - return nil - } - - // stage1Hash = SHA1(password) - crypt := sha1.New() - crypt.Write(password) - stage1 := crypt.Sum(nil) - - // scrambleHash = SHA1(scramble + SHA1(stage1Hash)) - // inner Hash - crypt.Reset() - crypt.Write(stage1) - hash := crypt.Sum(nil) - - // outer Hash - crypt.Reset() - crypt.Write(scramble) - crypt.Write(hash) - scramble = crypt.Sum(nil) - - // token = scrambleHash XOR stage1Hash - for i := range scramble { - scramble[i] ^= stage1[i] - } - return scramble -} - -// Encrypt password using pre 4.1 (old password) method -// https://github.com/atcurtis/mariadb/blob/master/mysys/my_rnd.c -type myRnd struct { - seed1, seed2 uint32 -} - -const myRndMaxVal = 0x3FFFFFFF - -// Pseudo random number generator -func newMyRnd(seed1, seed2 uint32) *myRnd { - return &myRnd{ - seed1: seed1 % myRndMaxVal, - seed2: seed2 % myRndMaxVal, - } -} - -// Tested to be equivalent to MariaDB's floating point variant -// http://play.golang.org/p/QHvhd4qved -// http://play.golang.org/p/RG0q4ElWDx -func (r *myRnd) NextByte() byte { - r.seed1 = (r.seed1*3 + r.seed2) % myRndMaxVal - r.seed2 = (r.seed1 + r.seed2 + 33) % myRndMaxVal - - return byte(uint64(r.seed1) * 31 / myRndMaxVal) -} - -// Generate binary hash from byte string using insecure pre 4.1 method -func 
pwHash(password []byte) (result [2]uint32) { - var add uint32 = 7 - var tmp uint32 - - result[0] = 1345345333 - result[1] = 0x12345671 - - for _, c := range password { - // skip spaces and tabs in password - if c == ' ' || c == '\t' { - continue - } - - tmp = uint32(c) - result[0] ^= (((result[0] & 63) + add) * tmp) + (result[0] << 8) - result[1] += (result[1] << 8) ^ result[0] - add += tmp - } - - // Remove sign bit (1<<31)-1) - result[0] &= 0x7FFFFFFF - result[1] &= 0x7FFFFFFF - - return -} - -// Encrypt password using insecure pre 4.1 method -func scrambleOldPassword(scramble, password []byte) []byte { - if len(password) == 0 { - return nil - } - - scramble = scramble[:8] - - hashPw := pwHash(password) - hashSc := pwHash(scramble) - - r := newMyRnd(hashPw[0]^hashSc[0], hashPw[1]^hashSc[1]) - - var out [8]byte - for i := range out { - out[i] = r.NextByte() + 64 - } - - mask := r.NextByte() - for i := range out { - out[i] ^= mask - } - - return out[:] -} - -/****************************************************************************** -* Time related utils * -******************************************************************************/ - -// NullTime represents a time.Time that may be NULL. -// NullTime implements the Scanner interface so -// it can be used as a scan destination: -// -// var nt NullTime -// err := db.QueryRow("SELECT time FROM foo WHERE id=?", id).Scan(&nt) -// ... -// if nt.Valid { -// // use nt.Time -// } else { -// // NULL value -// } -// -// This NullTime implementation is not driver-specific -type NullTime struct { - Time time.Time - Valid bool // Valid is true if Time is not NULL -} - -// Scan implements the Scanner interface. -// The value type must be time.Time or string / []byte (formatted time-string), -// otherwise Scan fails. -func (nt *NullTime) Scan(value interface{}) (err error) { - if value == nil { - nt.Time, nt.Valid = time.Time{}, false - return - } - - switch v := value.(type) { - case time.Time: - nt.Time, nt.Valid = v, true - return - case []byte: - nt.Time, err = parseDateTime(string(v), time.UTC) - nt.Valid = (err == nil) - return - case string: - nt.Time, err = parseDateTime(v, time.UTC) - nt.Valid = (err == nil) - return - } - - nt.Valid = false - return fmt.Errorf("Can't convert %T to time.Time", value) -} - -// Value implements the driver Valuer interface. 
-func (nt NullTime) Value() (driver.Value, error) { - if !nt.Valid { - return nil, nil - } - return nt.Time, nil -} - -func parseDateTime(str string, loc *time.Location) (t time.Time, err error) { - base := "0000-00-00 00:00:00.0000000" - switch len(str) { - case 10, 19, 21, 22, 23, 24, 25, 26: // up to "YYYY-MM-DD HH:MM:SS.MMMMMM" - if str == base[:len(str)] { - return - } - t, err = time.Parse(timeFormat[:len(str)], str) - default: - err = fmt.Errorf("Invalid Time-String: %s", str) - return - } - - // Adjust location - if err == nil && loc != time.UTC { - y, mo, d := t.Date() - h, mi, s := t.Clock() - t, err = time.Date(y, mo, d, h, mi, s, t.Nanosecond(), loc), nil - } - - return -} - -func parseBinaryDateTime(num uint64, data []byte, loc *time.Location) (driver.Value, error) { - switch num { - case 0: - return time.Time{}, nil - case 4: - return time.Date( - int(binary.LittleEndian.Uint16(data[:2])), // year - time.Month(data[2]), // month - int(data[3]), // day - 0, 0, 0, 0, - loc, - ), nil - case 7: - return time.Date( - int(binary.LittleEndian.Uint16(data[:2])), // year - time.Month(data[2]), // month - int(data[3]), // day - int(data[4]), // hour - int(data[5]), // minutes - int(data[6]), // seconds - 0, - loc, - ), nil - case 11: - return time.Date( - int(binary.LittleEndian.Uint16(data[:2])), // year - time.Month(data[2]), // month - int(data[3]), // day - int(data[4]), // hour - int(data[5]), // minutes - int(data[6]), // seconds - int(binary.LittleEndian.Uint32(data[7:11]))*1000, // nanoseconds - loc, - ), nil - } - return nil, fmt.Errorf("Invalid DATETIME-packet length %d", num) -} - -// zeroDateTime is used in formatBinaryDateTime to avoid an allocation -// if the DATE or DATETIME has the zero value. -// It must never be changed. -// The current behavior depends on database/sql copying the result. 
-var zeroDateTime = []byte("0000-00-00 00:00:00.000000") - -const digits01 = "0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789" -const digits10 = "0000000000111111111122222222223333333333444444444455555555556666666666777777777788888888889999999999" - -func formatBinaryDateTime(src []byte, length uint8, justTime bool) (driver.Value, error) { - // length expects the deterministic length of the zero value, - // negative time and 100+ hours are automatically added if needed - if len(src) == 0 { - if justTime { - return zeroDateTime[11 : 11+length], nil - } - return zeroDateTime[:length], nil - } - var dst []byte // return value - var pt, p1, p2, p3 byte // current digit pair - var zOffs byte // offset of value in zeroDateTime - if justTime { - switch length { - case - 8, // time (can be up to 10 when negative and 100+ hours) - 10, 11, 12, 13, 14, 15: // time with fractional seconds - default: - return nil, fmt.Errorf("illegal TIME length %d", length) - } - switch len(src) { - case 8, 12: - default: - return nil, fmt.Errorf("Invalid TIME-packet length %d", len(src)) - } - // +2 to enable negative time and 100+ hours - dst = make([]byte, 0, length+2) - if src[0] == 1 { - dst = append(dst, '-') - } - if src[1] != 0 { - hour := uint16(src[1])*24 + uint16(src[5]) - pt = byte(hour / 100) - p1 = byte(hour - 100*uint16(pt)) - dst = append(dst, digits01[pt]) - } else { - p1 = src[5] - } - zOffs = 11 - src = src[6:] - } else { - switch length { - case 10, 19, 21, 22, 23, 24, 25, 26: - default: - t := "DATE" - if length > 10 { - t += "TIME" - } - return nil, fmt.Errorf("illegal %s length %d", t, length) - } - switch len(src) { - case 4, 7, 11: - default: - t := "DATE" - if length > 10 { - t += "TIME" - } - return nil, fmt.Errorf("illegal %s-packet length %d", t, len(src)) - } - dst = make([]byte, 0, length) - // start with the date - year := binary.LittleEndian.Uint16(src[:2]) - pt = byte(year / 100) - p1 = byte(year - 100*uint16(pt)) - p2, p3 = src[2], src[3] - dst = append(dst, - digits10[pt], digits01[pt], - digits10[p1], digits01[p1], '-', - digits10[p2], digits01[p2], '-', - digits10[p3], digits01[p3], - ) - if length == 10 { - return dst, nil - } - if len(src) == 4 { - return append(dst, zeroDateTime[10:length]...), nil - } - dst = append(dst, ' ') - p1 = src[4] // hour - src = src[5:] - } - // p1 is 2-digit hour, src is after hour - p2, p3 = src[0], src[1] - dst = append(dst, - digits10[p1], digits01[p1], ':', - digits10[p2], digits01[p2], ':', - digits10[p3], digits01[p3], - ) - if length <= byte(len(dst)) { - return dst, nil - } - src = src[2:] - if len(src) == 0 { - return append(dst, zeroDateTime[19:zOffs+length]...), nil - } - microsecs := binary.LittleEndian.Uint32(src[:4]) - p1 = byte(microsecs / 10000) - microsecs -= 10000 * uint32(p1) - p2 = byte(microsecs / 100) - microsecs -= 100 * uint32(p2) - p3 = byte(microsecs) - switch decimals := zOffs + length - 20; decimals { - default: - return append(dst, '.', - digits10[p1], digits01[p1], - digits10[p2], digits01[p2], - digits10[p3], digits01[p3], - ), nil - case 1: - return append(dst, '.', - digits10[p1], - ), nil - case 2: - return append(dst, '.', - digits10[p1], digits01[p1], - ), nil - case 3: - return append(dst, '.', - digits10[p1], digits01[p1], - digits10[p2], - ), nil - case 4: - return append(dst, '.', - digits10[p1], digits01[p1], - digits10[p2], digits01[p2], - ), nil - case 5: - return append(dst, '.', - digits10[p1], digits01[p1], - digits10[p2], digits01[p2], - digits10[p3], 
- ), nil - } -} - -/****************************************************************************** -* Convert from and to bytes * -******************************************************************************/ - -func uint64ToBytes(n uint64) []byte { - return []byte{ - byte(n), - byte(n >> 8), - byte(n >> 16), - byte(n >> 24), - byte(n >> 32), - byte(n >> 40), - byte(n >> 48), - byte(n >> 56), - } -} - -func uint64ToString(n uint64) []byte { - var a [20]byte - i := 20 - - // U+0030 = 0 - // ... - // U+0039 = 9 - - var q uint64 - for n >= 10 { - i-- - q = n / 10 - a[i] = uint8(n-q*10) + 0x30 - n = q - } - - i-- - a[i] = uint8(n) + 0x30 - - return a[i:] -} - -// treats string value as unsigned integer representation -func stringToInt(b []byte) int { - val := 0 - for i := range b { - val *= 10 - val += int(b[i] - 0x30) - } - return val -} - -// returns the string read as a bytes slice, wheter the value is NULL, -// the number of bytes read and an error, in case the string is longer than -// the input slice -func readLengthEncodedString(b []byte) ([]byte, bool, int, error) { - // Get length - num, isNull, n := readLengthEncodedInteger(b) - if num < 1 { - return b[n:n], isNull, n, nil - } - - n += int(num) - - // Check data length - if len(b) >= n { - return b[n-int(num) : n], false, n, nil - } - return nil, false, n, io.EOF -} - -// returns the number of bytes skipped and an error, in case the string is -// longer than the input slice -func skipLengthEncodedString(b []byte) (int, error) { - // Get length - num, _, n := readLengthEncodedInteger(b) - if num < 1 { - return n, nil - } - - n += int(num) - - // Check data length - if len(b) >= n { - return n, nil - } - return n, io.EOF -} - -// returns the number read, whether the value is NULL and the number of bytes read -func readLengthEncodedInteger(b []byte) (uint64, bool, int) { - // See issue #349 - if len(b) == 0 { - return 0, true, 1 - } - switch b[0] { - - // 251: NULL - case 0xfb: - return 0, true, 1 - - // 252: value of following 2 - case 0xfc: - return uint64(b[1]) | uint64(b[2])<<8, false, 3 - - // 253: value of following 3 - case 0xfd: - return uint64(b[1]) | uint64(b[2])<<8 | uint64(b[3])<<16, false, 4 - - // 254: value of following 8 - case 0xfe: - return uint64(b[1]) | uint64(b[2])<<8 | uint64(b[3])<<16 | - uint64(b[4])<<24 | uint64(b[5])<<32 | uint64(b[6])<<40 | - uint64(b[7])<<48 | uint64(b[8])<<56, - false, 9 - } - - // 0-250: value of first byte - return uint64(b[0]), false, 1 -} - -// encodes a uint64 value and appends it to the given bytes slice -func appendLengthEncodedInteger(b []byte, n uint64) []byte { - switch { - case n <= 250: - return append(b, byte(n)) - - case n <= 0xffff: - return append(b, 0xfc, byte(n), byte(n>>8)) - - case n <= 0xffffff: - return append(b, 0xfd, byte(n), byte(n>>8), byte(n>>16)) - } - return append(b, 0xfe, byte(n), byte(n>>8), byte(n>>16), byte(n>>24), - byte(n>>32), byte(n>>40), byte(n>>48), byte(n>>56)) -} - -// reserveBuffer checks cap(buf) and expand buffer to len(buf) + appendSize. -// If cap(buf) is not enough, reallocate new buffer. 
-func reserveBuffer(buf []byte, appendSize int) []byte { - newSize := len(buf) + appendSize - if cap(buf) < newSize { - // Grow buffer exponentially - newBuf := make([]byte, len(buf)*2+appendSize) - copy(newBuf, buf) - buf = newBuf - } - return buf[:newSize] -} - -// escapeBytesBackslash escapes []byte with backslashes (\) -// This escapes the contents of a string (provided as []byte) by adding backslashes before special -// characters, and turning others into specific escape sequences, such as -// turning newlines into \n and null bytes into \0. -// https://github.com/mysql/mysql-server/blob/mysql-5.7.5/mysys/charset.c#L823-L932 -func escapeBytesBackslash(buf, v []byte) []byte { - pos := len(buf) - buf = reserveBuffer(buf, len(v)*2) - - for _, c := range v { - switch c { - case '\x00': - buf[pos] = '\\' - buf[pos+1] = '0' - pos += 2 - case '\n': - buf[pos] = '\\' - buf[pos+1] = 'n' - pos += 2 - case '\r': - buf[pos] = '\\' - buf[pos+1] = 'r' - pos += 2 - case '\x1a': - buf[pos] = '\\' - buf[pos+1] = 'Z' - pos += 2 - case '\'': - buf[pos] = '\\' - buf[pos+1] = '\'' - pos += 2 - case '"': - buf[pos] = '\\' - buf[pos+1] = '"' - pos += 2 - case '\\': - buf[pos] = '\\' - buf[pos+1] = '\\' - pos += 2 - default: - buf[pos] = c - pos += 1 - } - } - - return buf[:pos] -} - -// escapeStringBackslash is similar to escapeBytesBackslash but for string. -func escapeStringBackslash(buf []byte, v string) []byte { - pos := len(buf) - buf = reserveBuffer(buf, len(v)*2) - - for i := 0; i < len(v); i++ { - c := v[i] - switch c { - case '\x00': - buf[pos] = '\\' - buf[pos+1] = '0' - pos += 2 - case '\n': - buf[pos] = '\\' - buf[pos+1] = 'n' - pos += 2 - case '\r': - buf[pos] = '\\' - buf[pos+1] = 'r' - pos += 2 - case '\x1a': - buf[pos] = '\\' - buf[pos+1] = 'Z' - pos += 2 - case '\'': - buf[pos] = '\\' - buf[pos+1] = '\'' - pos += 2 - case '"': - buf[pos] = '\\' - buf[pos+1] = '"' - pos += 2 - case '\\': - buf[pos] = '\\' - buf[pos+1] = '\\' - pos += 2 - default: - buf[pos] = c - pos += 1 - } - } - - return buf[:pos] -} - -// escapeBytesQuotes escapes apostrophes in []byte by doubling them up. -// This escapes the contents of a string by doubling up any apostrophes that -// it contains. This is used when the NO_BACKSLASH_ESCAPES SQL_MODE is in -// effect on the server. -// https://github.com/mysql/mysql-server/blob/mysql-5.7.5/mysys/charset.c#L963-L1038 -func escapeBytesQuotes(buf, v []byte) []byte { - pos := len(buf) - buf = reserveBuffer(buf, len(v)*2) - - for _, c := range v { - if c == '\'' { - buf[pos] = '\'' - buf[pos+1] = '\'' - pos += 2 - } else { - buf[pos] = c - pos++ - } - } - - return buf[:pos] -} - -// escapeStringQuotes is similar to escapeBytesQuotes but for string. -func escapeStringQuotes(buf []byte, v string) []byte { - pos := len(buf) - buf = reserveBuffer(buf, len(v)*2) - - for i := 0; i < len(v); i++ { - c := v[i] - if c == '\'' { - buf[pos] = '\'' - buf[pos+1] = '\'' - pos += 2 - } else { - buf[pos] = c - pos++ - } - } - - return buf[:pos] -} diff --git a/vendor/github.com/golang/protobuf/LICENSE b/vendor/github.com/golang/protobuf/LICENSE deleted file mode 100644 index 1b1b192..0000000 --- a/vendor/github.com/golang/protobuf/LICENSE +++ /dev/null @@ -1,31 +0,0 @@ -Go support for Protocol Buffers - Google's data interchange format - -Copyright 2010 The Go Authors. All rights reserved. 
-https://github.com/golang/protobuf - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - diff --git a/vendor/github.com/golang/protobuf/proto/Makefile b/vendor/github.com/golang/protobuf/proto/Makefile deleted file mode 100644 index e2e0651..0000000 --- a/vendor/github.com/golang/protobuf/proto/Makefile +++ /dev/null @@ -1,43 +0,0 @@ -# Go support for Protocol Buffers - Google's data interchange format -# -# Copyright 2010 The Go Authors. All rights reserved. -# https://github.com/golang/protobuf -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -install: - go install - -test: install generate-test-pbs - go test - - -generate-test-pbs: - make install - make -C testdata - protoc --go_out=Mtestdata/test.proto=github.com/golang/protobuf/proto/testdata,Mgoogle/protobuf/any.proto=github.com/golang/protobuf/ptypes/any:. proto3_proto/proto3.proto - make diff --git a/vendor/github.com/golang/protobuf/proto/clone.go b/vendor/github.com/golang/protobuf/proto/clone.go deleted file mode 100644 index e98ddec..0000000 --- a/vendor/github.com/golang/protobuf/proto/clone.go +++ /dev/null @@ -1,223 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2011 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// Protocol buffer deep copy and merge. -// TODO: RawMessage. - -package proto - -import ( - "log" - "reflect" - "strings" -) - -// Clone returns a deep copy of a protocol buffer. -func Clone(pb Message) Message { - in := reflect.ValueOf(pb) - if in.IsNil() { - return pb - } - - out := reflect.New(in.Type().Elem()) - // out is empty so a merge is a deep copy. - mergeStruct(out.Elem(), in.Elem()) - return out.Interface().(Message) -} - -// Merge merges src into dst. -// Required and optional fields that are set in src will be set to that value in dst. -// Elements of repeated fields will be appended. -// Merge panics if src and dst are not the same type, or if dst is nil. 
-func Merge(dst, src Message) { - in := reflect.ValueOf(src) - out := reflect.ValueOf(dst) - if out.IsNil() { - panic("proto: nil destination") - } - if in.Type() != out.Type() { - // Explicit test prior to mergeStruct so that mistyped nils will fail - panic("proto: type mismatch") - } - if in.IsNil() { - // Merging nil into non-nil is a quiet no-op - return - } - mergeStruct(out.Elem(), in.Elem()) -} - -func mergeStruct(out, in reflect.Value) { - sprop := GetProperties(in.Type()) - for i := 0; i < in.NumField(); i++ { - f := in.Type().Field(i) - if strings.HasPrefix(f.Name, "XXX_") { - continue - } - mergeAny(out.Field(i), in.Field(i), false, sprop.Prop[i]) - } - - if emIn, ok := in.Addr().Interface().(extendableProto); ok { - emOut := out.Addr().Interface().(extendableProto) - mergeExtension(emOut.ExtensionMap(), emIn.ExtensionMap()) - } - - uf := in.FieldByName("XXX_unrecognized") - if !uf.IsValid() { - return - } - uin := uf.Bytes() - if len(uin) > 0 { - out.FieldByName("XXX_unrecognized").SetBytes(append([]byte(nil), uin...)) - } -} - -// mergeAny performs a merge between two values of the same type. -// viaPtr indicates whether the values were indirected through a pointer (implying proto2). -// prop is set if this is a struct field (it may be nil). -func mergeAny(out, in reflect.Value, viaPtr bool, prop *Properties) { - if in.Type() == protoMessageType { - if !in.IsNil() { - if out.IsNil() { - out.Set(reflect.ValueOf(Clone(in.Interface().(Message)))) - } else { - Merge(out.Interface().(Message), in.Interface().(Message)) - } - } - return - } - switch in.Kind() { - case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64, - reflect.String, reflect.Uint32, reflect.Uint64: - if !viaPtr && isProto3Zero(in) { - return - } - out.Set(in) - case reflect.Interface: - // Probably a oneof field; copy non-nil values. - if in.IsNil() { - return - } - // Allocate destination if it is not set, or set to a different type. - // Otherwise we will merge as normal. - if out.IsNil() || out.Elem().Type() != in.Elem().Type() { - out.Set(reflect.New(in.Elem().Elem().Type())) // interface -> *T -> T -> new(T) - } - mergeAny(out.Elem(), in.Elem(), false, nil) - case reflect.Map: - if in.Len() == 0 { - return - } - if out.IsNil() { - out.Set(reflect.MakeMap(in.Type())) - } - // For maps with value types of *T or []byte we need to deep copy each value. - elemKind := in.Type().Elem().Kind() - for _, key := range in.MapKeys() { - var val reflect.Value - switch elemKind { - case reflect.Ptr: - val = reflect.New(in.Type().Elem().Elem()) - mergeAny(val, in.MapIndex(key), false, nil) - case reflect.Slice: - val = in.MapIndex(key) - val = reflect.ValueOf(append([]byte{}, val.Bytes()...)) - default: - val = in.MapIndex(key) - } - out.SetMapIndex(key, val) - } - case reflect.Ptr: - if in.IsNil() { - return - } - if out.IsNil() { - out.Set(reflect.New(in.Elem().Type())) - } - mergeAny(out.Elem(), in.Elem(), true, nil) - case reflect.Slice: - if in.IsNil() { - return - } - if in.Type().Elem().Kind() == reflect.Uint8 { - // []byte is a scalar bytes field, not a repeated field. - - // Edge case: if this is in a proto3 message, a zero length - // bytes field is considered the zero value, and should not - // be merged. - if prop != nil && prop.proto3 && in.Len() == 0 { - return - } - - // Make a deep copy. - // Append to []byte{} instead of []byte(nil) so that we never end up - // with a nil result. 
- out.SetBytes(append([]byte{}, in.Bytes()...)) - return - } - n := in.Len() - if out.IsNil() { - out.Set(reflect.MakeSlice(in.Type(), 0, n)) - } - switch in.Type().Elem().Kind() { - case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64, - reflect.String, reflect.Uint32, reflect.Uint64: - out.Set(reflect.AppendSlice(out, in)) - default: - for i := 0; i < n; i++ { - x := reflect.Indirect(reflect.New(in.Type().Elem())) - mergeAny(x, in.Index(i), false, nil) - out.Set(reflect.Append(out, x)) - } - } - case reflect.Struct: - mergeStruct(out, in) - default: - // unknown type, so not a protocol buffer - log.Printf("proto: don't know how to copy %v", in) - } -} - -func mergeExtension(out, in map[int32]Extension) { - for extNum, eIn := range in { - eOut := Extension{desc: eIn.desc} - if eIn.value != nil { - v := reflect.New(reflect.TypeOf(eIn.value)).Elem() - mergeAny(v, reflect.ValueOf(eIn.value), false, nil) - eOut.value = v.Interface() - } - if eIn.enc != nil { - eOut.enc = make([]byte, len(eIn.enc)) - copy(eOut.enc, eIn.enc) - } - - out[extNum] = eOut - } -} diff --git a/vendor/github.com/golang/protobuf/proto/decode.go b/vendor/github.com/golang/protobuf/proto/decode.go deleted file mode 100644 index f94b9f4..0000000 --- a/vendor/github.com/golang/protobuf/proto/decode.go +++ /dev/null @@ -1,868 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -/* - * Routines for decoding protocol buffer data to construct in-memory representations. - */ - -import ( - "errors" - "fmt" - "io" - "os" - "reflect" -) - -// errOverflow is returned when an integer is too large to be represented. -var errOverflow = errors.New("proto: integer overflow") - -// ErrInternalBadWireType is returned by generated code when an incorrect -// wire type is encountered. It does not get returned to user code. 
-var ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof") - -// The fundamental decoders that interpret bytes on the wire. -// Those that take integer types all return uint64 and are -// therefore of type valueDecoder. - -// DecodeVarint reads a varint-encoded integer from the slice. -// It returns the integer and the number of bytes consumed, or -// zero if there is not enough. -// This is the format for the -// int32, int64, uint32, uint64, bool, and enum -// protocol buffer types. -func DecodeVarint(buf []byte) (x uint64, n int) { - // x, n already 0 - for shift := uint(0); shift < 64; shift += 7 { - if n >= len(buf) { - return 0, 0 - } - b := uint64(buf[n]) - n++ - x |= (b & 0x7F) << shift - if (b & 0x80) == 0 { - return x, n - } - } - - // The number is too large to represent in a 64-bit value. - return 0, 0 -} - -// DecodeVarint reads a varint-encoded integer from the Buffer. -// This is the format for the -// int32, int64, uint32, uint64, bool, and enum -// protocol buffer types. -func (p *Buffer) DecodeVarint() (x uint64, err error) { - // x, err already 0 - - i := p.index - l := len(p.buf) - - for shift := uint(0); shift < 64; shift += 7 { - if i >= l { - err = io.ErrUnexpectedEOF - return - } - b := p.buf[i] - i++ - x |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - p.index = i - return - } - } - - // The number is too large to represent in a 64-bit value. - err = errOverflow - return -} - -// DecodeFixed64 reads a 64-bit integer from the Buffer. -// This is the format for the -// fixed64, sfixed64, and double protocol buffer types. -func (p *Buffer) DecodeFixed64() (x uint64, err error) { - // x, err already 0 - i := p.index + 8 - if i < 0 || i > len(p.buf) { - err = io.ErrUnexpectedEOF - return - } - p.index = i - - x = uint64(p.buf[i-8]) - x |= uint64(p.buf[i-7]) << 8 - x |= uint64(p.buf[i-6]) << 16 - x |= uint64(p.buf[i-5]) << 24 - x |= uint64(p.buf[i-4]) << 32 - x |= uint64(p.buf[i-3]) << 40 - x |= uint64(p.buf[i-2]) << 48 - x |= uint64(p.buf[i-1]) << 56 - return -} - -// DecodeFixed32 reads a 32-bit integer from the Buffer. -// This is the format for the -// fixed32, sfixed32, and float protocol buffer types. -func (p *Buffer) DecodeFixed32() (x uint64, err error) { - // x, err already 0 - i := p.index + 4 - if i < 0 || i > len(p.buf) { - err = io.ErrUnexpectedEOF - return - } - p.index = i - - x = uint64(p.buf[i-4]) - x |= uint64(p.buf[i-3]) << 8 - x |= uint64(p.buf[i-2]) << 16 - x |= uint64(p.buf[i-1]) << 24 - return -} - -// DecodeZigzag64 reads a zigzag-encoded 64-bit integer -// from the Buffer. -// This is the format used for the sint64 protocol buffer type. -func (p *Buffer) DecodeZigzag64() (x uint64, err error) { - x, err = p.DecodeVarint() - if err != nil { - return - } - x = (x >> 1) ^ uint64((int64(x&1)<<63)>>63) - return -} - -// DecodeZigzag32 reads a zigzag-encoded 32-bit integer -// from the Buffer. -// This is the format used for the sint32 protocol buffer type. -func (p *Buffer) DecodeZigzag32() (x uint64, err error) { - x, err = p.DecodeVarint() - if err != nil { - return - } - x = uint64((uint32(x) >> 1) ^ uint32((int32(x&1)<<31)>>31)) - return -} - -// These are not ValueDecoders: they produce an array of bytes or a string. -// bytes, embedded messages - -// DecodeRawBytes reads a count-delimited byte buffer from the Buffer. -// This is the format used for the bytes protocol buffer -// type and for embedded messages. 
-func (p *Buffer) DecodeRawBytes(alloc bool) (buf []byte, err error) { - n, err := p.DecodeVarint() - if err != nil { - return nil, err - } - - nb := int(n) - if nb < 0 { - return nil, fmt.Errorf("proto: bad byte length %d", nb) - } - end := p.index + nb - if end < p.index || end > len(p.buf) { - return nil, io.ErrUnexpectedEOF - } - - if !alloc { - // todo: check if can get more uses of alloc=false - buf = p.buf[p.index:end] - p.index += nb - return - } - - buf = make([]byte, nb) - copy(buf, p.buf[p.index:]) - p.index += nb - return -} - -// DecodeStringBytes reads an encoded string from the Buffer. -// This is the format used for the proto2 string type. -func (p *Buffer) DecodeStringBytes() (s string, err error) { - buf, err := p.DecodeRawBytes(false) - if err != nil { - return - } - return string(buf), nil -} - -// Skip the next item in the buffer. Its wire type is decoded and presented as an argument. -// If the protocol buffer has extensions, and the field matches, add it as an extension. -// Otherwise, if the XXX_unrecognized field exists, append the skipped data there. -func (o *Buffer) skipAndSave(t reflect.Type, tag, wire int, base structPointer, unrecField field) error { - oi := o.index - - err := o.skip(t, tag, wire) - if err != nil { - return err - } - - if !unrecField.IsValid() { - return nil - } - - ptr := structPointer_Bytes(base, unrecField) - - // Add the skipped field to struct field - obuf := o.buf - - o.buf = *ptr - o.EncodeVarint(uint64(tag<<3 | wire)) - *ptr = append(o.buf, obuf[oi:o.index]...) - - o.buf = obuf - - return nil -} - -// Skip the next item in the buffer. Its wire type is decoded and presented as an argument. -func (o *Buffer) skip(t reflect.Type, tag, wire int) error { - - var u uint64 - var err error - - switch wire { - case WireVarint: - _, err = o.DecodeVarint() - case WireFixed64: - _, err = o.DecodeFixed64() - case WireBytes: - _, err = o.DecodeRawBytes(false) - case WireFixed32: - _, err = o.DecodeFixed32() - case WireStartGroup: - for { - u, err = o.DecodeVarint() - if err != nil { - break - } - fwire := int(u & 0x7) - if fwire == WireEndGroup { - break - } - ftag := int(u >> 3) - err = o.skip(t, ftag, fwire) - if err != nil { - break - } - } - default: - err = fmt.Errorf("proto: can't skip unknown wire type %d for %s", wire, t) - } - return err -} - -// Unmarshaler is the interface representing objects that can -// unmarshal themselves. The method should reset the receiver before -// decoding starts. The argument points to data that may be -// overwritten, so implementations should not keep references to the -// buffer. -type Unmarshaler interface { - Unmarshal([]byte) error -} - -// Unmarshal parses the protocol buffer representation in buf and places the -// decoded result in pb. If the struct underlying pb does not match -// the data in buf, the results can be unpredictable. -// -// Unmarshal resets pb before starting to unmarshal, so any -// existing data in pb is always removed. Use UnmarshalMerge -// to preserve and append to existing data. -func Unmarshal(buf []byte, pb Message) error { - pb.Reset() - return UnmarshalMerge(buf, pb) -} - -// UnmarshalMerge parses the protocol buffer representation in buf and -// writes the decoded result to pb. If the struct underlying pb does not match -// the data in buf, the results can be unpredictable. -// -// UnmarshalMerge merges into existing data in pb. -// Most code should use Unmarshal instead. -func UnmarshalMerge(buf []byte, pb Message) error { - // If the object can unmarshal itself, let it. 
- if u, ok := pb.(Unmarshaler); ok { - return u.Unmarshal(buf) - } - return NewBuffer(buf).Unmarshal(pb) -} - -// DecodeMessage reads a count-delimited message from the Buffer. -func (p *Buffer) DecodeMessage(pb Message) error { - enc, err := p.DecodeRawBytes(false) - if err != nil { - return err - } - return NewBuffer(enc).Unmarshal(pb) -} - -// DecodeGroup reads a tag-delimited group from the Buffer. -func (p *Buffer) DecodeGroup(pb Message) error { - typ, base, err := getbase(pb) - if err != nil { - return err - } - return p.unmarshalType(typ.Elem(), GetProperties(typ.Elem()), true, base) -} - -// Unmarshal parses the protocol buffer representation in the -// Buffer and places the decoded result in pb. If the struct -// underlying pb does not match the data in the buffer, the results can be -// unpredictable. -func (p *Buffer) Unmarshal(pb Message) error { - // If the object can unmarshal itself, let it. - if u, ok := pb.(Unmarshaler); ok { - err := u.Unmarshal(p.buf[p.index:]) - p.index = len(p.buf) - return err - } - - typ, base, err := getbase(pb) - if err != nil { - return err - } - - err = p.unmarshalType(typ.Elem(), GetProperties(typ.Elem()), false, base) - - if collectStats { - stats.Decode++ - } - - return err -} - -// unmarshalType does the work of unmarshaling a structure. -func (o *Buffer) unmarshalType(st reflect.Type, prop *StructProperties, is_group bool, base structPointer) error { - var state errorState - required, reqFields := prop.reqCount, uint64(0) - - var err error - for err == nil && o.index < len(o.buf) { - oi := o.index - var u uint64 - u, err = o.DecodeVarint() - if err != nil { - break - } - wire := int(u & 0x7) - if wire == WireEndGroup { - if is_group { - return nil // input is satisfied - } - return fmt.Errorf("proto: %s: wiretype end group for non-group", st) - } - tag := int(u >> 3) - if tag <= 0 { - return fmt.Errorf("proto: %s: illegal tag %d (wire type %d)", st, tag, wire) - } - fieldnum, ok := prop.decoderTags.get(tag) - if !ok { - // Maybe it's an extension? - if prop.extendable { - if e := structPointer_Interface(base, st).(extendableProto); isExtensionField(e, int32(tag)) { - if err = o.skip(st, tag, wire); err == nil { - ext := e.ExtensionMap()[int32(tag)] // may be missing - ext.enc = append(ext.enc, o.buf[oi:o.index]...) - e.ExtensionMap()[int32(tag)] = ext - } - continue - } - } - // Maybe it's a oneof? - if prop.oneofUnmarshaler != nil { - m := structPointer_Interface(base, st).(Message) - // First return value indicates whether tag is a oneof field. - ok, err = prop.oneofUnmarshaler(m, tag, wire, o) - if err == ErrInternalBadWireType { - // Map the error to something more descriptive. - // Do the formatting here to save generated code space. 
- err = fmt.Errorf("bad wiretype for oneof field in %T", m) - } - if ok { - continue - } - } - err = o.skipAndSave(st, tag, wire, base, prop.unrecField) - continue - } - p := prop.Prop[fieldnum] - - if p.dec == nil { - fmt.Fprintf(os.Stderr, "proto: no protobuf decoder for %s.%s\n", st, st.Field(fieldnum).Name) - continue - } - dec := p.dec - if wire != WireStartGroup && wire != p.WireType { - if wire == WireBytes && p.packedDec != nil { - // a packable field - dec = p.packedDec - } else { - err = fmt.Errorf("proto: bad wiretype for field %s.%s: got wiretype %d, want %d", st, st.Field(fieldnum).Name, wire, p.WireType) - continue - } - } - decErr := dec(o, p, base) - if decErr != nil && !state.shouldContinue(decErr, p) { - err = decErr - } - if err == nil && p.Required { - // Successfully decoded a required field. - if tag <= 64 { - // use bitmap for fields 1-64 to catch field reuse. - var mask uint64 = 1 << uint64(tag-1) - if reqFields&mask == 0 { - // new required field - reqFields |= mask - required-- - } - } else { - // This is imprecise. It can be fooled by a required field - // with a tag > 64 that is encoded twice; that's very rare. - // A fully correct implementation would require allocating - // a data structure, which we would like to avoid. - required-- - } - } - } - if err == nil { - if is_group { - return io.ErrUnexpectedEOF - } - if state.err != nil { - return state.err - } - if required > 0 { - // Not enough information to determine the exact field. If we use extra - // CPU, we could determine the field only if the missing required field - // has a tag <= 64 and we check reqFields. - return &RequiredNotSetError{"{Unknown}"} - } - } - return err -} - -// Individual type decoders -// For each, -// u is the decoded value, -// v is a pointer to the field (pointer) in the struct - -// Sizes of the pools to allocate inside the Buffer. -// The goal is modest amortization and allocation -// on at least 16-byte boundaries. -const ( - boolPoolSize = 16 - uint32PoolSize = 8 - uint64PoolSize = 4 -) - -// Decode a bool. -func (o *Buffer) dec_bool(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - if len(o.bools) == 0 { - o.bools = make([]bool, boolPoolSize) - } - o.bools[0] = u != 0 - *structPointer_Bool(base, p.field) = &o.bools[0] - o.bools = o.bools[1:] - return nil -} - -func (o *Buffer) dec_proto3_bool(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - *structPointer_BoolVal(base, p.field) = u != 0 - return nil -} - -// Decode an int32. -func (o *Buffer) dec_int32(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - word32_Set(structPointer_Word32(base, p.field), o, uint32(u)) - return nil -} - -func (o *Buffer) dec_proto3_int32(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - word32Val_Set(structPointer_Word32Val(base, p.field), uint32(u)) - return nil -} - -// Decode an int64. -func (o *Buffer) dec_int64(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - word64_Set(structPointer_Word64(base, p.field), o, u) - return nil -} - -func (o *Buffer) dec_proto3_int64(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - word64Val_Set(structPointer_Word64Val(base, p.field), o, u) - return nil -} - -// Decode a string. 
-func (o *Buffer) dec_string(p *Properties, base structPointer) error { - s, err := o.DecodeStringBytes() - if err != nil { - return err - } - *structPointer_String(base, p.field) = &s - return nil -} - -func (o *Buffer) dec_proto3_string(p *Properties, base structPointer) error { - s, err := o.DecodeStringBytes() - if err != nil { - return err - } - *structPointer_StringVal(base, p.field) = s - return nil -} - -// Decode a slice of bytes ([]byte). -func (o *Buffer) dec_slice_byte(p *Properties, base structPointer) error { - b, err := o.DecodeRawBytes(true) - if err != nil { - return err - } - *structPointer_Bytes(base, p.field) = b - return nil -} - -// Decode a slice of bools ([]bool). -func (o *Buffer) dec_slice_bool(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - v := structPointer_BoolSlice(base, p.field) - *v = append(*v, u != 0) - return nil -} - -// Decode a slice of bools ([]bool) in packed format. -func (o *Buffer) dec_slice_packed_bool(p *Properties, base structPointer) error { - v := structPointer_BoolSlice(base, p.field) - - nn, err := o.DecodeVarint() - if err != nil { - return err - } - nb := int(nn) // number of bytes of encoded bools - fin := o.index + nb - if fin < o.index { - return errOverflow - } - - y := *v - for o.index < fin { - u, err := p.valDec(o) - if err != nil { - return err - } - y = append(y, u != 0) - } - - *v = y - return nil -} - -// Decode a slice of int32s ([]int32). -func (o *Buffer) dec_slice_int32(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - structPointer_Word32Slice(base, p.field).Append(uint32(u)) - return nil -} - -// Decode a slice of int32s ([]int32) in packed format. -func (o *Buffer) dec_slice_packed_int32(p *Properties, base structPointer) error { - v := structPointer_Word32Slice(base, p.field) - - nn, err := o.DecodeVarint() - if err != nil { - return err - } - nb := int(nn) // number of bytes of encoded int32s - - fin := o.index + nb - if fin < o.index { - return errOverflow - } - for o.index < fin { - u, err := p.valDec(o) - if err != nil { - return err - } - v.Append(uint32(u)) - } - return nil -} - -// Decode a slice of int64s ([]int64). -func (o *Buffer) dec_slice_int64(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - - structPointer_Word64Slice(base, p.field).Append(u) - return nil -} - -// Decode a slice of int64s ([]int64) in packed format. -func (o *Buffer) dec_slice_packed_int64(p *Properties, base structPointer) error { - v := structPointer_Word64Slice(base, p.field) - - nn, err := o.DecodeVarint() - if err != nil { - return err - } - nb := int(nn) // number of bytes of encoded int64s - - fin := o.index + nb - if fin < o.index { - return errOverflow - } - for o.index < fin { - u, err := p.valDec(o) - if err != nil { - return err - } - v.Append(u) - } - return nil -} - -// Decode a slice of strings ([]string). -func (o *Buffer) dec_slice_string(p *Properties, base structPointer) error { - s, err := o.DecodeStringBytes() - if err != nil { - return err - } - v := structPointer_StringSlice(base, p.field) - *v = append(*v, s) - return nil -} - -// Decode a slice of slice of bytes ([][]byte). -func (o *Buffer) dec_slice_slice_byte(p *Properties, base structPointer) error { - b, err := o.DecodeRawBytes(true) - if err != nil { - return err - } - v := structPointer_BytesSlice(base, p.field) - *v = append(*v, b) - return nil -} - -// Decode a map field. 
-func (o *Buffer) dec_new_map(p *Properties, base structPointer) error { - raw, err := o.DecodeRawBytes(false) - if err != nil { - return err - } - oi := o.index // index at the end of this map entry - o.index -= len(raw) // move buffer back to start of map entry - - mptr := structPointer_NewAt(base, p.field, p.mtype) // *map[K]V - if mptr.Elem().IsNil() { - mptr.Elem().Set(reflect.MakeMap(mptr.Type().Elem())) - } - v := mptr.Elem() // map[K]V - - // Prepare addressable doubly-indirect placeholders for the key and value types. - // See enc_new_map for why. - keyptr := reflect.New(reflect.PtrTo(p.mtype.Key())).Elem() // addressable *K - keybase := toStructPointer(keyptr.Addr()) // **K - - var valbase structPointer - var valptr reflect.Value - switch p.mtype.Elem().Kind() { - case reflect.Slice: - // []byte - var dummy []byte - valptr = reflect.ValueOf(&dummy) // *[]byte - valbase = toStructPointer(valptr) // *[]byte - case reflect.Ptr: - // message; valptr is **Msg; need to allocate the intermediate pointer - valptr = reflect.New(reflect.PtrTo(p.mtype.Elem())).Elem() // addressable *V - valptr.Set(reflect.New(valptr.Type().Elem())) - valbase = toStructPointer(valptr) - default: - // everything else - valptr = reflect.New(reflect.PtrTo(p.mtype.Elem())).Elem() // addressable *V - valbase = toStructPointer(valptr.Addr()) // **V - } - - // Decode. - // This parses a restricted wire format, namely the encoding of a message - // with two fields. See enc_new_map for the format. - for o.index < oi { - // tagcode for key and value properties are always a single byte - // because they have tags 1 and 2. - tagcode := o.buf[o.index] - o.index++ - switch tagcode { - case p.mkeyprop.tagcode[0]: - if err := p.mkeyprop.dec(o, p.mkeyprop, keybase); err != nil { - return err - } - case p.mvalprop.tagcode[0]: - if err := p.mvalprop.dec(o, p.mvalprop, valbase); err != nil { - return err - } - default: - // TODO: Should we silently skip this instead? - return fmt.Errorf("proto: bad map data tag %d", raw[0]) - } - } - keyelem, valelem := keyptr.Elem(), valptr.Elem() - if !keyelem.IsValid() { - keyelem = reflect.Zero(p.mtype.Key()) - } - if !valelem.IsValid() { - valelem = reflect.Zero(p.mtype.Elem()) - } - - v.SetMapIndex(keyelem, valelem) - return nil -} - -// Decode a group. -func (o *Buffer) dec_struct_group(p *Properties, base structPointer) error { - bas := structPointer_GetStructPointer(base, p.field) - if structPointer_IsNil(bas) { - // allocate new nested message - bas = toStructPointer(reflect.New(p.stype)) - structPointer_SetStructPointer(base, p.field, bas) - } - return o.unmarshalType(p.stype, p.sprop, true, bas) -} - -// Decode an embedded message. -func (o *Buffer) dec_struct_message(p *Properties, base structPointer) (err error) { - raw, e := o.DecodeRawBytes(false) - if e != nil { - return e - } - - bas := structPointer_GetStructPointer(base, p.field) - if structPointer_IsNil(bas) { - // allocate new nested message - bas = toStructPointer(reflect.New(p.stype)) - structPointer_SetStructPointer(base, p.field, bas) - } - - // If the object can unmarshal itself, let it. - if p.isUnmarshaler { - iv := structPointer_Interface(bas, p.stype) - return iv.(Unmarshaler).Unmarshal(raw) - } - - obuf := o.buf - oi := o.index - o.buf = raw - o.index = 0 - - err = o.unmarshalType(p.stype, p.sprop, false, bas) - o.buf = obuf - o.index = oi - - return err -} - -// Decode a slice of embedded messages. 
-func (o *Buffer) dec_slice_struct_message(p *Properties, base structPointer) error { - return o.dec_slice_struct(p, false, base) -} - -// Decode a slice of embedded groups. -func (o *Buffer) dec_slice_struct_group(p *Properties, base structPointer) error { - return o.dec_slice_struct(p, true, base) -} - -// Decode a slice of structs ([]*struct). -func (o *Buffer) dec_slice_struct(p *Properties, is_group bool, base structPointer) error { - v := reflect.New(p.stype) - bas := toStructPointer(v) - structPointer_StructPointerSlice(base, p.field).Append(bas) - - if is_group { - err := o.unmarshalType(p.stype, p.sprop, is_group, bas) - return err - } - - raw, err := o.DecodeRawBytes(false) - if err != nil { - return err - } - - // If the object can unmarshal itself, let it. - if p.isUnmarshaler { - iv := v.Interface() - return iv.(Unmarshaler).Unmarshal(raw) - } - - obuf := o.buf - oi := o.index - o.buf = raw - o.index = 0 - - err = o.unmarshalType(p.stype, p.sprop, is_group, bas) - - o.buf = obuf - o.index = oi - - return err -} diff --git a/vendor/github.com/golang/protobuf/proto/encode.go b/vendor/github.com/golang/protobuf/proto/encode.go deleted file mode 100644 index eb7e047..0000000 --- a/vendor/github.com/golang/protobuf/proto/encode.go +++ /dev/null @@ -1,1331 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -/* - * Routines for encoding data into the wire format for protocol buffers. - */ - -import ( - "errors" - "fmt" - "reflect" - "sort" -) - -// RequiredNotSetError is the error returned if Marshal is called with -// a protocol buffer struct whose required fields have not -// all been initialized. It is also the error returned if Unmarshal is -// called with an encoded protocol buffer that does not include all the -// required fields. -// -// When printed, RequiredNotSetError reports the first unset required field in a -// message. 
If the field cannot be precisely determined, it is reported as -// "{Unknown}". -type RequiredNotSetError struct { - field string -} - -func (e *RequiredNotSetError) Error() string { - return fmt.Sprintf("proto: required field %q not set", e.field) -} - -var ( - // errRepeatedHasNil is the error returned if Marshal is called with - // a struct with a repeated field containing a nil element. - errRepeatedHasNil = errors.New("proto: repeated field has nil element") - - // errOneofHasNil is the error returned if Marshal is called with - // a struct with a oneof field containing a nil element. - errOneofHasNil = errors.New("proto: oneof field has nil value") - - // ErrNil is the error returned if Marshal is called with nil. - ErrNil = errors.New("proto: Marshal called with nil") -) - -// The fundamental encoders that put bytes on the wire. -// Those that take integer types all accept uint64 and are -// therefore of type valueEncoder. - -const maxVarintBytes = 10 // maximum length of a varint - -// EncodeVarint returns the varint encoding of x. -// This is the format for the -// int32, int64, uint32, uint64, bool, and enum -// protocol buffer types. -// Not used by the package itself, but helpful to clients -// wishing to use the same encoding. -func EncodeVarint(x uint64) []byte { - var buf [maxVarintBytes]byte - var n int - for n = 0; x > 127; n++ { - buf[n] = 0x80 | uint8(x&0x7F) - x >>= 7 - } - buf[n] = uint8(x) - n++ - return buf[0:n] -} - -// EncodeVarint writes a varint-encoded integer to the Buffer. -// This is the format for the -// int32, int64, uint32, uint64, bool, and enum -// protocol buffer types. -func (p *Buffer) EncodeVarint(x uint64) error { - for x >= 1<<7 { - p.buf = append(p.buf, uint8(x&0x7f|0x80)) - x >>= 7 - } - p.buf = append(p.buf, uint8(x)) - return nil -} - -// SizeVarint returns the varint encoding size of an integer. -func SizeVarint(x uint64) int { - return sizeVarint(x) -} - -func sizeVarint(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n -} - -// EncodeFixed64 writes a 64-bit integer to the Buffer. -// This is the format for the -// fixed64, sfixed64, and double protocol buffer types. -func (p *Buffer) EncodeFixed64(x uint64) error { - p.buf = append(p.buf, - uint8(x), - uint8(x>>8), - uint8(x>>16), - uint8(x>>24), - uint8(x>>32), - uint8(x>>40), - uint8(x>>48), - uint8(x>>56)) - return nil -} - -func sizeFixed64(x uint64) int { - return 8 -} - -// EncodeFixed32 writes a 32-bit integer to the Buffer. -// This is the format for the -// fixed32, sfixed32, and float protocol buffer types. -func (p *Buffer) EncodeFixed32(x uint64) error { - p.buf = append(p.buf, - uint8(x), - uint8(x>>8), - uint8(x>>16), - uint8(x>>24)) - return nil -} - -func sizeFixed32(x uint64) int { - return 4 -} - -// EncodeZigzag64 writes a zigzag-encoded 64-bit integer -// to the Buffer. -// This is the format used for the sint64 protocol buffer type. -func (p *Buffer) EncodeZigzag64(x uint64) error { - // use signed number to get arithmetic right shift. - return p.EncodeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} - -func sizeZigzag64(x uint64) int { - return sizeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} - -// EncodeZigzag32 writes a zigzag-encoded 32-bit integer -// to the Buffer. -// This is the format used for the sint32 protocol buffer type. -func (p *Buffer) EncodeZigzag32(x uint64) error { - // use signed number to get arithmetic right shift. 
- return p.EncodeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31)))) -} - -func sizeZigzag32(x uint64) int { - return sizeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31)))) -} - -// EncodeRawBytes writes a count-delimited byte buffer to the Buffer. -// This is the format used for the bytes protocol buffer -// type and for embedded messages. -func (p *Buffer) EncodeRawBytes(b []byte) error { - p.EncodeVarint(uint64(len(b))) - p.buf = append(p.buf, b...) - return nil -} - -func sizeRawBytes(b []byte) int { - return sizeVarint(uint64(len(b))) + - len(b) -} - -// EncodeStringBytes writes an encoded string to the Buffer. -// This is the format used for the proto2 string type. -func (p *Buffer) EncodeStringBytes(s string) error { - p.EncodeVarint(uint64(len(s))) - p.buf = append(p.buf, s...) - return nil -} - -func sizeStringBytes(s string) int { - return sizeVarint(uint64(len(s))) + - len(s) -} - -// Marshaler is the interface representing objects that can marshal themselves. -type Marshaler interface { - Marshal() ([]byte, error) -} - -// Marshal takes the protocol buffer -// and encodes it into the wire format, returning the data. -func Marshal(pb Message) ([]byte, error) { - // Can the object marshal itself? - if m, ok := pb.(Marshaler); ok { - return m.Marshal() - } - p := NewBuffer(nil) - err := p.Marshal(pb) - var state errorState - if err != nil && !state.shouldContinue(err, nil) { - return nil, err - } - if p.buf == nil && err == nil { - // Return a non-nil slice on success. - return []byte{}, nil - } - return p.buf, err -} - -// EncodeMessage writes the protocol buffer to the Buffer, -// prefixed by a varint-encoded length. -func (p *Buffer) EncodeMessage(pb Message) error { - t, base, err := getbase(pb) - if structPointer_IsNil(base) { - return ErrNil - } - if err == nil { - var state errorState - err = p.enc_len_struct(GetProperties(t.Elem()), base, &state) - } - return err -} - -// Marshal takes the protocol buffer -// and encodes it into the wire format, writing the result to the -// Buffer. -func (p *Buffer) Marshal(pb Message) error { - // Can the object marshal itself? - if m, ok := pb.(Marshaler); ok { - data, err := m.Marshal() - if err != nil { - return err - } - p.buf = append(p.buf, data...) - return nil - } - - t, base, err := getbase(pb) - if structPointer_IsNil(base) { - return ErrNil - } - if err == nil { - err = p.enc_struct(GetProperties(t.Elem()), base) - } - - if collectStats { - stats.Encode++ - } - - return err -} - -// Size returns the encoded size of a protocol buffer. -func Size(pb Message) (n int) { - // Can the object marshal itself? If so, Size is slow. - // TODO: add Size to Marshaler, or add a Sizer interface. - if m, ok := pb.(Marshaler); ok { - b, _ := m.Marshal() - return len(b) - } - - t, base, err := getbase(pb) - if structPointer_IsNil(base) { - return 0 - } - if err == nil { - n = size_struct(GetProperties(t.Elem()), base) - } - - if collectStats { - stats.Size++ - } - - return -} - -// Individual type encoders. - -// Encode a bool. -func (o *Buffer) enc_bool(p *Properties, base structPointer) error { - v := *structPointer_Bool(base, p.field) - if v == nil { - return ErrNil - } - x := 0 - if *v { - x = 1 - } - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, uint64(x)) - return nil -} - -func (o *Buffer) enc_proto3_bool(p *Properties, base structPointer) error { - v := *structPointer_BoolVal(base, p.field) - if !v { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) 
- p.valEnc(o, 1) - return nil -} - -func size_bool(p *Properties, base structPointer) int { - v := *structPointer_Bool(base, p.field) - if v == nil { - return 0 - } - return len(p.tagcode) + 1 // each bool takes exactly one byte -} - -func size_proto3_bool(p *Properties, base structPointer) int { - v := *structPointer_BoolVal(base, p.field) - if !v && !p.oneof { - return 0 - } - return len(p.tagcode) + 1 // each bool takes exactly one byte -} - -// Encode an int32. -func (o *Buffer) enc_int32(p *Properties, base structPointer) error { - v := structPointer_Word32(base, p.field) - if word32_IsNil(v) { - return ErrNil - } - x := int32(word32_Get(v)) // permit sign extension to use full 64-bit range - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, uint64(x)) - return nil -} - -func (o *Buffer) enc_proto3_int32(p *Properties, base structPointer) error { - v := structPointer_Word32Val(base, p.field) - x := int32(word32Val_Get(v)) // permit sign extension to use full 64-bit range - if x == 0 { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, uint64(x)) - return nil -} - -func size_int32(p *Properties, base structPointer) (n int) { - v := structPointer_Word32(base, p.field) - if word32_IsNil(v) { - return 0 - } - x := int32(word32_Get(v)) // permit sign extension to use full 64-bit range - n += len(p.tagcode) - n += p.valSize(uint64(x)) - return -} - -func size_proto3_int32(p *Properties, base structPointer) (n int) { - v := structPointer_Word32Val(base, p.field) - x := int32(word32Val_Get(v)) // permit sign extension to use full 64-bit range - if x == 0 && !p.oneof { - return 0 - } - n += len(p.tagcode) - n += p.valSize(uint64(x)) - return -} - -// Encode a uint32. -// Exactly the same as int32, except for no sign extension. -func (o *Buffer) enc_uint32(p *Properties, base structPointer) error { - v := structPointer_Word32(base, p.field) - if word32_IsNil(v) { - return ErrNil - } - x := word32_Get(v) - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, uint64(x)) - return nil -} - -func (o *Buffer) enc_proto3_uint32(p *Properties, base structPointer) error { - v := structPointer_Word32Val(base, p.field) - x := word32Val_Get(v) - if x == 0 { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, uint64(x)) - return nil -} - -func size_uint32(p *Properties, base structPointer) (n int) { - v := structPointer_Word32(base, p.field) - if word32_IsNil(v) { - return 0 - } - x := word32_Get(v) - n += len(p.tagcode) - n += p.valSize(uint64(x)) - return -} - -func size_proto3_uint32(p *Properties, base structPointer) (n int) { - v := structPointer_Word32Val(base, p.field) - x := word32Val_Get(v) - if x == 0 && !p.oneof { - return 0 - } - n += len(p.tagcode) - n += p.valSize(uint64(x)) - return -} - -// Encode an int64. -func (o *Buffer) enc_int64(p *Properties, base structPointer) error { - v := structPointer_Word64(base, p.field) - if word64_IsNil(v) { - return ErrNil - } - x := word64_Get(v) - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, x) - return nil -} - -func (o *Buffer) enc_proto3_int64(p *Properties, base structPointer) error { - v := structPointer_Word64Val(base, p.field) - x := word64Val_Get(v) - if x == 0 { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) 
- p.valEnc(o, x) - return nil -} - -func size_int64(p *Properties, base structPointer) (n int) { - v := structPointer_Word64(base, p.field) - if word64_IsNil(v) { - return 0 - } - x := word64_Get(v) - n += len(p.tagcode) - n += p.valSize(x) - return -} - -func size_proto3_int64(p *Properties, base structPointer) (n int) { - v := structPointer_Word64Val(base, p.field) - x := word64Val_Get(v) - if x == 0 && !p.oneof { - return 0 - } - n += len(p.tagcode) - n += p.valSize(x) - return -} - -// Encode a string. -func (o *Buffer) enc_string(p *Properties, base structPointer) error { - v := *structPointer_String(base, p.field) - if v == nil { - return ErrNil - } - x := *v - o.buf = append(o.buf, p.tagcode...) - o.EncodeStringBytes(x) - return nil -} - -func (o *Buffer) enc_proto3_string(p *Properties, base structPointer) error { - v := *structPointer_StringVal(base, p.field) - if v == "" { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) - o.EncodeStringBytes(v) - return nil -} - -func size_string(p *Properties, base structPointer) (n int) { - v := *structPointer_String(base, p.field) - if v == nil { - return 0 - } - x := *v - n += len(p.tagcode) - n += sizeStringBytes(x) - return -} - -func size_proto3_string(p *Properties, base structPointer) (n int) { - v := *structPointer_StringVal(base, p.field) - if v == "" && !p.oneof { - return 0 - } - n += len(p.tagcode) - n += sizeStringBytes(v) - return -} - -// All protocol buffer fields are nillable, but be careful. -func isNil(v reflect.Value) bool { - switch v.Kind() { - case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: - return v.IsNil() - } - return false -} - -// Encode a message struct. -func (o *Buffer) enc_struct_message(p *Properties, base structPointer) error { - var state errorState - structp := structPointer_GetStructPointer(base, p.field) - if structPointer_IsNil(structp) { - return ErrNil - } - - // Can the object marshal itself? - if p.isMarshaler { - m := structPointer_Interface(structp, p.stype).(Marshaler) - data, err := m.Marshal() - if err != nil && !state.shouldContinue(err, nil) { - return err - } - o.buf = append(o.buf, p.tagcode...) - o.EncodeRawBytes(data) - return state.err - } - - o.buf = append(o.buf, p.tagcode...) - return o.enc_len_struct(p.sprop, structp, &state) -} - -func size_struct_message(p *Properties, base structPointer) int { - structp := structPointer_GetStructPointer(base, p.field) - if structPointer_IsNil(structp) { - return 0 - } - - // Can the object marshal itself? - if p.isMarshaler { - m := structPointer_Interface(structp, p.stype).(Marshaler) - data, _ := m.Marshal() - n0 := len(p.tagcode) - n1 := sizeRawBytes(data) - return n0 + n1 - } - - n0 := len(p.tagcode) - n1 := size_struct(p.sprop, structp) - n2 := sizeVarint(uint64(n1)) // size of encoded length - return n0 + n1 + n2 -} - -// Encode a group struct. 
-func (o *Buffer) enc_struct_group(p *Properties, base structPointer) error { - var state errorState - b := structPointer_GetStructPointer(base, p.field) - if structPointer_IsNil(b) { - return ErrNil - } - - o.EncodeVarint(uint64((p.Tag << 3) | WireStartGroup)) - err := o.enc_struct(p.sprop, b) - if err != nil && !state.shouldContinue(err, nil) { - return err - } - o.EncodeVarint(uint64((p.Tag << 3) | WireEndGroup)) - return state.err -} - -func size_struct_group(p *Properties, base structPointer) (n int) { - b := structPointer_GetStructPointer(base, p.field) - if structPointer_IsNil(b) { - return 0 - } - - n += sizeVarint(uint64((p.Tag << 3) | WireStartGroup)) - n += size_struct(p.sprop, b) - n += sizeVarint(uint64((p.Tag << 3) | WireEndGroup)) - return -} - -// Encode a slice of bools ([]bool). -func (o *Buffer) enc_slice_bool(p *Properties, base structPointer) error { - s := *structPointer_BoolSlice(base, p.field) - l := len(s) - if l == 0 { - return ErrNil - } - for _, x := range s { - o.buf = append(o.buf, p.tagcode...) - v := uint64(0) - if x { - v = 1 - } - p.valEnc(o, v) - } - return nil -} - -func size_slice_bool(p *Properties, base structPointer) int { - s := *structPointer_BoolSlice(base, p.field) - l := len(s) - if l == 0 { - return 0 - } - return l * (len(p.tagcode) + 1) // each bool takes exactly one byte -} - -// Encode a slice of bools ([]bool) in packed format. -func (o *Buffer) enc_slice_packed_bool(p *Properties, base structPointer) error { - s := *structPointer_BoolSlice(base, p.field) - l := len(s) - if l == 0 { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) - o.EncodeVarint(uint64(l)) // each bool takes exactly one byte - for _, x := range s { - v := uint64(0) - if x { - v = 1 - } - p.valEnc(o, v) - } - return nil -} - -func size_slice_packed_bool(p *Properties, base structPointer) (n int) { - s := *structPointer_BoolSlice(base, p.field) - l := len(s) - if l == 0 { - return 0 - } - n += len(p.tagcode) - n += sizeVarint(uint64(l)) - n += l // each bool takes exactly one byte - return -} - -// Encode a slice of bytes ([]byte). -func (o *Buffer) enc_slice_byte(p *Properties, base structPointer) error { - s := *structPointer_Bytes(base, p.field) - if s == nil { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) - o.EncodeRawBytes(s) - return nil -} - -func (o *Buffer) enc_proto3_slice_byte(p *Properties, base structPointer) error { - s := *structPointer_Bytes(base, p.field) - if len(s) == 0 { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) - o.EncodeRawBytes(s) - return nil -} - -func size_slice_byte(p *Properties, base structPointer) (n int) { - s := *structPointer_Bytes(base, p.field) - if s == nil && !p.oneof { - return 0 - } - n += len(p.tagcode) - n += sizeRawBytes(s) - return -} - -func size_proto3_slice_byte(p *Properties, base structPointer) (n int) { - s := *structPointer_Bytes(base, p.field) - if len(s) == 0 && !p.oneof { - return 0 - } - n += len(p.tagcode) - n += sizeRawBytes(s) - return -} - -// Encode a slice of int32s ([]int32). -func (o *Buffer) enc_slice_int32(p *Properties, base structPointer) error { - s := structPointer_Word32Slice(base, p.field) - l := s.Len() - if l == 0 { - return ErrNil - } - for i := 0; i < l; i++ { - o.buf = append(o.buf, p.tagcode...) 
- x := int32(s.Index(i)) // permit sign extension to use full 64-bit range - p.valEnc(o, uint64(x)) - } - return nil -} - -func size_slice_int32(p *Properties, base structPointer) (n int) { - s := structPointer_Word32Slice(base, p.field) - l := s.Len() - if l == 0 { - return 0 - } - for i := 0; i < l; i++ { - n += len(p.tagcode) - x := int32(s.Index(i)) // permit sign extension to use full 64-bit range - n += p.valSize(uint64(x)) - } - return -} - -// Encode a slice of int32s ([]int32) in packed format. -func (o *Buffer) enc_slice_packed_int32(p *Properties, base structPointer) error { - s := structPointer_Word32Slice(base, p.field) - l := s.Len() - if l == 0 { - return ErrNil - } - // TODO: Reuse a Buffer. - buf := NewBuffer(nil) - for i := 0; i < l; i++ { - x := int32(s.Index(i)) // permit sign extension to use full 64-bit range - p.valEnc(buf, uint64(x)) - } - - o.buf = append(o.buf, p.tagcode...) - o.EncodeVarint(uint64(len(buf.buf))) - o.buf = append(o.buf, buf.buf...) - return nil -} - -func size_slice_packed_int32(p *Properties, base structPointer) (n int) { - s := structPointer_Word32Slice(base, p.field) - l := s.Len() - if l == 0 { - return 0 - } - var bufSize int - for i := 0; i < l; i++ { - x := int32(s.Index(i)) // permit sign extension to use full 64-bit range - bufSize += p.valSize(uint64(x)) - } - - n += len(p.tagcode) - n += sizeVarint(uint64(bufSize)) - n += bufSize - return -} - -// Encode a slice of uint32s ([]uint32). -// Exactly the same as int32, except for no sign extension. -func (o *Buffer) enc_slice_uint32(p *Properties, base structPointer) error { - s := structPointer_Word32Slice(base, p.field) - l := s.Len() - if l == 0 { - return ErrNil - } - for i := 0; i < l; i++ { - o.buf = append(o.buf, p.tagcode...) - x := s.Index(i) - p.valEnc(o, uint64(x)) - } - return nil -} - -func size_slice_uint32(p *Properties, base structPointer) (n int) { - s := structPointer_Word32Slice(base, p.field) - l := s.Len() - if l == 0 { - return 0 - } - for i := 0; i < l; i++ { - n += len(p.tagcode) - x := s.Index(i) - n += p.valSize(uint64(x)) - } - return -} - -// Encode a slice of uint32s ([]uint32) in packed format. -// Exactly the same as int32, except for no sign extension. -func (o *Buffer) enc_slice_packed_uint32(p *Properties, base structPointer) error { - s := structPointer_Word32Slice(base, p.field) - l := s.Len() - if l == 0 { - return ErrNil - } - // TODO: Reuse a Buffer. - buf := NewBuffer(nil) - for i := 0; i < l; i++ { - p.valEnc(buf, uint64(s.Index(i))) - } - - o.buf = append(o.buf, p.tagcode...) - o.EncodeVarint(uint64(len(buf.buf))) - o.buf = append(o.buf, buf.buf...) - return nil -} - -func size_slice_packed_uint32(p *Properties, base structPointer) (n int) { - s := structPointer_Word32Slice(base, p.field) - l := s.Len() - if l == 0 { - return 0 - } - var bufSize int - for i := 0; i < l; i++ { - bufSize += p.valSize(uint64(s.Index(i))) - } - - n += len(p.tagcode) - n += sizeVarint(uint64(bufSize)) - n += bufSize - return -} - -// Encode a slice of int64s ([]int64). -func (o *Buffer) enc_slice_int64(p *Properties, base structPointer) error { - s := structPointer_Word64Slice(base, p.field) - l := s.Len() - if l == 0 { - return ErrNil - } - for i := 0; i < l; i++ { - o.buf = append(o.buf, p.tagcode...) 
- p.valEnc(o, s.Index(i)) - } - return nil -} - -func size_slice_int64(p *Properties, base structPointer) (n int) { - s := structPointer_Word64Slice(base, p.field) - l := s.Len() - if l == 0 { - return 0 - } - for i := 0; i < l; i++ { - n += len(p.tagcode) - n += p.valSize(s.Index(i)) - } - return -} - -// Encode a slice of int64s ([]int64) in packed format. -func (o *Buffer) enc_slice_packed_int64(p *Properties, base structPointer) error { - s := structPointer_Word64Slice(base, p.field) - l := s.Len() - if l == 0 { - return ErrNil - } - // TODO: Reuse a Buffer. - buf := NewBuffer(nil) - for i := 0; i < l; i++ { - p.valEnc(buf, s.Index(i)) - } - - o.buf = append(o.buf, p.tagcode...) - o.EncodeVarint(uint64(len(buf.buf))) - o.buf = append(o.buf, buf.buf...) - return nil -} - -func size_slice_packed_int64(p *Properties, base structPointer) (n int) { - s := structPointer_Word64Slice(base, p.field) - l := s.Len() - if l == 0 { - return 0 - } - var bufSize int - for i := 0; i < l; i++ { - bufSize += p.valSize(s.Index(i)) - } - - n += len(p.tagcode) - n += sizeVarint(uint64(bufSize)) - n += bufSize - return -} - -// Encode a slice of slice of bytes ([][]byte). -func (o *Buffer) enc_slice_slice_byte(p *Properties, base structPointer) error { - ss := *structPointer_BytesSlice(base, p.field) - l := len(ss) - if l == 0 { - return ErrNil - } - for i := 0; i < l; i++ { - o.buf = append(o.buf, p.tagcode...) - o.EncodeRawBytes(ss[i]) - } - return nil -} - -func size_slice_slice_byte(p *Properties, base structPointer) (n int) { - ss := *structPointer_BytesSlice(base, p.field) - l := len(ss) - if l == 0 { - return 0 - } - n += l * len(p.tagcode) - for i := 0; i < l; i++ { - n += sizeRawBytes(ss[i]) - } - return -} - -// Encode a slice of strings ([]string). -func (o *Buffer) enc_slice_string(p *Properties, base structPointer) error { - ss := *structPointer_StringSlice(base, p.field) - l := len(ss) - for i := 0; i < l; i++ { - o.buf = append(o.buf, p.tagcode...) - o.EncodeStringBytes(ss[i]) - } - return nil -} - -func size_slice_string(p *Properties, base structPointer) (n int) { - ss := *structPointer_StringSlice(base, p.field) - l := len(ss) - n += l * len(p.tagcode) - for i := 0; i < l; i++ { - n += sizeStringBytes(ss[i]) - } - return -} - -// Encode a slice of message structs ([]*struct). -func (o *Buffer) enc_slice_struct_message(p *Properties, base structPointer) error { - var state errorState - s := structPointer_StructPointerSlice(base, p.field) - l := s.Len() - - for i := 0; i < l; i++ { - structp := s.Index(i) - if structPointer_IsNil(structp) { - return errRepeatedHasNil - } - - // Can the object marshal itself? - if p.isMarshaler { - m := structPointer_Interface(structp, p.stype).(Marshaler) - data, err := m.Marshal() - if err != nil && !state.shouldContinue(err, nil) { - return err - } - o.buf = append(o.buf, p.tagcode...) - o.EncodeRawBytes(data) - continue - } - - o.buf = append(o.buf, p.tagcode...) - err := o.enc_len_struct(p.sprop, structp, &state) - if err != nil && !state.shouldContinue(err, nil) { - if err == ErrNil { - return errRepeatedHasNil - } - return err - } - } - return state.err -} - -func size_slice_struct_message(p *Properties, base structPointer) (n int) { - s := structPointer_StructPointerSlice(base, p.field) - l := s.Len() - n += l * len(p.tagcode) - for i := 0; i < l; i++ { - structp := s.Index(i) - if structPointer_IsNil(structp) { - return // return the size up to this point - } - - // Can the object marshal itself? 
- if p.isMarshaler { - m := structPointer_Interface(structp, p.stype).(Marshaler) - data, _ := m.Marshal() - n += len(p.tagcode) - n += sizeRawBytes(data) - continue - } - - n0 := size_struct(p.sprop, structp) - n1 := sizeVarint(uint64(n0)) // size of encoded length - n += n0 + n1 - } - return -} - -// Encode a slice of group structs ([]*struct). -func (o *Buffer) enc_slice_struct_group(p *Properties, base structPointer) error { - var state errorState - s := structPointer_StructPointerSlice(base, p.field) - l := s.Len() - - for i := 0; i < l; i++ { - b := s.Index(i) - if structPointer_IsNil(b) { - return errRepeatedHasNil - } - - o.EncodeVarint(uint64((p.Tag << 3) | WireStartGroup)) - - err := o.enc_struct(p.sprop, b) - - if err != nil && !state.shouldContinue(err, nil) { - if err == ErrNil { - return errRepeatedHasNil - } - return err - } - - o.EncodeVarint(uint64((p.Tag << 3) | WireEndGroup)) - } - return state.err -} - -func size_slice_struct_group(p *Properties, base structPointer) (n int) { - s := structPointer_StructPointerSlice(base, p.field) - l := s.Len() - - n += l * sizeVarint(uint64((p.Tag<<3)|WireStartGroup)) - n += l * sizeVarint(uint64((p.Tag<<3)|WireEndGroup)) - for i := 0; i < l; i++ { - b := s.Index(i) - if structPointer_IsNil(b) { - return // return size up to this point - } - - n += size_struct(p.sprop, b) - } - return -} - -// Encode an extension map. -func (o *Buffer) enc_map(p *Properties, base structPointer) error { - v := *structPointer_ExtMap(base, p.field) - if err := encodeExtensionMap(v); err != nil { - return err - } - // Fast-path for common cases: zero or one extensions. - if len(v) <= 1 { - for _, e := range v { - o.buf = append(o.buf, e.enc...) - } - return nil - } - - // Sort keys to provide a deterministic encoding. - keys := make([]int, 0, len(v)) - for k := range v { - keys = append(keys, int(k)) - } - sort.Ints(keys) - - for _, k := range keys { - o.buf = append(o.buf, v[int32(k)].enc...) - } - return nil -} - -func size_map(p *Properties, base structPointer) int { - v := *structPointer_ExtMap(base, p.field) - return sizeExtensionMap(v) -} - -// Encode a map field. -func (o *Buffer) enc_new_map(p *Properties, base structPointer) error { - var state errorState // XXX: or do we need to plumb this through? - - /* - A map defined as - map map_field = N; - is encoded in the same way as - message MapFieldEntry { - key_type key = 1; - value_type value = 2; - } - repeated MapFieldEntry map_field = N; - */ - - v := structPointer_NewAt(base, p.field, p.mtype).Elem() // map[K]V - if v.Len() == 0 { - return nil - } - - keycopy, valcopy, keybase, valbase := mapEncodeScratch(p.mtype) - - enc := func() error { - if err := p.mkeyprop.enc(o, p.mkeyprop, keybase); err != nil { - return err - } - if err := p.mvalprop.enc(o, p.mvalprop, valbase); err != nil { - return err - } - return nil - } - - // Don't sort map keys. It is not required by the spec, and C++ doesn't do it. - for _, key := range v.MapKeys() { - val := v.MapIndex(key) - - // The only illegal map entry values are nil message pointers. - if val.Kind() == reflect.Ptr && val.IsNil() { - return errors.New("proto: map has nil element") - } - - keycopy.Set(key) - valcopy.Set(val) - - o.buf = append(o.buf, p.tagcode...) 
- if err := o.enc_len_thing(enc, &state); err != nil { - return err - } - } - return nil -} - -func size_new_map(p *Properties, base structPointer) int { - v := structPointer_NewAt(base, p.field, p.mtype).Elem() // map[K]V - - keycopy, valcopy, keybase, valbase := mapEncodeScratch(p.mtype) - - n := 0 - for _, key := range v.MapKeys() { - val := v.MapIndex(key) - keycopy.Set(key) - valcopy.Set(val) - - // Tag codes for key and val are the responsibility of the sub-sizer. - keysize := p.mkeyprop.size(p.mkeyprop, keybase) - valsize := p.mvalprop.size(p.mvalprop, valbase) - entry := keysize + valsize - // Add on tag code and length of map entry itself. - n += len(p.tagcode) + sizeVarint(uint64(entry)) + entry - } - return n -} - -// mapEncodeScratch returns a new reflect.Value matching the map's value type, -// and a structPointer suitable for passing to an encoder or sizer. -func mapEncodeScratch(mapType reflect.Type) (keycopy, valcopy reflect.Value, keybase, valbase structPointer) { - // Prepare addressable doubly-indirect placeholders for the key and value types. - // This is needed because the element-type encoders expect **T, but the map iteration produces T. - - keycopy = reflect.New(mapType.Key()).Elem() // addressable K - keyptr := reflect.New(reflect.PtrTo(keycopy.Type())).Elem() // addressable *K - keyptr.Set(keycopy.Addr()) // - keybase = toStructPointer(keyptr.Addr()) // **K - - // Value types are more varied and require special handling. - switch mapType.Elem().Kind() { - case reflect.Slice: - // []byte - var dummy []byte - valcopy = reflect.ValueOf(&dummy).Elem() // addressable []byte - valbase = toStructPointer(valcopy.Addr()) - case reflect.Ptr: - // message; the generated field type is map[K]*Msg (so V is *Msg), - // so we only need one level of indirection. - valcopy = reflect.New(mapType.Elem()).Elem() // addressable V - valbase = toStructPointer(valcopy.Addr()) - default: - // everything else - valcopy = reflect.New(mapType.Elem()).Elem() // addressable V - valptr := reflect.New(reflect.PtrTo(valcopy.Type())).Elem() // addressable *V - valptr.Set(valcopy.Addr()) // - valbase = toStructPointer(valptr.Addr()) // **V - } - return -} - -// Encode a struct. -func (o *Buffer) enc_struct(prop *StructProperties, base structPointer) error { - var state errorState - // Encode fields in tag order so that decoders may use optimizations - // that depend on the ordering. - // https://developers.google.com/protocol-buffers/docs/encoding#order - for _, i := range prop.order { - p := prop.Prop[i] - if p.enc != nil { - err := p.enc(o, p, base) - if err != nil { - if err == ErrNil { - if p.Required && state.err == nil { - state.err = &RequiredNotSetError{p.Name} - } - } else if err == errRepeatedHasNil { - // Give more context to nil values in repeated fields. - return errors.New("repeated field " + p.OrigName + " has nil element") - } else if !state.shouldContinue(err, p) { - return err - } - } - } - } - - // Do oneof fields. - if prop.oneofMarshaler != nil { - m := structPointer_Interface(base, prop.stype).(Message) - if err := prop.oneofMarshaler(m, o); err == ErrNil { - return errOneofHasNil - } else if err != nil { - return err - } - } - - // Add unrecognized fields at the end. - if prop.unrecField.IsValid() { - v := *structPointer_Bytes(base, prop.unrecField) - if len(v) > 0 { - o.buf = append(o.buf, v...) 
- } - } - - return state.err -} - -func size_struct(prop *StructProperties, base structPointer) (n int) { - for _, i := range prop.order { - p := prop.Prop[i] - if p.size != nil { - n += p.size(p, base) - } - } - - // Add unrecognized fields at the end. - if prop.unrecField.IsValid() { - v := *structPointer_Bytes(base, prop.unrecField) - n += len(v) - } - - // Factor in any oneof fields. - if prop.oneofSizer != nil { - m := structPointer_Interface(base, prop.stype).(Message) - n += prop.oneofSizer(m) - } - - return -} - -var zeroes [20]byte // longer than any conceivable sizeVarint - -// Encode a struct, preceded by its encoded length (as a varint). -func (o *Buffer) enc_len_struct(prop *StructProperties, base structPointer, state *errorState) error { - return o.enc_len_thing(func() error { return o.enc_struct(prop, base) }, state) -} - -// Encode something, preceded by its encoded length (as a varint). -func (o *Buffer) enc_len_thing(enc func() error, state *errorState) error { - iLen := len(o.buf) - o.buf = append(o.buf, 0, 0, 0, 0) // reserve four bytes for length - iMsg := len(o.buf) - err := enc() - if err != nil && !state.shouldContinue(err, nil) { - return err - } - lMsg := len(o.buf) - iMsg - lLen := sizeVarint(uint64(lMsg)) - switch x := lLen - (iMsg - iLen); { - case x > 0: // actual length is x bytes larger than the space we reserved - // Move msg x bytes right. - o.buf = append(o.buf, zeroes[:x]...) - copy(o.buf[iMsg+x:], o.buf[iMsg:iMsg+lMsg]) - case x < 0: // actual length is x bytes smaller than the space we reserved - // Move msg x bytes left. - copy(o.buf[iMsg+x:], o.buf[iMsg:iMsg+lMsg]) - o.buf = o.buf[:len(o.buf)+x] // x is negative - } - // Encode the length in the reserved space. - o.buf = o.buf[:iLen] - o.EncodeVarint(uint64(lMsg)) - o.buf = o.buf[:len(o.buf)+lMsg] - return state.err -} - -// errorState maintains the first error that occurs and updates that error -// with additional context. -type errorState struct { - err error -} - -// shouldContinue reports whether encoding should continue upon encountering the -// given error. If the error is RequiredNotSetError, shouldContinue returns true -// and, if this is the first appearance of that error, remembers it for future -// reporting. -// -// If prop is not nil, it may update any error with additional context about the -// field with the error. -func (s *errorState) shouldContinue(err error, prop *Properties) bool { - // Ignore unset required fields. - reqNotSet, ok := err.(*RequiredNotSetError) - if !ok { - return false - } - if s.err == nil { - if prop != nil { - err = &RequiredNotSetError{prop.Name + "." + reqNotSet.field} - } - s.err = err - } - return true -} diff --git a/vendor/github.com/golang/protobuf/proto/equal.go b/vendor/github.com/golang/protobuf/proto/equal.go deleted file mode 100644 index f5db1de..0000000 --- a/vendor/github.com/golang/protobuf/proto/equal.go +++ /dev/null @@ -1,276 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2011 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. 
-// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// Protocol buffer comparison. - -package proto - -import ( - "bytes" - "log" - "reflect" - "strings" -) - -/* -Equal returns true iff protocol buffers a and b are equal. -The arguments must both be pointers to protocol buffer structs. - -Equality is defined in this way: - - Two messages are equal iff they are the same type, - corresponding fields are equal, unknown field sets - are equal, and extensions sets are equal. - - Two set scalar fields are equal iff their values are equal. - If the fields are of a floating-point type, remember that - NaN != x for all x, including NaN. If the message is defined - in a proto3 .proto file, fields are not "set"; specifically, - zero length proto3 "bytes" fields are equal (nil == {}). - - Two repeated fields are equal iff their lengths are the same, - and their corresponding elements are equal (a "bytes" field, - although represented by []byte, is not a repeated field) - - Two unset fields are equal. - - Two unknown field sets are equal if their current - encoded state is equal. - - Two extension sets are equal iff they have corresponding - elements that are pairwise equal. - - Every other combination of things are not equal. - -The return value is undefined if a and b are not protocol buffers. -*/ -func Equal(a, b Message) bool { - if a == nil || b == nil { - return a == b - } - v1, v2 := reflect.ValueOf(a), reflect.ValueOf(b) - if v1.Type() != v2.Type() { - return false - } - if v1.Kind() == reflect.Ptr { - if v1.IsNil() { - return v2.IsNil() - } - if v2.IsNil() { - return false - } - v1, v2 = v1.Elem(), v2.Elem() - } - if v1.Kind() != reflect.Struct { - return false - } - return equalStruct(v1, v2) -} - -// v1 and v2 are known to have the same type. 
-func equalStruct(v1, v2 reflect.Value) bool { - sprop := GetProperties(v1.Type()) - for i := 0; i < v1.NumField(); i++ { - f := v1.Type().Field(i) - if strings.HasPrefix(f.Name, "XXX_") { - continue - } - f1, f2 := v1.Field(i), v2.Field(i) - if f.Type.Kind() == reflect.Ptr { - if n1, n2 := f1.IsNil(), f2.IsNil(); n1 && n2 { - // both unset - continue - } else if n1 != n2 { - // set/unset mismatch - return false - } - b1, ok := f1.Interface().(raw) - if ok { - b2 := f2.Interface().(raw) - // RawMessage - if !bytes.Equal(b1.Bytes(), b2.Bytes()) { - return false - } - continue - } - f1, f2 = f1.Elem(), f2.Elem() - } - if !equalAny(f1, f2, sprop.Prop[i]) { - return false - } - } - - if em1 := v1.FieldByName("XXX_extensions"); em1.IsValid() { - em2 := v2.FieldByName("XXX_extensions") - if !equalExtensions(v1.Type(), em1.Interface().(map[int32]Extension), em2.Interface().(map[int32]Extension)) { - return false - } - } - - uf := v1.FieldByName("XXX_unrecognized") - if !uf.IsValid() { - return true - } - - u1 := uf.Bytes() - u2 := v2.FieldByName("XXX_unrecognized").Bytes() - if !bytes.Equal(u1, u2) { - return false - } - - return true -} - -// v1 and v2 are known to have the same type. -// prop may be nil. -func equalAny(v1, v2 reflect.Value, prop *Properties) bool { - if v1.Type() == protoMessageType { - m1, _ := v1.Interface().(Message) - m2, _ := v2.Interface().(Message) - return Equal(m1, m2) - } - switch v1.Kind() { - case reflect.Bool: - return v1.Bool() == v2.Bool() - case reflect.Float32, reflect.Float64: - return v1.Float() == v2.Float() - case reflect.Int32, reflect.Int64: - return v1.Int() == v2.Int() - case reflect.Interface: - // Probably a oneof field; compare the inner values. - n1, n2 := v1.IsNil(), v2.IsNil() - if n1 || n2 { - return n1 == n2 - } - e1, e2 := v1.Elem(), v2.Elem() - if e1.Type() != e2.Type() { - return false - } - return equalAny(e1, e2, nil) - case reflect.Map: - if v1.Len() != v2.Len() { - return false - } - for _, key := range v1.MapKeys() { - val2 := v2.MapIndex(key) - if !val2.IsValid() { - // This key was not found in the second map. - return false - } - if !equalAny(v1.MapIndex(key), val2, nil) { - return false - } - } - return true - case reflect.Ptr: - return equalAny(v1.Elem(), v2.Elem(), prop) - case reflect.Slice: - if v1.Type().Elem().Kind() == reflect.Uint8 { - // short circuit: []byte - - // Edge case: if this is in a proto3 message, a zero length - // bytes field is considered the zero value. - if prop != nil && prop.proto3 && v1.Len() == 0 && v2.Len() == 0 { - return true - } - if v1.IsNil() != v2.IsNil() { - return false - } - return bytes.Equal(v1.Interface().([]byte), v2.Interface().([]byte)) - } - - if v1.Len() != v2.Len() { - return false - } - for i := 0; i < v1.Len(); i++ { - if !equalAny(v1.Index(i), v2.Index(i), prop) { - return false - } - } - return true - case reflect.String: - return v1.Interface().(string) == v2.Interface().(string) - case reflect.Struct: - return equalStruct(v1, v2) - case reflect.Uint32, reflect.Uint64: - return v1.Uint() == v2.Uint() - } - - // unknown type, so not a protocol buffer - log.Printf("proto: don't know how to compare %v", v1) - return false -} - -// base is the struct type that the extensions are based on. -// em1 and em2 are extension maps. 
-func equalExtensions(base reflect.Type, em1, em2 map[int32]Extension) bool { - if len(em1) != len(em2) { - return false - } - - for extNum, e1 := range em1 { - e2, ok := em2[extNum] - if !ok { - return false - } - - m1, m2 := e1.value, e2.value - - if m1 != nil && m2 != nil { - // Both are unencoded. - if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) { - return false - } - continue - } - - // At least one is encoded. To do a semantically correct comparison - // we need to unmarshal them first. - var desc *ExtensionDesc - if m := extensionMaps[base]; m != nil { - desc = m[extNum] - } - if desc == nil { - log.Printf("proto: don't know how to compare extension %d of %v", extNum, base) - continue - } - var err error - if m1 == nil { - m1, err = decodeExtension(e1.enc, desc) - } - if m2 == nil && err == nil { - m2, err = decodeExtension(e2.enc, desc) - } - if err != nil { - // The encoded form is invalid. - log.Printf("proto: badly encoded extension %d of %v: %v", extNum, base, err) - return false - } - if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) { - return false - } - } - - return true -} diff --git a/vendor/github.com/golang/protobuf/proto/extensions.go b/vendor/github.com/golang/protobuf/proto/extensions.go deleted file mode 100644 index 054f4f1..0000000 --- a/vendor/github.com/golang/protobuf/proto/extensions.go +++ /dev/null @@ -1,399 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -/* - * Types and routines for supporting protocol buffer extensions. - */ - -import ( - "errors" - "fmt" - "reflect" - "strconv" - "sync" -) - -// ErrMissingExtension is the error returned by GetExtension if the named extension is not in the message. -var ErrMissingExtension = errors.New("proto: missing extension") - -// ExtensionRange represents a range of message extensions for a protocol buffer. -// Used in code generated by the protocol compiler. 
-type ExtensionRange struct { - Start, End int32 // both inclusive -} - -// extendableProto is an interface implemented by any protocol buffer that may be extended. -type extendableProto interface { - Message - ExtensionRangeArray() []ExtensionRange - ExtensionMap() map[int32]Extension -} - -var extendableProtoType = reflect.TypeOf((*extendableProto)(nil)).Elem() - -// ExtensionDesc represents an extension specification. -// Used in generated code from the protocol compiler. -type ExtensionDesc struct { - ExtendedType Message // nil pointer to the type that is being extended - ExtensionType interface{} // nil pointer to the extension type - Field int32 // field number - Name string // fully-qualified name of extension, for text formatting - Tag string // protobuf tag style -} - -func (ed *ExtensionDesc) repeated() bool { - t := reflect.TypeOf(ed.ExtensionType) - return t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 -} - -// Extension represents an extension in a message. -type Extension struct { - // When an extension is stored in a message using SetExtension - // only desc and value are set. When the message is marshaled - // enc will be set to the encoded form of the message. - // - // When a message is unmarshaled and contains extensions, each - // extension will have only enc set. When such an extension is - // accessed using GetExtension (or GetExtensions) desc and value - // will be set. - desc *ExtensionDesc - value interface{} - enc []byte -} - -// SetRawExtension is for testing only. -func SetRawExtension(base extendableProto, id int32, b []byte) { - base.ExtensionMap()[id] = Extension{enc: b} -} - -// isExtensionField returns true iff the given field number is in an extension range. -func isExtensionField(pb extendableProto, field int32) bool { - for _, er := range pb.ExtensionRangeArray() { - if er.Start <= field && field <= er.End { - return true - } - } - return false -} - -// checkExtensionTypes checks that the given extension is valid for pb. -func checkExtensionTypes(pb extendableProto, extension *ExtensionDesc) error { - // Check the extended type. - if a, b := reflect.TypeOf(pb), reflect.TypeOf(extension.ExtendedType); a != b { - return errors.New("proto: bad extended type; " + b.String() + " does not extend " + a.String()) - } - // Check the range. - if !isExtensionField(pb, extension.Field) { - return errors.New("proto: bad extension number; not in declared ranges") - } - return nil -} - -// extPropKey is sufficient to uniquely identify an extension. -type extPropKey struct { - base reflect.Type - field int32 -} - -var extProp = struct { - sync.RWMutex - m map[extPropKey]*Properties -}{ - m: make(map[extPropKey]*Properties), -} - -func extensionProperties(ed *ExtensionDesc) *Properties { - key := extPropKey{base: reflect.TypeOf(ed.ExtendedType), field: ed.Field} - - extProp.RLock() - if prop, ok := extProp.m[key]; ok { - extProp.RUnlock() - return prop - } - extProp.RUnlock() - - extProp.Lock() - defer extProp.Unlock() - // Check again. - if prop, ok := extProp.m[key]; ok { - return prop - } - - prop := new(Properties) - prop.Init(reflect.TypeOf(ed.ExtensionType), "unknown_name", ed.Tag, nil) - extProp.m[key] = prop - return prop -} - -// encodeExtensionMap encodes any unmarshaled (unencoded) extensions in m. -func encodeExtensionMap(m map[int32]Extension) error { - for k, e := range m { - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. 
- continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - - et := reflect.TypeOf(e.desc.ExtensionType) - props := extensionProperties(e.desc) - - p := NewBuffer(nil) - // If e.value has type T, the encoder expects a *struct{ X T }. - // Pass a *T with a zero field and hope it all works out. - x := reflect.New(et) - x.Elem().Set(reflect.ValueOf(e.value)) - if err := props.enc(p, props, toStructPointer(x)); err != nil { - return err - } - e.enc = p.buf - m[k] = e - } - return nil -} - -func sizeExtensionMap(m map[int32]Extension) (n int) { - for _, e := range m { - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - n += len(e.enc) - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - - et := reflect.TypeOf(e.desc.ExtensionType) - props := extensionProperties(e.desc) - - // If e.value has type T, the encoder expects a *struct{ X T }. - // Pass a *T with a zero field and hope it all works out. - x := reflect.New(et) - x.Elem().Set(reflect.ValueOf(e.value)) - n += props.size(props, toStructPointer(x)) - } - return -} - -// HasExtension returns whether the given extension is present in pb. -func HasExtension(pb extendableProto, extension *ExtensionDesc) bool { - // TODO: Check types, field numbers, etc.? - _, ok := pb.ExtensionMap()[extension.Field] - return ok -} - -// ClearExtension removes the given extension from pb. -func ClearExtension(pb extendableProto, extension *ExtensionDesc) { - // TODO: Check types, field numbers, etc.? - delete(pb.ExtensionMap(), extension.Field) -} - -// GetExtension parses and returns the given extension of pb. -// If the extension is not present and has no default value it returns ErrMissingExtension. -func GetExtension(pb extendableProto, extension *ExtensionDesc) (interface{}, error) { - if err := checkExtensionTypes(pb, extension); err != nil { - return nil, err - } - - emap := pb.ExtensionMap() - e, ok := emap[extension.Field] - if !ok { - // defaultExtensionValue returns the default value or - // ErrMissingExtension if there is no default. - return defaultExtensionValue(extension) - } - - if e.value != nil { - // Already decoded. Check the descriptor, though. - if e.desc != extension { - // This shouldn't happen. If it does, it means that - // GetExtension was called twice with two different - // descriptors with the same field number. - return nil, errors.New("proto: descriptor conflict") - } - return e.value, nil - } - - v, err := decodeExtension(e.enc, extension) - if err != nil { - return nil, err - } - - // Remember the decoded version and drop the encoded version. - // That way it is safe to mutate what we return. - e.value = v - e.desc = extension - e.enc = nil - emap[extension.Field] = e - return e.value, nil -} - -// defaultExtensionValue returns the default value for extension. -// If no default for an extension is defined ErrMissingExtension is returned. -func defaultExtensionValue(extension *ExtensionDesc) (interface{}, error) { - t := reflect.TypeOf(extension.ExtensionType) - props := extensionProperties(extension) - - sf, _, err := fieldDefault(t, props) - if err != nil { - return nil, err - } - - if sf == nil || sf.value == nil { - // There is no default value. 
- return nil, ErrMissingExtension - } - - if t.Kind() != reflect.Ptr { - // We do not need to return a Ptr, we can directly return sf.value. - return sf.value, nil - } - - // We need to return an interface{} that is a pointer to sf.value. - value := reflect.New(t).Elem() - value.Set(reflect.New(value.Type().Elem())) - if sf.kind == reflect.Int32 { - // We may have an int32 or an enum, but the underlying data is int32. - // Since we can't set an int32 into a non int32 reflect.value directly - // set it as a int32. - value.Elem().SetInt(int64(sf.value.(int32))) - } else { - value.Elem().Set(reflect.ValueOf(sf.value)) - } - return value.Interface(), nil -} - -// decodeExtension decodes an extension encoded in b. -func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) { - o := NewBuffer(b) - - t := reflect.TypeOf(extension.ExtensionType) - - props := extensionProperties(extension) - - // t is a pointer to a struct, pointer to basic type or a slice. - // Allocate a "field" to store the pointer/slice itself; the - // pointer/slice will be stored here. We pass - // the address of this field to props.dec. - // This passes a zero field and a *t and lets props.dec - // interpret it as a *struct{ x t }. - value := reflect.New(t).Elem() - - for { - // Discard wire type and field number varint. It isn't needed. - if _, err := o.DecodeVarint(); err != nil { - return nil, err - } - - if err := props.dec(o, props, toStructPointer(value.Addr())); err != nil { - return nil, err - } - - if o.index >= len(o.buf) { - break - } - } - return value.Interface(), nil -} - -// GetExtensions returns a slice of the extensions present in pb that are also listed in es. -// The returned slice has the same length as es; missing extensions will appear as nil elements. -func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, err error) { - epb, ok := pb.(extendableProto) - if !ok { - err = errors.New("proto: not an extendable proto") - return - } - extensions = make([]interface{}, len(es)) - for i, e := range es { - extensions[i], err = GetExtension(epb, e) - if err == ErrMissingExtension { - err = nil - } - if err != nil { - return - } - } - return -} - -// SetExtension sets the specified extension of pb to the specified value. -func SetExtension(pb extendableProto, extension *ExtensionDesc, value interface{}) error { - if err := checkExtensionTypes(pb, extension); err != nil { - return err - } - typ := reflect.TypeOf(extension.ExtensionType) - if typ != reflect.TypeOf(value) { - return errors.New("proto: bad extension value type") - } - // nil extension values need to be caught early, because the - // encoder can't distinguish an ErrNil due to a nil extension - // from an ErrNil due to a missing field. Extensions are - // always optional, so the encoder would just swallow the error - // and drop all the extensions from the encoded message. - if reflect.ValueOf(value).IsNil() { - return fmt.Errorf("proto: SetExtension called with nil value of type %T", value) - } - - pb.ExtensionMap()[extension.Field] = Extension{desc: extension, value: value} - return nil -} - -// A global registry of extensions. -// The generated code will register the generated descriptors by calling RegisterExtension. - -var extensionMaps = make(map[reflect.Type]map[int32]*ExtensionDesc) - -// RegisterExtension is called from the generated code. 
-func RegisterExtension(desc *ExtensionDesc) { - st := reflect.TypeOf(desc.ExtendedType).Elem() - m := extensionMaps[st] - if m == nil { - m = make(map[int32]*ExtensionDesc) - extensionMaps[st] = m - } - if _, ok := m[desc.Field]; ok { - panic("proto: duplicate extension registered: " + st.String() + " " + strconv.Itoa(int(desc.Field))) - } - m[desc.Field] = desc -} - -// RegisteredExtensions returns a map of the registered extensions of a -// protocol buffer struct, indexed by the extension number. -// The argument pb should be a nil pointer to the struct type. -func RegisteredExtensions(pb Message) map[int32]*ExtensionDesc { - return extensionMaps[reflect.TypeOf(pb).Elem()] -} diff --git a/vendor/github.com/golang/protobuf/proto/lib.go b/vendor/github.com/golang/protobuf/proto/lib.go deleted file mode 100644 index 0de8f8d..0000000 --- a/vendor/github.com/golang/protobuf/proto/lib.go +++ /dev/null @@ -1,894 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -/* -Package proto converts data structures to and from the wire format of -protocol buffers. It works in concert with the Go source code generated -for .proto files by the protocol compiler. - -A summary of the properties of the protocol buffer interface -for a protocol buffer variable v: - - - Names are turned from camel_case to CamelCase for export. - - There are no methods on v to set fields; just treat - them as structure fields. - - There are getters that return a field's value if set, - and return the field's default value if unset. - The getters work even if the receiver is a nil message. - - The zero value for a struct is its correct initialization state. - All desired fields must be set before marshaling. - - A Reset() method will restore a protobuf struct to its zero state. - - Non-repeated fields are pointers to the values; nil means unset. - That is, optional or required field int32 f becomes F *int32. 
- - Repeated fields are slices. - - Helper functions are available to aid the setting of fields. - msg.Foo = proto.String("hello") // set field - - Constants are defined to hold the default values of all fields that - have them. They have the form Default_StructName_FieldName. - Because the getter methods handle defaulted values, - direct use of these constants should be rare. - - Enums are given type names and maps from names to values. - Enum values are prefixed by the enclosing message's name, or by the - enum's type name if it is a top-level enum. Enum types have a String - method, and a Enum method to assist in message construction. - - Nested messages, groups and enums have type names prefixed with the name of - the surrounding message type. - - Extensions are given descriptor names that start with E_, - followed by an underscore-delimited list of the nested messages - that contain it (if any) followed by the CamelCased name of the - extension field itself. HasExtension, ClearExtension, GetExtension - and SetExtension are functions for manipulating extensions. - - Oneof field sets are given a single field in their message, - with distinguished wrapper types for each possible field value. - - Marshal and Unmarshal are functions to encode and decode the wire format. - -When the .proto file specifies `syntax="proto3"`, there are some differences: - - - Non-repeated fields of non-message type are values instead of pointers. - - Getters are only generated for message and oneof fields. - - Enum types do not get an Enum method. - -The simplest way to describe this is to see an example. -Given file test.proto, containing - - package example; - - enum FOO { X = 17; } - - message Test { - required string label = 1; - optional int32 type = 2 [default=77]; - repeated int64 reps = 3; - optional group OptionalGroup = 4 { - required string RequiredField = 5; - } - oneof union { - int32 number = 6; - string name = 7; - } - } - -The resulting file, test.pb.go, is: - - package example - - import proto "github.com/golang/protobuf/proto" - import math "math" - - type FOO int32 - const ( - FOO_X FOO = 17 - ) - var FOO_name = map[int32]string{ - 17: "X", - } - var FOO_value = map[string]int32{ - "X": 17, - } - - func (x FOO) Enum() *FOO { - p := new(FOO) - *p = x - return p - } - func (x FOO) String() string { - return proto.EnumName(FOO_name, int32(x)) - } - func (x *FOO) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(FOO_value, data) - if err != nil { - return err - } - *x = FOO(value) - return nil - } - - type Test struct { - Label *string `protobuf:"bytes,1,req,name=label" json:"label,omitempty"` - Type *int32 `protobuf:"varint,2,opt,name=type,def=77" json:"type,omitempty"` - Reps []int64 `protobuf:"varint,3,rep,name=reps" json:"reps,omitempty"` - Optionalgroup *Test_OptionalGroup `protobuf:"group,4,opt,name=OptionalGroup" json:"optionalgroup,omitempty"` - // Types that are valid to be assigned to Union: - // *Test_Number - // *Test_Name - Union isTest_Union `protobuf_oneof:"union"` - XXX_unrecognized []byte `json:"-"` - } - func (m *Test) Reset() { *m = Test{} } - func (m *Test) String() string { return proto.CompactTextString(m) } - func (*Test) ProtoMessage() {} - - type isTest_Union interface { - isTest_Union() - } - - type Test_Number struct { - Number int32 `protobuf:"varint,6,opt,name=number"` - } - type Test_Name struct { - Name string `protobuf:"bytes,7,opt,name=name"` - } - - func (*Test_Number) isTest_Union() {} - func (*Test_Name) isTest_Union() {} - - func (m 
*Test) GetUnion() isTest_Union { - if m != nil { - return m.Union - } - return nil - } - const Default_Test_Type int32 = 77 - - func (m *Test) GetLabel() string { - if m != nil && m.Label != nil { - return *m.Label - } - return "" - } - - func (m *Test) GetType() int32 { - if m != nil && m.Type != nil { - return *m.Type - } - return Default_Test_Type - } - - func (m *Test) GetOptionalgroup() *Test_OptionalGroup { - if m != nil { - return m.Optionalgroup - } - return nil - } - - type Test_OptionalGroup struct { - RequiredField *string `protobuf:"bytes,5,req" json:"RequiredField,omitempty"` - } - func (m *Test_OptionalGroup) Reset() { *m = Test_OptionalGroup{} } - func (m *Test_OptionalGroup) String() string { return proto.CompactTextString(m) } - - func (m *Test_OptionalGroup) GetRequiredField() string { - if m != nil && m.RequiredField != nil { - return *m.RequiredField - } - return "" - } - - func (m *Test) GetNumber() int32 { - if x, ok := m.GetUnion().(*Test_Number); ok { - return x.Number - } - return 0 - } - - func (m *Test) GetName() string { - if x, ok := m.GetUnion().(*Test_Name); ok { - return x.Name - } - return "" - } - - func init() { - proto.RegisterEnum("example.FOO", FOO_name, FOO_value) - } - -To create and play with a Test object: - - package main - - import ( - "log" - - "github.com/golang/protobuf/proto" - pb "./example.pb" - ) - - func main() { - test := &pb.Test{ - Label: proto.String("hello"), - Type: proto.Int32(17), - Reps: []int64{1, 2, 3}, - Optionalgroup: &pb.Test_OptionalGroup{ - RequiredField: proto.String("good bye"), - }, - Union: &pb.Test_Name{"fred"}, - } - data, err := proto.Marshal(test) - if err != nil { - log.Fatal("marshaling error: ", err) - } - newTest := &pb.Test{} - err = proto.Unmarshal(data, newTest) - if err != nil { - log.Fatal("unmarshaling error: ", err) - } - // Now test and newTest contain the same data. - if test.GetLabel() != newTest.GetLabel() { - log.Fatalf("data mismatch %q != %q", test.GetLabel(), newTest.GetLabel()) - } - // Use a type switch to determine which oneof was set. - switch u := test.Union.(type) { - case *pb.Test_Number: // u.Number contains the number. - case *pb.Test_Name: // u.Name contains the string. - } - // etc. - } -*/ -package proto - -import ( - "encoding/json" - "fmt" - "log" - "reflect" - "sort" - "strconv" - "sync" -) - -// Message is implemented by generated protocol buffer messages. -type Message interface { - Reset() - String() string - ProtoMessage() -} - -// Stats records allocation details about the protocol buffer encoders -// and decoders. Useful for tuning the library itself. -type Stats struct { - Emalloc uint64 // mallocs in encode - Dmalloc uint64 // mallocs in decode - Encode uint64 // number of encodes - Decode uint64 // number of decodes - Chit uint64 // number of cache hits - Cmiss uint64 // number of cache misses - Size uint64 // number of sizes -} - -// Set to true to enable stats collection. -const collectStats = false - -var stats Stats - -// GetStats returns a copy of the global Stats structure. -func GetStats() Stats { return stats } - -// A Buffer is a buffer manager for marshaling and unmarshaling -// protocol buffers. It may be reused between invocations to -// reduce memory usage. It is not necessary to use a Buffer; -// the global functions Marshal and Unmarshal create a -// temporary Buffer and are fine for most applications. -type Buffer struct { - buf []byte // encode/decode byte stream - index int // write point - - // pools of basic types to amortize allocation. 
- bools []bool - uint32s []uint32 - uint64s []uint64 - - // extra pools, only used with pointer_reflect.go - int32s []int32 - int64s []int64 - float32s []float32 - float64s []float64 -} - -// NewBuffer allocates a new Buffer and initializes its internal data to -// the contents of the argument slice. -func NewBuffer(e []byte) *Buffer { - return &Buffer{buf: e} -} - -// Reset resets the Buffer, ready for marshaling a new protocol buffer. -func (p *Buffer) Reset() { - p.buf = p.buf[0:0] // for reading/writing - p.index = 0 // for reading -} - -// SetBuf replaces the internal buffer with the slice, -// ready for unmarshaling the contents of the slice. -func (p *Buffer) SetBuf(s []byte) { - p.buf = s - p.index = 0 -} - -// Bytes returns the contents of the Buffer. -func (p *Buffer) Bytes() []byte { return p.buf } - -/* - * Helper routines for simplifying the creation of optional fields of basic type. - */ - -// Bool is a helper routine that allocates a new bool value -// to store v and returns a pointer to it. -func Bool(v bool) *bool { - return &v -} - -// Int32 is a helper routine that allocates a new int32 value -// to store v and returns a pointer to it. -func Int32(v int32) *int32 { - return &v -} - -// Int is a helper routine that allocates a new int32 value -// to store v and returns a pointer to it, but unlike Int32 -// its argument value is an int. -func Int(v int) *int32 { - p := new(int32) - *p = int32(v) - return p -} - -// Int64 is a helper routine that allocates a new int64 value -// to store v and returns a pointer to it. -func Int64(v int64) *int64 { - return &v -} - -// Float32 is a helper routine that allocates a new float32 value -// to store v and returns a pointer to it. -func Float32(v float32) *float32 { - return &v -} - -// Float64 is a helper routine that allocates a new float64 value -// to store v and returns a pointer to it. -func Float64(v float64) *float64 { - return &v -} - -// Uint32 is a helper routine that allocates a new uint32 value -// to store v and returns a pointer to it. -func Uint32(v uint32) *uint32 { - return &v -} - -// Uint64 is a helper routine that allocates a new uint64 value -// to store v and returns a pointer to it. -func Uint64(v uint64) *uint64 { - return &v -} - -// String is a helper routine that allocates a new string value -// to store v and returns a pointer to it. -func String(v string) *string { - return &v -} - -// EnumName is a helper function to simplify printing protocol buffer enums -// by name. Given an enum map and a value, it returns a useful string. -func EnumName(m map[int32]string, v int32) string { - s, ok := m[v] - if ok { - return s - } - return strconv.Itoa(int(v)) -} - -// UnmarshalJSONEnum is a helper function to simplify recovering enum int values -// from their JSON-encoded representation. Given a map from the enum's symbolic -// names to its int values, and a byte buffer containing the JSON-encoded -// value, it returns an int32 that can be cast to the enum type by the caller. -// -// The function can deal with both JSON representations, numeric and symbolic. -func UnmarshalJSONEnum(m map[string]int32, data []byte, enumName string) (int32, error) { - if data[0] == '"' { - // New style: enums are strings. - var repr string - if err := json.Unmarshal(data, &repr); err != nil { - return -1, err - } - val, ok := m[repr] - if !ok { - return 0, fmt.Errorf("unrecognized enum %s value %q", enumName, repr) - } - return val, nil - } - // Old style: enums are ints. 
- var val int32 - if err := json.Unmarshal(data, &val); err != nil { - return 0, fmt.Errorf("cannot unmarshal %#q into enum %s", data, enumName) - } - return val, nil -} - -// DebugPrint dumps the encoded data in b in a debugging format with a header -// including the string s. Used in testing but made available for general debugging. -func (p *Buffer) DebugPrint(s string, b []byte) { - var u uint64 - - obuf := p.buf - index := p.index - p.buf = b - p.index = 0 - depth := 0 - - fmt.Printf("\n--- %s ---\n", s) - -out: - for { - for i := 0; i < depth; i++ { - fmt.Print(" ") - } - - index := p.index - if index == len(p.buf) { - break - } - - op, err := p.DecodeVarint() - if err != nil { - fmt.Printf("%3d: fetching op err %v\n", index, err) - break out - } - tag := op >> 3 - wire := op & 7 - - switch wire { - default: - fmt.Printf("%3d: t=%3d unknown wire=%d\n", - index, tag, wire) - break out - - case WireBytes: - var r []byte - - r, err = p.DecodeRawBytes(false) - if err != nil { - break out - } - fmt.Printf("%3d: t=%3d bytes [%d]", index, tag, len(r)) - if len(r) <= 6 { - for i := 0; i < len(r); i++ { - fmt.Printf(" %.2x", r[i]) - } - } else { - for i := 0; i < 3; i++ { - fmt.Printf(" %.2x", r[i]) - } - fmt.Printf(" ..") - for i := len(r) - 3; i < len(r); i++ { - fmt.Printf(" %.2x", r[i]) - } - } - fmt.Printf("\n") - - case WireFixed32: - u, err = p.DecodeFixed32() - if err != nil { - fmt.Printf("%3d: t=%3d fix32 err %v\n", index, tag, err) - break out - } - fmt.Printf("%3d: t=%3d fix32 %d\n", index, tag, u) - - case WireFixed64: - u, err = p.DecodeFixed64() - if err != nil { - fmt.Printf("%3d: t=%3d fix64 err %v\n", index, tag, err) - break out - } - fmt.Printf("%3d: t=%3d fix64 %d\n", index, tag, u) - - case WireVarint: - u, err = p.DecodeVarint() - if err != nil { - fmt.Printf("%3d: t=%3d varint err %v\n", index, tag, err) - break out - } - fmt.Printf("%3d: t=%3d varint %d\n", index, tag, u) - - case WireStartGroup: - fmt.Printf("%3d: t=%3d start\n", index, tag) - depth++ - - case WireEndGroup: - depth-- - fmt.Printf("%3d: t=%3d end\n", index, tag) - } - } - - if depth != 0 { - fmt.Printf("%3d: start-end not balanced %d\n", p.index, depth) - } - fmt.Printf("\n") - - p.buf = obuf - p.index = index -} - -// SetDefaults sets unset protocol buffer fields to their default values. -// It only modifies fields that are both unset and have defined defaults. -// It recursively sets default values in any non-nil sub-messages. -func SetDefaults(pb Message) { - setDefaults(reflect.ValueOf(pb), true, false) -} - -// v is a pointer to a struct. -func setDefaults(v reflect.Value, recur, zeros bool) { - v = v.Elem() - - defaultMu.RLock() - dm, ok := defaults[v.Type()] - defaultMu.RUnlock() - if !ok { - dm = buildDefaultMessage(v.Type()) - defaultMu.Lock() - defaults[v.Type()] = dm - defaultMu.Unlock() - } - - for _, sf := range dm.scalars { - f := v.Field(sf.index) - if !f.IsNil() { - // field already set - continue - } - dv := sf.value - if dv == nil && !zeros { - // no explicit default, and don't want to set zeros - continue - } - fptr := f.Addr().Interface() // **T - // TODO: Consider batching the allocations we do here. 
- switch sf.kind { - case reflect.Bool: - b := new(bool) - if dv != nil { - *b = dv.(bool) - } - *(fptr.(**bool)) = b - case reflect.Float32: - f := new(float32) - if dv != nil { - *f = dv.(float32) - } - *(fptr.(**float32)) = f - case reflect.Float64: - f := new(float64) - if dv != nil { - *f = dv.(float64) - } - *(fptr.(**float64)) = f - case reflect.Int32: - // might be an enum - if ft := f.Type(); ft != int32PtrType { - // enum - f.Set(reflect.New(ft.Elem())) - if dv != nil { - f.Elem().SetInt(int64(dv.(int32))) - } - } else { - // int32 field - i := new(int32) - if dv != nil { - *i = dv.(int32) - } - *(fptr.(**int32)) = i - } - case reflect.Int64: - i := new(int64) - if dv != nil { - *i = dv.(int64) - } - *(fptr.(**int64)) = i - case reflect.String: - s := new(string) - if dv != nil { - *s = dv.(string) - } - *(fptr.(**string)) = s - case reflect.Uint8: - // exceptional case: []byte - var b []byte - if dv != nil { - db := dv.([]byte) - b = make([]byte, len(db)) - copy(b, db) - } else { - b = []byte{} - } - *(fptr.(*[]byte)) = b - case reflect.Uint32: - u := new(uint32) - if dv != nil { - *u = dv.(uint32) - } - *(fptr.(**uint32)) = u - case reflect.Uint64: - u := new(uint64) - if dv != nil { - *u = dv.(uint64) - } - *(fptr.(**uint64)) = u - default: - log.Printf("proto: can't set default for field %v (sf.kind=%v)", f, sf.kind) - } - } - - for _, ni := range dm.nested { - f := v.Field(ni) - // f is *T or []*T or map[T]*T - switch f.Kind() { - case reflect.Ptr: - if f.IsNil() { - continue - } - setDefaults(f, recur, zeros) - - case reflect.Slice: - for i := 0; i < f.Len(); i++ { - e := f.Index(i) - if e.IsNil() { - continue - } - setDefaults(e, recur, zeros) - } - - case reflect.Map: - for _, k := range f.MapKeys() { - e := f.MapIndex(k) - if e.IsNil() { - continue - } - setDefaults(e, recur, zeros) - } - } - } -} - -var ( - // defaults maps a protocol buffer struct type to a slice of the fields, - // with its scalar fields set to their proto-declared non-zero default values. - defaultMu sync.RWMutex - defaults = make(map[reflect.Type]defaultMessage) - - int32PtrType = reflect.TypeOf((*int32)(nil)) -) - -// defaultMessage represents information about the default values of a message. -type defaultMessage struct { - scalars []scalarField - nested []int // struct field index of nested messages -} - -type scalarField struct { - index int // struct field index - kind reflect.Kind // element type (the T in *T or []T) - value interface{} // the proto-declared default value, or nil -} - -// t is a struct type. -func buildDefaultMessage(t reflect.Type) (dm defaultMessage) { - sprop := GetProperties(t) - for _, prop := range sprop.Prop { - fi, ok := sprop.decoderTags.get(prop.Tag) - if !ok { - // XXX_unrecognized - continue - } - ft := t.Field(fi).Type - - sf, nested, err := fieldDefault(ft, prop) - switch { - case err != nil: - log.Print(err) - case nested: - dm.nested = append(dm.nested, fi) - case sf != nil: - sf.index = fi - dm.scalars = append(dm.scalars, *sf) - } - } - - return dm -} - -// fieldDefault returns the scalarField for field type ft. -// sf will be nil if the field can not have a default. -// nestedMessage will be true if this is a nested message. -// Note that sf.index is not set on return. 
-func fieldDefault(ft reflect.Type, prop *Properties) (sf *scalarField, nestedMessage bool, err error) { - var canHaveDefault bool - switch ft.Kind() { - case reflect.Ptr: - if ft.Elem().Kind() == reflect.Struct { - nestedMessage = true - } else { - canHaveDefault = true // proto2 scalar field - } - - case reflect.Slice: - switch ft.Elem().Kind() { - case reflect.Ptr: - nestedMessage = true // repeated message - case reflect.Uint8: - canHaveDefault = true // bytes field - } - - case reflect.Map: - if ft.Elem().Kind() == reflect.Ptr { - nestedMessage = true // map with message values - } - } - - if !canHaveDefault { - if nestedMessage { - return nil, true, nil - } - return nil, false, nil - } - - // We now know that ft is a pointer or slice. - sf = &scalarField{kind: ft.Elem().Kind()} - - // scalar fields without defaults - if !prop.HasDefault { - return sf, false, nil - } - - // a scalar field: either *T or []byte - switch ft.Elem().Kind() { - case reflect.Bool: - x, err := strconv.ParseBool(prop.Default) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default bool %q: %v", prop.Default, err) - } - sf.value = x - case reflect.Float32: - x, err := strconv.ParseFloat(prop.Default, 32) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default float32 %q: %v", prop.Default, err) - } - sf.value = float32(x) - case reflect.Float64: - x, err := strconv.ParseFloat(prop.Default, 64) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default float64 %q: %v", prop.Default, err) - } - sf.value = x - case reflect.Int32: - x, err := strconv.ParseInt(prop.Default, 10, 32) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default int32 %q: %v", prop.Default, err) - } - sf.value = int32(x) - case reflect.Int64: - x, err := strconv.ParseInt(prop.Default, 10, 64) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default int64 %q: %v", prop.Default, err) - } - sf.value = x - case reflect.String: - sf.value = prop.Default - case reflect.Uint8: - // []byte (not *uint8) - sf.value = []byte(prop.Default) - case reflect.Uint32: - x, err := strconv.ParseUint(prop.Default, 10, 32) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default uint32 %q: %v", prop.Default, err) - } - sf.value = uint32(x) - case reflect.Uint64: - x, err := strconv.ParseUint(prop.Default, 10, 64) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default uint64 %q: %v", prop.Default, err) - } - sf.value = x - default: - return nil, false, fmt.Errorf("proto: unhandled def kind %v", ft.Elem().Kind()) - } - - return sf, false, nil -} - -// Map fields may have key types of non-float scalars, strings and enums. -// The easiest way to sort them in some deterministic order is to use fmt. -// If this turns out to be inefficient we can always consider other options, -// such as doing a Schwartzian transform. - -func mapKeys(vs []reflect.Value) sort.Interface { - s := mapKeySorter{ - vs: vs, - // default Less function: textual comparison - less: func(a, b reflect.Value) bool { - return fmt.Sprint(a.Interface()) < fmt.Sprint(b.Interface()) - }, - } - - // Type specialization per https://developers.google.com/protocol-buffers/docs/proto#maps; - // numeric keys are sorted numerically. 
- if len(vs) == 0 { - return s - } - switch vs[0].Kind() { - case reflect.Int32, reflect.Int64: - s.less = func(a, b reflect.Value) bool { return a.Int() < b.Int() } - case reflect.Uint32, reflect.Uint64: - s.less = func(a, b reflect.Value) bool { return a.Uint() < b.Uint() } - } - - return s -} - -type mapKeySorter struct { - vs []reflect.Value - less func(a, b reflect.Value) bool -} - -func (s mapKeySorter) Len() int { return len(s.vs) } -func (s mapKeySorter) Swap(i, j int) { s.vs[i], s.vs[j] = s.vs[j], s.vs[i] } -func (s mapKeySorter) Less(i, j int) bool { - return s.less(s.vs[i], s.vs[j]) -} - -// isProto3Zero reports whether v is a zero proto3 value. -func isProto3Zero(v reflect.Value) bool { - switch v.Kind() { - case reflect.Bool: - return !v.Bool() - case reflect.Int32, reflect.Int64: - return v.Int() == 0 - case reflect.Uint32, reflect.Uint64: - return v.Uint() == 0 - case reflect.Float32, reflect.Float64: - return v.Float() == 0 - case reflect.String: - return v.String() == "" - } - return false -} - -// ProtoPackageIsVersion1 is referenced from generated protocol buffer files -// to assert that that code is compatible with this version of the proto package. -const ProtoPackageIsVersion1 = true diff --git a/vendor/github.com/golang/protobuf/proto/message_set.go b/vendor/github.com/golang/protobuf/proto/message_set.go deleted file mode 100644 index e25e01e..0000000 --- a/vendor/github.com/golang/protobuf/proto/message_set.go +++ /dev/null @@ -1,280 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -/* - * Support for message sets. - */ - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - "reflect" - "sort" -) - -// errNoMessageTypeID occurs when a protocol buffer does not have a message type ID. -// A message type ID is required for storing a protocol buffer in a message set. 
-var errNoMessageTypeID = errors.New("proto does not have a message type ID") - -// The first two types (_MessageSet_Item and messageSet) -// model what the protocol compiler produces for the following protocol message: -// message MessageSet { -// repeated group Item = 1 { -// required int32 type_id = 2; -// required string message = 3; -// }; -// } -// That is the MessageSet wire format. We can't use a proto to generate these -// because that would introduce a circular dependency between it and this package. - -type _MessageSet_Item struct { - TypeId *int32 `protobuf:"varint,2,req,name=type_id"` - Message []byte `protobuf:"bytes,3,req,name=message"` -} - -type messageSet struct { - Item []*_MessageSet_Item `protobuf:"group,1,rep"` - XXX_unrecognized []byte - // TODO: caching? -} - -// Make sure messageSet is a Message. -var _ Message = (*messageSet)(nil) - -// messageTypeIder is an interface satisfied by a protocol buffer type -// that may be stored in a MessageSet. -type messageTypeIder interface { - MessageTypeId() int32 -} - -func (ms *messageSet) find(pb Message) *_MessageSet_Item { - mti, ok := pb.(messageTypeIder) - if !ok { - return nil - } - id := mti.MessageTypeId() - for _, item := range ms.Item { - if *item.TypeId == id { - return item - } - } - return nil -} - -func (ms *messageSet) Has(pb Message) bool { - if ms.find(pb) != nil { - return true - } - return false -} - -func (ms *messageSet) Unmarshal(pb Message) error { - if item := ms.find(pb); item != nil { - return Unmarshal(item.Message, pb) - } - if _, ok := pb.(messageTypeIder); !ok { - return errNoMessageTypeID - } - return nil // TODO: return error instead? -} - -func (ms *messageSet) Marshal(pb Message) error { - msg, err := Marshal(pb) - if err != nil { - return err - } - if item := ms.find(pb); item != nil { - // reuse existing item - item.Message = msg - return nil - } - - mti, ok := pb.(messageTypeIder) - if !ok { - return errNoMessageTypeID - } - - mtid := mti.MessageTypeId() - ms.Item = append(ms.Item, &_MessageSet_Item{ - TypeId: &mtid, - Message: msg, - }) - return nil -} - -func (ms *messageSet) Reset() { *ms = messageSet{} } -func (ms *messageSet) String() string { return CompactTextString(ms) } -func (*messageSet) ProtoMessage() {} - -// Support for the message_set_wire_format message option. - -func skipVarint(buf []byte) []byte { - i := 0 - for ; buf[i]&0x80 != 0; i++ { - } - return buf[i+1:] -} - -// MarshalMessageSet encodes the extension map represented by m in the message set wire format. -// It is called by generated Marshal methods on protocol buffer messages with the message_set_wire_format option. -func MarshalMessageSet(m map[int32]Extension) ([]byte, error) { - if err := encodeExtensionMap(m); err != nil { - return nil, err - } - - // Sort extension IDs to provide a deterministic encoding. - // See also enc_map in encode.go. - ids := make([]int, 0, len(m)) - for id := range m { - ids = append(ids, int(id)) - } - sort.Ints(ids) - - ms := &messageSet{Item: make([]*_MessageSet_Item, 0, len(m))} - for _, id := range ids { - e := m[int32(id)] - // Remove the wire type and field number varint, as well as the length varint. - msg := skipVarint(skipVarint(e.enc)) - - ms.Item = append(ms.Item, &_MessageSet_Item{ - TypeId: Int32(int32(id)), - Message: msg, - }) - } - return Marshal(ms) -} - -// UnmarshalMessageSet decodes the extension map encoded in buf in the message set wire format. -// It is called by generated Unmarshal methods on protocol buffer messages with the message_set_wire_format option. 
-func UnmarshalMessageSet(buf []byte, m map[int32]Extension) error { - ms := new(messageSet) - if err := Unmarshal(buf, ms); err != nil { - return err - } - for _, item := range ms.Item { - id := *item.TypeId - msg := item.Message - - // Restore wire type and field number varint, plus length varint. - // Be careful to preserve duplicate items. - b := EncodeVarint(uint64(id)<<3 | WireBytes) - if ext, ok := m[id]; ok { - // Existing data; rip off the tag and length varint - // so we join the new data correctly. - // We can assume that ext.enc is set because we are unmarshaling. - o := ext.enc[len(b):] // skip wire type and field number - _, n := DecodeVarint(o) // calculate length of length varint - o = o[n:] // skip length varint - msg = append(o, msg...) // join old data and new data - } - b = append(b, EncodeVarint(uint64(len(msg)))...) - b = append(b, msg...) - - m[id] = Extension{enc: b} - } - return nil -} - -// MarshalMessageSetJSON encodes the extension map represented by m in JSON format. -// It is called by generated MarshalJSON methods on protocol buffer messages with the message_set_wire_format option. -func MarshalMessageSetJSON(m map[int32]Extension) ([]byte, error) { - var b bytes.Buffer - b.WriteByte('{') - - // Process the map in key order for deterministic output. - ids := make([]int32, 0, len(m)) - for id := range m { - ids = append(ids, id) - } - sort.Sort(int32Slice(ids)) // int32Slice defined in text.go - - for i, id := range ids { - ext := m[id] - if i > 0 { - b.WriteByte(',') - } - - msd, ok := messageSetMap[id] - if !ok { - // Unknown type; we can't render it, so skip it. - continue - } - fmt.Fprintf(&b, `"[%s]":`, msd.name) - - x := ext.value - if x == nil { - x = reflect.New(msd.t.Elem()).Interface() - if err := Unmarshal(ext.enc, x.(Message)); err != nil { - return nil, err - } - } - d, err := json.Marshal(x) - if err != nil { - return nil, err - } - b.Write(d) - } - b.WriteByte('}') - return b.Bytes(), nil -} - -// UnmarshalMessageSetJSON decodes the extension map encoded in buf in JSON format. -// It is called by generated UnmarshalJSON methods on protocol buffer messages with the message_set_wire_format option. -func UnmarshalMessageSetJSON(buf []byte, m map[int32]Extension) error { - // Common-case fast path. - if len(buf) == 0 || bytes.Equal(buf, []byte("{}")) { - return nil - } - - // This is fairly tricky, and it's not clear that it is needed. - return errors.New("TODO: UnmarshalMessageSetJSON not yet implemented") -} - -// A global registry of types that can be used in a MessageSet. - -var messageSetMap = make(map[int32]messageSetDesc) - -type messageSetDesc struct { - t reflect.Type // pointer to struct - name string -} - -// RegisterMessageSetType is called from the generated code. -func RegisterMessageSetType(m Message, fieldNum int32, name string) { - messageSetMap[fieldNum] = messageSetDesc{ - t: reflect.TypeOf(m), - name: name, - } -} diff --git a/vendor/github.com/golang/protobuf/proto/pointer_reflect.go b/vendor/github.com/golang/protobuf/proto/pointer_reflect.go deleted file mode 100644 index 9899141..0000000 --- a/vendor/github.com/golang/protobuf/proto/pointer_reflect.go +++ /dev/null @@ -1,479 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2012 The Go Authors. All rights reserved. 
-// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// +build appengine js - -// This file contains an implementation of proto field accesses using package reflect. -// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can -// be used on App Engine. - -package proto - -import ( - "math" - "reflect" -) - -// A structPointer is a pointer to a struct. -type structPointer struct { - v reflect.Value -} - -// toStructPointer returns a structPointer equivalent to the given reflect value. -// The reflect value must itself be a pointer to a struct. -func toStructPointer(v reflect.Value) structPointer { - return structPointer{v} -} - -// IsNil reports whether p is nil. -func structPointer_IsNil(p structPointer) bool { - return p.v.IsNil() -} - -// Interface returns the struct pointer as an interface value. -func structPointer_Interface(p structPointer, _ reflect.Type) interface{} { - return p.v.Interface() -} - -// A field identifies a field in a struct, accessible from a structPointer. -// In this implementation, a field is identified by the sequence of field indices -// passed to reflect's FieldByIndex. -type field []int - -// toField returns a field equivalent to the given reflect field. -func toField(f *reflect.StructField) field { - return f.Index -} - -// invalidField is an invalid field identifier. -var invalidField = field(nil) - -// IsValid reports whether the field identifier is valid. -func (f field) IsValid() bool { return f != nil } - -// field returns the given field in the struct as a reflect value. -func structPointer_field(p structPointer, f field) reflect.Value { - // Special case: an extension map entry with a value of type T - // passes a *T to the struct-handling code with a zero field, - // expecting that it will be treated as equivalent to *struct{ X T }, - // which has the same memory layout. We have to handle that case - // specially, because reflect will panic if we call FieldByIndex on a - // non-struct. 
- if f == nil { - return p.v.Elem() - } - - return p.v.Elem().FieldByIndex(f) -} - -// ifield returns the given field in the struct as an interface value. -func structPointer_ifield(p structPointer, f field) interface{} { - return structPointer_field(p, f).Addr().Interface() -} - -// Bytes returns the address of a []byte field in the struct. -func structPointer_Bytes(p structPointer, f field) *[]byte { - return structPointer_ifield(p, f).(*[]byte) -} - -// BytesSlice returns the address of a [][]byte field in the struct. -func structPointer_BytesSlice(p structPointer, f field) *[][]byte { - return structPointer_ifield(p, f).(*[][]byte) -} - -// Bool returns the address of a *bool field in the struct. -func structPointer_Bool(p structPointer, f field) **bool { - return structPointer_ifield(p, f).(**bool) -} - -// BoolVal returns the address of a bool field in the struct. -func structPointer_BoolVal(p structPointer, f field) *bool { - return structPointer_ifield(p, f).(*bool) -} - -// BoolSlice returns the address of a []bool field in the struct. -func structPointer_BoolSlice(p structPointer, f field) *[]bool { - return structPointer_ifield(p, f).(*[]bool) -} - -// String returns the address of a *string field in the struct. -func structPointer_String(p structPointer, f field) **string { - return structPointer_ifield(p, f).(**string) -} - -// StringVal returns the address of a string field in the struct. -func structPointer_StringVal(p structPointer, f field) *string { - return structPointer_ifield(p, f).(*string) -} - -// StringSlice returns the address of a []string field in the struct. -func structPointer_StringSlice(p structPointer, f field) *[]string { - return structPointer_ifield(p, f).(*[]string) -} - -// ExtMap returns the address of an extension map field in the struct. -func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension { - return structPointer_ifield(p, f).(*map[int32]Extension) -} - -// NewAt returns the reflect.Value for a pointer to a field in the struct. -func structPointer_NewAt(p structPointer, f field, typ reflect.Type) reflect.Value { - return structPointer_field(p, f).Addr() -} - -// SetStructPointer writes a *struct field in the struct. -func structPointer_SetStructPointer(p structPointer, f field, q structPointer) { - structPointer_field(p, f).Set(q.v) -} - -// GetStructPointer reads a *struct field in the struct. -func structPointer_GetStructPointer(p structPointer, f field) structPointer { - return structPointer{structPointer_field(p, f)} -} - -// StructPointerSlice the address of a []*struct field in the struct. -func structPointer_StructPointerSlice(p structPointer, f field) structPointerSlice { - return structPointerSlice{structPointer_field(p, f)} -} - -// A structPointerSlice represents the address of a slice of pointers to structs -// (themselves messages or groups). That is, v.Type() is *[]*struct{...}. -type structPointerSlice struct { - v reflect.Value -} - -func (p structPointerSlice) Len() int { return p.v.Len() } -func (p structPointerSlice) Index(i int) structPointer { return structPointer{p.v.Index(i)} } -func (p structPointerSlice) Append(q structPointer) { - p.v.Set(reflect.Append(p.v, q.v)) -} - -var ( - int32Type = reflect.TypeOf(int32(0)) - uint32Type = reflect.TypeOf(uint32(0)) - float32Type = reflect.TypeOf(float32(0)) - int64Type = reflect.TypeOf(int64(0)) - uint64Type = reflect.TypeOf(uint64(0)) - float64Type = reflect.TypeOf(float64(0)) -) - -// A word32 represents a field of type *int32, *uint32, *float32, or *enum. 
-// That is, v.Type() is *int32, *uint32, *float32, or *enum and v is assignable. -type word32 struct { - v reflect.Value -} - -// IsNil reports whether p is nil. -func word32_IsNil(p word32) bool { - return p.v.IsNil() -} - -// Set sets p to point at a newly allocated word with bits set to x. -func word32_Set(p word32, o *Buffer, x uint32) { - t := p.v.Type().Elem() - switch t { - case int32Type: - if len(o.int32s) == 0 { - o.int32s = make([]int32, uint32PoolSize) - } - o.int32s[0] = int32(x) - p.v.Set(reflect.ValueOf(&o.int32s[0])) - o.int32s = o.int32s[1:] - return - case uint32Type: - if len(o.uint32s) == 0 { - o.uint32s = make([]uint32, uint32PoolSize) - } - o.uint32s[0] = x - p.v.Set(reflect.ValueOf(&o.uint32s[0])) - o.uint32s = o.uint32s[1:] - return - case float32Type: - if len(o.float32s) == 0 { - o.float32s = make([]float32, uint32PoolSize) - } - o.float32s[0] = math.Float32frombits(x) - p.v.Set(reflect.ValueOf(&o.float32s[0])) - o.float32s = o.float32s[1:] - return - } - - // must be enum - p.v.Set(reflect.New(t)) - p.v.Elem().SetInt(int64(int32(x))) -} - -// Get gets the bits pointed at by p, as a uint32. -func word32_Get(p word32) uint32 { - elem := p.v.Elem() - switch elem.Kind() { - case reflect.Int32: - return uint32(elem.Int()) - case reflect.Uint32: - return uint32(elem.Uint()) - case reflect.Float32: - return math.Float32bits(float32(elem.Float())) - } - panic("unreachable") -} - -// Word32 returns a reference to a *int32, *uint32, *float32, or *enum field in the struct. -func structPointer_Word32(p structPointer, f field) word32 { - return word32{structPointer_field(p, f)} -} - -// A word32Val represents a field of type int32, uint32, float32, or enum. -// That is, v.Type() is int32, uint32, float32, or enum and v is assignable. -type word32Val struct { - v reflect.Value -} - -// Set sets *p to x. -func word32Val_Set(p word32Val, x uint32) { - switch p.v.Type() { - case int32Type: - p.v.SetInt(int64(x)) - return - case uint32Type: - p.v.SetUint(uint64(x)) - return - case float32Type: - p.v.SetFloat(float64(math.Float32frombits(x))) - return - } - - // must be enum - p.v.SetInt(int64(int32(x))) -} - -// Get gets the bits pointed at by p, as a uint32. -func word32Val_Get(p word32Val) uint32 { - elem := p.v - switch elem.Kind() { - case reflect.Int32: - return uint32(elem.Int()) - case reflect.Uint32: - return uint32(elem.Uint()) - case reflect.Float32: - return math.Float32bits(float32(elem.Float())) - } - panic("unreachable") -} - -// Word32Val returns a reference to a int32, uint32, float32, or enum field in the struct. -func structPointer_Word32Val(p structPointer, f field) word32Val { - return word32Val{structPointer_field(p, f)} -} - -// A word32Slice is a slice of 32-bit values. -// That is, v.Type() is []int32, []uint32, []float32, or []enum. 
-type word32Slice struct { - v reflect.Value -} - -func (p word32Slice) Append(x uint32) { - n, m := p.v.Len(), p.v.Cap() - if n < m { - p.v.SetLen(n + 1) - } else { - t := p.v.Type().Elem() - p.v.Set(reflect.Append(p.v, reflect.Zero(t))) - } - elem := p.v.Index(n) - switch elem.Kind() { - case reflect.Int32: - elem.SetInt(int64(int32(x))) - case reflect.Uint32: - elem.SetUint(uint64(x)) - case reflect.Float32: - elem.SetFloat(float64(math.Float32frombits(x))) - } -} - -func (p word32Slice) Len() int { - return p.v.Len() -} - -func (p word32Slice) Index(i int) uint32 { - elem := p.v.Index(i) - switch elem.Kind() { - case reflect.Int32: - return uint32(elem.Int()) - case reflect.Uint32: - return uint32(elem.Uint()) - case reflect.Float32: - return math.Float32bits(float32(elem.Float())) - } - panic("unreachable") -} - -// Word32Slice returns a reference to a []int32, []uint32, []float32, or []enum field in the struct. -func structPointer_Word32Slice(p structPointer, f field) word32Slice { - return word32Slice{structPointer_field(p, f)} -} - -// word64 is like word32 but for 64-bit values. -type word64 struct { - v reflect.Value -} - -func word64_Set(p word64, o *Buffer, x uint64) { - t := p.v.Type().Elem() - switch t { - case int64Type: - if len(o.int64s) == 0 { - o.int64s = make([]int64, uint64PoolSize) - } - o.int64s[0] = int64(x) - p.v.Set(reflect.ValueOf(&o.int64s[0])) - o.int64s = o.int64s[1:] - return - case uint64Type: - if len(o.uint64s) == 0 { - o.uint64s = make([]uint64, uint64PoolSize) - } - o.uint64s[0] = x - p.v.Set(reflect.ValueOf(&o.uint64s[0])) - o.uint64s = o.uint64s[1:] - return - case float64Type: - if len(o.float64s) == 0 { - o.float64s = make([]float64, uint64PoolSize) - } - o.float64s[0] = math.Float64frombits(x) - p.v.Set(reflect.ValueOf(&o.float64s[0])) - o.float64s = o.float64s[1:] - return - } - panic("unreachable") -} - -func word64_IsNil(p word64) bool { - return p.v.IsNil() -} - -func word64_Get(p word64) uint64 { - elem := p.v.Elem() - switch elem.Kind() { - case reflect.Int64: - return uint64(elem.Int()) - case reflect.Uint64: - return elem.Uint() - case reflect.Float64: - return math.Float64bits(elem.Float()) - } - panic("unreachable") -} - -func structPointer_Word64(p structPointer, f field) word64 { - return word64{structPointer_field(p, f)} -} - -// word64Val is like word32Val but for 64-bit values. 
-type word64Val struct { - v reflect.Value -} - -func word64Val_Set(p word64Val, o *Buffer, x uint64) { - switch p.v.Type() { - case int64Type: - p.v.SetInt(int64(x)) - return - case uint64Type: - p.v.SetUint(x) - return - case float64Type: - p.v.SetFloat(math.Float64frombits(x)) - return - } - panic("unreachable") -} - -func word64Val_Get(p word64Val) uint64 { - elem := p.v - switch elem.Kind() { - case reflect.Int64: - return uint64(elem.Int()) - case reflect.Uint64: - return elem.Uint() - case reflect.Float64: - return math.Float64bits(elem.Float()) - } - panic("unreachable") -} - -func structPointer_Word64Val(p structPointer, f field) word64Val { - return word64Val{structPointer_field(p, f)} -} - -type word64Slice struct { - v reflect.Value -} - -func (p word64Slice) Append(x uint64) { - n, m := p.v.Len(), p.v.Cap() - if n < m { - p.v.SetLen(n + 1) - } else { - t := p.v.Type().Elem() - p.v.Set(reflect.Append(p.v, reflect.Zero(t))) - } - elem := p.v.Index(n) - switch elem.Kind() { - case reflect.Int64: - elem.SetInt(int64(int64(x))) - case reflect.Uint64: - elem.SetUint(uint64(x)) - case reflect.Float64: - elem.SetFloat(float64(math.Float64frombits(x))) - } -} - -func (p word64Slice) Len() int { - return p.v.Len() -} - -func (p word64Slice) Index(i int) uint64 { - elem := p.v.Index(i) - switch elem.Kind() { - case reflect.Int64: - return uint64(elem.Int()) - case reflect.Uint64: - return uint64(elem.Uint()) - case reflect.Float64: - return math.Float64bits(float64(elem.Float())) - } - panic("unreachable") -} - -func structPointer_Word64Slice(p structPointer, f field) word64Slice { - return word64Slice{structPointer_field(p, f)} -} diff --git a/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go b/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go deleted file mode 100644 index ceece77..0000000 --- a/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go +++ /dev/null @@ -1,266 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2012 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// +build !appengine,!js - -// This file contains the implementation of the proto field accesses using package unsafe. - -package proto - -import ( - "reflect" - "unsafe" -) - -// NOTE: These type_Foo functions would more idiomatically be methods, -// but Go does not allow methods on pointer types, and we must preserve -// some pointer type for the garbage collector. We use these -// funcs with clunky names as our poor approximation to methods. -// -// An alternative would be -// type structPointer struct { p unsafe.Pointer } -// but that does not registerize as well. - -// A structPointer is a pointer to a struct. -type structPointer unsafe.Pointer - -// toStructPointer returns a structPointer equivalent to the given reflect value. -func toStructPointer(v reflect.Value) structPointer { - return structPointer(unsafe.Pointer(v.Pointer())) -} - -// IsNil reports whether p is nil. -func structPointer_IsNil(p structPointer) bool { - return p == nil -} - -// Interface returns the struct pointer, assumed to have element type t, -// as an interface value. -func structPointer_Interface(p structPointer, t reflect.Type) interface{} { - return reflect.NewAt(t, unsafe.Pointer(p)).Interface() -} - -// A field identifies a field in a struct, accessible from a structPointer. -// In this implementation, a field is identified by its byte offset from the start of the struct. -type field uintptr - -// toField returns a field equivalent to the given reflect field. -func toField(f *reflect.StructField) field { - return field(f.Offset) -} - -// invalidField is an invalid field identifier. -const invalidField = ^field(0) - -// IsValid reports whether the field identifier is valid. -func (f field) IsValid() bool { - return f != ^field(0) -} - -// Bytes returns the address of a []byte field in the struct. -func structPointer_Bytes(p structPointer, f field) *[]byte { - return (*[]byte)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -// BytesSlice returns the address of a [][]byte field in the struct. -func structPointer_BytesSlice(p structPointer, f field) *[][]byte { - return (*[][]byte)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -// Bool returns the address of a *bool field in the struct. -func structPointer_Bool(p structPointer, f field) **bool { - return (**bool)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -// BoolVal returns the address of a bool field in the struct. -func structPointer_BoolVal(p structPointer, f field) *bool { - return (*bool)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -// BoolSlice returns the address of a []bool field in the struct. -func structPointer_BoolSlice(p structPointer, f field) *[]bool { - return (*[]bool)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -// String returns the address of a *string field in the struct. -func structPointer_String(p structPointer, f field) **string { - return (**string)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -// StringVal returns the address of a string field in the struct. 
-func structPointer_StringVal(p structPointer, f field) *string { - return (*string)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -// StringSlice returns the address of a []string field in the struct. -func structPointer_StringSlice(p structPointer, f field) *[]string { - return (*[]string)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -// ExtMap returns the address of an extension map field in the struct. -func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension { - return (*map[int32]Extension)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -// NewAt returns the reflect.Value for a pointer to a field in the struct. -func structPointer_NewAt(p structPointer, f field, typ reflect.Type) reflect.Value { - return reflect.NewAt(typ, unsafe.Pointer(uintptr(p)+uintptr(f))) -} - -// SetStructPointer writes a *struct field in the struct. -func structPointer_SetStructPointer(p structPointer, f field, q structPointer) { - *(*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f))) = q -} - -// GetStructPointer reads a *struct field in the struct. -func structPointer_GetStructPointer(p structPointer, f field) structPointer { - return *(*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -// StructPointerSlice the address of a []*struct field in the struct. -func structPointer_StructPointerSlice(p structPointer, f field) *structPointerSlice { - return (*structPointerSlice)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -// A structPointerSlice represents a slice of pointers to structs (themselves submessages or groups). -type structPointerSlice []structPointer - -func (v *structPointerSlice) Len() int { return len(*v) } -func (v *structPointerSlice) Index(i int) structPointer { return (*v)[i] } -func (v *structPointerSlice) Append(p structPointer) { *v = append(*v, p) } - -// A word32 is the address of a "pointer to 32-bit value" field. -type word32 **uint32 - -// IsNil reports whether *v is nil. -func word32_IsNil(p word32) bool { - return *p == nil -} - -// Set sets *v to point at a newly allocated word set to x. -func word32_Set(p word32, o *Buffer, x uint32) { - if len(o.uint32s) == 0 { - o.uint32s = make([]uint32, uint32PoolSize) - } - o.uint32s[0] = x - *p = &o.uint32s[0] - o.uint32s = o.uint32s[1:] -} - -// Get gets the value pointed at by *v. -func word32_Get(p word32) uint32 { - return **p -} - -// Word32 returns the address of a *int32, *uint32, *float32, or *enum field in the struct. -func structPointer_Word32(p structPointer, f field) word32 { - return word32((**uint32)(unsafe.Pointer(uintptr(p) + uintptr(f)))) -} - -// A word32Val is the address of a 32-bit value field. -type word32Val *uint32 - -// Set sets *p to x. -func word32Val_Set(p word32Val, x uint32) { - *p = x -} - -// Get gets the value pointed at by p. -func word32Val_Get(p word32Val) uint32 { - return *p -} - -// Word32Val returns the address of a *int32, *uint32, *float32, or *enum field in the struct. -func structPointer_Word32Val(p structPointer, f field) word32Val { - return word32Val((*uint32)(unsafe.Pointer(uintptr(p) + uintptr(f)))) -} - -// A word32Slice is a slice of 32-bit values. -type word32Slice []uint32 - -func (v *word32Slice) Append(x uint32) { *v = append(*v, x) } -func (v *word32Slice) Len() int { return len(*v) } -func (v *word32Slice) Index(i int) uint32 { return (*v)[i] } - -// Word32Slice returns the address of a []int32, []uint32, []float32, or []enum field in the struct. 
-func structPointer_Word32Slice(p structPointer, f field) *word32Slice { - return (*word32Slice)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -// word64 is like word32 but for 64-bit values. -type word64 **uint64 - -func word64_Set(p word64, o *Buffer, x uint64) { - if len(o.uint64s) == 0 { - o.uint64s = make([]uint64, uint64PoolSize) - } - o.uint64s[0] = x - *p = &o.uint64s[0] - o.uint64s = o.uint64s[1:] -} - -func word64_IsNil(p word64) bool { - return *p == nil -} - -func word64_Get(p word64) uint64 { - return **p -} - -func structPointer_Word64(p structPointer, f field) word64 { - return word64((**uint64)(unsafe.Pointer(uintptr(p) + uintptr(f)))) -} - -// word64Val is like word32Val but for 64-bit values. -type word64Val *uint64 - -func word64Val_Set(p word64Val, o *Buffer, x uint64) { - *p = x -} - -func word64Val_Get(p word64Val) uint64 { - return *p -} - -func structPointer_Word64Val(p structPointer, f field) word64Val { - return word64Val((*uint64)(unsafe.Pointer(uintptr(p) + uintptr(f)))) -} - -// word64Slice is like word32Slice but for 64-bit values. -type word64Slice []uint64 - -func (v *word64Slice) Append(x uint64) { *v = append(*v, x) } -func (v *word64Slice) Len() int { return len(*v) } -func (v *word64Slice) Index(i int) uint64 { return (*v)[i] } - -func structPointer_Word64Slice(p structPointer, f field) *word64Slice { - return (*word64Slice)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} diff --git a/vendor/github.com/golang/protobuf/proto/properties.go b/vendor/github.com/golang/protobuf/proto/properties.go deleted file mode 100644 index 880eb22..0000000 --- a/vendor/github.com/golang/protobuf/proto/properties.go +++ /dev/null @@ -1,850 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -/* - * Routines for encoding data into the wire format for protocol buffers. 
- */ - -import ( - "fmt" - "log" - "os" - "reflect" - "sort" - "strconv" - "strings" - "sync" -) - -const debug bool = false - -// Constants that identify the encoding of a value on the wire. -const ( - WireVarint = 0 - WireFixed64 = 1 - WireBytes = 2 - WireStartGroup = 3 - WireEndGroup = 4 - WireFixed32 = 5 -) - -const startSize = 10 // initial slice/string sizes - -// Encoders are defined in encode.go -// An encoder outputs the full representation of a field, including its -// tag and encoder type. -type encoder func(p *Buffer, prop *Properties, base structPointer) error - -// A valueEncoder encodes a single integer in a particular encoding. -type valueEncoder func(o *Buffer, x uint64) error - -// Sizers are defined in encode.go -// A sizer returns the encoded size of a field, including its tag and encoder -// type. -type sizer func(prop *Properties, base structPointer) int - -// A valueSizer returns the encoded size of a single integer in a particular -// encoding. -type valueSizer func(x uint64) int - -// Decoders are defined in decode.go -// A decoder creates a value from its wire representation. -// Unrecognized subelements are saved in unrec. -type decoder func(p *Buffer, prop *Properties, base structPointer) error - -// A valueDecoder decodes a single integer in a particular encoding. -type valueDecoder func(o *Buffer) (x uint64, err error) - -// A oneofMarshaler does the marshaling for all oneof fields in a message. -type oneofMarshaler func(Message, *Buffer) error - -// A oneofUnmarshaler does the unmarshaling for a oneof field in a message. -type oneofUnmarshaler func(Message, int, int, *Buffer) (bool, error) - -// A oneofSizer does the sizing for all oneof fields in a message. -type oneofSizer func(Message) int - -// tagMap is an optimization over map[int]int for typical protocol buffer -// use-cases. Encoded protocol buffers are often in tag order with small tag -// numbers. -type tagMap struct { - fastTags []int - slowTags map[int]int -} - -// tagMapFastLimit is the upper bound on the tag number that will be stored in -// the tagMap slice rather than its map. -const tagMapFastLimit = 1024 - -func (p *tagMap) get(t int) (int, bool) { - if t > 0 && t < tagMapFastLimit { - if t >= len(p.fastTags) { - return 0, false - } - fi := p.fastTags[t] - return fi, fi >= 0 - } - fi, ok := p.slowTags[t] - return fi, ok -} - -func (p *tagMap) put(t int, fi int) { - if t > 0 && t < tagMapFastLimit { - for len(p.fastTags) < t+1 { - p.fastTags = append(p.fastTags, -1) - } - p.fastTags[t] = fi - return - } - if p.slowTags == nil { - p.slowTags = make(map[int]int) - } - p.slowTags[t] = fi -} - -// StructProperties represents properties for all the fields of a struct. -// decoderTags and decoderOrigNames should only be used by the decoder. -type StructProperties struct { - Prop []*Properties // properties for each field - reqCount int // required count - decoderTags tagMap // map from proto tag to struct field number - decoderOrigNames map[string]int // map from original name to struct field number - order []int // list of struct field numbers in tag order - unrecField field // field id of the XXX_unrecognized []byte field - extendable bool // is this an extendable proto - - oneofMarshaler oneofMarshaler - oneofUnmarshaler oneofUnmarshaler - oneofSizer oneofSizer - stype reflect.Type - - // OneofTypes contains information about the oneof fields in this message. - // It is keyed by the original name of a field. 
- OneofTypes map[string]*OneofProperties -} - -// OneofProperties represents information about a specific field in a oneof. -type OneofProperties struct { - Type reflect.Type // pointer to generated struct type for this oneof field - Field int // struct field number of the containing oneof in the message - Prop *Properties -} - -// Implement the sorting interface so we can sort the fields in tag order, as recommended by the spec. -// See encode.go, (*Buffer).enc_struct. - -func (sp *StructProperties) Len() int { return len(sp.order) } -func (sp *StructProperties) Less(i, j int) bool { - return sp.Prop[sp.order[i]].Tag < sp.Prop[sp.order[j]].Tag -} -func (sp *StructProperties) Swap(i, j int) { sp.order[i], sp.order[j] = sp.order[j], sp.order[i] } - -// Properties represents the protocol-specific behavior of a single struct field. -type Properties struct { - Name string // name of the field, for error messages - OrigName string // original name before protocol compiler (always set) - JSONName string // name to use for JSON; determined by protoc - Wire string - WireType int - Tag int - Required bool - Optional bool - Repeated bool - Packed bool // relevant for repeated primitives only - Enum string // set for enum types only - proto3 bool // whether this is known to be a proto3 field; set for []byte only - oneof bool // whether this is a oneof field - - Default string // default value - HasDefault bool // whether an explicit default was provided - def_uint64 uint64 - - enc encoder - valEnc valueEncoder // set for bool and numeric types only - field field - tagcode []byte // encoding of EncodeVarint((Tag<<3)|WireType) - tagbuf [8]byte - stype reflect.Type // set for struct types only - sprop *StructProperties // set for struct types only - isMarshaler bool - isUnmarshaler bool - - mtype reflect.Type // set for map types only - mkeyprop *Properties // set for map types only - mvalprop *Properties // set for map types only - - size sizer - valSize valueSizer // set for bool and numeric types only - - dec decoder - valDec valueDecoder // set for bool and numeric types only - - // If this is a packable field, this will be the decoder for the packed version of the field. - packedDec decoder -} - -// String formats the properties in the protobuf struct field tag style. -func (p *Properties) String() string { - s := p.Wire - s = "," - s += strconv.Itoa(p.Tag) - if p.Required { - s += ",req" - } - if p.Optional { - s += ",opt" - } - if p.Repeated { - s += ",rep" - } - if p.Packed { - s += ",packed" - } - s += ",name=" + p.OrigName - if p.JSONName != p.OrigName { - s += ",json=" + p.JSONName - } - if p.proto3 { - s += ",proto3" - } - if p.oneof { - s += ",oneof" - } - if len(p.Enum) > 0 { - s += ",enum=" + p.Enum - } - if p.HasDefault { - s += ",def=" + p.Default - } - return s -} - -// Parse populates p by parsing a string in the protobuf struct field tag style. -func (p *Properties) Parse(s string) { - // "bytes,49,opt,name=foo,def=hello!" - fields := strings.Split(s, ",") // breaks def=, but handled below. 
- if len(fields) < 2 { - fmt.Fprintf(os.Stderr, "proto: tag has too few fields: %q\n", s) - return - } - - p.Wire = fields[0] - switch p.Wire { - case "varint": - p.WireType = WireVarint - p.valEnc = (*Buffer).EncodeVarint - p.valDec = (*Buffer).DecodeVarint - p.valSize = sizeVarint - case "fixed32": - p.WireType = WireFixed32 - p.valEnc = (*Buffer).EncodeFixed32 - p.valDec = (*Buffer).DecodeFixed32 - p.valSize = sizeFixed32 - case "fixed64": - p.WireType = WireFixed64 - p.valEnc = (*Buffer).EncodeFixed64 - p.valDec = (*Buffer).DecodeFixed64 - p.valSize = sizeFixed64 - case "zigzag32": - p.WireType = WireVarint - p.valEnc = (*Buffer).EncodeZigzag32 - p.valDec = (*Buffer).DecodeZigzag32 - p.valSize = sizeZigzag32 - case "zigzag64": - p.WireType = WireVarint - p.valEnc = (*Buffer).EncodeZigzag64 - p.valDec = (*Buffer).DecodeZigzag64 - p.valSize = sizeZigzag64 - case "bytes", "group": - p.WireType = WireBytes - // no numeric converter for non-numeric types - default: - fmt.Fprintf(os.Stderr, "proto: tag has unknown wire type: %q\n", s) - return - } - - var err error - p.Tag, err = strconv.Atoi(fields[1]) - if err != nil { - return - } - - for i := 2; i < len(fields); i++ { - f := fields[i] - switch { - case f == "req": - p.Required = true - case f == "opt": - p.Optional = true - case f == "rep": - p.Repeated = true - case f == "packed": - p.Packed = true - case strings.HasPrefix(f, "name="): - p.OrigName = f[5:] - case strings.HasPrefix(f, "json="): - p.JSONName = f[5:] - case strings.HasPrefix(f, "enum="): - p.Enum = f[5:] - case f == "proto3": - p.proto3 = true - case f == "oneof": - p.oneof = true - case strings.HasPrefix(f, "def="): - p.HasDefault = true - p.Default = f[4:] // rest of string - if i+1 < len(fields) { - // Commas aren't escaped, and def is always last. - p.Default += "," + strings.Join(fields[i+1:], ",") - break - } - } - } -} - -func logNoSliceEnc(t1, t2 reflect.Type) { - fmt.Fprintf(os.Stderr, "proto: no slice oenc for %T = []%T\n", t1, t2) -} - -var protoMessageType = reflect.TypeOf((*Message)(nil)).Elem() - -// Initialize the fields for encoding and decoding. 
-func (p *Properties) setEncAndDec(typ reflect.Type, f *reflect.StructField, lockGetProp bool) { - p.enc = nil - p.dec = nil - p.size = nil - - switch t1 := typ; t1.Kind() { - default: - fmt.Fprintf(os.Stderr, "proto: no coders for %v\n", t1) - - // proto3 scalar types - - case reflect.Bool: - p.enc = (*Buffer).enc_proto3_bool - p.dec = (*Buffer).dec_proto3_bool - p.size = size_proto3_bool - case reflect.Int32: - p.enc = (*Buffer).enc_proto3_int32 - p.dec = (*Buffer).dec_proto3_int32 - p.size = size_proto3_int32 - case reflect.Uint32: - p.enc = (*Buffer).enc_proto3_uint32 - p.dec = (*Buffer).dec_proto3_int32 // can reuse - p.size = size_proto3_uint32 - case reflect.Int64, reflect.Uint64: - p.enc = (*Buffer).enc_proto3_int64 - p.dec = (*Buffer).dec_proto3_int64 - p.size = size_proto3_int64 - case reflect.Float32: - p.enc = (*Buffer).enc_proto3_uint32 // can just treat them as bits - p.dec = (*Buffer).dec_proto3_int32 - p.size = size_proto3_uint32 - case reflect.Float64: - p.enc = (*Buffer).enc_proto3_int64 // can just treat them as bits - p.dec = (*Buffer).dec_proto3_int64 - p.size = size_proto3_int64 - case reflect.String: - p.enc = (*Buffer).enc_proto3_string - p.dec = (*Buffer).dec_proto3_string - p.size = size_proto3_string - - case reflect.Ptr: - switch t2 := t1.Elem(); t2.Kind() { - default: - fmt.Fprintf(os.Stderr, "proto: no encoder function for %v -> %v\n", t1, t2) - break - case reflect.Bool: - p.enc = (*Buffer).enc_bool - p.dec = (*Buffer).dec_bool - p.size = size_bool - case reflect.Int32: - p.enc = (*Buffer).enc_int32 - p.dec = (*Buffer).dec_int32 - p.size = size_int32 - case reflect.Uint32: - p.enc = (*Buffer).enc_uint32 - p.dec = (*Buffer).dec_int32 // can reuse - p.size = size_uint32 - case reflect.Int64, reflect.Uint64: - p.enc = (*Buffer).enc_int64 - p.dec = (*Buffer).dec_int64 - p.size = size_int64 - case reflect.Float32: - p.enc = (*Buffer).enc_uint32 // can just treat them as bits - p.dec = (*Buffer).dec_int32 - p.size = size_uint32 - case reflect.Float64: - p.enc = (*Buffer).enc_int64 // can just treat them as bits - p.dec = (*Buffer).dec_int64 - p.size = size_int64 - case reflect.String: - p.enc = (*Buffer).enc_string - p.dec = (*Buffer).dec_string - p.size = size_string - case reflect.Struct: - p.stype = t1.Elem() - p.isMarshaler = isMarshaler(t1) - p.isUnmarshaler = isUnmarshaler(t1) - if p.Wire == "bytes" { - p.enc = (*Buffer).enc_struct_message - p.dec = (*Buffer).dec_struct_message - p.size = size_struct_message - } else { - p.enc = (*Buffer).enc_struct_group - p.dec = (*Buffer).dec_struct_group - p.size = size_struct_group - } - } - - case reflect.Slice: - switch t2 := t1.Elem(); t2.Kind() { - default: - logNoSliceEnc(t1, t2) - break - case reflect.Bool: - if p.Packed { - p.enc = (*Buffer).enc_slice_packed_bool - p.size = size_slice_packed_bool - } else { - p.enc = (*Buffer).enc_slice_bool - p.size = size_slice_bool - } - p.dec = (*Buffer).dec_slice_bool - p.packedDec = (*Buffer).dec_slice_packed_bool - case reflect.Int32: - if p.Packed { - p.enc = (*Buffer).enc_slice_packed_int32 - p.size = size_slice_packed_int32 - } else { - p.enc = (*Buffer).enc_slice_int32 - p.size = size_slice_int32 - } - p.dec = (*Buffer).dec_slice_int32 - p.packedDec = (*Buffer).dec_slice_packed_int32 - case reflect.Uint32: - if p.Packed { - p.enc = (*Buffer).enc_slice_packed_uint32 - p.size = size_slice_packed_uint32 - } else { - p.enc = (*Buffer).enc_slice_uint32 - p.size = size_slice_uint32 - } - p.dec = (*Buffer).dec_slice_int32 - p.packedDec = (*Buffer).dec_slice_packed_int32 - case 
reflect.Int64, reflect.Uint64: - if p.Packed { - p.enc = (*Buffer).enc_slice_packed_int64 - p.size = size_slice_packed_int64 - } else { - p.enc = (*Buffer).enc_slice_int64 - p.size = size_slice_int64 - } - p.dec = (*Buffer).dec_slice_int64 - p.packedDec = (*Buffer).dec_slice_packed_int64 - case reflect.Uint8: - p.enc = (*Buffer).enc_slice_byte - p.dec = (*Buffer).dec_slice_byte - p.size = size_slice_byte - // This is a []byte, which is either a bytes field, - // or the value of a map field. In the latter case, - // we always encode an empty []byte, so we should not - // use the proto3 enc/size funcs. - // f == nil iff this is the key/value of a map field. - if p.proto3 && f != nil { - p.enc = (*Buffer).enc_proto3_slice_byte - p.size = size_proto3_slice_byte - } - case reflect.Float32, reflect.Float64: - switch t2.Bits() { - case 32: - // can just treat them as bits - if p.Packed { - p.enc = (*Buffer).enc_slice_packed_uint32 - p.size = size_slice_packed_uint32 - } else { - p.enc = (*Buffer).enc_slice_uint32 - p.size = size_slice_uint32 - } - p.dec = (*Buffer).dec_slice_int32 - p.packedDec = (*Buffer).dec_slice_packed_int32 - case 64: - // can just treat them as bits - if p.Packed { - p.enc = (*Buffer).enc_slice_packed_int64 - p.size = size_slice_packed_int64 - } else { - p.enc = (*Buffer).enc_slice_int64 - p.size = size_slice_int64 - } - p.dec = (*Buffer).dec_slice_int64 - p.packedDec = (*Buffer).dec_slice_packed_int64 - default: - logNoSliceEnc(t1, t2) - break - } - case reflect.String: - p.enc = (*Buffer).enc_slice_string - p.dec = (*Buffer).dec_slice_string - p.size = size_slice_string - case reflect.Ptr: - switch t3 := t2.Elem(); t3.Kind() { - default: - fmt.Fprintf(os.Stderr, "proto: no ptr oenc for %T -> %T -> %T\n", t1, t2, t3) - break - case reflect.Struct: - p.stype = t2.Elem() - p.isMarshaler = isMarshaler(t2) - p.isUnmarshaler = isUnmarshaler(t2) - if p.Wire == "bytes" { - p.enc = (*Buffer).enc_slice_struct_message - p.dec = (*Buffer).dec_slice_struct_message - p.size = size_slice_struct_message - } else { - p.enc = (*Buffer).enc_slice_struct_group - p.dec = (*Buffer).dec_slice_struct_group - p.size = size_slice_struct_group - } - } - case reflect.Slice: - switch t2.Elem().Kind() { - default: - fmt.Fprintf(os.Stderr, "proto: no slice elem oenc for %T -> %T -> %T\n", t1, t2, t2.Elem()) - break - case reflect.Uint8: - p.enc = (*Buffer).enc_slice_slice_byte - p.dec = (*Buffer).dec_slice_slice_byte - p.size = size_slice_slice_byte - } - } - - case reflect.Map: - p.enc = (*Buffer).enc_new_map - p.dec = (*Buffer).dec_new_map - p.size = size_new_map - - p.mtype = t1 - p.mkeyprop = &Properties{} - p.mkeyprop.init(reflect.PtrTo(p.mtype.Key()), "Key", f.Tag.Get("protobuf_key"), nil, lockGetProp) - p.mvalprop = &Properties{} - vtype := p.mtype.Elem() - if vtype.Kind() != reflect.Ptr && vtype.Kind() != reflect.Slice { - // The value type is not a message (*T) or bytes ([]byte), - // so we need encoders for the pointer to this type. 
- vtype = reflect.PtrTo(vtype) - } - p.mvalprop.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp) - } - - // precalculate tag code - wire := p.WireType - if p.Packed { - wire = WireBytes - } - x := uint32(p.Tag)<<3 | uint32(wire) - i := 0 - for i = 0; x > 127; i++ { - p.tagbuf[i] = 0x80 | uint8(x&0x7F) - x >>= 7 - } - p.tagbuf[i] = uint8(x) - p.tagcode = p.tagbuf[0 : i+1] - - if p.stype != nil { - if lockGetProp { - p.sprop = GetProperties(p.stype) - } else { - p.sprop = getPropertiesLocked(p.stype) - } - } -} - -var ( - marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem() - unmarshalerType = reflect.TypeOf((*Unmarshaler)(nil)).Elem() -) - -// isMarshaler reports whether type t implements Marshaler. -func isMarshaler(t reflect.Type) bool { - // We're checking for (likely) pointer-receiver methods - // so if t is not a pointer, something is very wrong. - // The calls above only invoke isMarshaler on pointer types. - if t.Kind() != reflect.Ptr { - panic("proto: misuse of isMarshaler") - } - return t.Implements(marshalerType) -} - -// isUnmarshaler reports whether type t implements Unmarshaler. -func isUnmarshaler(t reflect.Type) bool { - // We're checking for (likely) pointer-receiver methods - // so if t is not a pointer, something is very wrong. - // The calls above only invoke isUnmarshaler on pointer types. - if t.Kind() != reflect.Ptr { - panic("proto: misuse of isUnmarshaler") - } - return t.Implements(unmarshalerType) -} - -// Init populates the properties from a protocol buffer struct tag. -func (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) { - p.init(typ, name, tag, f, true) -} - -func (p *Properties) init(typ reflect.Type, name, tag string, f *reflect.StructField, lockGetProp bool) { - // "bytes,49,opt,def=hello!" - p.Name = name - p.OrigName = name - if f != nil { - p.field = toField(f) - } - if tag == "" { - return - } - p.Parse(tag) - p.setEncAndDec(typ, f, lockGetProp) -} - -var ( - propertiesMu sync.RWMutex - propertiesMap = make(map[reflect.Type]*StructProperties) -) - -// GetProperties returns the list of properties for the type represented by t. -// t must represent a generated struct type of a protocol message. -func GetProperties(t reflect.Type) *StructProperties { - if t.Kind() != reflect.Struct { - panic("proto: type must have kind struct") - } - - // Most calls to GetProperties in a long-running program will be - // retrieving details for types we have seen before. - propertiesMu.RLock() - sprop, ok := propertiesMap[t] - propertiesMu.RUnlock() - if ok { - if collectStats { - stats.Chit++ - } - return sprop - } - - propertiesMu.Lock() - sprop = getPropertiesLocked(t) - propertiesMu.Unlock() - return sprop -} - -// getPropertiesLocked requires that propertiesMu is held. -func getPropertiesLocked(t reflect.Type) *StructProperties { - if prop, ok := propertiesMap[t]; ok { - if collectStats { - stats.Chit++ - } - return prop - } - if collectStats { - stats.Cmiss++ - } - - prop := new(StructProperties) - // in case of recursive protos, fill this in now. 
- propertiesMap[t] = prop - - // build properties - prop.extendable = reflect.PtrTo(t).Implements(extendableProtoType) - prop.unrecField = invalidField - prop.Prop = make([]*Properties, t.NumField()) - prop.order = make([]int, t.NumField()) - - for i := 0; i < t.NumField(); i++ { - f := t.Field(i) - p := new(Properties) - name := f.Name - p.init(f.Type, name, f.Tag.Get("protobuf"), &f, false) - - if f.Name == "XXX_extensions" { // special case - p.enc = (*Buffer).enc_map - p.dec = nil // not needed - p.size = size_map - } - if f.Name == "XXX_unrecognized" { // special case - prop.unrecField = toField(&f) - } - oneof := f.Tag.Get("protobuf_oneof") // special case - if oneof != "" { - // Oneof fields don't use the traditional protobuf tag. - p.OrigName = oneof - } - prop.Prop[i] = p - prop.order[i] = i - if debug { - print(i, " ", f.Name, " ", t.String(), " ") - if p.Tag > 0 { - print(p.String()) - } - print("\n") - } - if p.enc == nil && !strings.HasPrefix(f.Name, "XXX_") && oneof == "" { - fmt.Fprintln(os.Stderr, "proto: no encoder for", f.Name, f.Type.String(), "[GetProperties]") - } - } - - // Re-order prop.order. - sort.Sort(prop) - - type oneofMessage interface { - XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{}) - } - if om, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(oneofMessage); ok { - var oots []interface{} - prop.oneofMarshaler, prop.oneofUnmarshaler, prop.oneofSizer, oots = om.XXX_OneofFuncs() - prop.stype = t - - // Interpret oneof metadata. - prop.OneofTypes = make(map[string]*OneofProperties) - for _, oot := range oots { - oop := &OneofProperties{ - Type: reflect.ValueOf(oot).Type(), // *T - Prop: new(Properties), - } - sft := oop.Type.Elem().Field(0) - oop.Prop.Name = sft.Name - oop.Prop.Parse(sft.Tag.Get("protobuf")) - // There will be exactly one interface field that - // this new value is assignable to. - for i := 0; i < t.NumField(); i++ { - f := t.Field(i) - if f.Type.Kind() != reflect.Interface { - continue - } - if !oop.Type.AssignableTo(f.Type) { - continue - } - oop.Field = i - break - } - prop.OneofTypes[oop.Prop.OrigName] = oop - } - } - - // build required counts - // build tags - reqCount := 0 - prop.decoderOrigNames = make(map[string]int) - for i, p := range prop.Prop { - if strings.HasPrefix(p.Name, "XXX_") { - // Internal fields should not appear in tags/origNames maps. - // They are handled specially when encoding and decoding. - continue - } - if p.Required { - reqCount++ - } - prop.decoderTags.put(p.Tag, i) - prop.decoderOrigNames[p.OrigName] = i - } - prop.reqCount = reqCount - - return prop -} - -// Return the Properties object for the x[0]'th field of the structure. -func propByIndex(t reflect.Type, x []int) *Properties { - if len(x) != 1 { - fmt.Fprintf(os.Stderr, "proto: field index dimension %d (not 1) for type %s\n", len(x), t) - return nil - } - prop := GetProperties(t) - return prop.Prop[x[0]] -} - -// Get the address and type of a pointer to a struct from an interface. -func getbase(pb Message) (t reflect.Type, b structPointer, err error) { - if pb == nil { - err = ErrNil - return - } - // get the reflect type of the pointer to the struct. - t = reflect.TypeOf(pb) - // get the address of the struct. - value := reflect.ValueOf(pb) - b = toStructPointer(value) - return -} - -// A global registry of enum types. -// The generated code will register the generated maps by calling RegisterEnum. 
- -var enumValueMaps = make(map[string]map[string]int32) - -// RegisterEnum is called from the generated code to install the enum descriptor -// maps into the global table to aid parsing text format protocol buffers. -func RegisterEnum(typeName string, unusedNameMap map[int32]string, valueMap map[string]int32) { - if _, ok := enumValueMaps[typeName]; ok { - panic("proto: duplicate enum registered: " + typeName) - } - enumValueMaps[typeName] = valueMap -} - -// EnumValueMap returns the mapping from names to integers of the -// enum type enumType, or a nil if not found. -func EnumValueMap(enumType string) map[string]int32 { - return enumValueMaps[enumType] -} - -// A registry of all linked message types. -// The string is a fully-qualified proto name ("pkg.Message"). -var ( - protoTypes = make(map[string]reflect.Type) - revProtoTypes = make(map[reflect.Type]string) -) - -// RegisterType is called from generated code and maps from the fully qualified -// proto name to the type (pointer to struct) of the protocol buffer. -func RegisterType(x Message, name string) { - if _, ok := protoTypes[name]; ok { - // TODO: Some day, make this a panic. - log.Printf("proto: duplicate proto type registered: %s", name) - return - } - t := reflect.TypeOf(x) - protoTypes[name] = t - revProtoTypes[t] = name -} - -// MessageName returns the fully-qualified proto name for the given message type. -func MessageName(x Message) string { return revProtoTypes[reflect.TypeOf(x)] } - -// MessageType returns the message type (pointer to struct) for a named message. -func MessageType(name string) reflect.Type { return protoTypes[name] } diff --git a/vendor/github.com/golang/protobuf/proto/text.go b/vendor/github.com/golang/protobuf/proto/text.go deleted file mode 100644 index 37c9535..0000000 --- a/vendor/github.com/golang/protobuf/proto/text.go +++ /dev/null @@ -1,849 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -// Functions for writing the text protocol buffer format. - -import ( - "bufio" - "bytes" - "encoding" - "errors" - "fmt" - "io" - "log" - "math" - "reflect" - "sort" - "strings" -) - -var ( - newline = []byte("\n") - spaces = []byte(" ") - gtNewline = []byte(">\n") - endBraceNewline = []byte("}\n") - backslashN = []byte{'\\', 'n'} - backslashR = []byte{'\\', 'r'} - backslashT = []byte{'\\', 't'} - backslashDQ = []byte{'\\', '"'} - backslashBS = []byte{'\\', '\\'} - posInf = []byte("inf") - negInf = []byte("-inf") - nan = []byte("nan") -) - -type writer interface { - io.Writer - WriteByte(byte) error -} - -// textWriter is an io.Writer that tracks its indentation level. -type textWriter struct { - ind int - complete bool // if the current position is a complete line - compact bool // whether to write out as a one-liner - w writer -} - -func (w *textWriter) WriteString(s string) (n int, err error) { - if !strings.Contains(s, "\n") { - if !w.compact && w.complete { - w.writeIndent() - } - w.complete = false - return io.WriteString(w.w, s) - } - // WriteString is typically called without newlines, so this - // codepath and its copy are rare. We copy to avoid - // duplicating all of Write's logic here. - return w.Write([]byte(s)) -} - -func (w *textWriter) Write(p []byte) (n int, err error) { - newlines := bytes.Count(p, newline) - if newlines == 0 { - if !w.compact && w.complete { - w.writeIndent() - } - n, err = w.w.Write(p) - w.complete = false - return n, err - } - - frags := bytes.SplitN(p, newline, newlines+1) - if w.compact { - for i, frag := range frags { - if i > 0 { - if err := w.w.WriteByte(' '); err != nil { - return n, err - } - n++ - } - nn, err := w.w.Write(frag) - n += nn - if err != nil { - return n, err - } - } - return n, nil - } - - for i, frag := range frags { - if w.complete { - w.writeIndent() - } - nn, err := w.w.Write(frag) - n += nn - if err != nil { - return n, err - } - if i+1 < len(frags) { - if err := w.w.WriteByte('\n'); err != nil { - return n, err - } - n++ - } - } - w.complete = len(frags[len(frags)-1]) == 0 - return n, nil -} - -func (w *textWriter) WriteByte(c byte) error { - if w.compact && c == '\n' { - c = ' ' - } - if !w.compact && w.complete { - w.writeIndent() - } - err := w.w.WriteByte(c) - w.complete = c == '\n' - return err -} - -func (w *textWriter) indent() { w.ind++ } - -func (w *textWriter) unindent() { - if w.ind == 0 { - log.Printf("proto: textWriter unindented too far") - return - } - w.ind-- -} - -func writeName(w *textWriter, props *Properties) error { - if _, err := w.WriteString(props.OrigName); err != nil { - return err - } - if props.Wire != "group" { - return w.WriteByte(':') - } - return nil -} - -// raw is the interface satisfied by RawMessage. -type raw interface { - Bytes() []byte -} - -func requiresQuotes(u string) bool { - // When type URL contains any characters except [0-9A-Za-z./\-]*, it must be quoted. - for _, ch := range u { - switch { - case ch == '.' 
|| ch == '/' || ch == '_': - continue - case '0' <= ch && ch <= '9': - continue - case 'A' <= ch && ch <= 'Z': - continue - case 'a' <= ch && ch <= 'z': - continue - default: - return true - } - } - return false -} - -// isAny reports whether sv is a google.protobuf.Any message -func isAny(sv reflect.Value) bool { - type wkt interface { - XXX_WellKnownType() string - } - t, ok := sv.Addr().Interface().(wkt) - return ok && t.XXX_WellKnownType() == "Any" -} - -// writeProto3Any writes an expanded google.protobuf.Any message. -// -// It returns (false, nil) if sv value can't be unmarshaled (e.g. because -// required messages are not linked in). -// -// It returns (true, error) when sv was written in expanded format or an error -// was encountered. -func (tm *TextMarshaler) writeProto3Any(w *textWriter, sv reflect.Value) (bool, error) { - turl := sv.FieldByName("TypeUrl") - val := sv.FieldByName("Value") - if !turl.IsValid() || !val.IsValid() { - return true, errors.New("proto: invalid google.protobuf.Any message") - } - - b, ok := val.Interface().([]byte) - if !ok { - return true, errors.New("proto: invalid google.protobuf.Any message") - } - - parts := strings.Split(turl.String(), "/") - mt := MessageType(parts[len(parts)-1]) - if mt == nil { - return false, nil - } - m := reflect.New(mt.Elem()) - if err := Unmarshal(b, m.Interface().(Message)); err != nil { - return false, nil - } - w.Write([]byte("[")) - u := turl.String() - if requiresQuotes(u) { - writeString(w, u) - } else { - w.Write([]byte(u)) - } - if w.compact { - w.Write([]byte("]:<")) - } else { - w.Write([]byte("]: <\n")) - w.ind++ - } - if err := tm.writeStruct(w, m.Elem()); err != nil { - return true, err - } - if w.compact { - w.Write([]byte("> ")) - } else { - w.ind-- - w.Write([]byte(">\n")) - } - return true, nil -} - -func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error { - if tm.ExpandAny && isAny(sv) { - if canExpand, err := tm.writeProto3Any(w, sv); canExpand { - return err - } - } - st := sv.Type() - sprops := GetProperties(st) - for i := 0; i < sv.NumField(); i++ { - fv := sv.Field(i) - props := sprops.Prop[i] - name := st.Field(i).Name - - if strings.HasPrefix(name, "XXX_") { - // There are two XXX_ fields: - // XXX_unrecognized []byte - // XXX_extensions map[int32]proto.Extension - // The first is handled here; - // the second is handled at the bottom of this function. - if name == "XXX_unrecognized" && !fv.IsNil() { - if err := writeUnknownStruct(w, fv.Interface().([]byte)); err != nil { - return err - } - } - continue - } - if fv.Kind() == reflect.Ptr && fv.IsNil() { - // Field not filled in. This could be an optional field or - // a required field that wasn't filled in. Either way, there - // isn't anything we can show for it. - continue - } - if fv.Kind() == reflect.Slice && fv.IsNil() { - // Repeated field that is empty, or a bytes field that is unused. - continue - } - - if props.Repeated && fv.Kind() == reflect.Slice { - // Repeated field. - for j := 0; j < fv.Len(); j++ { - if err := writeName(w, props); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - v := fv.Index(j) - if v.Kind() == reflect.Ptr && v.IsNil() { - // A nil message in a repeated field is not valid, - // but we can handle that more gracefully than panicking. 
- if _, err := w.Write([]byte("\n")); err != nil { - return err - } - continue - } - if err := tm.writeAny(w, v, props); err != nil { - return err - } - if err := w.WriteByte('\n'); err != nil { - return err - } - } - continue - } - if fv.Kind() == reflect.Map { - // Map fields are rendered as a repeated struct with key/value fields. - keys := fv.MapKeys() - sort.Sort(mapKeys(keys)) - for _, key := range keys { - val := fv.MapIndex(key) - if err := writeName(w, props); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - // open struct - if err := w.WriteByte('<'); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte('\n'); err != nil { - return err - } - } - w.indent() - // key - if _, err := w.WriteString("key:"); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - if err := tm.writeAny(w, key, props.mkeyprop); err != nil { - return err - } - if err := w.WriteByte('\n'); err != nil { - return err - } - // nil values aren't legal, but we can avoid panicking because of them. - if val.Kind() != reflect.Ptr || !val.IsNil() { - // value - if _, err := w.WriteString("value:"); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - if err := tm.writeAny(w, val, props.mvalprop); err != nil { - return err - } - if err := w.WriteByte('\n'); err != nil { - return err - } - } - // close struct - w.unindent() - if err := w.WriteByte('>'); err != nil { - return err - } - if err := w.WriteByte('\n'); err != nil { - return err - } - } - continue - } - if props.proto3 && fv.Kind() == reflect.Slice && fv.Len() == 0 { - // empty bytes field - continue - } - if fv.Kind() != reflect.Ptr && fv.Kind() != reflect.Slice { - // proto3 non-repeated scalar field; skip if zero value - if isProto3Zero(fv) { - continue - } - } - - if fv.Kind() == reflect.Interface { - // Check if it is a oneof. - if st.Field(i).Tag.Get("protobuf_oneof") != "" { - // fv is nil, or holds a pointer to generated struct. - // That generated struct has exactly one field, - // which has a protobuf struct tag. - if fv.IsNil() { - continue - } - inner := fv.Elem().Elem() // interface -> *T -> T - tag := inner.Type().Field(0).Tag.Get("protobuf") - props = new(Properties) // Overwrite the outer props var, but not its pointee. - props.Parse(tag) - // Write the value in the oneof, not the oneof itself. - fv = inner.Field(0) - - // Special case to cope with malformed messages gracefully: - // If the value in the oneof is a nil pointer, don't panic - // in writeAny. - if fv.Kind() == reflect.Ptr && fv.IsNil() { - // Use errors.New so writeAny won't render quotes. - msg := errors.New("/* nil */") - fv = reflect.ValueOf(&msg).Elem() - } - } - } - - if err := writeName(w, props); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - if b, ok := fv.Interface().(raw); ok { - if err := writeRaw(w, b.Bytes()); err != nil { - return err - } - continue - } - - // Enums have a String method, so writeAny will work fine. - if err := tm.writeAny(w, fv, props); err != nil { - return err - } - - if err := w.WriteByte('\n'); err != nil { - return err - } - } - - // Extensions (the XXX_extensions field). - pv := sv.Addr() - if pv.Type().Implements(extendableProtoType) { - if err := tm.writeExtensions(w, pv); err != nil { - return err - } - } - - return nil -} - -// writeRaw writes an uninterpreted raw message. 
-func writeRaw(w *textWriter, b []byte) error { - if err := w.WriteByte('<'); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte('\n'); err != nil { - return err - } - } - w.indent() - if err := writeUnknownStruct(w, b); err != nil { - return err - } - w.unindent() - if err := w.WriteByte('>'); err != nil { - return err - } - return nil -} - -// writeAny writes an arbitrary field. -func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Properties) error { - v = reflect.Indirect(v) - - // Floats have special cases. - if v.Kind() == reflect.Float32 || v.Kind() == reflect.Float64 { - x := v.Float() - var b []byte - switch { - case math.IsInf(x, 1): - b = posInf - case math.IsInf(x, -1): - b = negInf - case math.IsNaN(x): - b = nan - } - if b != nil { - _, err := w.Write(b) - return err - } - // Other values are handled below. - } - - // We don't attempt to serialise every possible value type; only those - // that can occur in protocol buffers. - switch v.Kind() { - case reflect.Slice: - // Should only be a []byte; repeated fields are handled in writeStruct. - if err := writeString(w, string(v.Interface().([]byte))); err != nil { - return err - } - case reflect.String: - if err := writeString(w, v.String()); err != nil { - return err - } - case reflect.Struct: - // Required/optional group/message. - var bra, ket byte = '<', '>' - if props != nil && props.Wire == "group" { - bra, ket = '{', '}' - } - if err := w.WriteByte(bra); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte('\n'); err != nil { - return err - } - } - w.indent() - if etm, ok := v.Interface().(encoding.TextMarshaler); ok { - text, err := etm.MarshalText() - if err != nil { - return err - } - if _, err = w.Write(text); err != nil { - return err - } - } else if err := tm.writeStruct(w, v); err != nil { - return err - } - w.unindent() - if err := w.WriteByte(ket); err != nil { - return err - } - default: - _, err := fmt.Fprint(w, v.Interface()) - return err - } - return nil -} - -// equivalent to C's isprint. -func isprint(c byte) bool { - return c >= 0x20 && c < 0x7f -} - -// writeString writes a string in the protocol buffer text format. -// It is similar to strconv.Quote except we don't use Go escape sequences, -// we treat the string as a byte sequence, and we use octal escapes. -// These differences are to maintain interoperability with the other -// languages' implementations of the text format. -func writeString(w *textWriter, s string) error { - // use WriteByte here to get any needed indent - if err := w.WriteByte('"'); err != nil { - return err - } - // Loop over the bytes, not the runes. - for i := 0; i < len(s); i++ { - var err error - // Divergence from C++: we don't escape apostrophes. - // There's no need to escape them, and the C++ parser - // copes with a naked apostrophe. 
- switch c := s[i]; c { - case '\n': - _, err = w.w.Write(backslashN) - case '\r': - _, err = w.w.Write(backslashR) - case '\t': - _, err = w.w.Write(backslashT) - case '"': - _, err = w.w.Write(backslashDQ) - case '\\': - _, err = w.w.Write(backslashBS) - default: - if isprint(c) { - err = w.w.WriteByte(c) - } else { - _, err = fmt.Fprintf(w.w, "\\%03o", c) - } - } - if err != nil { - return err - } - } - return w.WriteByte('"') -} - -func writeUnknownStruct(w *textWriter, data []byte) (err error) { - if !w.compact { - if _, err := fmt.Fprintf(w, "/* %d unknown bytes */\n", len(data)); err != nil { - return err - } - } - b := NewBuffer(data) - for b.index < len(b.buf) { - x, err := b.DecodeVarint() - if err != nil { - _, err := fmt.Fprintf(w, "/* %v */\n", err) - return err - } - wire, tag := x&7, x>>3 - if wire == WireEndGroup { - w.unindent() - if _, err := w.Write(endBraceNewline); err != nil { - return err - } - continue - } - if _, err := fmt.Fprint(w, tag); err != nil { - return err - } - if wire != WireStartGroup { - if err := w.WriteByte(':'); err != nil { - return err - } - } - if !w.compact || wire == WireStartGroup { - if err := w.WriteByte(' '); err != nil { - return err - } - } - switch wire { - case WireBytes: - buf, e := b.DecodeRawBytes(false) - if e == nil { - _, err = fmt.Fprintf(w, "%q", buf) - } else { - _, err = fmt.Fprintf(w, "/* %v */", e) - } - case WireFixed32: - x, err = b.DecodeFixed32() - err = writeUnknownInt(w, x, err) - case WireFixed64: - x, err = b.DecodeFixed64() - err = writeUnknownInt(w, x, err) - case WireStartGroup: - err = w.WriteByte('{') - w.indent() - case WireVarint: - x, err = b.DecodeVarint() - err = writeUnknownInt(w, x, err) - default: - _, err = fmt.Fprintf(w, "/* unknown wire type %d */", wire) - } - if err != nil { - return err - } - if err = w.WriteByte('\n'); err != nil { - return err - } - } - return nil -} - -func writeUnknownInt(w *textWriter, x uint64, err error) error { - if err == nil { - _, err = fmt.Fprint(w, x) - } else { - _, err = fmt.Fprintf(w, "/* %v */", err) - } - return err -} - -type int32Slice []int32 - -func (s int32Slice) Len() int { return len(s) } -func (s int32Slice) Less(i, j int) bool { return s[i] < s[j] } -func (s int32Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } - -// writeExtensions writes all the extensions in pv. -// pv is assumed to be a pointer to a protocol message struct that is extendable. -func (tm *TextMarshaler) writeExtensions(w *textWriter, pv reflect.Value) error { - emap := extensionMaps[pv.Type().Elem()] - ep := pv.Interface().(extendableProto) - - // Order the extensions by ID. - // This isn't strictly necessary, but it will give us - // canonical output, which will also make testing easier. - m := ep.ExtensionMap() - ids := make([]int32, 0, len(m)) - for id := range m { - ids = append(ids, id) - } - sort.Sort(int32Slice(ids)) - - for _, extNum := range ids { - ext := m[extNum] - var desc *ExtensionDesc - if emap != nil { - desc = emap[extNum] - } - if desc == nil { - // Unknown extension. - if err := writeUnknownStruct(w, ext.enc); err != nil { - return err - } - continue - } - - pb, err := GetExtension(ep, desc) - if err != nil { - return fmt.Errorf("failed getting extension: %v", err) - } - - // Repeated extensions will appear as a slice. 
- if !desc.repeated() { - if err := tm.writeExtension(w, desc.Name, pb); err != nil { - return err - } - } else { - v := reflect.ValueOf(pb) - for i := 0; i < v.Len(); i++ { - if err := tm.writeExtension(w, desc.Name, v.Index(i).Interface()); err != nil { - return err - } - } - } - } - return nil -} - -func (tm *TextMarshaler) writeExtension(w *textWriter, name string, pb interface{}) error { - if _, err := fmt.Fprintf(w, "[%s]:", name); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - if err := tm.writeAny(w, reflect.ValueOf(pb), nil); err != nil { - return err - } - if err := w.WriteByte('\n'); err != nil { - return err - } - return nil -} - -func (w *textWriter) writeIndent() { - if !w.complete { - return - } - remain := w.ind * 2 - for remain > 0 { - n := remain - if n > len(spaces) { - n = len(spaces) - } - w.w.Write(spaces[:n]) - remain -= n - } - w.complete = false -} - -// TextMarshaler is a configurable text format marshaler. -type TextMarshaler struct { - Compact bool // use compact text format (one line). - ExpandAny bool // expand google.protobuf.Any messages of known types -} - -// Marshal writes a given protocol buffer in text format. -// The only errors returned are from w. -func (tm *TextMarshaler) Marshal(w io.Writer, pb Message) error { - val := reflect.ValueOf(pb) - if pb == nil || val.IsNil() { - w.Write([]byte("")) - return nil - } - var bw *bufio.Writer - ww, ok := w.(writer) - if !ok { - bw = bufio.NewWriter(w) - ww = bw - } - aw := &textWriter{ - w: ww, - complete: true, - compact: tm.Compact, - } - - if etm, ok := pb.(encoding.TextMarshaler); ok { - text, err := etm.MarshalText() - if err != nil { - return err - } - if _, err = aw.Write(text); err != nil { - return err - } - if bw != nil { - return bw.Flush() - } - return nil - } - // Dereference the received pointer so we don't have outer < and >. - v := reflect.Indirect(val) - if err := tm.writeStruct(aw, v); err != nil { - return err - } - if bw != nil { - return bw.Flush() - } - return nil -} - -// Text is the same as Marshal, but returns the string directly. -func (tm *TextMarshaler) Text(pb Message) string { - var buf bytes.Buffer - tm.Marshal(&buf, pb) - return buf.String() -} - -var ( - defaultTextMarshaler = TextMarshaler{} - compactTextMarshaler = TextMarshaler{Compact: true} -) - -// TODO: consider removing some of the Marshal functions below. - -// MarshalText writes a given protocol buffer in text format. -// The only errors returned are from w. -func MarshalText(w io.Writer, pb Message) error { return defaultTextMarshaler.Marshal(w, pb) } - -// MarshalTextString is the same as MarshalText, but returns the string directly. -func MarshalTextString(pb Message) string { return defaultTextMarshaler.Text(pb) } - -// CompactText writes a given protocol buffer in compact text format (one line). -func CompactText(w io.Writer, pb Message) error { return compactTextMarshaler.Marshal(w, pb) } - -// CompactTextString is the same as CompactText, but returns the string directly. -func CompactTextString(pb Message) string { return compactTextMarshaler.Text(pb) } diff --git a/vendor/github.com/golang/protobuf/proto/text_parser.go b/vendor/github.com/golang/protobuf/proto/text_parser.go deleted file mode 100644 index b5fba5b..0000000 --- a/vendor/github.com/golang/protobuf/proto/text_parser.go +++ /dev/null @@ -1,872 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. 
-// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -// Functions for parsing the Text protocol buffer format. -// TODO: message sets. - -import ( - "encoding" - "errors" - "fmt" - "reflect" - "strconv" - "strings" - "unicode/utf8" -) - -type ParseError struct { - Message string - Line int // 1-based line number - Offset int // 0-based byte offset from start of input -} - -func (p *ParseError) Error() string { - if p.Line == 1 { - // show offset only for first line - return fmt.Sprintf("line 1.%d: %v", p.Offset, p.Message) - } - return fmt.Sprintf("line %d: %v", p.Line, p.Message) -} - -type token struct { - value string - err *ParseError - line int // line number - offset int // byte number from start of input, not start of line - unquoted string // the unquoted version of value, if it was a quoted string -} - -func (t *token) String() string { - if t.err == nil { - return fmt.Sprintf("%q (line=%d, offset=%d)", t.value, t.line, t.offset) - } - return fmt.Sprintf("parse error: %v", t.err) -} - -type textParser struct { - s string // remaining input - done bool // whether the parsing is finished (success or error) - backed bool // whether back() was called - offset, line int - cur token -} - -func newTextParser(s string) *textParser { - p := new(textParser) - p.s = s - p.line = 1 - p.cur.line = 1 - return p -} - -func (p *textParser) errorf(format string, a ...interface{}) *ParseError { - pe := &ParseError{fmt.Sprintf(format, a...), p.cur.line, p.cur.offset} - p.cur.err = pe - p.done = true - return pe -} - -// Numbers and identifiers are matched by [-+._A-Za-z0-9] -func isIdentOrNumberChar(c byte) bool { - switch { - case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z': - return true - case '0' <= c && c <= '9': - return true - } - switch c { - case '-', '+', '.', '_': - return true - } - return false -} - -func isWhitespace(c byte) bool { - switch c { - case ' ', '\t', '\n', '\r': - return true - } - return false -} - -func isQuote(c byte) bool { - switch c { - case '"', '\'': - return true - } - 
return false -} - -func (p *textParser) skipWhitespace() { - i := 0 - for i < len(p.s) && (isWhitespace(p.s[i]) || p.s[i] == '#') { - if p.s[i] == '#' { - // comment; skip to end of line or input - for i < len(p.s) && p.s[i] != '\n' { - i++ - } - if i == len(p.s) { - break - } - } - if p.s[i] == '\n' { - p.line++ - } - i++ - } - p.offset += i - p.s = p.s[i:len(p.s)] - if len(p.s) == 0 { - p.done = true - } -} - -func (p *textParser) advance() { - // Skip whitespace - p.skipWhitespace() - if p.done { - return - } - - // Start of non-whitespace - p.cur.err = nil - p.cur.offset, p.cur.line = p.offset, p.line - p.cur.unquoted = "" - switch p.s[0] { - case '<', '>', '{', '}', ':', '[', ']', ';', ',', '/': - // Single symbol - p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)] - case '"', '\'': - // Quoted string - i := 1 - for i < len(p.s) && p.s[i] != p.s[0] && p.s[i] != '\n' { - if p.s[i] == '\\' && i+1 < len(p.s) { - // skip escaped char - i++ - } - i++ - } - if i >= len(p.s) || p.s[i] != p.s[0] { - p.errorf("unmatched quote") - return - } - unq, err := unquoteC(p.s[1:i], rune(p.s[0])) - if err != nil { - p.errorf("invalid quoted string %s: %v", p.s[0:i+1], err) - return - } - p.cur.value, p.s = p.s[0:i+1], p.s[i+1:len(p.s)] - p.cur.unquoted = unq - default: - i := 0 - for i < len(p.s) && isIdentOrNumberChar(p.s[i]) { - i++ - } - if i == 0 { - p.errorf("unexpected byte %#x", p.s[0]) - return - } - p.cur.value, p.s = p.s[0:i], p.s[i:len(p.s)] - } - p.offset += len(p.cur.value) -} - -var ( - errBadUTF8 = errors.New("proto: bad UTF-8") - errBadHex = errors.New("proto: bad hexadecimal") -) - -func unquoteC(s string, quote rune) (string, error) { - // This is based on C++'s tokenizer.cc. - // Despite its name, this is *not* parsing C syntax. - // For instance, "\0" is an invalid quoted string. - - // Avoid allocation in trivial cases. - simple := true - for _, r := range s { - if r == '\\' || r == quote { - simple = false - break - } - } - if simple { - return s, nil - } - - buf := make([]byte, 0, 3*len(s)/2) - for len(s) > 0 { - r, n := utf8.DecodeRuneInString(s) - if r == utf8.RuneError && n == 1 { - return "", errBadUTF8 - } - s = s[n:] - if r != '\\' { - if r < utf8.RuneSelf { - buf = append(buf, byte(r)) - } else { - buf = append(buf, string(r)...) - } - continue - } - - ch, tail, err := unescape(s) - if err != nil { - return "", err - } - buf = append(buf, ch...) 
- s = tail - } - return string(buf), nil -} - -func unescape(s string) (ch string, tail string, err error) { - r, n := utf8.DecodeRuneInString(s) - if r == utf8.RuneError && n == 1 { - return "", "", errBadUTF8 - } - s = s[n:] - switch r { - case 'a': - return "\a", s, nil - case 'b': - return "\b", s, nil - case 'f': - return "\f", s, nil - case 'n': - return "\n", s, nil - case 'r': - return "\r", s, nil - case 't': - return "\t", s, nil - case 'v': - return "\v", s, nil - case '?': - return "?", s, nil // trigraph workaround - case '\'', '"', '\\': - return string(r), s, nil - case '0', '1', '2', '3', '4', '5', '6', '7', 'x', 'X': - if len(s) < 2 { - return "", "", fmt.Errorf(`\%c requires 2 following digits`, r) - } - base := 8 - ss := s[:2] - s = s[2:] - if r == 'x' || r == 'X' { - base = 16 - } else { - ss = string(r) + ss - } - i, err := strconv.ParseUint(ss, base, 8) - if err != nil { - return "", "", err - } - return string([]byte{byte(i)}), s, nil - case 'u', 'U': - n := 4 - if r == 'U' { - n = 8 - } - if len(s) < n { - return "", "", fmt.Errorf(`\%c requires %d digits`, r, n) - } - - bs := make([]byte, n/2) - for i := 0; i < n; i += 2 { - a, ok1 := unhex(s[i]) - b, ok2 := unhex(s[i+1]) - if !ok1 || !ok2 { - return "", "", errBadHex - } - bs[i/2] = a<<4 | b - } - s = s[n:] - return string(bs), s, nil - } - return "", "", fmt.Errorf(`unknown escape \%c`, r) -} - -// Adapted from src/pkg/strconv/quote.go. -func unhex(b byte) (v byte, ok bool) { - switch { - case '0' <= b && b <= '9': - return b - '0', true - case 'a' <= b && b <= 'f': - return b - 'a' + 10, true - case 'A' <= b && b <= 'F': - return b - 'A' + 10, true - } - return 0, false -} - -// Back off the parser by one token. Can only be done between calls to next(). -// It makes the next advance() a no-op. -func (p *textParser) back() { p.backed = true } - -// Advances the parser and returns the new current token. -func (p *textParser) next() *token { - if p.backed || p.done { - p.backed = false - return &p.cur - } - p.advance() - if p.done { - p.cur.value = "" - } else if len(p.cur.value) > 0 && isQuote(p.cur.value[0]) { - // Look for multiple quoted strings separated by whitespace, - // and concatenate them. - cat := p.cur - for { - p.skipWhitespace() - if p.done || !isQuote(p.s[0]) { - break - } - p.advance() - if p.cur.err != nil { - return &p.cur - } - cat.value += " " + p.cur.value - cat.unquoted += p.cur.unquoted - } - p.done = false // parser may have seen EOF, but we want to return cat - p.cur = cat - } - return &p.cur -} - -func (p *textParser) consumeToken(s string) error { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value != s { - p.back() - return p.errorf("expected %q, found %q", s, tok.value) - } - return nil -} - -// Return a RequiredNotSetError indicating which required field was not set. -func (p *textParser) missingRequiredFieldError(sv reflect.Value) *RequiredNotSetError { - st := sv.Type() - sprops := GetProperties(st) - for i := 0; i < st.NumField(); i++ { - if !isNil(sv.Field(i)) { - continue - } - - props := sprops.Prop[i] - if props.Required { - return &RequiredNotSetError{fmt.Sprintf("%v.%v", st, props.OrigName)} - } - } - return &RequiredNotSetError{fmt.Sprintf("%v.", st)} // should not happen -} - -// Returns the index in the struct for the named field, as well as the parsed tag properties. 
-func structFieldByName(sprops *StructProperties, name string) (int, *Properties, bool) { - i, ok := sprops.decoderOrigNames[name] - if ok { - return i, sprops.Prop[i], true - } - return -1, nil, false -} - -// Consume a ':' from the input stream (if the next token is a colon), -// returning an error if a colon is needed but not present. -func (p *textParser) checkForColon(props *Properties, typ reflect.Type) *ParseError { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value != ":" { - // Colon is optional when the field is a group or message. - needColon := true - switch props.Wire { - case "group": - needColon = false - case "bytes": - // A "bytes" field is either a message, a string, or a repeated field; - // those three become *T, *string and []T respectively, so we can check for - // this field being a pointer to a non-string. - if typ.Kind() == reflect.Ptr { - // *T or *string - if typ.Elem().Kind() == reflect.String { - break - } - } else if typ.Kind() == reflect.Slice { - // []T or []*T - if typ.Elem().Kind() != reflect.Ptr { - break - } - } else if typ.Kind() == reflect.String { - // The proto3 exception is for a string field, - // which requires a colon. - break - } - needColon = false - } - if needColon { - return p.errorf("expected ':', found %q", tok.value) - } - p.back() - } - return nil -} - -func (p *textParser) readStruct(sv reflect.Value, terminator string) error { - st := sv.Type() - sprops := GetProperties(st) - reqCount := sprops.reqCount - var reqFieldErr error - fieldSet := make(map[string]bool) - // A struct is a sequence of "name: value", terminated by one of - // '>' or '}', or the end of the input. A name may also be - // "[extension]" or "[type/url]". - // - // The whole struct can also be an expanded Any message, like: - // [type/url] < ... struct contents ... > - for { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value == terminator { - break - } - if tok.value == "[" { - // Looks like an extension or an Any. - // - // TODO: Check whether we need to handle - // namespace rooted names (e.g. ".something.Foo"). - extName, err := p.consumeExtName() - if err != nil { - return err - } - - if s := strings.LastIndex(extName, "/"); s >= 0 { - // If it contains a slash, it's an Any type URL. - messageName := extName[s+1:] - mt := MessageType(messageName) - if mt == nil { - return p.errorf("unrecognized message %q in google.protobuf.Any", messageName) - } - tok = p.next() - if tok.err != nil { - return tok.err - } - // consume an optional colon - if tok.value == ":" { - tok = p.next() - if tok.err != nil { - return tok.err - } - } - var terminator string - switch tok.value { - case "<": - terminator = ">" - case "{": - terminator = "}" - default: - return p.errorf("expected '{' or '<', found %q", tok.value) - } - v := reflect.New(mt.Elem()) - if pe := p.readStruct(v.Elem(), terminator); pe != nil { - return pe - } - b, err := Marshal(v.Interface().(Message)) - if err != nil { - return p.errorf("failed to marshal message of type %q: %v", messageName, err) - } - sv.FieldByName("TypeUrl").SetString(extName) - sv.FieldByName("Value").SetBytes(b) - continue - } - - var desc *ExtensionDesc - // This could be faster, but it's functional. - // TODO: Do something smarter than a linear scan. 
- for _, d := range RegisteredExtensions(reflect.New(st).Interface().(Message)) { - if d.Name == extName { - desc = d - break - } - } - if desc == nil { - return p.errorf("unrecognized extension %q", extName) - } - - props := &Properties{} - props.Parse(desc.Tag) - - typ := reflect.TypeOf(desc.ExtensionType) - if err := p.checkForColon(props, typ); err != nil { - return err - } - - rep := desc.repeated() - - // Read the extension structure, and set it in - // the value we're constructing. - var ext reflect.Value - if !rep { - ext = reflect.New(typ).Elem() - } else { - ext = reflect.New(typ.Elem()).Elem() - } - if err := p.readAny(ext, props); err != nil { - if _, ok := err.(*RequiredNotSetError); !ok { - return err - } - reqFieldErr = err - } - ep := sv.Addr().Interface().(extendableProto) - if !rep { - SetExtension(ep, desc, ext.Interface()) - } else { - old, err := GetExtension(ep, desc) - var sl reflect.Value - if err == nil { - sl = reflect.ValueOf(old) // existing slice - } else { - sl = reflect.MakeSlice(typ, 0, 1) - } - sl = reflect.Append(sl, ext) - SetExtension(ep, desc, sl.Interface()) - } - if err := p.consumeOptionalSeparator(); err != nil { - return err - } - continue - } - - // This is a normal, non-extension field. - name := tok.value - var dst reflect.Value - fi, props, ok := structFieldByName(sprops, name) - if ok { - dst = sv.Field(fi) - } else if oop, ok := sprops.OneofTypes[name]; ok { - // It is a oneof. - props = oop.Prop - nv := reflect.New(oop.Type.Elem()) - dst = nv.Elem().Field(0) - sv.Field(oop.Field).Set(nv) - } - if !dst.IsValid() { - return p.errorf("unknown field name %q in %v", name, st) - } - - if dst.Kind() == reflect.Map { - // Consume any colon. - if err := p.checkForColon(props, dst.Type()); err != nil { - return err - } - - // Construct the map if it doesn't already exist. - if dst.IsNil() { - dst.Set(reflect.MakeMap(dst.Type())) - } - key := reflect.New(dst.Type().Key()).Elem() - val := reflect.New(dst.Type().Elem()).Elem() - - // The map entry should be this sequence of tokens: - // < key : KEY value : VALUE > - // Technically the "key" and "value" could come in any order, - // but in practice they won't. - - tok := p.next() - var terminator string - switch tok.value { - case "<": - terminator = ">" - case "{": - terminator = "}" - default: - return p.errorf("expected '{' or '<', found %q", tok.value) - } - if err := p.consumeToken("key"); err != nil { - return err - } - if err := p.consumeToken(":"); err != nil { - return err - } - if err := p.readAny(key, props.mkeyprop); err != nil { - return err - } - if err := p.consumeOptionalSeparator(); err != nil { - return err - } - if err := p.consumeToken("value"); err != nil { - return err - } - if err := p.checkForColon(props.mvalprop, dst.Type().Elem()); err != nil { - return err - } - if err := p.readAny(val, props.mvalprop); err != nil { - return err - } - if err := p.consumeOptionalSeparator(); err != nil { - return err - } - if err := p.consumeToken(terminator); err != nil { - return err - } - - dst.SetMapIndex(key, val) - continue - } - - // Check that it's not already set if it's not a repeated field. - if !props.Repeated && fieldSet[name] { - return p.errorf("non-repeated field %q was repeated", name) - } - - if err := p.checkForColon(props, dst.Type()); err != nil { - return err - } - - // Parse into the field. 
- fieldSet[name] = true - if err := p.readAny(dst, props); err != nil { - if _, ok := err.(*RequiredNotSetError); !ok { - return err - } - reqFieldErr = err - } - if props.Required { - reqCount-- - } - - if err := p.consumeOptionalSeparator(); err != nil { - return err - } - - } - - if reqCount > 0 { - return p.missingRequiredFieldError(sv) - } - return reqFieldErr -} - -// consumeExtName consumes extension name or expanded Any type URL and the -// following ']'. It returns the name or URL consumed. -func (p *textParser) consumeExtName() (string, error) { - tok := p.next() - if tok.err != nil { - return "", tok.err - } - - // If extension name or type url is quoted, it's a single token. - if len(tok.value) > 2 && isQuote(tok.value[0]) && tok.value[len(tok.value)-1] == tok.value[0] { - name, err := unquoteC(tok.value[1:len(tok.value)-1], rune(tok.value[0])) - if err != nil { - return "", err - } - return name, p.consumeToken("]") - } - - // Consume everything up to "]" - var parts []string - for tok.value != "]" { - parts = append(parts, tok.value) - tok = p.next() - if tok.err != nil { - return "", p.errorf("unrecognized type_url or extension name: %s", tok.err) - } - } - return strings.Join(parts, ""), nil -} - -// consumeOptionalSeparator consumes an optional semicolon or comma. -// It is used in readStruct to provide backward compatibility. -func (p *textParser) consumeOptionalSeparator() error { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value != ";" && tok.value != "," { - p.back() - } - return nil -} - -func (p *textParser) readAny(v reflect.Value, props *Properties) error { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value == "" { - return p.errorf("unexpected EOF") - } - - switch fv := v; fv.Kind() { - case reflect.Slice: - at := v.Type() - if at.Elem().Kind() == reflect.Uint8 { - // Special case for []byte - if tok.value[0] != '"' && tok.value[0] != '\'' { - // Deliberately written out here, as the error after - // this switch statement would write "invalid []byte: ...", - // which is not as user-friendly. - return p.errorf("invalid string: %v", tok.value) - } - bytes := []byte(tok.unquoted) - fv.Set(reflect.ValueOf(bytes)) - return nil - } - // Repeated field. - if tok.value == "[" { - // Repeated field with list notation, like [1,2,3]. - for { - fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem())) - err := p.readAny(fv.Index(fv.Len()-1), props) - if err != nil { - return err - } - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value == "]" { - break - } - if tok.value != "," { - return p.errorf("Expected ']' or ',' found %q", tok.value) - } - } - return nil - } - // One value of the repeated field. - p.back() - fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem())) - return p.readAny(fv.Index(fv.Len()-1), props) - case reflect.Bool: - // Either "true", "false", 1 or 0. - switch tok.value { - case "true", "1": - fv.SetBool(true) - return nil - case "false", "0": - fv.SetBool(false) - return nil - } - case reflect.Float32, reflect.Float64: - v := tok.value - // Ignore 'f' for compatibility with output generated by C++, but don't - // remove 'f' when the value is "-inf" or "inf". 
- if strings.HasSuffix(v, "f") && tok.value != "-inf" && tok.value != "inf" { - v = v[:len(v)-1] - } - if f, err := strconv.ParseFloat(v, fv.Type().Bits()); err == nil { - fv.SetFloat(f) - return nil - } - case reflect.Int32: - if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil { - fv.SetInt(x) - return nil - } - - if len(props.Enum) == 0 { - break - } - m, ok := enumValueMaps[props.Enum] - if !ok { - break - } - x, ok := m[tok.value] - if !ok { - break - } - fv.SetInt(int64(x)) - return nil - case reflect.Int64: - if x, err := strconv.ParseInt(tok.value, 0, 64); err == nil { - fv.SetInt(x) - return nil - } - - case reflect.Ptr: - // A basic field (indirected through pointer), or a repeated message/group - p.back() - fv.Set(reflect.New(fv.Type().Elem())) - return p.readAny(fv.Elem(), props) - case reflect.String: - if tok.value[0] == '"' || tok.value[0] == '\'' { - fv.SetString(tok.unquoted) - return nil - } - case reflect.Struct: - var terminator string - switch tok.value { - case "{": - terminator = "}" - case "<": - terminator = ">" - default: - return p.errorf("expected '{' or '<', found %q", tok.value) - } - // TODO: Handle nested messages which implement encoding.TextUnmarshaler. - return p.readStruct(fv, terminator) - case reflect.Uint32: - if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil { - fv.SetUint(uint64(x)) - return nil - } - case reflect.Uint64: - if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil { - fv.SetUint(x) - return nil - } - } - return p.errorf("invalid %v: %v", v.Type(), tok.value) -} - -// UnmarshalText reads a protocol buffer in Text format. UnmarshalText resets pb -// before starting to unmarshal, so any existing data in pb is always removed. -// If a required field is not set and no other error occurs, -// UnmarshalText returns *RequiredNotSetError. -func UnmarshalText(s string, pb Message) error { - if um, ok := pb.(encoding.TextUnmarshaler); ok { - err := um.UnmarshalText([]byte(s)) - return err - } - pb.Reset() - v := reflect.ValueOf(pb) - if pe := newTextParser(s).readStruct(v.Elem(), ""); pe != nil { - return pe - } - return nil -} diff --git a/vendor/github.com/google/go-github/github/activity.go b/vendor/github.com/google/go-github/github/activity.go deleted file mode 100644 index 355de62..0000000 --- a/vendor/github.com/google/go-github/github/activity.go +++ /dev/null @@ -1,14 +0,0 @@ -// Copyright 2013 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -// ActivityService handles communication with the activity related -// methods of the GitHub API. -// -// GitHub API docs: http://developer.github.com/v3/activity/ -type ActivityService struct { - client *Client -} diff --git a/vendor/github.com/google/go-github/github/activity_events.go b/vendor/github.com/google/go-github/github/activity_events.go deleted file mode 100644 index 2a40d3e..0000000 --- a/vendor/github.com/google/go-github/github/activity_events.go +++ /dev/null @@ -1,305 +0,0 @@ -// Copyright 2013 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "encoding/json" - "fmt" - "time" -) - -// Event represents a GitHub event. 
-type Event struct { - Type *string `json:"type,omitempty"` - Public *bool `json:"public"` - RawPayload *json.RawMessage `json:"payload,omitempty"` - Repo *Repository `json:"repo,omitempty"` - Actor *User `json:"actor,omitempty"` - Org *Organization `json:"org,omitempty"` - CreatedAt *time.Time `json:"created_at,omitempty"` - ID *string `json:"id,omitempty"` -} - -func (e Event) String() string { - return Stringify(e) -} - -// Payload returns the parsed event payload. For recognized event types -// (PushEvent), a value of the corresponding struct type will be returned. -func (e *Event) Payload() (payload interface{}) { - switch *e.Type { - case "PushEvent": - payload = &PushEvent{} - } - if err := json.Unmarshal(*e.RawPayload, &payload); err != nil { - panic(err.Error()) - } - return payload -} - -// PushEvent represents a git push to a GitHub repository. -// -// GitHub API docs: http://developer.github.com/v3/activity/events/types/#pushevent -type PushEvent struct { - PushID *int `json:"push_id,omitempty"` - Head *string `json:"head,omitempty"` - Ref *string `json:"ref,omitempty"` - Size *int `json:"size,omitempty"` - Commits []PushEventCommit `json:"commits,omitempty"` - Repo *Repository `json:"repository,omitempty"` -} - -func (p PushEvent) String() string { - return Stringify(p) -} - -// PushEventCommit represents a git commit in a GitHub PushEvent. -type PushEventCommit struct { - SHA *string `json:"sha,omitempty"` - Message *string `json:"message,omitempty"` - Author *CommitAuthor `json:"author,omitempty"` - URL *string `json:"url,omitempty"` - Distinct *bool `json:"distinct,omitempty"` - Added []string `json:"added,omitempty"` - Removed []string `json:"removed,omitempty"` - Modified []string `json:"modified,omitempty"` -} - -func (p PushEventCommit) String() string { - return Stringify(p) -} - -//PullRequestEvent represents the payload delivered by PullRequestEvent webhook -type PullRequestEvent struct { - Action *string `json:"action,omitempty"` - Number *int `json:"number,omitempty"` - PullRequest *PullRequest `json:"pull_request,omitempty"` - Repo *Repository `json:"repository,omitempty"` - Sender *User `json:"sender,omitempty"` -} - -// IssueActivityEvent represents the payload delivered by Issue webhook -type IssueActivityEvent struct { - Action *string `json:"action,omitempty"` - Issue *Issue `json:"issue,omitempty"` - Repo *Repository `json:"repository,omitempty"` - Sender *User `json:"sender,omitempty"` -} - -// IssueCommentEvent represents the payload delivered by IssueComment webhook -// -// This webhook also gets fired for comments on pull requests -type IssueCommentEvent struct { - Action *string `json:"action,omitempty"` - Issue *Issue `json:"issue,omitempty"` - Comment *IssueComment `json:"comment,omitempty"` - Repo *Repository `json:"repository,omitempty"` - Sender *User `json:"sender,omitempty"` -} - -// ListEvents drinks from the firehose of all public events across GitHub. -// -// GitHub API docs: http://developer.github.com/v3/activity/events/#list-public-events -func (s *ActivityService) ListEvents(opt *ListOptions) ([]Event, *Response, error) { - u, err := addOptions("events", opt) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - events := new([]Event) - resp, err := s.client.Do(req, events) - if err != nil { - return nil, resp, err - } - - return *events, resp, err -} - -// ListRepositoryEvents lists events for a repository. 
-// -// GitHub API docs: http://developer.github.com/v3/activity/events/#list-repository-events -func (s *ActivityService) ListRepositoryEvents(owner, repo string, opt *ListOptions) ([]Event, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/events", owner, repo) - u, err := addOptions(u, opt) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - events := new([]Event) - resp, err := s.client.Do(req, events) - if err != nil { - return nil, resp, err - } - - return *events, resp, err -} - -// ListIssueEventsForRepository lists issue events for a repository. -// -// GitHub API docs: http://developer.github.com/v3/activity/events/#list-issue-events-for-a-repository -func (s *ActivityService) ListIssueEventsForRepository(owner, repo string, opt *ListOptions) ([]Event, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/issues/events", owner, repo) - u, err := addOptions(u, opt) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - events := new([]Event) - resp, err := s.client.Do(req, events) - if err != nil { - return nil, resp, err - } - - return *events, resp, err -} - -// ListEventsForRepoNetwork lists public events for a network of repositories. -// -// GitHub API docs: http://developer.github.com/v3/activity/events/#list-public-events-for-a-network-of-repositories -func (s *ActivityService) ListEventsForRepoNetwork(owner, repo string, opt *ListOptions) ([]Event, *Response, error) { - u := fmt.Sprintf("networks/%v/%v/events", owner, repo) - u, err := addOptions(u, opt) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - events := new([]Event) - resp, err := s.client.Do(req, events) - if err != nil { - return nil, resp, err - } - - return *events, resp, err -} - -// ListEventsForOrganization lists public events for an organization. -// -// GitHub API docs: http://developer.github.com/v3/activity/events/#list-public-events-for-an-organization -func (s *ActivityService) ListEventsForOrganization(org string, opt *ListOptions) ([]Event, *Response, error) { - u := fmt.Sprintf("orgs/%v/events", org) - u, err := addOptions(u, opt) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - events := new([]Event) - resp, err := s.client.Do(req, events) - if err != nil { - return nil, resp, err - } - - return *events, resp, err -} - -// ListEventsPerformedByUser lists the events performed by a user. If publicOnly is -// true, only public events will be returned. -// -// GitHub API docs: http://developer.github.com/v3/activity/events/#list-events-performed-by-a-user -func (s *ActivityService) ListEventsPerformedByUser(user string, publicOnly bool, opt *ListOptions) ([]Event, *Response, error) { - var u string - if publicOnly { - u = fmt.Sprintf("users/%v/events/public", user) - } else { - u = fmt.Sprintf("users/%v/events", user) - } - u, err := addOptions(u, opt) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - events := new([]Event) - resp, err := s.client.Do(req, events) - if err != nil { - return nil, resp, err - } - - return *events, resp, err -} - -// ListEventsReceivedByUser lists the events received by a user. 
If publicOnly is -// true, only public events will be returned. -// -// GitHub API docs: http://developer.github.com/v3/activity/events/#list-events-that-a-user-has-received -func (s *ActivityService) ListEventsReceivedByUser(user string, publicOnly bool, opt *ListOptions) ([]Event, *Response, error) { - var u string - if publicOnly { - u = fmt.Sprintf("users/%v/received_events/public", user) - } else { - u = fmt.Sprintf("users/%v/received_events", user) - } - u, err := addOptions(u, opt) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - events := new([]Event) - resp, err := s.client.Do(req, events) - if err != nil { - return nil, resp, err - } - - return *events, resp, err -} - -// ListUserEventsForOrganization provides the user’s organization dashboard. You -// must be authenticated as the user to view this. -// -// GitHub API docs: http://developer.github.com/v3/activity/events/#list-events-for-an-organization -func (s *ActivityService) ListUserEventsForOrganization(org, user string, opt *ListOptions) ([]Event, *Response, error) { - u := fmt.Sprintf("users/%v/events/orgs/%v", user, org) - u, err := addOptions(u, opt) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - events := new([]Event) - resp, err := s.client.Do(req, events) - if err != nil { - return nil, resp, err - } - - return *events, resp, err -} diff --git a/vendor/github.com/google/go-github/github/activity_notifications.go b/vendor/github.com/google/go-github/github/activity_notifications.go deleted file mode 100644 index 290b954..0000000 --- a/vendor/github.com/google/go-github/github/activity_notifications.go +++ /dev/null @@ -1,225 +0,0 @@ -// Copyright 2014 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "fmt" - "time" -) - -// Notification identifies a GitHub notification for a user. -type Notification struct { - ID *string `json:"id,omitempty"` - Repository *Repository `json:"repository,omitempty"` - Subject *NotificationSubject `json:"subject,omitempty"` - - // Reason identifies the event that triggered the notification. - // - // GitHub API Docs: https://developer.github.com/v3/activity/notifications/#notification-reasons - Reason *string `json:"reason,omitempty"` - - Unread *bool `json:"unread,omitempty"` - UpdatedAt *time.Time `json:"updated_at,omitempty"` - LastReadAt *time.Time `json:"last_read_at,omitempty"` - URL *string `json:"url,omitempty"` -} - -// NotificationSubject identifies the subject of a notification. -type NotificationSubject struct { - Title *string `json:"title,omitempty"` - URL *string `json:"url,omitempty"` - LatestCommentURL *string `json:"latest_comment_url,omitempty"` - Type *string `json:"type,omitempty"` -} - -// NotificationListOptions specifies the optional parameters to the -// ActivityService.ListNotifications method. -type NotificationListOptions struct { - All bool `url:"all,omitempty"` - Participating bool `url:"participating,omitempty"` - Since time.Time `url:"since,omitempty"` - Before time.Time `url:"before,omitempty"` -} - -// ListNotifications lists all notifications for the authenticated user. 
-// -// GitHub API Docs: https://developer.github.com/v3/activity/notifications/#list-your-notifications -func (s *ActivityService) ListNotifications(opt *NotificationListOptions) ([]Notification, *Response, error) { - u := fmt.Sprintf("notifications") - u, err := addOptions(u, opt) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - var notifications []Notification - resp, err := s.client.Do(req, ¬ifications) - if err != nil { - return nil, resp, err - } - - return notifications, resp, err -} - -// ListRepositoryNotifications lists all notifications in a given repository -// for the authenticated user. -// -// GitHub API Docs: https://developer.github.com/v3/activity/notifications/#list-your-notifications-in-a-repository -func (s *ActivityService) ListRepositoryNotifications(owner, repo string, opt *NotificationListOptions) ([]Notification, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/notifications", owner, repo) - u, err := addOptions(u, opt) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - var notifications []Notification - resp, err := s.client.Do(req, ¬ifications) - if err != nil { - return nil, resp, err - } - - return notifications, resp, err -} - -type markReadOptions struct { - LastReadAt time.Time `url:"last_read_at,omitempty"` -} - -// MarkNotificationsRead marks all notifications up to lastRead as read. -// -// GitHub API Docs: https://developer.github.com/v3/activity/notifications/#mark-as-read -func (s *ActivityService) MarkNotificationsRead(lastRead time.Time) (*Response, error) { - u := fmt.Sprintf("notifications") - u, err := addOptions(u, markReadOptions{lastRead}) - if err != nil { - return nil, err - } - - req, err := s.client.NewRequest("PUT", u, nil) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) -} - -// MarkRepositoryNotificationsRead marks all notifications up to lastRead in -// the specified repository as read. -// -// GitHub API Docs: https://developer.github.com/v3/activity/notifications/#mark-notifications-as-read-in-a-repository -func (s *ActivityService) MarkRepositoryNotificationsRead(owner, repo string, lastRead time.Time) (*Response, error) { - u := fmt.Sprintf("repos/%v/%v/notifications", owner, repo) - u, err := addOptions(u, markReadOptions{lastRead}) - if err != nil { - return nil, err - } - - req, err := s.client.NewRequest("PUT", u, nil) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) -} - -// GetThread gets the specified notification thread. -// -// GitHub API Docs: https://developer.github.com/v3/activity/notifications/#view-a-single-thread -func (s *ActivityService) GetThread(id string) (*Notification, *Response, error) { - u := fmt.Sprintf("notifications/threads/%v", id) - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - notification := new(Notification) - resp, err := s.client.Do(req, notification) - if err != nil { - return nil, resp, err - } - - return notification, resp, err -} - -// MarkThreadRead marks the specified thread as read. 
-// -// GitHub API Docs: https://developer.github.com/v3/activity/notifications/#mark-a-thread-as-read -func (s *ActivityService) MarkThreadRead(id string) (*Response, error) { - u := fmt.Sprintf("notifications/threads/%v", id) - - req, err := s.client.NewRequest("PATCH", u, nil) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) -} - -// GetThreadSubscription checks to see if the authenticated user is subscribed -// to a thread. -// -// GitHub API Docs: https://developer.github.com/v3/activity/notifications/#get-a-thread-subscription -func (s *ActivityService) GetThreadSubscription(id string) (*Subscription, *Response, error) { - u := fmt.Sprintf("notifications/threads/%v/subscription", id) - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - sub := new(Subscription) - resp, err := s.client.Do(req, sub) - if err != nil { - return nil, resp, err - } - - return sub, resp, err -} - -// SetThreadSubscription sets the subscription for the specified thread for the -// authenticated user. -// -// GitHub API Docs: https://developer.github.com/v3/activity/notifications/#set-a-thread-subscription -func (s *ActivityService) SetThreadSubscription(id string, subscription *Subscription) (*Subscription, *Response, error) { - u := fmt.Sprintf("notifications/threads/%v/subscription", id) - - req, err := s.client.NewRequest("PUT", u, subscription) - if err != nil { - return nil, nil, err - } - - sub := new(Subscription) - resp, err := s.client.Do(req, sub) - if err != nil { - return nil, resp, err - } - - return sub, resp, err -} - -// DeleteThreadSubscription deletes the subscription for the specified thread -// for the authenticated user. -// -// GitHub API Docs: https://developer.github.com/v3/activity/notifications/#delete-a-thread-subscription -func (s *ActivityService) DeleteThreadSubscription(id string) (*Response, error) { - u := fmt.Sprintf("notifications/threads/%v/subscription", id) - req, err := s.client.NewRequest("DELETE", u, nil) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) -} diff --git a/vendor/github.com/google/go-github/github/activity_star.go b/vendor/github.com/google/go-github/github/activity_star.go deleted file mode 100644 index fac4f41..0000000 --- a/vendor/github.com/google/go-github/github/activity_star.go +++ /dev/null @@ -1,123 +0,0 @@ -// Copyright 2013 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import "fmt" - -// StarredRepository is returned by ListStarred. -type StarredRepository struct { - StarredAt *Timestamp `json:"starred_at,omitempty"` - Repository *Repository `json:"repo,omitempty"` -} - -// ListStargazers lists people who have starred the specified repo. -// -// GitHub API Docs: https://developer.github.com/v3/activity/starring/#list-stargazers -func (s *ActivityService) ListStargazers(owner, repo string, opt *ListOptions) ([]User, *Response, error) { - u := fmt.Sprintf("repos/%s/%s/stargazers", owner, repo) - u, err := addOptions(u, opt) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - stargazers := new([]User) - resp, err := s.client.Do(req, stargazers) - if err != nil { - return nil, resp, err - } - - return *stargazers, resp, err -} - -// ActivityListStarredOptions specifies the optional parameters to the -// ActivityService.ListStarred method. 
-type ActivityListStarredOptions struct { - // How to sort the repository list. Possible values are: created, updated, - // pushed, full_name. Default is "full_name". - Sort string `url:"sort,omitempty"` - - // Direction in which to sort repositories. Possible values are: asc, desc. - // Default is "asc" when sort is "full_name", otherwise default is "desc". - Direction string `url:"direction,omitempty"` - - ListOptions -} - -// ListStarred lists all the repos starred by a user. Passing the empty string -// will list the starred repositories for the authenticated user. -// -// GitHub API docs: http://developer.github.com/v3/activity/starring/#list-repositories-being-starred -func (s *ActivityService) ListStarred(user string, opt *ActivityListStarredOptions) ([]StarredRepository, *Response, error) { - var u string - if user != "" { - u = fmt.Sprintf("users/%v/starred", user) - } else { - u = "user/starred" - } - u, err := addOptions(u, opt) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept header when this API fully launches - req.Header.Set("Accept", mediaTypeStarringPreview) - - repos := new([]StarredRepository) - resp, err := s.client.Do(req, repos) - if err != nil { - return nil, resp, err - } - - return *repos, resp, err -} - -// IsStarred checks if a repository is starred by authenticated user. -// -// GitHub API docs: https://developer.github.com/v3/activity/starring/#check-if-you-are-starring-a-repository -func (s *ActivityService) IsStarred(owner, repo string) (bool, *Response, error) { - u := fmt.Sprintf("user/starred/%v/%v", owner, repo) - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return false, nil, err - } - resp, err := s.client.Do(req, nil) - starred, err := parseBoolResponse(err) - return starred, resp, err -} - -// Star a repository as the authenticated user. -// -// GitHub API docs: https://developer.github.com/v3/activity/starring/#star-a-repository -func (s *ActivityService) Star(owner, repo string) (*Response, error) { - u := fmt.Sprintf("user/starred/%v/%v", owner, repo) - req, err := s.client.NewRequest("PUT", u, nil) - if err != nil { - return nil, err - } - return s.client.Do(req, nil) -} - -// Unstar a repository as the authenticated user. -// -// GitHub API docs: https://developer.github.com/v3/activity/starring/#unstar-a-repository -func (s *ActivityService) Unstar(owner, repo string) (*Response, error) { - u := fmt.Sprintf("user/starred/%v/%v", owner, repo) - req, err := s.client.NewRequest("DELETE", u, nil) - if err != nil { - return nil, err - } - return s.client.Do(req, nil) -} diff --git a/vendor/github.com/google/go-github/github/activity_watching.go b/vendor/github.com/google/go-github/github/activity_watching.go deleted file mode 100644 index 150cf66..0000000 --- a/vendor/github.com/google/go-github/github/activity_watching.go +++ /dev/null @@ -1,131 +0,0 @@ -// Copyright 2014 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import "fmt" - -// Subscription identifies a repository or thread subscription. 
-type Subscription struct { - Subscribed *bool `json:"subscribed,omitempty"` - Ignored *bool `json:"ignored,omitempty"` - Reason *string `json:"reason,omitempty"` - CreatedAt *Timestamp `json:"created_at,omitempty"` - URL *string `json:"url,omitempty"` - - // only populated for repository subscriptions - RepositoryURL *string `json:"repository_url,omitempty"` - - // only populated for thread subscriptions - ThreadURL *string `json:"thread_url,omitempty"` -} - -// ListWatchers lists watchers of a particular repo. -// -// GitHub API Docs: http://developer.github.com/v3/activity/watching/#list-watchers -func (s *ActivityService) ListWatchers(owner, repo string, opt *ListOptions) ([]User, *Response, error) { - u := fmt.Sprintf("repos/%s/%s/subscribers", owner, repo) - u, err := addOptions(u, opt) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - watchers := new([]User) - resp, err := s.client.Do(req, watchers) - if err != nil { - return nil, resp, err - } - - return *watchers, resp, err -} - -// ListWatched lists the repositories the specified user is watching. Passing -// the empty string will fetch watched repos for the authenticated user. -// -// GitHub API Docs: https://developer.github.com/v3/activity/watching/#list-repositories-being-watched -func (s *ActivityService) ListWatched(user string) ([]Repository, *Response, error) { - var u string - if user != "" { - u = fmt.Sprintf("users/%v/subscriptions", user) - } else { - u = "user/subscriptions" - } - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - watched := new([]Repository) - resp, err := s.client.Do(req, watched) - if err != nil { - return nil, resp, err - } - - return *watched, resp, err -} - -// GetRepositorySubscription returns the subscription for the specified -// repository for the authenticated user. If the authenticated user is not -// watching the repository, a nil Subscription is returned. -// -// GitHub API Docs: https://developer.github.com/v3/activity/watching/#get-a-repository-subscription -func (s *ActivityService) GetRepositorySubscription(owner, repo string) (*Subscription, *Response, error) { - u := fmt.Sprintf("repos/%s/%s/subscription", owner, repo) - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - sub := new(Subscription) - resp, err := s.client.Do(req, sub) - if err != nil { - // if it's just a 404, don't return that as an error - _, err = parseBoolResponse(err) - return nil, resp, err - } - - return sub, resp, err -} - -// SetRepositorySubscription sets the subscription for the specified repository -// for the authenticated user. -// -// GitHub API Docs: https://developer.github.com/v3/activity/watching/#set-a-repository-subscription -func (s *ActivityService) SetRepositorySubscription(owner, repo string, subscription *Subscription) (*Subscription, *Response, error) { - u := fmt.Sprintf("repos/%s/%s/subscription", owner, repo) - - req, err := s.client.NewRequest("PUT", u, subscription) - if err != nil { - return nil, nil, err - } - - sub := new(Subscription) - resp, err := s.client.Do(req, sub) - if err != nil { - return nil, resp, err - } - - return sub, resp, err -} - -// DeleteRepositorySubscription deletes the subscription for the specified -// repository for the authenticated user. 
-// -// GitHub API Docs: https://developer.github.com/v3/activity/watching/#delete-a-repository-subscription -func (s *ActivityService) DeleteRepositorySubscription(owner, repo string) (*Response, error) { - u := fmt.Sprintf("repos/%s/%s/subscription", owner, repo) - req, err := s.client.NewRequest("DELETE", u, nil) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) -} diff --git a/vendor/github.com/google/go-github/github/doc.go b/vendor/github.com/google/go-github/github/doc.go deleted file mode 100644 index b4ac8e6..0000000 --- a/vendor/github.com/google/go-github/github/doc.go +++ /dev/null @@ -1,126 +0,0 @@ -// Copyright 2013 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -/* -Package github provides a client for using the GitHub API. - -Construct a new GitHub client, then use the various services on the client to -access different parts of the GitHub API. For example: - - client := github.NewClient(nil) - - // list all organizations for user "willnorris" - orgs, _, err := client.Organizations.List("willnorris", nil) - -Set optional parameters for an API method by passing an Options object. - - // list recently updated repositories for org "github" - opt := &github.RepositoryListByOrgOptions{Sort: "updated"} - repos, _, err := client.Repositories.ListByOrg("github", opt) - -The services of a client divide the API into logical chunks and correspond to -the structure of the GitHub API documentation at -http://developer.github.com/v3/. - -Authentication - -The go-github library does not directly handle authentication. Instead, when -creating a new client, pass an http.Client that can handle authentication for -you. The easiest and recommended way to do this is using the golang.org/x/oauth2 -library, but you can always use any other library that provides an http.Client. -If you have an OAuth2 access token (for example, a personal API token), you can -use it with the oauth2 library using: - - import "golang.org/x/oauth2" - - func main() { - ts := oauth2.StaticTokenSource( - &oauth2.Token{AccessToken: "... your access token ..."}, - ) - tc := oauth2.NewClient(oauth2.NoContext, ts) - - client := github.NewClient(tc) - - // list all repositories for the authenticated user - repos, _, err := client.Repositories.List("", nil) - } - -Note that when using an authenticated Client, all calls made by the client will -include the specified OAuth token. Therefore, authenticated clients should -almost never be shared between different users. - -Rate Limiting - -GitHub imposes a rate limit on all API clients. Unauthenticated clients are -limited to 60 requests per hour, while authenticated clients can make up to -5,000 requests per hour. To receive the higher rate limit when making calls -that are not issued on behalf of a user, use the -UnauthenticatedRateLimitedTransport. - -The Rate field on a client tracks the rate limit information based on the most -recent API call. This is updated on every call, but may be out of date if it's -been some time since the last API call and other clients have made subsequent -requests since then. You can always call RateLimit() directly to get the most -up-to-date rate limit data for the client. - -Learn more about GitHub rate limiting at -http://developer.github.com/v3/#rate-limiting. 
- -Conditional Requests - -The GitHub API has good support for conditional requests which will help -prevent you from burning through your rate limit, as well as help speed up your -application. go-github does not handle conditional requests directly, but is -instead designed to work with a caching http.Transport. We recommend using -https://github.com/gregjones/httpcache, which can be used in conjuction with -https://github.com/sourcegraph/apiproxy to provide additional flexibility and -control of caching rules. - -Learn more about GitHub conditional requests at -https://developer.github.com/v3/#conditional-requests. - -Creating and Updating Resources - -All structs for GitHub resources use pointer values for all non-repeated fields. -This allows distinguishing between unset fields and those set to a zero-value. -Helper functions have been provided to easily create these pointers for string, -bool, and int values. For example: - - // create a new private repository named "foo" - repo := &github.Repository{ - Name: github.String("foo"), - Private: github.Bool(true), - } - client.Repositories.Create("", repo) - -Users who have worked with protocol buffers should find this pattern familiar. - -Pagination - -All requests for resource collections (repos, pull requests, issues, etc) -support pagination. Pagination options are described in the -ListOptions struct and passed to the list methods directly or as an -embedded type of a more specific list options struct (for example -PullRequestListOptions). Pages information is available via Response struct. - - opt := &github.RepositoryListByOrgOptions{ - ListOptions: github.ListOptions{PerPage: 10}, - } - // get all pages of results - var allRepos []github.Repository - for { - repos, resp, err := client.Repositories.ListByOrg("github", opt) - if err != nil { - return err - } - allRepos = append(allRepos, repos...) - if resp.NextPage == 0 { - break - } - opt.ListOptions.Page = resp.NextPage - } - -*/ -package github diff --git a/vendor/github.com/google/go-github/github/gists.go b/vendor/github.com/google/go-github/github/gists.go deleted file mode 100644 index a662d35..0000000 --- a/vendor/github.com/google/go-github/github/gists.go +++ /dev/null @@ -1,281 +0,0 @@ -// Copyright 2013 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "fmt" - "time" -) - -// GistsService handles communication with the Gist related -// methods of the GitHub API. -// -// GitHub API docs: http://developer.github.com/v3/gists/ -type GistsService struct { - client *Client -} - -// Gist represents a GitHub's gist. -type Gist struct { - ID *string `json:"id,omitempty"` - Description *string `json:"description,omitempty"` - Public *bool `json:"public,omitempty"` - Owner *User `json:"owner,omitempty"` - Files map[GistFilename]GistFile `json:"files,omitempty"` - Comments *int `json:"comments,omitempty"` - HTMLURL *string `json:"html_url,omitempty"` - GitPullURL *string `json:"git_pull_url,omitempty"` - GitPushURL *string `json:"git_push_url,omitempty"` - CreatedAt *time.Time `json:"created_at,omitempty"` - UpdatedAt *time.Time `json:"updated_at,omitempty"` -} - -func (g Gist) String() string { - return Stringify(g) -} - -// GistFilename represents filename on a gist. -type GistFilename string - -// GistFile represents a file on a gist. 
-type GistFile struct { - Size *int `json:"size,omitempty"` - Filename *string `json:"filename,omitempty"` - RawURL *string `json:"raw_url,omitempty"` - Content *string `json:"content,omitempty"` -} - -func (g GistFile) String() string { - return Stringify(g) -} - -// GistListOptions specifies the optional parameters to the -// GistsService.List, GistsService.ListAll, and GistsService.ListStarred methods. -type GistListOptions struct { - // Since filters Gists by time. - Since time.Time `url:"since,omitempty"` - - ListOptions -} - -// List gists for a user. Passing the empty string will list -// all public gists if called anonymously. However, if the call -// is authenticated, it will returns all gists for the authenticated -// user. -// -// GitHub API docs: http://developer.github.com/v3/gists/#list-gists -func (s *GistsService) List(user string, opt *GistListOptions) ([]Gist, *Response, error) { - var u string - if user != "" { - u = fmt.Sprintf("users/%v/gists", user) - } else { - u = "gists" - } - u, err := addOptions(u, opt) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - gists := new([]Gist) - resp, err := s.client.Do(req, gists) - if err != nil { - return nil, resp, err - } - - return *gists, resp, err -} - -// ListAll lists all public gists. -// -// GitHub API docs: http://developer.github.com/v3/gists/#list-gists -func (s *GistsService) ListAll(opt *GistListOptions) ([]Gist, *Response, error) { - u, err := addOptions("gists/public", opt) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - gists := new([]Gist) - resp, err := s.client.Do(req, gists) - if err != nil { - return nil, resp, err - } - - return *gists, resp, err -} - -// ListStarred lists starred gists of authenticated user. -// -// GitHub API docs: http://developer.github.com/v3/gists/#list-gists -func (s *GistsService) ListStarred(opt *GistListOptions) ([]Gist, *Response, error) { - u, err := addOptions("gists/starred", opt) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - gists := new([]Gist) - resp, err := s.client.Do(req, gists) - if err != nil { - return nil, resp, err - } - - return *gists, resp, err -} - -// Get a single gist. -// -// GitHub API docs: http://developer.github.com/v3/gists/#get-a-single-gist -func (s *GistsService) Get(id string) (*Gist, *Response, error) { - u := fmt.Sprintf("gists/%v", id) - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - gist := new(Gist) - resp, err := s.client.Do(req, gist) - if err != nil { - return nil, resp, err - } - - return gist, resp, err -} - -// GetRevision gets a specific revision of a gist. -// -// GitHub API docs: https://developer.github.com/v3/gists/#get-a-specific-revision-of-a-gist -func (s *GistsService) GetRevision(id, sha string) (*Gist, *Response, error) { - u := fmt.Sprintf("gists/%v/%v", id, sha) - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - gist := new(Gist) - resp, err := s.client.Do(req, gist) - if err != nil { - return nil, resp, err - } - - return gist, resp, err -} - -// Create a gist for authenticated user. 
-// -// GitHub API docs: http://developer.github.com/v3/gists/#create-a-gist -func (s *GistsService) Create(gist *Gist) (*Gist, *Response, error) { - u := "gists" - req, err := s.client.NewRequest("POST", u, gist) - if err != nil { - return nil, nil, err - } - g := new(Gist) - resp, err := s.client.Do(req, g) - if err != nil { - return nil, resp, err - } - - return g, resp, err -} - -// Edit a gist. -// -// GitHub API docs: http://developer.github.com/v3/gists/#edit-a-gist -func (s *GistsService) Edit(id string, gist *Gist) (*Gist, *Response, error) { - u := fmt.Sprintf("gists/%v", id) - req, err := s.client.NewRequest("PATCH", u, gist) - if err != nil { - return nil, nil, err - } - g := new(Gist) - resp, err := s.client.Do(req, g) - if err != nil { - return nil, resp, err - } - - return g, resp, err -} - -// Delete a gist. -// -// GitHub API docs: http://developer.github.com/v3/gists/#delete-a-gist -func (s *GistsService) Delete(id string) (*Response, error) { - u := fmt.Sprintf("gists/%v", id) - req, err := s.client.NewRequest("DELETE", u, nil) - if err != nil { - return nil, err - } - return s.client.Do(req, nil) -} - -// Star a gist on behalf of authenticated user. -// -// GitHub API docs: http://developer.github.com/v3/gists/#star-a-gist -func (s *GistsService) Star(id string) (*Response, error) { - u := fmt.Sprintf("gists/%v/star", id) - req, err := s.client.NewRequest("PUT", u, nil) - if err != nil { - return nil, err - } - return s.client.Do(req, nil) -} - -// Unstar a gist on a behalf of authenticated user. -// -// Github API docs: http://developer.github.com/v3/gists/#unstar-a-gist -func (s *GistsService) Unstar(id string) (*Response, error) { - u := fmt.Sprintf("gists/%v/star", id) - req, err := s.client.NewRequest("DELETE", u, nil) - if err != nil { - return nil, err - } - return s.client.Do(req, nil) -} - -// IsStarred checks if a gist is starred by authenticated user. -// -// GitHub API docs: http://developer.github.com/v3/gists/#check-if-a-gist-is-starred -func (s *GistsService) IsStarred(id string) (bool, *Response, error) { - u := fmt.Sprintf("gists/%v/star", id) - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return false, nil, err - } - resp, err := s.client.Do(req, nil) - starred, err := parseBoolResponse(err) - return starred, resp, err -} - -// Fork a gist. -// -// GitHub API docs: http://developer.github.com/v3/gists/#fork-a-gist -func (s *GistsService) Fork(id string) (*Gist, *Response, error) { - u := fmt.Sprintf("gists/%v/forks", id) - req, err := s.client.NewRequest("POST", u, nil) - if err != nil { - return nil, nil, err - } - - g := new(Gist) - resp, err := s.client.Do(req, g) - if err != nil { - return nil, resp, err - } - - return g, resp, err -} diff --git a/vendor/github.com/google/go-github/github/gists_comments.go b/vendor/github.com/google/go-github/github/gists_comments.go deleted file mode 100644 index c5c21bd..0000000 --- a/vendor/github.com/google/go-github/github/gists_comments.go +++ /dev/null @@ -1,118 +0,0 @@ -// Copyright 2013 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "fmt" - "time" -) - -// GistComment represents a Gist comment. 
-type GistComment struct { - ID *int `json:"id,omitempty"` - URL *string `json:"url,omitempty"` - Body *string `json:"body,omitempty"` - User *User `json:"user,omitempty"` - CreatedAt *time.Time `json:"created_at,omitempty"` -} - -func (g GistComment) String() string { - return Stringify(g) -} - -// ListComments lists all comments for a gist. -// -// GitHub API docs: http://developer.github.com/v3/gists/comments/#list-comments-on-a-gist -func (s *GistsService) ListComments(gistID string, opt *ListOptions) ([]GistComment, *Response, error) { - u := fmt.Sprintf("gists/%v/comments", gistID) - u, err := addOptions(u, opt) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - comments := new([]GistComment) - resp, err := s.client.Do(req, comments) - if err != nil { - return nil, resp, err - } - - return *comments, resp, err -} - -// GetComment retrieves a single comment from a gist. -// -// GitHub API docs: http://developer.github.com/v3/gists/comments/#get-a-single-comment -func (s *GistsService) GetComment(gistID string, commentID int) (*GistComment, *Response, error) { - u := fmt.Sprintf("gists/%v/comments/%v", gistID, commentID) - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - c := new(GistComment) - resp, err := s.client.Do(req, c) - if err != nil { - return nil, resp, err - } - - return c, resp, err -} - -// CreateComment creates a comment for a gist. -// -// GitHub API docs: http://developer.github.com/v3/gists/comments/#create-a-comment -func (s *GistsService) CreateComment(gistID string, comment *GistComment) (*GistComment, *Response, error) { - u := fmt.Sprintf("gists/%v/comments", gistID) - req, err := s.client.NewRequest("POST", u, comment) - if err != nil { - return nil, nil, err - } - - c := new(GistComment) - resp, err := s.client.Do(req, c) - if err != nil { - return nil, resp, err - } - - return c, resp, err -} - -// EditComment edits an existing gist comment. -// -// GitHub API docs: http://developer.github.com/v3/gists/comments/#edit-a-comment -func (s *GistsService) EditComment(gistID string, commentID int, comment *GistComment) (*GistComment, *Response, error) { - u := fmt.Sprintf("gists/%v/comments/%v", gistID, commentID) - req, err := s.client.NewRequest("PATCH", u, comment) - if err != nil { - return nil, nil, err - } - - c := new(GistComment) - resp, err := s.client.Do(req, c) - if err != nil { - return nil, resp, err - } - - return c, resp, err -} - -// DeleteComment deletes a gist comment. -// -// GitHub API docs: http://developer.github.com/v3/gists/comments/#delete-a-comment -func (s *GistsService) DeleteComment(gistID string, commentID int) (*Response, error) { - u := fmt.Sprintf("gists/%v/comments/%v", gistID, commentID) - req, err := s.client.NewRequest("DELETE", u, nil) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) -} diff --git a/vendor/github.com/google/go-github/github/git.go b/vendor/github.com/google/go-github/github/git.go deleted file mode 100644 index a80e55b..0000000 --- a/vendor/github.com/google/go-github/github/git.go +++ /dev/null @@ -1,14 +0,0 @@ -// Copyright 2013 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -// GitService handles communication with the git data related -// methods of the GitHub API. 
-// -// GitHub API docs: http://developer.github.com/v3/git/ -type GitService struct { - client *Client -} diff --git a/vendor/github.com/google/go-github/github/git_blobs.go b/vendor/github.com/google/go-github/github/git_blobs.go deleted file mode 100644 index 133780b..0000000 --- a/vendor/github.com/google/go-github/github/git_blobs.go +++ /dev/null @@ -1,47 +0,0 @@ -// Copyright 2013 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import "fmt" - -// Blob represents a blob object. -type Blob struct { - Content *string `json:"content,omitempty"` - Encoding *string `json:"encoding,omitempty"` - SHA *string `json:"sha,omitempty"` - Size *int `json:"size,omitempty"` - URL *string `json:"url,omitempty"` -} - -// GetBlob fetchs a blob from a repo given a SHA. -// -// GitHub API docs: http://developer.github.com/v3/git/blobs/#get-a-blob -func (s *GitService) GetBlob(owner string, repo string, sha string) (*Blob, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/git/blobs/%v", owner, repo, sha) - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - blob := new(Blob) - resp, err := s.client.Do(req, blob) - return blob, resp, err -} - -// CreateBlob creates a blob object. -// -// GitHub API docs: http://developer.github.com/v3/git/blobs/#create-a-blob -func (s *GitService) CreateBlob(owner string, repo string, blob *Blob) (*Blob, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/git/blobs", owner, repo) - req, err := s.client.NewRequest("POST", u, blob) - if err != nil { - return nil, nil, err - } - - t := new(Blob) - resp, err := s.client.Do(req, t) - return t, resp, err -} diff --git a/vendor/github.com/google/go-github/github/git_commits.go b/vendor/github.com/google/go-github/github/git_commits.go deleted file mode 100644 index 6584b77..0000000 --- a/vendor/github.com/google/go-github/github/git_commits.go +++ /dev/null @@ -1,112 +0,0 @@ -// Copyright 2013 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "fmt" - "time" -) - -// Commit represents a GitHub commit. -type Commit struct { - SHA *string `json:"sha,omitempty"` - Author *CommitAuthor `json:"author,omitempty"` - Committer *CommitAuthor `json:"committer,omitempty"` - Message *string `json:"message,omitempty"` - Tree *Tree `json:"tree,omitempty"` - Parents []Commit `json:"parents,omitempty"` - Stats *CommitStats `json:"stats,omitempty"` - URL *string `json:"url,omitempty"` - - // CommentCount is the number of GitHub comments on the commit. This - // is only populated for requests that fetch GitHub data like - // Pulls.ListCommits, Repositories.ListCommits, etc. - CommentCount *int `json:"comment_count,omitempty"` -} - -func (c Commit) String() string { - return Stringify(c) -} - -// CommitAuthor represents the author or committer of a commit. The commit -// author may not correspond to a GitHub User. -type CommitAuthor struct { - Date *time.Time `json:"date,omitempty"` - Name *string `json:"name,omitempty"` - Email *string `json:"email,omitempty"` -} - -func (c CommitAuthor) String() string { - return Stringify(c) -} - -// GetCommit fetchs the Commit object for a given SHA. 
-// -// GitHub API docs: http://developer.github.com/v3/git/commits/#get-a-commit -func (s *GitService) GetCommit(owner string, repo string, sha string) (*Commit, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/git/commits/%v", owner, repo, sha) - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - c := new(Commit) - resp, err := s.client.Do(req, c) - if err != nil { - return nil, resp, err - } - - return c, resp, err -} - -// createCommit represents the body of a CreateCommit request. -type createCommit struct { - Author *CommitAuthor `json:"author,omitempty"` - Committer *CommitAuthor `json:"committer,omitempty"` - Message *string `json:"message,omitempty"` - Tree *string `json:"tree,omitempty"` - Parents []string `json:"parents,omitempty"` -} - -// CreateCommit creates a new commit in a repository. -// -// The commit.Committer is optional and will be filled with the commit.Author -// data if omitted. If the commit.Author is omitted, it will be filled in with -// the authenticated user’s information and the current date. -// -// GitHub API docs: http://developer.github.com/v3/git/commits/#create-a-commit -func (s *GitService) CreateCommit(owner string, repo string, commit *Commit) (*Commit, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/git/commits", owner, repo) - - body := &createCommit{} - if commit != nil { - parents := make([]string, len(commit.Parents)) - for i, parent := range commit.Parents { - parents[i] = *parent.SHA - } - - body = &createCommit{ - Author: commit.Author, - Committer: commit.Committer, - Message: commit.Message, - Tree: commit.Tree.SHA, - Parents: parents, - } - } - - req, err := s.client.NewRequest("POST", u, body) - if err != nil { - return nil, nil, err - } - - c := new(Commit) - resp, err := s.client.Do(req, c) - if err != nil { - return nil, resp, err - } - - return c, resp, err -} diff --git a/vendor/github.com/google/go-github/github/git_refs.go b/vendor/github.com/google/go-github/github/git_refs.go deleted file mode 100644 index 3d2f6c8..0000000 --- a/vendor/github.com/google/go-github/github/git_refs.go +++ /dev/null @@ -1,162 +0,0 @@ -// Copyright 2013 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "fmt" - "strings" -) - -// Reference represents a GitHub reference. -type Reference struct { - Ref *string `json:"ref"` - URL *string `json:"url"` - Object *GitObject `json:"object"` -} - -func (r Reference) String() string { - return Stringify(r) -} - -// GitObject represents a Git object. -type GitObject struct { - Type *string `json:"type"` - SHA *string `json:"sha"` - URL *string `json:"url"` -} - -func (o GitObject) String() string { - return Stringify(o) -} - -// createRefRequest represents the payload for creating a reference. -type createRefRequest struct { - Ref *string `json:"ref"` - SHA *string `json:"sha"` -} - -// updateRefRequest represents the payload for updating a reference. -type updateRefRequest struct { - SHA *string `json:"sha"` - Force *bool `json:"force"` -} - -// GetRef fetches the Reference object for a given Git ref. 
-// -// GitHub API docs: http://developer.github.com/v3/git/refs/#get-a-reference -func (s *GitService) GetRef(owner string, repo string, ref string) (*Reference, *Response, error) { - ref = strings.TrimPrefix(ref, "refs/") - u := fmt.Sprintf("repos/%v/%v/git/refs/%v", owner, repo, ref) - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - r := new(Reference) - resp, err := s.client.Do(req, r) - if err != nil { - return nil, resp, err - } - - return r, resp, err -} - -// ReferenceListOptions specifies optional parameters to the -// GitService.ListRefs method. -type ReferenceListOptions struct { - Type string `url:"-"` - - ListOptions -} - -// ListRefs lists all refs in a repository. -// -// GitHub API docs: http://developer.github.com/v3/git/refs/#get-all-references -func (s *GitService) ListRefs(owner, repo string, opt *ReferenceListOptions) ([]Reference, *Response, error) { - var u string - if opt != nil && opt.Type != "" { - u = fmt.Sprintf("repos/%v/%v/git/refs/%v", owner, repo, opt.Type) - } else { - u = fmt.Sprintf("repos/%v/%v/git/refs", owner, repo) - } - u, err := addOptions(u, opt) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - var rs []Reference - resp, err := s.client.Do(req, &rs) - if err != nil { - return nil, resp, err - } - - return rs, resp, err -} - -// CreateRef creates a new ref in a repository. -// -// GitHub API docs: http://developer.github.com/v3/git/refs/#create-a-reference -func (s *GitService) CreateRef(owner string, repo string, ref *Reference) (*Reference, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/git/refs", owner, repo) - req, err := s.client.NewRequest("POST", u, &createRefRequest{ - // back-compat with previous behavior that didn't require 'refs/' prefix - Ref: String("refs/" + strings.TrimPrefix(*ref.Ref, "refs/")), - SHA: ref.Object.SHA, - }) - if err != nil { - return nil, nil, err - } - - r := new(Reference) - resp, err := s.client.Do(req, r) - if err != nil { - return nil, resp, err - } - - return r, resp, err -} - -// UpdateRef updates an existing ref in a repository. -// -// GitHub API docs: http://developer.github.com/v3/git/refs/#update-a-reference -func (s *GitService) UpdateRef(owner string, repo string, ref *Reference, force bool) (*Reference, *Response, error) { - refPath := strings.TrimPrefix(*ref.Ref, "refs/") - u := fmt.Sprintf("repos/%v/%v/git/refs/%v", owner, repo, refPath) - req, err := s.client.NewRequest("PATCH", u, &updateRefRequest{ - SHA: ref.Object.SHA, - Force: &force, - }) - if err != nil { - return nil, nil, err - } - - r := new(Reference) - resp, err := s.client.Do(req, r) - if err != nil { - return nil, resp, err - } - - return r, resp, err -} - -// DeleteRef deletes a ref from a repository. 
-// -// GitHub API docs: http://developer.github.com/v3/git/refs/#delete-a-reference -func (s *GitService) DeleteRef(owner string, repo string, ref string) (*Response, error) { - ref = strings.TrimPrefix(ref, "refs/") - u := fmt.Sprintf("repos/%v/%v/git/refs/%v", owner, repo, ref) - req, err := s.client.NewRequest("DELETE", u, nil) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) -} diff --git a/vendor/github.com/google/go-github/github/git_tags.go b/vendor/github.com/google/go-github/github/git_tags.go deleted file mode 100644 index 7b53f5c..0000000 --- a/vendor/github.com/google/go-github/github/git_tags.go +++ /dev/null @@ -1,73 +0,0 @@ -// Copyright 2013 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "fmt" -) - -// Tag represents a tag object. -type Tag struct { - Tag *string `json:"tag,omitempty"` - SHA *string `json:"sha,omitempty"` - URL *string `json:"url,omitempty"` - Message *string `json:"message,omitempty"` - Tagger *CommitAuthor `json:"tagger,omitempty"` - Object *GitObject `json:"object,omitempty"` -} - -// createTagRequest represents the body of a CreateTag request. This is mostly -// identical to Tag with the exception that the object SHA and Type are -// top-level fields, rather than being nested inside a JSON object. -type createTagRequest struct { - Tag *string `json:"tag,omitempty"` - Message *string `json:"message,omitempty"` - Object *string `json:"object,omitempty"` - Type *string `json:"type,omitempty"` - Tagger *CommitAuthor `json:"tagger,omitempty"` -} - -// GetTag fetchs a tag from a repo given a SHA. -// -// GitHub API docs: http://developer.github.com/v3/git/tags/#get-a-tag -func (s *GitService) GetTag(owner string, repo string, sha string) (*Tag, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/git/tags/%v", owner, repo, sha) - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - tag := new(Tag) - resp, err := s.client.Do(req, tag) - return tag, resp, err -} - -// CreateTag creates a tag object. -// -// GitHub API docs: http://developer.github.com/v3/git/tags/#create-a-tag-object -func (s *GitService) CreateTag(owner string, repo string, tag *Tag) (*Tag, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/git/tags", owner, repo) - - // convert Tag into a createTagRequest - tagRequest := &createTagRequest{ - Tag: tag.Tag, - Message: tag.Message, - Tagger: tag.Tagger, - } - if tag.Object != nil { - tagRequest.Object = tag.Object.SHA - tagRequest.Type = tag.Object.Type - } - - req, err := s.client.NewRequest("POST", u, tagRequest) - if err != nil { - return nil, nil, err - } - - t := new(Tag) - resp, err := s.client.Do(req, t) - return t, resp, err -} diff --git a/vendor/github.com/google/go-github/github/git_trees.go b/vendor/github.com/google/go-github/github/git_trees.go deleted file mode 100644 index 9efa4b3..0000000 --- a/vendor/github.com/google/go-github/github/git_trees.go +++ /dev/null @@ -1,89 +0,0 @@ -// Copyright 2013 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import "fmt" - -// Tree represents a GitHub tree. 
-type Tree struct { - SHA *string `json:"sha,omitempty"` - Entries []TreeEntry `json:"tree,omitempty"` -} - -func (t Tree) String() string { - return Stringify(t) -} - -// TreeEntry represents the contents of a tree structure. TreeEntry can -// represent either a blob, a commit (in the case of a submodule), or another -// tree. -type TreeEntry struct { - SHA *string `json:"sha,omitempty"` - Path *string `json:"path,omitempty"` - Mode *string `json:"mode,omitempty"` - Type *string `json:"type,omitempty"` - Size *int `json:"size,omitempty"` - Content *string `json:"content,omitempty"` -} - -func (t TreeEntry) String() string { - return Stringify(t) -} - -// GetTree fetches the Tree object for a given sha hash from a repository. -// -// GitHub API docs: http://developer.github.com/v3/git/trees/#get-a-tree -func (s *GitService) GetTree(owner string, repo string, sha string, recursive bool) (*Tree, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/git/trees/%v", owner, repo, sha) - if recursive { - u += "?recursive=1" - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - t := new(Tree) - resp, err := s.client.Do(req, t) - if err != nil { - return nil, resp, err - } - - return t, resp, err -} - -// createTree represents the body of a CreateTree request. -type createTree struct { - BaseTree string `json:"base_tree,omitempty"` - Entries []TreeEntry `json:"tree"` -} - -// CreateTree creates a new tree in a repository. If both a tree and a nested -// path modifying that tree are specified, it will overwrite the contents of -// that tree with the new path contents and write a new tree out. -// -// GitHub API docs: http://developer.github.com/v3/git/trees/#create-a-tree -func (s *GitService) CreateTree(owner string, repo string, baseTree string, entries []TreeEntry) (*Tree, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/git/trees", owner, repo) - - body := &createTree{ - BaseTree: baseTree, - Entries: entries, - } - req, err := s.client.NewRequest("POST", u, body) - if err != nil { - return nil, nil, err - } - - t := new(Tree) - resp, err := s.client.Do(req, t) - if err != nil { - return nil, resp, err - } - - return t, resp, err -} diff --git a/vendor/github.com/google/go-github/github/github.go b/vendor/github.com/google/go-github/github/github.go deleted file mode 100644 index 30b8390..0000000 --- a/vendor/github.com/google/go-github/github/github.go +++ /dev/null @@ -1,588 +0,0 @@ -// Copyright 2013 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package github - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - "io" - "io/ioutil" - "net/http" - "net/url" - "reflect" - "strconv" - "strings" - "time" - - "github.com/google/go-querystring/query" -) - -const ( - libraryVersion = "0.1" - defaultBaseURL = "https://api.github.com/" - uploadBaseURL = "https://uploads.github.com/" - userAgent = "go-github/" + libraryVersion - - headerRateLimit = "X-RateLimit-Limit" - headerRateRemaining = "X-RateLimit-Remaining" - headerRateReset = "X-RateLimit-Reset" - - mediaTypeV3 = "application/vnd.github.v3+json" - defaultMediaType = "application/octet-stream" - - // Media Type values to access preview APIs - - // https://developer.github.com/changes/2015-03-09-licenses-api/ - mediaTypeLicensesPreview = "application/vnd.github.drax-preview+json" - - // https://developer.github.com/changes/2014-12-09-new-attributes-for-stars-api/ - mediaTypeStarringPreview = "application/vnd.github.v3.star+json" - - // https://developer.github.com/changes/2015-06-24-api-enhancements-for-working-with-organization-permissions/ - mediaTypeOrgPermissionPreview = "application/vnd.github.ironman-preview+json" - mediaTypeOrgPermissionRepoPreview = "application/vnd.github.ironman-preview.repository+json" -) - -// A Client manages communication with the GitHub API. -type Client struct { - // HTTP client used to communicate with the API. - client *http.Client - - // Base URL for API requests. Defaults to the public GitHub API, but can be - // set to a domain endpoint to use with GitHub Enterprise. BaseURL should - // always be specified with a trailing slash. - BaseURL *url.URL - - // Base URL for uploading files. - UploadURL *url.URL - - // User agent used when communicating with the GitHub API. - UserAgent string - - // Rate specifies the current rate limit for the client as determined by the - // most recent API call. If the client is used in a multi-user application, - // this rate may not always be up-to-date. Call RateLimits() to check the - // current rate. - Rate Rate - - // Services used for talking to different parts of the GitHub API. - Activity *ActivityService - Gists *GistsService - Git *GitService - Gitignores *GitignoresService - Issues *IssuesService - Organizations *OrganizationsService - PullRequests *PullRequestsService - Repositories *RepositoriesService - Search *SearchService - Users *UsersService - Licenses *LicensesService -} - -// ListOptions specifies the optional parameters to various List methods that -// support pagination. -type ListOptions struct { - // For paginated result sets, page of results to retrieve. - Page int `url:"page,omitempty"` - - // For paginated result sets, the number of results to include per page. - PerPage int `url:"per_page,omitempty"` -} - -// UploadOptions specifies the parameters to methods that support uploads. -type UploadOptions struct { - Name string `url:"name,omitempty"` -} - -// addOptions adds the parameters in opt as URL query parameters to s. opt -// must be a struct whose fields may contain "url" tags. -func addOptions(s string, opt interface{}) (string, error) { - v := reflect.ValueOf(opt) - if v.Kind() == reflect.Ptr && v.IsNil() { - return s, nil - } - - u, err := url.Parse(s) - if err != nil { - return s, err - } - - qs, err := query.Values(opt) - if err != nil { - return s, err - } - - u.RawQuery = qs.Encode() - return u.String(), nil -} - -// NewClient returns a new GitHub API client. If a nil httpClient is -// provided, http.DefaultClient will be used. 
To use API methods which require -// authentication, provide an http.Client that will perform the authentication -// for you (such as that provided by the golang.org/x/oauth2 library). -func NewClient(httpClient *http.Client) *Client { - if httpClient == nil { - httpClient = http.DefaultClient - } - baseURL, _ := url.Parse(defaultBaseURL) - uploadURL, _ := url.Parse(uploadBaseURL) - - c := &Client{client: httpClient, BaseURL: baseURL, UserAgent: userAgent, UploadURL: uploadURL} - c.Activity = &ActivityService{client: c} - c.Gists = &GistsService{client: c} - c.Git = &GitService{client: c} - c.Gitignores = &GitignoresService{client: c} - c.Issues = &IssuesService{client: c} - c.Organizations = &OrganizationsService{client: c} - c.PullRequests = &PullRequestsService{client: c} - c.Repositories = &RepositoriesService{client: c} - c.Search = &SearchService{client: c} - c.Users = &UsersService{client: c} - c.Licenses = &LicensesService{client: c} - return c -} - -// NewRequest creates an API request. A relative URL can be provided in urlStr, -// in which case it is resolved relative to the BaseURL of the Client. -// Relative URLs should always be specified without a preceding slash. If -// specified, the value pointed to by body is JSON encoded and included as the -// request body. -func (c *Client) NewRequest(method, urlStr string, body interface{}) (*http.Request, error) { - rel, err := url.Parse(urlStr) - if err != nil { - return nil, err - } - - u := c.BaseURL.ResolveReference(rel) - - var buf io.ReadWriter - if body != nil { - buf = new(bytes.Buffer) - err := json.NewEncoder(buf).Encode(body) - if err != nil { - return nil, err - } - } - - req, err := http.NewRequest(method, u.String(), buf) - if err != nil { - return nil, err - } - - req.Header.Add("Accept", mediaTypeV3) - if c.UserAgent != "" { - req.Header.Add("User-Agent", c.UserAgent) - } - return req, nil -} - -// NewUploadRequest creates an upload request. A relative URL can be provided in -// urlStr, in which case it is resolved relative to the UploadURL of the Client. -// Relative URLs should always be specified without a preceding slash. -func (c *Client) NewUploadRequest(urlStr string, reader io.Reader, size int64, mediaType string) (*http.Request, error) { - rel, err := url.Parse(urlStr) - if err != nil { - return nil, err - } - - u := c.UploadURL.ResolveReference(rel) - req, err := http.NewRequest("POST", u.String(), reader) - if err != nil { - return nil, err - } - req.ContentLength = size - - if len(mediaType) == 0 { - mediaType = defaultMediaType - } - req.Header.Add("Content-Type", mediaType) - req.Header.Add("Accept", mediaTypeV3) - req.Header.Add("User-Agent", c.UserAgent) - return req, nil -} - -// Response is a GitHub API response. This wraps the standard http.Response -// returned from GitHub and provides convenient access to things like -// pagination links. -type Response struct { - *http.Response - - // These fields provide the page values for paginating through a set of - // results. Any or all of these may be set to the zero value for - // responses that are not part of a paginated set, or for which there - // are no additional pages. - - NextPage int - PrevPage int - FirstPage int - LastPage int - - Rate -} - -// newResponse creates a new Response for the provided http.Response. 
-func newResponse(r *http.Response) *Response { - response := &Response{Response: r} - response.populatePageValues() - response.populateRate() - return response -} - -// populatePageValues parses the HTTP Link response headers and populates the -// various pagination link values in the Reponse. -func (r *Response) populatePageValues() { - if links, ok := r.Response.Header["Link"]; ok && len(links) > 0 { - for _, link := range strings.Split(links[0], ",") { - segments := strings.Split(strings.TrimSpace(link), ";") - - // link must at least have href and rel - if len(segments) < 2 { - continue - } - - // ensure href is properly formatted - if !strings.HasPrefix(segments[0], "<") || !strings.HasSuffix(segments[0], ">") { - continue - } - - // try to pull out page parameter - url, err := url.Parse(segments[0][1 : len(segments[0])-1]) - if err != nil { - continue - } - page := url.Query().Get("page") - if page == "" { - continue - } - - for _, segment := range segments[1:] { - switch strings.TrimSpace(segment) { - case `rel="next"`: - r.NextPage, _ = strconv.Atoi(page) - case `rel="prev"`: - r.PrevPage, _ = strconv.Atoi(page) - case `rel="first"`: - r.FirstPage, _ = strconv.Atoi(page) - case `rel="last"`: - r.LastPage, _ = strconv.Atoi(page) - } - - } - } - } -} - -// populateRate parses the rate related headers and populates the response Rate. -func (r *Response) populateRate() { - if limit := r.Header.Get(headerRateLimit); limit != "" { - r.Rate.Limit, _ = strconv.Atoi(limit) - } - if remaining := r.Header.Get(headerRateRemaining); remaining != "" { - r.Rate.Remaining, _ = strconv.Atoi(remaining) - } - if reset := r.Header.Get(headerRateReset); reset != "" { - if v, _ := strconv.ParseInt(reset, 10, 64); v != 0 { - r.Rate.Reset = Timestamp{time.Unix(v, 0)} - } - } -} - -// Do sends an API request and returns the API response. The API response is -// JSON decoded and stored in the value pointed to by v, or returned as an -// error if an API error has occurred. If v implements the io.Writer -// interface, the raw response body will be written to v, without attempting to -// first decode it. -func (c *Client) Do(req *http.Request, v interface{}) (*Response, error) { - resp, err := c.client.Do(req) - if err != nil { - return nil, err - } - - defer resp.Body.Close() - - response := newResponse(resp) - - c.Rate = response.Rate - - err = CheckResponse(resp) - if err != nil { - // even though there was an error, we still return the response - // in case the caller wants to inspect it further - return response, err - } - - if v != nil { - if w, ok := v.(io.Writer); ok { - io.Copy(w, resp.Body) - } else { - err = json.NewDecoder(resp.Body).Decode(v) - } - } - return response, err -} - -/* -An ErrorResponse reports one or more errors caused by an API request. - -GitHub API docs: http://developer.github.com/v3/#client-errors -*/ -type ErrorResponse struct { - Response *http.Response // HTTP response that caused this error - Message string `json:"message"` // error message - Errors []Error `json:"errors"` // more detail on individual errors -} - -func (r *ErrorResponse) Error() string { - return fmt.Sprintf("%v %v: %d %v %+v", - r.Response.Request.Method, sanitizeURL(r.Response.Request.URL), - r.Response.StatusCode, r.Message, r.Errors) -} - -// sanitizeURL redacts the client_id and client_secret tokens from the URL which -// may be exposed to the user, specifically in the ErrorResponse error message. 
-func sanitizeURL(uri *url.URL) *url.URL { - if uri == nil { - return nil - } - params := uri.Query() - if len(params.Get("client_secret")) > 0 { - params.Set("client_secret", "REDACTED") - uri.RawQuery = params.Encode() - } - return uri -} - -/* -An Error reports more details on an individual error in an ErrorResponse. -These are the possible validation error codes: - - missing: - resource does not exist - missing_field: - a required field on a resource has not been set - invalid: - the formatting of a field is invalid - already_exists: - another resource has the same valid as this field - -GitHub API docs: http://developer.github.com/v3/#client-errors -*/ -type Error struct { - Resource string `json:"resource"` // resource on which the error occurred - Field string `json:"field"` // field on which the error occurred - Code string `json:"code"` // validation error code -} - -func (e *Error) Error() string { - return fmt.Sprintf("%v error caused by %v field on %v resource", - e.Code, e.Field, e.Resource) -} - -// CheckResponse checks the API response for errors, and returns them if -// present. A response is considered an error if it has a status code outside -// the 200 range. API error responses are expected to have either no response -// body, or a JSON response body that maps to ErrorResponse. Any other -// response body will be silently ignored. -func CheckResponse(r *http.Response) error { - if c := r.StatusCode; 200 <= c && c <= 299 { - return nil - } - errorResponse := &ErrorResponse{Response: r} - data, err := ioutil.ReadAll(r.Body) - if err == nil && data != nil { - json.Unmarshal(data, errorResponse) - } - return errorResponse -} - -// parseBoolResponse determines the boolean result from a GitHub API response. -// Several GitHub API methods return boolean responses indicated by the HTTP -// status code in the response (true indicated by a 204, false indicated by a -// 404). This helper function will determine that result and hide the 404 -// error if present. Any other error will be returned through as-is. -func parseBoolResponse(err error) (bool, error) { - if err == nil { - return true, nil - } - - if err, ok := err.(*ErrorResponse); ok && err.Response.StatusCode == http.StatusNotFound { - // Simply false. In this one case, we do not pass the error through. - return false, nil - } - - // some other real error occurred - return false, err -} - -// Rate represents the rate limit for the current client. -type Rate struct { - // The number of requests per hour the client is currently limited to. - Limit int `json:"limit"` - - // The number of remaining requests the client can make this hour. - Remaining int `json:"remaining"` - - // The time at which the current rate limit will reset. - Reset Timestamp `json:"reset"` -} - -func (r Rate) String() string { - return Stringify(r) -} - -// RateLimits represents the rate limits for the current client. -type RateLimits struct { - // The rate limit for non-search API requests. Unauthenticated - // requests are limited to 60 per hour. Authenticated requests are - // limited to 5,000 per hour. - Core *Rate `json:"core"` - - // The rate limit for search API requests. Unauthenticated requests - // are limited to 5 requests per minutes. Authenticated requests are - // limited to 20 per minute. - // - // GitHub API docs: https://developer.github.com/v3/search/#rate-limit - Search *Rate `json:"search"` -} - -func (r RateLimits) String() string { - return Stringify(r) -} - -// RateLimit is deprecated. Use RateLimits instead. 
-func (c *Client) RateLimit() (*Rate, *Response, error) { - limits, resp, err := c.RateLimits() - if limits == nil { - return nil, nil, err - } - - return limits.Core, resp, err -} - -// RateLimits returns the rate limits for the current client. -func (c *Client) RateLimits() (*RateLimits, *Response, error) { - req, err := c.NewRequest("GET", "rate_limit", nil) - if err != nil { - return nil, nil, err - } - - response := new(struct { - Resources *RateLimits `json:"resources"` - }) - resp, err := c.Do(req, response) - if err != nil { - return nil, nil, err - } - - return response.Resources, resp, err -} - -/* -UnauthenticatedRateLimitedTransport allows you to make unauthenticated calls -that need to use a higher rate limit associated with your OAuth application. - - t := &github.UnauthenticatedRateLimitedTransport{ - ClientID: "your app's client ID", - ClientSecret: "your app's client secret", - } - client := github.NewClient(t.Client()) - -This will append the querystring params client_id=xxx&client_secret=yyy to all -requests. - -See http://developer.github.com/v3/#unauthenticated-rate-limited-requests for -more information. -*/ -type UnauthenticatedRateLimitedTransport struct { - // ClientID is the GitHub OAuth client ID of the current application, which - // can be found by selecting its entry in the list at - // https://github.com/settings/applications. - ClientID string - - // ClientSecret is the GitHub OAuth client secret of the current - // application. - ClientSecret string - - // Transport is the underlying HTTP transport to use when making requests. - // It will default to http.DefaultTransport if nil. - Transport http.RoundTripper -} - -// RoundTrip implements the RoundTripper interface. -func (t *UnauthenticatedRateLimitedTransport) RoundTrip(req *http.Request) (*http.Response, error) { - if t.ClientID == "" { - return nil, errors.New("t.ClientID is empty") - } - if t.ClientSecret == "" { - return nil, errors.New("t.ClientSecret is empty") - } - - // To set extra querystring params, we must make a copy of the Request so - // that we don't modify the Request we were given. This is required by the - // specification of http.RoundTripper. - req = cloneRequest(req) - q := req.URL.Query() - q.Set("client_id", t.ClientID) - q.Set("client_secret", t.ClientSecret) - req.URL.RawQuery = q.Encode() - - // Make the HTTP request. - return t.transport().RoundTrip(req) -} - -// Client returns an *http.Client that makes requests which are subject to the -// rate limit of your OAuth application. -func (t *UnauthenticatedRateLimitedTransport) Client() *http.Client { - return &http.Client{Transport: t} -} - -func (t *UnauthenticatedRateLimitedTransport) transport() http.RoundTripper { - if t.Transport != nil { - return t.Transport - } - return http.DefaultTransport -} - -// cloneRequest returns a clone of the provided *http.Request. The clone is a -// shallow copy of the struct and its Header map. -func cloneRequest(r *http.Request) *http.Request { - // shallow copy of the struct - r2 := new(http.Request) - *r2 = *r - // deep copy of the Header - r2.Header = make(http.Header) - for k, s := range r.Header { - r2.Header[k] = s - } - return r2 -} - -// Bool is a helper routine that allocates a new bool value -// to store v and returns a pointer to it. -func Bool(v bool) *bool { - p := new(bool) - *p = v - return p -} - -// Int is a helper routine that allocates a new int32 value -// to store v and returns a pointer to it, but unlike Int32 -// its argument value is an int. 
-func Int(v int) *int { - p := new(int) - *p = v - return p -} - -// String is a helper routine that allocates a new string value -// to store v and returns a pointer to it. -func String(v string) *string { - p := new(string) - *p = v - return p -} diff --git a/vendor/github.com/google/go-github/github/gitignore.go b/vendor/github.com/google/go-github/github/gitignore.go deleted file mode 100644 index 31d5902..0000000 --- a/vendor/github.com/google/go-github/github/gitignore.go +++ /dev/null @@ -1,63 +0,0 @@ -// Copyright 2013 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import "fmt" - -// GitignoresService provides access to the gitignore related functions in the -// GitHub API. -// -// GitHub API docs: http://developer.github.com/v3/gitignore/ -type GitignoresService struct { - client *Client -} - -// Gitignore represents a .gitignore file as returned by the GitHub API. -type Gitignore struct { - Name *string `json:"name,omitempty"` - Source *string `json:"source,omitempty"` -} - -func (g Gitignore) String() string { - return Stringify(g) -} - -// List all available Gitignore templates. -// -// http://developer.github.com/v3/gitignore/#listing-available-templates -func (s GitignoresService) List() ([]string, *Response, error) { - req, err := s.client.NewRequest("GET", "gitignore/templates", nil) - if err != nil { - return nil, nil, err - } - - availableTemplates := new([]string) - resp, err := s.client.Do(req, availableTemplates) - if err != nil { - return nil, resp, err - } - - return *availableTemplates, resp, err -} - -// Get a Gitignore by name. -// -// http://developer.github.com/v3/gitignore/#get-a-single-template -func (s GitignoresService) Get(name string) (*Gitignore, *Response, error) { - u := fmt.Sprintf("gitignore/templates/%v", name) - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - gitignore := new(Gitignore) - resp, err := s.client.Do(req, gitignore) - if err != nil { - return nil, resp, err - } - - return gitignore, resp, err -} diff --git a/vendor/github.com/google/go-github/github/issues.go b/vendor/github.com/google/go-github/github/issues.go deleted file mode 100644 index 34807aa..0000000 --- a/vendor/github.com/google/go-github/github/issues.go +++ /dev/null @@ -1,261 +0,0 @@ -// Copyright 2013 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "fmt" - "time" -) - -// IssuesService handles communication with the issue related -// methods of the GitHub API. -// -// GitHub API docs: http://developer.github.com/v3/issues/ -type IssuesService struct { - client *Client -} - -// Issue represents a GitHub issue on a repository. 
-type Issue struct { - Number *int `json:"number,omitempty"` - State *string `json:"state,omitempty"` - Title *string `json:"title,omitempty"` - Body *string `json:"body,omitempty"` - User *User `json:"user,omitempty"` - Labels []Label `json:"labels,omitempty"` - Assignee *User `json:"assignee,omitempty"` - Comments *int `json:"comments,omitempty"` - ClosedAt *time.Time `json:"closed_at,omitempty"` - CreatedAt *time.Time `json:"created_at,omitempty"` - UpdatedAt *time.Time `json:"updated_at,omitempty"` - URL *string `json:"url,omitempty"` - HTMLURL *string `json:"html_url,omitempty"` - Milestone *Milestone `json:"milestone,omitempty"` - PullRequestLinks *PullRequestLinks `json:"pull_request,omitempty"` - - // TextMatches is only populated from search results that request text matches - // See: search.go and https://developer.github.com/v3/search/#text-match-metadata - TextMatches []TextMatch `json:"text_matches,omitempty"` -} - -func (i Issue) String() string { - return Stringify(i) -} - -// IssueRequest represents a request to create/edit an issue. -// It is separate from Issue above because otherwise Labels -// and Assignee fail to serialize to the correct JSON. -type IssueRequest struct { - Title *string `json:"title,omitempty"` - Body *string `json:"body,omitempty"` - Labels *[]string `json:"labels,omitempty"` - Assignee *string `json:"assignee,omitempty"` - State *string `json:"state,omitempty"` - Milestone *int `json:"milestone,omitempty"` -} - -// IssueListOptions specifies the optional parameters to the IssuesService.List -// and IssuesService.ListByOrg methods. -type IssueListOptions struct { - // Filter specifies which issues to list. Possible values are: assigned, - // created, mentioned, subscribed, all. Default is "assigned". - Filter string `url:"filter,omitempty"` - - // State filters issues based on their state. Possible values are: open, - // closed. Default is "open". - State string `url:"state,omitempty"` - - // Labels filters issues based on their label. - Labels []string `url:"labels,comma,omitempty"` - - // Sort specifies how to sort issues. Possible values are: created, updated, - // and comments. Default value is "created". - Sort string `url:"sort,omitempty"` - - // Direction in which to sort issues. Possible values are: asc, desc. - // Default is "asc". - Direction string `url:"direction,omitempty"` - - // Since filters issues by time. - Since time.Time `url:"since,omitempty"` - - ListOptions -} - -// PullRequestLinks object is added to the Issue object when it's an issue included -// in the IssueCommentEvent webhook payload, if the webhooks is fired by a comment on a PR -type PullRequestLinks struct { - URL *string `json:"url,omitempty"` - HTMLURL *string `json:"html_url,omitempty"` - DiffURL *string `json:"diff_url,omitempty"` - PatchURL *string `json:"patch_url,omitempty"` -} - -// List the issues for the authenticated user. If all is true, list issues -// across all the user's visible repositories including owned, member, and -// organization repositories; if false, list only owned and member -// repositories. -// -// GitHub API docs: http://developer.github.com/v3/issues/#list-issues -func (s *IssuesService) List(all bool, opt *IssueListOptions) ([]Issue, *Response, error) { - var u string - if all { - u = "issues" - } else { - u = "user/issues" - } - return s.listIssues(u, opt) -} - -// ListByOrg fetches the issues in the specified organization for the -// authenticated user. 
-// -// GitHub API docs: http://developer.github.com/v3/issues/#list-issues -func (s *IssuesService) ListByOrg(org string, opt *IssueListOptions) ([]Issue, *Response, error) { - u := fmt.Sprintf("orgs/%v/issues", org) - return s.listIssues(u, opt) -} - -func (s *IssuesService) listIssues(u string, opt *IssueListOptions) ([]Issue, *Response, error) { - u, err := addOptions(u, opt) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - issues := new([]Issue) - resp, err := s.client.Do(req, issues) - if err != nil { - return nil, resp, err - } - - return *issues, resp, err -} - -// IssueListByRepoOptions specifies the optional parameters to the -// IssuesService.ListByRepo method. -type IssueListByRepoOptions struct { - // Milestone limits issues for the specified milestone. Possible values are - // a milestone number, "none" for issues with no milestone, "*" for issues - // with any milestone. - Milestone string `url:"milestone,omitempty"` - - // State filters issues based on their state. Possible values are: open, - // closed. Default is "open". - State string `url:"state,omitempty"` - - // Assignee filters issues based on their assignee. Possible values are a - // user name, "none" for issues that are not assigned, "*" for issues with - // any assigned user. - Assignee string `url:"assignee,omitempty"` - - // Creator filters issues based on their creator. - Creator string `url:"creator,omitempty"` - - // Mentioned filters issues to those mentioned a specific user. - Mentioned string `url:"mentioned,omitempty"` - - // Labels filters issues based on their label. - Labels []string `url:"labels,omitempty,comma"` - - // Sort specifies how to sort issues. Possible values are: created, updated, - // and comments. Default value is "created". - Sort string `url:"sort,omitempty"` - - // Direction in which to sort issues. Possible values are: asc, desc. - // Default is "asc". - Direction string `url:"direction,omitempty"` - - // Since filters issues by time. - Since time.Time `url:"since,omitempty"` - - ListOptions -} - -// ListByRepo lists the issues for the specified repository. -// -// GitHub API docs: http://developer.github.com/v3/issues/#list-issues-for-a-repository -func (s *IssuesService) ListByRepo(owner string, repo string, opt *IssueListByRepoOptions) ([]Issue, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/issues", owner, repo) - u, err := addOptions(u, opt) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - issues := new([]Issue) - resp, err := s.client.Do(req, issues) - if err != nil { - return nil, resp, err - } - - return *issues, resp, err -} - -// Get a single issue. -// -// GitHub API docs: http://developer.github.com/v3/issues/#get-a-single-issue -func (s *IssuesService) Get(owner string, repo string, number int) (*Issue, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/issues/%d", owner, repo, number) - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - issue := new(Issue) - resp, err := s.client.Do(req, issue) - if err != nil { - return nil, resp, err - } - - return issue, resp, err -} - -// Create a new issue on the specified repository. 
-// -// GitHub API docs: http://developer.github.com/v3/issues/#create-an-issue -func (s *IssuesService) Create(owner string, repo string, issue *IssueRequest) (*Issue, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/issues", owner, repo) - req, err := s.client.NewRequest("POST", u, issue) - if err != nil { - return nil, nil, err - } - - i := new(Issue) - resp, err := s.client.Do(req, i) - if err != nil { - return nil, resp, err - } - - return i, resp, err -} - -// Edit an issue. -// -// GitHub API docs: http://developer.github.com/v3/issues/#edit-an-issue -func (s *IssuesService) Edit(owner string, repo string, number int, issue *IssueRequest) (*Issue, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/issues/%d", owner, repo, number) - req, err := s.client.NewRequest("PATCH", u, issue) - if err != nil { - return nil, nil, err - } - - i := new(Issue) - resp, err := s.client.Do(req, i) - if err != nil { - return nil, resp, err - } - - return i, resp, err -} diff --git a/vendor/github.com/google/go-github/github/issues_assignees.go b/vendor/github.com/google/go-github/github/issues_assignees.go deleted file mode 100644 index 6338c22..0000000 --- a/vendor/github.com/google/go-github/github/issues_assignees.go +++ /dev/null @@ -1,46 +0,0 @@ -// Copyright 2013 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import "fmt" - -// ListAssignees fetches all available assignees (owners and collaborators) to -// which issues may be assigned. -// -// GitHub API docs: http://developer.github.com/v3/issues/assignees/#list-assignees -func (s *IssuesService) ListAssignees(owner string, repo string, opt *ListOptions) ([]User, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/assignees", owner, repo) - u, err := addOptions(u, opt) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - assignees := new([]User) - resp, err := s.client.Do(req, assignees) - if err != nil { - return nil, resp, err - } - - return *assignees, resp, err -} - -// IsAssignee checks if a user is an assignee for the specified repository. -// -// GitHub API docs: http://developer.github.com/v3/issues/assignees/#check-assignee -func (s *IssuesService) IsAssignee(owner string, repo string, user string) (bool, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/assignees/%v", owner, repo, user) - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return false, nil, err - } - resp, err := s.client.Do(req, nil) - assignee, err := parseBoolResponse(err) - return assignee, resp, err -} diff --git a/vendor/github.com/google/go-github/github/issues_comments.go b/vendor/github.com/google/go-github/github/issues_comments.go deleted file mode 100644 index db48e14..0000000 --- a/vendor/github.com/google/go-github/github/issues_comments.go +++ /dev/null @@ -1,138 +0,0 @@ -// Copyright 2013 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "fmt" - "time" -) - -// IssueComment represents a comment left on an issue. 
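
// A minimal usage sketch (not part of the removed vendor sources) of the
// IssuesService deleted above. It assumes a *github.Client constructed and,
// for the write call, authenticated elsewhere, plus imports of "fmt" and this
// github package; the owner/repo values are placeholders. Only the ListByRepo
// and Create signatures shown in this file are relied on.
func listAndFileIssue(client *github.Client) error {
	open, _, err := client.Issues.ListByRepo("octocat", "Hello-World",
		&github.IssueListByRepoOptions{State: "open"})
	if err != nil {
		return err
	}
	for _, issue := range open {
		fmt.Printf("#%d %s\n", *issue.Number, *issue.Title)
	}

	// IssueRequest uses pointer fields, hence the String helper defined
	// earlier in this package.
	_, _, err = client.Issues.Create("octocat", "Hello-World", &github.IssueRequest{
		Title: github.String("Example issue"),
		Body:  github.String("Opened through the vendored go-github client."),
	})
	return err
}
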
-type IssueComment struct { - ID *int `json:"id,omitempty"` - Body *string `json:"body,omitempty"` - User *User `json:"user,omitempty"` - CreatedAt *time.Time `json:"created_at,omitempty"` - UpdatedAt *time.Time `json:"updated_at,omitempty"` - URL *string `json:"url,omitempty"` - HTMLURL *string `json:"html_url,omitempty"` - IssueURL *string `json:"issue_url,omitempty"` -} - -func (i IssueComment) String() string { - return Stringify(i) -} - -// IssueListCommentsOptions specifies the optional parameters to the -// IssuesService.ListComments method. -type IssueListCommentsOptions struct { - // Sort specifies how to sort comments. Possible values are: created, updated. - Sort string `url:"sort,omitempty"` - - // Direction in which to sort comments. Possible values are: asc, desc. - Direction string `url:"direction,omitempty"` - - // Since filters comments by time. - Since time.Time `url:"since,omitempty"` - - ListOptions -} - -// ListComments lists all comments on the specified issue. Specifying an issue -// number of 0 will return all comments on all issues for the repository. -// -// GitHub API docs: http://developer.github.com/v3/issues/comments/#list-comments-on-an-issue -func (s *IssuesService) ListComments(owner string, repo string, number int, opt *IssueListCommentsOptions) ([]IssueComment, *Response, error) { - var u string - if number == 0 { - u = fmt.Sprintf("repos/%v/%v/issues/comments", owner, repo) - } else { - u = fmt.Sprintf("repos/%v/%v/issues/%d/comments", owner, repo, number) - } - u, err := addOptions(u, opt) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - comments := new([]IssueComment) - resp, err := s.client.Do(req, comments) - if err != nil { - return nil, resp, err - } - - return *comments, resp, err -} - -// GetComment fetches the specified issue comment. -// -// GitHub API docs: http://developer.github.com/v3/issues/comments/#get-a-single-comment -func (s *IssuesService) GetComment(owner string, repo string, id int) (*IssueComment, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/issues/comments/%d", owner, repo, id) - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - comment := new(IssueComment) - resp, err := s.client.Do(req, comment) - if err != nil { - return nil, resp, err - } - - return comment, resp, err -} - -// CreateComment creates a new comment on the specified issue. -// -// GitHub API docs: http://developer.github.com/v3/issues/comments/#create-a-comment -func (s *IssuesService) CreateComment(owner string, repo string, number int, comment *IssueComment) (*IssueComment, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/issues/%d/comments", owner, repo, number) - req, err := s.client.NewRequest("POST", u, comment) - if err != nil { - return nil, nil, err - } - c := new(IssueComment) - resp, err := s.client.Do(req, c) - if err != nil { - return nil, resp, err - } - - return c, resp, err -} - -// EditComment updates an issue comment. 
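
// A small sketch (not part of the removed vendor sources) of posting an issue
// or pull request comment with the CreateComment method shown above -- the
// kind of call a service that comments on pull requests, as this project
// does, would typically use. A configured *github.Client is assumed and the
// arguments are placeholders supplied by the caller.
func postComment(client *github.Client, owner, repo string, number int, body string) (*github.IssueComment, error) {
	comment := &github.IssueComment{Body: github.String(body)}
	created, _, err := client.Issues.CreateComment(owner, repo, number, comment)
	return created, err
}
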
-// -// GitHub API docs: http://developer.github.com/v3/issues/comments/#edit-a-comment -func (s *IssuesService) EditComment(owner string, repo string, id int, comment *IssueComment) (*IssueComment, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/issues/comments/%d", owner, repo, id) - req, err := s.client.NewRequest("PATCH", u, comment) - if err != nil { - return nil, nil, err - } - c := new(IssueComment) - resp, err := s.client.Do(req, c) - if err != nil { - return nil, resp, err - } - - return c, resp, err -} - -// DeleteComment deletes an issue comment. -// -// GitHub API docs: http://developer.github.com/v3/issues/comments/#delete-a-comment -func (s *IssuesService) DeleteComment(owner string, repo string, id int) (*Response, error) { - u := fmt.Sprintf("repos/%v/%v/issues/comments/%d", owner, repo, id) - req, err := s.client.NewRequest("DELETE", u, nil) - if err != nil { - return nil, err - } - return s.client.Do(req, nil) -} diff --git a/vendor/github.com/google/go-github/github/issues_events.go b/vendor/github.com/google/go-github/github/issues_events.go deleted file mode 100644 index 9062d4d..0000000 --- a/vendor/github.com/google/go-github/github/issues_events.go +++ /dev/null @@ -1,149 +0,0 @@ -// Copyright 2014 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "fmt" - "time" -) - -// IssueEvent represents an event that occurred around an Issue or Pull Request. -type IssueEvent struct { - ID *int `json:"id,omitempty"` - URL *string `json:"url,omitempty"` - - // The User that generated this event. - Actor *User `json:"actor,omitempty"` - - // Event identifies the actual type of Event that occurred. Possible - // values are: - // - // closed - // The Actor closed the issue. - // If the issue was closed by commit message, CommitID holds the SHA1 hash of the commit. - // - // merged - // The Actor merged into master a branch containing a commit mentioning the issue. - // CommitID holds the SHA1 of the merge commit. - // - // referenced - // The Actor committed to master a commit mentioning the issue in its commit message. - // CommitID holds the SHA1 of the commit. - // - // reopened, locked, unlocked - // The Actor did that to the issue. - // - // renamed - // The Actor changed the issue title from Rename.From to Rename.To. - // - // mentioned - // Someone unspecified @mentioned the Actor [sic] in an issue comment body. - // - // assigned, unassigned - // The Actor assigned the issue to or removed the assignment from the Assignee. - // - // labeled, unlabeled - // The Actor added or removed the Label from the issue. - // - // milestoned, demilestoned - // The Actor added or removed the issue from the Milestone. - // - // subscribed, unsubscribed - // The Actor subscribed to or unsubscribed from notifications for an issue. - // - // head_ref_deleted, head_ref_restored - // The pull request’s branch was deleted or restored. - // - Event *string `json:"event,omitempty"` - - CreatedAt *time.Time `json:"created_at,omitempty"` - Issue *Issue `json:"issue,omitempty"` - - // Only present on certain events; see above. - Assignee *User `json:"assignee,omitempty"` - CommitID *string `json:"commit_id,omitempty"` - Milestone *Milestone `json:"milestone,omitempty"` - Label *Label `json:"label,omitempty"` - Rename *Rename `json:"rename,omitempty"` -} - -// ListIssueEvents lists events for the specified issue. 
-// -// GitHub API docs: https://developer.github.com/v3/issues/events/#list-events-for-an-issue -func (s *IssuesService) ListIssueEvents(owner, repo string, number int, opt *ListOptions) ([]IssueEvent, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/issues/%v/events", owner, repo, number) - u, err := addOptions(u, opt) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - var events []IssueEvent - resp, err := s.client.Do(req, &events) - if err != nil { - return nil, resp, err - } - - return events, resp, err -} - -// ListRepositoryEvents lists events for the specified repository. -// -// GitHub API docs: https://developer.github.com/v3/issues/events/#list-events-for-a-repository -func (s *IssuesService) ListRepositoryEvents(owner, repo string, opt *ListOptions) ([]IssueEvent, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/issues/events", owner, repo) - u, err := addOptions(u, opt) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - var events []IssueEvent - resp, err := s.client.Do(req, &events) - if err != nil { - return nil, resp, err - } - - return events, resp, err -} - -// GetEvent returns the specified issue event. -// -// GitHub API docs: https://developer.github.com/v3/issues/events/#get-a-single-event -func (s *IssuesService) GetEvent(owner, repo string, id int) (*IssueEvent, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/issues/events/%v", owner, repo, id) - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - event := new(IssueEvent) - resp, err := s.client.Do(req, event) - if err != nil { - return nil, resp, err - } - - return event, resp, err -} - -// Rename contains details for 'renamed' events. -type Rename struct { - From *string `json:"from,omitempty"` - To *string `json:"to,omitempty"` -} - -func (r Rename) String() string { - return Stringify(r) -} diff --git a/vendor/github.com/google/go-github/github/issues_labels.go b/vendor/github.com/google/go-github/github/issues_labels.go deleted file mode 100644 index 88f9f3f..0000000 --- a/vendor/github.com/google/go-github/github/issues_labels.go +++ /dev/null @@ -1,222 +0,0 @@ -// Copyright 2013 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import "fmt" - -// Label represents a GitHub label on an Issue -type Label struct { - URL *string `json:"url,omitempty"` - Name *string `json:"name,omitempty"` - Color *string `json:"color,omitempty"` -} - -func (l Label) String() string { - return fmt.Sprint(*l.Name) -} - -// ListLabels lists all labels for a repository. -// -// GitHub API docs: http://developer.github.com/v3/issues/labels/#list-all-labels-for-this-repository -func (s *IssuesService) ListLabels(owner string, repo string, opt *ListOptions) ([]Label, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/labels", owner, repo) - u, err := addOptions(u, opt) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - labels := new([]Label) - resp, err := s.client.Do(req, labels) - if err != nil { - return nil, resp, err - } - - return *labels, resp, err -} - -// GetLabel gets a single label. 
-// -// GitHub API docs: http://developer.github.com/v3/issues/labels/#get-a-single-label -func (s *IssuesService) GetLabel(owner string, repo string, name string) (*Label, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/labels/%v", owner, repo, name) - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - label := new(Label) - resp, err := s.client.Do(req, label) - if err != nil { - return nil, resp, err - } - - return label, resp, err -} - -// CreateLabel creates a new label on the specified repository. -// -// GitHub API docs: http://developer.github.com/v3/issues/labels/#create-a-label -func (s *IssuesService) CreateLabel(owner string, repo string, label *Label) (*Label, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/labels", owner, repo) - req, err := s.client.NewRequest("POST", u, label) - if err != nil { - return nil, nil, err - } - - l := new(Label) - resp, err := s.client.Do(req, l) - if err != nil { - return nil, resp, err - } - - return l, resp, err -} - -// EditLabel edits a label. -// -// GitHub API docs: http://developer.github.com/v3/issues/labels/#update-a-label -func (s *IssuesService) EditLabel(owner string, repo string, name string, label *Label) (*Label, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/labels/%v", owner, repo, name) - req, err := s.client.NewRequest("PATCH", u, label) - if err != nil { - return nil, nil, err - } - - l := new(Label) - resp, err := s.client.Do(req, l) - if err != nil { - return nil, resp, err - } - - return l, resp, err -} - -// DeleteLabel deletes a label. -// -// GitHub API docs: http://developer.github.com/v3/issues/labels/#delete-a-label -func (s *IssuesService) DeleteLabel(owner string, repo string, name string) (*Response, error) { - u := fmt.Sprintf("repos/%v/%v/labels/%v", owner, repo, name) - req, err := s.client.NewRequest("DELETE", u, nil) - if err != nil { - return nil, err - } - return s.client.Do(req, nil) -} - -// ListLabelsByIssue lists all labels for an issue. -// -// GitHub API docs: http://developer.github.com/v3/issues/labels/#list-all-labels-for-this-repository -func (s *IssuesService) ListLabelsByIssue(owner string, repo string, number int, opt *ListOptions) ([]Label, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/issues/%d/labels", owner, repo, number) - u, err := addOptions(u, opt) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - labels := new([]Label) - resp, err := s.client.Do(req, labels) - if err != nil { - return nil, resp, err - } - - return *labels, resp, err -} - -// AddLabelsToIssue adds labels to an issue. -// -// GitHub API docs: http://developer.github.com/v3/issues/labels/#list-all-labels-for-this-repository -func (s *IssuesService) AddLabelsToIssue(owner string, repo string, number int, labels []string) ([]Label, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/issues/%d/labels", owner, repo, number) - req, err := s.client.NewRequest("POST", u, labels) - if err != nil { - return nil, nil, err - } - - l := new([]Label) - resp, err := s.client.Do(req, l) - if err != nil { - return nil, resp, err - } - - return *l, resp, err -} - -// RemoveLabelForIssue removes a label for an issue. 
-// -// GitHub API docs: http://developer.github.com/v3/issues/labels/#remove-a-label-from-an-issue -func (s *IssuesService) RemoveLabelForIssue(owner string, repo string, number int, label string) (*Response, error) { - u := fmt.Sprintf("repos/%v/%v/issues/%d/labels/%v", owner, repo, number, label) - req, err := s.client.NewRequest("DELETE", u, nil) - if err != nil { - return nil, err - } - return s.client.Do(req, nil) -} - -// ReplaceLabelsForIssue replaces all labels for an issue. -// -// GitHub API docs: http://developer.github.com/v3/issues/labels/#replace-all-labels-for-an-issue -func (s *IssuesService) ReplaceLabelsForIssue(owner string, repo string, number int, labels []string) ([]Label, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/issues/%d/labels", owner, repo, number) - req, err := s.client.NewRequest("PUT", u, labels) - if err != nil { - return nil, nil, err - } - - l := new([]Label) - resp, err := s.client.Do(req, l) - if err != nil { - return nil, resp, err - } - - return *l, resp, err -} - -// RemoveLabelsForIssue removes all labels for an issue. -// -// GitHub API docs: http://developer.github.com/v3/issues/labels/#remove-all-labels-from-an-issue -func (s *IssuesService) RemoveLabelsForIssue(owner string, repo string, number int) (*Response, error) { - u := fmt.Sprintf("repos/%v/%v/issues/%d/labels", owner, repo, number) - req, err := s.client.NewRequest("DELETE", u, nil) - if err != nil { - return nil, err - } - return s.client.Do(req, nil) -} - -// ListLabelsForMilestone lists labels for every issue in a milestone. -// -// GitHub API docs: http://developer.github.com/v3/issues/labels/#get-labels-for-every-issue-in-a-milestone -func (s *IssuesService) ListLabelsForMilestone(owner string, repo string, number int, opt *ListOptions) ([]Label, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/milestones/%d/labels", owner, repo, number) - u, err := addOptions(u, opt) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - labels := new([]Label) - resp, err := s.client.Do(req, labels) - if err != nil { - return nil, resp, err - } - - return *labels, resp, err -} diff --git a/vendor/github.com/google/go-github/github/issues_milestones.go b/vendor/github.com/google/go-github/github/issues_milestones.go deleted file mode 100644 index cbd7920..0000000 --- a/vendor/github.com/google/go-github/github/issues_milestones.go +++ /dev/null @@ -1,140 +0,0 @@ -// Copyright 2014 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "fmt" - "time" -) - -// Milestone represents a Github repository milestone. -type Milestone struct { - URL *string `json:"url,omitempty"` - Number *int `json:"number,omitempty"` - State *string `json:"state,omitempty"` - Title *string `json:"title,omitempty"` - Description *string `json:"description,omitempty"` - Creator *User `json:"creator,omitempty"` - OpenIssues *int `json:"open_issues,omitempty"` - ClosedIssues *int `json:"closed_issues,omitempty"` - CreatedAt *time.Time `json:"created_at,omitempty"` - UpdatedAt *time.Time `json:"updated_at,omitempty"` - DueOn *time.Time `json:"due_on,omitempty"` -} - -func (m Milestone) String() string { - return Stringify(m) -} - -// MilestoneListOptions specifies the optional parameters to the -// IssuesService.ListMilestones method. 
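
// A brief sketch (not part of the removed vendor sources) of label management
// with the methods shown above, assuming a configured *github.Client; the
// label names and arguments are placeholders.
func relabelIssue(client *github.Client, owner, repo string, number int) error {
	// Add labels; the API returns the issue's resulting label set.
	if _, _, err := client.Issues.AddLabelsToIssue(owner, repo, number, []string{"approved"}); err != nil {
		return err
	}
	// Remove a single label from the issue by name.
	_, err := client.Issues.RemoveLabelForIssue(owner, repo, number, "needs-review")
	return err
}
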
-type MilestoneListOptions struct { - // State filters milestones based on their state. Possible values are: - // open, closed. Default is "open". - State string `url:"state,omitempty"` - - // Sort specifies how to sort milestones. Possible values are: due_date, completeness. - // Default value is "due_date". - Sort string `url:"sort,omitempty"` - - // Direction in which to sort milestones. Possible values are: asc, desc. - // Default is "asc". - Direction string `url:"direction,omitempty"` -} - -// ListMilestones lists all milestones for a repository. -// -// GitHub API docs: https://developer.github.com/v3/issues/milestones/#list-milestones-for-a-repository -func (s *IssuesService) ListMilestones(owner string, repo string, opt *MilestoneListOptions) ([]Milestone, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/milestones", owner, repo) - u, err := addOptions(u, opt) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - milestones := new([]Milestone) - resp, err := s.client.Do(req, milestones) - if err != nil { - return nil, resp, err - } - - return *milestones, resp, err -} - -// GetMilestone gets a single milestone. -// -// GitHub API docs: https://developer.github.com/v3/issues/milestones/#get-a-single-milestone -func (s *IssuesService) GetMilestone(owner string, repo string, number int) (*Milestone, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/milestones/%d", owner, repo, number) - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - milestone := new(Milestone) - resp, err := s.client.Do(req, milestone) - if err != nil { - return nil, resp, err - } - - return milestone, resp, err -} - -// CreateMilestone creates a new milestone on the specified repository. -// -// GitHub API docs: https://developer.github.com/v3/issues/milestones/#create-a-milestone -func (s *IssuesService) CreateMilestone(owner string, repo string, milestone *Milestone) (*Milestone, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/milestones", owner, repo) - req, err := s.client.NewRequest("POST", u, milestone) - if err != nil { - return nil, nil, err - } - - m := new(Milestone) - resp, err := s.client.Do(req, m) - if err != nil { - return nil, resp, err - } - - return m, resp, err -} - -// EditMilestone edits a milestone. -// -// GitHub API docs: https://developer.github.com/v3/issues/milestones/#update-a-milestone -func (s *IssuesService) EditMilestone(owner string, repo string, number int, milestone *Milestone) (*Milestone, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/milestones/%d", owner, repo, number) - req, err := s.client.NewRequest("PATCH", u, milestone) - if err != nil { - return nil, nil, err - } - - m := new(Milestone) - resp, err := s.client.Do(req, m) - if err != nil { - return nil, resp, err - } - - return m, resp, err -} - -// DeleteMilestone deletes a milestone. 
-// -// GitHub API docs: https://developer.github.com/v3/issues/milestones/#delete-a-milestone -func (s *IssuesService) DeleteMilestone(owner string, repo string, number int) (*Response, error) { - u := fmt.Sprintf("repos/%v/%v/milestones/%d", owner, repo, number) - req, err := s.client.NewRequest("DELETE", u, nil) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) -} diff --git a/vendor/github.com/google/go-github/github/licenses.go b/vendor/github.com/google/go-github/github/licenses.go deleted file mode 100644 index fb2fb5a..0000000 --- a/vendor/github.com/google/go-github/github/licenses.go +++ /dev/null @@ -1,81 +0,0 @@ -// Copyright 2013 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import "fmt" - -// LicensesService handles communication with the license related -// methods of the GitHub API. -// -// GitHub API docs: http://developer.github.com/v3/pulls/ -type LicensesService struct { - client *Client -} - -// License represents an open source license. -type License struct { - Key *string `json:"key,omitempty"` - Name *string `json:"name,omitempty"` - URL *string `json:"url,omitempty"` - - HTMLURL *string `json:"html_url,omitempty"` - Featured *bool `json:"featured,omitempty"` - Description *string `json:"description,omitempty"` - Category *string `json:"category,omitempty"` - Implementation *string `json:"implementation,omitempty"` - Required *[]string `json:"required,omitempty"` - Permitted *[]string `json:"permitted,omitempty"` - Forbidden *[]string `json:"forbidden,omitempty"` - Body *string `json:"body,omitempty"` -} - -func (l License) String() string { - return Stringify(l) -} - -// List popular open source licenses. -// -// GitHub API docs: https://developer.github.com/v3/licenses/#list-all-licenses -func (s *LicensesService) List() ([]License, *Response, error) { - req, err := s.client.NewRequest("GET", "licenses", nil) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept header when this API fully launches - req.Header.Set("Accept", mediaTypeLicensesPreview) - - licenses := new([]License) - resp, err := s.client.Do(req, licenses) - if err != nil { - return nil, resp, err - } - - return *licenses, resp, err -} - -// Get extended metadata for one license. -// -// GitHub API docs: https://developer.github.com/v3/licenses/#get-an-individual-license -func (s *LicensesService) Get(licenseName string) (*License, *Response, error) { - u := fmt.Sprintf("licenses/%s", licenseName) - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept header when this API fully launches - req.Header.Set("Accept", mediaTypeLicensesPreview) - - license := new(License) - resp, err := s.client.Do(req, license) - if err != nil { - return nil, resp, err - } - - return license, resp, err -} diff --git a/vendor/github.com/google/go-github/github/misc.go b/vendor/github.com/google/go-github/github/misc.go deleted file mode 100644 index 66e7f52..0000000 --- a/vendor/github.com/google/go-github/github/misc.go +++ /dev/null @@ -1,197 +0,0 @@ -// Copyright 2014 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "bytes" - "fmt" - "net/url" -) - -// MarkdownOptions specifies optional parameters to the Markdown method. 
-type MarkdownOptions struct { - // Mode identifies the rendering mode. Possible values are: - // markdown - render a document as plain Markdown, just like - // README files are rendered. - // - // gfm - to render a document as user-content, e.g. like user - // comments or issues are rendered. In GFM mode, hard line breaks are - // always taken into account, and issue and user mentions are linked - // accordingly. - // - // Default is "markdown". - Mode string - - // Context identifies the repository context. Only taken into account - // when rendering as "gfm". - Context string -} - -type markdownRequest struct { - Text *string `json:"text,omitempty"` - Mode *string `json:"mode,omitempty"` - Context *string `json:"context,omitempty"` -} - -// Markdown renders an arbitrary Markdown document. -// -// GitHub API docs: https://developer.github.com/v3/markdown/ -func (c *Client) Markdown(text string, opt *MarkdownOptions) (string, *Response, error) { - request := &markdownRequest{Text: String(text)} - if opt != nil { - if opt.Mode != "" { - request.Mode = String(opt.Mode) - } - if opt.Context != "" { - request.Context = String(opt.Context) - } - } - - req, err := c.NewRequest("POST", "markdown", request) - if err != nil { - return "", nil, err - } - - buf := new(bytes.Buffer) - resp, err := c.Do(req, buf) - if err != nil { - return "", resp, err - } - - return buf.String(), resp, nil -} - -// ListEmojis returns the emojis available to use on GitHub. -// -// GitHub API docs: https://developer.github.com/v3/emojis/ -func (c *Client) ListEmojis() (map[string]string, *Response, error) { - req, err := c.NewRequest("GET", "emojis", nil) - if err != nil { - return nil, nil, err - } - - var emoji map[string]string - resp, err := c.Do(req, &emoji) - if err != nil { - return nil, resp, err - } - - return emoji, resp, nil -} - -// APIMeta represents metadata about the GitHub API. -type APIMeta struct { - // An Array of IP addresses in CIDR format specifying the addresses - // that incoming service hooks will originate from on GitHub.com. - Hooks []string `json:"hooks,omitempty"` - - // An Array of IP addresses in CIDR format specifying the Git servers - // for GitHub.com. - Git []string `json:"git,omitempty"` - - // Whether authentication with username and password is supported. - // (GitHub Enterprise instances using CAS or OAuth for authentication - // will return false. Features like Basic Authentication with a - // username and password, sudo mode, and two-factor authentication are - // not supported on these servers.) - VerifiablePasswordAuthentication *bool `json:"verifiable_password_authentication,omitempty"` - - // An array of IP addresses in CIDR format specifying the addresses - // which serve GitHub Pages websites. - Pages []string `json:"pages,omitempty"` -} - -// APIMeta returns information about GitHub.com, the service. Or, if you access -// this endpoint on your organization’s GitHub Enterprise installation, this -// endpoint provides information about that installation. -// -// GitHub API docs: https://developer.github.com/v3/meta/ -func (c *Client) APIMeta() (*APIMeta, *Response, error) { - req, err := c.NewRequest("GET", "meta", nil) - if err != nil { - return nil, nil, err - } - - meta := new(APIMeta) - resp, err := c.Do(req, meta) - if err != nil { - return nil, resp, err - } - - return meta, resp, nil -} - -// Octocat returns an ASCII art octocat with the specified message in a speech -// bubble. If message is empty, a random zen phrase is used. 
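
// A minimal sketch (not part of the removed vendor sources) of the Markdown
// helper above: render text as GitHub Flavored Markdown in the context of a
// repository. Assumes a configured *github.Client and an import of "fmt";
// the repository context string is a placeholder.
func renderGFM(client *github.Client, text string) (string, error) {
	html, _, err := client.Markdown(text, &github.MarkdownOptions{
		Mode:    "gfm",
		Context: "octocat/Hello-World",
	})
	if err != nil {
		return "", err
	}
	fmt.Println(html)
	return html, nil
}
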
-func (c *Client) Octocat(message string) (string, *Response, error) { - u := "octocat" - if message != "" { - u = fmt.Sprintf("%s?s=%s", u, url.QueryEscape(message)) - } - - req, err := c.NewRequest("GET", u, nil) - if err != nil { - return "", nil, err - } - - buf := new(bytes.Buffer) - resp, err := c.Do(req, buf) - if err != nil { - return "", resp, err - } - - return buf.String(), resp, nil -} - -// Zen returns a random line from The Zen of GitHub. -// -// see also: http://warpspire.com/posts/taste/ -func (c *Client) Zen() (string, *Response, error) { - req, err := c.NewRequest("GET", "zen", nil) - if err != nil { - return "", nil, err - } - - buf := new(bytes.Buffer) - resp, err := c.Do(req, buf) - if err != nil { - return "", resp, err - } - - return buf.String(), resp, nil -} - -// ServiceHook represents a hook that has configuration settings, a list of -// available events, and default events. -type ServiceHook struct { - Name *string `json:"name,omitempty"` - Events []string `json:"events,omitempty"` - SupportedEvents []string `json:"supported_events,omitempty"` - Schema [][]string `json:"schema,omitempty"` -} - -func (s *ServiceHook) String() string { - return Stringify(s) -} - -// ListServiceHooks lists all of the available service hooks. -// -// GitHub API docs: https://developer.github.com/webhooks/#services -func (c *Client) ListServiceHooks() ([]ServiceHook, *Response, error) { - u := "hooks" - req, err := c.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - hooks := new([]ServiceHook) - resp, err := c.Do(req, hooks) - if err != nil { - return nil, resp, err - } - - return *hooks, resp, err -} diff --git a/vendor/github.com/google/go-github/github/orgs.go b/vendor/github.com/google/go-github/github/orgs.go deleted file mode 100644 index 7596873..0000000 --- a/vendor/github.com/google/go-github/github/orgs.go +++ /dev/null @@ -1,137 +0,0 @@ -// Copyright 2013 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "fmt" - "time" -) - -// OrganizationsService provides access to the organization related functions -// in the GitHub API. -// -// GitHub API docs: http://developer.github.com/v3/orgs/ -type OrganizationsService struct { - client *Client -} - -// Organization represents a GitHub organization account. 
-type Organization struct { - Login *string `json:"login,omitempty"` - ID *int `json:"id,omitempty"` - AvatarURL *string `json:"avatar_url,omitempty"` - HTMLURL *string `json:"html_url,omitempty"` - Name *string `json:"name,omitempty"` - Company *string `json:"company,omitempty"` - Blog *string `json:"blog,omitempty"` - Location *string `json:"location,omitempty"` - Email *string `json:"email,omitempty"` - PublicRepos *int `json:"public_repos,omitempty"` - PublicGists *int `json:"public_gists,omitempty"` - Followers *int `json:"followers,omitempty"` - Following *int `json:"following,omitempty"` - CreatedAt *time.Time `json:"created_at,omitempty"` - UpdatedAt *time.Time `json:"updated_at,omitempty"` - TotalPrivateRepos *int `json:"total_private_repos,omitempty"` - OwnedPrivateRepos *int `json:"owned_private_repos,omitempty"` - PrivateGists *int `json:"private_gists,omitempty"` - DiskUsage *int `json:"disk_usage,omitempty"` - Collaborators *int `json:"collaborators,omitempty"` - BillingEmail *string `json:"billing_email,omitempty"` - Type *string `json:"type,omitempty"` - Plan *Plan `json:"plan,omitempty"` - - // API URLs - URL *string `json:"url,omitempty"` - EventsURL *string `json:"events_url,omitempty"` - MembersURL *string `json:"members_url,omitempty"` - PublicMembersURL *string `json:"public_members_url,omitempty"` - ReposURL *string `json:"repos_url,omitempty"` -} - -func (o Organization) String() string { - return Stringify(o) -} - -// Plan represents the payment plan for an account. See plans at https://github.com/plans. -type Plan struct { - Name *string `json:"name,omitempty"` - Space *int `json:"space,omitempty"` - Collaborators *int `json:"collaborators,omitempty"` - PrivateRepos *int `json:"private_repos,omitempty"` -} - -func (p Plan) String() string { - return Stringify(p) -} - -// List the organizations for a user. Passing the empty string will list -// organizations for the authenticated user. -// -// GitHub API docs: http://developer.github.com/v3/orgs/#list-user-organizations -func (s *OrganizationsService) List(user string, opt *ListOptions) ([]Organization, *Response, error) { - var u string - if user != "" { - u = fmt.Sprintf("users/%v/orgs", user) - } else { - u = "user/orgs" - } - u, err := addOptions(u, opt) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - orgs := new([]Organization) - resp, err := s.client.Do(req, orgs) - if err != nil { - return nil, resp, err - } - - return *orgs, resp, err -} - -// Get fetches an organization by name. -// -// GitHub API docs: http://developer.github.com/v3/orgs/#get-an-organization -func (s *OrganizationsService) Get(org string) (*Organization, *Response, error) { - u := fmt.Sprintf("orgs/%v", org) - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - organization := new(Organization) - resp, err := s.client.Do(req, organization) - if err != nil { - return nil, resp, err - } - - return organization, resp, err -} - -// Edit an organization. 
-// -// GitHub API docs: http://developer.github.com/v3/orgs/#edit-an-organization -func (s *OrganizationsService) Edit(name string, org *Organization) (*Organization, *Response, error) { - u := fmt.Sprintf("orgs/%v", name) - req, err := s.client.NewRequest("PATCH", u, org) - if err != nil { - return nil, nil, err - } - - o := new(Organization) - resp, err := s.client.Do(req, o) - if err != nil { - return nil, resp, err - } - - return o, resp, err -} diff --git a/vendor/github.com/google/go-github/github/orgs_hooks.go b/vendor/github.com/google/go-github/github/orgs_hooks.go deleted file mode 100644 index 3e7ad40..0000000 --- a/vendor/github.com/google/go-github/github/orgs_hooks.go +++ /dev/null @@ -1,104 +0,0 @@ -// Copyright 2015 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import "fmt" - -// ListHooks lists all Hooks for the specified organization. -// -// GitHub API docs: https://developer.github.com/v3/orgs/hooks/#list-hooks -func (s *OrganizationsService) ListHooks(org string, opt *ListOptions) ([]Hook, *Response, error) { - u := fmt.Sprintf("orgs/%v/hooks", org) - u, err := addOptions(u, opt) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - hooks := new([]Hook) - resp, err := s.client.Do(req, hooks) - if err != nil { - return nil, resp, err - } - - return *hooks, resp, err -} - -// GetHook returns a single specified Hook. -// -// GitHub API docs: https://developer.github.com/v3/orgs/hooks/#get-single-hook -func (s *OrganizationsService) GetHook(org string, id int) (*Hook, *Response, error) { - u := fmt.Sprintf("orgs/%v/hooks/%d", org, id) - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - hook := new(Hook) - resp, err := s.client.Do(req, hook) - return hook, resp, err -} - -// CreateHook creates a Hook for the specified org. -// Name and Config are required fields. -// -// GitHub API docs: https://developer.github.com/v3/orgs/hooks/#create-a-hook -func (s *OrganizationsService) CreateHook(org string, hook *Hook) (*Hook, *Response, error) { - u := fmt.Sprintf("orgs/%v/hooks", org) - req, err := s.client.NewRequest("POST", u, hook) - if err != nil { - return nil, nil, err - } - - h := new(Hook) - resp, err := s.client.Do(req, h) - if err != nil { - return nil, resp, err - } - - return h, resp, err -} - -// EditHook updates a specified Hook. -// -// GitHub API docs: https://developer.github.com/v3/orgs/hooks/#edit-a-hook -func (s *OrganizationsService) EditHook(org string, id int, hook *Hook) (*Hook, *Response, error) { - u := fmt.Sprintf("orgs/%v/hooks/%d", org, id) - req, err := s.client.NewRequest("PATCH", u, hook) - if err != nil { - return nil, nil, err - } - h := new(Hook) - resp, err := s.client.Do(req, h) - return h, resp, err -} - -// PingHook triggers a 'ping' event to be sent to the Hook. -// -// GitHub API docs: https://developer.github.com/v3/orgs/hooks/#ping-a-hook -func (s *OrganizationsService) PingHook(org string, id int) (*Response, error) { - u := fmt.Sprintf("orgs/%v/hooks/%d/pings", org, id) - req, err := s.client.NewRequest("POST", u, nil) - if err != nil { - return nil, err - } - return s.client.Do(req, nil) -} - -// DeleteHook deletes a specified Hook. 
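
// A sketch (not part of the removed vendor sources) of registering an
// organization webhook with the CreateHook method above -- the sort of hook a
// service receiving pull request and comment events would install. The Hook
// type is defined elsewhere in this vendored package; its field names used
// here (Name, Config, Events, Active) and the Bool helper are assumptions
// based on that package, and the callback URL is a placeholder.
func installOrgHook(client *github.Client, org string) (*github.Hook, error) {
	hook := &github.Hook{
		Name:   github.String("web"),
		Active: github.Bool(true),
		Events: []string{"pull_request", "issue_comment", "status"},
		Config: map[string]interface{}{
			"url":          "https://checks-out.example.com/hook",
			"content_type": "json",
		},
	}
	created, _, err := client.Organizations.CreateHook(org, hook)
	return created, err
}
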
-// -// GitHub API docs: https://developer.github.com/v3/orgs/hooks/#delete-a-hook -func (s *OrganizationsService) DeleteHook(org string, id int) (*Response, error) { - u := fmt.Sprintf("orgs/%v/hooks/%d", org, id) - req, err := s.client.NewRequest("DELETE", u, nil) - if err != nil { - return nil, err - } - return s.client.Do(req, nil) -} diff --git a/vendor/github.com/google/go-github/github/orgs_members.go b/vendor/github.com/google/go-github/github/orgs_members.go deleted file mode 100644 index c326ff8..0000000 --- a/vendor/github.com/google/go-github/github/orgs_members.go +++ /dev/null @@ -1,276 +0,0 @@ -// Copyright 2013 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import "fmt" - -// Membership represents the status of a user's membership in an organization or team. -type Membership struct { - URL *string `json:"url,omitempty"` - - // State is the user's status within the organization or team. - // Possible values are: "active", "pending" - State *string `json:"state,omitempty"` - - // Role identifies the user's role within the organization or team. - // Possible values for organization membership: - // member - non-owner organization member - // admin - organization owner - // - // Possible values for team membership are: - // member - a normal member of the team - // maintainer - a team maintainer. Able to add/remove other team - // members, promote other team members to team - // maintainer, and edit the team’s name and description - Role *string `json:"role,omitempty"` - - // For organization membership, the API URL of the organization. - OrganizationURL *string `json:"organization_url,omitempty"` - - // For organization membership, the organization the membership is for. - Organization *Organization `json:"organization,omitempty"` - - // For organization membership, the user the membership is for. - User *User `json:"user,omitempty"` -} - -func (m Membership) String() string { - return Stringify(m) -} - -// ListMembersOptions specifies optional parameters to the -// OrganizationsService.ListMembers method. -type ListMembersOptions struct { - // If true (or if the authenticated user is not an owner of the - // organization), list only publicly visible members. - PublicOnly bool `url:"-"` - - // Filter members returned in the list. Possible values are: - // 2fa_disabled, all. Default is "all". - Filter string `url:"filter,omitempty"` - - // Role filters memebers returned by their role in the organization. - // Possible values are: - // all - all members of the organization, regardless of role - // admin - organization owners - // member - non-organization members - // - // Default is "all". - Role string `url:"role,omitempty"` - - ListOptions -} - -// ListMembers lists the members for an organization. If the authenticated -// user is an owner of the organization, this will return both concealed and -// public members, otherwise it will only return public members. 
-// -// GitHub API docs: http://developer.github.com/v3/orgs/members/#members-list -func (s *OrganizationsService) ListMembers(org string, opt *ListMembersOptions) ([]User, *Response, error) { - var u string - if opt != nil && opt.PublicOnly { - u = fmt.Sprintf("orgs/%v/public_members", org) - } else { - u = fmt.Sprintf("orgs/%v/members", org) - } - u, err := addOptions(u, opt) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - if opt != nil && opt.Role != "" { - req.Header.Set("Accept", mediaTypeOrgPermissionPreview) - } - - members := new([]User) - resp, err := s.client.Do(req, members) - if err != nil { - return nil, resp, err - } - - return *members, resp, err -} - -// IsMember checks if a user is a member of an organization. -// -// GitHub API docs: http://developer.github.com/v3/orgs/members/#check-membership -func (s *OrganizationsService) IsMember(org, user string) (bool, *Response, error) { - u := fmt.Sprintf("orgs/%v/members/%v", org, user) - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return false, nil, err - } - - resp, err := s.client.Do(req, nil) - member, err := parseBoolResponse(err) - return member, resp, err -} - -// IsPublicMember checks if a user is a public member of an organization. -// -// GitHub API docs: http://developer.github.com/v3/orgs/members/#check-public-membership -func (s *OrganizationsService) IsPublicMember(org, user string) (bool, *Response, error) { - u := fmt.Sprintf("orgs/%v/public_members/%v", org, user) - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return false, nil, err - } - - resp, err := s.client.Do(req, nil) - member, err := parseBoolResponse(err) - return member, resp, err -} - -// RemoveMember removes a user from all teams of an organization. -// -// GitHub API docs: http://developer.github.com/v3/orgs/members/#remove-a-member -func (s *OrganizationsService) RemoveMember(org, user string) (*Response, error) { - u := fmt.Sprintf("orgs/%v/members/%v", org, user) - req, err := s.client.NewRequest("DELETE", u, nil) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) -} - -// PublicizeMembership publicizes a user's membership in an organization. (A -// user cannot publicize the membership for another user.) -// -// GitHub API docs: http://developer.github.com/v3/orgs/members/#publicize-a-users-membership -func (s *OrganizationsService) PublicizeMembership(org, user string) (*Response, error) { - u := fmt.Sprintf("orgs/%v/public_members/%v", org, user) - req, err := s.client.NewRequest("PUT", u, nil) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) -} - -// ConcealMembership conceals a user's membership in an organization. -// -// GitHub API docs: http://developer.github.com/v3/orgs/members/#conceal-a-users-membership -func (s *OrganizationsService) ConcealMembership(org, user string) (*Response, error) { - u := fmt.Sprintf("orgs/%v/public_members/%v", org, user) - req, err := s.client.NewRequest("DELETE", u, nil) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) -} - -// ListOrgMembershipsOptions specifies optional parameters to the -// OrganizationsService.ListOrgMemberships method. -type ListOrgMembershipsOptions struct { - // Filter memberships to include only those with the specified state. - // Possible values are: "active", "pending". 
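
// A short sketch (not part of the removed vendor sources) of the membership
// queries shown above, assuming a configured *github.Client and an import of
// "fmt"; org and user are placeholders.
func reportMembership(client *github.Client, org, user string) error {
	member, _, err := client.Organizations.IsMember(org, user)
	if err != nil {
		return err
	}
	fmt.Printf("%s member of %s: %v\n", user, org, member)

	// List only the organization owners using the Role filter.
	admins, _, err := client.Organizations.ListMembers(org, &github.ListMembersOptions{Role: "admin"})
	if err != nil {
		return err
	}
	fmt.Printf("%s has %d owners\n", org, len(admins))
	return nil
}
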
- State string `url:"state,omitempty"` - - ListOptions -} - -// ListOrgMemberships lists the organization memberships for the authenticated user. -// -// GitHub API docs: https://developer.github.com/v3/orgs/members/#list-your-organization-memberships -func (s *OrganizationsService) ListOrgMemberships(opt *ListOrgMembershipsOptions) ([]Membership, *Response, error) { - u := "user/memberships/orgs" - u, err := addOptions(u, opt) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - var memberships []Membership - resp, err := s.client.Do(req, &memberships) - if err != nil { - return nil, resp, err - } - - return memberships, resp, err -} - -// GetOrgMembership gets the membership for a user in a specified organization. -// Passing an empty string for user will get the membership for the -// authenticated user. -// -// GitHub API docs: https://developer.github.com/v3/orgs/members/#get-organization-membership -// GitHub API docs: https://developer.github.com/v3/orgs/members/#get-your-organization-membership -func (s *OrganizationsService) GetOrgMembership(user, org string) (*Membership, *Response, error) { - var u string - if user != "" { - u = fmt.Sprintf("orgs/%v/memberships/%v", org, user) - } else { - u = fmt.Sprintf("user/memberships/orgs/%v", org) - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - membership := new(Membership) - resp, err := s.client.Do(req, membership) - if err != nil { - return nil, resp, err - } - - return membership, resp, err -} - -// EditOrgMembership edits the membership for user in specified organization. -// Passing an empty string for user will edit the membership for the -// authenticated user. -// -// GitHub API docs: https://developer.github.com/v3/orgs/members/#add-or-update-organization-membership -// GitHub API docs: https://developer.github.com/v3/orgs/members/#edit-your-organization-membership -func (s *OrganizationsService) EditOrgMembership(user, org string, membership *Membership) (*Membership, *Response, error) { - var u, method string - if user != "" { - u = fmt.Sprintf("orgs/%v/memberships/%v", org, user) - method = "PUT" - } else { - u = fmt.Sprintf("user/memberships/orgs/%v", org) - method = "PATCH" - } - - req, err := s.client.NewRequest(method, u, membership) - if err != nil { - return nil, nil, err - } - - m := new(Membership) - resp, err := s.client.Do(req, m) - if err != nil { - return nil, resp, err - } - - return m, resp, err -} - -// RemoveOrgMembership removes user from the specified organization. If the -// user has been invited to the organization, this will cancel their invitation. -// -// GitHub API docs: https://developer.github.com/v3/orgs/members/#remove-organization-membership -func (s *OrganizationsService) RemoveOrgMembership(user, org string) (*Response, error) { - u := fmt.Sprintf("orgs/%v/memberships/%v", org, user) - req, err := s.client.NewRequest("DELETE", u, nil) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) -} diff --git a/vendor/github.com/google/go-github/github/orgs_teams.go b/vendor/github.com/google/go-github/github/orgs_teams.go deleted file mode 100644 index 858c545..0000000 --- a/vendor/github.com/google/go-github/github/orgs_teams.go +++ /dev/null @@ -1,396 +0,0 @@ -// Copyright 2013 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package github - -import "fmt" - -// Team represents a team within a GitHub organization. Teams are used to -// manage access to an organization's repositories. -type Team struct { - ID *int `json:"id,omitempty"` - Name *string `json:"name,omitempty"` - URL *string `json:"url,omitempty"` - Slug *string `json:"slug,omitempty"` - - // Permission is deprecated when creating or editing a team in an org - // using the new GitHub permission model. It no longer identifies the - // permission a team has on its repos, but only specifies the default - // permission a repo is initially added with. Avoid confusion by - // specifying a permission value when calling AddTeamRepo. - Permission *string `json:"permission,omitempty"` - - // Privacy identifies the level of privacy this team should have. - // Possible values are: - // secret - only visible to organization owners and members of this team - // closed - visible to all members of this organization - // Default is "secret". - Privacy *string `json:"privacy,omitempty"` - - MembersCount *int `json:"members_count,omitempty"` - ReposCount *int `json:"repos_count,omitempty"` - Organization *Organization `json:"organization,omitempty"` -} - -func (t Team) String() string { - return Stringify(t) -} - -// ListTeams lists all of the teams for an organization. -// -// GitHub API docs: http://developer.github.com/v3/orgs/teams/#list-teams -func (s *OrganizationsService) ListTeams(org string, opt *ListOptions) ([]Team, *Response, error) { - u := fmt.Sprintf("orgs/%v/teams", org) - u, err := addOptions(u, opt) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - teams := new([]Team) - resp, err := s.client.Do(req, teams) - if err != nil { - return nil, resp, err - } - - return *teams, resp, err -} - -// GetTeam fetches a team by ID. -// -// GitHub API docs: http://developer.github.com/v3/orgs/teams/#get-team -func (s *OrganizationsService) GetTeam(team int) (*Team, *Response, error) { - u := fmt.Sprintf("teams/%v", team) - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - t := new(Team) - resp, err := s.client.Do(req, t) - if err != nil { - return nil, resp, err - } - - return t, resp, err -} - -// CreateTeam creates a new team within an organization. -// -// GitHub API docs: http://developer.github.com/v3/orgs/teams/#create-team -func (s *OrganizationsService) CreateTeam(org string, team *Team) (*Team, *Response, error) { - u := fmt.Sprintf("orgs/%v/teams", org) - req, err := s.client.NewRequest("POST", u, team) - if err != nil { - return nil, nil, err - } - - if team.Privacy != nil { - req.Header.Set("Accept", mediaTypeOrgPermissionPreview) - } - - t := new(Team) - resp, err := s.client.Do(req, t) - if err != nil { - return nil, resp, err - } - - return t, resp, err -} - -// EditTeam edits a team. -// -// GitHub API docs: http://developer.github.com/v3/orgs/teams/#edit-team -func (s *OrganizationsService) EditTeam(id int, team *Team) (*Team, *Response, error) { - u := fmt.Sprintf("teams/%v", id) - req, err := s.client.NewRequest("PATCH", u, team) - if err != nil { - return nil, nil, err - } - - if team.Privacy != nil { - req.Header.Set("Accept", mediaTypeOrgPermissionPreview) - } - - t := new(Team) - resp, err := s.client.Do(req, t) - if err != nil { - return nil, resp, err - } - - return t, resp, err -} - -// DeleteTeam deletes a team. 
-// -// GitHub API docs: http://developer.github.com/v3/orgs/teams/#delete-team -func (s *OrganizationsService) DeleteTeam(team int) (*Response, error) { - u := fmt.Sprintf("teams/%v", team) - req, err := s.client.NewRequest("DELETE", u, nil) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) -} - -// OrganizationListTeamMembersOptions specifies the optional parameters to the -// OrganizationsService.ListTeamMembers method. -type OrganizationListTeamMembersOptions struct { - // Role filters members returned by their role in the team. Possible - // values are "all", "member", "maintainer". Default is "all". - Role string `url:"role,omitempty"` - - ListOptions -} - -// ListTeamMembers lists all of the users who are members of the specified -// team. -// -// GitHub API docs: http://developer.github.com/v3/orgs/teams/#list-team-members -func (s *OrganizationsService) ListTeamMembers(team int, opt *OrganizationListTeamMembersOptions) ([]User, *Response, error) { - u := fmt.Sprintf("teams/%v/members", team) - u, err := addOptions(u, opt) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - if opt != nil && opt.Role != "" { - req.Header.Set("Accept", mediaTypeOrgPermissionPreview) - } - - members := new([]User) - resp, err := s.client.Do(req, members) - if err != nil { - return nil, resp, err - } - - return *members, resp, err -} - -// IsTeamMember checks if a user is a member of the specified team. -// -// GitHub API docs: http://developer.github.com/v3/orgs/teams/#get-team-member -func (s *OrganizationsService) IsTeamMember(team int, user string) (bool, *Response, error) { - u := fmt.Sprintf("teams/%v/members/%v", team, user) - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return false, nil, err - } - - resp, err := s.client.Do(req, nil) - member, err := parseBoolResponse(err) - return member, resp, err -} - -// ListTeamRepos lists the repositories that the specified team has access to. -// -// GitHub API docs: http://developer.github.com/v3/orgs/teams/#list-team-repos -func (s *OrganizationsService) ListTeamRepos(team int, opt *ListOptions) ([]Repository, *Response, error) { - u := fmt.Sprintf("teams/%v/repos", team) - u, err := addOptions(u, opt) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - repos := new([]Repository) - resp, err := s.client.Do(req, repos) - if err != nil { - return nil, resp, err - } - - return *repos, resp, err -} - -// IsTeamRepo checks if a team manages the specified repository. If the -// repository is managed by team, a Repository is returned which includes the -// permissions team has for that repo. -// -// GitHub API docs: http://developer.github.com/v3/orgs/teams/#get-team-repo -func (s *OrganizationsService) IsTeamRepo(team int, owner string, repo string) (*Repository, *Response, error) { - u := fmt.Sprintf("teams/%v/repos/%v/%v", team, owner, repo) - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - req.Header.Set("Accept", mediaTypeOrgPermissionRepoPreview) - - repository := new(Repository) - resp, err := s.client.Do(req, repository) - if err != nil { - return nil, resp, err - } - - return repository, resp, err -} - -// OrganizationAddTeamRepoOptions specifies the optional parameters to the -// OrganizationsService.AddTeamRepo method. 
-type OrganizationAddTeamRepoOptions struct { - // Permission specifies the permission to grant the team on this repository. - // Possible values are: - // pull - team members can pull, but not push to or administer this repository - // push - team members can pull and push, but not administer this repository - // admin - team members can pull, push and administer this repository - // - // If not specified, the team's permission attribute will be used. - Permission string `json:"permission,omitempty"` -} - -// AddTeamRepo adds a repository to be managed by the specified team. The -// specified repository must be owned by the organization to which the team -// belongs, or a direct fork of a repository owned by the organization. -// -// GitHub API docs: http://developer.github.com/v3/orgs/teams/#add-team-repo -func (s *OrganizationsService) AddTeamRepo(team int, owner string, repo string, opt *OrganizationAddTeamRepoOptions) (*Response, error) { - u := fmt.Sprintf("teams/%v/repos/%v/%v", team, owner, repo) - req, err := s.client.NewRequest("PUT", u, opt) - if err != nil { - return nil, err - } - - if opt != nil { - req.Header.Set("Accept", mediaTypeOrgPermissionPreview) - } - - return s.client.Do(req, nil) -} - -// RemoveTeamRepo removes a repository from being managed by the specified -// team. Note that this does not delete the repository, it just removes it -// from the team. -// -// GitHub API docs: http://developer.github.com/v3/orgs/teams/#remove-team-repo -func (s *OrganizationsService) RemoveTeamRepo(team int, owner string, repo string) (*Response, error) { - u := fmt.Sprintf("teams/%v/repos/%v/%v", team, owner, repo) - req, err := s.client.NewRequest("DELETE", u, nil) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) -} - -// ListUserTeams lists a user's teams -// GitHub API docs: https://developer.github.com/v3/orgs/teams/#list-user-teams -func (s *OrganizationsService) ListUserTeams(opt *ListOptions) ([]Team, *Response, error) { - u := "user/teams" - u, err := addOptions(u, opt) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - teams := new([]Team) - resp, err := s.client.Do(req, teams) - if err != nil { - return nil, resp, err - } - - return *teams, resp, err -} - -// GetTeamMembership returns the membership status for a user in a team. -// -// GitHub API docs: https://developer.github.com/v3/orgs/teams/#get-team-membership -func (s *OrganizationsService) GetTeamMembership(team int, user string) (*Membership, *Response, error) { - u := fmt.Sprintf("teams/%v/memberships/%v", team, user) - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - t := new(Membership) - resp, err := s.client.Do(req, t) - if err != nil { - return nil, resp, err - } - - return t, resp, err -} - -// OrganizationAddTeamMembershipOptions does stuff specifies the optional -// parameters to the OrganizationsService.AddTeamMembership method. -type OrganizationAddTeamMembershipOptions struct { - // Role specifies the role the user should have in the team. Possible - // values are: - // member - a normal member of the team - // maintainer - a team maintainer. Able to add/remove other team - // members, promote other team members to team - // maintainer, and edit the team’s name and description - // - // Default value is "member". - Role string `json:"role,omitempty"` -} - -// AddTeamMembership adds or invites a user to a team. 
-// -// In order to add a membership between a user and a team, the authenticated -// user must have 'admin' permissions to the team or be an owner of the -// organization that the team is associated with. -// -// If the user is already a part of the team's organization (meaning they're on -// at least one other team in the organization), this endpoint will add the -// user to the team. -// -// If the user is completely unaffiliated with the team's organization (meaning -// they're on none of the organization's teams), this endpoint will send an -// invitation to the user via email. This newly-created membership will be in -// the "pending" state until the user accepts the invitation, at which point -// the membership will transition to the "active" state and the user will be -// added as a member of the team. -// -// GitHub API docs: https://developer.github.com/v3/orgs/teams/#add-team-membership -func (s *OrganizationsService) AddTeamMembership(team int, user string, opt *OrganizationAddTeamMembershipOptions) (*Membership, *Response, error) { - u := fmt.Sprintf("teams/%v/memberships/%v", team, user) - req, err := s.client.NewRequest("PUT", u, opt) - if err != nil { - return nil, nil, err - } - - if opt != nil { - req.Header.Set("Accept", mediaTypeOrgPermissionPreview) - } - - t := new(Membership) - resp, err := s.client.Do(req, t) - if err != nil { - return nil, resp, err - } - - return t, resp, err -} - -// RemoveTeamMembership removes a user from a team. -// -// GitHub API docs: https://developer.github.com/v3/orgs/teams/#remove-team-membership -func (s *OrganizationsService) RemoveTeamMembership(team int, user string) (*Response, error) { - u := fmt.Sprintf("teams/%v/memberships/%v", team, user) - req, err := s.client.NewRequest("DELETE", u, nil) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) -} diff --git a/vendor/github.com/google/go-github/github/pulls.go b/vendor/github.com/google/go-github/github/pulls.go deleted file mode 100644 index 71cf2e2..0000000 --- a/vendor/github.com/google/go-github/github/pulls.go +++ /dev/null @@ -1,275 +0,0 @@ -// Copyright 2013 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "fmt" - "time" -) - -// PullRequestsService handles communication with the pull request related -// methods of the GitHub API. -// -// GitHub API docs: http://developer.github.com/v3/pulls/ -type PullRequestsService struct { - client *Client -} - -// PullRequest represents a GitHub pull request on a repository. 
-type PullRequest struct { - Number *int `json:"number,omitempty"` - State *string `json:"state,omitempty"` - Title *string `json:"title,omitempty"` - Body *string `json:"body,omitempty"` - CreatedAt *time.Time `json:"created_at,omitempty"` - UpdatedAt *time.Time `json:"updated_at,omitempty"` - ClosedAt *time.Time `json:"closed_at,omitempty"` - MergedAt *time.Time `json:"merged_at,omitempty"` - User *User `json:"user,omitempty"` - Merged *bool `json:"merged,omitempty"` - Mergeable *bool `json:"mergeable,omitempty"` - MergedBy *User `json:"merged_by,omitempty"` - Comments *int `json:"comments,omitempty"` - Commits *int `json:"commits,omitempty"` - Additions *int `json:"additions,omitempty"` - Deletions *int `json:"deletions,omitempty"` - ChangedFiles *int `json:"changed_files,omitempty"` - URL *string `json:"url,omitempty"` - HTMLURL *string `json:"html_url,omitempty"` - IssueURL *string `json:"issue_url,omitempty"` - StatusesURL *string `json:"statuses_url,omitempty"` - DiffURL *string `json:"diff_url,omitempty"` - PatchURL *string `json:"patch_url,omitempty"` - - Head *PullRequestBranch `json:"head,omitempty"` - Base *PullRequestBranch `json:"base,omitempty"` -} - -func (p PullRequest) String() string { - return Stringify(p) -} - -// PullRequestBranch represents a base or head branch in a GitHub pull request. -type PullRequestBranch struct { - Label *string `json:"label,omitempty"` - Ref *string `json:"ref,omitempty"` - SHA *string `json:"sha,omitempty"` - Repo *Repository `json:"repo,omitempty"` - User *User `json:"user,omitempty"` -} - -// PullRequestListOptions specifies the optional parameters to the -// PullRequestsService.List method. -type PullRequestListOptions struct { - // State filters pull requests based on their state. Possible values are: - // open, closed. Default is "open". - State string `url:"state,omitempty"` - - // Head filters pull requests by head user and branch name in the format of: - // "user:ref-name". - Head string `url:"head,omitempty"` - - // Base filters pull requests by base branch name. - Base string `url:"base,omitempty"` - - // Sort specifies how to sort pull requests. Possible values are: created, - // updated, popularity, long-running. Default is "created". - Sort string `url:"sort,omitempty"` - - // Direction in which to sort pull requests. Possible values are: asc, desc. - // If Sort is "created" or not specified, Default is "desc", otherwise Default - // is "asc" - Direction string `url:"direction,omitempty"` - - ListOptions -} - -// List the pull requests for the specified repository. -// -// GitHub API docs: http://developer.github.com/v3/pulls/#list-pull-requests -func (s *PullRequestsService) List(owner string, repo string, opt *PullRequestListOptions) ([]PullRequest, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/pulls", owner, repo) - u, err := addOptions(u, opt) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - pulls := new([]PullRequest) - resp, err := s.client.Do(req, pulls) - if err != nil { - return nil, resp, err - } - - return *pulls, resp, err -} - -// Get a single pull request. 
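
A minimal sketch of listing pull requests with the PullRequestListOptions struct above; the owner, repository, and base branch are placeholders, and only the first page of results is fetched.

package main

import (
	"fmt"
	"log"

	"github.com/google/go-github/github"
)

func main() {
	client := github.NewClient(nil) // placeholder: unauthenticated client

	opts := &github.PullRequestListOptions{
		State:       "open",
		Base:        "master", // placeholder base branch
		ListOptions: github.ListOptions{PerPage: 50},
	}
	pulls, _, err := client.PullRequests.List("some-owner", "some-repo", opts)
	if err != nil {
		log.Fatal(err)
	}
	for _, pr := range pulls {
		fmt.Printf("#%d %s\n", *pr.Number, *pr.Title)
	}
}
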
-// -// GitHub API docs: https://developer.github.com/v3/pulls/#get-a-single-pull-request -func (s *PullRequestsService) Get(owner string, repo string, number int) (*PullRequest, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/pulls/%d", owner, repo, number) - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - pull := new(PullRequest) - resp, err := s.client.Do(req, pull) - if err != nil { - return nil, resp, err - } - - return pull, resp, err -} - -// NewPullRequest represents a new pull request to be created. -type NewPullRequest struct { - Title *string `json:"title,omitempty"` - Head *string `json:"head,omitempty"` - Base *string `json:"base,omitempty"` - Body *string `json:"body,omitempty"` - Issue *int `json:"issue,omitempty"` -} - -// Create a new pull request on the specified repository. -// -// GitHub API docs: https://developer.github.com/v3/pulls/#create-a-pull-request -func (s *PullRequestsService) Create(owner string, repo string, pull *NewPullRequest) (*PullRequest, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/pulls", owner, repo) - req, err := s.client.NewRequest("POST", u, pull) - if err != nil { - return nil, nil, err - } - - p := new(PullRequest) - resp, err := s.client.Do(req, p) - if err != nil { - return nil, resp, err - } - - return p, resp, err -} - -// Edit a pull request. -// -// GitHub API docs: https://developer.github.com/v3/pulls/#update-a-pull-request -func (s *PullRequestsService) Edit(owner string, repo string, number int, pull *PullRequest) (*PullRequest, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/pulls/%d", owner, repo, number) - req, err := s.client.NewRequest("PATCH", u, pull) - if err != nil { - return nil, nil, err - } - - p := new(PullRequest) - resp, err := s.client.Do(req, p) - if err != nil { - return nil, resp, err - } - - return p, resp, err -} - -// ListCommits lists the commits in a pull request. -// -// GitHub API docs: https://developer.github.com/v3/pulls/#list-commits-on-a-pull-request -func (s *PullRequestsService) ListCommits(owner string, repo string, number int, opt *ListOptions) ([]RepositoryCommit, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/pulls/%d/commits", owner, repo, number) - u, err := addOptions(u, opt) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - commits := new([]RepositoryCommit) - resp, err := s.client.Do(req, commits) - if err != nil { - return nil, resp, err - } - - return *commits, resp, err -} - -// ListFiles lists the files in a pull request. -// -// GitHub API docs: https://developer.github.com/v3/pulls/#list-pull-requests-files -func (s *PullRequestsService) ListFiles(owner string, repo string, number int, opt *ListOptions) ([]CommitFile, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/pulls/%d/files", owner, repo, number) - u, err := addOptions(u, opt) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - commitFiles := new([]CommitFile) - resp, err := s.client.Do(req, commitFiles) - if err != nil { - return nil, resp, err - } - - return *commitFiles, resp, err -} - -// IsMerged checks if a pull request has been merged. 
-// -// GitHub API docs: https://developer.github.com/v3/pulls/#get-if-a-pull-request-has-been-merged -func (s *PullRequestsService) IsMerged(owner string, repo string, number int) (bool, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/pulls/%d/merge", owner, repo, number) - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return false, nil, err - } - - resp, err := s.client.Do(req, nil) - merged, err := parseBoolResponse(err) - return merged, resp, err -} - -// PullRequestMergeResult represents the result of merging a pull request. -type PullRequestMergeResult struct { - SHA *string `json:"sha,omitempty"` - Merged *bool `json:"merged,omitempty"` - Message *string `json:"message,omitempty"` -} - -type pullRequestMergeRequest struct { - CommitMessage *string `json:"commit_message"` -} - -// Merge a pull request (Merge Button™). -// -// GitHub API docs: https://developer.github.com/v3/pulls/#merge-a-pull-request-merge-buttontrade -func (s *PullRequestsService) Merge(owner string, repo string, number int, commitMessage string) (*PullRequestMergeResult, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/pulls/%d/merge", owner, repo, number) - - req, err := s.client.NewRequest("PUT", u, &pullRequestMergeRequest{ - CommitMessage: &commitMessage, - }) - - if err != nil { - return nil, nil, err - } - - mergeResult := new(PullRequestMergeResult) - resp, err := s.client.Do(req, mergeResult) - if err != nil { - return nil, resp, err - } - - return mergeResult, resp, err -} diff --git a/vendor/github.com/google/go-github/github/pulls_comments.go b/vendor/github.com/google/go-github/github/pulls_comments.go deleted file mode 100644 index f165d5f..0000000 --- a/vendor/github.com/google/go-github/github/pulls_comments.go +++ /dev/null @@ -1,145 +0,0 @@ -// Copyright 2013 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "fmt" - "time" -) - -// PullRequestComment represents a comment left on a pull request. -type PullRequestComment struct { - ID *int `json:"id,omitempty"` - Body *string `json:"body,omitempty"` - Path *string `json:"path,omitempty"` - DiffHunk *string `json:"diff_hunk,omitempty"` - Position *int `json:"position,omitempty"` - OriginalPosition *int `json:"original_position,omitempty"` - CommitID *string `json:"commit_id,omitempty"` - OriginalCommitID *string `json:"original_commit_id,omitempty"` - User *User `json:"user,omitempty"` - CreatedAt *time.Time `json:"created_at,omitempty"` - UpdatedAt *time.Time `json:"updated_at,omitempty"` -} - -func (p PullRequestComment) String() string { - return Stringify(p) -} - -// PullRequestListCommentsOptions specifies the optional parameters to the -// PullRequestsService.ListComments method. -type PullRequestListCommentsOptions struct { - // Sort specifies how to sort comments. Possible values are: created, updated. - Sort string `url:"sort,omitempty"` - - // Direction in which to sort comments. Possible values are: asc, desc. - Direction string `url:"direction,omitempty"` - - // Since filters comments by time. - Since time.Time `url:"since,omitempty"` - - ListOptions -} - -// ListComments lists all comments on the specified pull request. Specifying a -// pull request number of 0 will return all comments on all pull requests for -// the repository. 
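
A minimal sketch of merging a pull request through the Merge method above. The token, owner, repository, and pull request number are placeholders, and the oauth2 wiring is just one common way to obtain an authenticated client; it is not prescribed by this code.

package main

import (
	"fmt"
	"log"

	"github.com/google/go-github/github"
	"golang.org/x/oauth2"
)

func main() {
	// Placeholder token; merging requires write access to the repository.
	ts := oauth2.StaticTokenSource(&oauth2.Token{AccessToken: "placeholder-token"})
	tc := oauth2.NewClient(oauth2.NoContext, ts)
	client := github.NewClient(tc)

	result, _, err := client.PullRequests.Merge("some-owner", "some-repo", 7, "merged by automation")
	if err != nil {
		log.Fatal(err)
	}
	if result.Merged != nil && *result.Merged {
		fmt.Println("merged at", *result.SHA)
	} else if result.Message != nil {
		fmt.Println("not merged:", *result.Message)
	}
}
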
-// -// GitHub API docs: https://developer.github.com/v3/pulls/comments/#list-comments-on-a-pull-request -func (s *PullRequestsService) ListComments(owner string, repo string, number int, opt *PullRequestListCommentsOptions) ([]PullRequestComment, *Response, error) { - var u string - if number == 0 { - u = fmt.Sprintf("repos/%v/%v/pulls/comments", owner, repo) - } else { - u = fmt.Sprintf("repos/%v/%v/pulls/%d/comments", owner, repo, number) - } - u, err := addOptions(u, opt) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - comments := new([]PullRequestComment) - resp, err := s.client.Do(req, comments) - if err != nil { - return nil, resp, err - } - - return *comments, resp, err -} - -// GetComment fetches the specified pull request comment. -// -// GitHub API docs: https://developer.github.com/v3/pulls/comments/#get-a-single-comment -func (s *PullRequestsService) GetComment(owner string, repo string, number int) (*PullRequestComment, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/pulls/comments/%d", owner, repo, number) - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - comment := new(PullRequestComment) - resp, err := s.client.Do(req, comment) - if err != nil { - return nil, resp, err - } - - return comment, resp, err -} - -// CreateComment creates a new comment on the specified pull request. -// -// GitHub API docs: https://developer.github.com/v3/pulls/comments/#create-a-comment -func (s *PullRequestsService) CreateComment(owner string, repo string, number int, comment *PullRequestComment) (*PullRequestComment, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/pulls/%d/comments", owner, repo, number) - req, err := s.client.NewRequest("POST", u, comment) - if err != nil { - return nil, nil, err - } - - c := new(PullRequestComment) - resp, err := s.client.Do(req, c) - if err != nil { - return nil, resp, err - } - - return c, resp, err -} - -// EditComment updates a pull request comment. -// -// GitHub API docs: https://developer.github.com/v3/pulls/comments/#edit-a-comment -func (s *PullRequestsService) EditComment(owner string, repo string, number int, comment *PullRequestComment) (*PullRequestComment, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/pulls/comments/%d", owner, repo, number) - req, err := s.client.NewRequest("PATCH", u, comment) - if err != nil { - return nil, nil, err - } - - c := new(PullRequestComment) - resp, err := s.client.Do(req, c) - if err != nil { - return nil, resp, err - } - - return c, resp, err -} - -// DeleteComment deletes a pull request comment. -// -// GitHub API docs: https://developer.github.com/v3/pulls/comments/#delete-a-comment -func (s *PullRequestsService) DeleteComment(owner string, repo string, number int) (*Response, error) { - u := fmt.Sprintf("repos/%v/%v/pulls/comments/%d", owner, repo, number) - req, err := s.client.NewRequest("DELETE", u, nil) - if err != nil { - return nil, err - } - return s.client.Do(req, nil) -} diff --git a/vendor/github.com/google/go-github/github/repos.go b/vendor/github.com/google/go-github/github/repos.go deleted file mode 100644 index 2e9b29e..0000000 --- a/vendor/github.com/google/go-github/github/repos.go +++ /dev/null @@ -1,496 +0,0 @@ -// Copyright 2013 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package github - -import "fmt" - -// RepositoriesService handles communication with the repository related -// methods of the GitHub API. -// -// GitHub API docs: http://developer.github.com/v3/repos/ -type RepositoriesService struct { - client *Client -} - -// Repository represents a GitHub repository. -type Repository struct { - ID *int `json:"id,omitempty"` - Owner *User `json:"owner,omitempty"` - Name *string `json:"name,omitempty"` - FullName *string `json:"full_name,omitempty"` - Description *string `json:"description,omitempty"` - Homepage *string `json:"homepage,omitempty"` - DefaultBranch *string `json:"default_branch,omitempty"` - MasterBranch *string `json:"master_branch,omitempty"` - CreatedAt *Timestamp `json:"created_at,omitempty"` - PushedAt *Timestamp `json:"pushed_at,omitempty"` - UpdatedAt *Timestamp `json:"updated_at,omitempty"` - HTMLURL *string `json:"html_url,omitempty"` - CloneURL *string `json:"clone_url,omitempty"` - GitURL *string `json:"git_url,omitempty"` - MirrorURL *string `json:"mirror_url,omitempty"` - SSHURL *string `json:"ssh_url,omitempty"` - SVNURL *string `json:"svn_url,omitempty"` - Language *string `json:"language,omitempty"` - Fork *bool `json:"fork"` - ForksCount *int `json:"forks_count,omitempty"` - NetworkCount *int `json:"network_count,omitempty"` - OpenIssuesCount *int `json:"open_issues_count,omitempty"` - StargazersCount *int `json:"stargazers_count,omitempty"` - SubscribersCount *int `json:"subscribers_count,omitempty"` - WatchersCount *int `json:"watchers_count,omitempty"` - Size *int `json:"size,omitempty"` - AutoInit *bool `json:"auto_init,omitempty"` - Parent *Repository `json:"parent,omitempty"` - Source *Repository `json:"source,omitempty"` - Organization *Organization `json:"organization,omitempty"` - Permissions *map[string]bool `json:"permissions,omitempty"` - - // Only provided when using RepositoriesService.Get while in preview - License *License `json:"license,omitempty"` - - // Additional mutable fields when creating and editing a repository - Private *bool `json:"private"` - HasIssues *bool `json:"has_issues"` - HasWiki *bool `json:"has_wiki"` - HasDownloads *bool `json:"has_downloads"` - // Creating an organization repository. Required for non-owners. 
- TeamID *int `json:"team_id"` - - // API URLs - URL *string `json:"url,omitempty"` - ArchiveURL *string `json:"archive_url,omitempty"` - AssigneesURL *string `json:"assignees_url,omitempty"` - BlobsURL *string `json:"blobs_url,omitempty"` - BranchesURL *string `json:"branches_url,omitempty"` - CollaboratorsURL *string `json:"collaborators_url,omitempty"` - CommentsURL *string `json:"comments_url,omitempty"` - CommitsURL *string `json:"commits_url,omitempty"` - CompareURL *string `json:"compare_url,omitempty"` - ContentsURL *string `json:"contents_url,omitempty"` - ContributorsURL *string `json:"contributors_url,omitempty"` - DownloadsURL *string `json:"downloads_url,omitempty"` - EventsURL *string `json:"events_url,omitempty"` - ForksURL *string `json:"forks_url,omitempty"` - GitCommitsURL *string `json:"git_commits_url,omitempty"` - GitRefsURL *string `json:"git_refs_url,omitempty"` - GitTagsURL *string `json:"git_tags_url,omitempty"` - HooksURL *string `json:"hooks_url,omitempty"` - IssueCommentURL *string `json:"issue_comment_url,omitempty"` - IssueEventsURL *string `json:"issue_events_url,omitempty"` - IssuesURL *string `json:"issues_url,omitempty"` - KeysURL *string `json:"keys_url,omitempty"` - LabelsURL *string `json:"labels_url,omitempty"` - LanguagesURL *string `json:"languages_url,omitempty"` - MergesURL *string `json:"merges_url,omitempty"` - MilestonesURL *string `json:"milestones_url,omitempty"` - NotificationsURL *string `json:"notifications_url,omitempty"` - PullsURL *string `json:"pulls_url,omitempty"` - ReleasesURL *string `json:"releases_url,omitempty"` - StargazersURL *string `json:"stargazers_url,omitempty"` - StatusesURL *string `json:"statuses_url,omitempty"` - SubscribersURL *string `json:"subscribers_url,omitempty"` - SubscriptionURL *string `json:"subscription_url,omitempty"` - TagsURL *string `json:"tags_url,omitempty"` - TreesURL *string `json:"trees_url,omitempty"` - TeamsURL *string `json:"teams_url,omitempty"` - - // TextMatches is only populated from search results that request text matches - // See: search.go and https://developer.github.com/v3/search/#text-match-metadata - TextMatches []TextMatch `json:"text_matches,omitempty"` -} - -func (r Repository) String() string { - return Stringify(r) -} - -// RepositoryListOptions specifies the optional parameters to the -// RepositoriesService.List method. -type RepositoryListOptions struct { - // Type of repositories to list. Possible values are: all, owner, public, - // private, member. Default is "all". - Type string `url:"type,omitempty"` - - // How to sort the repository list. Possible values are: created, updated, - // pushed, full_name. Default is "full_name". - Sort string `url:"sort,omitempty"` - - // Direction in which to sort repositories. Possible values are: asc, desc. - // Default is "asc" when sort is "full_name", otherwise default is "desc". - Direction string `url:"direction,omitempty"` - - ListOptions -} - -// List the repositories for a user. Passing the empty string will list -// repositories for the authenticated user. 
-// -// GitHub API docs: http://developer.github.com/v3/repos/#list-user-repositories -func (s *RepositoriesService) List(user string, opt *RepositoryListOptions) ([]Repository, *Response, error) { - var u string - if user != "" { - u = fmt.Sprintf("users/%v/repos", user) - } else { - u = "user/repos" - } - u, err := addOptions(u, opt) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept header when license support fully launches - req.Header.Set("Accept", mediaTypeLicensesPreview) - - repos := new([]Repository) - resp, err := s.client.Do(req, repos) - if err != nil { - return nil, resp, err - } - - return *repos, resp, err -} - -// RepositoryListByOrgOptions specifies the optional parameters to the -// RepositoriesService.ListByOrg method. -type RepositoryListByOrgOptions struct { - // Type of repositories to list. Possible values are: all, public, private, - // forks, sources, member. Default is "all". - Type string `url:"type,omitempty"` - - ListOptions -} - -// ListByOrg lists the repositories for an organization. -// -// GitHub API docs: http://developer.github.com/v3/repos/#list-organization-repositories -func (s *RepositoriesService) ListByOrg(org string, opt *RepositoryListByOrgOptions) ([]Repository, *Response, error) { - u := fmt.Sprintf("orgs/%v/repos", org) - u, err := addOptions(u, opt) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept header when license support fully launches - req.Header.Set("Accept", mediaTypeLicensesPreview) - - repos := new([]Repository) - resp, err := s.client.Do(req, repos) - if err != nil { - return nil, resp, err - } - - return *repos, resp, err -} - -// RepositoryListAllOptions specifies the optional parameters to the -// RepositoriesService.ListAll method. -type RepositoryListAllOptions struct { - // ID of the last repository seen - Since int `url:"since,omitempty"` - - ListOptions -} - -// ListAll lists all GitHub repositories in the order that they were created. -// -// GitHub API docs: http://developer.github.com/v3/repos/#list-all-public-repositories -func (s *RepositoriesService) ListAll(opt *RepositoryListAllOptions) ([]Repository, *Response, error) { - u, err := addOptions("repositories", opt) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - repos := new([]Repository) - resp, err := s.client.Do(req, repos) - if err != nil { - return nil, resp, err - } - - return *repos, resp, err -} - -// Create a new repository. If an organization is specified, the new -// repository will be created under that org. If the empty string is -// specified, it will be created for the authenticated user. -// -// GitHub API docs: http://developer.github.com/v3/repos/#create -func (s *RepositoriesService) Create(org string, repo *Repository) (*Repository, *Response, error) { - var u string - if org != "" { - u = fmt.Sprintf("orgs/%v/repos", org) - } else { - u = "user/repos" - } - - req, err := s.client.NewRequest("POST", u, repo) - if err != nil { - return nil, nil, err - } - - r := new(Repository) - resp, err := s.client.Do(req, r) - if err != nil { - return nil, resp, err - } - - return r, resp, err -} - -// Get fetches a repository. 
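
A minimal sketch of listing an organization's repositories with ListByOrg above; the organization name is a placeholder and pagination beyond the first page is omitted.

package main

import (
	"fmt"
	"log"

	"github.com/google/go-github/github"
)

func main() {
	client := github.NewClient(nil) // placeholder: unauthenticated client

	opts := &github.RepositoryListByOrgOptions{
		Type:        "sources",
		ListOptions: github.ListOptions{PerPage: 100},
	}
	repos, _, err := client.Repositories.ListByOrg("some-org", opts)
	if err != nil {
		log.Fatal(err)
	}
	for _, r := range repos {
		fmt.Println(*r.FullName)
	}
}
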
-// -// GitHub API docs: http://developer.github.com/v3/repos/#get -func (s *RepositoriesService) Get(owner, repo string) (*Repository, *Response, error) { - u := fmt.Sprintf("repos/%v/%v", owner, repo) - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - // TODO: remove custom Accept header when the license support fully launches - // https://developer.github.com/v3/licenses/#get-a-repositorys-license - req.Header.Set("Accept", mediaTypeLicensesPreview) - - repository := new(Repository) - resp, err := s.client.Do(req, repository) - if err != nil { - return nil, resp, err - } - - return repository, resp, err -} - -// Edit updates a repository. -// -// GitHub API docs: http://developer.github.com/v3/repos/#edit -func (s *RepositoriesService) Edit(owner, repo string, repository *Repository) (*Repository, *Response, error) { - u := fmt.Sprintf("repos/%v/%v", owner, repo) - req, err := s.client.NewRequest("PATCH", u, repository) - if err != nil { - return nil, nil, err - } - - r := new(Repository) - resp, err := s.client.Do(req, r) - if err != nil { - return nil, resp, err - } - - return r, resp, err -} - -// Delete a repository. -// -// GitHub API docs: https://developer.github.com/v3/repos/#delete-a-repository -func (s *RepositoriesService) Delete(owner, repo string) (*Response, error) { - u := fmt.Sprintf("repos/%v/%v", owner, repo) - req, err := s.client.NewRequest("DELETE", u, nil) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) -} - -// Contributor represents a repository contributor -type Contributor struct { - Login *string `json:"login,omitempty"` - ID *int `json:"id,omitempty"` - AvatarURL *string `json:"avatar_url,omitempty"` - GravatarID *string `json:"gravatar_id,omitempty"` - URL *string `json:"url,omitempty"` - HTMLURL *string `json:"html_url,omitempty"` - FollowersURL *string `json:"followers_url,omitempty"` - FollowingURL *string `json:"following_url,omitempty"` - GistsURL *string `json:"gists_url,omitempty"` - StarredURL *string `json:"starred_url,omitempty"` - SubscriptionsURL *string `json:"subscriptions_url,omitempty"` - OrganizationsURL *string `json:"organizations_url,omitempty"` - ReposURL *string `json:"repos_url,omitempty"` - EventsURL *string `json:"events_url,omitempty"` - ReceivedEventsURL *string `json:"received_events_url,omitempty"` - Type *string `json:"type,omitempty"` - SiteAdmin *bool `json:"site_admin"` - Contributions *int `json:"contributions,omitempty"` -} - -// ListContributorsOptions specifies the optional parameters to the -// RepositoriesService.ListContributors method. -type ListContributorsOptions struct { - // Include anonymous contributors in results or not - Anon string `url:"anon,omitempty"` - - ListOptions -} - -// ListContributors lists contributors for a repository. -// -// GitHub API docs: http://developer.github.com/v3/repos/#list-contributors -func (s *RepositoriesService) ListContributors(owner string, repository string, opt *ListContributorsOptions) ([]Contributor, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/contributors", owner, repository) - u, err := addOptions(u, opt) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - contributor := new([]Contributor) - resp, err := s.client.Do(req, contributor) - if err != nil { - return nil, nil, err - } - - return *contributor, resp, err -} - -// ListLanguages lists languages for the specified repository. 
The returned map -// specifies the languages and the number of bytes of code written in that -// language. For example: -// -// { -// "C": 78769, -// "Python": 7769 -// } -// -// GitHub API Docs: http://developer.github.com/v3/repos/#list-languages -func (s *RepositoriesService) ListLanguages(owner string, repo string) (map[string]int, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/languages", owner, repo) - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - languages := make(map[string]int) - resp, err := s.client.Do(req, &languages) - if err != nil { - return nil, resp, err - } - - return languages, resp, err -} - -// ListTeams lists the teams for the specified repository. -// -// GitHub API docs: https://developer.github.com/v3/repos/#list-teams -func (s *RepositoriesService) ListTeams(owner string, repo string, opt *ListOptions) ([]Team, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/teams", owner, repo) - u, err := addOptions(u, opt) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - teams := new([]Team) - resp, err := s.client.Do(req, teams) - if err != nil { - return nil, resp, err - } - - return *teams, resp, err -} - -// RepositoryTag represents a repository tag. -type RepositoryTag struct { - Name *string `json:"name,omitempty"` - Commit *Commit `json:"commit,omitempty"` - ZipballURL *string `json:"zipball_url,omitempty"` - TarballURL *string `json:"tarball_url,omitempty"` -} - -// ListTags lists tags for the specified repository. -// -// GitHub API docs: https://developer.github.com/v3/repos/#list-tags -func (s *RepositoriesService) ListTags(owner string, repo string, opt *ListOptions) ([]RepositoryTag, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/tags", owner, repo) - u, err := addOptions(u, opt) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - tags := new([]RepositoryTag) - resp, err := s.client.Do(req, tags) - if err != nil { - return nil, resp, err - } - - return *tags, resp, err -} - -// Branch represents a repository branch -type Branch struct { - Name *string `json:"name,omitempty"` - Commit *Commit `json:"commit,omitempty"` -} - -// ListBranches lists branches for the specified repository. -// -// GitHub API docs: http://developer.github.com/v3/repos/#list-branches -func (s *RepositoriesService) ListBranches(owner string, repo string, opt *ListOptions) ([]Branch, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/branches", owner, repo) - u, err := addOptions(u, opt) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - branches := new([]Branch) - resp, err := s.client.Do(req, branches) - if err != nil { - return nil, resp, err - } - - return *branches, resp, err -} - -// GetBranch gets the specified branch for a repository. 
-// -// GitHub API docs: https://developer.github.com/v3/repos/#get-branch -func (s *RepositoriesService) GetBranch(owner, repo, branch string) (*Branch, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/branches/%v", owner, repo, branch) - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - b := new(Branch) - resp, err := s.client.Do(req, b) - if err != nil { - return nil, resp, err - } - - return b, resp, err -} diff --git a/vendor/github.com/google/go-github/github/repos_collaborators.go b/vendor/github.com/google/go-github/github/repos_collaborators.go deleted file mode 100644 index 61dc4ef..0000000 --- a/vendor/github.com/google/go-github/github/repos_collaborators.go +++ /dev/null @@ -1,95 +0,0 @@ -// Copyright 2013 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import "fmt" - -// ListCollaborators lists the Github users that have access to the repository. -// -// GitHub API docs: http://developer.github.com/v3/repos/collaborators/#list -func (s *RepositoriesService) ListCollaborators(owner, repo string, opt *ListOptions) ([]User, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/collaborators", owner, repo) - u, err := addOptions(u, opt) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - req.Header.Set("Accept", mediaTypeOrgPermissionPreview) - - users := new([]User) - resp, err := s.client.Do(req, users) - if err != nil { - return nil, resp, err - } - - return *users, resp, err -} - -// IsCollaborator checks whether the specified Github user has collaborator -// access to the given repo. -// Note: This will return false if the user is not a collaborator OR the user -// is not a GitHub user. -// -// GitHub API docs: http://developer.github.com/v3/repos/collaborators/#get -func (s *RepositoriesService) IsCollaborator(owner, repo, user string) (bool, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/collaborators/%v", owner, repo, user) - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return false, nil, err - } - - resp, err := s.client.Do(req, nil) - isCollab, err := parseBoolResponse(err) - return isCollab, resp, err -} - -// RepositoryAddCollaboratorOptions specifies the optional parameters to the -// RepositoriesService.AddCollaborator method. -type RepositoryAddCollaboratorOptions struct { - // Permission specifies the permission to grant the user on this repository. - // Possible values are: - // pull - team members can pull, but not push to or administer this repository - // push - team members can pull and push, but not administer this repository - // admin - team members can pull, push and administer this repository - // - // Default value is "pull". This option is only valid for organization-owned repositories. - Permission string `json:"permission,omitempty"` -} - -// AddCollaborator adds the specified Github user as collaborator to the given repo. 
-// -// GitHub API docs: http://developer.github.com/v3/repos/collaborators/#add-collaborator -func (s *RepositoriesService) AddCollaborator(owner, repo, user string, opt *RepositoryAddCollaboratorOptions) (*Response, error) { - u := fmt.Sprintf("repos/%v/%v/collaborators/%v", owner, repo, user) - req, err := s.client.NewRequest("PUT", u, opt) - if err != nil { - return nil, err - } - - if opt != nil { - req.Header.Set("Accept", mediaTypeOrgPermissionPreview) - } - - return s.client.Do(req, nil) -} - -// RemoveCollaborator removes the specified Github user as collaborator from the given repo. -// Note: Does not return error if a valid user that is not a collaborator is removed. -// -// GitHub API docs: http://developer.github.com/v3/repos/collaborators/#remove-collaborator -func (s *RepositoriesService) RemoveCollaborator(owner, repo, user string) (*Response, error) { - u := fmt.Sprintf("repos/%v/%v/collaborators/%v", owner, repo, user) - req, err := s.client.NewRequest("DELETE", u, nil) - if err != nil { - return nil, err - } - return s.client.Do(req, nil) -} diff --git a/vendor/github.com/google/go-github/github/repos_comments.go b/vendor/github.com/google/go-github/github/repos_comments.go deleted file mode 100644 index 2d090bb..0000000 --- a/vendor/github.com/google/go-github/github/repos_comments.go +++ /dev/null @@ -1,150 +0,0 @@ -// Copyright 2013 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "fmt" - "time" -) - -// RepositoryComment represents a comment for a commit, file, or line in a repository. -type RepositoryComment struct { - HTMLURL *string `json:"html_url,omitempty"` - URL *string `json:"url,omitempty"` - ID *int `json:"id,omitempty"` - CommitID *string `json:"commit_id,omitempty"` - User *User `json:"user,omitempty"` - CreatedAt *time.Time `json:"created_at,omitempty"` - UpdatedAt *time.Time `json:"updated_at,omitempty"` - - // User-mutable fields - Body *string `json:"body"` - // User-initialized fields - Path *string `json:"path,omitempty"` - Position *int `json:"position,omitempty"` -} - -func (r RepositoryComment) String() string { - return Stringify(r) -} - -// ListComments lists all the comments for the repository. -// -// GitHub API docs: http://developer.github.com/v3/repos/comments/#list-commit-comments-for-a-repository -func (s *RepositoriesService) ListComments(owner, repo string, opt *ListOptions) ([]RepositoryComment, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/comments", owner, repo) - u, err := addOptions(u, opt) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - comments := new([]RepositoryComment) - resp, err := s.client.Do(req, comments) - if err != nil { - return nil, resp, err - } - - return *comments, resp, err -} - -// ListCommitComments lists all the comments for a given commit SHA. 
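
A minimal sketch of the collaborator check above; the owner, repository, and login are placeholders.

package main

import (
	"fmt"
	"log"

	"github.com/google/go-github/github"
)

func main() {
	client := github.NewClient(nil) // placeholder: unauthenticated client

	// Placeholder repository and login.
	ok, _, err := client.Repositories.IsCollaborator("some-owner", "some-repo", "octocat")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("collaborator:", ok)
}
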
-// -// GitHub API docs: http://developer.github.com/v3/repos/comments/#list-comments-for-a-single-commit -func (s *RepositoriesService) ListCommitComments(owner, repo, sha string, opt *ListOptions) ([]RepositoryComment, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/commits/%v/comments", owner, repo, sha) - u, err := addOptions(u, opt) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - comments := new([]RepositoryComment) - resp, err := s.client.Do(req, comments) - if err != nil { - return nil, resp, err - } - - return *comments, resp, err -} - -// CreateComment creates a comment for the given commit. -// Note: GitHub allows for comments to be created for non-existing files and positions. -// -// GitHub API docs: http://developer.github.com/v3/repos/comments/#create-a-commit-comment -func (s *RepositoriesService) CreateComment(owner, repo, sha string, comment *RepositoryComment) (*RepositoryComment, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/commits/%v/comments", owner, repo, sha) - req, err := s.client.NewRequest("POST", u, comment) - if err != nil { - return nil, nil, err - } - - c := new(RepositoryComment) - resp, err := s.client.Do(req, c) - if err != nil { - return nil, resp, err - } - - return c, resp, err -} - -// GetComment gets a single comment from a repository. -// -// GitHub API docs: http://developer.github.com/v3/repos/comments/#get-a-single-commit-comment -func (s *RepositoriesService) GetComment(owner, repo string, id int) (*RepositoryComment, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/comments/%v", owner, repo, id) - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - c := new(RepositoryComment) - resp, err := s.client.Do(req, c) - if err != nil { - return nil, resp, err - } - - return c, resp, err -} - -// UpdateComment updates the body of a single comment. -// -// GitHub API docs: http://developer.github.com/v3/repos/comments/#update-a-commit-comment -func (s *RepositoriesService) UpdateComment(owner, repo string, id int, comment *RepositoryComment) (*RepositoryComment, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/comments/%v", owner, repo, id) - req, err := s.client.NewRequest("PATCH", u, comment) - if err != nil { - return nil, nil, err - } - - c := new(RepositoryComment) - resp, err := s.client.Do(req, c) - if err != nil { - return nil, resp, err - } - - return c, resp, err -} - -// DeleteComment deletes a single comment from a repository. -// -// GitHub API docs: http://developer.github.com/v3/repos/comments/#delete-a-commit-comment -func (s *RepositoriesService) DeleteComment(owner, repo string, id int) (*Response, error) { - u := fmt.Sprintf("repos/%v/%v/comments/%v", owner, repo, id) - req, err := s.client.NewRequest("DELETE", u, nil) - if err != nil { - return nil, err - } - return s.client.Do(req, nil) -} diff --git a/vendor/github.com/google/go-github/github/repos_commits.go b/vendor/github.com/google/go-github/github/repos_commits.go deleted file mode 100644 index 6401cb4..0000000 --- a/vendor/github.com/google/go-github/github/repos_commits.go +++ /dev/null @@ -1,168 +0,0 @@ -// Copyright 2013 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "fmt" - "time" -) - -// RepositoryCommit represents a commit in a repo. 
-// Note that it's wrapping a Commit, so author/committer information is in two places, -// but contain different details about them: in RepositoryCommit "github details", in Commit - "git details". -type RepositoryCommit struct { - SHA *string `json:"sha,omitempty"` - Commit *Commit `json:"commit,omitempty"` - Author *User `json:"author,omitempty"` - Committer *User `json:"committer,omitempty"` - Parents []Commit `json:"parents,omitempty"` - Message *string `json:"message,omitempty"` - HTMLURL *string `json:"html_url,omitempty"` - - // Details about how many changes were made in this commit. Only filled in during GetCommit! - Stats *CommitStats `json:"stats,omitempty"` - // Details about which files, and how this commit touched. Only filled in during GetCommit! - Files []CommitFile `json:"files,omitempty"` -} - -func (r RepositoryCommit) String() string { - return Stringify(r) -} - -// CommitStats represents the number of additions / deletions from a file in a given RepositoryCommit. -type CommitStats struct { - Additions *int `json:"additions,omitempty"` - Deletions *int `json:"deletions,omitempty"` - Total *int `json:"total,omitempty"` -} - -func (c CommitStats) String() string { - return Stringify(c) -} - -// CommitFile represents a file modified in a commit. -type CommitFile struct { - SHA *string `json:"sha,omitempty"` - Filename *string `json:"filename,omitempty"` - Additions *int `json:"additions,omitempty"` - Deletions *int `json:"deletions,omitempty"` - Changes *int `json:"changes,omitempty"` - Status *string `json:"status,omitempty"` - Patch *string `json:"patch,omitempty"` -} - -func (c CommitFile) String() string { - return Stringify(c) -} - -// CommitsComparison is the result of comparing two commits. -// See CompareCommits() for details. -type CommitsComparison struct { - BaseCommit *RepositoryCommit `json:"base_commit,omitempty"` - MergeBaseCommit *RepositoryCommit `json:"merge_base_commit,omitempty"` - - // Head can be 'behind' or 'ahead' - Status *string `json:"status,omitempty"` - AheadBy *int `json:"ahead_by,omitempty"` - BehindBy *int `json:"behind_by,omitempty"` - TotalCommits *int `json:"total_commits,omitempty"` - - Commits []RepositoryCommit `json:"commits,omitempty"` - - Files []CommitFile `json:"files,omitempty"` -} - -func (c CommitsComparison) String() string { - return Stringify(c) -} - -// CommitsListOptions specifies the optional parameters to the -// RepositoriesService.ListCommits method. -type CommitsListOptions struct { - // SHA or branch to start listing Commits from. - SHA string `url:"sha,omitempty"` - - // Path that should be touched by the returned Commits. - Path string `url:"path,omitempty"` - - // Author of by which to filter Commits. - Author string `url:"author,omitempty"` - - // Since when should Commits be included in the response. - Since time.Time `url:"since,omitempty"` - - // Until when should Commits be included in the response. - Until time.Time `url:"until,omitempty"` - - ListOptions -} - -// ListCommits lists the commits of a repository. 
-// -// GitHub API docs: http://developer.github.com/v3/repos/commits/#list -func (s *RepositoriesService) ListCommits(owner, repo string, opt *CommitsListOptions) ([]RepositoryCommit, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/commits", owner, repo) - u, err := addOptions(u, opt) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - commits := new([]RepositoryCommit) - resp, err := s.client.Do(req, commits) - if err != nil { - return nil, resp, err - } - - return *commits, resp, err -} - -// GetCommit fetches the specified commit, including all details about it. -// todo: support media formats - https://github.com/google/go-github/issues/6 -// -// GitHub API docs: http://developer.github.com/v3/repos/commits/#get-a-single-commit -// See also: http://developer.github.com//v3/git/commits/#get-a-single-commit provides the same functionality -func (s *RepositoriesService) GetCommit(owner, repo, sha string) (*RepositoryCommit, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/commits/%v", owner, repo, sha) - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - commit := new(RepositoryCommit) - resp, err := s.client.Do(req, commit) - if err != nil { - return nil, resp, err - } - - return commit, resp, err -} - -// CompareCommits compares a range of commits with each other. -// todo: support media formats - https://github.com/google/go-github/issues/6 -// -// GitHub API docs: http://developer.github.com/v3/repos/commits/index.html#compare-two-commits -func (s *RepositoriesService) CompareCommits(owner, repo string, base, head string) (*CommitsComparison, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/compare/%v...%v", owner, repo, base, head) - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - comp := new(CommitsComparison) - resp, err := s.client.Do(req, comp) - if err != nil { - return nil, resp, err - } - - return comp, resp, err -} diff --git a/vendor/github.com/google/go-github/github/repos_contents.go b/vendor/github.com/google/go-github/github/repos_contents.go deleted file mode 100644 index 80776f2..0000000 --- a/vendor/github.com/google/go-github/github/repos_contents.go +++ /dev/null @@ -1,248 +0,0 @@ -// Copyright 2013 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Repository contents API methods. -// http://developer.github.com/v3/repos/contents/ - -package github - -import ( - "encoding/base64" - "encoding/json" - "errors" - "fmt" - "io" - "net/http" - "net/url" - "path" -) - -// RepositoryContent represents a file or directory in a github repository. -type RepositoryContent struct { - Type *string `json:"type,omitempty"` - Encoding *string `json:"encoding,omitempty"` - Size *int `json:"size,omitempty"` - Name *string `json:"name,omitempty"` - Path *string `json:"path,omitempty"` - Content *string `json:"content,omitempty"` - SHA *string `json:"sha,omitempty"` - URL *string `json:"url,omitempty"` - GitURL *string `json:"git_url,omitempty"` - HTMLURL *string `json:"html_url,omitempty"` - DownloadURL *string `json:"download_url,omitempty"` -} - -// RepositoryContentResponse holds the parsed response from CreateFile, UpdateFile, and DeleteFile. 
-type RepositoryContentResponse struct { - Content *RepositoryContent `json:"content,omitempty"` - Commit `json:"commit,omitempty"` -} - -// RepositoryContentFileOptions specifies optional parameters for CreateFile, UpdateFile, and DeleteFile. -type RepositoryContentFileOptions struct { - Message *string `json:"message,omitempty"` - Content []byte `json:"content,omitempty"` - SHA *string `json:"sha,omitempty"` - Branch *string `json:"branch,omitempty"` - Author *CommitAuthor `json:"author,omitempty"` - Committer *CommitAuthor `json:"committer,omitempty"` -} - -// RepositoryContentGetOptions represents an optional ref parameter, which can be a SHA, -// branch, or tag -type RepositoryContentGetOptions struct { - Ref string `url:"ref,omitempty"` -} - -func (r RepositoryContent) String() string { - return Stringify(r) -} - -// Decode decodes the file content if it is base64 encoded. -func (r *RepositoryContent) Decode() ([]byte, error) { - if *r.Encoding != "base64" { - return nil, errors.New("cannot decode non-base64") - } - o, err := base64.StdEncoding.DecodeString(*r.Content) - if err != nil { - return nil, err - } - return o, nil -} - -// GetReadme gets the Readme file for the repository. -// -// GitHub API docs: http://developer.github.com/v3/repos/contents/#get-the-readme -func (s *RepositoriesService) GetReadme(owner, repo string, opt *RepositoryContentGetOptions) (*RepositoryContent, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/readme", owner, repo) - u, err := addOptions(u, opt) - if err != nil { - return nil, nil, err - } - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - readme := new(RepositoryContent) - resp, err := s.client.Do(req, readme) - if err != nil { - return nil, resp, err - } - return readme, resp, err -} - -// DownloadContents returns an io.ReadCloser that reads the contents of the -// specified file. This function will work with files of any size, as opposed -// to GetContents which is limited to 1 Mb files. It is the caller's -// responsibility to close the ReadCloser. -func (s *RepositoriesService) DownloadContents(owner, repo, filepath string, opt *RepositoryContentGetOptions) (io.ReadCloser, error) { - dir := path.Dir(filepath) - filename := path.Base(filepath) - _, dirContents, _, err := s.GetContents(owner, repo, dir, opt) - if err != nil { - return nil, err - } - for _, contents := range dirContents { - if *contents.Name == filename { - if contents.DownloadURL == nil || *contents.DownloadURL == "" { - return nil, fmt.Errorf("No download link found for %s", filepath) - } - resp, err := s.client.client.Get(*contents.DownloadURL) - if err != nil { - return nil, err - } - return resp.Body, nil - } - } - return nil, fmt.Errorf("No file named %s found in %s", filename, dir) -} - -// GetContents can return either the metadata and content of a single file -// (when path references a file) or the metadata of all the files and/or -// subdirectories of a directory (when path references a directory). To make it -// easy to distinguish between both result types and to mimic the API as much -// as possible, both result types will be returned but only one will contain a -// value and the other will be nil. 
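
A minimal sketch of fetching a single file with DownloadContents above; the owner, repository, file path, and ref are placeholders.

package main

import (
	"fmt"
	"io/ioutil"
	"log"

	"github.com/google/go-github/github"
)

func main() {
	client := github.NewClient(nil) // placeholder: unauthenticated client

	// Placeholder ref and file path.
	opts := &github.RepositoryContentGetOptions{Ref: "master"}
	rc, err := client.Repositories.DownloadContents("some-owner", "some-repo", "MAINTAINERS", opts)
	if err != nil {
		log.Fatal(err)
	}
	defer rc.Close()

	body, err := ioutil.ReadAll(rc)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("read %d bytes\n", len(body))
}
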
-// -// GitHub API docs: http://developer.github.com/v3/repos/contents/#get-contents -func (s *RepositoriesService) GetContents(owner, repo, path string, opt *RepositoryContentGetOptions) (fileContent *RepositoryContent, - directoryContent []*RepositoryContent, resp *Response, err error) { - u := fmt.Sprintf("repos/%s/%s/contents/%s", owner, repo, path) - u, err = addOptions(u, opt) - if err != nil { - return nil, nil, nil, err - } - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, nil, err - } - var rawJSON json.RawMessage - resp, err = s.client.Do(req, &rawJSON) - if err != nil { - return nil, nil, resp, err - } - fileUnmarshalError := json.Unmarshal(rawJSON, &fileContent) - if fileUnmarshalError == nil { - return fileContent, nil, resp, fileUnmarshalError - } - directoryUnmarshalError := json.Unmarshal(rawJSON, &directoryContent) - if directoryUnmarshalError == nil { - return nil, directoryContent, resp, directoryUnmarshalError - } - return nil, nil, resp, fmt.Errorf("unmarshalling failed for both file and directory content: %s and %s ", fileUnmarshalError, directoryUnmarshalError) -} - -// CreateFile creates a new file in a repository at the given path and returns -// the commit and file metadata. -// -// GitHub API docs: http://developer.github.com/v3/repos/contents/#create-a-file -func (s *RepositoriesService) CreateFile(owner, repo, path string, opt *RepositoryContentFileOptions) (*RepositoryContentResponse, *Response, error) { - u := fmt.Sprintf("repos/%s/%s/contents/%s", owner, repo, path) - req, err := s.client.NewRequest("PUT", u, opt) - if err != nil { - return nil, nil, err - } - createResponse := new(RepositoryContentResponse) - resp, err := s.client.Do(req, createResponse) - if err != nil { - return nil, resp, err - } - return createResponse, resp, err -} - -// UpdateFile updates a file in a repository at the given path and returns the -// commit and file metadata. Requires the blob SHA of the file being updated. -// -// GitHub API docs: http://developer.github.com/v3/repos/contents/#update-a-file -func (s *RepositoriesService) UpdateFile(owner, repo, path string, opt *RepositoryContentFileOptions) (*RepositoryContentResponse, *Response, error) { - u := fmt.Sprintf("repos/%s/%s/contents/%s", owner, repo, path) - req, err := s.client.NewRequest("PUT", u, opt) - if err != nil { - return nil, nil, err - } - updateResponse := new(RepositoryContentResponse) - resp, err := s.client.Do(req, updateResponse) - if err != nil { - return nil, resp, err - } - return updateResponse, resp, err -} - -// DeleteFile deletes a file from a repository and returns the commit. -// Requires the blob SHA of the file to be deleted. -// -// GitHub API docs: http://developer.github.com/v3/repos/contents/#delete-a-file -func (s *RepositoriesService) DeleteFile(owner, repo, path string, opt *RepositoryContentFileOptions) (*RepositoryContentResponse, *Response, error) { - u := fmt.Sprintf("repos/%s/%s/contents/%s", owner, repo, path) - req, err := s.client.NewRequest("DELETE", u, opt) - if err != nil { - return nil, nil, err - } - deleteResponse := new(RepositoryContentResponse) - resp, err := s.client.Do(req, deleteResponse) - if err != nil { - return nil, resp, err - } - return deleteResponse, resp, err -} - -// archiveFormat is used to define the archive type when calling GetArchiveLink. -type archiveFormat string - -const ( - // Tarball specifies an archive in gzipped tar format. - Tarball archiveFormat = "tarball" - - // Zipball specifies an archive in zip format. 
- Zipball archiveFormat = "zipball" -) - -// GetArchiveLink returns an URL to download a tarball or zipball archive for a -// repository. The archiveFormat can be specified by either the github.Tarball -// or github.Zipball constant. -// -// GitHub API docs: http://developer.github.com/v3/repos/contents/#get-archive-link -func (s *RepositoriesService) GetArchiveLink(owner, repo string, archiveformat archiveFormat, opt *RepositoryContentGetOptions) (*url.URL, *Response, error) { - u := fmt.Sprintf("repos/%s/%s/%s", owner, repo, archiveformat) - if opt != nil && opt.Ref != "" { - u += fmt.Sprintf("/%s", opt.Ref) - } - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - var resp *http.Response - // Use http.DefaultTransport if no custom Transport is configured - if s.client.client.Transport == nil { - resp, err = http.DefaultTransport.RoundTrip(req) - } else { - resp, err = s.client.client.Transport.RoundTrip(req) - } - if err != nil || resp.StatusCode != http.StatusFound { - return nil, newResponse(resp), err - } - parsedURL, err := url.Parse(resp.Header.Get("Location")) - return parsedURL, newResponse(resp), err -} diff --git a/vendor/github.com/google/go-github/github/repos_deployments.go b/vendor/github.com/google/go-github/github/repos_deployments.go deleted file mode 100644 index 77c7949..0000000 --- a/vendor/github.com/google/go-github/github/repos_deployments.go +++ /dev/null @@ -1,162 +0,0 @@ -// Copyright 2014 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "encoding/json" - "fmt" -) - -// Deployment represents a deployment in a repo -type Deployment struct { - URL *string `json:"url,omitempty"` - ID *int `json:"id,omitempty"` - SHA *string `json:"sha,omitempty"` - Ref *string `json:"ref,omitempty"` - Task *string `json:"task,omitempty"` - Payload json.RawMessage `json:"payload,omitempty"` - Environment *string `json:"environment,omitempty"` - Description *string `json:"description,omitempty"` - Creator *User `json:"creator,omitempty"` - CreatedAt *Timestamp `json:"created_at,omitempty"` - UpdatedAt *Timestamp `json:"pushed_at,omitempty"` -} - -// DeploymentRequest represents a deployment request -type DeploymentRequest struct { - Ref *string `json:"ref,omitempty"` - Task *string `json:"task,omitempty"` - AutoMerge *bool `json:"auto_merge,omitempty"` - RequiredContexts *[]string `json:"required_contexts,omitempty"` - Payload *string `json:"payload,omitempty"` - Environment *string `json:"environment,omitempty"` - Description *string `json:"description,omitempty"` -} - -// DeploymentsListOptions specifies the optional parameters to the -// RepositoriesService.ListDeployments method. -type DeploymentsListOptions struct { - // SHA of the Deployment. - SHA string `url:"sha,omitempty"` - - // List deployments for a given ref. - Ref string `url:"ref,omitempty"` - - // List deployments for a given task. - Task string `url:"task,omitempty"` - - // List deployments for a given environment. - Environment string `url:"environment,omitempty"` - - ListOptions -} - -// ListDeployments lists the deployments of a repository. 
-// -// GitHub API docs: https://developer.github.com/v3/repos/deployments/#list-deployments -func (s *RepositoriesService) ListDeployments(owner, repo string, opt *DeploymentsListOptions) ([]Deployment, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/deployments", owner, repo) - u, err := addOptions(u, opt) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - deployments := new([]Deployment) - resp, err := s.client.Do(req, deployments) - if err != nil { - return nil, resp, err - } - - return *deployments, resp, err -} - -// CreateDeployment creates a new deployment for a repository. -// -// GitHub API docs: https://developer.github.com/v3/repos/deployments/#create-a-deployment -func (s *RepositoriesService) CreateDeployment(owner, repo string, request *DeploymentRequest) (*Deployment, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/deployments", owner, repo) - - req, err := s.client.NewRequest("POST", u, request) - if err != nil { - return nil, nil, err - } - - d := new(Deployment) - resp, err := s.client.Do(req, d) - if err != nil { - return nil, resp, err - } - - return d, resp, err -} - -// DeploymentStatus represents the status of a -// particular deployment. -type DeploymentStatus struct { - ID *int `json:"id,omitempty"` - State *string `json:"state,omitempty"` - Creator *User `json:"creator,omitempty"` - Description *string `json:"description,omitempty"` - TargetURL *string `json:"target_url,omitempty"` - CreatedAt *Timestamp `json:"created_at,omitempty"` - UpdatedAt *Timestamp `json:"pushed_at,omitempty"` -} - -// DeploymentStatusRequest represents a deployment request -type DeploymentStatusRequest struct { - State *string `json:"state,omitempty"` - TargetURL *string `json:"target_url,omitempty"` - Description *string `json:"description,omitempty"` -} - -// ListDeploymentStatuses lists the statuses of a given deployment of a repository. -// -// GitHub API docs: https://developer.github.com/v3/repos/deployments/#list-deployment-statuses -func (s *RepositoriesService) ListDeploymentStatuses(owner, repo string, deployment int, opt *ListOptions) ([]DeploymentStatus, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/deployments/%v/statuses", owner, repo, deployment) - u, err := addOptions(u, opt) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - statuses := new([]DeploymentStatus) - resp, err := s.client.Do(req, statuses) - if err != nil { - return nil, resp, err - } - - return *statuses, resp, err -} - -// CreateDeploymentStatus creates a new status for a deployment. 
-// -// GitHub API docs: https://developer.github.com/v3/repos/deployments/#create-a-deployment-status -func (s *RepositoriesService) CreateDeploymentStatus(owner, repo string, deployment int, request *DeploymentStatusRequest) (*DeploymentStatus, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/deployments/%v/statuses", owner, repo, deployment) - - req, err := s.client.NewRequest("POST", u, request) - if err != nil { - return nil, nil, err - } - - d := new(DeploymentStatus) - resp, err := s.client.Do(req, d) - if err != nil { - return nil, resp, err - } - - return d, resp, err -} diff --git a/vendor/github.com/google/go-github/github/repos_forks.go b/vendor/github.com/google/go-github/github/repos_forks.go deleted file mode 100644 index 1fec829..0000000 --- a/vendor/github.com/google/go-github/github/repos_forks.go +++ /dev/null @@ -1,73 +0,0 @@ -// Copyright 2013 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import "fmt" - -// RepositoryListForksOptions specifies the optional parameters to the -// RepositoriesService.ListForks method. -type RepositoryListForksOptions struct { - // How to sort the forks list. Possible values are: newest, oldest, - // watchers. Default is "newest". - Sort string `url:"sort,omitempty"` - - ListOptions -} - -// ListForks lists the forks of the specified repository. -// -// GitHub API docs: http://developer.github.com/v3/repos/forks/#list-forks -func (s *RepositoriesService) ListForks(owner, repo string, opt *RepositoryListForksOptions) ([]Repository, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/forks", owner, repo) - u, err := addOptions(u, opt) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - repos := new([]Repository) - resp, err := s.client.Do(req, repos) - if err != nil { - return nil, resp, err - } - - return *repos, resp, err -} - -// RepositoryCreateForkOptions specifies the optional parameters to the -// RepositoriesService.CreateFork method. -type RepositoryCreateForkOptions struct { - // The organization to fork the repository into. - Organization string `url:"organization,omitempty"` -} - -// CreateFork creates a fork of the specified repository. -// -// GitHub API docs: http://developer.github.com/v3/repos/forks/#list-forks -func (s *RepositoriesService) CreateFork(owner, repo string, opt *RepositoryCreateForkOptions) (*Repository, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/forks", owner, repo) - u, err := addOptions(u, opt) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("POST", u, nil) - if err != nil { - return nil, nil, err - } - - fork := new(Repository) - resp, err := s.client.Do(req, fork) - if err != nil { - return nil, resp, err - } - - return fork, resp, err -} diff --git a/vendor/github.com/google/go-github/github/repos_hooks.go b/vendor/github.com/google/go-github/github/repos_hooks.go deleted file mode 100644 index bc4c8c5..0000000 --- a/vendor/github.com/google/go-github/github/repos_hooks.go +++ /dev/null @@ -1,194 +0,0 @@ -// Copyright 2013 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "fmt" - "time" -) - -// WebHookPayload represents the data that is received from GitHub when a push -// event hook is triggered. 
The format of these payloads pre-date most of the -// GitHub v3 API, so there are lots of minor incompatibilities with the types -// defined in the rest of the API. Therefore, several types are duplicated -// here to account for these differences. -// -// GitHub API docs: https://help.github.com/articles/post-receive-hooks -type WebHookPayload struct { - After *string `json:"after,omitempty"` - Before *string `json:"before,omitempty"` - Commits []WebHookCommit `json:"commits,omitempty"` - Compare *string `json:"compare,omitempty"` - Created *bool `json:"created,omitempty"` - Deleted *bool `json:"deleted,omitempty"` - Forced *bool `json:"forced,omitempty"` - HeadCommit *WebHookCommit `json:"head_commit,omitempty"` - Pusher *User `json:"pusher,omitempty"` - Ref *string `json:"ref,omitempty"` - Repo *Repository `json:"repository,omitempty"` -} - -func (w WebHookPayload) String() string { - return Stringify(w) -} - -// WebHookCommit represents the commit variant we receive from GitHub in a -// WebHookPayload. -type WebHookCommit struct { - Added []string `json:"added,omitempty"` - Author *WebHookAuthor `json:"author,omitempty"` - Committer *WebHookAuthor `json:"committer,omitempty"` - Distinct *bool `json:"distinct,omitempty"` - ID *string `json:"id,omitempty"` - Message *string `json:"message,omitempty"` - Modified []string `json:"modified,omitempty"` - Removed []string `json:"removed,omitempty"` - Timestamp *time.Time `json:"timestamp,omitempty"` -} - -func (w WebHookCommit) String() string { - return Stringify(w) -} - -// WebHookAuthor represents the author or committer of a commit, as specified -// in a WebHookCommit. The commit author may not correspond to a GitHub User. -type WebHookAuthor struct { - Email *string `json:"email,omitempty"` - Name *string `json:"name,omitempty"` - Username *string `json:"username,omitempty"` -} - -func (w WebHookAuthor) String() string { - return Stringify(w) -} - -// Hook represents a GitHub (web and service) hook for a repository. -type Hook struct { - CreatedAt *time.Time `json:"created_at,omitempty"` - UpdatedAt *time.Time `json:"updated_at,omitempty"` - Name *string `json:"name,omitempty"` - Events []string `json:"events,omitempty"` - Active *bool `json:"active,omitempty"` - Config map[string]interface{} `json:"config,omitempty"` - ID *int `json:"id,omitempty"` -} - -func (h Hook) String() string { - return Stringify(h) -} - -// CreateHook creates a Hook for the specified repository. -// Name and Config are required fields. -// -// GitHub API docs: http://developer.github.com/v3/repos/hooks/#create-a-hook -func (s *RepositoriesService) CreateHook(owner, repo string, hook *Hook) (*Hook, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/hooks", owner, repo) - req, err := s.client.NewRequest("POST", u, hook) - if err != nil { - return nil, nil, err - } - - h := new(Hook) - resp, err := s.client.Do(req, h) - if err != nil { - return nil, resp, err - } - - return h, resp, err -} - -// ListHooks lists all Hooks for the specified repository. 
-// -// GitHub API docs: http://developer.github.com/v3/repos/hooks/#list -func (s *RepositoriesService) ListHooks(owner, repo string, opt *ListOptions) ([]Hook, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/hooks", owner, repo) - u, err := addOptions(u, opt) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - hooks := new([]Hook) - resp, err := s.client.Do(req, hooks) - if err != nil { - return nil, resp, err - } - - return *hooks, resp, err -} - -// GetHook returns a single specified Hook. -// -// GitHub API docs: http://developer.github.com/v3/repos/hooks/#get-single-hook -func (s *RepositoriesService) GetHook(owner, repo string, id int) (*Hook, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/hooks/%d", owner, repo, id) - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - hook := new(Hook) - resp, err := s.client.Do(req, hook) - return hook, resp, err -} - -// EditHook updates a specified Hook. -// -// GitHub API docs: http://developer.github.com/v3/repos/hooks/#edit-a-hook -func (s *RepositoriesService) EditHook(owner, repo string, id int, hook *Hook) (*Hook, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/hooks/%d", owner, repo, id) - req, err := s.client.NewRequest("PATCH", u, hook) - if err != nil { - return nil, nil, err - } - h := new(Hook) - resp, err := s.client.Do(req, h) - return h, resp, err -} - -// DeleteHook deletes a specified Hook. -// -// GitHub API docs: http://developer.github.com/v3/repos/hooks/#delete-a-hook -func (s *RepositoriesService) DeleteHook(owner, repo string, id int) (*Response, error) { - u := fmt.Sprintf("repos/%v/%v/hooks/%d", owner, repo, id) - req, err := s.client.NewRequest("DELETE", u, nil) - if err != nil { - return nil, err - } - return s.client.Do(req, nil) -} - -// PingHook triggers a 'ping' event to be sent to the Hook. -// -// GitHub API docs: https://developer.github.com/v3/repos/hooks/#ping-a-hook -func (s *RepositoriesService) PingHook(owner, repo string, id int) (*Response, error) { - u := fmt.Sprintf("repos/%v/%v/hooks/%d/pings", owner, repo, id) - req, err := s.client.NewRequest("POST", u, nil) - if err != nil { - return nil, err - } - return s.client.Do(req, nil) -} - -// TestHook triggers a test Hook by github. -// -// GitHub API docs: http://developer.github.com/v3/repos/hooks/#test-a-push-hook -func (s *RepositoriesService) TestHook(owner, repo string, id int) (*Response, error) { - u := fmt.Sprintf("repos/%v/%v/hooks/%d/tests", owner, repo, id) - req, err := s.client.NewRequest("POST", u, nil) - if err != nil { - return nil, err - } - return s.client.Do(req, nil) -} - -// ListServiceHooks is deprecated. Use Client.ListServiceHooks instead. -func (s *RepositoriesService) ListServiceHooks() ([]ServiceHook, *Response, error) { - return s.client.ListServiceHooks() -} diff --git a/vendor/github.com/google/go-github/github/repos_keys.go b/vendor/github.com/google/go-github/github/repos_keys.go deleted file mode 100644 index 0d12ec9..0000000 --- a/vendor/github.com/google/go-github/github/repos_keys.go +++ /dev/null @@ -1,108 +0,0 @@ -// Copyright 2013 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import "fmt" - -// The Key type is defined in users_keys.go - -// ListKeys lists the deploy keys for a repository. 
-// -// GitHub API docs: http://developer.github.com/v3/repos/keys/#list -func (s *RepositoriesService) ListKeys(owner string, repo string, opt *ListOptions) ([]Key, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/keys", owner, repo) - u, err := addOptions(u, opt) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - keys := new([]Key) - resp, err := s.client.Do(req, keys) - if err != nil { - return nil, resp, err - } - - return *keys, resp, err -} - -// GetKey fetches a single deploy key. -// -// GitHub API docs: http://developer.github.com/v3/repos/keys/#get -func (s *RepositoriesService) GetKey(owner string, repo string, id int) (*Key, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/keys/%v", owner, repo, id) - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - key := new(Key) - resp, err := s.client.Do(req, key) - if err != nil { - return nil, resp, err - } - - return key, resp, err -} - -// CreateKey adds a deploy key for a repository. -// -// GitHub API docs: http://developer.github.com/v3/repos/keys/#create -func (s *RepositoriesService) CreateKey(owner string, repo string, key *Key) (*Key, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/keys", owner, repo) - - req, err := s.client.NewRequest("POST", u, key) - if err != nil { - return nil, nil, err - } - - k := new(Key) - resp, err := s.client.Do(req, k) - if err != nil { - return nil, resp, err - } - - return k, resp, err -} - -// EditKey edits a deploy key. -// -// GitHub API docs: http://developer.github.com/v3/repos/keys/#edit -func (s *RepositoriesService) EditKey(owner string, repo string, id int, key *Key) (*Key, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/keys/%v", owner, repo, id) - - req, err := s.client.NewRequest("PATCH", u, key) - if err != nil { - return nil, nil, err - } - - k := new(Key) - resp, err := s.client.Do(req, k) - if err != nil { - return nil, resp, err - } - - return k, resp, err -} - -// DeleteKey deletes a deploy key. -// -// GitHub API docs: http://developer.github.com/v3/repos/keys/#delete -func (s *RepositoriesService) DeleteKey(owner string, repo string, id int) (*Response, error) { - u := fmt.Sprintf("repos/%v/%v/keys/%v", owner, repo, id) - - req, err := s.client.NewRequest("DELETE", u, nil) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) -} diff --git a/vendor/github.com/google/go-github/github/repos_merging.go b/vendor/github.com/google/go-github/github/repos_merging.go deleted file mode 100644 index 31f8313..0000000 --- a/vendor/github.com/google/go-github/github/repos_merging.go +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright 2014 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "fmt" -) - -// RepositoryMergeRequest represents a request to merge a branch in a -// repository. -type RepositoryMergeRequest struct { - Base *string `json:"base,omitempty"` - Head *string `json:"head,omitempty"` - CommitMessage *string `json:"commit_message,omitempty"` -} - -// Merge a branch in the specified repository. 
-// -// GitHub API docs: https://developer.github.com/v3/repos/merging/#perform-a-merge -func (s *RepositoriesService) Merge(owner, repo string, request *RepositoryMergeRequest) (*RepositoryCommit, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/merges", owner, repo) - req, err := s.client.NewRequest("POST", u, request) - if err != nil { - return nil, nil, err - } - - commit := new(RepositoryCommit) - resp, err := s.client.Do(req, commit) - if err != nil { - return nil, resp, err - } - - return commit, resp, err -} diff --git a/vendor/github.com/google/go-github/github/repos_pages.go b/vendor/github.com/google/go-github/github/repos_pages.go deleted file mode 100644 index 2384eaf..0000000 --- a/vendor/github.com/google/go-github/github/repos_pages.go +++ /dev/null @@ -1,90 +0,0 @@ -// Copyright 2014 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import "fmt" - -// Pages represents a GitHub Pages site configuration. -type Pages struct { - URL *string `json:"url,omitempty"` - Status *string `json:"status,omitempty"` - CNAME *string `json:"cname,omitempty"` - Custom404 *bool `json:"custom_404,omitempty"` -} - -// PagesError represents a build error for a GitHub Pages site. -type PagesError struct { - Message *string `json:"message,omitempty"` -} - -// PagesBuild represents the build information for a GitHub Pages site. -type PagesBuild struct { - URL *string `json:"url,omitempty"` - Status *string `json:"status,omitempty"` - Error *PagesError `json:"error,omitempty"` - Pusher *User `json:"pusher,omitempty"` - Commit *string `json:"commit,omitempty"` - Duration *int `json:"duration,omitempty"` - CreatedAt *Timestamp `json:"created_at,omitempty"` - UpdatedAt *Timestamp `json:"created_at,omitempty"` -} - -// GetPagesInfo fetches information about a GitHub Pages site. -// -// GitHub API docs: https://developer.github.com/v3/repos/pages/#get-information-about-a-pages-site -func (s *RepositoriesService) GetPagesInfo(owner string, repo string) (*Pages, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/pages", owner, repo) - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - site := new(Pages) - resp, err := s.client.Do(req, site) - if err != nil { - return nil, resp, err - } - - return site, resp, err -} - -// ListPagesBuilds lists the builds for a GitHub Pages site. -// -// GitHub API docs: https://developer.github.com/v3/repos/pages/#list-pages-builds -func (s *RepositoriesService) ListPagesBuilds(owner string, repo string) ([]PagesBuild, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/pages/builds", owner, repo) - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - var pages []PagesBuild - resp, err := s.client.Do(req, &pages) - if err != nil { - return nil, resp, err - } - - return pages, resp, err -} - -// GetLatestPagesBuild fetches the latest build information for a GitHub pages site. 
-// -// GitHub API docs: https://developer.github.com/v3/repos/pages/#list-latest-pages-build -func (s *RepositoriesService) GetLatestPagesBuild(owner string, repo string) (*PagesBuild, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/pages/builds/latest", owner, repo) - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - build := new(PagesBuild) - resp, err := s.client.Do(req, build) - if err != nil { - return nil, resp, err - } - - return build, resp, err -} diff --git a/vendor/github.com/google/go-github/github/repos_releases.go b/vendor/github.com/google/go-github/github/repos_releases.go deleted file mode 100644 index 4e4e2ab..0000000 --- a/vendor/github.com/google/go-github/github/repos_releases.go +++ /dev/null @@ -1,301 +0,0 @@ -// Copyright 2013 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "errors" - "fmt" - "io" - "mime" - "os" - "path/filepath" -) - -// RepositoryRelease represents a GitHub release in a repository. -type RepositoryRelease struct { - ID *int `json:"id,omitempty"` - TagName *string `json:"tag_name,omitempty"` - TargetCommitish *string `json:"target_commitish,omitempty"` - Name *string `json:"name,omitempty"` - Body *string `json:"body,omitempty"` - Draft *bool `json:"draft,omitempty"` - Prerelease *bool `json:"prerelease,omitempty"` - CreatedAt *Timestamp `json:"created_at,omitempty"` - PublishedAt *Timestamp `json:"published_at,omitempty"` - URL *string `json:"url,omitempty"` - HTMLURL *string `json:"html_url,omitempty"` - AssetsURL *string `json:"assets_url,omitempty"` - Assets []ReleaseAsset `json:"assets,omitempty"` - UploadURL *string `json:"upload_url,omitempty"` - ZipballURL *string `json:"zipball_url,omitempty"` - TarballURL *string `json:"tarball_url,omitempty"` -} - -func (r RepositoryRelease) String() string { - return Stringify(r) -} - -// ReleaseAsset represents a Github release asset in a repository. -type ReleaseAsset struct { - ID *int `json:"id,omitempty"` - URL *string `json:"url,omitempty"` - Name *string `json:"name,omitempty"` - Label *string `json:"label,omitempty"` - State *string `json:"state,omitempty"` - ContentType *string `json:"content_type,omitempty"` - Size *int `json:"size,omitempty"` - DownloadCount *int `json:"download_count,omitempty"` - CreatedAt *Timestamp `json:"created_at,omitempty"` - UpdatedAt *Timestamp `json:"updated_at,omitempty"` - BrowserDownloadURL *string `json:"browser_download_url,omitempty"` - Uploader *User `json:"uploader,omitempty"` -} - -func (r ReleaseAsset) String() string { - return Stringify(r) -} - -// ListReleases lists the releases for a repository. -// -// GitHub API docs: http://developer.github.com/v3/repos/releases/#list-releases-for-a-repository -func (s *RepositoriesService) ListReleases(owner, repo string, opt *ListOptions) ([]RepositoryRelease, *Response, error) { - u := fmt.Sprintf("repos/%s/%s/releases", owner, repo) - u, err := addOptions(u, opt) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - releases := new([]RepositoryRelease) - resp, err := s.client.Do(req, releases) - if err != nil { - return nil, resp, err - } - return *releases, resp, err -} - -// GetRelease fetches a single release. 
-// -// GitHub API docs: http://developer.github.com/v3/repos/releases/#get-a-single-release -func (s *RepositoriesService) GetRelease(owner, repo string, id int) (*RepositoryRelease, *Response, error) { - u := fmt.Sprintf("repos/%s/%s/releases/%d", owner, repo, id) - return s.getSingleRelease(u) -} - -// GetLatestRelease fetches the latest published release for the repository. -// -// GitHub API docs: https://developer.github.com/v3/repos/releases/#get-the-latest-release -func (s *RepositoriesService) GetLatestRelease(owner, repo string) (*RepositoryRelease, *Response, error) { - u := fmt.Sprintf("repos/%s/%s/releases/latest", owner, repo) - return s.getSingleRelease(u) -} - -// GetReleaseByTag fetches a release with the specified tag. -// -// GitHub API docs: https://developer.github.com/v3/repos/releases/#get-a-release-by-tag-name -func (s *RepositoriesService) GetReleaseByTag(owner, repo, tag string) (*RepositoryRelease, *Response, error) { - u := fmt.Sprintf("repos/%s/%s/releases/tags/%s", owner, repo, tag) - return s.getSingleRelease(u) -} - -func (s *RepositoriesService) getSingleRelease(url string) (*RepositoryRelease, *Response, error) { - req, err := s.client.NewRequest("GET", url, nil) - if err != nil { - return nil, nil, err - } - - release := new(RepositoryRelease) - resp, err := s.client.Do(req, release) - if err != nil { - return nil, resp, err - } - return release, resp, err -} - -// CreateRelease adds a new release for a repository. -// -// GitHub API docs : http://developer.github.com/v3/repos/releases/#create-a-release -func (s *RepositoriesService) CreateRelease(owner, repo string, release *RepositoryRelease) (*RepositoryRelease, *Response, error) { - u := fmt.Sprintf("repos/%s/%s/releases", owner, repo) - - req, err := s.client.NewRequest("POST", u, release) - if err != nil { - return nil, nil, err - } - - r := new(RepositoryRelease) - resp, err := s.client.Do(req, r) - if err != nil { - return nil, resp, err - } - return r, resp, err -} - -// EditRelease edits a repository release. -// -// GitHub API docs : http://developer.github.com/v3/repos/releases/#edit-a-release -func (s *RepositoriesService) EditRelease(owner, repo string, id int, release *RepositoryRelease) (*RepositoryRelease, *Response, error) { - u := fmt.Sprintf("repos/%s/%s/releases/%d", owner, repo, id) - - req, err := s.client.NewRequest("PATCH", u, release) - if err != nil { - return nil, nil, err - } - - r := new(RepositoryRelease) - resp, err := s.client.Do(req, r) - if err != nil { - return nil, resp, err - } - return r, resp, err -} - -// DeleteRelease delete a single release from a repository. -// -// GitHub API docs : http://developer.github.com/v3/repos/releases/#delete-a-release -func (s *RepositoriesService) DeleteRelease(owner, repo string, id int) (*Response, error) { - u := fmt.Sprintf("repos/%s/%s/releases/%d", owner, repo, id) - - req, err := s.client.NewRequest("DELETE", u, nil) - if err != nil { - return nil, err - } - return s.client.Do(req, nil) -} - -// ListReleaseAssets lists the release's assets. 
-// -// GitHub API docs : http://developer.github.com/v3/repos/releases/#list-assets-for-a-release -func (s *RepositoriesService) ListReleaseAssets(owner, repo string, id int, opt *ListOptions) ([]ReleaseAsset, *Response, error) { - u := fmt.Sprintf("repos/%s/%s/releases/%d/assets", owner, repo, id) - u, err := addOptions(u, opt) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - assets := new([]ReleaseAsset) - resp, err := s.client.Do(req, assets) - if err != nil { - return nil, resp, nil - } - return *assets, resp, err -} - -// GetReleaseAsset fetches a single release asset. -// -// GitHub API docs : http://developer.github.com/v3/repos/releases/#get-a-single-release-asset -func (s *RepositoriesService) GetReleaseAsset(owner, repo string, id int) (*ReleaseAsset, *Response, error) { - u := fmt.Sprintf("repos/%s/%s/releases/assets/%d", owner, repo, id) - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - asset := new(ReleaseAsset) - resp, err := s.client.Do(req, asset) - if err != nil { - return nil, resp, nil - } - return asset, resp, err -} - -// DownloadReleaseAsset downloads a release asset. -// -// DownloadReleaseAsset returns an io.ReadCloser that reads the contents of the -// specified release asset. It is the caller's responsibility to close the ReadCloser. -// -// GitHub API docs : http://developer.github.com/v3/repos/releases/#get-a-single-release-asset -func (s *RepositoriesService) DownloadReleaseAsset(owner, repo string, id int) (io.ReadCloser, error) { - u := fmt.Sprintf("repos/%s/%s/releases/assets/%d", owner, repo, id) - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, err - } - req.Header.Set("Accept", defaultMediaType) - - resp, err := s.client.client.Do(req) - if err != nil { - return nil, err - } - - return resp.Body, nil -} - -// EditReleaseAsset edits a repository release asset. -// -// GitHub API docs : http://developer.github.com/v3/repos/releases/#edit-a-release-asset -func (s *RepositoriesService) EditReleaseAsset(owner, repo string, id int, release *ReleaseAsset) (*ReleaseAsset, *Response, error) { - u := fmt.Sprintf("repos/%s/%s/releases/assets/%d", owner, repo, id) - - req, err := s.client.NewRequest("PATCH", u, release) - if err != nil { - return nil, nil, err - } - - asset := new(ReleaseAsset) - resp, err := s.client.Do(req, asset) - if err != nil { - return nil, resp, err - } - return asset, resp, err -} - -// DeleteReleaseAsset delete a single release asset from a repository. -// -// GitHub API docs : http://developer.github.com/v3/repos/releases/#delete-a-release-asset -func (s *RepositoriesService) DeleteReleaseAsset(owner, repo string, id int) (*Response, error) { - u := fmt.Sprintf("repos/%s/%s/releases/assets/%d", owner, repo, id) - - req, err := s.client.NewRequest("DELETE", u, nil) - if err != nil { - return nil, err - } - return s.client.Do(req, nil) -} - -// UploadReleaseAsset creates an asset by uploading a file into a release repository. -// To upload assets that cannot be represented by an os.File, call NewUploadRequest directly. 
-// -// GitHub API docs : http://developer.github.com/v3/repos/releases/#upload-a-release-asset -func (s *RepositoriesService) UploadReleaseAsset(owner, repo string, id int, opt *UploadOptions, file *os.File) (*ReleaseAsset, *Response, error) { - u := fmt.Sprintf("repos/%s/%s/releases/%d/assets", owner, repo, id) - u, err := addOptions(u, opt) - if err != nil { - return nil, nil, err - } - - stat, err := file.Stat() - if err != nil { - return nil, nil, err - } - if stat.IsDir() { - return nil, nil, errors.New("the asset to upload can't be a directory") - } - - mediaType := mime.TypeByExtension(filepath.Ext(file.Name())) - req, err := s.client.NewUploadRequest(u, file, stat.Size(), mediaType) - if err != nil { - return nil, nil, err - } - - asset := new(ReleaseAsset) - resp, err := s.client.Do(req, asset) - if err != nil { - return nil, resp, err - } - return asset, resp, err -} diff --git a/vendor/github.com/google/go-github/github/repos_stats.go b/vendor/github.com/google/go-github/github/repos_stats.go deleted file mode 100644 index 7c1de09..0000000 --- a/vendor/github.com/google/go-github/github/repos_stats.go +++ /dev/null @@ -1,214 +0,0 @@ -// Copyright 2014 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "fmt" - "time" -) - -// ContributorStats represents a contributor to a repository and their -// weekly contributions to a given repo. -type ContributorStats struct { - Author *Contributor `json:"author,omitempty"` - Total *int `json:"total,omitempty"` - Weeks []WeeklyStats `json:"weeks,omitempty"` -} - -func (c ContributorStats) String() string { - return Stringify(c) -} - -// WeeklyStats represents the number of additions, deletions and commits -// a Contributor made in a given week. -type WeeklyStats struct { - Week *Timestamp `json:"w,omitempty"` - Additions *int `json:"a,omitempty"` - Deletions *int `json:"d,omitempty"` - Commits *int `json:"c,omitempty"` -} - -func (w WeeklyStats) String() string { - return Stringify(w) -} - -// ListContributorsStats gets a repo's contributor list with additions, -// deletions and commit counts. -// -// If this is the first time these statistics are requested for the given -// repository, this method will return a non-nil error and a status code of -// 202. This is because this is the status that github returns to signify that -// it is now computing the requested statistics. A follow up request, after a -// delay of a second or so, should result in a successful request. -// -// GitHub API Docs: https://developer.github.com/v3/repos/statistics/#contributors -func (s *RepositoriesService) ListContributorsStats(owner, repo string) ([]ContributorStats, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/stats/contributors", owner, repo) - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - var contributorStats []ContributorStats - resp, err := s.client.Do(req, &contributorStats) - if err != nil { - return nil, resp, err - } - - return contributorStats, resp, err -} - -// WeeklyCommitActivity represents the weekly commit activity for a repository. -// The days array is a group of commits per day, starting on Sunday. 
-type WeeklyCommitActivity struct {
- Days []int `json:"days,omitempty"`
- Total *int `json:"total,omitempty"`
- Week *Timestamp `json:"week,omitempty"`
-}
-
-func (w WeeklyCommitActivity) String() string {
- return Stringify(w)
-}
-
-// ListCommitActivity returns the last year of commit activity
-// grouped by week. The days array is a group of commits per day,
-// starting on Sunday.
-//
-// If this is the first time these statistics are requested for the given
-// repository, this method will return a non-nil error and a status code of
-// 202. This is because this is the status that github returns to signify that
-// it is now computing the requested statistics. A follow up request, after a
-// delay of a second or so, should result in a successful request.
-//
-// GitHub API Docs: https://developer.github.com/v3/repos/statistics/#commit-activity
-func (s *RepositoriesService) ListCommitActivity(owner, repo string) ([]WeeklyCommitActivity, *Response, error) {
- u := fmt.Sprintf("repos/%v/%v/stats/commit_activity", owner, repo)
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return nil, nil, err
- }
-
- var weeklyCommitActivity []WeeklyCommitActivity
- resp, err := s.client.Do(req, &weeklyCommitActivity)
- if err != nil {
- return nil, resp, err
- }
-
- return weeklyCommitActivity, resp, err
-}
-
-// ListCodeFrequency returns a weekly aggregate of the number of additions and
-// deletions pushed to a repository. Returned WeeklyStats will contain
-// additions and deletions, but not total commits.
-//
-// GitHub API Docs: https://developer.github.com/v3/repos/statistics/#code-frequency
-func (s *RepositoriesService) ListCodeFrequency(owner, repo string) ([]WeeklyStats, *Response, error) {
- u := fmt.Sprintf("repos/%v/%v/stats/code_frequency", owner, repo)
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return nil, nil, err
- }
-
- var weeks [][]int
- resp, err := s.client.Do(req, &weeks)
-
- // convert int slices into WeeklyStats
- var stats []WeeklyStats
- for _, week := range weeks {
- if len(week) != 3 {
- continue
- }
- stat := WeeklyStats{
- Week: &Timestamp{time.Unix(int64(week[0]), 0)},
- Additions: Int(week[1]),
- Deletions: Int(week[2]),
- }
- stats = append(stats, stat)
- }
-
- return stats, resp, err
-}
-
-// RepositoryParticipation is the number of commits by everyone
-// who has contributed to the repository (including the owner)
-// as well as the number of commits by the owner themself.
-type RepositoryParticipation struct {
- All []int `json:"all,omitempty"`
- Owner []int `json:"owner,omitempty"`
-}
-
-func (r RepositoryParticipation) String() string {
- return Stringify(r)
-}
-
-// ListParticipation returns the total commit counts for the 'owner'
-// and total commit counts in 'all'. 'all' is everyone combined,
-// including the 'owner' in the last 52 weeks. If you’d like to get
-// the commit counts for non-owners, you can subtract 'owner' from 'all'.
-//
-// The array order is oldest week (index 0) to most recent week.
-//
-// If this is the first time these statistics are requested for the given
-// repository, this method will return a non-nil error and a status code
-// of 202. This is because this is the status that github returns to
-// signify that it is now computing the requested statistics. A follow
-// up request, after a delay of a second or so, should result in a
-// successful request.
-//
-// GitHub API Docs: https://developer.github.com/v3/repos/statistics/#participation
-func (s *RepositoriesService) ListParticipation(owner, repo string) (*RepositoryParticipation, *Response, error) {
- u := fmt.Sprintf("repos/%v/%v/stats/participation", owner, repo)
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return nil, nil, err
- }
-
- participation := new(RepositoryParticipation)
- resp, err := s.client.Do(req, participation)
- if err != nil {
- return nil, resp, err
- }
-
- return participation, resp, err
-}
-
-// PunchCard represents the number of commits made during a given hour of a
-// day of the week.
-type PunchCard struct {
- Day *int // Day of the week (0-6: Sunday - Saturday).
- Hour *int // Hour of day (0-23).
- Commits *int // Number of commits.
-}
-
-// ListPunchCard returns the number of commits per hour in each day.
-//
-// GitHub API Docs: https://developer.github.com/v3/repos/statistics/#punch-card
-func (s *RepositoriesService) ListPunchCard(owner, repo string) ([]PunchCard, *Response, error) {
- u := fmt.Sprintf("repos/%v/%v/stats/punch_card", owner, repo)
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return nil, nil, err
- }
-
- var results [][]int
- resp, err := s.client.Do(req, &results)
-
- // convert int slices into PunchCards
- var cards []PunchCard
- for _, result := range results {
- if len(result) != 3 {
- continue
- }
- card := PunchCard{
- Day: Int(result[0]),
- Hour: Int(result[1]),
- Commits: Int(result[2]),
- }
- cards = append(cards, card)
- }
-
- return cards, resp, err
-}
diff --git a/vendor/github.com/google/go-github/github/repos_statuses.go b/vendor/github.com/google/go-github/github/repos_statuses.go
deleted file mode 100644
index 0379a2b..0000000
--- a/vendor/github.com/google/go-github/github/repos_statuses.go
+++ /dev/null
@@ -1,128 +0,0 @@
-// Copyright 2013 The go-github AUTHORS. All rights reserved.
-//
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package github
-
-import (
- "fmt"
- "time"
-)
-
-// RepoStatus represents the status of a repository at a particular reference.
-type RepoStatus struct {
- ID *int `json:"id,omitempty"`
- URL *string `json:"url,omitempty"`
-
- // State is the current state of the repository. Possible values are:
- // pending, success, error, or failure.
- State *string `json:"state,omitempty"`
-
- // TargetURL is the URL of the page representing this status. It will be
- // linked from the GitHub UI to allow users to see the source of the status.
- TargetURL *string `json:"target_url,omitempty"`
-
- // Description is a short high level summary of the status.
- Description *string `json:"description,omitempty"`
-
- // A string label to differentiate this status from the statuses of other systems.
- Context *string `json:"context,omitempty"`
-
- Creator *User `json:"creator,omitempty"`
- CreatedAt *time.Time `json:"created_at,omitempty"`
- UpdatedAt *time.Time `json:"updated_at,omitempty"`
-}
-
-func (r RepoStatus) String() string {
- return Stringify(r)
-}
-
-// ListStatuses lists the statuses of a repository at the specified
-// reference. ref can be a SHA, a branch name, or a tag name.
-//
-// GitHub API docs: http://developer.github.com/v3/repos/statuses/#list-statuses-for-a-specific-ref
-func (s *RepositoriesService) ListStatuses(owner, repo, ref string, opt *ListOptions) ([]RepoStatus, *Response, error) {
- u := fmt.Sprintf("repos/%v/%v/commits/%v/statuses", owner, repo, ref)
- u, err := addOptions(u, opt)
- if err != nil {
- return nil, nil, err
- }
-
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return nil, nil, err
- }
-
- statuses := new([]RepoStatus)
- resp, err := s.client.Do(req, statuses)
- if err != nil {
- return nil, resp, err
- }
-
- return *statuses, resp, err
-}
-
-// CreateStatus creates a new status for a repository at the specified
-// reference. Ref can be a SHA, a branch name, or a tag name.
-//
-// GitHub API docs: http://developer.github.com/v3/repos/statuses/#create-a-status
-func (s *RepositoriesService) CreateStatus(owner, repo, ref string, status *RepoStatus) (*RepoStatus, *Response, error) {
- u := fmt.Sprintf("repos/%v/%v/statuses/%v", owner, repo, ref)
- req, err := s.client.NewRequest("POST", u, status)
- if err != nil {
- return nil, nil, err
- }
-
- repoStatus := new(RepoStatus)
- resp, err := s.client.Do(req, repoStatus)
- if err != nil {
- return nil, resp, err
- }
-
- return repoStatus, resp, err
-}
-
-// CombinedStatus represents the combined status of a repository at a particular reference.
-type CombinedStatus struct {
- // State is the combined state of the repository. Possible values are:
- // failure, pending, or success.
- State *string `json:"state,omitempty"`
-
- Name *string `json:"name,omitempty"`
- SHA *string `json:"sha,omitempty"`
- TotalCount *int `json:"total_count,omitempty"`
- Statuses []RepoStatus `json:"statuses,omitempty"`
-
- CommitURL *string `json:"commit_url,omitempty"`
- RepositoryURL *string `json:"repository_url,omitempty"`
-}
-
-func (s CombinedStatus) String() string {
- return Stringify(s)
-}
-
-// GetCombinedStatus returns the combined status of a repository at the specified
-// reference. ref can be a SHA, a branch name, or a tag name.
-//
-// GitHub API docs: https://developer.github.com/v3/repos/statuses/#get-the-combined-status-for-a-specific-ref
-func (s *RepositoriesService) GetCombinedStatus(owner, repo, ref string, opt *ListOptions) (*CombinedStatus, *Response, error) {
- u := fmt.Sprintf("repos/%v/%v/commits/%v/status", owner, repo, ref)
- u, err := addOptions(u, opt)
- if err != nil {
- return nil, nil, err
- }
-
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return nil, nil, err
- }
-
- status := new(CombinedStatus)
- resp, err := s.client.Do(req, status)
- if err != nil {
- return nil, resp, err
- }
-
- return status, resp, err
-}
diff --git a/vendor/github.com/google/go-github/github/search.go b/vendor/github.com/google/go-github/github/search.go
deleted file mode 100644
index d9e9b41..0000000
--- a/vendor/github.com/google/go-github/github/search.go
+++ /dev/null
@@ -1,158 +0,0 @@
-// Copyright 2013 The go-github AUTHORS. All rights reserved.
-//
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package github
-
-import (
- "fmt"
-
- qs "github.com/google/go-querystring/query"
-)
-
-// SearchService provides access to the search related functions
-// in the GitHub API.
-//
-// GitHub API docs: http://developer.github.com/v3/search/
-type SearchService struct {
- client *Client
-}
-
-// SearchOptions specifies optional parameters to the SearchService methods.
-type SearchOptions struct {
- // How to sort the search results. Possible values are:
- // - for repositories: stars, fork, updated
- // - for code: indexed
- // - for issues: comments, created, updated
- // - for users: followers, repositories, joined
- //
- // Default is to sort by best match.
- Sort string `url:"sort,omitempty"`
-
- // Sort order if sort parameter is provided. Possible values are: asc,
- // desc. Default is desc.
- Order string `url:"order,omitempty"`
-
- // Whether to retrieve text match metadata with a query
- TextMatch bool `url:"-"`
-
- ListOptions
-}
-
-// RepositoriesSearchResult represents the result of a repositories search.
-type RepositoriesSearchResult struct {
- Total *int `json:"total_count,omitempty"`
- Repositories []Repository `json:"items,omitempty"`
-}
-
-// Repositories searches repositories via various criteria.
-//
-// GitHub API docs: http://developer.github.com/v3/search/#search-repositories
-func (s *SearchService) Repositories(query string, opt *SearchOptions) (*RepositoriesSearchResult, *Response, error) {
- result := new(RepositoriesSearchResult)
- resp, err := s.search("repositories", query, opt, result)
- return result, resp, err
-}
-
-// IssuesSearchResult represents the result of an issues search.
-type IssuesSearchResult struct {
- Total *int `json:"total_count,omitempty"`
- Issues []Issue `json:"items,omitempty"`
-}
-
-// Issues searches issues via various criteria.
-//
-// GitHub API docs: http://developer.github.com/v3/search/#search-issues
-func (s *SearchService) Issues(query string, opt *SearchOptions) (*IssuesSearchResult, *Response, error) {
- result := new(IssuesSearchResult)
- resp, err := s.search("issues", query, opt, result)
- return result, resp, err
-}
-
-// UsersSearchResult represents the result of a users search.
-type UsersSearchResult struct {
- Total *int `json:"total_count,omitempty"`
- Users []User `json:"items,omitempty"`
-}
-
-// Users searches users via various criteria.
-//
-// GitHub API docs: http://developer.github.com/v3/search/#search-users
-func (s *SearchService) Users(query string, opt *SearchOptions) (*UsersSearchResult, *Response, error) {
- result := new(UsersSearchResult)
- resp, err := s.search("users", query, opt, result)
- return result, resp, err
-}
-
-// Match represents a single text match.
-type Match struct {
- Text *string `json:"text,omitempty"`
- Indices []int `json:"indices,omitempty"`
-}
-
-// TextMatch represents a text match for a SearchResult
-type TextMatch struct {
- ObjectURL *string `json:"object_url,omitempty"`
- ObjectType *string `json:"object_type,omitempty"`
- Property *string `json:"property,omitempty"`
- Fragment *string `json:"fragment,omitempty"`
- Matches []Match `json:"matches,omitempty"`
-}
-
-func (tm TextMatch) String() string {
- return Stringify(tm)
-}
-
-// CodeSearchResult represents the result of a code search.
-type CodeSearchResult struct {
- Total *int `json:"total_count,omitempty"`
- CodeResults []CodeResult `json:"items,omitempty"`
-}
-
-// CodeResult represents a single search result.
-type CodeResult struct {
- Name *string `json:"name,omitempty"`
- Path *string `json:"path,omitempty"`
- SHA *string `json:"sha,omitempty"`
- HTMLURL *string `json:"html_url,omitempty"`
- Repository *Repository `json:"repository,omitempty"`
- TextMatches []TextMatch `json:"text_matches,omitempty"`
-}
-
-func (c CodeResult) String() string {
- return Stringify(c)
-}
-
-// Code searches code via various criteria.
-// -// GitHub API docs: http://developer.github.com/v3/search/#search-code -func (s *SearchService) Code(query string, opt *SearchOptions) (*CodeSearchResult, *Response, error) { - result := new(CodeSearchResult) - resp, err := s.search("code", query, opt, result) - return result, resp, err -} - -// Helper function that executes search queries against different -// GitHub search types (repositories, code, issues, users) -func (s *SearchService) search(searchType string, query string, opt *SearchOptions, result interface{}) (*Response, error) { - params, err := qs.Values(opt) - if err != nil { - return nil, err - } - params.Add("q", query) - u := fmt.Sprintf("search/%s?%s", searchType, params.Encode()) - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, err - } - - if opt.TextMatch { - // Accept header defaults to "application/vnd.github.v3+json" - // We change it here to fetch back text-match metadata - req.Header.Set("Accept", "application/vnd.github.v3.text-match+json") - } - - return s.client.Do(req, result) -} diff --git a/vendor/github.com/google/go-github/github/strings.go b/vendor/github.com/google/go-github/github/strings.go deleted file mode 100644 index 3857723..0000000 --- a/vendor/github.com/google/go-github/github/strings.go +++ /dev/null @@ -1,93 +0,0 @@ -// Copyright 2013 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "bytes" - "fmt" - "io" - - "reflect" -) - -var timestampType = reflect.TypeOf(Timestamp{}) - -// Stringify attempts to create a reasonable string representation of types in -// the GitHub library. It does things like resolve pointers to their values -// and omits struct fields with nil values. -func Stringify(message interface{}) string { - var buf bytes.Buffer - v := reflect.ValueOf(message) - stringifyValue(&buf, v) - return buf.String() -} - -// stringifyValue was heavily inspired by the goprotobuf library. - -func stringifyValue(w io.Writer, val reflect.Value) { - if val.Kind() == reflect.Ptr && val.IsNil() { - w.Write([]byte("")) - return - } - - v := reflect.Indirect(val) - - switch v.Kind() { - case reflect.String: - fmt.Fprintf(w, `"%s"`, v) - case reflect.Slice: - w.Write([]byte{'['}) - for i := 0; i < v.Len(); i++ { - if i > 0 { - w.Write([]byte{' '}) - } - - stringifyValue(w, v.Index(i)) - } - - w.Write([]byte{']'}) - return - case reflect.Struct: - if v.Type().Name() != "" { - w.Write([]byte(v.Type().String())) - } - - // special handling of Timestamp values - if v.Type() == timestampType { - fmt.Fprintf(w, "{%s}", v.Interface()) - return - } - - w.Write([]byte{'{'}) - - var sep bool - for i := 0; i < v.NumField(); i++ { - fv := v.Field(i) - if fv.Kind() == reflect.Ptr && fv.IsNil() { - continue - } - if fv.Kind() == reflect.Slice && fv.IsNil() { - continue - } - - if sep { - w.Write([]byte(", ")) - } else { - sep = true - } - - w.Write([]byte(v.Type().Field(i).Name)) - w.Write([]byte{':'}) - stringifyValue(w, fv) - } - - w.Write([]byte{'}'}) - default: - if v.CanInterface() { - fmt.Fprint(w, v.Interface()) - } - } -} diff --git a/vendor/github.com/google/go-github/github/timestamp.go b/vendor/github.com/google/go-github/github/timestamp.go deleted file mode 100644 index a1c1554..0000000 --- a/vendor/github.com/google/go-github/github/timestamp.go +++ /dev/null @@ -1,41 +0,0 @@ -// Copyright 2013 The go-github AUTHORS. All rights reserved. 
-// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "strconv" - "time" -) - -// Timestamp represents a time that can be unmarshalled from a JSON string -// formatted as either an RFC3339 or Unix timestamp. This is necessary for some -// fields since the GitHub API is inconsistent in how it represents times. All -// exported methods of time.Time can be called on Timestamp. -type Timestamp struct { - time.Time -} - -func (t Timestamp) String() string { - return t.Time.String() -} - -// UnmarshalJSON implements the json.Unmarshaler interface. -// Time is expected in RFC3339 or Unix format. -func (t *Timestamp) UnmarshalJSON(data []byte) (err error) { - str := string(data) - i, err := strconv.ParseInt(str, 10, 64) - if err == nil { - (*t).Time = time.Unix(i, 0) - } else { - (*t).Time, err = time.Parse(`"`+time.RFC3339+`"`, str) - } - return -} - -// Equal reports whether t and u are equal based on time.Equal -func (t Timestamp) Equal(u Timestamp) bool { - return t.Time.Equal(u.Time) -} diff --git a/vendor/github.com/google/go-github/github/users.go b/vendor/github.com/google/go-github/github/users.go deleted file mode 100644 index a041bbf..0000000 --- a/vendor/github.com/google/go-github/github/users.go +++ /dev/null @@ -1,144 +0,0 @@ -// Copyright 2013 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import "fmt" - -// UsersService handles communication with the user related -// methods of the GitHub API. -// -// GitHub API docs: http://developer.github.com/v3/users/ -type UsersService struct { - client *Client -} - -// User represents a GitHub user. 
-type User struct { - Login *string `json:"login,omitempty"` - ID *int `json:"id,omitempty"` - AvatarURL *string `json:"avatar_url,omitempty"` - HTMLURL *string `json:"html_url,omitempty"` - GravatarID *string `json:"gravatar_id,omitempty"` - Name *string `json:"name,omitempty"` - Company *string `json:"company,omitempty"` - Blog *string `json:"blog,omitempty"` - Location *string `json:"location,omitempty"` - Email *string `json:"email,omitempty"` - Hireable *bool `json:"hireable,omitempty"` - Bio *string `json:"bio,omitempty"` - PublicRepos *int `json:"public_repos,omitempty"` - PublicGists *int `json:"public_gists,omitempty"` - Followers *int `json:"followers,omitempty"` - Following *int `json:"following,omitempty"` - CreatedAt *Timestamp `json:"created_at,omitempty"` - UpdatedAt *Timestamp `json:"updated_at,omitempty"` - Type *string `json:"type,omitempty"` - SiteAdmin *bool `json:"site_admin,omitempty"` - TotalPrivateRepos *int `json:"total_private_repos,omitempty"` - OwnedPrivateRepos *int `json:"owned_private_repos,omitempty"` - PrivateGists *int `json:"private_gists,omitempty"` - DiskUsage *int `json:"disk_usage,omitempty"` - Collaborators *int `json:"collaborators,omitempty"` - Plan *Plan `json:"plan,omitempty"` - - // API URLs - URL *string `json:"url,omitempty"` - EventsURL *string `json:"events_url,omitempty"` - FollowingURL *string `json:"following_url,omitempty"` - FollowersURL *string `json:"followers_url,omitempty"` - GistsURL *string `json:"gists_url,omitempty"` - OrganizationsURL *string `json:"organizations_url,omitempty"` - ReceivedEventsURL *string `json:"received_events_url,omitempty"` - ReposURL *string `json:"repos_url,omitempty"` - StarredURL *string `json:"starred_url,omitempty"` - SubscriptionsURL *string `json:"subscriptions_url,omitempty"` - - // TextMatches is only populated from search results that request text matches - // See: search.go and https://developer.github.com/v3/search/#text-match-metadata - TextMatches []TextMatch `json:"text_matches,omitempty"` - - // Permissions identifies the permissions that a user has on a given - // repository. This is only populated when calling Repositories.ListCollaborators. - Permissions *map[string]bool `json:"permissions,omitempty"` -} - -func (u User) String() string { - return Stringify(u) -} - -// Get fetches a user. Passing the empty string will fetch the authenticated -// user. -// -// GitHub API docs: http://developer.github.com/v3/users/#get-a-single-user -func (s *UsersService) Get(user string) (*User, *Response, error) { - var u string - if user != "" { - u = fmt.Sprintf("users/%v", user) - } else { - u = "user" - } - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - uResp := new(User) - resp, err := s.client.Do(req, uResp) - if err != nil { - return nil, resp, err - } - - return uResp, resp, err -} - -// Edit the authenticated user. -// -// GitHub API docs: http://developer.github.com/v3/users/#update-the-authenticated-user -func (s *UsersService) Edit(user *User) (*User, *Response, error) { - u := "user" - req, err := s.client.NewRequest("PATCH", u, user) - if err != nil { - return nil, nil, err - } - - uResp := new(User) - resp, err := s.client.Do(req, uResp) - if err != nil { - return nil, resp, err - } - - return uResp, resp, err -} - -// UserListOptions specifies optional parameters to the UsersService.ListAll -// method. -type UserListOptions struct { - // ID of the last user seen - Since int `url:"since,omitempty"` -} - -// ListAll lists all GitHub users. 
-// -// GitHub API docs: http://developer.github.com/v3/users/#get-all-users -func (s *UsersService) ListAll(opt *UserListOptions) ([]User, *Response, error) { - u, err := addOptions("users", opt) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - users := new([]User) - resp, err := s.client.Do(req, users) - if err != nil { - return nil, resp, err - } - - return *users, resp, err -} diff --git a/vendor/github.com/google/go-github/github/users_administration.go b/vendor/github.com/google/go-github/github/users_administration.go deleted file mode 100644 index dc1dcb8..0000000 --- a/vendor/github.com/google/go-github/github/users_administration.go +++ /dev/null @@ -1,64 +0,0 @@ -// Copyright 2014 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import "fmt" - -// PromoteSiteAdmin promotes a user to a site administrator of a GitHub Enterprise instance. -// -// GitHub API docs: https://developer.github.com/v3/users/administration/#promote-an-ordinary-user-to-a-site-administrator -func (s *UsersService) PromoteSiteAdmin(user string) (*Response, error) { - u := fmt.Sprintf("users/%v/site_admin", user) - - req, err := s.client.NewRequest("PUT", u, nil) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) -} - -// DemoteSiteAdmin demotes a user from site administrator of a GitHub Enterprise instance. -// -// GitHub API docs: https://developer.github.com/v3/users/administration/#demote-a-site-administrator-to-an-ordinary-user -func (s *UsersService) DemoteSiteAdmin(user string) (*Response, error) { - u := fmt.Sprintf("users/%v/site_admin", user) - - req, err := s.client.NewRequest("DELETE", u, nil) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) -} - -// Suspend a user on a GitHub Enterprise instance. -// -// GitHub API docs: https://developer.github.com/v3/users/administration/#suspend-a-user -func (s *UsersService) Suspend(user string) (*Response, error) { - u := fmt.Sprintf("users/%v/suspended", user) - - req, err := s.client.NewRequest("PUT", u, nil) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) -} - -// Unsuspend a user on a GitHub Enterprise instance. -// -// GitHub API docs: https://developer.github.com/v3/users/administration/#unsuspend-a-user -func (s *UsersService) Unsuspend(user string) (*Response, error) { - u := fmt.Sprintf("users/%v/suspended", user) - - req, err := s.client.NewRequest("DELETE", u, nil) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) -} diff --git a/vendor/github.com/google/go-github/github/users_emails.go b/vendor/github.com/google/go-github/github/users_emails.go deleted file mode 100644 index 7553191..0000000 --- a/vendor/github.com/google/go-github/github/users_emails.go +++ /dev/null @@ -1,69 +0,0 @@ -// Copyright 2013 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -// UserEmail represents user's email address -type UserEmail struct { - Email *string `json:"email,omitempty"` - Primary *bool `json:"primary,omitempty"` - Verified *bool `json:"verified,omitempty"` -} - -// ListEmails lists all email addresses for the authenticated user. 
-// -// GitHub API docs: http://developer.github.com/v3/users/emails/#list-email-addresses-for-a-user -func (s *UsersService) ListEmails(opt *ListOptions) ([]UserEmail, *Response, error) { - u := "user/emails" - u, err := addOptions(u, opt) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - emails := new([]UserEmail) - resp, err := s.client.Do(req, emails) - if err != nil { - return nil, resp, err - } - - return *emails, resp, err -} - -// AddEmails adds email addresses of the authenticated user. -// -// GitHub API docs: http://developer.github.com/v3/users/emails/#add-email-addresses -func (s *UsersService) AddEmails(emails []string) ([]UserEmail, *Response, error) { - u := "user/emails" - req, err := s.client.NewRequest("POST", u, emails) - if err != nil { - return nil, nil, err - } - - e := new([]UserEmail) - resp, err := s.client.Do(req, e) - if err != nil { - return nil, resp, err - } - - return *e, resp, err -} - -// DeleteEmails deletes email addresses from authenticated user. -// -// GitHub API docs: http://developer.github.com/v3/users/emails/#delete-email-addresses -func (s *UsersService) DeleteEmails(emails []string) (*Response, error) { - u := "user/emails" - req, err := s.client.NewRequest("DELETE", u, emails) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) -} diff --git a/vendor/github.com/google/go-github/github/users_followers.go b/vendor/github.com/google/go-github/github/users_followers.go deleted file mode 100644 index 7ecbed9..0000000 --- a/vendor/github.com/google/go-github/github/users_followers.go +++ /dev/null @@ -1,116 +0,0 @@ -// Copyright 2013 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import "fmt" - -// ListFollowers lists the followers for a user. Passing the empty string will -// fetch followers for the authenticated user. -// -// GitHub API docs: http://developer.github.com/v3/users/followers/#list-followers-of-a-user -func (s *UsersService) ListFollowers(user string, opt *ListOptions) ([]User, *Response, error) { - var u string - if user != "" { - u = fmt.Sprintf("users/%v/followers", user) - } else { - u = "user/followers" - } - u, err := addOptions(u, opt) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - users := new([]User) - resp, err := s.client.Do(req, users) - if err != nil { - return nil, resp, err - } - - return *users, resp, err -} - -// ListFollowing lists the people that a user is following. Passing the empty -// string will list people the authenticated user is following. -// -// GitHub API docs: http://developer.github.com/v3/users/followers/#list-users-followed-by-another-user -func (s *UsersService) ListFollowing(user string, opt *ListOptions) ([]User, *Response, error) { - var u string - if user != "" { - u = fmt.Sprintf("users/%v/following", user) - } else { - u = "user/following" - } - u, err := addOptions(u, opt) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - users := new([]User) - resp, err := s.client.Do(req, users) - if err != nil { - return nil, resp, err - } - - return *users, resp, err -} - -// IsFollowing checks if "user" is following "target". 
Passing the empty -// string for "user" will check if the authenticated user is following "target". -// -// GitHub API docs: http://developer.github.com/v3/users/followers/#check-if-you-are-following-a-user -func (s *UsersService) IsFollowing(user, target string) (bool, *Response, error) { - var u string - if user != "" { - u = fmt.Sprintf("users/%v/following/%v", user, target) - } else { - u = fmt.Sprintf("user/following/%v", target) - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return false, nil, err - } - - resp, err := s.client.Do(req, nil) - following, err := parseBoolResponse(err) - return following, resp, err -} - -// Follow will cause the authenticated user to follow the specified user. -// -// GitHub API docs: http://developer.github.com/v3/users/followers/#follow-a-user -func (s *UsersService) Follow(user string) (*Response, error) { - u := fmt.Sprintf("user/following/%v", user) - req, err := s.client.NewRequest("PUT", u, nil) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) -} - -// Unfollow will cause the authenticated user to unfollow the specified user. -// -// GitHub API docs: http://developer.github.com/v3/users/followers/#unfollow-a-user -func (s *UsersService) Unfollow(user string) (*Response, error) { - u := fmt.Sprintf("user/following/%v", user) - req, err := s.client.NewRequest("DELETE", u, nil) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) -} diff --git a/vendor/github.com/google/go-github/github/users_keys.go b/vendor/github.com/google/go-github/github/users_keys.go deleted file mode 100644 index dcbd773..0000000 --- a/vendor/github.com/google/go-github/github/users_keys.go +++ /dev/null @@ -1,104 +0,0 @@ -// Copyright 2013 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import "fmt" - -// Key represents a public SSH key used to authenticate a user or deploy script. -type Key struct { - ID *int `json:"id,omitempty"` - Key *string `json:"key,omitempty"` - URL *string `json:"url,omitempty"` - Title *string `json:"title,omitempty"` -} - -func (k Key) String() string { - return Stringify(k) -} - -// ListKeys lists the verified public keys for a user. Passing the empty -// string will fetch keys for the authenticated user. -// -// GitHub API docs: http://developer.github.com/v3/users/keys/#list-public-keys-for-a-user -func (s *UsersService) ListKeys(user string, opt *ListOptions) ([]Key, *Response, error) { - var u string - if user != "" { - u = fmt.Sprintf("users/%v/keys", user) - } else { - u = "user/keys" - } - u, err := addOptions(u, opt) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - keys := new([]Key) - resp, err := s.client.Do(req, keys) - if err != nil { - return nil, resp, err - } - - return *keys, resp, err -} - -// GetKey fetches a single public key. -// -// GitHub API docs: http://developer.github.com/v3/users/keys/#get-a-single-public-key -func (s *UsersService) GetKey(id int) (*Key, *Response, error) { - u := fmt.Sprintf("user/keys/%v", id) - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - key := new(Key) - resp, err := s.client.Do(req, key) - if err != nil { - return nil, resp, err - } - - return key, resp, err -} - -// CreateKey adds a public key for the authenticated user. 
-// -// GitHub API docs: http://developer.github.com/v3/users/keys/#create-a-public-key -func (s *UsersService) CreateKey(key *Key) (*Key, *Response, error) { - u := "user/keys" - - req, err := s.client.NewRequest("POST", u, key) - if err != nil { - return nil, nil, err - } - - k := new(Key) - resp, err := s.client.Do(req, k) - if err != nil { - return nil, resp, err - } - - return k, resp, err -} - -// DeleteKey deletes a public key. -// -// GitHub API docs: http://developer.github.com/v3/users/keys/#delete-a-public-key -func (s *UsersService) DeleteKey(id int) (*Response, error) { - u := fmt.Sprintf("user/keys/%v", id) - - req, err := s.client.NewRequest("DELETE", u, nil) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) -} diff --git a/vendor/github.com/google/go-querystring/query/encode.go b/vendor/github.com/google/go-querystring/query/encode.go deleted file mode 100644 index 90dcabb..0000000 --- a/vendor/github.com/google/go-querystring/query/encode.go +++ /dev/null @@ -1,311 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package query implements encoding of structs into URL query parameters. -// -// As a simple example: -// -// type Options struct { -// Query string `url:"q"` -// ShowAll bool `url:"all"` -// Page int `url:"page"` -// } -// -// opt := Options{ "foo", true, 2 } -// v, _ := query.Values(opt) -// fmt.Print(v.Encode()) // will output: "q=foo&all=true&page=2" -// -// The exact mapping between Go values and url.Values is described in the -// documentation for the Values() function. -package query - -import ( - "bytes" - "fmt" - "net/url" - "reflect" - "strconv" - "strings" - "time" -) - -var timeType = reflect.TypeOf(time.Time{}) - -var encoderType = reflect.TypeOf(new(Encoder)).Elem() - -// Encoder is an interface implemented by any type that wishes to encode -// itself into URL values in a non-standard way. -type Encoder interface { - EncodeValues(key string, v *url.Values) error -} - -// Values returns the url.Values encoding of v. -// -// Values expects to be passed a struct, and traverses it recursively using the -// following encoding rules. -// -// Each exported struct field is encoded as a URL parameter unless -// -// - the field's tag is "-", or -// - the field is empty and its tag specifies the "omitempty" option -// -// The empty values are false, 0, any nil pointer or interface value, any array -// slice, map, or string of length zero, and any time.Time that returns true -// for IsZero(). -// -// The URL parameter name defaults to the struct field name but can be -// specified in the struct field's tag value. The "url" key in the struct -// field's tag value is the key name, followed by an optional comma and -// options. For example: -// -// // Field is ignored by this package. -// Field int `url:"-"` -// -// // Field appears as URL parameter "myName". -// Field int `url:"myName"` -// -// // Field appears as URL parameter "myName" and the field is omitted if -// // its value is empty -// Field int `url:"myName,omitempty"` -// -// // Field appears as URL parameter "Field" (the default), but the field -// // is skipped if empty. Note the leading comma. -// Field int `url:",omitempty"` -// -// For encoding individual field values, the following type-dependent rules -// apply: -// -// Boolean values default to encoding as the strings "true" or "false". 
-// Including the "int" option signals that the field should be encoded as the -// strings "1" or "0". -// -// time.Time values default to encoding as RFC3339 timestamps. Including the -// "unix" option signals that the field should be encoded as a Unix time (see -// time.Unix()) -// -// Slice and Array values default to encoding as multiple URL values of the -// same name. Including the "comma" option signals that the field should be -// encoded as a single comma-delimited value. Including the "space" option -// similarly encodes the value as a single space-delimited string. Including -// the "brackets" option signals that the multiple URL values should have "[]" -// appended to the value name. -// -// Anonymous struct fields are usually encoded as if their inner exported -// fields were fields in the outer struct, subject to the standard Go -// visibility rules. An anonymous struct field with a name given in its URL -// tag is treated as having that name, rather than being anonymous. -// -// Non-nil pointer values are encoded as the value pointed to. -// -// Nested structs are encoded including parent fields in value names for -// scoping. e.g: -// -// "user[name]=acme&user[addr][postcode]=1234&user[addr][city]=SFO" -// -// All other values are encoded using their default string representation. -// -// Multiple fields that encode to the same URL parameter name will be included -// as multiple URL values of the same name. -func Values(v interface{}) (url.Values, error) { - values := make(url.Values) - val := reflect.ValueOf(v) - for val.Kind() == reflect.Ptr { - if val.IsNil() { - return values, nil - } - val = val.Elem() - } - - if v == nil { - return values, nil - } - - if val.Kind() != reflect.Struct { - return nil, fmt.Errorf("query: Values() expects struct input. Got %v", val.Kind()) - } - - err := reflectValue(values, val, "") - return values, err -} - -// reflectValue populates the values parameter from the struct fields in val. -// Embedded structs are followed recursively (using the rules defined in the -// Values function documentation) breadth-first. 
-func reflectValue(values url.Values, val reflect.Value, scope string) error { - var embedded []reflect.Value - - typ := val.Type() - for i := 0; i < typ.NumField(); i++ { - sf := typ.Field(i) - if sf.PkgPath != "" { // unexported - continue - } - - sv := val.Field(i) - tag := sf.Tag.Get("url") - if tag == "-" { - continue - } - name, opts := parseTag(tag) - if name == "" { - if sf.Anonymous && sv.Kind() == reflect.Struct { - // save embedded struct for later processing - embedded = append(embedded, sv) - continue - } - - name = sf.Name - } - - if scope != "" { - name = scope + "[" + name + "]" - } - - if opts.Contains("omitempty") && isEmptyValue(sv) { - continue - } - - if sv.Type().Implements(encoderType) { - if !reflect.Indirect(sv).IsValid() { - sv = reflect.New(sv.Type().Elem()) - } - - m := sv.Interface().(Encoder) - if err := m.EncodeValues(name, &values); err != nil { - return err - } - continue - } - - if sv.Kind() == reflect.Slice || sv.Kind() == reflect.Array { - var del byte - if opts.Contains("comma") { - del = ',' - } else if opts.Contains("space") { - del = ' ' - } else if opts.Contains("brackets") { - name = name + "[]" - } - - if del != 0 { - s := new(bytes.Buffer) - first := true - for i := 0; i < sv.Len(); i++ { - if first { - first = false - } else { - s.WriteByte(del) - } - s.WriteString(valueString(sv.Index(i), opts)) - } - values.Add(name, s.String()) - } else { - for i := 0; i < sv.Len(); i++ { - values.Add(name, valueString(sv.Index(i), opts)) - } - } - continue - } - - if sv.Type() == timeType { - values.Add(name, valueString(sv, opts)) - continue - } - - for sv.Kind() == reflect.Ptr { - if sv.IsNil() { - break - } - sv = sv.Elem() - } - - if sv.Kind() == reflect.Struct { - reflectValue(values, sv, name) - continue - } - - values.Add(name, valueString(sv, opts)) - } - - for _, f := range embedded { - if err := reflectValue(values, f, scope); err != nil { - return err - } - } - - return nil -} - -// valueString returns the string representation of a value. -func valueString(v reflect.Value, opts tagOptions) string { - for v.Kind() == reflect.Ptr { - if v.IsNil() { - return "" - } - v = v.Elem() - } - - if v.Kind() == reflect.Bool && opts.Contains("int") { - if v.Bool() { - return "1" - } - return "0" - } - - if v.Type() == timeType { - t := v.Interface().(time.Time) - if opts.Contains("unix") { - return strconv.FormatInt(t.Unix(), 10) - } - return t.Format(time.RFC3339) - } - - return fmt.Sprint(v.Interface()) -} - -// isEmptyValue checks if a value should be considered empty for the purposes -// of omitting fields with the "omitempty" option. -func isEmptyValue(v reflect.Value) bool { - switch v.Kind() { - case reflect.Array, reflect.Map, reflect.Slice, reflect.String: - return v.Len() == 0 - case reflect.Bool: - return !v.Bool() - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return v.Int() == 0 - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return v.Uint() == 0 - case reflect.Float32, reflect.Float64: - return v.Float() == 0 - case reflect.Interface, reflect.Ptr: - return v.IsNil() - } - - if v.Type() == timeType { - return v.Interface().(time.Time).IsZero() - } - - return false -} - -// tagOptions is the string following a comma in a struct field's "url" tag, or -// the empty string. It does not include the leading comma. -type tagOptions []string - -// parseTag splits a struct field's url tag into its name and comma-separated -// options. 
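To make the `url` tag options documented above concrete, here is a small hypothetical sketch (struct and field names invented for illustration) combining `omitempty`, the `comma` slice option, and the `unix` time option:

```go
package main

import (
	"fmt"
	"time"

	"github.com/google/go-querystring/query"
)

// ListOpts is a hypothetical options struct exercising the tag options above.
type ListOpts struct {
	State  string    `url:"state,omitempty"`
	Labels []string  `url:"labels,comma"`
	Since  time.Time `url:"since,unix"`
	Page   int       `url:"page,omitempty"`
}

func main() {
	opts := ListOpts{
		State:  "open",
		Labels: []string{"bug", "help wanted"},
		Since:  time.Unix(1257894000, 0),
	}
	v, err := query.Values(opts)
	if err != nil {
		panic(err)
	}
	// Page is zero so omitempty drops it, Labels collapse into one
	// comma-delimited value, and Since encodes as a Unix timestamp,
	// e.g. labels=bug%2Chelp+wanted&since=1257894000&state=open
	fmt.Println(v.Encode())
}
```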
-func parseTag(tag string) (string, tagOptions) { - s := strings.Split(tag, ",") - return s[0], s[1:] -} - -// Contains checks whether the tagOptions contains the specified option. -func (o tagOptions) Contains(option string) bool { - for _, s := range o { - if s == option { - return true - } - } - return false -} diff --git a/vendor/github.com/ianschenck/envflag/LICENSE b/vendor/github.com/ianschenck/envflag/LICENSE deleted file mode 100644 index dfdbd2a..0000000 --- a/vendor/github.com/ianschenck/envflag/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2013, Ian Schenck -All rights reserved. - -Redistribution and use in source and binary forms, with or without modification, -are permitted provided that the following conditions are met: - - Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. - - Redistributions in binary form must reproduce the above copyright notice, this - list of conditions and the following disclaimer in the documentation and/or - other materials provided with the distribution. - - Neither the name of Ian Schenck nor the names of its contributors - may be used to endorse or promote products derived from this - software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR -ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON -ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. \ No newline at end of file diff --git a/vendor/github.com/ianschenck/envflag/README.md b/vendor/github.com/ianschenck/envflag/README.md deleted file mode 100644 index 360daff..0000000 --- a/vendor/github.com/ianschenck/envflag/README.md +++ /dev/null @@ -1,37 +0,0 @@ -envflag -======= - -Golang flags, but bolted onto the environment rather than the command-line. - -Read the [godocs](http://godoc.org/github.com/ianschenck/envflag). - -Motivation -========== - -Some like the distinction that command-line flags control behavior -while environment variables configure. Also -[12-factor](http://12factor.net/) recommends the use of environment -variables for configuration. The interface of the golang flag package -is well designed and easy to use, and allows for other lists -(os.Environ() vs os.Args) to be parsed as flags. It makes sense then -to use the same interface, the same types, and the same parsing -(caveat: there is some ugly string hacking to make environment -variables look like flags) to the same ends. - -Differences -=========== - -Calling `flag.Parse()` will not parse environment flags. Calling -`envflag.Parse()` will not parse command-line flags. There is no good -reason to combine these two when the net savings is a single line in a -`func main()`. Furthermore, doing so would require users to accept a -precedence order of my choosing. - -The presence of an environment variable named `h` or `help` will -probably cause problems (print Usage and os.Exit(0)). 
Work around this -by defining those flags somewhere (and ignoring them). - -Before calling `Flagset.Parse` on `EnvironmentFlags`, the environment -variables being passed to `Parse` are trimmed down using -`Lookup`. This behavior is different from `flag.Parse` in that extra -environment variables are ignored (and won't crash `envflag.Parse`). diff --git a/vendor/github.com/ianschenck/envflag/envflag.go b/vendor/github.com/ianschenck/envflag/envflag.go deleted file mode 100644 index 128244c..0000000 --- a/vendor/github.com/ianschenck/envflag/envflag.go +++ /dev/null @@ -1,192 +0,0 @@ -// Copyright 2013 Ian Schenck. Use of this source code is governed by -// a license that can be found in the LICENSE file. - -/* - Package envflag adds environment variable flags to the flag package. - - Usage: - - Define flags using envflag.String(), Bool(), Int(), etc. This package - works nearly the same as the stdlib flag package. Parsing the - Environment flags is done by calling envflag.Parse() - - It will *not* attempt to parse any normally-defined command-line - flags. Command-line flags are explicitly left alone and separate. -*/ -package envflag - -import ( - "flag" - "fmt" - "os" - "strings" - "time" -) - -// VisitAll visits the environment flags in lexicographical order, -// calling fn for each. It visits all flags, even those not set. -func VisitAll(fn func(*flag.Flag)) { - EnvironmentFlags.VisitAll(fn) -} - -// Visit visits the environment flags in lexicographical order, -// calling fn for each. It visits only those flags that have been -// set. -func Visit(fn func(*flag.Flag)) { - EnvironmentFlags.Visit(fn) -} - -// Lookup returns the Flag structure of the named environment flag, -// returning nil if none exists. -func Lookup(name string) *flag.Flag { - return EnvironmentFlags.Lookup(name) -} - -// Set sets the value of the named environment flag. -func Set(name, value string) error { - return EnvironmentFlags.Set(name, value) -} - -// BoolVar defines a bool flag with specified name, default value, and -// usage string. The argument p points to a bool variable in which to -// store the value of the flag. -func BoolVar(p *bool, name string, value bool, usage string) { - EnvironmentFlags.BoolVar(p, name, value, usage) -} - -// Bool defines a bool flag with specified name, default value, and -// usage string. The return value is the address of a bool variable -// that stores the value of the flag. -func Bool(name string, value bool, usage string) *bool { - return EnvironmentFlags.Bool(name, value, usage) -} - -// IntVar defines an int flag with specified name, default value, and -// usage string. The argument p points to an int variable in which to -// store the value of the flag. -func IntVar(p *int, name string, value int, usage string) { - EnvironmentFlags.IntVar(p, name, value, usage) -} - -// Int defines an int flag with specified name, default value, and -// usage string. The return value is the address of an int variable -// that stores the value of the flag. -func Int(name string, value int, usage string) *int { - return EnvironmentFlags.Int(name, value, usage) -} - -// Int64Var defines an int64 flag with specified name, default value, -// and usage string. The argument p points to an int64 variable in -// which to store the value of the flag. -func Int64Var(p *int64, name string, value int64, usage string) { - EnvironmentFlags.Int64Var(p, name, value, usage) -} - -// Int64 defines an int64 flag with specified name, default value, and -// usage string. 
The return value is the address of an int64 variable -// that stores the value of the flag. -func Int64(name string, value int64, usage string) *int64 { - return EnvironmentFlags.Int64(name, value, usage) -} - -// UintVar defines a uint flag with specified name, default value, and -// usage string. The argument p points to a uint variable in which to -// store the value of the flag. -func UintVar(p *uint, name string, value uint, usage string) { - EnvironmentFlags.UintVar(p, name, value, usage) -} - -// Uint defines a uint flag with specified name, default value, and -// usage string. The return value is the address of a uint variable -// that stores the value of the flag. -func Uint(name string, value uint, usage string) *uint { - return EnvironmentFlags.Uint(name, value, usage) -} - -// Uint64Var defines a uint64 flag with specified name, default value, -// and usage string. The argument p points to a uint64 variable in -// which to store the value of the flag. -func Uint64Var(p *uint64, name string, value uint64, usage string) { - EnvironmentFlags.Uint64Var(p, name, value, usage) -} - -// Uint64 defines a uint64 flag with specified name, default value, -// and usage string. The return value is the address of a uint64 -// variable that stores the value of the flag. -func Uint64(name string, value uint64, usage string) *uint64 { - return EnvironmentFlags.Uint64(name, value, usage) -} - -// StringVar defines a string flag with specified name, default value, -// and usage string. The argument p points to a string variable in -// which to store the value of the flag. -func StringVar(p *string, name string, value string, usage string) { - EnvironmentFlags.StringVar(p, name, value, usage) -} - -// String defines a string flag with specified name, default value, -// and usage string. The return value is the address of a string -// variable that stores the value of the flag. -func String(name string, value string, usage string) *string { - return EnvironmentFlags.String(name, value, usage) -} - -// Float64Var defines a float64 flag with specified name, default -// value, and usage string. The argument p points to a float64 -// variable in which to store the value of the flag. -func Float64Var(p *float64, name string, value float64, usage string) { - EnvironmentFlags.Float64Var(p, name, value, usage) -} - -// Float64 defines a float64 flag with specified name, default value, -// and usage string. The return value is the address of a float64 -// variable that stores the value of the flag. -func Float64(name string, value float64, usage string) *float64 { - return EnvironmentFlags.Float64(name, value, usage) -} - -// DurationVar defines a time.Duration flag with specified name, -// default value, and usage string. The argument p points to a -// time.Duration variable in which to store the value of the flag. -func DurationVar(p *time.Duration, name string, value time.Duration, usage string) { - EnvironmentFlags.DurationVar(p, name, value, usage) -} - -// Duration defines a time.Duration flag with specified name, default -// value, and usage string. The return value is the address of a -// time.Duration variable that stores the value of the flag. -func Duration(name string, value time.Duration, usage string) *time.Duration { - return EnvironmentFlags.Duration(name, value, usage) -} - -// PrintDefaults prints to standard error the default values of all -// defined environment flags. -func PrintDefaults() { - EnvironmentFlags.PrintDefaults() -} - -// Parse parses the environment flags from os.Environ. 
Must be called -// after all flags are defined and before flags are accessed by the -// program. -func Parse() { - env := os.Environ() - // Clean up and "fake" some flag k/v pairs. - args := make([]string, 0, len(env)) - for _, value := range env { - if Lookup(value[:strings.Index(value, "=")]) == nil { - continue - } - args = append(args, fmt.Sprintf("-%s", value)) - } - EnvironmentFlags.Parse(args) -} - -// Parsed returns true if the environment flags have been parsed. -func Parsed() bool { - return EnvironmentFlags.Parsed() -} - -// EnvironmentFlags is the default set of environment flags, parsed -// from os.Environ(). The top-level functions such as BoolVar, Arg, -// and on are wrappers for the methods of EnvironmentFlags. -var EnvironmentFlags = flag.NewFlagSet("environment", flag.ExitOnError) diff --git a/vendor/github.com/joho/godotenv/LICENCE b/vendor/github.com/joho/godotenv/LICENCE deleted file mode 100644 index e7ddd51..0000000 --- a/vendor/github.com/joho/godotenv/LICENCE +++ /dev/null @@ -1,23 +0,0 @@ -Copyright (c) 2013 John Barton - -MIT License - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - diff --git a/vendor/github.com/joho/godotenv/README.md b/vendor/github.com/joho/godotenv/README.md deleted file mode 100644 index 05c47e6..0000000 --- a/vendor/github.com/joho/godotenv/README.md +++ /dev/null @@ -1,127 +0,0 @@ -# GoDotEnv [![wercker status](https://app.wercker.com/status/507594c2ec7e60f19403a568dfea0f78 "wercker status")](https://app.wercker.com/project/bykey/507594c2ec7e60f19403a568dfea0f78) - -A Go (golang) port of the Ruby dotenv project (which loads env vars from a .env file) - -From the original Library: - -> Storing configuration in the environment is one of the tenets of a twelve-factor app. Anything that is likely to change between deployment environments–such as resource handles for databases or credentials for external services–should be extracted from the code into environment variables. -> -> But it is not always practical to set environment variables on development machines or continuous integration servers where multiple projects are run. Dotenv load variables from a .env file into ENV when the environment is bootstrapped. - -It can be used as a library (for loading in env for your own daemons etc) or as a bin command. - -There is test coverage and CI for both linuxish and windows environments, but I make no guarantees about the bin version working on windows. 
- -## Installation - -As a library - -```shell -go get github.com/joho/godotenv -``` - -or if you want to use it as a bin command -```shell -go get github.com/joho/godotenv/cmd/godotenv -``` - -## Usage - -Add your application configuration to your `.env` file in the root of your project: - -```shell -S3_BUCKET=YOURS3BUCKET -SECRET_KEY=YOURSECRETKEYGOESHERE -``` - -Then in your Go app you can do something like - -```go -package main - -import ( - "github.com/joho/godotenv" - "log" - "os" -) - -func main() { - err := godotenv.Load() - if err != nil { - log.Fatal("Error loading .env file") - } - - s3Bucket := os.Getenv("S3_BUCKET") - secretKey := os.Getenv("SECRET_KEY") - - // now do something with s3 or whatever -} -``` - -If you're even lazier than that, you can just take advantage of the autoload package which will read in `.env` on import - -```go -import _ "github.com/joho/godotenv/autoload" -``` - -While `.env` in the project root is the default, you don't have to be constrained, both examples below are 100% legit - -```go -_ = godotenv.Load("somerandomfile") -_ = godotenv.Load("filenumberone.env", "filenumbertwo.env") -``` - -If you want to be really fancy with your env file you can do comments and exports (below is a valid env file) - -```shell -# I am a comment and that is OK -SOME_VAR=someval -FOO=BAR # comments at line end are OK too -export BAR=BAZ -``` - -Or finally you can do YAML(ish) style - -```yaml -FOO: bar -BAR: baz -``` - -as a final aside, if you don't want godotenv munging your env you can just get a map back instead - -```go -var myEnv map[string]string -myEnv, err := godotenv.Read() - -s3Bucket := myEnv["S3_BUCKET"] -``` - -### Command Mode - -Assuming you've installed the command as above and you've got `$GOPATH/bin` in your `$PATH` - -``` -godotenv -f /some/path/to/.env some_command with some args -``` - -If you don't specify `-f` it will fall back on the default of loading `.env` in `PWD` - -## Contributing - -Contributions are most welcome! The parser itself is pretty stupidly naive and I wouldn't be surprised if it breaks with edge cases. - -*code changes without tests will not be accepted* - -1. Fork it -2. Create your feature branch (`git checkout -b my-new-feature`) -3. Commit your changes (`git commit -am 'Added some feature'`) -4. Push to the branch (`git push origin my-new-feature`) -5. Create new Pull Request - -## CI - -Linux: [![wercker status](https://app.wercker.com/status/507594c2ec7e60f19403a568dfea0f78/m "wercker status")](https://app.wercker.com/project/bykey/507594c2ec7e60f19403a568dfea0f78) Windows: [![Build status](https://ci.appveyor.com/api/projects/status/9v40vnfvvgde64u4)](https://ci.appveyor.com/project/joho/godotenv) - -## Who? - -The original library [dotenv](https://github.com/bkeepers/dotenv) was written by [Brandon Keepers](http://opensoul.org/), and this port was done by [John Barton](http://whoisjohnbarton.com) based off the tests/fixtures in the original library. 
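One behavioural detail of the removed godotenv package worth keeping in mind (it is spelled out in the `godotenv.go` sources below): `Load` never overrides a variable that is already set, while `Overload` does. A minimal sketch, assuming a local `.env` file containing `GREETING=from-dotenv`:

```go
package main

import (
	"fmt"
	"log"
	"os"

	"github.com/joho/godotenv"
)

func main() {
	// Pretend the variable was already set by the real environment.
	os.Setenv("GREETING", "from-shell")

	// Load keeps the existing value; .env only fills in missing keys.
	if err := godotenv.Load(); err != nil {
		log.Fatal("Error loading .env file")
	}
	fmt.Println(os.Getenv("GREETING")) // from-shell

	// Overload forces the .env value over whatever is already set.
	if err := godotenv.Overload(); err != nil {
		log.Fatal(err)
	}
	fmt.Println(os.Getenv("GREETING")) // from-dotenv
}
```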
diff --git a/vendor/github.com/joho/godotenv/autoload/autoload.go b/vendor/github.com/joho/godotenv/autoload/autoload.go deleted file mode 100644 index fbcd2bd..0000000 --- a/vendor/github.com/joho/godotenv/autoload/autoload.go +++ /dev/null @@ -1,15 +0,0 @@ -package autoload - -/* - You can just read the .env file on import just by doing - - import _ "github.com/joho/godotenv/autoload" - - And bob's your mother's brother -*/ - -import "github.com/joho/godotenv" - -func init() { - godotenv.Load() -} diff --git a/vendor/github.com/joho/godotenv/godotenv.go b/vendor/github.com/joho/godotenv/godotenv.go deleted file mode 100644 index 94b2676..0000000 --- a/vendor/github.com/joho/godotenv/godotenv.go +++ /dev/null @@ -1,229 +0,0 @@ -// Package godotenv is a go port of the ruby dotenv library (https://github.com/bkeepers/dotenv) -// -// Examples/readme can be found on the github page at https://github.com/joho/godotenv -// -// The TL;DR is that you make a .env file that looks something like -// -// SOME_ENV_VAR=somevalue -// -// and then in your go code you can call -// -// godotenv.Load() -// -// and all the env vars declared in .env will be avaiable through os.Getenv("SOME_ENV_VAR") -package godotenv - -import ( - "bufio" - "errors" - "os" - "os/exec" - "strings" -) - -// Load will read your env file(s) and load them into ENV for this process. -// -// Call this function as close as possible to the start of your program (ideally in main) -// -// If you call Load without any args it will default to loading .env in the current path -// -// You can otherwise tell it which files to load (there can be more than one) like -// -// godotenv.Load("fileone", "filetwo") -// -// It's important to note that it WILL NOT OVERRIDE an env variable that already exists - consider the .env file to set dev vars or sensible defaults -func Load(filenames ...string) (err error) { - filenames = filenamesOrDefault(filenames) - - for _, filename := range filenames { - err = loadFile(filename, false) - if err != nil { - return // return early on a spazout - } - } - return -} - -// Overload will read your env file(s) and load them into ENV for this process. -// -// Call this function as close as possible to the start of your program (ideally in main) -// -// If you call Overload without any args it will default to loading .env in the current path -// -// You can otherwise tell it which files to load (there can be more than one) like -// -// godotenv.Overload("fileone", "filetwo") -// -// It's important to note this WILL OVERRIDE an env variable that already exists - consider the .env file to forcefilly set all vars. 
-func Overload(filenames ...string) (err error) { - filenames = filenamesOrDefault(filenames) - - for _, filename := range filenames { - err = loadFile(filename, true) - if err != nil { - return // return early on a spazout - } - } - return -} - -// Read all env (with same file loading semantics as Load) but return values as -// a map rather than automatically writing values into env -func Read(filenames ...string) (envMap map[string]string, err error) { - filenames = filenamesOrDefault(filenames) - envMap = make(map[string]string) - - for _, filename := range filenames { - individualEnvMap, individualErr := readFile(filename) - - if individualErr != nil { - err = individualErr - return // return early on a spazout - } - - for key, value := range individualEnvMap { - envMap[key] = value - } - } - - return -} - -// Exec loads env vars from the specified filenames (empty map falls back to default) -// then executes the cmd specified. -// -// Simply hooks up os.Stdin/err/out to the command and calls Run() -// -// If you want more fine grained control over your command it's recommended -// that you use `Load()` or `Read()` and the `os/exec` package yourself. -func Exec(filenames []string, cmd string, cmdArgs []string) error { - Load(filenames...) - - command := exec.Command(cmd, cmdArgs...) - command.Stdin = os.Stdin - command.Stdout = os.Stdout - command.Stderr = os.Stderr - return command.Run() -} - -func filenamesOrDefault(filenames []string) []string { - if len(filenames) == 0 { - return []string{".env"} - } - return filenames -} - -func loadFile(filename string, overload bool) error { - envMap, err := readFile(filename) - if err != nil { - return err - } - - for key, value := range envMap { - if os.Getenv(key) == "" || overload { - os.Setenv(key, value) - } - } - - return nil -} - -func readFile(filename string) (envMap map[string]string, err error) { - file, err := os.Open(filename) - if err != nil { - return - } - defer file.Close() - - envMap = make(map[string]string) - - var lines []string - scanner := bufio.NewScanner(file) - for scanner.Scan() { - lines = append(lines, scanner.Text()) - } - - for _, fullLine := range lines { - if !isIgnoredLine(fullLine) { - key, value, err := parseLine(fullLine) - - if err == nil { - envMap[key] = value - } - } - } - return -} - -func parseLine(line string) (key string, value string, err error) { - if len(line) == 0 { - err = errors.New("zero length string") - return - } - - // ditch the comments (but keep quoted hashes) - if strings.Contains(line, "#") { - segmentsBetweenHashes := strings.Split(line, "#") - quotesAreOpen := false - var segmentsToKeep []string - for _, segment := range segmentsBetweenHashes { - if strings.Count(segment, "\"") == 1 || strings.Count(segment, "'") == 1 { - if quotesAreOpen { - quotesAreOpen = false - segmentsToKeep = append(segmentsToKeep, segment) - } else { - quotesAreOpen = true - } - } - - if len(segmentsToKeep) == 0 || quotesAreOpen { - segmentsToKeep = append(segmentsToKeep, segment) - } - } - - line = strings.Join(segmentsToKeep, "#") - } - - // now split key from value - splitString := strings.SplitN(line, "=", 2) - - if len(splitString) != 2 { - // try yaml mode! 
- splitString = strings.SplitN(line, ":", 2) - } - - if len(splitString) != 2 { - err = errors.New("Can't separate key from value") - return - } - - // Parse the key - key = splitString[0] - if strings.HasPrefix(key, "export") { - key = strings.TrimPrefix(key, "export") - } - key = strings.Trim(key, " ") - - // Parse the value - value = splitString[1] - // trim - value = strings.Trim(value, " ") - - // check if we've got quoted values - if strings.Count(value, "\"") == 2 || strings.Count(value, "'") == 2 { - // pull the quotes off the edges - value = strings.Trim(value, "\"'") - - // expand quotes - value = strings.Replace(value, "\\\"", "\"", -1) - // expand newlines - value = strings.Replace(value, "\\n", "\n", -1) - } - - return -} - -func isIgnoredLine(line string) bool { - trimmedLine := strings.Trim(line, " \n\t") - return len(trimmedLine) == 0 || strings.HasPrefix(trimmedLine, "#") -} diff --git a/vendor/github.com/joho/godotenv/wercker.yml b/vendor/github.com/joho/godotenv/wercker.yml deleted file mode 100644 index c716ac9..0000000 --- a/vendor/github.com/joho/godotenv/wercker.yml +++ /dev/null @@ -1 +0,0 @@ -box: pjvds/golang diff --git a/vendor/github.com/koding/cache/README.md b/vendor/github.com/koding/cache/README.md deleted file mode 100644 index cc3f299..0000000 --- a/vendor/github.com/koding/cache/README.md +++ /dev/null @@ -1,32 +0,0 @@ -# Cache [![GoDoc](https://godoc.org/github.com/koding/cache?status.svg)](https://godoc.org/github.com/koding/cache) [![Build Status](https://travis-ci.org/koding/cache.svg?branch=master)](https://travis-ci.org/koding/cache) - - -Cache is a backend provider for common use cases - -## Install and Usage - -Install the package with: - -```bash -go get github.com/koding/cache -``` - -Import it with: - -```go -import "github.com/koding/cache" -``` - - -Example -```go - -// create a cache with 2 second TTL -cache := NewMemoryWithTTL(2 * time.Second) -// start garbage collection for expired keys -cache.StartGC(time.Millisecond * 10) -// set item -err := cache.Set("test_key", "test_data") -// get item -data, err := cache.Get("test_key") -``` diff --git a/vendor/github.com/koding/cache/cache.go b/vendor/github.com/koding/cache/cache.go deleted file mode 100644 index 73ca692..0000000 --- a/vendor/github.com/koding/cache/cache.go +++ /dev/null @@ -1,15 +0,0 @@ -package cache - -// Cache is the contract for all of the cache backends that are supported by -// this package -type Cache interface { - // Get returns single item from the backend if the requested item is not - // found, returns NotFound err - Get(key string) (interface{}, error) - - // Set sets a single item to the backend - Set(key string, value interface{}) error - - // Delete deletes single item from backend - Delete(key string) error -} diff --git a/vendor/github.com/koding/cache/doc.go b/vendor/github.com/koding/cache/doc.go deleted file mode 100644 index 2dcbc4b..0000000 --- a/vendor/github.com/koding/cache/doc.go +++ /dev/null @@ -1,9 +0,0 @@ -// Package cache provides basic caching mechanisms for Go(lang) projects. 
-// -// Currently supported caching algorithms: -// MemoryNoTS: provides a non-thread safe in-memory caching system -// Memory : provides a thread safe in-memory caching system, built on top of MemoryNoTS cache -// LRUNoTS : provides a non-thread safe, fixed size in-memory caching system, built on top of MemoryNoTS cache -// LRU : provides a thread safe, fixed size in-memory caching system, built on top of LRUNoTS cache -// MemoryTTL : provides a thread safe, expiring in-memory caching system, built on top of MemoryNoTS cache -package cache diff --git a/vendor/github.com/koding/cache/errors.go b/vendor/github.com/koding/cache/errors.go deleted file mode 100644 index 9ffe436..0000000 --- a/vendor/github.com/koding/cache/errors.go +++ /dev/null @@ -1,8 +0,0 @@ -package cache - -import "errors" - -var ( - // ErrNotFound holds exported `not found error` for not found items - ErrNotFound = errors.New("not found") -) diff --git a/vendor/github.com/koding/cache/lru.go b/vendor/github.com/koding/cache/lru.go deleted file mode 100644 index 2eefe5c..0000000 --- a/vendor/github.com/koding/cache/lru.go +++ /dev/null @@ -1,51 +0,0 @@ -package cache - -import "sync" - -// LRU Discards the least recently used items first. This algorithm -// requires keeping track of what was used when. -type LRU struct { - // Mutex is used for handling the concurrent - // read/write requests for cache - sync.Mutex - - // cache holds the all cache values - cache Cache -} - -// NewLRU creates a thread-safe LRU cache -func NewLRU(size int) Cache { - return &LRU{ - cache: NewLRUNoTS(size), - } -} - -// Get returns the value of a given key if it exists, every get item will be -// moved to the head of the linked list for keeping track of least recent used -// item -func (l *LRU) Get(key string) (interface{}, error) { - l.Lock() - defer l.Unlock() - - return l.cache.Get(key) -} - -// Set sets or overrides the given key with the given value, every set item will -// be moved or prepended to the head of the linked list for keeping track of -// least recent used item. When the cache is full, last item of the linked list -// will be evicted from the cache -func (l *LRU) Set(key string, val interface{}) error { - l.Lock() - defer l.Unlock() - - return l.cache.Set(key, val) -} - -// Delete deletes the given key-value pair from cache, this function doesnt -// return an error if item is not in the cache -func (l *LRU) Delete(key string) error { - l.Lock() - defer l.Unlock() - - return l.cache.Delete(key) -} diff --git a/vendor/github.com/koding/cache/lru_nots.go b/vendor/github.com/koding/cache/lru_nots.go deleted file mode 100644 index a6eebcd..0000000 --- a/vendor/github.com/koding/cache/lru_nots.go +++ /dev/null @@ -1,122 +0,0 @@ -package cache - -import ( - "container/list" -) - -// LRUNoTS Discards the least recently used items first. This algorithm -// requires keeping track of what was used when. -type LRUNoTS struct { - // list holds all items in a linked list, for finding the `tail` of the list - list *list.List - - // cache holds the all cache values - cache Cache - - // size holds the limit of the LRU cache - size int -} - -// kv is an helper struct for keeping track of the key for the list item. Only -// place where we need the key of a value is while removing the last item from -// linked list, for other cases, all operations alread have the key -type kv struct { - k string - v interface{} -} - -// NewLRUNoTS creates a new LRU cache struct for further cache operations. 
Size -// is used for limiting the upper bound of the cache -func NewLRUNoTS(size int) Cache { - if size < 1 { - panic("invalid cache size") - } - - return &LRUNoTS{ - list: list.New(), - cache: NewMemoryNoTS(), - size: size, - } -} - -// Get returns the value of a given key if it exists, every get item will be -// moved to the head of the linked list for keeping track of least recent used -// item -func (l *LRUNoTS) Get(key string) (interface{}, error) { - res, err := l.cache.Get(key) - if err != nil { - return nil, err - } - - elem := res.(*list.Element) - // move found item to the head - l.list.MoveToFront(elem) - - return elem.Value.(*kv).v, nil -} - -// Set sets or overrides the given key with the given value, every set item will -// be moved or prepended to the head of the linked list for keeping track of -// least recent used item. When the cache is full, last item of the linked list -// will be evicted from the cache -func (l *LRUNoTS) Set(key string, val interface{}) error { - // try to get item - res, err := l.cache.Get(key) - if err != nil && err != ErrNotFound { - return err - } - - var elem *list.Element - - // if elem is not in the cache, push it to front of the list - if err == ErrNotFound { - elem = l.list.PushFront(&kv{k: key, v: val}) - } else { - // if elem is in the cache, update the data and move it the front - elem = res.(*list.Element) - - // update the data - elem.Value.(*kv).v = val - - // item already exists, so move it to the front of the list - l.list.MoveToFront(elem) - } - - // in any case, set the item to the cache - err = l.cache.Set(key, elem) - if err != nil { - return err - } - - // if the cache is full, evict last entry - if l.list.Len() > l.size { - // remove last element from cache - return l.removeElem(l.list.Back()) - } - - return nil -} - -// Delete deletes the given key-value pair from cache, this function doesnt -// return an error if item is not in the cache -func (l *LRUNoTS) Delete(key string) error { - res, err := l.cache.Get(key) - if err != nil && err != ErrNotFound { - return err - } - - // item already deleted - if err == ErrNotFound { - // surpress not found errors - return nil - } - - elem := res.(*list.Element) - - return l.removeElem(elem) -} - -func (l *LRUNoTS) removeElem(e *list.Element) error { - l.list.Remove(e) - return l.cache.Delete(e.Value.(*kv).k) -} diff --git a/vendor/github.com/koding/cache/memory.go b/vendor/github.com/koding/cache/memory.go deleted file mode 100644 index 3a6eb86..0000000 --- a/vendor/github.com/koding/cache/memory.go +++ /dev/null @@ -1,46 +0,0 @@ -package cache - -import "sync" - -// Memory provides an inmemory caching mechanism -type Memory struct { - // Mutex is used for handling the concurrent - // read/write requests for cache - sync.Mutex - - // cache holds the cache data - cache Cache -} - -// NewMemory creates an inmemory cache system -// Which everytime will return the true value about a cache hit -func NewMemory() Cache { - return &Memory{ - cache: NewMemoryNoTS(), - } -} - -// Get returns the value of a given key if it exists -func (r *Memory) Get(key string) (interface{}, error) { - r.Lock() - defer r.Unlock() - - return r.cache.Get(key) -} - -// Set sets a value to the cache or overrides existing one with the given value -func (r *Memory) Set(key string, value interface{}) error { - r.Lock() - defer r.Unlock() - - return r.cache.Set(key, value) -} - -// Delete deletes the given key-value pair from cache, this function doesnt -// return an error if item is not in the cache -func (r *Memory) 
Delete(key string) error { - r.Lock() - defer r.Unlock() - - return r.cache.Delete(key) -} diff --git a/vendor/github.com/koding/cache/memory_nots.go b/vendor/github.com/koding/cache/memory_nots.go deleted file mode 100644 index ac6c523..0000000 --- a/vendor/github.com/koding/cache/memory_nots.go +++ /dev/null @@ -1,39 +0,0 @@ -package cache - -// MemoryNoTS provides a non-thread safe caching mechanism -type MemoryNoTS struct { - // items holds the cache data - items map[string]interface{} -} - -// NewMemoryNoTS creates MemoryNoTS struct -func NewMemoryNoTS() *MemoryNoTS { - return &MemoryNoTS{ - items: map[string]interface{}{}, - } -} - -// Get returns a value of a given key if it exists -// and valid for the time being -func (r *MemoryNoTS) Get(key string) (interface{}, error) { - value, ok := r.items[key] - if !ok { - return nil, ErrNotFound - } - - return value, nil -} - -// Set will persist a value to the cache or -// override existing one with the new one -func (r *MemoryNoTS) Set(key string, value interface{}) error { - r.items[key] = value - return nil -} - -// Delete deletes a given key, it doesnt return error if the item is not in the -// system -func (r *MemoryNoTS) Delete(key string) error { - delete(r.items, key) - return nil -} diff --git a/vendor/github.com/koding/cache/memory_ttl.go b/vendor/github.com/koding/cache/memory_ttl.go deleted file mode 100644 index 2b9a024..0000000 --- a/vendor/github.com/koding/cache/memory_ttl.go +++ /dev/null @@ -1,111 +0,0 @@ -package cache - -import ( - "sync" - "time" -) - -var zeroTTL = time.Duration(0) - -// MemoryTTL holds the required variables to compose an in memory cache system -// which also provides expiring key mechanism -type MemoryTTL struct { - // Mutex is used for handling the concurrent - // read/write requests for cache - sync.Mutex - - // cache holds the cache data - cache *MemoryNoTS - - // setAts holds the time that related item's set at - setAts map[string]time.Time - - // ttl is a duration for a cache key to expire - ttl time.Duration - - // gcInterval is a duration for garbage collection - gcInterval time.Duration -} - -// NewMemoryWithTTL creates an inmemory cache system -// Which everytime will return the true values about a cache hit -// and never will leak memory -// ttl is used for expiration of a key from cache -func NewMemoryWithTTL(ttl time.Duration) *MemoryTTL { - return &MemoryTTL{ - cache: NewMemoryNoTS(), - setAts: map[string]time.Time{}, - ttl: ttl, - } -} - -// StartGC starts the garbage collection process in a go routine -func (r *MemoryTTL) StartGC(gcInterval time.Duration) { - r.gcInterval = gcInterval - go func() { - for _ = range time.Tick(gcInterval) { - for key := range r.cache.items { - if !r.isValid(key) { - r.Delete(key) - } - } - } - }() -} - -// Get returns a value of a given key if it exists -// and valid for the time being -func (r *MemoryTTL) Get(key string) (interface{}, error) { - r.Lock() - defer r.Unlock() - - if !r.isValid(key) { - r.delete(key) - return nil, ErrNotFound - } - - value, err := r.cache.Get(key) - if err != nil { - return nil, err - } - - return value, nil -} - -// Set will persist a value to the cache or -// override existing one with the new one -func (r *MemoryTTL) Set(key string, value interface{}) error { - r.Lock() - defer r.Unlock() - - r.cache.Set(key, value) - r.setAts[key] = time.Now() - return nil -} - -// Delete deletes a given key if exists -func (r *MemoryTTL) Delete(key string) error { - r.Lock() - defer r.Unlock() - - r.delete(key) - return nil -} - -func 
(r *MemoryTTL) delete(key string) { - r.cache.Delete(key) - delete(r.setAts, key) -} - -func (r *MemoryTTL) isValid(key string) bool { - setAt, ok := r.setAts[key] - if !ok { - return false - } - - if r.ttl == zeroTTL { - return true - } - - return setAt.Add(r.ttl).After(time.Now()) -} diff --git a/vendor/github.com/manucorporat/sse/LICENSE b/vendor/github.com/manucorporat/sse/LICENSE deleted file mode 100644 index 1ff7f37..0000000 --- a/vendor/github.com/manucorporat/sse/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2014 Manuel Martínez-Almeida - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. diff --git a/vendor/github.com/manucorporat/sse/README.md b/vendor/github.com/manucorporat/sse/README.md deleted file mode 100644 index 4e1cf0e..0000000 --- a/vendor/github.com/manucorporat/sse/README.md +++ /dev/null @@ -1,54 +0,0 @@ -#Server-Sent Events [![GoDoc](https://godoc.org/github.com/manucorporat/sse?status.svg)](https://godoc.org/github.com/manucorporat/sse) [![Build Status](https://travis-ci.org/manucorporat/sse.svg)](https://travis-ci.org/manucorporat/sse) - -Server-sent events (SSE) is a technology where a browser receives automatic updates from a server via HTTP connection. The Server-Sent Events EventSource API is [standardized as part of HTML5[1] by the W3C](http://www.w3.org/TR/2009/WD-eventsource-20091029/). - -- [Real world demostration using Gin](http://sse.getgin.io/) -- [Read this great SSE introduction by the HTML5Rocks guys](http://www.html5rocks.com/en/tutorials/eventsource/basics/) -- [Browser support](http://caniuse.com/#feat=eventsource) - -##Sample code - -```go -import "github.com/manucorporat/sse" - -func httpHandler(w http.ResponseWriter, req *http.Request) { - // data can be a primitive like a string, an integer or a float - sse.Encode(w, sse.Event{ - Event: "message", - Data: "some data\nmore data", - }) - - // also a complex type, like a map, a struct or a slice - sse.Encode(w, sse.Event{ - Id: "124", - Event: "message", - Data: map[string]interface{}{ - "user": "manu", - "date": time.Now().Unix(), - "content": "hi!", - }, - }) -} -``` -``` -event: message -data: some data\\nmore data - -id: 124 -event: message -data: {"content":"hi!","date":1431540810,"user":"manu"} - -``` - -##Content-Type - -```go -fmt.Println(sse.ContentType) -``` -``` -text/event-stream -``` - -##Decoding support - -There is a client-side implementation of SSE coming soon. 
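The removed `sse-decoder.go` below is the decoding counterpart to the `Encode` sample above. A minimal sketch of how it would be consumed; note that the handling of the `data` field continues past the excerpt shown here, so the expected output is an assumption based on the SSE specification the decoder follows:

```go
package main

import (
	"fmt"
	"strings"

	"github.com/manucorporat/sse"
)

func main() {
	// A stream in the same shape as the encoded example above.
	stream := "id: 124\nevent: message\ndata: hi!\n\n"

	events, err := sse.Decode(strings.NewReader(stream))
	if err != nil {
		panic(err)
	}
	for _, ev := range events {
		// Expected (per the spec-following decoder): id=124 event=message data=hi!
		fmt.Printf("id=%s event=%s data=%v\n", ev.Id, ev.Event, ev.Data)
	}
}
```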
\ No newline at end of file diff --git a/vendor/github.com/manucorporat/sse/sse-decoder.go b/vendor/github.com/manucorporat/sse/sse-decoder.go deleted file mode 100644 index e1afc6f..0000000 --- a/vendor/github.com/manucorporat/sse/sse-decoder.go +++ /dev/null @@ -1,115 +0,0 @@ -// Copyright 2014 Manu Martinez-Almeida. All rights reserved. -// Use of this source code is governed by a MIT style -// license that can be found in the LICENSE file. - -package sse - -import ( - "bytes" - "io" - "io/ioutil" -) - -type decoder struct { - events []Event -} - -func Decode(r io.Reader) ([]Event, error) { - var dec decoder - return dec.decode(r) -} - -func (d *decoder) dispatchEvent(event Event, data string) { - dataLength := len(data) - if dataLength > 0 { - //If the data buffer's last character is a U+000A LINE FEED (LF) character, then remove the last character from the data buffer. - data = data[:dataLength-1] - dataLength-- - } - if dataLength == 0 && event.Event == "" { - return - } - if event.Event == "" { - event.Event = "message" - } - event.Data = data - d.events = append(d.events, event) -} - -func (d *decoder) decode(r io.Reader) ([]Event, error) { - buf, err := ioutil.ReadAll(r) - if err != nil { - return nil, err - } - - var currentEvent Event - var dataBuffer *bytes.Buffer = new(bytes.Buffer) - // TODO (and unit tests) - // Lines must be separated by either a U+000D CARRIAGE RETURN U+000A LINE FEED (CRLF) character pair, - // a single U+000A LINE FEED (LF) character, - // or a single U+000D CARRIAGE RETURN (CR) character. - lines := bytes.Split(buf, []byte{'\n'}) - for _, line := range lines { - if len(line) == 0 { - // If the line is empty (a blank line). Dispatch the event. - d.dispatchEvent(currentEvent, dataBuffer.String()) - - // reset current event and data buffer - currentEvent = Event{} - dataBuffer.Reset() - continue - } - if line[0] == byte(':') { - // If the line starts with a U+003A COLON character (:), ignore the line. - continue - } - - var field, value []byte - colonIndex := bytes.IndexRune(line, ':') - if colonIndex != -1 { - // If the line contains a U+003A COLON character character (:) - // Collect the characters on the line before the first U+003A COLON character (:), - // and let field be that string. - field = line[:colonIndex] - // Collect the characters on the line after the first U+003A COLON character (:), - // and let value be that string. - value = line[colonIndex+1:] - // If value starts with a single U+0020 SPACE character, remove it from value. - if len(value) > 0 && value[0] == ' ' { - value = value[1:] - } - } else { - // Otherwise, the string is not empty but does not contain a U+003A COLON character character (:) - // Use the whole line as the field name, and the empty string as the field value. - field = line - value = []byte{} - } - // The steps to process the field given a field name and a field value depend on the field name, - // as given in the following list. Field names must be compared literally, - // with no case folding performed. - switch string(field) { - case "event": - // Set the event name buffer to field value. - currentEvent.Event = string(value) - case "id": - // Set the event stream's last event ID to the field value. - currentEvent.Id = string(value) - case "retry": - // If the field value consists of only characters in the range U+0030 DIGIT ZERO (0) to U+0039 DIGIT NINE (9), - // then interpret the field value as an integer in base ten, and set the event stream's reconnection time to that integer. - // Otherwise, ignore the field. 
- currentEvent.Id = string(value) - case "data": - // Append the field value to the data buffer, - dataBuffer.Write(value) - // then append a single U+000A LINE FEED (LF) character to the data buffer. - dataBuffer.WriteString("\n") - default: - //Otherwise. The field is ignored. - continue - } - } - d.dispatchEvent(currentEvent, dataBuffer.String()) - - return d.events, nil -} diff --git a/vendor/github.com/manucorporat/sse/sse-encoder.go b/vendor/github.com/manucorporat/sse/sse-encoder.go deleted file mode 100644 index 19a385e..0000000 --- a/vendor/github.com/manucorporat/sse/sse-encoder.go +++ /dev/null @@ -1,106 +0,0 @@ -// Copyright 2014 Manu Martinez-Almeida. All rights reserved. -// Use of this source code is governed by a MIT style -// license that can be found in the LICENSE file. - -package sse - -import ( - "encoding/json" - "fmt" - "io" - "net/http" - "reflect" - "strconv" - "strings" -) - -// Server-Sent Events -// W3C Working Draft 29 October 2009 -// http://www.w3.org/TR/2009/WD-eventsource-20091029/ - -const ContentType = "text/event-stream" - -var contentType = []string{ContentType} -var noCache = []string{"no-cache"} - -var fieldReplacer = strings.NewReplacer( - "\n", "\\n", - "\r", "\\r") - -var dataReplacer = strings.NewReplacer( - "\n", "\ndata:", - "\r", "\\r") - -type Event struct { - Event string - Id string - Retry uint - Data interface{} -} - -func Encode(writer io.Writer, event Event) error { - w := checkWriter(writer) - writeId(w, event.Id) - writeEvent(w, event.Event) - writeRetry(w, event.Retry) - return writeData(w, event.Data) -} - -func writeId(w stringWriter, id string) { - if len(id) > 0 { - w.WriteString("id:") - fieldReplacer.WriteString(w, id) - w.WriteString("\n") - } -} - -func writeEvent(w stringWriter, event string) { - if len(event) > 0 { - w.WriteString("event:") - fieldReplacer.WriteString(w, event) - w.WriteString("\n") - } -} - -func writeRetry(w stringWriter, retry uint) { - if retry > 0 { - w.WriteString("retry:") - w.WriteString(strconv.FormatUint(uint64(retry), 10)) - w.WriteString("\n") - } -} - -func writeData(w stringWriter, data interface{}) error { - w.WriteString("data:") - switch kindOfData(data) { - case reflect.Struct, reflect.Slice, reflect.Map: - err := json.NewEncoder(w).Encode(data) - if err != nil { - return err - } - w.WriteString("\n") - default: - dataReplacer.WriteString(w, fmt.Sprint(data)) - w.WriteString("\n\n") - } - return nil -} - -func (r Event) Render(w http.ResponseWriter) error { - header := w.Header() - header["Content-Type"] = contentType - - if _, exist := header["Cache-Control"]; !exist { - header["Cache-Control"] = noCache - } - return Encode(w, r) -} - -func kindOfData(data interface{}) reflect.Kind { - value := reflect.ValueOf(data) - valueType := value.Kind() - if valueType == reflect.Ptr { - valueType = value.Elem().Kind() - } - return valueType -} diff --git a/vendor/github.com/manucorporat/sse/writer.go b/vendor/github.com/manucorporat/sse/writer.go deleted file mode 100644 index 6f9806c..0000000 --- a/vendor/github.com/manucorporat/sse/writer.go +++ /dev/null @@ -1,24 +0,0 @@ -package sse - -import "io" - -type stringWriter interface { - io.Writer - WriteString(string) (int, error) -} - -type stringWrapper struct { - io.Writer -} - -func (w stringWrapper) WriteString(str string) (int, error) { - return w.Writer.Write([]byte(str)) -} - -func checkWriter(writer io.Writer) stringWriter { - if w, ok := writer.(stringWriter); ok { - return w - } else { - return stringWrapper{writer} - } -} diff --git 
a/vendor/github.com/mattn/go-colorable/README.md b/vendor/github.com/mattn/go-colorable/README.md deleted file mode 100644 index e84226a..0000000 --- a/vendor/github.com/mattn/go-colorable/README.md +++ /dev/null @@ -1,43 +0,0 @@ -# go-colorable - -Colorable writer for windows. - -For example, most of logger packages doesn't show colors on windows. (I know we can do it with ansicon. But I don't want.) -This package is possible to handle escape sequence for ansi color on windows. - -## Too Bad! - -![](https://raw.githubusercontent.com/mattn/go-colorable/gh-pages/bad.png) - - -## So Good! - -![](https://raw.githubusercontent.com/mattn/go-colorable/gh-pages/good.png) - -## Usage - -```go -logrus.SetFormatter(&logrus.TextFormatter{ForceColors: true}) -logrus.SetOutput(colorable.NewColorableStdout()) - -logrus.Info("succeeded") -logrus.Warn("not correct") -logrus.Error("something error") -logrus.Fatal("panic") -``` - -You can compile above code on non-windows OSs. - -## Installation - -``` -$ go get github.com/mattn/go-colorable -``` - -# License - -MIT - -# Author - -Yasuhiro Matsumoto (a.k.a mattn) diff --git a/vendor/github.com/mattn/go-colorable/colorable_others.go b/vendor/github.com/mattn/go-colorable/colorable_others.go deleted file mode 100644 index 219f02f..0000000 --- a/vendor/github.com/mattn/go-colorable/colorable_others.go +++ /dev/null @@ -1,16 +0,0 @@ -// +build !windows - -package colorable - -import ( - "io" - "os" -) - -func NewColorableStdout() io.Writer { - return os.Stdout -} - -func NewColorableStderr() io.Writer { - return os.Stderr -} diff --git a/vendor/github.com/mattn/go-colorable/colorable_windows.go b/vendor/github.com/mattn/go-colorable/colorable_windows.go deleted file mode 100644 index e63eb38..0000000 --- a/vendor/github.com/mattn/go-colorable/colorable_windows.go +++ /dev/null @@ -1,665 +0,0 @@ -package colorable - -import ( - "bytes" - "fmt" - "io" - "math" - "os" - "strconv" - "strings" - "syscall" - "unsafe" - - "github.com/mattn/go-isatty" -) - -const ( - foregroundBlue = 0x1 - foregroundGreen = 0x2 - foregroundRed = 0x4 - foregroundIntensity = 0x8 - foregroundMask = (foregroundRed | foregroundBlue | foregroundGreen | foregroundIntensity) - backgroundBlue = 0x10 - backgroundGreen = 0x20 - backgroundRed = 0x40 - backgroundIntensity = 0x80 - backgroundMask = (backgroundRed | backgroundBlue | backgroundGreen | backgroundIntensity) -) - -type wchar uint16 -type short int16 -type dword uint32 -type word uint16 - -type coord struct { - x short - y short -} - -type smallRect struct { - left short - top short - right short - bottom short -} - -type consoleScreenBufferInfo struct { - size coord - cursorPosition coord - attributes word - window smallRect - maximumWindowSize coord -} - -var ( - kernel32 = syscall.NewLazyDLL("kernel32.dll") - procGetConsoleScreenBufferInfo = kernel32.NewProc("GetConsoleScreenBufferInfo") - procSetConsoleTextAttribute = kernel32.NewProc("SetConsoleTextAttribute") -) - -type Writer struct { - out io.Writer - handle syscall.Handle - lastbuf bytes.Buffer - oldattr word -} - -func NewColorableStdout() io.Writer { - var csbi consoleScreenBufferInfo - out := os.Stdout - if !isatty.IsTerminal(out.Fd()) { - return out - } - handle := syscall.Handle(out.Fd()) - procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) - return &Writer{out: out, handle: handle, oldattr: csbi.attributes} -} - -func NewColorableStderr() io.Writer { - var csbi consoleScreenBufferInfo - out := os.Stderr - if !isatty.IsTerminal(out.Fd()) { - 
return out - } - handle := syscall.Handle(out.Fd()) - procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) - return &Writer{out: out, handle: handle, oldattr: csbi.attributes} -} - -var color256 = map[int]int{ - 0: 0x000000, - 1: 0x800000, - 2: 0x008000, - 3: 0x808000, - 4: 0x000080, - 5: 0x800080, - 6: 0x008080, - 7: 0xc0c0c0, - 8: 0x808080, - 9: 0xff0000, - 10: 0x00ff00, - 11: 0xffff00, - 12: 0x0000ff, - 13: 0xff00ff, - 14: 0x00ffff, - 15: 0xffffff, - 16: 0x000000, - 17: 0x00005f, - 18: 0x000087, - 19: 0x0000af, - 20: 0x0000d7, - 21: 0x0000ff, - 22: 0x005f00, - 23: 0x005f5f, - 24: 0x005f87, - 25: 0x005faf, - 26: 0x005fd7, - 27: 0x005fff, - 28: 0x008700, - 29: 0x00875f, - 30: 0x008787, - 31: 0x0087af, - 32: 0x0087d7, - 33: 0x0087ff, - 34: 0x00af00, - 35: 0x00af5f, - 36: 0x00af87, - 37: 0x00afaf, - 38: 0x00afd7, - 39: 0x00afff, - 40: 0x00d700, - 41: 0x00d75f, - 42: 0x00d787, - 43: 0x00d7af, - 44: 0x00d7d7, - 45: 0x00d7ff, - 46: 0x00ff00, - 47: 0x00ff5f, - 48: 0x00ff87, - 49: 0x00ffaf, - 50: 0x00ffd7, - 51: 0x00ffff, - 52: 0x5f0000, - 53: 0x5f005f, - 54: 0x5f0087, - 55: 0x5f00af, - 56: 0x5f00d7, - 57: 0x5f00ff, - 58: 0x5f5f00, - 59: 0x5f5f5f, - 60: 0x5f5f87, - 61: 0x5f5faf, - 62: 0x5f5fd7, - 63: 0x5f5fff, - 64: 0x5f8700, - 65: 0x5f875f, - 66: 0x5f8787, - 67: 0x5f87af, - 68: 0x5f87d7, - 69: 0x5f87ff, - 70: 0x5faf00, - 71: 0x5faf5f, - 72: 0x5faf87, - 73: 0x5fafaf, - 74: 0x5fafd7, - 75: 0x5fafff, - 76: 0x5fd700, - 77: 0x5fd75f, - 78: 0x5fd787, - 79: 0x5fd7af, - 80: 0x5fd7d7, - 81: 0x5fd7ff, - 82: 0x5fff00, - 83: 0x5fff5f, - 84: 0x5fff87, - 85: 0x5fffaf, - 86: 0x5fffd7, - 87: 0x5fffff, - 88: 0x870000, - 89: 0x87005f, - 90: 0x870087, - 91: 0x8700af, - 92: 0x8700d7, - 93: 0x8700ff, - 94: 0x875f00, - 95: 0x875f5f, - 96: 0x875f87, - 97: 0x875faf, - 98: 0x875fd7, - 99: 0x875fff, - 100: 0x878700, - 101: 0x87875f, - 102: 0x878787, - 103: 0x8787af, - 104: 0x8787d7, - 105: 0x8787ff, - 106: 0x87af00, - 107: 0x87af5f, - 108: 0x87af87, - 109: 0x87afaf, - 110: 0x87afd7, - 111: 0x87afff, - 112: 0x87d700, - 113: 0x87d75f, - 114: 0x87d787, - 115: 0x87d7af, - 116: 0x87d7d7, - 117: 0x87d7ff, - 118: 0x87ff00, - 119: 0x87ff5f, - 120: 0x87ff87, - 121: 0x87ffaf, - 122: 0x87ffd7, - 123: 0x87ffff, - 124: 0xaf0000, - 125: 0xaf005f, - 126: 0xaf0087, - 127: 0xaf00af, - 128: 0xaf00d7, - 129: 0xaf00ff, - 130: 0xaf5f00, - 131: 0xaf5f5f, - 132: 0xaf5f87, - 133: 0xaf5faf, - 134: 0xaf5fd7, - 135: 0xaf5fff, - 136: 0xaf8700, - 137: 0xaf875f, - 138: 0xaf8787, - 139: 0xaf87af, - 140: 0xaf87d7, - 141: 0xaf87ff, - 142: 0xafaf00, - 143: 0xafaf5f, - 144: 0xafaf87, - 145: 0xafafaf, - 146: 0xafafd7, - 147: 0xafafff, - 148: 0xafd700, - 149: 0xafd75f, - 150: 0xafd787, - 151: 0xafd7af, - 152: 0xafd7d7, - 153: 0xafd7ff, - 154: 0xafff00, - 155: 0xafff5f, - 156: 0xafff87, - 157: 0xafffaf, - 158: 0xafffd7, - 159: 0xafffff, - 160: 0xd70000, - 161: 0xd7005f, - 162: 0xd70087, - 163: 0xd700af, - 164: 0xd700d7, - 165: 0xd700ff, - 166: 0xd75f00, - 167: 0xd75f5f, - 168: 0xd75f87, - 169: 0xd75faf, - 170: 0xd75fd7, - 171: 0xd75fff, - 172: 0xd78700, - 173: 0xd7875f, - 174: 0xd78787, - 175: 0xd787af, - 176: 0xd787d7, - 177: 0xd787ff, - 178: 0xd7af00, - 179: 0xd7af5f, - 180: 0xd7af87, - 181: 0xd7afaf, - 182: 0xd7afd7, - 183: 0xd7afff, - 184: 0xd7d700, - 185: 0xd7d75f, - 186: 0xd7d787, - 187: 0xd7d7af, - 188: 0xd7d7d7, - 189: 0xd7d7ff, - 190: 0xd7ff00, - 191: 0xd7ff5f, - 192: 0xd7ff87, - 193: 0xd7ffaf, - 194: 0xd7ffd7, - 195: 0xd7ffff, - 196: 0xff0000, - 197: 0xff005f, - 198: 0xff0087, - 199: 0xff00af, - 200: 0xff00d7, - 
201: 0xff00ff, - 202: 0xff5f00, - 203: 0xff5f5f, - 204: 0xff5f87, - 205: 0xff5faf, - 206: 0xff5fd7, - 207: 0xff5fff, - 208: 0xff8700, - 209: 0xff875f, - 210: 0xff8787, - 211: 0xff87af, - 212: 0xff87d7, - 213: 0xff87ff, - 214: 0xffaf00, - 215: 0xffaf5f, - 216: 0xffaf87, - 217: 0xffafaf, - 218: 0xffafd7, - 219: 0xffafff, - 220: 0xffd700, - 221: 0xffd75f, - 222: 0xffd787, - 223: 0xffd7af, - 224: 0xffd7d7, - 225: 0xffd7ff, - 226: 0xffff00, - 227: 0xffff5f, - 228: 0xffff87, - 229: 0xffffaf, - 230: 0xffffd7, - 231: 0xffffff, - 232: 0x080808, - 233: 0x121212, - 234: 0x1c1c1c, - 235: 0x262626, - 236: 0x303030, - 237: 0x3a3a3a, - 238: 0x444444, - 239: 0x4e4e4e, - 240: 0x585858, - 241: 0x626262, - 242: 0x6c6c6c, - 243: 0x767676, - 244: 0x808080, - 245: 0x8a8a8a, - 246: 0x949494, - 247: 0x9e9e9e, - 248: 0xa8a8a8, - 249: 0xb2b2b2, - 250: 0xbcbcbc, - 251: 0xc6c6c6, - 252: 0xd0d0d0, - 253: 0xdadada, - 254: 0xe4e4e4, - 255: 0xeeeeee, -} - -func (w *Writer) Write(data []byte) (n int, err error) { - var csbi consoleScreenBufferInfo - procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) - - er := bytes.NewBuffer(data) -loop: - for { - r1, _, err := procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) - if r1 == 0 { - break loop - } - - c1, _, err := er.ReadRune() - if err != nil { - break loop - } - if c1 != 0x1b { - fmt.Fprint(w.out, string(c1)) - continue - } - c2, _, err := er.ReadRune() - if err != nil { - w.lastbuf.WriteRune(c1) - break loop - } - if c2 != 0x5b { - w.lastbuf.WriteRune(c1) - w.lastbuf.WriteRune(c2) - continue - } - - var buf bytes.Buffer - var m rune - for { - c, _, err := er.ReadRune() - if err != nil { - w.lastbuf.WriteRune(c1) - w.lastbuf.WriteRune(c2) - w.lastbuf.Write(buf.Bytes()) - break loop - } - if ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z') || c == '@' { - m = c - break - } - buf.Write([]byte(string(c))) - } - - switch m { - case 'm': - attr := csbi.attributes - cs := buf.String() - if cs == "" { - procSetConsoleTextAttribute.Call(uintptr(w.handle), uintptr(w.oldattr)) - continue - } - token := strings.Split(cs, ";") - for i := 0; i < len(token); i += 1 { - ns := token[i] - if n, err = strconv.Atoi(ns); err == nil { - switch { - case n == 0 || n == 100: - attr = w.oldattr - case 1 <= n && n <= 5: - attr |= foregroundIntensity - case n == 7: - attr = ((attr & foregroundMask) << 4) | ((attr & backgroundMask) >> 4) - case 22 == n || n == 25 || n == 25: - attr |= foregroundIntensity - case n == 27: - attr = ((attr & foregroundMask) << 4) | ((attr & backgroundMask) >> 4) - case 30 <= n && n <= 37: - attr = (attr & backgroundMask) - if (n-30)&1 != 0 { - attr |= foregroundRed - } - if (n-30)&2 != 0 { - attr |= foregroundGreen - } - if (n-30)&4 != 0 { - attr |= foregroundBlue - } - case n == 38: // set foreground color. - if i < len(token)-2 && (token[i+1] == "5" || token[i+1] == "05") { - if n256, err := strconv.Atoi(token[i+2]); err == nil { - if n256foreAttr == nil { - n256setup() - } - attr &= backgroundMask - attr |= n256foreAttr[n256] - i += 2 - } - } else { - attr = attr & (w.oldattr & backgroundMask) - } - case n == 39: // reset foreground color. - attr &= backgroundMask - attr |= w.oldattr & foregroundMask - case 40 <= n && n <= 47: - attr = (attr & foregroundMask) - if (n-40)&1 != 0 { - attr |= backgroundRed - } - if (n-40)&2 != 0 { - attr |= backgroundGreen - } - if (n-40)&4 != 0 { - attr |= backgroundBlue - } - case n == 48: // set background color. 
- if i < len(token)-2 && token[i+1] == "5" { - if n256, err := strconv.Atoi(token[i+2]); err == nil { - if n256backAttr == nil { - n256setup() - } - attr &= foregroundMask - attr |= n256backAttr[n256] - i += 2 - } - } else { - attr = attr & (w.oldattr & foregroundMask) - } - case n == 49: // reset foreground color. - attr &= foregroundMask - attr |= w.oldattr & backgroundMask - case 90 <= n && n <= 97: - attr = (attr & backgroundMask) - attr |= foregroundIntensity - if (n-90)&1 != 0 { - attr |= foregroundRed - } - if (n-90)&2 != 0 { - attr |= foregroundGreen - } - if (n-90)&4 != 0 { - attr |= foregroundBlue - } - case 100 <= n && n <= 107: - attr = (attr & foregroundMask) - attr |= backgroundIntensity - if (n-100)&1 != 0 { - attr |= backgroundRed - } - if (n-100)&2 != 0 { - attr |= backgroundGreen - } - if (n-100)&4 != 0 { - attr |= backgroundBlue - } - } - procSetConsoleTextAttribute.Call(uintptr(w.handle), uintptr(attr)) - } - } - } - } - return len(data) - w.lastbuf.Len(), nil -} - -type consoleColor struct { - rgb int - red bool - green bool - blue bool - intensity bool -} - -func (c consoleColor) foregroundAttr() (attr word) { - if c.red { - attr |= foregroundRed - } - if c.green { - attr |= foregroundGreen - } - if c.blue { - attr |= foregroundBlue - } - if c.intensity { - attr |= foregroundIntensity - } - return -} - -func (c consoleColor) backgroundAttr() (attr word) { - if c.red { - attr |= backgroundRed - } - if c.green { - attr |= backgroundGreen - } - if c.blue { - attr |= backgroundBlue - } - if c.intensity { - attr |= backgroundIntensity - } - return -} - -var color16 = []consoleColor{ - consoleColor{0x000000, false, false, false, false}, - consoleColor{0x000080, false, false, true, false}, - consoleColor{0x008000, false, true, false, false}, - consoleColor{0x008080, false, true, true, false}, - consoleColor{0x800000, true, false, false, false}, - consoleColor{0x800080, true, false, true, false}, - consoleColor{0x808000, true, true, false, false}, - consoleColor{0xc0c0c0, true, true, true, false}, - consoleColor{0x808080, false, false, false, true}, - consoleColor{0x0000ff, false, false, true, true}, - consoleColor{0x00ff00, false, true, false, true}, - consoleColor{0x00ffff, false, true, true, true}, - consoleColor{0xff0000, true, false, false, true}, - consoleColor{0xff00ff, true, false, true, true}, - consoleColor{0xffff00, true, true, false, true}, - consoleColor{0xffffff, true, true, true, true}, -} - -type hsv struct { - h, s, v float32 -} - -func (a hsv) dist(b hsv) float32 { - dh := a.h - b.h - switch { - case dh > 0.5: - dh = 1 - dh - case dh < -0.5: - dh = -1 - dh - } - ds := a.s - b.s - dv := a.v - b.v - return float32(math.Sqrt(float64(dh*dh + ds*ds + dv*dv))) -} - -func toHSV(rgb int) hsv { - r, g, b := float32((rgb&0xFF0000)>>16)/256.0, - float32((rgb&0x00FF00)>>8)/256.0, - float32(rgb&0x0000FF)/256.0 - min, max := minmax3f(r, g, b) - h := max - min - if h > 0 { - if max == r { - h = (g - b) / h - if h < 0 { - h += 6 - } - } else if max == g { - h = 2 + (b-r)/h - } else { - h = 4 + (r-g)/h - } - } - h /= 6.0 - s := max - min - if max != 0 { - s /= max - } - v := max - return hsv{h: h, s: s, v: v} -} - -type hsvTable []hsv - -func toHSVTable(rgbTable []consoleColor) hsvTable { - t := make(hsvTable, len(rgbTable)) - for i, c := range rgbTable { - t[i] = toHSV(c.rgb) - } - return t -} - -func (t hsvTable) find(rgb int) consoleColor { - hsv := toHSV(rgb) - n := 7 - l := float32(5.0) - for i, p := range t { - d := hsv.dist(p) - if d < l { - l, n = d, i - } - } - 
return color16[n] -} - -func minmax3f(a, b, c float32) (min, max float32) { - if a < b { - if b < c { - return a, c - } else if a < c { - return a, b - } else { - return c, b - } - } else { - if a < c { - return b, c - } else if b < c { - return b, a - } else { - return c, a - } - } -} - -var n256foreAttr []word -var n256backAttr []word - -func n256setup() { - n256foreAttr = make([]word, 256) - n256backAttr = make([]word, 256) - t := toHSVTable(color16) - for i, rgb := range color256 { - c := t.find(rgb) - n256foreAttr[i] = c.foregroundAttr() - n256backAttr[i] = c.backgroundAttr() - } -} diff --git a/vendor/github.com/mattn/go-isatty/LICENSE b/vendor/github.com/mattn/go-isatty/LICENSE deleted file mode 100644 index 65dc692..0000000 --- a/vendor/github.com/mattn/go-isatty/LICENSE +++ /dev/null @@ -1,9 +0,0 @@ -Copyright (c) Yasuhiro MATSUMOTO - -MIT License (Expat) - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/mattn/go-isatty/README.md b/vendor/github.com/mattn/go-isatty/README.md deleted file mode 100644 index 74845de..0000000 --- a/vendor/github.com/mattn/go-isatty/README.md +++ /dev/null @@ -1,37 +0,0 @@ -# go-isatty - -isatty for golang - -## Usage - -```go -package main - -import ( - "fmt" - "github.com/mattn/go-isatty" - "os" -) - -func main() { - if isatty.IsTerminal(os.Stdout.Fd()) { - fmt.Println("Is Terminal") - } else { - fmt.Println("Is Not Terminal") - } -} -``` - -## Installation - -``` -$ go get github.com/mattn/go-isatty -``` - -# License - -MIT - -# Author - -Yasuhiro Matsumoto (a.k.a mattn) diff --git a/vendor/github.com/mattn/go-isatty/doc.go b/vendor/github.com/mattn/go-isatty/doc.go deleted file mode 100644 index 17d4f90..0000000 --- a/vendor/github.com/mattn/go-isatty/doc.go +++ /dev/null @@ -1,2 +0,0 @@ -// Package isatty implements interface to isatty -package isatty diff --git a/vendor/github.com/mattn/go-isatty/isatty_bsd.go b/vendor/github.com/mattn/go-isatty/isatty_bsd.go deleted file mode 100644 index e6282b5..0000000 --- a/vendor/github.com/mattn/go-isatty/isatty_bsd.go +++ /dev/null @@ -1,17 +0,0 @@ -// +build darwin freebsd openbsd netbsd - -package isatty - -import ( - "syscall" - "unsafe" -) - -const ioctlReadTermios = syscall.TIOCGETA - -// IsTerminal return true if the file descriptor is terminal. 
-func IsTerminal(fd uintptr) bool { - var termios syscall.Termios - _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, fd, ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0) - return err == 0 -} diff --git a/vendor/github.com/mattn/go-isatty/isatty_linux.go b/vendor/github.com/mattn/go-isatty/isatty_linux.go deleted file mode 100644 index 8b361d7..0000000 --- a/vendor/github.com/mattn/go-isatty/isatty_linux.go +++ /dev/null @@ -1,17 +0,0 @@ -// +build linux - -package isatty - -import ( - "syscall" - "unsafe" -) - -const ioctlReadTermios = syscall.TCGETS - -// IsTerminal return true if the file descriptor is terminal. -func IsTerminal(fd uintptr) bool { - var termios syscall.Termios - _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, fd, ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0) - return err == 0 -} diff --git a/vendor/github.com/mattn/go-isatty/isatty_windows.go b/vendor/github.com/mattn/go-isatty/isatty_windows.go deleted file mode 100644 index 562ee39..0000000 --- a/vendor/github.com/mattn/go-isatty/isatty_windows.go +++ /dev/null @@ -1,18 +0,0 @@ -// +build windows - -package isatty - -import ( - "syscall" - "unsafe" -) - -var kernel32 = syscall.NewLazyDLL("kernel32.dll") -var procGetConsoleMode = kernel32.NewProc("GetConsoleMode") - -// IsTerminal return true if the file descriptor is terminal. -func IsTerminal(fd uintptr) bool { - var st uint32 - r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, fd, uintptr(unsafe.Pointer(&st)), 0) - return r != 0 && e == 0 -} diff --git a/vendor/github.com/mattn/go-sqlite3/LICENSE b/vendor/github.com/mattn/go-sqlite3/LICENSE deleted file mode 100644 index ca458bb..0000000 --- a/vendor/github.com/mattn/go-sqlite3/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2014 Yasuhiro Matsumoto - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. 
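
For reference, the two terminal-handling packages removed above expose very small APIs: `isatty.IsTerminal(fd uintptr) bool` and `colorable.NewColorableStdout()` / `NewColorableStderr()`, both returning an `io.Writer`. Below is a minimal, illustrative sketch (not code from this repository) of the common pattern of combining them so ANSI color sequences are emitted only when stdout is actually a terminal:

```go
package main

import (
	"fmt"
	"io"
	"os"

	"github.com/mattn/go-colorable"
	"github.com/mattn/go-isatty"
)

func main() {
	// Default to plain stdout for pipes, files, and CI logs.
	var out io.Writer = os.Stdout

	if isatty.IsTerminal(os.Stdout.Fd()) {
		// NewColorableStdout returns os.Stdout unchanged on non-Windows
		// systems; on Windows it wraps stdout and translates ANSI escape
		// sequences into console text-attribute calls.
		out = colorable.NewColorableStdout()
		fmt.Fprintln(out, "\x1b[32mstdout is a terminal\x1b[0m")
		return
	}

	fmt.Fprintln(out, "stdout is not a terminal; plain output")
}
```

Because `NewColorableStdout` is a no-op wrapper outside Windows (see colorable_others.go above), this pattern costs nothing on Linux or macOS.
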
diff --git a/vendor/github.com/mattn/go-sqlite3/README.md b/vendor/github.com/mattn/go-sqlite3/README.md deleted file mode 100644 index 4383f0c..0000000 --- a/vendor/github.com/mattn/go-sqlite3/README.md +++ /dev/null @@ -1,62 +0,0 @@ -go-sqlite3 -========== - -[![Build Status](https://travis-ci.org/mattn/go-sqlite3.png?branch=master)](https://travis-ci.org/mattn/go-sqlite3) -[![Coverage Status](https://coveralls.io/repos/mattn/go-sqlite3/badge.png?branch=master)](https://coveralls.io/r/mattn/go-sqlite3?branch=master) - -Description ------------ - -sqlite3 driver conforming to the built-in database/sql interface - -Installation ------------- - -This package can be installed with the go get command: - - go get github.com/mattn/go-sqlite3 - -Documentation -------------- - -API documentation can be found here: http://godoc.org/github.com/mattn/go-sqlite3 - -Examples can be found under the `./_example` directory - -FAQ ---- - -* Can't build go-sqlite3 on windows 64bit. - - > Probably, you are using go 1.0, go1.0 has a problem when it comes to compiling/linking on windows 64bit. - > See: https://github.com/mattn/go-sqlite3/issues/27 - -* Getting insert error while query is opened. - - > You can pass some arguments into the connection string, for example, a URI. - > See: https://github.com/mattn/go-sqlite3/issues/39 - -* Do you want cross compiling? mingw on Linux or Mac? - - > See: https://github.com/mattn/go-sqlite3/issues/106 - > See also: http://www.limitlessfx.com/cross-compile-golang-app-for-windows-from-linux.html - -* Want to get time.Time with current locale - - Use `loc=auto` in SQLite3 filename schema like `file:foo.db?loc=auto`. - -License -------- - -MIT: http://mattn.mit-license.org/2012 - -sqlite3-binding.c, sqlite3-binding.h, sqlite3ext.h - -The -binding suffix was added to avoid build failures under gccgo. - -In this repository, those files are amalgamation code that copied from SQLite3. The license of those codes are depend on the license of SQLite3. - -Author ------- - -Yasuhiro Matsumoto (a.k.a mattn) diff --git a/vendor/github.com/mattn/go-sqlite3/backup.go b/vendor/github.com/mattn/go-sqlite3/backup.go deleted file mode 100644 index 3807c60..0000000 --- a/vendor/github.com/mattn/go-sqlite3/backup.go +++ /dev/null @@ -1,70 +0,0 @@ -// Copyright (C) 2014 Yasuhiro Matsumoto . -// -// Use of this source code is governed by an MIT-style -// license that can be found in the LICENSE file. - -package sqlite3 - -/* -#include -#include -*/ -import "C" -import ( - "runtime" - "unsafe" -) - -type SQLiteBackup struct { - b *C.sqlite3_backup -} - -func (c *SQLiteConn) Backup(dest string, conn *SQLiteConn, src string) (*SQLiteBackup, error) { - destptr := C.CString(dest) - defer C.free(unsafe.Pointer(destptr)) - srcptr := C.CString(src) - defer C.free(unsafe.Pointer(srcptr)) - - if b := C.sqlite3_backup_init(c.db, destptr, conn.db, srcptr); b != nil { - bb := &SQLiteBackup{b: b} - runtime.SetFinalizer(bb, (*SQLiteBackup).Finish) - return bb, nil - } - return nil, c.lastError() -} - -// Backs up for one step. Calls the underlying `sqlite3_backup_step` function. -// This function returns a boolean indicating if the backup is done and -// an error signalling any other error. 
Done is returned if the underlying C -// function returns SQLITE_DONE (Code 101) -func (b *SQLiteBackup) Step(p int) (bool, error) { - ret := C.sqlite3_backup_step(b.b, C.int(p)) - if ret == C.SQLITE_DONE { - return true, nil - } else if ret != 0 && ret != C.SQLITE_LOCKED && ret != C.SQLITE_BUSY { - return false, Error{Code: ErrNo(ret)} - } - return false, nil -} - -func (b *SQLiteBackup) Remaining() int { - return int(C.sqlite3_backup_remaining(b.b)) -} - -func (b *SQLiteBackup) PageCount() int { - return int(C.sqlite3_backup_pagecount(b.b)) -} - -func (b *SQLiteBackup) Finish() error { - return b.Close() -} - -func (b *SQLiteBackup) Close() error { - ret := C.sqlite3_backup_finish(b.b) - if ret != 0 { - return Error{Code: ErrNo(ret)} - } - b.b = nil - runtime.SetFinalizer(b, nil) - return nil -} diff --git a/vendor/github.com/mattn/go-sqlite3/doc.go b/vendor/github.com/mattn/go-sqlite3/doc.go deleted file mode 100644 index 51364c3..0000000 --- a/vendor/github.com/mattn/go-sqlite3/doc.go +++ /dev/null @@ -1,95 +0,0 @@ -/* -Package sqlite3 provides interface to SQLite3 databases. - -This works as driver for database/sql. - -Installation - - go get github.com/mattn/go-sqlite3 - -Supported Types - -Currently, go-sqlite3 support following data types. - - +------------------------------+ - |go | sqlite3 | - |----------|-------------------| - |nil | null | - |int | integer | - |int64 | integer | - |float64 | float | - |bool | integer | - |[]byte | blob | - |string | text | - |time.Time | timestamp/datetime| - +------------------------------+ - -SQLite3 Extension - -You can write your own extension module for sqlite3. For example, below is a -extension for Regexp matcher operation. - - #include - #include - #include - #include - - SQLITE_EXTENSION_INIT1 - static void regexp_func(sqlite3_context *context, int argc, sqlite3_value **argv) { - if (argc >= 2) { - const char *target = (const char *)sqlite3_value_text(argv[1]); - const char *pattern = (const char *)sqlite3_value_text(argv[0]); - const char* errstr = NULL; - int erroff = 0; - int vec[500]; - int n, rc; - pcre* re = pcre_compile(pattern, 0, &errstr, &erroff, NULL); - rc = pcre_exec(re, NULL, target, strlen(target), 0, 0, vec, 500); - if (rc <= 0) { - sqlite3_result_error(context, errstr, 0); - return; - } - sqlite3_result_int(context, 1); - } - } - - #ifdef _WIN32 - __declspec(dllexport) - #endif - int sqlite3_extension_init(sqlite3 *db, char **errmsg, - const sqlite3_api_routines *api) { - SQLITE_EXTENSION_INIT2(api); - return sqlite3_create_function(db, "regexp", 2, SQLITE_UTF8, - (void*)db, regexp_func, NULL, NULL); - } - -It need to build as so/dll shared library. And you need to register -extension module like below. - - sql.Register("sqlite3_with_extensions", - &sqlite3.SQLiteDriver{ - Extensions: []string{ - "sqlite3_mod_regexp", - }, - }) - -Then, you can use this extension. - - rows, err := db.Query("select text from mytable where name regexp '^golang'") - -Connection Hook - -You can hook and inject your codes when connection established. database/sql -doesn't provide the way to get native go-sqlite3 interfaces. So if you want, -you need to hook ConnectHook and get the SQLiteConn. 
- - sql.Register("sqlite3_with_hook_example", - &sqlite3.SQLiteDriver{ - ConnectHook: func(conn *sqlite3.SQLiteConn) error { - sqlite3conn = append(sqlite3conn, conn) - return nil - }, - }) - -*/ -package sqlite3 diff --git a/vendor/github.com/mattn/go-sqlite3/error.go b/vendor/github.com/mattn/go-sqlite3/error.go deleted file mode 100644 index b910108..0000000 --- a/vendor/github.com/mattn/go-sqlite3/error.go +++ /dev/null @@ -1,128 +0,0 @@ -// Copyright (C) 2014 Yasuhiro Matsumoto . -// -// Use of this source code is governed by an MIT-style -// license that can be found in the LICENSE file. - -package sqlite3 - -import "C" - -type ErrNo int - -const ErrNoMask C.int = 0xff - -type ErrNoExtended int - -type Error struct { - Code ErrNo /* The error code returned by SQLite */ - ExtendedCode ErrNoExtended /* The extended error code returned by SQLite */ - err string /* The error string returned by sqlite3_errmsg(), - this usually contains more specific details. */ -} - -// result codes from http://www.sqlite.org/c3ref/c_abort.html -var ( - ErrError = ErrNo(1) /* SQL error or missing database */ - ErrInternal = ErrNo(2) /* Internal logic error in SQLite */ - ErrPerm = ErrNo(3) /* Access permission denied */ - ErrAbort = ErrNo(4) /* Callback routine requested an abort */ - ErrBusy = ErrNo(5) /* The database file is locked */ - ErrLocked = ErrNo(6) /* A table in the database is locked */ - ErrNomem = ErrNo(7) /* A malloc() failed */ - ErrReadonly = ErrNo(8) /* Attempt to write a readonly database */ - ErrInterrupt = ErrNo(9) /* Operation terminated by sqlite3_interrupt() */ - ErrIoErr = ErrNo(10) /* Some kind of disk I/O error occurred */ - ErrCorrupt = ErrNo(11) /* The database disk image is malformed */ - ErrNotFound = ErrNo(12) /* Unknown opcode in sqlite3_file_control() */ - ErrFull = ErrNo(13) /* Insertion failed because database is full */ - ErrCantOpen = ErrNo(14) /* Unable to open the database file */ - ErrProtocol = ErrNo(15) /* Database lock protocol error */ - ErrEmpty = ErrNo(16) /* Database is empty */ - ErrSchema = ErrNo(17) /* The database schema changed */ - ErrTooBig = ErrNo(18) /* String or BLOB exceeds size limit */ - ErrConstraint = ErrNo(19) /* Abort due to constraint violation */ - ErrMismatch = ErrNo(20) /* Data type mismatch */ - ErrMisuse = ErrNo(21) /* Library used incorrectly */ - ErrNoLFS = ErrNo(22) /* Uses OS features not supported on host */ - ErrAuth = ErrNo(23) /* Authorization denied */ - ErrFormat = ErrNo(24) /* Auxiliary database format error */ - ErrRange = ErrNo(25) /* 2nd parameter to sqlite3_bind out of range */ - ErrNotADB = ErrNo(26) /* File opened that is not a database file */ - ErrNotice = ErrNo(27) /* Notifications from sqlite3_log() */ - ErrWarning = ErrNo(28) /* Warnings from sqlite3_log() */ -) - -func (err ErrNo) Error() string { - return Error{Code: err}.Error() -} - -func (err ErrNo) Extend(by int) ErrNoExtended { - return ErrNoExtended(int(err) | (by << 8)) -} - -func (err ErrNoExtended) Error() string { - return Error{Code: ErrNo(C.int(err) & ErrNoMask), ExtendedCode: err}.Error() -} - -func (err Error) Error() string { - if err.err != "" { - return err.err - } - return errorString(err) -} - -// result codes from http://www.sqlite.org/c3ref/c_abort_rollback.html -var ( - ErrIoErrRead = ErrIoErr.Extend(1) - ErrIoErrShortRead = ErrIoErr.Extend(2) - ErrIoErrWrite = ErrIoErr.Extend(3) - ErrIoErrFsync = ErrIoErr.Extend(4) - ErrIoErrDirFsync = ErrIoErr.Extend(5) - ErrIoErrTruncate = ErrIoErr.Extend(6) - ErrIoErrFstat = ErrIoErr.Extend(7) - 
ErrIoErrUnlock = ErrIoErr.Extend(8) - ErrIoErrRDlock = ErrIoErr.Extend(9) - ErrIoErrDelete = ErrIoErr.Extend(10) - ErrIoErrBlocked = ErrIoErr.Extend(11) - ErrIoErrNoMem = ErrIoErr.Extend(12) - ErrIoErrAccess = ErrIoErr.Extend(13) - ErrIoErrCheckReservedLock = ErrIoErr.Extend(14) - ErrIoErrLock = ErrIoErr.Extend(15) - ErrIoErrClose = ErrIoErr.Extend(16) - ErrIoErrDirClose = ErrIoErr.Extend(17) - ErrIoErrSHMOpen = ErrIoErr.Extend(18) - ErrIoErrSHMSize = ErrIoErr.Extend(19) - ErrIoErrSHMLock = ErrIoErr.Extend(20) - ErrIoErrSHMMap = ErrIoErr.Extend(21) - ErrIoErrSeek = ErrIoErr.Extend(22) - ErrIoErrDeleteNoent = ErrIoErr.Extend(23) - ErrIoErrMMap = ErrIoErr.Extend(24) - ErrIoErrGetTempPath = ErrIoErr.Extend(25) - ErrIoErrConvPath = ErrIoErr.Extend(26) - ErrLockedSharedCache = ErrLocked.Extend(1) - ErrBusyRecovery = ErrBusy.Extend(1) - ErrBusySnapshot = ErrBusy.Extend(2) - ErrCantOpenNoTempDir = ErrCantOpen.Extend(1) - ErrCantOpenIsDir = ErrCantOpen.Extend(2) - ErrCantOpenFullPath = ErrCantOpen.Extend(3) - ErrCantOpenConvPath = ErrCantOpen.Extend(4) - ErrCorruptVTab = ErrCorrupt.Extend(1) - ErrReadonlyRecovery = ErrReadonly.Extend(1) - ErrReadonlyCantLock = ErrReadonly.Extend(2) - ErrReadonlyRollback = ErrReadonly.Extend(3) - ErrReadonlyDbMoved = ErrReadonly.Extend(4) - ErrAbortRollback = ErrAbort.Extend(2) - ErrConstraintCheck = ErrConstraint.Extend(1) - ErrConstraintCommitHook = ErrConstraint.Extend(2) - ErrConstraintForeignKey = ErrConstraint.Extend(3) - ErrConstraintFunction = ErrConstraint.Extend(4) - ErrConstraintNotNull = ErrConstraint.Extend(5) - ErrConstraintPrimaryKey = ErrConstraint.Extend(6) - ErrConstraintTrigger = ErrConstraint.Extend(7) - ErrConstraintUnique = ErrConstraint.Extend(8) - ErrConstraintVTab = ErrConstraint.Extend(9) - ErrConstraintRowId = ErrConstraint.Extend(10) - ErrNoticeRecoverWAL = ErrNotice.Extend(1) - ErrNoticeRecoverRollback = ErrNotice.Extend(2) - ErrWarningAutoIndex = ErrWarning.Extend(1) -) diff --git a/vendor/github.com/mattn/go-sqlite3/sqlite3-binding.c b/vendor/github.com/mattn/go-sqlite3/sqlite3-binding.c deleted file mode 100644 index 9228d24..0000000 --- a/vendor/github.com/mattn/go-sqlite3/sqlite3-binding.c +++ /dev/null @@ -1,147782 +0,0 @@ -/****************************************************************************** -** This file is an amalgamation of many separate C source files from SQLite -** version 3.8.5. By combining all the individual C code files into this -** single large file, the entire code can be compiled as a single translation -** unit. This allows many compilers to do optimizations that would not be -** possible if the files were compiled separately. Performance improvements -** of 5% or more are commonly seen when SQLite is compiled as a single -** translation unit. -** -** This file is all you need to compile SQLite. To use SQLite in other -** programs, you need this file and the "sqlite3.h" header file that defines -** the programming interface to the SQLite library. (If you do not have -** the "sqlite3.h" header file at hand, you will find a copy embedded within -** the text of this file. Search for "Begin file sqlite3.h" to find the start -** of the embedded sqlite3.h header file.) Additional code files may be needed -** if you want a wrapper to interface SQLite with your choice of programming -** language. The code for the "sqlite3" command-line shell is also in a -** separate file. This file contains only code for the core SQLite library. 
-*/ -#define SQLITE_CORE 1 -#define SQLITE_AMALGAMATION 1 -#ifndef SQLITE_PRIVATE -# define SQLITE_PRIVATE static -#endif -#ifndef SQLITE_API -# define SQLITE_API -#endif -/************** Begin file sqliteInt.h ***************************************/ -/* -** 2001 September 15 -** -** The author disclaims copyright to this source code. In place of -** a legal notice, here is a blessing: -** -** May you do good and not evil. -** May you find forgiveness for yourself and forgive others. -** May you share freely, never taking more than you give. -** -************************************************************************* -** Internal interface definitions for SQLite. -** -*/ -#ifndef _SQLITEINT_H_ -#define _SQLITEINT_H_ - -/* -** These #defines should enable >2GB file support on POSIX if the -** underlying operating system supports it. If the OS lacks -** large file support, or if the OS is windows, these should be no-ops. -** -** Ticket #2739: The _LARGEFILE_SOURCE macro must appear before any -** system #includes. Hence, this block of code must be the very first -** code in all source files. -** -** Large file support can be disabled using the -DSQLITE_DISABLE_LFS switch -** on the compiler command line. This is necessary if you are compiling -** on a recent machine (ex: Red Hat 7.2) but you want your code to work -** on an older machine (ex: Red Hat 6.0). If you compile on Red Hat 7.2 -** without this option, LFS is enable. But LFS does not exist in the kernel -** in Red Hat 6.0, so the code won't work. Hence, for maximum binary -** portability you should omit LFS. -** -** The previous paragraph was written in 2005. (This paragraph is written -** on 2008-11-28.) These days, all Linux kernels support large files, so -** you should probably leave LFS enabled. But some embedded platforms might -** lack LFS in which case the SQLITE_DISABLE_LFS macro might still be useful. -** -** Similar is true for Mac OS X. LFS is only supported on Mac OS X 9 and later. -*/ -#ifndef SQLITE_DISABLE_LFS -# define _LARGE_FILE 1 -# ifndef _FILE_OFFSET_BITS -# define _FILE_OFFSET_BITS 64 -# endif -# define _LARGEFILE_SOURCE 1 -#endif - -/* -** For MinGW, check to see if we can include the header file containing its -** version information, among other things. Normally, this internal MinGW -** header file would [only] be included automatically by other MinGW header -** files; however, the contained version information is now required by this -** header file to work around binary compatibility issues (see below) and -** this is the only known way to reliably obtain it. This entire #if block -** would be completely unnecessary if there was any other way of detecting -** MinGW via their preprocessor (e.g. if they customized their GCC to define -** some MinGW-specific macros). When compiling for MinGW, either the -** _HAVE_MINGW_H or _HAVE__MINGW_H (note the extra underscore) macro must be -** defined; otherwise, detection of conditions specific to MinGW will be -** disabled. -*/ -#if defined(_HAVE_MINGW_H) -# include "mingw.h" -#elif defined(_HAVE__MINGW_H) -# include "_mingw.h" -#endif - -/* -** For MinGW version 4.x (and higher), check to see if the _USE_32BIT_TIME_T -** define is required to maintain binary compatibility with the MSVC runtime -** library in use (e.g. for Windows XP). 
-*/ -#if !defined(_USE_32BIT_TIME_T) && !defined(_USE_64BIT_TIME_T) && \ - defined(_WIN32) && !defined(_WIN64) && \ - defined(__MINGW_MAJOR_VERSION) && __MINGW_MAJOR_VERSION >= 4 && \ - defined(__MSVCRT__) -# define _USE_32BIT_TIME_T -#endif - -/* The public SQLite interface. The _FILE_OFFSET_BITS macro must appear -** first in QNX. Also, the _USE_32BIT_TIME_T macro must appear first for -** MinGW. -*/ -/************** Include sqlite3.h in the middle of sqliteInt.h ***************/ -/************** Begin file sqlite3.h *****************************************/ -/* -** 2001 September 15 -** -** The author disclaims copyright to this source code. In place of -** a legal notice, here is a blessing: -** -** May you do good and not evil. -** May you find forgiveness for yourself and forgive others. -** May you share freely, never taking more than you give. -** -************************************************************************* -** This header file defines the interface that the SQLite library -** presents to client programs. If a C-function, structure, datatype, -** or constant definition does not appear in this file, then it is -** not a published API of SQLite, is subject to change without -** notice, and should not be referenced by programs that use SQLite. -** -** Some of the definitions that are in this file are marked as -** "experimental". Experimental interfaces are normally new -** features recently added to SQLite. We do not anticipate changes -** to experimental interfaces but reserve the right to make minor changes -** if experience from use "in the wild" suggest such changes are prudent. -** -** The official C-language API documentation for SQLite is derived -** from comments in this file. This file is the authoritative source -** on how SQLite interfaces are suppose to operate. -** -** The name of this file under configuration management is "sqlite.h.in". -** The makefile makes some minor changes to this file (such as inserting -** the version number) and changes its name to "sqlite3.h" as -** part of the build process. -*/ -#ifndef _SQLITE3_H_ -#define _SQLITE3_H_ -#include /* Needed for the definition of va_list */ - -/* -** Make sure we can call this stuff from C++. -*/ -#if 0 -extern "C" { -#endif - - -/* -** Add the ability to override 'extern' -*/ -#ifndef SQLITE_EXTERN -# define SQLITE_EXTERN extern -#endif - -#ifndef SQLITE_API -# define SQLITE_API -#endif - - -/* -** These no-op macros are used in front of interfaces to mark those -** interfaces as either deprecated or experimental. New applications -** should not use deprecated interfaces - they are support for backwards -** compatibility only. Application writers should be aware that -** experimental interfaces are subject to change in point releases. -** -** These macros used to resolve to various kinds of compiler magic that -** would generate warning messages when they were used. But that -** compiler magic ended up generating such a flurry of bug reports -** that we have taken it all out and gone back to using simple -** noop macros. -*/ -#define SQLITE_DEPRECATED -#define SQLITE_EXPERIMENTAL - -/* -** Ensure these symbols were not defined by some previous header file. 
-*/ -#ifdef SQLITE_VERSION -# undef SQLITE_VERSION -#endif -#ifdef SQLITE_VERSION_NUMBER -# undef SQLITE_VERSION_NUMBER -#endif - -/* -** CAPI3REF: Compile-Time Library Version Numbers -** -** ^(The [SQLITE_VERSION] C preprocessor macro in the sqlite3.h header -** evaluates to a string literal that is the SQLite version in the -** format "X.Y.Z" where X is the major version number (always 3 for -** SQLite3) and Y is the minor version number and Z is the release number.)^ -** ^(The [SQLITE_VERSION_NUMBER] C preprocessor macro resolves to an integer -** with the value (X*1000000 + Y*1000 + Z) where X, Y, and Z are the same -** numbers used in [SQLITE_VERSION].)^ -** The SQLITE_VERSION_NUMBER for any given release of SQLite will also -** be larger than the release from which it is derived. Either Y will -** be held constant and Z will be incremented or else Y will be incremented -** and Z will be reset to zero. -** -** Since version 3.6.18, SQLite source code has been stored in the -** Fossil configuration management -** system. ^The SQLITE_SOURCE_ID macro evaluates to -** a string which identifies a particular check-in of SQLite -** within its configuration management system. ^The SQLITE_SOURCE_ID -** string contains the date and time of the check-in (UTC) and an SHA1 -** hash of the entire source tree. -** -** See also: [sqlite3_libversion()], -** [sqlite3_libversion_number()], [sqlite3_sourceid()], -** [sqlite_version()] and [sqlite_source_id()]. -*/ -#define SQLITE_VERSION "3.8.5" -#define SQLITE_VERSION_NUMBER 3008005 -#define SQLITE_SOURCE_ID "2014-06-04 14:06:34 b1ed4f2a34ba66c29b130f8d13e9092758019212" - -/* -** CAPI3REF: Run-Time Library Version Numbers -** KEYWORDS: sqlite3_version, sqlite3_sourceid -** -** These interfaces provide the same information as the [SQLITE_VERSION], -** [SQLITE_VERSION_NUMBER], and [SQLITE_SOURCE_ID] C preprocessor macros -** but are associated with the library instead of the header file. ^(Cautious -** programmers might include assert() statements in their application to -** verify that values returned by these interfaces match the macros in -** the header, and thus insure that the application is -** compiled with matching library and header files. -** -**
-** assert( sqlite3_libversion_number()==SQLITE_VERSION_NUMBER );
-** assert( strcmp(sqlite3_sourceid(),SQLITE_SOURCE_ID)==0 );
-** assert( strcmp(sqlite3_libversion(),SQLITE_VERSION)==0 );
-** 
)^ -** -** ^The sqlite3_version[] string constant contains the text of [SQLITE_VERSION] -** macro. ^The sqlite3_libversion() function returns a pointer to the -** to the sqlite3_version[] string constant. The sqlite3_libversion() -** function is provided for use in DLLs since DLL users usually do not have -** direct access to string constants within the DLL. ^The -** sqlite3_libversion_number() function returns an integer equal to -** [SQLITE_VERSION_NUMBER]. ^The sqlite3_sourceid() function returns -** a pointer to a string constant whose value is the same as the -** [SQLITE_SOURCE_ID] C preprocessor macro. -** -** See also: [sqlite_version()] and [sqlite_source_id()]. -*/ -SQLITE_API const char sqlite3_version[] = SQLITE_VERSION; -SQLITE_API const char *sqlite3_libversion(void); -SQLITE_API const char *sqlite3_sourceid(void); -SQLITE_API int sqlite3_libversion_number(void); - -/* -** CAPI3REF: Run-Time Library Compilation Options Diagnostics -** -** ^The sqlite3_compileoption_used() function returns 0 or 1 -** indicating whether the specified option was defined at -** compile time. ^The SQLITE_ prefix may be omitted from the -** option name passed to sqlite3_compileoption_used(). -** -** ^The sqlite3_compileoption_get() function allows iterating -** over the list of options that were defined at compile time by -** returning the N-th compile time option string. ^If N is out of range, -** sqlite3_compileoption_get() returns a NULL pointer. ^The SQLITE_ -** prefix is omitted from any strings returned by -** sqlite3_compileoption_get(). -** -** ^Support for the diagnostic functions sqlite3_compileoption_used() -** and sqlite3_compileoption_get() may be omitted by specifying the -** [SQLITE_OMIT_COMPILEOPTION_DIAGS] option at compile time. -** -** See also: SQL functions [sqlite_compileoption_used()] and -** [sqlite_compileoption_get()] and the [compile_options pragma]. -*/ -#ifndef SQLITE_OMIT_COMPILEOPTION_DIAGS -SQLITE_API int sqlite3_compileoption_used(const char *zOptName); -SQLITE_API const char *sqlite3_compileoption_get(int N); -#endif - -/* -** CAPI3REF: Test To See If The Library Is Threadsafe -** -** ^The sqlite3_threadsafe() function returns zero if and only if -** SQLite was compiled with mutexing code omitted due to the -** [SQLITE_THREADSAFE] compile-time option being set to 0. -** -** SQLite can be compiled with or without mutexes. When -** the [SQLITE_THREADSAFE] C preprocessor macro is 1 or 2, mutexes -** are enabled and SQLite is threadsafe. When the -** [SQLITE_THREADSAFE] macro is 0, -** the mutexes are omitted. Without the mutexes, it is not safe -** to use SQLite concurrently from more than one thread. -** -** Enabling mutexes incurs a measurable performance penalty. -** So if speed is of utmost importance, it makes sense to disable -** the mutexes. But for maximum safety, mutexes should be enabled. -** ^The default behavior is for mutexes to be enabled. -** -** This interface can be used by an application to make sure that the -** version of SQLite that it is linking against was compiled with -** the desired setting of the [SQLITE_THREADSAFE] macro. -** -** This interface only reports on the compile-time mutex setting -** of the [SQLITE_THREADSAFE] flag. If SQLite is compiled with -** SQLITE_THREADSAFE=1 or =2 then mutexes are enabled by default but -** can be fully or partially disabled using a call to [sqlite3_config()] -** with the verbs [SQLITE_CONFIG_SINGLETHREAD], [SQLITE_CONFIG_MULTITHREAD], -** or [SQLITE_CONFIG_MUTEX]. 
^(The return value of the -** sqlite3_threadsafe() function shows only the compile-time setting of -** thread safety, not any run-time changes to that setting made by -** sqlite3_config(). In other words, the return value from sqlite3_threadsafe() -** is unchanged by calls to sqlite3_config().)^ -** -** See the [threading mode] documentation for additional information. -*/ -SQLITE_API int sqlite3_threadsafe(void); - -/* -** CAPI3REF: Database Connection Handle -** KEYWORDS: {database connection} {database connections} -** -** Each open SQLite database is represented by a pointer to an instance of -** the opaque structure named "sqlite3". It is useful to think of an sqlite3 -** pointer as an object. The [sqlite3_open()], [sqlite3_open16()], and -** [sqlite3_open_v2()] interfaces are its constructors, and [sqlite3_close()] -** and [sqlite3_close_v2()] are its destructors. There are many other -** interfaces (such as -** [sqlite3_prepare_v2()], [sqlite3_create_function()], and -** [sqlite3_busy_timeout()] to name but three) that are methods on an -** sqlite3 object. -*/ -typedef struct sqlite3 sqlite3; - -/* -** CAPI3REF: 64-Bit Integer Types -** KEYWORDS: sqlite_int64 sqlite_uint64 -** -** Because there is no cross-platform way to specify 64-bit integer types -** SQLite includes typedefs for 64-bit signed and unsigned integers. -** -** The sqlite3_int64 and sqlite3_uint64 are the preferred type definitions. -** The sqlite_int64 and sqlite_uint64 types are supported for backwards -** compatibility only. -** -** ^The sqlite3_int64 and sqlite_int64 types can store integer values -** between -9223372036854775808 and +9223372036854775807 inclusive. ^The -** sqlite3_uint64 and sqlite_uint64 types can store integer values -** between 0 and +18446744073709551615 inclusive. -*/ -#ifdef SQLITE_INT64_TYPE - typedef SQLITE_INT64_TYPE sqlite_int64; - typedef unsigned SQLITE_INT64_TYPE sqlite_uint64; -#elif defined(_MSC_VER) || defined(__BORLANDC__) - typedef __int64 sqlite_int64; - typedef unsigned __int64 sqlite_uint64; -#else - typedef long long int sqlite_int64; - typedef unsigned long long int sqlite_uint64; -#endif -typedef sqlite_int64 sqlite3_int64; -typedef sqlite_uint64 sqlite3_uint64; - -/* -** If compiling for a processor that lacks floating point support, -** substitute integer for floating-point. -*/ -#ifdef SQLITE_OMIT_FLOATING_POINT -# define double sqlite3_int64 -#endif - -/* -** CAPI3REF: Closing A Database Connection -** -** ^The sqlite3_close() and sqlite3_close_v2() routines are destructors -** for the [sqlite3] object. -** ^Calls to sqlite3_close() and sqlite3_close_v2() return SQLITE_OK if -** the [sqlite3] object is successfully destroyed and all associated -** resources are deallocated. -** -** ^If the database connection is associated with unfinalized prepared -** statements or unfinished sqlite3_backup objects then sqlite3_close() -** will leave the database connection open and return [SQLITE_BUSY]. -** ^If sqlite3_close_v2() is called with unfinalized prepared statements -** and unfinished sqlite3_backups, then the database connection becomes -** an unusable "zombie" which will automatically be deallocated when the -** last prepared statement is finalized or the last sqlite3_backup is -** finished. The sqlite3_close_v2() interface is intended for use with -** host languages that are garbage collected, and where the order in which -** destructors are called is arbitrary. 
-** -** Applications should [sqlite3_finalize | finalize] all [prepared statements], -** [sqlite3_blob_close | close] all [BLOB handles], and -** [sqlite3_backup_finish | finish] all [sqlite3_backup] objects associated -** with the [sqlite3] object prior to attempting to close the object. ^If -** sqlite3_close_v2() is called on a [database connection] that still has -** outstanding [prepared statements], [BLOB handles], and/or -** [sqlite3_backup] objects then it returns SQLITE_OK but the deallocation -** of resources is deferred until all [prepared statements], [BLOB handles], -** and [sqlite3_backup] objects are also destroyed. -** -** ^If an [sqlite3] object is destroyed while a transaction is open, -** the transaction is automatically rolled back. -** -** The C parameter to [sqlite3_close(C)] and [sqlite3_close_v2(C)] -** must be either a NULL -** pointer or an [sqlite3] object pointer obtained -** from [sqlite3_open()], [sqlite3_open16()], or -** [sqlite3_open_v2()], and not previously closed. -** ^Calling sqlite3_close() or sqlite3_close_v2() with a NULL pointer -** argument is a harmless no-op. -*/ -SQLITE_API int sqlite3_close(sqlite3*); -SQLITE_API int sqlite3_close_v2(sqlite3*); - -/* -** The type for a callback function. -** This is legacy and deprecated. It is included for historical -** compatibility and is not documented. -*/ -typedef int (*sqlite3_callback)(void*,int,char**, char**); - -/* -** CAPI3REF: One-Step Query Execution Interface -** -** The sqlite3_exec() interface is a convenience wrapper around -** [sqlite3_prepare_v2()], [sqlite3_step()], and [sqlite3_finalize()], -** that allows an application to run multiple statements of SQL -** without having to use a lot of C code. -** -** ^The sqlite3_exec() interface runs zero or more UTF-8 encoded, -** semicolon-separate SQL statements passed into its 2nd argument, -** in the context of the [database connection] passed in as its 1st -** argument. ^If the callback function of the 3rd argument to -** sqlite3_exec() is not NULL, then it is invoked for each result row -** coming out of the evaluated SQL statements. ^The 4th argument to -** sqlite3_exec() is relayed through to the 1st argument of each -** callback invocation. ^If the callback pointer to sqlite3_exec() -** is NULL, then no callback is ever invoked and result rows are -** ignored. -** -** ^If an error occurs while evaluating the SQL statements passed into -** sqlite3_exec(), then execution of the current statement stops and -** subsequent statements are skipped. ^If the 5th parameter to sqlite3_exec() -** is not NULL then any error message is written into memory obtained -** from [sqlite3_malloc()] and passed back through the 5th parameter. -** To avoid memory leaks, the application should invoke [sqlite3_free()] -** on error message strings returned through the 5th parameter of -** of sqlite3_exec() after the error message string is no longer needed. -** ^If the 5th parameter to sqlite3_exec() is not NULL and no errors -** occur, then sqlite3_exec() sets the pointer in its 5th parameter to -** NULL before returning. -** -** ^If an sqlite3_exec() callback returns non-zero, the sqlite3_exec() -** routine returns SQLITE_ABORT without invoking the callback again and -** without running any subsequent SQL statements. -** -** ^The 2nd argument to the sqlite3_exec() callback function is the -** number of columns in the result. 
^The 3rd argument to the sqlite3_exec() -** callback is an array of pointers to strings obtained as if from -** [sqlite3_column_text()], one for each column. ^If an element of a -** result row is NULL then the corresponding string pointer for the -** sqlite3_exec() callback is a NULL pointer. ^The 4th argument to the -** sqlite3_exec() callback is an array of pointers to strings where each -** entry represents the name of corresponding result column as obtained -** from [sqlite3_column_name()]. -** -** ^If the 2nd parameter to sqlite3_exec() is a NULL pointer, a pointer -** to an empty string, or a pointer that contains only whitespace and/or -** SQL comments, then no SQL statements are evaluated and the database -** is not changed. -** -** Restrictions: -** -**
-**   • The application must insure that the 1st parameter to sqlite3_exec()
-**     is a valid and open [database connection].
-**   • The application must not close the [database connection] specified by
-**     the 1st parameter to sqlite3_exec() while sqlite3_exec() is running.
-**   • The application must not modify the SQL statement text passed into
-**     the 2nd parameter of sqlite3_exec() while sqlite3_exec() is running.
-**
-*/ -SQLITE_API int sqlite3_exec( - sqlite3*, /* An open database */ - const char *sql, /* SQL to be evaluated */ - int (*callback)(void*,int,char**,char**), /* Callback function */ - void *, /* 1st argument to callback */ - char **errmsg /* Error msg written here */ -); - -/* -** CAPI3REF: Result Codes -** KEYWORDS: SQLITE_OK {error code} {error codes} -** KEYWORDS: {result code} {result codes} -** -** Many SQLite functions return an integer result code from the set shown -** here in order to indicate success or failure. -** -** New error codes may be added in future versions of SQLite. -** -** See also: [SQLITE_IOERR_READ | extended result codes], -** [sqlite3_vtab_on_conflict()] [SQLITE_ROLLBACK | result codes]. -*/ -#define SQLITE_OK 0 /* Successful result */ -/* beginning-of-error-codes */ -#define SQLITE_ERROR 1 /* SQL error or missing database */ -#define SQLITE_INTERNAL 2 /* Internal logic error in SQLite */ -#define SQLITE_PERM 3 /* Access permission denied */ -#define SQLITE_ABORT 4 /* Callback routine requested an abort */ -#define SQLITE_BUSY 5 /* The database file is locked */ -#define SQLITE_LOCKED 6 /* A table in the database is locked */ -#define SQLITE_NOMEM 7 /* A malloc() failed */ -#define SQLITE_READONLY 8 /* Attempt to write a readonly database */ -#define SQLITE_INTERRUPT 9 /* Operation terminated by sqlite3_interrupt()*/ -#define SQLITE_IOERR 10 /* Some kind of disk I/O error occurred */ -#define SQLITE_CORRUPT 11 /* The database disk image is malformed */ -#define SQLITE_NOTFOUND 12 /* Unknown opcode in sqlite3_file_control() */ -#define SQLITE_FULL 13 /* Insertion failed because database is full */ -#define SQLITE_CANTOPEN 14 /* Unable to open the database file */ -#define SQLITE_PROTOCOL 15 /* Database lock protocol error */ -#define SQLITE_EMPTY 16 /* Database is empty */ -#define SQLITE_SCHEMA 17 /* The database schema changed */ -#define SQLITE_TOOBIG 18 /* String or BLOB exceeds size limit */ -#define SQLITE_CONSTRAINT 19 /* Abort due to constraint violation */ -#define SQLITE_MISMATCH 20 /* Data type mismatch */ -#define SQLITE_MISUSE 21 /* Library used incorrectly */ -#define SQLITE_NOLFS 22 /* Uses OS features not supported on host */ -#define SQLITE_AUTH 23 /* Authorization denied */ -#define SQLITE_FORMAT 24 /* Auxiliary database format error */ -#define SQLITE_RANGE 25 /* 2nd parameter to sqlite3_bind out of range */ -#define SQLITE_NOTADB 26 /* File opened that is not a database file */ -#define SQLITE_NOTICE 27 /* Notifications from sqlite3_log() */ -#define SQLITE_WARNING 28 /* Warnings from sqlite3_log() */ -#define SQLITE_ROW 100 /* sqlite3_step() has another row ready */ -#define SQLITE_DONE 101 /* sqlite3_step() has finished executing */ -/* end-of-error-codes */ - -/* -** CAPI3REF: Extended Result Codes -** KEYWORDS: {extended error code} {extended error codes} -** KEYWORDS: {extended result code} {extended result codes} -** -** In its default configuration, SQLite API routines return one of 26 integer -** [SQLITE_OK | result codes]. However, experience has shown that many of -** these result codes are too coarse-grained. They do not provide as -** much information about problems as programmers might like. In an effort to -** address this, newer versions of SQLite (version 3.3.8 and later) include -** support for additional result codes that provide more detailed information -** about errors. The extended result codes are enabled or disabled -** on a per database connection basis using the -** [sqlite3_extended_result_codes()] API. 
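A hedged usage sketch for sqlite3_exec() as declared above (the SQL and helper names are illustrative): the callback receives the column count, values, and names for each result row, and any message handed back through the 5th parameter must be released with sqlite3_free().

#include <stdio.h>
#include "sqlite3.h"

/* Callback contract described above: argc is the column count, argv the
** row values (NULL for an SQL NULL), azCol the column names. Returning
** non-zero makes sqlite3_exec() stop with SQLITE_ABORT. */
static int print_row(void *arg, int argc, char **argv, char **azCol){
  (void)arg;
  for(int i=0; i<argc; i++){
    printf("%s = %s\n", azCol[i], argv[i] ? argv[i] : "NULL");
  }
  return 0;
}

static int exec_example(sqlite3 *db){
  char *zErr = 0;
  int rc = sqlite3_exec(db,
      "CREATE TABLE t(x); INSERT INTO t VALUES(1); SELECT x FROM t;",
      print_row, 0, &zErr);
  if( rc!=SQLITE_OK ){
    fprintf(stderr, "sqlite3_exec failed (%d): %s\n", rc, zErr);
    sqlite3_free(zErr);   /* the message was obtained from sqlite3_malloc() */
  }
  return rc;
}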
-** -** Some of the available extended result codes are listed here. -** One may expect the number of extended result codes will increase -** over time. Software that uses extended result codes should expect -** to see new result codes in future releases of SQLite. -** -** The SQLITE_OK result code will never be extended. It will always -** be exactly zero. -*/ -#define SQLITE_IOERR_READ (SQLITE_IOERR | (1<<8)) -#define SQLITE_IOERR_SHORT_READ (SQLITE_IOERR | (2<<8)) -#define SQLITE_IOERR_WRITE (SQLITE_IOERR | (3<<8)) -#define SQLITE_IOERR_FSYNC (SQLITE_IOERR | (4<<8)) -#define SQLITE_IOERR_DIR_FSYNC (SQLITE_IOERR | (5<<8)) -#define SQLITE_IOERR_TRUNCATE (SQLITE_IOERR | (6<<8)) -#define SQLITE_IOERR_FSTAT (SQLITE_IOERR | (7<<8)) -#define SQLITE_IOERR_UNLOCK (SQLITE_IOERR | (8<<8)) -#define SQLITE_IOERR_RDLOCK (SQLITE_IOERR | (9<<8)) -#define SQLITE_IOERR_DELETE (SQLITE_IOERR | (10<<8)) -#define SQLITE_IOERR_BLOCKED (SQLITE_IOERR | (11<<8)) -#define SQLITE_IOERR_NOMEM (SQLITE_IOERR | (12<<8)) -#define SQLITE_IOERR_ACCESS (SQLITE_IOERR | (13<<8)) -#define SQLITE_IOERR_CHECKRESERVEDLOCK (SQLITE_IOERR | (14<<8)) -#define SQLITE_IOERR_LOCK (SQLITE_IOERR | (15<<8)) -#define SQLITE_IOERR_CLOSE (SQLITE_IOERR | (16<<8)) -#define SQLITE_IOERR_DIR_CLOSE (SQLITE_IOERR | (17<<8)) -#define SQLITE_IOERR_SHMOPEN (SQLITE_IOERR | (18<<8)) -#define SQLITE_IOERR_SHMSIZE (SQLITE_IOERR | (19<<8)) -#define SQLITE_IOERR_SHMLOCK (SQLITE_IOERR | (20<<8)) -#define SQLITE_IOERR_SHMMAP (SQLITE_IOERR | (21<<8)) -#define SQLITE_IOERR_SEEK (SQLITE_IOERR | (22<<8)) -#define SQLITE_IOERR_DELETE_NOENT (SQLITE_IOERR | (23<<8)) -#define SQLITE_IOERR_MMAP (SQLITE_IOERR | (24<<8)) -#define SQLITE_IOERR_GETTEMPPATH (SQLITE_IOERR | (25<<8)) -#define SQLITE_IOERR_CONVPATH (SQLITE_IOERR | (26<<8)) -#define SQLITE_LOCKED_SHAREDCACHE (SQLITE_LOCKED | (1<<8)) -#define SQLITE_BUSY_RECOVERY (SQLITE_BUSY | (1<<8)) -#define SQLITE_BUSY_SNAPSHOT (SQLITE_BUSY | (2<<8)) -#define SQLITE_CANTOPEN_NOTEMPDIR (SQLITE_CANTOPEN | (1<<8)) -#define SQLITE_CANTOPEN_ISDIR (SQLITE_CANTOPEN | (2<<8)) -#define SQLITE_CANTOPEN_FULLPATH (SQLITE_CANTOPEN | (3<<8)) -#define SQLITE_CANTOPEN_CONVPATH (SQLITE_CANTOPEN | (4<<8)) -#define SQLITE_CORRUPT_VTAB (SQLITE_CORRUPT | (1<<8)) -#define SQLITE_READONLY_RECOVERY (SQLITE_READONLY | (1<<8)) -#define SQLITE_READONLY_CANTLOCK (SQLITE_READONLY | (2<<8)) -#define SQLITE_READONLY_ROLLBACK (SQLITE_READONLY | (3<<8)) -#define SQLITE_READONLY_DBMOVED (SQLITE_READONLY | (4<<8)) -#define SQLITE_ABORT_ROLLBACK (SQLITE_ABORT | (2<<8)) -#define SQLITE_CONSTRAINT_CHECK (SQLITE_CONSTRAINT | (1<<8)) -#define SQLITE_CONSTRAINT_COMMITHOOK (SQLITE_CONSTRAINT | (2<<8)) -#define SQLITE_CONSTRAINT_FOREIGNKEY (SQLITE_CONSTRAINT | (3<<8)) -#define SQLITE_CONSTRAINT_FUNCTION (SQLITE_CONSTRAINT | (4<<8)) -#define SQLITE_CONSTRAINT_NOTNULL (SQLITE_CONSTRAINT | (5<<8)) -#define SQLITE_CONSTRAINT_PRIMARYKEY (SQLITE_CONSTRAINT | (6<<8)) -#define SQLITE_CONSTRAINT_TRIGGER (SQLITE_CONSTRAINT | (7<<8)) -#define SQLITE_CONSTRAINT_UNIQUE (SQLITE_CONSTRAINT | (8<<8)) -#define SQLITE_CONSTRAINT_VTAB (SQLITE_CONSTRAINT | (9<<8)) -#define SQLITE_CONSTRAINT_ROWID (SQLITE_CONSTRAINT |(10<<8)) -#define SQLITE_NOTICE_RECOVER_WAL (SQLITE_NOTICE | (1<<8)) -#define SQLITE_NOTICE_RECOVER_ROLLBACK (SQLITE_NOTICE | (2<<8)) -#define SQLITE_WARNING_AUTOINDEX (SQLITE_WARNING | (1<<8)) - -/* -** CAPI3REF: Flags For File Open Operations -** -** These bit values are intended for use in the -** 3rd parameter to the [sqlite3_open_v2()] interface and -** in the 4th 
parameter to the [sqlite3_vfs.xOpen] method. -*/ -#define SQLITE_OPEN_READONLY 0x00000001 /* Ok for sqlite3_open_v2() */ -#define SQLITE_OPEN_READWRITE 0x00000002 /* Ok for sqlite3_open_v2() */ -#define SQLITE_OPEN_CREATE 0x00000004 /* Ok for sqlite3_open_v2() */ -#define SQLITE_OPEN_DELETEONCLOSE 0x00000008 /* VFS only */ -#define SQLITE_OPEN_EXCLUSIVE 0x00000010 /* VFS only */ -#define SQLITE_OPEN_AUTOPROXY 0x00000020 /* VFS only */ -#define SQLITE_OPEN_URI 0x00000040 /* Ok for sqlite3_open_v2() */ -#define SQLITE_OPEN_MEMORY 0x00000080 /* Ok for sqlite3_open_v2() */ -#define SQLITE_OPEN_MAIN_DB 0x00000100 /* VFS only */ -#define SQLITE_OPEN_TEMP_DB 0x00000200 /* VFS only */ -#define SQLITE_OPEN_TRANSIENT_DB 0x00000400 /* VFS only */ -#define SQLITE_OPEN_MAIN_JOURNAL 0x00000800 /* VFS only */ -#define SQLITE_OPEN_TEMP_JOURNAL 0x00001000 /* VFS only */ -#define SQLITE_OPEN_SUBJOURNAL 0x00002000 /* VFS only */ -#define SQLITE_OPEN_MASTER_JOURNAL 0x00004000 /* VFS only */ -#define SQLITE_OPEN_NOMUTEX 0x00008000 /* Ok for sqlite3_open_v2() */ -#define SQLITE_OPEN_FULLMUTEX 0x00010000 /* Ok for sqlite3_open_v2() */ -#define SQLITE_OPEN_SHAREDCACHE 0x00020000 /* Ok for sqlite3_open_v2() */ -#define SQLITE_OPEN_PRIVATECACHE 0x00040000 /* Ok for sqlite3_open_v2() */ -#define SQLITE_OPEN_WAL 0x00080000 /* VFS only */ - -/* Reserved: 0x00F00000 */ - -/* -** CAPI3REF: Device Characteristics -** -** The xDeviceCharacteristics method of the [sqlite3_io_methods] -** object returns an integer which is a vector of these -** bit values expressing I/O characteristics of the mass storage -** device that holds the file that the [sqlite3_io_methods] -** refers to. -** -** The SQLITE_IOCAP_ATOMIC property means that all writes of -** any size are atomic. The SQLITE_IOCAP_ATOMICnnn values -** mean that writes of blocks that are nnn bytes in size and -** are aligned to an address which is an integer multiple of -** nnn are atomic. The SQLITE_IOCAP_SAFE_APPEND value means -** that when data is appended to a file, the data is appended -** first then the size of the file is extended, never the other -** way around. The SQLITE_IOCAP_SEQUENTIAL property means that -** information is written to disk in the same order as calls -** to xWrite(). The SQLITE_IOCAP_POWERSAFE_OVERWRITE property means that -** after reboot following a crash or power loss, the only bytes in a -** file that were written at the application level might have changed -** and that adjacent bytes, even bytes within the same sector are -** guaranteed to be unchanged. The SQLITE_IOCAP_UNDELETABLE_WHEN_OPEN -** flag indicate that a file cannot be deleted when open. The -** SQLITE_IOCAP_IMMUTABLE flag indicates that the file is on -** read-only media and cannot be changed even by processes with -** elevated privileges. 
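As a hedged illustration of the SQLITE_OPEN_* values above that are marked "Ok for sqlite3_open_v2()", the flags are ORed together in the 3rd parameter of sqlite3_open_v2(); the filename and the particular flag combination here are only an example.

#include "sqlite3.h"

/* Sketch: open (creating if necessary) with URI filename handling enabled.
** The final argument names a VFS; NULL selects the default VFS. */
static int open_example(sqlite3 **ppDb){
  return sqlite3_open_v2("file:example.db", ppDb,
                         SQLITE_OPEN_READWRITE | SQLITE_OPEN_CREATE | SQLITE_OPEN_URI,
                         NULL);
}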
-*/ -#define SQLITE_IOCAP_ATOMIC 0x00000001 -#define SQLITE_IOCAP_ATOMIC512 0x00000002 -#define SQLITE_IOCAP_ATOMIC1K 0x00000004 -#define SQLITE_IOCAP_ATOMIC2K 0x00000008 -#define SQLITE_IOCAP_ATOMIC4K 0x00000010 -#define SQLITE_IOCAP_ATOMIC8K 0x00000020 -#define SQLITE_IOCAP_ATOMIC16K 0x00000040 -#define SQLITE_IOCAP_ATOMIC32K 0x00000080 -#define SQLITE_IOCAP_ATOMIC64K 0x00000100 -#define SQLITE_IOCAP_SAFE_APPEND 0x00000200 -#define SQLITE_IOCAP_SEQUENTIAL 0x00000400 -#define SQLITE_IOCAP_UNDELETABLE_WHEN_OPEN 0x00000800 -#define SQLITE_IOCAP_POWERSAFE_OVERWRITE 0x00001000 -#define SQLITE_IOCAP_IMMUTABLE 0x00002000 - -/* -** CAPI3REF: File Locking Levels -** -** SQLite uses one of these integer values as the second -** argument to calls it makes to the xLock() and xUnlock() methods -** of an [sqlite3_io_methods] object. -*/ -#define SQLITE_LOCK_NONE 0 -#define SQLITE_LOCK_SHARED 1 -#define SQLITE_LOCK_RESERVED 2 -#define SQLITE_LOCK_PENDING 3 -#define SQLITE_LOCK_EXCLUSIVE 4 - -/* -** CAPI3REF: Synchronization Type Flags -** -** When SQLite invokes the xSync() method of an -** [sqlite3_io_methods] object it uses a combination of -** these integer values as the second argument. -** -** When the SQLITE_SYNC_DATAONLY flag is used, it means that the -** sync operation only needs to flush data to mass storage. Inode -** information need not be flushed. If the lower four bits of the flag -** equal SQLITE_SYNC_NORMAL, that means to use normal fsync() semantics. -** If the lower four bits equal SQLITE_SYNC_FULL, that means -** to use Mac OS X style fullsync instead of fsync(). -** -** Do not confuse the SQLITE_SYNC_NORMAL and SQLITE_SYNC_FULL flags -** with the [PRAGMA synchronous]=NORMAL and [PRAGMA synchronous]=FULL -** settings. The [synchronous pragma] determines when calls to the -** xSync VFS method occur and applies uniformly across all platforms. -** The SQLITE_SYNC_NORMAL and SQLITE_SYNC_FULL flags determine how -** energetic or rigorous or forceful the sync operations are and -** only make a difference on Mac OSX for the default SQLite code. -** (Third-party VFS implementations might also make the distinction -** between SQLITE_SYNC_NORMAL and SQLITE_SYNC_FULL, but among the -** operating systems natively supported by SQLite, only Mac OSX -** cares about the difference.) -*/ -#define SQLITE_SYNC_NORMAL 0x00002 -#define SQLITE_SYNC_FULL 0x00003 -#define SQLITE_SYNC_DATAONLY 0x00010 - -/* -** CAPI3REF: OS Interface Open File Handle -** -** An [sqlite3_file] object represents an open file in the -** [sqlite3_vfs | OS interface layer]. Individual OS interface -** implementations will -** want to subclass this object by appending additional fields -** for their own use. The pMethods entry is a pointer to an -** [sqlite3_io_methods] object that defines methods for performing -** I/O operations on the open file. -*/ -typedef struct sqlite3_file sqlite3_file; -struct sqlite3_file { - const struct sqlite3_io_methods *pMethods; /* Methods for an open file */ -}; - -/* -** CAPI3REF: OS Interface File Virtual Methods Object -** -** Every file opened by the [sqlite3_vfs.xOpen] method populates an -** [sqlite3_file] object (or, more commonly, a subclass of the -** [sqlite3_file] object) with a pointer to an instance of this object. -** This object defines the methods used to perform various operations -** against the open file represented by the [sqlite3_file] object. 
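The subclassing of [sqlite3_file] mentioned a little earlier is conventionally done by embedding the base object as the first member, so a pointer to the subclass can be passed wherever an sqlite3_file* is expected; the names below are hypothetical.

/* Hypothetical VFS-specific file object: sqlite3_file must come first so
** that ExampleFile* and sqlite3_file* refer to the same address. */
typedef struct ExampleFile ExampleFile;
struct ExampleFile {
  sqlite3_file base;   /* base.pMethods points at this VFS's sqlite3_io_methods */
  int fd;              /* illustrative extra state, e.g. an OS file descriptor */
};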
-** -** If the [sqlite3_vfs.xOpen] method sets the sqlite3_file.pMethods element -** to a non-NULL pointer, then the sqlite3_io_methods.xClose method -** may be invoked even if the [sqlite3_vfs.xOpen] reported that it failed. The -** only way to prevent a call to xClose following a failed [sqlite3_vfs.xOpen] -** is for the [sqlite3_vfs.xOpen] to set the sqlite3_file.pMethods element -** to NULL. -** -** The flags argument to xSync may be one of [SQLITE_SYNC_NORMAL] or -** [SQLITE_SYNC_FULL]. The first choice is the normal fsync(). -** The second choice is a Mac OS X style fullsync. The [SQLITE_SYNC_DATAONLY] -** flag may be ORed in to indicate that only the data of the file -** and not its inode needs to be synced. -** -** The integer values to xLock() and xUnlock() are one of -**
-**   • [SQLITE_LOCK_NONE],
-**   • [SQLITE_LOCK_SHARED],
-**   • [SQLITE_LOCK_RESERVED],
-**   • [SQLITE_LOCK_PENDING], or
-**   • [SQLITE_LOCK_EXCLUSIVE].
-**
-** xLock() increases the lock. xUnlock() decreases the lock. -** The xCheckReservedLock() method checks whether any database connection, -** either in this process or in some other process, is holding a RESERVED, -** PENDING, or EXCLUSIVE lock on the file. It returns true -** if such a lock exists and false otherwise. -** -** The xFileControl() method is a generic interface that allows custom -** VFS implementations to directly control an open file using the -** [sqlite3_file_control()] interface. The second "op" argument is an -** integer opcode. The third argument is a generic pointer intended to -** point to a structure that may contain arguments or space in which to -** write return values. Potential uses for xFileControl() might be -** functions to enable blocking locks with timeouts, to change the -** locking strategy (for example to use dot-file locks), to inquire -** about the status of a lock, or to break stale locks. The SQLite -** core reserves all opcodes less than 100 for its own use. -** A [SQLITE_FCNTL_LOCKSTATE | list of opcodes] less than 100 is available. -** Applications that define a custom xFileControl method should use opcodes -** greater than 100 to avoid conflicts. VFS implementations should -** return [SQLITE_NOTFOUND] for file control opcodes that they do not -** recognize. -** -** The xSectorSize() method returns the sector size of the -** device that underlies the file. The sector size is the -** minimum write that can be performed without disturbing -** other bytes in the file. The xDeviceCharacteristics() -** method returns a bit vector describing behaviors of the -** underlying device: -** -**
-**   • [SQLITE_IOCAP_ATOMIC]
-**   • [SQLITE_IOCAP_ATOMIC512]
-**   • [SQLITE_IOCAP_ATOMIC1K]
-**   • [SQLITE_IOCAP_ATOMIC2K]
-**   • [SQLITE_IOCAP_ATOMIC4K]
-**   • [SQLITE_IOCAP_ATOMIC8K]
-**   • [SQLITE_IOCAP_ATOMIC16K]
-**   • [SQLITE_IOCAP_ATOMIC32K]
-**   • [SQLITE_IOCAP_ATOMIC64K]
-**   • [SQLITE_IOCAP_SAFE_APPEND]
-**   • [SQLITE_IOCAP_SEQUENTIAL]
-** -** The SQLITE_IOCAP_ATOMIC property means that all writes of -** any size are atomic. The SQLITE_IOCAP_ATOMICnnn values -** mean that writes of blocks that are nnn bytes in size and -** are aligned to an address which is an integer multiple of -** nnn are atomic. The SQLITE_IOCAP_SAFE_APPEND value means -** that when data is appended to a file, the data is appended -** first then the size of the file is extended, never the other -** way around. The SQLITE_IOCAP_SEQUENTIAL property means that -** information is written to disk in the same order as calls -** to xWrite(). -** -** If xRead() returns SQLITE_IOERR_SHORT_READ it must also fill -** in the unread portions of the buffer with zeros. A VFS that -** fails to zero-fill short reads might seem to work. However, -** failure to zero-fill short reads will eventually lead to -** database corruption. -*/ -typedef struct sqlite3_io_methods sqlite3_io_methods; -struct sqlite3_io_methods { - int iVersion; - int (*xClose)(sqlite3_file*); - int (*xRead)(sqlite3_file*, void*, int iAmt, sqlite3_int64 iOfst); - int (*xWrite)(sqlite3_file*, const void*, int iAmt, sqlite3_int64 iOfst); - int (*xTruncate)(sqlite3_file*, sqlite3_int64 size); - int (*xSync)(sqlite3_file*, int flags); - int (*xFileSize)(sqlite3_file*, sqlite3_int64 *pSize); - int (*xLock)(sqlite3_file*, int); - int (*xUnlock)(sqlite3_file*, int); - int (*xCheckReservedLock)(sqlite3_file*, int *pResOut); - int (*xFileControl)(sqlite3_file*, int op, void *pArg); - int (*xSectorSize)(sqlite3_file*); - int (*xDeviceCharacteristics)(sqlite3_file*); - /* Methods above are valid for version 1 */ - int (*xShmMap)(sqlite3_file*, int iPg, int pgsz, int, void volatile**); - int (*xShmLock)(sqlite3_file*, int offset, int n, int flags); - void (*xShmBarrier)(sqlite3_file*); - int (*xShmUnmap)(sqlite3_file*, int deleteFlag); - /* Methods above are valid for version 2 */ - int (*xFetch)(sqlite3_file*, sqlite3_int64 iOfst, int iAmt, void **pp); - int (*xUnfetch)(sqlite3_file*, sqlite3_int64 iOfst, void *p); - /* Methods above are valid for version 3 */ - /* Additional methods may be added in future releases */ -}; - -/* -** CAPI3REF: Standard File Control Opcodes -** -** These integer constants are opcodes for the xFileControl method -** of the [sqlite3_io_methods] object and for the [sqlite3_file_control()] -** interface. -** -** The [SQLITE_FCNTL_LOCKSTATE] opcode is used for debugging. This -** opcode causes the xFileControl method to write the current state of -** the lock (one of [SQLITE_LOCK_NONE], [SQLITE_LOCK_SHARED], -** [SQLITE_LOCK_RESERVED], [SQLITE_LOCK_PENDING], or [SQLITE_LOCK_EXCLUSIVE]) -** into an integer that the pArg argument points to. This capability -** is used during testing and only needs to be supported when SQLITE_TEST -** is defined. -**
    -**
  • [[SQLITE_FCNTL_SIZE_HINT]] -** The [SQLITE_FCNTL_SIZE_HINT] opcode is used by SQLite to give the VFS -** layer a hint of how large the database file will grow to be during the -** current transaction. This hint is not guaranteed to be accurate but it -** is often close. The underlying VFS might choose to preallocate database -** file space based on this hint in order to help writes to the database -** file run faster. -** -**
  • [[SQLITE_FCNTL_CHUNK_SIZE]] -** The [SQLITE_FCNTL_CHUNK_SIZE] opcode is used to request that the VFS -** extends and truncates the database file in chunks of a size specified -** by the user. The fourth argument to [sqlite3_file_control()] should -** point to an integer (type int) containing the new chunk-size to use -** for the nominated database. Allocating database file space in large -** chunks (say 1MB at a time), may reduce file-system fragmentation and -** improve performance on some systems. -** -**
  • [[SQLITE_FCNTL_FILE_POINTER]] -** The [SQLITE_FCNTL_FILE_POINTER] opcode is used to obtain a pointer -** to the [sqlite3_file] object associated with a particular database -** connection. See the [sqlite3_file_control()] documentation for -** additional information. -** -**
  • [[SQLITE_FCNTL_SYNC_OMITTED]] -** No longer in use. -** -**
  • [[SQLITE_FCNTL_SYNC]] -** The [SQLITE_FCNTL_SYNC] opcode is generated internally by SQLite and -** sent to the VFS immediately before the xSync method is invoked on a -** database file descriptor. Or, if the xSync method is not invoked -** because the user has configured SQLite with -** [PRAGMA synchronous | PRAGMA synchronous=OFF] it is invoked in place -** of the xSync method. In most cases, the pointer argument passed with -** this file-control is NULL. However, if the database file is being synced -** as part of a multi-database commit, the argument points to a nul-terminated -** string containing the transactions master-journal file name. VFSes that -** do not need this signal should silently ignore this opcode. Applications -** should not call [sqlite3_file_control()] with this opcode as doing so may -** disrupt the operation of the specialized VFSes that do require it. -** -**
  • [[SQLITE_FCNTL_COMMIT_PHASETWO]] -** The [SQLITE_FCNTL_COMMIT_PHASETWO] opcode is generated internally by SQLite -** and sent to the VFS after a transaction has been committed immediately -** but before the database is unlocked. VFSes that do not need this signal -** should silently ignore this opcode. Applications should not call -** [sqlite3_file_control()] with this opcode as doing so may disrupt the -** operation of the specialized VFSes that do require it. -** -**
  • [[SQLITE_FCNTL_WIN32_AV_RETRY]] -** ^The [SQLITE_FCNTL_WIN32_AV_RETRY] opcode is used to configure automatic -** retry counts and intervals for certain disk I/O operations for the -** windows [VFS] in order to provide robustness in the presence of -** anti-virus programs. By default, the windows VFS will retry file read, -** file write, and file delete operations up to 10 times, with a delay -** of 25 milliseconds before the first retry and with the delay increasing -** by an additional 25 milliseconds with each subsequent retry. This -** opcode allows these two values (10 retries and 25 milliseconds of delay) -** to be adjusted. The values are changed for all database connections -** within the same process. The argument is a pointer to an array of two -** integers where the first integer i the new retry count and the second -** integer is the delay. If either integer is negative, then the setting -** is not changed but instead the prior value of that setting is written -** into the array entry, allowing the current retry settings to be -** interrogated. The zDbName parameter is ignored. -** -**
  • [[SQLITE_FCNTL_PERSIST_WAL]] -** ^The [SQLITE_FCNTL_PERSIST_WAL] opcode is used to set or query the -** persistent [WAL | Write Ahead Log] setting. By default, the auxiliary -** write ahead log and shared memory files used for transaction control -** are automatically deleted when the latest connection to the database -** closes. Setting persistent WAL mode causes those files to persist after -** close. Persisting the files is useful when other processes that do not -** have write permission on the directory containing the database file want -** to read the database file, as the WAL and shared memory files must exist -** in order for the database to be readable. The fourth parameter to -** [sqlite3_file_control()] for this opcode should be a pointer to an integer. -** That integer is 0 to disable persistent WAL mode or 1 to enable persistent -** WAL mode. If the integer is -1, then it is overwritten with the current -** WAL persistence setting. -** -**
  • [[SQLITE_FCNTL_POWERSAFE_OVERWRITE]] -** ^The [SQLITE_FCNTL_POWERSAFE_OVERWRITE] opcode is used to set or query the -** persistent "powersafe-overwrite" or "PSOW" setting. The PSOW setting -** determines the [SQLITE_IOCAP_POWERSAFE_OVERWRITE] bit of the -** xDeviceCharacteristics methods. The fourth parameter to -** [sqlite3_file_control()] for this opcode should be a pointer to an integer. -** That integer is 0 to disable zero-damage mode or 1 to enable zero-damage -** mode. If the integer is -1, then it is overwritten with the current -** zero-damage mode setting. -** -**
  • [[SQLITE_FCNTL_OVERWRITE]] -** ^The [SQLITE_FCNTL_OVERWRITE] opcode is invoked by SQLite after opening -** a write transaction to indicate that, unless it is rolled back for some -** reason, the entire database file will be overwritten by the current -** transaction. This is used by VACUUM operations. -** -**
  • [[SQLITE_FCNTL_VFSNAME]] -** ^The [SQLITE_FCNTL_VFSNAME] opcode can be used to obtain the names of -** all [VFSes] in the VFS stack. The names are of all VFS shims and the -** final bottom-level VFS are written into memory obtained from -** [sqlite3_malloc()] and the result is stored in the char* variable -** that the fourth parameter of [sqlite3_file_control()] points to. -** The caller is responsible for freeing the memory when done. As with -** all file-control actions, there is no guarantee that this will actually -** do anything. Callers should initialize the char* variable to a NULL -** pointer in case this file-control is not implemented. This file-control -** is intended for diagnostic use only. -** -**
  • [[SQLITE_FCNTL_PRAGMA]] -** ^Whenever a [PRAGMA] statement is parsed, an [SQLITE_FCNTL_PRAGMA] -** file control is sent to the open [sqlite3_file] object corresponding -** to the database file to which the pragma statement refers. ^The argument -** to the [SQLITE_FCNTL_PRAGMA] file control is an array of -** pointers to strings (char**) in which the second element of the array -** is the name of the pragma and the third element is the argument to the -** pragma or NULL if the pragma has no argument. ^The handler for an -** [SQLITE_FCNTL_PRAGMA] file control can optionally make the first element -** of the char** argument point to a string obtained from [sqlite3_mprintf()] -** or the equivalent and that string will become the result of the pragma or -** the error message if the pragma fails. ^If the -** [SQLITE_FCNTL_PRAGMA] file control returns [SQLITE_NOTFOUND], then normal -** [PRAGMA] processing continues. ^If the [SQLITE_FCNTL_PRAGMA] -** file control returns [SQLITE_OK], then the parser assumes that the -** VFS has handled the PRAGMA itself and the parser generates a no-op -** prepared statement. ^If the [SQLITE_FCNTL_PRAGMA] file control returns -** any result code other than [SQLITE_OK] or [SQLITE_NOTFOUND], that means -** that the VFS encountered an error while handling the [PRAGMA] and the -** compilation of the PRAGMA fails with an error. ^The [SQLITE_FCNTL_PRAGMA] -** file control occurs at the beginning of pragma statement analysis and so -** it is able to override built-in [PRAGMA] statements. -** -**
  • [[SQLITE_FCNTL_BUSYHANDLER]] -** ^The [SQLITE_FCNTL_BUSYHANDLER] -** file-control may be invoked by SQLite on the database file handle -** shortly after it is opened in order to provide a custom VFS with access -** to the connections busy-handler callback. The argument is of type (void **) -** - an array of two (void *) values. The first (void *) actually points -** to a function of type (int (*)(void *)). In order to invoke the connections -** busy-handler, this function should be invoked with the second (void *) in -** the array as the only argument. If it returns non-zero, then the operation -** should be retried. If it returns zero, the custom VFS should abandon the -** current operation. -** -**
  • [[SQLITE_FCNTL_TEMPFILENAME]] -** ^Application can invoke the [SQLITE_FCNTL_TEMPFILENAME] file-control -** to have SQLite generate a -** temporary filename using the same algorithm that is followed to generate -** temporary filenames for TEMP tables and other internal uses. The -** argument should be a char** which will be filled with the filename -** written into memory obtained from [sqlite3_malloc()]. The caller should -** invoke [sqlite3_free()] on the result to avoid a memory leak. -** -**
  • [[SQLITE_FCNTL_MMAP_SIZE]] -** The [SQLITE_FCNTL_MMAP_SIZE] file control is used to query or set the -** maximum number of bytes that will be used for memory-mapped I/O. -** The argument is a pointer to a value of type sqlite3_int64 that -** is an advisory maximum number of bytes in the file to memory map. The -** pointer is overwritten with the old value. The limit is not changed if -** the value originally pointed to is negative, and so the current limit -** can be queried by passing in a pointer to a negative number. This -** file-control is used internally to implement [PRAGMA mmap_size]. -** -**
  • [[SQLITE_FCNTL_TRACE]] -** The [SQLITE_FCNTL_TRACE] file control provides advisory information -** to the VFS about what the higher layers of the SQLite stack are doing. -** This file control is used by some VFS activity tracing [shims]. -** The argument is a zero-terminated string. Higher layers in the -** SQLite stack may generate instances of this file control if -** the [SQLITE_USE_FCNTL_TRACE] compile-time option is enabled. -** -**
  • [[SQLITE_FCNTL_HAS_MOVED]] -** The [SQLITE_FCNTL_HAS_MOVED] file control interprets its argument as a -** pointer to an integer and it writes a boolean into that integer depending -** on whether or not the file has been renamed, moved, or deleted since it -** was first opened. -** -**
  • [[SQLITE_FCNTL_WIN32_SET_HANDLE]] -** The [SQLITE_FCNTL_WIN32_SET_HANDLE] opcode is used for debugging. This -** opcode causes the xFileControl method to swap the file handle with the one -** pointed to by the pArg argument. This capability is used during testing -** and only needs to be supported when SQLITE_TEST is defined. -** -**
-*/ -#define SQLITE_FCNTL_LOCKSTATE 1 -#define SQLITE_GET_LOCKPROXYFILE 2 -#define SQLITE_SET_LOCKPROXYFILE 3 -#define SQLITE_LAST_ERRNO 4 -#define SQLITE_FCNTL_SIZE_HINT 5 -#define SQLITE_FCNTL_CHUNK_SIZE 6 -#define SQLITE_FCNTL_FILE_POINTER 7 -#define SQLITE_FCNTL_SYNC_OMITTED 8 -#define SQLITE_FCNTL_WIN32_AV_RETRY 9 -#define SQLITE_FCNTL_PERSIST_WAL 10 -#define SQLITE_FCNTL_OVERWRITE 11 -#define SQLITE_FCNTL_VFSNAME 12 -#define SQLITE_FCNTL_POWERSAFE_OVERWRITE 13 -#define SQLITE_FCNTL_PRAGMA 14 -#define SQLITE_FCNTL_BUSYHANDLER 15 -#define SQLITE_FCNTL_TEMPFILENAME 16 -#define SQLITE_FCNTL_MMAP_SIZE 18 -#define SQLITE_FCNTL_TRACE 19 -#define SQLITE_FCNTL_HAS_MOVED 20 -#define SQLITE_FCNTL_SYNC 21 -#define SQLITE_FCNTL_COMMIT_PHASETWO 22 -#define SQLITE_FCNTL_WIN32_SET_HANDLE 23 - -/* -** CAPI3REF: Mutex Handle -** -** The mutex module within SQLite defines [sqlite3_mutex] to be an -** abstract type for a mutex object. The SQLite core never looks -** at the internal representation of an [sqlite3_mutex]. It only -** deals with pointers to the [sqlite3_mutex] object. -** -** Mutexes are created using [sqlite3_mutex_alloc()]. -*/ -typedef struct sqlite3_mutex sqlite3_mutex; - -/* -** CAPI3REF: OS Interface Object -** -** An instance of the sqlite3_vfs object defines the interface between -** the SQLite core and the underlying operating system. The "vfs" -** in the name of the object stands for "virtual file system". See -** the [VFS | VFS documentation] for further information. -** -** The value of the iVersion field is initially 1 but may be larger in -** future versions of SQLite. Additional fields may be appended to this -** object when the iVersion value is increased. Note that the structure -** of the sqlite3_vfs object changes in the transaction between -** SQLite version 3.5.9 and 3.6.0 and yet the iVersion field was not -** modified. -** -** The szOsFile field is the size of the subclassed [sqlite3_file] -** structure used by this VFS. mxPathname is the maximum length of -** a pathname in this VFS. -** -** Registered sqlite3_vfs objects are kept on a linked list formed by -** the pNext pointer. The [sqlite3_vfs_register()] -** and [sqlite3_vfs_unregister()] interfaces manage this list -** in a thread-safe way. The [sqlite3_vfs_find()] interface -** searches the list. Neither the application code nor the VFS -** implementation should use the pNext pointer. -** -** The pNext field is the only field in the sqlite3_vfs -** structure that SQLite will ever modify. SQLite will only access -** or modify this field while holding a particular static mutex. -** The application should never modify anything within the sqlite3_vfs -** object once the object has been registered. -** -** The zName field holds the name of the VFS module. The name must -** be unique across all VFS modules. -** -** [[sqlite3_vfs.xOpen]] -** ^SQLite guarantees that the zFilename parameter to xOpen -** is either a NULL pointer or string obtained -** from xFullPathname() with an optional suffix added. -** ^If a suffix is added to the zFilename parameter, it will -** consist of a single "-" character followed by no more than -** 11 alphanumeric and/or "-" characters. -** ^SQLite further guarantees that -** the string will be valid and unchanged until xClose() is -** called. Because of the previous sentence, -** the [sqlite3_file] can safely store a pointer to the -** filename if it needs to remember the filename for some reason. 
-** If the zFilename parameter to xOpen is a NULL pointer then xOpen -** must invent its own temporary name for the file. ^Whenever the -** xFilename parameter is NULL it will also be the case that the -** flags parameter will include [SQLITE_OPEN_DELETEONCLOSE]. -** -** The flags argument to xOpen() includes all bits set in -** the flags argument to [sqlite3_open_v2()]. Or if [sqlite3_open()] -** or [sqlite3_open16()] is used, then flags includes at least -** [SQLITE_OPEN_READWRITE] | [SQLITE_OPEN_CREATE]. -** If xOpen() opens a file read-only then it sets *pOutFlags to -** include [SQLITE_OPEN_READONLY]. Other bits in *pOutFlags may be set. -** -** ^(SQLite will also add one of the following flags to the xOpen() -** call, depending on the object being opened: -** -**
-**   • [SQLITE_OPEN_MAIN_DB]
-**   • [SQLITE_OPEN_MAIN_JOURNAL]
-**   • [SQLITE_OPEN_TEMP_DB]
-**   • [SQLITE_OPEN_TEMP_JOURNAL]
-**   • [SQLITE_OPEN_TRANSIENT_DB]
-**   • [SQLITE_OPEN_SUBJOURNAL]
-**   • [SQLITE_OPEN_MASTER_JOURNAL]
-**   • [SQLITE_OPEN_WAL]
)^ -** -** The file I/O implementation can use the object type flags to -** change the way it deals with files. For example, an application -** that does not care about crash recovery or rollback might make -** the open of a journal file a no-op. Writes to this journal would -** also be no-ops, and any attempt to read the journal would return -** SQLITE_IOERR. Or the implementation might recognize that a database -** file will be doing page-aligned sector reads and writes in a random -** order and set up its I/O subsystem accordingly. -** -** SQLite might also add one of the following flags to the xOpen method: -** -**
-**   • [SQLITE_OPEN_DELETEONCLOSE]
-**   • [SQLITE_OPEN_EXCLUSIVE]
-** -** The [SQLITE_OPEN_DELETEONCLOSE] flag means the file should be -** deleted when it is closed. ^The [SQLITE_OPEN_DELETEONCLOSE] -** will be set for TEMP databases and their journals, transient -** databases, and subjournals. -** -** ^The [SQLITE_OPEN_EXCLUSIVE] flag is always used in conjunction -** with the [SQLITE_OPEN_CREATE] flag, which are both directly -** analogous to the O_EXCL and O_CREAT flags of the POSIX open() -** API. The SQLITE_OPEN_EXCLUSIVE flag, when paired with the -** SQLITE_OPEN_CREATE, is used to indicate that file should always -** be created, and that it is an error if it already exists. -** It is not used to indicate the file should be opened -** for exclusive access. -** -** ^At least szOsFile bytes of memory are allocated by SQLite -** to hold the [sqlite3_file] structure passed as the third -** argument to xOpen. The xOpen method does not have to -** allocate the structure; it should just fill it in. Note that -** the xOpen method must set the sqlite3_file.pMethods to either -** a valid [sqlite3_io_methods] object or to NULL. xOpen must do -** this even if the open fails. SQLite expects that the sqlite3_file.pMethods -** element will be valid after xOpen returns regardless of the success -** or failure of the xOpen call. -** -** [[sqlite3_vfs.xAccess]] -** ^The flags argument to xAccess() may be [SQLITE_ACCESS_EXISTS] -** to test for the existence of a file, or [SQLITE_ACCESS_READWRITE] to -** test whether a file is readable and writable, or [SQLITE_ACCESS_READ] -** to test whether a file is at least readable. The file can be a -** directory. -** -** ^SQLite will always allocate at least mxPathname+1 bytes for the -** output buffer xFullPathname. The exact size of the output buffer -** is also passed as a parameter to both methods. If the output buffer -** is not large enough, [SQLITE_CANTOPEN] should be returned. Since this is -** handled as a fatal error by SQLite, vfs implementations should endeavor -** to prevent this by setting mxPathname to a sufficiently large value. -** -** The xRandomness(), xSleep(), xCurrentTime(), and xCurrentTimeInt64() -** interfaces are not strictly a part of the filesystem, but they are -** included in the VFS structure for completeness. -** The xRandomness() function attempts to return nBytes bytes -** of good-quality randomness into zOut. The return value is -** the actual number of bytes of randomness obtained. -** The xSleep() method causes the calling thread to sleep for at -** least the number of microseconds given. ^The xCurrentTime() -** method returns a Julian Day Number for the current date and time as -** a floating point value. -** ^The xCurrentTimeInt64() method returns, as an integer, the Julian -** Day Number multiplied by 86400000 (the number of milliseconds in -** a 24-hour day). -** ^SQLite will use the xCurrentTimeInt64() method to get the current -** date and time if that method is available (if iVersion is 2 or -** greater and the function pointer is not NULL) and will fall back -** to xCurrentTime() if xCurrentTimeInt64() is unavailable. -** -** ^The xSetSystemCall(), xGetSystemCall(), and xNestSystemCall() interfaces -** are not used by the SQLite core. These optional interfaces are provided -** by some VFSes to facilitate testing of the VFS code. By overriding -** system calls with functions under its control, a test program can -** simulate faults and error conditions that would otherwise be difficult -** or impossible to induce. 
The set of system calls that can be overridden -** varies from one VFS to another, and from one version of the same VFS to the -** next. Applications that use these interfaces must be prepared for any -** or all of these interfaces to be NULL or for their behavior to change -** from one release to the next. Applications must not attempt to access -** any of these methods if the iVersion of the VFS is less than 3. -*/ -typedef struct sqlite3_vfs sqlite3_vfs; -typedef void (*sqlite3_syscall_ptr)(void); -struct sqlite3_vfs { - int iVersion; /* Structure version number (currently 3) */ - int szOsFile; /* Size of subclassed sqlite3_file */ - int mxPathname; /* Maximum file pathname length */ - sqlite3_vfs *pNext; /* Next registered VFS */ - const char *zName; /* Name of this virtual file system */ - void *pAppData; /* Pointer to application-specific data */ - int (*xOpen)(sqlite3_vfs*, const char *zName, sqlite3_file*, - int flags, int *pOutFlags); - int (*xDelete)(sqlite3_vfs*, const char *zName, int syncDir); - int (*xAccess)(sqlite3_vfs*, const char *zName, int flags, int *pResOut); - int (*xFullPathname)(sqlite3_vfs*, const char *zName, int nOut, char *zOut); - void *(*xDlOpen)(sqlite3_vfs*, const char *zFilename); - void (*xDlError)(sqlite3_vfs*, int nByte, char *zErrMsg); - void (*(*xDlSym)(sqlite3_vfs*,void*, const char *zSymbol))(void); - void (*xDlClose)(sqlite3_vfs*, void*); - int (*xRandomness)(sqlite3_vfs*, int nByte, char *zOut); - int (*xSleep)(sqlite3_vfs*, int microseconds); - int (*xCurrentTime)(sqlite3_vfs*, double*); - int (*xGetLastError)(sqlite3_vfs*, int, char *); - /* - ** The methods above are in version 1 of the sqlite_vfs object - ** definition. Those that follow are added in version 2 or later - */ - int (*xCurrentTimeInt64)(sqlite3_vfs*, sqlite3_int64*); - /* - ** The methods above are in versions 1 and 2 of the sqlite_vfs object. - ** Those below are for version 3 and greater. - */ - int (*xSetSystemCall)(sqlite3_vfs*, const char *zName, sqlite3_syscall_ptr); - sqlite3_syscall_ptr (*xGetSystemCall)(sqlite3_vfs*, const char *zName); - const char *(*xNextSystemCall)(sqlite3_vfs*, const char *zName); - /* - ** The methods above are in versions 1 through 3 of the sqlite_vfs object. - ** New fields may be appended in figure versions. The iVersion - ** value will increment whenever this happens. - */ -}; - -/* -** CAPI3REF: Flags for the xAccess VFS method -** -** These integer constants can be used as the third parameter to -** the xAccess method of an [sqlite3_vfs] object. They determine -** what kind of permissions the xAccess method is looking for. -** With SQLITE_ACCESS_EXISTS, the xAccess method -** simply checks whether the file exists. -** With SQLITE_ACCESS_READWRITE, the xAccess method -** checks whether the named directory is both readable and writable -** (in other words, if files can be added, removed, and renamed within -** the directory). -** The SQLITE_ACCESS_READWRITE constant is currently used only by the -** [temp_store_directory pragma], though this could change in a future -** release of SQLite. -** With SQLITE_ACCESS_READ, the xAccess method -** checks whether the file is readable. The SQLITE_ACCESS_READ constant is -** currently unused, though it might be used in a future release of -** SQLite. 
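One common pattern built on the sqlite3_vfs structure above is to locate an existing VFS with [sqlite3_vfs_find()] and register a renamed copy with [sqlite3_vfs_register()] before overriding selected methods. This is a hedged sketch; the shim name is made up.

#include "sqlite3.h"

static sqlite3_vfs shim_vfs;   /* static storage for the copied descriptor */

/* Sketch: copy the default VFS, rename it, and register the copy.
** A real shim would also replace some of the method pointers. */
static int register_shim_vfs(void){
  sqlite3_vfs *pBase = sqlite3_vfs_find(0);    /* 0 selects the default VFS */
  if( pBase==0 ) return SQLITE_ERROR;
  shim_vfs = *pBase;                           /* copies szOsFile, mxPathname, methods */
  shim_vfs.zName = "example-shim";             /* zName must be unique */
  return sqlite3_vfs_register(&shim_vfs, 0);   /* 0: do not make it the default */
}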
-*/ -#define SQLITE_ACCESS_EXISTS 0 -#define SQLITE_ACCESS_READWRITE 1 /* Used by PRAGMA temp_store_directory */ -#define SQLITE_ACCESS_READ 2 /* Unused */ - -/* -** CAPI3REF: Flags for the xShmLock VFS method -** -** These integer constants define the various locking operations -** allowed by the xShmLock method of [sqlite3_io_methods]. The -** following are the only legal combinations of flags to the -** xShmLock method: -** -**
-**   • SQLITE_SHM_LOCK | SQLITE_SHM_SHARED
-**   • SQLITE_SHM_LOCK | SQLITE_SHM_EXCLUSIVE
-**   • SQLITE_SHM_UNLOCK | SQLITE_SHM_SHARED
-**   • SQLITE_SHM_UNLOCK | SQLITE_SHM_EXCLUSIVE
-** -** When unlocking, the same SHARED or EXCLUSIVE flag must be supplied as -** was given no the corresponding lock. -** -** The xShmLock method can transition between unlocked and SHARED or -** between unlocked and EXCLUSIVE. It cannot transition between SHARED -** and EXCLUSIVE. -*/ -#define SQLITE_SHM_UNLOCK 1 -#define SQLITE_SHM_LOCK 2 -#define SQLITE_SHM_SHARED 4 -#define SQLITE_SHM_EXCLUSIVE 8 - -/* -** CAPI3REF: Maximum xShmLock index -** -** The xShmLock method on [sqlite3_io_methods] may use values -** between 0 and this upper bound as its "offset" argument. -** The SQLite core will never attempt to acquire or release a -** lock outside of this range -*/ -#define SQLITE_SHM_NLOCK 8 - - -/* -** CAPI3REF: Initialize The SQLite Library -** -** ^The sqlite3_initialize() routine initializes the -** SQLite library. ^The sqlite3_shutdown() routine -** deallocates any resources that were allocated by sqlite3_initialize(). -** These routines are designed to aid in process initialization and -** shutdown on embedded systems. Workstation applications using -** SQLite normally do not need to invoke either of these routines. -** -** A call to sqlite3_initialize() is an "effective" call if it is -** the first time sqlite3_initialize() is invoked during the lifetime of -** the process, or if it is the first time sqlite3_initialize() is invoked -** following a call to sqlite3_shutdown(). ^(Only an effective call -** of sqlite3_initialize() does any initialization. All other calls -** are harmless no-ops.)^ -** -** A call to sqlite3_shutdown() is an "effective" call if it is the first -** call to sqlite3_shutdown() since the last sqlite3_initialize(). ^(Only -** an effective call to sqlite3_shutdown() does any deinitialization. -** All other valid calls to sqlite3_shutdown() are harmless no-ops.)^ -** -** The sqlite3_initialize() interface is threadsafe, but sqlite3_shutdown() -** is not. The sqlite3_shutdown() interface must only be called from a -** single thread. All open [database connections] must be closed and all -** other SQLite resources must be deallocated prior to invoking -** sqlite3_shutdown(). -** -** Among other things, ^sqlite3_initialize() will invoke -** sqlite3_os_init(). Similarly, ^sqlite3_shutdown() -** will invoke sqlite3_os_end(). -** -** ^The sqlite3_initialize() routine returns [SQLITE_OK] on success. -** ^If for some reason, sqlite3_initialize() is unable to initialize -** the library (perhaps it is unable to allocate a needed resource such -** as a mutex) it returns an [error code] other than [SQLITE_OK]. -** -** ^The sqlite3_initialize() routine is called internally by many other -** SQLite interfaces so that an application usually does not need to -** invoke sqlite3_initialize() directly. For example, [sqlite3_open()] -** calls sqlite3_initialize() so the SQLite library will be automatically -** initialized when [sqlite3_open()] is called if it has not be initialized -** already. ^However, if SQLite is compiled with the [SQLITE_OMIT_AUTOINIT] -** compile-time option, then the automatic calls to sqlite3_initialize() -** are omitted and the application must call sqlite3_initialize() directly -** prior to using any other SQLite interface. For maximum portability, -** it is recommended that applications always invoke sqlite3_initialize() -** directly prior to using any other SQLite interface. Future releases -** of SQLite may require this. 
In other words, the behavior exhibited -** when SQLite is compiled with [SQLITE_OMIT_AUTOINIT] might become the -** default behavior in some future release of SQLite. -** -** The sqlite3_os_init() routine does operating-system specific -** initialization of the SQLite library. The sqlite3_os_end() -** routine undoes the effect of sqlite3_os_init(). Typical tasks -** performed by these routines include allocation or deallocation -** of static resources, initialization of global variables, -** setting up a default [sqlite3_vfs] module, or setting up -** a default configuration using [sqlite3_config()]. -** -** The application should never invoke either sqlite3_os_init() -** or sqlite3_os_end() directly. The application should only invoke -** sqlite3_initialize() and sqlite3_shutdown(). The sqlite3_os_init() -** interface is called automatically by sqlite3_initialize() and -** sqlite3_os_end() is called by sqlite3_shutdown(). Appropriate -** implementations for sqlite3_os_init() and sqlite3_os_end() -** are built into SQLite when it is compiled for Unix, Windows, or OS/2. -** When [custom builds | built for other platforms] -** (using the [SQLITE_OS_OTHER=1] compile-time -** option) the application must supply a suitable implementation for -** sqlite3_os_init() and sqlite3_os_end(). An application-supplied -** implementation of sqlite3_os_init() or sqlite3_os_end() -** must return [SQLITE_OK] on success and some other [error code] upon -** failure. -*/ -SQLITE_API int sqlite3_initialize(void); -SQLITE_API int sqlite3_shutdown(void); -SQLITE_API int sqlite3_os_init(void); -SQLITE_API int sqlite3_os_end(void); - -/* -** CAPI3REF: Configuring The SQLite Library -** -** The sqlite3_config() interface is used to make global configuration -** changes to SQLite in order to tune SQLite to the specific needs of -** the application. The default configuration is recommended for most -** applications and so this routine is usually not necessary. It is -** provided to support rare applications with unusual needs. -** -** The sqlite3_config() interface is not threadsafe. The application -** must insure that no other SQLite interfaces are invoked by other -** threads while sqlite3_config() is running. Furthermore, sqlite3_config() -** may only be invoked prior to library initialization using -** [sqlite3_initialize()] or after shutdown by [sqlite3_shutdown()]. -** ^If sqlite3_config() is called after [sqlite3_initialize()] and before -** [sqlite3_shutdown()] then it will return SQLITE_MISUSE. -** Note, however, that ^sqlite3_config() can be called as part of the -** implementation of an application-defined [sqlite3_os_init()]. -** -** The first argument to sqlite3_config() is an integer -** [configuration option] that determines -** what property of SQLite is to be configured. Subsequent arguments -** vary depending on the [configuration option] -** in the first argument. -** -** ^When a configuration option is set, sqlite3_config() returns [SQLITE_OK]. -** ^If the option is unknown or SQLite is unable to set the option -** then this routine returns a non-zero [error code]. -*/ -SQLITE_API int sqlite3_config(int, ...); - -/* -** CAPI3REF: Configure database connections -** -** The sqlite3_db_config() interface is used to make configuration -** changes to a [database connection]. The interface is similar to -** [sqlite3_config()] except that the changes apply to a single -** [database connection] (specified in the first argument). -** -** The second argument to sqlite3_db_config(D,V,...) 
is the -** [SQLITE_DBCONFIG_LOOKASIDE | configuration verb] - an integer code -** that indicates what aspect of the [database connection] is being configured. -** Subsequent arguments vary depending on the configuration verb. -** -** ^Calls to sqlite3_db_config() return SQLITE_OK if and only if -** the call is considered successful. -*/ -SQLITE_API int sqlite3_db_config(sqlite3*, int op, ...); - -/* -** CAPI3REF: Memory Allocation Routines -** -** An instance of this object defines the interface between SQLite -** and low-level memory allocation routines. -** -** This object is used in only one place in the SQLite interface. -** A pointer to an instance of this object is the argument to -** [sqlite3_config()] when the configuration option is -** [SQLITE_CONFIG_MALLOC] or [SQLITE_CONFIG_GETMALLOC]. -** By creating an instance of this object -** and passing it to [sqlite3_config]([SQLITE_CONFIG_MALLOC]) -** during configuration, an application can specify an alternative -** memory allocation subsystem for SQLite to use for all of its -** dynamic memory needs. -** -** Note that SQLite comes with several [built-in memory allocators] -** that are perfectly adequate for the overwhelming majority of applications -** and that this object is only useful to a tiny minority of applications -** with specialized memory allocation requirements. This object is -** also used during testing of SQLite in order to specify an alternative -** memory allocator that simulates memory out-of-memory conditions in -** order to verify that SQLite recovers gracefully from such -** conditions. -** -** The xMalloc, xRealloc, and xFree methods must work like the -** malloc(), realloc() and free() functions from the standard C library. -** ^SQLite guarantees that the second argument to -** xRealloc is always a value returned by a prior call to xRoundup. -** -** xSize should return the allocated size of a memory allocation -** previously obtained from xMalloc or xRealloc. The allocated size -** is always at least as big as the requested size but may be larger. -** -** The xRoundup method returns what would be the allocated size of -** a memory allocation given a particular requested size. Most memory -** allocators round up memory allocations at least to the next multiple -** of 8. Some allocators round up to a larger multiple or to a power of 2. -** Every memory allocation request coming in through [sqlite3_malloc()] -** or [sqlite3_realloc()] first calls xRoundup. If xRoundup returns 0, -** that causes the corresponding memory allocation to fail. -** -** The xInit method initializes the memory allocator. For example, -** it might allocate any require mutexes or initialize internal data -** structures. The xShutdown method is invoked (indirectly) by -** [sqlite3_shutdown()] and should deallocate any resources acquired -** by xInit. The pAppData pointer is used as the only parameter to -** xInit and xShutdown. -** -** SQLite holds the [SQLITE_MUTEX_STATIC_MASTER] mutex when it invokes -** the xInit method, so the xInit method need not be threadsafe. The -** xShutdown method is only called from [sqlite3_shutdown()] so it does -** not need to be threadsafe either. For all other methods, SQLite -** holds the [SQLITE_MUTEX_STATIC_MEM] mutex as long as the -** [SQLITE_CONFIG_MEMSTATUS] configuration option is turned on (which -** it is by default) and so the methods are automatically serialized. 
-** However, if [SQLITE_CONFIG_MEMSTATUS] is disabled, then the other -** methods must be threadsafe or else make their own arrangements for -** serialization. -** -** SQLite will never invoke xInit() more than once without an intervening -** call to xShutdown(). -*/ -typedef struct sqlite3_mem_methods sqlite3_mem_methods; -struct sqlite3_mem_methods { - void *(*xMalloc)(int); /* Memory allocation function */ - void (*xFree)(void*); /* Free a prior allocation */ - void *(*xRealloc)(void*,int); /* Resize an allocation */ - int (*xSize)(void*); /* Return the size of an allocation */ - int (*xRoundup)(int); /* Round up request size to allocation size */ - int (*xInit)(void*); /* Initialize the memory allocator */ - void (*xShutdown)(void*); /* Deinitialize the memory allocator */ - void *pAppData; /* Argument to xInit() and xShutdown() */ -}; - -/* -** CAPI3REF: Configuration Options -** KEYWORDS: {configuration option} -** -** These constants are the available integer configuration options that -** can be passed as the first argument to the [sqlite3_config()] interface. -** -** New configuration options may be added in future releases of SQLite. -** Existing configuration options might be discontinued. Applications -** should check the return code from [sqlite3_config()] to make sure that -** the call worked. The [sqlite3_config()] interface will return a -** non-zero [error code] if a discontinued or unsupported configuration option -** is invoked. -** -**
-** [[SQLITE_CONFIG_SINGLETHREAD]]
SQLITE_CONFIG_SINGLETHREAD
-**
There are no arguments to this option. ^This option sets the -** [threading mode] to Single-thread. In other words, it disables -** all mutexing and puts SQLite into a mode where it can only be used -** by a single thread. ^If SQLite is compiled with -** the [SQLITE_THREADSAFE | SQLITE_THREADSAFE=0] compile-time option then -** it is not possible to change the [threading mode] from its default -** value of Single-thread and so [sqlite3_config()] will return -** [SQLITE_ERROR] if called with the SQLITE_CONFIG_SINGLETHREAD -** configuration option.
-** -** [[SQLITE_CONFIG_MULTITHREAD]]
SQLITE_CONFIG_MULTITHREAD
-**
There are no arguments to this option. ^This option sets the -** [threading mode] to Multi-thread. In other words, it disables -** mutexing on [database connection] and [prepared statement] objects. -** The application is responsible for serializing access to -** [database connections] and [prepared statements]. But other mutexes -** are enabled so that SQLite will be safe to use in a multi-threaded -** environment as long as no two threads attempt to use the same -** [database connection] at the same time. ^If SQLite is compiled with -** the [SQLITE_THREADSAFE | SQLITE_THREADSAFE=0] compile-time option then -** it is not possible to set the Multi-thread [threading mode] and -** [sqlite3_config()] will return [SQLITE_ERROR] if called with the -** SQLITE_CONFIG_MULTITHREAD configuration option.
-** -** [[SQLITE_CONFIG_SERIALIZED]]
SQLITE_CONFIG_SERIALIZED
-**
There are no arguments to this option. ^This option sets the -** [threading mode] to Serialized. In other words, this option enables -** all mutexes including the recursive -** mutexes on [database connection] and [prepared statement] objects. -** In this mode (which is the default when SQLite is compiled with -** [SQLITE_THREADSAFE=1]) the SQLite library will itself serialize access -** to [database connections] and [prepared statements] so that the -** application is free to use the same [database connection] or the -** same [prepared statement] in different threads at the same time. -** ^If SQLite is compiled with -** the [SQLITE_THREADSAFE | SQLITE_THREADSAFE=0] compile-time option then -** it is not possible to set the Serialized [threading mode] and -** [sqlite3_config()] will return [SQLITE_ERROR] if called with the -** SQLITE_CONFIG_SERIALIZED configuration option.
-** -** [[SQLITE_CONFIG_MALLOC]]
SQLITE_CONFIG_MALLOC
-**
^(This option takes a single argument which is a pointer to an -** instance of the [sqlite3_mem_methods] structure. The argument specifies -** alternative low-level memory allocation routines to be used in place of -** the memory allocation routines built into SQLite.)^ ^SQLite makes -** its own private copy of the content of the [sqlite3_mem_methods] structure -** before the [sqlite3_config()] call returns.
-** -** [[SQLITE_CONFIG_GETMALLOC]]
SQLITE_CONFIG_GETMALLOC
-**
^(This option takes a single argument which is a pointer to an -** instance of the [sqlite3_mem_methods] structure. The [sqlite3_mem_methods] -** structure is filled with the currently defined memory allocation routines.)^ -** This option can be used to overload the default memory allocation -** routines with a wrapper that simulates memory allocation failure or -** tracks memory usage, for example.
-** -** [[SQLITE_CONFIG_MEMSTATUS]]
SQLITE_CONFIG_MEMSTATUS
-**
^This option takes a single argument of type int, interpreted as a -** boolean, which enables or disables the collection of memory allocation -** statistics. ^(When memory allocation statistics are disabled, the -** following SQLite interfaces become non-operational: -**
    -**
  • [sqlite3_memory_used()] -**
  • [sqlite3_memory_highwater()] -**
  • [sqlite3_soft_heap_limit64()] -**
  • [sqlite3_status()] -**
)^ -** ^Memory allocation statistics are enabled by default unless SQLite is -** compiled with [SQLITE_DEFAULT_MEMSTATUS]=0 in which case memory -** allocation statistics are disabled by default. -**
-** -** [[SQLITE_CONFIG_SCRATCH]]
SQLITE_CONFIG_SCRATCH
-**
^This option specifies a static memory buffer that SQLite can use for -** scratch memory. There are three arguments: A pointer to an 8-byte -** aligned memory buffer from which the scratch allocations will be -** drawn, the size of each scratch allocation (sz), -** and the maximum number of scratch allocations (N). The sz -** argument must be a multiple of 16. -** The first argument must be a pointer to an 8-byte aligned buffer -** of at least sz*N bytes of memory. -** ^SQLite will use no more than two scratch buffers per thread. So -** N should be set to twice the expected maximum number of threads. -** ^SQLite will never require a scratch buffer that is more than 6 -** times the database page size. ^If SQLite needs additional -** scratch memory beyond what is provided by this configuration option, then -** [sqlite3_malloc()] will be used to obtain the memory needed.
-** -** [[SQLITE_CONFIG_PAGECACHE]]
SQLITE_CONFIG_PAGECACHE
-**
^This option specifies a static memory buffer that SQLite can use for -** the database page cache with the default page cache implementation. -** This configuration should not be used if an application-defined page -** cache implementation is loaded using the SQLITE_CONFIG_PCACHE2 option. -** There are three arguments to this option: A pointer to 8-byte aligned -** memory, the size of each page buffer (sz), and the number of pages (N). -** The sz argument should be the size of the largest database page -** (a power of two between 512 and 32768) plus a little extra for each -** page header. ^The page header size is 20 to 40 bytes depending on -** the host architecture. ^It is harmless, apart from the wasted memory, -** to make sz a little too large. The first -** argument should point to an allocation of at least sz*N bytes of memory. -** ^SQLite will use the memory provided by the first argument to satisfy its -** memory needs for the first N pages that it adds to cache. ^If additional -** page cache memory is needed beyond what is provided by this option, then -** SQLite goes to [sqlite3_malloc()] for the additional storage space. -** The pointer in the first argument must -** be aligned to an 8-byte boundary or subsequent behavior of SQLite -** will be undefined.
-** -** [[SQLITE_CONFIG_HEAP]]
SQLITE_CONFIG_HEAP
-**
^This option specifies a static memory buffer that SQLite will use -** for all of its dynamic memory allocation needs beyond those provided -** for by [SQLITE_CONFIG_SCRATCH] and [SQLITE_CONFIG_PAGECACHE]. -** There are three arguments: An 8-byte aligned pointer to the memory, -** the number of bytes in the memory buffer, and the minimum allocation size. -** ^If the first pointer (the memory pointer) is NULL, then SQLite reverts -** to using its default memory allocator (the system malloc() implementation), -** undoing any prior invocation of [SQLITE_CONFIG_MALLOC]. ^If the -** memory pointer is not NULL and either [SQLITE_ENABLE_MEMSYS3] or -** [SQLITE_ENABLE_MEMSYS5] are defined, then the alternative memory -** allocator is engaged to handle all of SQLite's memory allocation needs. -** The first pointer (the memory pointer) must be aligned to an 8-byte -** boundary or subsequent behavior of SQLite will be undefined. -** The minimum allocation size is capped at 2**12. Reasonable values -** for the minimum allocation size are 2**5 through 2**8.
-** -** [[SQLITE_CONFIG_MUTEX]]
SQLITE_CONFIG_MUTEX
-**
^(This option takes a single argument which is a pointer to an -** instance of the [sqlite3_mutex_methods] structure. The argument specifies -** alternative low-level mutex routines to be used in place of -** the mutex routines built into SQLite.)^ ^SQLite makes a copy of the -** content of the [sqlite3_mutex_methods] structure before the call to -** [sqlite3_config()] returns. ^If SQLite is compiled with -** the [SQLITE_THREADSAFE | SQLITE_THREADSAFE=0] compile-time option then -** the entire mutexing subsystem is omitted from the build and hence calls to -** [sqlite3_config()] with the SQLITE_CONFIG_MUTEX configuration option will -** return [SQLITE_ERROR].
-** -** [[SQLITE_CONFIG_GETMUTEX]]
SQLITE_CONFIG_GETMUTEX
-**
^(This option takes a single argument which is a pointer to an -** instance of the [sqlite3_mutex_methods] structure. The -** [sqlite3_mutex_methods] -** structure is filled with the currently defined mutex routines.)^ -** This option can be used to overload the default mutex allocation -** routines with a wrapper used to track mutex usage for performance -** profiling or testing, for example. ^If SQLite is compiled with -** the [SQLITE_THREADSAFE | SQLITE_THREADSAFE=0] compile-time option then -** the entire mutexing subsystem is omitted from the build and hence calls to -** [sqlite3_config()] with the SQLITE_CONFIG_GETMUTEX configuration option will -** return [SQLITE_ERROR].
-** -** [[SQLITE_CONFIG_LOOKASIDE]]
SQLITE_CONFIG_LOOKASIDE
-**
^(This option takes two arguments that determine the default -** memory allocation for the lookaside memory allocator on each -** [database connection]. The first argument is the -** size of each lookaside buffer slot and the second is the number of -** slots allocated to each database connection.)^ ^(This option sets the -** default lookaside size. The [SQLITE_DBCONFIG_LOOKASIDE] -** verb to [sqlite3_db_config()] can be used to change the lookaside -** configuration on individual connections.)^
-** -** [[SQLITE_CONFIG_PCACHE2]]
SQLITE_CONFIG_PCACHE2
-**
^(This option takes a single argument which is a pointer to -** an [sqlite3_pcache_methods2] object. This object specifies the interface -** to a custom page cache implementation.)^ ^SQLite makes a copy of the -** object and uses it for page cache memory allocations.
-** -** [[SQLITE_CONFIG_GETPCACHE2]]
SQLITE_CONFIG_GETPCACHE2
-**
^(This option takes a single argument which is a pointer to an -** [sqlite3_pcache_methods2] object. SQLite copies the current -** page cache implementation into that object.)^
-** -** [[SQLITE_CONFIG_LOG]]
SQLITE_CONFIG_LOG
-**
The SQLITE_CONFIG_LOG option is used to configure the SQLite -** global [error log]. -** (^The SQLITE_CONFIG_LOG option takes two arguments: a pointer to a -** function with a call signature of void(*)(void*,int,const char*), -** and a pointer to void. ^If the function pointer is not NULL, it is -** invoked by [sqlite3_log()] to process each logging event. ^If the -** function pointer is NULL, the [sqlite3_log()] interface becomes a no-op. -** ^The void pointer that is the second argument to SQLITE_CONFIG_LOG is -** passed through as the first parameter to the application-defined logger -** function whenever that function is invoked. ^The second parameter to -** the logger function is a copy of the first parameter to the corresponding -** [sqlite3_log()] call and is intended to be a [result code] or an -** [extended result code]. ^The third parameter passed to the logger is the -** log message after formatting via [sqlite3_snprintf()]. -** The SQLite logging interface is not reentrant; the logger function -** supplied by the application must not invoke any SQLite interface. -** In a multi-threaded application, the application-defined logger -** function must be threadsafe. (An illustrative call sequence appears after this list of options.)
-** -** [[SQLITE_CONFIG_URI]]
SQLITE_CONFIG_URI -**
^(This option takes a single argument of type int. If non-zero, then -** URI handling is globally enabled. If the parameter is zero, then URI handling -** is globally disabled.)^ ^If URI handling is globally enabled, all filenames -** passed to [sqlite3_open()], [sqlite3_open_v2()], [sqlite3_open16()] or -** specified as part of [ATTACH] commands are interpreted as URIs, regardless -** of whether or not the [SQLITE_OPEN_URI] flag is set when the database -** connection is opened. ^If it is globally disabled, filenames are -** only interpreted as URIs if the SQLITE_OPEN_URI flag is set when the -** database connection is opened. ^(By default, URI handling is globally -** disabled. The default value may be changed by compiling with the -** [SQLITE_USE_URI] symbol defined.)^ -** -** [[SQLITE_CONFIG_COVERING_INDEX_SCAN]]
SQLITE_CONFIG_COVERING_INDEX_SCAN -**
^This option takes a single integer argument which is interpreted as -** a boolean in order to enable or disable the use of covering indices for -** full table scans in the query optimizer. ^The default setting is determined -** by the [SQLITE_ALLOW_COVERING_INDEX_SCAN] compile-time option, or is "on" -** if that compile-time option is omitted. -** The ability to disable the use of covering indices for full table scans -** is provided because some incorrectly coded legacy applications might malfunction -** when the optimization is enabled. Providing the ability to -** disable the optimization allows the older, buggy application code to work -** without change even with newer versions of SQLite. -** -** [[SQLITE_CONFIG_PCACHE]] [[SQLITE_CONFIG_GETPCACHE]] -**
SQLITE_CONFIG_PCACHE and SQLITE_CONFIG_GETPCACHE -**
These options are obsolete and should not be used by new code. -** They are retained for backwards compatibility but are now no-ops. -**
-** -** [[SQLITE_CONFIG_SQLLOG]] -**
SQLITE_CONFIG_SQLLOG -**
This option is only available if SQLite is compiled with the -** [SQLITE_ENABLE_SQLLOG] pre-processor macro defined. The first argument should -** be a pointer to a function of type void(*)(void*,sqlite3*,const char*, int). -** The second should be of type (void*). The callback is invoked by the library -** in three separate circumstances, identified by the value passed as the -** fourth parameter. If the fourth parameter is 0, then the database connection -** passed as the second argument has just been opened. The third argument -** points to a buffer containing the name of the main database file. If the -** fourth parameter is 1, then the SQL statement that the third parameter -** points to has just been executed. Or, if the fourth parameter is 2, then -** the connection being passed as the second parameter is being closed. The -** third parameter is passed NULL in this case. An example of using this -** configuration option can be seen in the "test_sqllog.c" source file in -** the canonical SQLite source tree.
-** -** [[SQLITE_CONFIG_MMAP_SIZE]] -**
SQLITE_CONFIG_MMAP_SIZE -**
^SQLITE_CONFIG_MMAP_SIZE takes two 64-bit integer (sqlite3_int64) values -** that are the default mmap size limit (the default setting for -** [PRAGMA mmap_size]) and the maximum allowed mmap size limit. -** ^The default setting can be overridden by each database connection using -** either the [PRAGMA mmap_size] command, or by using the -** [SQLITE_FCNTL_MMAP_SIZE] file control. ^(The maximum allowed mmap size -** cannot be changed at run-time. Nor may the maximum allowed mmap size -** exceed the compile-time maximum mmap size set by the -** [SQLITE_MAX_MMAP_SIZE] compile-time option.)^ -** ^If either argument to this option is negative, then that argument is -** changed to its compile-time default. -** -** [[SQLITE_CONFIG_WIN32_HEAPSIZE]] -**
SQLITE_CONFIG_WIN32_HEAPSIZE -**
^This option is only available if SQLite is compiled for Windows -** with the [SQLITE_WIN32_MALLOC] pre-processor macro defined. -** SQLITE_CONFIG_WIN32_HEAPSIZE takes a 32-bit unsigned integer value -** that specifies the maximum size of the created heap. -**
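The option descriptions above are easier to follow with a concrete call sequence. The sketch below is editorial, not part of the header being removed; it assumes an application linked against this amalgamation, and the callback name is a hypothetical:

    #include <stdio.h>
    #include <sqlite3.h>

    /* Hypothetical error-log sink: the second argument is a (possibly
    ** extended) result code, the third is the pre-formatted message.
    ** It must not call back into SQLite. */
    static void log_callback(void *pArg, int errCode, const char *zMsg){
      (void)pArg;
      fprintf(stderr, "sqlite3 log (%d): %s\n", errCode, zMsg);
    }

    int main(void){
      /* Global configuration is only permitted before the library is
      ** initialized (or after sqlite3_shutdown()). */
      sqlite3_config(SQLITE_CONFIG_LOG, log_callback, (void*)0);
      sqlite3_config(SQLITE_CONFIG_SERIALIZED);   /* no-argument option */
      return sqlite3_initialize()==SQLITE_OK ? 0 : 1;
    }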
-*/ -#define SQLITE_CONFIG_SINGLETHREAD 1 /* nil */ -#define SQLITE_CONFIG_MULTITHREAD 2 /* nil */ -#define SQLITE_CONFIG_SERIALIZED 3 /* nil */ -#define SQLITE_CONFIG_MALLOC 4 /* sqlite3_mem_methods* */ -#define SQLITE_CONFIG_GETMALLOC 5 /* sqlite3_mem_methods* */ -#define SQLITE_CONFIG_SCRATCH 6 /* void*, int sz, int N */ -#define SQLITE_CONFIG_PAGECACHE 7 /* void*, int sz, int N */ -#define SQLITE_CONFIG_HEAP 8 /* void*, int nByte, int min */ -#define SQLITE_CONFIG_MEMSTATUS 9 /* boolean */ -#define SQLITE_CONFIG_MUTEX 10 /* sqlite3_mutex_methods* */ -#define SQLITE_CONFIG_GETMUTEX 11 /* sqlite3_mutex_methods* */ -/* previously SQLITE_CONFIG_CHUNKALLOC 12 which is now unused. */ -#define SQLITE_CONFIG_LOOKASIDE 13 /* int int */ -#define SQLITE_CONFIG_PCACHE 14 /* no-op */ -#define SQLITE_CONFIG_GETPCACHE 15 /* no-op */ -#define SQLITE_CONFIG_LOG 16 /* xFunc, void* */ -#define SQLITE_CONFIG_URI 17 /* int */ -#define SQLITE_CONFIG_PCACHE2 18 /* sqlite3_pcache_methods2* */ -#define SQLITE_CONFIG_GETPCACHE2 19 /* sqlite3_pcache_methods2* */ -#define SQLITE_CONFIG_COVERING_INDEX_SCAN 20 /* int */ -#define SQLITE_CONFIG_SQLLOG 21 /* xSqllog, void* */ -#define SQLITE_CONFIG_MMAP_SIZE 22 /* sqlite3_int64, sqlite3_int64 */ -#define SQLITE_CONFIG_WIN32_HEAPSIZE 23 /* int nByte */ - -/* -** CAPI3REF: Database Connection Configuration Options -** -** These constants are the available integer configuration options that -** can be passed as the second argument to the [sqlite3_db_config()] interface. -** -** New configuration options may be added in future releases of SQLite. -** Existing configuration options might be discontinued. Applications -** should check the return code from [sqlite3_db_config()] to make sure that -** the call worked. ^The [sqlite3_db_config()] interface will return a -** non-zero [error code] if a discontinued or unsupported configuration option -** is invoked. -** -**
-**
SQLITE_DBCONFIG_LOOKASIDE
-**
^This option takes three additional arguments that determine the -** [lookaside memory allocator] configuration for the [database connection]. -** ^The first argument (the third parameter to [sqlite3_db_config()]) is a -** pointer to a memory buffer to use for lookaside memory. -** ^The first argument after the SQLITE_DBCONFIG_LOOKASIDE verb -** may be NULL in which case SQLite will allocate the -** lookaside buffer itself using [sqlite3_malloc()]. ^The second argument is the -** size of each lookaside buffer slot. ^The third argument is the number of -** slots. The size of the buffer in the first argument must be greater than -** or equal to the product of the second and third arguments. The buffer -** must be aligned to an 8-byte boundary. ^If the second argument to -** SQLITE_DBCONFIG_LOOKASIDE is not a multiple of 8, it is internally -** rounded down to the next smaller multiple of 8. ^(The lookaside memory -** configuration for a database connection can only be changed when that -** connection is not currently using lookaside memory, or in other words -** when the "current value" returned by -** [sqlite3_db_status](D,[SQLITE_DBSTATUS_LOOKASIDE_USED],...) is zero. -** Any attempt to change the lookaside memory configuration when lookaside -** memory is in use leaves the configuration unchanged and returns -** [SQLITE_BUSY].)^
-** -**
SQLITE_DBCONFIG_ENABLE_FKEY
-**
^This option is used to enable or disable the enforcement of -** [foreign key constraints]. There should be two additional arguments. -** The first argument is an integer which is 0 to disable FK enforcement, -** positive to enable FK enforcement or negative to leave FK enforcement -** unchanged. The second parameter is a pointer to an integer into which -** is written 0 or 1 to indicate whether FK enforcement is off or on -** following this call. The second parameter may be a NULL pointer, in -** which case the FK enforcement setting is not reported back. (A short usage sketch appears after this list of options.)
-** -**
SQLITE_DBCONFIG_ENABLE_TRIGGER
-**
^This option is used to enable or disable [CREATE TRIGGER | triggers]. -** There should be two additional arguments. -** The first argument is an integer which is 0 to disable triggers, -** positive to enable triggers or negative to leave the setting unchanged. -** The second parameter is a pointer to an integer into which -** is written 0 or 1 to indicate whether triggers are disabled or enabled -** following this call. The second parameter may be a NULL pointer, in -** which case the trigger setting is not reported back.
-** -**
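As a rough illustration of the verbs above (again editorial, not part of the removed header), this sketch turns foreign-key enforcement on for a single connection and reads the effective state back through the second argument:

    #include <stdio.h>
    #include <sqlite3.h>

    /* Enable FK enforcement on one connection; `enabled` receives the
    ** state (0 or 1) that is in effect after the call. */
    static int enable_foreign_keys(sqlite3 *db){
      int enabled = -1;
      int rc = sqlite3_db_config(db, SQLITE_DBCONFIG_ENABLE_FKEY, 1, &enabled);
      if( rc==SQLITE_OK ){
        printf("foreign key enforcement is %s\n", enabled ? "on" : "off");
      }
      return rc;
    }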
-*/ -#define SQLITE_DBCONFIG_LOOKASIDE 1001 /* void* int int */ -#define SQLITE_DBCONFIG_ENABLE_FKEY 1002 /* int int* */ -#define SQLITE_DBCONFIG_ENABLE_TRIGGER 1003 /* int int* */ - - -/* -** CAPI3REF: Enable Or Disable Extended Result Codes -** -** ^The sqlite3_extended_result_codes() routine enables or disables the -** [extended result codes] feature of SQLite. ^The extended result -** codes are disabled by default for historical compatibility. -*/ -SQLITE_API int sqlite3_extended_result_codes(sqlite3*, int onoff); - -/* -** CAPI3REF: Last Insert Rowid -** -** ^Each entry in most SQLite tables (except for [WITHOUT ROWID] tables) -** has a unique 64-bit signed -** integer key called the [ROWID | "rowid"]. ^The rowid is always available -** as an undeclared column named ROWID, OID, or _ROWID_ as long as those -** names are not also used by explicitly declared columns. ^If -** the table has a column of type [INTEGER PRIMARY KEY] then that column -** is another alias for the rowid. -** -** ^The sqlite3_last_insert_rowid(D) interface returns the [rowid] of the -** most recent successful [INSERT] into a rowid table or [virtual table] -** on database connection D. -** ^Inserts into [WITHOUT ROWID] tables are not recorded. -** ^If no successful [INSERT]s into rowid tables -** have ever occurred on the database connection D, -** then sqlite3_last_insert_rowid(D) returns zero. -** -** ^(If an [INSERT] occurs within a trigger or within a [virtual table] -** method, then this routine will return the [rowid] of the inserted -** row as long as the trigger or virtual table method is running. -** But once the trigger or virtual table method ends, the value returned -** by this routine reverts to what it was before the trigger or virtual -** table method began.)^ -** -** ^An [INSERT] that fails due to a constraint violation is not a -** successful [INSERT] and does not change the value returned by this -** routine. ^Thus INSERT OR FAIL, INSERT OR IGNORE, INSERT OR ROLLBACK, -** and INSERT OR ABORT make no changes to the return value of this -** routine when their insertion fails. ^(When INSERT OR REPLACE -** encounters a constraint violation, it does not fail. The -** INSERT continues to completion after deleting rows that caused -** the constraint problem so INSERT OR REPLACE will always change -** the return value of this interface.)^ -** -** ^For the purposes of this routine, an [INSERT] is considered to -** be successful even if it is subsequently rolled back. -** -** This function is accessible to SQL statements via the -** [last_insert_rowid() SQL function]. -** -** If a separate thread performs a new [INSERT] on the same -** database connection while the [sqlite3_last_insert_rowid()] -** function is running and thus changes the last insert [rowid], -** then the value returned by [sqlite3_last_insert_rowid()] is -** unpredictable and might not equal either the old or the new -** last insert [rowid]. -*/ -SQLITE_API sqlite3_int64 sqlite3_last_insert_rowid(sqlite3*); - -/* -** CAPI3REF: Count The Number Of Rows Modified -** -** ^This function returns the number of database rows that were changed -** or inserted or deleted by the most recently completed SQL statement -** on the [database connection] specified by the first parameter. -** ^(Only changes that are directly specified by the [INSERT], [UPDATE], -** or [DELETE] statement are counted. 
Auxiliary changes caused by -** triggers or [foreign key actions] are not counted.)^ Use the -** [sqlite3_total_changes()] function to find the total number of changes -** including changes caused by triggers and foreign key actions. -** -** ^Changes to a view that are simulated by an [INSTEAD OF trigger] -** are not counted. Only real table changes are counted. -** -** ^(A "row change" is a change to a single row of a single table -** caused by an INSERT, DELETE, or UPDATE statement. Rows that -** are changed as side effects of [REPLACE] constraint resolution, -** rollback, ABORT processing, [DROP TABLE], or by any other -** mechanisms do not count as direct row changes.)^ -** -** A "trigger context" is a scope of execution that begins and -** ends with the script of a [CREATE TRIGGER | trigger]. -** Most SQL statements are -** evaluated outside of any trigger. This is the "top level" -** trigger context. If a trigger fires from the top level, a -** new trigger context is entered for the duration of that one -** trigger. Subtriggers create subcontexts for their duration. -** -** ^Calling [sqlite3_exec()] or [sqlite3_step()] recursively does -** not create a new trigger context. -** -** ^This function returns the number of direct row changes in the -** most recent INSERT, UPDATE, or DELETE statement within the same -** trigger context. -** -** ^Thus, when called from the top level, this function returns the -** number of changes in the most recent INSERT, UPDATE, or DELETE -** that also occurred at the top level. ^(Within the body of a trigger, -** the sqlite3_changes() interface can be called to find the number of -** changes in the most recently completed INSERT, UPDATE, or DELETE -** statement within the body of the same trigger. -** However, the number returned does not include changes -** caused by subtriggers since those have their own context.)^ -** -** See also the [sqlite3_total_changes()] interface, the -** [count_changes pragma], and the [changes() SQL function]. -** -** If a separate thread makes changes on the same database connection -** while [sqlite3_changes()] is running then the value returned -** is unpredictable and not meaningful. -*/ -SQLITE_API int sqlite3_changes(sqlite3*); - -/* -** CAPI3REF: Total Number Of Rows Modified -** -** ^This function returns the number of row changes caused by [INSERT], -** [UPDATE] or [DELETE] statements since the [database connection] was opened. -** ^(The count returned by sqlite3_total_changes() includes all changes -** from all [CREATE TRIGGER | trigger] contexts and changes made by -** [foreign key actions]. However, -** the count does not include changes used to implement [REPLACE] constraints, -** do rollbacks or ABORT processing, or [DROP TABLE] processing. The -** count does not include rows of views that fire an [INSTEAD OF trigger], -** though if the INSTEAD OF trigger makes changes of its own, those changes -** are counted.)^ -** ^The sqlite3_total_changes() function counts the changes as soon as -** the statement that makes them is completed (when the statement handle -** is passed to [sqlite3_reset()] or [sqlite3_finalize()]). -** -** See also the [sqlite3_changes()] interface, the -** [count_changes pragma], and the [total_changes() SQL function]. -** -** If a separate thread makes changes on the same database connection -** while [sqlite3_total_changes()] is running then the value -** returned is unpredictable and not meaningful. 
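A short editorial sketch of the two counters described above; the `people` table and the `bump_ages` helper are hypothetical:

    #include <stdio.h>
    #include <sqlite3.h>

    /* Run one UPDATE and report the per-statement and per-connection
    ** change counts. */
    static int bump_ages(sqlite3 *db){
      char *zErr = 0;
      int rc = sqlite3_exec(db, "UPDATE people SET age = age + 1;", 0, 0, &zErr);
      if( rc!=SQLITE_OK ){
        fprintf(stderr, "update failed: %s\n", zErr);
        sqlite3_free(zErr);
        return rc;
      }
      printf("rows changed by last statement: %d\n", sqlite3_changes(db));
      printf("rows changed since the connection opened: %d\n",
             sqlite3_total_changes(db));
      return SQLITE_OK;
    }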
-*/ -SQLITE_API int sqlite3_total_changes(sqlite3*); - -/* -** CAPI3REF: Interrupt A Long-Running Query -** -** ^This function causes any pending database operation to abort and -** return at its earliest opportunity. This routine is typically -** called in response to a user action such as pressing "Cancel" -** or Ctrl-C where the user wants a long query operation to halt -** immediately. -** -** ^It is safe to call this routine from a thread different from the -** thread that is currently running the database operation. But it -** is not safe to call this routine with a [database connection] that -** is closed or might close before sqlite3_interrupt() returns. -** -** ^If an SQL operation is very nearly finished at the time when -** sqlite3_interrupt() is called, then it might not have an opportunity -** to be interrupted and might continue to completion. -** -** ^An SQL operation that is interrupted will return [SQLITE_INTERRUPT]. -** ^If the interrupted SQL operation is an INSERT, UPDATE, or DELETE -** that is inside an explicit transaction, then the entire transaction -** will be rolled back automatically. -** -** ^The sqlite3_interrupt(D) call is in effect until all currently running -** SQL statements on [database connection] D complete. ^Any new SQL statements -** that are started after the sqlite3_interrupt() call and before the -** running statements reaches zero are interrupted as if they had been -** running prior to the sqlite3_interrupt() call. ^New SQL statements -** that are started after the running statement count reaches zero are -** not effected by the sqlite3_interrupt(). -** ^A call to sqlite3_interrupt(D) that occurs when there are no running -** SQL statements is a no-op and has no effect on SQL statements -** that are started after the sqlite3_interrupt() call returns. -** -** If the database connection closes while [sqlite3_interrupt()] -** is running then bad things will likely happen. -*/ -SQLITE_API void sqlite3_interrupt(sqlite3*); - -/* -** CAPI3REF: Determine If An SQL Statement Is Complete -** -** These routines are useful during command-line input to determine if the -** currently entered text seems to form a complete SQL statement or -** if additional input is needed before sending the text into -** SQLite for parsing. ^These routines return 1 if the input string -** appears to be a complete SQL statement. ^A statement is judged to be -** complete if it ends with a semicolon token and is not a prefix of a -** well-formed CREATE TRIGGER statement. ^Semicolons that are embedded within -** string literals or quoted identifier names or comments are not -** independent tokens (they are part of the token in which they are -** embedded) and thus do not count as a statement terminator. ^Whitespace -** and comments that follow the final semicolon are ignored. -** -** ^These routines return 0 if the statement is incomplete. ^If a -** memory allocation fails, then SQLITE_NOMEM is returned. -** -** ^These routines do not parse the SQL statements thus -** will not detect syntactically incorrect SQL. -** -** ^(If SQLite has not been initialized using [sqlite3_initialize()] prior -** to invoking sqlite3_complete16() then sqlite3_initialize() is invoked -** automatically by sqlite3_complete16(). If that initialization fails, -** then the return value from sqlite3_complete16() will be non-zero -** regardless of whether or not the input SQL is complete.)^ -** -** The input to [sqlite3_complete()] must be a zero-terminated -** UTF-8 string. 
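For illustration only (not from the header): a shell-style input loop might buffer lines until sqlite3_complete() reports a full statement. The buffer handling here is a simplified assumption:

    #include <string.h>
    #include <sqlite3.h>

    /* Append one line of user input to zBuf (capacity nBuf); when the
    ** accumulated text forms a complete statement, run it and reset. */
    static void feed_line(sqlite3 *db, char *zBuf, size_t nBuf, const char *zLine){
      strncat(zBuf, zLine, nBuf - strlen(zBuf) - 1);
      if( sqlite3_complete(zBuf) ){
        sqlite3_exec(db, zBuf, 0, 0, 0);
        zBuf[0] = '\0';          /* start collecting the next statement */
      }
    }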
-** -** The input to [sqlite3_complete16()] must be a zero-terminated -** UTF-16 string in native byte order. -*/ -SQLITE_API int sqlite3_complete(const char *sql); -SQLITE_API int sqlite3_complete16(const void *sql); - -/* -** CAPI3REF: Register A Callback To Handle SQLITE_BUSY Errors -** -** ^This routine sets a callback function that might be invoked whenever -** an attempt is made to open a database table that another thread -** or process has locked. -** -** ^If the busy callback is NULL, then [SQLITE_BUSY] or [SQLITE_IOERR_BLOCKED] -** is returned immediately upon encountering the lock. ^If the busy callback -** is not NULL, then the callback might be invoked with two arguments. -** -** ^The first argument to the busy handler is a copy of the void* pointer which -** is the third argument to sqlite3_busy_handler(). ^The second argument to -** the busy handler callback is the number of times that the busy handler has -** been invoked for this locking event. ^If the -** busy callback returns 0, then no additional attempts are made to -** access the database and [SQLITE_BUSY] or [SQLITE_IOERR_BLOCKED] is returned. -** ^If the callback returns non-zero, then another attempt -** is made to open the database for reading and the cycle repeats. -** -** The presence of a busy handler does not guarantee that it will be invoked -** when there is lock contention. ^If SQLite determines that invoking the busy -** handler could result in a deadlock, it will go ahead and return [SQLITE_BUSY] -** or [SQLITE_IOERR_BLOCKED] instead of invoking the busy handler. -** Consider a scenario where one process is holding a read lock that -** it is trying to promote to a reserved lock and -** a second process is holding a reserved lock that it is trying -** to promote to an exclusive lock. The first process cannot proceed -** because it is blocked by the second and the second process cannot -** proceed because it is blocked by the first. If both processes -** invoke the busy handlers, neither will make any progress. Therefore, -** SQLite returns [SQLITE_BUSY] for the first process, hoping that this -** will induce the first process to release its read lock and allow -** the second process to proceed. -** -** ^The default busy callback is NULL. -** -** ^The [SQLITE_BUSY] error is converted to [SQLITE_IOERR_BLOCKED] -** when SQLite is in the middle of a large transaction where all the -** changes will not fit into the in-memory cache. SQLite will -** already hold a RESERVED lock on the database file, but it needs -** to promote this lock to EXCLUSIVE so that it can spill cache -** pages into the database file without harm to concurrent -** readers. ^If it is unable to promote the lock, then the in-memory -** cache will be left in an inconsistent state and so the error -** code is promoted from the relatively benign [SQLITE_BUSY] to -** the more severe [SQLITE_IOERR_BLOCKED]. ^This error code promotion -** forces an automatic rollback of the changes. See the -** -** CorruptionFollowingBusyError wiki page for a discussion of why -** this is important. -** -** ^(There can only be a single busy handler defined for each -** [database connection]. Setting a new busy handler clears any -** previously set handler.)^ ^Note that calling [sqlite3_busy_timeout()] -** will also set or clear the busy handler. -** -** The busy callback should not take any actions which modify the -** database connection that invoked the busy handler. Any such actions -** result in undefined behavior. 
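A hedged sketch of a handler matching the callback shape described above; the retry count and sleep interval are arbitrary assumptions, and sqlite3_busy_timeout() (covered next) is the built-in equivalent of this pattern:

    #include <sqlite3.h>

    /* Retry a locked table for up to roughly one second, then give up by
    ** returning 0 so SQLite reports SQLITE_BUSY to the caller. */
    static int busy_callback(void *pArg, int nPrior){
      (void)pArg;
      if( nPrior>=20 ) return 0;
      sqlite3_sleep(50);         /* wait 50 ms before the next attempt */
      return 1;
    }

    static void install_busy_handler(sqlite3 *db){
      sqlite3_busy_handler(db, busy_callback, 0);
    }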
-** -** A busy handler must not close the database connection -** or [prepared statement] that invoked the busy handler. -*/ -SQLITE_API int sqlite3_busy_handler(sqlite3*, int(*)(void*,int), void*); - -/* -** CAPI3REF: Set A Busy Timeout -** -** ^This routine sets a [sqlite3_busy_handler | busy handler] that sleeps -** for a specified amount of time when a table is locked. ^The handler -** will sleep multiple times until at least "ms" milliseconds of sleeping -** have accumulated. ^After at least "ms" milliseconds of sleeping, -** the handler returns 0 which causes [sqlite3_step()] to return -** [SQLITE_BUSY] or [SQLITE_IOERR_BLOCKED]. -** -** ^Calling this routine with an argument less than or equal to zero -** turns off all busy handlers. -** -** ^(There can only be a single busy handler for a particular -** [database connection] any any given moment. If another busy handler -** was defined (using [sqlite3_busy_handler()]) prior to calling -** this routine, that other busy handler is cleared.)^ -*/ -SQLITE_API int sqlite3_busy_timeout(sqlite3*, int ms); - -/* -** CAPI3REF: Convenience Routines For Running Queries -** -** This is a legacy interface that is preserved for backwards compatibility. -** Use of this interface is not recommended. -** -** Definition: A result table is memory data structure created by the -** [sqlite3_get_table()] interface. A result table records the -** complete query results from one or more queries. -** -** The table conceptually has a number of rows and columns. But -** these numbers are not part of the result table itself. These -** numbers are obtained separately. Let N be the number of rows -** and M be the number of columns. -** -** A result table is an array of pointers to zero-terminated UTF-8 strings. -** There are (N+1)*M elements in the array. The first M pointers point -** to zero-terminated strings that contain the names of the columns. -** The remaining entries all point to query results. NULL values result -** in NULL pointers. All other values are in their UTF-8 zero-terminated -** string representation as returned by [sqlite3_column_text()]. -** -** A result table might consist of one or more memory allocations. -** It is not safe to pass a result table directly to [sqlite3_free()]. -** A result table should be deallocated using [sqlite3_free_table()]. -** -** ^(As an example of the result table format, suppose a query result -** is as follows: -** -**
-**        Name        | Age
-**        -----------------------
-**        Alice       | 43
-**        Bob         | 28
-**        Cindy       | 21
-** 
-** -** There are two columns (M==2) and three rows (N==3). Thus the -** result table has 8 entries. Suppose the result table is stored -** in an array named azResult. Then azResult holds this content: -** -**
-**        azResult[0] = "Name";
-**        azResult[1] = "Age";
-**        azResult[2] = "Alice";
-**        azResult[3] = "43";
-**        azResult[4] = "Bob";
-**        azResult[5] = "28";
-**        azResult[6] = "Cindy";
-**        azResult[7] = "21";
-** 
)^ -** -** ^The sqlite3_get_table() function evaluates one or more -** semicolon-separated SQL statements in the zero-terminated UTF-8 -** string of its 2nd parameter and returns a result table to the -** pointer given in its 3rd parameter. -** -** After the application has finished with the result from sqlite3_get_table(), -** it must pass the result table pointer to sqlite3_free_table() in order to -** release the memory that was malloced. Because of the way the -** [sqlite3_malloc()] happens within sqlite3_get_table(), the calling -** function must not try to call [sqlite3_free()] directly. Only -** [sqlite3_free_table()] is able to release the memory properly and safely. -** -** The sqlite3_get_table() interface is implemented as a wrapper around -** [sqlite3_exec()]. The sqlite3_get_table() routine does not have access -** to any internal data structures of SQLite. It uses only the public -** interface defined here. As a consequence, errors that occur in the -** wrapper layer outside of the internal [sqlite3_exec()] call are not -** reflected in subsequent calls to [sqlite3_errcode()] or -** [sqlite3_errmsg()]. -*/ -SQLITE_API int sqlite3_get_table( - sqlite3 *db, /* An open database */ - const char *zSql, /* SQL to be evaluated */ - char ***pazResult, /* Results of the query */ - int *pnRow, /* Number of result rows written here */ - int *pnColumn, /* Number of result columns written here */ - char **pzErrmsg /* Error msg written here */ -); -SQLITE_API void sqlite3_free_table(char **result); - -/* -** CAPI3REF: Formatted String Printing Functions -** -** These routines are work-alikes of the "printf()" family of functions -** from the standard C library. -** -** ^The sqlite3_mprintf() and sqlite3_vmprintf() routines write their -** results into memory obtained from [sqlite3_malloc()]. -** The strings returned by these two routines should be -** released by [sqlite3_free()]. ^Both routines return a -** NULL pointer if [sqlite3_malloc()] is unable to allocate enough -** memory to hold the resulting string. -** -** ^(The sqlite3_snprintf() routine is similar to "snprintf()" from -** the standard C library. The result is written into the -** buffer supplied as the second parameter whose size is given by -** the first parameter. Note that the order of the -** first two parameters is reversed from snprintf().)^ This is an -** historical accident that cannot be fixed without breaking -** backwards compatibility. ^(Note also that sqlite3_snprintf() -** returns a pointer to its buffer instead of the number of -** characters actually written into the buffer.)^ We admit that -** the number of characters written would be a more useful return -** value but we cannot change the implementation of sqlite3_snprintf() -** now without breaking compatibility. -** -** ^As long as the buffer size is greater than zero, sqlite3_snprintf() -** guarantees that the buffer is always zero-terminated. ^The first -** parameter "n" is the total size of the buffer, including space for -** the zero terminator. So the longest string that can be completely -** written will be n-1 characters. -** -** ^The sqlite3_vsnprintf() routine is a varargs version of sqlite3_snprintf(). -** -** These routines all implement some additional formatting -** options that are useful for constructing SQL statements. -** All of the usual printf() formatting options apply. In addition, there -** is are "%q", "%Q", and "%z" options. 
-** -** ^(The %q option works like %s in that it substitutes a nul-terminated -** string from the argument list. But %q also doubles every '\'' character. -** %q is designed for use inside a string literal.)^ By doubling each '\'' -** character it escapes that character and allows it to be inserted into -** the string. -** -** For example, assume the string variable zText contains text as follows: -** -**
-**  char *zText = "It's a happy day!";
-** 
-** -** One can use this text in an SQL statement as follows: -** -**
-**  char *zSQL = sqlite3_mprintf("INSERT INTO table VALUES('%q')", zText);
-**  sqlite3_exec(db, zSQL, 0, 0, 0);
-**  sqlite3_free(zSQL);
-** 
-** -** Because the %q format string is used, the '\'' character in zText -** is escaped and the SQL generated is as follows: -** -**
-**  INSERT INTO table1 VALUES('It''s a happy day!')
-** 
-** -** This is correct. Had we used %s instead of %q, the generated SQL -** would have looked like this: -** -**
-**  INSERT INTO table1 VALUES('It's a happy day!');
-** 
-** -** This second example is an SQL syntax error. As a general rule you should -** always use %q instead of %s when inserting text into a string literal. -** -** ^(The %Q option works like %q except it also adds single quotes around -** the outside of the total string. Additionally, if the parameter in the -** argument list is a NULL pointer, %Q substitutes the text "NULL" (without -** single quotes).)^ So, for example, one could say: -** -**
-**  char *zSQL = sqlite3_mprintf("INSERT INTO table VALUES(%Q)", zText);
-**  sqlite3_exec(db, zSQL, 0, 0, 0);
-**  sqlite3_free(zSQL);
-** 
-** -** The code above will render a correct SQL statement in the zSQL -** variable even if the zText variable is a NULL pointer. -** -** ^(The "%z" formatting option works like "%s" but with the -** addition that after the string has been read and copied into -** the result, [sqlite3_free()] is called on the input string.)^ -*/ -SQLITE_API char *sqlite3_mprintf(const char*,...); -SQLITE_API char *sqlite3_vmprintf(const char*, va_list); -SQLITE_API char *sqlite3_snprintf(int,char*,const char*, ...); -SQLITE_API char *sqlite3_vsnprintf(int,char*,const char*, va_list); - -/* -** CAPI3REF: Memory Allocation Subsystem -** -** The SQLite core uses these three routines for all of its own -** internal memory allocation needs. "Core" in the previous sentence -** does not include operating-system specific VFS implementation. The -** Windows VFS uses native malloc() and free() for some operations. -** -** ^The sqlite3_malloc() routine returns a pointer to a block -** of memory at least N bytes in length, where N is the parameter. -** ^If sqlite3_malloc() is unable to obtain sufficient free -** memory, it returns a NULL pointer. ^If the parameter N to -** sqlite3_malloc() is zero or negative then sqlite3_malloc() returns -** a NULL pointer. -** -** ^Calling sqlite3_free() with a pointer previously returned -** by sqlite3_malloc() or sqlite3_realloc() releases that memory so -** that it might be reused. ^The sqlite3_free() routine is -** a no-op if is called with a NULL pointer. Passing a NULL pointer -** to sqlite3_free() is harmless. After being freed, memory -** should neither be read nor written. Even reading previously freed -** memory might result in a segmentation fault or other severe error. -** Memory corruption, a segmentation fault, or other severe error -** might result if sqlite3_free() is called with a non-NULL pointer that -** was not obtained from sqlite3_malloc() or sqlite3_realloc(). -** -** ^(The sqlite3_realloc() interface attempts to resize a -** prior memory allocation to be at least N bytes, where N is the -** second parameter. The memory allocation to be resized is the first -** parameter.)^ ^ If the first parameter to sqlite3_realloc() -** is a NULL pointer then its behavior is identical to calling -** sqlite3_malloc(N) where N is the second parameter to sqlite3_realloc(). -** ^If the second parameter to sqlite3_realloc() is zero or -** negative then the behavior is exactly the same as calling -** sqlite3_free(P) where P is the first parameter to sqlite3_realloc(). -** ^sqlite3_realloc() returns a pointer to a memory allocation -** of at least N bytes in size or NULL if sufficient memory is unavailable. -** ^If M is the size of the prior allocation, then min(N,M) bytes -** of the prior allocation are copied into the beginning of buffer returned -** by sqlite3_realloc() and the prior allocation is freed. -** ^If sqlite3_realloc() returns NULL, then the prior allocation -** is not freed. -** -** ^The memory returned by sqlite3_malloc() and sqlite3_realloc() -** is always aligned to at least an 8 byte boundary, or to a -** 4 byte boundary if the [SQLITE_4_BYTE_ALIGNED_MALLOC] compile-time -** option is used. -** -** In SQLite version 3.5.0 and 3.5.1, it was possible to define -** the SQLITE_OMIT_MEMORY_ALLOCATION which would cause the built-in -** implementation of these routines to be omitted. That capability -** is no longer provided. Only built-in memory allocators can be used. 
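Tying the %Q formatting described above to the allocator routines declared here, a small editorial sketch (the `people` table is hypothetical):

    #include <sqlite3.h>

    /* Build an INSERT safely: %Q quotes and escapes the string, or emits
    ** the keyword NULL for a NULL pointer. The result comes from
    ** sqlite3_malloc() and must be released with sqlite3_free(). */
    static int insert_name(sqlite3 *db, const char *zName){
      char *zSql = sqlite3_mprintf("INSERT INTO people(name) VALUES(%Q)", zName);
      int rc;
      if( zSql==0 ) return SQLITE_NOMEM;
      rc = sqlite3_exec(db, zSql, 0, 0, 0);
      sqlite3_free(zSql);
      return rc;
    }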
-** -** Prior to SQLite version 3.7.10, the Windows OS interface layer called -** the system malloc() and free() directly when converting -** filenames between the UTF-8 encoding used by SQLite -** and whatever filename encoding is used by the particular Windows -** installation. Memory allocation errors were detected, but -** they were reported back as [SQLITE_CANTOPEN] or -** [SQLITE_IOERR] rather than [SQLITE_NOMEM]. -** -** The pointer arguments to [sqlite3_free()] and [sqlite3_realloc()] -** must be either NULL or else pointers obtained from a prior -** invocation of [sqlite3_malloc()] or [sqlite3_realloc()] that have -** not yet been released. -** -** The application must not read or write any part of -** a block of memory after it has been released using -** [sqlite3_free()] or [sqlite3_realloc()]. -*/ -SQLITE_API void *sqlite3_malloc(int); -SQLITE_API void *sqlite3_realloc(void*, int); -SQLITE_API void sqlite3_free(void*); - -/* -** CAPI3REF: Memory Allocator Statistics -** -** SQLite provides these two interfaces for reporting on the status -** of the [sqlite3_malloc()], [sqlite3_free()], and [sqlite3_realloc()] -** routines, which form the built-in memory allocation subsystem. -** -** ^The [sqlite3_memory_used()] routine returns the number of bytes -** of memory currently outstanding (malloced but not freed). -** ^The [sqlite3_memory_highwater()] routine returns the maximum -** value of [sqlite3_memory_used()] since the high-water mark -** was last reset. ^The values returned by [sqlite3_memory_used()] and -** [sqlite3_memory_highwater()] include any overhead -** added by SQLite in its implementation of [sqlite3_malloc()], -** but not overhead added by the any underlying system library -** routines that [sqlite3_malloc()] may call. -** -** ^The memory high-water mark is reset to the current value of -** [sqlite3_memory_used()] if and only if the parameter to -** [sqlite3_memory_highwater()] is true. ^The value returned -** by [sqlite3_memory_highwater(1)] is the high-water mark -** prior to the reset. -*/ -SQLITE_API sqlite3_int64 sqlite3_memory_used(void); -SQLITE_API sqlite3_int64 sqlite3_memory_highwater(int resetFlag); - -/* -** CAPI3REF: Pseudo-Random Number Generator -** -** SQLite contains a high-quality pseudo-random number generator (PRNG) used to -** select random [ROWID | ROWIDs] when inserting new records into a table that -** already uses the largest possible [ROWID]. The PRNG is also used for -** the build-in random() and randomblob() SQL functions. This interface allows -** applications to access the same PRNG for other purposes. -** -** ^A call to this routine stores N bytes of randomness into buffer P. -** ^If N is less than one, then P can be a NULL pointer. -** -** ^If this routine has not been previously called or if the previous -** call had N less than one, then the PRNG is seeded using randomness -** obtained from the xRandomness method of the default [sqlite3_vfs] object. -** ^If the previous call to this routine had an N of 1 or more then -** the pseudo-randomness is generated -** internally and without recourse to the [sqlite3_vfs] xRandomness -** method. -*/ -SQLITE_API void sqlite3_randomness(int N, void *P); - -/* -** CAPI3REF: Compile-Time Authorization Callbacks -** -** ^This routine registers an authorizer callback with a particular -** [database connection], supplied in the first argument. 
-** ^The authorizer callback is invoked as SQL statements are being compiled -** by [sqlite3_prepare()] or its variants [sqlite3_prepare_v2()], -** [sqlite3_prepare16()] and [sqlite3_prepare16_v2()]. ^At various -** points during the compilation process, as logic is being created -** to perform various actions, the authorizer callback is invoked to -** see if those actions are allowed. ^The authorizer callback should -** return [SQLITE_OK] to allow the action, [SQLITE_IGNORE] to disallow the -** specific action but allow the SQL statement to continue to be -** compiled, or [SQLITE_DENY] to cause the entire SQL statement to be -** rejected with an error. ^If the authorizer callback returns -** any value other than [SQLITE_IGNORE], [SQLITE_OK], or [SQLITE_DENY] -** then the [sqlite3_prepare_v2()] or equivalent call that triggered -** the authorizer will fail with an error message. -** -** When the callback returns [SQLITE_OK], that means the operation -** requested is ok. ^When the callback returns [SQLITE_DENY], the -** [sqlite3_prepare_v2()] or equivalent call that triggered the -** authorizer will fail with an error message explaining that -** access is denied. -** -** ^The first parameter to the authorizer callback is a copy of the third -** parameter to the sqlite3_set_authorizer() interface. ^The second parameter -** to the callback is an integer [SQLITE_COPY | action code] that specifies -** the particular action to be authorized. ^The third through sixth parameters -** to the callback are zero-terminated strings that contain additional -** details about the action to be authorized. -** -** ^If the action code is [SQLITE_READ] -** and the callback returns [SQLITE_IGNORE] then the -** [prepared statement] statement is constructed to substitute -** a NULL value in place of the table column that would have -** been read if [SQLITE_OK] had been returned. The [SQLITE_IGNORE] -** return can be used to deny an untrusted user access to individual -** columns of a table. -** ^If the action code is [SQLITE_DELETE] and the callback returns -** [SQLITE_IGNORE] then the [DELETE] operation proceeds but the -** [truncate optimization] is disabled and all rows are deleted individually. -** -** An authorizer is used when [sqlite3_prepare | preparing] -** SQL statements from an untrusted source, to ensure that the SQL statements -** do not try to access data they are not allowed to see, or that they do not -** try to execute malicious statements that damage the database. For -** example, an application may allow a user to enter arbitrary -** SQL queries for evaluation by a database. But the application does -** not want the user to be able to make arbitrary changes to the -** database. An authorizer could then be put in place while the -** user-entered SQL is being [sqlite3_prepare | prepared] that -** disallows everything except [SELECT] statements. -** -** Applications that need to process SQL from untrusted sources -** might also consider lowering resource limits using [sqlite3_limit()] -** and limiting database size using the [max_page_count] [PRAGMA] -** in addition to using an authorizer. -** -** ^(Only a single authorizer can be in place on a database connection -** at a time. Each call to sqlite3_set_authorizer overrides the -** previous call.)^ ^Disable the authorizer by installing a NULL callback. -** The authorizer is disabled by default. -** -** The authorizer callback must not do anything that will modify -** the database connection that invoked the authorizer callback. 
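An editorial sketch of a restrictive authorizer matching the callback signature above; the set of allowed action codes is an assumption for a read-only use case:

    #include <sqlite3.h>

    /* Allow SELECT statements, column reads and function use; reject
    ** everything else so untrusted SQL cannot modify the database. */
    static int readonly_auth(void *pArg, int action,
                             const char *z3, const char *z4,
                             const char *zDb, const char *zTrigger){
      (void)pArg; (void)z3; (void)z4; (void)zDb; (void)zTrigger;
      switch( action ){
        case SQLITE_SELECT:
        case SQLITE_READ:
        case SQLITE_FUNCTION:
          return SQLITE_OK;
        default:
          return SQLITE_DENY;
      }
    }

    /* sqlite3_set_authorizer(db, readonly_auth, 0) installs the callback;
    ** sqlite3_set_authorizer(db, 0, 0) removes it again. */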
-** Note that [sqlite3_prepare_v2()] and [sqlite3_step()] both modify their -** database connections for the meaning of "modify" in this paragraph. -** -** ^When [sqlite3_prepare_v2()] is used to prepare a statement, the -** statement might be re-prepared during [sqlite3_step()] due to a -** schema change. Hence, the application should ensure that the -** correct authorizer callback remains in place during the [sqlite3_step()]. -** -** ^Note that the authorizer callback is invoked only during -** [sqlite3_prepare()] or its variants. Authorization is not -** performed during statement evaluation in [sqlite3_step()], unless -** as stated in the previous paragraph, sqlite3_step() invokes -** sqlite3_prepare_v2() to reprepare a statement after a schema change. -*/ -SQLITE_API int sqlite3_set_authorizer( - sqlite3*, - int (*xAuth)(void*,int,const char*,const char*,const char*,const char*), - void *pUserData -); - -/* -** CAPI3REF: Authorizer Return Codes -** -** The [sqlite3_set_authorizer | authorizer callback function] must -** return either [SQLITE_OK] or one of these two constants in order -** to signal SQLite whether or not the action is permitted. See the -** [sqlite3_set_authorizer | authorizer documentation] for additional -** information. -** -** Note that SQLITE_IGNORE is also used as a [SQLITE_ROLLBACK | return code] -** from the [sqlite3_vtab_on_conflict()] interface. -*/ -#define SQLITE_DENY 1 /* Abort the SQL statement with an error */ -#define SQLITE_IGNORE 2 /* Don't allow access, but don't generate an error */ - -/* -** CAPI3REF: Authorizer Action Codes -** -** The [sqlite3_set_authorizer()] interface registers a callback function -** that is invoked to authorize certain SQL statement actions. The -** second parameter to the callback is an integer code that specifies -** what action is being authorized. These are the integer action codes that -** the authorizer callback may be passed. -** -** These action code values signify what kind of operation is to be -** authorized. The 3rd and 4th parameters to the authorization -** callback function will be parameters or NULL depending on which of these -** codes is used as the second parameter. ^(The 5th parameter to the -** authorizer callback is the name of the database ("main", "temp", -** etc.) if applicable.)^ ^The 6th parameter to the authorizer callback -** is the name of the inner-most trigger or view that is responsible for -** the access attempt or NULL if this access attempt is directly from -** top-level SQL code. 
-*/ -/******************************************* 3rd ************ 4th ***********/ -#define SQLITE_CREATE_INDEX 1 /* Index Name Table Name */ -#define SQLITE_CREATE_TABLE 2 /* Table Name NULL */ -#define SQLITE_CREATE_TEMP_INDEX 3 /* Index Name Table Name */ -#define SQLITE_CREATE_TEMP_TABLE 4 /* Table Name NULL */ -#define SQLITE_CREATE_TEMP_TRIGGER 5 /* Trigger Name Table Name */ -#define SQLITE_CREATE_TEMP_VIEW 6 /* View Name NULL */ -#define SQLITE_CREATE_TRIGGER 7 /* Trigger Name Table Name */ -#define SQLITE_CREATE_VIEW 8 /* View Name NULL */ -#define SQLITE_DELETE 9 /* Table Name NULL */ -#define SQLITE_DROP_INDEX 10 /* Index Name Table Name */ -#define SQLITE_DROP_TABLE 11 /* Table Name NULL */ -#define SQLITE_DROP_TEMP_INDEX 12 /* Index Name Table Name */ -#define SQLITE_DROP_TEMP_TABLE 13 /* Table Name NULL */ -#define SQLITE_DROP_TEMP_TRIGGER 14 /* Trigger Name Table Name */ -#define SQLITE_DROP_TEMP_VIEW 15 /* View Name NULL */ -#define SQLITE_DROP_TRIGGER 16 /* Trigger Name Table Name */ -#define SQLITE_DROP_VIEW 17 /* View Name NULL */ -#define SQLITE_INSERT 18 /* Table Name NULL */ -#define SQLITE_PRAGMA 19 /* Pragma Name 1st arg or NULL */ -#define SQLITE_READ 20 /* Table Name Column Name */ -#define SQLITE_SELECT 21 /* NULL NULL */ -#define SQLITE_TRANSACTION 22 /* Operation NULL */ -#define SQLITE_UPDATE 23 /* Table Name Column Name */ -#define SQLITE_ATTACH 24 /* Filename NULL */ -#define SQLITE_DETACH 25 /* Database Name NULL */ -#define SQLITE_ALTER_TABLE 26 /* Database Name Table Name */ -#define SQLITE_REINDEX 27 /* Index Name NULL */ -#define SQLITE_ANALYZE 28 /* Table Name NULL */ -#define SQLITE_CREATE_VTABLE 29 /* Table Name Module Name */ -#define SQLITE_DROP_VTABLE 30 /* Table Name Module Name */ -#define SQLITE_FUNCTION 31 /* NULL Function Name */ -#define SQLITE_SAVEPOINT 32 /* Operation Savepoint Name */ -#define SQLITE_COPY 0 /* No longer used */ -#define SQLITE_RECURSIVE 33 /* NULL NULL */ - -/* -** CAPI3REF: Tracing And Profiling Functions -** -** These routines register callback functions that can be used for -** tracing and profiling the execution of SQL statements. -** -** ^The callback function registered by sqlite3_trace() is invoked at -** various times when an SQL statement is being run by [sqlite3_step()]. -** ^The sqlite3_trace() callback is invoked with a UTF-8 rendering of the -** SQL statement text as the statement first begins executing. -** ^(Additional sqlite3_trace() callbacks might occur -** as each triggered subprogram is entered. The callbacks for triggers -** contain a UTF-8 SQL comment that identifies the trigger.)^ -** -** The [SQLITE_TRACE_SIZE_LIMIT] compile-time option can be used to limit -** the length of [bound parameter] expansion in the output of sqlite3_trace(). -** -** ^The callback function registered by sqlite3_profile() is invoked -** as each SQL statement finishes. ^The profile callback contains -** the original statement text and an estimate of wall-clock time -** of how long that statement took to run. ^The profile callback -** time is in units of nanoseconds, however the current implementation -** is only capable of millisecond resolution so the six least significant -** digits in the time are meaningless. Future versions of SQLite -** might provide greater resolution on the profiler callback. The -** sqlite3_profile() function is considered experimental and is -** subject to change in future versions of SQLite. 
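A small editorial sketch of the two callbacks described above (the function names are assumptions):

    #include <stdio.h>
    #include <sqlite3.h>

    /* Print each statement as it begins executing. */
    static void trace_callback(void *pArg, const char *zSql){
      (void)pArg;
      fprintf(stderr, "TRACE: %s\n", zSql);
    }

    /* Report elapsed time; the value is expressed in nanoseconds even
    ** though the underlying resolution is only milliseconds. */
    static void profile_callback(void *pArg, const char *zSql, sqlite3_uint64 ns){
      (void)pArg;
      fprintf(stderr, "PROFILE: %llu ns  %s\n", (unsigned long long)ns, zSql);
    }

    static void install_tracing(sqlite3 *db){
      sqlite3_trace(db, trace_callback, 0);
      sqlite3_profile(db, profile_callback, 0);
    }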
-*/ -SQLITE_API void *sqlite3_trace(sqlite3*, void(*xTrace)(void*,const char*), void*); -SQLITE_API SQLITE_EXPERIMENTAL void *sqlite3_profile(sqlite3*, - void(*xProfile)(void*,const char*,sqlite3_uint64), void*); - -/* -** CAPI3REF: Query Progress Callbacks -** -** ^The sqlite3_progress_handler(D,N,X,P) interface causes the callback -** function X to be invoked periodically during long running calls to -** [sqlite3_exec()], [sqlite3_step()] and [sqlite3_get_table()] for -** database connection D. An example use for this -** interface is to keep a GUI updated during a large query. -** -** ^The parameter P is passed through as the only parameter to the -** callback function X. ^The parameter N is the approximate number of -** [virtual machine instructions] that are evaluated between successive -** invocations of the callback X. ^If N is less than one then the progress -** handler is disabled. -** -** ^Only a single progress handler may be defined at one time per -** [database connection]; setting a new progress handler cancels the -** old one. ^Setting parameter X to NULL disables the progress handler. -** ^The progress handler is also disabled by setting N to a value less -** than 1. -** -** ^If the progress callback returns non-zero, the operation is -** interrupted. This feature can be used to implement a -** "Cancel" button on a GUI progress dialog box. -** -** The progress handler callback must not do anything that will modify -** the database connection that invoked the progress handler. -** Note that [sqlite3_prepare_v2()] and [sqlite3_step()] both modify their -** database connections for the meaning of "modify" in this paragraph. -** -*/ -SQLITE_API void sqlite3_progress_handler(sqlite3*, int, int(*)(void*), void*); - -/* -** CAPI3REF: Opening A New Database Connection -** -** ^These routines open an SQLite database file as specified by the -** filename argument. ^The filename argument is interpreted as UTF-8 for -** sqlite3_open() and sqlite3_open_v2() and as UTF-16 in the native byte -** order for sqlite3_open16(). ^(A [database connection] handle is usually -** returned in *ppDb, even if an error occurs. The only exception is that -** if SQLite is unable to allocate memory to hold the [sqlite3] object, -** a NULL will be written into *ppDb instead of a pointer to the [sqlite3] -** object.)^ ^(If the database is opened (and/or created) successfully, then -** [SQLITE_OK] is returned. Otherwise an [error code] is returned.)^ ^The -** [sqlite3_errmsg()] or [sqlite3_errmsg16()] routines can be used to obtain -** an English language description of the error following a failure of any -** of the sqlite3_open() routines. -** -** ^The default encoding for the database will be UTF-8 if -** sqlite3_open() or sqlite3_open_v2() is called and -** UTF-16 in the native byte order if sqlite3_open16() is used. -** -** Whether or not an error occurs when it is opened, resources -** associated with the [database connection] handle should be released by -** passing it to [sqlite3_close()] when it is no longer required. -** -** The sqlite3_open_v2() interface works like sqlite3_open() -** except that it accepts two additional parameters for additional control -** over the new database connection. ^(The flags parameter to -** sqlite3_open_v2() can take one of -** the following three values, optionally combined with the -** [SQLITE_OPEN_NOMUTEX], [SQLITE_OPEN_FULLMUTEX], [SQLITE_OPEN_SHAREDCACHE], -** [SQLITE_OPEN_PRIVATECACHE], and/or [SQLITE_OPEN_URI] flags:)^ -** -**
-** ^([SQLITE_OPEN_READONLY]
-** The database is opened in read-only mode. If the database does not
-** already exist, an error is returned.)^
-**
-** ^([SQLITE_OPEN_READWRITE]
-** The database is opened for reading and writing if possible, or reading
-** only if the file is write protected by the operating system. In either
-** case the database must already exist, otherwise an error is returned.)^
-**
-** ^([SQLITE_OPEN_READWRITE] | [SQLITE_OPEN_CREATE]
-** The database is opened for reading and writing, and is created if
-** it does not already exist. This is the behavior that is always used for
-** sqlite3_open() and sqlite3_open16().)^
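/*
** A minimal sketch of the flag combinations described above; the database
** path and helper names are placeholders.
*/
#include <sqlite3.h>

static int open_read_write_create(const char *path, sqlite3 **db){
  /* Same behavior as sqlite3_open(): read/write, created if missing. */
  return sqlite3_open_v2(path, db,
                         SQLITE_OPEN_READWRITE | SQLITE_OPEN_CREATE, NULL);
}

static int open_read_only(const char *path, sqlite3 **db){
  /* Fails if the database file does not already exist. */
  return sqlite3_open_v2(path, db, SQLITE_OPEN_READONLY, NULL);
}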
-** -** If the 3rd parameter to sqlite3_open_v2() is not one of the -** combinations shown above optionally combined with other -** [SQLITE_OPEN_READONLY | SQLITE_OPEN_* bits] -** then the behavior is undefined. -** -** ^If the [SQLITE_OPEN_NOMUTEX] flag is set, then the database connection -** opens in the multi-thread [threading mode] as long as the single-thread -** mode has not been set at compile-time or start-time. ^If the -** [SQLITE_OPEN_FULLMUTEX] flag is set then the database connection opens -** in the serialized [threading mode] unless single-thread was -** previously selected at compile-time or start-time. -** ^The [SQLITE_OPEN_SHAREDCACHE] flag causes the database connection to be -** eligible to use [shared cache mode], regardless of whether or not shared -** cache is enabled using [sqlite3_enable_shared_cache()]. ^The -** [SQLITE_OPEN_PRIVATECACHE] flag causes the database connection to not -** participate in [shared cache mode] even if it is enabled. -** -** ^The fourth parameter to sqlite3_open_v2() is the name of the -** [sqlite3_vfs] object that defines the operating system interface that -** the new database connection should use. ^If the fourth parameter is -** a NULL pointer then the default [sqlite3_vfs] object is used. -** -** ^If the filename is ":memory:", then a private, temporary in-memory database -** is created for the connection. ^This in-memory database will vanish when -** the database connection is closed. Future versions of SQLite might -** make use of additional special filenames that begin with the ":" character. -** It is recommended that when a database filename actually does begin with -** a ":" character you should prefix the filename with a pathname such as -** "./" to avoid ambiguity. -** -** ^If the filename is an empty string, then a private, temporary -** on-disk database will be created. ^This private database will be -** automatically deleted as soon as the database connection is closed. -** -** [[URI filenames in sqlite3_open()]]

-** URI Filenames
-** -** ^If [URI filename] interpretation is enabled, and the filename argument -** begins with "file:", then the filename is interpreted as a URI. ^URI -** filename interpretation is enabled if the [SQLITE_OPEN_URI] flag is -** set in the fourth argument to sqlite3_open_v2(), or if it has -** been enabled globally using the [SQLITE_CONFIG_URI] option with the -** [sqlite3_config()] method or by the [SQLITE_USE_URI] compile-time option. -** As of SQLite version 3.7.7, URI filename interpretation is turned off -** by default, but future releases of SQLite might enable URI filename -** interpretation by default. See "[URI filenames]" for additional -** information. -** -** URI filenames are parsed according to RFC 3986. ^If the URI contains an -** authority, then it must be either an empty string or the string -** "localhost". ^If the authority is not an empty string or "localhost", an -** error is returned to the caller. ^The fragment component of a URI, if -** present, is ignored. -** -** ^SQLite uses the path component of the URI as the name of the disk file -** which contains the database. ^If the path begins with a '/' character, -** then it is interpreted as an absolute path. ^If the path does not begin -** with a '/' (meaning that the authority section is omitted from the URI) -** then the path is interpreted as a relative path. -** ^On windows, the first component of an absolute path -** is a drive specification (e.g. "C:"). -** -** [[core URI query parameters]] -** The query component of a URI may contain parameters that are interpreted -** either by SQLite itself, or by a [VFS | custom VFS implementation]. -** SQLite interprets the following three query parameters: -** -**
-** vfs: ^The "vfs" parameter may be used to specify the name of
-**      a VFS object that provides the operating system interface that should
-**      be used to access the database file on disk. ^If this option is set to
-**      an empty string the default VFS object is used. ^Specifying an unknown
-**      VFS is an error. ^If sqlite3_open_v2() is used and the vfs option is
-**      present, then the VFS specified by the option takes precedence over
-**      the value passed as the fourth parameter to sqlite3_open_v2().
-**
-** mode: ^(The mode parameter may be set to either "ro", "rw",
-**      "rwc", or "memory". Attempting to set it to any other value is
-**      an error)^.
-**      ^If "ro" is specified, then the database is opened for read-only
-**      access, just as if the [SQLITE_OPEN_READONLY] flag had been set in the
-**      third argument to sqlite3_open_v2(). ^If the mode option is set to
-**      "rw", then the database is opened for read-write (but not create)
-**      access, as if SQLITE_OPEN_READWRITE (but not SQLITE_OPEN_CREATE) had
-**      been set. ^Value "rwc" is equivalent to setting both
-**      SQLITE_OPEN_READWRITE and SQLITE_OPEN_CREATE. ^If the mode option is
-**      set to "memory" then a pure [in-memory database] that never reads
-**      or writes from disk is used. ^It is an error to specify a value for
-**      the mode parameter that is less restrictive than that specified by
-**      the flags passed in the third parameter to sqlite3_open_v2().
-**
-** cache: ^The cache parameter may be set to either "shared" or
-**      "private". ^Setting it to "shared" is equivalent to setting the
-**      SQLITE_OPEN_SHAREDCACHE bit in the flags argument passed to
-**      sqlite3_open_v2(). ^Setting the cache parameter to "private" is
-**      equivalent to setting the SQLITE_OPEN_PRIVATECACHE bit.
-**      ^If sqlite3_open_v2() is used and the "cache" parameter is present in
-**      a URI filename, its value overrides any behavior requested by setting
-**      SQLITE_OPEN_PRIVATECACHE or SQLITE_OPEN_SHAREDCACHE flag.
-**
-** psow: ^The psow parameter may be "true" (or "on" or "yes" or
-**      "1") or "false" (or "off" or "no" or "0") to indicate that the
-**      [powersafe overwrite] property does or does not apply to the
-**      storage media on which the database file resides. ^The psow query
-**      parameter only works for the built-in unix and Windows VFSes.
-**
-** nolock: ^The nolock parameter is a boolean query parameter
-**      which if set disables file locking in rollback journal modes. This
-**      is useful for accessing a database on a filesystem that does not
-**      support locking. Caution: Database corruption might result if two
-**      or more processes write to the same database and any one of those
-**      processes uses nolock=1.
-**
-** immutable: ^The immutable parameter is a boolean query
-**      parameter that indicates that the database file is stored on
-**      read-only media. ^When immutable is set, SQLite assumes that the
-**      database file cannot be changed, even by a process with higher
-**      privilege, and so the database is opened read-only and all locking
-**      and change detection is disabled. Caution: Setting the immutable
-**      property on a database file that does in fact change can result
-**      in incorrect query results and/or [SQLITE_CORRUPT] errors.
-**      See also: [SQLITE_IOCAP_IMMUTABLE].
-** -** ^Specifying an unknown parameter in the query component of a URI is not an -** error. Future versions of SQLite might understand additional query -** parameters. See "[query parameters with special meaning to SQLite]" for -** additional information. -** -** [[URI filename examples]]

-** URI filename examples
-**
-** URI filenames and their results:
-**
-** file:data.db
-**      Open the file "data.db" in the current directory.
-**
-** file:/home/fred/data.db
-** file:///home/fred/data.db
-** file://localhost/home/fred/data.db
-**      Open the database file "/home/fred/data.db".
-**
-** file://darkstar/home/fred/data.db
-**      An error. "darkstar" is not a recognized authority.
-**
-** file:///C:/Documents%20and%20Settings/fred/Desktop/data.db
-**      Windows only: Open the file "data.db" on fred's desktop on drive
-**      C:. Note that the %20 escaping in this example is not strictly
-**      necessary - space characters can be used literally
-**      in URI filenames.
-**
-** file:data.db?mode=ro&cache=private
-**      Open file "data.db" in the current directory for read-only access.
-**      Regardless of whether or not shared-cache mode is enabled by
-**      default, use a private cache.
-**
-** file:/home/fred/data.db?vfs=unix-dotfile
-**      Open file "/home/fred/data.db". Use the special VFS "unix-dotfile"
-**      that uses dot-files in place of posix advisory locking.
-**
-** file:data.db?mode=readonly
-**      An error. "readonly" is not a valid option for the "mode" parameter.
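/*
** A minimal sketch of opening one of the URI forms from the list above.
** SQLITE_OPEN_URI enables URI interpretation for this one call; the helper
** name is hypothetical.
*/
#include <sqlite3.h>

static int open_readonly_private_cache(sqlite3 **db){
  /* Corresponds to the "file:data.db?mode=ro&cache=private" example above. */
  return sqlite3_open_v2("file:data.db?mode=ro&cache=private", db,
                         SQLITE_OPEN_READONLY | SQLITE_OPEN_URI, NULL);
}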
-** -** ^URI hexadecimal escape sequences (%HH) are supported within the path and -** query components of a URI. A hexadecimal escape sequence consists of a -** percent sign - "%" - followed by exactly two hexadecimal digits -** specifying an octet value. ^Before the path or query components of a -** URI filename are interpreted, they are encoded using UTF-8 and all -** hexadecimal escape sequences replaced by a single byte containing the -** corresponding octet. If this process generates an invalid UTF-8 encoding, -** the results are undefined. -** -** Note to Windows users: The encoding used for the filename argument -** of sqlite3_open() and sqlite3_open_v2() must be UTF-8, not whatever -** codepage is currently defined. Filenames containing international -** characters must be converted to UTF-8 prior to passing them into -** sqlite3_open() or sqlite3_open_v2(). -** -** Note to Windows Runtime users: The temporary directory must be set -** prior to calling sqlite3_open() or sqlite3_open_v2(). Otherwise, various -** features that require the use of temporary files may fail. -** -** See also: [sqlite3_temp_directory] -*/ -SQLITE_API int sqlite3_open( - const char *filename, /* Database filename (UTF-8) */ - sqlite3 **ppDb /* OUT: SQLite db handle */ -); -SQLITE_API int sqlite3_open16( - const void *filename, /* Database filename (UTF-16) */ - sqlite3 **ppDb /* OUT: SQLite db handle */ -); -SQLITE_API int sqlite3_open_v2( - const char *filename, /* Database filename (UTF-8) */ - sqlite3 **ppDb, /* OUT: SQLite db handle */ - int flags, /* Flags */ - const char *zVfs /* Name of VFS module to use */ -); - -/* -** CAPI3REF: Obtain Values For URI Parameters -** -** These are utility routines, useful to VFS implementations, that check -** to see if a database file was a URI that contained a specific query -** parameter, and if so obtains the value of that query parameter. -** -** If F is the database filename pointer passed into the xOpen() method of -** a VFS implementation when the flags parameter to xOpen() has one or -** more of the [SQLITE_OPEN_URI] or [SQLITE_OPEN_MAIN_DB] bits set and -** P is the name of the query parameter, then -** sqlite3_uri_parameter(F,P) returns the value of the P -** parameter if it exists or a NULL pointer if P does not appear as a -** query parameter on F. If P is a query parameter of F -** has no explicit value, then sqlite3_uri_parameter(F,P) returns -** a pointer to an empty string. -** -** The sqlite3_uri_boolean(F,P,B) routine assumes that P is a boolean -** parameter and returns true (1) or false (0) according to the value -** of P. The sqlite3_uri_boolean(F,P,B) routine returns true (1) if the -** value of query parameter P is one of "yes", "true", or "on" in any -** case or if the value begins with a non-zero number. The -** sqlite3_uri_boolean(F,P,B) routines returns false (0) if the value of -** query parameter P is one of "no", "false", or "off" in any case or -** if the value begins with a numeric zero. If P is not a query -** parameter on F or if the value of P is does not match any of the -** above, then sqlite3_uri_boolean(F,P,B) returns (B!=0). -** -** The sqlite3_uri_int64(F,P,D) routine converts the value of P into a -** 64-bit signed integer and returns that integer, or D if P does not -** exist. If the value of P is something other than an integer, then -** zero is returned. -** -** If F is a NULL pointer, then sqlite3_uri_parameter(F,P) returns NULL and -** sqlite3_uri_boolean(F,P,B) returns B. 
If F is not a NULL pointer and -** is not a database file pathname pointer that SQLite passed into the xOpen -** VFS method, then the behavior of this routine is undefined and probably -** undesirable. -*/ -SQLITE_API const char *sqlite3_uri_parameter(const char *zFilename, const char *zParam); -SQLITE_API int sqlite3_uri_boolean(const char *zFile, const char *zParam, int bDefault); -SQLITE_API sqlite3_int64 sqlite3_uri_int64(const char*, const char*, sqlite3_int64); - - -/* -** CAPI3REF: Error Codes And Messages -** -** ^The sqlite3_errcode() interface returns the numeric [result code] or -** [extended result code] for the most recent failed sqlite3_* API call -** associated with a [database connection]. If a prior API call failed -** but the most recent API call succeeded, the return value from -** sqlite3_errcode() is undefined. ^The sqlite3_extended_errcode() -** interface is the same except that it always returns the -** [extended result code] even when extended result codes are -** disabled. -** -** ^The sqlite3_errmsg() and sqlite3_errmsg16() return English-language -** text that describes the error, as either UTF-8 or UTF-16 respectively. -** ^(Memory to hold the error message string is managed internally. -** The application does not need to worry about freeing the result. -** However, the error string might be overwritten or deallocated by -** subsequent calls to other SQLite interface functions.)^ -** -** ^The sqlite3_errstr() interface returns the English-language text -** that describes the [result code], as UTF-8. -** ^(Memory to hold the error message string is managed internally -** and must not be freed by the application)^. -** -** When the serialized [threading mode] is in use, it might be the -** case that a second error occurs on a separate thread in between -** the time of the first error and the call to these interfaces. -** When that happens, the second error will be reported since these -** interfaces always report the most recent result. To avoid -** this, each thread can obtain exclusive use of the [database connection] D -** by invoking [sqlite3_mutex_enter]([sqlite3_db_mutex](D)) before beginning -** to use D and invoking [sqlite3_mutex_leave]([sqlite3_db_mutex](D)) after -** all calls to the interfaces listed here are completed. -** -** If an interface fails with SQLITE_MISUSE, that means the interface -** was invoked incorrectly by the application. In that case, the -** error code and message may or may not be set. -*/ -SQLITE_API int sqlite3_errcode(sqlite3 *db); -SQLITE_API int sqlite3_extended_errcode(sqlite3 *db); -SQLITE_API const char *sqlite3_errmsg(sqlite3*); -SQLITE_API const void *sqlite3_errmsg16(sqlite3*); -SQLITE_API const char *sqlite3_errstr(int); - -/* -** CAPI3REF: SQL Statement Object -** KEYWORDS: {prepared statement} {prepared statements} -** -** An instance of this object represents a single SQL statement. -** This object is variously known as a "prepared statement" or a -** "compiled SQL statement" or simply as a "statement". -** -** The life of a statement object goes something like this: -** -**
-** 1. Create the object using [sqlite3_prepare_v2()] or a related
-**    function.
-** 2. Bind values to [host parameters] using the sqlite3_bind_*()
-**    interfaces.
-** 3. Run the SQL by calling [sqlite3_step()] one or more times.
-** 4. Reset the statement using [sqlite3_reset()] then go back
-**    to step 2. Do this zero or more times.
-** 5. Destroy the object using [sqlite3_finalize()].
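/*
** A compressed sketch of steps 1-5 above for a single-use statement (step 4,
** reset, is skipped).  The table and column names are hypothetical and real
** code should check every return value.
*/
#include <sqlite3.h>

static int count_users(sqlite3 *db, int min_age, int *count){
  sqlite3_stmt *stmt = NULL;
  int rc = sqlite3_prepare_v2(db,
      "SELECT count(*) FROM users WHERE age >= ?1", -1, &stmt, NULL); /* 1. create  */
  if( rc!=SQLITE_OK ) return rc;
  sqlite3_bind_int(stmt, 1, min_age);                                 /* 2. bind    */
  rc = sqlite3_step(stmt);                                            /* 3. run     */
  if( rc==SQLITE_ROW ){
    *count = sqlite3_column_int(stmt, 0);
    rc = SQLITE_OK;
  }
  sqlite3_finalize(stmt);                                             /* 5. destroy */
  return rc;
}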
-** -** Refer to documentation on individual methods above for additional -** information. -*/ -typedef struct sqlite3_stmt sqlite3_stmt; - -/* -** CAPI3REF: Run-time Limits -** -** ^(This interface allows the size of various constructs to be limited -** on a connection by connection basis. The first parameter is the -** [database connection] whose limit is to be set or queried. The -** second parameter is one of the [limit categories] that define a -** class of constructs to be size limited. The third parameter is the -** new limit for that construct.)^ -** -** ^If the new limit is a negative number, the limit is unchanged. -** ^(For each limit category SQLITE_LIMIT_NAME there is a -** [limits | hard upper bound] -** set at compile-time by a C preprocessor macro called -** [limits | SQLITE_MAX_NAME]. -** (The "_LIMIT_" in the name is changed to "_MAX_".))^ -** ^Attempts to increase a limit above its hard upper bound are -** silently truncated to the hard upper bound. -** -** ^Regardless of whether or not the limit was changed, the -** [sqlite3_limit()] interface returns the prior value of the limit. -** ^Hence, to find the current value of a limit without changing it, -** simply invoke this interface with the third parameter set to -1. -** -** Run-time limits are intended for use in applications that manage -** both their own internal database and also databases that are controlled -** by untrusted external sources. An example application might be a -** web browser that has its own databases for storing history and -** separate databases controlled by JavaScript applications downloaded -** off the Internet. The internal databases can be given the -** large, default limits. Databases managed by external sources can -** be given much smaller limits designed to prevent a denial of service -** attack. Developers might also want to use the [sqlite3_set_authorizer()] -** interface to further control untrusted SQL. The size of the database -** created by an untrusted script can be contained using the -** [max_page_count] [PRAGMA]. -** -** New run-time limit categories may be added in future releases. -*/ -SQLITE_API int sqlite3_limit(sqlite3*, int id, int newVal); - -/* -** CAPI3REF: Run-Time Limit Categories -** KEYWORDS: {limit category} {*limit categories} -** -** These constants define various performance limits -** that can be lowered at run-time using [sqlite3_limit()]. -** The synopsis of the meanings of the various limits is shown below. -** Additional information is available at [limits | Limits in SQLite]. -** -**
-** [[SQLITE_LIMIT_LENGTH]] ^(SQLITE_LIMIT_LENGTH
-** The maximum size of any string or BLOB or table row, in bytes.)^
-**
-** [[SQLITE_LIMIT_SQL_LENGTH]] ^(SQLITE_LIMIT_SQL_LENGTH
-** The maximum length of an SQL statement, in bytes.)^
-**
-** [[SQLITE_LIMIT_COLUMN]] ^(SQLITE_LIMIT_COLUMN
-** The maximum number of columns in a table definition or in the
-** result set of a [SELECT] or the maximum number of columns in an index
-** or in an ORDER BY or GROUP BY clause.)^
-**
-** [[SQLITE_LIMIT_EXPR_DEPTH]] ^(SQLITE_LIMIT_EXPR_DEPTH
-** The maximum depth of the parse tree on any expression.)^
-**
-** [[SQLITE_LIMIT_COMPOUND_SELECT]] ^(SQLITE_LIMIT_COMPOUND_SELECT
-** The maximum number of terms in a compound SELECT statement.)^
-**
-** [[SQLITE_LIMIT_VDBE_OP]] ^(SQLITE_LIMIT_VDBE_OP
-** The maximum number of instructions in a virtual machine program
-** used to implement an SQL statement. This limit is not currently
-** enforced, though that might be added in some future release of
-** SQLite.)^
-**
-** [[SQLITE_LIMIT_FUNCTION_ARG]] ^(SQLITE_LIMIT_FUNCTION_ARG
-** The maximum number of arguments on a function.)^
-**
-** [[SQLITE_LIMIT_ATTACHED]] ^(SQLITE_LIMIT_ATTACHED
-** The maximum number of [ATTACH | attached databases].)^
-**
-** [[SQLITE_LIMIT_LIKE_PATTERN_LENGTH]]
-** ^(SQLITE_LIMIT_LIKE_PATTERN_LENGTH
-** The maximum length of the pattern argument to the [LIKE] or
-** [GLOB] operators.)^
-**
-** [[SQLITE_LIMIT_VARIABLE_NUMBER]]
-** ^(SQLITE_LIMIT_VARIABLE_NUMBER
-** The maximum index number of any [parameter] in an SQL statement.)^
-**
-** [[SQLITE_LIMIT_TRIGGER_DEPTH]] ^(SQLITE_LIMIT_TRIGGER_DEPTH
-** The maximum depth of recursion for triggers.)^
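/*
** A minimal sketch of reading and lowering some of the limits above for a
** connection that will run untrusted SQL.  The chosen values are arbitrary.
*/
#include <sqlite3.h>

static void restrict_untrusted(sqlite3 *db){
  /* A third parameter of -1 queries the current value without changing it. */
  int cur_len = sqlite3_limit(db, SQLITE_LIMIT_LENGTH, -1);
  (void)cur_len;
  sqlite3_limit(db, SQLITE_LIMIT_SQL_LENGTH, 10000);
  sqlite3_limit(db, SQLITE_LIMIT_LIKE_PATTERN_LENGTH, 100);
}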
-*/ -#define SQLITE_LIMIT_LENGTH 0 -#define SQLITE_LIMIT_SQL_LENGTH 1 -#define SQLITE_LIMIT_COLUMN 2 -#define SQLITE_LIMIT_EXPR_DEPTH 3 -#define SQLITE_LIMIT_COMPOUND_SELECT 4 -#define SQLITE_LIMIT_VDBE_OP 5 -#define SQLITE_LIMIT_FUNCTION_ARG 6 -#define SQLITE_LIMIT_ATTACHED 7 -#define SQLITE_LIMIT_LIKE_PATTERN_LENGTH 8 -#define SQLITE_LIMIT_VARIABLE_NUMBER 9 -#define SQLITE_LIMIT_TRIGGER_DEPTH 10 - -/* -** CAPI3REF: Compiling An SQL Statement -** KEYWORDS: {SQL statement compiler} -** -** To execute an SQL query, it must first be compiled into a byte-code -** program using one of these routines. -** -** The first argument, "db", is a [database connection] obtained from a -** prior successful call to [sqlite3_open()], [sqlite3_open_v2()] or -** [sqlite3_open16()]. The database connection must not have been closed. -** -** The second argument, "zSql", is the statement to be compiled, encoded -** as either UTF-8 or UTF-16. The sqlite3_prepare() and sqlite3_prepare_v2() -** interfaces use UTF-8, and sqlite3_prepare16() and sqlite3_prepare16_v2() -** use UTF-16. -** -** ^If the nByte argument is less than zero, then zSql is read up to the -** first zero terminator. ^If nByte is non-negative, then it is the maximum -** number of bytes read from zSql. ^When nByte is non-negative, the -** zSql string ends at either the first '\000' or '\u0000' character or -** the nByte-th byte, whichever comes first. If the caller knows -** that the supplied string is nul-terminated, then there is a small -** performance advantage to be gained by passing an nByte parameter that -** is equal to the number of bytes in the input string including -** the nul-terminator bytes as this saves SQLite from having to -** make a copy of the input string. -** -** ^If pzTail is not NULL then *pzTail is made to point to the first byte -** past the end of the first SQL statement in zSql. These routines only -** compile the first statement in zSql, so *pzTail is left pointing to -** what remains uncompiled. -** -** ^*ppStmt is left pointing to a compiled [prepared statement] that can be -** executed using [sqlite3_step()]. ^If there is an error, *ppStmt is set -** to NULL. ^If the input text contains no SQL (if the input is an empty -** string or a comment) then *ppStmt is set to NULL. -** The calling procedure is responsible for deleting the compiled -** SQL statement using [sqlite3_finalize()] after it has finished with it. -** ppStmt may not be NULL. -** -** ^On success, the sqlite3_prepare() family of routines return [SQLITE_OK]; -** otherwise an [error code] is returned. -** -** The sqlite3_prepare_v2() and sqlite3_prepare16_v2() interfaces are -** recommended for all new programs. The two older interfaces are retained -** for backwards compatibility, but their use is discouraged. -** ^In the "v2" interfaces, the prepared statement -** that is returned (the [sqlite3_stmt] object) contains a copy of the -** original SQL text. This causes the [sqlite3_step()] interface to -** behave differently in three ways: -** -**
-** 1. ^If the database schema changes, instead of returning [SQLITE_SCHEMA] as it
-**    always used to do, [sqlite3_step()] will automatically recompile the SQL
-**    statement and try to run it again. As many as [SQLITE_MAX_SCHEMA_RETRY]
-**    retries will occur before sqlite3_step() gives up and returns an error.
-**
-** 2. ^When an error occurs, [sqlite3_step()] will return one of the detailed
-**    [error codes] or [extended error codes]. ^The legacy behavior was that
-**    [sqlite3_step()] would only return a generic [SQLITE_ERROR] result code
-**    and the application would have to make a second call to [sqlite3_reset()]
-**    in order to find the underlying cause of the problem. With the "v2" prepare
-**    interfaces, the underlying reason for the error is returned immediately.
-**
-** 3. ^If the specific value bound to [parameter | host parameter] in the
-**    WHERE clause might influence the choice of query plan for a statement,
-**    then the statement will be automatically recompiled, as if there had been
-**    a schema change, on the first [sqlite3_step()] call following any change
-**    to the [sqlite3_bind_text | bindings] of that [parameter].
-**    ^The specific value of WHERE-clause [parameter] might influence the
-**    choice of query plan if the parameter is the left-hand side of a [LIKE]
-**    or [GLOB] operator or if the parameter is compared to an indexed column
-**    and the [SQLITE_ENABLE_STAT3] compile-time option is enabled.
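/*
** A minimal sketch of point 2 above: with the "v2" prepare interfaces,
** sqlite3_step() reports the detailed result code directly.  Error handling
** is abbreviated.
*/
#include <stdio.h>
#include <sqlite3.h>

static int run_once(sqlite3 *db, const char *sql){
  sqlite3_stmt *stmt = NULL;
  int rc = sqlite3_prepare_v2(db, sql, -1, &stmt, NULL);
  if( rc!=SQLITE_OK ) return rc;
  rc = sqlite3_step(stmt);
  if( rc!=SQLITE_ROW && rc!=SQLITE_DONE ){
    /* e.g. SQLITE_BUSY or SQLITE_CONSTRAINT rather than a bare SQLITE_ERROR */
    fprintf(stderr, "step failed: %d (%s)\n", rc, sqlite3_errmsg(db));
  }
  sqlite3_finalize(stmt);
  return rc;
}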
-*/ -SQLITE_API int sqlite3_prepare( - sqlite3 *db, /* Database handle */ - const char *zSql, /* SQL statement, UTF-8 encoded */ - int nByte, /* Maximum length of zSql in bytes. */ - sqlite3_stmt **ppStmt, /* OUT: Statement handle */ - const char **pzTail /* OUT: Pointer to unused portion of zSql */ -); -SQLITE_API int sqlite3_prepare_v2( - sqlite3 *db, /* Database handle */ - const char *zSql, /* SQL statement, UTF-8 encoded */ - int nByte, /* Maximum length of zSql in bytes. */ - sqlite3_stmt **ppStmt, /* OUT: Statement handle */ - const char **pzTail /* OUT: Pointer to unused portion of zSql */ -); -SQLITE_API int sqlite3_prepare16( - sqlite3 *db, /* Database handle */ - const void *zSql, /* SQL statement, UTF-16 encoded */ - int nByte, /* Maximum length of zSql in bytes. */ - sqlite3_stmt **ppStmt, /* OUT: Statement handle */ - const void **pzTail /* OUT: Pointer to unused portion of zSql */ -); -SQLITE_API int sqlite3_prepare16_v2( - sqlite3 *db, /* Database handle */ - const void *zSql, /* SQL statement, UTF-16 encoded */ - int nByte, /* Maximum length of zSql in bytes. */ - sqlite3_stmt **ppStmt, /* OUT: Statement handle */ - const void **pzTail /* OUT: Pointer to unused portion of zSql */ -); - -/* -** CAPI3REF: Retrieving Statement SQL -** -** ^This interface can be used to retrieve a saved copy of the original -** SQL text used to create a [prepared statement] if that statement was -** compiled using either [sqlite3_prepare_v2()] or [sqlite3_prepare16_v2()]. -*/ -SQLITE_API const char *sqlite3_sql(sqlite3_stmt *pStmt); - -/* -** CAPI3REF: Determine If An SQL Statement Writes The Database -** -** ^The sqlite3_stmt_readonly(X) interface returns true (non-zero) if -** and only if the [prepared statement] X makes no direct changes to -** the content of the database file. -** -** Note that [application-defined SQL functions] or -** [virtual tables] might change the database indirectly as a side effect. -** ^(For example, if an application defines a function "eval()" that -** calls [sqlite3_exec()], then the following SQL statement would -** change the database file through side-effects: -** -**
-**    SELECT eval('DELETE FROM t1') FROM t2;
-** 
-** -** But because the [SELECT] statement does not change the database file -** directly, sqlite3_stmt_readonly() would still return true.)^ -** -** ^Transaction control statements such as [BEGIN], [COMMIT], [ROLLBACK], -** [SAVEPOINT], and [RELEASE] cause sqlite3_stmt_readonly() to return true, -** since the statements themselves do not actually modify the database but -** rather they control the timing of when other statements modify the -** database. ^The [ATTACH] and [DETACH] statements also cause -** sqlite3_stmt_readonly() to return true since, while those statements -** change the configuration of a database connection, they do not make -** changes to the content of the database files on disk. -*/ -SQLITE_API int sqlite3_stmt_readonly(sqlite3_stmt *pStmt); - -/* -** CAPI3REF: Determine If A Prepared Statement Has Been Reset -** -** ^The sqlite3_stmt_busy(S) interface returns true (non-zero) if the -** [prepared statement] S has been stepped at least once using -** [sqlite3_step(S)] but has not run to completion and/or has not -** been reset using [sqlite3_reset(S)]. ^The sqlite3_stmt_busy(S) -** interface returns false if S is a NULL pointer. If S is not a -** NULL pointer and is not a pointer to a valid [prepared statement] -** object, then the behavior is undefined and probably undesirable. -** -** This interface can be used in combination [sqlite3_next_stmt()] -** to locate all prepared statements associated with a database -** connection that are in need of being reset. This can be used, -** for example, in diagnostic routines to search for prepared -** statements that are holding a transaction open. -*/ -SQLITE_API int sqlite3_stmt_busy(sqlite3_stmt*); - -/* -** CAPI3REF: Dynamically Typed Value Object -** KEYWORDS: {protected sqlite3_value} {unprotected sqlite3_value} -** -** SQLite uses the sqlite3_value object to represent all values -** that can be stored in a database table. SQLite uses dynamic typing -** for the values it stores. ^Values stored in sqlite3_value objects -** can be integers, floating point values, strings, BLOBs, or NULL. -** -** An sqlite3_value object may be either "protected" or "unprotected". -** Some interfaces require a protected sqlite3_value. Other interfaces -** will accept either a protected or an unprotected sqlite3_value. -** Every interface that accepts sqlite3_value arguments specifies -** whether or not it requires a protected sqlite3_value. -** -** The terms "protected" and "unprotected" refer to whether or not -** a mutex is held. An internal mutex is held for a protected -** sqlite3_value object but no mutex is held for an unprotected -** sqlite3_value object. If SQLite is compiled to be single-threaded -** (with [SQLITE_THREADSAFE=0] and with [sqlite3_threadsafe()] returning 0) -** or if SQLite is run in one of reduced mutex modes -** [SQLITE_CONFIG_SINGLETHREAD] or [SQLITE_CONFIG_MULTITHREAD] -** then there is no distinction between protected and unprotected -** sqlite3_value objects and they can be used interchangeably. However, -** for maximum code portability it is recommended that applications -** still make the distinction between protected and unprotected -** sqlite3_value objects even when not strictly required. -** -** ^The sqlite3_value objects that are passed as parameters into the -** implementation of [application-defined SQL functions] are protected. -** ^The sqlite3_value object returned by -** [sqlite3_column_value()] is unprotected. 
-** Unprotected sqlite3_value objects may only be used with -** [sqlite3_result_value()] and [sqlite3_bind_value()]. -** The [sqlite3_value_blob | sqlite3_value_type()] family of -** interfaces require protected sqlite3_value objects. -*/ -typedef struct Mem sqlite3_value; - -/* -** CAPI3REF: SQL Function Context Object -** -** The context in which an SQL function executes is stored in an -** sqlite3_context object. ^A pointer to an sqlite3_context object -** is always first parameter to [application-defined SQL functions]. -** The application-defined SQL function implementation will pass this -** pointer through into calls to [sqlite3_result_int | sqlite3_result()], -** [sqlite3_aggregate_context()], [sqlite3_user_data()], -** [sqlite3_context_db_handle()], [sqlite3_get_auxdata()], -** and/or [sqlite3_set_auxdata()]. -*/ -typedef struct sqlite3_context sqlite3_context; - -/* -** CAPI3REF: Binding Values To Prepared Statements -** KEYWORDS: {host parameter} {host parameters} {host parameter name} -** KEYWORDS: {SQL parameter} {SQL parameters} {parameter binding} -** -** ^(In the SQL statement text input to [sqlite3_prepare_v2()] and its variants, -** literals may be replaced by a [parameter] that matches one of following -** templates: -** -**
-**      ?
-**      ?NNN
-**      :VVV
-**      @VVV
-**      $VVV
-** -** In the templates above, NNN represents an integer literal, -** and VVV represents an alphanumeric identifier.)^ ^The values of these -** parameters (also called "host parameter names" or "SQL parameters") -** can be set using the sqlite3_bind_*() routines defined here. -** -** ^The first argument to the sqlite3_bind_*() routines is always -** a pointer to the [sqlite3_stmt] object returned from -** [sqlite3_prepare_v2()] or its variants. -** -** ^The second argument is the index of the SQL parameter to be set. -** ^The leftmost SQL parameter has an index of 1. ^When the same named -** SQL parameter is used more than once, second and subsequent -** occurrences have the same index as the first occurrence. -** ^The index for named parameters can be looked up using the -** [sqlite3_bind_parameter_index()] API if desired. ^The index -** for "?NNN" parameters is the value of NNN. -** ^The NNN value must be between 1 and the [sqlite3_limit()] -** parameter [SQLITE_LIMIT_VARIABLE_NUMBER] (default value: 999). -** -** ^The third argument is the value to bind to the parameter. -** ^If the third parameter to sqlite3_bind_text() or sqlite3_bind_text16() -** or sqlite3_bind_blob() is a NULL pointer then the fourth parameter -** is ignored and the end result is the same as sqlite3_bind_null(). -** -** ^(In those routines that have a fourth argument, its value is the -** number of bytes in the parameter. To be clear: the value is the -** number of bytes in the value, not the number of characters.)^ -** ^If the fourth parameter to sqlite3_bind_text() or sqlite3_bind_text16() -** is negative, then the length of the string is -** the number of bytes up to the first zero terminator. -** If the fourth parameter to sqlite3_bind_blob() is negative, then -** the behavior is undefined. -** If a non-negative fourth parameter is provided to sqlite3_bind_text() -** or sqlite3_bind_text16() then that parameter must be the byte offset -** where the NUL terminator would occur assuming the string were NUL -** terminated. If any NUL characters occur at byte offsets less than -** the value of the fourth parameter then the resulting string value will -** contain embedded NULs. The result of expressions involving strings -** with embedded NULs is undefined. -** -** ^The fifth argument to sqlite3_bind_blob(), sqlite3_bind_text(), and -** sqlite3_bind_text16() is a destructor used to dispose of the BLOB or -** string after SQLite has finished with it. ^The destructor is called -** to dispose of the BLOB or string even if the call to sqlite3_bind_blob(), -** sqlite3_bind_text(), or sqlite3_bind_text16() fails. -** ^If the fifth argument is -** the special value [SQLITE_STATIC], then SQLite assumes that the -** information is in static, unmanaged space and does not need to be freed. -** ^If the fifth argument has the value [SQLITE_TRANSIENT], then -** SQLite makes its own private copy of the data immediately, before -** the sqlite3_bind_*() routine returns. -** -** ^The sqlite3_bind_zeroblob() routine binds a BLOB of length N that -** is filled with zeroes. ^A zeroblob uses a fixed amount of memory -** (just an integer to hold its size) while it is being processed. -** Zeroblobs are intended to serve as placeholders for BLOBs whose -** content is later written using -** [sqlite3_blob_open | incremental BLOB I/O] routines. -** ^A negative value for the zeroblob results in a zero-length BLOB. 
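/*
** A minimal sketch of binding by position and by name, of the SQLITE_STATIC /
** SQLITE_TRANSIENT destructor constants, and of a zeroblob placeholder.  The
** table, column and parameter names are hypothetical.
*/
#include <sqlite3.h>

static int bind_row(sqlite3 *db, const char *tmp_name, int blob_size){
  sqlite3_stmt *stmt = NULL;
  int rc = sqlite3_prepare_v2(db,
      "INSERT INTO t(a,b,c) VALUES(?1, :name, $blob)", -1, &stmt, NULL);
  if( rc!=SQLITE_OK ) return rc;
  sqlite3_bind_int(stmt, 1, 42);                          /* "?1" by position */
  /* tmp_name may be short-lived, so ask SQLite to copy it immediately.      */
  sqlite3_bind_text(stmt, sqlite3_bind_parameter_index(stmt, ":name"),
                    tmp_name, -1, SQLITE_TRANSIENT);
  /* Reserve a BLOB of zeros to be filled later with incremental BLOB I/O.   */
  sqlite3_bind_zeroblob(stmt, sqlite3_bind_parameter_index(stmt, "$blob"),
                        blob_size);
  rc = sqlite3_step(stmt);
  sqlite3_finalize(stmt);
  return rc==SQLITE_DONE ? SQLITE_OK : rc;
}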
-** -** ^If any of the sqlite3_bind_*() routines are called with a NULL pointer -** for the [prepared statement] or with a prepared statement for which -** [sqlite3_step()] has been called more recently than [sqlite3_reset()], -** then the call will return [SQLITE_MISUSE]. If any sqlite3_bind_() -** routine is passed a [prepared statement] that has been finalized, the -** result is undefined and probably harmful. -** -** ^Bindings are not cleared by the [sqlite3_reset()] routine. -** ^Unbound parameters are interpreted as NULL. -** -** ^The sqlite3_bind_* routines return [SQLITE_OK] on success or an -** [error code] if anything goes wrong. -** ^[SQLITE_RANGE] is returned if the parameter -** index is out of range. ^[SQLITE_NOMEM] is returned if malloc() fails. -** -** See also: [sqlite3_bind_parameter_count()], -** [sqlite3_bind_parameter_name()], and [sqlite3_bind_parameter_index()]. -*/ -SQLITE_API int sqlite3_bind_blob(sqlite3_stmt*, int, const void*, int n, void(*)(void*)); -SQLITE_API int sqlite3_bind_double(sqlite3_stmt*, int, double); -SQLITE_API int sqlite3_bind_int(sqlite3_stmt*, int, int); -SQLITE_API int sqlite3_bind_int64(sqlite3_stmt*, int, sqlite3_int64); -SQLITE_API int sqlite3_bind_null(sqlite3_stmt*, int); -SQLITE_API int sqlite3_bind_text(sqlite3_stmt*, int, const char*, int n, void(*)(void*)); -SQLITE_API int sqlite3_bind_text16(sqlite3_stmt*, int, const void*, int, void(*)(void*)); -SQLITE_API int sqlite3_bind_value(sqlite3_stmt*, int, const sqlite3_value*); -SQLITE_API int sqlite3_bind_zeroblob(sqlite3_stmt*, int, int n); - -/* -** CAPI3REF: Number Of SQL Parameters -** -** ^This routine can be used to find the number of [SQL parameters] -** in a [prepared statement]. SQL parameters are tokens of the -** form "?", "?NNN", ":AAA", "$AAA", or "@AAA" that serve as -** placeholders for values that are [sqlite3_bind_blob | bound] -** to the parameters at a later time. -** -** ^(This routine actually returns the index of the largest (rightmost) -** parameter. For all forms except ?NNN, this will correspond to the -** number of unique parameters. If parameters of the ?NNN form are used, -** there may be gaps in the list.)^ -** -** See also: [sqlite3_bind_blob|sqlite3_bind()], -** [sqlite3_bind_parameter_name()], and -** [sqlite3_bind_parameter_index()]. -*/ -SQLITE_API int sqlite3_bind_parameter_count(sqlite3_stmt*); - -/* -** CAPI3REF: Name Of A Host Parameter -** -** ^The sqlite3_bind_parameter_name(P,N) interface returns -** the name of the N-th [SQL parameter] in the [prepared statement] P. -** ^(SQL parameters of the form "?NNN" or ":AAA" or "@AAA" or "$AAA" -** have a name which is the string "?NNN" or ":AAA" or "@AAA" or "$AAA" -** respectively. -** In other words, the initial ":" or "$" or "@" or "?" -** is included as part of the name.)^ -** ^Parameters of the form "?" without a following integer have no name -** and are referred to as "nameless" or "anonymous parameters". -** -** ^The first host parameter has an index of 1, not 0. -** -** ^If the value N is out of range or if the N-th parameter is -** nameless, then NULL is returned. ^The returned string is -** always in UTF-8 encoding even if the named parameter was -** originally specified as UTF-16 in [sqlite3_prepare16()] or -** [sqlite3_prepare16_v2()]. -** -** See also: [sqlite3_bind_blob|sqlite3_bind()], -** [sqlite3_bind_parameter_count()], and -** [sqlite3_bind_parameter_index()]. 
-*/ -SQLITE_API const char *sqlite3_bind_parameter_name(sqlite3_stmt*, int); - -/* -** CAPI3REF: Index Of A Parameter With A Given Name -** -** ^Return the index of an SQL parameter given its name. ^The -** index value returned is suitable for use as the second -** parameter to [sqlite3_bind_blob|sqlite3_bind()]. ^A zero -** is returned if no matching parameter is found. ^The parameter -** name must be given in UTF-8 even if the original statement -** was prepared from UTF-16 text using [sqlite3_prepare16_v2()]. -** -** See also: [sqlite3_bind_blob|sqlite3_bind()], -** [sqlite3_bind_parameter_count()], and -** [sqlite3_bind_parameter_index()]. -*/ -SQLITE_API int sqlite3_bind_parameter_index(sqlite3_stmt*, const char *zName); - -/* -** CAPI3REF: Reset All Bindings On A Prepared Statement -** -** ^Contrary to the intuition of many, [sqlite3_reset()] does not reset -** the [sqlite3_bind_blob | bindings] on a [prepared statement]. -** ^Use this routine to reset all host parameters to NULL. -*/ -SQLITE_API int sqlite3_clear_bindings(sqlite3_stmt*); - -/* -** CAPI3REF: Number Of Columns In A Result Set -** -** ^Return the number of columns in the result set returned by the -** [prepared statement]. ^This routine returns 0 if pStmt is an SQL -** statement that does not return data (for example an [UPDATE]). -** -** See also: [sqlite3_data_count()] -*/ -SQLITE_API int sqlite3_column_count(sqlite3_stmt *pStmt); - -/* -** CAPI3REF: Column Names In A Result Set -** -** ^These routines return the name assigned to a particular column -** in the result set of a [SELECT] statement. ^The sqlite3_column_name() -** interface returns a pointer to a zero-terminated UTF-8 string -** and sqlite3_column_name16() returns a pointer to a zero-terminated -** UTF-16 string. ^The first parameter is the [prepared statement] -** that implements the [SELECT] statement. ^The second parameter is the -** column number. ^The leftmost column is number 0. -** -** ^The returned string pointer is valid until either the [prepared statement] -** is destroyed by [sqlite3_finalize()] or until the statement is automatically -** reprepared by the first call to [sqlite3_step()] for a particular run -** or until the next call to -** sqlite3_column_name() or sqlite3_column_name16() on the same column. -** -** ^If sqlite3_malloc() fails during the processing of either routine -** (for example during a conversion from UTF-8 to UTF-16) then a -** NULL pointer is returned. -** -** ^The name of a result column is the value of the "AS" clause for -** that column, if there is an AS clause. If there is no AS clause -** then the name of the column is unspecified and may change from -** one release of SQLite to the next. -*/ -SQLITE_API const char *sqlite3_column_name(sqlite3_stmt*, int N); -SQLITE_API const void *sqlite3_column_name16(sqlite3_stmt*, int N); - -/* -** CAPI3REF: Source Of Data In A Query Result -** -** ^These routines provide a means to determine the database, table, and -** table column that is the origin of a particular result column in -** [SELECT] statement. -** ^The name of the database or table or column can be returned as -** either a UTF-8 or UTF-16 string. ^The _database_ routines return -** the database name, the _table_ routines return the table name, and -** the origin_ routines return the column name. 
-** ^The returned string is valid until the [prepared statement] is destroyed -** using [sqlite3_finalize()] or until the statement is automatically -** reprepared by the first call to [sqlite3_step()] for a particular run -** or until the same information is requested -** again in a different encoding. -** -** ^The names returned are the original un-aliased names of the -** database, table, and column. -** -** ^The first argument to these interfaces is a [prepared statement]. -** ^These functions return information about the Nth result column returned by -** the statement, where N is the second function argument. -** ^The left-most column is column 0 for these routines. -** -** ^If the Nth column returned by the statement is an expression or -** subquery and is not a column value, then all of these functions return -** NULL. ^These routine might also return NULL if a memory allocation error -** occurs. ^Otherwise, they return the name of the attached database, table, -** or column that query result column was extracted from. -** -** ^As with all other SQLite APIs, those whose names end with "16" return -** UTF-16 encoded strings and the other functions return UTF-8. -** -** ^These APIs are only available if the library was compiled with the -** [SQLITE_ENABLE_COLUMN_METADATA] C-preprocessor symbol. -** -** If two or more threads call one or more of these routines against the same -** prepared statement and column at the same time then the results are -** undefined. -** -** If two or more threads call one or more -** [sqlite3_column_database_name | column metadata interfaces] -** for the same [prepared statement] and result column -** at the same time then the results are undefined. -*/ -SQLITE_API const char *sqlite3_column_database_name(sqlite3_stmt*,int); -SQLITE_API const void *sqlite3_column_database_name16(sqlite3_stmt*,int); -SQLITE_API const char *sqlite3_column_table_name(sqlite3_stmt*,int); -SQLITE_API const void *sqlite3_column_table_name16(sqlite3_stmt*,int); -SQLITE_API const char *sqlite3_column_origin_name(sqlite3_stmt*,int); -SQLITE_API const void *sqlite3_column_origin_name16(sqlite3_stmt*,int); - -/* -** CAPI3REF: Declared Datatype Of A Query Result -** -** ^(The first parameter is a [prepared statement]. -** If this statement is a [SELECT] statement and the Nth column of the -** returned result set of that [SELECT] is a table column (not an -** expression or subquery) then the declared type of the table -** column is returned.)^ ^If the Nth column of the result set is an -** expression or subquery, then a NULL pointer is returned. -** ^The returned string is always UTF-8 encoded. -** -** ^(For example, given the database schema: -** -** CREATE TABLE t1(c1 VARIANT); -** -** and the following statement to be compiled: -** -** SELECT c1 + 1, c1 FROM t1; -** -** this routine would return the string "VARIANT" for the second result -** column (i==1), and a NULL pointer for the first result column (i==0).)^ -** -** ^SQLite uses dynamic run-time typing. ^So just because a column -** is declared to contain a particular type does not mean that the -** data stored in that column is of the declared type. SQLite is -** strongly typed, but the typing is dynamic not static. ^Type -** is associated with individual values, not with the containers -** used to hold those values. 
-*/ -SQLITE_API const char *sqlite3_column_decltype(sqlite3_stmt*,int); -SQLITE_API const void *sqlite3_column_decltype16(sqlite3_stmt*,int); - -/* -** CAPI3REF: Evaluate An SQL Statement -** -** After a [prepared statement] has been prepared using either -** [sqlite3_prepare_v2()] or [sqlite3_prepare16_v2()] or one of the legacy -** interfaces [sqlite3_prepare()] or [sqlite3_prepare16()], this function -** must be called one or more times to evaluate the statement. -** -** The details of the behavior of the sqlite3_step() interface depend -** on whether the statement was prepared using the newer "v2" interface -** [sqlite3_prepare_v2()] and [sqlite3_prepare16_v2()] or the older legacy -** interface [sqlite3_prepare()] and [sqlite3_prepare16()]. The use of the -** new "v2" interface is recommended for new applications but the legacy -** interface will continue to be supported. -** -** ^In the legacy interface, the return value will be either [SQLITE_BUSY], -** [SQLITE_DONE], [SQLITE_ROW], [SQLITE_ERROR], or [SQLITE_MISUSE]. -** ^With the "v2" interface, any of the other [result codes] or -** [extended result codes] might be returned as well. -** -** ^[SQLITE_BUSY] means that the database engine was unable to acquire the -** database locks it needs to do its job. ^If the statement is a [COMMIT] -** or occurs outside of an explicit transaction, then you can retry the -** statement. If the statement is not a [COMMIT] and occurs within an -** explicit transaction then you should rollback the transaction before -** continuing. -** -** ^[SQLITE_DONE] means that the statement has finished executing -** successfully. sqlite3_step() should not be called again on this virtual -** machine without first calling [sqlite3_reset()] to reset the virtual -** machine back to its initial state. -** -** ^If the SQL statement being executed returns any data, then [SQLITE_ROW] -** is returned each time a new row of data is ready for processing by the -** caller. The values may be accessed using the [column access functions]. -** sqlite3_step() is called again to retrieve the next row of data. -** -** ^[SQLITE_ERROR] means that a run-time error (such as a constraint -** violation) has occurred. sqlite3_step() should not be called again on -** the VM. More information may be found by calling [sqlite3_errmsg()]. -** ^With the legacy interface, a more specific error code (for example, -** [SQLITE_INTERRUPT], [SQLITE_SCHEMA], [SQLITE_CORRUPT], and so forth) -** can be obtained by calling [sqlite3_reset()] on the -** [prepared statement]. ^In the "v2" interface, -** the more specific error code is returned directly by sqlite3_step(). -** -** [SQLITE_MISUSE] means that the this routine was called inappropriately. -** Perhaps it was called on a [prepared statement] that has -** already been [sqlite3_finalize | finalized] or on one that had -** previously returned [SQLITE_ERROR] or [SQLITE_DONE]. Or it could -** be the case that the same database connection is being used by two or -** more threads at the same moment in time. -** -** For all versions of SQLite up to and including 3.6.23.1, a call to -** [sqlite3_reset()] was required after sqlite3_step() returned anything -** other than [SQLITE_ROW] before any subsequent invocation of -** sqlite3_step(). Failure to reset the prepared statement using -** [sqlite3_reset()] would result in an [SQLITE_MISUSE] return from -** sqlite3_step(). 
But after version 3.6.23.1, sqlite3_step() began -** calling [sqlite3_reset()] automatically in this circumstance rather -** than returning [SQLITE_MISUSE]. This is not considered a compatibility -** break because any application that ever receives an SQLITE_MISUSE error -** is broken by definition. The [SQLITE_OMIT_AUTORESET] compile-time option -** can be used to restore the legacy behavior. -** -** Goofy Interface Alert: In the legacy interface, the sqlite3_step() -** API always returns a generic error code, [SQLITE_ERROR], following any -** error other than [SQLITE_BUSY] and [SQLITE_MISUSE]. You must call -** [sqlite3_reset()] or [sqlite3_finalize()] in order to find one of the -** specific [error codes] that better describes the error. -** We admit that this is a goofy design. The problem has been fixed -** with the "v2" interface. If you prepare all of your SQL statements -** using either [sqlite3_prepare_v2()] or [sqlite3_prepare16_v2()] instead -** of the legacy [sqlite3_prepare()] and [sqlite3_prepare16()] interfaces, -** then the more specific [error codes] are returned directly -** by sqlite3_step(). The use of the "v2" interface is recommended. -*/ -SQLITE_API int sqlite3_step(sqlite3_stmt*); - -/* -** CAPI3REF: Number of columns in a result set -** -** ^The sqlite3_data_count(P) interface returns the number of columns in the -** current row of the result set of [prepared statement] P. -** ^If prepared statement P does not have results ready to return -** (via calls to the [sqlite3_column_int | sqlite3_column_*()] of -** interfaces) then sqlite3_data_count(P) returns 0. -** ^The sqlite3_data_count(P) routine also returns 0 if P is a NULL pointer. -** ^The sqlite3_data_count(P) routine returns 0 if the previous call to -** [sqlite3_step](P) returned [SQLITE_DONE]. ^The sqlite3_data_count(P) -** will return non-zero if previous call to [sqlite3_step](P) returned -** [SQLITE_ROW], except in the case of the [PRAGMA incremental_vacuum] -** where it always returns zero since each step of that multi-step -** pragma returns 0 columns of data. -** -** See also: [sqlite3_column_count()] -*/ -SQLITE_API int sqlite3_data_count(sqlite3_stmt *pStmt); - -/* -** CAPI3REF: Fundamental Datatypes -** KEYWORDS: SQLITE_TEXT -** -** ^(Every value in SQLite has one of five fundamental datatypes: -** -**
-**      64-bit signed integer
-**      64-bit IEEE floating point number
-**      string
-**      BLOB
-**      NULL
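/*
** A minimal sketch of dispatching on the fundamental datatype of a result
** column using the constants defined just below; column 0 is arbitrary.
*/
#include <stdio.h>
#include <sqlite3.h>

static void print_column0(sqlite3_stmt *stmt){
  switch( sqlite3_column_type(stmt, 0) ){
    case SQLITE_INTEGER:
      printf("%lld\n", (long long)sqlite3_column_int64(stmt, 0));
      break;
    case SQLITE_FLOAT:
      printf("%g\n", sqlite3_column_double(stmt, 0));
      break;
    case SQLITE_TEXT:
      printf("%s\n", (const char*)sqlite3_column_text(stmt, 0));
      break;
    case SQLITE_BLOB:
      printf("blob of %d bytes\n", sqlite3_column_bytes(stmt, 0));
      break;
    default:  /* SQLITE_NULL */
      printf("NULL\n");
      break;
  }
}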
)^ -** -** These constants are codes for each of those types. -** -** Note that the SQLITE_TEXT constant was also used in SQLite version 2 -** for a completely different meaning. Software that links against both -** SQLite version 2 and SQLite version 3 should use SQLITE3_TEXT, not -** SQLITE_TEXT. -*/ -#define SQLITE_INTEGER 1 -#define SQLITE_FLOAT 2 -#define SQLITE_BLOB 4 -#define SQLITE_NULL 5 -#ifdef SQLITE_TEXT -# undef SQLITE_TEXT -#else -# define SQLITE_TEXT 3 -#endif -#define SQLITE3_TEXT 3 - -/* -** CAPI3REF: Result Values From A Query -** KEYWORDS: {column access functions} -** -** These routines form the "result set" interface. -** -** ^These routines return information about a single column of the current -** result row of a query. ^In every case the first argument is a pointer -** to the [prepared statement] that is being evaluated (the [sqlite3_stmt*] -** that was returned from [sqlite3_prepare_v2()] or one of its variants) -** and the second argument is the index of the column for which information -** should be returned. ^The leftmost column of the result set has the index 0. -** ^The number of columns in the result can be determined using -** [sqlite3_column_count()]. -** -** If the SQL statement does not currently point to a valid row, or if the -** column index is out of range, the result is undefined. -** These routines may only be called when the most recent call to -** [sqlite3_step()] has returned [SQLITE_ROW] and neither -** [sqlite3_reset()] nor [sqlite3_finalize()] have been called subsequently. -** If any of these routines are called after [sqlite3_reset()] or -** [sqlite3_finalize()] or after [sqlite3_step()] has returned -** something other than [SQLITE_ROW], the results are undefined. -** If [sqlite3_step()] or [sqlite3_reset()] or [sqlite3_finalize()] -** are called from a different thread while any of these routines -** are pending, then the results are undefined. -** -** ^The sqlite3_column_type() routine returns the -** [SQLITE_INTEGER | datatype code] for the initial data type -** of the result column. ^The returned value is one of [SQLITE_INTEGER], -** [SQLITE_FLOAT], [SQLITE_TEXT], [SQLITE_BLOB], or [SQLITE_NULL]. The value -** returned by sqlite3_column_type() is only meaningful if no type -** conversions have occurred as described below. After a type conversion, -** the value returned by sqlite3_column_type() is undefined. Future -** versions of SQLite may change the behavior of sqlite3_column_type() -** following a type conversion. -** -** ^If the result is a BLOB or UTF-8 string then the sqlite3_column_bytes() -** routine returns the number of bytes in that BLOB or string. -** ^If the result is a UTF-16 string, then sqlite3_column_bytes() converts -** the string to UTF-8 and then returns the number of bytes. -** ^If the result is a numeric value then sqlite3_column_bytes() uses -** [sqlite3_snprintf()] to convert that value to a UTF-8 string and returns -** the number of bytes in that string. -** ^If the result is NULL, then sqlite3_column_bytes() returns zero. -** -** ^If the result is a BLOB or UTF-16 string then the sqlite3_column_bytes16() -** routine returns the number of bytes in that BLOB or string. -** ^If the result is a UTF-8 string, then sqlite3_column_bytes16() converts -** the string to UTF-16 and then returns the number of bytes. -** ^If the result is a numeric value then sqlite3_column_bytes16() uses -** [sqlite3_snprintf()] to convert that value to a UTF-16 string and returns -** the number of bytes in that string. 
-** ^If the result is NULL, then sqlite3_column_bytes16() returns zero. -** -** ^The values returned by [sqlite3_column_bytes()] and -** [sqlite3_column_bytes16()] do not include the zero terminators at the end -** of the string. ^For clarity: the values returned by -** [sqlite3_column_bytes()] and [sqlite3_column_bytes16()] are the number of -** bytes in the string, not the number of characters. -** -** ^Strings returned by sqlite3_column_text() and sqlite3_column_text16(), -** even empty strings, are always zero-terminated. ^The return -** value from sqlite3_column_blob() for a zero-length BLOB is a NULL pointer. -** -** ^The object returned by [sqlite3_column_value()] is an -** [unprotected sqlite3_value] object. An unprotected sqlite3_value object -** may only be used with [sqlite3_bind_value()] and [sqlite3_result_value()]. -** If the [unprotected sqlite3_value] object returned by -** [sqlite3_column_value()] is used in any other way, including calls -** to routines like [sqlite3_value_int()], [sqlite3_value_text()], -** or [sqlite3_value_bytes()], then the behavior is undefined. -** -** These routines attempt to convert the value where appropriate. ^For -** example, if the internal representation is FLOAT and a text result -** is requested, [sqlite3_snprintf()] is used internally to perform the -** conversion automatically. ^(The following table details the conversions -** that are applied: -** -**
-**
-**    Internal Type    Requested Type     Conversion
-**    -------------    --------------     -----------------------------------
-**    NULL             INTEGER            Result is 0
-**    NULL             FLOAT              Result is 0.0
-**    NULL             TEXT               Result is a NULL pointer
-**    NULL             BLOB               Result is a NULL pointer
-**    INTEGER          FLOAT              Convert from integer to float
-**    INTEGER          TEXT               ASCII rendering of the integer
-**    INTEGER          BLOB               Same as INTEGER->TEXT
-**    FLOAT            INTEGER            [CAST] to INTEGER
-**    FLOAT            TEXT               ASCII rendering of the float
-**    FLOAT            BLOB               [CAST] to BLOB
-**    TEXT             INTEGER            [CAST] to INTEGER
-**    TEXT             FLOAT              [CAST] to REAL
-**    TEXT             BLOB               No change
-**    BLOB             INTEGER            [CAST] to INTEGER
-**    BLOB             FLOAT              [CAST] to REAL
-**    BLOB             TEXT               Add a zero terminator if needed
-**
)^ -** -** The table above makes reference to standard C library functions atoi() -** and atof(). SQLite does not really use these functions. It has its -** own equivalent internal routines. The atoi() and atof() names are -** used in the table for brevity and because they are familiar to most -** C programmers. -** -** Note that when type conversions occur, pointers returned by prior -** calls to sqlite3_column_blob(), sqlite3_column_text(), and/or -** sqlite3_column_text16() may be invalidated. -** Type conversions and pointer invalidations might occur -** in the following cases: -** -**
-**   * The initial content is a BLOB and sqlite3_column_text() or
-**     sqlite3_column_text16() is called.  A zero-terminator might
-**     need to be added to the string.
-**   * The initial content is UTF-8 text and sqlite3_column_bytes16() or
-**     sqlite3_column_text16() is called.  The content must be converted
-**     to UTF-16.
-**   * The initial content is UTF-16 text and sqlite3_column_bytes() or
-**     sqlite3_column_text() is called.  The content must be converted
-**     to UTF-8.
-** -** ^Conversions between UTF-16be and UTF-16le are always done in place and do -** not invalidate a prior pointer, though of course the content of the buffer -** that the prior pointer references will have been modified. Other kinds -** of conversion are done in place when it is possible, but sometimes they -** are not possible and in those cases prior pointers are invalidated. -** -** The safest and easiest to remember policy is to invoke these routines -** in one of the following ways: -** -**
-**   * sqlite3_column_text() followed by sqlite3_column_bytes()
-**   * sqlite3_column_blob() followed by sqlite3_column_bytes()
-**   * sqlite3_column_text16() followed by sqlite3_column_bytes16()
-** -** In other words, you should call sqlite3_column_text(), -** sqlite3_column_blob(), or sqlite3_column_text16() first to force the result -** into the desired format, then invoke sqlite3_column_bytes() or -** sqlite3_column_bytes16() to find the size of the result. Do not mix calls -** to sqlite3_column_text() or sqlite3_column_blob() with calls to -** sqlite3_column_bytes16(), and do not mix calls to sqlite3_column_text16() -** with calls to sqlite3_column_bytes(). -** -** ^The pointers returned are valid until a type conversion occurs as -** described above, or until [sqlite3_step()] or [sqlite3_reset()] or -** [sqlite3_finalize()] is called. ^The memory space used to hold strings -** and BLOBs is freed automatically. Do not pass the pointers returned -** from [sqlite3_column_blob()], [sqlite3_column_text()], etc. into -** [sqlite3_free()]. -** -** ^(If a memory allocation error occurs during the evaluation of any -** of these routines, a default value is returned. The default value -** is either the integer 0, the floating point number 0.0, or a NULL -** pointer. Subsequent calls to [sqlite3_errcode()] will return -** [SQLITE_NOMEM].)^ -*/ -SQLITE_API const void *sqlite3_column_blob(sqlite3_stmt*, int iCol); -SQLITE_API int sqlite3_column_bytes(sqlite3_stmt*, int iCol); -SQLITE_API int sqlite3_column_bytes16(sqlite3_stmt*, int iCol); -SQLITE_API double sqlite3_column_double(sqlite3_stmt*, int iCol); -SQLITE_API int sqlite3_column_int(sqlite3_stmt*, int iCol); -SQLITE_API sqlite3_int64 sqlite3_column_int64(sqlite3_stmt*, int iCol); -SQLITE_API const unsigned char *sqlite3_column_text(sqlite3_stmt*, int iCol); -SQLITE_API const void *sqlite3_column_text16(sqlite3_stmt*, int iCol); -SQLITE_API int sqlite3_column_type(sqlite3_stmt*, int iCol); -SQLITE_API sqlite3_value *sqlite3_column_value(sqlite3_stmt*, int iCol); - -/* -** CAPI3REF: Destroy A Prepared Statement Object -** -** ^The sqlite3_finalize() function is called to delete a [prepared statement]. -** ^If the most recent evaluation of the statement encountered no errors -** or if the statement is never been evaluated, then sqlite3_finalize() returns -** SQLITE_OK. ^If the most recent evaluation of statement S failed, then -** sqlite3_finalize(S) returns the appropriate [error code] or -** [extended error code]. -** -** ^The sqlite3_finalize(S) routine can be called at any point during -** the life cycle of [prepared statement] S: -** before statement S is ever evaluated, after -** one or more calls to [sqlite3_reset()], or after any call -** to [sqlite3_step()] regardless of whether or not the statement has -** completed execution. -** -** ^Invoking sqlite3_finalize() on a NULL pointer is a harmless no-op. -** -** The application must finalize every [prepared statement] in order to avoid -** resource leaks. It is a grievous error for the application to try to use -** a prepared statement after it has been finalized. Any use of a prepared -** statement after it has been finalized can result in undefined and -** undesirable behavior such as segfaults and heap corruption. -*/ -SQLITE_API int sqlite3_finalize(sqlite3_stmt *pStmt); - -/* -** CAPI3REF: Reset A Prepared Statement Object -** -** The sqlite3_reset() function is called to reset a [prepared statement] -** object back to its initial state, ready to be re-executed. -** ^Any SQL statement variables that had values bound to them using -** the [sqlite3_bind_blob | sqlite3_bind_*() API] retain their values. -** Use [sqlite3_clear_bindings()] to reset the bindings. 
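Taken together, the column-access and reset interfaces above describe a prepare-once, step-many pattern. The sketch below is illustrative only: the table `users(id INTEGER, name TEXT)`, the query, and the helper name are invented for the example, and error handling is abbreviated.

```c
#include <sqlite3.h>
#include <stdio.h>

/* Illustrative only: assumes a table users(id INTEGER, name TEXT). */
static int print_user_name(sqlite3_stmt *stmt, int id) {
  int rc;
  sqlite3_bind_int(stmt, 1, id);                /* bind the ?1 parameter    */
  while ((rc = sqlite3_step(stmt)) == SQLITE_ROW) {
    /* Force the result into UTF-8 first, then ask for its size in bytes.  */
    const unsigned char *name = sqlite3_column_text(stmt, 0);
    int nbyte = sqlite3_column_bytes(stmt, 0);
    printf("%d: %.*s\n", id, nbyte, name ? (const char *)name : "");
  }
  sqlite3_reset(stmt);            /* ready for re-execution; bindings kept  */
  sqlite3_clear_bindings(stmt);   /* optional: drop the bound values too    */
  return rc == SQLITE_DONE ? SQLITE_OK : rc;
}

int main(void) {
  sqlite3 *db = 0;
  sqlite3_stmt *stmt = 0;
  if (sqlite3_open(":memory:", &db) != SQLITE_OK) return 1;
  sqlite3_exec(db, "CREATE TABLE users(id INTEGER, name TEXT);"
                   "INSERT INTO users VALUES (1,'alice'),(2,'bob');",
               0, 0, 0);
  if (sqlite3_prepare_v2(db, "SELECT name FROM users WHERE id=?1",
                         -1, &stmt, 0) == SQLITE_OK) {
    print_user_name(stmt, 1);
    print_user_name(stmt, 2);     /* same statement, reset and re-bound     */
    sqlite3_finalize(stmt);       /* every prepared statement is finalized  */
  }
  sqlite3_close(db);
  return 0;
}
```

Note that the text column is read in the recommended order, sqlite3_column_text() first and sqlite3_column_bytes() second, so no pointer is invalidated by a later conversion.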
-** -** ^The [sqlite3_reset(S)] interface resets the [prepared statement] S -** back to the beginning of its program. -** -** ^If the most recent call to [sqlite3_step(S)] for the -** [prepared statement] S returned [SQLITE_ROW] or [SQLITE_DONE], -** or if [sqlite3_step(S)] has never before been called on S, -** then [sqlite3_reset(S)] returns [SQLITE_OK]. -** -** ^If the most recent call to [sqlite3_step(S)] for the -** [prepared statement] S indicated an error, then -** [sqlite3_reset(S)] returns an appropriate [error code]. -** -** ^The [sqlite3_reset(S)] interface does not change the values -** of any [sqlite3_bind_blob|bindings] on the [prepared statement] S. -*/ -SQLITE_API int sqlite3_reset(sqlite3_stmt *pStmt); - -/* -** CAPI3REF: Create Or Redefine SQL Functions -** KEYWORDS: {function creation routines} -** KEYWORDS: {application-defined SQL function} -** KEYWORDS: {application-defined SQL functions} -** -** ^These functions (collectively known as "function creation routines") -** are used to add SQL functions or aggregates or to redefine the behavior -** of existing SQL functions or aggregates. The only differences between -** these routines are the text encoding expected for -** the second parameter (the name of the function being created) -** and the presence or absence of a destructor callback for -** the application data pointer. -** -** ^The first parameter is the [database connection] to which the SQL -** function is to be added. ^If an application uses more than one database -** connection then application-defined SQL functions must be added -** to each database connection separately. -** -** ^The second parameter is the name of the SQL function to be created or -** redefined. ^The length of the name is limited to 255 bytes in a UTF-8 -** representation, exclusive of the zero-terminator. ^Note that the name -** length limit is in UTF-8 bytes, not characters nor UTF-16 bytes. -** ^Any attempt to create a function with a longer name -** will result in [SQLITE_MISUSE] being returned. -** -** ^The third parameter (nArg) -** is the number of arguments that the SQL function or -** aggregate takes. ^If this parameter is -1, then the SQL function or -** aggregate may take any number of arguments between 0 and the limit -** set by [sqlite3_limit]([SQLITE_LIMIT_FUNCTION_ARG]). If the third -** parameter is less than -1 or greater than 127 then the behavior is -** undefined. -** -** ^The fourth parameter, eTextRep, specifies what -** [SQLITE_UTF8 | text encoding] this SQL function prefers for -** its parameters. The application should set this parameter to -** [SQLITE_UTF16LE] if the function implementation invokes -** [sqlite3_value_text16le()] on an input, or [SQLITE_UTF16BE] if the -** implementation invokes [sqlite3_value_text16be()] on an input, or -** [SQLITE_UTF16] if [sqlite3_value_text16()] is used, or [SQLITE_UTF8] -** otherwise. ^The same SQL function may be registered multiple times using -** different preferred text encodings, with different implementations for -** each encoding. -** ^When multiple implementations of the same function are available, SQLite -** will pick the one that involves the least amount of data conversion. -** -** ^The fourth parameter may optionally be ORed with [SQLITE_DETERMINISTIC] -** to signal that the function will always return the same result given -** the same inputs within a single SQL statement. Most SQL functions are -** deterministic. The built-in [random()] SQL function is an example of a -** function that is not deterministic. 
The SQLite query planner is able to -** perform additional optimizations on deterministic functions, so use -** of the [SQLITE_DETERMINISTIC] flag is recommended where possible. -** -** ^(The fifth parameter is an arbitrary pointer. The implementation of the -** function can gain access to this pointer using [sqlite3_user_data()].)^ -** -** ^The sixth, seventh and eighth parameters, xFunc, xStep and xFinal, are -** pointers to C-language functions that implement the SQL function or -** aggregate. ^A scalar SQL function requires an implementation of the xFunc -** callback only; NULL pointers must be passed as the xStep and xFinal -** parameters. ^An aggregate SQL function requires an implementation of xStep -** and xFinal and NULL pointer must be passed for xFunc. ^To delete an existing -** SQL function or aggregate, pass NULL pointers for all three function -** callbacks. -** -** ^(If the ninth parameter to sqlite3_create_function_v2() is not NULL, -** then it is destructor for the application data pointer. -** The destructor is invoked when the function is deleted, either by being -** overloaded or when the database connection closes.)^ -** ^The destructor is also invoked if the call to -** sqlite3_create_function_v2() fails. -** ^When the destructor callback of the tenth parameter is invoked, it -** is passed a single argument which is a copy of the application data -** pointer which was the fifth parameter to sqlite3_create_function_v2(). -** -** ^It is permitted to register multiple implementations of the same -** functions with the same name but with either differing numbers of -** arguments or differing preferred text encodings. ^SQLite will use -** the implementation that most closely matches the way in which the -** SQL function is used. ^A function implementation with a non-negative -** nArg parameter is a better match than a function implementation with -** a negative nArg. ^A function where the preferred text encoding -** matches the database encoding is a better -** match than a function where the encoding is different. -** ^A function where the encoding difference is between UTF16le and UTF16be -** is a closer match than a function where the encoding difference is -** between UTF8 and UTF16. -** -** ^Built-in functions may be overloaded by new application-defined functions. -** -** ^An application-defined function is permitted to call other -** SQLite interfaces. However, such calls must not -** close the database connection nor finalize or reset the prepared -** statement in which the function is running. 
-*/ -SQLITE_API int sqlite3_create_function( - sqlite3 *db, - const char *zFunctionName, - int nArg, - int eTextRep, - void *pApp, - void (*xFunc)(sqlite3_context*,int,sqlite3_value**), - void (*xStep)(sqlite3_context*,int,sqlite3_value**), - void (*xFinal)(sqlite3_context*) -); -SQLITE_API int sqlite3_create_function16( - sqlite3 *db, - const void *zFunctionName, - int nArg, - int eTextRep, - void *pApp, - void (*xFunc)(sqlite3_context*,int,sqlite3_value**), - void (*xStep)(sqlite3_context*,int,sqlite3_value**), - void (*xFinal)(sqlite3_context*) -); -SQLITE_API int sqlite3_create_function_v2( - sqlite3 *db, - const char *zFunctionName, - int nArg, - int eTextRep, - void *pApp, - void (*xFunc)(sqlite3_context*,int,sqlite3_value**), - void (*xStep)(sqlite3_context*,int,sqlite3_value**), - void (*xFinal)(sqlite3_context*), - void(*xDestroy)(void*) -); - -/* -** CAPI3REF: Text Encodings -** -** These constant define integer codes that represent the various -** text encodings supported by SQLite. -*/ -#define SQLITE_UTF8 1 -#define SQLITE_UTF16LE 2 -#define SQLITE_UTF16BE 3 -#define SQLITE_UTF16 4 /* Use native byte order */ -#define SQLITE_ANY 5 /* Deprecated */ -#define SQLITE_UTF16_ALIGNED 8 /* sqlite3_create_collation only */ - -/* -** CAPI3REF: Function Flags -** -** These constants may be ORed together with the -** [SQLITE_UTF8 | preferred text encoding] as the fourth argument -** to [sqlite3_create_function()], [sqlite3_create_function16()], or -** [sqlite3_create_function_v2()]. -*/ -#define SQLITE_DETERMINISTIC 0x800 - -/* -** CAPI3REF: Deprecated Functions -** DEPRECATED -** -** These functions are [deprecated]. In order to maintain -** backwards compatibility with older code, these functions continue -** to be supported. However, new applications should avoid -** the use of these functions. To help encourage people to avoid -** using these functions, we are not going to tell you what they do. -*/ -#ifndef SQLITE_OMIT_DEPRECATED -SQLITE_API SQLITE_DEPRECATED int sqlite3_aggregate_count(sqlite3_context*); -SQLITE_API SQLITE_DEPRECATED int sqlite3_expired(sqlite3_stmt*); -SQLITE_API SQLITE_DEPRECATED int sqlite3_transfer_bindings(sqlite3_stmt*, sqlite3_stmt*); -SQLITE_API SQLITE_DEPRECATED int sqlite3_global_recover(void); -SQLITE_API SQLITE_DEPRECATED void sqlite3_thread_cleanup(void); -SQLITE_API SQLITE_DEPRECATED int sqlite3_memory_alarm(void(*)(void*,sqlite3_int64,int), - void*,sqlite3_int64); -#endif - -/* -** CAPI3REF: Obtaining SQL Function Parameter Values -** -** The C-language implementation of SQL functions and aggregates uses -** this set of interface routines to access the parameter values on -** the function or aggregate. -** -** The xFunc (for scalar functions) or xStep (for aggregates) parameters -** to [sqlite3_create_function()] and [sqlite3_create_function16()] -** define callbacks that implement the SQL functions and aggregates. -** The 3rd parameter to these callbacks is an array of pointers to -** [protected sqlite3_value] objects. There is one [sqlite3_value] object for -** each parameter to the SQL function. These routines are used to -** extract values from the [sqlite3_value] objects. -** -** These routines work only with [protected sqlite3_value] objects. -** Any attempt to use these routines on an [unprotected sqlite3_value] -** object results in undefined behavior. 
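As a rough sketch of the function-creation interface declared above, the following registers a scalar function. The name `half` and its behavior are hypothetical; it reads its argument with a sqlite3_value_* accessor, sets its result with a sqlite3_result_* routine, and passes NULL for xStep/xFinal because it is not an aggregate.

```c
#include <sqlite3.h>

/* Illustrative scalar function: half(X) returns X/2.0. */
static void halfFunc(sqlite3_context *ctx, int argc, sqlite3_value **argv) {
  (void)argc;
  sqlite3_result_double(ctx, sqlite3_value_double(argv[0]) / 2.0);
}

/* Register the function on one connection.  SQLITE_DETERMINISTIC is ORed  */
/* into the preferred encoding because half() always returns the same      */
/* output for the same input.  NULL xStep/xFinal mark it as a scalar.      */
static int register_half(sqlite3 *db) {
  return sqlite3_create_function_v2(db, "half", 1,
                                    SQLITE_UTF8 | SQLITE_DETERMINISTIC,
                                    0,          /* application data pointer */
                                    halfFunc,   /* xFunc                    */
                                    0, 0,       /* xStep, xFinal            */
                                    0);         /* xDestroy                 */
}
```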
-** -** ^These routines work just like the corresponding [column access functions] -** except that these routines take a single [protected sqlite3_value] object -** pointer instead of a [sqlite3_stmt*] pointer and an integer column number. -** -** ^The sqlite3_value_text16() interface extracts a UTF-16 string -** in the native byte-order of the host machine. ^The -** sqlite3_value_text16be() and sqlite3_value_text16le() interfaces -** extract UTF-16 strings as big-endian and little-endian respectively. -** -** ^(The sqlite3_value_numeric_type() interface attempts to apply -** numeric affinity to the value. This means that an attempt is -** made to convert the value to an integer or floating point. If -** such a conversion is possible without loss of information (in other -** words, if the value is a string that looks like a number) -** then the conversion is performed. Otherwise no conversion occurs. -** The [SQLITE_INTEGER | datatype] after conversion is returned.)^ -** -** Please pay particular attention to the fact that the pointer returned -** from [sqlite3_value_blob()], [sqlite3_value_text()], or -** [sqlite3_value_text16()] can be invalidated by a subsequent call to -** [sqlite3_value_bytes()], [sqlite3_value_bytes16()], [sqlite3_value_text()], -** or [sqlite3_value_text16()]. -** -** These routines must be called from the same thread as -** the SQL function that supplied the [sqlite3_value*] parameters. -*/ -SQLITE_API const void *sqlite3_value_blob(sqlite3_value*); -SQLITE_API int sqlite3_value_bytes(sqlite3_value*); -SQLITE_API int sqlite3_value_bytes16(sqlite3_value*); -SQLITE_API double sqlite3_value_double(sqlite3_value*); -SQLITE_API int sqlite3_value_int(sqlite3_value*); -SQLITE_API sqlite3_int64 sqlite3_value_int64(sqlite3_value*); -SQLITE_API const unsigned char *sqlite3_value_text(sqlite3_value*); -SQLITE_API const void *sqlite3_value_text16(sqlite3_value*); -SQLITE_API const void *sqlite3_value_text16le(sqlite3_value*); -SQLITE_API const void *sqlite3_value_text16be(sqlite3_value*); -SQLITE_API int sqlite3_value_type(sqlite3_value*); -SQLITE_API int sqlite3_value_numeric_type(sqlite3_value*); - -/* -** CAPI3REF: Obtain Aggregate Function Context -** -** Implementations of aggregate SQL functions use this -** routine to allocate memory for storing their state. -** -** ^The first time the sqlite3_aggregate_context(C,N) routine is called -** for a particular aggregate function, SQLite -** allocates N of memory, zeroes out that memory, and returns a pointer -** to the new memory. ^On second and subsequent calls to -** sqlite3_aggregate_context() for the same aggregate function instance, -** the same buffer is returned. Sqlite3_aggregate_context() is normally -** called once for each invocation of the xStep callback and then one -** last time when the xFinal callback is invoked. ^(When no rows match -** an aggregate query, the xStep() callback of the aggregate function -** implementation is never called and xFinal() is called exactly once. -** In those cases, sqlite3_aggregate_context() might be called for the -** first time from within xFinal().)^ -** -** ^The sqlite3_aggregate_context(C,N) routine returns a NULL pointer -** when first called if N is less than or equal to zero or if a memory -** allocate error occurs. -** -** ^(The amount of space allocated by sqlite3_aggregate_context(C,N) is -** determined by the N parameter on first successful call. 
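For the aggregate case, a minimal sketch under the same assumptions: a hypothetical `mysum(X)` aggregate that keeps its running total in the buffer handed out by sqlite3_aggregate_context().

```c
#include <sqlite3.h>

/* Illustrative aggregate "mysum(X)": per-instance state lives in the      */
/* buffer returned by sqlite3_aggregate_context().                         */
typedef struct { double total; } SumCtx;

static void mysumStep(sqlite3_context *ctx, int argc, sqlite3_value **argv) {
  /* First call allocates sizeof(SumCtx) zeroed bytes; later calls return  */
  /* the same buffer for this aggregate instance.                          */
  SumCtx *p = (SumCtx*)sqlite3_aggregate_context(ctx, sizeof(SumCtx));
  (void)argc;
  if (p) p->total += sqlite3_value_double(argv[0]);
}

static void mysumFinal(sqlite3_context *ctx) {
  /* N==0 here so that no pointless allocation happens when no rows match. */
  SumCtx *p = (SumCtx*)sqlite3_aggregate_context(ctx, 0);
  sqlite3_result_double(ctx, p ? p->total : 0.0);
}

static int register_mysum(sqlite3 *db) {
  /* Aggregates pass NULL for xFunc and supply xStep/xFinal instead.       */
  return sqlite3_create_function_v2(db, "mysum", 1, SQLITE_UTF8, 0,
                                    0, mysumStep, mysumFinal, 0);
}
```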
Changing the -** value of N in subsequent call to sqlite3_aggregate_context() within -** the same aggregate function instance will not resize the memory -** allocation.)^ Within the xFinal callback, it is customary to set -** N=0 in calls to sqlite3_aggregate_context(C,N) so that no -** pointless memory allocations occur. -** -** ^SQLite automatically frees the memory allocated by -** sqlite3_aggregate_context() when the aggregate query concludes. -** -** The first parameter must be a copy of the -** [sqlite3_context | SQL function context] that is the first parameter -** to the xStep or xFinal callback routine that implements the aggregate -** function. -** -** This routine must be called from the same thread in which -** the aggregate SQL function is running. -*/ -SQLITE_API void *sqlite3_aggregate_context(sqlite3_context*, int nBytes); - -/* -** CAPI3REF: User Data For Functions -** -** ^The sqlite3_user_data() interface returns a copy of -** the pointer that was the pUserData parameter (the 5th parameter) -** of the [sqlite3_create_function()] -** and [sqlite3_create_function16()] routines that originally -** registered the application defined function. -** -** This routine must be called from the same thread in which -** the application-defined function is running. -*/ -SQLITE_API void *sqlite3_user_data(sqlite3_context*); - -/* -** CAPI3REF: Database Connection For Functions -** -** ^The sqlite3_context_db_handle() interface returns a copy of -** the pointer to the [database connection] (the 1st parameter) -** of the [sqlite3_create_function()] -** and [sqlite3_create_function16()] routines that originally -** registered the application defined function. -*/ -SQLITE_API sqlite3 *sqlite3_context_db_handle(sqlite3_context*); - -/* -** CAPI3REF: Function Auxiliary Data -** -** These functions may be used by (non-aggregate) SQL functions to -** associate metadata with argument values. If the same value is passed to -** multiple invocations of the same SQL function during query execution, under -** some circumstances the associated metadata may be preserved. An example -** of where this might be useful is in a regular-expression matching -** function. The compiled version of the regular expression can be stored as -** metadata associated with the pattern string. -** Then as long as the pattern string remains the same, -** the compiled regular expression can be reused on multiple -** invocations of the same function. -** -** ^The sqlite3_get_auxdata() interface returns a pointer to the metadata -** associated by the sqlite3_set_auxdata() function with the Nth argument -** value to the application-defined function. ^If there is no metadata -** associated with the function argument, this sqlite3_get_auxdata() interface -** returns a NULL pointer. -** -** ^The sqlite3_set_auxdata(C,N,P,X) interface saves P as metadata for the N-th -** argument of the application-defined function. ^Subsequent -** calls to sqlite3_get_auxdata(C,N) return P from the most recent -** sqlite3_set_auxdata(C,N,P,X) call if the metadata is still valid or -** NULL if the metadata has been discarded. -** ^After each call to sqlite3_set_auxdata(C,N,P,X) where X is not NULL, -** SQLite will invoke the destructor function X with parameter P exactly -** once, when the metadata is discarded. -** SQLite is free to discard the metadata at any time, including:
-**   * when the corresponding function parameter changes, or
-**   * when [sqlite3_reset()] or [sqlite3_finalize()] is called for the
-**     SQL statement, or
-**   * when sqlite3_set_auxdata() is invoked again on the same parameter, or
-**   * during the original sqlite3_set_auxdata() call when a memory
-**     allocation error occurs.
)^ -** -** Note the last bullet in particular. The destructor X in -** sqlite3_set_auxdata(C,N,P,X) might be called immediately, before the -** sqlite3_set_auxdata() interface even returns. Hence sqlite3_set_auxdata() -** should be called near the end of the function implementation and the -** function implementation should not make any use of P after -** sqlite3_set_auxdata() has been called. -** -** ^(In practice, metadata is preserved between function calls for -** function parameters that are compile-time constants, including literal -** values and [parameters] and expressions composed from the same.)^ -** -** These routines must be called from the same thread in which -** the SQL function is running. -*/ -SQLITE_API void *sqlite3_get_auxdata(sqlite3_context*, int N); -SQLITE_API void sqlite3_set_auxdata(sqlite3_context*, int N, void*, void (*)(void*)); - - -/* -** CAPI3REF: Constants Defining Special Destructor Behavior -** -** These are special values for the destructor that is passed in as the -** final argument to routines like [sqlite3_result_blob()]. ^If the destructor -** argument is SQLITE_STATIC, it means that the content pointer is constant -** and will never change. It does not need to be destroyed. ^The -** SQLITE_TRANSIENT value means that the content will likely change in -** the near future and that SQLite should make its own private copy of -** the content before returning. -** -** The typedef is necessary to work around problems in certain -** C++ compilers. -*/ -typedef void (*sqlite3_destructor_type)(void*); -#define SQLITE_STATIC ((sqlite3_destructor_type)0) -#define SQLITE_TRANSIENT ((sqlite3_destructor_type)-1) - -/* -** CAPI3REF: Setting The Result Of An SQL Function -** -** These routines are used by the xFunc or xFinal callbacks that -** implement SQL functions and aggregates. See -** [sqlite3_create_function()] and [sqlite3_create_function16()] -** for additional information. -** -** These functions work very much like the [parameter binding] family of -** functions used to bind values to host parameters in prepared statements. -** Refer to the [SQL parameter] documentation for additional information. -** -** ^The sqlite3_result_blob() interface sets the result from -** an application-defined function to be the BLOB whose content is pointed -** to by the second parameter and which is N bytes long where N is the -** third parameter. -** -** ^The sqlite3_result_zeroblob() interfaces set the result of -** the application-defined function to be a BLOB containing all zero -** bytes and N bytes in size, where N is the value of the 2nd parameter. -** -** ^The sqlite3_result_double() interface sets the result from -** an application-defined function to be a floating point value specified -** by its 2nd argument. -** -** ^The sqlite3_result_error() and sqlite3_result_error16() functions -** cause the implemented SQL function to throw an exception. -** ^SQLite uses the string pointed to by the -** 2nd parameter of sqlite3_result_error() or sqlite3_result_error16() -** as the text of an error message. ^SQLite interprets the error -** message string from sqlite3_result_error() as UTF-8. ^SQLite -** interprets the string from sqlite3_result_error16() as UTF-16 in native -** byte order. ^If the third parameter to sqlite3_result_error() -** or sqlite3_result_error16() is negative then SQLite takes as the error -** message all text up through the first zero character. 
-** ^If the third parameter to sqlite3_result_error() or -** sqlite3_result_error16() is non-negative then SQLite takes that many -** bytes (not characters) from the 2nd parameter as the error message. -** ^The sqlite3_result_error() and sqlite3_result_error16() -** routines make a private copy of the error message text before -** they return. Hence, the calling function can deallocate or -** modify the text after they return without harm. -** ^The sqlite3_result_error_code() function changes the error code -** returned by SQLite as a result of an error in a function. ^By default, -** the error code is SQLITE_ERROR. ^A subsequent call to sqlite3_result_error() -** or sqlite3_result_error16() resets the error code to SQLITE_ERROR. -** -** ^The sqlite3_result_error_toobig() interface causes SQLite to throw an -** error indicating that a string or BLOB is too long to represent. -** -** ^The sqlite3_result_error_nomem() interface causes SQLite to throw an -** error indicating that a memory allocation failed. -** -** ^The sqlite3_result_int() interface sets the return value -** of the application-defined function to be the 32-bit signed integer -** value given in the 2nd argument. -** ^The sqlite3_result_int64() interface sets the return value -** of the application-defined function to be the 64-bit signed integer -** value given in the 2nd argument. -** -** ^The sqlite3_result_null() interface sets the return value -** of the application-defined function to be NULL. -** -** ^The sqlite3_result_text(), sqlite3_result_text16(), -** sqlite3_result_text16le(), and sqlite3_result_text16be() interfaces -** set the return value of the application-defined function to be -** a text string which is represented as UTF-8, UTF-16 native byte order, -** UTF-16 little endian, or UTF-16 big endian, respectively. -** ^SQLite takes the text result from the application from -** the 2nd parameter of the sqlite3_result_text* interfaces. -** ^If the 3rd parameter to the sqlite3_result_text* interfaces -** is negative, then SQLite takes result text from the 2nd parameter -** through the first zero character. -** ^If the 3rd parameter to the sqlite3_result_text* interfaces -** is non-negative, then as many bytes (not characters) of the text -** pointed to by the 2nd parameter are taken as the application-defined -** function result. If the 3rd parameter is non-negative, then it -** must be the byte offset into the string where the NUL terminator would -** appear if the string where NUL terminated. If any NUL characters occur -** in the string at a byte offset that is less than the value of the 3rd -** parameter, then the resulting string will contain embedded NULs and the -** result of expressions operating on strings with embedded NULs is undefined. -** ^If the 4th parameter to the sqlite3_result_text* interfaces -** or sqlite3_result_blob is a non-NULL pointer, then SQLite calls that -** function as the destructor on the text or BLOB result when it has -** finished using that result. -** ^If the 4th parameter to the sqlite3_result_text* interfaces or to -** sqlite3_result_blob is the special constant SQLITE_STATIC, then SQLite -** assumes that the text or BLOB result is in constant space and does not -** copy the content of the parameter nor call a destructor on the content -** when it has finished using that result. 
-** ^If the 4th parameter to the sqlite3_result_text* interfaces -** or sqlite3_result_blob is the special constant SQLITE_TRANSIENT -** then SQLite makes a copy of the result into space obtained from -** from [sqlite3_malloc()] before it returns. -** -** ^The sqlite3_result_value() interface sets the result of -** the application-defined function to be a copy the -** [unprotected sqlite3_value] object specified by the 2nd parameter. ^The -** sqlite3_result_value() interface makes a copy of the [sqlite3_value] -** so that the [sqlite3_value] specified in the parameter may change or -** be deallocated after sqlite3_result_value() returns without harm. -** ^A [protected sqlite3_value] object may always be used where an -** [unprotected sqlite3_value] object is required, so either -** kind of [sqlite3_value] object can be used with this interface. -** -** If these routines are called from within the different thread -** than the one containing the application-defined function that received -** the [sqlite3_context] pointer, the results are undefined. -*/ -SQLITE_API void sqlite3_result_blob(sqlite3_context*, const void*, int, void(*)(void*)); -SQLITE_API void sqlite3_result_double(sqlite3_context*, double); -SQLITE_API void sqlite3_result_error(sqlite3_context*, const char*, int); -SQLITE_API void sqlite3_result_error16(sqlite3_context*, const void*, int); -SQLITE_API void sqlite3_result_error_toobig(sqlite3_context*); -SQLITE_API void sqlite3_result_error_nomem(sqlite3_context*); -SQLITE_API void sqlite3_result_error_code(sqlite3_context*, int); -SQLITE_API void sqlite3_result_int(sqlite3_context*, int); -SQLITE_API void sqlite3_result_int64(sqlite3_context*, sqlite3_int64); -SQLITE_API void sqlite3_result_null(sqlite3_context*); -SQLITE_API void sqlite3_result_text(sqlite3_context*, const char*, int, void(*)(void*)); -SQLITE_API void sqlite3_result_text16(sqlite3_context*, const void*, int, void(*)(void*)); -SQLITE_API void sqlite3_result_text16le(sqlite3_context*, const void*, int,void(*)(void*)); -SQLITE_API void sqlite3_result_text16be(sqlite3_context*, const void*, int,void(*)(void*)); -SQLITE_API void sqlite3_result_value(sqlite3_context*, sqlite3_value*); -SQLITE_API void sqlite3_result_zeroblob(sqlite3_context*, int n); - -/* -** CAPI3REF: Define New Collating Sequences -** -** ^These functions add, remove, or modify a [collation] associated -** with the [database connection] specified as the first argument. -** -** ^The name of the collation is a UTF-8 string -** for sqlite3_create_collation() and sqlite3_create_collation_v2() -** and a UTF-16 string in native byte order for sqlite3_create_collation16(). -** ^Collation names that compare equal according to [sqlite3_strnicmp()] are -** considered to be the same name. -** -** ^(The third argument (eTextRep) must be one of the constants: -**
-**   * [SQLITE_UTF8],
-**   * [SQLITE_UTF16LE],
-**   * [SQLITE_UTF16BE],
-**   * [SQLITE_UTF16], or
-**   * [SQLITE_UTF16_ALIGNED].
)^ -** ^The eTextRep argument determines the encoding of strings passed -** to the collating function callback, xCallback. -** ^The [SQLITE_UTF16] and [SQLITE_UTF16_ALIGNED] values for eTextRep -** force strings to be UTF16 with native byte order. -** ^The [SQLITE_UTF16_ALIGNED] value for eTextRep forces strings to begin -** on an even byte address. -** -** ^The fourth argument, pArg, is an application data pointer that is passed -** through as the first argument to the collating function callback. -** -** ^The fifth argument, xCallback, is a pointer to the collating function. -** ^Multiple collating functions can be registered using the same name but -** with different eTextRep parameters and SQLite will use whichever -** function requires the least amount of data transformation. -** ^If the xCallback argument is NULL then the collating function is -** deleted. ^When all collating functions having the same name are deleted, -** that collation is no longer usable. -** -** ^The collating function callback is invoked with a copy of the pArg -** application data pointer and with two strings in the encoding specified -** by the eTextRep argument. The collating function must return an -** integer that is negative, zero, or positive -** if the first string is less than, equal to, or greater than the second, -** respectively. A collating function must always return the same answer -** given the same inputs. If two or more collating functions are registered -** to the same collation name (using different eTextRep values) then all -** must give an equivalent answer when invoked with equivalent strings. -** The collating function must obey the following properties for all -** strings A, B, and C: -** -**
-**   1. If A==B then B==A.
-**   2. If A==B and B==C then A==C.
-**   3. If A<B THEN B>A.
-**   4. If A<B and B<C then A<C.
-** -** If a collating function fails any of the above constraints and that -** collating function is registered and used, then the behavior of SQLite -** is undefined. -** -** ^The sqlite3_create_collation_v2() works like sqlite3_create_collation() -** with the addition that the xDestroy callback is invoked on pArg when -** the collating function is deleted. -** ^Collating functions are deleted when they are overridden by later -** calls to the collation creation functions or when the -** [database connection] is closed using [sqlite3_close()]. -** -** ^The xDestroy callback is not called if the -** sqlite3_create_collation_v2() function fails. Applications that invoke -** sqlite3_create_collation_v2() with a non-NULL xDestroy argument should -** check the return code and dispose of the application data pointer -** themselves rather than expecting SQLite to deal with it for them. -** This is different from every other SQLite interface. The inconsistency -** is unfortunate but cannot be changed without breaking backwards -** compatibility. -** -** See also: [sqlite3_collation_needed()] and [sqlite3_collation_needed16()]. -*/ -SQLITE_API int sqlite3_create_collation( - sqlite3*, - const char *zName, - int eTextRep, - void *pArg, - int(*xCompare)(void*,int,const void*,int,const void*) -); -SQLITE_API int sqlite3_create_collation_v2( - sqlite3*, - const char *zName, - int eTextRep, - void *pArg, - int(*xCompare)(void*,int,const void*,int,const void*), - void(*xDestroy)(void*) -); -SQLITE_API int sqlite3_create_collation16( - sqlite3*, - const void *zName, - int eTextRep, - void *pArg, - int(*xCompare)(void*,int,const void*,int,const void*) -); - -/* -** CAPI3REF: Collation Needed Callbacks -** -** ^To avoid having to register all collation sequences before a database -** can be used, a single callback function may be registered with the -** [database connection] to be invoked whenever an undefined collation -** sequence is required. -** -** ^If the function is registered using the sqlite3_collation_needed() API, -** then it is passed the names of undefined collation sequences as strings -** encoded in UTF-8. ^If sqlite3_collation_needed16() is used, -** the names are passed as UTF-16 in machine native byte order. -** ^A call to either function replaces the existing collation-needed callback. -** -** ^(When the callback is invoked, the first argument passed is a copy -** of the second argument to sqlite3_collation_needed() or -** sqlite3_collation_needed16(). The second argument is the database -** connection. The third argument is one of [SQLITE_UTF8], [SQLITE_UTF16BE], -** or [SQLITE_UTF16LE], indicating the most desirable form of the collation -** sequence function required. The fourth parameter is the name of the -** required collation sequence.)^ -** -** The callback function should register the desired collation using -** [sqlite3_create_collation()], [sqlite3_create_collation16()], or -** [sqlite3_create_collation_v2()]. -*/ -SQLITE_API int sqlite3_collation_needed( - sqlite3*, - void*, - void(*)(void*,sqlite3*,int eTextRep,const char*) -); -SQLITE_API int sqlite3_collation_needed16( - sqlite3*, - void*, - void(*)(void*,sqlite3*,int eTextRep,const void*) -); - -#ifdef SQLITE_HAS_CODEC -/* -** Specify the key for an encrypted database. This routine should be -** called right after sqlite3_open(). -** -** The code to implement this API is not available in the public release -** of SQLite. 
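A hedged sketch of registering a collating function with sqlite3_create_collation() as described above: the collation name `nocase_ascii` is invented for illustration, and the comparator only folds ASCII case.

```c
#include <sqlite3.h>
#include <ctype.h>

/* Illustrative collating function: ASCII case-insensitive comparison.     */
/* It returns negative/zero/positive and satisfies the consistency rules   */
/* listed above for all inputs.                                            */
static int nocaseAsciiCmp(void *unused, int n1, const void *s1,
                          int n2, const void *s2) {
  const unsigned char *a = (const unsigned char *)s1;
  const unsigned char *b = (const unsigned char *)s2;
  int n = n1 < n2 ? n1 : n2;
  (void)unused;
  for (int i = 0; i < n; i++) {
    int c = tolower(a[i]) - tolower(b[i]);
    if (c) return c;
  }
  return n1 - n2;
}

static int register_nocase_ascii(sqlite3 *db) {
  /* Strings reach the callback as UTF-8 because eTextRep is SQLITE_UTF8.  */
  return sqlite3_create_collation(db, "nocase_ascii", SQLITE_UTF8,
                                  0, nocaseAsciiCmp);
}
```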
-*/ -SQLITE_API int sqlite3_key( - sqlite3 *db, /* Database to be rekeyed */ - const void *pKey, int nKey /* The key */ -); -SQLITE_API int sqlite3_key_v2( - sqlite3 *db, /* Database to be rekeyed */ - const char *zDbName, /* Name of the database */ - const void *pKey, int nKey /* The key */ -); - -/* -** Change the key on an open database. If the current database is not -** encrypted, this routine will encrypt it. If pNew==0 or nNew==0, the -** database is decrypted. -** -** The code to implement this API is not available in the public release -** of SQLite. -*/ -SQLITE_API int sqlite3_rekey( - sqlite3 *db, /* Database to be rekeyed */ - const void *pKey, int nKey /* The new key */ -); -SQLITE_API int sqlite3_rekey_v2( - sqlite3 *db, /* Database to be rekeyed */ - const char *zDbName, /* Name of the database */ - const void *pKey, int nKey /* The new key */ -); - -/* -** Specify the activation key for a SEE database. Unless -** activated, none of the SEE routines will work. -*/ -SQLITE_API void sqlite3_activate_see( - const char *zPassPhrase /* Activation phrase */ -); -#endif - -#ifdef SQLITE_ENABLE_CEROD -/* -** Specify the activation key for a CEROD database. Unless -** activated, none of the CEROD routines will work. -*/ -SQLITE_API void sqlite3_activate_cerod( - const char *zPassPhrase /* Activation phrase */ -); -#endif - -/* -** CAPI3REF: Suspend Execution For A Short Time -** -** The sqlite3_sleep() function causes the current thread to suspend execution -** for at least a number of milliseconds specified in its parameter. -** -** If the operating system does not support sleep requests with -** millisecond time resolution, then the time will be rounded up to -** the nearest second. The number of milliseconds of sleep actually -** requested from the operating system is returned. -** -** ^SQLite implements this interface by calling the xSleep() -** method of the default [sqlite3_vfs] object. If the xSleep() method -** of the default VFS is not implemented correctly, or not implemented at -** all, then the behavior of sqlite3_sleep() may deviate from the description -** in the previous paragraphs. -*/ -SQLITE_API int sqlite3_sleep(int); - -/* -** CAPI3REF: Name Of The Folder Holding Temporary Files -** -** ^(If this global variable is made to point to a string which is -** the name of a folder (a.k.a. directory), then all temporary files -** created by SQLite when using a built-in [sqlite3_vfs | VFS] -** will be placed in that directory.)^ ^If this variable -** is a NULL pointer, then SQLite performs a search for an appropriate -** temporary file directory. -** -** It is not safe to read or modify this variable in more than one -** thread at a time. It is not safe to read or modify this variable -** if a [database connection] is being used at the same time in a separate -** thread. -** It is intended that this variable be set once -** as part of process initialization and before any SQLite interface -** routines have been called and that this variable remain unchanged -** thereafter. -** -** ^The [temp_store_directory pragma] may modify this variable and cause -** it to point to memory obtained from [sqlite3_malloc]. ^Furthermore, -** the [temp_store_directory pragma] always assumes that any string -** that this variable points to is held in memory obtained from -** [sqlite3_malloc] and the pragma may attempt to free that memory -** using [sqlite3_free]. 
-** Hence, if this variable is modified directly, either it should be -** made NULL or made to point to memory obtained from [sqlite3_malloc] -** or else the use of the [temp_store_directory pragma] should be avoided. -** -** Note to Windows Runtime users: The temporary directory must be set -** prior to calling [sqlite3_open] or [sqlite3_open_v2]. Otherwise, various -** features that require the use of temporary files may fail. Here is an -** example of how to do this using C++ with the Windows Runtime: -** -**
-**   LPCWSTR zPath = Windows::Storage::ApplicationData::Current->
-**         TemporaryFolder->Path->Data();
-**   char zPathBuf[MAX_PATH + 1];
-**   memset(zPathBuf, 0, sizeof(zPathBuf));
-**   WideCharToMultiByte(CP_UTF8, 0, zPath, -1, zPathBuf, sizeof(zPathBuf),
-**         NULL, NULL);
-**   sqlite3_temp_directory = sqlite3_mprintf("%s", zPathBuf);
-*/ -SQLITE_API char *sqlite3_temp_directory; - -/* -** CAPI3REF: Name Of The Folder Holding Database Files -** -** ^(If this global variable is made to point to a string which is -** the name of a folder (a.k.a. directory), then all database files -** specified with a relative pathname and created or accessed by -** SQLite when using a built-in windows [sqlite3_vfs | VFS] will be assumed -** to be relative to that directory.)^ ^If this variable is a NULL -** pointer, then SQLite assumes that all database files specified -** with a relative pathname are relative to the current directory -** for the process. Only the windows VFS makes use of this global -** variable; it is ignored by the unix VFS. -** -** Changing the value of this variable while a database connection is -** open can result in a corrupt database. -** -** It is not safe to read or modify this variable in more than one -** thread at a time. It is not safe to read or modify this variable -** if a [database connection] is being used at the same time in a separate -** thread. -** It is intended that this variable be set once -** as part of process initialization and before any SQLite interface -** routines have been called and that this variable remain unchanged -** thereafter. -** -** ^The [data_store_directory pragma] may modify this variable and cause -** it to point to memory obtained from [sqlite3_malloc]. ^Furthermore, -** the [data_store_directory pragma] always assumes that any string -** that this variable points to is held in memory obtained from -** [sqlite3_malloc] and the pragma may attempt to free that memory -** using [sqlite3_free]. -** Hence, if this variable is modified directly, either it should be -** made NULL or made to point to memory obtained from [sqlite3_malloc] -** or else the use of the [data_store_directory pragma] should be avoided. -*/ -SQLITE_API char *sqlite3_data_directory; - -/* -** CAPI3REF: Test For Auto-Commit Mode -** KEYWORDS: {autocommit mode} -** -** ^The sqlite3_get_autocommit() interface returns non-zero or -** zero if the given database connection is or is not in autocommit mode, -** respectively. ^Autocommit mode is on by default. -** ^Autocommit mode is disabled by a [BEGIN] statement. -** ^Autocommit mode is re-enabled by a [COMMIT] or [ROLLBACK]. -** -** If certain kinds of errors occur on a statement within a multi-statement -** transaction (errors including [SQLITE_FULL], [SQLITE_IOERR], -** [SQLITE_NOMEM], [SQLITE_BUSY], and [SQLITE_INTERRUPT]) then the -** transaction might be rolled back automatically. The only way to -** find out whether SQLite automatically rolled back the transaction after -** an error is to use this function. -** -** If another thread changes the autocommit status of the database -** connection while this routine is running, then the return value -** is undefined. -*/ -SQLITE_API int sqlite3_get_autocommit(sqlite3*); - -/* -** CAPI3REF: Find The Database Handle Of A Prepared Statement -** -** ^The sqlite3_db_handle interface returns the [database connection] handle -** to which a [prepared statement] belongs. ^The [database connection] -** returned by sqlite3_db_handle is the same [database connection] -** that was the first argument -** to the [sqlite3_prepare_v2()] call (or its variants) that was used to -** create the statement in the first place. 
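A short sketch of the autocommit check described above: after an error inside an explicit transaction, sqlite3_get_autocommit() reveals whether SQLite already rolled the transaction back. The helper name and SQL strings are illustrative only.

```c
#include <sqlite3.h>
#include <stdio.h>

/* Illustrative sketch: run SQL inside an explicit transaction and use     */
/* sqlite3_get_autocommit() to detect an automatic rollback after an error.*/
static int exec_in_txn(sqlite3 *db, const char *sql) {
  char *err = 0;
  int rc;
  sqlite3_exec(db, "BEGIN", 0, 0, 0);        /* autocommit is now disabled */
  rc = sqlite3_exec(db, sql, 0, 0, &err);
  if (rc != SQLITE_OK) {
    fprintf(stderr, "error: %s\n", err ? err : "unknown");
    sqlite3_free(err);                       /* message is sqlite3_malloc'd */
    if (sqlite3_get_autocommit(db)) {
      return rc;                             /* SQLite already rolled back  */
    }
    sqlite3_exec(db, "ROLLBACK", 0, 0, 0);   /* otherwise roll back ourselves */
    return rc;
  }
  return sqlite3_exec(db, "COMMIT", 0, 0, 0);
}
```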
-*/ -SQLITE_API sqlite3 *sqlite3_db_handle(sqlite3_stmt*); - -/* -** CAPI3REF: Return The Filename For A Database Connection -** -** ^The sqlite3_db_filename(D,N) interface returns a pointer to a filename -** associated with database N of connection D. ^The main database file -** has the name "main". If there is no attached database N on the database -** connection D, or if database N is a temporary or in-memory database, then -** a NULL pointer is returned. -** -** ^The filename returned by this function is the output of the -** xFullPathname method of the [VFS]. ^In other words, the filename -** will be an absolute pathname, even if the filename used -** to open the database originally was a URI or relative pathname. -*/ -SQLITE_API const char *sqlite3_db_filename(sqlite3 *db, const char *zDbName); - -/* -** CAPI3REF: Determine if a database is read-only -** -** ^The sqlite3_db_readonly(D,N) interface returns 1 if the database N -** of connection D is read-only, 0 if it is read/write, or -1 if N is not -** the name of a database on connection D. -*/ -SQLITE_API int sqlite3_db_readonly(sqlite3 *db, const char *zDbName); - -/* -** CAPI3REF: Find the next prepared statement -** -** ^This interface returns a pointer to the next [prepared statement] after -** pStmt associated with the [database connection] pDb. ^If pStmt is NULL -** then this interface returns a pointer to the first prepared statement -** associated with the database connection pDb. ^If no prepared statement -** satisfies the conditions of this routine, it returns NULL. -** -** The [database connection] pointer D in a call to -** [sqlite3_next_stmt(D,S)] must refer to an open database -** connection and in particular must not be a NULL pointer. -*/ -SQLITE_API sqlite3_stmt *sqlite3_next_stmt(sqlite3 *pDb, sqlite3_stmt *pStmt); - -/* -** CAPI3REF: Commit And Rollback Notification Callbacks -** -** ^The sqlite3_commit_hook() interface registers a callback -** function to be invoked whenever a transaction is [COMMIT | committed]. -** ^Any callback set by a previous call to sqlite3_commit_hook() -** for the same database connection is overridden. -** ^The sqlite3_rollback_hook() interface registers a callback -** function to be invoked whenever a transaction is [ROLLBACK | rolled back]. -** ^Any callback set by a previous call to sqlite3_rollback_hook() -** for the same database connection is overridden. -** ^The pArg argument is passed through to the callback. -** ^If the callback on a commit hook function returns non-zero, -** then the commit is converted into a rollback. -** -** ^The sqlite3_commit_hook(D,C,P) and sqlite3_rollback_hook(D,C,P) functions -** return the P argument from the previous call of the same function -** on the same [database connection] D, or NULL for -** the first call for each function on D. -** -** The commit and rollback hook callbacks are not reentrant. -** The callback implementation must not do anything that will modify -** the database connection that invoked the callback. Any actions -** to modify the database connection must be deferred until after the -** completion of the [sqlite3_step()] call that triggered the commit -** or rollback hook in the first place. -** Note that running any other SQL statements, including SELECT statements, -** or merely calling [sqlite3_prepare_v2()] and [sqlite3_step()] will modify -** the database connections for the meaning of "modify" in this paragraph. -** -** ^Registering a NULL function disables the callback. 
-** -** ^When the commit hook callback routine returns zero, the [COMMIT] -** operation is allowed to continue normally. ^If the commit hook -** returns non-zero, then the [COMMIT] is converted into a [ROLLBACK]. -** ^The rollback hook is invoked on a rollback that results from a commit -** hook returning non-zero, just as it would be with any other rollback. -** -** ^For the purposes of this API, a transaction is said to have been -** rolled back if an explicit "ROLLBACK" statement is executed, or -** an error or constraint causes an implicit rollback to occur. -** ^The rollback callback is not invoked if a transaction is -** automatically rolled back because the database connection is closed. -** -** See also the [sqlite3_update_hook()] interface. -*/ -SQLITE_API void *sqlite3_commit_hook(sqlite3*, int(*)(void*), void*); -SQLITE_API void *sqlite3_rollback_hook(sqlite3*, void(*)(void *), void*); - -/* -** CAPI3REF: Data Change Notification Callbacks -** -** ^The sqlite3_update_hook() interface registers a callback function -** with the [database connection] identified by the first argument -** to be invoked whenever a row is updated, inserted or deleted in -** a rowid table. -** ^Any callback set by a previous call to this function -** for the same database connection is overridden. -** -** ^The second argument is a pointer to the function to invoke when a -** row is updated, inserted or deleted in a rowid table. -** ^The first argument to the callback is a copy of the third argument -** to sqlite3_update_hook(). -** ^The second callback argument is one of [SQLITE_INSERT], [SQLITE_DELETE], -** or [SQLITE_UPDATE], depending on the operation that caused the callback -** to be invoked. -** ^The third and fourth arguments to the callback contain pointers to the -** database and table name containing the affected row. -** ^The final callback parameter is the [rowid] of the row. -** ^In the case of an update, this is the [rowid] after the update takes place. -** -** ^(The update hook is not invoked when internal system tables are -** modified (i.e. sqlite_master and sqlite_sequence).)^ -** ^The update hook is not invoked when [WITHOUT ROWID] tables are modified. -** -** ^In the current implementation, the update hook -** is not invoked when duplication rows are deleted because of an -** [ON CONFLICT | ON CONFLICT REPLACE] clause. ^Nor is the update hook -** invoked when rows are deleted using the [truncate optimization]. -** The exceptions defined in this paragraph might change in a future -** release of SQLite. -** -** The update hook implementation must not do anything that will modify -** the database connection that invoked the update hook. Any actions -** to modify the database connection must be deferred until after the -** completion of the [sqlite3_step()] call that triggered the update hook. -** Note that [sqlite3_prepare_v2()] and [sqlite3_step()] both modify their -** database connections for the meaning of "modify" in this paragraph. -** -** ^The sqlite3_update_hook(D,C,P) function -** returns the P argument from the previous call -** on the same [database connection] D, or NULL for -** the first call on D. -** -** See also the [sqlite3_commit_hook()] and [sqlite3_rollback_hook()] -** interfaces. 
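A minimal sketch of installing the data-change callback described above; the callback only logs the operation and deliberately avoids touching the connection that invoked it. The function names are invented for the example.

```c
#include <sqlite3.h>
#include <stdio.h>

/* Illustrative update-hook callback.  It must not modify the connection   */
/* that invoked it; here it only logs the change.                          */
static void changeLogger(void *arg, int op, const char *dbName,
                         const char *tblName, sqlite3_int64 rowid) {
  const char *what = (op == SQLITE_INSERT) ? "INSERT"
                   : (op == SQLITE_DELETE) ? "DELETE" : "UPDATE";
  (void)arg;
  fprintf(stderr, "%s on %s.%s rowid=%lld\n",
          what, dbName, tblName, (long long)rowid);
}

static void install_hook(sqlite3 *db) {
  /* The third argument is passed back as the first callback argument.     */
  sqlite3_update_hook(db, changeLogger, 0);
}
```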
-*/ -SQLITE_API void *sqlite3_update_hook( - sqlite3*, - void(*)(void *,int ,char const *,char const *,sqlite3_int64), - void* -); - -/* -** CAPI3REF: Enable Or Disable Shared Pager Cache -** -** ^(This routine enables or disables the sharing of the database cache -** and schema data structures between [database connection | connections] -** to the same database. Sharing is enabled if the argument is true -** and disabled if the argument is false.)^ -** -** ^Cache sharing is enabled and disabled for an entire process. -** This is a change as of SQLite version 3.5.0. In prior versions of SQLite, -** sharing was enabled or disabled for each thread separately. -** -** ^(The cache sharing mode set by this interface effects all subsequent -** calls to [sqlite3_open()], [sqlite3_open_v2()], and [sqlite3_open16()]. -** Existing database connections continue use the sharing mode -** that was in effect at the time they were opened.)^ -** -** ^(This routine returns [SQLITE_OK] if shared cache was enabled or disabled -** successfully. An [error code] is returned otherwise.)^ -** -** ^Shared cache is disabled by default. But this might change in -** future releases of SQLite. Applications that care about shared -** cache setting should set it explicitly. -** -** This interface is threadsafe on processors where writing a -** 32-bit integer is atomic. -** -** See Also: [SQLite Shared-Cache Mode] -*/ -SQLITE_API int sqlite3_enable_shared_cache(int); - -/* -** CAPI3REF: Attempt To Free Heap Memory -** -** ^The sqlite3_release_memory() interface attempts to free N bytes -** of heap memory by deallocating non-essential memory allocations -** held by the database library. Memory used to cache database -** pages to improve performance is an example of non-essential memory. -** ^sqlite3_release_memory() returns the number of bytes actually freed, -** which might be more or less than the amount requested. -** ^The sqlite3_release_memory() routine is a no-op returning zero -** if SQLite is not compiled with [SQLITE_ENABLE_MEMORY_MANAGEMENT]. -** -** See also: [sqlite3_db_release_memory()] -*/ -SQLITE_API int sqlite3_release_memory(int); - -/* -** CAPI3REF: Free Memory Used By A Database Connection -** -** ^The sqlite3_db_release_memory(D) interface attempts to free as much heap -** memory as possible from database connection D. Unlike the -** [sqlite3_release_memory()] interface, this interface is in effect even -** when the [SQLITE_ENABLE_MEMORY_MANAGEMENT] compile-time option is -** omitted. -** -** See also: [sqlite3_release_memory()] -*/ -SQLITE_API int sqlite3_db_release_memory(sqlite3*); - -/* -** CAPI3REF: Impose A Limit On Heap Size -** -** ^The sqlite3_soft_heap_limit64() interface sets and/or queries the -** soft limit on the amount of heap memory that may be allocated by SQLite. -** ^SQLite strives to keep heap memory utilization below the soft heap -** limit by reducing the number of pages held in the page cache -** as heap memory usages approaches the limit. -** ^The soft heap limit is "soft" because even though SQLite strives to stay -** below the limit, it will exceed the limit rather than generate -** an [SQLITE_NOMEM] error. In other words, the soft heap limit -** is advisory only. -** -** ^The return value from sqlite3_soft_heap_limit64() is the size of -** the soft heap limit prior to the call, or negative in the case of an -** error. ^If the argument N is negative -** then no change is made to the soft heap limit. 
Hence, the current -** size of the soft heap limit can be determined by invoking -** sqlite3_soft_heap_limit64() with a negative argument. -** -** ^If the argument N is zero then the soft heap limit is disabled. -** -** ^(The soft heap limit is not enforced in the current implementation -** if one or more of following conditions are true: -** -**
-**   * The soft heap limit is set to zero.
-**   * Memory accounting is disabled using a combination of the
-**     [sqlite3_config]([SQLITE_CONFIG_MEMSTATUS],...) start-time option and
-**     the [SQLITE_DEFAULT_MEMSTATUS] compile-time option.
-**   * An alternative page cache implementation is specified using
-**     [sqlite3_config]([SQLITE_CONFIG_PCACHE2],...).
-**   * The page cache allocates from its own memory pool supplied
-**     by [sqlite3_config]([SQLITE_CONFIG_PAGECACHE],...) rather than
-**     from the heap.
)^ -** -** Beginning with SQLite version 3.7.3, the soft heap limit is enforced -** regardless of whether or not the [SQLITE_ENABLE_MEMORY_MANAGEMENT] -** compile-time option is invoked. With [SQLITE_ENABLE_MEMORY_MANAGEMENT], -** the soft heap limit is enforced on every memory allocation. Without -** [SQLITE_ENABLE_MEMORY_MANAGEMENT], the soft heap limit is only enforced -** when memory is allocated by the page cache. Testing suggests that because -** the page cache is the predominate memory user in SQLite, most -** applications will achieve adequate soft heap limit enforcement without -** the use of [SQLITE_ENABLE_MEMORY_MANAGEMENT]. -** -** The circumstances under which SQLite will enforce the soft heap limit may -** changes in future releases of SQLite. -*/ -SQLITE_API sqlite3_int64 sqlite3_soft_heap_limit64(sqlite3_int64 N); - -/* -** CAPI3REF: Deprecated Soft Heap Limit Interface -** DEPRECATED -** -** This is a deprecated version of the [sqlite3_soft_heap_limit64()] -** interface. This routine is provided for historical compatibility -** only. All new applications should use the -** [sqlite3_soft_heap_limit64()] interface rather than this one. -*/ -SQLITE_API SQLITE_DEPRECATED void sqlite3_soft_heap_limit(int N); - - -/* -** CAPI3REF: Extract Metadata About A Column Of A Table -** -** ^This routine returns metadata about a specific column of a specific -** database table accessible using the [database connection] handle -** passed as the first function argument. -** -** ^The column is identified by the second, third and fourth parameters to -** this function. ^The second parameter is either the name of the database -** (i.e. "main", "temp", or an attached database) containing the specified -** table or NULL. ^If it is NULL, then all attached databases are searched -** for the table using the same algorithm used by the database engine to -** resolve unqualified table references. -** -** ^The third and fourth parameters to this function are the table and column -** name of the desired column, respectively. Neither of these parameters -** may be NULL. -** -** ^Metadata is returned by writing to the memory locations passed as the 5th -** and subsequent parameters to this function. ^Any of these arguments may be -** NULL, in which case the corresponding element of metadata is omitted. -** -** ^(
-** -**
-**     Parameter     Output Type    Description
-**     5th           const char*    Data type
-**     6th           const char*    Name of default collation sequence
-**     7th           int            True if column has a NOT NULL constraint
-**     8th           int            True if column is part of the PRIMARY KEY
-**     9th           int            True if column is [AUTOINCREMENT]
-**
)^ -** -** ^The memory pointed to by the character pointers returned for the -** declaration type and collation sequence is valid only until the next -** call to any SQLite API function. -** -** ^If the specified table is actually a view, an [error code] is returned. -** -** ^If the specified column is "rowid", "oid" or "_rowid_" and an -** [INTEGER PRIMARY KEY] column has been explicitly declared, then the output -** parameters are set for the explicitly declared column. ^(If there is no -** explicitly declared [INTEGER PRIMARY KEY] column, then the output -** parameters are set as follows: -** -**
-**     data type: "INTEGER"
-**     collation sequence: "BINARY"
-**     not null: 0
-**     primary key: 1
-**     auto increment: 0
-** 
)^ -** -** ^(This function may load one or more schemas from database files. If an -** error occurs during this process, or if the requested table or column -** cannot be found, an [error code] is returned and an error message left -** in the [database connection] (to be retrieved using sqlite3_errmsg()).)^ -** -** ^This API is only available if the library was compiled with the -** [SQLITE_ENABLE_COLUMN_METADATA] C-preprocessor symbol defined. -*/ -SQLITE_API int sqlite3_table_column_metadata( - sqlite3 *db, /* Connection handle */ - const char *zDbName, /* Database name or NULL */ - const char *zTableName, /* Table name */ - const char *zColumnName, /* Column name */ - char const **pzDataType, /* OUTPUT: Declared data type */ - char const **pzCollSeq, /* OUTPUT: Collation sequence name */ - int *pNotNull, /* OUTPUT: True if NOT NULL constraint exists */ - int *pPrimaryKey, /* OUTPUT: True if column part of PK */ - int *pAutoinc /* OUTPUT: True if column is auto-increment */ -); - -/* -** CAPI3REF: Load An Extension -** -** ^This interface loads an SQLite extension library from the named file. -** -** ^The sqlite3_load_extension() interface attempts to load an -** [SQLite extension] library contained in the file zFile. If -** the file cannot be loaded directly, attempts are made to load -** with various operating-system specific extensions added. -** So for example, if "samplelib" cannot be loaded, then names like -** "samplelib.so" or "samplelib.dylib" or "samplelib.dll" might -** be tried also. -** -** ^The entry point is zProc. -** ^(zProc may be 0, in which case SQLite will try to come up with an -** entry point name on its own. It first tries "sqlite3_extension_init". -** If that does not work, it constructs a name "sqlite3_X_init" where the -** X is consists of the lower-case equivalent of all ASCII alphabetic -** characters in the filename from the last "/" to the first following -** "." and omitting any initial "lib".)^ -** ^The sqlite3_load_extension() interface returns -** [SQLITE_OK] on success and [SQLITE_ERROR] if something goes wrong. -** ^If an error occurs and pzErrMsg is not 0, then the -** [sqlite3_load_extension()] interface shall attempt to -** fill *pzErrMsg with error message text stored in memory -** obtained from [sqlite3_malloc()]. The calling function -** should free this memory by calling [sqlite3_free()]. -** -** ^Extension loading must be enabled using -** [sqlite3_enable_load_extension()] prior to calling this API, -** otherwise an error will be returned. -** -** See also the [load_extension() SQL function]. -*/ -SQLITE_API int sqlite3_load_extension( - sqlite3 *db, /* Load the extension into this database connection */ - const char *zFile, /* Name of the shared library containing extension */ - const char *zProc, /* Entry point. Derived from zFile if 0 */ - char **pzErrMsg /* Put error message here if not 0 */ -); - -/* -** CAPI3REF: Enable Or Disable Extension Loading -** -** ^So as not to open security holes in older applications that are -** unprepared to deal with [extension loading], and as a means of disabling -** [extension loading] while evaluating user-entered SQL, the following API -** is provided to turn the [sqlite3_load_extension()] mechanism on and off. -** -** ^Extension loading is off by default. -** ^Call the sqlite3_enable_load_extension() routine with onoff==1 -** to turn extension loading on and call it with onoff==0 to turn -** it back off again. 
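A minimal usage sketch of the extension-loading calls documented above; it assumes an open connection `db`, `<stdio.h>`, and a hypothetical extension file name, and is an editorial illustration rather than part of the original header:

    /* Sketch: enable extension loading, load a shared library, then
    ** turn loading back off.  "./samplelib" is a hypothetical file name. */
    char *zErrMsg = 0;
    int rc = sqlite3_enable_load_extension(db, 1);        /* onoff==1: allow loading */
    if( rc==SQLITE_OK ){
      rc = sqlite3_load_extension(db, "./samplelib", 0, &zErrMsg);
      if( rc!=SQLITE_OK ){
        fprintf(stderr, "load_extension: %s\n", zErrMsg);
        sqlite3_free(zErrMsg);                            /* message comes from sqlite3_malloc() */
      }
      sqlite3_enable_load_extension(db, 0);               /* onoff==0: disable again */
    }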
-*/ -SQLITE_API int sqlite3_enable_load_extension(sqlite3 *db, int onoff); - -/* -** CAPI3REF: Automatically Load Statically Linked Extensions -** -** ^This interface causes the xEntryPoint() function to be invoked for -** each new [database connection] that is created. The idea here is that -** xEntryPoint() is the entry point for a statically linked [SQLite extension] -** that is to be automatically loaded into all new database connections. -** -** ^(Even though the function prototype shows that xEntryPoint() takes -** no arguments and returns void, SQLite invokes xEntryPoint() with three -** arguments and expects and integer result as if the signature of the -** entry point where as follows: -** -**
-**    int xEntryPoint(
-**      sqlite3 *db,
-**      const char **pzErrMsg,
-**      const struct sqlite3_api_routines *pThunk
-**    );
-** 
)^ -** -** If the xEntryPoint routine encounters an error, it should make *pzErrMsg -** point to an appropriate error message (obtained from [sqlite3_mprintf()]) -** and return an appropriate [error code]. ^SQLite ensures that *pzErrMsg -** is NULL before calling the xEntryPoint(). ^SQLite will invoke -** [sqlite3_free()] on *pzErrMsg after xEntryPoint() returns. ^If any -** xEntryPoint() returns an error, the [sqlite3_open()], [sqlite3_open16()], -** or [sqlite3_open_v2()] call that provoked the xEntryPoint() will fail. -** -** ^Calling sqlite3_auto_extension(X) with an entry point X that is already -** on the list of automatic extensions is a harmless no-op. ^No entry point -** will be called more than once for each database connection that is opened. -** -** See also: [sqlite3_reset_auto_extension()] -** and [sqlite3_cancel_auto_extension()] -*/ -SQLITE_API int sqlite3_auto_extension(void (*xEntryPoint)(void)); - -/* -** CAPI3REF: Cancel Automatic Extension Loading -** -** ^The [sqlite3_cancel_auto_extension(X)] interface unregisters the -** initialization routine X that was registered using a prior call to -** [sqlite3_auto_extension(X)]. ^The [sqlite3_cancel_auto_extension(X)] -** routine returns 1 if initialization routine X was successfully -** unregistered and it returns 0 if X was not on the list of initialization -** routines. -*/ -SQLITE_API int sqlite3_cancel_auto_extension(void (*xEntryPoint)(void)); - -/* -** CAPI3REF: Reset Automatic Extension Loading -** -** ^This interface disables all automatic extensions previously -** registered using [sqlite3_auto_extension()]. -*/ -SQLITE_API void sqlite3_reset_auto_extension(void); - -/* -** The interface to the virtual-table mechanism is currently considered -** to be experimental. The interface might change in incompatible ways. -** If this is a problem for you, do not use the interface at this time. -** -** When the virtual-table mechanism stabilizes, we will declare the -** interface fixed, support it indefinitely, and remove this comment. -*/ - -/* -** Structures used by the virtual table interface -*/ -typedef struct sqlite3_vtab sqlite3_vtab; -typedef struct sqlite3_index_info sqlite3_index_info; -typedef struct sqlite3_vtab_cursor sqlite3_vtab_cursor; -typedef struct sqlite3_module sqlite3_module; - -/* -** CAPI3REF: Virtual Table Object -** KEYWORDS: sqlite3_module {virtual table module} -** -** This structure, sometimes called a "virtual table module", -** defines the implementation of a [virtual tables]. -** This structure consists mostly of methods for the module. -** -** ^A virtual table module is created by filling in a persistent -** instance of this structure and passing a pointer to that instance -** to [sqlite3_create_module()] or [sqlite3_create_module_v2()]. -** ^The registration remains valid until it is replaced by a different -** module or until the [database connection] closes. The content -** of this structure must not change while it is registered with -** any database connection. 
-*/ -struct sqlite3_module { - int iVersion; - int (*xCreate)(sqlite3*, void *pAux, - int argc, const char *const*argv, - sqlite3_vtab **ppVTab, char**); - int (*xConnect)(sqlite3*, void *pAux, - int argc, const char *const*argv, - sqlite3_vtab **ppVTab, char**); - int (*xBestIndex)(sqlite3_vtab *pVTab, sqlite3_index_info*); - int (*xDisconnect)(sqlite3_vtab *pVTab); - int (*xDestroy)(sqlite3_vtab *pVTab); - int (*xOpen)(sqlite3_vtab *pVTab, sqlite3_vtab_cursor **ppCursor); - int (*xClose)(sqlite3_vtab_cursor*); - int (*xFilter)(sqlite3_vtab_cursor*, int idxNum, const char *idxStr, - int argc, sqlite3_value **argv); - int (*xNext)(sqlite3_vtab_cursor*); - int (*xEof)(sqlite3_vtab_cursor*); - int (*xColumn)(sqlite3_vtab_cursor*, sqlite3_context*, int); - int (*xRowid)(sqlite3_vtab_cursor*, sqlite3_int64 *pRowid); - int (*xUpdate)(sqlite3_vtab *, int, sqlite3_value **, sqlite3_int64 *); - int (*xBegin)(sqlite3_vtab *pVTab); - int (*xSync)(sqlite3_vtab *pVTab); - int (*xCommit)(sqlite3_vtab *pVTab); - int (*xRollback)(sqlite3_vtab *pVTab); - int (*xFindFunction)(sqlite3_vtab *pVtab, int nArg, const char *zName, - void (**pxFunc)(sqlite3_context*,int,sqlite3_value**), - void **ppArg); - int (*xRename)(sqlite3_vtab *pVtab, const char *zNew); - /* The methods above are in version 1 of the sqlite_module object. Those - ** below are for version 2 and greater. */ - int (*xSavepoint)(sqlite3_vtab *pVTab, int); - int (*xRelease)(sqlite3_vtab *pVTab, int); - int (*xRollbackTo)(sqlite3_vtab *pVTab, int); -}; - -/* -** CAPI3REF: Virtual Table Indexing Information -** KEYWORDS: sqlite3_index_info -** -** The sqlite3_index_info structure and its substructures is used as part -** of the [virtual table] interface to -** pass information into and receive the reply from the [xBestIndex] -** method of a [virtual table module]. The fields under **Inputs** are the -** inputs to xBestIndex and are read-only. xBestIndex inserts its -** results into the **Outputs** fields. -** -** ^(The aConstraint[] array records WHERE clause constraints of the form: -** -**
column OP expr
-** -** where OP is =, <, <=, >, or >=.)^ ^(The particular operator is -** stored in aConstraint[].op using one of the -** [SQLITE_INDEX_CONSTRAINT_EQ | SQLITE_INDEX_CONSTRAINT_ values].)^ -** ^(The index of the column is stored in -** aConstraint[].iColumn.)^ ^(aConstraint[].usable is TRUE if the -** expr on the right-hand side can be evaluated (and thus the constraint -** is usable) and false if it cannot.)^ -** -** ^The optimizer automatically inverts terms of the form "expr OP column" -** and makes other simplifications to the WHERE clause in an attempt to -** get as many WHERE clause terms into the form shown above as possible. -** ^The aConstraint[] array only reports WHERE clause terms that are -** relevant to the particular virtual table being queried. -** -** ^Information about the ORDER BY clause is stored in aOrderBy[]. -** ^Each term of aOrderBy records a column of the ORDER BY clause. -** -** The [xBestIndex] method must fill aConstraintUsage[] with information -** about what parameters to pass to xFilter. ^If argvIndex>0 then -** the right-hand side of the corresponding aConstraint[] is evaluated -** and becomes the argvIndex-th entry in argv. ^(If aConstraintUsage[].omit -** is true, then the constraint is assumed to be fully handled by the -** virtual table and is not checked again by SQLite.)^ -** -** ^The idxNum and idxPtr values are recorded and passed into the -** [xFilter] method. -** ^[sqlite3_free()] is used to free idxPtr if and only if -** needToFreeIdxPtr is true. -** -** ^The orderByConsumed means that output from [xFilter]/[xNext] will occur in -** the correct order to satisfy the ORDER BY clause so that no separate -** sorting step is required. -** -** ^The estimatedCost value is an estimate of the cost of a particular -** strategy. A cost of N indicates that the cost of the strategy is similar -** to a linear scan of an SQLite table with N rows. A cost of log(N) -** indicates that the expense of the operation is similar to that of a -** binary search on a unique indexed field of an SQLite table with N rows. -** -** ^The estimatedRows value is an estimate of the number of rows that -** will be returned by the strategy. -** -** IMPORTANT: The estimatedRows field was added to the sqlite3_index_info -** structure for SQLite version 3.8.2. If a virtual table extension is -** used with an SQLite version earlier than 3.8.2, the results of attempting -** to read or write the estimatedRows field are undefined (but are likely -** to included crashing the application). The estimatedRows field should -** therefore only be used if [sqlite3_libversion_number()] returns a -** value greater than or equal to 3008002. -*/ -struct sqlite3_index_info { - /* Inputs */ - int nConstraint; /* Number of entries in aConstraint */ - struct sqlite3_index_constraint { - int iColumn; /* Column on left-hand side of constraint */ - unsigned char op; /* Constraint operator */ - unsigned char usable; /* True if this constraint is usable */ - int iTermOffset; /* Used internally - xBestIndex should ignore */ - } *aConstraint; /* Table of WHERE clause constraints */ - int nOrderBy; /* Number of terms in the ORDER BY clause */ - struct sqlite3_index_orderby { - int iColumn; /* Column number */ - unsigned char desc; /* True for DESC. False for ASC. 
*/ - } *aOrderBy; /* The ORDER BY clause */ - /* Outputs */ - struct sqlite3_index_constraint_usage { - int argvIndex; /* if >0, constraint is part of argv to xFilter */ - unsigned char omit; /* Do not code a test for this constraint */ - } *aConstraintUsage; - int idxNum; /* Number used to identify the index */ - char *idxStr; /* String, possibly obtained from sqlite3_malloc */ - int needToFreeIdxStr; /* Free idxStr using sqlite3_free() if true */ - int orderByConsumed; /* True if output is already ordered */ - double estimatedCost; /* Estimated cost of using this index */ - /* Fields below are only available in SQLite 3.8.2 and later */ - sqlite3_int64 estimatedRows; /* Estimated number of rows returned */ -}; - -/* -** CAPI3REF: Virtual Table Constraint Operator Codes -** -** These macros defined the allowed values for the -** [sqlite3_index_info].aConstraint[].op field. Each value represents -** an operator that is part of a constraint term in the wHERE clause of -** a query that uses a [virtual table]. -*/ -#define SQLITE_INDEX_CONSTRAINT_EQ 2 -#define SQLITE_INDEX_CONSTRAINT_GT 4 -#define SQLITE_INDEX_CONSTRAINT_LE 8 -#define SQLITE_INDEX_CONSTRAINT_LT 16 -#define SQLITE_INDEX_CONSTRAINT_GE 32 -#define SQLITE_INDEX_CONSTRAINT_MATCH 64 - -/* -** CAPI3REF: Register A Virtual Table Implementation -** -** ^These routines are used to register a new [virtual table module] name. -** ^Module names must be registered before -** creating a new [virtual table] using the module and before using a -** preexisting [virtual table] for the module. -** -** ^The module name is registered on the [database connection] specified -** by the first parameter. ^The name of the module is given by the -** second parameter. ^The third parameter is a pointer to -** the implementation of the [virtual table module]. ^The fourth -** parameter is an arbitrary client data pointer that is passed through -** into the [xCreate] and [xConnect] methods of the virtual table module -** when a new virtual table is be being created or reinitialized. -** -** ^The sqlite3_create_module_v2() interface has a fifth parameter which -** is a pointer to a destructor for the pClientData. ^SQLite will -** invoke the destructor function (if it is not NULL) when SQLite -** no longer needs the pClientData pointer. ^The destructor will also -** be invoked if the call to sqlite3_create_module_v2() fails. -** ^The sqlite3_create_module() -** interface is equivalent to sqlite3_create_module_v2() with a NULL -** destructor. -*/ -SQLITE_API int sqlite3_create_module( - sqlite3 *db, /* SQLite connection to register module with */ - const char *zName, /* Name of the module */ - const sqlite3_module *p, /* Methods for the module */ - void *pClientData /* Client data for xCreate/xConnect */ -); -SQLITE_API int sqlite3_create_module_v2( - sqlite3 *db, /* SQLite connection to register module with */ - const char *zName, /* Name of the module */ - const sqlite3_module *p, /* Methods for the module */ - void *pClientData, /* Client data for xCreate/xConnect */ - void(*xDestroy)(void*) /* Module destructor function */ -); - -/* -** CAPI3REF: Virtual Table Instance Object -** KEYWORDS: sqlite3_vtab -** -** Every [virtual table module] implementation uses a subclass -** of this object to describe a particular instance -** of the [virtual table]. Each subclass will -** be tailored to the specific needs of the module implementation. -** The purpose of this superclass is to define certain fields that are -** common to all module implementations. 
-** -** ^Virtual tables methods can set an error message by assigning a -** string obtained from [sqlite3_mprintf()] to zErrMsg. The method should -** take care that any prior string is freed by a call to [sqlite3_free()] -** prior to assigning a new string to zErrMsg. ^After the error message -** is delivered up to the client application, the string will be automatically -** freed by sqlite3_free() and the zErrMsg field will be zeroed. -*/ -struct sqlite3_vtab { - const sqlite3_module *pModule; /* The module for this virtual table */ - int nRef; /* NO LONGER USED */ - char *zErrMsg; /* Error message from sqlite3_mprintf() */ - /* Virtual table implementations will typically add additional fields */ -}; - -/* -** CAPI3REF: Virtual Table Cursor Object -** KEYWORDS: sqlite3_vtab_cursor {virtual table cursor} -** -** Every [virtual table module] implementation uses a subclass of the -** following structure to describe cursors that point into the -** [virtual table] and are used -** to loop through the virtual table. Cursors are created using the -** [sqlite3_module.xOpen | xOpen] method of the module and are destroyed -** by the [sqlite3_module.xClose | xClose] method. Cursors are used -** by the [xFilter], [xNext], [xEof], [xColumn], and [xRowid] methods -** of the module. Each module implementation will define -** the content of a cursor structure to suit its own needs. -** -** This superclass exists in order to define fields of the cursor that -** are common to all implementations. -*/ -struct sqlite3_vtab_cursor { - sqlite3_vtab *pVtab; /* Virtual table of this cursor */ - /* Virtual table implementations will typically add additional fields */ -}; - -/* -** CAPI3REF: Declare The Schema Of A Virtual Table -** -** ^The [xCreate] and [xConnect] methods of a -** [virtual table module] call this interface -** to declare the format (the names and datatypes of the columns) of -** the virtual tables they implement. -*/ -SQLITE_API int sqlite3_declare_vtab(sqlite3*, const char *zSQL); - -/* -** CAPI3REF: Overload A Function For A Virtual Table -** -** ^(Virtual tables can provide alternative implementations of functions -** using the [xFindFunction] method of the [virtual table module]. -** But global versions of those functions -** must exist in order to be overloaded.)^ -** -** ^(This API makes sure a global version of a function with a particular -** name and number of parameters exists. If no such function exists -** before this API is called, a new function is created.)^ ^The implementation -** of the new function always causes an exception to be thrown. So -** the new function is not good for anything by itself. Its only -** purpose is to be a placeholder function that can be overloaded -** by a [virtual table]. -*/ -SQLITE_API int sqlite3_overload_function(sqlite3*, const char *zFuncName, int nArg); - -/* -** The interface to the virtual-table mechanism defined above (back up -** to a comment remarkably similar to this one) is currently considered -** to be experimental. The interface might change in incompatible ways. -** If this is a problem for you, do not use the interface at this time. -** -** When the virtual-table mechanism stabilizes, we will declare the -** interface fixed, support it indefinitely, and remove this comment. -*/ - -/* -** CAPI3REF: A Handle To An Open BLOB -** KEYWORDS: {BLOB handle} {BLOB handles} -** -** An instance of this object represents an open BLOB on which -** [sqlite3_blob_open | incremental BLOB I/O] can be performed. 
-** ^Objects of this type are created by [sqlite3_blob_open()] -** and destroyed by [sqlite3_blob_close()]. -** ^The [sqlite3_blob_read()] and [sqlite3_blob_write()] interfaces -** can be used to read or write small subsections of the BLOB. -** ^The [sqlite3_blob_bytes()] interface returns the size of the BLOB in bytes. -*/ -typedef struct sqlite3_blob sqlite3_blob; - -/* -** CAPI3REF: Open A BLOB For Incremental I/O -** -** ^(This interfaces opens a [BLOB handle | handle] to the BLOB located -** in row iRow, column zColumn, table zTable in database zDb; -** in other words, the same BLOB that would be selected by: -** -**
-**     SELECT zColumn FROM zDb.zTable WHERE [rowid] = iRow;
-** 
)^ -** -** ^If the flags parameter is non-zero, then the BLOB is opened for read -** and write access. ^If it is zero, the BLOB is opened for read access. -** ^It is not possible to open a column that is part of an index or primary -** key for writing. ^If [foreign key constraints] are enabled, it is -** not possible to open a column that is part of a [child key] for writing. -** -** ^Note that the database name is not the filename that contains -** the database but rather the symbolic name of the database that -** appears after the AS keyword when the database is connected using [ATTACH]. -** ^For the main database file, the database name is "main". -** ^For TEMP tables, the database name is "temp". -** -** ^(On success, [SQLITE_OK] is returned and the new [BLOB handle] is written -** to *ppBlob. Otherwise an [error code] is returned and *ppBlob is set -** to be a null pointer.)^ -** ^This function sets the [database connection] error code and message -** accessible via [sqlite3_errcode()] and [sqlite3_errmsg()] and related -** functions. ^Note that the *ppBlob variable is always initialized in a -** way that makes it safe to invoke [sqlite3_blob_close()] on *ppBlob -** regardless of the success or failure of this routine. -** -** ^(If the row that a BLOB handle points to is modified by an -** [UPDATE], [DELETE], or by [ON CONFLICT] side-effects -** then the BLOB handle is marked as "expired". -** This is true if any column of the row is changed, even a column -** other than the one the BLOB handle is open on.)^ -** ^Calls to [sqlite3_blob_read()] and [sqlite3_blob_write()] for -** an expired BLOB handle fail with a return code of [SQLITE_ABORT]. -** ^(Changes written into a BLOB prior to the BLOB expiring are not -** rolled back by the expiration of the BLOB. Such changes will eventually -** commit if the transaction continues to completion.)^ -** -** ^Use the [sqlite3_blob_bytes()] interface to determine the size of -** the opened blob. ^The size of a blob may not be changed by this -** interface. Use the [UPDATE] SQL command to change the size of a -** blob. -** -** ^The [sqlite3_blob_open()] interface will fail for a [WITHOUT ROWID] -** table. Incremental BLOB I/O is not possible on [WITHOUT ROWID] tables. -** -** ^The [sqlite3_bind_zeroblob()] and [sqlite3_result_zeroblob()] interfaces -** and the built-in [zeroblob] SQL function can be used, if desired, -** to create an empty, zero-filled blob in which to read or write using -** this interface. -** -** To avoid a resource leak, every open [BLOB handle] should eventually -** be released by a call to [sqlite3_blob_close()]. -*/ -SQLITE_API int sqlite3_blob_open( - sqlite3*, - const char *zDb, - const char *zTable, - const char *zColumn, - sqlite3_int64 iRow, - int flags, - sqlite3_blob **ppBlob -); - -/* -** CAPI3REF: Move a BLOB Handle to a New Row -** -** ^This function is used to move an existing blob handle so that it points -** to a different row of the same database table. ^The new row is identified -** by the rowid value passed as the second argument. Only the row can be -** changed. ^The database, table and column on which the blob handle is open -** remain the same. Moving an existing blob handle to a new row can be -** faster than closing the existing handle and opening a new one. 
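A brief sketch of the incremental BLOB read flow described above; the connection `db`, table, column, and rowid are illustrative assumptions, not values taken from this header:

    /* Sketch: open the BLOB stored in row 42, column "content" of table
    ** "t1" in the "main" database read-only, read its first bytes, close. */
    sqlite3_blob *pBlob = 0;
    int rc = sqlite3_blob_open(db, "main", "t1", "content", 42, 0, &pBlob);
    if( rc==SQLITE_OK ){
      char buf[32];
      int nBlob = sqlite3_blob_bytes(pBlob);
      int nRead = nBlob < (int)sizeof(buf) ? nBlob : (int)sizeof(buf);
      rc = sqlite3_blob_read(pBlob, buf, nRead, 0);   /* copy nRead bytes from offset 0 */
    }
    sqlite3_blob_close(pBlob);                        /* harmless no-op on a NULL handle */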
-** -** ^(The new row must meet the same criteria as for [sqlite3_blob_open()] - -** it must exist and there must be either a blob or text value stored in -** the nominated column.)^ ^If the new row is not present in the table, or if -** it does not contain a blob or text value, or if another error occurs, an -** SQLite error code is returned and the blob handle is considered aborted. -** ^All subsequent calls to [sqlite3_blob_read()], [sqlite3_blob_write()] or -** [sqlite3_blob_reopen()] on an aborted blob handle immediately return -** SQLITE_ABORT. ^Calling [sqlite3_blob_bytes()] on an aborted blob handle -** always returns zero. -** -** ^This function sets the database handle error code and message. -*/ -SQLITE_API SQLITE_EXPERIMENTAL int sqlite3_blob_reopen(sqlite3_blob *, sqlite3_int64); - -/* -** CAPI3REF: Close A BLOB Handle -** -** ^Closes an open [BLOB handle]. -** -** ^Closing a BLOB shall cause the current transaction to commit -** if there are no other BLOBs, no pending prepared statements, and the -** database connection is in [autocommit mode]. -** ^If any writes were made to the BLOB, they might be held in cache -** until the close operation if they will fit. -** -** ^(Closing the BLOB often forces the changes -** out to disk and so if any I/O errors occur, they will likely occur -** at the time when the BLOB is closed. Any errors that occur during -** closing are reported as a non-zero return value.)^ -** -** ^(The BLOB is closed unconditionally. Even if this routine returns -** an error code, the BLOB is still closed.)^ -** -** ^Calling this routine with a null pointer (such as would be returned -** by a failed call to [sqlite3_blob_open()]) is a harmless no-op. -*/ -SQLITE_API int sqlite3_blob_close(sqlite3_blob *); - -/* -** CAPI3REF: Return The Size Of An Open BLOB -** -** ^Returns the size in bytes of the BLOB accessible via the -** successfully opened [BLOB handle] in its only argument. ^The -** incremental blob I/O routines can only read or overwriting existing -** blob content; they cannot change the size of a blob. -** -** This routine only works on a [BLOB handle] which has been created -** by a prior successful call to [sqlite3_blob_open()] and which has not -** been closed by [sqlite3_blob_close()]. Passing any other pointer in -** to this routine results in undefined and probably undesirable behavior. -*/ -SQLITE_API int sqlite3_blob_bytes(sqlite3_blob *); - -/* -** CAPI3REF: Read Data From A BLOB Incrementally -** -** ^(This function is used to read data from an open [BLOB handle] into a -** caller-supplied buffer. N bytes of data are copied into buffer Z -** from the open BLOB, starting at offset iOffset.)^ -** -** ^If offset iOffset is less than N bytes from the end of the BLOB, -** [SQLITE_ERROR] is returned and no data is read. ^If N or iOffset is -** less than zero, [SQLITE_ERROR] is returned and no data is read. -** ^The size of the blob (and hence the maximum value of N+iOffset) -** can be determined using the [sqlite3_blob_bytes()] interface. -** -** ^An attempt to read from an expired [BLOB handle] fails with an -** error code of [SQLITE_ABORT]. -** -** ^(On success, sqlite3_blob_read() returns SQLITE_OK. -** Otherwise, an [error code] or an [extended error code] is returned.)^ -** -** This routine only works on a [BLOB handle] which has been created -** by a prior successful call to [sqlite3_blob_open()] and which has not -** been closed by [sqlite3_blob_close()]. 
Passing any other pointer in -** to this routine results in undefined and probably undesirable behavior. -** -** See also: [sqlite3_blob_write()]. -*/ -SQLITE_API int sqlite3_blob_read(sqlite3_blob *, void *Z, int N, int iOffset); - -/* -** CAPI3REF: Write Data Into A BLOB Incrementally -** -** ^This function is used to write data into an open [BLOB handle] from a -** caller-supplied buffer. ^N bytes of data are copied from the buffer Z -** into the open BLOB, starting at offset iOffset. -** -** ^If the [BLOB handle] passed as the first argument was not opened for -** writing (the flags parameter to [sqlite3_blob_open()] was zero), -** this function returns [SQLITE_READONLY]. -** -** ^This function may only modify the contents of the BLOB; it is -** not possible to increase the size of a BLOB using this API. -** ^If offset iOffset is less than N bytes from the end of the BLOB, -** [SQLITE_ERROR] is returned and no data is written. ^If N is -** less than zero [SQLITE_ERROR] is returned and no data is written. -** The size of the BLOB (and hence the maximum value of N+iOffset) -** can be determined using the [sqlite3_blob_bytes()] interface. -** -** ^An attempt to write to an expired [BLOB handle] fails with an -** error code of [SQLITE_ABORT]. ^Writes to the BLOB that occurred -** before the [BLOB handle] expired are not rolled back by the -** expiration of the handle, though of course those changes might -** have been overwritten by the statement that expired the BLOB handle -** or by other independent statements. -** -** ^(On success, sqlite3_blob_write() returns SQLITE_OK. -** Otherwise, an [error code] or an [extended error code] is returned.)^ -** -** This routine only works on a [BLOB handle] which has been created -** by a prior successful call to [sqlite3_blob_open()] and which has not -** been closed by [sqlite3_blob_close()]. Passing any other pointer in -** to this routine results in undefined and probably undesirable behavior. -** -** See also: [sqlite3_blob_read()]. -*/ -SQLITE_API int sqlite3_blob_write(sqlite3_blob *, const void *z, int n, int iOffset); - -/* -** CAPI3REF: Virtual File System Objects -** -** A virtual filesystem (VFS) is an [sqlite3_vfs] object -** that SQLite uses to interact -** with the underlying operating system. Most SQLite builds come with a -** single default VFS that is appropriate for the host computer. -** New VFSes can be registered and existing VFSes can be unregistered. -** The following interfaces are provided. -** -** ^The sqlite3_vfs_find() interface returns a pointer to a VFS given its name. -** ^Names are case sensitive. -** ^Names are zero-terminated UTF-8 strings. -** ^If there is no match, a NULL pointer is returned. -** ^If zVfsName is NULL then the default VFS is returned. -** -** ^New VFSes are registered with sqlite3_vfs_register(). -** ^Each new VFS becomes the default VFS if the makeDflt flag is set. -** ^The same VFS can be registered multiple times without injury. -** ^To make an existing VFS into the default VFS, register it again -** with the makeDflt flag set. If two different VFSes with the -** same name are registered, the behavior is undefined. If a -** VFS is registered with a name that is NULL or an empty string, -** then the behavior is undefined. -** -** ^Unregister a VFS with the sqlite3_vfs_unregister() interface. -** ^(If the default VFS is unregistered, another VFS is chosen as -** the default. 
The choice for the new VFS is arbitrary.)^ -*/ -SQLITE_API sqlite3_vfs *sqlite3_vfs_find(const char *zVfsName); -SQLITE_API int sqlite3_vfs_register(sqlite3_vfs*, int makeDflt); -SQLITE_API int sqlite3_vfs_unregister(sqlite3_vfs*); - -/* -** CAPI3REF: Mutexes -** -** The SQLite core uses these routines for thread -** synchronization. Though they are intended for internal -** use by SQLite, code that links against SQLite is -** permitted to use any of these routines. -** -** The SQLite source code contains multiple implementations -** of these mutex routines. An appropriate implementation -** is selected automatically at compile-time. ^(The following -** implementations are available in the SQLite core: -** -**
    -**
  • SQLITE_MUTEX_PTHREADS -**
  • SQLITE_MUTEX_W32 -**
  • SQLITE_MUTEX_NOOP -**
)^ -** -** ^The SQLITE_MUTEX_NOOP implementation is a set of routines -** that does no real locking and is appropriate for use in -** a single-threaded application. ^The SQLITE_MUTEX_PTHREADS and -** SQLITE_MUTEX_W32 implementations are appropriate for use on Unix -** and Windows. -** -** ^(If SQLite is compiled with the SQLITE_MUTEX_APPDEF preprocessor -** macro defined (with "-DSQLITE_MUTEX_APPDEF=1"), then no mutex -** implementation is included with the library. In this case the -** application must supply a custom mutex implementation using the -** [SQLITE_CONFIG_MUTEX] option of the sqlite3_config() function -** before calling sqlite3_initialize() or any other public sqlite3_ -** function that calls sqlite3_initialize().)^ -** -** ^The sqlite3_mutex_alloc() routine allocates a new -** mutex and returns a pointer to it. ^If it returns NULL -** that means that a mutex could not be allocated. ^SQLite -** will unwind its stack and return an error. ^(The argument -** to sqlite3_mutex_alloc() is one of these integer constants: -** -**
    -**
  • SQLITE_MUTEX_FAST -**
  • SQLITE_MUTEX_RECURSIVE -**
  • SQLITE_MUTEX_STATIC_MASTER -**
  • SQLITE_MUTEX_STATIC_MEM -**
  • SQLITE_MUTEX_STATIC_MEM2 -**
  • SQLITE_MUTEX_STATIC_PRNG -**
  • SQLITE_MUTEX_STATIC_LRU -**
  • SQLITE_MUTEX_STATIC_LRU2 -**
)^ -** -** ^The first two constants (SQLITE_MUTEX_FAST and SQLITE_MUTEX_RECURSIVE) -** cause sqlite3_mutex_alloc() to create -** a new mutex. ^The new mutex is recursive when SQLITE_MUTEX_RECURSIVE -** is used but not necessarily so when SQLITE_MUTEX_FAST is used. -** The mutex implementation does not need to make a distinction -** between SQLITE_MUTEX_RECURSIVE and SQLITE_MUTEX_FAST if it does -** not want to. ^SQLite will only request a recursive mutex in -** cases where it really needs one. ^If a faster non-recursive mutex -** implementation is available on the host platform, the mutex subsystem -** might return such a mutex in response to SQLITE_MUTEX_FAST. -** -** ^The other allowed parameters to sqlite3_mutex_alloc() (anything other -** than SQLITE_MUTEX_FAST and SQLITE_MUTEX_RECURSIVE) each return -** a pointer to a static preexisting mutex. ^Six static mutexes are -** used by the current version of SQLite. Future versions of SQLite -** may add additional static mutexes. Static mutexes are for internal -** use by SQLite only. Applications that use SQLite mutexes should -** use only the dynamic mutexes returned by SQLITE_MUTEX_FAST or -** SQLITE_MUTEX_RECURSIVE. -** -** ^Note that if one of the dynamic mutex parameters (SQLITE_MUTEX_FAST -** or SQLITE_MUTEX_RECURSIVE) is used then sqlite3_mutex_alloc() -** returns a different mutex on every call. ^But for the static -** mutex types, the same mutex is returned on every call that has -** the same type number. -** -** ^The sqlite3_mutex_free() routine deallocates a previously -** allocated dynamic mutex. ^SQLite is careful to deallocate every -** dynamic mutex that it allocates. The dynamic mutexes must not be in -** use when they are deallocated. Attempting to deallocate a static -** mutex results in undefined behavior. ^SQLite never deallocates -** a static mutex. -** -** ^The sqlite3_mutex_enter() and sqlite3_mutex_try() routines attempt -** to enter a mutex. ^If another thread is already within the mutex, -** sqlite3_mutex_enter() will block and sqlite3_mutex_try() will return -** SQLITE_BUSY. ^The sqlite3_mutex_try() interface returns [SQLITE_OK] -** upon successful entry. ^(Mutexes created using -** SQLITE_MUTEX_RECURSIVE can be entered multiple times by the same thread. -** In such cases the, -** mutex must be exited an equal number of times before another thread -** can enter.)^ ^(If the same thread tries to enter any other -** kind of mutex more than once, the behavior is undefined. -** SQLite will never exhibit -** such behavior in its own use of mutexes.)^ -** -** ^(Some systems (for example, Windows 95) do not support the operation -** implemented by sqlite3_mutex_try(). On those systems, sqlite3_mutex_try() -** will always return SQLITE_BUSY. The SQLite core only ever uses -** sqlite3_mutex_try() as an optimization so this is acceptable behavior.)^ -** -** ^The sqlite3_mutex_leave() routine exits a mutex that was -** previously entered by the same thread. ^(The behavior -** is undefined if the mutex is not currently entered by the -** calling thread or is not currently allocated. SQLite will -** never do either.)^ -** -** ^If the argument to sqlite3_mutex_enter(), sqlite3_mutex_try(), or -** sqlite3_mutex_leave() is a NULL pointer, then all three routines -** behave as no-ops. -** -** See also: [sqlite3_mutex_held()] and [sqlite3_mutex_notheld()]. 
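A minimal sketch of the dynamic-mutex calls described above (most applications rely on SQLite's own serialization and never call these directly):

    /* Sketch: allocate a fast (non-recursive) dynamic mutex, guard a
    ** critical section, then free it. */
    sqlite3_mutex *pMutex = sqlite3_mutex_alloc(SQLITE_MUTEX_FAST);
    if( pMutex ){
      sqlite3_mutex_enter(pMutex);
      /* ... critical section ... */
      sqlite3_mutex_leave(pMutex);
      sqlite3_mutex_free(pMutex);   /* only dynamic mutexes may be freed */
    }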
-*/ -SQLITE_API sqlite3_mutex *sqlite3_mutex_alloc(int); -SQLITE_API void sqlite3_mutex_free(sqlite3_mutex*); -SQLITE_API void sqlite3_mutex_enter(sqlite3_mutex*); -SQLITE_API int sqlite3_mutex_try(sqlite3_mutex*); -SQLITE_API void sqlite3_mutex_leave(sqlite3_mutex*); - -/* -** CAPI3REF: Mutex Methods Object -** -** An instance of this structure defines the low-level routines -** used to allocate and use mutexes. -** -** Usually, the default mutex implementations provided by SQLite are -** sufficient, however the user has the option of substituting a custom -** implementation for specialized deployments or systems for which SQLite -** does not provide a suitable implementation. In this case, the user -** creates and populates an instance of this structure to pass -** to sqlite3_config() along with the [SQLITE_CONFIG_MUTEX] option. -** Additionally, an instance of this structure can be used as an -** output variable when querying the system for the current mutex -** implementation, using the [SQLITE_CONFIG_GETMUTEX] option. -** -** ^The xMutexInit method defined by this structure is invoked as -** part of system initialization by the sqlite3_initialize() function. -** ^The xMutexInit routine is called by SQLite exactly once for each -** effective call to [sqlite3_initialize()]. -** -** ^The xMutexEnd method defined by this structure is invoked as -** part of system shutdown by the sqlite3_shutdown() function. The -** implementation of this method is expected to release all outstanding -** resources obtained by the mutex methods implementation, especially -** those obtained by the xMutexInit method. ^The xMutexEnd() -** interface is invoked exactly once for each call to [sqlite3_shutdown()]. -** -** ^(The remaining seven methods defined by this structure (xMutexAlloc, -** xMutexFree, xMutexEnter, xMutexTry, xMutexLeave, xMutexHeld and -** xMutexNotheld) implement the following interfaces (respectively): -** -**
    -**
  • [sqlite3_mutex_alloc()] -**
  • [sqlite3_mutex_free()] -**
  • [sqlite3_mutex_enter()] -**
  • [sqlite3_mutex_try()] -**
  • [sqlite3_mutex_leave()] -**
  • [sqlite3_mutex_held()] -**
  • [sqlite3_mutex_notheld()] -**
)^ -** -** The only difference is that the public sqlite3_XXX functions enumerated -** above silently ignore any invocations that pass a NULL pointer instead -** of a valid mutex handle. The implementations of the methods defined -** by this structure are not required to handle this case, the results -** of passing a NULL pointer instead of a valid mutex handle are undefined -** (i.e. it is acceptable to provide an implementation that segfaults if -** it is passed a NULL pointer). -** -** The xMutexInit() method must be threadsafe. ^It must be harmless to -** invoke xMutexInit() multiple times within the same process and without -** intervening calls to xMutexEnd(). Second and subsequent calls to -** xMutexInit() must be no-ops. -** -** ^xMutexInit() must not use SQLite memory allocation ([sqlite3_malloc()] -** and its associates). ^Similarly, xMutexAlloc() must not use SQLite memory -** allocation for a static mutex. ^However xMutexAlloc() may use SQLite -** memory allocation for a fast or recursive mutex. -** -** ^SQLite will invoke the xMutexEnd() method when [sqlite3_shutdown()] is -** called, but only if the prior call to xMutexInit returned SQLITE_OK. -** If xMutexInit fails in any way, it is expected to clean up after itself -** prior to returning. -*/ -typedef struct sqlite3_mutex_methods sqlite3_mutex_methods; -struct sqlite3_mutex_methods { - int (*xMutexInit)(void); - int (*xMutexEnd)(void); - sqlite3_mutex *(*xMutexAlloc)(int); - void (*xMutexFree)(sqlite3_mutex *); - void (*xMutexEnter)(sqlite3_mutex *); - int (*xMutexTry)(sqlite3_mutex *); - void (*xMutexLeave)(sqlite3_mutex *); - int (*xMutexHeld)(sqlite3_mutex *); - int (*xMutexNotheld)(sqlite3_mutex *); -}; - -/* -** CAPI3REF: Mutex Verification Routines -** -** The sqlite3_mutex_held() and sqlite3_mutex_notheld() routines -** are intended for use inside assert() statements. ^The SQLite core -** never uses these routines except inside an assert() and applications -** are advised to follow the lead of the core. ^The SQLite core only -** provides implementations for these routines when it is compiled -** with the SQLITE_DEBUG flag. ^External mutex implementations -** are only required to provide these routines if SQLITE_DEBUG is -** defined and if NDEBUG is not defined. -** -** ^These routines should return true if the mutex in their argument -** is held or not held, respectively, by the calling thread. -** -** ^The implementation is not required to provide versions of these -** routines that actually work. If the implementation does not provide working -** versions of these routines, it should at least provide stubs that always -** return true so that one does not get spurious assertion failures. -** -** ^If the argument to sqlite3_mutex_held() is a NULL pointer then -** the routine should return 1. This seems counter-intuitive since -** clearly the mutex cannot be held if it does not exist. But -** the reason the mutex does not exist is because the build is not -** using mutexes. And we do not want the assert() containing the -** call to sqlite3_mutex_held() to fail, so a non-zero return is -** the appropriate thing to do. ^The sqlite3_mutex_notheld() -** interface should also return 1 when given a NULL pointer. -*/ -#ifndef NDEBUG -SQLITE_API int sqlite3_mutex_held(sqlite3_mutex*); -SQLITE_API int sqlite3_mutex_notheld(sqlite3_mutex*); -#endif - -/* -** CAPI3REF: Mutex Types -** -** The [sqlite3_mutex_alloc()] interface takes a single argument -** which is one of these integer constants. 
-** -** The set of static mutexes may change from one SQLite release to the -** next. Applications that override the built-in mutex logic must be -** prepared to accommodate additional static mutexes. -*/ -#define SQLITE_MUTEX_FAST 0 -#define SQLITE_MUTEX_RECURSIVE 1 -#define SQLITE_MUTEX_STATIC_MASTER 2 -#define SQLITE_MUTEX_STATIC_MEM 3 /* sqlite3_malloc() */ -#define SQLITE_MUTEX_STATIC_MEM2 4 /* NOT USED */ -#define SQLITE_MUTEX_STATIC_OPEN 4 /* sqlite3BtreeOpen() */ -#define SQLITE_MUTEX_STATIC_PRNG 5 /* sqlite3_random() */ -#define SQLITE_MUTEX_STATIC_LRU 6 /* lru page list */ -#define SQLITE_MUTEX_STATIC_LRU2 7 /* NOT USED */ -#define SQLITE_MUTEX_STATIC_PMEM 7 /* sqlite3PageMalloc() */ - -/* -** CAPI3REF: Retrieve the mutex for a database connection -** -** ^This interface returns a pointer the [sqlite3_mutex] object that -** serializes access to the [database connection] given in the argument -** when the [threading mode] is Serialized. -** ^If the [threading mode] is Single-thread or Multi-thread then this -** routine returns a NULL pointer. -*/ -SQLITE_API sqlite3_mutex *sqlite3_db_mutex(sqlite3*); - -/* -** CAPI3REF: Low-Level Control Of Database Files -** -** ^The [sqlite3_file_control()] interface makes a direct call to the -** xFileControl method for the [sqlite3_io_methods] object associated -** with a particular database identified by the second argument. ^The -** name of the database is "main" for the main database or "temp" for the -** TEMP database, or the name that appears after the AS keyword for -** databases that are added using the [ATTACH] SQL command. -** ^A NULL pointer can be used in place of "main" to refer to the -** main database file. -** ^The third and fourth parameters to this routine -** are passed directly through to the second and third parameters of -** the xFileControl method. ^The return value of the xFileControl -** method becomes the return value of this routine. -** -** ^The SQLITE_FCNTL_FILE_POINTER value for the op parameter causes -** a pointer to the underlying [sqlite3_file] object to be written into -** the space pointed to by the 4th parameter. ^The SQLITE_FCNTL_FILE_POINTER -** case is a short-circuit path which does not actually invoke the -** underlying sqlite3_io_methods.xFileControl method. -** -** ^If the second parameter (zDbName) does not match the name of any -** open database file, then SQLITE_ERROR is returned. ^This error -** code is not remembered and will not be recalled by [sqlite3_errcode()] -** or [sqlite3_errmsg()]. The underlying xFileControl method might -** also return SQLITE_ERROR. There is no way to distinguish between -** an incorrect zDbName and an SQLITE_ERROR return from the underlying -** xFileControl method. -** -** See also: [SQLITE_FCNTL_LOCKSTATE] -*/ -SQLITE_API int sqlite3_file_control(sqlite3*, const char *zDbName, int op, void*); - -/* -** CAPI3REF: Testing Interface -** -** ^The sqlite3_test_control() interface is used to read out internal -** state of SQLite and to inject faults into SQLite for testing -** purposes. ^The first parameter is an operation code that determines -** the number, meaning, and operation of all subsequent parameters. -** -** This interface is not for use by applications. It exists solely -** for verifying the correct operation of the SQLite library. Depending -** on how the SQLite library is compiled, this interface might not exist. -** -** The details of the operation codes, their meanings, the parameters -** they take, and what they do are all subject to change without notice. 
-** Unlike most of the SQLite API, this function is not guaranteed to -** operate consistently from one release to the next. -*/ -SQLITE_API int sqlite3_test_control(int op, ...); - -/* -** CAPI3REF: Testing Interface Operation Codes -** -** These constants are the valid operation code parameters used -** as the first argument to [sqlite3_test_control()]. -** -** These parameters and their meanings are subject to change -** without notice. These values are for testing purposes only. -** Applications should not use any of these parameters or the -** [sqlite3_test_control()] interface. -*/ -#define SQLITE_TESTCTRL_FIRST 5 -#define SQLITE_TESTCTRL_PRNG_SAVE 5 -#define SQLITE_TESTCTRL_PRNG_RESTORE 6 -#define SQLITE_TESTCTRL_PRNG_RESET 7 -#define SQLITE_TESTCTRL_BITVEC_TEST 8 -#define SQLITE_TESTCTRL_FAULT_INSTALL 9 -#define SQLITE_TESTCTRL_BENIGN_MALLOC_HOOKS 10 -#define SQLITE_TESTCTRL_PENDING_BYTE 11 -#define SQLITE_TESTCTRL_ASSERT 12 -#define SQLITE_TESTCTRL_ALWAYS 13 -#define SQLITE_TESTCTRL_RESERVE 14 -#define SQLITE_TESTCTRL_OPTIMIZATIONS 15 -#define SQLITE_TESTCTRL_ISKEYWORD 16 -#define SQLITE_TESTCTRL_SCRATCHMALLOC 17 -#define SQLITE_TESTCTRL_LOCALTIME_FAULT 18 -#define SQLITE_TESTCTRL_EXPLAIN_STMT 19 -#define SQLITE_TESTCTRL_NEVER_CORRUPT 20 -#define SQLITE_TESTCTRL_VDBE_COVERAGE 21 -#define SQLITE_TESTCTRL_BYTEORDER 22 -#define SQLITE_TESTCTRL_LAST 22 - -/* -** CAPI3REF: SQLite Runtime Status -** -** ^This interface is used to retrieve runtime status information -** about the performance of SQLite, and optionally to reset various -** highwater marks. ^The first argument is an integer code for -** the specific parameter to measure. ^(Recognized integer codes -** are of the form [status parameters | SQLITE_STATUS_...].)^ -** ^The current value of the parameter is returned into *pCurrent. -** ^The highest recorded value is returned in *pHighwater. ^If the -** resetFlag is true, then the highest record value is reset after -** *pHighwater is written. ^(Some parameters do not record the highest -** value. For those parameters -** nothing is written into *pHighwater and the resetFlag is ignored.)^ -** ^(Other parameters record only the highwater mark and not the current -** value. For these latter parameters nothing is written into *pCurrent.)^ -** -** ^The sqlite3_status() routine returns SQLITE_OK on success and a -** non-zero [error code] on failure. -** -** This routine is threadsafe but is not atomic. This routine can be -** called while other threads are running the same or different SQLite -** interfaces. However the values returned in *pCurrent and -** *pHighwater reflect the status of SQLite at different points in time -** and it is possible that another thread might change the parameter -** in between the times when *pCurrent and *pHighwater are written. -** -** See also: [sqlite3_db_status()] -*/ -SQLITE_API int sqlite3_status(int op, int *pCurrent, int *pHighwater, int resetFlag); - - -/* -** CAPI3REF: Status Parameters -** KEYWORDS: {status parameters} -** -** These integer constants designate various run-time status parameters -** that can be returned by [sqlite3_status()]. -** -**
-** [[SQLITE_STATUS_MEMORY_USED]] ^(
SQLITE_STATUS_MEMORY_USED
-**
This parameter is the current amount of memory checked out -** using [sqlite3_malloc()], either directly or indirectly. The -** figure includes calls made to [sqlite3_malloc()] by the application -** and internal memory usage by the SQLite library. Scratch memory -** controlled by [SQLITE_CONFIG_SCRATCH] and auxiliary page-cache -** memory controlled by [SQLITE_CONFIG_PAGECACHE] is not included in -** this parameter. The amount returned is the sum of the allocation -** sizes as reported by the xSize method in [sqlite3_mem_methods].
)^ -** -** [[SQLITE_STATUS_MALLOC_SIZE]] ^(
SQLITE_STATUS_MALLOC_SIZE
-**
This parameter records the largest memory allocation request -** handed to [sqlite3_malloc()] or [sqlite3_realloc()] (or their -** internal equivalents). Only the value returned in the -** *pHighwater parameter to [sqlite3_status()] is of interest. -** The value written into the *pCurrent parameter is undefined.
)^ -** -** [[SQLITE_STATUS_MALLOC_COUNT]] ^(
SQLITE_STATUS_MALLOC_COUNT
-**
This parameter records the number of separate memory allocations -** currently checked out.
)^ -** -** [[SQLITE_STATUS_PAGECACHE_USED]] ^(
SQLITE_STATUS_PAGECACHE_USED
-**
This parameter returns the number of pages used out of the -** [pagecache memory allocator] that was configured using -** [SQLITE_CONFIG_PAGECACHE]. The -** value returned is in pages, not in bytes.
)^ -** -** [[SQLITE_STATUS_PAGECACHE_OVERFLOW]] -** ^(
SQLITE_STATUS_PAGECACHE_OVERFLOW
-**
This parameter returns the number of bytes of page cache -** allocation which could not be satisfied by the [SQLITE_CONFIG_PAGECACHE] -** buffer and were forced to overflow to [sqlite3_malloc()]. The -** returned value includes allocations that overflowed because they -** were too large (they were larger than the "sz" parameter to -** [SQLITE_CONFIG_PAGECACHE]) and allocations that overflowed because -** no space was left in the page cache.
)^ -** -** [[SQLITE_STATUS_PAGECACHE_SIZE]] ^(
SQLITE_STATUS_PAGECACHE_SIZE
-**
This parameter records the largest memory allocation request -** handed to [pagecache memory allocator]. Only the value returned in the -** *pHighwater parameter to [sqlite3_status()] is of interest. -** The value written into the *pCurrent parameter is undefined.
)^ -** -** [[SQLITE_STATUS_SCRATCH_USED]] ^(
SQLITE_STATUS_SCRATCH_USED
-**
This parameter returns the number of allocations used out of the -** [scratch memory allocator] configured using -** [SQLITE_CONFIG_SCRATCH]. The value returned is in allocations, not -** in bytes. Since a single thread may only have one scratch allocation -** outstanding at a time, this parameter also reports the number of threads -** using scratch memory at the same time.
)^ -** -** [[SQLITE_STATUS_SCRATCH_OVERFLOW]] ^(
SQLITE_STATUS_SCRATCH_OVERFLOW
-**
This parameter returns the number of bytes of scratch memory -** allocation which could not be satisfied by the [SQLITE_CONFIG_SCRATCH] -** buffer and were forced to overflow to [sqlite3_malloc()]. The values -** returned include overflows because the requested allocation was too -** large (that is, because the requested allocation was larger than the -** "sz" parameter to [SQLITE_CONFIG_SCRATCH]) and because no scratch buffer -** slots were available. -**
)^ -** -** [[SQLITE_STATUS_SCRATCH_SIZE]] ^(
SQLITE_STATUS_SCRATCH_SIZE
-**
This parameter records the largest memory allocation request -** handed to [scratch memory allocator]. Only the value returned in the -** *pHighwater parameter to [sqlite3_status()] is of interest. -** The value written into the *pCurrent parameter is undefined.
)^ -** -** [[SQLITE_STATUS_PARSER_STACK]] ^(
SQLITE_STATUS_PARSER_STACK
-**
This parameter records the deepest parser stack. It is only -** meaningful if SQLite is compiled with [YYTRACKMAXSTACKDEPTH].
)^ -**
-** -** New status parameters may be added from time to time. -*/ -#define SQLITE_STATUS_MEMORY_USED 0 -#define SQLITE_STATUS_PAGECACHE_USED 1 -#define SQLITE_STATUS_PAGECACHE_OVERFLOW 2 -#define SQLITE_STATUS_SCRATCH_USED 3 -#define SQLITE_STATUS_SCRATCH_OVERFLOW 4 -#define SQLITE_STATUS_MALLOC_SIZE 5 -#define SQLITE_STATUS_PARSER_STACK 6 -#define SQLITE_STATUS_PAGECACHE_SIZE 7 -#define SQLITE_STATUS_SCRATCH_SIZE 8 -#define SQLITE_STATUS_MALLOC_COUNT 9 - -/* -** CAPI3REF: Database Connection Status -** -** ^This interface is used to retrieve runtime status information -** about a single [database connection]. ^The first argument is the -** database connection object to be interrogated. ^The second argument -** is an integer constant, taken from the set of -** [SQLITE_DBSTATUS options], that -** determines the parameter to interrogate. The set of -** [SQLITE_DBSTATUS options] is likely -** to grow in future releases of SQLite. -** -** ^The current value of the requested parameter is written into *pCur -** and the highest instantaneous value is written into *pHiwtr. ^If -** the resetFlg is true, then the highest instantaneous value is -** reset back down to the current value. -** -** ^The sqlite3_db_status() routine returns SQLITE_OK on success and a -** non-zero [error code] on failure. -** -** See also: [sqlite3_status()] and [sqlite3_stmt_status()]. -*/ -SQLITE_API int sqlite3_db_status(sqlite3*, int op, int *pCur, int *pHiwtr, int resetFlg); - -/* -** CAPI3REF: Status Parameters for database connections -** KEYWORDS: {SQLITE_DBSTATUS options} -** -** These constants are the available integer "verbs" that can be passed as -** the second argument to the [sqlite3_db_status()] interface. -** -** New verbs may be added in future releases of SQLite. Existing verbs -** might be discontinued. Applications should check the return code from -** [sqlite3_db_status()] to make sure that the call worked. -** The [sqlite3_db_status()] interface will return a non-zero error code -** if a discontinued or unsupported verb is invoked. -** -**
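A short sketch of reading the counters exposed by sqlite3_status() and sqlite3_db_status(); it assumes an open connection `db` and `<stdio.h>`, and uses verbs defined in this header (SQLITE_DBSTATUS_CACHE_USED appears a little further below):

    /* Sketch: read current and high-water memory use for the process and
    ** the pager-cache size for one connection, without resetting marks. */
    int cur = 0, hi = 0;
    sqlite3_status(SQLITE_STATUS_MEMORY_USED, &cur, &hi, 0);
    printf("heap in use: %d bytes (high-water %d)\n", cur, hi);
    sqlite3_db_status(db, SQLITE_DBSTATUS_CACHE_USED, &cur, &hi, 0);
    printf("pager cache for this connection: %d bytes\n", cur);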
-** [[SQLITE_DBSTATUS_LOOKASIDE_USED]] ^(
SQLITE_DBSTATUS_LOOKASIDE_USED
-**
This parameter returns the number of lookaside memory slots currently -** checked out.
)^ -** -** [[SQLITE_DBSTATUS_LOOKASIDE_HIT]] ^(
SQLITE_DBSTATUS_LOOKASIDE_HIT
-**
This parameter returns the number of malloc attempts that were -** satisfied using lookaside memory. Only the high-water value is meaningful; -** the current value is always zero.)^ -** -** [[SQLITE_DBSTATUS_LOOKASIDE_MISS_SIZE]] -** ^(
SQLITE_DBSTATUS_LOOKASIDE_MISS_SIZE
-**
This parameter returns the number of malloc attempts that might have -** been satisfied using lookaside memory but failed due to the amount of -** memory requested being larger than the lookaside slot size. -** Only the high-water value is meaningful; -** the current value is always zero.)^ -** -** [[SQLITE_DBSTATUS_LOOKASIDE_MISS_FULL]] -** ^(
SQLITE_DBSTATUS_LOOKASIDE_MISS_FULL
-**
This parameter returns the number of malloc attempts that might have -** been satisfied using lookaside memory but failed due to all lookaside -** memory already being in use. -** Only the high-water value is meaningful; -** the current value is always zero.)^ -** -** [[SQLITE_DBSTATUS_CACHE_USED]] ^(
SQLITE_DBSTATUS_CACHE_USED
-**
This parameter returns the approximate number of bytes of heap -** memory used by all pager caches associated with the database connection.)^ -** ^The highwater mark associated with SQLITE_DBSTATUS_CACHE_USED is always 0. -** -** [[SQLITE_DBSTATUS_SCHEMA_USED]] ^(
SQLITE_DBSTATUS_SCHEMA_USED
-**
This parameter returns the approximate number of of bytes of heap -** memory used to store the schema for all databases associated -** with the connection - main, temp, and any [ATTACH]-ed databases.)^ -** ^The full amount of memory used by the schemas is reported, even if the -** schema memory is shared with other database connections due to -** [shared cache mode] being enabled. -** ^The highwater mark associated with SQLITE_DBSTATUS_SCHEMA_USED is always 0. -** -** [[SQLITE_DBSTATUS_STMT_USED]] ^(
SQLITE_DBSTATUS_STMT_USED
-**
This parameter returns the approximate number of of bytes of heap -** and lookaside memory used by all prepared statements associated with -** the database connection.)^ -** ^The highwater mark associated with SQLITE_DBSTATUS_STMT_USED is always 0. -**
-** -** [[SQLITE_DBSTATUS_CACHE_HIT]] ^(
SQLITE_DBSTATUS_CACHE_HIT
-**
This parameter returns the number of pager cache hits that have -** occurred.)^ ^The highwater mark associated with SQLITE_DBSTATUS_CACHE_HIT -** is always 0. -**
-** -** [[SQLITE_DBSTATUS_CACHE_MISS]] ^(
SQLITE_DBSTATUS_CACHE_MISS
-**
This parameter returns the number of pager cache misses that have -** occurred.)^ ^The highwater mark associated with SQLITE_DBSTATUS_CACHE_MISS -** is always 0. -**
-** -** [[SQLITE_DBSTATUS_CACHE_WRITE]] ^(
SQLITE_DBSTATUS_CACHE_WRITE
-**
This parameter returns the number of dirty cache entries that have -** been written to disk. Specifically, the number of pages written to the -** wal file in wal mode databases, or the number of pages written to the -** database file in rollback mode databases. Any pages written as part of -** transaction rollback or database recovery operations are not included. -** If an IO or other error occurs while writing a page to disk, the effect -** on subsequent SQLITE_DBSTATUS_CACHE_WRITE requests is undefined.)^ ^The -** highwater mark associated with SQLITE_DBSTATUS_CACHE_WRITE is always 0. -**
-** -** [[SQLITE_DBSTATUS_DEFERRED_FKS]] ^(
SQLITE_DBSTATUS_DEFERRED_FKS
-**
This parameter returns zero for the current value if and only if -** all foreign key constraints (deferred or immediate) have been -** resolved.)^ ^The highwater mark is always 0. -**
-**
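/* Editor's note: illustrative sketch only, not part of the original header.
** It shows how an application might read a few of the SQLITE_DBSTATUS
** counters described above; the helper name print_db_status and the open
** connection handle db are assumptions made for the example. */
#include <stdio.h>
#include <sqlite3.h>

static void print_db_status(sqlite3 *db){
  int cur = 0, hiwtr = 0;

  /* Lookaside slots currently checked out: the current value is meaningful. */
  if( sqlite3_db_status(db, SQLITE_DBSTATUS_LOOKASIDE_USED, &cur, &hiwtr, 0)==SQLITE_OK ){
    printf("lookaside slots in use: %d (high-water %d)\n", cur, hiwtr);
  }

  /* Pager cache heap usage: the high-water mark is documented as always 0. */
  if( sqlite3_db_status(db, SQLITE_DBSTATUS_CACHE_USED, &cur, &hiwtr, 0)==SQLITE_OK ){
    printf("pager cache bytes: %d\n", cur);
  }

  /* Lookaside hits: only the high-water value is meaningful, and passing
  ** resetFlg=1 resets it back down to the current value. */
  if( sqlite3_db_status(db, SQLITE_DBSTATUS_LOOKASIDE_HIT, &cur, &hiwtr, 1)==SQLITE_OK ){
    printf("lookaside hits since last reset: %d\n", hiwtr);
  }
}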
-*/ -#define SQLITE_DBSTATUS_LOOKASIDE_USED 0 -#define SQLITE_DBSTATUS_CACHE_USED 1 -#define SQLITE_DBSTATUS_SCHEMA_USED 2 -#define SQLITE_DBSTATUS_STMT_USED 3 -#define SQLITE_DBSTATUS_LOOKASIDE_HIT 4 -#define SQLITE_DBSTATUS_LOOKASIDE_MISS_SIZE 5 -#define SQLITE_DBSTATUS_LOOKASIDE_MISS_FULL 6 -#define SQLITE_DBSTATUS_CACHE_HIT 7 -#define SQLITE_DBSTATUS_CACHE_MISS 8 -#define SQLITE_DBSTATUS_CACHE_WRITE 9 -#define SQLITE_DBSTATUS_DEFERRED_FKS 10 -#define SQLITE_DBSTATUS_MAX 10 /* Largest defined DBSTATUS */ - - -/* -** CAPI3REF: Prepared Statement Status -** -** ^(Each prepared statement maintains various -** [SQLITE_STMTSTATUS counters] that measure the number -** of times it has performed specific operations.)^ These counters can -** be used to monitor the performance characteristics of the prepared -** statements. For example, if the number of table steps greatly exceeds -** the number of table searches or result rows, that would tend to indicate -** that the prepared statement is using a full table scan rather than -** an index. -** -** ^(This interface is used to retrieve and reset counter values from -** a [prepared statement]. The first argument is the prepared statement -** object to be interrogated. The second argument -** is an integer code for a specific [SQLITE_STMTSTATUS counter] -** to be interrogated.)^ -** ^The current value of the requested counter is returned. -** ^If the resetFlg is true, then the counter is reset to zero after this -** interface call returns. -** -** See also: [sqlite3_status()] and [sqlite3_db_status()]. -*/ -SQLITE_API int sqlite3_stmt_status(sqlite3_stmt*, int op,int resetFlg); - -/* -** CAPI3REF: Status Parameters for prepared statements -** KEYWORDS: {SQLITE_STMTSTATUS counter} {SQLITE_STMTSTATUS counters} -** -** These preprocessor macros define integer codes that name counter -** values associated with the [sqlite3_stmt_status()] interface. -** The meanings of the various counters are as follows: -** -**
-** [[SQLITE_STMTSTATUS_FULLSCAN_STEP]] <dt>SQLITE_STMTSTATUS_FULLSCAN_STEP</dt>
-** <dd>^This is the number of times that SQLite has stepped forward in
-** a table as part of a full table scan. Large numbers for this counter
-** may indicate opportunities for performance improvement through
-** careful use of indices.</dd>
-**
-** [[SQLITE_STMTSTATUS_SORT]] <dt>SQLITE_STMTSTATUS_SORT</dt>
-** <dd>^This is the number of sort operations that have occurred.
-** A non-zero value in this counter may indicate an opportunity to
-** improve performance through careful use of indices.</dd>
-**
-** [[SQLITE_STMTSTATUS_AUTOINDEX]] <dt>SQLITE_STMTSTATUS_AUTOINDEX</dt>
-** <dd>^This is the number of rows inserted into transient indices that
-** were created automatically in order to help joins run faster.
-** A non-zero value in this counter may indicate an opportunity to
-** improve performance by adding permanent indices that do not
-** need to be reinitialized each time the statement is run.</dd>
-**
-** [[SQLITE_STMTSTATUS_VM_STEP]] <dt>SQLITE_STMTSTATUS_VM_STEP</dt>
-** <dd>^This is the number of virtual machine operations executed
-** by the prepared statement if that number is less than or equal
-** to 2147483647. The number of virtual machine operations can be
-** used as a proxy for the total work done by the prepared statement.
-** If the number of virtual machine operations exceeds 2147483647
-** then the value returned by this statement status code is undefined.
-** </dd>
-** </dl>
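/* Editor's note: illustrative sketch only, not part of the original header.
** It runs an arbitrary query to completion and then reads the counters
** described above to spot full scans, sorting, or automatic indices.
** The helper name report_stmt_counters and the arguments db/zSql are
** assumptions made for the example. */
#include <stdio.h>
#include <sqlite3.h>

static void report_stmt_counters(sqlite3 *db, const char *zSql){
  sqlite3_stmt *pStmt = 0;
  if( sqlite3_prepare_v2(db, zSql, -1, &pStmt, 0)!=SQLITE_OK ) return;
  while( sqlite3_step(pStmt)==SQLITE_ROW ){ /* discard rows */ }

  /* A resetFlg of 0 reads each counter without clearing it. */
  printf("full-scan steps: %d\n",
         sqlite3_stmt_status(pStmt, SQLITE_STMTSTATUS_FULLSCAN_STEP, 0));
  printf("sort operations: %d\n",
         sqlite3_stmt_status(pStmt, SQLITE_STMTSTATUS_SORT, 0));
  printf("auto-index rows: %d\n",
         sqlite3_stmt_status(pStmt, SQLITE_STMTSTATUS_AUTOINDEX, 0));
  printf("VM steps:        %d\n",
         sqlite3_stmt_status(pStmt, SQLITE_STMTSTATUS_VM_STEP, 0));

  sqlite3_finalize(pStmt);
}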
-*/ -#define SQLITE_STMTSTATUS_FULLSCAN_STEP 1 -#define SQLITE_STMTSTATUS_SORT 2 -#define SQLITE_STMTSTATUS_AUTOINDEX 3 -#define SQLITE_STMTSTATUS_VM_STEP 4 - -/* -** CAPI3REF: Custom Page Cache Object -** -** The sqlite3_pcache type is opaque. It is implemented by -** the pluggable module. The SQLite core has no knowledge of -** its size or internal structure and never deals with the -** sqlite3_pcache object except by holding and passing pointers -** to the object. -** -** See [sqlite3_pcache_methods2] for additional information. -*/ -typedef struct sqlite3_pcache sqlite3_pcache; - -/* -** CAPI3REF: Custom Page Cache Object -** -** The sqlite3_pcache_page object represents a single page in the -** page cache. The page cache will allocate instances of this -** object. Various methods of the page cache use pointers to instances -** of this object as parameters or as their return value. -** -** See [sqlite3_pcache_methods2] for additional information. -*/ -typedef struct sqlite3_pcache_page sqlite3_pcache_page; -struct sqlite3_pcache_page { - void *pBuf; /* The content of the page */ - void *pExtra; /* Extra information associated with the page */ -}; - -/* -** CAPI3REF: Application Defined Page Cache. -** KEYWORDS: {page cache} -** -** ^(The [sqlite3_config]([SQLITE_CONFIG_PCACHE2], ...) interface can -** register an alternative page cache implementation by passing in an -** instance of the sqlite3_pcache_methods2 structure.)^ -** In many applications, most of the heap memory allocated by -** SQLite is used for the page cache. -** By implementing a -** custom page cache using this API, an application can better control -** the amount of memory consumed by SQLite, the way in which -** that memory is allocated and released, and the policies used to -** determine exactly which parts of a database file are cached and for -** how long. -** -** The alternative page cache mechanism is an -** extreme measure that is only needed by the most demanding applications. -** The built-in page cache is recommended for most uses. -** -** ^(The contents of the sqlite3_pcache_methods2 structure are copied to an -** internal buffer by SQLite within the call to [sqlite3_config]. Hence -** the application may discard the parameter after the call to -** [sqlite3_config()] returns.)^ -** -** [[the xInit() page cache method]] -** ^(The xInit() method is called once for each effective -** call to [sqlite3_initialize()])^ -** (usually only once during the lifetime of the process). ^(The xInit() -** method is passed a copy of the sqlite3_pcache_methods2.pArg value.)^ -** The intent of the xInit() method is to set up global data structures -** required by the custom page cache implementation. -** ^(If the xInit() method is NULL, then the -** built-in default page cache is used instead of the application defined -** page cache.)^ -** -** [[the xShutdown() page cache method]] -** ^The xShutdown() method is called by [sqlite3_shutdown()]. -** It can be used to clean up -** any outstanding resources before process shutdown, if required. -** ^The xShutdown() method may be NULL. -** -** ^SQLite automatically serializes calls to the xInit method, -** so the xInit method need not be threadsafe. ^The -** xShutdown method is only called from [sqlite3_shutdown()] so it does -** not need to be threadsafe either. All other methods must be threadsafe -** in multithreaded applications. -** -** ^SQLite will never invoke xInit() more than once without an intervening -** call to xShutdown(). 
-** -** [[the xCreate() page cache methods]] -** ^SQLite invokes the xCreate() method to construct a new cache instance. -** SQLite will typically create one cache instance for each open database file, -** though this is not guaranteed. ^The -** first parameter, szPage, is the size in bytes of the pages that must -** be allocated by the cache. ^szPage will always a power of two. ^The -** second parameter szExtra is a number of bytes of extra storage -** associated with each page cache entry. ^The szExtra parameter will -** a number less than 250. SQLite will use the -** extra szExtra bytes on each page to store metadata about the underlying -** database page on disk. The value passed into szExtra depends -** on the SQLite version, the target platform, and how SQLite was compiled. -** ^The third argument to xCreate(), bPurgeable, is true if the cache being -** created will be used to cache database pages of a file stored on disk, or -** false if it is used for an in-memory database. The cache implementation -** does not have to do anything special based with the value of bPurgeable; -** it is purely advisory. ^On a cache where bPurgeable is false, SQLite will -** never invoke xUnpin() except to deliberately delete a page. -** ^In other words, calls to xUnpin() on a cache with bPurgeable set to -** false will always have the "discard" flag set to true. -** ^Hence, a cache created with bPurgeable false will -** never contain any unpinned pages. -** -** [[the xCachesize() page cache method]] -** ^(The xCachesize() method may be called at any time by SQLite to set the -** suggested maximum cache-size (number of pages stored by) the cache -** instance passed as the first argument. This is the value configured using -** the SQLite "[PRAGMA cache_size]" command.)^ As with the bPurgeable -** parameter, the implementation is not required to do anything with this -** value; it is advisory only. -** -** [[the xPagecount() page cache methods]] -** The xPagecount() method must return the number of pages currently -** stored in the cache, both pinned and unpinned. -** -** [[the xFetch() page cache methods]] -** The xFetch() method locates a page in the cache and returns a pointer to -** an sqlite3_pcache_page object associated with that page, or a NULL pointer. -** The pBuf element of the returned sqlite3_pcache_page object will be a -** pointer to a buffer of szPage bytes used to store the content of a -** single database page. The pExtra element of sqlite3_pcache_page will be -** a pointer to the szExtra bytes of extra storage that SQLite has requested -** for each entry in the page cache. -** -** The page to be fetched is determined by the key. ^The minimum key value -** is 1. After it has been retrieved using xFetch, the page is considered -** to be "pinned". -** -** If the requested page is already in the page cache, then the page cache -** implementation must return a pointer to the page buffer with its content -** intact. If the requested page is not already in the cache, then the -** cache implementation should use the value of the createFlag -** parameter to help it determined what action to take: -** -** -**
-** <table border=1><tr><th> createFlag <th> Behavior when page is not already in cache
-** <tr><td> 0 <td> Do not allocate a new page. Return NULL.
-** <tr><td> 1 <td> Allocate a new page if it is easy and convenient to do so.
-**                 Otherwise return NULL.
-** <tr><td> 2 <td> Make every effort to allocate a new page. Only return
-**                 NULL if allocating a new page is effectively impossible.
-** </table>
-** -** ^(SQLite will normally invoke xFetch() with a createFlag of 0 or 1. SQLite -** will only use a createFlag of 2 after a prior call with a createFlag of 1 -** failed.)^ In between the to xFetch() calls, SQLite may -** attempt to unpin one or more cache pages by spilling the content of -** pinned pages to disk and synching the operating system disk cache. -** -** [[the xUnpin() page cache method]] -** ^xUnpin() is called by SQLite with a pointer to a currently pinned page -** as its second argument. If the third parameter, discard, is non-zero, -** then the page must be evicted from the cache. -** ^If the discard parameter is -** zero, then the page may be discarded or retained at the discretion of -** page cache implementation. ^The page cache implementation -** may choose to evict unpinned pages at any time. -** -** The cache must not perform any reference counting. A single -** call to xUnpin() unpins the page regardless of the number of prior calls -** to xFetch(). -** -** [[the xRekey() page cache methods]] -** The xRekey() method is used to change the key value associated with the -** page passed as the second argument. If the cache -** previously contains an entry associated with newKey, it must be -** discarded. ^Any prior cache entry associated with newKey is guaranteed not -** to be pinned. -** -** When SQLite calls the xTruncate() method, the cache must discard all -** existing cache entries with page numbers (keys) greater than or equal -** to the value of the iLimit parameter passed to xTruncate(). If any -** of these pages are pinned, they are implicitly unpinned, meaning that -** they can be safely discarded. -** -** [[the xDestroy() page cache method]] -** ^The xDestroy() method is used to delete a cache allocated by xCreate(). -** All resources associated with the specified cache should be freed. ^After -** calling the xDestroy() method, SQLite considers the [sqlite3_pcache*] -** handle invalid, and will not use it with any other sqlite3_pcache_methods2 -** functions. -** -** [[the xShrink() page cache method]] -** ^SQLite invokes the xShrink() method when it wants the page cache to -** free up as much of heap memory as possible. The page cache implementation -** is not obligated to free any memory, but well-behaved implementations should -** do their best. -*/ -typedef struct sqlite3_pcache_methods2 sqlite3_pcache_methods2; -struct sqlite3_pcache_methods2 { - int iVersion; - void *pArg; - int (*xInit)(void*); - void (*xShutdown)(void*); - sqlite3_pcache *(*xCreate)(int szPage, int szExtra, int bPurgeable); - void (*xCachesize)(sqlite3_pcache*, int nCachesize); - int (*xPagecount)(sqlite3_pcache*); - sqlite3_pcache_page *(*xFetch)(sqlite3_pcache*, unsigned key, int createFlag); - void (*xUnpin)(sqlite3_pcache*, sqlite3_pcache_page*, int discard); - void (*xRekey)(sqlite3_pcache*, sqlite3_pcache_page*, - unsigned oldKey, unsigned newKey); - void (*xTruncate)(sqlite3_pcache*, unsigned iLimit); - void (*xDestroy)(sqlite3_pcache*); - void (*xShrink)(sqlite3_pcache*); -}; - -/* -** This is the obsolete pcache_methods object that has now been replaced -** by sqlite3_pcache_methods2. This object is not used by SQLite. It is -** retained in the header file for backwards compatibility only. 
-*/ -typedef struct sqlite3_pcache_methods sqlite3_pcache_methods; -struct sqlite3_pcache_methods { - void *pArg; - int (*xInit)(void*); - void (*xShutdown)(void*); - sqlite3_pcache *(*xCreate)(int szPage, int bPurgeable); - void (*xCachesize)(sqlite3_pcache*, int nCachesize); - int (*xPagecount)(sqlite3_pcache*); - void *(*xFetch)(sqlite3_pcache*, unsigned key, int createFlag); - void (*xUnpin)(sqlite3_pcache*, void*, int discard); - void (*xRekey)(sqlite3_pcache*, void*, unsigned oldKey, unsigned newKey); - void (*xTruncate)(sqlite3_pcache*, unsigned iLimit); - void (*xDestroy)(sqlite3_pcache*); -}; - - -/* -** CAPI3REF: Online Backup Object -** -** The sqlite3_backup object records state information about an ongoing -** online backup operation. ^The sqlite3_backup object is created by -** a call to [sqlite3_backup_init()] and is destroyed by a call to -** [sqlite3_backup_finish()]. -** -** See Also: [Using the SQLite Online Backup API] -*/ -typedef struct sqlite3_backup sqlite3_backup; - -/* -** CAPI3REF: Online Backup API. -** -** The backup API copies the content of one database into another. -** It is useful either for creating backups of databases or -** for copying in-memory databases to or from persistent files. -** -** See Also: [Using the SQLite Online Backup API] -** -** ^SQLite holds a write transaction open on the destination database file -** for the duration of the backup operation. -** ^The source database is read-locked only while it is being read; -** it is not locked continuously for the entire backup operation. -** ^Thus, the backup may be performed on a live source database without -** preventing other database connections from -** reading or writing to the source database while the backup is underway. -** -** ^(To perform a backup operation: -**
-**
-**   1. sqlite3_backup_init() is called once to initialize the
-**      backup,
-**   2. sqlite3_backup_step() is called one or more times to transfer
-**      the data between the two databases, and finally
-**   3. sqlite3_backup_finish() is called to release all resources
-**      associated with the backup operation.
)^ -** There should be exactly one call to sqlite3_backup_finish() for each -** successful call to sqlite3_backup_init(). -** -** [[sqlite3_backup_init()]] sqlite3_backup_init() -** -** ^The D and N arguments to sqlite3_backup_init(D,N,S,M) are the -** [database connection] associated with the destination database -** and the database name, respectively. -** ^The database name is "main" for the main database, "temp" for the -** temporary database, or the name specified after the AS keyword in -** an [ATTACH] statement for an attached database. -** ^The S and M arguments passed to -** sqlite3_backup_init(D,N,S,M) identify the [database connection] -** and database name of the source database, respectively. -** ^The source and destination [database connections] (parameters S and D) -** must be different or else sqlite3_backup_init(D,N,S,M) will fail with -** an error. -** -** ^If an error occurs within sqlite3_backup_init(D,N,S,M), then NULL is -** returned and an error code and error message are stored in the -** destination [database connection] D. -** ^The error code and message for the failed call to sqlite3_backup_init() -** can be retrieved using the [sqlite3_errcode()], [sqlite3_errmsg()], and/or -** [sqlite3_errmsg16()] functions. -** ^A successful call to sqlite3_backup_init() returns a pointer to an -** [sqlite3_backup] object. -** ^The [sqlite3_backup] object may be used with the sqlite3_backup_step() and -** sqlite3_backup_finish() functions to perform the specified backup -** operation. -** -** [[sqlite3_backup_step()]] sqlite3_backup_step() -** -** ^Function sqlite3_backup_step(B,N) will copy up to N pages between -** the source and destination databases specified by [sqlite3_backup] object B. -** ^If N is negative, all remaining source pages are copied. -** ^If sqlite3_backup_step(B,N) successfully copies N pages and there -** are still more pages to be copied, then the function returns [SQLITE_OK]. -** ^If sqlite3_backup_step(B,N) successfully finishes copying all pages -** from source to destination, then it returns [SQLITE_DONE]. -** ^If an error occurs while running sqlite3_backup_step(B,N), -** then an [error code] is returned. ^As well as [SQLITE_OK] and -** [SQLITE_DONE], a call to sqlite3_backup_step() may return [SQLITE_READONLY], -** [SQLITE_NOMEM], [SQLITE_BUSY], [SQLITE_LOCKED], or an -** [SQLITE_IOERR_ACCESS | SQLITE_IOERR_XXX] extended error code. -** -** ^(The sqlite3_backup_step() might return [SQLITE_READONLY] if -**
-**
-**   1. the destination database was opened read-only, or
-**   2. the destination database is using write-ahead-log journaling
-**      and the destination and source page sizes differ, or
-**   3. the destination database is an in-memory database and the
-**      destination and source page sizes differ.
)^ -** -** ^If sqlite3_backup_step() cannot obtain a required file-system lock, then -** the [sqlite3_busy_handler | busy-handler function] -** is invoked (if one is specified). ^If the -** busy-handler returns non-zero before the lock is available, then -** [SQLITE_BUSY] is returned to the caller. ^In this case the call to -** sqlite3_backup_step() can be retried later. ^If the source -** [database connection] -** is being used to write to the source database when sqlite3_backup_step() -** is called, then [SQLITE_LOCKED] is returned immediately. ^Again, in this -** case the call to sqlite3_backup_step() can be retried later on. ^(If -** [SQLITE_IOERR_ACCESS | SQLITE_IOERR_XXX], [SQLITE_NOMEM], or -** [SQLITE_READONLY] is returned, then -** there is no point in retrying the call to sqlite3_backup_step(). These -** errors are considered fatal.)^ The application must accept -** that the backup operation has failed and pass the backup operation handle -** to the sqlite3_backup_finish() to release associated resources. -** -** ^The first call to sqlite3_backup_step() obtains an exclusive lock -** on the destination file. ^The exclusive lock is not released until either -** sqlite3_backup_finish() is called or the backup operation is complete -** and sqlite3_backup_step() returns [SQLITE_DONE]. ^Every call to -** sqlite3_backup_step() obtains a [shared lock] on the source database that -** lasts for the duration of the sqlite3_backup_step() call. -** ^Because the source database is not locked between calls to -** sqlite3_backup_step(), the source database may be modified mid-way -** through the backup process. ^If the source database is modified by an -** external process or via a database connection other than the one being -** used by the backup operation, then the backup will be automatically -** restarted by the next call to sqlite3_backup_step(). ^If the source -** database is modified by the using the same database connection as is used -** by the backup operation, then the backup database is automatically -** updated at the same time. -** -** [[sqlite3_backup_finish()]] sqlite3_backup_finish() -** -** When sqlite3_backup_step() has returned [SQLITE_DONE], or when the -** application wishes to abandon the backup operation, the application -** should destroy the [sqlite3_backup] by passing it to sqlite3_backup_finish(). -** ^The sqlite3_backup_finish() interfaces releases all -** resources associated with the [sqlite3_backup] object. -** ^If sqlite3_backup_step() has not yet returned [SQLITE_DONE], then any -** active write-transaction on the destination database is rolled back. -** The [sqlite3_backup] object is invalid -** and may not be used following a call to sqlite3_backup_finish(). -** -** ^The value returned by sqlite3_backup_finish is [SQLITE_OK] if no -** sqlite3_backup_step() errors occurred, regardless or whether or not -** sqlite3_backup_step() completed. -** ^If an out-of-memory condition or IO error occurred during any prior -** sqlite3_backup_step() call on the same [sqlite3_backup] object, then -** sqlite3_backup_finish() returns the corresponding [error code]. -** -** ^A return of [SQLITE_BUSY] or [SQLITE_LOCKED] from sqlite3_backup_step() -** is not a permanent error and does not affect the return value of -** sqlite3_backup_finish(). 
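/* Editor's note: illustrative sketch only, not part of the original header.
** It follows the init/step/finish sequence described above, copying the
** "main" database of pSrc into pDst 64 pages at a time and pausing between
** steps so other connections can keep using the source. The helper name
** backup_database is an assumption made for the example. */
#include <sqlite3.h>

static int backup_database(sqlite3 *pDst, sqlite3 *pSrc){
  sqlite3_backup *pBackup = sqlite3_backup_init(pDst, "main", pSrc, "main");
  if( pBackup ){
    int rc;
    do{
      rc = sqlite3_backup_step(pBackup, 64);      /* copy up to 64 pages */
      if( rc==SQLITE_OK || rc==SQLITE_BUSY || rc==SQLITE_LOCKED ){
        sqlite3_sleep(250);                       /* give other work a chance */
      }
    }while( rc==SQLITE_OK || rc==SQLITE_BUSY || rc==SQLITE_LOCKED );
    sqlite3_backup_finish(pBackup);               /* always release the handle */
  }
  /* On failure the error code and message are stored on the destination. */
  return sqlite3_errcode(pDst);
}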
-** -** [[sqlite3_backup__remaining()]] [[sqlite3_backup_pagecount()]] -** sqlite3_backup_remaining() and sqlite3_backup_pagecount() -** -** ^Each call to sqlite3_backup_step() sets two values inside -** the [sqlite3_backup] object: the number of pages still to be backed -** up and the total number of pages in the source database file. -** The sqlite3_backup_remaining() and sqlite3_backup_pagecount() interfaces -** retrieve these two values, respectively. -** -** ^The values returned by these functions are only updated by -** sqlite3_backup_step(). ^If the source database is modified during a backup -** operation, then the values are not updated to account for any extra -** pages that need to be updated or the size of the source database file -** changing. -** -** Concurrent Usage of Database Handles -** -** ^The source [database connection] may be used by the application for other -** purposes while a backup operation is underway or being initialized. -** ^If SQLite is compiled and configured to support threadsafe database -** connections, then the source database connection may be used concurrently -** from within other threads. -** -** However, the application must guarantee that the destination -** [database connection] is not passed to any other API (by any thread) after -** sqlite3_backup_init() is called and before the corresponding call to -** sqlite3_backup_finish(). SQLite does not currently check to see -** if the application incorrectly accesses the destination [database connection] -** and so no error code is reported, but the operations may malfunction -** nevertheless. Use of the destination database connection while a -** backup is in progress might also also cause a mutex deadlock. -** -** If running in [shared cache mode], the application must -** guarantee that the shared cache used by the destination database -** is not accessed while the backup is running. In practice this means -** that the application must guarantee that the disk file being -** backed up to is not accessed by any connection within the process, -** not just the specific connection that was passed to sqlite3_backup_init(). -** -** The [sqlite3_backup] object itself is partially threadsafe. Multiple -** threads may safely make multiple concurrent calls to sqlite3_backup_step(). -** However, the sqlite3_backup_remaining() and sqlite3_backup_pagecount() -** APIs are not strictly speaking threadsafe. If they are invoked at the -** same time as another thread is invoking sqlite3_backup_step() it is -** possible that they return invalid values. -*/ -SQLITE_API sqlite3_backup *sqlite3_backup_init( - sqlite3 *pDest, /* Destination database handle */ - const char *zDestName, /* Destination database name */ - sqlite3 *pSource, /* Source database handle */ - const char *zSourceName /* Source database name */ -); -SQLITE_API int sqlite3_backup_step(sqlite3_backup *p, int nPage); -SQLITE_API int sqlite3_backup_finish(sqlite3_backup *p); -SQLITE_API int sqlite3_backup_remaining(sqlite3_backup *p); -SQLITE_API int sqlite3_backup_pagecount(sqlite3_backup *p); - -/* -** CAPI3REF: Unlock Notification -** -** ^When running in shared-cache mode, a database operation may fail with -** an [SQLITE_LOCKED] error if the required locks on the shared-cache or -** individual tables within the shared-cache cannot be obtained. See -** [SQLite Shared-Cache Mode] for a description of shared-cache locking. 
-** ^This API may be used to register a callback that SQLite will invoke -** when the connection currently holding the required lock relinquishes it. -** ^This API is only available if the library was compiled with the -** [SQLITE_ENABLE_UNLOCK_NOTIFY] C-preprocessor symbol defined. -** -** See Also: [Using the SQLite Unlock Notification Feature]. -** -** ^Shared-cache locks are released when a database connection concludes -** its current transaction, either by committing it or rolling it back. -** -** ^When a connection (known as the blocked connection) fails to obtain a -** shared-cache lock and SQLITE_LOCKED is returned to the caller, the -** identity of the database connection (the blocking connection) that -** has locked the required resource is stored internally. ^After an -** application receives an SQLITE_LOCKED error, it may call the -** sqlite3_unlock_notify() method with the blocked connection handle as -** the first argument to register for a callback that will be invoked -** when the blocking connections current transaction is concluded. ^The -** callback is invoked from within the [sqlite3_step] or [sqlite3_close] -** call that concludes the blocking connections transaction. -** -** ^(If sqlite3_unlock_notify() is called in a multi-threaded application, -** there is a chance that the blocking connection will have already -** concluded its transaction by the time sqlite3_unlock_notify() is invoked. -** If this happens, then the specified callback is invoked immediately, -** from within the call to sqlite3_unlock_notify().)^ -** -** ^If the blocked connection is attempting to obtain a write-lock on a -** shared-cache table, and more than one other connection currently holds -** a read-lock on the same table, then SQLite arbitrarily selects one of -** the other connections to use as the blocking connection. -** -** ^(There may be at most one unlock-notify callback registered by a -** blocked connection. If sqlite3_unlock_notify() is called when the -** blocked connection already has a registered unlock-notify callback, -** then the new callback replaces the old.)^ ^If sqlite3_unlock_notify() is -** called with a NULL pointer as its second argument, then any existing -** unlock-notify callback is canceled. ^The blocked connections -** unlock-notify callback may also be canceled by closing the blocked -** connection using [sqlite3_close()]. -** -** The unlock-notify callback is not reentrant. If an application invokes -** any sqlite3_xxx API functions from within an unlock-notify callback, a -** crash or deadlock may be the result. -** -** ^Unless deadlock is detected (see below), sqlite3_unlock_notify() always -** returns SQLITE_OK. -** -** Callback Invocation Details -** -** When an unlock-notify callback is registered, the application provides a -** single void* pointer that is passed to the callback when it is invoked. -** However, the signature of the callback function allows SQLite to pass -** it an array of void* context pointers. The first argument passed to -** an unlock-notify callback is a pointer to an array of void* pointers, -** and the second is the number of entries in the array. -** -** When a blocking connections transaction is concluded, there may be -** more than one blocked connection that has registered for an unlock-notify -** callback. 
^If two or more such blocked connections have specified the -** same callback function, then instead of invoking the callback function -** multiple times, it is invoked once with the set of void* context pointers -** specified by the blocked connections bundled together into an array. -** This gives the application an opportunity to prioritize any actions -** related to the set of unblocked database connections. -** -** Deadlock Detection -** -** Assuming that after registering for an unlock-notify callback a -** database waits for the callback to be issued before taking any further -** action (a reasonable assumption), then using this API may cause the -** application to deadlock. For example, if connection X is waiting for -** connection Y's transaction to be concluded, and similarly connection -** Y is waiting on connection X's transaction, then neither connection -** will proceed and the system may remain deadlocked indefinitely. -** -** To avoid this scenario, the sqlite3_unlock_notify() performs deadlock -** detection. ^If a given call to sqlite3_unlock_notify() would put the -** system in a deadlocked state, then SQLITE_LOCKED is returned and no -** unlock-notify callback is registered. The system is said to be in -** a deadlocked state if connection A has registered for an unlock-notify -** callback on the conclusion of connection B's transaction, and connection -** B has itself registered for an unlock-notify callback when connection -** A's transaction is concluded. ^Indirect deadlock is also detected, so -** the system is also considered to be deadlocked if connection B has -** registered for an unlock-notify callback on the conclusion of connection -** C's transaction, where connection C is waiting on connection A. ^Any -** number of levels of indirection are allowed. -** -** The "DROP TABLE" Exception -** -** When a call to [sqlite3_step()] returns SQLITE_LOCKED, it is almost -** always appropriate to call sqlite3_unlock_notify(). There is however, -** one exception. When executing a "DROP TABLE" or "DROP INDEX" statement, -** SQLite checks if there are any currently executing SELECT statements -** that belong to the same connection. If there are, SQLITE_LOCKED is -** returned. In this case there is no "blocking connection", so invoking -** sqlite3_unlock_notify() results in the unlock-notify callback being -** invoked immediately. If the application then re-attempts the "DROP TABLE" -** or "DROP INDEX" query, an infinite loop might be the result. -** -** One way around this problem is to check the extended error code returned -** by an sqlite3_step() call. ^(If there is a blocking connection, then the -** extended error code is set to SQLITE_LOCKED_SHAREDCACHE. Otherwise, in -** the special "DROP TABLE/INDEX" case, the extended error code is just -** SQLITE_LOCKED.)^ -*/ -SQLITE_API int sqlite3_unlock_notify( - sqlite3 *pBlocked, /* Waiting connection */ - void (*xNotify)(void **apArg, int nArg), /* Callback function to invoke */ - void *pNotifyArg /* Argument to pass to xNotify */ -); - - -/* -** CAPI3REF: String Comparison -** -** ^The [sqlite3_stricmp()] and [sqlite3_strnicmp()] APIs allow applications -** and extensions to compare the contents of two buffers containing UTF-8 -** strings in a case-independent fashion, using the same definition of "case -** independence" that SQLite uses internally when comparing identifiers. 
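/* Editor's note: illustrative sketch only, not part of the original header.
** It demonstrates the case-independent comparisons described above, using
** the same notion of "case independence" SQLite applies to identifiers. */
#include <assert.h>
#include <sqlite3.h>

static void identifier_compare_demo(void){
  assert( sqlite3_stricmp("LastName", "lastname")==0 );             /* equal, ignoring case */
  assert( sqlite3_strnicmp("CREATE TABLE t1(x)", "create", 6)==0 ); /* prefix comparison */
}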
-*/ -SQLITE_API int sqlite3_stricmp(const char *, const char *); -SQLITE_API int sqlite3_strnicmp(const char *, const char *, int); - -/* -** CAPI3REF: String Globbing -* -** ^The [sqlite3_strglob(P,X)] interface returns zero if string X matches -** the glob pattern P, and it returns non-zero if string X does not match -** the glob pattern P. ^The definition of glob pattern matching used in -** [sqlite3_strglob(P,X)] is the same as for the "X GLOB P" operator in the -** SQL dialect used by SQLite. ^The sqlite3_strglob(P,X) function is case -** sensitive. -** -** Note that this routine returns zero on a match and non-zero if the strings -** do not match, the same as [sqlite3_stricmp()] and [sqlite3_strnicmp()]. -*/ -SQLITE_API int sqlite3_strglob(const char *zGlob, const char *zStr); - -/* -** CAPI3REF: Error Logging Interface -** -** ^The [sqlite3_log()] interface writes a message into the [error log] -** established by the [SQLITE_CONFIG_LOG] option to [sqlite3_config()]. -** ^If logging is enabled, the zFormat string and subsequent arguments are -** used with [sqlite3_snprintf()] to generate the final output string. -** -** The sqlite3_log() interface is intended for use by extensions such as -** virtual tables, collating functions, and SQL functions. While there is -** nothing to prevent an application from calling sqlite3_log(), doing so -** is considered bad form. -** -** The zFormat string must not be NULL. -** -** To avoid deadlocks and other threading problems, the sqlite3_log() routine -** will not use dynamically allocated memory. The log message is stored in -** a fixed-length buffer on the stack. If the log message is longer than -** a few hundred characters, it will be truncated to the length of the -** buffer. -*/ -SQLITE_API void sqlite3_log(int iErrCode, const char *zFormat, ...); - -/* -** CAPI3REF: Write-Ahead Log Commit Hook -** -** ^The [sqlite3_wal_hook()] function is used to register a callback that -** will be invoked each time a database connection commits data to a -** [write-ahead log] (i.e. whenever a transaction is committed in -** [journal_mode | journal_mode=WAL mode]). -** -** ^The callback is invoked by SQLite after the commit has taken place and -** the associated write-lock on the database released, so the implementation -** may read, write or [checkpoint] the database as required. -** -** ^The first parameter passed to the callback function when it is invoked -** is a copy of the third parameter passed to sqlite3_wal_hook() when -** registering the callback. ^The second is a copy of the database handle. -** ^The third parameter is the name of the database that was written to - -** either "main" or the name of an [ATTACH]-ed database. ^The fourth parameter -** is the number of pages currently in the write-ahead log file, -** including those that were just committed. -** -** The callback function should normally return [SQLITE_OK]. ^If an error -** code is returned, that error will propagate back up through the -** SQLite code base to cause the statement that provoked the callback -** to report an error, though the commit will have still occurred. If the -** callback returns [SQLITE_ROW] or [SQLITE_DONE], or if it returns a value -** that does not correspond to any valid SQLite error code, the results -** are undefined. -** -** A single database handle may have at most a single write-ahead log callback -** registered at one time. ^Calling [sqlite3_wal_hook()] replaces any -** previously registered write-ahead log callback. 
^Note that the -** [sqlite3_wal_autocheckpoint()] interface and the -** [wal_autocheckpoint pragma] both invoke [sqlite3_wal_hook()] and will -** those overwrite any prior [sqlite3_wal_hook()] settings. -*/ -SQLITE_API void *sqlite3_wal_hook( - sqlite3*, - int(*)(void *,sqlite3*,const char*,int), - void* -); - -/* -** CAPI3REF: Configure an auto-checkpoint -** -** ^The [sqlite3_wal_autocheckpoint(D,N)] is a wrapper around -** [sqlite3_wal_hook()] that causes any database on [database connection] D -** to automatically [checkpoint] -** after committing a transaction if there are N or -** more frames in the [write-ahead log] file. ^Passing zero or -** a negative value as the nFrame parameter disables automatic -** checkpoints entirely. -** -** ^The callback registered by this function replaces any existing callback -** registered using [sqlite3_wal_hook()]. ^Likewise, registering a callback -** using [sqlite3_wal_hook()] disables the automatic checkpoint mechanism -** configured by this function. -** -** ^The [wal_autocheckpoint pragma] can be used to invoke this interface -** from SQL. -** -** ^Every new [database connection] defaults to having the auto-checkpoint -** enabled with a threshold of 1000 or [SQLITE_DEFAULT_WAL_AUTOCHECKPOINT] -** pages. The use of this interface -** is only necessary if the default setting is found to be suboptimal -** for a particular application. -*/ -SQLITE_API int sqlite3_wal_autocheckpoint(sqlite3 *db, int N); - -/* -** CAPI3REF: Checkpoint a database -** -** ^The [sqlite3_wal_checkpoint(D,X)] interface causes database named X -** on [database connection] D to be [checkpointed]. ^If X is NULL or an -** empty string, then a checkpoint is run on all databases of -** connection D. ^If the database connection D is not in -** [WAL | write-ahead log mode] then this interface is a harmless no-op. -** -** ^The [wal_checkpoint pragma] can be used to invoke this interface -** from SQL. ^The [sqlite3_wal_autocheckpoint()] interface and the -** [wal_autocheckpoint pragma] can be used to cause this interface to be -** run whenever the WAL reaches a certain size threshold. -** -** See also: [sqlite3_wal_checkpoint_v2()] -*/ -SQLITE_API int sqlite3_wal_checkpoint(sqlite3 *db, const char *zDb); - -/* -** CAPI3REF: Checkpoint a database -** -** Run a checkpoint operation on WAL database zDb attached to database -** handle db. The specific operation is determined by the value of the -** eMode parameter: -** -**
-** <dl>
-** <dt>SQLITE_CHECKPOINT_PASSIVE<dd>
-**   Checkpoint as many frames as possible without waiting for any database
-**   readers or writers to finish. Sync the db file if all frames in the log
-**   are checkpointed. This mode is the same as calling
-**   sqlite3_wal_checkpoint(). The busy-handler callback is never invoked.
-**
-** <dt>SQLITE_CHECKPOINT_FULL<dd>
-**   This mode blocks (calls the busy-handler callback) until there is no
-**   database writer and all readers are reading from the most recent database
-**   snapshot. It then checkpoints all frames in the log file and syncs the
-**   database file. This call blocks database writers while it is running,
-**   but not database readers.
-**
-** <dt>SQLITE_CHECKPOINT_RESTART<dd>
-**   This mode works the same way as SQLITE_CHECKPOINT_FULL, except after
-**   checkpointing the log file it blocks (calls the busy-handler callback)
-**   until all readers are reading from the database file only. This ensures
-**   that the next client to write to the database file restarts the log file
-**   from the beginning. This call blocks database writers while it is running,
-**   but not database readers.
-** </dl>
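/* Editor's note: illustrative sketch only, not part of the original header.
** It requests a PASSIVE checkpoint (the first mode listed above) on the
** "main" database and reports how much of the WAL was transferred. The
** helper name checkpoint_main and the open WAL-mode connection db are
** assumptions made for the example. */
#include <stdio.h>
#include <sqlite3.h>

static void checkpoint_main(sqlite3 *db){
  int nLog = 0, nCkpt = 0;
  int rc = sqlite3_wal_checkpoint_v2(db, "main",
                                     SQLITE_CHECKPOINT_PASSIVE, &nLog, &nCkpt);
  if( rc==SQLITE_OK ){
    printf("wal frames: %d, checkpointed: %d\n", nLog, nCkpt);
  }else if( rc==SQLITE_BUSY ){
    /* Another connection holds the checkpoint lock; try again later. */
  }
}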
-** -** If pnLog is not NULL, then *pnLog is set to the total number of frames in -** the log file before returning. If pnCkpt is not NULL, then *pnCkpt is set to -** the total number of checkpointed frames (including any that were already -** checkpointed when this function is called). *pnLog and *pnCkpt may be -** populated even if sqlite3_wal_checkpoint_v2() returns other than SQLITE_OK. -** If no values are available because of an error, they are both set to -1 -** before returning to communicate this to the caller. -** -** All calls obtain an exclusive "checkpoint" lock on the database file. If -** any other process is running a checkpoint operation at the same time, the -** lock cannot be obtained and SQLITE_BUSY is returned. Even if there is a -** busy-handler configured, it will not be invoked in this case. -** -** The SQLITE_CHECKPOINT_FULL and RESTART modes also obtain the exclusive -** "writer" lock on the database file. If the writer lock cannot be obtained -** immediately, and a busy-handler is configured, it is invoked and the writer -** lock retried until either the busy-handler returns 0 or the lock is -** successfully obtained. The busy-handler is also invoked while waiting for -** database readers as described above. If the busy-handler returns 0 before -** the writer lock is obtained or while waiting for database readers, the -** checkpoint operation proceeds from that point in the same way as -** SQLITE_CHECKPOINT_PASSIVE - checkpointing as many frames as possible -** without blocking any further. SQLITE_BUSY is returned in this case. -** -** If parameter zDb is NULL or points to a zero length string, then the -** specified operation is attempted on all WAL databases. In this case the -** values written to output parameters *pnLog and *pnCkpt are undefined. If -** an SQLITE_BUSY error is encountered when processing one or more of the -** attached WAL databases, the operation is still attempted on any remaining -** attached databases and SQLITE_BUSY is returned to the caller. If any other -** error occurs while processing an attached database, processing is abandoned -** and the error code returned to the caller immediately. If no error -** (SQLITE_BUSY or otherwise) is encountered while processing the attached -** databases, SQLITE_OK is returned. -** -** If database zDb is the name of an attached database that is not in WAL -** mode, SQLITE_OK is returned and both *pnLog and *pnCkpt set to -1. If -** zDb is not NULL (or a zero length string) and is not the name of any -** attached database, SQLITE_ERROR is returned to the caller. -*/ -SQLITE_API int sqlite3_wal_checkpoint_v2( - sqlite3 *db, /* Database handle */ - const char *zDb, /* Name of attached database (or NULL) */ - int eMode, /* SQLITE_CHECKPOINT_* value */ - int *pnLog, /* OUT: Size of WAL log in frames */ - int *pnCkpt /* OUT: Total number of frames checkpointed */ -); - -/* -** CAPI3REF: Checkpoint operation parameters -** -** These constants can be used as the 3rd parameter to -** [sqlite3_wal_checkpoint_v2()]. See the [sqlite3_wal_checkpoint_v2()] -** documentation for additional information about the meaning and use of -** each of these values. -*/ -#define SQLITE_CHECKPOINT_PASSIVE 0 -#define SQLITE_CHECKPOINT_FULL 1 -#define SQLITE_CHECKPOINT_RESTART 2 - -/* -** CAPI3REF: Virtual Table Interface Configuration -** -** This function may be called by either the [xConnect] or [xCreate] method -** of a [virtual table] implementation to configure -** various facets of the virtual table interface. 
-** -** If this interface is invoked outside the context of an xConnect or -** xCreate virtual table method then the behavior is undefined. -** -** At present, there is only one option that may be configured using -** this function. (See [SQLITE_VTAB_CONSTRAINT_SUPPORT].) Further options -** may be added in the future. -*/ -SQLITE_API int sqlite3_vtab_config(sqlite3*, int op, ...); - -/* -** CAPI3REF: Virtual Table Configuration Options -** -** These macros define the various options to the -** [sqlite3_vtab_config()] interface that [virtual table] implementations -** can use to customize and optimize their behavior. -** -**
-** <dl>
-** <dt>SQLITE_VTAB_CONSTRAINT_SUPPORT
-** <dd>Calls of the form
-** [sqlite3_vtab_config](db,SQLITE_VTAB_CONSTRAINT_SUPPORT,X) are supported,
-** where X is an integer. If X is zero, then the [virtual table] whose
-** [xCreate] or [xConnect] method invoked [sqlite3_vtab_config()] does not
-** support constraints. In this configuration (which is the default) if
-** a call to the [xUpdate] method returns [SQLITE_CONSTRAINT], then the entire
-** statement is rolled back as if [ON CONFLICT | OR ABORT] had been
-** specified as part of the user's SQL statement, regardless of the actual
-** ON CONFLICT mode specified.
-**
-** If X is non-zero, then the virtual table implementation guarantees
-** that if [xUpdate] returns [SQLITE_CONSTRAINT], it will do so before
-** any modifications to internal or persistent data structures have been made.
-** If the [ON CONFLICT] mode is ABORT, FAIL, IGNORE or ROLLBACK, SQLite
-** is able to roll back a statement or database transaction, and abandon
-** or continue processing the current SQL statement as appropriate.
-** If the ON CONFLICT mode is REPLACE and the [xUpdate] method returns
-** [SQLITE_CONSTRAINT], SQLite handles this as if the ON CONFLICT mode
-** had been ABORT.
-**
-** Virtual table implementations that are required to handle OR REPLACE
-** must do so within the [xUpdate] method. If a call to the
-** [sqlite3_vtab_on_conflict()] function indicates that the current ON
-** CONFLICT policy is REPLACE, the virtual table implementation should
-** silently replace the appropriate rows within the xUpdate callback and
-** return SQLITE_OK. Or, if this is not possible, it may return
-** SQLITE_CONSTRAINT, in which case SQLite falls back to OR ABORT
-** constraint handling.
-** </dl>
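/* Editor's note: illustrative sketch only, not part of the original header.
** It shows an xConnect method of a hypothetical virtual table declaring
** constraint support as described above, so that ON CONFLICT modes other
** than ABORT can be honored. The name demoConnect and the declared schema
** are assumptions made for the example. */
#include <string.h>
#include <sqlite3.h>

static int demoConnect(
  sqlite3 *db, void *pAux,
  int argc, const char *const *argv,
  sqlite3_vtab **ppVtab, char **pzErr
){
  int rc = sqlite3_declare_vtab(db, "CREATE TABLE x(a, b)");
  if( rc!=SQLITE_OK ) return rc;

  /* Promise that xUpdate reports SQLITE_CONSTRAINT before changing state. */
  rc = sqlite3_vtab_config(db, SQLITE_VTAB_CONSTRAINT_SUPPORT, 1);
  if( rc!=SQLITE_OK ) return rc;

  *ppVtab = (sqlite3_vtab *)sqlite3_malloc(sizeof(sqlite3_vtab));
  if( *ppVtab==0 ) return SQLITE_NOMEM;
  memset(*ppVtab, 0, sizeof(sqlite3_vtab));
  return SQLITE_OK;
}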
-*/ -#define SQLITE_VTAB_CONSTRAINT_SUPPORT 1 - -/* -** CAPI3REF: Determine The Virtual Table Conflict Policy -** -** This function may only be called from within a call to the [xUpdate] method -** of a [virtual table] implementation for an INSERT or UPDATE operation. ^The -** value returned is one of [SQLITE_ROLLBACK], [SQLITE_IGNORE], [SQLITE_FAIL], -** [SQLITE_ABORT], or [SQLITE_REPLACE], according to the [ON CONFLICT] mode -** of the SQL statement that triggered the call to the [xUpdate] method of the -** [virtual table]. -*/ -SQLITE_API int sqlite3_vtab_on_conflict(sqlite3 *); - -/* -** CAPI3REF: Conflict resolution modes -** -** These constants are returned by [sqlite3_vtab_on_conflict()] to -** inform a [virtual table] implementation what the [ON CONFLICT] mode -** is for the SQL statement being evaluated. -** -** Note that the [SQLITE_IGNORE] constant is also used as a potential -** return value from the [sqlite3_set_authorizer()] callback and that -** [SQLITE_ABORT] is also a [result code]. -*/ -#define SQLITE_ROLLBACK 1 -/* #define SQLITE_IGNORE 2 // Also used by sqlite3_authorizer() callback */ -#define SQLITE_FAIL 3 -/* #define SQLITE_ABORT 4 // Also an error code */ -#define SQLITE_REPLACE 5 - - - -/* -** Undo the hack that converts floating point types to integer for -** builds on processors without floating point support. -*/ -#ifdef SQLITE_OMIT_FLOATING_POINT -# undef double -#endif - -#if 0 -} /* End of the 'extern "C"' block */ -#endif -#endif /* _SQLITE3_H_ */ - -/* -** 2010 August 30 -** -** The author disclaims copyright to this source code. In place of -** a legal notice, here is a blessing: -** -** May you do good and not evil. -** May you find forgiveness for yourself and forgive others. -** May you share freely, never taking more than you give. -** -************************************************************************* -*/ - -#ifndef _SQLITE3RTREE_H_ -#define _SQLITE3RTREE_H_ - - -#if 0 -extern "C" { -#endif - -typedef struct sqlite3_rtree_geometry sqlite3_rtree_geometry; -typedef struct sqlite3_rtree_query_info sqlite3_rtree_query_info; - -/* The double-precision datatype used by RTree depends on the -** SQLITE_RTREE_INT_ONLY compile-time option. -*/ -#ifdef SQLITE_RTREE_INT_ONLY - typedef sqlite3_int64 sqlite3_rtree_dbl; -#else - typedef double sqlite3_rtree_dbl; -#endif - -/* -** Register a geometry callback named zGeom that can be used as part of an -** R-Tree geometry query as follows: -** -** SELECT ... FROM WHERE MATCH $zGeom(... params ...) -*/ -SQLITE_API int sqlite3_rtree_geometry_callback( - sqlite3 *db, - const char *zGeom, - int (*xGeom)(sqlite3_rtree_geometry*, int, sqlite3_rtree_dbl*,int*), - void *pContext -); - - -/* -** A pointer to a structure of the following type is passed as the first -** argument to callbacks registered using rtree_geometry_callback(). -*/ -struct sqlite3_rtree_geometry { - void *pContext; /* Copy of pContext passed to s_r_g_c() */ - int nParam; /* Size of array aParam[] */ - sqlite3_rtree_dbl *aParam; /* Parameters passed to SQL geom function */ - void *pUser; /* Callback implementation user data */ - void (*xDelUser)(void *); /* Called by SQLite to clean up pUser */ -}; - -/* -** Register a 2nd-generation geometry callback named zScore that can be -** used as part of an R-Tree geometry query as follows: -** -** SELECT ... FROM WHERE MATCH $zQueryFunc(... params ...) 
-*/ -SQLITE_API int sqlite3_rtree_query_callback( - sqlite3 *db, - const char *zQueryFunc, - int (*xQueryFunc)(sqlite3_rtree_query_info*), - void *pContext, - void (*xDestructor)(void*) -); - - -/* -** A pointer to a structure of the following type is passed as the -** argument to scored geometry callback registered using -** sqlite3_rtree_query_callback(). -** -** Note that the first 5 fields of this structure are identical to -** sqlite3_rtree_geometry. This structure is a subclass of -** sqlite3_rtree_geometry. -*/ -struct sqlite3_rtree_query_info { - void *pContext; /* pContext from when function registered */ - int nParam; /* Number of function parameters */ - sqlite3_rtree_dbl *aParam; /* value of function parameters */ - void *pUser; /* callback can use this, if desired */ - void (*xDelUser)(void*); /* function to free pUser */ - sqlite3_rtree_dbl *aCoord; /* Coordinates of node or entry to check */ - unsigned int *anQueue; /* Number of pending entries in the queue */ - int nCoord; /* Number of coordinates */ - int iLevel; /* Level of current node or entry */ - int mxLevel; /* The largest iLevel value in the tree */ - sqlite3_int64 iRowid; /* Rowid for current entry */ - sqlite3_rtree_dbl rParentScore; /* Score of parent node */ - int eParentWithin; /* Visibility of parent node */ - int eWithin; /* OUT: Visiblity */ - sqlite3_rtree_dbl rScore; /* OUT: Write the score here */ -}; - -/* -** Allowed values for sqlite3_rtree_query.eWithin and .eParentWithin. -*/ -#define NOT_WITHIN 0 /* Object completely outside of query region */ -#define PARTLY_WITHIN 1 /* Object partially overlaps query region */ -#define FULLY_WITHIN 2 /* Object fully contained within query region */ - - -#if 0 -} /* end of the 'extern "C"' block */ -#endif - -#endif /* ifndef _SQLITE3RTREE_H_ */ - - -/************** End of sqlite3.h *********************************************/ -/************** Continuing where we left off in sqliteInt.h ******************/ - -/* -** Include the configuration header output by 'configure' if we're using the -** autoconf-based build -*/ -#ifdef _HAVE_SQLITE_CONFIG_H -#include "config.h" -#endif - -/************** Include sqliteLimit.h in the middle of sqliteInt.h ***********/ -/************** Begin file sqliteLimit.h *************************************/ -/* -** 2007 May 7 -** -** The author disclaims copyright to this source code. In place of -** a legal notice, here is a blessing: -** -** May you do good and not evil. -** May you find forgiveness for yourself and forgive others. -** May you share freely, never taking more than you give. -** -************************************************************************* -** -** This file defines various limits of what SQLite can process. -*/ - -/* -** The maximum length of a TEXT or BLOB in bytes. This also -** limits the size of a row in a table or index. -** -** The hard limit is the ability of a 32-bit signed integer -** to count the size: 2^31-1 or 2147483647. -*/ -#ifndef SQLITE_MAX_LENGTH -# define SQLITE_MAX_LENGTH 1000000000 -#endif - -/* -** This is the maximum number of -** -** * Columns in a table -** * Columns in an index -** * Columns in a view -** * Terms in the SET clause of an UPDATE statement -** * Terms in the result set of a SELECT statement -** * Terms in the GROUP BY or ORDER BY clauses of a SELECT statement. -** * Terms in the VALUES clause of an INSERT statement -** -** The hard upper limit here is 32676. 
Most database people will -** tell you that in a well-normalized database, you usually should -** not have more than a dozen or so columns in any table. And if -** that is the case, there is no point in having more than a few -** dozen values in any of the other situations described above. -*/ -#ifndef SQLITE_MAX_COLUMN -# define SQLITE_MAX_COLUMN 2000 -#endif - -/* -** The maximum length of a single SQL statement in bytes. -** -** It used to be the case that setting this value to zero would -** turn the limit off. That is no longer true. It is not possible -** to turn this limit off. -*/ -#ifndef SQLITE_MAX_SQL_LENGTH -# define SQLITE_MAX_SQL_LENGTH 1000000000 -#endif - -/* -** The maximum depth of an expression tree. This is limited to -** some extent by SQLITE_MAX_SQL_LENGTH. But sometime you might -** want to place more severe limits on the complexity of an -** expression. -** -** A value of 0 used to mean that the limit was not enforced. -** But that is no longer true. The limit is now strictly enforced -** at all times. -*/ -#ifndef SQLITE_MAX_EXPR_DEPTH -# define SQLITE_MAX_EXPR_DEPTH 1000 -#endif - -/* -** The maximum number of terms in a compound SELECT statement. -** The code generator for compound SELECT statements does one -** level of recursion for each term. A stack overflow can result -** if the number of terms is too large. In practice, most SQL -** never has more than 3 or 4 terms. Use a value of 0 to disable -** any limit on the number of terms in a compount SELECT. -*/ -#ifndef SQLITE_MAX_COMPOUND_SELECT -# define SQLITE_MAX_COMPOUND_SELECT 500 -#endif - -/* -** The maximum number of opcodes in a VDBE program. -** Not currently enforced. -*/ -#ifndef SQLITE_MAX_VDBE_OP -# define SQLITE_MAX_VDBE_OP 25000 -#endif - -/* -** The maximum number of arguments to an SQL function. -*/ -#ifndef SQLITE_MAX_FUNCTION_ARG -# define SQLITE_MAX_FUNCTION_ARG 127 -#endif - -/* -** The maximum number of in-memory pages to use for the main database -** table and for temporary tables. The SQLITE_DEFAULT_CACHE_SIZE -*/ -#ifndef SQLITE_DEFAULT_CACHE_SIZE -# define SQLITE_DEFAULT_CACHE_SIZE 2000 -#endif -#ifndef SQLITE_DEFAULT_TEMP_CACHE_SIZE -# define SQLITE_DEFAULT_TEMP_CACHE_SIZE 500 -#endif - -/* -** The default number of frames to accumulate in the log file before -** checkpointing the database in WAL mode. -*/ -#ifndef SQLITE_DEFAULT_WAL_AUTOCHECKPOINT -# define SQLITE_DEFAULT_WAL_AUTOCHECKPOINT 1000 -#endif - -/* -** The maximum number of attached databases. This must be between 0 -** and 62. The upper bound on 62 is because a 64-bit integer bitmap -** is used internally to track attached databases. -*/ -#ifndef SQLITE_MAX_ATTACHED -# define SQLITE_MAX_ATTACHED 10 -#endif - - -/* -** The maximum value of a ?nnn wildcard that the parser will accept. -*/ -#ifndef SQLITE_MAX_VARIABLE_NUMBER -# define SQLITE_MAX_VARIABLE_NUMBER 999 -#endif - -/* Maximum page size. The upper bound on this value is 65536. This a limit -** imposed by the use of 16-bit offsets within each page. -** -** Earlier versions of SQLite allowed the user to change this value at -** compile time. This is no longer permitted, on the grounds that it creates -** a library that is technically incompatible with an SQLite library -** compiled with a different limit. If a process operating on a database -** with a page-size of 65536 bytes crashes, then an instance of SQLite -** compiled with the default page-size limit will not be able to rollback -** the aborted transaction. This could lead to database corruption. 
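/* Editor's note: illustrative sketch only, not part of the original header.
** Many of the compile-time maximums in this section can also be lowered
** per connection at run time through sqlite3_limit(), which is declared
** earlier in this header; a negative new value just reports the current
** limit. The helper name tighten_limits is an assumption made for the
** example. */
#include <stdio.h>
#include <sqlite3.h>

static void tighten_limits(sqlite3 *db){
  int oldLen = sqlite3_limit(db, SQLITE_LIMIT_LENGTH, 1000000);    /* cap rows/blobs at 1MB */
  int oldSql = sqlite3_limit(db, SQLITE_LIMIT_SQL_LENGTH, 100000); /* cap statement text */
  printf("previous limits: length=%d sql=%d attached=%d\n",
         oldLen, oldSql, sqlite3_limit(db, SQLITE_LIMIT_ATTACHED, -1));
}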
-*/ -#ifdef SQLITE_MAX_PAGE_SIZE -# undef SQLITE_MAX_PAGE_SIZE -#endif -#define SQLITE_MAX_PAGE_SIZE 65536 - - -/* -** The default size of a database page. -*/ -#ifndef SQLITE_DEFAULT_PAGE_SIZE -# define SQLITE_DEFAULT_PAGE_SIZE 1024 -#endif -#if SQLITE_DEFAULT_PAGE_SIZE>SQLITE_MAX_PAGE_SIZE -# undef SQLITE_DEFAULT_PAGE_SIZE -# define SQLITE_DEFAULT_PAGE_SIZE SQLITE_MAX_PAGE_SIZE -#endif - -/* -** Ordinarily, if no value is explicitly provided, SQLite creates databases -** with page size SQLITE_DEFAULT_PAGE_SIZE. However, based on certain -** device characteristics (sector-size and atomic write() support), -** SQLite may choose a larger value. This constant is the maximum value -** SQLite will choose on its own. -*/ -#ifndef SQLITE_MAX_DEFAULT_PAGE_SIZE -# define SQLITE_MAX_DEFAULT_PAGE_SIZE 8192 -#endif -#if SQLITE_MAX_DEFAULT_PAGE_SIZE>SQLITE_MAX_PAGE_SIZE -# undef SQLITE_MAX_DEFAULT_PAGE_SIZE -# define SQLITE_MAX_DEFAULT_PAGE_SIZE SQLITE_MAX_PAGE_SIZE -#endif - - -/* -** Maximum number of pages in one database file. -** -** This is really just the default value for the max_page_count pragma. -** This value can be lowered (or raised) at run-time using that the -** max_page_count macro. -*/ -#ifndef SQLITE_MAX_PAGE_COUNT -# define SQLITE_MAX_PAGE_COUNT 1073741823 -#endif - -/* -** Maximum length (in bytes) of the pattern in a LIKE or GLOB -** operator. -*/ -#ifndef SQLITE_MAX_LIKE_PATTERN_LENGTH -# define SQLITE_MAX_LIKE_PATTERN_LENGTH 50000 -#endif - -/* -** Maximum depth of recursion for triggers. -** -** A value of 1 means that a trigger program will not be able to itself -** fire any triggers. A value of 0 means that no trigger programs at all -** may be executed. -*/ -#ifndef SQLITE_MAX_TRIGGER_DEPTH -# define SQLITE_MAX_TRIGGER_DEPTH 1000 -#endif - -/************** End of sqliteLimit.h *****************************************/ -/************** Continuing where we left off in sqliteInt.h ******************/ - -/* Disable nuisance warnings on Borland compilers */ -#if defined(__BORLANDC__) -#pragma warn -rch /* unreachable code */ -#pragma warn -ccc /* Condition is always true or false */ -#pragma warn -aus /* Assigned value is never used */ -#pragma warn -csu /* Comparing signed and unsigned */ -#pragma warn -spa /* Suspicious pointer arithmetic */ -#endif - -/* Needed for various definitions... */ -#ifndef _GNU_SOURCE -# define _GNU_SOURCE -#endif - -#if defined(__OpenBSD__) && !defined(_BSD_SOURCE) -# define _BSD_SOURCE -#endif - -/* -** Include standard header files as necessary -*/ -#ifdef HAVE_STDINT_H -#include -#endif -#ifdef HAVE_INTTYPES_H -#include -#endif - -/* -** The following macros are used to cast pointers to integers and -** integers to pointers. The way you do this varies from one compiler -** to the next, so we have developed the following set of #if statements -** to generate appropriate macros for a wide range of compilers. -** -** The correct "ANSI" way to do this is to use the intptr_t type. -** Unfortunately, that typedef is not available on all compilers, or -** if it is available, it requires an #include of specific headers -** that vary from one machine to the next. -** -** Ticket #3860: The llvm-gcc-4.2 compiler from Apple chokes on -** the ((void*)&((char*)0)[X]) construct. But MSVC chokes on ((void*)(X)). -** So we have to define the macros in different ways depending on the -** compiler. 
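Several of the limits above also surface as pragmas on an open connection. A rough illustration with arbitrary values; note that page_size only takes effect on a brand-new database or after VACUUM:

#include <sqlite3.h>

/* Illustrative only: choose a page size and cap the file at 10000 pages
** (8192 bytes * 10000 pages is roughly an 80 MB ceiling). */
static int configure_db(sqlite3 *db){
  int rc = sqlite3_exec(db, "PRAGMA page_size = 8192;", 0, 0, 0);
  if( rc==SQLITE_OK ){
    rc = sqlite3_exec(db, "PRAGMA max_page_count = 10000;", 0, 0, 0);
  }
  return rc;
}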
-*/ -#if defined(__PTRDIFF_TYPE__) /* This case should work for GCC */ -# define SQLITE_INT_TO_PTR(X) ((void*)(__PTRDIFF_TYPE__)(X)) -# define SQLITE_PTR_TO_INT(X) ((int)(__PTRDIFF_TYPE__)(X)) -#elif !defined(__GNUC__) /* Works for compilers other than LLVM */ -# define SQLITE_INT_TO_PTR(X) ((void*)&((char*)0)[X]) -# define SQLITE_PTR_TO_INT(X) ((int)(((char*)X)-(char*)0)) -#elif defined(HAVE_STDINT_H) /* Use this case if we have ANSI headers */ -# define SQLITE_INT_TO_PTR(X) ((void*)(intptr_t)(X)) -# define SQLITE_PTR_TO_INT(X) ((int)(intptr_t)(X)) -#else /* Generates a warning - but it always works */ -# define SQLITE_INT_TO_PTR(X) ((void*)(X)) -# define SQLITE_PTR_TO_INT(X) ((int)(X)) -#endif - -/* -** The SQLITE_THREADSAFE macro must be defined as 0, 1, or 2. -** 0 means mutexes are permanently disable and the library is never -** threadsafe. 1 means the library is serialized which is the highest -** level of threadsafety. 2 means the library is multithreaded - multiple -** threads can use SQLite as long as no two threads try to use the same -** database connection at the same time. -** -** Older versions of SQLite used an optional THREADSAFE macro. -** We support that for legacy. -*/ -#if !defined(SQLITE_THREADSAFE) -# if defined(THREADSAFE) -# define SQLITE_THREADSAFE THREADSAFE -# else -# define SQLITE_THREADSAFE 1 /* IMP: R-07272-22309 */ -# endif -#endif - -/* -** Powersafe overwrite is on by default. But can be turned off using -** the -DSQLITE_POWERSAFE_OVERWRITE=0 command-line option. -*/ -#ifndef SQLITE_POWERSAFE_OVERWRITE -# define SQLITE_POWERSAFE_OVERWRITE 1 -#endif - -/* -** The SQLITE_DEFAULT_MEMSTATUS macro must be defined as either 0 or 1. -** It determines whether or not the features related to -** SQLITE_CONFIG_MEMSTATUS are available by default or not. This value can -** be overridden at runtime using the sqlite3_config() API. -*/ -#if !defined(SQLITE_DEFAULT_MEMSTATUS) -# define SQLITE_DEFAULT_MEMSTATUS 1 -#endif - -/* -** Exactly one of the following macros must be defined in order to -** specify which memory allocation subsystem to use. -** -** SQLITE_SYSTEM_MALLOC // Use normal system malloc() -** SQLITE_WIN32_MALLOC // Use Win32 native heap API -** SQLITE_ZERO_MALLOC // Use a stub allocator that always fails -** SQLITE_MEMDEBUG // Debugging version of system malloc() -** -** On Windows, if the SQLITE_WIN32_MALLOC_VALIDATE macro is defined and the -** assert() macro is enabled, each call into the Win32 native heap subsystem -** will cause HeapValidate to be called. If heap validation should fail, an -** assertion will be triggered. -** -** If none of the above are defined, then set SQLITE_SYSTEM_MALLOC as -** the default. -*/ -#if defined(SQLITE_SYSTEM_MALLOC) \ - + defined(SQLITE_WIN32_MALLOC) \ - + defined(SQLITE_ZERO_MALLOC) \ - + defined(SQLITE_MEMDEBUG)>1 -# error "Two or more of the following compile-time configuration options\ - are defined but at most one is allowed:\ - SQLITE_SYSTEM_MALLOC, SQLITE_WIN32_MALLOC, SQLITE_MEMDEBUG,\ - SQLITE_ZERO_MALLOC" -#endif -#if defined(SQLITE_SYSTEM_MALLOC) \ - + defined(SQLITE_WIN32_MALLOC) \ - + defined(SQLITE_ZERO_MALLOC) \ - + defined(SQLITE_MEMDEBUG)==0 -# define SQLITE_SYSTEM_MALLOC 1 -#endif - -/* -** If SQLITE_MALLOC_SOFT_LIMIT is not zero, then try to keep the -** sizes of memory allocations below this value where possible. 
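The allocator selection above uses a compact preprocessor idiom: inside #if, defined(X) evaluates to 0 or 1, so summing the terms counts how many mutually exclusive options were requested. A generic sketch of the same pattern, with invented option names:

/* Hypothetical build options: at most one backend may be selected,
** and backend A is the default when none is named. */
#if defined(FOO_BACKEND_A) + defined(FOO_BACKEND_B) + defined(FOO_BACKEND_C) > 1
# error "Define at most one of FOO_BACKEND_A, FOO_BACKEND_B, FOO_BACKEND_C"
#endif
#if defined(FOO_BACKEND_A) + defined(FOO_BACKEND_B) + defined(FOO_BACKEND_C) == 0
# define FOO_BACKEND_A 1
#endif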
-*/ -#if !defined(SQLITE_MALLOC_SOFT_LIMIT) -# define SQLITE_MALLOC_SOFT_LIMIT 1024 -#endif - -/* -** We need to define _XOPEN_SOURCE as follows in order to enable -** recursive mutexes on most Unix systems and fchmod() on OpenBSD. -** But _XOPEN_SOURCE define causes problems for Mac OS X, so omit -** it. -*/ -#if !defined(_XOPEN_SOURCE) && !defined(__DARWIN__) && !defined(__APPLE__) -# define _XOPEN_SOURCE 600 -#endif - -/* -** NDEBUG and SQLITE_DEBUG are opposites. It should always be true that -** defined(NDEBUG)==!defined(SQLITE_DEBUG). If this is not currently true, -** make it true by defining or undefining NDEBUG. -** -** Setting NDEBUG makes the code smaller and faster by disabling the -** assert() statements in the code. So we want the default action -** to be for NDEBUG to be set and NDEBUG to be undefined only if SQLITE_DEBUG -** is set. Thus NDEBUG becomes an opt-in rather than an opt-out -** feature. -*/ -#if !defined(NDEBUG) && !defined(SQLITE_DEBUG) -# define NDEBUG 1 -#endif -#if defined(NDEBUG) && defined(SQLITE_DEBUG) -# undef NDEBUG -#endif - -/* -** Enable SQLITE_ENABLE_EXPLAIN_COMMENTS if SQLITE_DEBUG is turned on. -*/ -#if !defined(SQLITE_ENABLE_EXPLAIN_COMMENTS) && defined(SQLITE_DEBUG) -# define SQLITE_ENABLE_EXPLAIN_COMMENTS 1 -#endif - -/* -** The testcase() macro is used to aid in coverage testing. When -** doing coverage testing, the condition inside the argument to -** testcase() must be evaluated both true and false in order to -** get full branch coverage. The testcase() macro is inserted -** to help ensure adequate test coverage in places where simple -** condition/decision coverage is inadequate. For example, testcase() -** can be used to make sure boundary values are tested. For -** bitmask tests, testcase() can be used to make sure each bit -** is significant and used at least once. On switch statements -** where multiple cases go to the same block of code, testcase() -** can insure that all cases are evaluated. -** -*/ -#ifdef SQLITE_COVERAGE_TEST -SQLITE_PRIVATE void sqlite3Coverage(int); -# define testcase(X) if( X ){ sqlite3Coverage(__LINE__); } -#else -# define testcase(X) -#endif - -/* -** The TESTONLY macro is used to enclose variable declarations or -** other bits of code that are needed to support the arguments -** within testcase() and assert() macros. -*/ -#if !defined(NDEBUG) || defined(SQLITE_COVERAGE_TEST) -# define TESTONLY(X) X -#else -# define TESTONLY(X) -#endif - -/* -** Sometimes we need a small amount of code such as a variable initialization -** to setup for a later assert() statement. We do not want this code to -** appear when assert() is disabled. The following macro is therefore -** used to contain that setup code. The "VVA" acronym stands for -** "Verification, Validation, and Accreditation". In other words, the -** code within VVA_ONLY() will only run during verification processes. -*/ -#ifndef NDEBUG -# define VVA_ONLY(X) X -#else -# define VVA_ONLY(X) -#endif - -/* -** The ALWAYS and NEVER macros surround boolean expressions which -** are intended to always be true or false, respectively. Such -** expressions could be omitted from the code completely. But they -** are included in a few cases in order to enhance the resilience -** of SQLite to unexpected behavior - to make the code "self-healing" -** or "ductile" rather than being "brittle" and crashing at the first -** hint of unplanned behavior. -** -** In other words, ALWAYS and NEVER are added for defensive code. 
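For orientation, the fragment below is an invented function showing the intended idiom for these macros: testcase() records that a boundary value was exercised in a coverage build, while ALWAYS() documents a condition believed to hold everywhere without removing the run-time check from debug builds.

#include <string.h>

/* Illustrative only; assumes the testcase()/ALWAYS() definitions above. */
static int copy_bytes(char *dst, const char *src, int n, int limit){
  testcase( n==limit );     /* boundary: exactly at the limit */
  testcase( n==limit+1 );   /* boundary: one byte past the limit */
  if( n>limit ) return -1;
  if( ALWAYS(src!=0) ){     /* callers never pass NULL, but stay defensive */
    memcpy(dst, src, (size_t)n);
  }
  return n;
}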
-** -** When doing coverage testing ALWAYS and NEVER are hard-coded to -** be true and false so that the unreachable code they specify will -** not be counted as untested code. -*/ -#if defined(SQLITE_COVERAGE_TEST) -# define ALWAYS(X) (1) -# define NEVER(X) (0) -#elif !defined(NDEBUG) -# define ALWAYS(X) ((X)?1:(assert(0),0)) -# define NEVER(X) ((X)?(assert(0),1):0) -#else -# define ALWAYS(X) (X) -# define NEVER(X) (X) -#endif - -/* -** Return true (non-zero) if the input is a integer that is too large -** to fit in 32-bits. This macro is used inside of various testcase() -** macros to verify that we have tested SQLite for large-file support. -*/ -#define IS_BIG_INT(X) (((X)&~(i64)0xffffffff)!=0) - -/* -** The macro unlikely() is a hint that surrounds a boolean -** expression that is usually false. Macro likely() surrounds -** a boolean expression that is usually true. These hints could, -** in theory, be used by the compiler to generate better code, but -** currently they are just comments for human readers. -*/ -#define likely(X) (X) -#define unlikely(X) (X) - -/************** Include hash.h in the middle of sqliteInt.h ******************/ -/************** Begin file hash.h ********************************************/ -/* -** 2001 September 22 -** -** The author disclaims copyright to this source code. In place of -** a legal notice, here is a blessing: -** -** May you do good and not evil. -** May you find forgiveness for yourself and forgive others. -** May you share freely, never taking more than you give. -** -************************************************************************* -** This is the header file for the generic hash-table implementation -** used in SQLite. -*/ -#ifndef _SQLITE_HASH_H_ -#define _SQLITE_HASH_H_ - -/* Forward declarations of structures. */ -typedef struct Hash Hash; -typedef struct HashElem HashElem; - -/* A complete hash table is an instance of the following structure. -** The internals of this structure are intended to be opaque -- client -** code should not attempt to access or modify the fields of this structure -** directly. Change this structure only by using the routines below. -** However, some of the "procedures" and "functions" for modifying and -** accessing this structure are really macros, so we can't really make -** this structure opaque. -** -** All elements of the hash table are on a single doubly-linked list. -** Hash.first points to the head of this list. -** -** There are Hash.htsize buckets. Each bucket points to a spot in -** the global doubly-linked list. The contents of the bucket are the -** element pointed to plus the next _ht.count-1 elements in the list. -** -** Hash.htsize and Hash.ht may be zero. In that case lookup is done -** by a linear search of the global list. For small tables, the -** Hash.ht table is never allocated because if there are few elements -** in the table, it is faster to do a linear search than to manage -** the hash table. -*/ -struct Hash { - unsigned int htsize; /* Number of buckets in the hash table */ - unsigned int count; /* Number of entries in this table */ - HashElem *first; /* The first element of the array */ - struct _ht { /* the hash table */ - int count; /* Number of entries with this hash */ - HashElem *chain; /* Pointer to first entry with this hash */ - } *ht; -}; - -/* Each element in the hash table is an instance of the following -** structure. All elements are stored on a single doubly-linked list. 
-** -** Again, this structure is intended to be opaque, but it can't really -** be opaque because it is used by macros. -*/ -struct HashElem { - HashElem *next, *prev; /* Next and previous elements in the table */ - void *data; /* Data associated with this element */ - const char *pKey; int nKey; /* Key associated with this element */ -}; - -/* -** Access routines. To delete, insert a NULL pointer. -*/ -SQLITE_PRIVATE void sqlite3HashInit(Hash*); -SQLITE_PRIVATE void *sqlite3HashInsert(Hash*, const char *pKey, int nKey, void *pData); -SQLITE_PRIVATE void *sqlite3HashFind(const Hash*, const char *pKey, int nKey); -SQLITE_PRIVATE void sqlite3HashClear(Hash*); - -/* -** Macros for looping over all elements of a hash table. The idiom is -** like this: -** -** Hash h; -** HashElem *p; -** ... -** for(p=sqliteHashFirst(&h); p; p=sqliteHashNext(p)){ -** SomeStructure *pData = sqliteHashData(p); -** // do something with pData -** } -*/ -#define sqliteHashFirst(H) ((H)->first) -#define sqliteHashNext(E) ((E)->next) -#define sqliteHashData(E) ((E)->data) -/* #define sqliteHashKey(E) ((E)->pKey) // NOT USED */ -/* #define sqliteHashKeysize(E) ((E)->nKey) // NOT USED */ - -/* -** Number of entries in a hash table -*/ -/* #define sqliteHashCount(H) ((H)->count) // NOT USED */ - -#endif /* _SQLITE_HASH_H_ */ - -/************** End of hash.h ************************************************/ -/************** Continuing where we left off in sqliteInt.h ******************/ -/************** Include parse.h in the middle of sqliteInt.h *****************/ -/************** Begin file parse.h *******************************************/ -#define TK_SEMI 1 -#define TK_EXPLAIN 2 -#define TK_QUERY 3 -#define TK_PLAN 4 -#define TK_BEGIN 5 -#define TK_TRANSACTION 6 -#define TK_DEFERRED 7 -#define TK_IMMEDIATE 8 -#define TK_EXCLUSIVE 9 -#define TK_COMMIT 10 -#define TK_END 11 -#define TK_ROLLBACK 12 -#define TK_SAVEPOINT 13 -#define TK_RELEASE 14 -#define TK_TO 15 -#define TK_TABLE 16 -#define TK_CREATE 17 -#define TK_IF 18 -#define TK_NOT 19 -#define TK_EXISTS 20 -#define TK_TEMP 21 -#define TK_LP 22 -#define TK_RP 23 -#define TK_AS 24 -#define TK_WITHOUT 25 -#define TK_COMMA 26 -#define TK_ID 27 -#define TK_INDEXED 28 -#define TK_ABORT 29 -#define TK_ACTION 30 -#define TK_AFTER 31 -#define TK_ANALYZE 32 -#define TK_ASC 33 -#define TK_ATTACH 34 -#define TK_BEFORE 35 -#define TK_BY 36 -#define TK_CASCADE 37 -#define TK_CAST 38 -#define TK_COLUMNKW 39 -#define TK_CONFLICT 40 -#define TK_DATABASE 41 -#define TK_DESC 42 -#define TK_DETACH 43 -#define TK_EACH 44 -#define TK_FAIL 45 -#define TK_FOR 46 -#define TK_IGNORE 47 -#define TK_INITIALLY 48 -#define TK_INSTEAD 49 -#define TK_LIKE_KW 50 -#define TK_MATCH 51 -#define TK_NO 52 -#define TK_KEY 53 -#define TK_OF 54 -#define TK_OFFSET 55 -#define TK_PRAGMA 56 -#define TK_RAISE 57 -#define TK_RECURSIVE 58 -#define TK_REPLACE 59 -#define TK_RESTRICT 60 -#define TK_ROW 61 -#define TK_TRIGGER 62 -#define TK_VACUUM 63 -#define TK_VIEW 64 -#define TK_VIRTUAL 65 -#define TK_WITH 66 -#define TK_REINDEX 67 -#define TK_RENAME 68 -#define TK_CTIME_KW 69 -#define TK_ANY 70 -#define TK_OR 71 -#define TK_AND 72 -#define TK_IS 73 -#define TK_BETWEEN 74 -#define TK_IN 75 -#define TK_ISNULL 76 -#define TK_NOTNULL 77 -#define TK_NE 78 -#define TK_EQ 79 -#define TK_GT 80 -#define TK_LE 81 -#define TK_LT 82 -#define TK_GE 83 -#define TK_ESCAPE 84 -#define TK_BITAND 85 -#define TK_BITOR 86 -#define TK_LSHIFT 87 -#define TK_RSHIFT 88 -#define TK_PLUS 89 -#define TK_MINUS 90 -#define TK_STAR 91 
-#define TK_SLASH 92 -#define TK_REM 93 -#define TK_CONCAT 94 -#define TK_COLLATE 95 -#define TK_BITNOT 96 -#define TK_STRING 97 -#define TK_JOIN_KW 98 -#define TK_CONSTRAINT 99 -#define TK_DEFAULT 100 -#define TK_NULL 101 -#define TK_PRIMARY 102 -#define TK_UNIQUE 103 -#define TK_CHECK 104 -#define TK_REFERENCES 105 -#define TK_AUTOINCR 106 -#define TK_ON 107 -#define TK_INSERT 108 -#define TK_DELETE 109 -#define TK_UPDATE 110 -#define TK_SET 111 -#define TK_DEFERRABLE 112 -#define TK_FOREIGN 113 -#define TK_DROP 114 -#define TK_UNION 115 -#define TK_ALL 116 -#define TK_EXCEPT 117 -#define TK_INTERSECT 118 -#define TK_SELECT 119 -#define TK_VALUES 120 -#define TK_DISTINCT 121 -#define TK_DOT 122 -#define TK_FROM 123 -#define TK_JOIN 124 -#define TK_USING 125 -#define TK_ORDER 126 -#define TK_GROUP 127 -#define TK_HAVING 128 -#define TK_LIMIT 129 -#define TK_WHERE 130 -#define TK_INTO 131 -#define TK_INTEGER 132 -#define TK_FLOAT 133 -#define TK_BLOB 134 -#define TK_VARIABLE 135 -#define TK_CASE 136 -#define TK_WHEN 137 -#define TK_THEN 138 -#define TK_ELSE 139 -#define TK_INDEX 140 -#define TK_ALTER 141 -#define TK_ADD 142 -#define TK_TO_TEXT 143 -#define TK_TO_BLOB 144 -#define TK_TO_NUMERIC 145 -#define TK_TO_INT 146 -#define TK_TO_REAL 147 -#define TK_ISNOT 148 -#define TK_END_OF_FILE 149 -#define TK_ILLEGAL 150 -#define TK_SPACE 151 -#define TK_UNCLOSED_STRING 152 -#define TK_FUNCTION 153 -#define TK_COLUMN 154 -#define TK_AGG_FUNCTION 155 -#define TK_AGG_COLUMN 156 -#define TK_UMINUS 157 -#define TK_UPLUS 158 -#define TK_REGISTER 159 - -/************** End of parse.h ***********************************************/ -/************** Continuing where we left off in sqliteInt.h ******************/ -#include -#include -#include -#include -#include - -/* -** If compiling for a processor that lacks floating point support, -** substitute integer for floating-point -*/ -#ifdef SQLITE_OMIT_FLOATING_POINT -# define double sqlite_int64 -# define float sqlite_int64 -# define LONGDOUBLE_TYPE sqlite_int64 -# ifndef SQLITE_BIG_DBL -# define SQLITE_BIG_DBL (((sqlite3_int64)1)<<50) -# endif -# define SQLITE_OMIT_DATETIME_FUNCS 1 -# define SQLITE_OMIT_TRACE 1 -# undef SQLITE_MIXED_ENDIAN_64BIT_FLOAT -# undef SQLITE_HAVE_ISNAN -#endif -#ifndef SQLITE_BIG_DBL -# define SQLITE_BIG_DBL (1e99) -#endif - -/* -** OMIT_TEMPDB is set to 1 if SQLITE_OMIT_TEMPDB is defined, or 0 -** afterward. Having this macro allows us to cause the C compiler -** to omit code used by TEMP tables without messy #ifndef statements. -*/ -#ifdef SQLITE_OMIT_TEMPDB -#define OMIT_TEMPDB 1 -#else -#define OMIT_TEMPDB 0 -#endif - -/* -** The "file format" number is an integer that is incremented whenever -** the VDBE-level file format changes. The following macros define the -** the default file format for new databases and the maximum file format -** that the library can read. -*/ -#define SQLITE_MAX_FILE_FORMAT 4 -#ifndef SQLITE_DEFAULT_FILE_FORMAT -# define SQLITE_DEFAULT_FILE_FORMAT 4 -#endif - -/* -** Determine whether triggers are recursive by default. This can be -** changed at run-time using a pragma. -*/ -#ifndef SQLITE_DEFAULT_RECURSIVE_TRIGGERS -# define SQLITE_DEFAULT_RECURSIVE_TRIGGERS 0 -#endif - -/* -** Provide a default value for SQLITE_TEMP_STORE in case it is not specified -** on the command-line -*/ -#ifndef SQLITE_TEMP_STORE -# define SQLITE_TEMP_STORE 1 -# define SQLITE_TEMP_STORE_xc 1 /* Exclude from ctime.c */ -#endif - -/* -** GCC does not define the offsetof() macro so we'll have to do it -** ourselves. 
-*/ -#ifndef offsetof -#define offsetof(STRUCTURE,FIELD) ((int)((char*)&((STRUCTURE*)0)->FIELD)) -#endif - -/* -** Macros to compute minimum and maximum of two numbers. -*/ -#define MIN(A,B) ((A)<(B)?(A):(B)) -#define MAX(A,B) ((A)>(B)?(A):(B)) - -/* -** Check to see if this machine uses EBCDIC. (Yes, believe it or -** not, there are still machines out there that use EBCDIC.) -*/ -#if 'A' == '\301' -# define SQLITE_EBCDIC 1 -#else -# define SQLITE_ASCII 1 -#endif - -/* -** Integers of known sizes. These typedefs might change for architectures -** where the sizes very. Preprocessor macros are available so that the -** types can be conveniently redefined at compile-type. Like this: -** -** cc '-DUINTPTR_TYPE=long long int' ... -*/ -#ifndef UINT32_TYPE -# ifdef HAVE_UINT32_T -# define UINT32_TYPE uint32_t -# else -# define UINT32_TYPE unsigned int -# endif -#endif -#ifndef UINT16_TYPE -# ifdef HAVE_UINT16_T -# define UINT16_TYPE uint16_t -# else -# define UINT16_TYPE unsigned short int -# endif -#endif -#ifndef INT16_TYPE -# ifdef HAVE_INT16_T -# define INT16_TYPE int16_t -# else -# define INT16_TYPE short int -# endif -#endif -#ifndef UINT8_TYPE -# ifdef HAVE_UINT8_T -# define UINT8_TYPE uint8_t -# else -# define UINT8_TYPE unsigned char -# endif -#endif -#ifndef INT8_TYPE -# ifdef HAVE_INT8_T -# define INT8_TYPE int8_t -# else -# define INT8_TYPE signed char -# endif -#endif -#ifndef LONGDOUBLE_TYPE -# define LONGDOUBLE_TYPE long double -#endif -typedef sqlite_int64 i64; /* 8-byte signed integer */ -typedef sqlite_uint64 u64; /* 8-byte unsigned integer */ -typedef UINT32_TYPE u32; /* 4-byte unsigned integer */ -typedef UINT16_TYPE u16; /* 2-byte unsigned integer */ -typedef INT16_TYPE i16; /* 2-byte signed integer */ -typedef UINT8_TYPE u8; /* 1-byte unsigned integer */ -typedef INT8_TYPE i8; /* 1-byte signed integer */ - -/* -** SQLITE_MAX_U32 is a u64 constant that is the maximum u64 value -** that can be stored in a u32 without loss of data. The value -** is 0x00000000ffffffff. But because of quirks of some compilers, we -** have to specify the value in the less intuitive manner shown: -*/ -#define SQLITE_MAX_U32 ((((u64)1)<<32)-1) - -/* -** The datatype used to store estimates of the number of rows in a -** table or index. This is an unsigned integer type. For 99.9% of -** the world, a 32-bit integer is sufficient. But a 64-bit integer -** can be used at compile-time if desired. -*/ -#ifdef SQLITE_64BIT_STATS - typedef u64 tRowcnt; /* 64-bit only if requested at compile-time */ -#else - typedef u32 tRowcnt; /* 32-bit is the default */ -#endif - -/* -** Estimated quantities used for query planning are stored as 16-bit -** logarithms. For quantity X, the value stored is 10*log2(X). This -** gives a possible range of values of approximately 1.0e986 to 1e-986. -** But the allowed values are "grainy". Not every value is representable. -** For example, quantities 16 and 17 are both represented by a LogEst -** of 40. However, since LogEst quantaties are suppose to be estimates, -** not exact values, this imprecision is not a problem. -** -** "LogEst" is short for "Logarithmic Estimate". -** -** Examples: -** 1 -> 0 20 -> 43 10000 -> 132 -** 2 -> 10 25 -> 46 25000 -> 146 -** 3 -> 16 100 -> 66 1000000 -> 199 -** 4 -> 20 1000 -> 99 1048576 -> 200 -** 10 -> 33 1024 -> 100 4294967296 -> 320 -** -** The LogEst can be negative to indicate fractional values. 
-** Examples: -** -** 0.5 -> -10 0.1 -> -33 0.0625 -> -40 -*/ -typedef INT16_TYPE LogEst; - -/* -** Macros to determine whether the machine is big or little endian, -** and whether or not that determination is run-time or compile-time. -** -** For best performance, an attempt is made to guess at the byte-order -** using C-preprocessor macros. If that is unsuccessful, or if -** -DSQLITE_RUNTIME_BYTEORDER=1 is set, then byte-order is determined -** at run-time. -*/ -#ifdef SQLITE_AMALGAMATION -SQLITE_PRIVATE const int sqlite3one = 1; -#else -SQLITE_PRIVATE const int sqlite3one; -#endif -#if (defined(i386) || defined(__i386__) || defined(_M_IX86) || \ - defined(__x86_64) || defined(__x86_64__) || defined(_M_X64) || \ - defined(_M_AMD64) || defined(_M_ARM) || defined(__x86) || \ - defined(__arm__)) && !defined(SQLITE_RUNTIME_BYTEORDER) -# define SQLITE_BYTEORDER 1234 -# define SQLITE_BIGENDIAN 0 -# define SQLITE_LITTLEENDIAN 1 -# define SQLITE_UTF16NATIVE SQLITE_UTF16LE -#endif -#if (defined(sparc) || defined(__ppc__)) \ - && !defined(SQLITE_RUNTIME_BYTEORDER) -# define SQLITE_BYTEORDER 4321 -# define SQLITE_BIGENDIAN 1 -# define SQLITE_LITTLEENDIAN 0 -# define SQLITE_UTF16NATIVE SQLITE_UTF16BE -#endif -#if !defined(SQLITE_BYTEORDER) -# define SQLITE_BYTEORDER 0 /* 0 means "unknown at compile-time" */ -# define SQLITE_BIGENDIAN (*(char *)(&sqlite3one)==0) -# define SQLITE_LITTLEENDIAN (*(char *)(&sqlite3one)==1) -# define SQLITE_UTF16NATIVE (SQLITE_BIGENDIAN?SQLITE_UTF16BE:SQLITE_UTF16LE) -#endif - -/* -** Constants for the largest and smallest possible 64-bit signed integers. -** These macros are designed to work correctly on both 32-bit and 64-bit -** compilers. -*/ -#define LARGEST_INT64 (0xffffffff|(((i64)0x7fffffff)<<32)) -#define SMALLEST_INT64 (((i64)-1) - LARGEST_INT64) - -/* -** Round up a number to the next larger multiple of 8. This is used -** to force 8-byte alignment on 64-bit architectures. -*/ -#define ROUND8(x) (((x)+7)&~7) - -/* -** Round down to the nearest multiple of 8 -*/ -#define ROUNDDOWN8(x) ((x)&~7) - -/* -** Assert that the pointer X is aligned to an 8-byte boundary. This -** macro is used only within assert() to verify that the code gets -** all alignment restrictions correct. -** -** Except, if SQLITE_4_BYTE_ALIGNED_MALLOC is defined, then the -** underlying malloc() implemention might return us 4-byte aligned -** pointers. In that case, only verify 4-byte alignment. -*/ -#ifdef SQLITE_4_BYTE_ALIGNED_MALLOC -# define EIGHT_BYTE_ALIGNMENT(X) ((((char*)(X) - (char*)0)&3)==0) -#else -# define EIGHT_BYTE_ALIGNMENT(X) ((((char*)(X) - (char*)0)&7)==0) -#endif - -/* -** Disable MMAP on platforms where it is known to not work -*/ -#if defined(__OpenBSD__) || defined(__QNXNTO__) -# undef SQLITE_MAX_MMAP_SIZE -# define SQLITE_MAX_MMAP_SIZE 0 -#endif - -/* -** Default maximum size of memory used by memory-mapped I/O in the VFS -*/ -#ifdef __APPLE__ -# include -# if TARGET_OS_IPHONE -# undef SQLITE_MAX_MMAP_SIZE -# define SQLITE_MAX_MMAP_SIZE 0 -# endif -#endif -#ifndef SQLITE_MAX_MMAP_SIZE -# if defined(__linux__) \ - || defined(_WIN32) \ - || (defined(__APPLE__) && defined(__MACH__)) \ - || defined(__sun) -# define SQLITE_MAX_MMAP_SIZE 0x7fff0000 /* 2147418112 */ -# else -# define SQLITE_MAX_MMAP_SIZE 0 -# endif -# define SQLITE_MAX_MMAP_SIZE_xc 1 /* exclude from ctime.c */ -#endif - -/* -** The default MMAP_SIZE is zero on all platforms. Or, even if a larger -** default MMAP_SIZE is specified at compile-time, make sure that it does -** not exceed the maximum mmap size. 
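The encoding is easy to sanity-check: a LogEst is roughly ten times the base-2 logarithm, truncated to an integer. The standalone approximation below reproduces the documented table for most inputs; the real integer-only implementation can differ by one unit for small values such as 3.

#include <math.h>
#include <stdio.h>

/* Approximate a LogEst: 10*log2(x), truncated toward zero. Link with -lm. */
static int log_est(double x){
  return (int)(10.0*log2(x));
}

int main(void){
  printf("%d %d %d %d\n", log_est(1), log_est(10), log_est(1000), log_est(0.5));
  /* prints: 0 33 99 -10  -- compare with the table above */
  return 0;
}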
-*/ -#ifndef SQLITE_DEFAULT_MMAP_SIZE -# define SQLITE_DEFAULT_MMAP_SIZE 0 -# define SQLITE_DEFAULT_MMAP_SIZE_xc 1 /* Exclude from ctime.c */ -#endif -#if SQLITE_DEFAULT_MMAP_SIZE>SQLITE_MAX_MMAP_SIZE -# undef SQLITE_DEFAULT_MMAP_SIZE -# define SQLITE_DEFAULT_MMAP_SIZE SQLITE_MAX_MMAP_SIZE -#endif - -/* -** Only one of SQLITE_ENABLE_STAT3 or SQLITE_ENABLE_STAT4 can be defined. -** Priority is given to SQLITE_ENABLE_STAT4. If either are defined, also -** define SQLITE_ENABLE_STAT3_OR_STAT4 -*/ -#ifdef SQLITE_ENABLE_STAT4 -# undef SQLITE_ENABLE_STAT3 -# define SQLITE_ENABLE_STAT3_OR_STAT4 1 -#elif SQLITE_ENABLE_STAT3 -# define SQLITE_ENABLE_STAT3_OR_STAT4 1 -#elif SQLITE_ENABLE_STAT3_OR_STAT4 -# undef SQLITE_ENABLE_STAT3_OR_STAT4 -#endif - -/* -** An instance of the following structure is used to store the busy-handler -** callback for a given sqlite handle. -** -** The sqlite.busyHandler member of the sqlite struct contains the busy -** callback for the database handle. Each pager opened via the sqlite -** handle is passed a pointer to sqlite.busyHandler. The busy-handler -** callback is currently invoked only from within pager.c. -*/ -typedef struct BusyHandler BusyHandler; -struct BusyHandler { - int (*xFunc)(void *,int); /* The busy callback */ - void *pArg; /* First arg to busy callback */ - int nBusy; /* Incremented with each busy call */ -}; - -/* -** Name of the master database table. The master database table -** is a special table that holds the names and attributes of all -** user tables and indices. -*/ -#define MASTER_NAME "sqlite_master" -#define TEMP_MASTER_NAME "sqlite_temp_master" - -/* -** The root-page of the master database table. -*/ -#define MASTER_ROOT 1 - -/* -** The name of the schema table. -*/ -#define SCHEMA_TABLE(x) ((!OMIT_TEMPDB)&&(x==1)?TEMP_MASTER_NAME:MASTER_NAME) - -/* -** A convenience macro that returns the number of elements in -** an array. -*/ -#define ArraySize(X) ((int)(sizeof(X)/sizeof(X[0]))) - -/* -** Determine if the argument is a power of two -*/ -#define IsPowerOfTwo(X) (((X)&((X)-1))==0) - -/* -** The following value as a destructor means to use sqlite3DbFree(). -** The sqlite3DbFree() routine requires two parameters instead of the -** one parameter that destructors normally want. So we have to introduce -** this magic value that the code knows to handle differently. Any -** pointer will work here as long as it is distinct from SQLITE_STATIC -** and SQLITE_TRANSIENT. -*/ -#define SQLITE_DYNAMIC ((sqlite3_destructor_type)sqlite3MallocSize) - -/* -** When SQLITE_OMIT_WSD is defined, it means that the target platform does -** not support Writable Static Data (WSD) such as global and static variables. -** All variables must either be on the stack or dynamically allocated from -** the heap. When WSD is unsupported, the variable declarations scattered -** throughout the SQLite code must become constants instead. The SQLITE_WSD -** macro is used for this purpose. And instead of referencing the variable -** directly, we use its constant as a key to lookup the run-time allocated -** buffer that holds real variable. The constant is also the initializer -** for the run-time allocated buffer. -** -** In the usual case where WSD is supported, the SQLITE_WSD and GLOBAL -** macros become no-ops and have zero performance impact. 
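The BusyHandler structure is the internal counterpart of the public sqlite3_busy_handler() interface. A minimal sketch of installing one (the retry policy is invented):

#include <sqlite3.h>

/* Retry a locked database up to 10 times. "count" is how many times this
** handler has already run for the current lock attempt. */
static int retry_when_busy(void *pUnused, int count){
  (void)pUnused;
  if( count>=10 ) return 0;  /* give up: the operation fails with SQLITE_BUSY */
  sqlite3_sleep(50);         /* back off 50 ms, then ask SQLite to try again */
  return 1;
}

static void install_busy_handler(sqlite3 *db){
  sqlite3_busy_handler(db, retry_when_busy, 0);
}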
-*/ -#ifdef SQLITE_OMIT_WSD - #define SQLITE_WSD const - #define GLOBAL(t,v) (*(t*)sqlite3_wsd_find((void*)&(v), sizeof(v))) - #define sqlite3GlobalConfig GLOBAL(struct Sqlite3Config, sqlite3Config) -SQLITE_API int sqlite3_wsd_init(int N, int J); -SQLITE_API void *sqlite3_wsd_find(void *K, int L); -#else - #define SQLITE_WSD - #define GLOBAL(t,v) v - #define sqlite3GlobalConfig sqlite3Config -#endif - -/* -** The following macros are used to suppress compiler warnings and to -** make it clear to human readers when a function parameter is deliberately -** left unused within the body of a function. This usually happens when -** a function is called via a function pointer. For example the -** implementation of an SQL aggregate step callback may not use the -** parameter indicating the number of arguments passed to the aggregate, -** if it knows that this is enforced elsewhere. -** -** When a function parameter is not used at all within the body of a function, -** it is generally named "NotUsed" or "NotUsed2" to make things even clearer. -** However, these macros may also be used to suppress warnings related to -** parameters that may or may not be used depending on compilation options. -** For example those parameters only used in assert() statements. In these -** cases the parameters are named as per the usual conventions. -*/ -#define UNUSED_PARAMETER(x) (void)(x) -#define UNUSED_PARAMETER2(x,y) UNUSED_PARAMETER(x),UNUSED_PARAMETER(y) - -/* -** Forward references to structures -*/ -typedef struct AggInfo AggInfo; -typedef struct AuthContext AuthContext; -typedef struct AutoincInfo AutoincInfo; -typedef struct Bitvec Bitvec; -typedef struct CollSeq CollSeq; -typedef struct Column Column; -typedef struct Db Db; -typedef struct Schema Schema; -typedef struct Expr Expr; -typedef struct ExprList ExprList; -typedef struct ExprSpan ExprSpan; -typedef struct FKey FKey; -typedef struct FuncDestructor FuncDestructor; -typedef struct FuncDef FuncDef; -typedef struct FuncDefHash FuncDefHash; -typedef struct IdList IdList; -typedef struct Index Index; -typedef struct IndexSample IndexSample; -typedef struct KeyClass KeyClass; -typedef struct KeyInfo KeyInfo; -typedef struct Lookaside Lookaside; -typedef struct LookasideSlot LookasideSlot; -typedef struct Module Module; -typedef struct NameContext NameContext; -typedef struct Parse Parse; -typedef struct PrintfArguments PrintfArguments; -typedef struct RowSet RowSet; -typedef struct Savepoint Savepoint; -typedef struct Select Select; -typedef struct SelectDest SelectDest; -typedef struct SrcList SrcList; -typedef struct StrAccum StrAccum; -typedef struct Table Table; -typedef struct TableLock TableLock; -typedef struct Token Token; -typedef struct Trigger Trigger; -typedef struct TriggerPrg TriggerPrg; -typedef struct TriggerStep TriggerStep; -typedef struct UnpackedRecord UnpackedRecord; -typedef struct VTable VTable; -typedef struct VtabCtx VtabCtx; -typedef struct Walker Walker; -typedef struct WhereInfo WhereInfo; -typedef struct With With; - -/* -** Defer sourcing vdbe.h and btree.h until after the "u8" and -** "BusyHandler" typedefs. vdbe.h also requires a few of the opaque -** pointer types (i.e. FuncDef) defined above. -*/ -/************** Include btree.h in the middle of sqliteInt.h *****************/ -/************** Begin file btree.h *******************************************/ -/* -** 2001 September 15 -** -** The author disclaims copyright to this source code. 
In place of -** a legal notice, here is a blessing: -** -** May you do good and not evil. -** May you find forgiveness for yourself and forgive others. -** May you share freely, never taking more than you give. -** -************************************************************************* -** This header file defines the interface that the sqlite B-Tree file -** subsystem. See comments in the source code for a detailed description -** of what each interface routine does. -*/ -#ifndef _BTREE_H_ -#define _BTREE_H_ - -/* TODO: This definition is just included so other modules compile. It -** needs to be revisited. -*/ -#define SQLITE_N_BTREE_META 10 - -/* -** If defined as non-zero, auto-vacuum is enabled by default. Otherwise -** it must be turned on for each database using "PRAGMA auto_vacuum = 1". -*/ -#ifndef SQLITE_DEFAULT_AUTOVACUUM - #define SQLITE_DEFAULT_AUTOVACUUM 0 -#endif - -#define BTREE_AUTOVACUUM_NONE 0 /* Do not do auto-vacuum */ -#define BTREE_AUTOVACUUM_FULL 1 /* Do full auto-vacuum */ -#define BTREE_AUTOVACUUM_INCR 2 /* Incremental vacuum */ - -/* -** Forward declarations of structure -*/ -typedef struct Btree Btree; -typedef struct BtCursor BtCursor; -typedef struct BtShared BtShared; - - -SQLITE_PRIVATE int sqlite3BtreeOpen( - sqlite3_vfs *pVfs, /* VFS to use with this b-tree */ - const char *zFilename, /* Name of database file to open */ - sqlite3 *db, /* Associated database connection */ - Btree **ppBtree, /* Return open Btree* here */ - int flags, /* Flags */ - int vfsFlags /* Flags passed through to VFS open */ -); - -/* The flags parameter to sqlite3BtreeOpen can be the bitwise or of the -** following values. -** -** NOTE: These values must match the corresponding PAGER_ values in -** pager.h. -*/ -#define BTREE_OMIT_JOURNAL 1 /* Do not create or use a rollback journal */ -#define BTREE_MEMORY 2 /* This is an in-memory DB */ -#define BTREE_SINGLE 4 /* The file contains at most 1 b-tree */ -#define BTREE_UNORDERED 8 /* Use of a hash implementation is OK */ - -SQLITE_PRIVATE int sqlite3BtreeClose(Btree*); -SQLITE_PRIVATE int sqlite3BtreeSetCacheSize(Btree*,int); -#if SQLITE_MAX_MMAP_SIZE>0 -SQLITE_PRIVATE int sqlite3BtreeSetMmapLimit(Btree*,sqlite3_int64); -#endif -SQLITE_PRIVATE int sqlite3BtreeSetPagerFlags(Btree*,unsigned); -SQLITE_PRIVATE int sqlite3BtreeSyncDisabled(Btree*); -SQLITE_PRIVATE int sqlite3BtreeSetPageSize(Btree *p, int nPagesize, int nReserve, int eFix); -SQLITE_PRIVATE int sqlite3BtreeGetPageSize(Btree*); -SQLITE_PRIVATE int sqlite3BtreeMaxPageCount(Btree*,int); -SQLITE_PRIVATE u32 sqlite3BtreeLastPage(Btree*); -SQLITE_PRIVATE int sqlite3BtreeSecureDelete(Btree*,int); -SQLITE_PRIVATE int sqlite3BtreeGetReserve(Btree*); -#if defined(SQLITE_HAS_CODEC) || defined(SQLITE_DEBUG) -SQLITE_PRIVATE int sqlite3BtreeGetReserveNoMutex(Btree *p); -#endif -SQLITE_PRIVATE int sqlite3BtreeSetAutoVacuum(Btree *, int); -SQLITE_PRIVATE int sqlite3BtreeGetAutoVacuum(Btree *); -SQLITE_PRIVATE int sqlite3BtreeBeginTrans(Btree*,int); -SQLITE_PRIVATE int sqlite3BtreeCommitPhaseOne(Btree*, const char *zMaster); -SQLITE_PRIVATE int sqlite3BtreeCommitPhaseTwo(Btree*, int); -SQLITE_PRIVATE int sqlite3BtreeCommit(Btree*); -SQLITE_PRIVATE int sqlite3BtreeRollback(Btree*,int); -SQLITE_PRIVATE int sqlite3BtreeBeginStmt(Btree*,int); -SQLITE_PRIVATE int sqlite3BtreeCreateTable(Btree*, int*, int flags); -SQLITE_PRIVATE int sqlite3BtreeIsInTrans(Btree*); -SQLITE_PRIVATE int sqlite3BtreeIsInReadTrans(Btree*); -SQLITE_PRIVATE int sqlite3BtreeIsInBackup(Btree*); -SQLITE_PRIVATE void 
*sqlite3BtreeSchema(Btree *, int, void(*)(void *)); -SQLITE_PRIVATE int sqlite3BtreeSchemaLocked(Btree *pBtree); -SQLITE_PRIVATE int sqlite3BtreeLockTable(Btree *pBtree, int iTab, u8 isWriteLock); -SQLITE_PRIVATE int sqlite3BtreeSavepoint(Btree *, int, int); - -SQLITE_PRIVATE const char *sqlite3BtreeGetFilename(Btree *); -SQLITE_PRIVATE const char *sqlite3BtreeGetJournalname(Btree *); -SQLITE_PRIVATE int sqlite3BtreeCopyFile(Btree *, Btree *); - -SQLITE_PRIVATE int sqlite3BtreeIncrVacuum(Btree *); - -/* The flags parameter to sqlite3BtreeCreateTable can be the bitwise OR -** of the flags shown below. -** -** Every SQLite table must have either BTREE_INTKEY or BTREE_BLOBKEY set. -** With BTREE_INTKEY, the table key is a 64-bit integer and arbitrary data -** is stored in the leaves. (BTREE_INTKEY is used for SQL tables.) With -** BTREE_BLOBKEY, the key is an arbitrary BLOB and no content is stored -** anywhere - the key is the content. (BTREE_BLOBKEY is used for SQL -** indices.) -*/ -#define BTREE_INTKEY 1 /* Table has only 64-bit signed integer keys */ -#define BTREE_BLOBKEY 2 /* Table has keys only - no data */ - -SQLITE_PRIVATE int sqlite3BtreeDropTable(Btree*, int, int*); -SQLITE_PRIVATE int sqlite3BtreeClearTable(Btree*, int, int*); -SQLITE_PRIVATE int sqlite3BtreeClearTableOfCursor(BtCursor*); -SQLITE_PRIVATE void sqlite3BtreeTripAllCursors(Btree*, int); - -SQLITE_PRIVATE void sqlite3BtreeGetMeta(Btree *pBtree, int idx, u32 *pValue); -SQLITE_PRIVATE int sqlite3BtreeUpdateMeta(Btree*, int idx, u32 value); - -SQLITE_PRIVATE int sqlite3BtreeNewDb(Btree *p); - -/* -** The second parameter to sqlite3BtreeGetMeta or sqlite3BtreeUpdateMeta -** should be one of the following values. The integer values are assigned -** to constants so that the offset of the corresponding field in an -** SQLite database header may be found using the following formula: -** -** offset = 36 + (idx * 4) -** -** For example, the free-page-count field is located at byte offset 36 of -** the database file header. The incr-vacuum-flag field is located at -** byte offset 64 (== 36+4*7). -*/ -#define BTREE_FREE_PAGE_COUNT 0 -#define BTREE_SCHEMA_VERSION 1 -#define BTREE_FILE_FORMAT 2 -#define BTREE_DEFAULT_CACHE_SIZE 3 -#define BTREE_LARGEST_ROOT_PAGE 4 -#define BTREE_TEXT_ENCODING 5 -#define BTREE_USER_VERSION 6 -#define BTREE_INCR_VACUUM 7 -#define BTREE_APPLICATION_ID 8 - -/* -** Values that may be OR'd together to form the second argument of an -** sqlite3BtreeCursorHints() call. -*/ -#define BTREE_BULKLOAD 0x00000001 - -SQLITE_PRIVATE int sqlite3BtreeCursor( - Btree*, /* BTree containing table to open */ - int iTable, /* Index of root page */ - int wrFlag, /* 1 for writing. 
0 for read-only */ - struct KeyInfo*, /* First argument to compare function */ - BtCursor *pCursor /* Space to write cursor structure */ -); -SQLITE_PRIVATE int sqlite3BtreeCursorSize(void); -SQLITE_PRIVATE void sqlite3BtreeCursorZero(BtCursor*); - -SQLITE_PRIVATE int sqlite3BtreeCloseCursor(BtCursor*); -SQLITE_PRIVATE int sqlite3BtreeMovetoUnpacked( - BtCursor*, - UnpackedRecord *pUnKey, - i64 intKey, - int bias, - int *pRes -); -SQLITE_PRIVATE int sqlite3BtreeCursorHasMoved(BtCursor*, int*); -SQLITE_PRIVATE int sqlite3BtreeDelete(BtCursor*); -SQLITE_PRIVATE int sqlite3BtreeInsert(BtCursor*, const void *pKey, i64 nKey, - const void *pData, int nData, - int nZero, int bias, int seekResult); -SQLITE_PRIVATE int sqlite3BtreeFirst(BtCursor*, int *pRes); -SQLITE_PRIVATE int sqlite3BtreeLast(BtCursor*, int *pRes); -SQLITE_PRIVATE int sqlite3BtreeNext(BtCursor*, int *pRes); -SQLITE_PRIVATE int sqlite3BtreeEof(BtCursor*); -SQLITE_PRIVATE int sqlite3BtreePrevious(BtCursor*, int *pRes); -SQLITE_PRIVATE int sqlite3BtreeKeySize(BtCursor*, i64 *pSize); -SQLITE_PRIVATE int sqlite3BtreeKey(BtCursor*, u32 offset, u32 amt, void*); -SQLITE_PRIVATE const void *sqlite3BtreeKeyFetch(BtCursor*, u32 *pAmt); -SQLITE_PRIVATE const void *sqlite3BtreeDataFetch(BtCursor*, u32 *pAmt); -SQLITE_PRIVATE int sqlite3BtreeDataSize(BtCursor*, u32 *pSize); -SQLITE_PRIVATE int sqlite3BtreeData(BtCursor*, u32 offset, u32 amt, void*); - -SQLITE_PRIVATE char *sqlite3BtreeIntegrityCheck(Btree*, int *aRoot, int nRoot, int, int*); -SQLITE_PRIVATE struct Pager *sqlite3BtreePager(Btree*); - -SQLITE_PRIVATE int sqlite3BtreePutData(BtCursor*, u32 offset, u32 amt, void*); -SQLITE_PRIVATE void sqlite3BtreeIncrblobCursor(BtCursor *); -SQLITE_PRIVATE void sqlite3BtreeClearCursor(BtCursor *); -SQLITE_PRIVATE int sqlite3BtreeSetVersion(Btree *pBt, int iVersion); -SQLITE_PRIVATE void sqlite3BtreeCursorHints(BtCursor *, unsigned int mask); -SQLITE_PRIVATE int sqlite3BtreeIsReadonly(Btree *pBt); - -#ifndef NDEBUG -SQLITE_PRIVATE int sqlite3BtreeCursorIsValid(BtCursor*); -#endif - -#ifndef SQLITE_OMIT_BTREECOUNT -SQLITE_PRIVATE int sqlite3BtreeCount(BtCursor *, i64 *); -#endif - -#ifdef SQLITE_TEST -SQLITE_PRIVATE int sqlite3BtreeCursorInfo(BtCursor*, int*, int); -SQLITE_PRIVATE void sqlite3BtreeCursorList(Btree*); -#endif - -#ifndef SQLITE_OMIT_WAL -SQLITE_PRIVATE int sqlite3BtreeCheckpoint(Btree*, int, int *, int *); -#endif - -/* -** If we are not using shared cache, then there is no need to -** use mutexes to access the BtShared structures. So make the -** Enter and Leave procedures no-ops. -*/ -#ifndef SQLITE_OMIT_SHARED_CACHE -SQLITE_PRIVATE void sqlite3BtreeEnter(Btree*); -SQLITE_PRIVATE void sqlite3BtreeEnterAll(sqlite3*); -#else -# define sqlite3BtreeEnter(X) -# define sqlite3BtreeEnterAll(X) -#endif - -#if !defined(SQLITE_OMIT_SHARED_CACHE) && SQLITE_THREADSAFE -SQLITE_PRIVATE int sqlite3BtreeSharable(Btree*); -SQLITE_PRIVATE void sqlite3BtreeLeave(Btree*); -SQLITE_PRIVATE void sqlite3BtreeEnterCursor(BtCursor*); -SQLITE_PRIVATE void sqlite3BtreeLeaveCursor(BtCursor*); -SQLITE_PRIVATE void sqlite3BtreeLeaveAll(sqlite3*); -#ifndef NDEBUG - /* These routines are used inside assert() statements only. 
*/ -SQLITE_PRIVATE int sqlite3BtreeHoldsMutex(Btree*); -SQLITE_PRIVATE int sqlite3BtreeHoldsAllMutexes(sqlite3*); -SQLITE_PRIVATE int sqlite3SchemaMutexHeld(sqlite3*,int,Schema*); -#endif -#else - -# define sqlite3BtreeSharable(X) 0 -# define sqlite3BtreeLeave(X) -# define sqlite3BtreeEnterCursor(X) -# define sqlite3BtreeLeaveCursor(X) -# define sqlite3BtreeLeaveAll(X) - -# define sqlite3BtreeHoldsMutex(X) 1 -# define sqlite3BtreeHoldsAllMutexes(X) 1 -# define sqlite3SchemaMutexHeld(X,Y,Z) 1 -#endif - - -#endif /* _BTREE_H_ */ - -/************** End of btree.h ***********************************************/ -/************** Continuing where we left off in sqliteInt.h ******************/ -/************** Include vdbe.h in the middle of sqliteInt.h ******************/ -/************** Begin file vdbe.h ********************************************/ -/* -** 2001 September 15 -** -** The author disclaims copyright to this source code. In place of -** a legal notice, here is a blessing: -** -** May you do good and not evil. -** May you find forgiveness for yourself and forgive others. -** May you share freely, never taking more than you give. -** -************************************************************************* -** Header file for the Virtual DataBase Engine (VDBE) -** -** This header defines the interface to the virtual database engine -** or VDBE. The VDBE implements an abstract machine that runs a -** simple program to access and modify the underlying database. -*/ -#ifndef _SQLITE_VDBE_H_ -#define _SQLITE_VDBE_H_ -/* #include */ - -/* -** A single VDBE is an opaque structure named "Vdbe". Only routines -** in the source file sqliteVdbe.c are allowed to see the insides -** of this structure. -*/ -typedef struct Vdbe Vdbe; - -/* -** The names of the following types declared in vdbeInt.h are required -** for the VdbeOp definition. -*/ -typedef struct Mem Mem; -typedef struct SubProgram SubProgram; - -/* -** A single instruction of the virtual machine has an opcode -** and as many as three operands. 
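The opcode/operand layout described here can be observed from the public API: prefixing a statement with EXPLAIN yields one row per VdbeOp, carrying the opcode name and its p1..p5 operands. A small sketch (error handling elided):

#include <stdio.h>
#include <sqlite3.h>

/* Print the VDBE program for a query, one line per instruction. */
static void dump_program(sqlite3 *db, const char *zSql){
  sqlite3_stmt *pStmt = 0;
  char *zExplain = sqlite3_mprintf("EXPLAIN %s", zSql);
  if( zExplain && sqlite3_prepare_v2(db, zExplain, -1, &pStmt, 0)==SQLITE_OK ){
    while( sqlite3_step(pStmt)==SQLITE_ROW ){
      printf("%4d %-14s p1=%d p2=%d p3=%d\n",
             sqlite3_column_int(pStmt, 0),                  /* addr   */
             (const char*)sqlite3_column_text(pStmt, 1),    /* opcode */
             sqlite3_column_int(pStmt, 2),                  /* p1     */
             sqlite3_column_int(pStmt, 3),                  /* p2     */
             sqlite3_column_int(pStmt, 4));                 /* p3     */
    }
  }
  sqlite3_finalize(pStmt);
  sqlite3_free(zExplain);
}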
The instruction is recorded -** as an instance of the following structure: -*/ -struct VdbeOp { - u8 opcode; /* What operation to perform */ - signed char p4type; /* One of the P4_xxx constants for p4 */ - u8 opflags; /* Mask of the OPFLG_* flags in opcodes.h */ - u8 p5; /* Fifth parameter is an unsigned character */ - int p1; /* First operand */ - int p2; /* Second parameter (often the jump destination) */ - int p3; /* The third parameter */ - union { /* fourth parameter */ - int i; /* Integer value if p4type==P4_INT32 */ - void *p; /* Generic pointer */ - char *z; /* Pointer to data for string (char array) types */ - i64 *pI64; /* Used when p4type is P4_INT64 */ - double *pReal; /* Used when p4type is P4_REAL */ - FuncDef *pFunc; /* Used when p4type is P4_FUNCDEF */ - CollSeq *pColl; /* Used when p4type is P4_COLLSEQ */ - Mem *pMem; /* Used when p4type is P4_MEM */ - VTable *pVtab; /* Used when p4type is P4_VTAB */ - KeyInfo *pKeyInfo; /* Used when p4type is P4_KEYINFO */ - int *ai; /* Used when p4type is P4_INTARRAY */ - SubProgram *pProgram; /* Used when p4type is P4_SUBPROGRAM */ - int (*xAdvance)(BtCursor *, int *); - } p4; -#ifdef SQLITE_ENABLE_EXPLAIN_COMMENTS - char *zComment; /* Comment to improve readability */ -#endif -#ifdef VDBE_PROFILE - u32 cnt; /* Number of times this instruction was executed */ - u64 cycles; /* Total time spent executing this instruction */ -#endif -#ifdef SQLITE_VDBE_COVERAGE - int iSrcLine; /* Source-code line that generated this opcode */ -#endif -}; -typedef struct VdbeOp VdbeOp; - - -/* -** A sub-routine used to implement a trigger program. -*/ -struct SubProgram { - VdbeOp *aOp; /* Array of opcodes for sub-program */ - int nOp; /* Elements in aOp[] */ - int nMem; /* Number of memory cells required */ - int nCsr; /* Number of cursors required */ - int nOnce; /* Number of OP_Once instructions */ - void *token; /* id that may be used to recursive triggers */ - SubProgram *pNext; /* Next sub-program already visited */ -}; - -/* -** A smaller version of VdbeOp used for the VdbeAddOpList() function because -** it takes up less space. 
-*/ -struct VdbeOpList { - u8 opcode; /* What operation to perform */ - signed char p1; /* First operand */ - signed char p2; /* Second parameter (often the jump destination) */ - signed char p3; /* Third parameter */ -}; -typedef struct VdbeOpList VdbeOpList; - -/* -** Allowed values of VdbeOp.p4type -*/ -#define P4_NOTUSED 0 /* The P4 parameter is not used */ -#define P4_DYNAMIC (-1) /* Pointer to a string obtained from sqliteMalloc() */ -#define P4_STATIC (-2) /* Pointer to a static string */ -#define P4_COLLSEQ (-4) /* P4 is a pointer to a CollSeq structure */ -#define P4_FUNCDEF (-5) /* P4 is a pointer to a FuncDef structure */ -#define P4_KEYINFO (-6) /* P4 is a pointer to a KeyInfo structure */ -#define P4_MEM (-8) /* P4 is a pointer to a Mem* structure */ -#define P4_TRANSIENT 0 /* P4 is a pointer to a transient string */ -#define P4_VTAB (-10) /* P4 is a pointer to an sqlite3_vtab structure */ -#define P4_MPRINTF (-11) /* P4 is a string obtained from sqlite3_mprintf() */ -#define P4_REAL (-12) /* P4 is a 64-bit floating point value */ -#define P4_INT64 (-13) /* P4 is a 64-bit signed integer */ -#define P4_INT32 (-14) /* P4 is a 32-bit signed integer */ -#define P4_INTARRAY (-15) /* P4 is a vector of 32-bit integers */ -#define P4_SUBPROGRAM (-18) /* P4 is a pointer to a SubProgram structure */ -#define P4_ADVANCE (-19) /* P4 is a pointer to BtreeNext() or BtreePrev() */ - -/* Error message codes for OP_Halt */ -#define P5_ConstraintNotNull 1 -#define P5_ConstraintUnique 2 -#define P5_ConstraintCheck 3 -#define P5_ConstraintFK 4 - -/* -** The Vdbe.aColName array contains 5n Mem structures, where n is the -** number of columns of data returned by the statement. -*/ -#define COLNAME_NAME 0 -#define COLNAME_DECLTYPE 1 -#define COLNAME_DATABASE 2 -#define COLNAME_TABLE 3 -#define COLNAME_COLUMN 4 -#ifdef SQLITE_ENABLE_COLUMN_METADATA -# define COLNAME_N 5 /* Number of COLNAME_xxx symbols */ -#else -# ifdef SQLITE_OMIT_DECLTYPE -# define COLNAME_N 1 /* Store only the name */ -# else -# define COLNAME_N 2 /* Store the name and decltype */ -# endif -#endif - -/* -** The following macro converts a relative address in the p2 field -** of a VdbeOp structure into a negative number so that -** sqlite3VdbeAddOpList() knows that the address is relative. Calling -** the macro again restores the address. -*/ -#define ADDR(X) (-1-(X)) - -/* -** The makefile scans the vdbe.c source file and creates the "opcodes.h" -** header file that defines a number for each opcode used by the VDBE. -*/ -/************** Include opcodes.h in the middle of vdbe.h ********************/ -/************** Begin file opcodes.h *****************************************/ -/* Automatically generated. 
Do not edit */ -/* See the mkopcodeh.awk script for details */ -#define OP_Function 1 /* synopsis: r[P3]=func(r[P2@P5]) */ -#define OP_Savepoint 2 -#define OP_AutoCommit 3 -#define OP_Transaction 4 -#define OP_SorterNext 5 -#define OP_PrevIfOpen 6 -#define OP_NextIfOpen 7 -#define OP_Prev 8 -#define OP_Next 9 -#define OP_AggStep 10 /* synopsis: accum=r[P3] step(r[P2@P5]) */ -#define OP_Checkpoint 11 -#define OP_JournalMode 12 -#define OP_Vacuum 13 -#define OP_VFilter 14 /* synopsis: iplan=r[P3] zplan='P4' */ -#define OP_VUpdate 15 /* synopsis: data=r[P3@P2] */ -#define OP_Goto 16 -#define OP_Gosub 17 -#define OP_Return 18 -#define OP_Not 19 /* same as TK_NOT, synopsis: r[P2]= !r[P1] */ -#define OP_InitCoroutine 20 -#define OP_EndCoroutine 21 -#define OP_Yield 22 -#define OP_HaltIfNull 23 /* synopsis: if r[P3]=null halt */ -#define OP_Halt 24 -#define OP_Integer 25 /* synopsis: r[P2]=P1 */ -#define OP_Int64 26 /* synopsis: r[P2]=P4 */ -#define OP_String 27 /* synopsis: r[P2]='P4' (len=P1) */ -#define OP_Null 28 /* synopsis: r[P2..P3]=NULL */ -#define OP_SoftNull 29 /* synopsis: r[P1]=NULL */ -#define OP_Blob 30 /* synopsis: r[P2]=P4 (len=P1) */ -#define OP_Variable 31 /* synopsis: r[P2]=parameter(P1,P4) */ -#define OP_Move 32 /* synopsis: r[P2@P3]=r[P1@P3] */ -#define OP_Copy 33 /* synopsis: r[P2@P3+1]=r[P1@P3+1] */ -#define OP_SCopy 34 /* synopsis: r[P2]=r[P1] */ -#define OP_ResultRow 35 /* synopsis: output=r[P1@P2] */ -#define OP_CollSeq 36 -#define OP_AddImm 37 /* synopsis: r[P1]=r[P1]+P2 */ -#define OP_MustBeInt 38 -#define OP_RealAffinity 39 -#define OP_Permutation 40 -#define OP_Compare 41 /* synopsis: r[P1@P3] <-> r[P2@P3] */ -#define OP_Jump 42 -#define OP_Once 43 -#define OP_If 44 -#define OP_IfNot 45 -#define OP_Column 46 /* synopsis: r[P3]=PX */ -#define OP_Affinity 47 /* synopsis: affinity(r[P1@P2]) */ -#define OP_MakeRecord 48 /* synopsis: r[P3]=mkrec(r[P1@P2]) */ -#define OP_Count 49 /* synopsis: r[P2]=count() */ -#define OP_ReadCookie 50 -#define OP_SetCookie 51 -#define OP_OpenRead 52 /* synopsis: root=P2 iDb=P3 */ -#define OP_OpenWrite 53 /* synopsis: root=P2 iDb=P3 */ -#define OP_OpenAutoindex 54 /* synopsis: nColumn=P2 */ -#define OP_OpenEphemeral 55 /* synopsis: nColumn=P2 */ -#define OP_SorterOpen 56 -#define OP_OpenPseudo 57 /* synopsis: P3 columns in r[P2] */ -#define OP_Close 58 -#define OP_SeekLT 59 -#define OP_SeekLE 60 -#define OP_SeekGE 61 -#define OP_SeekGT 62 -#define OP_Seek 63 /* synopsis: intkey=r[P2] */ -#define OP_NoConflict 64 /* synopsis: key=r[P3@P4] */ -#define OP_NotFound 65 /* synopsis: key=r[P3@P4] */ -#define OP_Found 66 /* synopsis: key=r[P3@P4] */ -#define OP_NotExists 67 /* synopsis: intkey=r[P3] */ -#define OP_Sequence 68 /* synopsis: r[P2]=cursor[P1].ctr++ */ -#define OP_NewRowid 69 /* synopsis: r[P2]=rowid */ -#define OP_Insert 70 /* synopsis: intkey=r[P3] data=r[P2] */ -#define OP_Or 71 /* same as TK_OR, synopsis: r[P3]=(r[P1] || r[P2]) */ -#define OP_And 72 /* same as TK_AND, synopsis: r[P3]=(r[P1] && r[P2]) */ -#define OP_InsertInt 73 /* synopsis: intkey=P3 data=r[P2] */ -#define OP_Delete 74 -#define OP_ResetCount 75 -#define OP_IsNull 76 /* same as TK_ISNULL, synopsis: if r[P1]==NULL goto P2 */ -#define OP_NotNull 77 /* same as TK_NOTNULL, synopsis: if r[P1]!=NULL goto P2 */ -#define OP_Ne 78 /* same as TK_NE, synopsis: if r[P1]!=r[P3] goto P2 */ -#define OP_Eq 79 /* same as TK_EQ, synopsis: if r[P1]==r[P3] goto P2 */ -#define OP_Gt 80 /* same as TK_GT, synopsis: if r[P1]>r[P3] goto P2 */ -#define OP_Le 81 /* same as TK_LE, synopsis: if 
r[P1]<=r[P3] goto P2 */ -#define OP_Lt 82 /* same as TK_LT, synopsis: if r[P1]=r[P3] goto P2 */ -#define OP_SorterCompare 84 /* synopsis: if key(P1)!=rtrim(r[P3],P4) goto P2 */ -#define OP_BitAnd 85 /* same as TK_BITAND, synopsis: r[P3]=r[P1]&r[P2] */ -#define OP_BitOr 86 /* same as TK_BITOR, synopsis: r[P3]=r[P1]|r[P2] */ -#define OP_ShiftLeft 87 /* same as TK_LSHIFT, synopsis: r[P3]=r[P2]<>r[P1] */ -#define OP_Add 89 /* same as TK_PLUS, synopsis: r[P3]=r[P1]+r[P2] */ -#define OP_Subtract 90 /* same as TK_MINUS, synopsis: r[P3]=r[P2]-r[P1] */ -#define OP_Multiply 91 /* same as TK_STAR, synopsis: r[P3]=r[P1]*r[P2] */ -#define OP_Divide 92 /* same as TK_SLASH, synopsis: r[P3]=r[P2]/r[P1] */ -#define OP_Remainder 93 /* same as TK_REM, synopsis: r[P3]=r[P2]%r[P1] */ -#define OP_Concat 94 /* same as TK_CONCAT, synopsis: r[P3]=r[P2]+r[P1] */ -#define OP_SorterData 95 /* synopsis: r[P2]=data */ -#define OP_BitNot 96 /* same as TK_BITNOT, synopsis: r[P1]= ~r[P1] */ -#define OP_String8 97 /* same as TK_STRING, synopsis: r[P2]='P4' */ -#define OP_RowKey 98 /* synopsis: r[P2]=key */ -#define OP_RowData 99 /* synopsis: r[P2]=data */ -#define OP_Rowid 100 /* synopsis: r[P2]=rowid */ -#define OP_NullRow 101 -#define OP_Last 102 -#define OP_SorterSort 103 -#define OP_Sort 104 -#define OP_Rewind 105 -#define OP_SorterInsert 106 -#define OP_IdxInsert 107 /* synopsis: key=r[P2] */ -#define OP_IdxDelete 108 /* synopsis: key=r[P2@P3] */ -#define OP_IdxRowid 109 /* synopsis: r[P2]=rowid */ -#define OP_IdxLE 110 /* synopsis: key=r[P3@P4] */ -#define OP_IdxGT 111 /* synopsis: key=r[P3@P4] */ -#define OP_IdxLT 112 /* synopsis: key=r[P3@P4] */ -#define OP_IdxGE 113 /* synopsis: key=r[P3@P4] */ -#define OP_Destroy 114 -#define OP_Clear 115 -#define OP_ResetSorter 116 -#define OP_CreateIndex 117 /* synopsis: r[P2]=root iDb=P1 */ -#define OP_CreateTable 118 /* synopsis: r[P2]=root iDb=P1 */ -#define OP_ParseSchema 119 -#define OP_LoadAnalysis 120 -#define OP_DropTable 121 -#define OP_DropIndex 122 -#define OP_DropTrigger 123 -#define OP_IntegrityCk 124 -#define OP_RowSetAdd 125 /* synopsis: rowset(P1)=r[P2] */ -#define OP_RowSetRead 126 /* synopsis: r[P3]=rowset(P1) */ -#define OP_RowSetTest 127 /* synopsis: if r[P3] in rowset(P1) goto P2 */ -#define OP_Program 128 -#define OP_Param 129 -#define OP_FkCounter 130 /* synopsis: fkctr[P1]+=P2 */ -#define OP_FkIfZero 131 /* synopsis: if fkctr[P1]==0 goto P2 */ -#define OP_MemMax 132 /* synopsis: r[P1]=max(r[P1],r[P2]) */ -#define OP_Real 133 /* same as TK_FLOAT, synopsis: r[P2]=P4 */ -#define OP_IfPos 134 /* synopsis: if r[P1]>0 goto P2 */ -#define OP_IfNeg 135 /* synopsis: if r[P1]<0 goto P2 */ -#define OP_IfZero 136 /* synopsis: r[P1]+=P3, if r[P1]==0 goto P2 */ -#define OP_AggFinal 137 /* synopsis: accum=r[P1] N=P2 */ -#define OP_IncrVacuum 138 -#define OP_Expire 139 -#define OP_TableLock 140 /* synopsis: iDb=P1 root=P2 write=P3 */ -#define OP_VBegin 141 -#define OP_VCreate 142 -#define OP_ToText 143 /* same as TK_TO_TEXT */ -#define OP_ToBlob 144 /* same as TK_TO_BLOB */ -#define OP_ToNumeric 145 /* same as TK_TO_NUMERIC */ -#define OP_ToInt 146 /* same as TK_TO_INT */ -#define OP_ToReal 147 /* same as TK_TO_REAL */ -#define OP_VDestroy 148 -#define OP_VOpen 149 -#define OP_VColumn 150 /* synopsis: r[P3]=vcolumn(P2) */ -#define OP_VNext 151 -#define OP_VRename 152 -#define OP_Pagecount 153 -#define OP_MaxPgcnt 154 -#define OP_Init 155 /* synopsis: Start at P2 */ -#define OP_Noop 156 -#define OP_Explain 157 - - -/* Properties such as "out2" or "jump" that are 
specified in -** comments following the "case" for each opcode in the vdbe.c -** are encoded into bitvectors as follows: -*/ -#define OPFLG_JUMP 0x0001 /* jump: P2 holds jmp target */ -#define OPFLG_OUT2_PRERELEASE 0x0002 /* out2-prerelease: */ -#define OPFLG_IN1 0x0004 /* in1: P1 is an input */ -#define OPFLG_IN2 0x0008 /* in2: P2 is an input */ -#define OPFLG_IN3 0x0010 /* in3: P3 is an input */ -#define OPFLG_OUT2 0x0020 /* out2: P2 is an output */ -#define OPFLG_OUT3 0x0040 /* out3: P3 is an output */ -#define OPFLG_INITIALIZER {\ -/* 0 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x01,\ -/* 8 */ 0x01, 0x01, 0x00, 0x00, 0x02, 0x00, 0x01, 0x00,\ -/* 16 */ 0x01, 0x01, 0x04, 0x24, 0x01, 0x04, 0x05, 0x10,\ -/* 24 */ 0x00, 0x02, 0x02, 0x02, 0x02, 0x00, 0x02, 0x02,\ -/* 32 */ 0x00, 0x00, 0x20, 0x00, 0x00, 0x04, 0x05, 0x04,\ -/* 40 */ 0x00, 0x00, 0x01, 0x01, 0x05, 0x05, 0x00, 0x00,\ -/* 48 */ 0x00, 0x02, 0x02, 0x10, 0x00, 0x00, 0x00, 0x00,\ -/* 56 */ 0x00, 0x00, 0x00, 0x11, 0x11, 0x11, 0x11, 0x08,\ -/* 64 */ 0x11, 0x11, 0x11, 0x11, 0x02, 0x02, 0x00, 0x4c,\ -/* 72 */ 0x4c, 0x00, 0x00, 0x00, 0x05, 0x05, 0x15, 0x15,\ -/* 80 */ 0x15, 0x15, 0x15, 0x15, 0x00, 0x4c, 0x4c, 0x4c,\ -/* 88 */ 0x4c, 0x4c, 0x4c, 0x4c, 0x4c, 0x4c, 0x4c, 0x00,\ -/* 96 */ 0x24, 0x02, 0x00, 0x00, 0x02, 0x00, 0x01, 0x01,\ -/* 104 */ 0x01, 0x01, 0x08, 0x08, 0x00, 0x02, 0x01, 0x01,\ -/* 112 */ 0x01, 0x01, 0x02, 0x00, 0x00, 0x02, 0x02, 0x00,\ -/* 120 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x0c, 0x45, 0x15,\ -/* 128 */ 0x01, 0x02, 0x00, 0x01, 0x08, 0x02, 0x05, 0x05,\ -/* 136 */ 0x05, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x04,\ -/* 144 */ 0x04, 0x04, 0x04, 0x04, 0x00, 0x00, 0x00, 0x01,\ -/* 152 */ 0x00, 0x02, 0x02, 0x01, 0x00, 0x00,} - -/************** End of opcodes.h *********************************************/ -/************** Continuing where we left off in vdbe.h ***********************/ - -/* -** Prototypes for the VDBE interface. See comments on the implementation -** for a description of what each of these routines does. 
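/* Illustrative sketch (not part of the original header): how the OPFLG_*
** property bits above are typically consumed. A per-opcode lookup table is
** initialized from OPFLG_INITIALIZER (SQLite's own copy of this table is
** named sqlite3OpcodeProperty); the table and helper names below are
** invented for this example.
*/
static const unsigned char aOpProperty[] = OPFLG_INITIALIZER;

/* True if opcode "op" is a jump instruction, i.e. its P2 operand holds a
** jump destination. */
static int opcodeIsJump(unsigned char op){
  return (aOpProperty[op] & OPFLG_JUMP)!=0;
}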
-*/ -SQLITE_PRIVATE Vdbe *sqlite3VdbeCreate(Parse*); -SQLITE_PRIVATE int sqlite3VdbeAddOp0(Vdbe*,int); -SQLITE_PRIVATE int sqlite3VdbeAddOp1(Vdbe*,int,int); -SQLITE_PRIVATE int sqlite3VdbeAddOp2(Vdbe*,int,int,int); -SQLITE_PRIVATE int sqlite3VdbeAddOp3(Vdbe*,int,int,int,int); -SQLITE_PRIVATE int sqlite3VdbeAddOp4(Vdbe*,int,int,int,int,const char *zP4,int); -SQLITE_PRIVATE int sqlite3VdbeAddOp4Int(Vdbe*,int,int,int,int,int); -SQLITE_PRIVATE int sqlite3VdbeAddOpList(Vdbe*, int nOp, VdbeOpList const *aOp, int iLineno); -SQLITE_PRIVATE void sqlite3VdbeAddParseSchemaOp(Vdbe*,int,char*); -SQLITE_PRIVATE void sqlite3VdbeChangeP1(Vdbe*, u32 addr, int P1); -SQLITE_PRIVATE void sqlite3VdbeChangeP2(Vdbe*, u32 addr, int P2); -SQLITE_PRIVATE void sqlite3VdbeChangeP3(Vdbe*, u32 addr, int P3); -SQLITE_PRIVATE void sqlite3VdbeChangeP5(Vdbe*, u8 P5); -SQLITE_PRIVATE void sqlite3VdbeJumpHere(Vdbe*, int addr); -SQLITE_PRIVATE void sqlite3VdbeChangeToNoop(Vdbe*, int addr); -SQLITE_PRIVATE int sqlite3VdbeDeletePriorOpcode(Vdbe*, u8 op); -SQLITE_PRIVATE void sqlite3VdbeChangeP4(Vdbe*, int addr, const char *zP4, int N); -SQLITE_PRIVATE void sqlite3VdbeSetP4KeyInfo(Parse*, Index*); -SQLITE_PRIVATE void sqlite3VdbeUsesBtree(Vdbe*, int); -SQLITE_PRIVATE VdbeOp *sqlite3VdbeGetOp(Vdbe*, int); -SQLITE_PRIVATE int sqlite3VdbeMakeLabel(Vdbe*); -SQLITE_PRIVATE void sqlite3VdbeRunOnlyOnce(Vdbe*); -SQLITE_PRIVATE void sqlite3VdbeDelete(Vdbe*); -SQLITE_PRIVATE void sqlite3VdbeClearObject(sqlite3*,Vdbe*); -SQLITE_PRIVATE void sqlite3VdbeMakeReady(Vdbe*,Parse*); -SQLITE_PRIVATE int sqlite3VdbeFinalize(Vdbe*); -SQLITE_PRIVATE void sqlite3VdbeResolveLabel(Vdbe*, int); -SQLITE_PRIVATE int sqlite3VdbeCurrentAddr(Vdbe*); -#ifdef SQLITE_DEBUG -SQLITE_PRIVATE int sqlite3VdbeAssertMayAbort(Vdbe *, int); -#endif -SQLITE_PRIVATE void sqlite3VdbeResetStepResult(Vdbe*); -SQLITE_PRIVATE void sqlite3VdbeRewind(Vdbe*); -SQLITE_PRIVATE int sqlite3VdbeReset(Vdbe*); -SQLITE_PRIVATE void sqlite3VdbeSetNumCols(Vdbe*,int); -SQLITE_PRIVATE int sqlite3VdbeSetColName(Vdbe*, int, int, const char *, void(*)(void*)); -SQLITE_PRIVATE void sqlite3VdbeCountChanges(Vdbe*); -SQLITE_PRIVATE sqlite3 *sqlite3VdbeDb(Vdbe*); -SQLITE_PRIVATE void sqlite3VdbeSetSql(Vdbe*, const char *z, int n, int); -SQLITE_PRIVATE void sqlite3VdbeSwap(Vdbe*,Vdbe*); -SQLITE_PRIVATE VdbeOp *sqlite3VdbeTakeOpArray(Vdbe*, int*, int*); -SQLITE_PRIVATE sqlite3_value *sqlite3VdbeGetBoundValue(Vdbe*, int, u8); -SQLITE_PRIVATE void sqlite3VdbeSetVarmask(Vdbe*, int); -#ifndef SQLITE_OMIT_TRACE -SQLITE_PRIVATE char *sqlite3VdbeExpandSql(Vdbe*, const char*); -#endif - -SQLITE_PRIVATE void sqlite3VdbeRecordUnpack(KeyInfo*,int,const void*,UnpackedRecord*); -SQLITE_PRIVATE int sqlite3VdbeRecordCompare(int,const void*,UnpackedRecord*,int); -SQLITE_PRIVATE UnpackedRecord *sqlite3VdbeAllocUnpackedRecord(KeyInfo *, char *, int, char **); - -typedef int (*RecordCompare)(int,const void*,UnpackedRecord*,int); -SQLITE_PRIVATE RecordCompare sqlite3VdbeFindCompare(UnpackedRecord*); - -#ifndef SQLITE_OMIT_TRIGGER -SQLITE_PRIVATE void sqlite3VdbeLinkSubProgram(Vdbe *, SubProgram *); -#endif - -/* Use SQLITE_ENABLE_COMMENTS to enable generation of extra comments on -** each VDBE opcode. -** -** Use the SQLITE_ENABLE_MODULE_COMMENTS macro to see some extra no-op -** comments in VDBE programs that show key decision points in the code -** generator. 
-*/ -#ifdef SQLITE_ENABLE_EXPLAIN_COMMENTS -SQLITE_PRIVATE void sqlite3VdbeComment(Vdbe*, const char*, ...); -# define VdbeComment(X) sqlite3VdbeComment X -SQLITE_PRIVATE void sqlite3VdbeNoopComment(Vdbe*, const char*, ...); -# define VdbeNoopComment(X) sqlite3VdbeNoopComment X -# ifdef SQLITE_ENABLE_MODULE_COMMENTS -# define VdbeModuleComment(X) sqlite3VdbeNoopComment X -# else -# define VdbeModuleComment(X) -# endif -#else -# define VdbeComment(X) -# define VdbeNoopComment(X) -# define VdbeModuleComment(X) -#endif - -/* -** The VdbeCoverage macros are used to set a coverage testing point -** for VDBE branch instructions. The coverage testing points are line -** numbers in the sqlite3.c source file. VDBE branch coverage testing -** only works with an amalagmation build. That's ok since a VDBE branch -** coverage build designed for testing the test suite only. No application -** should ever ship with VDBE branch coverage measuring turned on. -** -** VdbeCoverage(v) // Mark the previously coded instruction -** // as a branch -** -** VdbeCoverageIf(v, conditional) // Mark previous if conditional true -** -** VdbeCoverageAlwaysTaken(v) // Previous branch is always taken -** -** VdbeCoverageNeverTaken(v) // Previous branch is never taken -** -** Every VDBE branch operation must be tagged with one of the macros above. -** If not, then when "make test" is run with -DSQLITE_VDBE_COVERAGE and -** -DSQLITE_DEBUG then an ALWAYS() will fail in the vdbeTakeBranch() -** routine in vdbe.c, alerting the developer to the missed tag. -*/ -#ifdef SQLITE_VDBE_COVERAGE -SQLITE_PRIVATE void sqlite3VdbeSetLineNumber(Vdbe*,int); -# define VdbeCoverage(v) sqlite3VdbeSetLineNumber(v,__LINE__) -# define VdbeCoverageIf(v,x) if(x)sqlite3VdbeSetLineNumber(v,__LINE__) -# define VdbeCoverageAlwaysTaken(v) sqlite3VdbeSetLineNumber(v,2); -# define VdbeCoverageNeverTaken(v) sqlite3VdbeSetLineNumber(v,1); -# define VDBE_OFFSET_LINENO(x) (__LINE__+x) -#else -# define VdbeCoverage(v) -# define VdbeCoverageIf(v,x) -# define VdbeCoverageAlwaysTaken(v) -# define VdbeCoverageNeverTaken(v) -# define VDBE_OFFSET_LINENO(x) 0 -#endif - -#endif - -/************** End of vdbe.h ************************************************/ -/************** Continuing where we left off in sqliteInt.h ******************/ -/************** Include pager.h in the middle of sqliteInt.h *****************/ -/************** Begin file pager.h *******************************************/ -/* -** 2001 September 15 -** -** The author disclaims copyright to this source code. In place of -** a legal notice, here is a blessing: -** -** May you do good and not evil. -** May you find forgiveness for yourself and forgive others. -** May you share freely, never taking more than you give. -** -************************************************************************* -** This header file defines the interface that the sqlite page cache -** subsystem. The page cache subsystem reads and writes a file a page -** at a time and provides a journal for rollback. -*/ - -#ifndef _PAGER_H_ -#define _PAGER_H_ - -/* -** Default maximum size for persistent journal files. A negative -** value means no limit. This value may be overridden using the -** sqlite3PagerJournalSizeLimit() API. See also "PRAGMA journal_size_limit". -*/ -#ifndef SQLITE_DEFAULT_JOURNAL_SIZE_LIMIT - #define SQLITE_DEFAULT_JOURNAL_SIZE_LIMIT -1 -#endif - -/* -** The type used to represent a page number. The first page in a file -** is called page 1. 0 is used to represent "not a page". 
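/* Illustrative sketch (not part of the original header): the usual call
** pattern for the VdbeCoverage() and VdbeComment() macros defined above.
** The function name and arguments are invented; each coded branch is tagged
** immediately after the opcode that creates it.
*/
static void codeIntegerCheck(Vdbe *v, int iReg, int addrFail){
  sqlite3VdbeAddOp2(v, OP_MustBeInt, iReg, addrFail);
  VdbeCoverage(v);    /* tag the branch instruction just coded */
  VdbeComment((v, "reg[%d] must be an integer", iReg));
}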
-*/ -typedef u32 Pgno; - -/* -** Each open file is managed by a separate instance of the "Pager" structure. -*/ -typedef struct Pager Pager; - -/* -** Handle type for pages. -*/ -typedef struct PgHdr DbPage; - -/* -** Page number PAGER_MJ_PGNO is never used in an SQLite database (it is -** reserved for working around a windows/posix incompatibility). It is -** used in the journal to signify that the remainder of the journal file -** is devoted to storing a master journal name - there are no more pages to -** roll back. See comments for function writeMasterJournal() in pager.c -** for details. -*/ -#define PAGER_MJ_PGNO(x) ((Pgno)((PENDING_BYTE/((x)->pageSize))+1)) - -/* -** Allowed values for the flags parameter to sqlite3PagerOpen(). -** -** NOTE: These values must match the corresponding BTREE_ values in btree.h. -*/ -#define PAGER_OMIT_JOURNAL 0x0001 /* Do not use a rollback journal */ -#define PAGER_MEMORY 0x0002 /* In-memory database */ - -/* -** Valid values for the second argument to sqlite3PagerLockingMode(). -*/ -#define PAGER_LOCKINGMODE_QUERY -1 -#define PAGER_LOCKINGMODE_NORMAL 0 -#define PAGER_LOCKINGMODE_EXCLUSIVE 1 - -/* -** Numeric constants that encode the journalmode. -*/ -#define PAGER_JOURNALMODE_QUERY (-1) /* Query the value of journalmode */ -#define PAGER_JOURNALMODE_DELETE 0 /* Commit by deleting journal file */ -#define PAGER_JOURNALMODE_PERSIST 1 /* Commit by zeroing journal header */ -#define PAGER_JOURNALMODE_OFF 2 /* Journal omitted. */ -#define PAGER_JOURNALMODE_TRUNCATE 3 /* Commit by truncating journal */ -#define PAGER_JOURNALMODE_MEMORY 4 /* In-memory journal file */ -#define PAGER_JOURNALMODE_WAL 5 /* Use write-ahead logging */ - -/* -** Flags that make up the mask passed to sqlite3PagerAcquire(). -*/ -#define PAGER_GET_NOCONTENT 0x01 /* Do not load data from disk */ -#define PAGER_GET_READONLY 0x02 /* Read-only page is acceptable */ - -/* -** Flags for sqlite3PagerSetFlags() -*/ -#define PAGER_SYNCHRONOUS_OFF 0x01 /* PRAGMA synchronous=OFF */ -#define PAGER_SYNCHRONOUS_NORMAL 0x02 /* PRAGMA synchronous=NORMAL */ -#define PAGER_SYNCHRONOUS_FULL 0x03 /* PRAGMA synchronous=FULL */ -#define PAGER_SYNCHRONOUS_MASK 0x03 /* Mask for three values above */ -#define PAGER_FULLFSYNC 0x04 /* PRAGMA fullfsync=ON */ -#define PAGER_CKPT_FULLFSYNC 0x08 /* PRAGMA checkpoint_fullfsync=ON */ -#define PAGER_CACHESPILL 0x10 /* PRAGMA cache_spill=ON */ -#define PAGER_FLAGS_MASK 0x1c /* All above except SYNCHRONOUS */ - -/* -** The remainder of this file contains the declarations of the functions -** that make up the Pager sub-system API. See source code comments for -** a detailed description of each routine. -*/ - -/* Open and close a Pager connection. */ -SQLITE_PRIVATE int sqlite3PagerOpen( - sqlite3_vfs*, - Pager **ppPager, - const char*, - int, - int, - int, - void(*)(DbPage*) -); -SQLITE_PRIVATE int sqlite3PagerClose(Pager *pPager); -SQLITE_PRIVATE int sqlite3PagerReadFileheader(Pager*, int, unsigned char*); - -/* Functions used to configure a Pager object. 
*/ -SQLITE_PRIVATE void sqlite3PagerSetBusyhandler(Pager*, int(*)(void *), void *); -SQLITE_PRIVATE int sqlite3PagerSetPagesize(Pager*, u32*, int); -SQLITE_PRIVATE int sqlite3PagerMaxPageCount(Pager*, int); -SQLITE_PRIVATE void sqlite3PagerSetCachesize(Pager*, int); -SQLITE_PRIVATE void sqlite3PagerSetMmapLimit(Pager *, sqlite3_int64); -SQLITE_PRIVATE void sqlite3PagerShrink(Pager*); -SQLITE_PRIVATE void sqlite3PagerSetFlags(Pager*,unsigned); -SQLITE_PRIVATE int sqlite3PagerLockingMode(Pager *, int); -SQLITE_PRIVATE int sqlite3PagerSetJournalMode(Pager *, int); -SQLITE_PRIVATE int sqlite3PagerGetJournalMode(Pager*); -SQLITE_PRIVATE int sqlite3PagerOkToChangeJournalMode(Pager*); -SQLITE_PRIVATE i64 sqlite3PagerJournalSizeLimit(Pager *, i64); -SQLITE_PRIVATE sqlite3_backup **sqlite3PagerBackupPtr(Pager*); - -/* Functions used to obtain and release page references. */ -SQLITE_PRIVATE int sqlite3PagerAcquire(Pager *pPager, Pgno pgno, DbPage **ppPage, int clrFlag); -#define sqlite3PagerGet(A,B,C) sqlite3PagerAcquire(A,B,C,0) -SQLITE_PRIVATE DbPage *sqlite3PagerLookup(Pager *pPager, Pgno pgno); -SQLITE_PRIVATE void sqlite3PagerRef(DbPage*); -SQLITE_PRIVATE void sqlite3PagerUnref(DbPage*); -SQLITE_PRIVATE void sqlite3PagerUnrefNotNull(DbPage*); - -/* Operations on page references. */ -SQLITE_PRIVATE int sqlite3PagerWrite(DbPage*); -SQLITE_PRIVATE void sqlite3PagerDontWrite(DbPage*); -SQLITE_PRIVATE int sqlite3PagerMovepage(Pager*,DbPage*,Pgno,int); -SQLITE_PRIVATE int sqlite3PagerPageRefcount(DbPage*); -SQLITE_PRIVATE void *sqlite3PagerGetData(DbPage *); -SQLITE_PRIVATE void *sqlite3PagerGetExtra(DbPage *); - -/* Functions used to manage pager transactions and savepoints. */ -SQLITE_PRIVATE void sqlite3PagerPagecount(Pager*, int*); -SQLITE_PRIVATE int sqlite3PagerBegin(Pager*, int exFlag, int); -SQLITE_PRIVATE int sqlite3PagerCommitPhaseOne(Pager*,const char *zMaster, int); -SQLITE_PRIVATE int sqlite3PagerExclusiveLock(Pager*); -SQLITE_PRIVATE int sqlite3PagerSync(Pager *pPager, const char *zMaster); -SQLITE_PRIVATE int sqlite3PagerCommitPhaseTwo(Pager*); -SQLITE_PRIVATE int sqlite3PagerRollback(Pager*); -SQLITE_PRIVATE int sqlite3PagerOpenSavepoint(Pager *pPager, int n); -SQLITE_PRIVATE int sqlite3PagerSavepoint(Pager *pPager, int op, int iSavepoint); -SQLITE_PRIVATE int sqlite3PagerSharedLock(Pager *pPager); - -#ifndef SQLITE_OMIT_WAL -SQLITE_PRIVATE int sqlite3PagerCheckpoint(Pager *pPager, int, int*, int*); -SQLITE_PRIVATE int sqlite3PagerWalSupported(Pager *pPager); -SQLITE_PRIVATE int sqlite3PagerWalCallback(Pager *pPager); -SQLITE_PRIVATE int sqlite3PagerOpenWal(Pager *pPager, int *pisOpen); -SQLITE_PRIVATE int sqlite3PagerCloseWal(Pager *pPager); -#endif - -#ifdef SQLITE_ENABLE_ZIPVFS -SQLITE_PRIVATE int sqlite3PagerWalFramesize(Pager *pPager); -#endif - -/* Functions used to query pager state and configuration. 
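/* Illustrative sketch (not part of the original header): the acquire/release
** discipline implied by the page-reference functions declared above. Every
** page obtained through sqlite3PagerGet()/sqlite3PagerAcquire() must later be
** released with sqlite3PagerUnref(). The function name is invented and error
** handling is abbreviated.
*/
static int readPageOne(Pager *pPager){
  DbPage *pPg = 0;
  int rc = sqlite3PagerSharedLock(pPager);      /* obtain a read lock first */
  if( rc==SQLITE_OK ){
    rc = sqlite3PagerGet(pPager, 1, &pPg);      /* pin page 1 */
  }
  if( rc==SQLITE_OK ){
    unsigned char *aData = (unsigned char*)sqlite3PagerGetData(pPg);
    (void)aData;                                /* ... examine the page ... */
    sqlite3PagerUnref(pPg);                     /* one unref per successful get */
  }
  return rc;
}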
*/ -SQLITE_PRIVATE u8 sqlite3PagerIsreadonly(Pager*); -SQLITE_PRIVATE int sqlite3PagerRefcount(Pager*); -SQLITE_PRIVATE int sqlite3PagerMemUsed(Pager*); -SQLITE_PRIVATE const char *sqlite3PagerFilename(Pager*, int); -SQLITE_PRIVATE const sqlite3_vfs *sqlite3PagerVfs(Pager*); -SQLITE_PRIVATE sqlite3_file *sqlite3PagerFile(Pager*); -SQLITE_PRIVATE const char *sqlite3PagerJournalname(Pager*); -SQLITE_PRIVATE int sqlite3PagerNosync(Pager*); -SQLITE_PRIVATE void *sqlite3PagerTempSpace(Pager*); -SQLITE_PRIVATE int sqlite3PagerIsMemdb(Pager*); -SQLITE_PRIVATE void sqlite3PagerCacheStat(Pager *, int, int, int *); -SQLITE_PRIVATE void sqlite3PagerClearCache(Pager *); -SQLITE_PRIVATE int sqlite3SectorSize(sqlite3_file *); - -/* Functions used to truncate the database file. */ -SQLITE_PRIVATE void sqlite3PagerTruncateImage(Pager*,Pgno); - -#if defined(SQLITE_HAS_CODEC) && !defined(SQLITE_OMIT_WAL) -SQLITE_PRIVATE void *sqlite3PagerCodec(DbPage *); -#endif - -/* Functions to support testing and debugging. */ -#if !defined(NDEBUG) || defined(SQLITE_TEST) -SQLITE_PRIVATE Pgno sqlite3PagerPagenumber(DbPage*); -SQLITE_PRIVATE int sqlite3PagerIswriteable(DbPage*); -#endif -#ifdef SQLITE_TEST -SQLITE_PRIVATE int *sqlite3PagerStats(Pager*); -SQLITE_PRIVATE void sqlite3PagerRefdump(Pager*); - void disable_simulated_io_errors(void); - void enable_simulated_io_errors(void); -#else -# define disable_simulated_io_errors() -# define enable_simulated_io_errors() -#endif - -#endif /* _PAGER_H_ */ - -/************** End of pager.h ***********************************************/ -/************** Continuing where we left off in sqliteInt.h ******************/ -/************** Include pcache.h in the middle of sqliteInt.h ****************/ -/************** Begin file pcache.h ******************************************/ -/* -** 2008 August 05 -** -** The author disclaims copyright to this source code. In place of -** a legal notice, here is a blessing: -** -** May you do good and not evil. -** May you find forgiveness for yourself and forgive others. -** May you share freely, never taking more than you give. -** -************************************************************************* -** This header file defines the interface that the sqlite page cache -** subsystem. -*/ - -#ifndef _PCACHE_H_ - -typedef struct PgHdr PgHdr; -typedef struct PCache PCache; - -/* -** Every page in the cache is controlled by an instance of the following -** structure. -*/ -struct PgHdr { - sqlite3_pcache_page *pPage; /* Pcache object page handle */ - void *pData; /* Page data */ - void *pExtra; /* Extra content */ - PgHdr *pDirty; /* Transient list of dirty pages */ - Pager *pPager; /* The pager this page is part of */ - Pgno pgno; /* Page number for this page */ -#ifdef SQLITE_CHECK_PAGES - u32 pageHash; /* Hash of page content */ -#endif - u16 flags; /* PGHDR flags defined below */ - - /********************************************************************** - ** Elements above are public. All that follows is private to pcache.c - ** and should not be accessed by other modules. 
- */ - i16 nRef; /* Number of users of this page */ - PCache *pCache; /* Cache that owns this page */ - - PgHdr *pDirtyNext; /* Next element in list of dirty pages */ - PgHdr *pDirtyPrev; /* Previous element in list of dirty pages */ -}; - -/* Bit values for PgHdr.flags */ -#define PGHDR_DIRTY 0x002 /* Page has changed */ -#define PGHDR_NEED_SYNC 0x004 /* Fsync the rollback journal before - ** writing this page to the database */ -#define PGHDR_NEED_READ 0x008 /* Content is unread */ -#define PGHDR_REUSE_UNLIKELY 0x010 /* A hint that reuse is unlikely */ -#define PGHDR_DONT_WRITE 0x020 /* Do not write content to disk */ - -#define PGHDR_MMAP 0x040 /* This is an mmap page object */ - -/* Initialize and shutdown the page cache subsystem */ -SQLITE_PRIVATE int sqlite3PcacheInitialize(void); -SQLITE_PRIVATE void sqlite3PcacheShutdown(void); - -/* Page cache buffer management: -** These routines implement SQLITE_CONFIG_PAGECACHE. -*/ -SQLITE_PRIVATE void sqlite3PCacheBufferSetup(void *, int sz, int n); - -/* Create a new pager cache. -** Under memory stress, invoke xStress to try to make pages clean. -** Only clean and unpinned pages can be reclaimed. -*/ -SQLITE_PRIVATE void sqlite3PcacheOpen( - int szPage, /* Size of every page */ - int szExtra, /* Extra space associated with each page */ - int bPurgeable, /* True if pages are on backing store */ - int (*xStress)(void*, PgHdr*), /* Call to try to make pages clean */ - void *pStress, /* Argument to xStress */ - PCache *pToInit /* Preallocated space for the PCache */ -); - -/* Modify the page-size after the cache has been created. */ -SQLITE_PRIVATE void sqlite3PcacheSetPageSize(PCache *, int); - -/* Return the size in bytes of a PCache object. Used to preallocate -** storage space. -*/ -SQLITE_PRIVATE int sqlite3PcacheSize(void); - -/* One release per successful fetch. Page is pinned until released. -** Reference counted. -*/ -SQLITE_PRIVATE int sqlite3PcacheFetch(PCache*, Pgno, int createFlag, PgHdr**); -SQLITE_PRIVATE void sqlite3PcacheRelease(PgHdr*); - -SQLITE_PRIVATE void sqlite3PcacheDrop(PgHdr*); /* Remove page from cache */ -SQLITE_PRIVATE void sqlite3PcacheMakeDirty(PgHdr*); /* Make sure page is marked dirty */ -SQLITE_PRIVATE void sqlite3PcacheMakeClean(PgHdr*); /* Mark a single page as clean */ -SQLITE_PRIVATE void sqlite3PcacheCleanAll(PCache*); /* Mark all dirty list pages as clean */ - -/* Change a page number. Used by incr-vacuum. */ -SQLITE_PRIVATE void sqlite3PcacheMove(PgHdr*, Pgno); - -/* Remove all pages with pgno>x. 
Reset the cache if x==0 */ -SQLITE_PRIVATE void sqlite3PcacheTruncate(PCache*, Pgno x); - -/* Get a list of all dirty pages in the cache, sorted by page number */ -SQLITE_PRIVATE PgHdr *sqlite3PcacheDirtyList(PCache*); - -/* Reset and close the cache object */ -SQLITE_PRIVATE void sqlite3PcacheClose(PCache*); - -/* Clear flags from pages of the page cache */ -SQLITE_PRIVATE void sqlite3PcacheClearSyncFlags(PCache *); - -/* Discard the contents of the cache */ -SQLITE_PRIVATE void sqlite3PcacheClear(PCache*); - -/* Return the total number of outstanding page references */ -SQLITE_PRIVATE int sqlite3PcacheRefCount(PCache*); - -/* Increment the reference count of an existing page */ -SQLITE_PRIVATE void sqlite3PcacheRef(PgHdr*); - -SQLITE_PRIVATE int sqlite3PcachePageRefcount(PgHdr*); - -/* Return the total number of pages stored in the cache */ -SQLITE_PRIVATE int sqlite3PcachePagecount(PCache*); - -#if defined(SQLITE_CHECK_PAGES) || defined(SQLITE_DEBUG) -/* Iterate through all dirty pages currently stored in the cache. This -** interface is only available if SQLITE_CHECK_PAGES is defined when the -** library is built. -*/ -SQLITE_PRIVATE void sqlite3PcacheIterateDirty(PCache *pCache, void (*xIter)(PgHdr *)); -#endif - -/* Set and get the suggested cache-size for the specified pager-cache. -** -** If no global maximum is configured, then the system attempts to limit -** the total number of pages cached by purgeable pager-caches to the sum -** of the suggested cache-sizes. -*/ -SQLITE_PRIVATE void sqlite3PcacheSetCachesize(PCache *, int); -#ifdef SQLITE_TEST -SQLITE_PRIVATE int sqlite3PcacheGetCachesize(PCache *); -#endif - -/* Free up as much memory as possible from the page cache */ -SQLITE_PRIVATE void sqlite3PcacheShrink(PCache*); - -#ifdef SQLITE_ENABLE_MEMORY_MANAGEMENT -/* Try to return memory used by the pcache module to the main memory heap */ -SQLITE_PRIVATE int sqlite3PcacheReleaseMemory(int); -#endif - -#ifdef SQLITE_TEST -SQLITE_PRIVATE void sqlite3PcacheStats(int*,int*,int*,int*); -#endif - -SQLITE_PRIVATE void sqlite3PCacheSetDefault(void); - -#endif /* _PCACHE_H_ */ - -/************** End of pcache.h **********************************************/ -/************** Continuing where we left off in sqliteInt.h ******************/ - -/************** Include os.h in the middle of sqliteInt.h ********************/ -/************** Begin file os.h **********************************************/ -/* -** 2001 September 16 -** -** The author disclaims copyright to this source code. In place of -** a legal notice, here is a blessing: -** -** May you do good and not evil. -** May you find forgiveness for yourself and forgive others. -** May you share freely, never taking more than you give. -** -****************************************************************************** -** -** This header file (together with is companion C source-code file -** "os.c") attempt to abstract the underlying operating system so that -** the SQLite library will work on both POSIX and windows systems. -** -** This header file is #include-ed by sqliteInt.h and thus ends up -** being included by every source file. -*/ -#ifndef _SQLITE_OS_H_ -#define _SQLITE_OS_H_ - -/* -** Attempt to automatically detect the operating system and setup the -** necessary pre-processor macros for it. 
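/* Illustrative sketch (not part of the original header): the "one release per
** successful fetch" rule for the page-cache interface declared above. The
** function name is invented for this example.
*/
static int dirtyOnePage(PCache *pCache, Pgno pgno){
  PgHdr *pPg = 0;
  int rc = sqlite3PcacheFetch(pCache, pgno, 1, &pPg);  /* create if missing */
  if( rc==SQLITE_OK ){
    sqlite3PcacheMakeDirty(pPg);   /* page now needs to be written back */
    sqlite3PcacheRelease(pPg);     /* balance the successful fetch */
  }
  return rc;
}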
-*/ -/************** Include os_setup.h in the middle of os.h *********************/ -/************** Begin file os_setup.h ****************************************/ -/* -** 2013 November 25 -** -** The author disclaims copyright to this source code. In place of -** a legal notice, here is a blessing: -** -** May you do good and not evil. -** May you find forgiveness for yourself and forgive others. -** May you share freely, never taking more than you give. -** -****************************************************************************** -** -** This file contains pre-processor directives related to operating system -** detection and/or setup. -*/ -#ifndef _OS_SETUP_H_ -#define _OS_SETUP_H_ - -/* -** Figure out if we are dealing with Unix, Windows, or some other operating -** system. -** -** After the following block of preprocess macros, all of SQLITE_OS_UNIX, -** SQLITE_OS_WIN, and SQLITE_OS_OTHER will defined to either 1 or 0. One of -** the three will be 1. The other two will be 0. -*/ -#if defined(SQLITE_OS_OTHER) -# if SQLITE_OS_OTHER==1 -# undef SQLITE_OS_UNIX -# define SQLITE_OS_UNIX 0 -# undef SQLITE_OS_WIN -# define SQLITE_OS_WIN 0 -# else -# undef SQLITE_OS_OTHER -# endif -#endif -#if !defined(SQLITE_OS_UNIX) && !defined(SQLITE_OS_OTHER) -# define SQLITE_OS_OTHER 0 -# ifndef SQLITE_OS_WIN -# if defined(_WIN32) || defined(WIN32) || defined(__CYGWIN__) || \ - defined(__MINGW32__) || defined(__BORLANDC__) -# define SQLITE_OS_WIN 1 -# define SQLITE_OS_UNIX 0 -# else -# define SQLITE_OS_WIN 0 -# define SQLITE_OS_UNIX 1 -# endif -# else -# define SQLITE_OS_UNIX 0 -# endif -#else -# ifndef SQLITE_OS_WIN -# define SQLITE_OS_WIN 0 -# endif -#endif - -#endif /* _OS_SETUP_H_ */ - -/************** End of os_setup.h ********************************************/ -/************** Continuing where we left off in os.h *************************/ - -/* If the SET_FULLSYNC macro is not defined above, then make it -** a no-op -*/ -#ifndef SET_FULLSYNC -# define SET_FULLSYNC(x,y) -#endif - -/* -** The default size of a disk sector -*/ -#ifndef SQLITE_DEFAULT_SECTOR_SIZE -# define SQLITE_DEFAULT_SECTOR_SIZE 4096 -#endif - -/* -** Temporary files are named starting with this prefix followed by 16 random -** alphanumeric characters, and no file extension. They are stored in the -** OS's standard temporary file directory, and are deleted prior to exit. -** If sqlite is being embedded in another program, you may wish to change the -** prefix to reflect your program's name, so that if your program exits -** prematurely, old temporary files can be easily identified. This can be done -** using -DSQLITE_TEMP_FILE_PREFIX=myprefix_ on the compiler command line. -** -** 2006-10-31: The default prefix used to be "sqlite_". But then -** Mcafee started using SQLite in their anti-virus product and it -** started putting files with the "sqlite" name in the c:/temp folder. -** This annoyed many windows users. Those users would then do a -** Google search for "sqlite", find the telephone numbers of the -** developers and call to wake them up at night and complain. -** For this reason, the default name prefix is changed to be "sqlite" -** spelled backwards. So the temp files are still identified, but -** anybody smart enough to figure out the code is also likely smart -** enough to know that calling the developer will not help get rid -** of the file. 
-*/ -#ifndef SQLITE_TEMP_FILE_PREFIX -# define SQLITE_TEMP_FILE_PREFIX "etilqs_" -#endif - -/* -** The following values may be passed as the second argument to -** sqlite3OsLock(). The various locks exhibit the following semantics: -** -** SHARED: Any number of processes may hold a SHARED lock simultaneously. -** RESERVED: A single process may hold a RESERVED lock on a file at -** any time. Other processes may hold and obtain new SHARED locks. -** PENDING: A single process may hold a PENDING lock on a file at -** any one time. Existing SHARED locks may persist, but no new -** SHARED locks may be obtained by other processes. -** EXCLUSIVE: An EXCLUSIVE lock precludes all other locks. -** -** PENDING_LOCK may not be passed directly to sqlite3OsLock(). Instead, a -** process that requests an EXCLUSIVE lock may actually obtain a PENDING -** lock. This can be upgraded to an EXCLUSIVE lock by a subsequent call to -** sqlite3OsLock(). -*/ -#define NO_LOCK 0 -#define SHARED_LOCK 1 -#define RESERVED_LOCK 2 -#define PENDING_LOCK 3 -#define EXCLUSIVE_LOCK 4 - -/* -** File Locking Notes: (Mostly about windows but also some info for Unix) -** -** We cannot use LockFileEx() or UnlockFileEx() on Win95/98/ME because -** those functions are not available. So we use only LockFile() and -** UnlockFile(). -** -** LockFile() prevents not just writing but also reading by other processes. -** A SHARED_LOCK is obtained by locking a single randomly-chosen -** byte out of a specific range of bytes. The lock byte is obtained at -** random so two separate readers can probably access the file at the -** same time, unless they are unlucky and choose the same lock byte. -** An EXCLUSIVE_LOCK is obtained by locking all bytes in the range. -** There can only be one writer. A RESERVED_LOCK is obtained by locking -** a single byte of the file that is designated as the reserved lock byte. -** A PENDING_LOCK is obtained by locking a designated byte different from -** the RESERVED_LOCK byte. -** -** On WinNT/2K/XP systems, LockFileEx() and UnlockFileEx() are available, -** which means we can use reader/writer locks. When reader/writer locks -** are used, the lock is placed on the same range of bytes that is used -** for probabilistic locking in Win95/98/ME. Hence, the locking scheme -** will support two or more Win95 readers or two or more WinNT readers. -** But a single Win95 reader will lock out all WinNT readers and a single -** WinNT reader will lock out all other Win95 readers. -** -** The following #defines specify the range of bytes used for locking. -** SHARED_SIZE is the number of bytes available in the pool from which -** a random byte is selected for a shared lock. The pool of bytes for -** shared locks begins at SHARED_FIRST. -** -** The same locking strategy and -** byte ranges are used for Unix. This leaves open the possiblity of having -** clients on win95, winNT, and unix all talking to the same shared file -** and all locking correctly. To do so would require that samba (or whatever -** tool is being used for file sharing) implements locks correctly between -** windows and unix. I'm guessing that isn't likely to happen, but by -** using the same locking range we are at least open to the possibility. -** -** Locking in windows is manditory. For this reason, we cannot store -** actual data in the bytes used for locking. The pager never allocates -** the pages involved in locking therefore. SHARED_SIZE is selected so -** that all locks will fit on a single page even at the minimum page size. 
-** PENDING_BYTE defines the beginning of the locks. By default PENDING_BYTE -** is set high so that we don't have to allocate an unused page except -** for very large databases. But one should test the page skipping logic -** by setting PENDING_BYTE low and running the entire regression suite. -** -** Changing the value of PENDING_BYTE results in a subtly incompatible -** file format. Depending on how it is changed, you might not notice -** the incompatibility right away, even running a full regression test. -** The default location of PENDING_BYTE is the first byte past the -** 1GB boundary. -** -*/ -#ifdef SQLITE_OMIT_WSD -# define PENDING_BYTE (0x40000000) -#else -# define PENDING_BYTE sqlite3PendingByte -#endif -#define RESERVED_BYTE (PENDING_BYTE+1) -#define SHARED_FIRST (PENDING_BYTE+2) -#define SHARED_SIZE 510 - -/* -** Wrapper around OS specific sqlite3_os_init() function. -*/ -SQLITE_PRIVATE int sqlite3OsInit(void); - -/* -** Functions for accessing sqlite3_file methods -*/ -SQLITE_PRIVATE int sqlite3OsClose(sqlite3_file*); -SQLITE_PRIVATE int sqlite3OsRead(sqlite3_file*, void*, int amt, i64 offset); -SQLITE_PRIVATE int sqlite3OsWrite(sqlite3_file*, const void*, int amt, i64 offset); -SQLITE_PRIVATE int sqlite3OsTruncate(sqlite3_file*, i64 size); -SQLITE_PRIVATE int sqlite3OsSync(sqlite3_file*, int); -SQLITE_PRIVATE int sqlite3OsFileSize(sqlite3_file*, i64 *pSize); -SQLITE_PRIVATE int sqlite3OsLock(sqlite3_file*, int); -SQLITE_PRIVATE int sqlite3OsUnlock(sqlite3_file*, int); -SQLITE_PRIVATE int sqlite3OsCheckReservedLock(sqlite3_file *id, int *pResOut); -SQLITE_PRIVATE int sqlite3OsFileControl(sqlite3_file*,int,void*); -SQLITE_PRIVATE void sqlite3OsFileControlHint(sqlite3_file*,int,void*); -#define SQLITE_FCNTL_DB_UNCHANGED 0xca093fa0 -SQLITE_PRIVATE int sqlite3OsSectorSize(sqlite3_file *id); -SQLITE_PRIVATE int sqlite3OsDeviceCharacteristics(sqlite3_file *id); -SQLITE_PRIVATE int sqlite3OsShmMap(sqlite3_file *,int,int,int,void volatile **); -SQLITE_PRIVATE int sqlite3OsShmLock(sqlite3_file *id, int, int, int); -SQLITE_PRIVATE void sqlite3OsShmBarrier(sqlite3_file *id); -SQLITE_PRIVATE int sqlite3OsShmUnmap(sqlite3_file *id, int); -SQLITE_PRIVATE int sqlite3OsFetch(sqlite3_file *id, i64, int, void **); -SQLITE_PRIVATE int sqlite3OsUnfetch(sqlite3_file *, i64, void *); - - -/* -** Functions for accessing sqlite3_vfs methods -*/ -SQLITE_PRIVATE int sqlite3OsOpen(sqlite3_vfs *, const char *, sqlite3_file*, int, int *); -SQLITE_PRIVATE int sqlite3OsDelete(sqlite3_vfs *, const char *, int); -SQLITE_PRIVATE int sqlite3OsAccess(sqlite3_vfs *, const char *, int, int *pResOut); -SQLITE_PRIVATE int sqlite3OsFullPathname(sqlite3_vfs *, const char *, int, char *); -#ifndef SQLITE_OMIT_LOAD_EXTENSION -SQLITE_PRIVATE void *sqlite3OsDlOpen(sqlite3_vfs *, const char *); -SQLITE_PRIVATE void sqlite3OsDlError(sqlite3_vfs *, int, char *); -SQLITE_PRIVATE void (*sqlite3OsDlSym(sqlite3_vfs *, void *, const char *))(void); -SQLITE_PRIVATE void sqlite3OsDlClose(sqlite3_vfs *, void *); -#endif /* SQLITE_OMIT_LOAD_EXTENSION */ -SQLITE_PRIVATE int sqlite3OsRandomness(sqlite3_vfs *, int, char *); -SQLITE_PRIVATE int sqlite3OsSleep(sqlite3_vfs *, int); -SQLITE_PRIVATE int sqlite3OsCurrentTimeInt64(sqlite3_vfs *, sqlite3_int64*); - -/* -** Convenience functions for opening and closing files using -** sqlite3_malloc() to obtain space for the file-handle structure. 
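/* Illustrative worked example (not part of the original header) of the lock
** byte layout described above, assuming the default PENDING_BYTE of
** 0x40000000 (the first byte past the 1GB boundary). The function name is
** invented.
*/
static void lockByteLayoutExample(void){
  i64 iPending = 0x40000000;                    /* default PENDING_BYTE       */
  i64 iReserved = iPending + 1;                 /* RESERVED_BYTE: 0x40000001  */
  i64 iSharedFirst = iPending + 2;              /* SHARED_FIRST:  0x40000002  */
  i64 iSharedEnd = iSharedFirst + SHARED_SIZE;  /* one past last shared byte  */
  assert( iReserved==0x40000001 && iSharedFirst==0x40000002 );
  assert( iSharedEnd - iPending == 512 );  /* the whole lock range fits on a
                                           ** single page even at the minimum
                                           ** 512-byte page size */
}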
-*/ -SQLITE_PRIVATE int sqlite3OsOpenMalloc(sqlite3_vfs *, const char *, sqlite3_file **, int,int*); -SQLITE_PRIVATE int sqlite3OsCloseFree(sqlite3_file *); - -#endif /* _SQLITE_OS_H_ */ - -/************** End of os.h **************************************************/ -/************** Continuing where we left off in sqliteInt.h ******************/ -/************** Include mutex.h in the middle of sqliteInt.h *****************/ -/************** Begin file mutex.h *******************************************/ -/* -** 2007 August 28 -** -** The author disclaims copyright to this source code. In place of -** a legal notice, here is a blessing: -** -** May you do good and not evil. -** May you find forgiveness for yourself and forgive others. -** May you share freely, never taking more than you give. -** -************************************************************************* -** -** This file contains the common header for all mutex implementations. -** The sqliteInt.h header #includes this file so that it is available -** to all source files. We break it out in an effort to keep the code -** better organized. -** -** NOTE: source files should *not* #include this header file directly. -** Source files should #include the sqliteInt.h file and let that file -** include this one indirectly. -*/ - - -/* -** Figure out what version of the code to use. The choices are -** -** SQLITE_MUTEX_OMIT No mutex logic. Not even stubs. The -** mutexes implemention cannot be overridden -** at start-time. -** -** SQLITE_MUTEX_NOOP For single-threaded applications. No -** mutual exclusion is provided. But this -** implementation can be overridden at -** start-time. -** -** SQLITE_MUTEX_PTHREADS For multi-threaded applications on Unix. -** -** SQLITE_MUTEX_W32 For multi-threaded applications on Win32. -*/ -#if !SQLITE_THREADSAFE -# define SQLITE_MUTEX_OMIT -#endif -#if SQLITE_THREADSAFE && !defined(SQLITE_MUTEX_NOOP) -# if SQLITE_OS_UNIX -# define SQLITE_MUTEX_PTHREADS -# elif SQLITE_OS_WIN -# define SQLITE_MUTEX_W32 -# else -# define SQLITE_MUTEX_NOOP -# endif -#endif - -#ifdef SQLITE_MUTEX_OMIT -/* -** If this is a no-op implementation, implement everything as macros. -*/ -#define sqlite3_mutex_alloc(X) ((sqlite3_mutex*)8) -#define sqlite3_mutex_free(X) -#define sqlite3_mutex_enter(X) -#define sqlite3_mutex_try(X) SQLITE_OK -#define sqlite3_mutex_leave(X) -#define sqlite3_mutex_held(X) ((void)(X),1) -#define sqlite3_mutex_notheld(X) ((void)(X),1) -#define sqlite3MutexAlloc(X) ((sqlite3_mutex*)8) -#define sqlite3MutexInit() SQLITE_OK -#define sqlite3MutexEnd() -#define MUTEX_LOGIC(X) -#else -#define MUTEX_LOGIC(X) X -#endif /* defined(SQLITE_MUTEX_OMIT) */ - -/************** End of mutex.h ***********************************************/ -/************** Continuing where we left off in sqliteInt.h ******************/ - - -/* -** Each database file to be accessed by the system is an instance -** of the following structure. There are normally two of these structures -** in the sqlite.aDb[] array. aDb[0] is the main database file and -** aDb[1] is the database file used to hold temporary tables. Additional -** databases may be attached. -*/ -struct Db { - char *zName; /* Name of this database */ - Btree *pBt; /* The B*Tree structure for this database file */ - u8 safety_level; /* How aggressive at syncing data to disk */ - Schema *pSchema; /* Pointer to database schema (possibly shared) */ -}; - -/* -** An instance of the following structure stores a database schema. 
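/* Illustrative sketch (not part of the original header): locating an entry in
** the sqlite3.aDb[] array described above by database name; aDb[0] is always
** the main database and aDb[1] the TEMP database. The function name is
** invented, it relies on the sqlite3 structure defined further below, and
** sqlite3StrICmp() is SQLite's case-insensitive compare declared elsewhere
** in this file.
*/
static int findDbByName(sqlite3 *db, const char *zName){
  int i;
  for(i=0; i<db->nDb; i++){
    if( sqlite3StrICmp(db->aDb[i].zName, zName)==0 ) return i;
  }
  return -1;   /* no attached database with this name */
}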
-** -** Most Schema objects are associated with a Btree. The exception is -** the Schema for the TEMP databaes (sqlite3.aDb[1]) which is free-standing. -** In shared cache mode, a single Schema object can be shared by multiple -** Btrees that refer to the same underlying BtShared object. -** -** Schema objects are automatically deallocated when the last Btree that -** references them is destroyed. The TEMP Schema is manually freed by -** sqlite3_close(). -* -** A thread must be holding a mutex on the corresponding Btree in order -** to access Schema content. This implies that the thread must also be -** holding a mutex on the sqlite3 connection pointer that owns the Btree. -** For a TEMP Schema, only the connection mutex is required. -*/ -struct Schema { - int schema_cookie; /* Database schema version number for this file */ - int iGeneration; /* Generation counter. Incremented with each change */ - Hash tblHash; /* All tables indexed by name */ - Hash idxHash; /* All (named) indices indexed by name */ - Hash trigHash; /* All triggers indexed by name */ - Hash fkeyHash; /* All foreign keys by referenced table name */ - Table *pSeqTab; /* The sqlite_sequence table used by AUTOINCREMENT */ - u8 file_format; /* Schema format version for this file */ - u8 enc; /* Text encoding used by this database */ - u16 flags; /* Flags associated with this schema */ - int cache_size; /* Number of pages to use in the cache */ -}; - -/* -** These macros can be used to test, set, or clear bits in the -** Db.pSchema->flags field. -*/ -#define DbHasProperty(D,I,P) (((D)->aDb[I].pSchema->flags&(P))==(P)) -#define DbHasAnyProperty(D,I,P) (((D)->aDb[I].pSchema->flags&(P))!=0) -#define DbSetProperty(D,I,P) (D)->aDb[I].pSchema->flags|=(P) -#define DbClearProperty(D,I,P) (D)->aDb[I].pSchema->flags&=~(P) - -/* -** Allowed values for the DB.pSchema->flags field. -** -** The DB_SchemaLoaded flag is set after the database schema has been -** read into internal hash tables. -** -** DB_UnresetViews means that one or more views have column names that -** have been filled out. If the schema changes, these column names might -** changes and so the view will need to be reset. -*/ -#define DB_SchemaLoaded 0x0001 /* The schema has been loaded */ -#define DB_UnresetViews 0x0002 /* Some views have defined column names */ -#define DB_Empty 0x0004 /* The file is empty (length 0 bytes) */ - -/* -** The number of different kinds of things that can be limited -** using the sqlite3_limit() interface. -*/ -#define SQLITE_N_LIMIT (SQLITE_LIMIT_TRIGGER_DEPTH+1) - -/* -** Lookaside malloc is a set of fixed-size buffers that can be used -** to satisfy small transient memory allocation requests for objects -** associated with a particular database connection. The use of -** lookaside malloc provides a significant performance enhancement -** (approx 10%) by avoiding numerous malloc/free requests while parsing -** SQL statements. -** -** The Lookaside structure holds configuration information about the -** lookaside malloc subsystem. Each available memory allocation in -** the lookaside subsystem is stored on a linked list of LookasideSlot -** objects. -** -** Lookaside allocations are only allowed for objects that are associated -** with a particular database connection. Hence, schema information cannot -** be stored in lookaside because in shared cache mode the schema information -** is shared by multiple database connections. 
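/* Illustrative sketch (not part of the original header): typical use of the
** DbHasProperty()/DbSetProperty() macros defined above. The function name is
** invented; real callers follow this pattern while loading a database schema.
*/
static void markSchemaLoaded(sqlite3 *db, int iDb){
  if( !DbHasProperty(db, iDb, DB_SchemaLoaded) ){
    /* ... read the schema for database iDb into the hash tables ... */
    DbSetProperty(db, iDb, DB_SchemaLoaded);
  }
}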
Therefore, while parsing -** schema information, the Lookaside.bEnabled flag is cleared so that -** lookaside allocations are not used to construct the schema objects. -*/ -struct Lookaside { - u16 sz; /* Size of each buffer in bytes */ - u8 bEnabled; /* False to disable new lookaside allocations */ - u8 bMalloced; /* True if pStart obtained from sqlite3_malloc() */ - int nOut; /* Number of buffers currently checked out */ - int mxOut; /* Highwater mark for nOut */ - int anStat[3]; /* 0: hits. 1: size misses. 2: full misses */ - LookasideSlot *pFree; /* List of available buffers */ - void *pStart; /* First byte of available memory space */ - void *pEnd; /* First byte past end of available space */ -}; -struct LookasideSlot { - LookasideSlot *pNext; /* Next buffer in the list of free buffers */ -}; - -/* -** A hash table for function definitions. -** -** Hash each FuncDef structure into one of the FuncDefHash.a[] slots. -** Collisions are on the FuncDef.pHash chain. -*/ -struct FuncDefHash { - FuncDef *a[23]; /* Hash table for functions */ -}; - -/* -** Each database connection is an instance of the following structure. -*/ -struct sqlite3 { - sqlite3_vfs *pVfs; /* OS Interface */ - struct Vdbe *pVdbe; /* List of active virtual machines */ - CollSeq *pDfltColl; /* The default collating sequence (BINARY) */ - sqlite3_mutex *mutex; /* Connection mutex */ - Db *aDb; /* All backends */ - int nDb; /* Number of backends currently in use */ - int flags; /* Miscellaneous flags. See below */ - i64 lastRowid; /* ROWID of most recent insert (see above) */ - i64 szMmap; /* Default mmap_size setting */ - unsigned int openFlags; /* Flags passed to sqlite3_vfs.xOpen() */ - int errCode; /* Most recent error code (SQLITE_*) */ - int errMask; /* & result codes with this before returning */ - u16 dbOptFlags; /* Flags to enable/disable optimizations */ - u8 autoCommit; /* The auto-commit flag. 
*/ - u8 temp_store; /* 1: file 2: memory 0: default */ - u8 mallocFailed; /* True if we have seen a malloc failure */ - u8 dfltLockMode; /* Default locking-mode for attached dbs */ - signed char nextAutovac; /* Autovac setting after VACUUM if >=0 */ - u8 suppressErr; /* Do not issue error messages if true */ - u8 vtabOnConflict; /* Value to return for s3_vtab_on_conflict() */ - u8 isTransactionSavepoint; /* True if the outermost savepoint is a TS */ - int nextPagesize; /* Pagesize after VACUUM if >0 */ - u32 magic; /* Magic number for detect library misuse */ - int nChange; /* Value returned by sqlite3_changes() */ - int nTotalChange; /* Value returned by sqlite3_total_changes() */ - int aLimit[SQLITE_N_LIMIT]; /* Limits */ - struct sqlite3InitInfo { /* Information used during initialization */ - int newTnum; /* Rootpage of table being initialized */ - u8 iDb; /* Which db file is being initialized */ - u8 busy; /* TRUE if currently initializing */ - u8 orphanTrigger; /* Last statement is orphaned TEMP trigger */ - } init; - int nVdbeActive; /* Number of VDBEs currently running */ - int nVdbeRead; /* Number of active VDBEs that read or write */ - int nVdbeWrite; /* Number of active VDBEs that read and write */ - int nVdbeExec; /* Number of nested calls to VdbeExec() */ - int nExtension; /* Number of loaded extensions */ - void **aExtension; /* Array of shared library handles */ - void (*xTrace)(void*,const char*); /* Trace function */ - void *pTraceArg; /* Argument to the trace function */ - void (*xProfile)(void*,const char*,u64); /* Profiling function */ - void *pProfileArg; /* Argument to profile function */ - void *pCommitArg; /* Argument to xCommitCallback() */ - int (*xCommitCallback)(void*); /* Invoked at every commit. */ - void *pRollbackArg; /* Argument to xRollbackCallback() */ - void (*xRollbackCallback)(void*); /* Invoked at every commit. 
*/ - void *pUpdateArg; - void (*xUpdateCallback)(void*,int, const char*,const char*,sqlite_int64); -#ifndef SQLITE_OMIT_WAL - int (*xWalCallback)(void *, sqlite3 *, const char *, int); - void *pWalArg; -#endif - void(*xCollNeeded)(void*,sqlite3*,int eTextRep,const char*); - void(*xCollNeeded16)(void*,sqlite3*,int eTextRep,const void*); - void *pCollNeededArg; - sqlite3_value *pErr; /* Most recent error message */ - union { - volatile int isInterrupted; /* True if sqlite3_interrupt has been called */ - double notUsed1; /* Spacer */ - } u1; - Lookaside lookaside; /* Lookaside malloc configuration */ -#ifndef SQLITE_OMIT_AUTHORIZATION - int (*xAuth)(void*,int,const char*,const char*,const char*,const char*); - /* Access authorization function */ - void *pAuthArg; /* 1st argument to the access auth function */ -#endif -#ifndef SQLITE_OMIT_PROGRESS_CALLBACK - int (*xProgress)(void *); /* The progress callback */ - void *pProgressArg; /* Argument to the progress callback */ - unsigned nProgressOps; /* Number of opcodes for progress callback */ -#endif -#ifndef SQLITE_OMIT_VIRTUALTABLE - int nVTrans; /* Allocated size of aVTrans */ - Hash aModule; /* populated by sqlite3_create_module() */ - VtabCtx *pVtabCtx; /* Context for active vtab connect/create */ - VTable **aVTrans; /* Virtual tables with open transactions */ - VTable *pDisconnect; /* Disconnect these in next sqlite3_prepare() */ -#endif - FuncDefHash aFunc; /* Hash table of connection functions */ - Hash aCollSeq; /* All collating sequences */ - BusyHandler busyHandler; /* Busy callback */ - Db aDbStatic[2]; /* Static space for the 2 default backends */ - Savepoint *pSavepoint; /* List of active savepoints */ - int busyTimeout; /* Busy handler timeout, in msec */ - int nSavepoint; /* Number of non-transaction savepoints */ - int nStatement; /* Number of nested statement-transactions */ - i64 nDeferredCons; /* Net deferred constraints this transaction. */ - i64 nDeferredImmCons; /* Net deferred immediate constraints */ - int *pnBytesFreed; /* If not NULL, increment this in DbFree() */ - -#ifdef SQLITE_ENABLE_UNLOCK_NOTIFY - /* The following variables are all protected by the STATIC_MASTER - ** mutex, not by sqlite3.mutex. They are used by code in notify.c. - ** - ** When X.pUnlockConnection==Y, that means that X is waiting for Y to - ** unlock so that it can proceed. - ** - ** When X.pBlockingConnection==Y, that means that something that X tried - ** tried to do recently failed with an SQLITE_LOCKED error due to locks - ** held by Y. - */ - sqlite3 *pBlockingConnection; /* Connection that caused SQLITE_LOCKED */ - sqlite3 *pUnlockConnection; /* Connection to watch for unlock */ - void *pUnlockArg; /* Argument to xUnlockNotify */ - void (*xUnlockNotify)(void **, int); /* Unlock notify callback */ - sqlite3 *pNextBlocked; /* Next in list of all blocked connections */ -#endif -}; - -/* -** A macro to discover the encoding of a database. -*/ -#define ENC(db) ((db)->aDb[0].pSchema->enc) - -/* -** Possible values for the sqlite3.flags. 
-*/ -#define SQLITE_VdbeTrace 0x00000001 /* True to trace VDBE execution */ -#define SQLITE_InternChanges 0x00000002 /* Uncommitted Hash table changes */ -#define SQLITE_FullFSync 0x00000004 /* Use full fsync on the backend */ -#define SQLITE_CkptFullFSync 0x00000008 /* Use full fsync for checkpoint */ -#define SQLITE_CacheSpill 0x00000010 /* OK to spill pager cache */ -#define SQLITE_FullColNames 0x00000020 /* Show full column names on SELECT */ -#define SQLITE_ShortColNames 0x00000040 /* Show short columns names */ -#define SQLITE_CountRows 0x00000080 /* Count rows changed by INSERT, */ - /* DELETE, or UPDATE and return */ - /* the count using a callback. */ -#define SQLITE_NullCallback 0x00000100 /* Invoke the callback once if the */ - /* result set is empty */ -#define SQLITE_SqlTrace 0x00000200 /* Debug print SQL as it executes */ -#define SQLITE_VdbeListing 0x00000400 /* Debug listings of VDBE programs */ -#define SQLITE_WriteSchema 0x00000800 /* OK to update SQLITE_MASTER */ -#define SQLITE_VdbeAddopTrace 0x00001000 /* Trace sqlite3VdbeAddOp() calls */ -#define SQLITE_IgnoreChecks 0x00002000 /* Do not enforce check constraints */ -#define SQLITE_ReadUncommitted 0x0004000 /* For shared-cache mode */ -#define SQLITE_LegacyFileFmt 0x00008000 /* Create new databases in format 1 */ -#define SQLITE_RecoveryMode 0x00010000 /* Ignore schema errors */ -#define SQLITE_ReverseOrder 0x00020000 /* Reverse unordered SELECTs */ -#define SQLITE_RecTriggers 0x00040000 /* Enable recursive triggers */ -#define SQLITE_ForeignKeys 0x00080000 /* Enforce foreign key constraints */ -#define SQLITE_AutoIndex 0x00100000 /* Enable automatic indexes */ -#define SQLITE_PreferBuiltin 0x00200000 /* Preference to built-in funcs */ -#define SQLITE_LoadExtension 0x00400000 /* Enable load_extension */ -#define SQLITE_EnableTrigger 0x00800000 /* True to enable triggers */ -#define SQLITE_DeferFKs 0x01000000 /* Defer all FK constraints */ -#define SQLITE_QueryOnly 0x02000000 /* Disable database changes */ -#define SQLITE_VdbeEQP 0x04000000 /* Debug EXPLAIN QUERY PLAN */ - - -/* -** Bits of the sqlite3.dbOptFlags field that are used by the -** sqlite3_test_control(SQLITE_TESTCTRL_OPTIMIZATIONS,...) interface to -** selectively disable various optimizations. -*/ -#define SQLITE_QueryFlattener 0x0001 /* Query flattening */ -#define SQLITE_ColumnCache 0x0002 /* Column cache */ -#define SQLITE_GroupByOrder 0x0004 /* GROUPBY cover of ORDERBY */ -#define SQLITE_FactorOutConst 0x0008 /* Constant factoring */ -/* not used 0x0010 // Was: SQLITE_IdxRealAsInt */ -#define SQLITE_DistinctOpt 0x0020 /* DISTINCT using indexes */ -#define SQLITE_CoverIdxScan 0x0040 /* Covering index scans */ -#define SQLITE_OrderByIdxJoin 0x0080 /* ORDER BY of joins via index */ -#define SQLITE_SubqCoroutine 0x0100 /* Evaluate subqueries as coroutines */ -#define SQLITE_Transitive 0x0200 /* Transitive constraints */ -#define SQLITE_OmitNoopJoin 0x0400 /* Omit unused tables in joins */ -#define SQLITE_Stat3 0x0800 /* Use the SQLITE_STAT3 table */ -#define SQLITE_AdjustOutEst 0x1000 /* Adjust output estimates using WHERE */ -#define SQLITE_AllOpts 0xffff /* All optimizations */ - -/* -** Macros for testing whether or not optimizations are enabled or disabled. 
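/* Illustrative sketch (not part of the original header): the sqlite3.flags
** bits above are tested with a plain bitwise AND, for example to check
** whether foreign-key enforcement is currently enabled. The helper name is
** invented.
*/
static int foreignKeysEnabled(sqlite3 *db){
  return (db->flags & SQLITE_ForeignKeys)!=0;
}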
-*/ -#ifndef SQLITE_OMIT_BUILTIN_TEST -#define OptimizationDisabled(db, mask) (((db)->dbOptFlags&(mask))!=0) -#define OptimizationEnabled(db, mask) (((db)->dbOptFlags&(mask))==0) -#else -#define OptimizationDisabled(db, mask) 0 -#define OptimizationEnabled(db, mask) 1 -#endif - -/* -** Return true if it OK to factor constant expressions into the initialization -** code. The argument is a Parse object for the code generator. -*/ -#define ConstFactorOk(P) ((P)->okConstFactor) - -/* -** Possible values for the sqlite.magic field. -** The numbers are obtained at random and have no special meaning, other -** than being distinct from one another. -*/ -#define SQLITE_MAGIC_OPEN 0xa029a697 /* Database is open */ -#define SQLITE_MAGIC_CLOSED 0x9f3c2d33 /* Database is closed */ -#define SQLITE_MAGIC_SICK 0x4b771290 /* Error and awaiting close */ -#define SQLITE_MAGIC_BUSY 0xf03b7906 /* Database currently in use */ -#define SQLITE_MAGIC_ERROR 0xb5357930 /* An SQLITE_MISUSE error occurred */ -#define SQLITE_MAGIC_ZOMBIE 0x64cffc7f /* Close with last statement close */ - -/* -** Each SQL function is defined by an instance of the following -** structure. A pointer to this structure is stored in the sqlite.aFunc -** hash table. When multiple functions have the same name, the hash table -** points to a linked list of these structures. -*/ -struct FuncDef { - i16 nArg; /* Number of arguments. -1 means unlimited */ - u16 funcFlags; /* Some combination of SQLITE_FUNC_* */ - void *pUserData; /* User data parameter */ - FuncDef *pNext; /* Next function with same name */ - void (*xFunc)(sqlite3_context*,int,sqlite3_value**); /* Regular function */ - void (*xStep)(sqlite3_context*,int,sqlite3_value**); /* Aggregate step */ - void (*xFinalize)(sqlite3_context*); /* Aggregate finalizer */ - char *zName; /* SQL name of the function. */ - FuncDef *pHash; /* Next with a different name but the same hash */ - FuncDestructor *pDestructor; /* Reference counted destructor function */ -}; - -/* -** This structure encapsulates a user-function destructor callback (as -** configured using create_function_v2()) and a reference counter. When -** create_function_v2() is called to create a function with a destructor, -** a single object of this type is allocated. FuncDestructor.nRef is set to -** the number of FuncDef objects created (either 1 or 3, depending on whether -** or not the specified encoding is SQLITE_ANY). The FuncDef.pDestructor -** member of each of the new FuncDef objects is set to point to the allocated -** FuncDestructor. -** -** Thereafter, when one of the FuncDef objects is deleted, the reference -** count on this object is decremented. When it reaches 0, the destructor -** is invoked and the FuncDestructor structure freed. -*/ -struct FuncDestructor { - int nRef; - void (*xDestroy)(void *); - void *pUserData; -}; - -/* -** Possible values for FuncDef.flags. Note that the _LENGTH and _TYPEOF -** values must correspond to OPFLAG_LENGTHARG and OPFLAG_TYPEOFARG. There -** are assert() statements in the code to verify this. -*/ -#define SQLITE_FUNC_ENCMASK 0x003 /* SQLITE_UTF8, SQLITE_UTF16BE or UTF16LE */ -#define SQLITE_FUNC_LIKE 0x004 /* Candidate for the LIKE optimization */ -#define SQLITE_FUNC_CASE 0x008 /* Case-sensitive LIKE-type function */ -#define SQLITE_FUNC_EPHEM 0x010 /* Ephemeral. 
Delete with VDBE */ -#define SQLITE_FUNC_NEEDCOLL 0x020 /* sqlite3GetFuncCollSeq() might be called */ -#define SQLITE_FUNC_LENGTH 0x040 /* Built-in length() function */ -#define SQLITE_FUNC_TYPEOF 0x080 /* Built-in typeof() function */ -#define SQLITE_FUNC_COUNT 0x100 /* Built-in count(*) aggregate */ -#define SQLITE_FUNC_COALESCE 0x200 /* Built-in coalesce() or ifnull() */ -#define SQLITE_FUNC_UNLIKELY 0x400 /* Built-in unlikely() function */ -#define SQLITE_FUNC_CONSTANT 0x800 /* Constant inputs give a constant output */ - -/* -** The following three macros, FUNCTION(), LIKEFUNC() and AGGREGATE() are -** used to create the initializers for the FuncDef structures. -** -** FUNCTION(zName, nArg, iArg, bNC, xFunc) -** Used to create a scalar function definition of a function zName -** implemented by C function xFunc that accepts nArg arguments. The -** value passed as iArg is cast to a (void*) and made available -** as the user-data (sqlite3_user_data()) for the function. If -** argument bNC is true, then the SQLITE_FUNC_NEEDCOLL flag is set. -** -** VFUNCTION(zName, nArg, iArg, bNC, xFunc) -** Like FUNCTION except it omits the SQLITE_FUNC_CONSTANT flag. -** -** AGGREGATE(zName, nArg, iArg, bNC, xStep, xFinal) -** Used to create an aggregate function definition implemented by -** the C functions xStep and xFinal. The first four parameters -** are interpreted in the same way as the first 4 parameters to -** FUNCTION(). -** -** LIKEFUNC(zName, nArg, pArg, flags) -** Used to create a scalar function definition of a function zName -** that accepts nArg arguments and is implemented by a call to C -** function likeFunc. Argument pArg is cast to a (void *) and made -** available as the function user-data (sqlite3_user_data()). The -** FuncDef.flags variable is set to the value passed as the flags -** parameter. -*/ -#define FUNCTION(zName, nArg, iArg, bNC, xFunc) \ - {nArg, SQLITE_FUNC_CONSTANT|SQLITE_UTF8|(bNC*SQLITE_FUNC_NEEDCOLL), \ - SQLITE_INT_TO_PTR(iArg), 0, xFunc, 0, 0, #zName, 0, 0} -#define VFUNCTION(zName, nArg, iArg, bNC, xFunc) \ - {nArg, SQLITE_UTF8|(bNC*SQLITE_FUNC_NEEDCOLL), \ - SQLITE_INT_TO_PTR(iArg), 0, xFunc, 0, 0, #zName, 0, 0} -#define FUNCTION2(zName, nArg, iArg, bNC, xFunc, extraFlags) \ - {nArg,SQLITE_FUNC_CONSTANT|SQLITE_UTF8|(bNC*SQLITE_FUNC_NEEDCOLL)|extraFlags,\ - SQLITE_INT_TO_PTR(iArg), 0, xFunc, 0, 0, #zName, 0, 0} -#define STR_FUNCTION(zName, nArg, pArg, bNC, xFunc) \ - {nArg, SQLITE_FUNC_CONSTANT|SQLITE_UTF8|(bNC*SQLITE_FUNC_NEEDCOLL), \ - pArg, 0, xFunc, 0, 0, #zName, 0, 0} -#define LIKEFUNC(zName, nArg, arg, flags) \ - {nArg, SQLITE_FUNC_CONSTANT|SQLITE_UTF8|flags, \ - (void *)arg, 0, likeFunc, 0, 0, #zName, 0, 0} -#define AGGREGATE(zName, nArg, arg, nc, xStep, xFinal) \ - {nArg, SQLITE_UTF8|(nc*SQLITE_FUNC_NEEDCOLL), \ - SQLITE_INT_TO_PTR(arg), 0, 0, xStep,xFinal,#zName,0,0} - -/* -** All current savepoints are stored in a linked list starting at -** sqlite3.pSavepoint. The first element in the list is the most recently -** opened savepoint. Savepoints are added to the list by the vdbe -** OP_Savepoint instruction. -*/ -struct Savepoint { - char *zName; /* Savepoint name (nul-terminated) */ - i64 nDeferredCons; /* Number of deferred fk violations */ - i64 nDeferredImmCons; /* Number of deferred imm fk. */ - Savepoint *pNext; /* Parent savepoint (if any) */ -}; - -/* -** The following are used as the second parameter to sqlite3Savepoint(), -** and as the P1 argument to the OP_Savepoint instruction. 
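/* Illustrative sketch (not part of the original header): how the FUNCTION()
** and AGGREGATE() initializer macros above are used to build a table of
** built-in SQL functions. The implementation functions are only declared
** here, and while the names mirror real built-ins, this particular table is
** invented for the example.
*/
static void lowerFunc(sqlite3_context*, int, sqlite3_value**);
static void sumStep(sqlite3_context*, int, sqlite3_value**);
static void sumFinalize(sqlite3_context*);

static FuncDef aExampleFuncs[] = {
  FUNCTION(lower, 1, 0, 0, lowerFunc),            /* scalar lower(X)   */
  AGGREGATE(sum, 1, 0, 0, sumStep, sumFinalize),  /* aggregate sum(X)  */
};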
-*/ -#define SAVEPOINT_BEGIN 0 -#define SAVEPOINT_RELEASE 1 -#define SAVEPOINT_ROLLBACK 2 - - -/* -** Each SQLite module (virtual table definition) is defined by an -** instance of the following structure, stored in the sqlite3.aModule -** hash table. -*/ -struct Module { - const sqlite3_module *pModule; /* Callback pointers */ - const char *zName; /* Name passed to create_module() */ - void *pAux; /* pAux passed to create_module() */ - void (*xDestroy)(void *); /* Module destructor function */ -}; - -/* -** information about each column of an SQL table is held in an instance -** of this structure. -*/ -struct Column { - char *zName; /* Name of this column */ - Expr *pDflt; /* Default value of this column */ - char *zDflt; /* Original text of the default value */ - char *zType; /* Data type for this column */ - char *zColl; /* Collating sequence. If NULL, use the default */ - u8 notNull; /* An OE_ code for handling a NOT NULL constraint */ - char affinity; /* One of the SQLITE_AFF_... values */ - u8 szEst; /* Estimated size of this column. INT==1 */ - u8 colFlags; /* Boolean properties. See COLFLAG_ defines below */ -}; - -/* Allowed values for Column.colFlags: -*/ -#define COLFLAG_PRIMKEY 0x0001 /* Column is part of the primary key */ -#define COLFLAG_HIDDEN 0x0002 /* A hidden column in a virtual table */ - -/* -** A "Collating Sequence" is defined by an instance of the following -** structure. Conceptually, a collating sequence consists of a name and -** a comparison routine that defines the order of that sequence. -** -** If CollSeq.xCmp is NULL, it means that the -** collating sequence is undefined. Indices built on an undefined -** collating sequence may not be read or written. -*/ -struct CollSeq { - char *zName; /* Name of the collating sequence, UTF-8 encoded */ - u8 enc; /* Text encoding handled by xCmp() */ - void *pUser; /* First argument to xCmp() */ - int (*xCmp)(void*,int, const void*, int, const void*); - void (*xDel)(void*); /* Destructor for pUser */ -}; - -/* -** A sort order can be either ASC or DESC. -*/ -#define SQLITE_SO_ASC 0 /* Sort in ascending order */ -#define SQLITE_SO_DESC 1 /* Sort in ascending order */ - -/* -** Column affinity types. -** -** These used to have mnemonic name like 'i' for SQLITE_AFF_INTEGER and -** 't' for SQLITE_AFF_TEXT. But we can save a little space and improve -** the speed a little by numbering the values consecutively. -** -** But rather than start with 0 or 1, we begin with 'a'. That way, -** when multiple affinity types are concatenated into a string and -** used as the P4 operand, they will be more readable. -** -** Note also that the numeric types are grouped together so that testing -** for a numeric type is a single comparison. -*/ -#define SQLITE_AFF_TEXT 'a' -#define SQLITE_AFF_NONE 'b' -#define SQLITE_AFF_NUMERIC 'c' -#define SQLITE_AFF_INTEGER 'd' -#define SQLITE_AFF_REAL 'e' - -#define sqlite3IsNumericAffinity(X) ((X)>=SQLITE_AFF_NUMERIC) - -/* -** The SQLITE_AFF_MASK values masks off the significant bits of an -** affinity value. -*/ -#define SQLITE_AFF_MASK 0x67 - -/* -** Additional bit values that can be ORed with an affinity without -** changing the affinity. -** -** The SQLITE_NOTNULL flag is a combination of NULLEQ and JUMPIFNULL. -** It causes an assert() to fire if either operand to a comparison -** operator is NULL. It is added to certain comparison operators to -** prove that the operands are always NOT NULL. 
-*/ -#define SQLITE_JUMPIFNULL 0x08 /* jumps if either operand is NULL */ -#define SQLITE_STOREP2 0x10 /* Store result in reg[P2] rather than jump */ -#define SQLITE_NULLEQ 0x80 /* NULL=NULL */ -#define SQLITE_NOTNULL 0x88 /* Assert that operands are never NULL */ - -/* -** An object of this type is created for each virtual table present in -** the database schema. -** -** If the database schema is shared, then there is one instance of this -** structure for each database connection (sqlite3*) that uses the shared -** schema. This is because each database connection requires its own unique -** instance of the sqlite3_vtab* handle used to access the virtual table -** implementation. sqlite3_vtab* handles can not be shared between -** database connections, even when the rest of the in-memory database -** schema is shared, as the implementation often stores the database -** connection handle passed to it via the xConnect() or xCreate() method -** during initialization internally. This database connection handle may -** then be used by the virtual table implementation to access real tables -** within the database. So that they appear as part of the callers -** transaction, these accesses need to be made via the same database -** connection as that used to execute SQL operations on the virtual table. -** -** All VTable objects that correspond to a single table in a shared -** database schema are initially stored in a linked-list pointed to by -** the Table.pVTable member variable of the corresponding Table object. -** When an sqlite3_prepare() operation is required to access the virtual -** table, it searches the list for the VTable that corresponds to the -** database connection doing the preparing so as to use the correct -** sqlite3_vtab* handle in the compiled query. -** -** When an in-memory Table object is deleted (for example when the -** schema is being reloaded for some reason), the VTable objects are not -** deleted and the sqlite3_vtab* handles are not xDisconnect()ed -** immediately. Instead, they are moved from the Table.pVTable list to -** another linked list headed by the sqlite3.pDisconnect member of the -** corresponding sqlite3 structure. They are then deleted/xDisconnected -** next time a statement is prepared using said sqlite3*. This is done -** to avoid deadlock issues involving multiple sqlite3.mutex mutexes. -** Refer to comments above function sqlite3VtabUnlockList() for an -** explanation as to why it is safe to add an entry to an sqlite3.pDisconnect -** list without holding the corresponding sqlite3.mutex mutex. -** -** The memory for objects of this type is always allocated by -** sqlite3DbMalloc(), using the connection handle stored in VTable.db as -** the first argument. -*/ -struct VTable { - sqlite3 *db; /* Database connection associated with this table */ - Module *pMod; /* Pointer to module implementation */ - sqlite3_vtab *pVtab; /* Pointer to vtab instance */ - int nRef; /* Number of pointers to this structure */ - u8 bConstraint; /* True if constraints are supported */ - int iSavepoint; /* Depth of the SAVEPOINT stack */ - VTable *pNext; /* Next in linked list (see above) */ -}; - -/* -** Each SQL table is represented in memory by an instance of the -** following structure. -** -** Table.zName is the name of the table. The case of the original -** CREATE TABLE statement is stored, but case is not significant for -** comparisons. -** -** Table.nCol is the number of columns in this table. 
Table.aCol is a -** pointer to an array of Column structures, one for each column. -** -** If the table has an INTEGER PRIMARY KEY, then Table.iPKey is the index of -** the column that is that key. Otherwise Table.iPKey is negative. Note -** that the datatype of the PRIMARY KEY must be INTEGER for this field to -** be set. An INTEGER PRIMARY KEY is used as the rowid for each row of -** the table. If a table has no INTEGER PRIMARY KEY, then a random rowid -** is generated for each row of the table. TF_HasPrimaryKey is set if -** the table has any PRIMARY KEY, INTEGER or otherwise. -** -** Table.tnum is the page number for the root BTree page of the table in the -** database file. If Table.iDb is the index of the database table backend -** in sqlite.aDb[]. 0 is for the main database and 1 is for the file that -** holds temporary tables and indices. If TF_Ephemeral is set -** then the table is stored in a file that is automatically deleted -** when the VDBE cursor to the table is closed. In this case Table.tnum -** refers VDBE cursor number that holds the table open, not to the root -** page number. Transient tables are used to hold the results of a -** sub-query that appears instead of a real table name in the FROM clause -** of a SELECT statement. -*/ -struct Table { - char *zName; /* Name of the table or view */ - Column *aCol; /* Information about each column */ - Index *pIndex; /* List of SQL indexes on this table. */ - Select *pSelect; /* NULL for tables. Points to definition if a view. */ - FKey *pFKey; /* Linked list of all foreign keys in this table */ - char *zColAff; /* String defining the affinity of each column */ -#ifndef SQLITE_OMIT_CHECK - ExprList *pCheck; /* All CHECK constraints */ -#endif - LogEst nRowLogEst; /* Estimated rows in table - from sqlite_stat1 table */ - int tnum; /* Root BTree node for this table (see note above) */ - i16 iPKey; /* If not negative, use aCol[iPKey] as the primary key */ - i16 nCol; /* Number of columns in this table */ - u16 nRef; /* Number of pointers to this Table */ - LogEst szTabRow; /* Estimated size of each table row in bytes */ - u8 tabFlags; /* Mask of TF_* values */ - u8 keyConf; /* What to do in case of uniqueness conflict on iPKey */ -#ifndef SQLITE_OMIT_ALTERTABLE - int addColOffset; /* Offset in CREATE TABLE stmt to add a new column */ -#endif -#ifndef SQLITE_OMIT_VIRTUALTABLE - int nModuleArg; /* Number of arguments to the module */ - char **azModuleArg; /* Text of all module args. [0] is module name */ - VTable *pVTable; /* List of VTable objects. */ -#endif - Trigger *pTrigger; /* List of triggers stored in pSchema */ - Schema *pSchema; /* Schema that contains this table */ - Table *pNextZombie; /* Next on the Parse.pZombieTab list */ -}; - -/* -** Allowed values for Table.tabFlags. -*/ -#define TF_Readonly 0x01 /* Read-only system table */ -#define TF_Ephemeral 0x02 /* An ephemeral table */ -#define TF_HasPrimaryKey 0x04 /* Table has a primary key */ -#define TF_Autoincrement 0x08 /* Integer primary key is autoincrement */ -#define TF_Virtual 0x10 /* Is a virtual table */ -#define TF_WithoutRowid 0x20 /* No rowid used. PRIMARY KEY is the key */ - - -/* -** Test to see whether or not a table is a virtual table. This is -** done as a macro so that it will be optimized out when virtual -** table support is omitted from the build. 
-*/ -#ifndef SQLITE_OMIT_VIRTUALTABLE -# define IsVirtual(X) (((X)->tabFlags & TF_Virtual)!=0) -# define IsHiddenColumn(X) (((X)->colFlags & COLFLAG_HIDDEN)!=0) -#else -# define IsVirtual(X) 0 -# define IsHiddenColumn(X) 0 -#endif - -/* Does the table have a rowid */ -#define HasRowid(X) (((X)->tabFlags & TF_WithoutRowid)==0) - -/* -** Each foreign key constraint is an instance of the following structure. -** -** A foreign key is associated with two tables. The "from" table is -** the table that contains the REFERENCES clause that creates the foreign -** key. The "to" table is the table that is named in the REFERENCES clause. -** Consider this example: -** -** CREATE TABLE ex1( -** a INTEGER PRIMARY KEY, -** b INTEGER CONSTRAINT fk1 REFERENCES ex2(x) -** ); -** -** For foreign key "fk1", the from-table is "ex1" and the to-table is "ex2". -** Equivalent names: -** -** from-table == child-table -** to-table == parent-table -** -** Each REFERENCES clause generates an instance of the following structure -** which is attached to the from-table. The to-table need not exist when -** the from-table is created. The existence of the to-table is not checked. -** -** The list of all parents for child Table X is held at X.pFKey. -** -** A list of all children for a table named Z (which might not even exist) -** is held in Schema.fkeyHash with a hash key of Z. -*/ -struct FKey { - Table *pFrom; /* Table containing the REFERENCES clause (aka: Child) */ - FKey *pNextFrom; /* Next FKey with the same in pFrom. Next parent of pFrom */ - char *zTo; /* Name of table that the key points to (aka: Parent) */ - FKey *pNextTo; /* Next with the same zTo. Next child of zTo. */ - FKey *pPrevTo; /* Previous with the same zTo */ - int nCol; /* Number of columns in this key */ - /* EV: R-30323-21917 */ - u8 isDeferred; /* True if constraint checking is deferred till COMMIT */ - u8 aAction[2]; /* ON DELETE and ON UPDATE actions, respectively */ - Trigger *apTrigger[2];/* Triggers for aAction[] actions */ - struct sColMap { /* Mapping of columns in pFrom to columns in zTo */ - int iFrom; /* Index of column in pFrom */ - char *zCol; /* Name of column in zTo. If NULL use PRIMARY KEY */ - } aCol[1]; /* One entry for each of nCol columns */ -}; - -/* -** SQLite supports many different ways to resolve a constraint -** error. ROLLBACK processing means that a constraint violation -** causes the operation in process to fail and for the current transaction -** to be rolled back. ABORT processing means the operation in process -** fails and any prior changes from that one operation are backed out, -** but the transaction is not rolled back. FAIL processing means that -** the operation in progress stops and returns an error code. But prior -** changes due to the same operation are not backed out and no rollback -** occurs. IGNORE means that the particular row that caused the constraint -** error is not inserted or updated. Processing continues and no error -** is returned. REPLACE means that preexisting database rows that caused -** a UNIQUE constraint violation are removed so that the new insert or -** update can proceed. Processing continues and no error is reported. -** -** RESTRICT, SETNULL, and CASCADE actions apply only to foreign keys. -** RESTRICT is the same as ABORT for IMMEDIATE foreign keys and the -** same as ROLLBACK for DEFERRED keys. SETNULL means that the foreign -** key is set to NULL. CASCADE means that a DELETE or UPDATE of the -** referenced table row is propagated into the row that holds the -** foreign key. 
-** -** The following symbolic values are used to record which type -** of action to take. -*/ -#define OE_None 0 /* There is no constraint to check */ -#define OE_Rollback 1 /* Fail the operation and rollback the transaction */ -#define OE_Abort 2 /* Back out changes but do no rollback transaction */ -#define OE_Fail 3 /* Stop the operation but leave all prior changes */ -#define OE_Ignore 4 /* Ignore the error. Do not do the INSERT or UPDATE */ -#define OE_Replace 5 /* Delete existing record, then do INSERT or UPDATE */ - -#define OE_Restrict 6 /* OE_Abort for IMMEDIATE, OE_Rollback for DEFERRED */ -#define OE_SetNull 7 /* Set the foreign key value to NULL */ -#define OE_SetDflt 8 /* Set the foreign key value to its default */ -#define OE_Cascade 9 /* Cascade the changes */ - -#define OE_Default 10 /* Do whatever the default action is */ - - -/* -** An instance of the following structure is passed as the first -** argument to sqlite3VdbeKeyCompare and is used to control the -** comparison of the two index keys. -** -** Note that aSortOrder[] and aColl[] have nField+1 slots. There -** are nField slots for the columns of an index then one extra slot -** for the rowid at the end. -*/ -struct KeyInfo { - u32 nRef; /* Number of references to this KeyInfo object */ - u8 enc; /* Text encoding - one of the SQLITE_UTF* values */ - u16 nField; /* Number of key columns in the index */ - u16 nXField; /* Number of columns beyond the key columns */ - sqlite3 *db; /* The database connection */ - u8 *aSortOrder; /* Sort order for each column. */ - CollSeq *aColl[1]; /* Collating sequence for each term of the key */ -}; - -/* -** An instance of the following structure holds information about a -** single index record that has already been parsed out into individual -** values. -** -** A record is an object that contains one or more fields of data. -** Records are used to store the content of a table row and to store -** the key of an index. A blob encoding of a record is created by -** the OP_MakeRecord opcode of the VDBE and is disassembled by the -** OP_Column opcode. -** -** This structure holds a record that has already been disassembled -** into its constituent fields. -** -** The r1 and r2 member variables are only used by the optimized comparison -** functions vdbeRecordCompareInt() and vdbeRecordCompareString(). -*/ -struct UnpackedRecord { - KeyInfo *pKeyInfo; /* Collation and sort-order information */ - u16 nField; /* Number of entries in apMem[] */ - i8 default_rc; /* Comparison result if keys are equal */ - u8 isCorrupt; /* Corruption detected by xRecordCompare() */ - Mem *aMem; /* Values */ - int r1; /* Value to return if (lhs > rhs) */ - int r2; /* Value to return if (rhs < lhs) */ -}; - - -/* -** Each SQL index is represented in memory by an -** instance of the following structure. -** -** The columns of the table that are to be indexed are described -** by the aiColumn[] field of this structure. For example, suppose -** we have the following table and index: -** -** CREATE TABLE Ex1(c1 int, c2 int, c3 text); -** CREATE INDEX Ex2 ON Ex1(c3,c1); -** -** In the Table structure describing Ex1, nCol==3 because there are -** three columns in the table. In the Index structure describing -** Ex2, nColumn==2 since 2 of the 3 columns of Ex1 are indexed. -** The value of aiColumn is {2, 0}. aiColumn[0]==2 because the -** first column to be indexed (c3) has an index of 2 in Ex1.aCol[]. -** The second column to be indexed (c1) has an index of 0 in -** Ex1.aCol[], hence Ex2.aiColumn[1]==0. 
-** -** The Index.onError field determines whether or not the indexed columns -** must be unique and what to do if they are not. When Index.onError=OE_None, -** it means this is not a unique index. Otherwise it is a unique index -** and the value of Index.onError indicate the which conflict resolution -** algorithm to employ whenever an attempt is made to insert a non-unique -** element. -*/ -struct Index { - char *zName; /* Name of this index */ - i16 *aiColumn; /* Which columns are used by this index. 1st is 0 */ - LogEst *aiRowLogEst; /* From ANALYZE: Est. rows selected by each column */ - Table *pTable; /* The SQL table being indexed */ - char *zColAff; /* String defining the affinity of each column */ - Index *pNext; /* The next index associated with the same table */ - Schema *pSchema; /* Schema containing this index */ - u8 *aSortOrder; /* for each column: True==DESC, False==ASC */ - char **azColl; /* Array of collation sequence names for index */ - Expr *pPartIdxWhere; /* WHERE clause for partial indices */ - KeyInfo *pKeyInfo; /* A KeyInfo object suitable for this index */ - int tnum; /* DB Page containing root of this index */ - LogEst szIdxRow; /* Estimated average row size in bytes */ - u16 nKeyCol; /* Number of columns forming the key */ - u16 nColumn; /* Number of columns stored in the index */ - u8 onError; /* OE_Abort, OE_Ignore, OE_Replace, or OE_None */ - unsigned idxType:2; /* 1==UNIQUE, 2==PRIMARY KEY, 0==CREATE INDEX */ - unsigned bUnordered:1; /* Use this index for == or IN queries only */ - unsigned uniqNotNull:1; /* True if UNIQUE and NOT NULL for all columns */ - unsigned isResized:1; /* True if resizeIndexObject() has been called */ - unsigned isCovering:1; /* True if this is a covering index */ -#ifdef SQLITE_ENABLE_STAT3_OR_STAT4 - int nSample; /* Number of elements in aSample[] */ - int nSampleCol; /* Size of IndexSample.anEq[] and so on */ - tRowcnt *aAvgEq; /* Average nEq values for keys not in aSample */ - IndexSample *aSample; /* Samples of the left-most key */ -#endif -}; - -/* -** Allowed values for Index.idxType -*/ -#define SQLITE_IDXTYPE_APPDEF 0 /* Created using CREATE INDEX */ -#define SQLITE_IDXTYPE_UNIQUE 1 /* Implements a UNIQUE constraint */ -#define SQLITE_IDXTYPE_PRIMARYKEY 2 /* Is the PRIMARY KEY for the table */ - -/* Return true if index X is a PRIMARY KEY index */ -#define IsPrimaryKeyIndex(X) ((X)->idxType==SQLITE_IDXTYPE_PRIMARYKEY) - -/* -** Each sample stored in the sqlite_stat3 table is represented in memory -** using a structure of this type. See documentation at the top of the -** analyze.c source file for additional information. -*/ -struct IndexSample { - void *p; /* Pointer to sampled record */ - int n; /* Size of record in bytes */ - tRowcnt *anEq; /* Est. number of rows where the key equals this sample */ - tRowcnt *anLt; /* Est. number of rows where key is less than this sample */ - tRowcnt *anDLt; /* Est. number of distinct keys less than this sample */ -}; - -/* -** Each token coming out of the lexer is an instance of -** this structure. Tokens are also used as part of an expression. -** -** Note if Token.z==0 then Token.dyn and Token.n are undefined and -** may contain random values. Do not make any assumptions about Token.dyn -** and Token.n when Token.z==0. -*/ -struct Token { - const char *z; /* Text of the token. Not NULL-terminated! 
*/ - unsigned int n; /* Number of characters in this token */ -}; - -/* -** An instance of this structure contains information needed to generate -** code for a SELECT that contains aggregate functions. -** -** If Expr.op==TK_AGG_COLUMN or TK_AGG_FUNCTION then Expr.pAggInfo is a -** pointer to this structure. The Expr.iColumn field is the index in -** AggInfo.aCol[] or AggInfo.aFunc[] of information needed to generate -** code for that node. -** -** AggInfo.pGroupBy and AggInfo.aFunc.pExpr point to fields within the -** original Select structure that describes the SELECT statement. These -** fields do not need to be freed when deallocating the AggInfo structure. -*/ -struct AggInfo { - u8 directMode; /* Direct rendering mode means take data directly - ** from source tables rather than from accumulators */ - u8 useSortingIdx; /* In direct mode, reference the sorting index rather - ** than the source table */ - int sortingIdx; /* Cursor number of the sorting index */ - int sortingIdxPTab; /* Cursor number of pseudo-table */ - int nSortingColumn; /* Number of columns in the sorting index */ - int mnReg, mxReg; /* Range of registers allocated for aCol and aFunc */ - ExprList *pGroupBy; /* The group by clause */ - struct AggInfo_col { /* For each column used in source tables */ - Table *pTab; /* Source table */ - int iTable; /* Cursor number of the source table */ - int iColumn; /* Column number within the source table */ - int iSorterColumn; /* Column number in the sorting index */ - int iMem; /* Memory location that acts as accumulator */ - Expr *pExpr; /* The original expression */ - } *aCol; - int nColumn; /* Number of used entries in aCol[] */ - int nAccumulator; /* Number of columns that show through to the output. - ** Additional columns are used only as parameters to - ** aggregate functions */ - struct AggInfo_func { /* For each aggregate function */ - Expr *pExpr; /* Expression encoding the function */ - FuncDef *pFunc; /* The aggregate function implementation */ - int iMem; /* Memory location that acts as accumulator */ - int iDistinct; /* Ephemeral table used to enforce DISTINCT */ - } *aFunc; - int nFunc; /* Number of entries in aFunc[] */ -}; - -/* -** The datatype ynVar is a signed integer, either 16-bit or 32-bit. -** Usually it is 16-bits. But if SQLITE_MAX_VARIABLE_NUMBER is greater -** than 32767 we have to make it 32-bit. 16-bit is preferred because -** it uses less memory in the Expr object, which is a big memory user -** in systems with lots of prepared statements. And few applications -** need more than about 10 or 20 variables. But some extreme users want -** to have prepared statements with over 32767 variables, and for them -** the option is available (at compile-time). -*/ -#if SQLITE_MAX_VARIABLE_NUMBER<=32767 -typedef i16 ynVar; -#else -typedef int ynVar; -#endif - -/* -** Each node of an expression in the parse tree is an instance -** of this structure. -** -** Expr.op is the opcode. The integer parser token codes are reused -** as opcodes here. For example, the parser defines TK_GE to be an integer -** code representing the ">=" operator. This same integer code is reused -** to represent the greater-than-or-equal-to operator in the expression -** tree. -** -** If the expression is an SQL literal (TK_INTEGER, TK_FLOAT, TK_BLOB, -** or TK_STRING), then Expr.token contains the text of the SQL literal. If -** the expression is a variable (TK_VARIABLE), then Expr.token contains the -** variable name. 
Finally, if the expression is an SQL function (TK_FUNCTION), -** then Expr.token contains the name of the function. -** -** Expr.pRight and Expr.pLeft are the left and right subexpressions of a -** binary operator. Either or both may be NULL. -** -** Expr.x.pList is a list of arguments if the expression is an SQL function, -** a CASE expression or an IN expression of the form " IN (, ...)". -** Expr.x.pSelect is used if the expression is a sub-select or an expression of -** the form " IN (SELECT ...)". If the EP_xIsSelect bit is set in the -** Expr.flags mask, then Expr.x.pSelect is valid. Otherwise, Expr.x.pList is -** valid. -** -** An expression of the form ID or ID.ID refers to a column in a table. -** For such expressions, Expr.op is set to TK_COLUMN and Expr.iTable is -** the integer cursor number of a VDBE cursor pointing to that table and -** Expr.iColumn is the column number for the specific column. If the -** expression is used as a result in an aggregate SELECT, then the -** value is also stored in the Expr.iAgg column in the aggregate so that -** it can be accessed after all aggregates are computed. -** -** If the expression is an unbound variable marker (a question mark -** character '?' in the original SQL) then the Expr.iTable holds the index -** number for that variable. -** -** If the expression is a subquery then Expr.iColumn holds an integer -** register number containing the result of the subquery. If the -** subquery gives a constant result, then iTable is -1. If the subquery -** gives a different answer at different times during statement processing -** then iTable is the address of a subroutine that computes the subquery. -** -** If the Expr is of type OP_Column, and the table it is selecting from -** is a disk table or the "old.*" pseudo-table, then pTab points to the -** corresponding table definition. -** -** ALLOCATION NOTES: -** -** Expr objects can use a lot of memory space in database schema. To -** help reduce memory requirements, sometimes an Expr object will be -** truncated. And to reduce the number of memory allocations, sometimes -** two or more Expr objects will be stored in a single memory allocation, -** together with Expr.zToken strings. -** -** If the EP_Reduced and EP_TokenOnly flags are set when -** an Expr object is truncated. When EP_Reduced is set, then all -** the child Expr objects in the Expr.pLeft and Expr.pRight subtrees -** are contained within the same memory allocation. Note, however, that -** the subtrees in Expr.x.pList or Expr.x.pSelect are always separately -** allocated, regardless of whether or not EP_Reduced is set. -*/ -struct Expr { - u8 op; /* Operation performed by this node */ - char affinity; /* The affinity of the column or 0 if not a column */ - u32 flags; /* Various flags. EP_* See below */ - union { - char *zToken; /* Token value. Zero terminated and dequoted */ - int iValue; /* Non-negative integer value if EP_IntValue */ - } u; - - /* If the EP_TokenOnly flag is set in the Expr.flags mask, then no - ** space is allocated for the fields below this point. An attempt to - ** access them will result in a segfault or malfunction. 
- *********************************************************************/ - - Expr *pLeft; /* Left subnode */ - Expr *pRight; /* Right subnode */ - union { - ExprList *pList; /* op = IN, EXISTS, SELECT, CASE, FUNCTION, BETWEEN */ - Select *pSelect; /* EP_xIsSelect and op = IN, EXISTS, SELECT */ - } x; - - /* If the EP_Reduced flag is set in the Expr.flags mask, then no - ** space is allocated for the fields below this point. An attempt to - ** access them will result in a segfault or malfunction. - *********************************************************************/ - -#if SQLITE_MAX_EXPR_DEPTH>0 - int nHeight; /* Height of the tree headed by this node */ -#endif - int iTable; /* TK_COLUMN: cursor number of table holding column - ** TK_REGISTER: register number - ** TK_TRIGGER: 1 -> new, 0 -> old - ** EP_Unlikely: 1000 times likelihood */ - ynVar iColumn; /* TK_COLUMN: column index. -1 for rowid. - ** TK_VARIABLE: variable number (always >= 1). */ - i16 iAgg; /* Which entry in pAggInfo->aCol[] or ->aFunc[] */ - i16 iRightJoinTable; /* If EP_FromJoin, the right table of the join */ - u8 op2; /* TK_REGISTER: original value of Expr.op - ** TK_COLUMN: the value of p5 for OP_Column - ** TK_AGG_FUNCTION: nesting depth */ - AggInfo *pAggInfo; /* Used by TK_AGG_COLUMN and TK_AGG_FUNCTION */ - Table *pTab; /* Table for TK_COLUMN expressions. */ -}; - -/* -** The following are the meanings of bits in the Expr.flags field. -*/ -#define EP_FromJoin 0x000001 /* Originated in ON or USING clause of a join */ -#define EP_Agg 0x000002 /* Contains one or more aggregate functions */ -#define EP_Resolved 0x000004 /* IDs have been resolved to COLUMNs */ -#define EP_Error 0x000008 /* Expression contains one or more errors */ -#define EP_Distinct 0x000010 /* Aggregate function with DISTINCT keyword */ -#define EP_VarSelect 0x000020 /* pSelect is correlated, not constant */ -#define EP_DblQuoted 0x000040 /* token.z was originally in "..." */ -#define EP_InfixFunc 0x000080 /* True for an infix function: LIKE, GLOB, etc */ -#define EP_Collate 0x000100 /* Tree contains a TK_COLLATE operator */ -#define EP_Generic 0x000200 /* Ignore COLLATE or affinity on this tree */ -#define EP_IntValue 0x000400 /* Integer value contained in u.iValue */ -#define EP_xIsSelect 0x000800 /* x.pSelect is valid (otherwise x.pList is) */ -#define EP_Skip 0x001000 /* COLLATE, AS, or UNLIKELY */ -#define EP_Reduced 0x002000 /* Expr struct EXPR_REDUCEDSIZE bytes only */ -#define EP_TokenOnly 0x004000 /* Expr struct EXPR_TOKENONLYSIZE bytes only */ -#define EP_Static 0x008000 /* Held in memory not obtained from malloc() */ -#define EP_MemToken 0x010000 /* Need to sqlite3DbFree() Expr.zToken */ -#define EP_NoReduce 0x020000 /* Cannot EXPRDUP_REDUCE this Expr */ -#define EP_Unlikely 0x040000 /* unlikely() or likelihood() function */ -#define EP_Constant 0x080000 /* Node is a constant */ - -/* -** These macros can be used to test, set, or clear bits in the -** Expr.flags field. -*/ -#define ExprHasProperty(E,P) (((E)->flags&(P))!=0) -#define ExprHasAllProperty(E,P) (((E)->flags&(P))==(P)) -#define ExprSetProperty(E,P) (E)->flags|=(P) -#define ExprClearProperty(E,P) (E)->flags&=~(P) - -/* The ExprSetVVAProperty() macro is used for Verification, Validation, -** and Accreditation only. It works like ExprSetProperty() during VVA -** processes but is a no-op for delivery. 
-*/ -#ifdef SQLITE_DEBUG -# define ExprSetVVAProperty(E,P) (E)->flags|=(P) -#else -# define ExprSetVVAProperty(E,P) -#endif - -/* -** Macros to determine the number of bytes required by a normal Expr -** struct, an Expr struct with the EP_Reduced flag set in Expr.flags -** and an Expr struct with the EP_TokenOnly flag set. -*/ -#define EXPR_FULLSIZE sizeof(Expr) /* Full size */ -#define EXPR_REDUCEDSIZE offsetof(Expr,iTable) /* Common features */ -#define EXPR_TOKENONLYSIZE offsetof(Expr,pLeft) /* Fewer features */ - -/* -** Flags passed to the sqlite3ExprDup() function. See the header comment -** above sqlite3ExprDup() for details. -*/ -#define EXPRDUP_REDUCE 0x0001 /* Used reduced-size Expr nodes */ - -/* -** A list of expressions. Each expression may optionally have a -** name. An expr/name combination can be used in several ways, such -** as the list of "expr AS ID" fields following a "SELECT" or in the -** list of "ID = expr" items in an UPDATE. A list of expressions can -** also be used as the argument to a function, in which case the a.zName -** field is not used. -** -** By default the Expr.zSpan field holds a human-readable description of -** the expression that is used in the generation of error messages and -** column labels. In this case, Expr.zSpan is typically the text of a -** column expression as it exists in a SELECT statement. However, if -** the bSpanIsTab flag is set, then zSpan is overloaded to mean the name -** of the result column in the form: DATABASE.TABLE.COLUMN. This later -** form is used for name resolution with nested FROM clauses. -*/ -struct ExprList { - int nExpr; /* Number of expressions on the list */ - struct ExprList_item { /* For each expression in the list */ - Expr *pExpr; /* The list of expressions */ - char *zName; /* Token associated with this expression */ - char *zSpan; /* Original text of the expression */ - u8 sortOrder; /* 1 for DESC or 0 for ASC */ - unsigned done :1; /* A flag to indicate when processing is finished */ - unsigned bSpanIsTab :1; /* zSpan holds DB.TABLE.COLUMN */ - unsigned reusable :1; /* Constant expression is reusable */ - union { - struct { - u16 iOrderByCol; /* For ORDER BY, column number in result set */ - u16 iAlias; /* Index into Parse.aAlias[] for zName */ - } x; - int iConstExprReg; /* Register in which Expr value is cached */ - } u; - } *a; /* Alloc a power of two greater or equal to nExpr */ -}; - -/* -** An instance of this structure is used by the parser to record both -** the parse tree for an expression and the span of input text for an -** expression. -*/ -struct ExprSpan { - Expr *pExpr; /* The expression parse tree */ - const char *zStart; /* First character of input text */ - const char *zEnd; /* One character past the end of input text */ -}; - -/* -** An instance of this structure can hold a simple list of identifiers, -** such as the list "a,b,c" in the following statements: -** -** INSERT INTO t(a,b,c) VALUES ...; -** CREATE INDEX idx ON t(a,b,c); -** CREATE TRIGGER trig BEFORE UPDATE ON t(a,b,c) ...; -** -** The IdList.a.idx field is used when the IdList represents the list of -** column names after a table name in an INSERT statement. In the statement -** -** INSERT INTO t(a,b,c) ... -** -** If "a" is the k-th column of table "t", then IdList.a[0].idx==k. 
-*/ -struct IdList { - struct IdList_item { - char *zName; /* Name of the identifier */ - int idx; /* Index in some Table.aCol[] of a column named zName */ - } *a; - int nId; /* Number of identifiers on the list */ -}; - -/* -** The bitmask datatype defined below is used for various optimizations. -** -** Changing this from a 64-bit to a 32-bit type limits the number of -** tables in a join to 32 instead of 64. But it also reduces the size -** of the library by 738 bytes on ix86. -*/ -typedef u64 Bitmask; - -/* -** The number of bits in a Bitmask. "BMS" means "BitMask Size". -*/ -#define BMS ((int)(sizeof(Bitmask)*8)) - -/* -** A bit in a Bitmask -*/ -#define MASKBIT(n) (((Bitmask)1)<<(n)) -#define MASKBIT32(n) (((unsigned int)1)<<(n)) - -/* -** The following structure describes the FROM clause of a SELECT statement. -** Each table or subquery in the FROM clause is a separate element of -** the SrcList.a[] array. -** -** With the addition of multiple database support, the following structure -** can also be used to describe a particular table such as the table that -** is modified by an INSERT, DELETE, or UPDATE statement. In standard SQL, -** such a table must be a simple name: ID. But in SQLite, the table can -** now be identified by a database name, a dot, then the table name: ID.ID. -** -** The jointype starts out showing the join type between the current table -** and the next table on the list. The parser builds the list this way. -** But sqlite3SrcListShiftJoinType() later shifts the jointypes so that each -** jointype expresses the join between the table and the previous table. -** -** In the colUsed field, the high-order bit (bit 63) is set if the table -** contains more than 63 columns and the 64-th or later column is used. -*/ -struct SrcList { - int nSrc; /* Number of tables or subqueries in the FROM clause */ - u32 nAlloc; /* Number of entries allocated in a[] below */ - struct SrcList_item { - Schema *pSchema; /* Schema to which this item is fixed */ - char *zDatabase; /* Name of database holding this table */ - char *zName; /* Name of the table */ - char *zAlias; /* The "B" part of a "A AS B" phrase. 
zName is the "A" */ - Table *pTab; /* An SQL table corresponding to zName */ - Select *pSelect; /* A SELECT statement used in place of a table name */ - int addrFillSub; /* Address of subroutine to manifest a subquery */ - int regReturn; /* Register holding return address of addrFillSub */ - int regResult; /* Registers holding results of a co-routine */ - u8 jointype; /* Type of join between this able and the previous */ - unsigned notIndexed :1; /* True if there is a NOT INDEXED clause */ - unsigned isCorrelated :1; /* True if sub-query is correlated */ - unsigned viaCoroutine :1; /* Implemented as a co-routine */ - unsigned isRecursive :1; /* True for recursive reference in WITH */ -#ifndef SQLITE_OMIT_EXPLAIN - u8 iSelectId; /* If pSelect!=0, the id of the sub-select in EQP */ -#endif - int iCursor; /* The VDBE cursor number used to access this table */ - Expr *pOn; /* The ON clause of a join */ - IdList *pUsing; /* The USING clause of a join */ - Bitmask colUsed; /* Bit N (1<" clause */ - Index *pIndex; /* Index structure corresponding to zIndex, if any */ - } a[1]; /* One entry for each identifier on the list */ -}; - -/* -** Permitted values of the SrcList.a.jointype field -*/ -#define JT_INNER 0x0001 /* Any kind of inner or cross join */ -#define JT_CROSS 0x0002 /* Explicit use of the CROSS keyword */ -#define JT_NATURAL 0x0004 /* True for a "natural" join */ -#define JT_LEFT 0x0008 /* Left outer join */ -#define JT_RIGHT 0x0010 /* Right outer join */ -#define JT_OUTER 0x0020 /* The "OUTER" keyword is present */ -#define JT_ERROR 0x0040 /* unknown or unsupported join type */ - - -/* -** Flags appropriate for the wctrlFlags parameter of sqlite3WhereBegin() -** and the WhereInfo.wctrlFlags member. -*/ -#define WHERE_ORDERBY_NORMAL 0x0000 /* No-op */ -#define WHERE_ORDERBY_MIN 0x0001 /* ORDER BY processing for min() func */ -#define WHERE_ORDERBY_MAX 0x0002 /* ORDER BY processing for max() func */ -#define WHERE_ONEPASS_DESIRED 0x0004 /* Want to do one-pass UPDATE/DELETE */ -#define WHERE_DUPLICATES_OK 0x0008 /* Ok to return a row more than once */ -#define WHERE_OMIT_OPEN_CLOSE 0x0010 /* Table cursors are already open */ -#define WHERE_FORCE_TABLE 0x0020 /* Do not use an index-only search */ -#define WHERE_ONETABLE_ONLY 0x0040 /* Only code the 1st table in pTabList */ -#define WHERE_AND_ONLY 0x0080 /* Don't use indices for OR terms */ -#define WHERE_GROUPBY 0x0100 /* pOrderBy is really a GROUP BY */ -#define WHERE_DISTINCTBY 0x0200 /* pOrderby is really a DISTINCT clause */ -#define WHERE_WANT_DISTINCT 0x0400 /* All output needs to be distinct */ -#define WHERE_SORTBYGROUP 0x0800 /* Support sqlite3WhereIsSorted() */ - -/* Allowed return values from sqlite3WhereIsDistinct() -*/ -#define WHERE_DISTINCT_NOOP 0 /* DISTINCT keyword not used */ -#define WHERE_DISTINCT_UNIQUE 1 /* No duplicates */ -#define WHERE_DISTINCT_ORDERED 2 /* All duplicates are adjacent */ -#define WHERE_DISTINCT_UNORDERED 3 /* Duplicates are scattered */ - -/* -** A NameContext defines a context in which to resolve table and column -** names. The context consists of a list of tables (the pSrcList) field and -** a list of named expression (pEList). The named expression list may -** be NULL. The pSrc corresponds to the FROM clause of a SELECT or -** to the table being operated on by INSERT, UPDATE, or DELETE. The -** pEList corresponds to the result set of a SELECT and is NULL for -** other statements. -** -** NameContexts can be nested. When resolving names, the inner-most -** context is searched first. 
If no match is found, the next outer -** context is checked. If there is still no match, the next context -** is checked. This process continues until either a match is found -** or all contexts are check. When a match is found, the nRef member of -** the context containing the match is incremented. -** -** Each subquery gets a new NameContext. The pNext field points to the -** NameContext in the parent query. Thus the process of scanning the -** NameContext list corresponds to searching through successively outer -** subqueries looking for a match. -*/ -struct NameContext { - Parse *pParse; /* The parser */ - SrcList *pSrcList; /* One or more tables used to resolve names */ - ExprList *pEList; /* Optional list of result-set columns */ - AggInfo *pAggInfo; /* Information about aggregates at this level */ - NameContext *pNext; /* Next outer name context. NULL for outermost */ - int nRef; /* Number of names resolved by this context */ - int nErr; /* Number of errors encountered while resolving names */ - u8 ncFlags; /* Zero or more NC_* flags defined below */ -}; - -/* -** Allowed values for the NameContext, ncFlags field. -*/ -#define NC_AllowAgg 0x01 /* Aggregate functions are allowed here */ -#define NC_HasAgg 0x02 /* One or more aggregate functions seen */ -#define NC_IsCheck 0x04 /* True if resolving names in a CHECK constraint */ -#define NC_InAggFunc 0x08 /* True if analyzing arguments to an agg func */ -#define NC_PartIdx 0x10 /* True if resolving a partial index WHERE */ - -/* -** An instance of the following structure contains all information -** needed to generate code for a single SELECT statement. -** -** nLimit is set to -1 if there is no LIMIT clause. nOffset is set to 0. -** If there is a LIMIT clause, the parser sets nLimit to the value of the -** limit and nOffset to the value of the offset (or 0 if there is not -** offset). But later on, nLimit and nOffset become the memory locations -** in the VDBE that record the limit and offset counters. -** -** addrOpenEphm[] entries contain the address of OP_OpenEphemeral opcodes. -** These addresses must be stored so that we can go back and fill in -** the P4_KEYINFO and P2 parameters later. Neither the KeyInfo nor -** the number of columns in P2 can be computed at the same time -** as the OP_OpenEphm instruction is coded because not -** enough information about the compound query is known at that point. -** The KeyInfo for addrOpenTran[0] and [1] contains collating sequences -** for the result set. The KeyInfo for addrOpenEphm[2] contains collating -** sequences for the ORDER BY clause. -*/ -struct Select { - ExprList *pEList; /* The fields of the result */ - u8 op; /* One of: TK_UNION TK_ALL TK_INTERSECT TK_EXCEPT */ - u16 selFlags; /* Various SF_* values */ - int iLimit, iOffset; /* Memory registers holding LIMIT & OFFSET counters */ - int addrOpenEphm[2]; /* OP_OpenEphem opcodes related to this select */ - u64 nSelectRow; /* Estimated number of result rows */ - SrcList *pSrc; /* The FROM clause */ - Expr *pWhere; /* The WHERE clause */ - ExprList *pGroupBy; /* The GROUP BY clause */ - Expr *pHaving; /* The HAVING clause */ - ExprList *pOrderBy; /* The ORDER BY clause */ - Select *pPrior; /* Prior select in a compound select statement */ - Select *pNext; /* Next select to the left in a compound */ - Expr *pLimit; /* LIMIT expression. NULL means not used. */ - Expr *pOffset; /* OFFSET expression. NULL means not used. */ - With *pWith; /* WITH clause attached to this select. Or NULL. 
*/ -}; - -/* -** Allowed values for Select.selFlags. The "SF" prefix stands for -** "Select Flag". -*/ -#define SF_Distinct 0x0001 /* Output should be DISTINCT */ -#define SF_Resolved 0x0002 /* Identifiers have been resolved */ -#define SF_Aggregate 0x0004 /* Contains aggregate functions */ -#define SF_UsesEphemeral 0x0008 /* Uses the OpenEphemeral opcode */ -#define SF_Expanded 0x0010 /* sqlite3SelectExpand() called on this */ -#define SF_HasTypeInfo 0x0020 /* FROM subqueries have Table metadata */ - /* 0x0040 NOT USED */ -#define SF_Values 0x0080 /* Synthesized from VALUES clause */ - /* 0x0100 NOT USED */ -#define SF_NestedFrom 0x0200 /* Part of a parenthesized FROM clause */ -#define SF_MaybeConvert 0x0400 /* Need convertCompoundSelectToSubquery() */ -#define SF_Recursive 0x0800 /* The recursive part of a recursive CTE */ -#define SF_Compound 0x1000 /* Part of a compound query */ - - -/* -** The results of a SELECT can be distributed in several ways, as defined -** by one of the following macros. The "SRT" prefix means "SELECT Result -** Type". -** -** SRT_Union Store results as a key in a temporary index -** identified by pDest->iSDParm. -** -** SRT_Except Remove results from the temporary index pDest->iSDParm. -** -** SRT_Exists Store a 1 in memory cell pDest->iSDParm if the result -** set is not empty. -** -** SRT_Discard Throw the results away. This is used by SELECT -** statements within triggers whose only purpose is -** the side-effects of functions. -** -** All of the above are free to ignore their ORDER BY clause. Those that -** follow must honor the ORDER BY clause. -** -** SRT_Output Generate a row of output (using the OP_ResultRow -** opcode) for each row in the result set. -** -** SRT_Mem Only valid if the result is a single column. -** Store the first column of the first result row -** in register pDest->iSDParm then abandon the rest -** of the query. This destination implies "LIMIT 1". -** -** SRT_Set The result must be a single column. Store each -** row of result as the key in table pDest->iSDParm. -** Apply the affinity pDest->affSdst before storing -** results. Used to implement "IN (SELECT ...)". -** -** SRT_EphemTab Create an temporary table pDest->iSDParm and store -** the result there. The cursor is left open after -** returning. This is like SRT_Table except that -** this destination uses OP_OpenEphemeral to create -** the table first. -** -** SRT_Coroutine Generate a co-routine that returns a new row of -** results each time it is invoked. The entry point -** of the co-routine is stored in register pDest->iSDParm -** and the result row is stored in pDest->nDest registers -** starting with pDest->iSdst. -** -** SRT_Table Store results in temporary table pDest->iSDParm. -** SRT_Fifo This is like SRT_EphemTab except that the table -** is assumed to already be open. SRT_Fifo has -** the additional property of being able to ignore -** the ORDER BY clause. -** -** SRT_DistFifo Store results in a temporary table pDest->iSDParm. -** But also use temporary table pDest->iSDParm+1 as -** a record of all prior results and ignore any duplicate -** rows. Name means: "Distinct Fifo". -** -** SRT_Queue Store results in priority queue pDest->iSDParm (really -** an index). Append a sequence number so that all entries -** are distinct. -** -** SRT_DistQueue Store results in priority queue pDest->iSDParm only if -** the same record has never been stored before. The -** index at pDest->iSDParm+1 hold all prior stores. 
-*/ -#define SRT_Union 1 /* Store result as keys in an index */ -#define SRT_Except 2 /* Remove result from a UNION index */ -#define SRT_Exists 3 /* Store 1 if the result is not empty */ -#define SRT_Discard 4 /* Do not save the results anywhere */ -#define SRT_Fifo 5 /* Store result as data with an automatic rowid */ -#define SRT_DistFifo 6 /* Like SRT_Fifo, but unique results only */ -#define SRT_Queue 7 /* Store result in an queue */ -#define SRT_DistQueue 8 /* Like SRT_Queue, but unique results only */ - -/* The ORDER BY clause is ignored for all of the above */ -#define IgnorableOrderby(X) ((X->eDest)<=SRT_DistQueue) - -#define SRT_Output 9 /* Output each row of result */ -#define SRT_Mem 10 /* Store result in a memory cell */ -#define SRT_Set 11 /* Store results as keys in an index */ -#define SRT_EphemTab 12 /* Create transient tab and store like SRT_Table */ -#define SRT_Coroutine 13 /* Generate a single row of result */ -#define SRT_Table 14 /* Store result as data with an automatic rowid */ - -/* -** An instance of this object describes where to put of the results of -** a SELECT statement. -*/ -struct SelectDest { - u8 eDest; /* How to dispose of the results. On of SRT_* above. */ - char affSdst; /* Affinity used when eDest==SRT_Set */ - int iSDParm; /* A parameter used by the eDest disposal method */ - int iSdst; /* Base register where results are written */ - int nSdst; /* Number of registers allocated */ - ExprList *pOrderBy; /* Key columns for SRT_Queue and SRT_DistQueue */ -}; - -/* -** During code generation of statements that do inserts into AUTOINCREMENT -** tables, the following information is attached to the Table.u.autoInc.p -** pointer of each autoincrement table to record some side information that -** the code generator needs. We have to keep per-table autoincrement -** information in case inserts are down within triggers. Triggers do not -** normally coordinate their activities, but we do need to coordinate the -** loading and saving of autoincrement information. -*/ -struct AutoincInfo { - AutoincInfo *pNext; /* Next info block in a list of them all */ - Table *pTab; /* Table this info block refers to */ - int iDb; /* Index in sqlite3.aDb[] of database holding pTab */ - int regCtr; /* Memory register holding the rowid counter */ -}; - -/* -** Size of the column cache -*/ -#ifndef SQLITE_N_COLCACHE -# define SQLITE_N_COLCACHE 10 -#endif - -/* -** At least one instance of the following structure is created for each -** trigger that may be fired while parsing an INSERT, UPDATE or DELETE -** statement. All such objects are stored in the linked list headed at -** Parse.pTriggerPrg and deleted once statement compilation has been -** completed. -** -** A Vdbe sub-program that implements the body and WHEN clause of trigger -** TriggerPrg.pTrigger, assuming a default ON CONFLICT clause of -** TriggerPrg.orconf, is stored in the TriggerPrg.pProgram variable. -** The Parse.pTriggerPrg list never contains two entries with the same -** values for both pTrigger and orconf. -** -** The TriggerPrg.aColmask[0] variable is set to a mask of old.* columns -** accessed (or set to 0 for triggers fired as a result of INSERT -** statements). Similarly, the TriggerPrg.aColmask[1] variable is set to -** a mask of new.* columns used by the program. 
-*/ -struct TriggerPrg { - Trigger *pTrigger; /* Trigger this program was coded from */ - TriggerPrg *pNext; /* Next entry in Parse.pTriggerPrg list */ - SubProgram *pProgram; /* Program implementing pTrigger/orconf */ - int orconf; /* Default ON CONFLICT policy */ - u32 aColmask[2]; /* Masks of old.*, new.* columns accessed */ -}; - -/* -** The yDbMask datatype for the bitmask of all attached databases. -*/ -#if SQLITE_MAX_ATTACHED>30 - typedef sqlite3_uint64 yDbMask; -#else - typedef unsigned int yDbMask; -#endif - -/* -** An SQL parser context. A copy of this structure is passed through -** the parser and down into all the parser action routine in order to -** carry around information that is global to the entire parse. -** -** The structure is divided into two parts. When the parser and code -** generate call themselves recursively, the first part of the structure -** is constant but the second part is reset at the beginning and end of -** each recursion. -** -** The nTableLock and aTableLock variables are only used if the shared-cache -** feature is enabled (if sqlite3Tsd()->useSharedData is true). They are -** used to store the set of table-locks required by the statement being -** compiled. Function sqlite3TableLock() is used to add entries to the -** list. -*/ -struct Parse { - sqlite3 *db; /* The main database structure */ - char *zErrMsg; /* An error message */ - Vdbe *pVdbe; /* An engine for executing database bytecode */ - int rc; /* Return code from execution */ - u8 colNamesSet; /* TRUE after OP_ColumnName has been issued to pVdbe */ - u8 checkSchema; /* Causes schema cookie check after an error */ - u8 nested; /* Number of nested calls to the parser/code generator */ - u8 nTempReg; /* Number of temporary registers in aTempReg[] */ - u8 isMultiWrite; /* True if statement may modify/insert multiple rows */ - u8 mayAbort; /* True if statement may throw an ABORT exception */ - u8 hasCompound; /* Need to invoke convertCompoundSelectToSubquery() */ - u8 okConstFactor; /* OK to factor out constants */ - int aTempReg[8]; /* Holding area for temporary registers */ - int nRangeReg; /* Size of the temporary register block */ - int iRangeReg; /* First register in temporary register block */ - int nErr; /* Number of errors seen */ - int nTab; /* Number of previously allocated VDBE cursors */ - int nMem; /* Number of memory cells used so far */ - int nSet; /* Number of sets used so far */ - int nOnce; /* Number of OP_Once instructions so far */ - int nOpAlloc; /* Number of slots allocated for Vdbe.aOp[] */ - int iFixedOp; /* Never back out opcodes iFixedOp-1 or earlier */ - int ckBase; /* Base register of data during check constraints */ - int iPartIdxTab; /* Table corresponding to a partial index */ - int iCacheLevel; /* ColCache valid when aColCache[].iLevel<=iCacheLevel */ - int iCacheCnt; /* Counter used to generate aColCache[].lru values */ - int nLabel; /* Number of labels used */ - int *aLabel; /* Space to hold the labels */ - struct yColCache { - int iTable; /* Table cursor number */ - i16 iColumn; /* Table column number */ - u8 tempReg; /* iReg is a temp register that needs to be freed */ - int iLevel; /* Nesting level */ - int iReg; /* Reg with value of this column. 0 means none. 
*/ - int lru; /* Least recently used entry has the smallest value */ - } aColCache[SQLITE_N_COLCACHE]; /* One for each column cache entry */ - ExprList *pConstExpr;/* Constant expressions */ - Token constraintName;/* Name of the constraint currently being parsed */ - yDbMask writeMask; /* Start a write transaction on these databases */ - yDbMask cookieMask; /* Bitmask of schema verified databases */ - int cookieValue[SQLITE_MAX_ATTACHED+2]; /* Values of cookies to verify */ - int regRowid; /* Register holding rowid of CREATE TABLE entry */ - int regRoot; /* Register holding root page number for new objects */ - int nMaxArg; /* Max args passed to user function by sub-program */ -#ifndef SQLITE_OMIT_SHARED_CACHE - int nTableLock; /* Number of locks in aTableLock */ - TableLock *aTableLock; /* Required table locks for shared-cache mode */ -#endif - AutoincInfo *pAinc; /* Information about AUTOINCREMENT counters */ - - /* Information used while coding trigger programs. */ - Parse *pToplevel; /* Parse structure for main program (or NULL) */ - Table *pTriggerTab; /* Table triggers are being coded for */ - int addrCrTab; /* Address of OP_CreateTable opcode on CREATE TABLE */ - int addrSkipPK; /* Address of instruction to skip PRIMARY KEY index */ - u32 nQueryLoop; /* Est number of iterations of a query (10*log2(N)) */ - u32 oldmask; /* Mask of old.* columns referenced */ - u32 newmask; /* Mask of new.* columns referenced */ - u8 eTriggerOp; /* TK_UPDATE, TK_INSERT or TK_DELETE */ - u8 eOrconf; /* Default ON CONFLICT policy for trigger steps */ - u8 disableTriggers; /* True to disable triggers */ - - /************************************************************************ - ** Above is constant between recursions. Below is reset before and after - ** each recursion. The boundary between these two regions is determined - ** using offsetof(Parse,nVar) so the nVar field must be the first field - ** in the recursive region. - ************************************************************************/ - - int nVar; /* Number of '?' 
variables seen in the SQL so far */ - int nzVar; /* Number of available slots in azVar[] */ - u8 iPkSortOrder; /* ASC or DESC for INTEGER PRIMARY KEY */ - u8 bFreeWith; /* True if pWith should be freed with parser */ - u8 explain; /* True if the EXPLAIN flag is found on the query */ -#ifndef SQLITE_OMIT_VIRTUALTABLE - u8 declareVtab; /* True if inside sqlite3_declare_vtab() */ - int nVtabLock; /* Number of virtual tables to lock */ -#endif - int nAlias; /* Number of aliased result set columns */ - int nHeight; /* Expression tree height of current sub-select */ -#ifndef SQLITE_OMIT_EXPLAIN - int iSelectId; /* ID of current select for EXPLAIN output */ - int iNextSelectId; /* Next available select ID for EXPLAIN output */ -#endif - char **azVar; /* Pointers to names of parameters */ - Vdbe *pReprepare; /* VM being reprepared (sqlite3Reprepare()) */ - const char *zTail; /* All SQL text past the last semicolon parsed */ - Table *pNewTable; /* A table being constructed by CREATE TABLE */ - Trigger *pNewTrigger; /* Trigger under construct by a CREATE TRIGGER */ - const char *zAuthContext; /* The 6th parameter to db->xAuth callbacks */ - Token sNameToken; /* Token with unqualified schema object name */ - Token sLastToken; /* The last token parsed */ -#ifndef SQLITE_OMIT_VIRTUALTABLE - Token sArg; /* Complete text of a module argument */ - Table **apVtabLock; /* Pointer to virtual tables needing locking */ -#endif - Table *pZombieTab; /* List of Table objects to delete after code gen */ - TriggerPrg *pTriggerPrg; /* Linked list of coded triggers */ - With *pWith; /* Current WITH clause, or NULL */ -}; - -/* -** Return true if currently inside an sqlite3_declare_vtab() call. -*/ -#ifdef SQLITE_OMIT_VIRTUALTABLE - #define IN_DECLARE_VTAB 0 -#else - #define IN_DECLARE_VTAB (pParse->declareVtab) -#endif - -/* -** An instance of the following structure can be declared on a stack and used -** to save the Parse.zAuthContext value so that it can be restored later. -*/ -struct AuthContext { - const char *zAuthContext; /* Put saved Parse.zAuthContext here */ - Parse *pParse; /* The Parse structure */ -}; - -/* -** Bitfield flags for P5 value in various opcodes. -*/ -#define OPFLAG_NCHANGE 0x01 /* Set to update db->nChange */ -#define OPFLAG_LASTROWID 0x02 /* Set to update db->lastRowid */ -#define OPFLAG_ISUPDATE 0x04 /* This OP_Insert is an sql UPDATE */ -#define OPFLAG_APPEND 0x08 /* This is likely to be an append */ -#define OPFLAG_USESEEKRESULT 0x10 /* Try to avoid a seek in BtreeInsert() */ -#define OPFLAG_CLEARCACHE 0x20 /* Clear pseudo-table cache in OP_Column */ -#define OPFLAG_LENGTHARG 0x40 /* OP_Column only used for length() */ -#define OPFLAG_TYPEOFARG 0x80 /* OP_Column only used for typeof() */ -#define OPFLAG_BULKCSR 0x01 /* OP_Open** used to open bulk cursor */ -#define OPFLAG_P2ISREG 0x02 /* P2 to OP_Open** is a register number */ -#define OPFLAG_PERMUTE 0x01 /* OP_Compare: use the permutation */ - -/* - * Each trigger present in the database schema is stored as an instance of - * struct Trigger. - * - * Pointers to instances of struct Trigger are stored in two ways. - * 1. In the "trigHash" hash table (part of the sqlite3* that represents the - * database). This allows Trigger structures to be retrieved by name. - * 2. All triggers associated with a single table form a linked list, using the - * pNext member of struct Trigger. A pointer to the first element of the - * linked list is stored as the "pTrigger" member of the associated - * struct Table. 
- * - * The "step_list" member points to the first element of a linked list - * containing the SQL statements specified as the trigger program. - */ -struct Trigger { - char *zName; /* The name of the trigger */ - char *table; /* The table or view to which the trigger applies */ - u8 op; /* One of TK_DELETE, TK_UPDATE, TK_INSERT */ - u8 tr_tm; /* One of TRIGGER_BEFORE, TRIGGER_AFTER */ - Expr *pWhen; /* The WHEN clause of the expression (may be NULL) */ - IdList *pColumns; /* If this is an UPDATE OF <column-list> trigger, - the <column-list> is stored here */ - Schema *pSchema; /* Schema containing the trigger */ - Schema *pTabSchema; /* Schema containing the table */ - TriggerStep *step_list; /* Link list of trigger program steps */ - Trigger *pNext; /* Next trigger associated with the table */ -}; - -/* -** A trigger is either a BEFORE or an AFTER trigger. The following constants -** determine which. -** -** If there are multiple triggers, you might have some BEFORE and some AFTER. -** In that case, the constants below can be ORed together. -*/ -#define TRIGGER_BEFORE 1 -#define TRIGGER_AFTER 2 - -/* - * An instance of struct TriggerStep is used to store a single SQL statement - * that is a part of a trigger-program. - * - * Instances of struct TriggerStep are stored in a singly linked list (linked - * using the "pNext" member) referenced by the "step_list" member of the - * associated struct Trigger instance. The first element of the linked list is - * the first step of the trigger-program. - * - * The "op" member indicates whether this is a "DELETE", "INSERT", "UPDATE" or - * "SELECT" statement. The meanings of the other members are determined by the - * value of "op" as follows: - * - * (op == TK_INSERT) - * orconf -> stores the ON CONFLICT algorithm - * pSelect -> If this is an INSERT INTO ... SELECT ... statement, then - * this stores a pointer to the SELECT statement. Otherwise NULL. - * target -> A token holding the quoted name of the table to insert into. - * pExprList -> If this is an INSERT INTO ... VALUES ... statement, then - * this stores values to be inserted. Otherwise NULL. - * pIdList -> If this is an INSERT INTO ... (<column-names>) VALUES ... - * statement, then this stores the column-names to be - * inserted into. - * - * (op == TK_DELETE) - * target -> A token holding the quoted name of the table to delete from. - * pWhere -> The WHERE clause of the DELETE statement if one is specified. - * Otherwise NULL. - * - * (op == TK_UPDATE) - * target -> A token holding the quoted name of the table to update rows of. - * pWhere -> The WHERE clause of the UPDATE statement if one is specified. - * Otherwise NULL. - * pExprList -> A list of the columns to update and the expressions to update - * them to. See sqlite3Update() documentation of "pChanges" - * argument. - * - */ -struct TriggerStep { - u8 op; /* One of TK_DELETE, TK_UPDATE, TK_INSERT, TK_SELECT */ - u8 orconf; /* OE_Rollback etc. */ - Trigger *pTrig; /* The trigger that this step is a part of */ - Select *pSelect; /* SELECT statement or RHS of INSERT INTO .. SELECT ... */ - Token target; /* Target table for DELETE, UPDATE, INSERT */ - Expr *pWhere; /* The WHERE clause for DELETE or UPDATE steps */ - ExprList *pExprList; /* SET clause for UPDATE. */ - IdList *pIdList; /* Column names for INSERT */ - TriggerStep *pNext; /* Next in the link-list */ - TriggerStep *pLast; /* Last element in link-list. Valid for 1st elem only */ -}; - -/* -** The following structure contains information used by the sqliteFix...
-** routines as they walk the parse tree to make database references -** explicit. -*/ -typedef struct DbFixer DbFixer; -struct DbFixer { - Parse *pParse; /* The parsing context. Error messages written here */ - Schema *pSchema; /* Fix items to this schema */ - int bVarOnly; /* Check for variable references only */ - const char *zDb; /* Make sure all objects are contained in this database */ - const char *zType; /* Type of the container - used for error messages */ - const Token *pName; /* Name of the container - used for error messages */ -}; - -/* -** An objected used to accumulate the text of a string where we -** do not necessarily know how big the string will be in the end. -*/ -struct StrAccum { - sqlite3 *db; /* Optional database for lookaside. Can be NULL */ - char *zBase; /* A base allocation. Not from malloc. */ - char *zText; /* The string collected so far */ - int nChar; /* Length of the string so far */ - int nAlloc; /* Amount of space allocated in zText */ - int mxAlloc; /* Maximum allowed string length */ - u8 useMalloc; /* 0: none, 1: sqlite3DbMalloc, 2: sqlite3_malloc */ - u8 accError; /* STRACCUM_NOMEM or STRACCUM_TOOBIG */ -}; -#define STRACCUM_NOMEM 1 -#define STRACCUM_TOOBIG 2 - -/* -** A pointer to this structure is used to communicate information -** from sqlite3Init and OP_ParseSchema into the sqlite3InitCallback. -*/ -typedef struct { - sqlite3 *db; /* The database being initialized */ - char **pzErrMsg; /* Error message stored here */ - int iDb; /* 0 for main database. 1 for TEMP, 2.. for ATTACHed */ - int rc; /* Result code stored here */ -} InitData; - -/* -** Structure containing global configuration data for the SQLite library. -** -** This structure also contains some state information. -*/ -struct Sqlite3Config { - int bMemstat; /* True to enable memory status */ - int bCoreMutex; /* True to enable core mutexing */ - int bFullMutex; /* True to enable full mutexing */ - int bOpenUri; /* True to interpret filenames as URIs */ - int bUseCis; /* Use covering indices for full-scans */ - int mxStrlen; /* Maximum string length */ - int neverCorrupt; /* Database is always well-formed */ - int szLookaside; /* Default lookaside buffer size */ - int nLookaside; /* Default lookaside buffer count */ - sqlite3_mem_methods m; /* Low-level memory allocation interface */ - sqlite3_mutex_methods mutex; /* Low-level mutex interface */ - sqlite3_pcache_methods2 pcache2; /* Low-level page-cache interface */ - void *pHeap; /* Heap storage space */ - int nHeap; /* Size of pHeap[] */ - int mnReq, mxReq; /* Min and max heap requests sizes */ - sqlite3_int64 szMmap; /* mmap() space per open file */ - sqlite3_int64 mxMmap; /* Maximum value for szMmap */ - void *pScratch; /* Scratch memory */ - int szScratch; /* Size of each scratch buffer */ - int nScratch; /* Number of scratch buffers */ - void *pPage; /* Page cache memory */ - int szPage; /* Size of each page in pPage[] */ - int nPage; /* Number of pages in pPage[] */ - int mxParserStack; /* maximum depth of the parser stack */ - int sharedCacheEnabled; /* true if shared-cache mode enabled */ - /* The above might be initialized to non-zero. The following need to always - ** initially be zero, however. 
*/ - int isInit; /* True after initialization has finished */ - int inProgress; /* True while initialization in progress */ - int isMutexInit; /* True after mutexes are initialized */ - int isMallocInit; /* True after malloc is initialized */ - int isPCacheInit; /* True after malloc is initialized */ - int nRefInitMutex; /* Number of users of pInitMutex */ - sqlite3_mutex *pInitMutex; /* Mutex used by sqlite3_initialize() */ - void (*xLog)(void*,int,const char*); /* Function for logging */ - void *pLogArg; /* First argument to xLog() */ -#ifdef SQLITE_ENABLE_SQLLOG - void(*xSqllog)(void*,sqlite3*,const char*, int); - void *pSqllogArg; -#endif -#ifdef SQLITE_VDBE_COVERAGE - /* The following callback (if not NULL) is invoked on every VDBE branch - ** operation. Set the callback using SQLITE_TESTCTRL_VDBE_COVERAGE. - */ - void (*xVdbeBranch)(void*,int iSrcLine,u8 eThis,u8 eMx); /* Callback */ - void *pVdbeBranchArg; /* 1st argument */ -#endif -#ifndef SQLITE_OMIT_BUILTIN_TEST - int (*xTestCallback)(int); /* Invoked by sqlite3FaultSim() */ -#endif - int bLocaltimeFault; /* True to fail localtime() calls */ -}; - -/* -** This macro is used inside of assert() statements to indicate that -** the assert is only valid on a well-formed database. Instead of: -** -** assert( X ); -** -** One writes: -** -** assert( X || CORRUPT_DB ); -** -** CORRUPT_DB is true during normal operation. CORRUPT_DB does not indicate -** that the database is definitely corrupt, only that it might be corrupt. -** For most test cases, CORRUPT_DB is set to false using a special -** sqlite3_test_control(). This enables assert() statements to prove -** things that are always true for well-formed databases. -*/ -#define CORRUPT_DB (sqlite3Config.neverCorrupt==0) - -/* -** Context pointer passed down through the tree-walk. -*/ -struct Walker { - int (*xExprCallback)(Walker*, Expr*); /* Callback for expressions */ - int (*xSelectCallback)(Walker*,Select*); /* Callback for SELECTs */ - void (*xSelectCallback2)(Walker*,Select*);/* Second callback for SELECTs */ - Parse *pParse; /* Parser context. */ - int walkerDepth; /* Number of subqueries */ - union { /* Extra data for callback */ - NameContext *pNC; /* Naming context */ - int i; /* Integer value */ - SrcList *pSrcList; /* FROM clause */ - struct SrcCount *pSrcCount; /* Counting column references */ - } u; -}; - -/* Forward declarations */ -SQLITE_PRIVATE int sqlite3WalkExpr(Walker*, Expr*); -SQLITE_PRIVATE int sqlite3WalkExprList(Walker*, ExprList*); -SQLITE_PRIVATE int sqlite3WalkSelect(Walker*, Select*); -SQLITE_PRIVATE int sqlite3WalkSelectExpr(Walker*, Select*); -SQLITE_PRIVATE int sqlite3WalkSelectFrom(Walker*, Select*); - -/* -** Return code from the parse-tree walking primitives and their -** callbacks. -*/ -#define WRC_Continue 0 /* Continue down into children */ -#define WRC_Prune 1 /* Omit children but continue walking siblings */ -#define WRC_Abort 2 /* Abandon the tree walk */ - -/* -** An instance of this structure represents a set of one or more CTEs -** (common table expressions) created by a single WITH clause. -*/ -struct With { - int nCte; /* Number of CTEs in the WITH clause */ - With *pOuter; /* Containing WITH clause, or NULL */ - struct Cte { /* For each CTE in the WITH clause.... 
*/ - char *zName; /* Name of this CTE */ - ExprList *pCols; /* List of explicit column names, or NULL */ - Select *pSelect; /* The definition of this CTE */ - const char *zErr; /* Error message for circular references */ - } a[1]; -}; - -/* -** Assuming zIn points to the first byte of a UTF-8 character, -** advance zIn to point to the first byte of the next UTF-8 character. -*/ -#define SQLITE_SKIP_UTF8(zIn) { \ - if( (*(zIn++))>=0xc0 ){ \ - while( (*zIn & 0xc0)==0x80 ){ zIn++; } \ - } \ -} - -/* -** The SQLITE_*_BKPT macros are substitutes for the error codes with -** the same name but without the _BKPT suffix. These macros invoke -** routines that report the line-number on which the error originated -** using sqlite3_log(). The routines also provide a convenient place -** to set a debugger breakpoint. -*/ -SQLITE_PRIVATE int sqlite3CorruptError(int); -SQLITE_PRIVATE int sqlite3MisuseError(int); -SQLITE_PRIVATE int sqlite3CantopenError(int); -#define SQLITE_CORRUPT_BKPT sqlite3CorruptError(__LINE__) -#define SQLITE_MISUSE_BKPT sqlite3MisuseError(__LINE__) -#define SQLITE_CANTOPEN_BKPT sqlite3CantopenError(__LINE__) - - -/* -** FTS4 is really an extension for FTS3. It is enabled using the -** SQLITE_ENABLE_FTS3 macro. But to avoid confusion we also allow -** the SQLITE_ENABLE_FTS4 macro to serve as an alias for SQLITE_ENABLE_FTS3. -*/ -#if defined(SQLITE_ENABLE_FTS4) && !defined(SQLITE_ENABLE_FTS3) -# define SQLITE_ENABLE_FTS3 -#endif - -/* -** The ctype.h header is needed for non-ASCII systems. It is also -** needed by FTS3 when FTS3 is included in the amalgamation. -*/ -#if !defined(SQLITE_ASCII) || \ - (defined(SQLITE_ENABLE_FTS3) && defined(SQLITE_AMALGAMATION)) -# include <ctype.h> -#endif - -/* -** The following macros mimic the standard library functions toupper(), -** isspace(), isalnum(), isdigit() and isxdigit(), respectively. The -** sqlite versions only work for ASCII characters, regardless of locale.
-*/ -#ifdef SQLITE_ASCII -# define sqlite3Toupper(x) ((x)&~(sqlite3CtypeMap[(unsigned char)(x)]&0x20)) -# define sqlite3Isspace(x) (sqlite3CtypeMap[(unsigned char)(x)]&0x01) -# define sqlite3Isalnum(x) (sqlite3CtypeMap[(unsigned char)(x)]&0x06) -# define sqlite3Isalpha(x) (sqlite3CtypeMap[(unsigned char)(x)]&0x02) -# define sqlite3Isdigit(x) (sqlite3CtypeMap[(unsigned char)(x)]&0x04) -# define sqlite3Isxdigit(x) (sqlite3CtypeMap[(unsigned char)(x)]&0x08) -# define sqlite3Tolower(x) (sqlite3UpperToLower[(unsigned char)(x)]) -#else -# define sqlite3Toupper(x) toupper((unsigned char)(x)) -# define sqlite3Isspace(x) isspace((unsigned char)(x)) -# define sqlite3Isalnum(x) isalnum((unsigned char)(x)) -# define sqlite3Isalpha(x) isalpha((unsigned char)(x)) -# define sqlite3Isdigit(x) isdigit((unsigned char)(x)) -# define sqlite3Isxdigit(x) isxdigit((unsigned char)(x)) -# define sqlite3Tolower(x) tolower((unsigned char)(x)) -#endif - -/* -** Internal function prototypes -*/ -#define sqlite3StrICmp sqlite3_stricmp -SQLITE_PRIVATE int sqlite3Strlen30(const char*); -#define sqlite3StrNICmp sqlite3_strnicmp - -SQLITE_PRIVATE int sqlite3MallocInit(void); -SQLITE_PRIVATE void sqlite3MallocEnd(void); -SQLITE_PRIVATE void *sqlite3Malloc(int); -SQLITE_PRIVATE void *sqlite3MallocZero(int); -SQLITE_PRIVATE void *sqlite3DbMallocZero(sqlite3*, int); -SQLITE_PRIVATE void *sqlite3DbMallocRaw(sqlite3*, int); -SQLITE_PRIVATE char *sqlite3DbStrDup(sqlite3*,const char*); -SQLITE_PRIVATE char *sqlite3DbStrNDup(sqlite3*,const char*, int); -SQLITE_PRIVATE void *sqlite3Realloc(void*, int); -SQLITE_PRIVATE void *sqlite3DbReallocOrFree(sqlite3 *, void *, int); -SQLITE_PRIVATE void *sqlite3DbRealloc(sqlite3 *, void *, int); -SQLITE_PRIVATE void sqlite3DbFree(sqlite3*, void*); -SQLITE_PRIVATE int sqlite3MallocSize(void*); -SQLITE_PRIVATE int sqlite3DbMallocSize(sqlite3*, void*); -SQLITE_PRIVATE void *sqlite3ScratchMalloc(int); -SQLITE_PRIVATE void sqlite3ScratchFree(void*); -SQLITE_PRIVATE void *sqlite3PageMalloc(int); -SQLITE_PRIVATE void sqlite3PageFree(void*); -SQLITE_PRIVATE void sqlite3MemSetDefault(void); -SQLITE_PRIVATE void sqlite3BenignMallocHooks(void (*)(void), void (*)(void)); -SQLITE_PRIVATE int sqlite3HeapNearlyFull(void); - -/* -** On systems with ample stack space and that support alloca(), make -** use of alloca() to obtain space for large automatic objects. By default, -** obtain space from malloc(). -** -** The alloca() routine never returns NULL. This will cause code paths -** that deal with sqlite3StackAlloc() failures to be unreachable. 
-*/ -#ifdef SQLITE_USE_ALLOCA -# define sqlite3StackAllocRaw(D,N) alloca(N) -# define sqlite3StackAllocZero(D,N) memset(alloca(N), 0, N) -# define sqlite3StackFree(D,P) -#else -# define sqlite3StackAllocRaw(D,N) sqlite3DbMallocRaw(D,N) -# define sqlite3StackAllocZero(D,N) sqlite3DbMallocZero(D,N) -# define sqlite3StackFree(D,P) sqlite3DbFree(D,P) -#endif - -#ifdef SQLITE_ENABLE_MEMSYS3 -SQLITE_PRIVATE const sqlite3_mem_methods *sqlite3MemGetMemsys3(void); -#endif -#ifdef SQLITE_ENABLE_MEMSYS5 -SQLITE_PRIVATE const sqlite3_mem_methods *sqlite3MemGetMemsys5(void); -#endif - - -#ifndef SQLITE_MUTEX_OMIT -SQLITE_PRIVATE sqlite3_mutex_methods const *sqlite3DefaultMutex(void); -SQLITE_PRIVATE sqlite3_mutex_methods const *sqlite3NoopMutex(void); -SQLITE_PRIVATE sqlite3_mutex *sqlite3MutexAlloc(int); -SQLITE_PRIVATE int sqlite3MutexInit(void); -SQLITE_PRIVATE int sqlite3MutexEnd(void); -#endif - -SQLITE_PRIVATE int sqlite3StatusValue(int); -SQLITE_PRIVATE void sqlite3StatusAdd(int, int); -SQLITE_PRIVATE void sqlite3StatusSet(int, int); - -#ifndef SQLITE_OMIT_FLOATING_POINT -SQLITE_PRIVATE int sqlite3IsNaN(double); -#else -# define sqlite3IsNaN(X) 0 -#endif - -/* -** An instance of the following structure holds information about SQL -** functions arguments that are the parameters to the printf() function. -*/ -struct PrintfArguments { - int nArg; /* Total number of arguments */ - int nUsed; /* Number of arguments used so far */ - sqlite3_value **apArg; /* The argument values */ -}; - -#define SQLITE_PRINTF_INTERNAL 0x01 -#define SQLITE_PRINTF_SQLFUNC 0x02 -SQLITE_PRIVATE void sqlite3VXPrintf(StrAccum*, u32, const char*, va_list); -SQLITE_PRIVATE void sqlite3XPrintf(StrAccum*, u32, const char*, ...); -SQLITE_PRIVATE char *sqlite3MPrintf(sqlite3*,const char*, ...); -SQLITE_PRIVATE char *sqlite3VMPrintf(sqlite3*,const char*, va_list); -SQLITE_PRIVATE char *sqlite3MAppendf(sqlite3*,char*,const char*,...); -#if defined(SQLITE_TEST) || defined(SQLITE_DEBUG) -SQLITE_PRIVATE void sqlite3DebugPrintf(const char*, ...); -#endif -#if defined(SQLITE_TEST) -SQLITE_PRIVATE void *sqlite3TestTextToPtr(const char*); -#endif - -/* Output formatting for SQLITE_TESTCTRL_EXPLAIN */ -#if defined(SQLITE_ENABLE_TREE_EXPLAIN) -SQLITE_PRIVATE void sqlite3ExplainBegin(Vdbe*); -SQLITE_PRIVATE void sqlite3ExplainPrintf(Vdbe*, const char*, ...); -SQLITE_PRIVATE void sqlite3ExplainNL(Vdbe*); -SQLITE_PRIVATE void sqlite3ExplainPush(Vdbe*); -SQLITE_PRIVATE void sqlite3ExplainPop(Vdbe*); -SQLITE_PRIVATE void sqlite3ExplainFinish(Vdbe*); -SQLITE_PRIVATE void sqlite3ExplainSelect(Vdbe*, Select*); -SQLITE_PRIVATE void sqlite3ExplainExpr(Vdbe*, Expr*); -SQLITE_PRIVATE void sqlite3ExplainExprList(Vdbe*, ExprList*); -SQLITE_PRIVATE const char *sqlite3VdbeExplanation(Vdbe*); -#else -# define sqlite3ExplainBegin(X) -# define sqlite3ExplainSelect(A,B) -# define sqlite3ExplainExpr(A,B) -# define sqlite3ExplainExprList(A,B) -# define sqlite3ExplainFinish(X) -# define sqlite3VdbeExplanation(X) 0 -#endif - - -SQLITE_PRIVATE void sqlite3SetString(char **, sqlite3*, const char*, ...); -SQLITE_PRIVATE void sqlite3ErrorMsg(Parse*, const char*, ...); -SQLITE_PRIVATE int sqlite3Dequote(char*); -SQLITE_PRIVATE int sqlite3KeywordCode(const unsigned char*, int); -SQLITE_PRIVATE int sqlite3RunParser(Parse*, const char*, char **); -SQLITE_PRIVATE void sqlite3FinishCoding(Parse*); -SQLITE_PRIVATE int sqlite3GetTempReg(Parse*); -SQLITE_PRIVATE void sqlite3ReleaseTempReg(Parse*,int); -SQLITE_PRIVATE int sqlite3GetTempRange(Parse*,int); -SQLITE_PRIVATE void 
sqlite3ReleaseTempRange(Parse*,int,int); -SQLITE_PRIVATE void sqlite3ClearTempRegCache(Parse*); -SQLITE_PRIVATE Expr *sqlite3ExprAlloc(sqlite3*,int,const Token*,int); -SQLITE_PRIVATE Expr *sqlite3Expr(sqlite3*,int,const char*); -SQLITE_PRIVATE void sqlite3ExprAttachSubtrees(sqlite3*,Expr*,Expr*,Expr*); -SQLITE_PRIVATE Expr *sqlite3PExpr(Parse*, int, Expr*, Expr*, const Token*); -SQLITE_PRIVATE Expr *sqlite3ExprAnd(sqlite3*,Expr*, Expr*); -SQLITE_PRIVATE Expr *sqlite3ExprFunction(Parse*,ExprList*, Token*); -SQLITE_PRIVATE void sqlite3ExprAssignVarNumber(Parse*, Expr*); -SQLITE_PRIVATE void sqlite3ExprDelete(sqlite3*, Expr*); -SQLITE_PRIVATE ExprList *sqlite3ExprListAppend(Parse*,ExprList*,Expr*); -SQLITE_PRIVATE void sqlite3ExprListSetName(Parse*,ExprList*,Token*,int); -SQLITE_PRIVATE void sqlite3ExprListSetSpan(Parse*,ExprList*,ExprSpan*); -SQLITE_PRIVATE void sqlite3ExprListDelete(sqlite3*, ExprList*); -SQLITE_PRIVATE int sqlite3Init(sqlite3*, char**); -SQLITE_PRIVATE int sqlite3InitCallback(void*, int, char**, char**); -SQLITE_PRIVATE void sqlite3Pragma(Parse*,Token*,Token*,Token*,int); -SQLITE_PRIVATE void sqlite3ResetAllSchemasOfConnection(sqlite3*); -SQLITE_PRIVATE void sqlite3ResetOneSchema(sqlite3*,int); -SQLITE_PRIVATE void sqlite3CollapseDatabaseArray(sqlite3*); -SQLITE_PRIVATE void sqlite3BeginParse(Parse*,int); -SQLITE_PRIVATE void sqlite3CommitInternalChanges(sqlite3*); -SQLITE_PRIVATE Table *sqlite3ResultSetOfSelect(Parse*,Select*); -SQLITE_PRIVATE void sqlite3OpenMasterTable(Parse *, int); -SQLITE_PRIVATE Index *sqlite3PrimaryKeyIndex(Table*); -SQLITE_PRIVATE i16 sqlite3ColumnOfIndex(Index*, i16); -SQLITE_PRIVATE void sqlite3StartTable(Parse*,Token*,Token*,int,int,int,int); -SQLITE_PRIVATE void sqlite3AddColumn(Parse*,Token*); -SQLITE_PRIVATE void sqlite3AddNotNull(Parse*, int); -SQLITE_PRIVATE void sqlite3AddPrimaryKey(Parse*, ExprList*, int, int, int); -SQLITE_PRIVATE void sqlite3AddCheckConstraint(Parse*, Expr*); -SQLITE_PRIVATE void sqlite3AddColumnType(Parse*,Token*); -SQLITE_PRIVATE void sqlite3AddDefaultValue(Parse*,ExprSpan*); -SQLITE_PRIVATE void sqlite3AddCollateType(Parse*, Token*); -SQLITE_PRIVATE void sqlite3EndTable(Parse*,Token*,Token*,u8,Select*); -SQLITE_PRIVATE int sqlite3ParseUri(const char*,const char*,unsigned int*, - sqlite3_vfs**,char**,char **); -SQLITE_PRIVATE Btree *sqlite3DbNameToBtree(sqlite3*,const char*); -SQLITE_PRIVATE int sqlite3CodeOnce(Parse *); - -#ifdef SQLITE_OMIT_BUILTIN_TEST -# define sqlite3FaultSim(X) SQLITE_OK -#else -SQLITE_PRIVATE int sqlite3FaultSim(int); -#endif - -SQLITE_PRIVATE Bitvec *sqlite3BitvecCreate(u32); -SQLITE_PRIVATE int sqlite3BitvecTest(Bitvec*, u32); -SQLITE_PRIVATE int sqlite3BitvecSet(Bitvec*, u32); -SQLITE_PRIVATE void sqlite3BitvecClear(Bitvec*, u32, void*); -SQLITE_PRIVATE void sqlite3BitvecDestroy(Bitvec*); -SQLITE_PRIVATE u32 sqlite3BitvecSize(Bitvec*); -SQLITE_PRIVATE int sqlite3BitvecBuiltinTest(int,int*); - -SQLITE_PRIVATE RowSet *sqlite3RowSetInit(sqlite3*, void*, unsigned int); -SQLITE_PRIVATE void sqlite3RowSetClear(RowSet*); -SQLITE_PRIVATE void sqlite3RowSetInsert(RowSet*, i64); -SQLITE_PRIVATE int sqlite3RowSetTest(RowSet*, int iBatch, i64); -SQLITE_PRIVATE int sqlite3RowSetNext(RowSet*, i64*); - -SQLITE_PRIVATE void sqlite3CreateView(Parse*,Token*,Token*,Token*,Select*,int,int); - -#if !defined(SQLITE_OMIT_VIEW) || !defined(SQLITE_OMIT_VIRTUALTABLE) -SQLITE_PRIVATE int sqlite3ViewGetColumnNames(Parse*,Table*); -#else -# define sqlite3ViewGetColumnNames(A,B) 0 -#endif - -SQLITE_PRIVATE void 
sqlite3DropTable(Parse*, SrcList*, int, int); -SQLITE_PRIVATE void sqlite3CodeDropTable(Parse*, Table*, int, int); -SQLITE_PRIVATE void sqlite3DeleteTable(sqlite3*, Table*); -#ifndef SQLITE_OMIT_AUTOINCREMENT -SQLITE_PRIVATE void sqlite3AutoincrementBegin(Parse *pParse); -SQLITE_PRIVATE void sqlite3AutoincrementEnd(Parse *pParse); -#else -# define sqlite3AutoincrementBegin(X) -# define sqlite3AutoincrementEnd(X) -#endif -SQLITE_PRIVATE void sqlite3Insert(Parse*, SrcList*, Select*, IdList*, int); -SQLITE_PRIVATE void *sqlite3ArrayAllocate(sqlite3*,void*,int,int*,int*); -SQLITE_PRIVATE IdList *sqlite3IdListAppend(sqlite3*, IdList*, Token*); -SQLITE_PRIVATE int sqlite3IdListIndex(IdList*,const char*); -SQLITE_PRIVATE SrcList *sqlite3SrcListEnlarge(sqlite3*, SrcList*, int, int); -SQLITE_PRIVATE SrcList *sqlite3SrcListAppend(sqlite3*, SrcList*, Token*, Token*); -SQLITE_PRIVATE SrcList *sqlite3SrcListAppendFromTerm(Parse*, SrcList*, Token*, Token*, - Token*, Select*, Expr*, IdList*); -SQLITE_PRIVATE void sqlite3SrcListIndexedBy(Parse *, SrcList *, Token *); -SQLITE_PRIVATE int sqlite3IndexedByLookup(Parse *, struct SrcList_item *); -SQLITE_PRIVATE void sqlite3SrcListShiftJoinType(SrcList*); -SQLITE_PRIVATE void sqlite3SrcListAssignCursors(Parse*, SrcList*); -SQLITE_PRIVATE void sqlite3IdListDelete(sqlite3*, IdList*); -SQLITE_PRIVATE void sqlite3SrcListDelete(sqlite3*, SrcList*); -SQLITE_PRIVATE Index *sqlite3AllocateIndexObject(sqlite3*,i16,int,char**); -SQLITE_PRIVATE Index *sqlite3CreateIndex(Parse*,Token*,Token*,SrcList*,ExprList*,int,Token*, - Expr*, int, int); -SQLITE_PRIVATE void sqlite3DropIndex(Parse*, SrcList*, int); -SQLITE_PRIVATE int sqlite3Select(Parse*, Select*, SelectDest*); -SQLITE_PRIVATE Select *sqlite3SelectNew(Parse*,ExprList*,SrcList*,Expr*,ExprList*, - Expr*,ExprList*,u16,Expr*,Expr*); -SQLITE_PRIVATE void sqlite3SelectDelete(sqlite3*, Select*); -SQLITE_PRIVATE Table *sqlite3SrcListLookup(Parse*, SrcList*); -SQLITE_PRIVATE int sqlite3IsReadOnly(Parse*, Table*, int); -SQLITE_PRIVATE void sqlite3OpenTable(Parse*, int iCur, int iDb, Table*, int); -#if defined(SQLITE_ENABLE_UPDATE_DELETE_LIMIT) && !defined(SQLITE_OMIT_SUBQUERY) -SQLITE_PRIVATE Expr *sqlite3LimitWhere(Parse*,SrcList*,Expr*,ExprList*,Expr*,Expr*,char*); -#endif -SQLITE_PRIVATE void sqlite3DeleteFrom(Parse*, SrcList*, Expr*); -SQLITE_PRIVATE void sqlite3Update(Parse*, SrcList*, ExprList*, Expr*, int); -SQLITE_PRIVATE WhereInfo *sqlite3WhereBegin(Parse*,SrcList*,Expr*,ExprList*,ExprList*,u16,int); -SQLITE_PRIVATE void sqlite3WhereEnd(WhereInfo*); -SQLITE_PRIVATE u64 sqlite3WhereOutputRowCount(WhereInfo*); -SQLITE_PRIVATE int sqlite3WhereIsDistinct(WhereInfo*); -SQLITE_PRIVATE int sqlite3WhereIsOrdered(WhereInfo*); -SQLITE_PRIVATE int sqlite3WhereIsSorted(WhereInfo*); -SQLITE_PRIVATE int sqlite3WhereContinueLabel(WhereInfo*); -SQLITE_PRIVATE int sqlite3WhereBreakLabel(WhereInfo*); -SQLITE_PRIVATE int sqlite3WhereOkOnePass(WhereInfo*, int*); -SQLITE_PRIVATE int sqlite3ExprCodeGetColumn(Parse*, Table*, int, int, int, u8); -SQLITE_PRIVATE void sqlite3ExprCodeGetColumnOfTable(Vdbe*, Table*, int, int, int); -SQLITE_PRIVATE void sqlite3ExprCodeMove(Parse*, int, int, int); -SQLITE_PRIVATE void sqlite3ExprCacheStore(Parse*, int, int, int); -SQLITE_PRIVATE void sqlite3ExprCachePush(Parse*); -SQLITE_PRIVATE void sqlite3ExprCachePop(Parse*); -SQLITE_PRIVATE void sqlite3ExprCacheRemove(Parse*, int, int); -SQLITE_PRIVATE void sqlite3ExprCacheClear(Parse*); -SQLITE_PRIVATE void sqlite3ExprCacheAffinityChange(Parse*, int, int); 
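/*
** Illustrative only (a hedged sketch, not taken from the original source):
** the expression column-cache routines declared above are normally used in
** matched pairs, so that cache entries created while generating a stretch of
** code can be discarded once that stretch ends. pParse names the current
** Parse context:
**
**     sqlite3ExprCachePush(pParse);
**     .. emit VDBE code that may cache table columns in registers ..
**     sqlite3ExprCachePop(pParse);
*/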
-SQLITE_PRIVATE void sqlite3ExprCode(Parse*, Expr*, int); -SQLITE_PRIVATE void sqlite3ExprCodeFactorable(Parse*, Expr*, int); -SQLITE_PRIVATE void sqlite3ExprCodeAtInit(Parse*, Expr*, int, u8); -SQLITE_PRIVATE int sqlite3ExprCodeTemp(Parse*, Expr*, int*); -SQLITE_PRIVATE int sqlite3ExprCodeTarget(Parse*, Expr*, int); -SQLITE_PRIVATE void sqlite3ExprCodeAndCache(Parse*, Expr*, int); -SQLITE_PRIVATE int sqlite3ExprCodeExprList(Parse*, ExprList*, int, u8); -#define SQLITE_ECEL_DUP 0x01 /* Deep, not shallow copies */ -#define SQLITE_ECEL_FACTOR 0x02 /* Factor out constant terms */ -SQLITE_PRIVATE void sqlite3ExprIfTrue(Parse*, Expr*, int, int); -SQLITE_PRIVATE void sqlite3ExprIfFalse(Parse*, Expr*, int, int); -SQLITE_PRIVATE Table *sqlite3FindTable(sqlite3*,const char*, const char*); -SQLITE_PRIVATE Table *sqlite3LocateTable(Parse*,int isView,const char*, const char*); -SQLITE_PRIVATE Table *sqlite3LocateTableItem(Parse*,int isView,struct SrcList_item *); -SQLITE_PRIVATE Index *sqlite3FindIndex(sqlite3*,const char*, const char*); -SQLITE_PRIVATE void sqlite3UnlinkAndDeleteTable(sqlite3*,int,const char*); -SQLITE_PRIVATE void sqlite3UnlinkAndDeleteIndex(sqlite3*,int,const char*); -SQLITE_PRIVATE void sqlite3Vacuum(Parse*); -SQLITE_PRIVATE int sqlite3RunVacuum(char**, sqlite3*); -SQLITE_PRIVATE char *sqlite3NameFromToken(sqlite3*, Token*); -SQLITE_PRIVATE int sqlite3ExprCompare(Expr*, Expr*, int); -SQLITE_PRIVATE int sqlite3ExprListCompare(ExprList*, ExprList*, int); -SQLITE_PRIVATE int sqlite3ExprImpliesExpr(Expr*, Expr*, int); -SQLITE_PRIVATE void sqlite3ExprAnalyzeAggregates(NameContext*, Expr*); -SQLITE_PRIVATE void sqlite3ExprAnalyzeAggList(NameContext*,ExprList*); -SQLITE_PRIVATE int sqlite3FunctionUsesThisSrc(Expr*, SrcList*); -SQLITE_PRIVATE Vdbe *sqlite3GetVdbe(Parse*); -SQLITE_PRIVATE void sqlite3PrngSaveState(void); -SQLITE_PRIVATE void sqlite3PrngRestoreState(void); -SQLITE_PRIVATE void sqlite3RollbackAll(sqlite3*,int); -SQLITE_PRIVATE void sqlite3CodeVerifySchema(Parse*, int); -SQLITE_PRIVATE void sqlite3CodeVerifyNamedSchema(Parse*, const char *zDb); -SQLITE_PRIVATE void sqlite3BeginTransaction(Parse*, int); -SQLITE_PRIVATE void sqlite3CommitTransaction(Parse*); -SQLITE_PRIVATE void sqlite3RollbackTransaction(Parse*); -SQLITE_PRIVATE void sqlite3Savepoint(Parse*, int, Token*); -SQLITE_PRIVATE void sqlite3CloseSavepoints(sqlite3 *); -SQLITE_PRIVATE void sqlite3LeaveMutexAndCloseZombie(sqlite3*); -SQLITE_PRIVATE int sqlite3ExprIsConstant(Expr*); -SQLITE_PRIVATE int sqlite3ExprIsConstantNotJoin(Expr*); -SQLITE_PRIVATE int sqlite3ExprIsConstantOrFunction(Expr*); -SQLITE_PRIVATE int sqlite3ExprIsInteger(Expr*, int*); -SQLITE_PRIVATE int sqlite3ExprCanBeNull(const Expr*); -SQLITE_PRIVATE int sqlite3ExprNeedsNoAffinityChange(const Expr*, char); -SQLITE_PRIVATE int sqlite3IsRowid(const char*); -SQLITE_PRIVATE void sqlite3GenerateRowDelete(Parse*,Table*,Trigger*,int,int,int,i16,u8,u8,u8); -SQLITE_PRIVATE void sqlite3GenerateRowIndexDelete(Parse*, Table*, int, int, int*); -SQLITE_PRIVATE int sqlite3GenerateIndexKey(Parse*, Index*, int, int, int, int*,Index*,int); -SQLITE_PRIVATE void sqlite3ResolvePartIdxLabel(Parse*,int); -SQLITE_PRIVATE void sqlite3GenerateConstraintChecks(Parse*,Table*,int*,int,int,int,int, - u8,u8,int,int*); -SQLITE_PRIVATE void sqlite3CompleteInsertion(Parse*,Table*,int,int,int,int*,int,int,int); -SQLITE_PRIVATE int sqlite3OpenTableAndIndices(Parse*, Table*, int, int, u8*, int*, int*); -SQLITE_PRIVATE void sqlite3BeginWriteOperation(Parse*, int, int); -SQLITE_PRIVATE 
void sqlite3MultiWrite(Parse*); -SQLITE_PRIVATE void sqlite3MayAbort(Parse*); -SQLITE_PRIVATE void sqlite3HaltConstraint(Parse*, int, int, char*, i8, u8); -SQLITE_PRIVATE void sqlite3UniqueConstraint(Parse*, int, Index*); -SQLITE_PRIVATE void sqlite3RowidConstraint(Parse*, int, Table*); -SQLITE_PRIVATE Expr *sqlite3ExprDup(sqlite3*,Expr*,int); -SQLITE_PRIVATE ExprList *sqlite3ExprListDup(sqlite3*,ExprList*,int); -SQLITE_PRIVATE SrcList *sqlite3SrcListDup(sqlite3*,SrcList*,int); -SQLITE_PRIVATE IdList *sqlite3IdListDup(sqlite3*,IdList*); -SQLITE_PRIVATE Select *sqlite3SelectDup(sqlite3*,Select*,int); -SQLITE_PRIVATE void sqlite3FuncDefInsert(FuncDefHash*, FuncDef*); -SQLITE_PRIVATE FuncDef *sqlite3FindFunction(sqlite3*,const char*,int,int,u8,u8); -SQLITE_PRIVATE void sqlite3RegisterBuiltinFunctions(sqlite3*); -SQLITE_PRIVATE void sqlite3RegisterDateTimeFunctions(void); -SQLITE_PRIVATE void sqlite3RegisterGlobalFunctions(void); -SQLITE_PRIVATE int sqlite3SafetyCheckOk(sqlite3*); -SQLITE_PRIVATE int sqlite3SafetyCheckSickOrOk(sqlite3*); -SQLITE_PRIVATE void sqlite3ChangeCookie(Parse*, int); - -#if !defined(SQLITE_OMIT_VIEW) && !defined(SQLITE_OMIT_TRIGGER) -SQLITE_PRIVATE void sqlite3MaterializeView(Parse*, Table*, Expr*, int); -#endif - -#ifndef SQLITE_OMIT_TRIGGER -SQLITE_PRIVATE void sqlite3BeginTrigger(Parse*, Token*,Token*,int,int,IdList*,SrcList*, - Expr*,int, int); -SQLITE_PRIVATE void sqlite3FinishTrigger(Parse*, TriggerStep*, Token*); -SQLITE_PRIVATE void sqlite3DropTrigger(Parse*, SrcList*, int); -SQLITE_PRIVATE void sqlite3DropTriggerPtr(Parse*, Trigger*); -SQLITE_PRIVATE Trigger *sqlite3TriggersExist(Parse *, Table*, int, ExprList*, int *pMask); -SQLITE_PRIVATE Trigger *sqlite3TriggerList(Parse *, Table *); -SQLITE_PRIVATE void sqlite3CodeRowTrigger(Parse*, Trigger *, int, ExprList*, int, Table *, - int, int, int); -SQLITE_PRIVATE void sqlite3CodeRowTriggerDirect(Parse *, Trigger *, Table *, int, int, int); - void sqliteViewTriggers(Parse*, Table*, Expr*, int, ExprList*); -SQLITE_PRIVATE void sqlite3DeleteTriggerStep(sqlite3*, TriggerStep*); -SQLITE_PRIVATE TriggerStep *sqlite3TriggerSelectStep(sqlite3*,Select*); -SQLITE_PRIVATE TriggerStep *sqlite3TriggerInsertStep(sqlite3*,Token*, IdList*, - Select*,u8); -SQLITE_PRIVATE TriggerStep *sqlite3TriggerUpdateStep(sqlite3*,Token*,ExprList*, Expr*, u8); -SQLITE_PRIVATE TriggerStep *sqlite3TriggerDeleteStep(sqlite3*,Token*, Expr*); -SQLITE_PRIVATE void sqlite3DeleteTrigger(sqlite3*, Trigger*); -SQLITE_PRIVATE void sqlite3UnlinkAndDeleteTrigger(sqlite3*,int,const char*); -SQLITE_PRIVATE u32 sqlite3TriggerColmask(Parse*,Trigger*,ExprList*,int,int,Table*,int); -# define sqlite3ParseToplevel(p) ((p)->pToplevel ? 
(p)->pToplevel : (p)) -#else -# define sqlite3TriggersExist(B,C,D,E,F) 0 -# define sqlite3DeleteTrigger(A,B) -# define sqlite3DropTriggerPtr(A,B) -# define sqlite3UnlinkAndDeleteTrigger(A,B,C) -# define sqlite3CodeRowTrigger(A,B,C,D,E,F,G,H,I) -# define sqlite3CodeRowTriggerDirect(A,B,C,D,E,F) -# define sqlite3TriggerList(X, Y) 0 -# define sqlite3ParseToplevel(p) p -# define sqlite3TriggerColmask(A,B,C,D,E,F,G) 0 -#endif - -SQLITE_PRIVATE int sqlite3JoinType(Parse*, Token*, Token*, Token*); -SQLITE_PRIVATE void sqlite3CreateForeignKey(Parse*, ExprList*, Token*, ExprList*, int); -SQLITE_PRIVATE void sqlite3DeferForeignKey(Parse*, int); -#ifndef SQLITE_OMIT_AUTHORIZATION -SQLITE_PRIVATE void sqlite3AuthRead(Parse*,Expr*,Schema*,SrcList*); -SQLITE_PRIVATE int sqlite3AuthCheck(Parse*,int, const char*, const char*, const char*); -SQLITE_PRIVATE void sqlite3AuthContextPush(Parse*, AuthContext*, const char*); -SQLITE_PRIVATE void sqlite3AuthContextPop(AuthContext*); -SQLITE_PRIVATE int sqlite3AuthReadCol(Parse*, const char *, const char *, int); -#else -# define sqlite3AuthRead(a,b,c,d) -# define sqlite3AuthCheck(a,b,c,d,e) SQLITE_OK -# define sqlite3AuthContextPush(a,b,c) -# define sqlite3AuthContextPop(a) ((void)(a)) -#endif -SQLITE_PRIVATE void sqlite3Attach(Parse*, Expr*, Expr*, Expr*); -SQLITE_PRIVATE void sqlite3Detach(Parse*, Expr*); -SQLITE_PRIVATE void sqlite3FixInit(DbFixer*, Parse*, int, const char*, const Token*); -SQLITE_PRIVATE int sqlite3FixSrcList(DbFixer*, SrcList*); -SQLITE_PRIVATE int sqlite3FixSelect(DbFixer*, Select*); -SQLITE_PRIVATE int sqlite3FixExpr(DbFixer*, Expr*); -SQLITE_PRIVATE int sqlite3FixExprList(DbFixer*, ExprList*); -SQLITE_PRIVATE int sqlite3FixTriggerStep(DbFixer*, TriggerStep*); -SQLITE_PRIVATE int sqlite3AtoF(const char *z, double*, int, u8); -SQLITE_PRIVATE int sqlite3GetInt32(const char *, int*); -SQLITE_PRIVATE int sqlite3Atoi(const char*); -SQLITE_PRIVATE int sqlite3Utf16ByteLen(const void *pData, int nChar); -SQLITE_PRIVATE int sqlite3Utf8CharLen(const char *pData, int nByte); -SQLITE_PRIVATE u32 sqlite3Utf8Read(const u8**); -SQLITE_PRIVATE LogEst sqlite3LogEst(u64); -SQLITE_PRIVATE LogEst sqlite3LogEstAdd(LogEst,LogEst); -#ifndef SQLITE_OMIT_VIRTUALTABLE -SQLITE_PRIVATE LogEst sqlite3LogEstFromDouble(double); -#endif -SQLITE_PRIVATE u64 sqlite3LogEstToInt(LogEst); - -/* -** Routines to read and write variable-length integers. These used to -** be defined locally, but now we use the varint routines in the util.c -** file. Code should use the MACRO forms below, as the Varint32 versions -** are coded to assume the single byte case is already handled (which -** the MACRO form does). -*/ -SQLITE_PRIVATE int sqlite3PutVarint(unsigned char*, u64); -SQLITE_PRIVATE int sqlite3PutVarint32(unsigned char*, u32); -SQLITE_PRIVATE u8 sqlite3GetVarint(const unsigned char *, u64 *); -SQLITE_PRIVATE u8 sqlite3GetVarint32(const unsigned char *, u32 *); -SQLITE_PRIVATE int sqlite3VarintLen(u64 v); - -/* -** The header of a record consists of a sequence variable-length integers. -** These integers are almost always small and are encoded as a single byte. -** The following macros take advantage this fact to provide a fast encode -** and decode of the integers in a record header. It is faster for the common -** case where the integer is a single byte. It is a little slower when the -** integer is two or more bytes. But overall it is faster. 
-** -** The following expressions are equivalent: -** -** x = sqlite3GetVarint32( A, &B ); -** x = sqlite3PutVarint32( A, B ); -** -** x = getVarint32( A, B ); -** x = putVarint32( A, B ); -** -*/ -#define getVarint32(A,B) \ - (u8)((*(A)<(u8)0x80)?((B)=(u32)*(A)),1:sqlite3GetVarint32((A),(u32 *)&(B))) -#define putVarint32(A,B) \ - (u8)(((u32)(B)<(u32)0x80)?(*(A)=(unsigned char)(B)),1:\ - sqlite3PutVarint32((A),(B))) -#define getVarint sqlite3GetVarint -#define putVarint sqlite3PutVarint - - -SQLITE_PRIVATE const char *sqlite3IndexAffinityStr(Vdbe *, Index *); -SQLITE_PRIVATE void sqlite3TableAffinity(Vdbe*, Table*, int); -SQLITE_PRIVATE char sqlite3CompareAffinity(Expr *pExpr, char aff2); -SQLITE_PRIVATE int sqlite3IndexAffinityOk(Expr *pExpr, char idx_affinity); -SQLITE_PRIVATE char sqlite3ExprAffinity(Expr *pExpr); -SQLITE_PRIVATE int sqlite3Atoi64(const char*, i64*, int, u8); -SQLITE_PRIVATE void sqlite3Error(sqlite3*, int, const char*,...); -SQLITE_PRIVATE void *sqlite3HexToBlob(sqlite3*, const char *z, int n); -SQLITE_PRIVATE u8 sqlite3HexToInt(int h); -SQLITE_PRIVATE int sqlite3TwoPartName(Parse *, Token *, Token *, Token **); - -#if defined(SQLITE_TEST) -SQLITE_PRIVATE const char *sqlite3ErrName(int); -#endif - -SQLITE_PRIVATE const char *sqlite3ErrStr(int); -SQLITE_PRIVATE int sqlite3ReadSchema(Parse *pParse); -SQLITE_PRIVATE CollSeq *sqlite3FindCollSeq(sqlite3*,u8 enc, const char*,int); -SQLITE_PRIVATE CollSeq *sqlite3LocateCollSeq(Parse *pParse, const char*zName); -SQLITE_PRIVATE CollSeq *sqlite3ExprCollSeq(Parse *pParse, Expr *pExpr); -SQLITE_PRIVATE Expr *sqlite3ExprAddCollateToken(Parse *pParse, Expr*, const Token*); -SQLITE_PRIVATE Expr *sqlite3ExprAddCollateString(Parse*,Expr*,const char*); -SQLITE_PRIVATE Expr *sqlite3ExprSkipCollate(Expr*); -SQLITE_PRIVATE int sqlite3CheckCollSeq(Parse *, CollSeq *); -SQLITE_PRIVATE int sqlite3CheckObjectName(Parse *, const char *); -SQLITE_PRIVATE void sqlite3VdbeSetChanges(sqlite3 *, int); -SQLITE_PRIVATE int sqlite3AddInt64(i64*,i64); -SQLITE_PRIVATE int sqlite3SubInt64(i64*,i64); -SQLITE_PRIVATE int sqlite3MulInt64(i64*,i64); -SQLITE_PRIVATE int sqlite3AbsInt32(int); -#ifdef SQLITE_ENABLE_8_3_NAMES -SQLITE_PRIVATE void sqlite3FileSuffix3(const char*, char*); -#else -# define sqlite3FileSuffix3(X,Y) -#endif -SQLITE_PRIVATE u8 sqlite3GetBoolean(const char *z,int); - -SQLITE_PRIVATE const void *sqlite3ValueText(sqlite3_value*, u8); -SQLITE_PRIVATE int sqlite3ValueBytes(sqlite3_value*, u8); -SQLITE_PRIVATE void sqlite3ValueSetStr(sqlite3_value*, int, const void *,u8, - void(*)(void*)); -SQLITE_PRIVATE void sqlite3ValueSetNull(sqlite3_value*); -SQLITE_PRIVATE void sqlite3ValueFree(sqlite3_value*); -SQLITE_PRIVATE sqlite3_value *sqlite3ValueNew(sqlite3 *); -SQLITE_PRIVATE char *sqlite3Utf16to8(sqlite3 *, const void*, int, u8); -SQLITE_PRIVATE int sqlite3ValueFromExpr(sqlite3 *, Expr *, u8, u8, sqlite3_value **); -SQLITE_PRIVATE void sqlite3ValueApplyAffinity(sqlite3_value *, u8, u8); -#ifndef SQLITE_AMALGAMATION -SQLITE_PRIVATE const unsigned char sqlite3OpcodeProperty[]; -SQLITE_PRIVATE const unsigned char sqlite3UpperToLower[]; -SQLITE_PRIVATE const unsigned char sqlite3CtypeMap[]; -SQLITE_PRIVATE const Token sqlite3IntTokens[]; -SQLITE_PRIVATE SQLITE_WSD struct Sqlite3Config sqlite3Config; -SQLITE_PRIVATE SQLITE_WSD FuncDefHash sqlite3GlobalFunctions; -#ifndef SQLITE_OMIT_WSD -SQLITE_PRIVATE int sqlite3PendingByte; -#endif -#endif -SQLITE_PRIVATE void sqlite3RootPageMoved(sqlite3*, int, int, int); -SQLITE_PRIVATE void 
sqlite3Reindex(Parse*, Token*, Token*); -SQLITE_PRIVATE void sqlite3AlterFunctions(void); -SQLITE_PRIVATE void sqlite3AlterRenameTable(Parse*, SrcList*, Token*); -SQLITE_PRIVATE int sqlite3GetToken(const unsigned char *, int *); -SQLITE_PRIVATE void sqlite3NestedParse(Parse*, const char*, ...); -SQLITE_PRIVATE void sqlite3ExpirePreparedStatements(sqlite3*); -SQLITE_PRIVATE int sqlite3CodeSubselect(Parse *, Expr *, int, int); -SQLITE_PRIVATE void sqlite3SelectPrep(Parse*, Select*, NameContext*); -SQLITE_PRIVATE int sqlite3MatchSpanName(const char*, const char*, const char*, const char*); -SQLITE_PRIVATE int sqlite3ResolveExprNames(NameContext*, Expr*); -SQLITE_PRIVATE void sqlite3ResolveSelectNames(Parse*, Select*, NameContext*); -SQLITE_PRIVATE void sqlite3ResolveSelfReference(Parse*,Table*,int,Expr*,ExprList*); -SQLITE_PRIVATE int sqlite3ResolveOrderGroupBy(Parse*, Select*, ExprList*, const char*); -SQLITE_PRIVATE void sqlite3ColumnDefault(Vdbe *, Table *, int, int); -SQLITE_PRIVATE void sqlite3AlterFinishAddColumn(Parse *, Token *); -SQLITE_PRIVATE void sqlite3AlterBeginAddColumn(Parse *, SrcList *); -SQLITE_PRIVATE CollSeq *sqlite3GetCollSeq(Parse*, u8, CollSeq *, const char*); -SQLITE_PRIVATE char sqlite3AffinityType(const char*, u8*); -SQLITE_PRIVATE void sqlite3Analyze(Parse*, Token*, Token*); -SQLITE_PRIVATE int sqlite3InvokeBusyHandler(BusyHandler*); -SQLITE_PRIVATE int sqlite3FindDb(sqlite3*, Token*); -SQLITE_PRIVATE int sqlite3FindDbName(sqlite3 *, const char *); -SQLITE_PRIVATE int sqlite3AnalysisLoad(sqlite3*,int iDB); -SQLITE_PRIVATE void sqlite3DeleteIndexSamples(sqlite3*,Index*); -SQLITE_PRIVATE void sqlite3DefaultRowEst(Index*); -SQLITE_PRIVATE void sqlite3RegisterLikeFunctions(sqlite3*, int); -SQLITE_PRIVATE int sqlite3IsLikeFunction(sqlite3*,Expr*,int*,char*); -SQLITE_PRIVATE void sqlite3MinimumFileFormat(Parse*, int, int); -SQLITE_PRIVATE void sqlite3SchemaClear(void *); -SQLITE_PRIVATE Schema *sqlite3SchemaGet(sqlite3 *, Btree *); -SQLITE_PRIVATE int sqlite3SchemaToIndex(sqlite3 *db, Schema *); -SQLITE_PRIVATE KeyInfo *sqlite3KeyInfoAlloc(sqlite3*,int,int); -SQLITE_PRIVATE void sqlite3KeyInfoUnref(KeyInfo*); -SQLITE_PRIVATE KeyInfo *sqlite3KeyInfoRef(KeyInfo*); -SQLITE_PRIVATE KeyInfo *sqlite3KeyInfoOfIndex(Parse*, Index*); -#ifdef SQLITE_DEBUG -SQLITE_PRIVATE int sqlite3KeyInfoIsWriteable(KeyInfo*); -#endif -SQLITE_PRIVATE int sqlite3CreateFunc(sqlite3 *, const char *, int, int, void *, - void (*)(sqlite3_context*,int,sqlite3_value **), - void (*)(sqlite3_context*,int,sqlite3_value **), void (*)(sqlite3_context*), - FuncDestructor *pDestructor -); -SQLITE_PRIVATE int sqlite3ApiExit(sqlite3 *db, int); -SQLITE_PRIVATE int sqlite3OpenTempDatabase(Parse *); - -SQLITE_PRIVATE void sqlite3StrAccumInit(StrAccum*, char*, int, int); -SQLITE_PRIVATE void sqlite3StrAccumAppend(StrAccum*,const char*,int); -SQLITE_PRIVATE void sqlite3StrAccumAppendAll(StrAccum*,const char*); -SQLITE_PRIVATE void sqlite3AppendSpace(StrAccum*,int); -SQLITE_PRIVATE char *sqlite3StrAccumFinish(StrAccum*); -SQLITE_PRIVATE void sqlite3StrAccumReset(StrAccum*); -SQLITE_PRIVATE void sqlite3SelectDestInit(SelectDest*,int,int); -SQLITE_PRIVATE Expr *sqlite3CreateColumnExpr(sqlite3 *, SrcList *, int, int); - -SQLITE_PRIVATE void sqlite3BackupRestart(sqlite3_backup *); -SQLITE_PRIVATE void sqlite3BackupUpdate(sqlite3_backup *, Pgno, const u8 *); - -#ifdef SQLITE_ENABLE_STAT3_OR_STAT4 -SQLITE_PRIVATE void sqlite3AnalyzeFunctions(void); -SQLITE_PRIVATE int 
sqlite3Stat4ProbeSetValue(Parse*,Index*,UnpackedRecord**,Expr*,u8,int,int*); -SQLITE_PRIVATE void sqlite3Stat4ProbeFree(UnpackedRecord*); -#endif - -/* -** The interface to the LEMON-generated parser -*/ -SQLITE_PRIVATE void *sqlite3ParserAlloc(void*(*)(size_t)); -SQLITE_PRIVATE void sqlite3ParserFree(void*, void(*)(void*)); -SQLITE_PRIVATE void sqlite3Parser(void*, int, Token, Parse*); -#ifdef YYTRACKMAXSTACKDEPTH -SQLITE_PRIVATE int sqlite3ParserStackPeak(void*); -#endif - -SQLITE_PRIVATE void sqlite3AutoLoadExtensions(sqlite3*); -#ifndef SQLITE_OMIT_LOAD_EXTENSION -SQLITE_PRIVATE void sqlite3CloseExtensions(sqlite3*); -#else -# define sqlite3CloseExtensions(X) -#endif - -#ifndef SQLITE_OMIT_SHARED_CACHE -SQLITE_PRIVATE void sqlite3TableLock(Parse *, int, int, u8, const char *); -#else - #define sqlite3TableLock(v,w,x,y,z) -#endif - -#ifdef SQLITE_TEST -SQLITE_PRIVATE int sqlite3Utf8To8(unsigned char*); -#endif - -#ifdef SQLITE_OMIT_VIRTUALTABLE -# define sqlite3VtabClear(Y) -# define sqlite3VtabSync(X,Y) SQLITE_OK -# define sqlite3VtabRollback(X) -# define sqlite3VtabCommit(X) -# define sqlite3VtabInSync(db) 0 -# define sqlite3VtabLock(X) -# define sqlite3VtabUnlock(X) -# define sqlite3VtabUnlockList(X) -# define sqlite3VtabSavepoint(X, Y, Z) SQLITE_OK -# define sqlite3GetVTable(X,Y) ((VTable*)0) -#else -SQLITE_PRIVATE void sqlite3VtabClear(sqlite3 *db, Table*); -SQLITE_PRIVATE void sqlite3VtabDisconnect(sqlite3 *db, Table *p); -SQLITE_PRIVATE int sqlite3VtabSync(sqlite3 *db, Vdbe*); -SQLITE_PRIVATE int sqlite3VtabRollback(sqlite3 *db); -SQLITE_PRIVATE int sqlite3VtabCommit(sqlite3 *db); -SQLITE_PRIVATE void sqlite3VtabLock(VTable *); -SQLITE_PRIVATE void sqlite3VtabUnlock(VTable *); -SQLITE_PRIVATE void sqlite3VtabUnlockList(sqlite3*); -SQLITE_PRIVATE int sqlite3VtabSavepoint(sqlite3 *, int, int); -SQLITE_PRIVATE void sqlite3VtabImportErrmsg(Vdbe*, sqlite3_vtab*); -SQLITE_PRIVATE VTable *sqlite3GetVTable(sqlite3*, Table*); -# define sqlite3VtabInSync(db) ((db)->nVTrans>0 && (db)->aVTrans==0) -#endif -SQLITE_PRIVATE void sqlite3VtabMakeWritable(Parse*,Table*); -SQLITE_PRIVATE void sqlite3VtabBeginParse(Parse*, Token*, Token*, Token*, int); -SQLITE_PRIVATE void sqlite3VtabFinishParse(Parse*, Token*); -SQLITE_PRIVATE void sqlite3VtabArgInit(Parse*); -SQLITE_PRIVATE void sqlite3VtabArgExtend(Parse*, Token*); -SQLITE_PRIVATE int sqlite3VtabCallCreate(sqlite3*, int, const char *, char **); -SQLITE_PRIVATE int sqlite3VtabCallConnect(Parse*, Table*); -SQLITE_PRIVATE int sqlite3VtabCallDestroy(sqlite3*, int, const char *); -SQLITE_PRIVATE int sqlite3VtabBegin(sqlite3 *, VTable *); -SQLITE_PRIVATE FuncDef *sqlite3VtabOverloadFunction(sqlite3 *,FuncDef*, int nArg, Expr*); -SQLITE_PRIVATE void sqlite3InvalidFunction(sqlite3_context*,int,sqlite3_value**); -SQLITE_PRIVATE sqlite3_int64 sqlite3StmtCurrentTime(sqlite3_context*); -SQLITE_PRIVATE int sqlite3VdbeParameterIndex(Vdbe*, const char*, int); -SQLITE_PRIVATE int sqlite3TransferBindings(sqlite3_stmt *, sqlite3_stmt *); -SQLITE_PRIVATE void sqlite3ParserReset(Parse*); -SQLITE_PRIVATE int sqlite3Reprepare(Vdbe*); -SQLITE_PRIVATE void sqlite3ExprListCheckLength(Parse*, ExprList*, const char*); -SQLITE_PRIVATE CollSeq *sqlite3BinaryCompareCollSeq(Parse *, Expr *, Expr *); -SQLITE_PRIVATE int sqlite3TempInMemory(const sqlite3*); -SQLITE_PRIVATE const char *sqlite3JournalModename(int); -#ifndef SQLITE_OMIT_WAL -SQLITE_PRIVATE int sqlite3Checkpoint(sqlite3*, int, int, int*, int*); -SQLITE_PRIVATE int sqlite3WalDefaultHook(void*,sqlite3*,const 
char*,int); -#endif -#ifndef SQLITE_OMIT_CTE -SQLITE_PRIVATE With *sqlite3WithAdd(Parse*,With*,Token*,ExprList*,Select*); -SQLITE_PRIVATE void sqlite3WithDelete(sqlite3*,With*); -SQLITE_PRIVATE void sqlite3WithPush(Parse*, With*, u8); -#else -#define sqlite3WithPush(x,y,z) -#define sqlite3WithDelete(x,y) -#endif - -/* Declarations for functions in fkey.c. All of these are replaced by -** no-op macros if OMIT_FOREIGN_KEY is defined. In this case no foreign -** key functionality is available. If OMIT_TRIGGER is defined but -** OMIT_FOREIGN_KEY is not, only some of the functions are no-oped. In -** this case foreign keys are parsed, but no other functionality is -** provided (enforcement of FK constraints requires the triggers sub-system). -*/ -#if !defined(SQLITE_OMIT_FOREIGN_KEY) && !defined(SQLITE_OMIT_TRIGGER) -SQLITE_PRIVATE void sqlite3FkCheck(Parse*, Table*, int, int, int*, int); -SQLITE_PRIVATE void sqlite3FkDropTable(Parse*, SrcList *, Table*); -SQLITE_PRIVATE void sqlite3FkActions(Parse*, Table*, ExprList*, int, int*, int); -SQLITE_PRIVATE int sqlite3FkRequired(Parse*, Table*, int*, int); -SQLITE_PRIVATE u32 sqlite3FkOldmask(Parse*, Table*); -SQLITE_PRIVATE FKey *sqlite3FkReferences(Table *); -#else - #define sqlite3FkActions(a,b,c,d,e,f) - #define sqlite3FkCheck(a,b,c,d,e,f) - #define sqlite3FkDropTable(a,b,c) - #define sqlite3FkOldmask(a,b) 0 - #define sqlite3FkRequired(a,b,c,d) 0 -#endif -#ifndef SQLITE_OMIT_FOREIGN_KEY -SQLITE_PRIVATE void sqlite3FkDelete(sqlite3 *, Table*); -SQLITE_PRIVATE int sqlite3FkLocateIndex(Parse*,Table*,FKey*,Index**,int**); -#else - #define sqlite3FkDelete(a,b) - #define sqlite3FkLocateIndex(a,b,c,d,e) -#endif - - -/* -** Available fault injectors. Should be numbered beginning with 0. -*/ -#define SQLITE_FAULTINJECTOR_MALLOC 0 -#define SQLITE_FAULTINJECTOR_COUNT 1 - -/* -** The interface to the code in fault.c used for identifying "benign" -** malloc failures. This is only present if SQLITE_OMIT_BUILTIN_TEST -** is not defined. 
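**
** As a hypothetical illustration (not part of the original comment), a
** caller brackets an allocation whose failure can be tolerated like so;
** nByte stands in for whatever size is being requested:
**
**     sqlite3BeginBenignMalloc();
**     p = sqlite3Malloc(nByte);      .. a failure here is treated as benign ..
**     sqlite3EndBenignMalloc();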
-*/ -#ifndef SQLITE_OMIT_BUILTIN_TEST -SQLITE_PRIVATE void sqlite3BeginBenignMalloc(void); -SQLITE_PRIVATE void sqlite3EndBenignMalloc(void); -#else - #define sqlite3BeginBenignMalloc() - #define sqlite3EndBenignMalloc() -#endif - -#define IN_INDEX_ROWID 1 -#define IN_INDEX_EPH 2 -#define IN_INDEX_INDEX_ASC 3 -#define IN_INDEX_INDEX_DESC 4 -SQLITE_PRIVATE int sqlite3FindInIndex(Parse *, Expr *, int*); - -#ifdef SQLITE_ENABLE_ATOMIC_WRITE -SQLITE_PRIVATE int sqlite3JournalOpen(sqlite3_vfs *, const char *, sqlite3_file *, int, int); -SQLITE_PRIVATE int sqlite3JournalSize(sqlite3_vfs *); -SQLITE_PRIVATE int sqlite3JournalCreate(sqlite3_file *); -SQLITE_PRIVATE int sqlite3JournalExists(sqlite3_file *p); -#else - #define sqlite3JournalSize(pVfs) ((pVfs)->szOsFile) - #define sqlite3JournalExists(p) 1 -#endif - -SQLITE_PRIVATE void sqlite3MemJournalOpen(sqlite3_file *); -SQLITE_PRIVATE int sqlite3MemJournalSize(void); -SQLITE_PRIVATE int sqlite3IsMemJournal(sqlite3_file *); - -#if SQLITE_MAX_EXPR_DEPTH>0 -SQLITE_PRIVATE void sqlite3ExprSetHeight(Parse *pParse, Expr *p); -SQLITE_PRIVATE int sqlite3SelectExprHeight(Select *); -SQLITE_PRIVATE int sqlite3ExprCheckHeight(Parse*, int); -#else - #define sqlite3ExprSetHeight(x,y) - #define sqlite3SelectExprHeight(x) 0 - #define sqlite3ExprCheckHeight(x,y) -#endif - -SQLITE_PRIVATE u32 sqlite3Get4byte(const u8*); -SQLITE_PRIVATE void sqlite3Put4byte(u8*, u32); - -#ifdef SQLITE_ENABLE_UNLOCK_NOTIFY -SQLITE_PRIVATE void sqlite3ConnectionBlocked(sqlite3 *, sqlite3 *); -SQLITE_PRIVATE void sqlite3ConnectionUnlocked(sqlite3 *db); -SQLITE_PRIVATE void sqlite3ConnectionClosed(sqlite3 *db); -#else - #define sqlite3ConnectionBlocked(x,y) - #define sqlite3ConnectionUnlocked(x) - #define sqlite3ConnectionClosed(x) -#endif - -#ifdef SQLITE_DEBUG -SQLITE_PRIVATE void sqlite3ParserTrace(FILE*, char *); -#endif - -/* -** If the SQLITE_ENABLE IOTRACE exists then the global variable -** sqlite3IoTrace is a pointer to a printf-like routine used to -** print I/O tracing messages. -*/ -#ifdef SQLITE_ENABLE_IOTRACE -# define IOTRACE(A) if( sqlite3IoTrace ){ sqlite3IoTrace A; } -SQLITE_PRIVATE void sqlite3VdbeIOTraceSql(Vdbe*); -SQLITE_PRIVATE void (*sqlite3IoTrace)(const char*,...); -#else -# define IOTRACE(A) -# define sqlite3VdbeIOTraceSql(X) -#endif - -/* -** These routines are available for the mem2.c debugging memory allocator -** only. They are used to verify that different "types" of memory -** allocations are properly tracked by the system. -** -** sqlite3MemdebugSetType() sets the "type" of an allocation to one of -** the MEMTYPE_* macros defined below. The type must be a bitmask with -** a single bit set. -** -** sqlite3MemdebugHasType() returns true if any of the bits in its second -** argument match the type set by the previous sqlite3MemdebugSetType(). -** sqlite3MemdebugHasType() is intended for use inside assert() statements. -** -** sqlite3MemdebugNoType() returns true if none of the bits in its second -** argument match the type set by the previous sqlite3MemdebugSetType(). -** -** Perhaps the most important point is the difference between MEMTYPE_HEAP -** and MEMTYPE_LOOKASIDE. If an allocation is MEMTYPE_LOOKASIDE, that means -** it might have been allocated by lookaside, except the allocation was -** too large or lookaside was already full. It is important to verify -** that allocations that might have been satisfied by lookaside are not -** passed back to non-lookaside free() routines. 
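** For example, an illustrative assert of the kind meant here (p stands for
** a hypothetical pointer being released by such a non-lookaside routine) is:
**
**     assert( sqlite3MemdebugNoType(p, MEMTYPE_LOOKASIDE) );
**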
Asserts such as the -** example above are placed on the non-lookaside free() routines to verify -** this constraint. -** -** All of this is no-op for a production build. It only comes into -** play when the SQLITE_MEMDEBUG compile-time option is used. -*/ -#ifdef SQLITE_MEMDEBUG -SQLITE_PRIVATE void sqlite3MemdebugSetType(void*,u8); -SQLITE_PRIVATE int sqlite3MemdebugHasType(void*,u8); -SQLITE_PRIVATE int sqlite3MemdebugNoType(void*,u8); -#else -# define sqlite3MemdebugSetType(X,Y) /* no-op */ -# define sqlite3MemdebugHasType(X,Y) 1 -# define sqlite3MemdebugNoType(X,Y) 1 -#endif -#define MEMTYPE_HEAP 0x01 /* General heap allocations */ -#define MEMTYPE_LOOKASIDE 0x02 /* Might have been lookaside memory */ -#define MEMTYPE_SCRATCH 0x04 /* Scratch allocations */ -#define MEMTYPE_PCACHE 0x08 /* Page cache allocations */ -#define MEMTYPE_DB 0x10 /* Uses sqlite3DbMalloc, not sqlite_malloc */ - -#endif /* _SQLITEINT_H_ */ - -/************** End of sqliteInt.h *******************************************/ -/************** Begin file global.c ******************************************/ -/* -** 2008 June 13 -** -** The author disclaims copyright to this source code. In place of -** a legal notice, here is a blessing: -** -** May you do good and not evil. -** May you find forgiveness for yourself and forgive others. -** May you share freely, never taking more than you give. -** -************************************************************************* -** -** This file contains definitions of global variables and contants. -*/ - -/* An array to map all upper-case characters into their corresponding -** lower-case character. -** -** SQLite only considers US-ASCII (or EBCDIC) characters. We do not -** handle case conversions for the UTF character set since the tables -** involved are nearly as big or bigger than SQLite itself. 
-*/ -SQLITE_PRIVATE const unsigned char sqlite3UpperToLower[] = { -#ifdef SQLITE_ASCII - 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, - 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, - 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, - 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 97, 98, 99,100,101,102,103, - 104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121, - 122, 91, 92, 93, 94, 95, 96, 97, 98, 99,100,101,102,103,104,105,106,107, - 108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125, - 126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143, - 144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161, - 162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179, - 180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197, - 198,199,200,201,202,203,204,205,206,207,208,209,210,211,212,213,214,215, - 216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233, - 234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251, - 252,253,254,255 -#endif -#ifdef SQLITE_EBCDIC - 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, /* 0x */ - 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, /* 1x */ - 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, /* 2x */ - 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, /* 3x */ - 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, /* 4x */ - 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, /* 5x */ - 96, 97, 66, 67, 68, 69, 70, 71, 72, 73,106,107,108,109,110,111, /* 6x */ - 112, 81, 82, 83, 84, 85, 86, 87, 88, 89,122,123,124,125,126,127, /* 7x */ - 128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143, /* 8x */ - 144,145,146,147,148,149,150,151,152,153,154,155,156,157,156,159, /* 9x */ - 160,161,162,163,164,165,166,167,168,169,170,171,140,141,142,175, /* Ax */ - 176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191, /* Bx */ - 192,129,130,131,132,133,134,135,136,137,202,203,204,205,206,207, /* Cx */ - 208,145,146,147,148,149,150,151,152,153,218,219,220,221,222,223, /* Dx */ - 224,225,162,163,164,165,166,167,168,169,232,203,204,205,206,207, /* Ex */ - 239,240,241,242,243,244,245,246,247,248,249,219,220,221,222,255, /* Fx */ -#endif -}; - -/* -** The following 256 byte lookup table is used to support SQLites built-in -** equivalents to the following standard library functions: -** -** isspace() 0x01 -** isalpha() 0x02 -** isdigit() 0x04 -** isalnum() 0x06 -** isxdigit() 0x08 -** toupper() 0x20 -** SQLite identifier character 0x40 -** -** Bit 0x20 is set if the mapped character requires translation to upper -** case. i.e. if the character is a lower-case ASCII character. -** If x is a lower-case ASCII character, then its upper-case equivalent -** is (x - 0x20). Therefore toupper() can be implemented as: -** -** (x & ~(map[x]&0x20)) -** -** Standard function tolower() is implemented using the sqlite3UpperToLower[] -** array. tolower() is used more often than toupper() by SQLite. -** -** Bit 0x40 is set if the character non-alphanumeric and can be used in an -** SQLite identifier. Identifiers are alphanumerics, "_", "$", and any -** non-ASCII UTF character. Hence the test for whether or not a character is -** part of an identifier is 0x46. -** -** SQLite's versions are identical to the standard versions assuming a -** locale of "C". They are implemented as macros in sqliteInt.h. 
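**
** As a worked illustration of the toupper() expression above: 'a' is 0x61 and
** the table below maps 0x61 to 0x2a, so the macro computes
** 0x61 & ~(0x2a & 0x20) = 0x61 & ~0x20 = 0x41, which is 'A'.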
-*/ -#ifdef SQLITE_ASCII -SQLITE_PRIVATE const unsigned char sqlite3CtypeMap[256] = { - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 00..07 ........ */ - 0x00, 0x01, 0x01, 0x01, 0x01, 0x01, 0x00, 0x00, /* 08..0f ........ */ - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 10..17 ........ */ - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 18..1f ........ */ - 0x01, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00, /* 20..27 !"#$%&' */ - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 28..2f ()*+,-./ */ - 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, /* 30..37 01234567 */ - 0x0c, 0x0c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 38..3f 89:;<=>? */ - - 0x00, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x02, /* 40..47 @ABCDEFG */ - 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, /* 48..4f HIJKLMNO */ - 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, /* 50..57 PQRSTUVW */ - 0x02, 0x02, 0x02, 0x00, 0x00, 0x00, 0x00, 0x40, /* 58..5f XYZ[\]^_ */ - 0x00, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x22, /* 60..67 `abcdefg */ - 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, /* 68..6f hijklmno */ - 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, /* 70..77 pqrstuvw */ - 0x22, 0x22, 0x22, 0x00, 0x00, 0x00, 0x00, 0x00, /* 78..7f xyz{|}~. */ - - 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, /* 80..87 ........ */ - 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, /* 88..8f ........ */ - 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, /* 90..97 ........ */ - 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, /* 98..9f ........ */ - 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, /* a0..a7 ........ */ - 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, /* a8..af ........ */ - 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, /* b0..b7 ........ */ - 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, /* b8..bf ........ */ - - 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, /* c0..c7 ........ */ - 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, /* c8..cf ........ */ - 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, /* d0..d7 ........ */ - 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, /* d8..df ........ */ - 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, /* e0..e7 ........ */ - 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, /* e8..ef ........ */ - 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, /* f0..f7 ........ */ - 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40 /* f8..ff ........ */ -}; -#endif - -#ifndef SQLITE_USE_URI -# define SQLITE_USE_URI 0 -#endif - -#ifndef SQLITE_ALLOW_COVERING_INDEX_SCAN -# define SQLITE_ALLOW_COVERING_INDEX_SCAN 1 -#endif - -/* -** The following singleton contains the global configuration for -** the SQLite library. 
-*/ -SQLITE_PRIVATE SQLITE_WSD struct Sqlite3Config sqlite3Config = { - SQLITE_DEFAULT_MEMSTATUS, /* bMemstat */ - 1, /* bCoreMutex */ - SQLITE_THREADSAFE==1, /* bFullMutex */ - SQLITE_USE_URI, /* bOpenUri */ - SQLITE_ALLOW_COVERING_INDEX_SCAN, /* bUseCis */ - 0x7ffffffe, /* mxStrlen */ - 0, /* neverCorrupt */ - 128, /* szLookaside */ - 500, /* nLookaside */ - {0,0,0,0,0,0,0,0}, /* m */ - {0,0,0,0,0,0,0,0,0}, /* mutex */ - {0,0,0,0,0,0,0,0,0,0,0,0,0},/* pcache2 */ - (void*)0, /* pHeap */ - 0, /* nHeap */ - 0, 0, /* mnHeap, mxHeap */ - SQLITE_DEFAULT_MMAP_SIZE, /* szMmap */ - SQLITE_MAX_MMAP_SIZE, /* mxMmap */ - (void*)0, /* pScratch */ - 0, /* szScratch */ - 0, /* nScratch */ - (void*)0, /* pPage */ - 0, /* szPage */ - 0, /* nPage */ - 0, /* mxParserStack */ - 0, /* sharedCacheEnabled */ - /* All the rest should always be initialized to zero */ - 0, /* isInit */ - 0, /* inProgress */ - 0, /* isMutexInit */ - 0, /* isMallocInit */ - 0, /* isPCacheInit */ - 0, /* nRefInitMutex */ - 0, /* pInitMutex */ - 0, /* xLog */ - 0, /* pLogArg */ -#ifdef SQLITE_ENABLE_SQLLOG - 0, /* xSqllog */ - 0, /* pSqllogArg */ -#endif -#ifdef SQLITE_VDBE_COVERAGE - 0, /* xVdbeBranch */ - 0, /* pVbeBranchArg */ -#endif -#ifndef SQLITE_OMIT_BUILTIN_TEST - 0, /* xTestCallback */ -#endif - 0 /* bLocaltimeFault */ -}; - -/* -** Hash table for global functions - functions common to all -** database connections. After initialization, this table is -** read-only. -*/ -SQLITE_PRIVATE SQLITE_WSD FuncDefHash sqlite3GlobalFunctions; - -/* -** Constant tokens for values 0 and 1. -*/ -SQLITE_PRIVATE const Token sqlite3IntTokens[] = { - { "0", 1 }, - { "1", 1 } -}; - - -/* -** The value of the "pending" byte must be 0x40000000 (1 byte past the -** 1-gibabyte boundary) in a compatible database. SQLite never uses -** the database page that contains the pending byte. It never attempts -** to read or write that page. The pending byte page is set assign -** for use by the VFS layers as space for managing file locks. -** -** During testing, it is often desirable to move the pending byte to -** a different position in the file. This allows code that has to -** deal with the pending byte to run on files that are much smaller -** than 1 GiB. The sqlite3_test_control() interface can be used to -** move the pending byte. -** -** IMPORTANT: Changing the pending byte to any value other than -** 0x40000000 results in an incompatible database file format! -** Changing the pending byte during operating results in undefined -** and dileterious behavior. -*/ -#ifndef SQLITE_OMIT_WSD -SQLITE_PRIVATE int sqlite3PendingByte = 0x40000000; -#endif - -/* -** Properties of opcodes. The OPFLG_INITIALIZER macro is -** created by mkopcodeh.awk during compilation. Data is obtained -** from the comments following the "case OP_xxxx:" statements in -** the vdbe.c file. -*/ -SQLITE_PRIVATE const unsigned char sqlite3OpcodeProperty[] = OPFLG_INITIALIZER; - -/************** End of global.c **********************************************/ -/************** Begin file ctime.c *******************************************/ -/* -** 2010 February 23 -** -** The author disclaims copyright to this source code. In place of -** a legal notice, here is a blessing: -** -** May you do good and not evil. -** May you find forgiveness for yourself and forgive others. -** May you share freely, never taking more than you give. 
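One concrete consequence of the pending-byte rule spelled out above: the fixed offset 0x40000000 (one byte past the 1 GiB boundary) picks out, for a given page size, a single page that SQLite never reads or writes. The helper below only illustrates that arithmetic; pending_byte_page is a made-up name, not an SQLite function.

#include <stdio.h>

#define PENDING_BYTE_OFFSET 0x40000000u  /* required value for compatible files */

/* Page number (1-based) that contains the pending byte for a given page size. */
static unsigned pending_byte_page(unsigned pageSize){
  return PENDING_BYTE_OFFSET/pageSize + 1;
}

int main(void){
  printf("4096-byte pages: page %u is skipped\n", pending_byte_page(4096));  /* 262145  */
  printf("1024-byte pages: page %u is skipped\n", pending_byte_page(1024));  /* 1048577 */
  return 0;
}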
-** -************************************************************************* -** -** This file implements routines used to report what compile-time options -** SQLite was built with. -*/ - -#ifndef SQLITE_OMIT_COMPILEOPTION_DIAGS - - -/* -** An array of names of all compile-time options. This array should -** be sorted A-Z. -** -** This array looks large, but in a typical installation actually uses -** only a handful of compile-time options, so most times this array is usually -** rather short and uses little memory space. -*/ -static const char * const azCompileOpt[] = { - -/* These macros are provided to "stringify" the value of the define -** for those options in which the value is meaningful. */ -#define CTIMEOPT_VAL_(opt) #opt -#define CTIMEOPT_VAL(opt) CTIMEOPT_VAL_(opt) - -#ifdef SQLITE_32BIT_ROWID - "32BIT_ROWID", -#endif -#ifdef SQLITE_4_BYTE_ALIGNED_MALLOC - "4_BYTE_ALIGNED_MALLOC", -#endif -#ifdef SQLITE_CASE_SENSITIVE_LIKE - "CASE_SENSITIVE_LIKE", -#endif -#ifdef SQLITE_CHECK_PAGES - "CHECK_PAGES", -#endif -#ifdef SQLITE_COVERAGE_TEST - "COVERAGE_TEST", -#endif -#ifdef SQLITE_DEBUG - "DEBUG", -#endif -#ifdef SQLITE_DEFAULT_LOCKING_MODE - "DEFAULT_LOCKING_MODE=" CTIMEOPT_VAL(SQLITE_DEFAULT_LOCKING_MODE), -#endif -#if defined(SQLITE_DEFAULT_MMAP_SIZE) && !defined(SQLITE_DEFAULT_MMAP_SIZE_xc) - "DEFAULT_MMAP_SIZE=" CTIMEOPT_VAL(SQLITE_DEFAULT_MMAP_SIZE), -#endif -#ifdef SQLITE_DISABLE_DIRSYNC - "DISABLE_DIRSYNC", -#endif -#ifdef SQLITE_DISABLE_LFS - "DISABLE_LFS", -#endif -#ifdef SQLITE_ENABLE_ATOMIC_WRITE - "ENABLE_ATOMIC_WRITE", -#endif -#ifdef SQLITE_ENABLE_CEROD - "ENABLE_CEROD", -#endif -#ifdef SQLITE_ENABLE_COLUMN_METADATA - "ENABLE_COLUMN_METADATA", -#endif -#ifdef SQLITE_ENABLE_EXPENSIVE_ASSERT - "ENABLE_EXPENSIVE_ASSERT", -#endif -#ifdef SQLITE_ENABLE_FTS1 - "ENABLE_FTS1", -#endif -#ifdef SQLITE_ENABLE_FTS2 - "ENABLE_FTS2", -#endif -#ifdef SQLITE_ENABLE_FTS3 - "ENABLE_FTS3", -#endif -#ifdef SQLITE_ENABLE_FTS3_PARENTHESIS - "ENABLE_FTS3_PARENTHESIS", -#endif -#ifdef SQLITE_ENABLE_FTS4 - "ENABLE_FTS4", -#endif -#ifdef SQLITE_ENABLE_ICU - "ENABLE_ICU", -#endif -#ifdef SQLITE_ENABLE_IOTRACE - "ENABLE_IOTRACE", -#endif -#ifdef SQLITE_ENABLE_LOAD_EXTENSION - "ENABLE_LOAD_EXTENSION", -#endif -#ifdef SQLITE_ENABLE_LOCKING_STYLE - "ENABLE_LOCKING_STYLE=" CTIMEOPT_VAL(SQLITE_ENABLE_LOCKING_STYLE), -#endif -#ifdef SQLITE_ENABLE_MEMORY_MANAGEMENT - "ENABLE_MEMORY_MANAGEMENT", -#endif -#ifdef SQLITE_ENABLE_MEMSYS3 - "ENABLE_MEMSYS3", -#endif -#ifdef SQLITE_ENABLE_MEMSYS5 - "ENABLE_MEMSYS5", -#endif -#ifdef SQLITE_ENABLE_OVERSIZE_CELL_CHECK - "ENABLE_OVERSIZE_CELL_CHECK", -#endif -#ifdef SQLITE_ENABLE_RTREE - "ENABLE_RTREE", -#endif -#if defined(SQLITE_ENABLE_STAT4) - "ENABLE_STAT4", -#elif defined(SQLITE_ENABLE_STAT3) - "ENABLE_STAT3", -#endif -#ifdef SQLITE_ENABLE_UNLOCK_NOTIFY - "ENABLE_UNLOCK_NOTIFY", -#endif -#ifdef SQLITE_ENABLE_UPDATE_DELETE_LIMIT - "ENABLE_UPDATE_DELETE_LIMIT", -#endif -#ifdef SQLITE_HAS_CODEC - "HAS_CODEC", -#endif -#ifdef SQLITE_HAVE_ISNAN - "HAVE_ISNAN", -#endif -#ifdef SQLITE_HOMEGROWN_RECURSIVE_MUTEX - "HOMEGROWN_RECURSIVE_MUTEX", -#endif -#ifdef SQLITE_IGNORE_AFP_LOCK_ERRORS - "IGNORE_AFP_LOCK_ERRORS", -#endif -#ifdef SQLITE_IGNORE_FLOCK_LOCK_ERRORS - "IGNORE_FLOCK_LOCK_ERRORS", -#endif -#ifdef SQLITE_INT64_TYPE - "INT64_TYPE", -#endif -#ifdef SQLITE_LOCK_TRACE - "LOCK_TRACE", -#endif -#if defined(SQLITE_MAX_MMAP_SIZE) && !defined(SQLITE_MAX_MMAP_SIZE_xc) - "MAX_MMAP_SIZE=" CTIMEOPT_VAL(SQLITE_MAX_MMAP_SIZE), -#endif -#ifdef SQLITE_MAX_SCHEMA_RETRY - 
"MAX_SCHEMA_RETRY=" CTIMEOPT_VAL(SQLITE_MAX_SCHEMA_RETRY), -#endif -#ifdef SQLITE_MEMDEBUG - "MEMDEBUG", -#endif -#ifdef SQLITE_MIXED_ENDIAN_64BIT_FLOAT - "MIXED_ENDIAN_64BIT_FLOAT", -#endif -#ifdef SQLITE_NO_SYNC - "NO_SYNC", -#endif -#ifdef SQLITE_OMIT_ALTERTABLE - "OMIT_ALTERTABLE", -#endif -#ifdef SQLITE_OMIT_ANALYZE - "OMIT_ANALYZE", -#endif -#ifdef SQLITE_OMIT_ATTACH - "OMIT_ATTACH", -#endif -#ifdef SQLITE_OMIT_AUTHORIZATION - "OMIT_AUTHORIZATION", -#endif -#ifdef SQLITE_OMIT_AUTOINCREMENT - "OMIT_AUTOINCREMENT", -#endif -#ifdef SQLITE_OMIT_AUTOINIT - "OMIT_AUTOINIT", -#endif -#ifdef SQLITE_OMIT_AUTOMATIC_INDEX - "OMIT_AUTOMATIC_INDEX", -#endif -#ifdef SQLITE_OMIT_AUTORESET - "OMIT_AUTORESET", -#endif -#ifdef SQLITE_OMIT_AUTOVACUUM - "OMIT_AUTOVACUUM", -#endif -#ifdef SQLITE_OMIT_BETWEEN_OPTIMIZATION - "OMIT_BETWEEN_OPTIMIZATION", -#endif -#ifdef SQLITE_OMIT_BLOB_LITERAL - "OMIT_BLOB_LITERAL", -#endif -#ifdef SQLITE_OMIT_BTREECOUNT - "OMIT_BTREECOUNT", -#endif -#ifdef SQLITE_OMIT_BUILTIN_TEST - "OMIT_BUILTIN_TEST", -#endif -#ifdef SQLITE_OMIT_CAST - "OMIT_CAST", -#endif -#ifdef SQLITE_OMIT_CHECK - "OMIT_CHECK", -#endif -#ifdef SQLITE_OMIT_COMPLETE - "OMIT_COMPLETE", -#endif -#ifdef SQLITE_OMIT_COMPOUND_SELECT - "OMIT_COMPOUND_SELECT", -#endif -#ifdef SQLITE_OMIT_CTE - "OMIT_CTE", -#endif -#ifdef SQLITE_OMIT_DATETIME_FUNCS - "OMIT_DATETIME_FUNCS", -#endif -#ifdef SQLITE_OMIT_DECLTYPE - "OMIT_DECLTYPE", -#endif -#ifdef SQLITE_OMIT_DEPRECATED - "OMIT_DEPRECATED", -#endif -#ifdef SQLITE_OMIT_DISKIO - "OMIT_DISKIO", -#endif -#ifdef SQLITE_OMIT_EXPLAIN - "OMIT_EXPLAIN", -#endif -#ifdef SQLITE_OMIT_FLAG_PRAGMAS - "OMIT_FLAG_PRAGMAS", -#endif -#ifdef SQLITE_OMIT_FLOATING_POINT - "OMIT_FLOATING_POINT", -#endif -#ifdef SQLITE_OMIT_FOREIGN_KEY - "OMIT_FOREIGN_KEY", -#endif -#ifdef SQLITE_OMIT_GET_TABLE - "OMIT_GET_TABLE", -#endif -#ifdef SQLITE_OMIT_INCRBLOB - "OMIT_INCRBLOB", -#endif -#ifdef SQLITE_OMIT_INTEGRITY_CHECK - "OMIT_INTEGRITY_CHECK", -#endif -#ifdef SQLITE_OMIT_LIKE_OPTIMIZATION - "OMIT_LIKE_OPTIMIZATION", -#endif -#ifdef SQLITE_OMIT_LOAD_EXTENSION - "OMIT_LOAD_EXTENSION", -#endif -#ifdef SQLITE_OMIT_LOCALTIME - "OMIT_LOCALTIME", -#endif -#ifdef SQLITE_OMIT_LOOKASIDE - "OMIT_LOOKASIDE", -#endif -#ifdef SQLITE_OMIT_MEMORYDB - "OMIT_MEMORYDB", -#endif -#ifdef SQLITE_OMIT_OR_OPTIMIZATION - "OMIT_OR_OPTIMIZATION", -#endif -#ifdef SQLITE_OMIT_PAGER_PRAGMAS - "OMIT_PAGER_PRAGMAS", -#endif -#ifdef SQLITE_OMIT_PRAGMA - "OMIT_PRAGMA", -#endif -#ifdef SQLITE_OMIT_PROGRESS_CALLBACK - "OMIT_PROGRESS_CALLBACK", -#endif -#ifdef SQLITE_OMIT_QUICKBALANCE - "OMIT_QUICKBALANCE", -#endif -#ifdef SQLITE_OMIT_REINDEX - "OMIT_REINDEX", -#endif -#ifdef SQLITE_OMIT_SCHEMA_PRAGMAS - "OMIT_SCHEMA_PRAGMAS", -#endif -#ifdef SQLITE_OMIT_SCHEMA_VERSION_PRAGMAS - "OMIT_SCHEMA_VERSION_PRAGMAS", -#endif -#ifdef SQLITE_OMIT_SHARED_CACHE - "OMIT_SHARED_CACHE", -#endif -#ifdef SQLITE_OMIT_SUBQUERY - "OMIT_SUBQUERY", -#endif -#ifdef SQLITE_OMIT_TCL_VARIABLE - "OMIT_TCL_VARIABLE", -#endif -#ifdef SQLITE_OMIT_TEMPDB - "OMIT_TEMPDB", -#endif -#ifdef SQLITE_OMIT_TRACE - "OMIT_TRACE", -#endif -#ifdef SQLITE_OMIT_TRIGGER - "OMIT_TRIGGER", -#endif -#ifdef SQLITE_OMIT_TRUNCATE_OPTIMIZATION - "OMIT_TRUNCATE_OPTIMIZATION", -#endif -#ifdef SQLITE_OMIT_UTF16 - "OMIT_UTF16", -#endif -#ifdef SQLITE_OMIT_VACUUM - "OMIT_VACUUM", -#endif -#ifdef SQLITE_OMIT_VIEW - "OMIT_VIEW", -#endif -#ifdef SQLITE_OMIT_VIRTUALTABLE - "OMIT_VIRTUALTABLE", -#endif -#ifdef SQLITE_OMIT_WAL - "OMIT_WAL", -#endif -#ifdef SQLITE_OMIT_WSD - "OMIT_WSD", 
-#endif -#ifdef SQLITE_OMIT_XFER_OPT - "OMIT_XFER_OPT", -#endif -#ifdef SQLITE_PERFORMANCE_TRACE - "PERFORMANCE_TRACE", -#endif -#ifdef SQLITE_PROXY_DEBUG - "PROXY_DEBUG", -#endif -#ifdef SQLITE_RTREE_INT_ONLY - "RTREE_INT_ONLY", -#endif -#ifdef SQLITE_SECURE_DELETE - "SECURE_DELETE", -#endif -#ifdef SQLITE_SMALL_STACK - "SMALL_STACK", -#endif -#ifdef SQLITE_SOUNDEX - "SOUNDEX", -#endif -#ifdef SQLITE_SYSTEM_MALLOC - "SYSTEM_MALLOC", -#endif -#ifdef SQLITE_TCL - "TCL", -#endif -#if defined(SQLITE_TEMP_STORE) && !defined(SQLITE_TEMP_STORE_xc) - "TEMP_STORE=" CTIMEOPT_VAL(SQLITE_TEMP_STORE), -#endif -#ifdef SQLITE_TEST - "TEST", -#endif -#if defined(SQLITE_THREADSAFE) - "THREADSAFE=" CTIMEOPT_VAL(SQLITE_THREADSAFE), -#endif -#ifdef SQLITE_USE_ALLOCA - "USE_ALLOCA", -#endif -#ifdef SQLITE_WIN32_MALLOC - "WIN32_MALLOC", -#endif -#ifdef SQLITE_ZERO_MALLOC - "ZERO_MALLOC" -#endif -}; - -/* -** Given the name of a compile-time option, return true if that option -** was used and false if not. -** -** The name can optionally begin with "SQLITE_" but the "SQLITE_" prefix -** is not required for a match. -*/ -SQLITE_API int sqlite3_compileoption_used(const char *zOptName){ - int i, n; - if( sqlite3StrNICmp(zOptName, "SQLITE_", 7)==0 ) zOptName += 7; - n = sqlite3Strlen30(zOptName); - - /* Since ArraySize(azCompileOpt) is normally in single digits, a - ** linear search is adequate. No need for a binary search. */ - for(i=0; i=0 && NaDb[] (or -1) */ - u8 nullRow; /* True if pointing to a row with no data */ - u8 rowidIsValid; /* True if lastRowid is valid */ - u8 deferredMoveto; /* A call to sqlite3BtreeMoveto() is needed */ - Bool isEphemeral:1; /* True for an ephemeral table */ - Bool useRandomRowid:1;/* Generate new record numbers semi-randomly */ - Bool isTable:1; /* True if a table requiring integer keys */ - Bool isOrdered:1; /* True if the underlying table is BTREE_UNORDERED */ - sqlite3_vtab_cursor *pVtabCursor; /* The cursor for a virtual table */ - i64 seqCount; /* Sequence counter */ - i64 movetoTarget; /* Argument to the deferred sqlite3BtreeMoveto() */ - i64 lastRowid; /* Rowid being deleted by OP_Delete */ - VdbeSorter *pSorter; /* Sorter object for OP_SorterOpen cursors */ - - /* Cached information about the header for the data record that the - ** cursor is currently pointing to. Only valid if cacheStatus matches - ** Vdbe.cacheCtr. Vdbe.cacheCtr will never take on the value of - ** CACHE_STALE and so setting cacheStatus=CACHE_STALE guarantees that - ** the cache is out of date. - ** - ** aRow might point to (ephemeral) data for the current row, or it might - ** be NULL. - */ - u32 cacheStatus; /* Cache is valid if this matches Vdbe.cacheCtr */ - u32 payloadSize; /* Total number of bytes in the record */ - u32 szRow; /* Byte available in aRow */ - u32 iHdrOffset; /* Offset to next unparsed byte of the header */ - const u8 *aRow; /* Data for the current row, if all on one page */ - u32 aType[1]; /* Type values for all entries in the record */ - /* 2*nField extra array elements allocated for aType[], beyond the one - ** static element declared in the structure. nField total array slots for - ** aType[] and nField+1 array slots for aOffset[] */ -}; -typedef struct VdbeCursor VdbeCursor; - -/* -** When a sub-program is executed (OP_Program), a structure of this type -** is allocated to store the current value of the program counter, as -** well as the current memory cell array and various other frame specific -** values stored in the Vdbe struct. 
When the sub-program is finished, -** these values are copied back to the Vdbe from the VdbeFrame structure, -** restoring the state of the VM to as it was before the sub-program -** began executing. -** -** The memory for a VdbeFrame object is allocated and managed by a memory -** cell in the parent (calling) frame. When the memory cell is deleted or -** overwritten, the VdbeFrame object is not freed immediately. Instead, it -** is linked into the Vdbe.pDelFrame list. The contents of the Vdbe.pDelFrame -** list is deleted when the VM is reset in VdbeHalt(). The reason for doing -** this instead of deleting the VdbeFrame immediately is to avoid recursive -** calls to sqlite3VdbeMemRelease() when the memory cells belonging to the -** child frame are released. -** -** The currently executing frame is stored in Vdbe.pFrame. Vdbe.pFrame is -** set to NULL if the currently executing frame is the main program. -*/ -typedef struct VdbeFrame VdbeFrame; -struct VdbeFrame { - Vdbe *v; /* VM this frame belongs to */ - VdbeFrame *pParent; /* Parent of this frame, or NULL if parent is main */ - Op *aOp; /* Program instructions for parent frame */ - Mem *aMem; /* Array of memory cells for parent frame */ - u8 *aOnceFlag; /* Array of OP_Once flags for parent frame */ - VdbeCursor **apCsr; /* Array of Vdbe cursors for parent frame */ - void *token; /* Copy of SubProgram.token */ - i64 lastRowid; /* Last insert rowid (sqlite3.lastRowid) */ - int nCursor; /* Number of entries in apCsr */ - int pc; /* Program Counter in parent (calling) frame */ - int nOp; /* Size of aOp array */ - int nMem; /* Number of entries in aMem */ - int nOnceFlag; /* Number of entries in aOnceFlag */ - int nChildMem; /* Number of memory cells for child frame */ - int nChildCsr; /* Number of cursors for child frame */ - int nChange; /* Statement changes (Vdbe.nChanges) */ -}; - -#define VdbeFrameMem(p) ((Mem *)&((u8 *)p)[ROUND8(sizeof(VdbeFrame))]) - -/* -** A value for VdbeCursor.cacheValid that means the cache is always invalid. -*/ -#define CACHE_STALE 0 - -/* -** Internally, the vdbe manipulates nearly all SQL values as Mem -** structures. Each Mem struct may cache multiple representations (string, -** integer etc.) of the same value. -*/ -struct Mem { - sqlite3 *db; /* The associated database connection */ - char *z; /* String or BLOB value */ - double r; /* Real value */ - union { - i64 i; /* Integer value used when MEM_Int is set in flags */ - int nZero; /* Used when bit MEM_Zero is set in flags */ - FuncDef *pDef; /* Used only when flags==MEM_Agg */ - RowSet *pRowSet; /* Used only when flags==MEM_RowSet */ - VdbeFrame *pFrame; /* Used when flags==MEM_Frame */ - } u; - int n; /* Number of characters in string value, excluding '\0' */ - u16 flags; /* Some combination of MEM_Null, MEM_Str, MEM_Dyn, etc. */ - u8 enc; /* SQLITE_UTF8, SQLITE_UTF16BE, SQLITE_UTF16LE */ -#ifdef SQLITE_DEBUG - Mem *pScopyFrom; /* This Mem is a shallow copy of pScopyFrom */ - void *pFiller; /* So that sizeof(Mem) is a multiple of 8 */ -#endif - void (*xDel)(void *); /* If not null, call this function to delete Mem.z */ - char *zMalloc; /* Dynamic buffer allocated by sqlite3_malloc() */ -}; - -/* One or more of the following flags are set to indicate the validOK -** representations of the value stored in the Mem struct. -** -** If the MEM_Null flag is set, then the value is an SQL NULL value. -** No other flags may be set in this case. -** -** If the MEM_Str flag is set then Mem.z points at a string representation. 
-** Usually this is encoded in the same unicode encoding as the main -** database (see below for exceptions). If the MEM_Term flag is also -** set, then the string is nul terminated. The MEM_Int and MEM_Real -** flags may coexist with the MEM_Str flag. -*/ -#define MEM_Null 0x0001 /* Value is NULL */ -#define MEM_Str 0x0002 /* Value is a string */ -#define MEM_Int 0x0004 /* Value is an integer */ -#define MEM_Real 0x0008 /* Value is a real number */ -#define MEM_Blob 0x0010 /* Value is a BLOB */ -#define MEM_AffMask 0x001f /* Mask of affinity bits */ -#define MEM_RowSet 0x0020 /* Value is a RowSet object */ -#define MEM_Frame 0x0040 /* Value is a VdbeFrame object */ -#define MEM_Undefined 0x0080 /* Value is undefined */ -#define MEM_Cleared 0x0100 /* NULL set by OP_Null, not from data */ -#define MEM_TypeMask 0x01ff /* Mask of type bits */ - - -/* Whenever Mem contains a valid string or blob representation, one of -** the following flags must be set to determine the memory management -** policy for Mem.z. The MEM_Term flag tells us whether or not the -** string is \000 or \u0000 terminated -*/ -#define MEM_Term 0x0200 /* String rep is nul terminated */ -#define MEM_Dyn 0x0400 /* Need to call Mem.xDel() on Mem.z */ -#define MEM_Static 0x0800 /* Mem.z points to a static string */ -#define MEM_Ephem 0x1000 /* Mem.z points to an ephemeral string */ -#define MEM_Agg 0x2000 /* Mem.z points to an agg function context */ -#define MEM_Zero 0x4000 /* Mem.i contains count of 0s appended to blob */ -#ifdef SQLITE_OMIT_INCRBLOB - #undef MEM_Zero - #define MEM_Zero 0x0000 -#endif - -/* -** Clear any existing type flags from a Mem and replace them with f -*/ -#define MemSetTypeFlag(p, f) \ - ((p)->flags = ((p)->flags&~(MEM_TypeMask|MEM_Zero))|f) - -/* -** Return true if a memory cell is not marked as invalid. This macro -** is for use inside assert() statements only. -*/ -#ifdef SQLITE_DEBUG -#define memIsValid(M) ((M)->flags & MEM_Undefined)==0 -#endif - -/* -** Each auxilliary data pointer stored by a user defined function -** implementation calling sqlite3_set_auxdata() is stored in an instance -** of this structure. All such structures associated with a single VM -** are stored in a linked list headed at Vdbe.pAuxData. All are destroyed -** when the VM is halted (if not before). -*/ -struct AuxData { - int iOp; /* Instruction number of OP_Function opcode */ - int iArg; /* Index of function argument. */ - void *pAux; /* Aux data pointer */ - void (*xDelete)(void *); /* Destructor for the aux data */ - AuxData *pNext; /* Next element in list */ -}; - -/* -** The "context" argument for a installable function. A pointer to an -** instance of this structure is the first argument to the routines used -** implement the SQL functions. -** -** There is a typedef for this structure in sqlite.h. So all routines, -** even the public interface to SQLite, can use a pointer to this structure. -** But this file is the only place where the internal details of this -** structure are known. -** -** This structure is defined inside of vdbeInt.h because it uses substructures -** (Mem) which are only defined there. -*/ -struct sqlite3_context { - FuncDef *pFunc; /* Pointer to function information. MUST BE FIRST */ - Mem s; /* The return value is stored here */ - Mem *pMem; /* Memory cell used to store aggregate context */ - CollSeq *pColl; /* Collating sequence */ - Vdbe *pVdbe; /* The VM that owns this context */ - int iOp; /* Instruction number of OP_Function */ - int isError; /* Error code returned by the function. 
*/ - u8 skipFlag; /* Skip skip accumulator loading if true */ - u8 fErrorOrAux; /* isError!=0 or pVdbe->pAuxData modified */ -}; - -/* -** An Explain object accumulates indented output which is helpful -** in describing recursive data structures. -*/ -struct Explain { - Vdbe *pVdbe; /* Attach the explanation to this Vdbe */ - StrAccum str; /* The string being accumulated */ - int nIndent; /* Number of elements in aIndent */ - u16 aIndent[100]; /* Levels of indentation */ - char zBase[100]; /* Initial space */ -}; - -/* A bitfield type for use inside of structures. Always follow with :N where -** N is the number of bits. -*/ -typedef unsigned bft; /* Bit Field Type */ - -/* -** An instance of the virtual machine. This structure contains the complete -** state of the virtual machine. -** -** The "sqlite3_stmt" structure pointer that is returned by sqlite3_prepare() -** is really a pointer to an instance of this structure. -** -** The Vdbe.inVtabMethod variable is set to non-zero for the duration of -** any virtual table method invocations made by the vdbe program. It is -** set to 2 for xDestroy method calls and 1 for all other methods. This -** variable is used for two purposes: to allow xDestroy methods to execute -** "DROP TABLE" statements and to prevent some nasty side effects of -** malloc failure when SQLite is invoked recursively by a virtual table -** method function. -*/ -struct Vdbe { - sqlite3 *db; /* The database connection that owns this statement */ - Op *aOp; /* Space to hold the virtual machine's program */ - Mem *aMem; /* The memory locations */ - Mem **apArg; /* Arguments to currently executing user function */ - Mem *aColName; /* Column names to return */ - Mem *pResultSet; /* Pointer to an array of results */ - Parse *pParse; /* Parsing context used to create this Vdbe */ - int nMem; /* Number of memory locations currently allocated */ - int nOp; /* Number of instructions in the program */ - int nCursor; /* Number of slots in apCsr[] */ - u32 magic; /* Magic number for sanity checking */ - char *zErrMsg; /* Error message written here */ - Vdbe *pPrev,*pNext; /* Linked list of VDBEs with the same Vdbe.db */ - VdbeCursor **apCsr; /* One element of this array for each open cursor */ - Mem *aVar; /* Values for the OP_Variable opcode. 
*/ - char **azVar; /* Name of variables */ - ynVar nVar; /* Number of entries in aVar[] */ - ynVar nzVar; /* Number of entries in azVar[] */ - u32 cacheCtr; /* VdbeCursor row cache generation counter */ - int pc; /* The program counter */ - int rc; /* Value to return */ - u16 nResColumn; /* Number of columns in one row of the result set */ - u8 errorAction; /* Recovery action to do in case of an error */ - u8 minWriteFileFormat; /* Minimum file format for writable database files */ - bft explain:2; /* True if EXPLAIN present on SQL command */ - bft inVtabMethod:2; /* See comments above */ - bft changeCntOn:1; /* True to update the change-counter */ - bft expired:1; /* True if the VM needs to be recompiled */ - bft runOnlyOnce:1; /* Automatically expire on reset */ - bft usesStmtJournal:1; /* True if uses a statement journal */ - bft readOnly:1; /* True for statements that do not write */ - bft bIsReader:1; /* True for statements that read */ - bft isPrepareV2:1; /* True if prepared with prepare_v2() */ - bft doingRerun:1; /* True if rerunning after an auto-reprepare */ - int nChange; /* Number of db changes made since last reset */ - yDbMask btreeMask; /* Bitmask of db->aDb[] entries referenced */ - yDbMask lockMask; /* Subset of btreeMask that requires a lock */ - int iStatement; /* Statement number (or 0 if has not opened stmt) */ - u32 aCounter[5]; /* Counters used by sqlite3_stmt_status() */ -#ifndef SQLITE_OMIT_TRACE - i64 startTime; /* Time when query started - used for profiling */ -#endif - i64 iCurrentTime; /* Value of julianday('now') for this statement */ - i64 nFkConstraint; /* Number of imm. FK constraints this VM */ - i64 nStmtDefCons; /* Number of def. constraints when stmt started */ - i64 nStmtDefImmCons; /* Number of def. imm constraints when stmt started */ - char *zSql; /* Text of the SQL statement that generated this */ - void *pFree; /* Free this when deleting the vdbe */ -#ifdef SQLITE_ENABLE_TREE_EXPLAIN - Explain *pExplain; /* The explainer */ - char *zExplain; /* Explanation of data structures */ -#endif - VdbeFrame *pFrame; /* Parent frame */ - VdbeFrame *pDelFrame; /* List of frame objects to free on VM reset */ - int nFrame; /* Number of frames in pFrame list */ - u32 expmask; /* Binding to these vars invalidates VM */ - SubProgram *pProgram; /* Linked list of all sub-programs used by VM */ - int nOnceFlag; /* Size of array aOnceFlag[] */ - u8 *aOnceFlag; /* Flags for OP_Once */ - AuxData *pAuxData; /* Linked list of auxdata allocations */ -}; - -/* -** The following are allowed values for Vdbe.magic -*/ -#define VDBE_MAGIC_INIT 0x26bceaa5 /* Building a VDBE program */ -#define VDBE_MAGIC_RUN 0xbdf20da3 /* VDBE is ready to execute */ -#define VDBE_MAGIC_HALT 0x519c2973 /* VDBE has completed execution */ -#define VDBE_MAGIC_DEAD 0xb606c3c8 /* The VDBE has been deallocated */ - -/* -** Function prototypes -*/ -SQLITE_PRIVATE void sqlite3VdbeFreeCursor(Vdbe *, VdbeCursor*); -void sqliteVdbePopStack(Vdbe*,int); -SQLITE_PRIVATE int sqlite3VdbeCursorMoveto(VdbeCursor*); -#if defined(SQLITE_DEBUG) || defined(VDBE_PROFILE) -SQLITE_PRIVATE void sqlite3VdbePrintOp(FILE*, int, Op*); -#endif -SQLITE_PRIVATE u32 sqlite3VdbeSerialTypeLen(u32); -SQLITE_PRIVATE u32 sqlite3VdbeSerialType(Mem*, int); -SQLITE_PRIVATE u32 sqlite3VdbeSerialPut(unsigned char*, Mem*, u32); -SQLITE_PRIVATE u32 sqlite3VdbeSerialGet(const unsigned char*, u32, Mem*); -SQLITE_PRIVATE void sqlite3VdbeDeleteAuxData(Vdbe*, int, int); - -int sqlite2BtreeKeyCompare(BtCursor *, const void *, int, int, int *); 
-SQLITE_PRIVATE int sqlite3VdbeIdxKeyCompare(VdbeCursor*,UnpackedRecord*,int*); -SQLITE_PRIVATE int sqlite3VdbeIdxRowid(sqlite3*, BtCursor *, i64 *); -SQLITE_PRIVATE int sqlite3MemCompare(const Mem*, const Mem*, const CollSeq*); -SQLITE_PRIVATE int sqlite3VdbeExec(Vdbe*); -SQLITE_PRIVATE int sqlite3VdbeList(Vdbe*); -SQLITE_PRIVATE int sqlite3VdbeHalt(Vdbe*); -SQLITE_PRIVATE int sqlite3VdbeChangeEncoding(Mem *, int); -SQLITE_PRIVATE int sqlite3VdbeMemTooBig(Mem*); -SQLITE_PRIVATE int sqlite3VdbeMemCopy(Mem*, const Mem*); -SQLITE_PRIVATE void sqlite3VdbeMemShallowCopy(Mem*, const Mem*, int); -SQLITE_PRIVATE void sqlite3VdbeMemMove(Mem*, Mem*); -SQLITE_PRIVATE int sqlite3VdbeMemNulTerminate(Mem*); -SQLITE_PRIVATE int sqlite3VdbeMemSetStr(Mem*, const char*, int, u8, void(*)(void*)); -SQLITE_PRIVATE void sqlite3VdbeMemSetInt64(Mem*, i64); -#ifdef SQLITE_OMIT_FLOATING_POINT -# define sqlite3VdbeMemSetDouble sqlite3VdbeMemSetInt64 -#else -SQLITE_PRIVATE void sqlite3VdbeMemSetDouble(Mem*, double); -#endif -SQLITE_PRIVATE void sqlite3VdbeMemSetNull(Mem*); -SQLITE_PRIVATE void sqlite3VdbeMemSetZeroBlob(Mem*,int); -SQLITE_PRIVATE void sqlite3VdbeMemSetRowSet(Mem*); -SQLITE_PRIVATE int sqlite3VdbeMemMakeWriteable(Mem*); -SQLITE_PRIVATE int sqlite3VdbeMemStringify(Mem*, int); -SQLITE_PRIVATE i64 sqlite3VdbeIntValue(Mem*); -SQLITE_PRIVATE int sqlite3VdbeMemIntegerify(Mem*); -SQLITE_PRIVATE double sqlite3VdbeRealValue(Mem*); -SQLITE_PRIVATE void sqlite3VdbeIntegerAffinity(Mem*); -SQLITE_PRIVATE int sqlite3VdbeMemRealify(Mem*); -SQLITE_PRIVATE int sqlite3VdbeMemNumerify(Mem*); -SQLITE_PRIVATE int sqlite3VdbeMemFromBtree(BtCursor*,u32,u32,int,Mem*); -SQLITE_PRIVATE void sqlite3VdbeMemRelease(Mem *p); -SQLITE_PRIVATE void sqlite3VdbeMemReleaseExternal(Mem *p); -#define VdbeMemDynamic(X) \ - (((X)->flags&(MEM_Agg|MEM_Dyn|MEM_RowSet|MEM_Frame))!=0) -#define VdbeMemRelease(X) \ - if( VdbeMemDynamic(X) ) sqlite3VdbeMemReleaseExternal(X); -SQLITE_PRIVATE int sqlite3VdbeMemFinalize(Mem*, FuncDef*); -SQLITE_PRIVATE const char *sqlite3OpcodeName(int); -SQLITE_PRIVATE int sqlite3VdbeMemGrow(Mem *pMem, int n, int preserve); -SQLITE_PRIVATE int sqlite3VdbeCloseStatement(Vdbe *, int); -SQLITE_PRIVATE void sqlite3VdbeFrameDelete(VdbeFrame*); -SQLITE_PRIVATE int sqlite3VdbeFrameRestore(VdbeFrame *); -SQLITE_PRIVATE int sqlite3VdbeTransferError(Vdbe *p); - -SQLITE_PRIVATE int sqlite3VdbeSorterInit(sqlite3 *, VdbeCursor *); -SQLITE_PRIVATE void sqlite3VdbeSorterReset(sqlite3 *, VdbeSorter *); -SQLITE_PRIVATE void sqlite3VdbeSorterClose(sqlite3 *, VdbeCursor *); -SQLITE_PRIVATE int sqlite3VdbeSorterRowkey(const VdbeCursor *, Mem *); -SQLITE_PRIVATE int sqlite3VdbeSorterNext(sqlite3 *, const VdbeCursor *, int *); -SQLITE_PRIVATE int sqlite3VdbeSorterRewind(sqlite3 *, const VdbeCursor *, int *); -SQLITE_PRIVATE int sqlite3VdbeSorterWrite(sqlite3 *, const VdbeCursor *, Mem *); -SQLITE_PRIVATE int sqlite3VdbeSorterCompare(const VdbeCursor *, Mem *, int, int *); - -#if !defined(SQLITE_OMIT_SHARED_CACHE) && SQLITE_THREADSAFE>0 -SQLITE_PRIVATE void sqlite3VdbeEnter(Vdbe*); -SQLITE_PRIVATE void sqlite3VdbeLeave(Vdbe*); -#else -# define sqlite3VdbeEnter(X) -# define sqlite3VdbeLeave(X) -#endif - -#ifdef SQLITE_DEBUG -SQLITE_PRIVATE void sqlite3VdbeMemAboutToChange(Vdbe*,Mem*); -SQLITE_PRIVATE int sqlite3VdbeCheckMemInvariants(Mem*); -#endif - -#ifndef SQLITE_OMIT_FOREIGN_KEY -SQLITE_PRIVATE int sqlite3VdbeCheckFk(Vdbe *, int); -#else -# define sqlite3VdbeCheckFk(p,i) 0 -#endif - -SQLITE_PRIVATE int sqlite3VdbeMemTranslate(Mem*, u8); 
-#ifdef SQLITE_DEBUG -SQLITE_PRIVATE void sqlite3VdbePrintSql(Vdbe*); -SQLITE_PRIVATE void sqlite3VdbeMemPrettyPrint(Mem *pMem, char *zBuf); -#endif -SQLITE_PRIVATE int sqlite3VdbeMemHandleBom(Mem *pMem); - -#ifndef SQLITE_OMIT_INCRBLOB -SQLITE_PRIVATE int sqlite3VdbeMemExpandBlob(Mem *); - #define ExpandBlob(P) (((P)->flags&MEM_Zero)?sqlite3VdbeMemExpandBlob(P):0) -#else - #define sqlite3VdbeMemExpandBlob(x) SQLITE_OK - #define ExpandBlob(P) SQLITE_OK -#endif - -#endif /* !defined(_VDBEINT_H_) */ - -/************** End of vdbeInt.h *********************************************/ -/************** Continuing where we left off in status.c *********************/ - -/* -** Variables in which to record status information. -*/ -typedef struct sqlite3StatType sqlite3StatType; -static SQLITE_WSD struct sqlite3StatType { - int nowValue[10]; /* Current value */ - int mxValue[10]; /* Maximum value */ -} sqlite3Stat = { {0,}, {0,} }; - - -/* The "wsdStat" macro will resolve to the status information -** state vector. If writable static data is unsupported on the target, -** we have to locate the state vector at run-time. In the more common -** case where writable static data is supported, wsdStat can refer directly -** to the "sqlite3Stat" state vector declared above. -*/ -#ifdef SQLITE_OMIT_WSD -# define wsdStatInit sqlite3StatType *x = &GLOBAL(sqlite3StatType,sqlite3Stat) -# define wsdStat x[0] -#else -# define wsdStatInit -# define wsdStat sqlite3Stat -#endif - -/* -** Return the current value of a status parameter. -*/ -SQLITE_PRIVATE int sqlite3StatusValue(int op){ - wsdStatInit; - assert( op>=0 && op=0 && opwsdStat.mxValue[op] ){ - wsdStat.mxValue[op] = wsdStat.nowValue[op]; - } -} - -/* -** Set the value of a status to X. -*/ -SQLITE_PRIVATE void sqlite3StatusSet(int op, int X){ - wsdStatInit; - assert( op>=0 && opwsdStat.mxValue[op] ){ - wsdStat.mxValue[op] = wsdStat.nowValue[op]; - } -} - -/* -** Query status information. -** -** This implementation assumes that reading or writing an aligned -** 32-bit integer is an atomic operation. If that assumption is not true, -** then this routine is not threadsafe. 
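These counters are exposed through SQLite's documented sqlite3_status() interface, whose implementation follows immediately below. A minimal usage sketch (error handling elided; SQLITE_STATUS_MEMORY_USED is one of the standard status verbs):

#include <stdio.h>
#include "sqlite3.h"

/* Read a global counter: current value and high-water mark.  Passing 1 as
** the last argument would reset the high-water mark down to the current
** value, exactly as the implementation below does. */
static void show_memory_used(void){
  int cur = 0, hi = 0;
  if( sqlite3_status(SQLITE_STATUS_MEMORY_USED, &cur, &hi, 0)==SQLITE_OK ){
    printf("memory in use: %d bytes (high-water %d)\n", cur, hi);
  }
}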
-*/ -SQLITE_API int sqlite3_status(int op, int *pCurrent, int *pHighwater, int resetFlag){ - wsdStatInit; - if( op<0 || op>=ArraySize(wsdStat.nowValue) ){ - return SQLITE_MISUSE_BKPT; - } - *pCurrent = wsdStat.nowValue[op]; - *pHighwater = wsdStat.mxValue[op]; - if( resetFlag ){ - wsdStat.mxValue[op] = wsdStat.nowValue[op]; - } - return SQLITE_OK; -} - -/* -** Query status information for a single database connection -*/ -SQLITE_API int sqlite3_db_status( - sqlite3 *db, /* The database connection whose status is desired */ - int op, /* Status verb */ - int *pCurrent, /* Write current value here */ - int *pHighwater, /* Write high-water mark here */ - int resetFlag /* Reset high-water mark if true */ -){ - int rc = SQLITE_OK; /* Return code */ - sqlite3_mutex_enter(db->mutex); - switch( op ){ - case SQLITE_DBSTATUS_LOOKASIDE_USED: { - *pCurrent = db->lookaside.nOut; - *pHighwater = db->lookaside.mxOut; - if( resetFlag ){ - db->lookaside.mxOut = db->lookaside.nOut; - } - break; - } - - case SQLITE_DBSTATUS_LOOKASIDE_HIT: - case SQLITE_DBSTATUS_LOOKASIDE_MISS_SIZE: - case SQLITE_DBSTATUS_LOOKASIDE_MISS_FULL: { - testcase( op==SQLITE_DBSTATUS_LOOKASIDE_HIT ); - testcase( op==SQLITE_DBSTATUS_LOOKASIDE_MISS_SIZE ); - testcase( op==SQLITE_DBSTATUS_LOOKASIDE_MISS_FULL ); - assert( (op-SQLITE_DBSTATUS_LOOKASIDE_HIT)>=0 ); - assert( (op-SQLITE_DBSTATUS_LOOKASIDE_HIT)<3 ); - *pCurrent = 0; - *pHighwater = db->lookaside.anStat[op - SQLITE_DBSTATUS_LOOKASIDE_HIT]; - if( resetFlag ){ - db->lookaside.anStat[op - SQLITE_DBSTATUS_LOOKASIDE_HIT] = 0; - } - break; - } - - /* - ** Return an approximation for the amount of memory currently used - ** by all pagers associated with the given database connection. The - ** highwater mark is meaningless and is returned as zero. - */ - case SQLITE_DBSTATUS_CACHE_USED: { - int totalUsed = 0; - int i; - sqlite3BtreeEnterAll(db); - for(i=0; inDb; i++){ - Btree *pBt = db->aDb[i].pBt; - if( pBt ){ - Pager *pPager = sqlite3BtreePager(pBt); - totalUsed += sqlite3PagerMemUsed(pPager); - } - } - sqlite3BtreeLeaveAll(db); - *pCurrent = totalUsed; - *pHighwater = 0; - break; - } - - /* - ** *pCurrent gets an accurate estimate of the amount of memory used - ** to store the schema for all databases (main, temp, and any ATTACHed - ** databases. *pHighwater is set to zero. - */ - case SQLITE_DBSTATUS_SCHEMA_USED: { - int i; /* Used to iterate through schemas */ - int nByte = 0; /* Used to accumulate return value */ - - sqlite3BtreeEnterAll(db); - db->pnBytesFreed = &nByte; - for(i=0; inDb; i++){ - Schema *pSchema = db->aDb[i].pSchema; - if( ALWAYS(pSchema!=0) ){ - HashElem *p; - - nByte += sqlite3GlobalConfig.m.xRoundup(sizeof(HashElem)) * ( - pSchema->tblHash.count - + pSchema->trigHash.count - + pSchema->idxHash.count - + pSchema->fkeyHash.count - ); - nByte += sqlite3MallocSize(pSchema->tblHash.ht); - nByte += sqlite3MallocSize(pSchema->trigHash.ht); - nByte += sqlite3MallocSize(pSchema->idxHash.ht); - nByte += sqlite3MallocSize(pSchema->fkeyHash.ht); - - for(p=sqliteHashFirst(&pSchema->trigHash); p; p=sqliteHashNext(p)){ - sqlite3DeleteTrigger(db, (Trigger*)sqliteHashData(p)); - } - for(p=sqliteHashFirst(&pSchema->tblHash); p; p=sqliteHashNext(p)){ - sqlite3DeleteTable(db, (Table *)sqliteHashData(p)); - } - } - } - db->pnBytesFreed = 0; - sqlite3BtreeLeaveAll(db); - - *pHighwater = 0; - *pCurrent = nByte; - break; - } - - /* - ** *pCurrent gets an accurate estimate of the amount of memory used - ** to store all prepared statements. - ** *pHighwater is set to zero. 
- */ - case SQLITE_DBSTATUS_STMT_USED: { - struct Vdbe *pVdbe; /* Used to iterate through VMs */ - int nByte = 0; /* Used to accumulate return value */ - - db->pnBytesFreed = &nByte; - for(pVdbe=db->pVdbe; pVdbe; pVdbe=pVdbe->pNext){ - sqlite3VdbeClearObject(db, pVdbe); - sqlite3DbFree(db, pVdbe); - } - db->pnBytesFreed = 0; - - *pHighwater = 0; - *pCurrent = nByte; - - break; - } - - /* - ** Set *pCurrent to the total cache hits or misses encountered by all - ** pagers the database handle is connected to. *pHighwater is always set - ** to zero. - */ - case SQLITE_DBSTATUS_CACHE_HIT: - case SQLITE_DBSTATUS_CACHE_MISS: - case SQLITE_DBSTATUS_CACHE_WRITE:{ - int i; - int nRet = 0; - assert( SQLITE_DBSTATUS_CACHE_MISS==SQLITE_DBSTATUS_CACHE_HIT+1 ); - assert( SQLITE_DBSTATUS_CACHE_WRITE==SQLITE_DBSTATUS_CACHE_HIT+2 ); - - for(i=0; inDb; i++){ - if( db->aDb[i].pBt ){ - Pager *pPager = sqlite3BtreePager(db->aDb[i].pBt); - sqlite3PagerCacheStat(pPager, op, resetFlag, &nRet); - } - } - *pHighwater = 0; - *pCurrent = nRet; - break; - } - - /* Set *pCurrent to non-zero if there are unresolved deferred foreign - ** key constraints. Set *pCurrent to zero if all foreign key constraints - ** have been satisfied. The *pHighwater is always set to zero. - */ - case SQLITE_DBSTATUS_DEFERRED_FKS: { - *pHighwater = 0; - *pCurrent = db->nDeferredImmCons>0 || db->nDeferredCons>0; - break; - } - - default: { - rc = SQLITE_ERROR; - } - } - sqlite3_mutex_leave(db->mutex); - return rc; -} - -/************** End of status.c **********************************************/ -/************** Begin file date.c ********************************************/ -/* -** 2003 October 31 -** -** The author disclaims copyright to this source code. In place of -** a legal notice, here is a blessing: -** -** May you do good and not evil. -** May you find forgiveness for yourself and forgive others. -** May you share freely, never taking more than you give. -** -************************************************************************* -** This file contains the C functions that implement date and time -** functions for SQLite. -** -** There is only one exported symbol in this file - the function -** sqlite3RegisterDateTimeFunctions() found at the bottom of the file. -** All other code has file scope. -** -** SQLite processes all times and dates as Julian Day numbers. The -** dates and times are stored as the number of days since noon -** in Greenwich on November 24, 4714 B.C. according to the Gregorian -** calendar system. -** -** 1970-01-01 00:00:00 is JD 2440587.5 -** 2000-01-01 00:00:00 is JD 2451544.5 -** -** This implemention requires years to be expressed as a 4-digit number -** which means that only dates between 0000-01-01 and 9999-12-31 can -** be represented, even though julian day numbers allow a much wider -** range of dates. -** -** The Gregorian calendar system is used for all dates and times, -** even those that predate the Gregorian calendar. Historians usually -** use the Julian calendar for dates prior to 1582-10-15 and for some -** dates afterwards, depending on locale. Beware of this difference. -** -** The conversion algorithms are implemented based on descriptions -** in the following text: -** -** Jean Meeus -** Astronomical Algorithms, 2nd Edition, 1998 -** ISBM 0-943396-61-1 -** Willmann-Bell, Inc -** Richmond, Virginia (USA) -*/ -/* #include */ -/* #include */ -#include - -#ifndef SQLITE_OMIT_DATETIME_FUNCS - - -/* -** A structure for holding a single date and time. 
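The per-connection variant implemented just above, sqlite3_db_status(), follows the same current/high-water convention. A brief sketch (public API; db is assumed to be an already-open connection; SQLITE_DBSTATUS_CACHE_USED is the verb handled by the CACHE_USED case above, whose high-water mark is documented as meaningless and returned as zero):

#include <stdio.h>
#include "sqlite3.h"

/* Approximate pager-cache memory in use by one connection. */
static void show_cache_used(sqlite3 *db){
  int cur = 0, hi = 0;
  if( sqlite3_db_status(db, SQLITE_DBSTATUS_CACHE_USED, &cur, &hi, 0)==SQLITE_OK ){
    printf("page cache in use: %d bytes\n", cur);   /* hi is always 0 here */
  }
}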
-*/ -typedef struct DateTime DateTime; -struct DateTime { - sqlite3_int64 iJD; /* The julian day number times 86400000 */ - int Y, M, D; /* Year, month, and day */ - int h, m; /* Hour and minutes */ - int tz; /* Timezone offset in minutes */ - double s; /* Seconds */ - char validYMD; /* True (1) if Y,M,D are valid */ - char validHMS; /* True (1) if h,m,s are valid */ - char validJD; /* True (1) if iJD is valid */ - char validTZ; /* True (1) if tz is valid */ -}; - - -/* -** Convert zDate into one or more integers. Additional arguments -** come in groups of 5 as follows: -** -** N number of digits in the integer -** min minimum allowed value of the integer -** max maximum allowed value of the integer -** nextC first character after the integer -** pVal where to write the integers value. -** -** Conversions continue until one with nextC==0 is encountered. -** The function returns the number of successful conversions. -*/ -static int getDigits(const char *zDate, ...){ - va_list ap; - int val; - int N; - int min; - int max; - int nextC; - int *pVal; - int cnt = 0; - va_start(ap, zDate); - do{ - N = va_arg(ap, int); - min = va_arg(ap, int); - max = va_arg(ap, int); - nextC = va_arg(ap, int); - pVal = va_arg(ap, int*); - val = 0; - while( N-- ){ - if( !sqlite3Isdigit(*zDate) ){ - goto end_getDigits; - } - val = val*10 + *zDate - '0'; - zDate++; - } - if( valmax || (nextC!=0 && nextC!=*zDate) ){ - goto end_getDigits; - } - *pVal = val; - zDate++; - cnt++; - }while( nextC ); -end_getDigits: - va_end(ap); - return cnt; -} - -/* -** Parse a timezone extension on the end of a date-time. -** The extension is of the form: -** -** (+/-)HH:MM -** -** Or the "zulu" notation: -** -** Z -** -** If the parse is successful, write the number of minutes -** of change in p->tz and return 0. If a parser error occurs, -** return non-zero. -** -** A missing specifier is not considered an error. -*/ -static int parseTimezone(const char *zDate, DateTime *p){ - int sgn = 0; - int nHr, nMn; - int c; - while( sqlite3Isspace(*zDate) ){ zDate++; } - p->tz = 0; - c = *zDate; - if( c=='-' ){ - sgn = -1; - }else if( c=='+' ){ - sgn = +1; - }else if( c=='Z' || c=='z' ){ - zDate++; - goto zulu_time; - }else{ - return c!=0; - } - zDate++; - if( getDigits(zDate, 2, 0, 14, ':', &nHr, 2, 0, 59, 0, &nMn)!=2 ){ - return 1; - } - zDate += 5; - p->tz = sgn*(nMn + nHr*60); -zulu_time: - while( sqlite3Isspace(*zDate) ){ zDate++; } - return *zDate!=0; -} - -/* -** Parse times of the form HH:MM or HH:MM:SS or HH:MM:SS.FFFF. -** The HH, MM, and SS must each be exactly 2 digits. The -** fractional seconds FFFF can be one or more digits. -** -** Return 1 if there is a parsing error and 0 on success. -*/ -static int parseHhMmSs(const char *zDate, DateTime *p){ - int h, m, s; - double ms = 0.0; - if( getDigits(zDate, 2, 0, 24, ':', &h, 2, 0, 59, 0, &m)!=2 ){ - return 1; - } - zDate += 5; - if( *zDate==':' ){ - zDate++; - if( getDigits(zDate, 2, 0, 59, 0, &s)!=1 ){ - return 1; - } - zDate += 2; - if( *zDate=='.' && sqlite3Isdigit(zDate[1]) ){ - double rScale = 1.0; - zDate++; - while( sqlite3Isdigit(*zDate) ){ - ms = ms*10.0 + *zDate - '0'; - rScale *= 10.0; - zDate++; - } - ms /= rScale; - } - }else{ - s = 0; - } - p->validJD = 0; - p->validHMS = 1; - p->h = h; - p->m = m; - p->s = s + ms; - if( parseTimezone(zDate, p) ) return 1; - p->validTZ = (p->tz!=0)?1:0; - return 0; -} - -/* -** Convert from YYYY-MM-DD HH:MM:SS to julian day. We always assume -** that the YYYY-MM-DD is according to the Gregorian calendar. 
-** -** Reference: Meeus page 61 -*/ -static void computeJD(DateTime *p){ - int Y, M, D, A, B, X1, X2; - - if( p->validJD ) return; - if( p->validYMD ){ - Y = p->Y; - M = p->M; - D = p->D; - }else{ - Y = 2000; /* If no YMD specified, assume 2000-Jan-01 */ - M = 1; - D = 1; - } - if( M<=2 ){ - Y--; - M += 12; - } - A = Y/100; - B = 2 - A + (A/4); - X1 = 36525*(Y+4716)/100; - X2 = 306001*(M+1)/10000; - p->iJD = (sqlite3_int64)((X1 + X2 + D + B - 1524.5 ) * 86400000); - p->validJD = 1; - if( p->validHMS ){ - p->iJD += p->h*3600000 + p->m*60000 + (sqlite3_int64)(p->s*1000); - if( p->validTZ ){ - p->iJD -= p->tz*60000; - p->validYMD = 0; - p->validHMS = 0; - p->validTZ = 0; - } - } -} - -/* -** Parse dates of the form -** -** YYYY-MM-DD HH:MM:SS.FFF -** YYYY-MM-DD HH:MM:SS -** YYYY-MM-DD HH:MM -** YYYY-MM-DD -** -** Write the result into the DateTime structure and return 0 -** on success and 1 if the input string is not a well-formed -** date. -*/ -static int parseYyyyMmDd(const char *zDate, DateTime *p){ - int Y, M, D, neg; - - if( zDate[0]=='-' ){ - zDate++; - neg = 1; - }else{ - neg = 0; - } - if( getDigits(zDate,4,0,9999,'-',&Y,2,1,12,'-',&M,2,1,31,0,&D)!=3 ){ - return 1; - } - zDate += 10; - while( sqlite3Isspace(*zDate) || 'T'==*(u8*)zDate ){ zDate++; } - if( parseHhMmSs(zDate, p)==0 ){ - /* We got the time */ - }else if( *zDate==0 ){ - p->validHMS = 0; - }else{ - return 1; - } - p->validJD = 0; - p->validYMD = 1; - p->Y = neg ? -Y : Y; - p->M = M; - p->D = D; - if( p->validTZ ){ - computeJD(p); - } - return 0; -} - -/* -** Set the time to the current time reported by the VFS. -** -** Return the number of errors. -*/ -static int setDateTimeToCurrent(sqlite3_context *context, DateTime *p){ - p->iJD = sqlite3StmtCurrentTime(context); - if( p->iJD>0 ){ - p->validJD = 1; - return 0; - }else{ - return 1; - } -} - -/* -** Attempt to parse the given string into a Julian Day Number. Return -** the number of errors. -** -** The following are acceptable forms for the input string: -** -** YYYY-MM-DD HH:MM:SS.FFF +/-HH:MM -** DDDD.DD -** now -** -** In the first form, the +/-HH:MM is always optional. The fractional -** seconds extension (the ".FFF") is optional. The seconds portion -** (":SS.FFF") is option. The year and date can be omitted as long -** as there is a time string. The time string can be omitted as long -** as there is a year and date. -*/ -static int parseDateOrTime( - sqlite3_context *context, - const char *zDate, - DateTime *p -){ - double r; - if( parseYyyyMmDd(zDate,p)==0 ){ - return 0; - }else if( parseHhMmSs(zDate, p)==0 ){ - return 0; - }else if( sqlite3StrICmp(zDate,"now")==0){ - return setDateTimeToCurrent(context, p); - }else if( sqlite3AtoF(zDate, &r, sqlite3Strlen30(zDate), SQLITE_UTF8) ){ - p->iJD = (sqlite3_int64)(r*86400000.0 + 0.5); - p->validJD = 1; - return 0; - } - return 1; -} - -/* -** Compute the Year, Month, and Day from the julian day number. -*/ -static void computeYMD(DateTime *p){ - int Z, A, B, C, D, E, X1; - if( p->validYMD ) return; - if( !p->validJD ){ - p->Y = 2000; - p->M = 1; - p->D = 1; - }else{ - Z = (int)((p->iJD + 43200000)/86400000); - A = (int)((Z - 1867216.25)/36524.25); - A = Z + 1 + A - (A/4); - B = A + 1524; - C = (int)((B - 122.1)/365.25); - D = (36525*C)/100; - E = (int)((B-D)/30.6001); - X1 = (int)(30.6001*E); - p->D = B - D - X1; - p->M = E<14 ? E-1 : E-13; - p->Y = p->M>2 ? C - 4716 : C - 4715; - } - p->validYMD = 1; -} - -/* -** Compute the Hour, Minute, and Seconds from the julian day number. 
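The Meeus arithmetic inside computeJD() above can be checked against the reference value quoted earlier in this file's header comment (2000-01-01 00:00:00 is JD 2451544.5). The standalone program below simply repeats the same integer steps for that one date; it is a verification sketch, not SQLite code.

#include <stdio.h>

/* Re-run the computeJD() steps for 2000-01-01 00:00:00 (UTC).
** Expected: JD 2451544.5, i.e. iJD = 2451544.5 * 86400000 ms. */
int main(void){
  int Y = 2000, M = 1, D = 1;
  if( M<=2 ){ Y--; M += 12; }        /* January/February count as months 13/14 */
  {
    int A  = Y/100;                  /* 19      */
    int B  = 2 - A + (A/4);          /* -13     */
    int X1 = 36525*(Y+4716)/100;     /* 2452653 */
    int X2 = 306001*(M+1)/10000;     /* 428     */
    double jd = X1 + X2 + D + B - 1524.5;
    printf("JD  = %.1f\n", jd);                 /* 2451544.5       */
    printf("iJD = %.0f ms\n", jd*86400000.0);   /* 211813444800000 */
  }
  return 0;
}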
-*/ -static void computeHMS(DateTime *p){ - int s; - if( p->validHMS ) return; - computeJD(p); - s = (int)((p->iJD + 43200000) % 86400000); - p->s = s/1000.0; - s = (int)p->s; - p->s -= s; - p->h = s/3600; - s -= p->h*3600; - p->m = s/60; - p->s += s - p->m*60; - p->validHMS = 1; -} - -/* -** Compute both YMD and HMS -*/ -static void computeYMD_HMS(DateTime *p){ - computeYMD(p); - computeHMS(p); -} - -/* -** Clear the YMD and HMS and the TZ -*/ -static void clearYMD_HMS_TZ(DateTime *p){ - p->validYMD = 0; - p->validHMS = 0; - p->validTZ = 0; -} - -/* -** On recent Windows platforms, the localtime_s() function is available -** as part of the "Secure CRT". It is essentially equivalent to -** localtime_r() available under most POSIX platforms, except that the -** order of the parameters is reversed. -** -** See http://msdn.microsoft.com/en-us/library/a442x3ye(VS.80).aspx. -** -** If the user has not indicated to use localtime_r() or localtime_s() -** already, check for an MSVC build environment that provides -** localtime_s(). -*/ -#if !defined(HAVE_LOCALTIME_R) && !defined(HAVE_LOCALTIME_S) && \ - defined(_MSC_VER) && defined(_CRT_INSECURE_DEPRECATE) -#define HAVE_LOCALTIME_S 1 -#endif - -#ifndef SQLITE_OMIT_LOCALTIME -/* -** The following routine implements the rough equivalent of localtime_r() -** using whatever operating-system specific localtime facility that -** is available. This routine returns 0 on success and -** non-zero on any kind of error. -** -** If the sqlite3GlobalConfig.bLocaltimeFault variable is true then this -** routine will always fail. -** -** EVIDENCE-OF: R-62172-00036 In this implementation, the standard C -** library function localtime_r() is used to assist in the calculation of -** local time. -*/ -static int osLocaltime(time_t *t, struct tm *pTm){ - int rc; -#if (!defined(HAVE_LOCALTIME_R) || !HAVE_LOCALTIME_R) \ - && (!defined(HAVE_LOCALTIME_S) || !HAVE_LOCALTIME_S) - struct tm *pX; -#if SQLITE_THREADSAFE>0 - sqlite3_mutex *mutex = sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_MASTER); -#endif - sqlite3_mutex_enter(mutex); - pX = localtime(t); -#ifndef SQLITE_OMIT_BUILTIN_TEST - if( sqlite3GlobalConfig.bLocaltimeFault ) pX = 0; -#endif - if( pX ) *pTm = *pX; - sqlite3_mutex_leave(mutex); - rc = pX==0; -#else -#ifndef SQLITE_OMIT_BUILTIN_TEST - if( sqlite3GlobalConfig.bLocaltimeFault ) return 1; -#endif -#if defined(HAVE_LOCALTIME_R) && HAVE_LOCALTIME_R - rc = localtime_r(t, pTm)==0; -#else - rc = localtime_s(pTm, t); -#endif /* HAVE_LOCALTIME_R */ -#endif /* HAVE_LOCALTIME_R || HAVE_LOCALTIME_S */ - return rc; -} -#endif /* SQLITE_OMIT_LOCALTIME */ - - -#ifndef SQLITE_OMIT_LOCALTIME -/* -** Compute the difference (in milliseconds) between localtime and UTC -** (a.k.a. GMT) for the time value p where p is in UTC. If no error occurs, -** return this value and set *pRc to SQLITE_OK. -** -** Or, if an error does occur, set *pRc to SQLITE_ERROR. The returned value -** is undefined in this case. -*/ -static sqlite3_int64 localtimeOffset( - DateTime *p, /* Date at which to calculate offset */ - sqlite3_context *pCtx, /* Write error here if one occurs */ - int *pRc /* OUT: Error code. SQLITE_OK or ERROR */ -){ - DateTime x, y; - time_t t; - struct tm sLocal; - - /* Initialize the contents of sLocal to avoid a compiler warning. */ - memset(&sLocal, 0, sizeof(sLocal)); - - x = *p; - computeYMD_HMS(&x); - if( x.Y<1971 || x.Y>=2038 ){ - /* EVIDENCE-OF: R-55269-29598 The localtime_r() C function normally only - ** works for years between 1970 and 2037. 
For dates outside this range, - ** SQLite attempts to map the year into an equivalent year within this - ** range, do the calculation, then map the year back. - */ - x.Y = 2000; - x.M = 1; - x.D = 1; - x.h = 0; - x.m = 0; - x.s = 0.0; - } else { - int s = (int)(x.s + 0.5); - x.s = s; - } - x.tz = 0; - x.validJD = 0; - computeJD(&x); - t = (time_t)(x.iJD/1000 - 21086676*(i64)10000); - if( osLocaltime(&t, &sLocal) ){ - sqlite3_result_error(pCtx, "local time unavailable", -1); - *pRc = SQLITE_ERROR; - return 0; - } - y.Y = sLocal.tm_year + 1900; - y.M = sLocal.tm_mon + 1; - y.D = sLocal.tm_mday; - y.h = sLocal.tm_hour; - y.m = sLocal.tm_min; - y.s = sLocal.tm_sec; - y.validYMD = 1; - y.validHMS = 1; - y.validJD = 0; - y.validTZ = 0; - computeJD(&y); - *pRc = SQLITE_OK; - return y.iJD - x.iJD; -} -#endif /* SQLITE_OMIT_LOCALTIME */ - -/* -** Process a modifier to a date-time stamp. The modifiers are -** as follows: -** -** NNN days -** NNN hours -** NNN minutes -** NNN.NNNN seconds -** NNN months -** NNN years -** start of month -** start of year -** start of week -** start of day -** weekday N -** unixepoch -** localtime -** utc -** -** Return 0 on success and 1 if there is any kind of error. If the error -** is in a system call (i.e. localtime()), then an error message is written -** to context pCtx. If the error is an unrecognized modifier, no error is -** written to pCtx. -*/ -static int parseModifier(sqlite3_context *pCtx, const char *zMod, DateTime *p){ - int rc = 1; - int n; - double r; - char *z, zBuf[30]; - z = zBuf; - for(n=0; niJD += localtimeOffset(p, pCtx, &rc); - clearYMD_HMS_TZ(p); - } - break; - } -#endif - case 'u': { - /* - ** unixepoch - ** - ** Treat the current value of p->iJD as the number of - ** seconds since 1970. Convert to a real julian day number. - */ - if( strcmp(z, "unixepoch")==0 && p->validJD ){ - p->iJD = (p->iJD + 43200)/86400 + 21086676*(i64)10000000; - clearYMD_HMS_TZ(p); - rc = 0; - } -#ifndef SQLITE_OMIT_LOCALTIME - else if( strcmp(z, "utc")==0 ){ - sqlite3_int64 c1; - computeJD(p); - c1 = localtimeOffset(p, pCtx, &rc); - if( rc==SQLITE_OK ){ - p->iJD -= c1; - clearYMD_HMS_TZ(p); - p->iJD += c1 - localtimeOffset(p, pCtx, &rc); - } - } -#endif - break; - } - case 'w': { - /* - ** weekday N - ** - ** Move the date to the same time on the next occurrence of - ** weekday N where 0==Sunday, 1==Monday, and so forth. If the - ** date is already on the appropriate weekday, this is a no-op. - */ - if( strncmp(z, "weekday ", 8)==0 - && sqlite3AtoF(&z[8], &r, sqlite3Strlen30(&z[8]), SQLITE_UTF8) - && (n=(int)r)==r && n>=0 && r<7 ){ - sqlite3_int64 Z; - computeYMD_HMS(p); - p->validTZ = 0; - p->validJD = 0; - computeJD(p); - Z = ((p->iJD + 129600000)/86400000) % 7; - if( Z>n ) Z -= 7; - p->iJD += (n - Z)*86400000; - clearYMD_HMS_TZ(p); - rc = 0; - } - break; - } - case 's': { - /* - ** start of TTTTT - ** - ** Move the date backwards to the beginning of the current day, - ** or month or year. 
- */ - if( strncmp(z, "start of ", 9)!=0 ) break; - z += 9; - computeYMD(p); - p->validHMS = 1; - p->h = p->m = 0; - p->s = 0.0; - p->validTZ = 0; - p->validJD = 0; - if( strcmp(z,"month")==0 ){ - p->D = 1; - rc = 0; - }else if( strcmp(z,"year")==0 ){ - computeYMD(p); - p->M = 1; - p->D = 1; - rc = 0; - }else if( strcmp(z,"day")==0 ){ - rc = 0; - } - break; - } - case '+': - case '-': - case '0': - case '1': - case '2': - case '3': - case '4': - case '5': - case '6': - case '7': - case '8': - case '9': { - double rRounder; - for(n=1; z[n] && z[n]!=':' && !sqlite3Isspace(z[n]); n++){} - if( !sqlite3AtoF(z, &r, n, SQLITE_UTF8) ){ - rc = 1; - break; - } - if( z[n]==':' ){ - /* A modifier of the form (+|-)HH:MM:SS.FFF adds (or subtracts) the - ** specified number of hours, minutes, seconds, and fractional seconds - ** to the time. The ".FFF" may be omitted. The ":SS.FFF" may be - ** omitted. - */ - const char *z2 = z; - DateTime tx; - sqlite3_int64 day; - if( !sqlite3Isdigit(*z2) ) z2++; - memset(&tx, 0, sizeof(tx)); - if( parseHhMmSs(z2, &tx) ) break; - computeJD(&tx); - tx.iJD -= 43200000; - day = tx.iJD/86400000; - tx.iJD -= day*86400000; - if( z[0]=='-' ) tx.iJD = -tx.iJD; - computeJD(p); - clearYMD_HMS_TZ(p); - p->iJD += tx.iJD; - rc = 0; - break; - } - z += n; - while( sqlite3Isspace(*z) ) z++; - n = sqlite3Strlen30(z); - if( n>10 || n<3 ) break; - if( z[n-1]=='s' ){ z[n-1] = 0; n--; } - computeJD(p); - rc = 0; - rRounder = r<0 ? -0.5 : +0.5; - if( n==3 && strcmp(z,"day")==0 ){ - p->iJD += (sqlite3_int64)(r*86400000.0 + rRounder); - }else if( n==4 && strcmp(z,"hour")==0 ){ - p->iJD += (sqlite3_int64)(r*(86400000.0/24.0) + rRounder); - }else if( n==6 && strcmp(z,"minute")==0 ){ - p->iJD += (sqlite3_int64)(r*(86400000.0/(24.0*60.0)) + rRounder); - }else if( n==6 && strcmp(z,"second")==0 ){ - p->iJD += (sqlite3_int64)(r*(86400000.0/(24.0*60.0*60.0)) + rRounder); - }else if( n==5 && strcmp(z,"month")==0 ){ - int x, y; - computeYMD_HMS(p); - p->M += (int)r; - x = p->M>0 ? (p->M-1)/12 : (p->M-12)/12; - p->Y += x; - p->M -= x*12; - p->validJD = 0; - computeJD(p); - y = (int)r; - if( y!=r ){ - p->iJD += (sqlite3_int64)((r - y)*30.0*86400000.0 + rRounder); - } - }else if( n==4 && strcmp(z,"year")==0 ){ - int y = (int)r; - computeYMD_HMS(p); - p->Y += y; - p->validJD = 0; - computeJD(p); - if( y!=r ){ - p->iJD += (sqlite3_int64)((r - y)*365.0*86400000.0 + rRounder); - } - }else{ - rc = 1; - } - clearYMD_HMS_TZ(p); - break; - } - default: { - break; - } - } - return rc; -} - -/* -** Process time function arguments. argv[0] is a date-time stamp. -** argv[1] and following are modifiers. Parse them all and write -** the resulting time into the DateTime structure p. Return 0 -** on success and 1 if there are any errors. -** -** If there are zero parameters (if even argv[0] is undefined) -** then assume a default value of "now" for argv[0]. 
-*/ -static int isDate( - sqlite3_context *context, - int argc, - sqlite3_value **argv, - DateTime *p -){ - int i; - const unsigned char *z; - int eType; - memset(p, 0, sizeof(*p)); - if( argc==0 ){ - return setDateTimeToCurrent(context, p); - } - if( (eType = sqlite3_value_type(argv[0]))==SQLITE_FLOAT - || eType==SQLITE_INTEGER ){ - p->iJD = (sqlite3_int64)(sqlite3_value_double(argv[0])*86400000.0 + 0.5); - p->validJD = 1; - }else{ - z = sqlite3_value_text(argv[0]); - if( !z || parseDateOrTime(context, (char*)z, p) ){ - return 1; - } - } - for(i=1; iaLimit[SQLITE_LIMIT_LENGTH]+1 ); - testcase( n==(u64)db->aLimit[SQLITE_LIMIT_LENGTH] ); - if( n(u64)db->aLimit[SQLITE_LIMIT_LENGTH] ){ - sqlite3_result_error_toobig(context); - return; - }else{ - z = sqlite3DbMallocRaw(db, (int)n); - if( z==0 ){ - sqlite3_result_error_nomem(context); - return; - } - } - computeJD(&x); - computeYMD_HMS(&x); - for(i=j=0; zFmt[i]; i++){ - if( zFmt[i]!='%' ){ - z[j++] = zFmt[i]; - }else{ - i++; - switch( zFmt[i] ){ - case 'd': sqlite3_snprintf(3, &z[j],"%02d",x.D); j+=2; break; - case 'f': { - double s = x.s; - if( s>59.999 ) s = 59.999; - sqlite3_snprintf(7, &z[j],"%06.3f", s); - j += sqlite3Strlen30(&z[j]); - break; - } - case 'H': sqlite3_snprintf(3, &z[j],"%02d",x.h); j+=2; break; - case 'W': /* Fall thru */ - case 'j': { - int nDay; /* Number of days since 1st day of year */ - DateTime y = x; - y.validJD = 0; - y.M = 1; - y.D = 1; - computeJD(&y); - nDay = (int)((x.iJD-y.iJD+43200000)/86400000); - if( zFmt[i]=='W' ){ - int wd; /* 0=Monday, 1=Tuesday, ... 6=Sunday */ - wd = (int)(((x.iJD+43200000)/86400000)%7); - sqlite3_snprintf(3, &z[j],"%02d",(nDay+7-wd)/7); - j += 2; - }else{ - sqlite3_snprintf(4, &z[j],"%03d",nDay+1); - j += 3; - } - break; - } - case 'J': { - sqlite3_snprintf(20, &z[j],"%.16g",x.iJD/86400000.0); - j+=sqlite3Strlen30(&z[j]); - break; - } - case 'm': sqlite3_snprintf(3, &z[j],"%02d",x.M); j+=2; break; - case 'M': sqlite3_snprintf(3, &z[j],"%02d",x.m); j+=2; break; - case 's': { - sqlite3_snprintf(30,&z[j],"%lld", - (i64)(x.iJD/1000 - 21086676*(i64)10000)); - j += sqlite3Strlen30(&z[j]); - break; - } - case 'S': sqlite3_snprintf(3,&z[j],"%02d",(int)x.s); j+=2; break; - case 'w': { - z[j++] = (char)(((x.iJD+129600000)/86400000) % 7) + '0'; - break; - } - case 'Y': { - sqlite3_snprintf(5,&z[j],"%04d",x.Y); j+=sqlite3Strlen30(&z[j]); - break; - } - default: z[j++] = '%'; break; - } - } - } - z[j] = 0; - sqlite3_result_text(context, z, -1, - z==zBuf ? SQLITE_TRANSIENT : SQLITE_DYNAMIC); -} - -/* -** current_time() -** -** This function returns the same value as time('now'). -*/ -static void ctimeFunc( - sqlite3_context *context, - int NotUsed, - sqlite3_value **NotUsed2 -){ - UNUSED_PARAMETER2(NotUsed, NotUsed2); - timeFunc(context, 0, 0); -} - -/* -** current_date() -** -** This function returns the same value as date('now'). -*/ -static void cdateFunc( - sqlite3_context *context, - int NotUsed, - sqlite3_value **NotUsed2 -){ - UNUSED_PARAMETER2(NotUsed, NotUsed2); - dateFunc(context, 0, 0); -} - -/* -** current_timestamp() -** -** This function returns the same value as datetime('now'). 
-*/ -static void ctimestampFunc( - sqlite3_context *context, - int NotUsed, - sqlite3_value **NotUsed2 -){ - UNUSED_PARAMETER2(NotUsed, NotUsed2); - datetimeFunc(context, 0, 0); -} -#endif /* !defined(SQLITE_OMIT_DATETIME_FUNCS) */ - -#ifdef SQLITE_OMIT_DATETIME_FUNCS -/* -** If the library is compiled to omit the full-scale date and time -** handling (to get a smaller binary), the following minimal version -** of the functions current_time(), current_date() and current_timestamp() -** are included instead. This is to support column declarations that -** include "DEFAULT CURRENT_TIME" etc. -** -** This function uses the C-library functions time(), gmtime() -** and strftime(). The format string to pass to strftime() is supplied -** as the user-data for the function. -*/ -static void currentTimeFunc( - sqlite3_context *context, - int argc, - sqlite3_value **argv -){ - time_t t; - char *zFormat = (char *)sqlite3_user_data(context); - sqlite3 *db; - sqlite3_int64 iT; - struct tm *pTm; - struct tm sNow; - char zBuf[20]; - - UNUSED_PARAMETER(argc); - UNUSED_PARAMETER(argv); - - iT = sqlite3StmtCurrentTime(context); - if( iT<=0 ) return; - t = iT/1000 - 10000*(sqlite3_int64)21086676; -#ifdef HAVE_GMTIME_R - pTm = gmtime_r(&t, &sNow); -#else - sqlite3_mutex_enter(sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_MASTER)); - pTm = gmtime(&t); - if( pTm ) memcpy(&sNow, pTm, sizeof(sNow)); - sqlite3_mutex_leave(sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_MASTER)); -#endif - if( pTm ){ - strftime(zBuf, 20, zFormat, &sNow); - sqlite3_result_text(context, zBuf, -1, SQLITE_TRANSIENT); - } -} -#endif - -/* -** This function registered all of the above C functions as SQL -** functions. This should be the only routine in this file with -** external linkage. -*/ -SQLITE_PRIVATE void sqlite3RegisterDateTimeFunctions(void){ - static SQLITE_WSD FuncDef aDateTimeFuncs[] = { -#ifndef SQLITE_OMIT_DATETIME_FUNCS - FUNCTION(julianday, -1, 0, 0, juliandayFunc ), - FUNCTION(date, -1, 0, 0, dateFunc ), - FUNCTION(time, -1, 0, 0, timeFunc ), - FUNCTION(datetime, -1, 0, 0, datetimeFunc ), - FUNCTION(strftime, -1, 0, 0, strftimeFunc ), - FUNCTION(current_time, 0, 0, 0, ctimeFunc ), - FUNCTION(current_timestamp, 0, 0, 0, ctimestampFunc), - FUNCTION(current_date, 0, 0, 0, cdateFunc ), -#else - STR_FUNCTION(current_time, 0, "%H:%M:%S", 0, currentTimeFunc), - STR_FUNCTION(current_date, 0, "%Y-%m-%d", 0, currentTimeFunc), - STR_FUNCTION(current_timestamp, 0, "%Y-%m-%d %H:%M:%S", 0, currentTimeFunc), -#endif - }; - int i; - FuncDefHash *pHash = &GLOBAL(FuncDefHash, sqlite3GlobalFunctions); - FuncDef *aFunc = (FuncDef*)&GLOBAL(FuncDef, aDateTimeFuncs); - - for(i=0; ipMethods ){ - rc = pId->pMethods->xClose(pId); - pId->pMethods = 0; - } - return rc; -} -SQLITE_PRIVATE int sqlite3OsRead(sqlite3_file *id, void *pBuf, int amt, i64 offset){ - DO_OS_MALLOC_TEST(id); - return id->pMethods->xRead(id, pBuf, amt, offset); -} -SQLITE_PRIVATE int sqlite3OsWrite(sqlite3_file *id, const void *pBuf, int amt, i64 offset){ - DO_OS_MALLOC_TEST(id); - return id->pMethods->xWrite(id, pBuf, amt, offset); -} -SQLITE_PRIVATE int sqlite3OsTruncate(sqlite3_file *id, i64 size){ - return id->pMethods->xTruncate(id, size); -} -SQLITE_PRIVATE int sqlite3OsSync(sqlite3_file *id, int flags){ - DO_OS_MALLOC_TEST(id); - return id->pMethods->xSync(id, flags); -} -SQLITE_PRIVATE int sqlite3OsFileSize(sqlite3_file *id, i64 *pSize){ - DO_OS_MALLOC_TEST(id); - return id->pMethods->xFileSize(id, pSize); -} -SQLITE_PRIVATE int sqlite3OsLock(sqlite3_file *id, int lockType){ - 
DO_OS_MALLOC_TEST(id); - return id->pMethods->xLock(id, lockType); -} -SQLITE_PRIVATE int sqlite3OsUnlock(sqlite3_file *id, int lockType){ - return id->pMethods->xUnlock(id, lockType); -} -SQLITE_PRIVATE int sqlite3OsCheckReservedLock(sqlite3_file *id, int *pResOut){ - DO_OS_MALLOC_TEST(id); - return id->pMethods->xCheckReservedLock(id, pResOut); -} - -/* -** Use sqlite3OsFileControl() when we are doing something that might fail -** and we need to know about the failures. Use sqlite3OsFileControlHint() -** when simply tossing information over the wall to the VFS and we do not -** really care if the VFS receives and understands the information since it -** is only a hint and can be safely ignored. The sqlite3OsFileControlHint() -** routine has no return value since the return value would be meaningless. -*/ -SQLITE_PRIVATE int sqlite3OsFileControl(sqlite3_file *id, int op, void *pArg){ -#ifdef SQLITE_TEST - if( op!=SQLITE_FCNTL_COMMIT_PHASETWO ){ - /* Faults are not injected into COMMIT_PHASETWO because, assuming SQLite - ** is using a regular VFS, it is called after the corresponding - ** transaction has been committed. Injecting a fault at this point - ** confuses the test scripts - the COMMIT comand returns SQLITE_NOMEM - ** but the transaction is committed anyway. - ** - ** The core must call OsFileControl() though, not OsFileControlHint(), - ** as if a custom VFS (e.g. zipvfs) returns an error here, it probably - ** means the commit really has failed and an error should be returned - ** to the user. */ - DO_OS_MALLOC_TEST(id); - } -#endif - return id->pMethods->xFileControl(id, op, pArg); -} -SQLITE_PRIVATE void sqlite3OsFileControlHint(sqlite3_file *id, int op, void *pArg){ - (void)id->pMethods->xFileControl(id, op, pArg); -} - -SQLITE_PRIVATE int sqlite3OsSectorSize(sqlite3_file *id){ - int (*xSectorSize)(sqlite3_file*) = id->pMethods->xSectorSize; - return (xSectorSize ? xSectorSize(id) : SQLITE_DEFAULT_SECTOR_SIZE); -} -SQLITE_PRIVATE int sqlite3OsDeviceCharacteristics(sqlite3_file *id){ - return id->pMethods->xDeviceCharacteristics(id); -} -SQLITE_PRIVATE int sqlite3OsShmLock(sqlite3_file *id, int offset, int n, int flags){ - return id->pMethods->xShmLock(id, offset, n, flags); -} -SQLITE_PRIVATE void sqlite3OsShmBarrier(sqlite3_file *id){ - id->pMethods->xShmBarrier(id); -} -SQLITE_PRIVATE int sqlite3OsShmUnmap(sqlite3_file *id, int deleteFlag){ - return id->pMethods->xShmUnmap(id, deleteFlag); -} -SQLITE_PRIVATE int sqlite3OsShmMap( - sqlite3_file *id, /* Database file handle */ - int iPage, - int pgsz, - int bExtend, /* True to extend file if necessary */ - void volatile **pp /* OUT: Pointer to mapping */ -){ - DO_OS_MALLOC_TEST(id); - return id->pMethods->xShmMap(id, iPage, pgsz, bExtend, pp); -} - -#if SQLITE_MAX_MMAP_SIZE>0 -/* The real implementation of xFetch and xUnfetch */ -SQLITE_PRIVATE int sqlite3OsFetch(sqlite3_file *id, i64 iOff, int iAmt, void **pp){ - DO_OS_MALLOC_TEST(id); - return id->pMethods->xFetch(id, iOff, iAmt, pp); -} -SQLITE_PRIVATE int sqlite3OsUnfetch(sqlite3_file *id, i64 iOff, void *p){ - return id->pMethods->xUnfetch(id, iOff, p); -} -#else -/* No-op stubs to use when memory-mapped I/O is disabled */ -SQLITE_PRIVATE int sqlite3OsFetch(sqlite3_file *id, i64 iOff, int iAmt, void **pp){ - *pp = 0; - return SQLITE_OK; -} -SQLITE_PRIVATE int sqlite3OsUnfetch(sqlite3_file *id, i64 iOff, void *p){ - return SQLITE_OK; -} -#endif - -/* -** The next group of routines are convenience wrappers around the -** VFS methods. 
-*/ -SQLITE_PRIVATE int sqlite3OsOpen( - sqlite3_vfs *pVfs, - const char *zPath, - sqlite3_file *pFile, - int flags, - int *pFlagsOut -){ - int rc; - DO_OS_MALLOC_TEST(0); - /* 0x87f7f is a mask of SQLITE_OPEN_ flags that are valid to be passed - ** down into the VFS layer. Some SQLITE_OPEN_ flags (for example, - ** SQLITE_OPEN_FULLMUTEX or SQLITE_OPEN_SHAREDCACHE) are blocked before - ** reaching the VFS. */ - rc = pVfs->xOpen(pVfs, zPath, pFile, flags & 0x87f7f, pFlagsOut); - assert( rc==SQLITE_OK || pFile->pMethods==0 ); - return rc; -} -SQLITE_PRIVATE int sqlite3OsDelete(sqlite3_vfs *pVfs, const char *zPath, int dirSync){ - DO_OS_MALLOC_TEST(0); - assert( dirSync==0 || dirSync==1 ); - return pVfs->xDelete(pVfs, zPath, dirSync); -} -SQLITE_PRIVATE int sqlite3OsAccess( - sqlite3_vfs *pVfs, - const char *zPath, - int flags, - int *pResOut -){ - DO_OS_MALLOC_TEST(0); - return pVfs->xAccess(pVfs, zPath, flags, pResOut); -} -SQLITE_PRIVATE int sqlite3OsFullPathname( - sqlite3_vfs *pVfs, - const char *zPath, - int nPathOut, - char *zPathOut -){ - DO_OS_MALLOC_TEST(0); - zPathOut[0] = 0; - return pVfs->xFullPathname(pVfs, zPath, nPathOut, zPathOut); -} -#ifndef SQLITE_OMIT_LOAD_EXTENSION -SQLITE_PRIVATE void *sqlite3OsDlOpen(sqlite3_vfs *pVfs, const char *zPath){ - return pVfs->xDlOpen(pVfs, zPath); -} -SQLITE_PRIVATE void sqlite3OsDlError(sqlite3_vfs *pVfs, int nByte, char *zBufOut){ - pVfs->xDlError(pVfs, nByte, zBufOut); -} -SQLITE_PRIVATE void (*sqlite3OsDlSym(sqlite3_vfs *pVfs, void *pHdle, const char *zSym))(void){ - return pVfs->xDlSym(pVfs, pHdle, zSym); -} -SQLITE_PRIVATE void sqlite3OsDlClose(sqlite3_vfs *pVfs, void *pHandle){ - pVfs->xDlClose(pVfs, pHandle); -} -#endif /* SQLITE_OMIT_LOAD_EXTENSION */ -SQLITE_PRIVATE int sqlite3OsRandomness(sqlite3_vfs *pVfs, int nByte, char *zBufOut){ - return pVfs->xRandomness(pVfs, nByte, zBufOut); -} -SQLITE_PRIVATE int sqlite3OsSleep(sqlite3_vfs *pVfs, int nMicro){ - return pVfs->xSleep(pVfs, nMicro); -} -SQLITE_PRIVATE int sqlite3OsCurrentTimeInt64(sqlite3_vfs *pVfs, sqlite3_int64 *pTimeOut){ - int rc; - /* IMPLEMENTATION-OF: R-49045-42493 SQLite will use the xCurrentTimeInt64() - ** method to get the current date and time if that method is available - ** (if iVersion is 2 or greater and the function pointer is not NULL) and - ** will fall back to xCurrentTime() if xCurrentTimeInt64() is - ** unavailable. - */ - if( pVfs->iVersion>=2 && pVfs->xCurrentTimeInt64 ){ - rc = pVfs->xCurrentTimeInt64(pVfs, pTimeOut); - }else{ - double r; - rc = pVfs->xCurrentTime(pVfs, &r); - *pTimeOut = (sqlite3_int64)(r*86400000.0); - } - return rc; -} - -SQLITE_PRIVATE int sqlite3OsOpenMalloc( - sqlite3_vfs *pVfs, - const char *zFile, - sqlite3_file **ppFile, - int flags, - int *pOutFlags -){ - int rc = SQLITE_NOMEM; - sqlite3_file *pFile; - pFile = (sqlite3_file *)sqlite3MallocZero(pVfs->szOsFile); - if( pFile ){ - rc = sqlite3OsOpen(pVfs, zFile, pFile, flags, pOutFlags); - if( rc!=SQLITE_OK ){ - sqlite3_free(pFile); - }else{ - *ppFile = pFile; - } - } - return rc; -} -SQLITE_PRIVATE int sqlite3OsCloseFree(sqlite3_file *pFile){ - int rc = SQLITE_OK; - assert( pFile ); - rc = sqlite3OsClose(pFile); - sqlite3_free(pFile); - return rc; -} - -/* -** This function is a wrapper around the OS specific implementation of -** sqlite3_os_init(). The purpose of the wrapper is to provide the -** ability to simulate a malloc failure, so that the handling of an -** error in sqlite3_os_init() by the upper layers can be tested. 
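Application code can reach the same VFS methods that the wrappers above forward to through the public sqlite3_vfs object; a minimal sketch (error handling omitted) that pulls random bytes and the current time from the default VFS and converts the Julian-day milliseconds to Unix seconds, mirroring sqlite3OsCurrentTimeInt64():

#include <stdio.h>
#include <sqlite3.h>

int main(void){
  sqlite3_vfs *pVfs;
  sqlite3_int64 iNow = 0;
  char zRand[8];
  sqlite3_initialize();
  pVfs = sqlite3_vfs_find(0);                         /* the default VFS */
  pVfs->xRandomness(pVfs, (int)sizeof(zRand), zRand); /* same call sqlite3OsRandomness() makes */
  if( pVfs->iVersion>=2 && pVfs->xCurrentTimeInt64 ){
    pVfs->xCurrentTimeInt64(pVfs, &iNow);             /* Julian-day milliseconds */
  }else{
    double r = 0.0;
    pVfs->xCurrentTime(pVfs, &r);
    iNow = (sqlite3_int64)(r*86400000.0);
  }
  printf("unix time: %lld\n", (long long)(iNow/1000 - 21086676LL*10000));
  return 0;
}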
-*/ -SQLITE_PRIVATE int sqlite3OsInit(void){ - void *p = sqlite3_malloc(10); - if( p==0 ) return SQLITE_NOMEM; - sqlite3_free(p); - return sqlite3_os_init(); -} - -/* -** The list of all registered VFS implementations. -*/ -static sqlite3_vfs * SQLITE_WSD vfsList = 0; -#define vfsList GLOBAL(sqlite3_vfs *, vfsList) - -/* -** Locate a VFS by name. If no name is given, simply return the -** first VFS on the list. -*/ -SQLITE_API sqlite3_vfs *sqlite3_vfs_find(const char *zVfs){ - sqlite3_vfs *pVfs = 0; -#if SQLITE_THREADSAFE - sqlite3_mutex *mutex; -#endif -#ifndef SQLITE_OMIT_AUTOINIT - int rc = sqlite3_initialize(); - if( rc ) return 0; -#endif -#if SQLITE_THREADSAFE - mutex = sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_MASTER); -#endif - sqlite3_mutex_enter(mutex); - for(pVfs = vfsList; pVfs; pVfs=pVfs->pNext){ - if( zVfs==0 ) break; - if( strcmp(zVfs, pVfs->zName)==0 ) break; - } - sqlite3_mutex_leave(mutex); - return pVfs; -} - -/* -** Unlink a VFS from the linked list -*/ -static void vfsUnlink(sqlite3_vfs *pVfs){ - assert( sqlite3_mutex_held(sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_MASTER)) ); - if( pVfs==0 ){ - /* No-op */ - }else if( vfsList==pVfs ){ - vfsList = pVfs->pNext; - }else if( vfsList ){ - sqlite3_vfs *p = vfsList; - while( p->pNext && p->pNext!=pVfs ){ - p = p->pNext; - } - if( p->pNext==pVfs ){ - p->pNext = pVfs->pNext; - } - } -} - -/* -** Register a VFS with the system. It is harmless to register the same -** VFS multiple times. The new VFS becomes the default if makeDflt is -** true. -*/ -SQLITE_API int sqlite3_vfs_register(sqlite3_vfs *pVfs, int makeDflt){ - MUTEX_LOGIC(sqlite3_mutex *mutex;) -#ifndef SQLITE_OMIT_AUTOINIT - int rc = sqlite3_initialize(); - if( rc ) return rc; -#endif - MUTEX_LOGIC( mutex = sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_MASTER); ) - sqlite3_mutex_enter(mutex); - vfsUnlink(pVfs); - if( makeDflt || vfsList==0 ){ - pVfs->pNext = vfsList; - vfsList = pVfs; - }else{ - pVfs->pNext = vfsList->pNext; - vfsList->pNext = pVfs; - } - assert(vfsList); - sqlite3_mutex_leave(mutex); - return SQLITE_OK; -} - -/* -** Unregister a VFS so that it is no longer accessible. -*/ -SQLITE_API int sqlite3_vfs_unregister(sqlite3_vfs *pVfs){ -#if SQLITE_THREADSAFE - sqlite3_mutex *mutex = sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_MASTER); -#endif - sqlite3_mutex_enter(mutex); - vfsUnlink(pVfs); - sqlite3_mutex_leave(mutex); - return SQLITE_OK; -} - -/************** End of os.c **************************************************/ -/************** Begin file fault.c *******************************************/ -/* -** 2008 Jan 22 -** -** The author disclaims copyright to this source code. In place of -** a legal notice, here is a blessing: -** -** May you do good and not evil. -** May you find forgiveness for yourself and forgive others. -** May you share freely, never taking more than you give. -** -************************************************************************* -** -** This file contains code to support the concept of "benign" -** malloc failures (when the xMalloc() or xRealloc() method of the -** sqlite3_mem_methods structure fails to allocate a block of memory -** and returns 0). -** -** Most malloc failures are non-benign. After they occur, SQLite -** abandons the current operation and returns an error code (usually -** SQLITE_NOMEM) to the user. However, sometimes a fault is not necessarily -** fatal. For example, if a malloc fails while resizing a hash table, this -** is completely recoverable simply by not carrying out the resize. 
The -** hash table will continue to function normally. So a malloc failure -** during a hash table resize is a benign fault. -*/ - - -#ifndef SQLITE_OMIT_BUILTIN_TEST - -/* -** Global variables. -*/ -typedef struct BenignMallocHooks BenignMallocHooks; -static SQLITE_WSD struct BenignMallocHooks { - void (*xBenignBegin)(void); - void (*xBenignEnd)(void); -} sqlite3Hooks = { 0, 0 }; - -/* The "wsdHooks" macro will resolve to the appropriate BenignMallocHooks -** structure. If writable static data is unsupported on the target, -** we have to locate the state vector at run-time. In the more common -** case where writable static data is supported, wsdHooks can refer directly -** to the "sqlite3Hooks" state vector declared above. -*/ -#ifdef SQLITE_OMIT_WSD -# define wsdHooksInit \ - BenignMallocHooks *x = &GLOBAL(BenignMallocHooks,sqlite3Hooks) -# define wsdHooks x[0] -#else -# define wsdHooksInit -# define wsdHooks sqlite3Hooks -#endif - - -/* -** Register hooks to call when sqlite3BeginBenignMalloc() and -** sqlite3EndBenignMalloc() are called, respectively. -*/ -SQLITE_PRIVATE void sqlite3BenignMallocHooks( - void (*xBenignBegin)(void), - void (*xBenignEnd)(void) -){ - wsdHooksInit; - wsdHooks.xBenignBegin = xBenignBegin; - wsdHooks.xBenignEnd = xBenignEnd; -} - -/* -** This (sqlite3EndBenignMalloc()) is called by SQLite code to indicate that -** subsequent malloc failures are benign. A call to sqlite3EndBenignMalloc() -** indicates that subsequent malloc failures are non-benign. -*/ -SQLITE_PRIVATE void sqlite3BeginBenignMalloc(void){ - wsdHooksInit; - if( wsdHooks.xBenignBegin ){ - wsdHooks.xBenignBegin(); - } -} -SQLITE_PRIVATE void sqlite3EndBenignMalloc(void){ - wsdHooksInit; - if( wsdHooks.xBenignEnd ){ - wsdHooks.xBenignEnd(); - } -} - -#endif /* #ifndef SQLITE_OMIT_BUILTIN_TEST */ - -/************** End of fault.c ***********************************************/ -/************** Begin file mem0.c ********************************************/ -/* -** 2008 October 28 -** -** The author disclaims copyright to this source code. In place of -** a legal notice, here is a blessing: -** -** May you do good and not evil. -** May you find forgiveness for yourself and forgive others. -** May you share freely, never taking more than you give. -** -************************************************************************* -** -** This file contains a no-op memory allocation drivers for use when -** SQLITE_ZERO_MALLOC is defined. The allocation drivers implemented -** here always fail. SQLite will not operate with these drivers. These -** are merely placeholders. Real drivers must be substituted using -** sqlite3_config() before SQLite will operate. -*/ - -/* -** This version of the memory allocator is the default. It is -** used when no other memory allocator is specified using compile-time -** macros. -*/ -#ifdef SQLITE_ZERO_MALLOC - -/* -** No-op versions of all memory allocation routines -*/ -static void *sqlite3MemMalloc(int nByte){ return 0; } -static void sqlite3MemFree(void *pPrior){ return; } -static void *sqlite3MemRealloc(void *pPrior, int nByte){ return 0; } -static int sqlite3MemSize(void *pPrior){ return 0; } -static int sqlite3MemRoundup(int n){ return n; } -static int sqlite3MemInit(void *NotUsed){ return SQLITE_OK; } -static void sqlite3MemShutdown(void *NotUsed){ return; } - -/* -** This routine is the only routine in this file with external linkage. 
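A small sketch of the recoverable-failure pattern described above for hash table resizing; the Hash and Entry types are invented for the example:

#include <stdlib.h>

typedef struct Entry Entry;
struct Entry { Entry *pNext; unsigned h; void *pData; };
typedef struct { Entry **aBucket; size_t nBucket; } Hash;

/* Try to double the bucket array. If the allocation fails the failure is
** benign: the table keeps working with the old, smaller array, only with
** longer collision chains. */
static void hashMaybeGrow(Hash *h){
  size_t i, newSize = h->nBucket*2;
  Entry **aNew = calloc(newSize, sizeof(Entry*));
  if( aNew==0 ) return;                 /* benign: skip the resize */
  for(i=0; i<h->nBucket; i++){
    Entry *p, *pNext;
    for(p=h->aBucket[i]; p; p=pNext){
      pNext = p->pNext;
      p->pNext = aNew[p->h % newSize];
      aNew[p->h % newSize] = p;
    }
  }
  free(h->aBucket);
  h->aBucket = aNew;
  h->nBucket = newSize;
}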
-** -** Populate the low-level memory allocation function pointers in -** sqlite3GlobalConfig.m with pointers to the routines in this file. -*/ -SQLITE_PRIVATE void sqlite3MemSetDefault(void){ - static const sqlite3_mem_methods defaultMethods = { - sqlite3MemMalloc, - sqlite3MemFree, - sqlite3MemRealloc, - sqlite3MemSize, - sqlite3MemRoundup, - sqlite3MemInit, - sqlite3MemShutdown, - 0 - }; - sqlite3_config(SQLITE_CONFIG_MALLOC, &defaultMethods); -} - -#endif /* SQLITE_ZERO_MALLOC */ - -/************** End of mem0.c ************************************************/ -/************** Begin file mem1.c ********************************************/ -/* -** 2007 August 14 -** -** The author disclaims copyright to this source code. In place of -** a legal notice, here is a blessing: -** -** May you do good and not evil. -** May you find forgiveness for yourself and forgive others. -** May you share freely, never taking more than you give. -** -************************************************************************* -** -** This file contains low-level memory allocation drivers for when -** SQLite will use the standard C-library malloc/realloc/free interface -** to obtain the memory it needs. -** -** This file contains implementations of the low-level memory allocation -** routines specified in the sqlite3_mem_methods object. The content of -** this file is only used if SQLITE_SYSTEM_MALLOC is defined. The -** SQLITE_SYSTEM_MALLOC macro is defined automatically if neither the -** SQLITE_MEMDEBUG nor the SQLITE_WIN32_MALLOC macros are defined. The -** default configuration is to use memory allocation routines in this -** file. -** -** C-preprocessor macro summary: -** -** HAVE_MALLOC_USABLE_SIZE The configure script sets this symbol if -** the malloc_usable_size() interface exists -** on the target platform. Or, this symbol -** can be set manually, if desired. -** If an equivalent interface exists by -** a different name, using a separate -D -** option to rename it. -** -** SQLITE_WITHOUT_ZONEMALLOC Some older macs lack support for the zone -** memory allocator. Set this symbol to enable -** building on older macs. -** -** SQLITE_WITHOUT_MSIZE Set this symbol to disable the use of -** _msize() on windows systems. This might -** be necessary when compiling for Delphi, -** for example. -*/ - -/* -** This version of the memory allocator is the default. It is -** used when no other memory allocator is specified using compile-time -** macros. -*/ -#ifdef SQLITE_SYSTEM_MALLOC -#if defined(__APPLE__) && !defined(SQLITE_WITHOUT_ZONEMALLOC) - -/* -** Use the zone allocator available on apple products unless the -** SQLITE_WITHOUT_ZONEMALLOC symbol is defined. -*/ -#include -#include -#include -static malloc_zone_t* _sqliteZone_; -#define SQLITE_MALLOC(x) malloc_zone_malloc(_sqliteZone_, (x)) -#define SQLITE_FREE(x) malloc_zone_free(_sqliteZone_, (x)); -#define SQLITE_REALLOC(x,y) malloc_zone_realloc(_sqliteZone_, (x), (y)) -#define SQLITE_MALLOCSIZE(x) \ - (_sqliteZone_ ? _sqliteZone_->size(_sqliteZone_,x) : malloc_size(x)) - -#else /* if not __APPLE__ */ - -/* -** Use standard C library malloc and free on non-Apple systems. -** Also used by Apple systems if SQLITE_WITHOUT_ZONEMALLOC is defined. -*/ -#define SQLITE_MALLOC(x) malloc(x) -#define SQLITE_FREE(x) free(x) -#define SQLITE_REALLOC(x,y) realloc((x),(y)) - -/* -** The malloc.h header file is needed for malloc_usable_size() function -** on some systems (e.g. Linux). 
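The probe below shows the kind of allocation-size query the SQLITE_MALLOCSIZE macro resolves to on common platforms; it is only a sketch and covers just MSVC, macOS and glibc:

#include <stdio.h>
#include <stdlib.h>
#if defined(_MSC_VER)
# include <malloc.h>
# define ALLOC_SIZE(p) _msize(p)
#elif defined(__APPLE__)
# include <malloc/malloc.h>
# define ALLOC_SIZE(p) malloc_size(p)
#elif defined(__GLIBC__)
# include <malloc.h>
# define ALLOC_SIZE(p) malloc_usable_size(p)
#endif

int main(void){
  void *p = malloc(100);
#ifdef ALLOC_SIZE
  /* The reported size may exceed the request because of allocator rounding. */
  printf("requested 100, usable %zu\n", (size_t)ALLOC_SIZE(p));
#endif
  free(p);
  return 0;
}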
-*/ -#if defined(HAVE_MALLOC_H) && defined(HAVE_MALLOC_USABLE_SIZE) -# define SQLITE_USE_MALLOC_H -# define SQLITE_USE_MALLOC_USABLE_SIZE -/* -** The MSVCRT has malloc_usable_size(), but it is called _msize(). The -** use of _msize() is automatic, but can be disabled by compiling with -** -DSQLITE_WITHOUT_MSIZE. Using the _msize() function also requires -** the malloc.h header file. -*/ -#elif defined(_MSC_VER) && !defined(SQLITE_WITHOUT_MSIZE) -# define SQLITE_USE_MALLOC_H -# define SQLITE_USE_MSIZE -#endif - -/* -** Include the malloc.h header file, if necessary. Also set define macro -** SQLITE_MALLOCSIZE to the appropriate function name, which is _msize() -** for MSVC and malloc_usable_size() for most other systems (e.g. Linux). -** The memory size function can always be overridden manually by defining -** the macro SQLITE_MALLOCSIZE to the desired function name. -*/ -#if defined(SQLITE_USE_MALLOC_H) -# include -# if defined(SQLITE_USE_MALLOC_USABLE_SIZE) -# if !defined(SQLITE_MALLOCSIZE) -# define SQLITE_MALLOCSIZE(x) malloc_usable_size(x) -# endif -# elif defined(SQLITE_USE_MSIZE) -# if !defined(SQLITE_MALLOCSIZE) -# define SQLITE_MALLOCSIZE _msize -# endif -# endif -#endif /* defined(SQLITE_USE_MALLOC_H) */ - -#endif /* __APPLE__ or not __APPLE__ */ - -/* -** Like malloc(), but remember the size of the allocation -** so that we can find it later using sqlite3MemSize(). -** -** For this low-level routine, we are guaranteed that nByte>0 because -** cases of nByte<=0 will be intercepted and dealt with by higher level -** routines. -*/ -static void *sqlite3MemMalloc(int nByte){ -#ifdef SQLITE_MALLOCSIZE - void *p = SQLITE_MALLOC( nByte ); - if( p==0 ){ - testcase( sqlite3GlobalConfig.xLog!=0 ); - sqlite3_log(SQLITE_NOMEM, "failed to allocate %u bytes of memory", nByte); - } - return p; -#else - sqlite3_int64 *p; - assert( nByte>0 ); - nByte = ROUND8(nByte); - p = SQLITE_MALLOC( nByte+8 ); - if( p ){ - p[0] = nByte; - p++; - }else{ - testcase( sqlite3GlobalConfig.xLog!=0 ); - sqlite3_log(SQLITE_NOMEM, "failed to allocate %u bytes of memory", nByte); - } - return (void *)p; -#endif -} - -/* -** Like free() but works for allocations obtained from sqlite3MemMalloc() -** or sqlite3MemRealloc(). -** -** For this low-level routine, we already know that pPrior!=0 since -** cases where pPrior==0 will have been intecepted and dealt with -** by higher-level routines. -*/ -static void sqlite3MemFree(void *pPrior){ -#ifdef SQLITE_MALLOCSIZE - SQLITE_FREE(pPrior); -#else - sqlite3_int64 *p = (sqlite3_int64*)pPrior; - assert( pPrior!=0 ); - p--; - SQLITE_FREE(p); -#endif -} - -/* -** Report the allocated size of a prior return from xMalloc() -** or xRealloc(). -*/ -static int sqlite3MemSize(void *pPrior){ -#ifdef SQLITE_MALLOCSIZE - return pPrior ? (int)SQLITE_MALLOCSIZE(pPrior) : 0; -#else - sqlite3_int64 *p; - if( pPrior==0 ) return 0; - p = (sqlite3_int64*)pPrior; - p--; - return (int)p[0]; -#endif -} - -/* -** Like realloc(). Resize an allocation previously obtained from -** sqlite3MemMalloc(). -** -** For this low-level interface, we know that pPrior!=0. Cases where -** pPrior==0 while have been intercepted by higher-level routine and -** redirected to xMalloc. Similarly, we know that nByte>0 becauses -** cases where nByte<=0 will have been intercepted by higher-level -** routines and redirected to xFree. 
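A stripped-down version of the size-prefix technique used by sqlite3MemMalloc()/sqlite3MemSize() above when no malloc_usable_size() equivalent is available: keep the request size in an eight-byte header just in front of the pointer handed to the caller (a production version would also round sizes and mind alignment, as the real code does):

#include <stdlib.h>
#include <stdint.h>

static void *xMalloc(size_t n){
  int64_t *p = malloc(n + 8);
  if( p==0 ) return 0;
  p[0] = (int64_t)n;    /* remember the requested size */
  return (void*)&p[1];  /* the caller sees memory just past the header */
}

static size_t xSize(void *pUser){
  return pUser ? (size_t)((int64_t*)pUser)[-1] : 0;
}

static void xFree(void *pUser){
  if( pUser ) free((int64_t*)pUser - 1);
}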
-*/ -static void *sqlite3MemRealloc(void *pPrior, int nByte){ -#ifdef SQLITE_MALLOCSIZE - void *p = SQLITE_REALLOC(pPrior, nByte); - if( p==0 ){ - testcase( sqlite3GlobalConfig.xLog!=0 ); - sqlite3_log(SQLITE_NOMEM, - "failed memory resize %u to %u bytes", - SQLITE_MALLOCSIZE(pPrior), nByte); - } - return p; -#else - sqlite3_int64 *p = (sqlite3_int64*)pPrior; - assert( pPrior!=0 && nByte>0 ); - assert( nByte==ROUND8(nByte) ); /* EV: R-46199-30249 */ - p--; - p = SQLITE_REALLOC(p, nByte+8 ); - if( p ){ - p[0] = nByte; - p++; - }else{ - testcase( sqlite3GlobalConfig.xLog!=0 ); - sqlite3_log(SQLITE_NOMEM, - "failed memory resize %u to %u bytes", - sqlite3MemSize(pPrior), nByte); - } - return (void*)p; -#endif -} - -/* -** Round up a request size to the next valid allocation size. -*/ -static int sqlite3MemRoundup(int n){ - return ROUND8(n); -} - -/* -** Initialize this module. -*/ -static int sqlite3MemInit(void *NotUsed){ -#if defined(__APPLE__) && !defined(SQLITE_WITHOUT_ZONEMALLOC) - int cpuCount; - size_t len; - if( _sqliteZone_ ){ - return SQLITE_OK; - } - len = sizeof(cpuCount); - /* One usually wants to use hw.acctivecpu for MT decisions, but not here */ - sysctlbyname("hw.ncpu", &cpuCount, &len, NULL, 0); - if( cpuCount>1 ){ - /* defer MT decisions to system malloc */ - _sqliteZone_ = malloc_default_zone(); - }else{ - /* only 1 core, use our own zone to contention over global locks, - ** e.g. we have our own dedicated locks */ - bool success; - malloc_zone_t* newzone = malloc_create_zone(4096, 0); - malloc_set_zone_name(newzone, "Sqlite_Heap"); - do{ - success = OSAtomicCompareAndSwapPtrBarrier(NULL, newzone, - (void * volatile *)&_sqliteZone_); - }while(!_sqliteZone_); - if( !success ){ - /* somebody registered a zone first */ - malloc_destroy_zone(newzone); - } - } -#endif - UNUSED_PARAMETER(NotUsed); - return SQLITE_OK; -} - -/* -** Deinitialize this module. -*/ -static void sqlite3MemShutdown(void *NotUsed){ - UNUSED_PARAMETER(NotUsed); - return; -} - -/* -** This routine is the only routine in this file with external linkage. -** -** Populate the low-level memory allocation function pointers in -** sqlite3GlobalConfig.m with pointers to the routines in this file. -*/ -SQLITE_PRIVATE void sqlite3MemSetDefault(void){ - static const sqlite3_mem_methods defaultMethods = { - sqlite3MemMalloc, - sqlite3MemFree, - sqlite3MemRealloc, - sqlite3MemSize, - sqlite3MemRoundup, - sqlite3MemInit, - sqlite3MemShutdown, - 0 - }; - sqlite3_config(SQLITE_CONFIG_MALLOC, &defaultMethods); -} - -#endif /* SQLITE_SYSTEM_MALLOC */ - -/************** End of mem1.c ************************************************/ -/************** Begin file mem2.c ********************************************/ -/* -** 2007 August 15 -** -** The author disclaims copyright to this source code. In place of -** a legal notice, here is a blessing: -** -** May you do good and not evil. -** May you find forgiveness for yourself and forgive others. -** May you share freely, never taking more than you give. -** -************************************************************************* -** -** This file contains low-level memory allocation drivers for when -** SQLite will use the standard C-library malloc/realloc/free interface -** to obtain the memory it needs while adding lots of additional debugging -** information to each allocation in order to help detect and fix memory -** leaks and memory usage errors. 
-** -** This file contains implementations of the low-level memory allocation -** routines specified in the sqlite3_mem_methods object. -*/ - -/* -** This version of the memory allocator is used only if the -** SQLITE_MEMDEBUG macro is defined -*/ -#ifdef SQLITE_MEMDEBUG - -/* -** The backtrace functionality is only available with GLIBC -*/ -#ifdef __GLIBC__ - extern int backtrace(void**,int); - extern void backtrace_symbols_fd(void*const*,int,int); -#else -# define backtrace(A,B) 1 -# define backtrace_symbols_fd(A,B,C) -#endif -/* #include */ - -/* -** Each memory allocation looks like this: -** -** ------------------------------------------------------------------------ -** | Title | backtrace pointers | MemBlockHdr | allocation | EndGuard | -** ------------------------------------------------------------------------ -** -** The application code sees only a pointer to the allocation. We have -** to back up from the allocation pointer to find the MemBlockHdr. The -** MemBlockHdr tells us the size of the allocation and the number of -** backtrace pointers. There is also a guard word at the end of the -** MemBlockHdr. -*/ -struct MemBlockHdr { - i64 iSize; /* Size of this allocation */ - struct MemBlockHdr *pNext, *pPrev; /* Linked list of all unfreed memory */ - char nBacktrace; /* Number of backtraces on this alloc */ - char nBacktraceSlots; /* Available backtrace slots */ - u8 nTitle; /* Bytes of title; includes '\0' */ - u8 eType; /* Allocation type code */ - int iForeGuard; /* Guard word for sanity */ -}; - -/* -** Guard words -*/ -#define FOREGUARD 0x80F5E153 -#define REARGUARD 0xE4676B53 - -/* -** Number of malloc size increments to track. -*/ -#define NCSIZE 1000 - -/* -** All of the static variables used by this module are collected -** into a single structure named "mem". This is to keep the -** static variables organized and to reduce namespace pollution -** when this module is combined with other in the amalgamation. -*/ -static struct { - - /* - ** Mutex to control access to the memory allocation subsystem. - */ - sqlite3_mutex *mutex; - - /* - ** Head and tail of a linked list of all outstanding allocations - */ - struct MemBlockHdr *pFirst; - struct MemBlockHdr *pLast; - - /* - ** The number of levels of backtrace to save in new allocations. - */ - int nBacktrace; - void (*xBacktrace)(int, int, void **); - - /* - ** Title text to insert in front of each block - */ - int nTitle; /* Bytes of zTitle to save. Includes '\0' and padding */ - char zTitle[100]; /* The title text */ - - /* - ** sqlite3MallocDisallow() increments the following counter. - ** sqlite3MallocAllow() decrements it. - */ - int disallow; /* Do not allow memory allocation */ - - /* - ** Gather statistics on the sizes of memory allocations. - ** nAlloc[i] is the number of allocation attempts of i*8 - ** bytes. i==NCSIZE is the number of allocation attempts for - ** sizes more than NCSIZE*8 bytes. 
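A minimal sketch of the fore/rear guard idea in the layout above: plant known constants on both sides of the user region and verify them on free to detect over- and underruns (the real allocator also stores the title, backtrace slots and type byte shown in MemBlockHdr):

#include <assert.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#define FORE 0x80F5E153u
#define REAR 0xE4676B53u

static void *dbgMalloc(size_t n){
  /* layout: [fore guard][size][user bytes][rear guard] */
  uint8_t *p = malloc(sizeof(uint32_t) + sizeof(size_t) + n + sizeof(uint32_t));
  if( p==0 ) return 0;
  memcpy(p, &(uint32_t){FORE}, sizeof(uint32_t));
  memcpy(p + sizeof(uint32_t), &n, sizeof(size_t));
  memcpy(p + sizeof(uint32_t) + sizeof(size_t) + n, &(uint32_t){REAR}, sizeof(uint32_t));
  return p + sizeof(uint32_t) + sizeof(size_t);
}

static void dbgFree(void *pUser){
  uint8_t *p = (uint8_t*)pUser - sizeof(size_t) - sizeof(uint32_t);
  uint32_t fore, rear;
  size_t n;
  memcpy(&fore, p, sizeof(fore));
  memcpy(&n, p + sizeof(uint32_t), sizeof(n));
  memcpy(&rear, (uint8_t*)pUser + n, sizeof(rear));
  assert( fore==FORE && rear==REAR );   /* guards intact: no overwrite detected */
  free(p);
}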
- */ - int nAlloc[NCSIZE]; /* Total number of allocations */ - int nCurrent[NCSIZE]; /* Current number of allocations */ - int mxCurrent[NCSIZE]; /* Highwater mark for nCurrent */ - -} mem; - - -/* -** Adjust memory usage statistics -*/ -static void adjustStats(int iSize, int increment){ - int i = ROUND8(iSize)/8; - if( i>NCSIZE-1 ){ - i = NCSIZE - 1; - } - if( increment>0 ){ - mem.nAlloc[i]++; - mem.nCurrent[i]++; - if( mem.nCurrent[i]>mem.mxCurrent[i] ){ - mem.mxCurrent[i] = mem.nCurrent[i]; - } - }else{ - mem.nCurrent[i]--; - assert( mem.nCurrent[i]>=0 ); - } -} - -/* -** Given an allocation, find the MemBlockHdr for that allocation. -** -** This routine checks the guards at either end of the allocation and -** if they are incorrect it asserts. -*/ -static struct MemBlockHdr *sqlite3MemsysGetHeader(void *pAllocation){ - struct MemBlockHdr *p; - int *pInt; - u8 *pU8; - int nReserve; - - p = (struct MemBlockHdr*)pAllocation; - p--; - assert( p->iForeGuard==(int)FOREGUARD ); - nReserve = ROUND8(p->iSize); - pInt = (int*)pAllocation; - pU8 = (u8*)pAllocation; - assert( pInt[nReserve/sizeof(int)]==(int)REARGUARD ); - /* This checks any of the "extra" bytes allocated due - ** to rounding up to an 8 byte boundary to ensure - ** they haven't been overwritten. - */ - while( nReserve-- > p->iSize ) assert( pU8[nReserve]==0x65 ); - return p; -} - -/* -** Return the number of bytes currently allocated at address p. -*/ -static int sqlite3MemSize(void *p){ - struct MemBlockHdr *pHdr; - if( !p ){ - return 0; - } - pHdr = sqlite3MemsysGetHeader(p); - return (int)pHdr->iSize; -} - -/* -** Initialize the memory allocation subsystem. -*/ -static int sqlite3MemInit(void *NotUsed){ - UNUSED_PARAMETER(NotUsed); - assert( (sizeof(struct MemBlockHdr)&7) == 0 ); - if( !sqlite3GlobalConfig.bMemstat ){ - /* If memory status is enabled, then the malloc.c wrapper will already - ** hold the STATIC_MEM mutex when the routines here are invoked. */ - mem.mutex = sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_MEM); - } - return SQLITE_OK; -} - -/* -** Deinitialize the memory allocation subsystem. -*/ -static void sqlite3MemShutdown(void *NotUsed){ - UNUSED_PARAMETER(NotUsed); - mem.mutex = 0; -} - -/* -** Round up a request size to the next valid allocation size. -*/ -static int sqlite3MemRoundup(int n){ - return ROUND8(n); -} - -/* -** Fill a buffer with pseudo-random bytes. This is used to preset -** the content of a new memory allocation to unpredictable values and -** to clear the content of a freed allocation to unpredictable values. -*/ -static void randomFill(char *pBuf, int nByte){ - unsigned int x, y, r; - x = SQLITE_PTR_TO_INT(pBuf); - y = nByte | 1; - while( nByte >= 4 ){ - x = (x>>1) ^ (-(int)(x&1) & 0xd0000001); - y = y*1103515245 + 12345; - r = x ^ y; - *(int*)pBuf = r; - pBuf += 4; - nByte -= 4; - } - while( nByte-- > 0 ){ - x = (x>>1) ^ (-(int)(x&1) & 0xd0000001); - y = y*1103515245 + 12345; - r = x ^ y; - *(pBuf++) = r & 0xff; - } -} - -/* -** Allocate nByte bytes of memory. 
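The per-size counters gathered here are internal, but the public API exposes overall figures; a short usage sketch (memory statistics are enabled by default):

#include <stdio.h>
#include <sqlite3.h>

int main(void){
  sqlite3 *db;
  sqlite3_open(":memory:", &db);
  sqlite3_exec(db, "CREATE TABLE t(x); INSERT INTO t VALUES(zeroblob(100000));",
               0, 0, 0);
  printf("currently allocated: %lld bytes\n", (long long)sqlite3_memory_used());
  printf("high-water mark:     %lld bytes\n", (long long)sqlite3_memory_highwater(0));
  sqlite3_close(db);
  return 0;
}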
-*/ -static void *sqlite3MemMalloc(int nByte){ - struct MemBlockHdr *pHdr; - void **pBt; - char *z; - int *pInt; - void *p = 0; - int totalSize; - int nReserve; - sqlite3_mutex_enter(mem.mutex); - assert( mem.disallow==0 ); - nReserve = ROUND8(nByte); - totalSize = nReserve + sizeof(*pHdr) + sizeof(int) + - mem.nBacktrace*sizeof(void*) + mem.nTitle; - p = malloc(totalSize); - if( p ){ - z = p; - pBt = (void**)&z[mem.nTitle]; - pHdr = (struct MemBlockHdr*)&pBt[mem.nBacktrace]; - pHdr->pNext = 0; - pHdr->pPrev = mem.pLast; - if( mem.pLast ){ - mem.pLast->pNext = pHdr; - }else{ - mem.pFirst = pHdr; - } - mem.pLast = pHdr; - pHdr->iForeGuard = FOREGUARD; - pHdr->eType = MEMTYPE_HEAP; - pHdr->nBacktraceSlots = mem.nBacktrace; - pHdr->nTitle = mem.nTitle; - if( mem.nBacktrace ){ - void *aAddr[40]; - pHdr->nBacktrace = backtrace(aAddr, mem.nBacktrace+1)-1; - memcpy(pBt, &aAddr[1], pHdr->nBacktrace*sizeof(void*)); - assert(pBt[0]); - if( mem.xBacktrace ){ - mem.xBacktrace(nByte, pHdr->nBacktrace-1, &aAddr[1]); - } - }else{ - pHdr->nBacktrace = 0; - } - if( mem.nTitle ){ - memcpy(z, mem.zTitle, mem.nTitle); - } - pHdr->iSize = nByte; - adjustStats(nByte, +1); - pInt = (int*)&pHdr[1]; - pInt[nReserve/sizeof(int)] = REARGUARD; - randomFill((char*)pInt, nByte); - memset(((char*)pInt)+nByte, 0x65, nReserve-nByte); - p = (void*)pInt; - } - sqlite3_mutex_leave(mem.mutex); - return p; -} - -/* -** Free memory. -*/ -static void sqlite3MemFree(void *pPrior){ - struct MemBlockHdr *pHdr; - void **pBt; - char *z; - assert( sqlite3GlobalConfig.bMemstat || sqlite3GlobalConfig.bCoreMutex==0 - || mem.mutex!=0 ); - pHdr = sqlite3MemsysGetHeader(pPrior); - pBt = (void**)pHdr; - pBt -= pHdr->nBacktraceSlots; - sqlite3_mutex_enter(mem.mutex); - if( pHdr->pPrev ){ - assert( pHdr->pPrev->pNext==pHdr ); - pHdr->pPrev->pNext = pHdr->pNext; - }else{ - assert( mem.pFirst==pHdr ); - mem.pFirst = pHdr->pNext; - } - if( pHdr->pNext ){ - assert( pHdr->pNext->pPrev==pHdr ); - pHdr->pNext->pPrev = pHdr->pPrev; - }else{ - assert( mem.pLast==pHdr ); - mem.pLast = pHdr->pPrev; - } - z = (char*)pBt; - z -= pHdr->nTitle; - adjustStats((int)pHdr->iSize, -1); - randomFill(z, sizeof(void*)*pHdr->nBacktraceSlots + sizeof(*pHdr) + - (int)pHdr->iSize + sizeof(int) + pHdr->nTitle); - free(z); - sqlite3_mutex_leave(mem.mutex); -} - -/* -** Change the size of an existing memory allocation. -** -** For this debugging implementation, we *always* make a copy of the -** allocation into a new place in memory. In this way, if the -** higher level code is using pointer to the old allocation, it is -** much more likely to break and we are much more liking to find -** the error. -*/ -static void *sqlite3MemRealloc(void *pPrior, int nByte){ - struct MemBlockHdr *pOldHdr; - void *pNew; - assert( mem.disallow==0 ); - assert( (nByte & 7)==0 ); /* EV: R-46199-30249 */ - pOldHdr = sqlite3MemsysGetHeader(pPrior); - pNew = sqlite3MemMalloc(nByte); - if( pNew ){ - memcpy(pNew, pPrior, (int)(nByteiSize ? nByte : pOldHdr->iSize)); - if( nByte>pOldHdr->iSize ){ - randomFill(&((char*)pNew)[pOldHdr->iSize], nByte - (int)pOldHdr->iSize); - } - sqlite3MemFree(pPrior); - } - return pNew; -} - -/* -** Populate the low-level memory allocation function pointers in -** sqlite3GlobalConfig.m with pointers to the routines in this file. 
-*/ -SQLITE_PRIVATE void sqlite3MemSetDefault(void){ - static const sqlite3_mem_methods defaultMethods = { - sqlite3MemMalloc, - sqlite3MemFree, - sqlite3MemRealloc, - sqlite3MemSize, - sqlite3MemRoundup, - sqlite3MemInit, - sqlite3MemShutdown, - 0 - }; - sqlite3_config(SQLITE_CONFIG_MALLOC, &defaultMethods); -} - -/* -** Set the "type" of an allocation. -*/ -SQLITE_PRIVATE void sqlite3MemdebugSetType(void *p, u8 eType){ - if( p && sqlite3GlobalConfig.m.xMalloc==sqlite3MemMalloc ){ - struct MemBlockHdr *pHdr; - pHdr = sqlite3MemsysGetHeader(p); - assert( pHdr->iForeGuard==FOREGUARD ); - pHdr->eType = eType; - } -} - -/* -** Return TRUE if the mask of type in eType matches the type of the -** allocation p. Also return true if p==NULL. -** -** This routine is designed for use within an assert() statement, to -** verify the type of an allocation. For example: -** -** assert( sqlite3MemdebugHasType(p, MEMTYPE_DB) ); -*/ -SQLITE_PRIVATE int sqlite3MemdebugHasType(void *p, u8 eType){ - int rc = 1; - if( p && sqlite3GlobalConfig.m.xMalloc==sqlite3MemMalloc ){ - struct MemBlockHdr *pHdr; - pHdr = sqlite3MemsysGetHeader(p); - assert( pHdr->iForeGuard==FOREGUARD ); /* Allocation is valid */ - if( (pHdr->eType&eType)==0 ){ - rc = 0; - } - } - return rc; -} - -/* -** Return TRUE if the mask of type in eType matches no bits of the type of the -** allocation p. Also return true if p==NULL. -** -** This routine is designed for use within an assert() statement, to -** verify the type of an allocation. For example: -** -** assert( sqlite3MemdebugNoType(p, MEMTYPE_DB) ); -*/ -SQLITE_PRIVATE int sqlite3MemdebugNoType(void *p, u8 eType){ - int rc = 1; - if( p && sqlite3GlobalConfig.m.xMalloc==sqlite3MemMalloc ){ - struct MemBlockHdr *pHdr; - pHdr = sqlite3MemsysGetHeader(p); - assert( pHdr->iForeGuard==FOREGUARD ); /* Allocation is valid */ - if( (pHdr->eType&eType)!=0 ){ - rc = 0; - } - } - return rc; -} - -/* -** Set the number of backtrace levels kept for each allocation. -** A value of zero turns off backtracing. The number is always rounded -** up to a multiple of 2. -*/ -SQLITE_PRIVATE void sqlite3MemdebugBacktrace(int depth){ - if( depth<0 ){ depth = 0; } - if( depth>20 ){ depth = 20; } - depth = (depth+1)&0xfe; - mem.nBacktrace = depth; -} - -SQLITE_PRIVATE void sqlite3MemdebugBacktraceCallback(void (*xBacktrace)(int, int, void **)){ - mem.xBacktrace = xBacktrace; -} - -/* -** Set the title string for subsequent allocations. -*/ -SQLITE_PRIVATE void sqlite3MemdebugSettitle(const char *zTitle){ - unsigned int n = sqlite3Strlen30(zTitle) + 1; - sqlite3_mutex_enter(mem.mutex); - if( n>=sizeof(mem.zTitle) ) n = sizeof(mem.zTitle)-1; - memcpy(mem.zTitle, zTitle, n); - mem.zTitle[n] = 0; - mem.nTitle = ROUND8(n); - sqlite3_mutex_leave(mem.mutex); -} - -SQLITE_PRIVATE void sqlite3MemdebugSync(){ - struct MemBlockHdr *pHdr; - for(pHdr=mem.pFirst; pHdr; pHdr=pHdr->pNext){ - void **pBt = (void**)pHdr; - pBt -= pHdr->nBacktraceSlots; - mem.xBacktrace((int)pHdr->iSize, pHdr->nBacktrace-1, &pBt[1]); - } -} - -/* -** Open the file indicated and write a log of all unfreed memory -** allocations into that log. 
-*/ -SQLITE_PRIVATE void sqlite3MemdebugDump(const char *zFilename){ - FILE *out; - struct MemBlockHdr *pHdr; - void **pBt; - int i; - out = fopen(zFilename, "w"); - if( out==0 ){ - fprintf(stderr, "** Unable to output memory debug output log: %s **\n", - zFilename); - return; - } - for(pHdr=mem.pFirst; pHdr; pHdr=pHdr->pNext){ - char *z = (char*)pHdr; - z -= pHdr->nBacktraceSlots*sizeof(void*) + pHdr->nTitle; - fprintf(out, "**** %lld bytes at %p from %s ****\n", - pHdr->iSize, &pHdr[1], pHdr->nTitle ? z : "???"); - if( pHdr->nBacktrace ){ - fflush(out); - pBt = (void**)pHdr; - pBt -= pHdr->nBacktraceSlots; - backtrace_symbols_fd(pBt, pHdr->nBacktrace, fileno(out)); - fprintf(out, "\n"); - } - } - fprintf(out, "COUNTS:\n"); - for(i=0; i=1 ); - size = mem3.aPool[i-1].u.hdr.size4x/4; - assert( size==mem3.aPool[i+size-1].u.hdr.prevSize ); - assert( size>=2 ); - if( size <= MX_SMALL ){ - memsys3UnlinkFromList(i, &mem3.aiSmall[size-2]); - }else{ - hash = size % N_HASH; - memsys3UnlinkFromList(i, &mem3.aiHash[hash]); - } -} - -/* -** Link the chunk at mem3.aPool[i] so that is on the list rooted -** at *pRoot. -*/ -static void memsys3LinkIntoList(u32 i, u32 *pRoot){ - assert( sqlite3_mutex_held(mem3.mutex) ); - mem3.aPool[i].u.list.next = *pRoot; - mem3.aPool[i].u.list.prev = 0; - if( *pRoot ){ - mem3.aPool[*pRoot].u.list.prev = i; - } - *pRoot = i; -} - -/* -** Link the chunk at index i into either the appropriate -** small chunk list, or into the large chunk hash table. -*/ -static void memsys3Link(u32 i){ - u32 size, hash; - assert( sqlite3_mutex_held(mem3.mutex) ); - assert( i>=1 ); - assert( (mem3.aPool[i-1].u.hdr.size4x & 1)==0 ); - size = mem3.aPool[i-1].u.hdr.size4x/4; - assert( size==mem3.aPool[i+size-1].u.hdr.prevSize ); - assert( size>=2 ); - if( size <= MX_SMALL ){ - memsys3LinkIntoList(i, &mem3.aiSmall[size-2]); - }else{ - hash = size % N_HASH; - memsys3LinkIntoList(i, &mem3.aiHash[hash]); - } -} - -/* -** If the STATIC_MEM mutex is not already held, obtain it now. The mutex -** will already be held (obtained by code in malloc.c) if -** sqlite3GlobalConfig.bMemStat is true. -*/ -static void memsys3Enter(void){ - if( sqlite3GlobalConfig.bMemstat==0 && mem3.mutex==0 ){ - mem3.mutex = sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_MEM); - } - sqlite3_mutex_enter(mem3.mutex); -} -static void memsys3Leave(void){ - sqlite3_mutex_leave(mem3.mutex); -} - -/* -** Called when we are unable to satisfy an allocation of nBytes. -*/ -static void memsys3OutOfMemory(int nByte){ - if( !mem3.alarmBusy ){ - mem3.alarmBusy = 1; - assert( sqlite3_mutex_held(mem3.mutex) ); - sqlite3_mutex_leave(mem3.mutex); - sqlite3_release_memory(nByte); - sqlite3_mutex_enter(mem3.mutex); - mem3.alarmBusy = 0; - } -} - - -/* -** Chunk i is a free chunk that has been unlinked. Adjust its -** size parameters for check-out and return a pointer to the -** user portion of the chunk. -*/ -static void *memsys3Checkout(u32 i, u32 nBlock){ - u32 x; - assert( sqlite3_mutex_held(mem3.mutex) ); - assert( i>=1 ); - assert( mem3.aPool[i-1].u.hdr.size4x/4==nBlock ); - assert( mem3.aPool[i+nBlock-1].u.hdr.prevSize==nBlock ); - x = mem3.aPool[i-1].u.hdr.size4x; - mem3.aPool[i-1].u.hdr.size4x = nBlock*4 | 1 | (x&2); - mem3.aPool[i+nBlock-1].u.hdr.prevSize = nBlock; - mem3.aPool[i+nBlock-1].u.hdr.size4x |= 2; - return &mem3.aPool[i]; -} - -/* -** Carve a piece off of the end of the mem3.iMaster free chunk. -** Return a pointer to the new allocation. Or, if the master chunk -** is not large enough, return 0. 
-*/ -static void *memsys3FromMaster(u32 nBlock){ - assert( sqlite3_mutex_held(mem3.mutex) ); - assert( mem3.szMaster>=nBlock ); - if( nBlock>=mem3.szMaster-1 ){ - /* Use the entire master */ - void *p = memsys3Checkout(mem3.iMaster, mem3.szMaster); - mem3.iMaster = 0; - mem3.szMaster = 0; - mem3.mnMaster = 0; - return p; - }else{ - /* Split the master block. Return the tail. */ - u32 newi, x; - newi = mem3.iMaster + mem3.szMaster - nBlock; - assert( newi > mem3.iMaster+1 ); - mem3.aPool[mem3.iMaster+mem3.szMaster-1].u.hdr.prevSize = nBlock; - mem3.aPool[mem3.iMaster+mem3.szMaster-1].u.hdr.size4x |= 2; - mem3.aPool[newi-1].u.hdr.size4x = nBlock*4 + 1; - mem3.szMaster -= nBlock; - mem3.aPool[newi-1].u.hdr.prevSize = mem3.szMaster; - x = mem3.aPool[mem3.iMaster-1].u.hdr.size4x & 2; - mem3.aPool[mem3.iMaster-1].u.hdr.size4x = mem3.szMaster*4 | x; - if( mem3.szMaster < mem3.mnMaster ){ - mem3.mnMaster = mem3.szMaster; - } - return (void*)&mem3.aPool[newi]; - } -} - -/* -** *pRoot is the head of a list of free chunks of the same size -** or same size hash. In other words, *pRoot is an entry in either -** mem3.aiSmall[] or mem3.aiHash[]. -** -** This routine examines all entries on the given list and tries -** to coalesce each entries with adjacent free chunks. -** -** If it sees a chunk that is larger than mem3.iMaster, it replaces -** the current mem3.iMaster with the new larger chunk. In order for -** this mem3.iMaster replacement to work, the master chunk must be -** linked into the hash tables. That is not the normal state of -** affairs, of course. The calling routine must link the master -** chunk before invoking this routine, then must unlink the (possibly -** changed) master chunk once this routine has finished. -*/ -static void memsys3Merge(u32 *pRoot){ - u32 iNext, prev, size, i, x; - - assert( sqlite3_mutex_held(mem3.mutex) ); - for(i=*pRoot; i>0; i=iNext){ - iNext = mem3.aPool[i].u.list.next; - size = mem3.aPool[i-1].u.hdr.size4x; - assert( (size&1)==0 ); - if( (size&2)==0 ){ - memsys3UnlinkFromList(i, pRoot); - assert( i > mem3.aPool[i-1].u.hdr.prevSize ); - prev = i - mem3.aPool[i-1].u.hdr.prevSize; - if( prev==iNext ){ - iNext = mem3.aPool[prev].u.list.next; - } - memsys3Unlink(prev); - size = i + size/4 - prev; - x = mem3.aPool[prev-1].u.hdr.size4x & 2; - mem3.aPool[prev-1].u.hdr.size4x = size*4 | x; - mem3.aPool[prev+size-1].u.hdr.prevSize = size; - memsys3Link(prev); - i = prev; - }else{ - size /= 4; - } - if( size>mem3.szMaster ){ - mem3.iMaster = i; - mem3.szMaster = size; - } - } -} - -/* -** Return a block of memory of at least nBytes in size. -** Return NULL if unable. -** -** This function assumes that the necessary mutexes, if any, are -** already held by the caller. Hence "Unsafe". -*/ -static void *memsys3MallocUnsafe(int nByte){ - u32 i; - u32 nBlock; - u32 toFree; - - assert( sqlite3_mutex_held(mem3.mutex) ); - assert( sizeof(Mem3Block)==8 ); - if( nByte<=12 ){ - nBlock = 2; - }else{ - nBlock = (nByte + 11)/8; - } - assert( nBlock>=2 ); - - /* STEP 1: - ** Look for an entry of the correct size in either the small - ** chunk table or in the large chunk hash table. This is - ** successful most of the time (about 9 times out of 10). 
- */ - if( nBlock <= MX_SMALL ){ - i = mem3.aiSmall[nBlock-2]; - if( i>0 ){ - memsys3UnlinkFromList(i, &mem3.aiSmall[nBlock-2]); - return memsys3Checkout(i, nBlock); - } - }else{ - int hash = nBlock % N_HASH; - for(i=mem3.aiHash[hash]; i>0; i=mem3.aPool[i].u.list.next){ - if( mem3.aPool[i-1].u.hdr.size4x/4==nBlock ){ - memsys3UnlinkFromList(i, &mem3.aiHash[hash]); - return memsys3Checkout(i, nBlock); - } - } - } - - /* STEP 2: - ** Try to satisfy the allocation by carving a piece off of the end - ** of the master chunk. This step usually works if step 1 fails. - */ - if( mem3.szMaster>=nBlock ){ - return memsys3FromMaster(nBlock); - } - - - /* STEP 3: - ** Loop through the entire memory pool. Coalesce adjacent free - ** chunks. Recompute the master chunk as the largest free chunk. - ** Then try again to satisfy the allocation by carving a piece off - ** of the end of the master chunk. This step happens very - ** rarely (we hope!) - */ - for(toFree=nBlock*16; toFree<(mem3.nPool*16); toFree *= 2){ - memsys3OutOfMemory(toFree); - if( mem3.iMaster ){ - memsys3Link(mem3.iMaster); - mem3.iMaster = 0; - mem3.szMaster = 0; - } - for(i=0; i=nBlock ){ - return memsys3FromMaster(nBlock); - } - } - } - - /* If none of the above worked, then we fail. */ - return 0; -} - -/* -** Free an outstanding memory allocation. -** -** This function assumes that the necessary mutexes, if any, are -** already held by the caller. Hence "Unsafe". -*/ -static void memsys3FreeUnsafe(void *pOld){ - Mem3Block *p = (Mem3Block*)pOld; - int i; - u32 size, x; - assert( sqlite3_mutex_held(mem3.mutex) ); - assert( p>mem3.aPool && p<&mem3.aPool[mem3.nPool] ); - i = p - mem3.aPool; - assert( (mem3.aPool[i-1].u.hdr.size4x&1)==1 ); - size = mem3.aPool[i-1].u.hdr.size4x/4; - assert( i+size<=mem3.nPool+1 ); - mem3.aPool[i-1].u.hdr.size4x &= ~1; - mem3.aPool[i+size-1].u.hdr.prevSize = size; - mem3.aPool[i+size-1].u.hdr.size4x &= ~2; - memsys3Link(i); - - /* Try to expand the master using the newly freed chunk */ - if( mem3.iMaster ){ - while( (mem3.aPool[mem3.iMaster-1].u.hdr.size4x&2)==0 ){ - size = mem3.aPool[mem3.iMaster-1].u.hdr.prevSize; - mem3.iMaster -= size; - mem3.szMaster += size; - memsys3Unlink(mem3.iMaster); - x = mem3.aPool[mem3.iMaster-1].u.hdr.size4x & 2; - mem3.aPool[mem3.iMaster-1].u.hdr.size4x = mem3.szMaster*4 | x; - mem3.aPool[mem3.iMaster+mem3.szMaster-1].u.hdr.prevSize = mem3.szMaster; - } - x = mem3.aPool[mem3.iMaster-1].u.hdr.size4x & 2; - while( (mem3.aPool[mem3.iMaster+mem3.szMaster-1].u.hdr.size4x&1)==0 ){ - memsys3Unlink(mem3.iMaster+mem3.szMaster); - mem3.szMaster += mem3.aPool[mem3.iMaster+mem3.szMaster-1].u.hdr.size4x/4; - mem3.aPool[mem3.iMaster-1].u.hdr.size4x = mem3.szMaster*4 | x; - mem3.aPool[mem3.iMaster+mem3.szMaster-1].u.hdr.prevSize = mem3.szMaster; - } - } -} - -/* -** Return the size of an outstanding allocation, in bytes. The -** size returned omits the 8-byte header overhead. This only -** works for chunks that are currently checked out. -*/ -static int memsys3Size(void *p){ - Mem3Block *pBlock; - if( p==0 ) return 0; - pBlock = (Mem3Block*)p; - assert( (pBlock[-1].u.hdr.size4x&1)!=0 ); - return (pBlock[-1].u.hdr.size4x&~3)*2 - 4; -} - -/* -** Round up a request size to the next valid allocation size. -*/ -static int memsys3Roundup(int n){ - if( n<=12 ){ - return 12; - }else{ - return ((n+11)&~7) - 4; - } -} - -/* -** Allocate nBytes of memory. 
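As a worked check of the two size routines above: a 21-byte request rounds up via ((21+11)&~7)-4 to 28 usable bytes; memsys3MallocUnsafe() turns the same request into nBlock = (21+11)/8 = 4 eight-byte blocks, and memsys3Size() on the resulting chunk reports 4*8-4 = 28 bytes, so the figures agree, with the remaining 4 bytes of the 32-byte chunk taken by per-chunk bookkeeping.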
-*/ -static void *memsys3Malloc(int nBytes){ - sqlite3_int64 *p; - assert( nBytes>0 ); /* malloc.c filters out 0 byte requests */ - memsys3Enter(); - p = memsys3MallocUnsafe(nBytes); - memsys3Leave(); - return (void*)p; -} - -/* -** Free memory. -*/ -static void memsys3Free(void *pPrior){ - assert( pPrior ); - memsys3Enter(); - memsys3FreeUnsafe(pPrior); - memsys3Leave(); -} - -/* -** Change the size of an existing memory allocation -*/ -static void *memsys3Realloc(void *pPrior, int nBytes){ - int nOld; - void *p; - if( pPrior==0 ){ - return sqlite3_malloc(nBytes); - } - if( nBytes<=0 ){ - sqlite3_free(pPrior); - return 0; - } - nOld = memsys3Size(pPrior); - if( nBytes<=nOld && nBytes>=nOld-128 ){ - return pPrior; - } - memsys3Enter(); - p = memsys3MallocUnsafe(nBytes); - if( p ){ - if( nOld>1)!=(size&1) ){ - fprintf(out, "%p tail checkout bit is incorrect\n", &mem3.aPool[i]); - assert( 0 ); - break; - } - if( size&1 ){ - fprintf(out, "%p %6d bytes checked out\n", &mem3.aPool[i], (size/4)*8-8); - }else{ - fprintf(out, "%p %6d bytes free%s\n", &mem3.aPool[i], (size/4)*8-8, - i==mem3.iMaster ? " **master**" : ""); - } - } - for(i=0; i0; j=mem3.aPool[j].u.list.next){ - fprintf(out, " %p(%d)", &mem3.aPool[j], - (mem3.aPool[j-1].u.hdr.size4x/4)*8-8); - } - fprintf(out, "\n"); - } - for(i=0; i0; j=mem3.aPool[j].u.list.next){ - fprintf(out, " %p(%d)", &mem3.aPool[j], - (mem3.aPool[j-1].u.hdr.size4x/4)*8-8); - } - fprintf(out, "\n"); - } - fprintf(out, "master=%d\n", mem3.iMaster); - fprintf(out, "nowUsed=%d\n", mem3.nPool*8 - mem3.szMaster*8); - fprintf(out, "mxUsed=%d\n", mem3.nPool*8 - mem3.mnMaster*8); - sqlite3_mutex_leave(mem3.mutex); - if( out==stdout ){ - fflush(stdout); - }else{ - fclose(out); - } -#else - UNUSED_PARAMETER(zFilename); -#endif -} - -/* -** This routine is the only routine in this file with external -** linkage. -** -** Populate the low-level memory allocation function pointers in -** sqlite3GlobalConfig.m with pointers to the routines in this file. The -** arguments specify the block of memory to manage. -** -** This routine is only called by sqlite3_config(), and therefore -** is not required to be threadsafe (it is not). -*/ -SQLITE_PRIVATE const sqlite3_mem_methods *sqlite3MemGetMemsys3(void){ - static const sqlite3_mem_methods mempoolMethods = { - memsys3Malloc, - memsys3Free, - memsys3Realloc, - memsys3Size, - memsys3Roundup, - memsys3Init, - memsys3Shutdown, - 0 - }; - return &mempoolMethods; -} - -#endif /* SQLITE_ENABLE_MEMSYS3 */ - -/************** End of mem3.c ************************************************/ -/************** Begin file mem5.c ********************************************/ -/* -** 2007 October 14 -** -** The author disclaims copyright to this source code. In place of -** a legal notice, here is a blessing: -** -** May you do good and not evil. -** May you find forgiveness for yourself and forgive others. -** May you share freely, never taking more than you give. -** -************************************************************************* -** This file contains the C functions that implement a memory -** allocation subsystem for use by SQLite. -** -** This version of the memory allocation subsystem omits all -** use of malloc(). The application gives SQLite a block of memory -** before calling sqlite3_initialize() from which allocations -** are made and returned by the xMalloc() and xRealloc() -** implementations. Once sqlite3_initialize() has been called, -** the amount of memory available to SQLite is fixed and cannot -** be changed. 
-** -** This version of the memory allocation subsystem is included -** in the build only if SQLITE_ENABLE_MEMSYS5 is defined. -** -** This memory allocator uses the following algorithm: -** -** 1. All memory allocations sizes are rounded up to a power of 2. -** -** 2. If two adjacent free blocks are the halves of a larger block, -** then the two blocks are coalesed into the single larger block. -** -** 3. New memory is allocated from the first available free block. -** -** This algorithm is described in: J. M. Robson. "Bounds for Some Functions -** Concerning Dynamic Storage Allocation". Journal of the Association for -** Computing Machinery, Volume 21, Number 8, July 1974, pages 491-499. -** -** Let n be the size of the largest allocation divided by the minimum -** allocation size (after rounding all sizes up to a power of 2.) Let M -** be the maximum amount of memory ever outstanding at one time. Let -** N be the total amount of memory available for allocation. Robson -** proved that this memory allocator will never breakdown due to -** fragmentation as long as the following constraint holds: -** -** N >= M*(1 + log2(n)/2) - n + 1 -** -** The sqlite3_status() logic tracks the maximum values of n and M so -** that an application can, at any time, verify this constraint. -*/ - -/* -** This version of the memory allocator is used only when -** SQLITE_ENABLE_MEMSYS5 is defined. -*/ -#ifdef SQLITE_ENABLE_MEMSYS5 - -/* -** A minimum allocation is an instance of the following structure. -** Larger allocations are an array of these structures where the -** size of the array is a power of 2. -** -** The size of this object must be a power of two. That fact is -** verified in memsys5Init(). -*/ -typedef struct Mem5Link Mem5Link; -struct Mem5Link { - int next; /* Index of next free chunk */ - int prev; /* Index of previous free chunk */ -}; - -/* -** Maximum size of any allocation is ((1<=0 && i=0 && iLogsize<=LOGMAX ); - assert( (mem5.aCtrl[i] & CTRL_LOGSIZE)==iLogsize ); - - next = MEM5LINK(i)->next; - prev = MEM5LINK(i)->prev; - if( prev<0 ){ - mem5.aiFreelist[iLogsize] = next; - }else{ - MEM5LINK(prev)->next = next; - } - if( next>=0 ){ - MEM5LINK(next)->prev = prev; - } -} - -/* -** Link the chunk at mem5.aPool[i] so that is on the iLogsize -** free list. -*/ -static void memsys5Link(int i, int iLogsize){ - int x; - assert( sqlite3_mutex_held(mem5.mutex) ); - assert( i>=0 && i=0 && iLogsize<=LOGMAX ); - assert( (mem5.aCtrl[i] & CTRL_LOGSIZE)==iLogsize ); - - x = MEM5LINK(i)->next = mem5.aiFreelist[iLogsize]; - MEM5LINK(i)->prev = -1; - if( x>=0 ){ - assert( xprev = i; - } - mem5.aiFreelist[iLogsize] = i; -} - -/* -** If the STATIC_MEM mutex is not already held, obtain it now. The mutex -** will already be held (obtained by code in malloc.c) if -** sqlite3GlobalConfig.bMemStat is true. -*/ -static void memsys5Enter(void){ - sqlite3_mutex_enter(mem5.mutex); -} -static void memsys5Leave(void){ - sqlite3_mutex_leave(mem5.mutex); -} - -/* -** Return the size of an outstanding allocation, in bytes. The -** size returned omits the 8-byte header overhead. This only -** works for chunks that are currently checked out. -*/ -static int memsys5Size(void *p){ - int iSize = 0; - if( p ){ - int i = (int)(((u8 *)p-mem5.zPool)/mem5.szAtom); - assert( i>=0 && i0 ); - - /* Keep track of the maximum allocation request. 
Even unfulfilled - ** requests are counted */ - if( (u32)nByte>mem5.maxRequest ){ - mem5.maxRequest = nByte; - } - - /* Abort if the requested allocation size is larger than the largest - ** power of two that we can represent using 32-bit signed integers. - */ - if( nByte > 0x40000000 ){ - return 0; - } - - /* Round nByte up to the next valid power of two */ - for(iFullSz=mem5.szAtom, iLogsize=0; iFullSzLOGMAX ){ - testcase( sqlite3GlobalConfig.xLog!=0 ); - sqlite3_log(SQLITE_NOMEM, "failed to allocate %u bytes", nByte); - return 0; - } - i = mem5.aiFreelist[iBin]; - memsys5Unlink(i, iBin); - while( iBin>iLogsize ){ - int newSize; - - iBin--; - newSize = 1 << iBin; - mem5.aCtrl[i+newSize] = CTRL_FREE | iBin; - memsys5Link(i+newSize, iBin); - } - mem5.aCtrl[i] = iLogsize; - - /* Update allocator performance statistics. */ - mem5.nAlloc++; - mem5.totalAlloc += iFullSz; - mem5.totalExcess += iFullSz - nByte; - mem5.currentCount++; - mem5.currentOut += iFullSz; - if( mem5.maxCount=0 && iBlock0 ); - assert( mem5.currentOut>=(size*mem5.szAtom) ); - mem5.currentCount--; - mem5.currentOut -= size*mem5.szAtom; - assert( mem5.currentOut>0 || mem5.currentCount==0 ); - assert( mem5.currentCount>0 || mem5.currentOut==0 ); - - mem5.aCtrl[iBlock] = CTRL_FREE | iLogsize; - while( ALWAYS(iLogsize>iLogsize) & 1 ){ - iBuddy = iBlock - size; - }else{ - iBuddy = iBlock + size; - } - assert( iBuddy>=0 ); - if( (iBuddy+(1<mem5.nBlock ) break; - if( mem5.aCtrl[iBuddy]!=(CTRL_FREE | iLogsize) ) break; - memsys5Unlink(iBuddy, iLogsize); - iLogsize++; - if( iBuddy0 ){ - memsys5Enter(); - p = memsys5MallocUnsafe(nBytes); - memsys5Leave(); - } - return (void*)p; -} - -/* -** Free memory. -** -** The outer layer memory allocator prevents this routine from -** being called with pPrior==0. -*/ -static void memsys5Free(void *pPrior){ - assert( pPrior!=0 ); - memsys5Enter(); - memsys5FreeUnsafe(pPrior); - memsys5Leave(); -} - -/* -** Change the size of an existing memory allocation. -** -** The outer layer memory allocator prevents this routine from -** being called with pPrior==0. -** -** nBytes is always a value obtained from a prior call to -** memsys5Round(). Hence nBytes is always a non-negative power -** of two. If nBytes==0 that means that an oversize allocation -** (an allocation larger than 0x40000000) was requested and this -** routine should return 0 without freeing pPrior. -*/ -static void *memsys5Realloc(void *pPrior, int nBytes){ - int nOld; - void *p; - assert( pPrior!=0 ); - assert( (nBytes&(nBytes-1))==0 ); /* EV: R-46199-30249 */ - assert( nBytes>=0 ); - if( nBytes==0 ){ - return 0; - } - nOld = memsys5Size(pPrior); - if( nBytes<=nOld ){ - return pPrior; - } - memsys5Enter(); - p = memsys5MallocUnsafe(nBytes); - if( p ){ - memcpy(p, pPrior, nOld); - memsys5FreeUnsafe(pPrior); - } - memsys5Leave(); - return p; -} - -/* -** Round up a request size to the next valid allocation size. If -** the allocation is too large to be handled by this allocation system, -** return 0. -** -** All allocations must be a power of two and must be expressed by a -** 32-bit signed integer. Hence the largest allocation is 0x40000000 -** or 1073741824 bytes. 
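Editor's note: the power-of-two rounding rule stated just above (and implemented by memsys5Roundup() below), together with the Robson bound quoted in the mem5.c header, can be checked in isolation. The helpers below are invented for illustration and are not SQLite code.

#include <stdint.h>

/* Round a request up to the next power of two, the same policy the
** comment above describes. */
static uint64_t pow2_roundup(uint64_t x){
  uint64_t p = 1;
  while( p<x ) p <<= 1;
  return p;
}

/* Robson's bound: N >= M*(1 + log2(n)/2) - n + 1, where n is the largest
** allocation divided by the smallest (both rounded to powers of two) and
** M is the peak outstanding memory.  Returns the smallest pool size the
** bound guarantees is fragmentation-proof (rounded up). */
static uint64_t robson_min_pool(uint64_t szMin, uint64_t szMax, uint64_t M){
  uint64_t n = pow2_roundup(szMax)/pow2_roundup(szMin);
  uint64_t log2n = 0;
  while( (((uint64_t)1)<<log2n) < n ) log2n++;
  return (M*(2 + log2n) + 1)/2 - n + 1;
}

For example, with a 64-byte minimum and 4096-byte maximum allocation (n = 64, log2(n) = 6) the bound works out to N >= 4*M - 63.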
-*/ -static int memsys5Roundup(int n){ - int iFullSz; - if( n > 0x40000000 ) return 0; - for(iFullSz=mem5.szAtom; iFullSz 0 -** memsys5Log(2) -> 1 -** memsys5Log(4) -> 2 -** memsys5Log(5) -> 3 -** memsys5Log(8) -> 3 -** memsys5Log(9) -> 4 -*/ -static int memsys5Log(int iValue){ - int iLog; - for(iLog=0; (iLog<(int)((sizeof(int)*8)-1)) && (1<mem5.szAtom ){ - mem5.szAtom = mem5.szAtom << 1; - } - - mem5.nBlock = (nByte / (mem5.szAtom+sizeof(u8))); - mem5.zPool = zByte; - mem5.aCtrl = (u8 *)&mem5.zPool[mem5.nBlock*mem5.szAtom]; - - for(ii=0; ii<=LOGMAX; ii++){ - mem5.aiFreelist[ii] = -1; - } - - iOffset = 0; - for(ii=LOGMAX; ii>=0; ii--){ - int nAlloc = (1<mem5.nBlock); - } - - /* If a mutex is required for normal operation, allocate one */ - if( sqlite3GlobalConfig.bMemstat==0 ){ - mem5.mutex = sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_MEM); - } - - return SQLITE_OK; -} - -/* -** Deinitialize this module. -*/ -static void memsys5Shutdown(void *NotUsed){ - UNUSED_PARAMETER(NotUsed); - mem5.mutex = 0; - return; -} - -#ifdef SQLITE_TEST -/* -** Open the file indicated and write a log of all unfreed memory -** allocations into that log. -*/ -SQLITE_PRIVATE void sqlite3Memsys5Dump(const char *zFilename){ - FILE *out; - int i, j, n; - int nMinLog; - - if( zFilename==0 || zFilename[0]==0 ){ - out = stdout; - }else{ - out = fopen(zFilename, "w"); - if( out==0 ){ - fprintf(stderr, "** Unable to output memory debug output log: %s **\n", - zFilename); - return; - } - } - memsys5Enter(); - nMinLog = memsys5Log(mem5.szAtom); - for(i=0; i<=LOGMAX && i+nMinLog<32; i++){ - for(n=0, j=mem5.aiFreelist[i]; j>=0; j = MEM5LINK(j)->next, n++){} - fprintf(out, "freelist items of size %d: %d\n", mem5.szAtom << i, n); - } - fprintf(out, "mem5.nAlloc = %llu\n", mem5.nAlloc); - fprintf(out, "mem5.totalAlloc = %llu\n", mem5.totalAlloc); - fprintf(out, "mem5.totalExcess = %llu\n", mem5.totalExcess); - fprintf(out, "mem5.currentOut = %u\n", mem5.currentOut); - fprintf(out, "mem5.currentCount = %u\n", mem5.currentCount); - fprintf(out, "mem5.maxOut = %u\n", mem5.maxOut); - fprintf(out, "mem5.maxCount = %u\n", mem5.maxCount); - fprintf(out, "mem5.maxRequest = %u\n", mem5.maxRequest); - memsys5Leave(); - if( out==stdout ){ - fflush(stdout); - }else{ - fclose(out); - } -} -#endif - -/* -** This routine is the only routine in this file with external -** linkage. It returns a pointer to a static sqlite3_mem_methods -** struct populated with the memsys5 methods. -*/ -SQLITE_PRIVATE const sqlite3_mem_methods *sqlite3MemGetMemsys5(void){ - static const sqlite3_mem_methods memsys5Methods = { - memsys5Malloc, - memsys5Free, - memsys5Realloc, - memsys5Size, - memsys5Roundup, - memsys5Init, - memsys5Shutdown, - 0 - }; - return &memsys5Methods; -} - -#endif /* SQLITE_ENABLE_MEMSYS5 */ - -/************** End of mem5.c ************************************************/ -/************** Begin file mutex.c *******************************************/ -/* -** 2007 August 14 -** -** The author disclaims copyright to this source code. In place of -** a legal notice, here is a blessing: -** -** May you do good and not evil. -** May you find forgiveness for yourself and forgive others. -** May you share freely, never taking more than you give. -** -************************************************************************* -** This file contains the C functions that implement mutexes. -** -** This file contains code that is common across all mutex implementations. 
-*/ - -#if defined(SQLITE_DEBUG) && !defined(SQLITE_MUTEX_OMIT) -/* -** For debugging purposes, record when the mutex subsystem is initialized -** and uninitialized so that we can assert() if there is an attempt to -** allocate a mutex while the system is uninitialized. -*/ -static SQLITE_WSD int mutexIsInit = 0; -#endif /* SQLITE_DEBUG */ - - -#ifndef SQLITE_MUTEX_OMIT -/* -** Initialize the mutex system. -*/ -SQLITE_PRIVATE int sqlite3MutexInit(void){ - int rc = SQLITE_OK; - if( !sqlite3GlobalConfig.mutex.xMutexAlloc ){ - /* If the xMutexAlloc method has not been set, then the user did not - ** install a mutex implementation via sqlite3_config() prior to - ** sqlite3_initialize() being called. This block copies pointers to - ** the default implementation into the sqlite3GlobalConfig structure. - */ - sqlite3_mutex_methods const *pFrom; - sqlite3_mutex_methods *pTo = &sqlite3GlobalConfig.mutex; - - if( sqlite3GlobalConfig.bCoreMutex ){ - pFrom = sqlite3DefaultMutex(); - }else{ - pFrom = sqlite3NoopMutex(); - } - memcpy(pTo, pFrom, offsetof(sqlite3_mutex_methods, xMutexAlloc)); - memcpy(&pTo->xMutexFree, &pFrom->xMutexFree, - sizeof(*pTo) - offsetof(sqlite3_mutex_methods, xMutexFree)); - pTo->xMutexAlloc = pFrom->xMutexAlloc; - } - rc = sqlite3GlobalConfig.mutex.xMutexInit(); - -#ifdef SQLITE_DEBUG - GLOBAL(int, mutexIsInit) = 1; -#endif - - return rc; -} - -/* -** Shutdown the mutex system. This call frees resources allocated by -** sqlite3MutexInit(). -*/ -SQLITE_PRIVATE int sqlite3MutexEnd(void){ - int rc = SQLITE_OK; - if( sqlite3GlobalConfig.mutex.xMutexEnd ){ - rc = sqlite3GlobalConfig.mutex.xMutexEnd(); - } - -#ifdef SQLITE_DEBUG - GLOBAL(int, mutexIsInit) = 0; -#endif - - return rc; -} - -/* -** Retrieve a pointer to a static mutex or allocate a new dynamic one. -*/ -SQLITE_API sqlite3_mutex *sqlite3_mutex_alloc(int id){ -#ifndef SQLITE_OMIT_AUTOINIT - if( sqlite3_initialize() ) return 0; -#endif - return sqlite3GlobalConfig.mutex.xMutexAlloc(id); -} - -SQLITE_PRIVATE sqlite3_mutex *sqlite3MutexAlloc(int id){ - if( !sqlite3GlobalConfig.bCoreMutex ){ - return 0; - } - assert( GLOBAL(int, mutexIsInit) ); - return sqlite3GlobalConfig.mutex.xMutexAlloc(id); -} - -/* -** Free a dynamic mutex. -*/ -SQLITE_API void sqlite3_mutex_free(sqlite3_mutex *p){ - if( p ){ - sqlite3GlobalConfig.mutex.xMutexFree(p); - } -} - -/* -** Obtain the mutex p. If some other thread already has the mutex, block -** until it can be obtained. -*/ -SQLITE_API void sqlite3_mutex_enter(sqlite3_mutex *p){ - if( p ){ - sqlite3GlobalConfig.mutex.xMutexEnter(p); - } -} - -/* -** Obtain the mutex p. If successful, return SQLITE_OK. Otherwise, if another -** thread holds the mutex and it cannot be obtained, return SQLITE_BUSY. -*/ -SQLITE_API int sqlite3_mutex_try(sqlite3_mutex *p){ - int rc = SQLITE_OK; - if( p ){ - return sqlite3GlobalConfig.mutex.xMutexTry(p); - } - return rc; -} - -/* -** The sqlite3_mutex_leave() routine exits a mutex that was previously -** entered by the same thread. The behavior is undefined if the mutex -** is not currently entered. If a NULL pointer is passed as an argument -** this function is a no-op. -*/ -SQLITE_API void sqlite3_mutex_leave(sqlite3_mutex *p){ - if( p ){ - sqlite3GlobalConfig.mutex.xMutexLeave(p); - } -} - -#ifndef NDEBUG -/* -** The sqlite3_mutex_held() and sqlite3_mutex_notheld() routine are -** intended for use inside assert() statements. 
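Editor's note: sqlite3MutexInit() above copies in a built-in mutex implementation only when the application has not already supplied its own. A minimal sketch of installing one follows; myMutexMethods is a hypothetical, fully populated sqlite3_mutex_methods object, not something defined in this diff.

#include "sqlite3.h"

extern const sqlite3_mutex_methods myMutexMethods;  /* hypothetical */

int install_custom_mutexes(void){
  /* Must run before sqlite3_initialize(); later calls return SQLITE_MISUSE. */
  int rc = sqlite3_config(SQLITE_CONFIG_MUTEX, &myMutexMethods);
  if( rc==SQLITE_OK ) rc = sqlite3_initialize();
  return rc;
}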
-*/ -SQLITE_API int sqlite3_mutex_held(sqlite3_mutex *p){ - return p==0 || sqlite3GlobalConfig.mutex.xMutexHeld(p); -} -SQLITE_API int sqlite3_mutex_notheld(sqlite3_mutex *p){ - return p==0 || sqlite3GlobalConfig.mutex.xMutexNotheld(p); -} -#endif - -#endif /* !defined(SQLITE_MUTEX_OMIT) */ - -/************** End of mutex.c ***********************************************/ -/************** Begin file mutex_noop.c **************************************/ -/* -** 2008 October 07 -** -** The author disclaims copyright to this source code. In place of -** a legal notice, here is a blessing: -** -** May you do good and not evil. -** May you find forgiveness for yourself and forgive others. -** May you share freely, never taking more than you give. -** -************************************************************************* -** This file contains the C functions that implement mutexes. -** -** This implementation in this file does not provide any mutual -** exclusion and is thus suitable for use only in applications -** that use SQLite in a single thread. The routines defined -** here are place-holders. Applications can substitute working -** mutex routines at start-time using the -** -** sqlite3_config(SQLITE_CONFIG_MUTEX,...) -** -** interface. -** -** If compiled with SQLITE_DEBUG, then additional logic is inserted -** that does error checking on mutexes to make sure they are being -** called correctly. -*/ - -#ifndef SQLITE_MUTEX_OMIT - -#ifndef SQLITE_DEBUG -/* -** Stub routines for all mutex methods. -** -** This routines provide no mutual exclusion or error checking. -*/ -static int noopMutexInit(void){ return SQLITE_OK; } -static int noopMutexEnd(void){ return SQLITE_OK; } -static sqlite3_mutex *noopMutexAlloc(int id){ - UNUSED_PARAMETER(id); - return (sqlite3_mutex*)8; -} -static void noopMutexFree(sqlite3_mutex *p){ UNUSED_PARAMETER(p); return; } -static void noopMutexEnter(sqlite3_mutex *p){ UNUSED_PARAMETER(p); return; } -static int noopMutexTry(sqlite3_mutex *p){ - UNUSED_PARAMETER(p); - return SQLITE_OK; -} -static void noopMutexLeave(sqlite3_mutex *p){ UNUSED_PARAMETER(p); return; } - -SQLITE_PRIVATE sqlite3_mutex_methods const *sqlite3NoopMutex(void){ - static const sqlite3_mutex_methods sMutex = { - noopMutexInit, - noopMutexEnd, - noopMutexAlloc, - noopMutexFree, - noopMutexEnter, - noopMutexTry, - noopMutexLeave, - - 0, - 0, - }; - - return &sMutex; -} -#endif /* !SQLITE_DEBUG */ - -#ifdef SQLITE_DEBUG -/* -** In this implementation, error checking is provided for testing -** and debugging purposes. The mutexes still do not provide any -** mutual exclusion. -*/ - -/* -** The mutex object -*/ -typedef struct sqlite3_debug_mutex { - int id; /* The mutex type */ - int cnt; /* Number of entries without a matching leave */ -} sqlite3_debug_mutex; - -/* -** The sqlite3_mutex_held() and sqlite3_mutex_notheld() routine are -** intended for use inside assert() statements. -*/ -static int debugMutexHeld(sqlite3_mutex *pX){ - sqlite3_debug_mutex *p = (sqlite3_debug_mutex*)pX; - return p==0 || p->cnt>0; -} -static int debugMutexNotheld(sqlite3_mutex *pX){ - sqlite3_debug_mutex *p = (sqlite3_debug_mutex*)pX; - return p==0 || p->cnt==0; -} - -/* -** Initialize and deinitialize the mutex subsystem. -*/ -static int debugMutexInit(void){ return SQLITE_OK; } -static int debugMutexEnd(void){ return SQLITE_OK; } - -/* -** The sqlite3_mutex_alloc() routine allocates a new -** mutex and returns a pointer to it. If it returns NULL -** that means that a mutex could not be allocated. 
-*/ -static sqlite3_mutex *debugMutexAlloc(int id){ - static sqlite3_debug_mutex aStatic[6]; - sqlite3_debug_mutex *pNew = 0; - switch( id ){ - case SQLITE_MUTEX_FAST: - case SQLITE_MUTEX_RECURSIVE: { - pNew = sqlite3Malloc(sizeof(*pNew)); - if( pNew ){ - pNew->id = id; - pNew->cnt = 0; - } - break; - } - default: { - assert( id-2 >= 0 ); - assert( id-2 < (int)(sizeof(aStatic)/sizeof(aStatic[0])) ); - pNew = &aStatic[id-2]; - pNew->id = id; - break; - } - } - return (sqlite3_mutex*)pNew; -} - -/* -** This routine deallocates a previously allocated mutex. -*/ -static void debugMutexFree(sqlite3_mutex *pX){ - sqlite3_debug_mutex *p = (sqlite3_debug_mutex*)pX; - assert( p->cnt==0 ); - assert( p->id==SQLITE_MUTEX_FAST || p->id==SQLITE_MUTEX_RECURSIVE ); - sqlite3_free(p); -} - -/* -** The sqlite3_mutex_enter() and sqlite3_mutex_try() routines attempt -** to enter a mutex. If another thread is already within the mutex, -** sqlite3_mutex_enter() will block and sqlite3_mutex_try() will return -** SQLITE_BUSY. The sqlite3_mutex_try() interface returns SQLITE_OK -** upon successful entry. Mutexes created using SQLITE_MUTEX_RECURSIVE can -** be entered multiple times by the same thread. In such cases the, -** mutex must be exited an equal number of times before another thread -** can enter. If the same thread tries to enter any other kind of mutex -** more than once, the behavior is undefined. -*/ -static void debugMutexEnter(sqlite3_mutex *pX){ - sqlite3_debug_mutex *p = (sqlite3_debug_mutex*)pX; - assert( p->id==SQLITE_MUTEX_RECURSIVE || debugMutexNotheld(pX) ); - p->cnt++; -} -static int debugMutexTry(sqlite3_mutex *pX){ - sqlite3_debug_mutex *p = (sqlite3_debug_mutex*)pX; - assert( p->id==SQLITE_MUTEX_RECURSIVE || debugMutexNotheld(pX) ); - p->cnt++; - return SQLITE_OK; -} - -/* -** The sqlite3_mutex_leave() routine exits a mutex that was -** previously entered by the same thread. The behavior -** is undefined if the mutex is not currently entered or -** is not currently allocated. SQLite will never do either. -*/ -static void debugMutexLeave(sqlite3_mutex *pX){ - sqlite3_debug_mutex *p = (sqlite3_debug_mutex*)pX; - assert( debugMutexHeld(pX) ); - p->cnt--; - assert( p->id==SQLITE_MUTEX_RECURSIVE || debugMutexNotheld(pX) ); -} - -SQLITE_PRIVATE sqlite3_mutex_methods const *sqlite3NoopMutex(void){ - static const sqlite3_mutex_methods sMutex = { - debugMutexInit, - debugMutexEnd, - debugMutexAlloc, - debugMutexFree, - debugMutexEnter, - debugMutexTry, - debugMutexLeave, - - debugMutexHeld, - debugMutexNotheld - }; - - return &sMutex; -} -#endif /* SQLITE_DEBUG */ - -/* -** If compiled with SQLITE_MUTEX_NOOP, then the no-op mutex implementation -** is used regardless of the run-time threadsafety setting. -*/ -#ifdef SQLITE_MUTEX_NOOP -SQLITE_PRIVATE sqlite3_mutex_methods const *sqlite3DefaultMutex(void){ - return sqlite3NoopMutex(); -} -#endif /* defined(SQLITE_MUTEX_NOOP) */ -#endif /* !defined(SQLITE_MUTEX_OMIT) */ - -/************** End of mutex_noop.c ******************************************/ -/************** Begin file mutex_unix.c **************************************/ -/* -** 2007 August 28 -** -** The author disclaims copyright to this source code. In place of -** a legal notice, here is a blessing: -** -** May you do good and not evil. -** May you find forgiveness for yourself and forgive others. -** May you share freely, never taking more than you give. 
-** -************************************************************************* -** This file contains the C functions that implement mutexes for pthreads -*/ - -/* -** The code in this file is only used if we are compiling threadsafe -** under unix with pthreads. -** -** Note that this implementation requires a version of pthreads that -** supports recursive mutexes. -*/ -#ifdef SQLITE_MUTEX_PTHREADS - -#include - -/* -** The sqlite3_mutex.id, sqlite3_mutex.nRef, and sqlite3_mutex.owner fields -** are necessary under two condidtions: (1) Debug builds and (2) using -** home-grown mutexes. Encapsulate these conditions into a single #define. -*/ -#if defined(SQLITE_DEBUG) || defined(SQLITE_HOMEGROWN_RECURSIVE_MUTEX) -# define SQLITE_MUTEX_NREF 1 -#else -# define SQLITE_MUTEX_NREF 0 -#endif - -/* -** Each recursive mutex is an instance of the following structure. -*/ -struct sqlite3_mutex { - pthread_mutex_t mutex; /* Mutex controlling the lock */ -#if SQLITE_MUTEX_NREF - int id; /* Mutex type */ - volatile int nRef; /* Number of entrances */ - volatile pthread_t owner; /* Thread that is within this mutex */ - int trace; /* True to trace changes */ -#endif -}; -#if SQLITE_MUTEX_NREF -#define SQLITE3_MUTEX_INITIALIZER { PTHREAD_MUTEX_INITIALIZER, 0, 0, (pthread_t)0, 0 } -#else -#define SQLITE3_MUTEX_INITIALIZER { PTHREAD_MUTEX_INITIALIZER } -#endif - -/* -** The sqlite3_mutex_held() and sqlite3_mutex_notheld() routine are -** intended for use only inside assert() statements. On some platforms, -** there might be race conditions that can cause these routines to -** deliver incorrect results. In particular, if pthread_equal() is -** not an atomic operation, then these routines might delivery -** incorrect results. On most platforms, pthread_equal() is a -** comparison of two integers and is therefore atomic. But we are -** told that HPUX is not such a platform. If so, then these routines -** will not always work correctly on HPUX. -** -** On those platforms where pthread_equal() is not atomic, SQLite -** should be compiled without -DSQLITE_DEBUG and with -DNDEBUG to -** make sure no assert() statements are evaluated and hence these -** routines are never called. -*/ -#if !defined(NDEBUG) || defined(SQLITE_DEBUG) -static int pthreadMutexHeld(sqlite3_mutex *p){ - return (p->nRef!=0 && pthread_equal(p->owner, pthread_self())); -} -static int pthreadMutexNotheld(sqlite3_mutex *p){ - return p->nRef==0 || pthread_equal(p->owner, pthread_self())==0; -} -#endif - -/* -** Initialize and deinitialize the mutex subsystem. -*/ -static int pthreadMutexInit(void){ return SQLITE_OK; } -static int pthreadMutexEnd(void){ return SQLITE_OK; } - -/* -** The sqlite3_mutex_alloc() routine allocates a new -** mutex and returns a pointer to it. If it returns NULL -** that means that a mutex could not be allocated. SQLite -** will unwind its stack and return an error. The argument -** to sqlite3_mutex_alloc() is one of these integer constants: -** -**
-** <ul>
-** <li>  SQLITE_MUTEX_FAST
-** <li>  SQLITE_MUTEX_RECURSIVE
-** <li>  SQLITE_MUTEX_STATIC_MASTER
-** <li>  SQLITE_MUTEX_STATIC_MEM
-** <li>  SQLITE_MUTEX_STATIC_MEM2
-** <li>  SQLITE_MUTEX_STATIC_PRNG
-** <li>  SQLITE_MUTEX_STATIC_LRU
-** <li>  SQLITE_MUTEX_STATIC_PMEM
-** </ul>
-** -** The first two constants cause sqlite3_mutex_alloc() to create -** a new mutex. The new mutex is recursive when SQLITE_MUTEX_RECURSIVE -** is used but not necessarily so when SQLITE_MUTEX_FAST is used. -** The mutex implementation does not need to make a distinction -** between SQLITE_MUTEX_RECURSIVE and SQLITE_MUTEX_FAST if it does -** not want to. But SQLite will only request a recursive mutex in -** cases where it really needs one. If a faster non-recursive mutex -** implementation is available on the host platform, the mutex subsystem -** might return such a mutex in response to SQLITE_MUTEX_FAST. -** -** The other allowed parameters to sqlite3_mutex_alloc() each return -** a pointer to a static preexisting mutex. Six static mutexes are -** used by the current version of SQLite. Future versions of SQLite -** may add additional static mutexes. Static mutexes are for internal -** use by SQLite only. Applications that use SQLite mutexes should -** use only the dynamic mutexes returned by SQLITE_MUTEX_FAST or -** SQLITE_MUTEX_RECURSIVE. -** -** Note that if one of the dynamic mutex parameters (SQLITE_MUTEX_FAST -** or SQLITE_MUTEX_RECURSIVE) is used then sqlite3_mutex_alloc() -** returns a different mutex on every call. But for the static -** mutex types, the same mutex is returned on every call that has -** the same type number. -*/ -static sqlite3_mutex *pthreadMutexAlloc(int iType){ - static sqlite3_mutex staticMutexes[] = { - SQLITE3_MUTEX_INITIALIZER, - SQLITE3_MUTEX_INITIALIZER, - SQLITE3_MUTEX_INITIALIZER, - SQLITE3_MUTEX_INITIALIZER, - SQLITE3_MUTEX_INITIALIZER, - SQLITE3_MUTEX_INITIALIZER - }; - sqlite3_mutex *p; - switch( iType ){ - case SQLITE_MUTEX_RECURSIVE: { - p = sqlite3MallocZero( sizeof(*p) ); - if( p ){ -#ifdef SQLITE_HOMEGROWN_RECURSIVE_MUTEX - /* If recursive mutexes are not available, we will have to - ** build our own. See below. */ - pthread_mutex_init(&p->mutex, 0); -#else - /* Use a recursive mutex if it is available */ - pthread_mutexattr_t recursiveAttr; - pthread_mutexattr_init(&recursiveAttr); - pthread_mutexattr_settype(&recursiveAttr, PTHREAD_MUTEX_RECURSIVE); - pthread_mutex_init(&p->mutex, &recursiveAttr); - pthread_mutexattr_destroy(&recursiveAttr); -#endif -#if SQLITE_MUTEX_NREF - p->id = iType; -#endif - } - break; - } - case SQLITE_MUTEX_FAST: { - p = sqlite3MallocZero( sizeof(*p) ); - if( p ){ -#if SQLITE_MUTEX_NREF - p->id = iType; -#endif - pthread_mutex_init(&p->mutex, 0); - } - break; - } - default: { - assert( iType-2 >= 0 ); - assert( iType-2 < ArraySize(staticMutexes) ); - p = &staticMutexes[iType-2]; -#if SQLITE_MUTEX_NREF - p->id = iType; -#endif - break; - } - } - return p; -} - - -/* -** This routine deallocates a previously -** allocated mutex. SQLite is careful to deallocate every -** mutex that it allocates. -*/ -static void pthreadMutexFree(sqlite3_mutex *p){ - assert( p->nRef==0 ); - assert( p->id==SQLITE_MUTEX_FAST || p->id==SQLITE_MUTEX_RECURSIVE ); - pthread_mutex_destroy(&p->mutex); - sqlite3_free(p); -} - -/* -** The sqlite3_mutex_enter() and sqlite3_mutex_try() routines attempt -** to enter a mutex. If another thread is already within the mutex, -** sqlite3_mutex_enter() will block and sqlite3_mutex_try() will return -** SQLITE_BUSY. The sqlite3_mutex_try() interface returns SQLITE_OK -** upon successful entry. Mutexes created using SQLITE_MUTEX_RECURSIVE can -** be entered multiple times by the same thread. In such cases the, -** mutex must be exited an equal number of times before another thread -** can enter. 
If the same thread tries to enter any other kind of mutex -** more than once, the behavior is undefined. -*/ -static void pthreadMutexEnter(sqlite3_mutex *p){ - assert( p->id==SQLITE_MUTEX_RECURSIVE || pthreadMutexNotheld(p) ); - -#ifdef SQLITE_HOMEGROWN_RECURSIVE_MUTEX - /* If recursive mutexes are not available, then we have to grow - ** our own. This implementation assumes that pthread_equal() - ** is atomic - that it cannot be deceived into thinking self - ** and p->owner are equal if p->owner changes between two values - ** that are not equal to self while the comparison is taking place. - ** This implementation also assumes a coherent cache - that - ** separate processes cannot read different values from the same - ** address at the same time. If either of these two conditions - ** are not met, then the mutexes will fail and problems will result. - */ - { - pthread_t self = pthread_self(); - if( p->nRef>0 && pthread_equal(p->owner, self) ){ - p->nRef++; - }else{ - pthread_mutex_lock(&p->mutex); - assert( p->nRef==0 ); - p->owner = self; - p->nRef = 1; - } - } -#else - /* Use the built-in recursive mutexes if they are available. - */ - pthread_mutex_lock(&p->mutex); -#if SQLITE_MUTEX_NREF - assert( p->nRef>0 || p->owner==0 ); - p->owner = pthread_self(); - p->nRef++; -#endif -#endif - -#ifdef SQLITE_DEBUG - if( p->trace ){ - printf("enter mutex %p (%d) with nRef=%d\n", p, p->trace, p->nRef); - } -#endif -} -static int pthreadMutexTry(sqlite3_mutex *p){ - int rc; - assert( p->id==SQLITE_MUTEX_RECURSIVE || pthreadMutexNotheld(p) ); - -#ifdef SQLITE_HOMEGROWN_RECURSIVE_MUTEX - /* If recursive mutexes are not available, then we have to grow - ** our own. This implementation assumes that pthread_equal() - ** is atomic - that it cannot be deceived into thinking self - ** and p->owner are equal if p->owner changes between two values - ** that are not equal to self while the comparison is taking place. - ** This implementation also assumes a coherent cache - that - ** separate processes cannot read different values from the same - ** address at the same time. If either of these two conditions - ** are not met, then the mutexes will fail and problems will result. - */ - { - pthread_t self = pthread_self(); - if( p->nRef>0 && pthread_equal(p->owner, self) ){ - p->nRef++; - rc = SQLITE_OK; - }else if( pthread_mutex_trylock(&p->mutex)==0 ){ - assert( p->nRef==0 ); - p->owner = self; - p->nRef = 1; - rc = SQLITE_OK; - }else{ - rc = SQLITE_BUSY; - } - } -#else - /* Use the built-in recursive mutexes if they are available. - */ - if( pthread_mutex_trylock(&p->mutex)==0 ){ -#if SQLITE_MUTEX_NREF - p->owner = pthread_self(); - p->nRef++; -#endif - rc = SQLITE_OK; - }else{ - rc = SQLITE_BUSY; - } -#endif - -#ifdef SQLITE_DEBUG - if( rc==SQLITE_OK && p->trace ){ - printf("enter mutex %p (%d) with nRef=%d\n", p, p->trace, p->nRef); - } -#endif - return rc; -} - -/* -** The sqlite3_mutex_leave() routine exits a mutex that was -** previously entered by the same thread. The behavior -** is undefined if the mutex is not currently entered or -** is not currently allocated. SQLite will never do either. 
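Editor's note: pthreadMutexAlloc() above requests a natively recursive mutex whenever SQLITE_HOMEGROWN_RECURSIVE_MUTEX is not defined. The same setup, shown standalone (not SQLite code):

#include <pthread.h>

/* Create a mutex the owning thread may enter more than once, mirroring
** the PTHREAD_MUTEX_RECURSIVE branch above. */
int init_recursive_mutex(pthread_mutex_t *pMutex){
  pthread_mutexattr_t attr;
  int rc = pthread_mutexattr_init(&attr);
  if( rc==0 ){
    pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
    rc = pthread_mutex_init(pMutex, &attr);
    pthread_mutexattr_destroy(&attr);
  }
  return rc;
}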
-*/ -static void pthreadMutexLeave(sqlite3_mutex *p){ - assert( pthreadMutexHeld(p) ); -#if SQLITE_MUTEX_NREF - p->nRef--; - if( p->nRef==0 ) p->owner = 0; -#endif - assert( p->nRef==0 || p->id==SQLITE_MUTEX_RECURSIVE ); - -#ifdef SQLITE_HOMEGROWN_RECURSIVE_MUTEX - if( p->nRef==0 ){ - pthread_mutex_unlock(&p->mutex); - } -#else - pthread_mutex_unlock(&p->mutex); -#endif - -#ifdef SQLITE_DEBUG - if( p->trace ){ - printf("leave mutex %p (%d) with nRef=%d\n", p, p->trace, p->nRef); - } -#endif -} - -SQLITE_PRIVATE sqlite3_mutex_methods const *sqlite3DefaultMutex(void){ - static const sqlite3_mutex_methods sMutex = { - pthreadMutexInit, - pthreadMutexEnd, - pthreadMutexAlloc, - pthreadMutexFree, - pthreadMutexEnter, - pthreadMutexTry, - pthreadMutexLeave, -#ifdef SQLITE_DEBUG - pthreadMutexHeld, - pthreadMutexNotheld -#else - 0, - 0 -#endif - }; - - return &sMutex; -} - -#endif /* SQLITE_MUTEX_PTHREADS */ - -/************** End of mutex_unix.c ******************************************/ -/************** Begin file mutex_w32.c ***************************************/ -/* -** 2007 August 14 -** -** The author disclaims copyright to this source code. In place of -** a legal notice, here is a blessing: -** -** May you do good and not evil. -** May you find forgiveness for yourself and forgive others. -** May you share freely, never taking more than you give. -** -************************************************************************* -** This file contains the C functions that implement mutexes for win32 -*/ - -#if SQLITE_OS_WIN -/* -** Include the header file for the Windows VFS. -*/ -/************** Include os_win.h in the middle of mutex_w32.c ****************/ -/************** Begin file os_win.h ******************************************/ -/* -** 2013 November 25 -** -** The author disclaims copyright to this source code. In place of -** a legal notice, here is a blessing: -** -** May you do good and not evil. -** May you find forgiveness for yourself and forgive others. -** May you share freely, never taking more than you give. -** -****************************************************************************** -** -** This file contains code that is specific to Windows. -*/ -#ifndef _OS_WIN_H_ -#define _OS_WIN_H_ - -/* -** Include the primary Windows SDK header file. -*/ -#include "windows.h" - -#ifdef __CYGWIN__ -# include -# include /* amalgamator: dontcache */ -#endif - -/* -** Determine if we are dealing with Windows NT. -** -** We ought to be able to determine if we are compiling for Windows 9x or -** Windows NT using the _WIN32_WINNT macro as follows: -** -** #if defined(_WIN32_WINNT) -** # define SQLITE_OS_WINNT 1 -** #else -** # define SQLITE_OS_WINNT 0 -** #endif -** -** However, Visual Studio 2005 does not set _WIN32_WINNT by default, as -** it ought to, so the above test does not work. We'll just assume that -** everything is Windows NT unless the programmer explicitly says otherwise -** by setting SQLITE_OS_WINNT to 0. -*/ -#if SQLITE_OS_WIN && !defined(SQLITE_OS_WINNT) -# define SQLITE_OS_WINNT 1 -#endif - -/* -** Determine if we are dealing with Windows CE - which has a much reduced -** API. -*/ -#if defined(_WIN32_WCE) -# define SQLITE_OS_WINCE 1 -#else -# define SQLITE_OS_WINCE 0 -#endif - -/* -** Determine if we are dealing with WinRT, which provides only a subset of -** the full Win32 API. 
-*/ -#if !defined(SQLITE_OS_WINRT) -# define SQLITE_OS_WINRT 0 -#endif - -#endif /* _OS_WIN_H_ */ - -/************** End of os_win.h **********************************************/ -/************** Continuing where we left off in mutex_w32.c ******************/ -#endif - -/* -** The code in this file is only used if we are compiling multithreaded -** on a win32 system. -*/ -#ifdef SQLITE_MUTEX_W32 - -/* -** Each recursive mutex is an instance of the following structure. -*/ -struct sqlite3_mutex { - CRITICAL_SECTION mutex; /* Mutex controlling the lock */ - int id; /* Mutex type */ -#ifdef SQLITE_DEBUG - volatile int nRef; /* Number of enterances */ - volatile DWORD owner; /* Thread holding this mutex */ - int trace; /* True to trace changes */ -#endif -}; -#define SQLITE_W32_MUTEX_INITIALIZER { 0 } -#ifdef SQLITE_DEBUG -#define SQLITE3_MUTEX_INITIALIZER { SQLITE_W32_MUTEX_INITIALIZER, 0, 0L, (DWORD)0, 0 } -#else -#define SQLITE3_MUTEX_INITIALIZER { SQLITE_W32_MUTEX_INITIALIZER, 0 } -#endif - -/* -** Return true (non-zero) if we are running under WinNT, Win2K, WinXP, -** or WinCE. Return false (zero) for Win95, Win98, or WinME. -** -** Here is an interesting observation: Win95, Win98, and WinME lack -** the LockFileEx() API. But we can still statically link against that -** API as long as we don't call it win running Win95/98/ME. A call to -** this routine is used to determine if the host is Win95/98/ME or -** WinNT/2K/XP so that we will know whether or not we can safely call -** the LockFileEx() API. -** -** mutexIsNT() is only used for the TryEnterCriticalSection() API call, -** which is only available if your application was compiled with -** _WIN32_WINNT defined to a value >= 0x0400. Currently, the only -** call to TryEnterCriticalSection() is #ifdef'ed out, so #ifdef -** this out as well. -*/ -#if 0 -#if SQLITE_OS_WINCE || SQLITE_OS_WINRT -# define mutexIsNT() (1) -#else - static int mutexIsNT(void){ - static int osType = 0; - if( osType==0 ){ - OSVERSIONINFO sInfo; - sInfo.dwOSVersionInfoSize = sizeof(sInfo); - GetVersionEx(&sInfo); - osType = sInfo.dwPlatformId==VER_PLATFORM_WIN32_NT ? 2 : 1; - } - return osType==2; - } -#endif /* SQLITE_OS_WINCE || SQLITE_OS_WINRT */ -#endif - -#ifdef SQLITE_DEBUG -/* -** The sqlite3_mutex_held() and sqlite3_mutex_notheld() routine are -** intended for use only inside assert() statements. -*/ -static int winMutexHeld(sqlite3_mutex *p){ - return p->nRef!=0 && p->owner==GetCurrentThreadId(); -} -static int winMutexNotheld2(sqlite3_mutex *p, DWORD tid){ - return p->nRef==0 || p->owner!=tid; -} -static int winMutexNotheld(sqlite3_mutex *p){ - DWORD tid = GetCurrentThreadId(); - return winMutexNotheld2(p, tid); -} -#endif - - -/* -** Initialize and deinitialize the mutex subsystem. -*/ -static sqlite3_mutex winMutex_staticMutexes[6] = { - SQLITE3_MUTEX_INITIALIZER, - SQLITE3_MUTEX_INITIALIZER, - SQLITE3_MUTEX_INITIALIZER, - SQLITE3_MUTEX_INITIALIZER, - SQLITE3_MUTEX_INITIALIZER, - SQLITE3_MUTEX_INITIALIZER -}; -static int winMutex_isInit = 0; -/* As winMutexInit() and winMutexEnd() are called as part -** of the sqlite3_initialize and sqlite3_shutdown() -** processing, the "interlocked" magic is probably not -** strictly necessary. -*/ -static LONG winMutex_lock = 0; - -SQLITE_API void sqlite3_win32_sleep(DWORD milliseconds); /* os_win.c */ - -static int winMutexInit(void){ - /* The first to increment to 1 does actual initialization */ - if( InterlockedCompareExchange(&winMutex_lock, 1, 0)==0 ){ - int i; - for(i=0; i -**
  • SQLITE_MUTEX_FAST -**
  • SQLITE_MUTEX_RECURSIVE -**
  • SQLITE_MUTEX_STATIC_MASTER -**
  • SQLITE_MUTEX_STATIC_MEM -**
  • SQLITE_MUTEX_STATIC_MEM2 -**
  • SQLITE_MUTEX_STATIC_PRNG -**
  • SQLITE_MUTEX_STATIC_LRU -**
  • SQLITE_MUTEX_STATIC_PMEM -** -** -** The first two constants cause sqlite3_mutex_alloc() to create -** a new mutex. The new mutex is recursive when SQLITE_MUTEX_RECURSIVE -** is used but not necessarily so when SQLITE_MUTEX_FAST is used. -** The mutex implementation does not need to make a distinction -** between SQLITE_MUTEX_RECURSIVE and SQLITE_MUTEX_FAST if it does -** not want to. But SQLite will only request a recursive mutex in -** cases where it really needs one. If a faster non-recursive mutex -** implementation is available on the host platform, the mutex subsystem -** might return such a mutex in response to SQLITE_MUTEX_FAST. -** -** The other allowed parameters to sqlite3_mutex_alloc() each return -** a pointer to a static preexisting mutex. Six static mutexes are -** used by the current version of SQLite. Future versions of SQLite -** may add additional static mutexes. Static mutexes are for internal -** use by SQLite only. Applications that use SQLite mutexes should -** use only the dynamic mutexes returned by SQLITE_MUTEX_FAST or -** SQLITE_MUTEX_RECURSIVE. -** -** Note that if one of the dynamic mutex parameters (SQLITE_MUTEX_FAST -** or SQLITE_MUTEX_RECURSIVE) is used then sqlite3_mutex_alloc() -** returns a different mutex on every call. But for the static -** mutex types, the same mutex is returned on every call that has -** the same type number. -*/ -static sqlite3_mutex *winMutexAlloc(int iType){ - sqlite3_mutex *p; - - switch( iType ){ - case SQLITE_MUTEX_FAST: - case SQLITE_MUTEX_RECURSIVE: { - p = sqlite3MallocZero( sizeof(*p) ); - if( p ){ -#ifdef SQLITE_DEBUG - p->id = iType; -#endif -#if SQLITE_OS_WINRT - InitializeCriticalSectionEx(&p->mutex, 0, 0); -#else - InitializeCriticalSection(&p->mutex); -#endif - } - break; - } - default: { - assert( winMutex_isInit==1 ); - assert( iType-2 >= 0 ); - assert( iType-2 < ArraySize(winMutex_staticMutexes) ); - p = &winMutex_staticMutexes[iType-2]; -#ifdef SQLITE_DEBUG - p->id = iType; -#endif - break; - } - } - return p; -} - - -/* -** This routine deallocates a previously -** allocated mutex. SQLite is careful to deallocate every -** mutex that it allocates. -*/ -static void winMutexFree(sqlite3_mutex *p){ - assert( p ); - assert( p->nRef==0 && p->owner==0 ); - assert( p->id==SQLITE_MUTEX_FAST || p->id==SQLITE_MUTEX_RECURSIVE ); - DeleteCriticalSection(&p->mutex); - sqlite3_free(p); -} - -/* -** The sqlite3_mutex_enter() and sqlite3_mutex_try() routines attempt -** to enter a mutex. If another thread is already within the mutex, -** sqlite3_mutex_enter() will block and sqlite3_mutex_try() will return -** SQLITE_BUSY. The sqlite3_mutex_try() interface returns SQLITE_OK -** upon successful entry. Mutexes created using SQLITE_MUTEX_RECURSIVE can -** be entered multiple times by the same thread. In such cases the, -** mutex must be exited an equal number of times before another thread -** can enter. If the same thread tries to enter any other kind of mutex -** more than once, the behavior is undefined. 
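Editor's note: winMutexEnter() below simply calls EnterCriticalSection(), relying on the fact that a win32 CRITICAL_SECTION is already recursive for the thread that owns it. A standalone illustration of that property (not SQLite code):

#include <windows.h>

/* EnterCriticalSection() may be called again by the thread that already
** owns the section; every enter needs a matching leave. */
static CRITICAL_SECTION cs;

void critical_section_demo(void){
  InitializeCriticalSection(&cs);
  EnterCriticalSection(&cs);
  EnterCriticalSection(&cs);   /* recursive re-entry by the same thread */
  LeaveCriticalSection(&cs);
  LeaveCriticalSection(&cs);
  DeleteCriticalSection(&cs);
}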
-*/ -static void winMutexEnter(sqlite3_mutex *p){ -#ifdef SQLITE_DEBUG - DWORD tid = GetCurrentThreadId(); - assert( p->id==SQLITE_MUTEX_RECURSIVE || winMutexNotheld2(p, tid) ); -#endif - EnterCriticalSection(&p->mutex); -#ifdef SQLITE_DEBUG - assert( p->nRef>0 || p->owner==0 ); - p->owner = tid; - p->nRef++; - if( p->trace ){ - printf("enter mutex %p (%d) with nRef=%d\n", p, p->trace, p->nRef); - } -#endif -} -static int winMutexTry(sqlite3_mutex *p){ -#ifndef NDEBUG - DWORD tid = GetCurrentThreadId(); -#endif - int rc = SQLITE_BUSY; - assert( p->id==SQLITE_MUTEX_RECURSIVE || winMutexNotheld2(p, tid) ); - /* - ** The sqlite3_mutex_try() routine is very rarely used, and when it - ** is used it is merely an optimization. So it is OK for it to always - ** fail. - ** - ** The TryEnterCriticalSection() interface is only available on WinNT. - ** And some windows compilers complain if you try to use it without - ** first doing some #defines that prevent SQLite from building on Win98. - ** For that reason, we will omit this optimization for now. See - ** ticket #2685. - */ -#if 0 - if( mutexIsNT() && TryEnterCriticalSection(&p->mutex) ){ - p->owner = tid; - p->nRef++; - rc = SQLITE_OK; - } -#else - UNUSED_PARAMETER(p); -#endif -#ifdef SQLITE_DEBUG - if( rc==SQLITE_OK && p->trace ){ - printf("try mutex %p (%d) with nRef=%d\n", p, p->trace, p->nRef); - } -#endif - return rc; -} - -/* -** The sqlite3_mutex_leave() routine exits a mutex that was -** previously entered by the same thread. The behavior -** is undefined if the mutex is not currently entered or -** is not currently allocated. SQLite will never do either. -*/ -static void winMutexLeave(sqlite3_mutex *p){ -#ifndef NDEBUG - DWORD tid = GetCurrentThreadId(); - assert( p->nRef>0 ); - assert( p->owner==tid ); - p->nRef--; - if( p->nRef==0 ) p->owner = 0; - assert( p->nRef==0 || p->id==SQLITE_MUTEX_RECURSIVE ); -#endif - LeaveCriticalSection(&p->mutex); -#ifdef SQLITE_DEBUG - if( p->trace ){ - printf("leave mutex %p (%d) with nRef=%d\n", p, p->trace, p->nRef); - } -#endif -} - -SQLITE_PRIVATE sqlite3_mutex_methods const *sqlite3DefaultMutex(void){ - static const sqlite3_mutex_methods sMutex = { - winMutexInit, - winMutexEnd, - winMutexAlloc, - winMutexFree, - winMutexEnter, - winMutexTry, - winMutexLeave, -#ifdef SQLITE_DEBUG - winMutexHeld, - winMutexNotheld -#else - 0, - 0 -#endif - }; - - return &sMutex; -} -#endif /* SQLITE_MUTEX_W32 */ - -/************** End of mutex_w32.c *******************************************/ -/************** Begin file malloc.c ******************************************/ -/* -** 2001 September 15 -** -** The author disclaims copyright to this source code. In place of -** a legal notice, here is a blessing: -** -** May you do good and not evil. -** May you find forgiveness for yourself and forgive others. -** May you share freely, never taking more than you give. -** -************************************************************************* -** -** Memory allocation functions used throughout sqlite. -*/ -/* #include */ - -/* -** Attempt to release up to n bytes of non-essential memory currently -** held by SQLite. An example of non-essential memory is memory used to -** cache database pages that are not currently in use. 
-*/ -SQLITE_API int sqlite3_release_memory(int n){ -#ifdef SQLITE_ENABLE_MEMORY_MANAGEMENT - return sqlite3PcacheReleaseMemory(n); -#else - /* IMPLEMENTATION-OF: R-34391-24921 The sqlite3_release_memory() routine - ** is a no-op returning zero if SQLite is not compiled with - ** SQLITE_ENABLE_MEMORY_MANAGEMENT. */ - UNUSED_PARAMETER(n); - return 0; -#endif -} - -/* -** An instance of the following object records the location of -** each unused scratch buffer. -*/ -typedef struct ScratchFreeslot { - struct ScratchFreeslot *pNext; /* Next unused scratch buffer */ -} ScratchFreeslot; - -/* -** State information local to the memory allocation subsystem. -*/ -static SQLITE_WSD struct Mem0Global { - sqlite3_mutex *mutex; /* Mutex to serialize access */ - - /* - ** The alarm callback and its arguments. The mem0.mutex lock will - ** be held while the callback is running. Recursive calls into - ** the memory subsystem are allowed, but no new callbacks will be - ** issued. - */ - sqlite3_int64 alarmThreshold; - void (*alarmCallback)(void*, sqlite3_int64,int); - void *alarmArg; - - /* - ** Pointers to the end of sqlite3GlobalConfig.pScratch memory - ** (so that a range test can be used to determine if an allocation - ** being freed came from pScratch) and a pointer to the list of - ** unused scratch allocations. - */ - void *pScratchEnd; - ScratchFreeslot *pScratchFree; - u32 nScratchFree; - - /* - ** True if heap is nearly "full" where "full" is defined by the - ** sqlite3_soft_heap_limit() setting. - */ - int nearlyFull; -} mem0 = { 0, 0, 0, 0, 0, 0, 0, 0 }; - -#define mem0 GLOBAL(struct Mem0Global, mem0) - -/* -** This routine runs when the memory allocator sees that the -** total memory allocation is about to exceed the soft heap -** limit. -*/ -static void softHeapLimitEnforcer( - void *NotUsed, - sqlite3_int64 NotUsed2, - int allocSize -){ - UNUSED_PARAMETER2(NotUsed, NotUsed2); - sqlite3_release_memory(allocSize); -} - -/* -** Change the alarm callback -*/ -static int sqlite3MemoryAlarm( - void(*xCallback)(void *pArg, sqlite3_int64 used,int N), - void *pArg, - sqlite3_int64 iThreshold -){ - int nUsed; - sqlite3_mutex_enter(mem0.mutex); - mem0.alarmCallback = xCallback; - mem0.alarmArg = pArg; - mem0.alarmThreshold = iThreshold; - nUsed = sqlite3StatusValue(SQLITE_STATUS_MEMORY_USED); - mem0.nearlyFull = (iThreshold>0 && iThreshold<=nUsed); - sqlite3_mutex_leave(mem0.mutex); - return SQLITE_OK; -} - -#ifndef SQLITE_OMIT_DEPRECATED -/* -** Deprecated external interface. Internal/core SQLite code -** should call sqlite3MemoryAlarm. -*/ -SQLITE_API int sqlite3_memory_alarm( - void(*xCallback)(void *pArg, sqlite3_int64 used,int N), - void *pArg, - sqlite3_int64 iThreshold -){ - return sqlite3MemoryAlarm(xCallback, pArg, iThreshold); -} -#endif - -/* -** Set the soft heap-size limit for the library. Passing a zero or -** negative value indicates no limit. 
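Editor's note: the soft heap limit described above, combined with sqlite3_memory_used() and sqlite3_memory_highwater() defined a little further down, gives a simple way to watch heap pressure. A small illustrative sketch; the 8 MB figure is arbitrary.

#include <stdio.h>
#include "sqlite3.h"

/* Ask SQLite to start releasing cached memory once roughly 8 MB is in
** use, then report current and peak usage. */
void report_memory(void){
  sqlite3_soft_heap_limit64(8*1024*1024);
  printf("in use: %lld bytes\n", sqlite3_memory_used());
  printf("peak:   %lld bytes\n", sqlite3_memory_highwater(0));
}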
-*/ -SQLITE_API sqlite3_int64 sqlite3_soft_heap_limit64(sqlite3_int64 n){ - sqlite3_int64 priorLimit; - sqlite3_int64 excess; -#ifndef SQLITE_OMIT_AUTOINIT - int rc = sqlite3_initialize(); - if( rc ) return -1; -#endif - sqlite3_mutex_enter(mem0.mutex); - priorLimit = mem0.alarmThreshold; - sqlite3_mutex_leave(mem0.mutex); - if( n<0 ) return priorLimit; - if( n>0 ){ - sqlite3MemoryAlarm(softHeapLimitEnforcer, 0, n); - }else{ - sqlite3MemoryAlarm(0, 0, 0); - } - excess = sqlite3_memory_used() - n; - if( excess>0 ) sqlite3_release_memory((int)(excess & 0x7fffffff)); - return priorLimit; -} -SQLITE_API void sqlite3_soft_heap_limit(int n){ - if( n<0 ) n = 0; - sqlite3_soft_heap_limit64(n); -} - -/* -** Initialize the memory allocation subsystem. -*/ -SQLITE_PRIVATE int sqlite3MallocInit(void){ - if( sqlite3GlobalConfig.m.xMalloc==0 ){ - sqlite3MemSetDefault(); - } - memset(&mem0, 0, sizeof(mem0)); - if( sqlite3GlobalConfig.bCoreMutex ){ - mem0.mutex = sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_MEM); - } - if( sqlite3GlobalConfig.pScratch && sqlite3GlobalConfig.szScratch>=100 - && sqlite3GlobalConfig.nScratch>0 ){ - int i, n, sz; - ScratchFreeslot *pSlot; - sz = ROUNDDOWN8(sqlite3GlobalConfig.szScratch); - sqlite3GlobalConfig.szScratch = sz; - pSlot = (ScratchFreeslot*)sqlite3GlobalConfig.pScratch; - n = sqlite3GlobalConfig.nScratch; - mem0.pScratchFree = pSlot; - mem0.nScratchFree = n; - for(i=0; ipNext = (ScratchFreeslot*)(sz+(char*)pSlot); - pSlot = pSlot->pNext; - } - pSlot->pNext = 0; - mem0.pScratchEnd = (void*)&pSlot[1]; - }else{ - mem0.pScratchEnd = 0; - sqlite3GlobalConfig.pScratch = 0; - sqlite3GlobalConfig.szScratch = 0; - sqlite3GlobalConfig.nScratch = 0; - } - if( sqlite3GlobalConfig.pPage==0 || sqlite3GlobalConfig.szPage<512 - || sqlite3GlobalConfig.nPage<1 ){ - sqlite3GlobalConfig.pPage = 0; - sqlite3GlobalConfig.szPage = 0; - sqlite3GlobalConfig.nPage = 0; - } - return sqlite3GlobalConfig.m.xInit(sqlite3GlobalConfig.m.pAppData); -} - -/* -** Return true if the heap is currently under memory pressure - in other -** words if the amount of heap used is close to the limit set by -** sqlite3_soft_heap_limit(). -*/ -SQLITE_PRIVATE int sqlite3HeapNearlyFull(void){ - return mem0.nearlyFull; -} - -/* -** Deinitialize the memory allocation subsystem. -*/ -SQLITE_PRIVATE void sqlite3MallocEnd(void){ - if( sqlite3GlobalConfig.m.xShutdown ){ - sqlite3GlobalConfig.m.xShutdown(sqlite3GlobalConfig.m.pAppData); - } - memset(&mem0, 0, sizeof(mem0)); -} - -/* -** Return the amount of memory currently checked out. -*/ -SQLITE_API sqlite3_int64 sqlite3_memory_used(void){ - int n, mx; - sqlite3_int64 res; - sqlite3_status(SQLITE_STATUS_MEMORY_USED, &n, &mx, 0); - res = (sqlite3_int64)n; /* Work around bug in Borland C. Ticket #3216 */ - return res; -} - -/* -** Return the maximum amount of memory that has ever been -** checked out since either the beginning of this process -** or since the most recent reset. -*/ -SQLITE_API sqlite3_int64 sqlite3_memory_highwater(int resetFlag){ - int n, mx; - sqlite3_int64 res; - sqlite3_status(SQLITE_STATUS_MEMORY_USED, &n, &mx, resetFlag); - res = (sqlite3_int64)mx; /* Work around bug in Borland C. 
Ticket #3216 */ - return res; -} - -/* -** Trigger the alarm -*/ -static void sqlite3MallocAlarm(int nByte){ - void (*xCallback)(void*,sqlite3_int64,int); - sqlite3_int64 nowUsed; - void *pArg; - if( mem0.alarmCallback==0 ) return; - xCallback = mem0.alarmCallback; - nowUsed = sqlite3StatusValue(SQLITE_STATUS_MEMORY_USED); - pArg = mem0.alarmArg; - mem0.alarmCallback = 0; - sqlite3_mutex_leave(mem0.mutex); - xCallback(pArg, nowUsed, nByte); - sqlite3_mutex_enter(mem0.mutex); - mem0.alarmCallback = xCallback; - mem0.alarmArg = pArg; -} - -/* -** Do a memory allocation with statistics and alarms. Assume the -** lock is already held. -*/ -static int mallocWithAlarm(int n, void **pp){ - int nFull; - void *p; - assert( sqlite3_mutex_held(mem0.mutex) ); - nFull = sqlite3GlobalConfig.m.xRoundup(n); - sqlite3StatusSet(SQLITE_STATUS_MALLOC_SIZE, n); - if( mem0.alarmCallback!=0 ){ - int nUsed = sqlite3StatusValue(SQLITE_STATUS_MEMORY_USED); - if( nUsed >= mem0.alarmThreshold - nFull ){ - mem0.nearlyFull = 1; - sqlite3MallocAlarm(nFull); - }else{ - mem0.nearlyFull = 0; - } - } - p = sqlite3GlobalConfig.m.xMalloc(nFull); -#ifdef SQLITE_ENABLE_MEMORY_MANAGEMENT - if( p==0 && mem0.alarmCallback ){ - sqlite3MallocAlarm(nFull); - p = sqlite3GlobalConfig.m.xMalloc(nFull); - } -#endif - if( p ){ - nFull = sqlite3MallocSize(p); - sqlite3StatusAdd(SQLITE_STATUS_MEMORY_USED, nFull); - sqlite3StatusAdd(SQLITE_STATUS_MALLOC_COUNT, 1); - } - *pp = p; - return nFull; -} - -/* -** Allocate memory. This routine is like sqlite3_malloc() except that it -** assumes the memory subsystem has already been initialized. -*/ -SQLITE_PRIVATE void *sqlite3Malloc(int n){ - void *p; - if( n<=0 /* IMP: R-65312-04917 */ - || n>=0x7fffff00 - ){ - /* A memory allocation of a number of bytes which is near the maximum - ** signed integer value might cause an integer overflow inside of the - ** xMalloc(). Hence we limit the maximum size to 0x7fffff00, giving - ** 255 bytes of overhead. SQLite itself will never use anything near - ** this amount. The only way to reach the limit is with sqlite3_malloc() */ - p = 0; - }else if( sqlite3GlobalConfig.bMemstat ){ - sqlite3_mutex_enter(mem0.mutex); - mallocWithAlarm(n, &p); - sqlite3_mutex_leave(mem0.mutex); - }else{ - p = sqlite3GlobalConfig.m.xMalloc(n); - } - assert( EIGHT_BYTE_ALIGNMENT(p) ); /* IMP: R-04675-44850 */ - return p; -} - -/* -** This version of the memory allocation is for use by the application. -** First make sure the memory subsystem is initialized, then do the -** allocation. -*/ -SQLITE_API void *sqlite3_malloc(int n){ -#ifndef SQLITE_OMIT_AUTOINIT - if( sqlite3_initialize() ) return 0; -#endif - return sqlite3Malloc(n); -} - -/* -** Each thread may only have a single outstanding allocation from -** xScratchMalloc(). We verify this constraint in the single-threaded -** case by setting scratchAllocOut to 1 when an allocation -** is outstanding clearing it when the allocation is freed. -*/ -#if SQLITE_THREADSAFE==0 && !defined(NDEBUG) -static int scratchAllocOut = 0; -#endif - - -/* -** Allocate memory that is to be used and released right away. -** This routine is similar to alloca() in that it is not intended -** for situations where the memory might be held long-term. This -** routine is intended to get memory to old large transient data -** structures that would not normally fit on the stack of an -** embedded processor. 
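Editor's note: the scratch allocator described above draws from a buffer the application may donate through sqlite3_config(SQLITE_CONFIG_SCRATCH, ...). A hedged sketch follows; the slot size and count are arbitrary illustrative values.

#include <stdlib.h>
#include "sqlite3.h"

/* Donate 16 scratch slots of 8 KB each before initialization so that
** sqlite3ScratchMalloc() below can satisfy requests without touching
** the general-purpose heap. */
static void *gScratch;

int use_scratch_buffer(void){
  int rc;
  gScratch = malloc(8192*16);
  if( gScratch==0 ) return SQLITE_NOMEM;
  rc = sqlite3_config(SQLITE_CONFIG_SCRATCH, gScratch, 8192, 16);
  if( rc==SQLITE_OK ) rc = sqlite3_initialize();
  return rc;
}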
-*/ -SQLITE_PRIVATE void *sqlite3ScratchMalloc(int n){ - void *p; - assert( n>0 ); - - sqlite3_mutex_enter(mem0.mutex); - if( mem0.nScratchFree && sqlite3GlobalConfig.szScratch>=n ){ - p = mem0.pScratchFree; - mem0.pScratchFree = mem0.pScratchFree->pNext; - mem0.nScratchFree--; - sqlite3StatusAdd(SQLITE_STATUS_SCRATCH_USED, 1); - sqlite3StatusSet(SQLITE_STATUS_SCRATCH_SIZE, n); - sqlite3_mutex_leave(mem0.mutex); - }else{ - if( sqlite3GlobalConfig.bMemstat ){ - sqlite3StatusSet(SQLITE_STATUS_SCRATCH_SIZE, n); - n = mallocWithAlarm(n, &p); - if( p ) sqlite3StatusAdd(SQLITE_STATUS_SCRATCH_OVERFLOW, n); - sqlite3_mutex_leave(mem0.mutex); - }else{ - sqlite3_mutex_leave(mem0.mutex); - p = sqlite3GlobalConfig.m.xMalloc(n); - } - sqlite3MemdebugSetType(p, MEMTYPE_SCRATCH); - } - assert( sqlite3_mutex_notheld(mem0.mutex) ); - - -#if SQLITE_THREADSAFE==0 && !defined(NDEBUG) - /* Verify that no more than two scratch allocations per thread - ** are outstanding at one time. (This is only checked in the - ** single-threaded case since checking in the multi-threaded case - ** would be much more complicated.) */ - assert( scratchAllocOut<=1 ); - if( p ) scratchAllocOut++; -#endif - - return p; -} -SQLITE_PRIVATE void sqlite3ScratchFree(void *p){ - if( p ){ - -#if SQLITE_THREADSAFE==0 && !defined(NDEBUG) - /* Verify that no more than two scratch allocation per thread - ** is outstanding at one time. (This is only checked in the - ** single-threaded case since checking in the multi-threaded case - ** would be much more complicated.) */ - assert( scratchAllocOut>=1 && scratchAllocOut<=2 ); - scratchAllocOut--; -#endif - - if( p>=sqlite3GlobalConfig.pScratch && ppNext = mem0.pScratchFree; - mem0.pScratchFree = pSlot; - mem0.nScratchFree++; - assert( mem0.nScratchFree <= (u32)sqlite3GlobalConfig.nScratch ); - sqlite3StatusAdd(SQLITE_STATUS_SCRATCH_USED, -1); - sqlite3_mutex_leave(mem0.mutex); - }else{ - /* Release memory back to the heap */ - assert( sqlite3MemdebugHasType(p, MEMTYPE_SCRATCH) ); - assert( sqlite3MemdebugNoType(p, ~MEMTYPE_SCRATCH) ); - sqlite3MemdebugSetType(p, MEMTYPE_HEAP); - if( sqlite3GlobalConfig.bMemstat ){ - int iSize = sqlite3MallocSize(p); - sqlite3_mutex_enter(mem0.mutex); - sqlite3StatusAdd(SQLITE_STATUS_SCRATCH_OVERFLOW, -iSize); - sqlite3StatusAdd(SQLITE_STATUS_MEMORY_USED, -iSize); - sqlite3StatusAdd(SQLITE_STATUS_MALLOC_COUNT, -1); - sqlite3GlobalConfig.m.xFree(p); - sqlite3_mutex_leave(mem0.mutex); - }else{ - sqlite3GlobalConfig.m.xFree(p); - } - } - } -} - -/* -** TRUE if p is a lookaside memory allocation from db -*/ -#ifndef SQLITE_OMIT_LOOKASIDE -static int isLookaside(sqlite3 *db, void *p){ - return p>=db->lookaside.pStart && plookaside.pEnd; -} -#else -#define isLookaside(A,B) 0 -#endif - -/* -** Return the size of a memory allocation previously obtained from -** sqlite3Malloc() or sqlite3_malloc(). 
-*/ -SQLITE_PRIVATE int sqlite3MallocSize(void *p){ - assert( sqlite3MemdebugHasType(p, MEMTYPE_HEAP) ); - assert( sqlite3MemdebugNoType(p, MEMTYPE_DB) ); - return sqlite3GlobalConfig.m.xSize(p); -} -SQLITE_PRIVATE int sqlite3DbMallocSize(sqlite3 *db, void *p){ - assert( db!=0 ); - assert( sqlite3_mutex_held(db->mutex) ); - if( isLookaside(db, p) ){ - return db->lookaside.sz; - }else{ - assert( sqlite3MemdebugHasType(p, MEMTYPE_DB) ); - assert( sqlite3MemdebugHasType(p, MEMTYPE_LOOKASIDE|MEMTYPE_HEAP) ); - assert( db!=0 || sqlite3MemdebugNoType(p, MEMTYPE_LOOKASIDE) ); - return sqlite3GlobalConfig.m.xSize(p); - } -} - -/* -** Free memory previously obtained from sqlite3Malloc(). -*/ -SQLITE_API void sqlite3_free(void *p){ - if( p==0 ) return; /* IMP: R-49053-54554 */ - assert( sqlite3MemdebugNoType(p, MEMTYPE_DB) ); - assert( sqlite3MemdebugHasType(p, MEMTYPE_HEAP) ); - if( sqlite3GlobalConfig.bMemstat ){ - sqlite3_mutex_enter(mem0.mutex); - sqlite3StatusAdd(SQLITE_STATUS_MEMORY_USED, -sqlite3MallocSize(p)); - sqlite3StatusAdd(SQLITE_STATUS_MALLOC_COUNT, -1); - sqlite3GlobalConfig.m.xFree(p); - sqlite3_mutex_leave(mem0.mutex); - }else{ - sqlite3GlobalConfig.m.xFree(p); - } -} - -/* -** Free memory that might be associated with a particular database -** connection. -*/ -SQLITE_PRIVATE void sqlite3DbFree(sqlite3 *db, void *p){ - assert( db==0 || sqlite3_mutex_held(db->mutex) ); - if( p==0 ) return; - if( db ){ - if( db->pnBytesFreed ){ - *db->pnBytesFreed += sqlite3DbMallocSize(db, p); - return; - } - if( isLookaside(db, p) ){ - LookasideSlot *pBuf = (LookasideSlot*)p; -#if SQLITE_DEBUG - /* Trash all content in the buffer being freed */ - memset(p, 0xaa, db->lookaside.sz); -#endif - pBuf->pNext = db->lookaside.pFree; - db->lookaside.pFree = pBuf; - db->lookaside.nOut--; - return; - } - } - assert( sqlite3MemdebugHasType(p, MEMTYPE_DB) ); - assert( sqlite3MemdebugHasType(p, MEMTYPE_LOOKASIDE|MEMTYPE_HEAP) ); - assert( db!=0 || sqlite3MemdebugNoType(p, MEMTYPE_LOOKASIDE) ); - sqlite3MemdebugSetType(p, MEMTYPE_HEAP); - sqlite3_free(p); -} - -/* -** Change the size of an existing memory allocation -*/ -SQLITE_PRIVATE void *sqlite3Realloc(void *pOld, int nBytes){ - int nOld, nNew, nDiff; - void *pNew; - if( pOld==0 ){ - return sqlite3Malloc(nBytes); /* IMP: R-28354-25769 */ - } - if( nBytes<=0 ){ - sqlite3_free(pOld); /* IMP: R-31593-10574 */ - return 0; - } - if( nBytes>=0x7fffff00 ){ - /* The 0x7ffff00 limit term is explained in comments on sqlite3Malloc() */ - return 0; - } - nOld = sqlite3MallocSize(pOld); - /* IMPLEMENTATION-OF: R-46199-30249 SQLite guarantees that the second - ** argument to xRealloc is always a value returned by a prior call to - ** xRoundup. 
*/ - nNew = sqlite3GlobalConfig.m.xRoundup(nBytes); - if( nOld==nNew ){ - pNew = pOld; - }else if( sqlite3GlobalConfig.bMemstat ){ - sqlite3_mutex_enter(mem0.mutex); - sqlite3StatusSet(SQLITE_STATUS_MALLOC_SIZE, nBytes); - nDiff = nNew - nOld; - if( sqlite3StatusValue(SQLITE_STATUS_MEMORY_USED) >= - mem0.alarmThreshold-nDiff ){ - sqlite3MallocAlarm(nDiff); - } - assert( sqlite3MemdebugHasType(pOld, MEMTYPE_HEAP) ); - assert( sqlite3MemdebugNoType(pOld, ~MEMTYPE_HEAP) ); - pNew = sqlite3GlobalConfig.m.xRealloc(pOld, nNew); - if( pNew==0 && mem0.alarmCallback ){ - sqlite3MallocAlarm(nBytes); - pNew = sqlite3GlobalConfig.m.xRealloc(pOld, nNew); - } - if( pNew ){ - nNew = sqlite3MallocSize(pNew); - sqlite3StatusAdd(SQLITE_STATUS_MEMORY_USED, nNew-nOld); - } - sqlite3_mutex_leave(mem0.mutex); - }else{ - pNew = sqlite3GlobalConfig.m.xRealloc(pOld, nNew); - } - assert( EIGHT_BYTE_ALIGNMENT(pNew) ); /* IMP: R-04675-44850 */ - return pNew; -} - -/* -** The public interface to sqlite3Realloc. Make sure that the memory -** subsystem is initialized prior to invoking sqliteRealloc. -*/ -SQLITE_API void *sqlite3_realloc(void *pOld, int n){ -#ifndef SQLITE_OMIT_AUTOINIT - if( sqlite3_initialize() ) return 0; -#endif - return sqlite3Realloc(pOld, n); -} - - -/* -** Allocate and zero memory. -*/ -SQLITE_PRIVATE void *sqlite3MallocZero(int n){ - void *p = sqlite3Malloc(n); - if( p ){ - memset(p, 0, n); - } - return p; -} - -/* -** Allocate and zero memory. If the allocation fails, make -** the mallocFailed flag in the connection pointer. -*/ -SQLITE_PRIVATE void *sqlite3DbMallocZero(sqlite3 *db, int n){ - void *p = sqlite3DbMallocRaw(db, n); - if( p ){ - memset(p, 0, n); - } - return p; -} - -/* -** Allocate and zero memory. If the allocation fails, make -** the mallocFailed flag in the connection pointer. -** -** If db!=0 and db->mallocFailed is true (indicating a prior malloc -** failure on the same database connection) then always return 0. -** Hence for a particular database connection, once malloc starts -** failing, it fails consistently until mallocFailed is reset. -** This is an important assumption. There are many places in the -** code that do things like this: -** -** int *a = (int*)sqlite3DbMallocRaw(db, 100); -** int *b = (int*)sqlite3DbMallocRaw(db, 200); -** if( b ) a[10] = 9; -** -** In other words, if a subsequent malloc (ex: "b") worked, it is assumed -** that all prior mallocs (ex: "a") worked too. -*/ -SQLITE_PRIVATE void *sqlite3DbMallocRaw(sqlite3 *db, int n){ - void *p; - assert( db==0 || sqlite3_mutex_held(db->mutex) ); - assert( db==0 || db->pnBytesFreed==0 ); -#ifndef SQLITE_OMIT_LOOKASIDE - if( db ){ - LookasideSlot *pBuf; - if( db->mallocFailed ){ - return 0; - } - if( db->lookaside.bEnabled ){ - if( n>db->lookaside.sz ){ - db->lookaside.anStat[1]++; - }else if( (pBuf = db->lookaside.pFree)==0 ){ - db->lookaside.anStat[2]++; - }else{ - db->lookaside.pFree = pBuf->pNext; - db->lookaside.nOut++; - db->lookaside.anStat[0]++; - if( db->lookaside.nOut>db->lookaside.mxOut ){ - db->lookaside.mxOut = db->lookaside.nOut; - } - return (void*)pBuf; - } - } - } -#else - if( db && db->mallocFailed ){ - return 0; - } -#endif - p = sqlite3Malloc(n); - if( !p && db ){ - db->mallocFailed = 1; - } - sqlite3MemdebugSetType(p, MEMTYPE_DB | - ((db && db->lookaside.bEnabled) ? MEMTYPE_LOOKASIDE : MEMTYPE_HEAP)); - return p; -} - -/* -** Resize the block of memory pointed to by p to n bytes. If the -** resize fails, set the mallocFailed flag in the connection object. 
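Editor's note: the lookaside pool that sqlite3DbMallocRaw() above draws from is sized per connection, either globally with SQLITE_CONFIG_LOOKASIDE or per database handle as sketched here. The slot size and count are arbitrary; passing NULL lets SQLite allocate the buffer itself.

#include "sqlite3.h"

/* Resize one connection's lookaside pool to 512 slots of 256 bytes each. */
int tune_lookaside(sqlite3 *db){
  return sqlite3_db_config(db, SQLITE_DBCONFIG_LOOKASIDE, NULL, 256, 512);
}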
-*/ -SQLITE_PRIVATE void *sqlite3DbRealloc(sqlite3 *db, void *p, int n){ - void *pNew = 0; - assert( db!=0 ); - assert( sqlite3_mutex_held(db->mutex) ); - if( db->mallocFailed==0 ){ - if( p==0 ){ - return sqlite3DbMallocRaw(db, n); - } - if( isLookaside(db, p) ){ - if( n<=db->lookaside.sz ){ - return p; - } - pNew = sqlite3DbMallocRaw(db, n); - if( pNew ){ - memcpy(pNew, p, db->lookaside.sz); - sqlite3DbFree(db, p); - } - }else{ - assert( sqlite3MemdebugHasType(p, MEMTYPE_DB) ); - assert( sqlite3MemdebugHasType(p, MEMTYPE_LOOKASIDE|MEMTYPE_HEAP) ); - sqlite3MemdebugSetType(p, MEMTYPE_HEAP); - pNew = sqlite3_realloc(p, n); - if( !pNew ){ - sqlite3MemdebugSetType(p, MEMTYPE_DB|MEMTYPE_HEAP); - db->mallocFailed = 1; - } - sqlite3MemdebugSetType(pNew, MEMTYPE_DB | - (db->lookaside.bEnabled ? MEMTYPE_LOOKASIDE : MEMTYPE_HEAP)); - } - } - return pNew; -} - -/* -** Attempt to reallocate p. If the reallocation fails, then free p -** and set the mallocFailed flag in the database connection. -*/ -SQLITE_PRIVATE void *sqlite3DbReallocOrFree(sqlite3 *db, void *p, int n){ - void *pNew; - pNew = sqlite3DbRealloc(db, p, n); - if( !pNew ){ - sqlite3DbFree(db, p); - } - return pNew; -} - -/* -** Make a copy of a string in memory obtained from sqliteMalloc(). These -** functions call sqlite3MallocRaw() directly instead of sqliteMalloc(). This -** is because when memory debugging is turned on, these two functions are -** called via macros that record the current file and line number in the -** ThreadData structure. -*/ -SQLITE_PRIVATE char *sqlite3DbStrDup(sqlite3 *db, const char *z){ - char *zNew; - size_t n; - if( z==0 ){ - return 0; - } - n = sqlite3Strlen30(z) + 1; - assert( (n&0x7fffffff)==n ); - zNew = sqlite3DbMallocRaw(db, (int)n); - if( zNew ){ - memcpy(zNew, z, n); - } - return zNew; -} -SQLITE_PRIVATE char *sqlite3DbStrNDup(sqlite3 *db, const char *z, int n){ - char *zNew; - if( z==0 ){ - return 0; - } - assert( (n&0x7fffffff)==n ); - zNew = sqlite3DbMallocRaw(db, n+1); - if( zNew ){ - memcpy(zNew, z, n); - zNew[n] = 0; - } - return zNew; -} - -/* -** Create a string from the zFromat argument and the va_list that follows. -** Store the string in memory obtained from sqliteMalloc() and make *pz -** point to that string. -*/ -SQLITE_PRIVATE void sqlite3SetString(char **pz, sqlite3 *db, const char *zFormat, ...){ - va_list ap; - char *z; - - va_start(ap, zFormat); - z = sqlite3VMPrintf(db, zFormat, ap); - va_end(ap); - sqlite3DbFree(db, *pz); - *pz = z; -} - - -/* -** This function must be called before exiting any API function (i.e. -** returning control to the user) that has called sqlite3_malloc or -** sqlite3_realloc. -** -** The returned value is normally a copy of the second argument to this -** function. However, if a malloc() failure has occurred since the previous -** invocation SQLITE_NOMEM is returned instead. -** -** If the first argument, db, is not NULL and a malloc() error has occurred, -** then the connection error-code (the value returned by sqlite3_errcode()) -** is set to SQLITE_NOMEM. -*/ -SQLITE_PRIVATE int sqlite3ApiExit(sqlite3* db, int rc){ - /* If the db handle is not NULL, then we must hold the connection handle - ** mutex here. Otherwise the read (and possible write) of db->mallocFailed - ** is unsafe, as is the call to sqlite3Error(). - */ - assert( !db || sqlite3_mutex_held(db->mutex) ); - if( db && (db->mallocFailed || rc==SQLITE_IOERR_NOMEM) ){ - sqlite3Error(db, SQLITE_NOMEM, 0); - db->mallocFailed = 0; - rc = SQLITE_NOMEM; - } - return rc & (db ? 
db->errMask : 0xff); -} - -/************** End of malloc.c **********************************************/ -/************** Begin file printf.c ******************************************/ -/* -** The "printf" code that follows dates from the 1980's. It is in -** the public domain. The original comments are included here for -** completeness. They are very out-of-date but might be useful as -** an historical reference. Most of the "enhancements" have been backed -** out so that the functionality is now the same as standard printf(). -** -************************************************************************** -** -** This file contains code for a set of "printf"-like routines. These -** routines format strings much like the printf() from the standard C -** library, though the implementation here has enhancements to support -** SQLlite. -*/ - -/* -** Conversion types fall into various categories as defined by the -** following enumeration. -*/ -#define etRADIX 1 /* Integer types. %d, %x, %o, and so forth */ -#define etFLOAT 2 /* Floating point. %f */ -#define etEXP 3 /* Exponentional notation. %e and %E */ -#define etGENERIC 4 /* Floating or exponential, depending on exponent. %g */ -#define etSIZE 5 /* Return number of characters processed so far. %n */ -#define etSTRING 6 /* Strings. %s */ -#define etDYNSTRING 7 /* Dynamically allocated strings. %z */ -#define etPERCENT 8 /* Percent symbol. %% */ -#define etCHARX 9 /* Characters. %c */ -/* The rest are extensions, not normally found in printf() */ -#define etSQLESCAPE 10 /* Strings with '\'' doubled. %q */ -#define etSQLESCAPE2 11 /* Strings with '\'' doubled and enclosed in '', - NULL pointers replaced by SQL NULL. %Q */ -#define etTOKEN 12 /* a pointer to a Token structure */ -#define etSRCLIST 13 /* a pointer to a SrcList */ -#define etPOINTER 14 /* The %p conversion */ -#define etSQLESCAPE3 15 /* %w -> Strings with '\"' doubled */ -#define etORDINAL 16 /* %r -> 1st, 2nd, 3rd, 4th, etc. English only */ - -#define etINVALID 0 /* Any unrecognized conversion type */ - - -/* -** An "etByte" is an 8-bit unsigned value. -*/ -typedef unsigned char etByte; - -/* -** Each builtin conversion character (ex: the 'd' in "%d") is described -** by an instance of the following structure -*/ -typedef struct et_info { /* Information about each format field */ - char fmttype; /* The format field code letter */ - etByte base; /* The base for radix conversion */ - etByte flags; /* One or more of FLAG_ constants below */ - etByte type; /* Conversion paradigm */ - etByte charset; /* Offset into aDigits[] of the digits string */ - etByte prefix; /* Offset into aPrefix[] of the prefix string */ -} et_info; - -/* -** Allowed values for et_info.flags -*/ -#define FLAG_SIGNED 1 /* True if the value to convert is signed */ -#define FLAG_INTERN 2 /* True if for internal use only */ -#define FLAG_STRING 4 /* Allow infinity precision */ - - -/* -** The following table is searched linearly, so it is good to put the -** most frequently used conversion types first. 
-*/ -static const char aDigits[] = "0123456789ABCDEF0123456789abcdef"; -static const char aPrefix[] = "-x0\000X0"; -static const et_info fmtinfo[] = { - { 'd', 10, 1, etRADIX, 0, 0 }, - { 's', 0, 4, etSTRING, 0, 0 }, - { 'g', 0, 1, etGENERIC, 30, 0 }, - { 'z', 0, 4, etDYNSTRING, 0, 0 }, - { 'q', 0, 4, etSQLESCAPE, 0, 0 }, - { 'Q', 0, 4, etSQLESCAPE2, 0, 0 }, - { 'w', 0, 4, etSQLESCAPE3, 0, 0 }, - { 'c', 0, 0, etCHARX, 0, 0 }, - { 'o', 8, 0, etRADIX, 0, 2 }, - { 'u', 10, 0, etRADIX, 0, 0 }, - { 'x', 16, 0, etRADIX, 16, 1 }, - { 'X', 16, 0, etRADIX, 0, 4 }, -#ifndef SQLITE_OMIT_FLOATING_POINT - { 'f', 0, 1, etFLOAT, 0, 0 }, - { 'e', 0, 1, etEXP, 30, 0 }, - { 'E', 0, 1, etEXP, 14, 0 }, - { 'G', 0, 1, etGENERIC, 14, 0 }, -#endif - { 'i', 10, 1, etRADIX, 0, 0 }, - { 'n', 0, 0, etSIZE, 0, 0 }, - { '%', 0, 0, etPERCENT, 0, 0 }, - { 'p', 16, 0, etPOINTER, 0, 1 }, - -/* All the rest have the FLAG_INTERN bit set and are thus for internal -** use only */ - { 'T', 0, 2, etTOKEN, 0, 0 }, - { 'S', 0, 2, etSRCLIST, 0, 0 }, - { 'r', 10, 3, etORDINAL, 0, 0 }, -}; - -/* -** If SQLITE_OMIT_FLOATING_POINT is defined, then none of the floating point -** conversions will work. -*/ -#ifndef SQLITE_OMIT_FLOATING_POINT -/* -** "*val" is a double such that 0.1 <= *val < 10.0 -** Return the ascii code for the leading digit of *val, then -** multiply "*val" by 10.0 to renormalize. -** -** Example: -** input: *val = 3.14159 -** output: *val = 1.4159 function return = '3' -** -** The counter *cnt is incremented each time. After counter exceeds -** 16 (the number of significant digits in a 64-bit float) '0' is -** always returned. -*/ -static char et_getdigit(LONGDOUBLE_TYPE *val, int *cnt){ - int digit; - LONGDOUBLE_TYPE d; - if( (*cnt)<=0 ) return '0'; - (*cnt)--; - digit = (int)*val; - d = digit; - digit += '0'; - *val = (*val - d)*10.0; - return (char)digit; -} -#endif /* SQLITE_OMIT_FLOATING_POINT */ - -/* -** Set the StrAccum object to an error mode. -*/ -static void setStrAccumError(StrAccum *p, u8 eError){ - p->accError = eError; - p->nAlloc = 0; -} - -/* -** Extra argument values from a PrintfArguments object -*/ -static sqlite3_int64 getIntArg(PrintfArguments *p){ - if( p->nArg<=p->nUsed ) return 0; - return sqlite3_value_int64(p->apArg[p->nUsed++]); -} -static double getDoubleArg(PrintfArguments *p){ - if( p->nArg<=p->nUsed ) return 0.0; - return sqlite3_value_double(p->apArg[p->nUsed++]); -} -static char *getTextArg(PrintfArguments *p){ - if( p->nArg<=p->nUsed ) return 0; - return (char*)sqlite3_value_text(p->apArg[p->nUsed++]); -} - - -/* -** On machines with a small stack size, you can redefine the -** SQLITE_PRINT_BUF_SIZE to be something smaller, if desired. -*/ -#ifndef SQLITE_PRINT_BUF_SIZE -# define SQLITE_PRINT_BUF_SIZE 70 -#endif -#define etBUFSIZE SQLITE_PRINT_BUF_SIZE /* Size of the output buffer */ - -/* -** Render a string given by "fmt" into the StrAccum object. 
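et_getdigit() above peels decimal digits off a value that has been normalized into the range [1.0, 10.0): take the integer part, subtract it, multiply by ten, repeat. A tiny standalone illustration of that step (not SQLite's code; the printed digits are subject to binary rounding):

#include <stdio.h>

int main(void){
  double v = 3.14159;                /* already normalized into [1.0, 10.0) */
  int i;
  for(i=0; i<6; i++){
    int digit = (int)v;              /* leading decimal digit */
    putchar('0' + digit);
    if( i==0 ) putchar('.');
    v = (v - digit) * 10.0;          /* shift the next digit into the ones place */
  }
  putchar('\n');                     /* prints an approximation of 3.14159 */
  return 0;
}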
-*/ -SQLITE_PRIVATE void sqlite3VXPrintf( - StrAccum *pAccum, /* Accumulate results here */ - u32 bFlags, /* SQLITE_PRINTF_* flags */ - const char *fmt, /* Format string */ - va_list ap /* arguments */ -){ - int c; /* Next character in the format string */ - char *bufpt; /* Pointer to the conversion buffer */ - int precision; /* Precision of the current field */ - int length; /* Length of the field */ - int idx; /* A general purpose loop counter */ - int width; /* Width of the current field */ - etByte flag_leftjustify; /* True if "-" flag is present */ - etByte flag_plussign; /* True if "+" flag is present */ - etByte flag_blanksign; /* True if " " flag is present */ - etByte flag_alternateform; /* True if "#" flag is present */ - etByte flag_altform2; /* True if "!" flag is present */ - etByte flag_zeropad; /* True if field width constant starts with zero */ - etByte flag_long; /* True if "l" flag is present */ - etByte flag_longlong; /* True if the "ll" flag is present */ - etByte done; /* Loop termination flag */ - etByte xtype = 0; /* Conversion paradigm */ - u8 bArgList; /* True for SQLITE_PRINTF_SQLFUNC */ - u8 useIntern; /* Ok to use internal conversions (ex: %T) */ - char prefix; /* Prefix character. "+" or "-" or " " or '\0'. */ - sqlite_uint64 longvalue; /* Value for integer types */ - LONGDOUBLE_TYPE realvalue; /* Value for real types */ - const et_info *infop; /* Pointer to the appropriate info structure */ - char *zOut; /* Rendering buffer */ - int nOut; /* Size of the rendering buffer */ - char *zExtra; /* Malloced memory used by some conversion */ -#ifndef SQLITE_OMIT_FLOATING_POINT - int exp, e2; /* exponent of real numbers */ - int nsd; /* Number of significant digits returned */ - double rounder; /* Used for rounding floating point values */ - etByte flag_dp; /* True if decimal point should be shown */ - etByte flag_rtz; /* True if trailing zeros should be removed */ -#endif - PrintfArguments *pArgList = 0; /* Arguments for SQLITE_PRINTF_SQLFUNC */ - char buf[etBUFSIZE]; /* Conversion buffer */ - - bufpt = 0; - if( bFlags ){ - if( (bArgList = (bFlags & SQLITE_PRINTF_SQLFUNC))!=0 ){ - pArgList = va_arg(ap, PrintfArguments*); - } - useIntern = bFlags & SQLITE_PRINTF_INTERNAL; - }else{ - bArgList = useIntern = 0; - } - for(; (c=(*fmt))!=0; ++fmt){ - if( c!='%' ){ - bufpt = (char *)fmt; - while( (c=(*++fmt))!='%' && c!=0 ){}; - sqlite3StrAccumAppend(pAccum, bufpt, (int)(fmt - bufpt)); - if( c==0 ) break; - } - if( (c=(*++fmt))==0 ){ - sqlite3StrAccumAppend(pAccum, "%", 1); - break; - } - /* Find out what flags are present */ - flag_leftjustify = flag_plussign = flag_blanksign = - flag_alternateform = flag_altform2 = flag_zeropad = 0; - done = 0; - do{ - switch( c ){ - case '-': flag_leftjustify = 1; break; - case '+': flag_plussign = 1; break; - case ' ': flag_blanksign = 1; break; - case '#': flag_alternateform = 1; break; - case '!': flag_altform2 = 1; break; - case '0': flag_zeropad = 1; break; - default: done = 1; break; - } - }while( !done && (c=(*++fmt))!=0 ); - /* Get the field width */ - width = 0; - if( c=='*' ){ - if( bArgList ){ - width = (int)getIntArg(pArgList); - }else{ - width = va_arg(ap,int); - } - if( width<0 ){ - flag_leftjustify = 1; - width = -width; - } - c = *++fmt; - }else{ - while( c>='0' && c<='9' ){ - width = width*10 + c - '0'; - c = *++fmt; - } - } - /* Get the precision */ - if( c=='.' 
){ - precision = 0; - c = *++fmt; - if( c=='*' ){ - if( bArgList ){ - precision = (int)getIntArg(pArgList); - }else{ - precision = va_arg(ap,int); - } - if( precision<0 ) precision = -precision; - c = *++fmt; - }else{ - while( c>='0' && c<='9' ){ - precision = precision*10 + c - '0'; - c = *++fmt; - } - } - }else{ - precision = -1; - } - /* Get the conversion type modifier */ - if( c=='l' ){ - flag_long = 1; - c = *++fmt; - if( c=='l' ){ - flag_longlong = 1; - c = *++fmt; - }else{ - flag_longlong = 0; - } - }else{ - flag_long = flag_longlong = 0; - } - /* Fetch the info entry for the field */ - infop = &fmtinfo[0]; - xtype = etINVALID; - for(idx=0; idxflags & FLAG_INTERN)==0 ){ - xtype = infop->type; - }else{ - return; - } - break; - } - } - zExtra = 0; - - /* - ** At this point, variables are initialized as follows: - ** - ** flag_alternateform TRUE if a '#' is present. - ** flag_altform2 TRUE if a '!' is present. - ** flag_plussign TRUE if a '+' is present. - ** flag_leftjustify TRUE if a '-' is present or if the - ** field width was negative. - ** flag_zeropad TRUE if the width began with 0. - ** flag_long TRUE if the letter 'l' (ell) prefixed - ** the conversion character. - ** flag_longlong TRUE if the letter 'll' (ell ell) prefixed - ** the conversion character. - ** flag_blanksign TRUE if a ' ' is present. - ** width The specified field width. This is - ** always non-negative. Zero is the default. - ** precision The specified precision. The default - ** is -1. - ** xtype The class of the conversion. - ** infop Pointer to the appropriate info struct. - */ - switch( xtype ){ - case etPOINTER: - flag_longlong = sizeof(char*)==sizeof(i64); - flag_long = sizeof(char*)==sizeof(long int); - /* Fall through into the next case */ - case etORDINAL: - case etRADIX: - if( infop->flags & FLAG_SIGNED ){ - i64 v; - if( bArgList ){ - v = getIntArg(pArgList); - }else if( flag_longlong ){ - v = va_arg(ap,i64); - }else if( flag_long ){ - v = va_arg(ap,long int); - }else{ - v = va_arg(ap,int); - } - if( v<0 ){ - if( v==SMALLEST_INT64 ){ - longvalue = ((u64)1)<<63; - }else{ - longvalue = -v; - } - prefix = '-'; - }else{ - longvalue = v; - if( flag_plussign ) prefix = '+'; - else if( flag_blanksign ) prefix = ' '; - else prefix = 0; - } - }else{ - if( bArgList ){ - longvalue = (u64)getIntArg(pArgList); - }else if( flag_longlong ){ - longvalue = va_arg(ap,u64); - }else if( flag_long ){ - longvalue = va_arg(ap,unsigned long int); - }else{ - longvalue = va_arg(ap,unsigned int); - } - prefix = 0; - } - if( longvalue==0 ) flag_alternateform = 0; - if( flag_zeropad && precision=4 || (longvalue/10)%10==1 ){ - x = 0; - } - *(--bufpt) = zOrd[x*2+1]; - *(--bufpt) = zOrd[x*2]; - } - { - const char *cset = &aDigits[infop->charset]; - u8 base = infop->base; - do{ /* Convert to ascii */ - *(--bufpt) = cset[longvalue%base]; - longvalue = longvalue/base; - }while( longvalue>0 ); - } - length = (int)(&zOut[nOut-1]-bufpt); - for(idx=precision-length; idx>0; idx--){ - *(--bufpt) = '0'; /* Zero pad */ - } - if( prefix ) *(--bufpt) = prefix; /* Add sign */ - if( flag_alternateform && infop->prefix ){ /* Add "0" or "0x" */ - const char *pre; - char x; - pre = &aPrefix[infop->prefix]; - for(; (x=(*pre))!=0; pre++) *(--bufpt) = x; - } - length = (int)(&zOut[nOut-1]-bufpt); - break; - case etFLOAT: - case etEXP: - case etGENERIC: - if( bArgList ){ - realvalue = getDoubleArg(pArgList); - }else{ - realvalue = va_arg(ap,double); - } -#ifdef SQLITE_OMIT_FLOATING_POINT - length = 0; -#else - if( precision<0 ) precision = 6; /* Set 
default precision */ - if( realvalue<0.0 ){ - realvalue = -realvalue; - prefix = '-'; - }else{ - if( flag_plussign ) prefix = '+'; - else if( flag_blanksign ) prefix = ' '; - else prefix = 0; - } - if( xtype==etGENERIC && precision>0 ) precision--; - for(idx=precision, rounder=0.5; idx>0; idx--, rounder*=0.1){} - if( xtype==etFLOAT ) realvalue += rounder; - /* Normalize realvalue to within 10.0 > realvalue >= 1.0 */ - exp = 0; - if( sqlite3IsNaN((double)realvalue) ){ - bufpt = "NaN"; - length = 3; - break; - } - if( realvalue>0.0 ){ - LONGDOUBLE_TYPE scale = 1.0; - while( realvalue>=1e100*scale && exp<=350 ){ scale *= 1e100;exp+=100;} - while( realvalue>=1e64*scale && exp<=350 ){ scale *= 1e64; exp+=64; } - while( realvalue>=1e8*scale && exp<=350 ){ scale *= 1e8; exp+=8; } - while( realvalue>=10.0*scale && exp<=350 ){ scale *= 10.0; exp++; } - realvalue /= scale; - while( realvalue<1e-8 ){ realvalue *= 1e8; exp-=8; } - while( realvalue<1.0 ){ realvalue *= 10.0; exp--; } - if( exp>350 ){ - if( prefix=='-' ){ - bufpt = "-Inf"; - }else if( prefix=='+' ){ - bufpt = "+Inf"; - }else{ - bufpt = "Inf"; - } - length = sqlite3Strlen30(bufpt); - break; - } - } - bufpt = buf; - /* - ** If the field type is etGENERIC, then convert to either etEXP - ** or etFLOAT, as appropriate. - */ - if( xtype!=etFLOAT ){ - realvalue += rounder; - if( realvalue>=10.0 ){ realvalue *= 0.1; exp++; } - } - if( xtype==etGENERIC ){ - flag_rtz = !flag_alternateform; - if( exp<-4 || exp>precision ){ - xtype = etEXP; - }else{ - precision = precision - exp; - xtype = etFLOAT; - } - }else{ - flag_rtz = flag_altform2; - } - if( xtype==etEXP ){ - e2 = 0; - }else{ - e2 = exp; - } - if( MAX(e2,0)+precision+width > etBUFSIZE - 15 ){ - bufpt = zExtra = sqlite3Malloc( MAX(e2,0)+precision+width+15 ); - if( bufpt==0 ){ - setStrAccumError(pAccum, STRACCUM_NOMEM); - return; - } - } - zOut = bufpt; - nsd = 16 + flag_altform2*10; - flag_dp = (precision>0 ?1:0) | flag_alternateform | flag_altform2; - /* The sign in front of the number */ - if( prefix ){ - *(bufpt++) = prefix; - } - /* Digits prior to the decimal point */ - if( e2<0 ){ - *(bufpt++) = '0'; - }else{ - for(; e2>=0; e2--){ - *(bufpt++) = et_getdigit(&realvalue,&nsd); - } - } - /* The decimal point */ - if( flag_dp ){ - *(bufpt++) = '.'; - } - /* "0" digits after the decimal point but before the first - ** significant digit of the number */ - for(e2++; e2<0; precision--, e2++){ - assert( precision>0 ); - *(bufpt++) = '0'; - } - /* Significant digits after the decimal point */ - while( (precision--)>0 ){ - *(bufpt++) = et_getdigit(&realvalue,&nsd); - } - /* Remove trailing zeros and the "." if no digits follow the "." */ - if( flag_rtz && flag_dp ){ - while( bufpt[-1]=='0' ) *(--bufpt) = 0; - assert( bufpt>zOut ); - if( bufpt[-1]=='.' ){ - if( flag_altform2 ){ - *(bufpt++) = '0'; - }else{ - *(--bufpt) = 0; - } - } - } - /* Add the "eNNN" suffix */ - if( xtype==etEXP ){ - *(bufpt++) = aDigits[infop->charset]; - if( exp<0 ){ - *(bufpt++) = '-'; exp = -exp; - }else{ - *(bufpt++) = '+'; - } - if( exp>=100 ){ - *(bufpt++) = (char)((exp/100)+'0'); /* 100's digit */ - exp %= 100; - } - *(bufpt++) = (char)(exp/10+'0'); /* 10's digit */ - *(bufpt++) = (char)(exp%10+'0'); /* 1's digit */ - } - *bufpt = 0; - - /* The converted number is in buf[] and zero terminated. Output it. - ** Note that the number is in the usual order, not reversed as with - ** integer conversions. 
*/ - length = (int)(bufpt-zOut); - bufpt = zOut; - - /* Special case: Add leading zeros if the flag_zeropad flag is - ** set and we are not left justified */ - if( flag_zeropad && !flag_leftjustify && length < width){ - int i; - int nPad = width - length; - for(i=width; i>=nPad; i--){ - bufpt[i] = bufpt[i-nPad]; - } - i = prefix!=0; - while( nPad-- ) bufpt[i++] = '0'; - length = width; - } -#endif /* !defined(SQLITE_OMIT_FLOATING_POINT) */ - break; - case etSIZE: - if( !bArgList ){ - *(va_arg(ap,int*)) = pAccum->nChar; - } - length = width = 0; - break; - case etPERCENT: - buf[0] = '%'; - bufpt = buf; - length = 1; - break; - case etCHARX: - if( bArgList ){ - bufpt = getTextArg(pArgList); - c = bufpt ? bufpt[0] : 0; - }else{ - c = va_arg(ap,int); - } - buf[0] = (char)c; - if( precision>=0 ){ - for(idx=1; idx=0 ){ - for(length=0; lengthetBUFSIZE ){ - bufpt = zExtra = sqlite3Malloc( n ); - if( bufpt==0 ){ - setStrAccumError(pAccum, STRACCUM_NOMEM); - return; - } - }else{ - bufpt = buf; - } - j = 0; - if( needQuote ) bufpt[j++] = q; - k = i; - for(i=0; i=0 && precisionn ){ - sqlite3StrAccumAppend(pAccum, (const char*)pToken->z, pToken->n); - } - length = width = 0; - break; - } - case etSRCLIST: { - SrcList *pSrc = va_arg(ap, SrcList*); - int k = va_arg(ap, int); - struct SrcList_item *pItem = &pSrc->a[k]; - assert( bArgList==0 ); - assert( k>=0 && knSrc ); - if( pItem->zDatabase ){ - sqlite3StrAccumAppendAll(pAccum, pItem->zDatabase); - sqlite3StrAccumAppend(pAccum, ".", 1); - } - sqlite3StrAccumAppendAll(pAccum, pItem->zName); - length = width = 0; - break; - } - default: { - assert( xtype==etINVALID ); - return; - } - }/* End switch over the format type */ - /* - ** The text of the conversion is pointed to by "bufpt" and is - ** "length" characters long. The field width is "width". Do - ** the output. - */ - width -= length; - if( width>0 && !flag_leftjustify ) sqlite3AppendSpace(pAccum, width); - sqlite3StrAccumAppend(pAccum, bufpt, length); - if( width>0 && flag_leftjustify ) sqlite3AppendSpace(pAccum, width); - - if( zExtra ) sqlite3_free(zExtra); - }/* End for loop over the format string */ -} /* End of function */ - -/* -** Enlarge the memory allocation on a StrAccum object so that it is -** able to accept at least N more bytes of text. -** -** Return the number of bytes of text that StrAccum is able to accept -** after the attempted enlargement. The value returned might be zero. -*/ -static int sqlite3StrAccumEnlarge(StrAccum *p, int N){ - char *zNew; - assert( p->nChar+N >= p->nAlloc ); /* Only called if really needed */ - if( p->accError ){ - testcase(p->accError==STRACCUM_TOOBIG); - testcase(p->accError==STRACCUM_NOMEM); - return 0; - } - if( !p->useMalloc ){ - N = p->nAlloc - p->nChar - 1; - setStrAccumError(p, STRACCUM_TOOBIG); - return N; - }else{ - char *zOld = (p->zText==p->zBase ? 0 : p->zText); - i64 szNew = p->nChar; - szNew += N + 1; - if( szNew > p->mxAlloc ){ - sqlite3StrAccumReset(p); - setStrAccumError(p, STRACCUM_TOOBIG); - return 0; - }else{ - p->nAlloc = (int)szNew; - } - if( p->useMalloc==1 ){ - zNew = sqlite3DbRealloc(p->db, zOld, p->nAlloc); - }else{ - zNew = sqlite3_realloc(zOld, p->nAlloc); - } - if( zNew ){ - assert( p->zText!=0 || p->nChar==0 ); - if( zOld==0 && p->nChar>0 ) memcpy(zNew, p->zText, p->nChar); - p->zText = zNew; - }else{ - sqlite3StrAccumReset(p); - setStrAccumError(p, STRACCUM_NOMEM); - return 0; - } - } - return N; -} - -/* -** Append N space characters to the given string buffer. 
-*/ -SQLITE_PRIVATE void sqlite3AppendSpace(StrAccum *p, int N){ - if( p->nChar+N >= p->nAlloc && (N = sqlite3StrAccumEnlarge(p, N))<=0 ) return; - while( (N--)>0 ) p->zText[p->nChar++] = ' '; -} - -/* -** The StrAccum "p" is not large enough to accept N new bytes of z[]. -** So enlarge if first, then do the append. -** -** This is a helper routine to sqlite3StrAccumAppend() that does special-case -** work (enlarging the buffer) using tail recursion, so that the -** sqlite3StrAccumAppend() routine can use fast calling semantics. -*/ -static void enlargeAndAppend(StrAccum *p, const char *z, int N){ - N = sqlite3StrAccumEnlarge(p, N); - if( N>0 ){ - memcpy(&p->zText[p->nChar], z, N); - p->nChar += N; - } -} - -/* -** Append N bytes of text from z to the StrAccum object. Increase the -** size of the memory allocation for StrAccum if necessary. -*/ -SQLITE_PRIVATE void sqlite3StrAccumAppend(StrAccum *p, const char *z, int N){ - assert( z!=0 ); - assert( p->zText!=0 || p->nChar==0 || p->accError ); - assert( N>=0 ); - assert( p->accError==0 || p->nAlloc==0 ); - if( p->nChar+N >= p->nAlloc ){ - enlargeAndAppend(p,z,N); - return; - } - assert( p->zText ); - memcpy(&p->zText[p->nChar], z, N); - p->nChar += N; -} - -/* -** Append the complete text of zero-terminated string z[] to the p string. -*/ -SQLITE_PRIVATE void sqlite3StrAccumAppendAll(StrAccum *p, const char *z){ - sqlite3StrAccumAppend(p, z, sqlite3Strlen30(z)); -} - - -/* -** Finish off a string by making sure it is zero-terminated. -** Return a pointer to the resulting string. Return a NULL -** pointer if any kind of error was encountered. -*/ -SQLITE_PRIVATE char *sqlite3StrAccumFinish(StrAccum *p){ - if( p->zText ){ - p->zText[p->nChar] = 0; - if( p->useMalloc && p->zText==p->zBase ){ - if( p->useMalloc==1 ){ - p->zText = sqlite3DbMallocRaw(p->db, p->nChar+1 ); - }else{ - p->zText = sqlite3_malloc(p->nChar+1); - } - if( p->zText ){ - memcpy(p->zText, p->zBase, p->nChar+1); - }else{ - setStrAccumError(p, STRACCUM_NOMEM); - } - } - } - return p->zText; -} - -/* -** Reset an StrAccum string. Reclaim all malloced memory. -*/ -SQLITE_PRIVATE void sqlite3StrAccumReset(StrAccum *p){ - if( p->zText!=p->zBase ){ - if( p->useMalloc==1 ){ - sqlite3DbFree(p->db, p->zText); - }else{ - sqlite3_free(p->zText); - } - } - p->zText = 0; -} - -/* -** Initialize a string accumulator -*/ -SQLITE_PRIVATE void sqlite3StrAccumInit(StrAccum *p, char *zBase, int n, int mx){ - p->zText = p->zBase = zBase; - p->db = 0; - p->nChar = 0; - p->nAlloc = n; - p->mxAlloc = mx; - p->useMalloc = 1; - p->accError = 0; -} - -/* -** Print into memory obtained from sqliteMalloc(). Use the internal -** %-conversion extensions. -*/ -SQLITE_PRIVATE char *sqlite3VMPrintf(sqlite3 *db, const char *zFormat, va_list ap){ - char *z; - char zBase[SQLITE_PRINT_BUF_SIZE]; - StrAccum acc; - assert( db!=0 ); - sqlite3StrAccumInit(&acc, zBase, sizeof(zBase), - db->aLimit[SQLITE_LIMIT_LENGTH]); - acc.db = db; - sqlite3VXPrintf(&acc, SQLITE_PRINTF_INTERNAL, zFormat, ap); - z = sqlite3StrAccumFinish(&acc); - if( acc.accError==STRACCUM_NOMEM ){ - db->mallocFailed = 1; - } - return z; -} - -/* -** Print into memory obtained from sqliteMalloc(). Use the internal -** %-conversion extensions. 
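The StrAccum routines above follow an init-into-a-stack-buffer, append, finish pattern. A minimal standalone sketch of that shape, with hypothetical names and truncation in place of reallocation; an illustration only, not SQLite's API:

#include <stdio.h>
#include <string.h>

typedef struct Acc { char *z; int n; int nAlloc; } Acc;

static void acc_init(Acc *p, char *zBuf, int n){ p->z = zBuf; p->n = 0; p->nAlloc = n; }

static void acc_append(Acc *p, const char *z){
  int n = (int)strlen(z);
  if( p->n + n >= p->nAlloc ) n = p->nAlloc - p->n - 1;   /* truncate instead of growing */
  if( n>0 ){ memcpy(&p->z[p->n], z, n); p->n += n; }
}

static char *acc_finish(Acc *p){ p->z[p->n] = 0; return p->z; }

int main(void){
  char buf[32];
  Acc a;
  acc_init(&a, buf, sizeof(buf));
  acc_append(&a, "hello, ");
  acc_append(&a, "world");
  puts(acc_finish(&a));              /* -> hello, world */
  return 0;
}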
-*/ -SQLITE_PRIVATE char *sqlite3MPrintf(sqlite3 *db, const char *zFormat, ...){ - va_list ap; - char *z; - va_start(ap, zFormat); - z = sqlite3VMPrintf(db, zFormat, ap); - va_end(ap); - return z; -} - -/* -** Like sqlite3MPrintf(), but call sqlite3DbFree() on zStr after formatting -** the string and before returnning. This routine is intended to be used -** to modify an existing string. For example: -** -** x = sqlite3MPrintf(db, x, "prefix %s suffix", x); -** -*/ -SQLITE_PRIVATE char *sqlite3MAppendf(sqlite3 *db, char *zStr, const char *zFormat, ...){ - va_list ap; - char *z; - va_start(ap, zFormat); - z = sqlite3VMPrintf(db, zFormat, ap); - va_end(ap); - sqlite3DbFree(db, zStr); - return z; -} - -/* -** Print into memory obtained from sqlite3_malloc(). Omit the internal -** %-conversion extensions. -*/ -SQLITE_API char *sqlite3_vmprintf(const char *zFormat, va_list ap){ - char *z; - char zBase[SQLITE_PRINT_BUF_SIZE]; - StrAccum acc; -#ifndef SQLITE_OMIT_AUTOINIT - if( sqlite3_initialize() ) return 0; -#endif - sqlite3StrAccumInit(&acc, zBase, sizeof(zBase), SQLITE_MAX_LENGTH); - acc.useMalloc = 2; - sqlite3VXPrintf(&acc, 0, zFormat, ap); - z = sqlite3StrAccumFinish(&acc); - return z; -} - -/* -** Print into memory obtained from sqlite3_malloc()(). Omit the internal -** %-conversion extensions. -*/ -SQLITE_API char *sqlite3_mprintf(const char *zFormat, ...){ - va_list ap; - char *z; -#ifndef SQLITE_OMIT_AUTOINIT - if( sqlite3_initialize() ) return 0; -#endif - va_start(ap, zFormat); - z = sqlite3_vmprintf(zFormat, ap); - va_end(ap); - return z; -} - -/* -** sqlite3_snprintf() works like snprintf() except that it ignores the -** current locale settings. This is important for SQLite because we -** are not able to use a "," as the decimal point in place of "." as -** specified by some locales. -** -** Oops: The first two arguments of sqlite3_snprintf() are backwards -** from the snprintf() standard. Unfortunately, it is too late to change -** this without breaking compatibility, so we just have to live with the -** mistake. -** -** sqlite3_vsnprintf() is the varargs version. -*/ -SQLITE_API char *sqlite3_vsnprintf(int n, char *zBuf, const char *zFormat, va_list ap){ - StrAccum acc; - if( n<=0 ) return zBuf; - sqlite3StrAccumInit(&acc, zBuf, n, 0); - acc.useMalloc = 0; - sqlite3VXPrintf(&acc, 0, zFormat, ap); - return sqlite3StrAccumFinish(&acc); -} -SQLITE_API char *sqlite3_snprintf(int n, char *zBuf, const char *zFormat, ...){ - char *z; - va_list ap; - va_start(ap,zFormat); - z = sqlite3_vsnprintf(n, zBuf, zFormat, ap); - va_end(ap); - return z; -} - -/* -** This is the routine that actually formats the sqlite3_log() message. -** We house it in a separate routine from sqlite3_log() to avoid using -** stack space on small-stack systems when logging is disabled. -** -** sqlite3_log() must render into a static buffer. It cannot dynamically -** allocate memory because it might be called while the memory allocator -** mutex is held. -*/ -static void renderLogMsg(int iErrCode, const char *zFormat, va_list ap){ - StrAccum acc; /* String accumulator */ - char zMsg[SQLITE_PRINT_BUF_SIZE*3]; /* Complete log message */ - - sqlite3StrAccumInit(&acc, zMsg, sizeof(zMsg), 0); - acc.useMalloc = 0; - sqlite3VXPrintf(&acc, 0, zFormat, ap); - sqlite3GlobalConfig.xLog(sqlite3GlobalConfig.pLogArg, iErrCode, - sqlite3StrAccumFinish(&acc)); -} - -/* -** Format and write a message to the log if logging is enabled. 
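The comment above calls out that sqlite3_snprintf() takes its first two arguments in the opposite order from C snprintf(). A short usage sketch of the call as defined above (assumes sqlite3.h is available):

#include <sqlite3.h>

void example(void){
  char zBuf[50];
  /* SQLite's order: buffer size first, then the buffer itself */
  sqlite3_snprintf(sizeof(zBuf), zBuf, "%d rows", 42);
  /* the C library equivalent would be snprintf(zBuf, sizeof(zBuf), "%d rows", 42) */
}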
-*/ -SQLITE_API void sqlite3_log(int iErrCode, const char *zFormat, ...){ - va_list ap; /* Vararg list */ - if( sqlite3GlobalConfig.xLog ){ - va_start(ap, zFormat); - renderLogMsg(iErrCode, zFormat, ap); - va_end(ap); - } -} - -#if defined(SQLITE_DEBUG) -/* -** A version of printf() that understands %lld. Used for debugging. -** The printf() built into some versions of windows does not understand %lld -** and segfaults if you give it a long long int. -*/ -SQLITE_PRIVATE void sqlite3DebugPrintf(const char *zFormat, ...){ - va_list ap; - StrAccum acc; - char zBuf[500]; - sqlite3StrAccumInit(&acc, zBuf, sizeof(zBuf), 0); - acc.useMalloc = 0; - va_start(ap,zFormat); - sqlite3VXPrintf(&acc, 0, zFormat, ap); - va_end(ap); - sqlite3StrAccumFinish(&acc); - fprintf(stdout,"%s", zBuf); - fflush(stdout); -} -#endif - -/* -** variable-argument wrapper around sqlite3VXPrintf(). -*/ -SQLITE_PRIVATE void sqlite3XPrintf(StrAccum *p, u32 bFlags, const char *zFormat, ...){ - va_list ap; - va_start(ap,zFormat); - sqlite3VXPrintf(p, bFlags, zFormat, ap); - va_end(ap); -} - -/************** End of printf.c **********************************************/ -/************** Begin file random.c ******************************************/ -/* -** 2001 September 15 -** -** The author disclaims copyright to this source code. In place of -** a legal notice, here is a blessing: -** -** May you do good and not evil. -** May you find forgiveness for yourself and forgive others. -** May you share freely, never taking more than you give. -** -************************************************************************* -** This file contains code to implement a pseudo-random number -** generator (PRNG) for SQLite. -** -** Random numbers are used by some of the database backends in order -** to generate random integer keys for tables or random filenames. -*/ - - -/* All threads share a single random number generator. -** This structure is the current state of the generator. -*/ -static SQLITE_WSD struct sqlite3PrngType { - unsigned char isInit; /* True if initialized */ - unsigned char i, j; /* State variables */ - unsigned char s[256]; /* State variables */ -} sqlite3Prng; - -/* -** Return N random bytes. -*/ -SQLITE_API void sqlite3_randomness(int N, void *pBuf){ - unsigned char t; - unsigned char *zBuf = pBuf; - - /* The "wsdPrng" macro will resolve to the pseudo-random number generator - ** state vector. If writable static data is unsupported on the target, - ** we have to locate the state vector at run-time. In the more common - ** case where writable static data is supported, wsdPrng can refer directly - ** to the "sqlite3Prng" state vector declared above. - */ -#ifdef SQLITE_OMIT_WSD - struct sqlite3PrngType *p = &GLOBAL(struct sqlite3PrngType, sqlite3Prng); -# define wsdPrng p[0] -#else -# define wsdPrng sqlite3Prng -#endif - -#if SQLITE_THREADSAFE - sqlite3_mutex *mutex = sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_PRNG); - sqlite3_mutex_enter(mutex); -#endif - - if( N<=0 ){ - wsdPrng.isInit = 0; - sqlite3_mutex_leave(mutex); - return; - } - - /* Initialize the state of the random number generator once, - ** the first time this routine is called. The seed value does - ** not need to contain a lot of randomness since we are not - ** trying to do secure encryption or anything like that... - ** - ** Nothing in this file or anywhere else in SQLite does any kind of - ** encryption. The RC4 algorithm is being used as a PRNG (pseudo-random - ** number generator) not as an encryption device. 
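The comment above stresses that the RC4 algorithm is used here strictly as a pseudo-random number generator, never for encryption. For reference, a standalone sketch of plain RC4 keystream generation (seed once, then draw bytes); this is the textbook algorithm, not SQLite's state machinery:

#include <stdio.h>

static unsigned char S[256];
static unsigned char si, sj;

static void rc4_seed(const unsigned char *k, int klen){
  int i; unsigned char t, j = 0;
  for(i=0; i<256; i++) S[i] = (unsigned char)i;
  for(i=0; i<256; i++){
    j = (unsigned char)(j + S[i] + k[i % klen]);
    t = S[i]; S[i] = S[j]; S[j] = t;     /* key-scheduling swap */
  }
  si = sj = 0;
}

static unsigned char rc4_byte(void){
  unsigned char t;
  si = (unsigned char)(si + 1);
  sj = (unsigned char)(sj + S[si]);
  t = S[si]; S[si] = S[sj]; S[sj] = t;   /* keystream swap */
  return S[(unsigned char)(S[si] + S[sj])];
}

int main(void){
  const unsigned char seed[8] = {1,2,3,4,5,6,7,8};
  int i;
  rc4_seed(seed, sizeof(seed));
  for(i=0; i<8; i++) printf("%02x", rc4_byte());
  printf("\n");
  return 0;
}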
- */ - if( !wsdPrng.isInit ){ - int i; - char k[256]; - wsdPrng.j = 0; - wsdPrng.i = 0; - sqlite3OsRandomness(sqlite3_vfs_find(0), 256, k); - for(i=0; i<256; i++){ - wsdPrng.s[i] = (u8)i; - } - for(i=0; i<256; i++){ - wsdPrng.j += wsdPrng.s[i] + k[i]; - t = wsdPrng.s[wsdPrng.j]; - wsdPrng.s[wsdPrng.j] = wsdPrng.s[i]; - wsdPrng.s[i] = t; - } - wsdPrng.isInit = 1; - } - - assert( N>0 ); - do{ - wsdPrng.i++; - t = wsdPrng.s[wsdPrng.i]; - wsdPrng.j += t; - wsdPrng.s[wsdPrng.i] = wsdPrng.s[wsdPrng.j]; - wsdPrng.s[wsdPrng.j] = t; - t += wsdPrng.s[wsdPrng.i]; - *(zBuf++) = wsdPrng.s[t]; - }while( --N ); - sqlite3_mutex_leave(mutex); -} - -#ifndef SQLITE_OMIT_BUILTIN_TEST -/* -** For testing purposes, we sometimes want to preserve the state of -** PRNG and restore the PRNG to its saved state at a later time, or -** to reset the PRNG to its initial state. These routines accomplish -** those tasks. -** -** The sqlite3_test_control() interface calls these routines to -** control the PRNG. -*/ -static SQLITE_WSD struct sqlite3PrngType sqlite3SavedPrng; -SQLITE_PRIVATE void sqlite3PrngSaveState(void){ - memcpy( - &GLOBAL(struct sqlite3PrngType, sqlite3SavedPrng), - &GLOBAL(struct sqlite3PrngType, sqlite3Prng), - sizeof(sqlite3Prng) - ); -} -SQLITE_PRIVATE void sqlite3PrngRestoreState(void){ - memcpy( - &GLOBAL(struct sqlite3PrngType, sqlite3Prng), - &GLOBAL(struct sqlite3PrngType, sqlite3SavedPrng), - sizeof(sqlite3Prng) - ); -} -#endif /* SQLITE_OMIT_BUILTIN_TEST */ - -/************** End of random.c **********************************************/ -/************** Begin file utf.c *********************************************/ -/* -** 2004 April 13 -** -** The author disclaims copyright to this source code. In place of -** a legal notice, here is a blessing: -** -** May you do good and not evil. -** May you find forgiveness for yourself and forgive others. -** May you share freely, never taking more than you give. -** -************************************************************************* -** This file contains routines used to translate between UTF-8, -** UTF-16, UTF-16BE, and UTF-16LE. -** -** Notes on UTF-8: -** -** Byte-0 Byte-1 Byte-2 Byte-3 Value -** 0xxxxxxx 00000000 00000000 0xxxxxxx -** 110yyyyy 10xxxxxx 00000000 00000yyy yyxxxxxx -** 1110zzzz 10yyyyyy 10xxxxxx 00000000 zzzzyyyy yyxxxxxx -** 11110uuu 10uuzzzz 10yyyyyy 10xxxxxx 000uuuuu zzzzyyyy yyxxxxxx -** -** -** Notes on UTF-16: (with wwww+1==uuuuu) -** -** Word-0 Word-1 Value -** 110110ww wwzzzzyy 110111yy yyxxxxxx 000uuuuu zzzzyyyy yyxxxxxx -** zzzzyyyy yyxxxxxx 00000000 zzzzyyyy yyxxxxxx -** -** -** BOM or Byte Order Mark: -** 0xff 0xfe little-endian utf-16 follows -** 0xfe 0xff big-endian utf-16 follows -** -*/ -/* #include */ - -#ifndef SQLITE_AMALGAMATION -/* -** The following constant value is used by the SQLITE_BIGENDIAN and -** SQLITE_LITTLEENDIAN macros. -*/ -SQLITE_PRIVATE const int sqlite3one = 1; -#endif /* SQLITE_AMALGAMATION */ - -/* -** This lookup table is used to help decode the first byte of -** a multi-byte UTF8 character. 
-*/ -static const unsigned char sqlite3Utf8Trans1[] = { - 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, - 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, - 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, - 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, - 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, - 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, - 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, - 0x00, 0x01, 0x02, 0x03, 0x00, 0x01, 0x00, 0x00, -}; - - -#define WRITE_UTF8(zOut, c) { \ - if( c<0x00080 ){ \ - *zOut++ = (u8)(c&0xFF); \ - } \ - else if( c<0x00800 ){ \ - *zOut++ = 0xC0 + (u8)((c>>6)&0x1F); \ - *zOut++ = 0x80 + (u8)(c & 0x3F); \ - } \ - else if( c<0x10000 ){ \ - *zOut++ = 0xE0 + (u8)((c>>12)&0x0F); \ - *zOut++ = 0x80 + (u8)((c>>6) & 0x3F); \ - *zOut++ = 0x80 + (u8)(c & 0x3F); \ - }else{ \ - *zOut++ = 0xF0 + (u8)((c>>18) & 0x07); \ - *zOut++ = 0x80 + (u8)((c>>12) & 0x3F); \ - *zOut++ = 0x80 + (u8)((c>>6) & 0x3F); \ - *zOut++ = 0x80 + (u8)(c & 0x3F); \ - } \ -} - -#define WRITE_UTF16LE(zOut, c) { \ - if( c<=0xFFFF ){ \ - *zOut++ = (u8)(c&0x00FF); \ - *zOut++ = (u8)((c>>8)&0x00FF); \ - }else{ \ - *zOut++ = (u8)(((c>>10)&0x003F) + (((c-0x10000)>>10)&0x00C0)); \ - *zOut++ = (u8)(0x00D8 + (((c-0x10000)>>18)&0x03)); \ - *zOut++ = (u8)(c&0x00FF); \ - *zOut++ = (u8)(0x00DC + ((c>>8)&0x03)); \ - } \ -} - -#define WRITE_UTF16BE(zOut, c) { \ - if( c<=0xFFFF ){ \ - *zOut++ = (u8)((c>>8)&0x00FF); \ - *zOut++ = (u8)(c&0x00FF); \ - }else{ \ - *zOut++ = (u8)(0x00D8 + (((c-0x10000)>>18)&0x03)); \ - *zOut++ = (u8)(((c>>10)&0x003F) + (((c-0x10000)>>10)&0x00C0)); \ - *zOut++ = (u8)(0x00DC + ((c>>8)&0x03)); \ - *zOut++ = (u8)(c&0x00FF); \ - } \ -} - -#define READ_UTF16LE(zIn, TERM, c){ \ - c = (*zIn++); \ - c += ((*zIn++)<<8); \ - if( c>=0xD800 && c<0xE000 && TERM ){ \ - int c2 = (*zIn++); \ - c2 += ((*zIn++)<<8); \ - c = (c2&0x03FF) + ((c&0x003F)<<10) + (((c&0x03C0)+0x0040)<<10); \ - } \ -} - -#define READ_UTF16BE(zIn, TERM, c){ \ - c = ((*zIn++)<<8); \ - c += (*zIn++); \ - if( c>=0xD800 && c<0xE000 && TERM ){ \ - int c2 = ((*zIn++)<<8); \ - c2 += (*zIn++); \ - c = (c2&0x03FF) + ((c&0x003F)<<10) + (((c&0x03C0)+0x0040)<<10); \ - } \ -} - -/* -** Translate a single UTF-8 character. Return the unicode value. -** -** During translation, assume that the byte that zTerm points -** is a 0x00. -** -** Write a pointer to the next unread byte back into *pzNext. -** -** Notes On Invalid UTF-8: -** -** * This routine never allows a 7-bit character (0x00 through 0x7f) to -** be encoded as a multi-byte character. Any multi-byte character that -** attempts to encode a value between 0x00 and 0x7f is rendered as 0xfffd. -** -** * This routine never allows a UTF16 surrogate value to be encoded. -** If a multi-byte character attempts to encode a value between -** 0xd800 and 0xe000 then it is rendered as 0xfffd. -** -** * Bytes in the range of 0x80 through 0xbf which occur as the first -** byte of a character are interpreted as single-byte characters -** and rendered as themselves even though they are technically -** invalid characters. -** -** * This routine accepts an infinite number of different UTF8 encodings -** for unicode values 0x80 and greater. It do not change over-length -** encodings to 0xfffd as some systems recommend. 
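The notes above spell out how invalid UTF-8 is handled: multi-byte encodings of values below 0x80, UTF-16 surrogate values, and 0xFFFE/0xFFFF all decode to U+FFFD, while stray 0x80..0xBF lead bytes pass through as themselves. A small standalone decoder sketch following those rules, with simplified lead-byte masking; an illustration, not SQLite's reader:

#include <stdio.h>

static unsigned int utf8_read(const unsigned char **pz){
  unsigned int c = *(*pz)++;
  if( c>=0xc0 ){
    if( c<0xe0 )      c &= 0x1f;          /* two-byte lead: 5 payload bits */
    else if( c<0xf0 ) c &= 0x0f;          /* three-byte lead: 4 payload bits */
    else              c &= 0x07;          /* four-byte lead: 3 payload bits */
    while( (**pz & 0xc0)==0x80 ){
      c = (c<<6) | (*(*pz)++ & 0x3f);     /* 6 payload bits per continuation byte */
    }
    if( c<0x80                             /* multi-byte encoding of an ASCII value */
     || (c&0xFFFFF800)==0xD800             /* UTF-16 surrogate range */
     || (c&0xFFFFFFFE)==0xFFFE ){          /* 0xFFFE / 0xFFFF */
      c = 0xFFFD;
    }
  }
  return c;
}

int main(void){
  const unsigned char z[] = { 0xE2, 0x82, 0xAC, 0 };   /* U+20AC, the euro sign */
  const unsigned char *p = z;
  printf("U+%04X\n", utf8_read(&p));
  return 0;
}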
-*/ -#define READ_UTF8(zIn, zTerm, c) \ - c = *(zIn++); \ - if( c>=0xc0 ){ \ - c = sqlite3Utf8Trans1[c-0xc0]; \ - while( zIn!=zTerm && (*zIn & 0xc0)==0x80 ){ \ - c = (c<<6) + (0x3f & *(zIn++)); \ - } \ - if( c<0x80 \ - || (c&0xFFFFF800)==0xD800 \ - || (c&0xFFFFFFFE)==0xFFFE ){ c = 0xFFFD; } \ - } -SQLITE_PRIVATE u32 sqlite3Utf8Read( - const unsigned char **pz /* Pointer to string from which to read char */ -){ - unsigned int c; - - /* Same as READ_UTF8() above but without the zTerm parameter. - ** For this routine, we assume the UTF8 string is always zero-terminated. - */ - c = *((*pz)++); - if( c>=0xc0 ){ - c = sqlite3Utf8Trans1[c-0xc0]; - while( (*(*pz) & 0xc0)==0x80 ){ - c = (c<<6) + (0x3f & *((*pz)++)); - } - if( c<0x80 - || (c&0xFFFFF800)==0xD800 - || (c&0xFFFFFFFE)==0xFFFE ){ c = 0xFFFD; } - } - return c; -} - - - - -/* -** If the TRANSLATE_TRACE macro is defined, the value of each Mem is -** printed on stderr on the way into and out of sqlite3VdbeMemTranslate(). -*/ -/* #define TRANSLATE_TRACE 1 */ - -#ifndef SQLITE_OMIT_UTF16 -/* -** This routine transforms the internal text encoding used by pMem to -** desiredEnc. It is an error if the string is already of the desired -** encoding, or if *pMem does not contain a string value. -*/ -SQLITE_PRIVATE int sqlite3VdbeMemTranslate(Mem *pMem, u8 desiredEnc){ - int len; /* Maximum length of output string in bytes */ - unsigned char *zOut; /* Output buffer */ - unsigned char *zIn; /* Input iterator */ - unsigned char *zTerm; /* End of input */ - unsigned char *z; /* Output iterator */ - unsigned int c; - - assert( pMem->db==0 || sqlite3_mutex_held(pMem->db->mutex) ); - assert( pMem->flags&MEM_Str ); - assert( pMem->enc!=desiredEnc ); - assert( pMem->enc!=0 ); - assert( pMem->n>=0 ); - -#if defined(TRANSLATE_TRACE) && defined(SQLITE_DEBUG) - { - char zBuf[100]; - sqlite3VdbeMemPrettyPrint(pMem, zBuf); - fprintf(stderr, "INPUT: %s\n", zBuf); - } -#endif - - /* If the translation is between UTF-16 little and big endian, then - ** all that is required is to swap the byte order. This case is handled - ** differently from the others. - */ - if( pMem->enc!=SQLITE_UTF8 && desiredEnc!=SQLITE_UTF8 ){ - u8 temp; - int rc; - rc = sqlite3VdbeMemMakeWriteable(pMem); - if( rc!=SQLITE_OK ){ - assert( rc==SQLITE_NOMEM ); - return SQLITE_NOMEM; - } - zIn = (u8*)pMem->z; - zTerm = &zIn[pMem->n&~1]; - while( zInenc = desiredEnc; - goto translate_out; - } - - /* Set len to the maximum number of bytes required in the output buffer. */ - if( desiredEnc==SQLITE_UTF8 ){ - /* When converting from UTF-16, the maximum growth results from - ** translating a 2-byte character to a 4-byte UTF-8 character. - ** A single byte is required for the output string - ** nul-terminator. - */ - pMem->n &= ~1; - len = pMem->n * 2 + 1; - }else{ - /* When converting from UTF-8 to UTF-16 the maximum growth is caused - ** when a 1-byte UTF-8 character is translated into a 2-byte UTF-16 - ** character. Two bytes are required in the output buffer for the - ** nul-terminator. - */ - len = pMem->n * 2 + 2; - } - - /* Set zIn to point at the start of the input buffer and zTerm to point 1 - ** byte past the end. - ** - ** Variable zOut is set to point at the output buffer, space obtained - ** from sqlite3_malloc(). 
- */ - zIn = (u8*)pMem->z; - zTerm = &zIn[pMem->n]; - zOut = sqlite3DbMallocRaw(pMem->db, len); - if( !zOut ){ - return SQLITE_NOMEM; - } - z = zOut; - - if( pMem->enc==SQLITE_UTF8 ){ - if( desiredEnc==SQLITE_UTF16LE ){ - /* UTF-8 -> UTF-16 Little-endian */ - while( zIn UTF-16 Big-endian */ - while( zInn = (int)(z - zOut); - *z++ = 0; - }else{ - assert( desiredEnc==SQLITE_UTF8 ); - if( pMem->enc==SQLITE_UTF16LE ){ - /* UTF-16 Little-endian -> UTF-8 */ - while( zIn UTF-8 */ - while( zInn = (int)(z - zOut); - } - *z = 0; - assert( (pMem->n+(desiredEnc==SQLITE_UTF8?1:2))<=len ); - - sqlite3VdbeMemRelease(pMem); - pMem->flags &= ~(MEM_Static|MEM_Dyn|MEM_Ephem); - pMem->enc = desiredEnc; - pMem->flags |= (MEM_Term); - pMem->z = (char*)zOut; - pMem->zMalloc = pMem->z; - -translate_out: -#if defined(TRANSLATE_TRACE) && defined(SQLITE_DEBUG) - { - char zBuf[100]; - sqlite3VdbeMemPrettyPrint(pMem, zBuf); - fprintf(stderr, "OUTPUT: %s\n", zBuf); - } -#endif - return SQLITE_OK; -} - -/* -** This routine checks for a byte-order mark at the beginning of the -** UTF-16 string stored in *pMem. If one is present, it is removed and -** the encoding of the Mem adjusted. This routine does not do any -** byte-swapping, it just sets Mem.enc appropriately. -** -** The allocation (static, dynamic etc.) and encoding of the Mem may be -** changed by this function. -*/ -SQLITE_PRIVATE int sqlite3VdbeMemHandleBom(Mem *pMem){ - int rc = SQLITE_OK; - u8 bom = 0; - - assert( pMem->n>=0 ); - if( pMem->n>1 ){ - u8 b1 = *(u8 *)pMem->z; - u8 b2 = *(((u8 *)pMem->z) + 1); - if( b1==0xFE && b2==0xFF ){ - bom = SQLITE_UTF16BE; - } - if( b1==0xFF && b2==0xFE ){ - bom = SQLITE_UTF16LE; - } - } - - if( bom ){ - rc = sqlite3VdbeMemMakeWriteable(pMem); - if( rc==SQLITE_OK ){ - pMem->n -= 2; - memmove(pMem->z, &pMem->z[2], pMem->n); - pMem->z[pMem->n] = '\0'; - pMem->z[pMem->n+1] = '\0'; - pMem->flags |= MEM_Term; - pMem->enc = bom; - } - } - return rc; -} -#endif /* SQLITE_OMIT_UTF16 */ - -/* -** pZ is a UTF-8 encoded unicode string. If nByte is less than zero, -** return the number of unicode characters in pZ up to (but not including) -** the first 0x00 byte. If nByte is not less than zero, return the -** number of unicode characters in the first nByte of pZ (or up to -** the first 0x00, whichever comes first). -*/ -SQLITE_PRIVATE int sqlite3Utf8CharLen(const char *zIn, int nByte){ - int r = 0; - const u8 *z = (const u8*)zIn; - const u8 *zTerm; - if( nByte>=0 ){ - zTerm = &z[nByte]; - }else{ - zTerm = (const u8*)(-1); - } - assert( z<=zTerm ); - while( *z!=0 && zmallocFailed ){ - sqlite3VdbeMemRelease(&m); - m.z = 0; - } - assert( (m.flags & MEM_Term)!=0 || db->mallocFailed ); - assert( (m.flags & MEM_Str)!=0 || db->mallocFailed ); - assert( m.z || db->mallocFailed ); - return m.z; -} - -/* -** zIn is a UTF-16 encoded unicode string at least nChar characters long. -** Return the number of bytes in the first nChar unicode characters -** in pZ. nChar must be non-negative. 
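sqlite3VdbeMemHandleBom() above decides the byte order from the first two bytes of a UTF-16 string and then strips the mark. A tiny standalone sketch of just that detection step (hypothetical names, illustration only):

#include <stdio.h>

enum { ENC_NONE, ENC_UTF16BE, ENC_UTF16LE };

static int detect_bom(const unsigned char *z, int n){
  if( n>1 ){
    if( z[0]==0xFE && z[1]==0xFF ) return ENC_UTF16BE;   /* FE FF: big-endian follows */
    if( z[0]==0xFF && z[1]==0xFE ) return ENC_UTF16LE;   /* FF FE: little-endian follows */
  }
  return ENC_NONE;
}

int main(void){
  const unsigned char le[] = { 0xFF, 0xFE, 'h', 0, 'i', 0 };
  printf("%s\n", detect_bom(le, sizeof(le))==ENC_UTF16LE ? "UTF-16LE" : "other");
  return 0;
}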
-*/ -SQLITE_PRIVATE int sqlite3Utf16ByteLen(const void *zIn, int nChar){ - int c; - unsigned char const *z = zIn; - int n = 0; - - if( SQLITE_UTF16NATIVE==SQLITE_UTF16BE ){ - while( n0 && n<=4 ); - z[0] = 0; - z = zBuf; - c = sqlite3Utf8Read((const u8**)&z); - t = i; - if( i>=0xD800 && i<=0xDFFF ) t = 0xFFFD; - if( (i&0xFFFFFFFE)==0xFFFE ) t = 0xFFFD; - assert( c==t ); - assert( (z-zBuf)==n ); - } - for(i=0; i<0x00110000; i++){ - if( i>=0xD800 && i<0xE000 ) continue; - z = zBuf; - WRITE_UTF16LE(z, i); - n = (int)(z-zBuf); - assert( n>0 && n<=4 ); - z[0] = 0; - z = zBuf; - READ_UTF16LE(z, 1, c); - assert( c==i ); - assert( (z-zBuf)==n ); - } - for(i=0; i<0x00110000; i++){ - if( i>=0xD800 && i<0xE000 ) continue; - z = zBuf; - WRITE_UTF16BE(z, i); - n = (int)(z-zBuf); - assert( n>0 && n<=4 ); - z[0] = 0; - z = zBuf; - READ_UTF16BE(z, 1, c); - assert( c==i ); - assert( (z-zBuf)==n ); - } -} -#endif /* SQLITE_TEST */ -#endif /* SQLITE_OMIT_UTF16 */ - -/************** End of utf.c *************************************************/ -/************** Begin file util.c ********************************************/ -/* -** 2001 September 15 -** -** The author disclaims copyright to this source code. In place of -** a legal notice, here is a blessing: -** -** May you do good and not evil. -** May you find forgiveness for yourself and forgive others. -** May you share freely, never taking more than you give. -** -************************************************************************* -** Utility functions used throughout sqlite. -** -** This file contains functions for allocating memory, comparing -** strings, and stuff like that. -** -*/ -/* #include */ -#ifdef SQLITE_HAVE_ISNAN -# include -#endif - -/* -** Routine needed to support the testcase() macro. -*/ -#ifdef SQLITE_COVERAGE_TEST -SQLITE_PRIVATE void sqlite3Coverage(int x){ - static unsigned dummy = 0; - dummy += (unsigned)x; -} -#endif - -/* -** Give a callback to the test harness that can be used to simulate faults -** in places where it is difficult or expensive to do so purely by means -** of inputs. -** -** The intent of the integer argument is to let the fault simulator know -** which of multiple sqlite3FaultSim() calls has been hit. -** -** Return whatever integer value the test callback returns, or return -** SQLITE_OK if no test callback is installed. -*/ -#ifndef SQLITE_OMIT_BUILTIN_TEST -SQLITE_PRIVATE int sqlite3FaultSim(int iTest){ - int (*xCallback)(int) = sqlite3GlobalConfig.xTestCallback; - return xCallback ? xCallback(iTest) : SQLITE_OK; -} -#endif - -#ifndef SQLITE_OMIT_FLOATING_POINT -/* -** Return true if the floating point value is Not a Number (NaN). -** -** Use the math library isnan() function if compiled with SQLITE_HAVE_ISNAN. -** Otherwise, we have our own implementation that works on most systems. -*/ -SQLITE_PRIVATE int sqlite3IsNaN(double x){ - int rc; /* The value return */ -#if !defined(SQLITE_HAVE_ISNAN) - /* - ** Systems that support the isnan() library function should probably - ** make use of it by compiling with -DSQLITE_HAVE_ISNAN. But we have - ** found that many systems do not have a working isnan() function so - ** this implementation is provided as an alternative. - ** - ** This NaN test sometimes fails if compiled on GCC with -ffast-math. 
- ** On the other hand, the use of -ffast-math comes with the following - ** warning: - ** - ** This option [-ffast-math] should never be turned on by any - ** -O option since it can result in incorrect output for programs - ** which depend on an exact implementation of IEEE or ISO - ** rules/specifications for math functions. - ** - ** Under MSVC, this NaN test may fail if compiled with a floating- - ** point precision mode other than /fp:precise. From the MSDN - ** documentation: - ** - ** The compiler [with /fp:precise] will properly handle comparisons - ** involving NaN. For example, x != x evaluates to true if x is NaN - ** ... - */ -#ifdef __FAST_MATH__ -# error SQLite will not work correctly with the -ffast-math option of GCC. -#endif - volatile double y = x; - volatile double z = y; - rc = (y!=z); -#else /* if defined(SQLITE_HAVE_ISNAN) */ - rc = isnan(x); -#endif /* SQLITE_HAVE_ISNAN */ - testcase( rc ); - return rc; -} -#endif /* SQLITE_OMIT_FLOATING_POINT */ - -/* -** Compute a string length that is limited to what can be stored in -** lower 30 bits of a 32-bit signed integer. -** -** The value returned will never be negative. Nor will it ever be greater -** than the actual length of the string. For very long strings (greater -** than 1GiB) the value returned might be less than the true string length. -*/ -SQLITE_PRIVATE int sqlite3Strlen30(const char *z){ - const char *z2 = z; - if( z==0 ) return 0; - while( *z2 ){ z2++; } - return 0x3fffffff & (int)(z2 - z); -} - -/* -** Set the most recent error code and error string for the sqlite -** handle "db". The error code is set to "err_code". -** -** If it is not NULL, string zFormat specifies the format of the -** error string in the style of the printf functions: The following -** format characters are allowed: -** -** %s Insert a string -** %z A string that should be freed after use -** %d Insert an integer -** %T Insert a token -** %S Insert the first element of a SrcList -** -** zFormat and any string tokens that follow it are assumed to be -** encoded in UTF-8. -** -** To clear the most recent error for sqlite handle "db", sqlite3Error -** should be called with err_code set to SQLITE_OK and zFormat set -** to NULL. -*/ -SQLITE_PRIVATE void sqlite3Error(sqlite3 *db, int err_code, const char *zFormat, ...){ - assert( db!=0 ); - db->errCode = err_code; - if( zFormat && (db->pErr || (db->pErr = sqlite3ValueNew(db))!=0) ){ - char *z; - va_list ap; - va_start(ap, zFormat); - z = sqlite3VMPrintf(db, zFormat, ap); - va_end(ap); - sqlite3ValueSetStr(db->pErr, -1, z, SQLITE_UTF8, SQLITE_DYNAMIC); - }else if( db->pErr ){ - sqlite3ValueSetNull(db->pErr); - } -} - -/* -** Add an error message to pParse->zErrMsg and increment pParse->nErr. -** The following formatting characters are allowed: -** -** %s Insert a string -** %z A string that should be freed after use -** %d Insert an integer -** %T Insert a token -** %S Insert the first element of a SrcList -** -** This function should be used to report any error that occurs whilst -** compiling an SQL statement (i.e. within sqlite3_prepare()). The -** last thing the sqlite3_prepare() function does is copy the error -** stored by this function into the database handle using sqlite3Error(). -** Function sqlite3Error() should be used during statement execution -** (sqlite3_step() etc.). 
-*/ -SQLITE_PRIVATE void sqlite3ErrorMsg(Parse *pParse, const char *zFormat, ...){ - char *zMsg; - va_list ap; - sqlite3 *db = pParse->db; - va_start(ap, zFormat); - zMsg = sqlite3VMPrintf(db, zFormat, ap); - va_end(ap); - if( db->suppressErr ){ - sqlite3DbFree(db, zMsg); - }else{ - pParse->nErr++; - sqlite3DbFree(db, pParse->zErrMsg); - pParse->zErrMsg = zMsg; - pParse->rc = SQLITE_ERROR; - } -} - -/* -** Convert an SQL-style quoted string into a normal string by removing -** the quote characters. The conversion is done in-place. If the -** input does not begin with a quote character, then this routine -** is a no-op. -** -** The input string must be zero-terminated. A new zero-terminator -** is added to the dequoted string. -** -** The return value is -1 if no dequoting occurs or the length of the -** dequoted string, exclusive of the zero terminator, if dequoting does -** occur. -** -** 2002-Feb-14: This routine is extended to remove MS-Access style -** brackets from around identifers. For example: "[a-b-c]" becomes -** "a-b-c". -*/ -SQLITE_PRIVATE int sqlite3Dequote(char *z){ - char quote; - int i, j; - if( z==0 ) return -1; - quote = z[0]; - switch( quote ){ - case '\'': break; - case '"': break; - case '`': break; /* For MySQL compatibility */ - case '[': quote = ']'; break; /* For MS SqlServer compatibility */ - default: return -1; - } - for(i=1, j=0;; i++){ - assert( z[i] ); - if( z[i]==quote ){ - if( z[i+1]==quote ){ - z[j++] = quote; - i++; - }else{ - break; - } - }else{ - z[j++] = z[i]; - } - } - z[j] = 0; - return j; -} - -/* Convenient short-hand */ -#define UpperToLower sqlite3UpperToLower - -/* -** Some systems have stricmp(). Others have strcasecmp(). Because -** there is no consistency, we will define our own. -** -** IMPLEMENTATION-OF: R-30243-02494 The sqlite3_stricmp() and -** sqlite3_strnicmp() APIs allow applications and extensions to compare -** the contents of two buffers containing UTF-8 strings in a -** case-independent fashion, using the same definition of "case -** independence" that SQLite uses internally when comparing identifiers. -*/ -SQLITE_API int sqlite3_stricmp(const char *zLeft, const char *zRight){ - register unsigned char *a, *b; - a = (unsigned char *)zLeft; - b = (unsigned char *)zRight; - while( *a!=0 && UpperToLower[*a]==UpperToLower[*b]){ a++; b++; } - return UpperToLower[*a] - UpperToLower[*b]; -} -SQLITE_API int sqlite3_strnicmp(const char *zLeft, const char *zRight, int N){ - register unsigned char *a, *b; - a = (unsigned char *)zLeft; - b = (unsigned char *)zRight; - while( N-- > 0 && *a!=0 && UpperToLower[*a]==UpperToLower[*b]){ a++; b++; } - return N<0 ? 0 : UpperToLower[*a] - UpperToLower[*b]; -} - -/* -** The string z[] is an text representation of a real number. -** Convert this string to a double and write it into *pResult. -** -** The string z[] is length bytes in length (bytes, not characters) and -** uses the encoding enc. The string is not necessarily zero-terminated. -** -** Return TRUE if the result is a valid real number (or integer) and FALSE -** if the string is empty or contains extraneous text. Valid numbers -** are in one of these formats: -** -** [+-]digits[E[+-]digits] -** [+-]digits.[digits][E[+-]digits] -** [+-].digits[E[+-]digits] -** -** Leading and trailing whitespace is ignored for the purpose of determining -** validity. -** -** If some prefix of the input string is a valid number, this routine -** returns FALSE but it still converts the prefix and writes the result -** into *pResult. 
-*/ -SQLITE_PRIVATE int sqlite3AtoF(const char *z, double *pResult, int length, u8 enc){ -#ifndef SQLITE_OMIT_FLOATING_POINT - int incr; - const char *zEnd = z + length; - /* sign * significand * (10 ^ (esign * exponent)) */ - int sign = 1; /* sign of significand */ - i64 s = 0; /* significand */ - int d = 0; /* adjust exponent for shifting decimal point */ - int esign = 1; /* sign of exponent */ - int e = 0; /* exponent */ - int eValid = 1; /* True exponent is either not used or is well-formed */ - double result; - int nDigits = 0; - int nonNum = 0; - - assert( enc==SQLITE_UTF8 || enc==SQLITE_UTF16LE || enc==SQLITE_UTF16BE ); - *pResult = 0.0; /* Default return value, in case of an error */ - - if( enc==SQLITE_UTF8 ){ - incr = 1; - }else{ - int i; - incr = 2; - assert( SQLITE_UTF16LE==2 && SQLITE_UTF16BE==3 ); - for(i=3-enc; i=zEnd ) return 0; - - /* get sign of significand */ - if( *z=='-' ){ - sign = -1; - z+=incr; - }else if( *z=='+' ){ - z+=incr; - } - - /* skip leading zeroes */ - while( z=zEnd ) goto do_atof_calc; - - /* if decimal point is present */ - if( *z=='.' ){ - z+=incr; - /* copy digits from after decimal to significand - ** (decrease exponent by d to shift decimal right) */ - while( z=zEnd ) goto do_atof_calc; - - /* if exponent is present */ - if( *z=='e' || *z=='E' ){ - z+=incr; - eValid = 0; - if( z>=zEnd ) goto do_atof_calc; - /* get sign of exponent */ - if( *z=='-' ){ - esign = -1; - z+=incr; - }else if( *z=='+' ){ - z+=incr; - } - /* copy digits to exponent */ - while( z0 ){ - while( s<(LARGEST_INT64/10) && e>0 ) e--,s*=10; - }else{ - while( !(s%10) && e>0 ) e--,s/=10; - } - - /* adjust the sign of significand */ - s = sign<0 ? -s : s; - - /* if exponent, scale significand as appropriate - ** and store in result. */ - if( e ){ - LONGDOUBLE_TYPE scale = 1.0; - /* attempt to handle extremely small/large numbers better */ - if( e>307 && e<342 ){ - while( e%308 ) { scale *= 1.0e+1; e -= 1; } - if( esign<0 ){ - result = s / scale; - result /= 1.0e+308; - }else{ - result = s * scale; - result *= 1.0e+308; - } - }else if( e>=342 ){ - if( esign<0 ){ - result = 0.0*s; - }else{ - result = 1e308*1e308*s; /* Infinity */ - } - }else{ - /* 1.0e+22 is the largest power of 10 than can be - ** represented exactly. */ - while( e%22 ) { scale *= 1.0e+1; e -= 1; } - while( e>0 ) { scale *= 1.0e+22; e -= 22; } - if( esign<0 ){ - result = s / scale; - }else{ - result = s * scale; - } - } - } else { - result = (double)s; - } - } - - /* store the result */ - *pResult = result; - - /* return true if number and no extra non-whitespace chracters after */ - return z>=zEnd && nDigits>0 && eValid && nonNum==0; -#else - return !sqlite3Atoi64(z, pResult, length, enc); -#endif /* SQLITE_OMIT_FLOATING_POINT */ -} - -/* -** Compare the 19-character string zNum against the text representation -** value 2^63: 9223372036854775808. Return negative, zero, or positive -** if zNum is less than, equal to, or greater than the string. -** Note that zNum must contain exactly 19 characters. -** -** Unlike memcmp() this routine is guaranteed to return the difference -** in the values of the last digit if the only difference is in the -** last digit. So, for example, -** -** compare2pow63("9223372036854775800", 1) -** -** will return -8. 
-*/ -static int compare2pow63(const char *zNum, int incr){ - int c = 0; - int i; - /* 012345678901234567 */ - const char *pow63 = "922337203685477580"; - for(i=0; c==0 && i<18; i++){ - c = (zNum[i*incr]-pow63[i])*10; - } - if( c==0 ){ - c = zNum[18*incr] - '8'; - testcase( c==(-1) ); - testcase( c==0 ); - testcase( c==(+1) ); - } - return c; -} - - -/* -** Convert zNum to a 64-bit signed integer. -** -** If the zNum value is representable as a 64-bit twos-complement -** integer, then write that value into *pNum and return 0. -** -** If zNum is exactly 9223372036854775808, return 2. This special -** case is broken out because while 9223372036854775808 cannot be a -** signed 64-bit integer, its negative -9223372036854775808 can be. -** -** If zNum is too big for a 64-bit integer and is not -** 9223372036854775808 or if zNum contains any non-numeric text, -** then return 1. -** -** length is the number of bytes in the string (bytes, not characters). -** The string is not necessarily zero-terminated. The encoding is -** given by enc. -*/ -SQLITE_PRIVATE int sqlite3Atoi64(const char *zNum, i64 *pNum, int length, u8 enc){ - int incr; - u64 u = 0; - int neg = 0; /* assume positive */ - int i; - int c = 0; - int nonNum = 0; - const char *zStart; - const char *zEnd = zNum + length; - assert( enc==SQLITE_UTF8 || enc==SQLITE_UTF16LE || enc==SQLITE_UTF16BE ); - if( enc==SQLITE_UTF8 ){ - incr = 1; - }else{ - incr = 2; - assert( SQLITE_UTF16LE==2 && SQLITE_UTF16BE==3 ); - for(i=3-enc; i='0' && c<='9'; i+=incr){ - u = u*10 + c - '0'; - } - if( u>LARGEST_INT64 ){ - *pNum = neg ? SMALLEST_INT64 : LARGEST_INT64; - }else if( neg ){ - *pNum = -(i64)u; - }else{ - *pNum = (i64)u; - } - testcase( i==18 ); - testcase( i==19 ); - testcase( i==20 ); - if( (c!=0 && &zNum[i]19*incr || nonNum ){ - /* zNum is empty or contains non-numeric text or is longer - ** than 19 digits (thus guaranteeing that it is too large) */ - return 1; - }else if( i<19*incr ){ - /* Less than 19 digits, so we know that it fits in 64 bits */ - assert( u<=LARGEST_INT64 ); - return 0; - }else{ - /* zNum is a 19-digit numbers. Compare it against 9223372036854775808. */ - c = compare2pow63(zNum, incr); - if( c<0 ){ - /* zNum is less than 9223372036854775808 so it fits */ - assert( u<=LARGEST_INT64 ); - return 0; - }else if( c>0 ){ - /* zNum is greater than 9223372036854775808 so it overflows */ - return 1; - }else{ - /* zNum is exactly 9223372036854775808. Fits if negative. The - ** special case 2 overflow if positive */ - assert( u-1==LARGEST_INT64 ); - return neg ? 0 : 2; - } - } -} - -/* -** If zNum represents an integer that will fit in 32-bits, then set -** *pValue to that integer and return true. Otherwise return false. -** -** Any non-numeric characters that following zNum are ignored. -** This is different from sqlite3Atoi64() which requires the -** input number to be zero-terminated. 
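Editor's note: the 19-digit boundary logic described above can be checked in isolation: fewer than 19 digits always fits in a signed 64-bit value, more than 19 never does, and exactly 19 digits is decided by comparing against the text of 2^63. A small sketch, assuming the input is digits only with no leading zeroes.

    /* Sketch of the 19-digit classification described above. */
    #include <stdio.h>
    #include <string.h>

    static int classify_u64_decimal(const char *digits){
      size_t n = strlen(digits);            /* assumes no sign, no leading zeroes */
      if( n<19 ) return 0;                  /* always fits in signed 64 bits */
      if( n>19 ) return 1;                  /* always too big */
      int c = strcmp(digits, "9223372036854775808");  /* both 19 digits: lexicographic == numeric */
      if( c<0 ) return 0;
      if( c>0 ) return 1;
      return 2;                             /* exactly 2^63: fits only when negated */
    }

    int main(void){
      printf("%d %d %d\n",
             classify_u64_decimal("9223372036854775807"),   /* 0: fits        */
             classify_u64_decimal("9223372036854775808"),   /* 2: only as -x  */
             classify_u64_decimal("9223372036854775809"));  /* 1: too big     */
      return 0;
    }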
-*/ -SQLITE_PRIVATE int sqlite3GetInt32(const char *zNum, int *pValue){ - sqlite_int64 v = 0; - int i, c; - int neg = 0; - if( zNum[0]=='-' ){ - neg = 1; - zNum++; - }else if( zNum[0]=='+' ){ - zNum++; - } - while( zNum[0]=='0' ) zNum++; - for(i=0; i<11 && (c = zNum[i] - '0')>=0 && c<=9; i++){ - v = v*10 + c; - } - - /* The longest decimal representation of a 32 bit integer is 10 digits: - ** - ** 1234567890 - ** 2^31 -> 2147483648 - */ - testcase( i==10 ); - if( i>10 ){ - return 0; - } - testcase( v-neg==2147483647 ); - if( v-neg>2147483647 ){ - return 0; - } - if( neg ){ - v = -v; - } - *pValue = (int)v; - return 1; -} - -/* -** Return a 32-bit integer value extracted from a string. If the -** string is not an integer, just return 0. -*/ -SQLITE_PRIVATE int sqlite3Atoi(const char *z){ - int x = 0; - if( z ) sqlite3GetInt32(z, &x); - return x; -} - -/* -** The variable-length integer encoding is as follows: -** -** KEY: -** A = 0xxxxxxx 7 bits of data and one flag bit -** B = 1xxxxxxx 7 bits of data and one flag bit -** C = xxxxxxxx 8 bits of data -** -** 7 bits - A -** 14 bits - BA -** 21 bits - BBA -** 28 bits - BBBA -** 35 bits - BBBBA -** 42 bits - BBBBBA -** 49 bits - BBBBBBA -** 56 bits - BBBBBBBA -** 64 bits - BBBBBBBBC -*/ - -/* -** Write a 64-bit variable-length integer to memory starting at p[0]. -** The length of data write will be between 1 and 9 bytes. The number -** of bytes written is returned. -** -** A variable-length integer consists of the lower 7 bits of each byte -** for all bytes that have the 8th bit set and one byte with the 8th -** bit clear. Except, if we get to the 9th byte, it stores the full -** 8 bits and is the last byte. -*/ -SQLITE_PRIVATE int sqlite3PutVarint(unsigned char *p, u64 v){ - int i, j, n; - u8 buf[10]; - if( v & (((u64)0xff000000)<<32) ){ - p[8] = (u8)v; - v >>= 8; - for(i=7; i>=0; i--){ - p[i] = (u8)((v & 0x7f) | 0x80); - v >>= 7; - } - return 9; - } - n = 0; - do{ - buf[n++] = (u8)((v & 0x7f) | 0x80); - v >>= 7; - }while( v!=0 ); - buf[0] &= 0x7f; - assert( n<=9 ); - for(i=0, j=n-1; j>=0; j--, i++){ - p[i] = buf[j]; - } - return n; -} - -/* -** This routine is a faster version of sqlite3PutVarint() that only -** works for 32-bit positive integers and which is optimized for -** the common case of small integers. A MACRO version, putVarint32, -** is provided which inlines the single-byte case. All code should use -** the MACRO version as this function assumes the single-byte case has -** already been handled. -*/ -SQLITE_PRIVATE int sqlite3PutVarint32(unsigned char *p, u32 v){ -#ifndef putVarint32 - if( (v & ~0x7f)==0 ){ - p[0] = v; - return 1; - } -#endif - if( (v & ~0x3fff)==0 ){ - p[0] = (u8)((v>>7) | 0x80); - p[1] = (u8)(v & 0x7f); - return 2; - } - return sqlite3PutVarint(p, v); -} - -/* -** Bitmasks used by sqlite3GetVarint(). These precomputed constants -** are defined here rather than simply putting the constant expressions -** inline in order to work around bugs in the RVT compiler. -** -** SLOT_2_0 A mask for (0x7f<<14) | 0x7f -** -** SLOT_4_2_0 A mask for (0x7f<<28) | SLOT_2_0 -*/ -#define SLOT_2_0 0x001fc07f -#define SLOT_4_2_0 0xf01fc07f - - -/* -** Read a 64-bit variable-length integer from memory starting at p[0]. -** Return the number of bytes read. The value is stored in *v. 
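Editor's note: the KEY table above defines the wire layout of the variable-length integer: big-endian groups of 7 bits with the high bit set on every byte except the last, plus a 9th byte that carries 8 raw bits when needed. A self-contained encode/decode sketch of that layout follows (hypothetical helpers, not the tuned routines in this file).

    /* Sketch of the varint layout described above. */
    #include <stdio.h>
    #include <stdint.h>

    static int put_varint(unsigned char *p, uint64_t v){
      unsigned char buf[10];
      int n = 0, i;
      if( v > 0x00ffffffffffffffULL ){      /* needs the 9-byte form */
        p[8] = (unsigned char)v;            /* low 8 bits go in the last byte, verbatim */
        v >>= 8;
        for(i=7; i>=0; i--){ p[i] = (unsigned char)((v & 0x7f) | 0x80); v >>= 7; }
        return 9;
      }
      do{ buf[n++] = (unsigned char)((v & 0x7f) | 0x80); v >>= 7; }while( v );
      buf[0] &= 0x7f;                       /* clear the high bit on the final byte */
      for(i=0; i<n; i++) p[i] = buf[n-1-i]; /* emit most-significant group first */
      return n;
    }

    static int get_varint(const unsigned char *p, uint64_t *v){
      uint64_t x = 0;
      int i;
      for(i=0; i<8; i++){
        x = (x<<7) | (p[i] & 0x7f);
        if( (p[i] & 0x80)==0 ){ *v = x; return i+1; }
      }
      *v = (x<<8) | p[8];                   /* 9th byte holds 8 raw bits */
      return 9;
    }

    int main(void){
      unsigned char buf[9];
      uint64_t out, in = 0x1234567890ULL;
      int n = put_varint(buf, in);
      get_varint(buf, &out);
      printf("%d bytes, roundtrip %s\n", n, in==out ? "ok" : "broken");
      return 0;
    }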
-*/ -SQLITE_PRIVATE u8 sqlite3GetVarint(const unsigned char *p, u64 *v){ - u32 a,b,s; - - a = *p; - /* a: p0 (unmasked) */ - if (!(a&0x80)) - { - *v = a; - return 1; - } - - p++; - b = *p; - /* b: p1 (unmasked) */ - if (!(b&0x80)) - { - a &= 0x7f; - a = a<<7; - a |= b; - *v = a; - return 2; - } - - /* Verify that constants are precomputed correctly */ - assert( SLOT_2_0 == ((0x7f<<14) | (0x7f)) ); - assert( SLOT_4_2_0 == ((0xfU<<28) | (0x7f<<14) | (0x7f)) ); - - p++; - a = a<<14; - a |= *p; - /* a: p0<<14 | p2 (unmasked) */ - if (!(a&0x80)) - { - a &= SLOT_2_0; - b &= 0x7f; - b = b<<7; - a |= b; - *v = a; - return 3; - } - - /* CSE1 from below */ - a &= SLOT_2_0; - p++; - b = b<<14; - b |= *p; - /* b: p1<<14 | p3 (unmasked) */ - if (!(b&0x80)) - { - b &= SLOT_2_0; - /* moved CSE1 up */ - /* a &= (0x7f<<14)|(0x7f); */ - a = a<<7; - a |= b; - *v = a; - return 4; - } - - /* a: p0<<14 | p2 (masked) */ - /* b: p1<<14 | p3 (unmasked) */ - /* 1:save off p0<<21 | p1<<14 | p2<<7 | p3 (masked) */ - /* moved CSE1 up */ - /* a &= (0x7f<<14)|(0x7f); */ - b &= SLOT_2_0; - s = a; - /* s: p0<<14 | p2 (masked) */ - - p++; - a = a<<14; - a |= *p; - /* a: p0<<28 | p2<<14 | p4 (unmasked) */ - if (!(a&0x80)) - { - /* we can skip these cause they were (effectively) done above in calc'ing s */ - /* a &= (0x7f<<28)|(0x7f<<14)|(0x7f); */ - /* b &= (0x7f<<14)|(0x7f); */ - b = b<<7; - a |= b; - s = s>>18; - *v = ((u64)s)<<32 | a; - return 5; - } - - /* 2:save off p0<<21 | p1<<14 | p2<<7 | p3 (masked) */ - s = s<<7; - s |= b; - /* s: p0<<21 | p1<<14 | p2<<7 | p3 (masked) */ - - p++; - b = b<<14; - b |= *p; - /* b: p1<<28 | p3<<14 | p5 (unmasked) */ - if (!(b&0x80)) - { - /* we can skip this cause it was (effectively) done above in calc'ing s */ - /* b &= (0x7f<<28)|(0x7f<<14)|(0x7f); */ - a &= SLOT_2_0; - a = a<<7; - a |= b; - s = s>>18; - *v = ((u64)s)<<32 | a; - return 6; - } - - p++; - a = a<<14; - a |= *p; - /* a: p2<<28 | p4<<14 | p6 (unmasked) */ - if (!(a&0x80)) - { - a &= SLOT_4_2_0; - b &= SLOT_2_0; - b = b<<7; - a |= b; - s = s>>11; - *v = ((u64)s)<<32 | a; - return 7; - } - - /* CSE2 from below */ - a &= SLOT_2_0; - p++; - b = b<<14; - b |= *p; - /* b: p3<<28 | p5<<14 | p7 (unmasked) */ - if (!(b&0x80)) - { - b &= SLOT_4_2_0; - /* moved CSE2 up */ - /* a &= (0x7f<<14)|(0x7f); */ - a = a<<7; - a |= b; - s = s>>4; - *v = ((u64)s)<<32 | a; - return 8; - } - - p++; - a = a<<15; - a |= *p; - /* a: p4<<29 | p6<<15 | p8 (unmasked) */ - - /* moved CSE2 up */ - /* a &= (0x7f<<29)|(0x7f<<15)|(0xff); */ - b &= SLOT_2_0; - b = b<<8; - a |= b; - - s = s<<4; - b = p[-4]; - b &= 0x7f; - b = b>>3; - s |= b; - - *v = ((u64)s)<<32 | a; - - return 9; -} - -/* -** Read a 32-bit variable-length integer from memory starting at p[0]. -** Return the number of bytes read. The value is stored in *v. -** -** If the varint stored in p[0] is larger than can fit in a 32-bit unsigned -** integer, then set *v to 0xffffffff. -** -** A MACRO version, getVarint32, is provided which inlines the -** single-byte case. All code should use the MACRO version as -** this function assumes the single-byte case has already been handled. -*/ -SQLITE_PRIVATE u8 sqlite3GetVarint32(const unsigned char *p, u32 *v){ - u32 a,b; - - /* The 1-byte case. Overwhelmingly the most common. 
Handled inline - ** by the getVarin32() macro */ - a = *p; - /* a: p0 (unmasked) */ -#ifndef getVarint32 - if (!(a&0x80)) - { - /* Values between 0 and 127 */ - *v = a; - return 1; - } -#endif - - /* The 2-byte case */ - p++; - b = *p; - /* b: p1 (unmasked) */ - if (!(b&0x80)) - { - /* Values between 128 and 16383 */ - a &= 0x7f; - a = a<<7; - *v = a | b; - return 2; - } - - /* The 3-byte case */ - p++; - a = a<<14; - a |= *p; - /* a: p0<<14 | p2 (unmasked) */ - if (!(a&0x80)) - { - /* Values between 16384 and 2097151 */ - a &= (0x7f<<14)|(0x7f); - b &= 0x7f; - b = b<<7; - *v = a | b; - return 3; - } - - /* A 32-bit varint is used to store size information in btrees. - ** Objects are rarely larger than 2MiB limit of a 3-byte varint. - ** A 3-byte varint is sufficient, for example, to record the size - ** of a 1048569-byte BLOB or string. - ** - ** We only unroll the first 1-, 2-, and 3- byte cases. The very - ** rare larger cases can be handled by the slower 64-bit varint - ** routine. - */ -#if 1 - { - u64 v64; - u8 n; - - p -= 2; - n = sqlite3GetVarint(p, &v64); - assert( n>3 && n<=9 ); - if( (v64 & SQLITE_MAX_U32)!=v64 ){ - *v = 0xffffffff; - }else{ - *v = (u32)v64; - } - return n; - } - -#else - /* For following code (kept for historical record only) shows an - ** unrolling for the 3- and 4-byte varint cases. This code is - ** slightly faster, but it is also larger and much harder to test. - */ - p++; - b = b<<14; - b |= *p; - /* b: p1<<14 | p3 (unmasked) */ - if (!(b&0x80)) - { - /* Values between 2097152 and 268435455 */ - b &= (0x7f<<14)|(0x7f); - a &= (0x7f<<14)|(0x7f); - a = a<<7; - *v = a | b; - return 4; - } - - p++; - a = a<<14; - a |= *p; - /* a: p0<<28 | p2<<14 | p4 (unmasked) */ - if (!(a&0x80)) - { - /* Values between 268435456 and 34359738367 */ - a &= SLOT_4_2_0; - b &= SLOT_4_2_0; - b = b<<7; - *v = a | b; - return 5; - } - - /* We can only reach this point when reading a corrupt database - ** file. In that case we are not in any hurry. Use the (relatively - ** slow) general-purpose sqlite3GetVarint() routine to extract the - ** value. */ - { - u64 v64; - u8 n; - - p -= 4; - n = sqlite3GetVarint(p, &v64); - assert( n>5 && n<=9 ); - *v = (u32)v64; - return n; - } -#endif -} - -/* -** Return the number of bytes that will be needed to store the given -** 64-bit integer. -*/ -SQLITE_PRIVATE int sqlite3VarintLen(u64 v){ - int i = 0; - do{ - i++; - v >>= 7; - }while( v!=0 && ALWAYS(i<9) ); - return i; -} - - -/* -** Read or write a four-byte big-endian integer value. -*/ -SQLITE_PRIVATE u32 sqlite3Get4byte(const u8 *p){ - testcase( p[0]&0x80 ); - return ((unsigned)p[0]<<24) | (p[1]<<16) | (p[2]<<8) | p[3]; -} -SQLITE_PRIVATE void sqlite3Put4byte(unsigned char *p, u32 v){ - p[0] = (u8)(v>>24); - p[1] = (u8)(v>>16); - p[2] = (u8)(v>>8); - p[3] = (u8)v; -} - - - -/* -** Translate a single byte of Hex into an integer. -** This routine only works if h really is a valid hexadecimal -** character: 0..9a..fA..F -*/ -SQLITE_PRIVATE u8 sqlite3HexToInt(int h){ - assert( (h>='0' && h<='9') || (h>='a' && h<='f') || (h>='A' && h<='F') ); -#ifdef SQLITE_ASCII - h += 9*(1&(h>>6)); -#endif -#ifdef SQLITE_EBCDIC - h += 9*(1&~(h>>4)); -#endif - return (u8)(h & 0xf); -} - -#if !defined(SQLITE_OMIT_BLOB_LITERAL) || defined(SQLITE_HAS_CODEC) -/* -** Convert a BLOB literal of the form "x'hhhhhh'" into its binary -** value. Return a pointer to its binary value. Space to hold the -** binary value has been obtained from malloc and must be freed by -** the calling routine. 
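Editor's note: two of the helpers above are small enough to restate in portable C for reference: the big-endian 4-byte read/write pair and the ASCII hex-digit decoder (the bit-6 trick maps 'a'..'f' and 'A'..'F' onto 10..15). Names are illustrative, ASCII only.

    /* Sketch of the 4-byte big-endian accessors and hex-digit decoder. */
    #include <stdio.h>
    #include <stdint.h>

    static uint32_t get4(const unsigned char *p){
      return ((uint32_t)p[0]<<24) | ((uint32_t)p[1]<<16) | ((uint32_t)p[2]<<8) | p[3];
    }
    static void put4(unsigned char *p, uint32_t v){
      p[0] = (unsigned char)(v>>24);
      p[1] = (unsigned char)(v>>16);
      p[2] = (unsigned char)(v>>8);
      p[3] = (unsigned char)v;
    }
    /* '0'..'9','a'..'f','A'..'F' -> 0..15: letters have bit 6 set, so add 9 before masking */
    static unsigned char hex_to_int(int h){
      h += 9*(1&(h>>6));
      return (unsigned char)(h & 0xf);
    }

    int main(void){
      unsigned char b[4];
      put4(b, 0xCAFEBABE);
      printf("%08X %d %d\n", (unsigned)get4(b), hex_to_int('7'), hex_to_int('F'));
      /* expected: CAFEBABE 7 15 */
      return 0;
    }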
-*/ -SQLITE_PRIVATE void *sqlite3HexToBlob(sqlite3 *db, const char *z, int n){ - char *zBlob; - int i; - - zBlob = (char *)sqlite3DbMallocRaw(db, n/2 + 1); - n--; - if( zBlob ){ - for(i=0; imagic; - if( magic!=SQLITE_MAGIC_OPEN ){ - if( sqlite3SafetyCheckSickOrOk(db) ){ - testcase( sqlite3GlobalConfig.xLog!=0 ); - logBadConnection("unopened"); - } - return 0; - }else{ - return 1; - } -} -SQLITE_PRIVATE int sqlite3SafetyCheckSickOrOk(sqlite3 *db){ - u32 magic; - magic = db->magic; - if( magic!=SQLITE_MAGIC_SICK && - magic!=SQLITE_MAGIC_OPEN && - magic!=SQLITE_MAGIC_BUSY ){ - testcase( sqlite3GlobalConfig.xLog!=0 ); - logBadConnection("invalid"); - return 0; - }else{ - return 1; - } -} - -/* -** Attempt to add, substract, or multiply the 64-bit signed value iB against -** the other 64-bit signed integer at *pA and store the result in *pA. -** Return 0 on success. Or if the operation would have resulted in an -** overflow, leave *pA unchanged and return 1. -*/ -SQLITE_PRIVATE int sqlite3AddInt64(i64 *pA, i64 iB){ - i64 iA = *pA; - testcase( iA==0 ); testcase( iA==1 ); - testcase( iB==-1 ); testcase( iB==0 ); - if( iB>=0 ){ - testcase( iA>0 && LARGEST_INT64 - iA == iB ); - testcase( iA>0 && LARGEST_INT64 - iA == iB - 1 ); - if( iA>0 && LARGEST_INT64 - iA < iB ) return 1; - }else{ - testcase( iA<0 && -(iA + LARGEST_INT64) == iB + 1 ); - testcase( iA<0 && -(iA + LARGEST_INT64) == iB + 2 ); - if( iA<0 && -(iA + LARGEST_INT64) > iB + 1 ) return 1; - } - *pA += iB; - return 0; -} -SQLITE_PRIVATE int sqlite3SubInt64(i64 *pA, i64 iB){ - testcase( iB==SMALLEST_INT64+1 ); - if( iB==SMALLEST_INT64 ){ - testcase( (*pA)==(-1) ); testcase( (*pA)==0 ); - if( (*pA)>=0 ) return 1; - *pA -= iB; - return 0; - }else{ - return sqlite3AddInt64(pA, -iB); - } -} -#define TWOPOWER32 (((i64)1)<<32) -#define TWOPOWER31 (((i64)1)<<31) -SQLITE_PRIVATE int sqlite3MulInt64(i64 *pA, i64 iB){ - i64 iA = *pA; - i64 iA1, iA0, iB1, iB0, r; - - iA1 = iA/TWOPOWER32; - iA0 = iA % TWOPOWER32; - iB1 = iB/TWOPOWER32; - iB0 = iB % TWOPOWER32; - if( iA1==0 ){ - if( iB1==0 ){ - *pA *= iB; - return 0; - } - r = iA0*iB1; - }else if( iB1==0 ){ - r = iA1*iB0; - }else{ - /* If both iA1 and iB1 are non-zero, overflow will result */ - return 1; - } - testcase( r==(-TWOPOWER31)-1 ); - testcase( r==(-TWOPOWER31) ); - testcase( r==TWOPOWER31 ); - testcase( r==TWOPOWER31-1 ); - if( r<(-TWOPOWER31) || r>=TWOPOWER31 ) return 1; - r *= TWOPOWER32; - if( sqlite3AddInt64(&r, iA0*iB0) ) return 1; - *pA = r; - return 0; -} - -/* -** Compute the absolute value of a 32-bit signed integer, of possible. Or -** if the integer has a value of -2147483648, return +2147483647 -*/ -SQLITE_PRIVATE int sqlite3AbsInt32(int x){ - if( x>=0 ) return x; - if( x==(int)0x80000000 ) return 0x7fffffff; - return -x; -} - -#ifdef SQLITE_ENABLE_8_3_NAMES -/* -** If SQLITE_ENABLE_8_3_NAMES is set at compile-time and if the database -** filename in zBaseFilename is a URI with the "8_3_names=1" parameter and -** if filename in z[] has a suffix (a.k.a. "extension") that is longer than -** three characters, then shorten the suffix on z[] to be the last three -** characters of the original suffix. -** -** If SQLITE_ENABLE_8_3_NAMES is set to 2 at compile-time, then always -** do the suffix shortening regardless of URI parameter. 
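Editor's note: the addition overflow test above checks the representable range before adding so that *pA is left untouched on failure. A standalone sketch of the same idea, using INT64_MAX/INT64_MIN from stdint.h in place of the file's own LARGEST_INT64/SMALLEST_INT64 constants.

    /* Sketch of checked signed 64-bit addition. */
    #include <stdio.h>
    #include <stdint.h>

    static int add_int64_checked(int64_t *pA, int64_t iB){
      int64_t iA = *pA;
      if( iB>=0 ){
        if( iA>0 && INT64_MAX - iA < iB ) return 1;   /* would overflow upward */
      }else{
        if( iA<0 && iB < INT64_MIN - iA ) return 1;   /* would overflow downward */
      }
      *pA = iA + iB;
      return 0;
    }

    int main(void){
      int64_t a = INT64_MAX - 1;
      printf("%d ", add_int64_checked(&a, 1));   /* 0: fits, a == INT64_MAX   */
      printf("%d\n", add_int64_checked(&a, 1));  /* 1: overflow, a unchanged  */
      return 0;
    }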
-** -** Examples: -** -** test.db-journal => test.nal -** test.db-wal => test.wal -** test.db-shm => test.shm -** test.db-mj7f3319fa => test.9fa -*/ -SQLITE_PRIVATE void sqlite3FileSuffix3(const char *zBaseFilename, char *z){ -#if SQLITE_ENABLE_8_3_NAMES<2 - if( sqlite3_uri_boolean(zBaseFilename, "8_3_names", 0) ) -#endif - { - int i, sz; - sz = sqlite3Strlen30(z); - for(i=sz-1; i>0 && z[i]!='/' && z[i]!='.'; i--){} - if( z[i]=='.' && ALWAYS(sz>i+4) ) memmove(&z[i+1], &z[sz-3], 4); - } -} -#endif - -/* -** Find (an approximate) sum of two LogEst values. This computation is -** not a simple "+" operator because LogEst is stored as a logarithmic -** value. -** -*/ -SQLITE_PRIVATE LogEst sqlite3LogEstAdd(LogEst a, LogEst b){ - static const unsigned char x[] = { - 10, 10, /* 0,1 */ - 9, 9, /* 2,3 */ - 8, 8, /* 4,5 */ - 7, 7, 7, /* 6,7,8 */ - 6, 6, 6, /* 9,10,11 */ - 5, 5, 5, /* 12-14 */ - 4, 4, 4, 4, /* 15-18 */ - 3, 3, 3, 3, 3, 3, /* 19-24 */ - 2, 2, 2, 2, 2, 2, 2, /* 25-31 */ - }; - if( a>=b ){ - if( a>b+49 ) return a; - if( a>b+31 ) return a+1; - return a+x[a-b]; - }else{ - if( b>a+49 ) return b; - if( b>a+31 ) return b+1; - return b+x[b-a]; - } -} - -/* -** Convert an integer into a LogEst. In other words, compute an -** approximation for 10*log2(x). -*/ -SQLITE_PRIVATE LogEst sqlite3LogEst(u64 x){ - static LogEst a[] = { 0, 2, 3, 5, 6, 7, 8, 9 }; - LogEst y = 40; - if( x<8 ){ - if( x<2 ) return 0; - while( x<8 ){ y -= 10; x <<= 1; } - }else{ - while( x>255 ){ y += 40; x >>= 4; } - while( x>15 ){ y += 10; x >>= 1; } - } - return a[x&7] + y - 10; -} - -#ifndef SQLITE_OMIT_VIRTUALTABLE -/* -** Convert a double into a LogEst -** In other words, compute an approximation for 10*log2(x). -*/ -SQLITE_PRIVATE LogEst sqlite3LogEstFromDouble(double x){ - u64 a; - LogEst e; - assert( sizeof(x)==8 && sizeof(a)==8 ); - if( x<=1 ) return 0; - if( x<=2000000000 ) return sqlite3LogEst((u64)x); - memcpy(&a, &x, 8); - e = (a>>52) - 1022; - return e*10; -} -#endif /* SQLITE_OMIT_VIRTUALTABLE */ - -/* -** Convert a LogEst into an integer. -*/ -SQLITE_PRIVATE u64 sqlite3LogEstToInt(LogEst x){ - u64 n; - if( x<10 ) return 1; - n = x%10; - x /= 10; - if( n>=5 ) n -= 2; - else if( n>=1 ) n -= 1; - if( x>=3 ){ - return x>60 ? (u64)LARGEST_INT64 : (n+8)<<(x-3); - } - return (n+8)>>(3-x); -} - -/************** End of util.c ************************************************/ -/************** Begin file hash.c ********************************************/ -/* -** 2001 September 22 -** -** The author disclaims copyright to this source code. In place of -** a legal notice, here is a blessing: -** -** May you do good and not evil. -** May you find forgiveness for yourself and forgive others. -** May you share freely, never taking more than you give. -** -************************************************************************* -** This is the implementation of generic hash-tables -** used in SQLite. -*/ -/* #include */ - -/* Turn bulk memory into a hash table object by initializing the -** fields of the Hash structure. -** -** "pNew" is a pointer to the hash table that is to be initialized. -*/ -SQLITE_PRIVATE void sqlite3HashInit(Hash *pNew){ - assert( pNew!=0 ); - pNew->first = 0; - pNew->count = 0; - pNew->htsize = 0; - pNew->ht = 0; -} - -/* Remove all entries from a hash table. Reclaim all memory. -** Call this routine to delete a hash table or to reset a hash table -** to the empty state. 
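Editor's note: a LogEst stores roughly 10*log2(x) as a small integer, as the comments above state. The following sketch reuses the lookup table from the routine above and compares a few values against math.h's log2 (illustrative helper name, link with -lm).

    /* Worked check of the LogEst approximation 10*log2(x). */
    #include <stdio.h>
    #include <stdint.h>
    #include <math.h>

    static int log_est(uint64_t x){
      static const int a[] = { 0, 2, 3, 5, 6, 7, 8, 9 };
      int y = 40;
      if( x<8 ){
        if( x<2 ) return 0;
        while( x<8 ){ y -= 10; x <<= 1; }
      }else{
        while( x>255 ){ y += 40; x >>= 4; }   /* each >>4 is -40 on the estimate */
        while( x>15 ){ y += 10; x >>= 1; }    /* each >>1 is -10 on the estimate */
      }
      return a[x&7] + y - 10;
    }

    int main(void){
      uint64_t samples[] = { 2, 10, 1000, 1000000 };
      for(unsigned i=0; i<sizeof(samples)/sizeof(samples[0]); i++){
        printf("x=%llu  LogEst=%d  10*log2(x)=%.1f\n",
               (unsigned long long)samples[i],
               log_est(samples[i]),
               10.0*log2((double)samples[i]));
      }
      return 0;   /* e.g. x=1000 gives LogEst 99 vs 99.7 */
    }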
-*/ -SQLITE_PRIVATE void sqlite3HashClear(Hash *pH){ - HashElem *elem; /* For looping over all elements of the table */ - - assert( pH!=0 ); - elem = pH->first; - pH->first = 0; - sqlite3_free(pH->ht); - pH->ht = 0; - pH->htsize = 0; - while( elem ){ - HashElem *next_elem = elem->next; - sqlite3_free(elem); - elem = next_elem; - } - pH->count = 0; -} - -/* -** The hashing function. -*/ -static unsigned int strHash(const char *z, int nKey){ - unsigned int h = 0; - assert( nKey>=0 ); - while( nKey > 0 ){ - h = (h<<3) ^ h ^ sqlite3UpperToLower[(unsigned char)*z++]; - nKey--; - } - return h; -} - - -/* Link pNew element into the hash table pH. If pEntry!=0 then also -** insert pNew into the pEntry hash bucket. -*/ -static void insertElement( - Hash *pH, /* The complete hash table */ - struct _ht *pEntry, /* The entry into which pNew is inserted */ - HashElem *pNew /* The element to be inserted */ -){ - HashElem *pHead; /* First element already in pEntry */ - if( pEntry ){ - pHead = pEntry->count ? pEntry->chain : 0; - pEntry->count++; - pEntry->chain = pNew; - }else{ - pHead = 0; - } - if( pHead ){ - pNew->next = pHead; - pNew->prev = pHead->prev; - if( pHead->prev ){ pHead->prev->next = pNew; } - else { pH->first = pNew; } - pHead->prev = pNew; - }else{ - pNew->next = pH->first; - if( pH->first ){ pH->first->prev = pNew; } - pNew->prev = 0; - pH->first = pNew; - } -} - - -/* Resize the hash table so that it cantains "new_size" buckets. -** -** The hash table might fail to resize if sqlite3_malloc() fails or -** if the new size is the same as the prior size. -** Return TRUE if the resize occurs and false if not. -*/ -static int rehash(Hash *pH, unsigned int new_size){ - struct _ht *new_ht; /* The new hash table */ - HashElem *elem, *next_elem; /* For looping over existing elements */ - -#if SQLITE_MALLOC_SOFT_LIMIT>0 - if( new_size*sizeof(struct _ht)>SQLITE_MALLOC_SOFT_LIMIT ){ - new_size = SQLITE_MALLOC_SOFT_LIMIT/sizeof(struct _ht); - } - if( new_size==pH->htsize ) return 0; -#endif - - /* The inability to allocates space for a larger hash table is - ** a performance hit but it is not a fatal error. So mark the - ** allocation as a benign. Use sqlite3Malloc()/memset(0) instead of - ** sqlite3MallocZero() to make the allocation, as sqlite3MallocZero() - ** only zeroes the requested number of bytes whereas this module will - ** use the actual amount of space allocated for the hash table (which - ** may be larger than the requested amount). - */ - sqlite3BeginBenignMalloc(); - new_ht = (struct _ht *)sqlite3Malloc( new_size*sizeof(struct _ht) ); - sqlite3EndBenignMalloc(); - - if( new_ht==0 ) return 0; - sqlite3_free(pH->ht); - pH->ht = new_ht; - pH->htsize = new_size = sqlite3MallocSize(new_ht)/sizeof(struct _ht); - memset(new_ht, 0, new_size*sizeof(struct _ht)); - for(elem=pH->first, pH->first=0; elem; elem = next_elem){ - unsigned int h = strHash(elem->pKey, elem->nKey) % new_size; - next_elem = elem->next; - insertElement(pH, &new_ht[h], elem); - } - return 1; -} - -/* This function (for internal use only) locates an element in an -** hash table that matches the given key. The hash for this key has -** already been computed and is passed as the 4th parameter. -*/ -static HashElem *findElementGivenHash( - const Hash *pH, /* The pH to be searched */ - const char *pKey, /* The key we are searching for */ - int nKey, /* Bytes in key (not counting zero terminator) */ - unsigned int h /* The hash for this key. 
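Editor's note: the hashing function above folds case through SQLite's internal UpperToLower table so that identifiers hash equally regardless of case. A standalone sketch that substitutes tolower() for that table, an assumption made only to keep the example self-contained.

    /* Sketch of the case-folding string hash. */
    #include <stdio.h>
    #include <ctype.h>

    static unsigned int str_hash(const char *z, int nKey){
      unsigned int h = 0;
      while( nKey-- > 0 ){
        h = (h<<3) ^ h ^ (unsigned char)tolower((unsigned char)*z++);
      }
      return h;
    }

    int main(void){
      /* "Users" and "USERS" hash identically, so they land in the same bucket */
      printf("%u %u\n", str_hash("Users", 5), str_hash("USERS", 5));
      return 0;
    }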
*/ -){ - HashElem *elem; /* Used to loop thru the element list */ - int count; /* Number of elements left to test */ - - if( pH->ht ){ - struct _ht *pEntry = &pH->ht[h]; - elem = pEntry->chain; - count = pEntry->count; - }else{ - elem = pH->first; - count = pH->count; - } - while( count-- && ALWAYS(elem) ){ - if( elem->nKey==nKey && sqlite3StrNICmp(elem->pKey,pKey,nKey)==0 ){ - return elem; - } - elem = elem->next; - } - return 0; -} - -/* Remove a single entry from the hash table given a pointer to that -** element and a hash on the element's key. -*/ -static void removeElementGivenHash( - Hash *pH, /* The pH containing "elem" */ - HashElem* elem, /* The element to be removed from the pH */ - unsigned int h /* Hash value for the element */ -){ - struct _ht *pEntry; - if( elem->prev ){ - elem->prev->next = elem->next; - }else{ - pH->first = elem->next; - } - if( elem->next ){ - elem->next->prev = elem->prev; - } - if( pH->ht ){ - pEntry = &pH->ht[h]; - if( pEntry->chain==elem ){ - pEntry->chain = elem->next; - } - pEntry->count--; - assert( pEntry->count>=0 ); - } - sqlite3_free( elem ); - pH->count--; - if( pH->count==0 ){ - assert( pH->first==0 ); - assert( pH->count==0 ); - sqlite3HashClear(pH); - } -} - -/* Attempt to locate an element of the hash table pH with a key -** that matches pKey,nKey. Return the data for this element if it is -** found, or NULL if there is no match. -*/ -SQLITE_PRIVATE void *sqlite3HashFind(const Hash *pH, const char *pKey, int nKey){ - HashElem *elem; /* The element that matches key */ - unsigned int h; /* A hash on key */ - - assert( pH!=0 ); - assert( pKey!=0 ); - assert( nKey>=0 ); - if( pH->ht ){ - h = strHash(pKey, nKey) % pH->htsize; - }else{ - h = 0; - } - elem = findElementGivenHash(pH, pKey, nKey, h); - return elem ? elem->data : 0; -} - -/* Insert an element into the hash table pH. The key is pKey,nKey -** and the data is "data". -** -** If no element exists with a matching key, then a new -** element is created and NULL is returned. -** -** If another element already exists with the same key, then the -** new data replaces the old data and the old data is returned. -** The key is not copied in this instance. If a malloc fails, then -** the new data is returned and the hash table is unchanged. -** -** If the "data" parameter to this function is NULL, then the -** element corresponding to "key" is removed from the hash table. 
-*/ -SQLITE_PRIVATE void *sqlite3HashInsert(Hash *pH, const char *pKey, int nKey, void *data){ - unsigned int h; /* the hash of the key modulo hash table size */ - HashElem *elem; /* Used to loop thru the element list */ - HashElem *new_elem; /* New element added to the pH */ - - assert( pH!=0 ); - assert( pKey!=0 ); - assert( nKey>=0 ); - if( pH->htsize ){ - h = strHash(pKey, nKey) % pH->htsize; - }else{ - h = 0; - } - elem = findElementGivenHash(pH,pKey,nKey,h); - if( elem ){ - void *old_data = elem->data; - if( data==0 ){ - removeElementGivenHash(pH,elem,h); - }else{ - elem->data = data; - elem->pKey = pKey; - assert(nKey==elem->nKey); - } - return old_data; - } - if( data==0 ) return 0; - new_elem = (HashElem*)sqlite3Malloc( sizeof(HashElem) ); - if( new_elem==0 ) return data; - new_elem->pKey = pKey; - new_elem->nKey = nKey; - new_elem->data = data; - pH->count++; - if( pH->count>=10 && pH->count > 2*pH->htsize ){ - if( rehash(pH, pH->count*2) ){ - assert( pH->htsize>0 ); - h = strHash(pKey, nKey) % pH->htsize; - } - } - if( pH->ht ){ - insertElement(pH, &pH->ht[h], new_elem); - }else{ - insertElement(pH, 0, new_elem); - } - return 0; -} - -/************** End of hash.c ************************************************/ -/************** Begin file opcodes.c *****************************************/ -/* Automatically generated. Do not edit */ -/* See the mkopcodec.awk script for details. */ -#if !defined(SQLITE_OMIT_EXPLAIN) || defined(VDBE_PROFILE) || defined(SQLITE_DEBUG) -#if defined(SQLITE_ENABLE_EXPLAIN_COMMENTS) || defined(SQLITE_DEBUG) -# define OpHelp(X) "\0" X -#else -# define OpHelp(X) -#endif -SQLITE_PRIVATE const char *sqlite3OpcodeName(int i){ - static const char *const azName[] = { "?", - /* 1 */ "Function" OpHelp("r[P3]=func(r[P2@P5])"), - /* 2 */ "Savepoint" OpHelp(""), - /* 3 */ "AutoCommit" OpHelp(""), - /* 4 */ "Transaction" OpHelp(""), - /* 5 */ "SorterNext" OpHelp(""), - /* 6 */ "PrevIfOpen" OpHelp(""), - /* 7 */ "NextIfOpen" OpHelp(""), - /* 8 */ "Prev" OpHelp(""), - /* 9 */ "Next" OpHelp(""), - /* 10 */ "AggStep" OpHelp("accum=r[P3] step(r[P2@P5])"), - /* 11 */ "Checkpoint" OpHelp(""), - /* 12 */ "JournalMode" OpHelp(""), - /* 13 */ "Vacuum" OpHelp(""), - /* 14 */ "VFilter" OpHelp("iplan=r[P3] zplan='P4'"), - /* 15 */ "VUpdate" OpHelp("data=r[P3@P2]"), - /* 16 */ "Goto" OpHelp(""), - /* 17 */ "Gosub" OpHelp(""), - /* 18 */ "Return" OpHelp(""), - /* 19 */ "Not" OpHelp("r[P2]= !r[P1]"), - /* 20 */ "InitCoroutine" OpHelp(""), - /* 21 */ "EndCoroutine" OpHelp(""), - /* 22 */ "Yield" OpHelp(""), - /* 23 */ "HaltIfNull" OpHelp("if r[P3]=null halt"), - /* 24 */ "Halt" OpHelp(""), - /* 25 */ "Integer" OpHelp("r[P2]=P1"), - /* 26 */ "Int64" OpHelp("r[P2]=P4"), - /* 27 */ "String" OpHelp("r[P2]='P4' (len=P1)"), - /* 28 */ "Null" OpHelp("r[P2..P3]=NULL"), - /* 29 */ "SoftNull" OpHelp("r[P1]=NULL"), - /* 30 */ "Blob" OpHelp("r[P2]=P4 (len=P1)"), - /* 31 */ "Variable" OpHelp("r[P2]=parameter(P1,P4)"), - /* 32 */ "Move" OpHelp("r[P2@P3]=r[P1@P3]"), - /* 33 */ "Copy" OpHelp("r[P2@P3+1]=r[P1@P3+1]"), - /* 34 */ "SCopy" OpHelp("r[P2]=r[P1]"), - /* 35 */ "ResultRow" OpHelp("output=r[P1@P2]"), - /* 36 */ "CollSeq" OpHelp(""), - /* 37 */ "AddImm" OpHelp("r[P1]=r[P1]+P2"), - /* 38 */ "MustBeInt" OpHelp(""), - /* 39 */ "RealAffinity" OpHelp(""), - /* 40 */ "Permutation" OpHelp(""), - /* 41 */ "Compare" OpHelp("r[P1@P3] <-> r[P2@P3]"), - /* 42 */ "Jump" OpHelp(""), - /* 43 */ "Once" OpHelp(""), - /* 44 */ "If" OpHelp(""), - /* 45 */ "IfNot" OpHelp(""), - /* 46 */ "Column" 
OpHelp("r[P3]=PX"), - /* 47 */ "Affinity" OpHelp("affinity(r[P1@P2])"), - /* 48 */ "MakeRecord" OpHelp("r[P3]=mkrec(r[P1@P2])"), - /* 49 */ "Count" OpHelp("r[P2]=count()"), - /* 50 */ "ReadCookie" OpHelp(""), - /* 51 */ "SetCookie" OpHelp(""), - /* 52 */ "OpenRead" OpHelp("root=P2 iDb=P3"), - /* 53 */ "OpenWrite" OpHelp("root=P2 iDb=P3"), - /* 54 */ "OpenAutoindex" OpHelp("nColumn=P2"), - /* 55 */ "OpenEphemeral" OpHelp("nColumn=P2"), - /* 56 */ "SorterOpen" OpHelp(""), - /* 57 */ "OpenPseudo" OpHelp("P3 columns in r[P2]"), - /* 58 */ "Close" OpHelp(""), - /* 59 */ "SeekLT" OpHelp(""), - /* 60 */ "SeekLE" OpHelp(""), - /* 61 */ "SeekGE" OpHelp(""), - /* 62 */ "SeekGT" OpHelp(""), - /* 63 */ "Seek" OpHelp("intkey=r[P2]"), - /* 64 */ "NoConflict" OpHelp("key=r[P3@P4]"), - /* 65 */ "NotFound" OpHelp("key=r[P3@P4]"), - /* 66 */ "Found" OpHelp("key=r[P3@P4]"), - /* 67 */ "NotExists" OpHelp("intkey=r[P3]"), - /* 68 */ "Sequence" OpHelp("r[P2]=cursor[P1].ctr++"), - /* 69 */ "NewRowid" OpHelp("r[P2]=rowid"), - /* 70 */ "Insert" OpHelp("intkey=r[P3] data=r[P2]"), - /* 71 */ "Or" OpHelp("r[P3]=(r[P1] || r[P2])"), - /* 72 */ "And" OpHelp("r[P3]=(r[P1] && r[P2])"), - /* 73 */ "InsertInt" OpHelp("intkey=P3 data=r[P2]"), - /* 74 */ "Delete" OpHelp(""), - /* 75 */ "ResetCount" OpHelp(""), - /* 76 */ "IsNull" OpHelp("if r[P1]==NULL goto P2"), - /* 77 */ "NotNull" OpHelp("if r[P1]!=NULL goto P2"), - /* 78 */ "Ne" OpHelp("if r[P1]!=r[P3] goto P2"), - /* 79 */ "Eq" OpHelp("if r[P1]==r[P3] goto P2"), - /* 80 */ "Gt" OpHelp("if r[P1]>r[P3] goto P2"), - /* 81 */ "Le" OpHelp("if r[P1]<=r[P3] goto P2"), - /* 82 */ "Lt" OpHelp("if r[P1]=r[P3] goto P2"), - /* 84 */ "SorterCompare" OpHelp("if key(P1)!=rtrim(r[P3],P4) goto P2"), - /* 85 */ "BitAnd" OpHelp("r[P3]=r[P1]&r[P2]"), - /* 86 */ "BitOr" OpHelp("r[P3]=r[P1]|r[P2]"), - /* 87 */ "ShiftLeft" OpHelp("r[P3]=r[P2]<>r[P1]"), - /* 89 */ "Add" OpHelp("r[P3]=r[P1]+r[P2]"), - /* 90 */ "Subtract" OpHelp("r[P3]=r[P2]-r[P1]"), - /* 91 */ "Multiply" OpHelp("r[P3]=r[P1]*r[P2]"), - /* 92 */ "Divide" OpHelp("r[P3]=r[P2]/r[P1]"), - /* 93 */ "Remainder" OpHelp("r[P3]=r[P2]%r[P1]"), - /* 94 */ "Concat" OpHelp("r[P3]=r[P2]+r[P1]"), - /* 95 */ "SorterData" OpHelp("r[P2]=data"), - /* 96 */ "BitNot" OpHelp("r[P1]= ~r[P1]"), - /* 97 */ "String8" OpHelp("r[P2]='P4'"), - /* 98 */ "RowKey" OpHelp("r[P2]=key"), - /* 99 */ "RowData" OpHelp("r[P2]=data"), - /* 100 */ "Rowid" OpHelp("r[P2]=rowid"), - /* 101 */ "NullRow" OpHelp(""), - /* 102 */ "Last" OpHelp(""), - /* 103 */ "SorterSort" OpHelp(""), - /* 104 */ "Sort" OpHelp(""), - /* 105 */ "Rewind" OpHelp(""), - /* 106 */ "SorterInsert" OpHelp(""), - /* 107 */ "IdxInsert" OpHelp("key=r[P2]"), - /* 108 */ "IdxDelete" OpHelp("key=r[P2@P3]"), - /* 109 */ "IdxRowid" OpHelp("r[P2]=rowid"), - /* 110 */ "IdxLE" OpHelp("key=r[P3@P4]"), - /* 111 */ "IdxGT" OpHelp("key=r[P3@P4]"), - /* 112 */ "IdxLT" OpHelp("key=r[P3@P4]"), - /* 113 */ "IdxGE" OpHelp("key=r[P3@P4]"), - /* 114 */ "Destroy" OpHelp(""), - /* 115 */ "Clear" OpHelp(""), - /* 116 */ "ResetSorter" OpHelp(""), - /* 117 */ "CreateIndex" OpHelp("r[P2]=root iDb=P1"), - /* 118 */ "CreateTable" OpHelp("r[P2]=root iDb=P1"), - /* 119 */ "ParseSchema" OpHelp(""), - /* 120 */ "LoadAnalysis" OpHelp(""), - /* 121 */ "DropTable" OpHelp(""), - /* 122 */ "DropIndex" OpHelp(""), - /* 123 */ "DropTrigger" OpHelp(""), - /* 124 */ "IntegrityCk" OpHelp(""), - /* 125 */ "RowSetAdd" OpHelp("rowset(P1)=r[P2]"), - /* 126 */ "RowSetRead" OpHelp("r[P3]=rowset(P1)"), - /* 127 */ "RowSetTest" OpHelp("if r[P3] in 
rowset(P1) goto P2"), - /* 128 */ "Program" OpHelp(""), - /* 129 */ "Param" OpHelp(""), - /* 130 */ "FkCounter" OpHelp("fkctr[P1]+=P2"), - /* 131 */ "FkIfZero" OpHelp("if fkctr[P1]==0 goto P2"), - /* 132 */ "MemMax" OpHelp("r[P1]=max(r[P1],r[P2])"), - /* 133 */ "Real" OpHelp("r[P2]=P4"), - /* 134 */ "IfPos" OpHelp("if r[P1]>0 goto P2"), - /* 135 */ "IfNeg" OpHelp("if r[P1]<0 goto P2"), - /* 136 */ "IfZero" OpHelp("r[P1]+=P3, if r[P1]==0 goto P2"), - /* 137 */ "AggFinal" OpHelp("accum=r[P1] N=P2"), - /* 138 */ "IncrVacuum" OpHelp(""), - /* 139 */ "Expire" OpHelp(""), - /* 140 */ "TableLock" OpHelp("iDb=P1 root=P2 write=P3"), - /* 141 */ "VBegin" OpHelp(""), - /* 142 */ "VCreate" OpHelp(""), - /* 143 */ "ToText" OpHelp(""), - /* 144 */ "ToBlob" OpHelp(""), - /* 145 */ "ToNumeric" OpHelp(""), - /* 146 */ "ToInt" OpHelp(""), - /* 147 */ "ToReal" OpHelp(""), - /* 148 */ "VDestroy" OpHelp(""), - /* 149 */ "VOpen" OpHelp(""), - /* 150 */ "VColumn" OpHelp("r[P3]=vcolumn(P2)"), - /* 151 */ "VNext" OpHelp(""), - /* 152 */ "VRename" OpHelp(""), - /* 153 */ "Pagecount" OpHelp(""), - /* 154 */ "MaxPgcnt" OpHelp(""), - /* 155 */ "Init" OpHelp("Start at P2"), - /* 156 */ "Noop" OpHelp(""), - /* 157 */ "Explain" OpHelp(""), - }; - return azName[i]; -} -#endif - -/************** End of opcodes.c *********************************************/ -/************** Begin file os_unix.c *****************************************/ -/* -** 2004 May 22 -** -** The author disclaims copyright to this source code. In place of -** a legal notice, here is a blessing: -** -** May you do good and not evil. -** May you find forgiveness for yourself and forgive others. -** May you share freely, never taking more than you give. -** -****************************************************************************** -** -** This file contains the VFS implementation for unix-like operating systems -** include Linux, MacOSX, *BSD, QNX, VxWorks, AIX, HPUX, and others. -** -** There are actually several different VFS implementations in this file. -** The differences are in the way that file locking is done. The default -** implementation uses Posix Advisory Locks. Alternative implementations -** use flock(), dot-files, various proprietary locking schemas, or simply -** skip locking all together. -** -** This source file is organized into divisions where the logic for various -** subfunctions is contained within the appropriate division. PLEASE -** KEEP THE STRUCTURE OF THIS FILE INTACT. New code should be placed -** in the correct division and should be clearly labeled. -** -** The layout of divisions is as follows: -** -** * General-purpose declarations and utility functions. -** * Unique file ID logic used by VxWorks. -** * Various locking primitive implementations (all except proxy locking): -** + for Posix Advisory Locks -** + for no-op locks -** + for dot-file locks -** + for flock() locking -** + for named semaphore locks (VxWorks only) -** + for AFP filesystem locks (MacOSX only) -** * sqlite3_file methods not associated with locking. -** * Definitions of sqlite3_io_methods objects for all locking -** methods plus "finder" functions for each locking method. -** * sqlite3_vfs method implementations. -** * Locking primitives for the proxy uber-locking-method. (MacOSX only) -** * Definitions of sqlite3_vfs objects for all locking methods -** plus implementations of sqlite3_os_init() and sqlite3_os_end(). 
-*/ -#if SQLITE_OS_UNIX /* This file is used on unix only */ - -/* -** There are various methods for file locking used for concurrency -** control: -** -** 1. POSIX locking (the default), -** 2. No locking, -** 3. Dot-file locking, -** 4. flock() locking, -** 5. AFP locking (OSX only), -** 6. Named POSIX semaphores (VXWorks only), -** 7. proxy locking. (OSX only) -** -** Styles 4, 5, and 7 are only available of SQLITE_ENABLE_LOCKING_STYLE -** is defined to 1. The SQLITE_ENABLE_LOCKING_STYLE also enables automatic -** selection of the appropriate locking style based on the filesystem -** where the database is located. -*/ -#if !defined(SQLITE_ENABLE_LOCKING_STYLE) -# if defined(__APPLE__) -# define SQLITE_ENABLE_LOCKING_STYLE 1 -# else -# define SQLITE_ENABLE_LOCKING_STYLE 0 -# endif -#endif - -/* -** Define the OS_VXWORKS pre-processor macro to 1 if building on -** vxworks, or 0 otherwise. -*/ -#ifndef OS_VXWORKS -# if defined(__RTP__) || defined(_WRS_KERNEL) -# define OS_VXWORKS 1 -# else -# define OS_VXWORKS 0 -# endif -#endif - -/* -** standard include files. -*/ -#include -#include -#include -#include -/* #include */ -#include -#include -#if !defined(SQLITE_OMIT_WAL) || SQLITE_MAX_MMAP_SIZE>0 -#include -#endif - - -#if SQLITE_ENABLE_LOCKING_STYLE -# include -# if OS_VXWORKS -# include -# include -# else -# include -# include -# endif -#endif /* SQLITE_ENABLE_LOCKING_STYLE */ - -#if defined(__APPLE__) || (SQLITE_ENABLE_LOCKING_STYLE && !OS_VXWORKS) -# include -#endif - -#ifdef HAVE_UTIME -# include -#endif - -/* -** Allowed values of unixFile.fsFlags -*/ -#define SQLITE_FSFLAGS_IS_MSDOS 0x1 - -/* -** If we are to be thread-safe, include the pthreads header and define -** the SQLITE_UNIX_THREADS macro. -*/ -#if SQLITE_THREADSAFE -/* # include */ -# define SQLITE_UNIX_THREADS 1 -#endif - -/* -** Default permissions when creating a new file -*/ -#ifndef SQLITE_DEFAULT_FILE_PERMISSIONS -# define SQLITE_DEFAULT_FILE_PERMISSIONS 0644 -#endif - -/* -** Default permissions when creating auto proxy dir -*/ -#ifndef SQLITE_DEFAULT_PROXYDIR_PERMISSIONS -# define SQLITE_DEFAULT_PROXYDIR_PERMISSIONS 0755 -#endif - -/* -** Maximum supported path-length. -*/ -#define MAX_PATHNAME 512 - -/* -** Only set the lastErrno if the error code is a real error and not -** a normal expected return code of SQLITE_BUSY or SQLITE_OK -*/ -#define IS_LOCK_ERROR(x) ((x != SQLITE_OK) && (x != SQLITE_BUSY)) - -/* Forward references */ -typedef struct unixShm unixShm; /* Connection shared memory */ -typedef struct unixShmNode unixShmNode; /* Shared memory instance */ -typedef struct unixInodeInfo unixInodeInfo; /* An i-node */ -typedef struct UnixUnusedFd UnixUnusedFd; /* An unused file descriptor */ - -/* -** Sometimes, after a file handle is closed by SQLite, the file descriptor -** cannot be closed immediately. In these cases, instances of the following -** structure are used to store the file descriptor while waiting for an -** opportunity to either close or reuse it. -*/ -struct UnixUnusedFd { - int fd; /* File descriptor to close */ - int flags; /* Flags this file descriptor was opened with */ - UnixUnusedFd *pNext; /* Next unused file descriptor on same file */ -}; - -/* -** The unixFile structure is subclass of sqlite3_file specific to the unix -** VFS implementations. 
-*/ -typedef struct unixFile unixFile; -struct unixFile { - sqlite3_io_methods const *pMethod; /* Always the first entry */ - sqlite3_vfs *pVfs; /* The VFS that created this unixFile */ - unixInodeInfo *pInode; /* Info about locks on this inode */ - int h; /* The file descriptor */ - unsigned char eFileLock; /* The type of lock held on this fd */ - unsigned short int ctrlFlags; /* Behavioral bits. UNIXFILE_* flags */ - int lastErrno; /* The unix errno from last I/O error */ - void *lockingContext; /* Locking style specific state */ - UnixUnusedFd *pUnused; /* Pre-allocated UnixUnusedFd */ - const char *zPath; /* Name of the file */ - unixShm *pShm; /* Shared memory segment information */ - int szChunk; /* Configured by FCNTL_CHUNK_SIZE */ -#if SQLITE_MAX_MMAP_SIZE>0 - int nFetchOut; /* Number of outstanding xFetch refs */ - sqlite3_int64 mmapSize; /* Usable size of mapping at pMapRegion */ - sqlite3_int64 mmapSizeActual; /* Actual size of mapping at pMapRegion */ - sqlite3_int64 mmapSizeMax; /* Configured FCNTL_MMAP_SIZE value */ - void *pMapRegion; /* Memory mapped region */ -#endif -#ifdef __QNXNTO__ - int sectorSize; /* Device sector size */ - int deviceCharacteristics; /* Precomputed device characteristics */ -#endif -#if SQLITE_ENABLE_LOCKING_STYLE - int openFlags; /* The flags specified at open() */ -#endif -#if SQLITE_ENABLE_LOCKING_STYLE || defined(__APPLE__) - unsigned fsFlags; /* cached details from statfs() */ -#endif -#if OS_VXWORKS - struct vxworksFileId *pId; /* Unique file ID */ -#endif -#ifdef SQLITE_DEBUG - /* The next group of variables are used to track whether or not the - ** transaction counter in bytes 24-27 of database files are updated - ** whenever any part of the database changes. An assertion fault will - ** occur if a file is updated without also updating the transaction - ** counter. This test is made to avoid new problems similar to the - ** one described by ticket #3584. - */ - unsigned char transCntrChng; /* True if the transaction counter changed */ - unsigned char dbUpdate; /* True if any part of database file changed */ - unsigned char inNormalWrite; /* True if in a normal write operation */ - -#endif - -#ifdef SQLITE_TEST - /* In test mode, increase the size of this structure a bit so that - ** it is larger than the struct CrashFile defined in test6.c. - */ - char aPadding[32]; -#endif -}; - -/* This variable holds the process id (pid) from when the xRandomness() -** method was called. If xOpen() is called from a different process id, -** indicating that a fork() has occurred, the PRNG will be reset. 
-*/ -static int randomnessPid = 0; - -/* -** Allowed values for the unixFile.ctrlFlags bitmask: -*/ -#define UNIXFILE_EXCL 0x01 /* Connections from one process only */ -#define UNIXFILE_RDONLY 0x02 /* Connection is read only */ -#define UNIXFILE_PERSIST_WAL 0x04 /* Persistent WAL mode */ -#ifndef SQLITE_DISABLE_DIRSYNC -# define UNIXFILE_DIRSYNC 0x08 /* Directory sync needed */ -#else -# define UNIXFILE_DIRSYNC 0x00 -#endif -#define UNIXFILE_PSOW 0x10 /* SQLITE_IOCAP_POWERSAFE_OVERWRITE */ -#define UNIXFILE_DELETE 0x20 /* Delete on close */ -#define UNIXFILE_URI 0x40 /* Filename might have query parameters */ -#define UNIXFILE_NOLOCK 0x80 /* Do no file locking */ -#define UNIXFILE_WARNED 0x0100 /* verifyDbFile() warnings have been issued */ - -/* -** Include code that is common to all os_*.c files -*/ -/************** Include os_common.h in the middle of os_unix.c ***************/ -/************** Begin file os_common.h ***************************************/ -/* -** 2004 May 22 -** -** The author disclaims copyright to this source code. In place of -** a legal notice, here is a blessing: -** -** May you do good and not evil. -** May you find forgiveness for yourself and forgive others. -** May you share freely, never taking more than you give. -** -****************************************************************************** -** -** This file contains macros and a little bit of code that is common to -** all of the platform-specific files (os_*.c) and is #included into those -** files. -** -** This file should be #included by the os_*.c files only. It is not a -** general purpose header file. -*/ -#ifndef _OS_COMMON_H_ -#define _OS_COMMON_H_ - -/* -** At least two bugs have slipped in because we changed the MEMORY_DEBUG -** macro to SQLITE_DEBUG and some older makefiles have not yet made the -** switch. The following code should catch this problem at compile-time. -*/ -#ifdef MEMORY_DEBUG -# error "The MEMORY_DEBUG macro is obsolete. Use SQLITE_DEBUG instead." -#endif - -#if defined(SQLITE_TEST) && defined(SQLITE_DEBUG) -# ifndef SQLITE_DEBUG_OS_TRACE -# define SQLITE_DEBUG_OS_TRACE 0 -# endif - int sqlite3OSTrace = SQLITE_DEBUG_OS_TRACE; -# define OSTRACE(X) if( sqlite3OSTrace ) sqlite3DebugPrintf X -#else -# define OSTRACE(X) -#endif - -/* -** Macros for performance tracing. Normally turned off. Only works -** on i486 hardware. -*/ -#ifdef SQLITE_PERFORMANCE_TRACE - -/* -** hwtime.h contains inline assembler code for implementing -** high-performance timing routines. -*/ -/************** Include hwtime.h in the middle of os_common.h ****************/ -/************** Begin file hwtime.h ******************************************/ -/* -** 2008 May 27 -** -** The author disclaims copyright to this source code. In place of -** a legal notice, here is a blessing: -** -** May you do good and not evil. -** May you find forgiveness for yourself and forgive others. -** May you share freely, never taking more than you give. -** -****************************************************************************** -** -** This file contains inline asm code for retrieving "high-performance" -** counters for x86 class CPUs. -*/ -#ifndef _HWTIME_H_ -#define _HWTIME_H_ - -/* -** The following routine only works on pentium-class (or newer) processors. -** It uses the RDTSC opcode to read the cycle count value out of the -** processor and returns that value. This can be used for high-res -** profiling. 
-*/ -#if (defined(__GNUC__) || defined(_MSC_VER)) && \ - (defined(i386) || defined(__i386__) || defined(_M_IX86)) - - #if defined(__GNUC__) - - __inline__ sqlite_uint64 sqlite3Hwtime(void){ - unsigned int lo, hi; - __asm__ __volatile__ ("rdtsc" : "=a" (lo), "=d" (hi)); - return (sqlite_uint64)hi << 32 | lo; - } - - #elif defined(_MSC_VER) - - __declspec(naked) __inline sqlite_uint64 __cdecl sqlite3Hwtime(void){ - __asm { - rdtsc - ret ; return value at EDX:EAX - } - } - - #endif - -#elif (defined(__GNUC__) && defined(__x86_64__)) - - __inline__ sqlite_uint64 sqlite3Hwtime(void){ - unsigned long val; - __asm__ __volatile__ ("rdtsc" : "=A" (val)); - return val; - } - -#elif (defined(__GNUC__) && defined(__ppc__)) - - __inline__ sqlite_uint64 sqlite3Hwtime(void){ - unsigned long long retval; - unsigned long junk; - __asm__ __volatile__ ("\n\ - 1: mftbu %1\n\ - mftb %L0\n\ - mftbu %0\n\ - cmpw %0,%1\n\ - bne 1b" - : "=r" (retval), "=r" (junk)); - return retval; - } - -#else - - #error Need implementation of sqlite3Hwtime() for your platform. - - /* - ** To compile without implementing sqlite3Hwtime() for your platform, - ** you can remove the above #error and use the following - ** stub function. You will lose timing support for many - ** of the debugging and testing utilities, but it should at - ** least compile and run. - */ -SQLITE_PRIVATE sqlite_uint64 sqlite3Hwtime(void){ return ((sqlite_uint64)0); } - -#endif - -#endif /* !defined(_HWTIME_H_) */ - -/************** End of hwtime.h **********************************************/ -/************** Continuing where we left off in os_common.h ******************/ - -static sqlite_uint64 g_start; -static sqlite_uint64 g_elapsed; -#define TIMER_START g_start=sqlite3Hwtime() -#define TIMER_END g_elapsed=sqlite3Hwtime()-g_start -#define TIMER_ELAPSED g_elapsed -#else -#define TIMER_START -#define TIMER_END -#define TIMER_ELAPSED ((sqlite_uint64)0) -#endif - -/* -** If we compile with the SQLITE_TEST macro set, then the following block -** of code will give us the ability to simulate a disk I/O error. This -** is used for testing the I/O recovery logic. -*/ -#ifdef SQLITE_TEST -SQLITE_API int sqlite3_io_error_hit = 0; /* Total number of I/O Errors */ -SQLITE_API int sqlite3_io_error_hardhit = 0; /* Number of non-benign errors */ -SQLITE_API int sqlite3_io_error_pending = 0; /* Count down to first I/O error */ -SQLITE_API int sqlite3_io_error_persist = 0; /* True if I/O errors persist */ -SQLITE_API int sqlite3_io_error_benign = 0; /* True if errors are benign */ -SQLITE_API int sqlite3_diskfull_pending = 0; -SQLITE_API int sqlite3_diskfull = 0; -#define SimulateIOErrorBenign(X) sqlite3_io_error_benign=(X) -#define SimulateIOError(CODE) \ - if( (sqlite3_io_error_persist && sqlite3_io_error_hit) \ - || sqlite3_io_error_pending-- == 1 ) \ - { local_ioerr(); CODE; } -static void local_ioerr(){ - IOTRACE(("IOERR\n")); - sqlite3_io_error_hit++; - if( !sqlite3_io_error_benign ) sqlite3_io_error_hardhit++; -} -#define SimulateDiskfullError(CODE) \ - if( sqlite3_diskfull_pending ){ \ - if( sqlite3_diskfull_pending == 1 ){ \ - local_ioerr(); \ - sqlite3_diskfull = 1; \ - sqlite3_io_error_hit = 1; \ - CODE; \ - }else{ \ - sqlite3_diskfull_pending--; \ - } \ - } -#else -#define SimulateIOErrorBenign(X) -#define SimulateIOError(A) -#define SimulateDiskfullError(A) -#endif - -/* -** When testing, keep a count of the number of open files. 
-*/ -#ifdef SQLITE_TEST -SQLITE_API int sqlite3_open_file_count = 0; -#define OpenCounter(X) sqlite3_open_file_count+=(X) -#else -#define OpenCounter(X) -#endif - -#endif /* !defined(_OS_COMMON_H_) */ - -/************** End of os_common.h *******************************************/ -/************** Continuing where we left off in os_unix.c ********************/ - -/* -** Define various macros that are missing from some systems. -*/ -#ifndef O_LARGEFILE -# define O_LARGEFILE 0 -#endif -#ifdef SQLITE_DISABLE_LFS -# undef O_LARGEFILE -# define O_LARGEFILE 0 -#endif -#ifndef O_NOFOLLOW -# define O_NOFOLLOW 0 -#endif -#ifndef O_BINARY -# define O_BINARY 0 -#endif - -/* -** The threadid macro resolves to the thread-id or to 0. Used for -** testing and debugging only. -*/ -#if SQLITE_THREADSAFE -#define threadid pthread_self() -#else -#define threadid 0 -#endif - -/* -** HAVE_MREMAP defaults to true on Linux and false everywhere else. -*/ -#if !defined(HAVE_MREMAP) -# if defined(__linux__) && defined(_GNU_SOURCE) -# define HAVE_MREMAP 1 -# else -# define HAVE_MREMAP 0 -# endif -#endif - -/* -** Different Unix systems declare open() in different ways. Same use -** open(const char*,int,mode_t). Others use open(const char*,int,...). -** The difference is important when using a pointer to the function. -** -** The safest way to deal with the problem is to always use this wrapper -** which always has the same well-defined interface. -*/ -static int posixOpen(const char *zFile, int flags, int mode){ - return open(zFile, flags, mode); -} - -/* -** On some systems, calls to fchown() will trigger a message in a security -** log if they come from non-root processes. So avoid calling fchown() if -** we are not running as root. -*/ -static int posixFchown(int fd, uid_t uid, gid_t gid){ - return geteuid() ? 0 : fchown(fd,uid,gid); -} - -/* Forward reference */ -static int openDirectory(const char*, int*); -static int unixGetpagesize(void); - -/* -** Many system calls are accessed through pointer-to-functions so that -** they may be overridden at runtime to facilitate fault injection during -** testing and sandboxing. The following array holds the names and pointers -** to all overrideable system calls. -*/ -static struct unix_syscall { - const char *zName; /* Name of the system call */ - sqlite3_syscall_ptr pCurrent; /* Current value of the system call */ - sqlite3_syscall_ptr pDefault; /* Default value */ -} aSyscall[] = { - { "open", (sqlite3_syscall_ptr)posixOpen, 0 }, -#define osOpen ((int(*)(const char*,int,int))aSyscall[0].pCurrent) - - { "close", (sqlite3_syscall_ptr)close, 0 }, -#define osClose ((int(*)(int))aSyscall[1].pCurrent) - - { "access", (sqlite3_syscall_ptr)access, 0 }, -#define osAccess ((int(*)(const char*,int))aSyscall[2].pCurrent) - - { "getcwd", (sqlite3_syscall_ptr)getcwd, 0 }, -#define osGetcwd ((char*(*)(char*,size_t))aSyscall[3].pCurrent) - - { "stat", (sqlite3_syscall_ptr)stat, 0 }, -#define osStat ((int(*)(const char*,struct stat*))aSyscall[4].pCurrent) - -/* -** The DJGPP compiler environment looks mostly like Unix, but it -** lacks the fcntl() system call. So redefine fcntl() to be something -** that always succeeds. This means that locking does not occur under -** DJGPP. But it is DOS - what did you expect? 
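Editor's note: the aSyscall[] table that begins above routes every libc call through a named function pointer so a test harness can swap in fault-injecting replacements and later restore the defaults. A single-entry sketch of that pattern (illustrative names, POSIX only).

    /* Sketch of the overridable-syscall pattern used by the table above. */
    #include <stdio.h>
    #include <fcntl.h>

    typedef int (*open_fn)(const char*, int, int);

    static int posix_open(const char *z, int flags, int mode){
      return open(z, flags, mode);
    }

    static struct sys_entry {
      const char *zName;
      open_fn pCurrent;     /* what the code actually calls      */
      open_fn pDefault;     /* restored when overrides are reset */
    } sys_open = { "open", posix_open, posix_open };

    #define os_open(z,f,m) (sys_open.pCurrent((z),(f),(m)))

    /* test-only replacement that always fails, simulating an I/O fault */
    static int failing_open(const char *z, int flags, int mode){
      (void)z; (void)flags; (void)mode;
      return -1;
    }

    int main(void){
      sys_open.pCurrent = failing_open;          /* inject the fault    */
      int fd = os_open("/tmp/example", O_RDONLY, 0);
      printf("open -> %d (forced failure)\n", fd);
      sys_open.pCurrent = sys_open.pDefault;     /* restore the default */
      return 0;
    }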
-*/ -#ifdef __DJGPP__ - { "fstat", 0, 0 }, -#define osFstat(a,b,c) 0 -#else - { "fstat", (sqlite3_syscall_ptr)fstat, 0 }, -#define osFstat ((int(*)(int,struct stat*))aSyscall[5].pCurrent) -#endif - - { "ftruncate", (sqlite3_syscall_ptr)ftruncate, 0 }, -#define osFtruncate ((int(*)(int,off_t))aSyscall[6].pCurrent) - - { "fcntl", (sqlite3_syscall_ptr)fcntl, 0 }, -#define osFcntl ((int(*)(int,int,...))aSyscall[7].pCurrent) - - { "read", (sqlite3_syscall_ptr)read, 0 }, -#define osRead ((ssize_t(*)(int,void*,size_t))aSyscall[8].pCurrent) - -#if defined(USE_PREAD) || SQLITE_ENABLE_LOCKING_STYLE - { "pread", (sqlite3_syscall_ptr)pread, 0 }, -#else - { "pread", (sqlite3_syscall_ptr)0, 0 }, -#endif -#define osPread ((ssize_t(*)(int,void*,size_t,off_t))aSyscall[9].pCurrent) - -#if defined(USE_PREAD64) - { "pread64", (sqlite3_syscall_ptr)pread64, 0 }, -#else - { "pread64", (sqlite3_syscall_ptr)0, 0 }, -#endif -#define osPread64 ((ssize_t(*)(int,void*,size_t,off_t))aSyscall[10].pCurrent) - - { "write", (sqlite3_syscall_ptr)write, 0 }, -#define osWrite ((ssize_t(*)(int,const void*,size_t))aSyscall[11].pCurrent) - -#if defined(USE_PREAD) || SQLITE_ENABLE_LOCKING_STYLE - { "pwrite", (sqlite3_syscall_ptr)pwrite, 0 }, -#else - { "pwrite", (sqlite3_syscall_ptr)0, 0 }, -#endif -#define osPwrite ((ssize_t(*)(int,const void*,size_t,off_t))\ - aSyscall[12].pCurrent) - -#if defined(USE_PREAD64) - { "pwrite64", (sqlite3_syscall_ptr)pwrite64, 0 }, -#else - { "pwrite64", (sqlite3_syscall_ptr)0, 0 }, -#endif -#define osPwrite64 ((ssize_t(*)(int,const void*,size_t,off_t))\ - aSyscall[13].pCurrent) - - { "fchmod", (sqlite3_syscall_ptr)fchmod, 0 }, -#define osFchmod ((int(*)(int,mode_t))aSyscall[14].pCurrent) - -#if defined(HAVE_POSIX_FALLOCATE) && HAVE_POSIX_FALLOCATE - { "fallocate", (sqlite3_syscall_ptr)posix_fallocate, 0 }, -#else - { "fallocate", (sqlite3_syscall_ptr)0, 0 }, -#endif -#define osFallocate ((int(*)(int,off_t,off_t))aSyscall[15].pCurrent) - - { "unlink", (sqlite3_syscall_ptr)unlink, 0 }, -#define osUnlink ((int(*)(const char*))aSyscall[16].pCurrent) - - { "openDirectory", (sqlite3_syscall_ptr)openDirectory, 0 }, -#define osOpenDirectory ((int(*)(const char*,int*))aSyscall[17].pCurrent) - - { "mkdir", (sqlite3_syscall_ptr)mkdir, 0 }, -#define osMkdir ((int(*)(const char*,mode_t))aSyscall[18].pCurrent) - - { "rmdir", (sqlite3_syscall_ptr)rmdir, 0 }, -#define osRmdir ((int(*)(const char*))aSyscall[19].pCurrent) - - { "fchown", (sqlite3_syscall_ptr)posixFchown, 0 }, -#define osFchown ((int(*)(int,uid_t,gid_t))aSyscall[20].pCurrent) - -#if !defined(SQLITE_OMIT_WAL) || SQLITE_MAX_MMAP_SIZE>0 - { "mmap", (sqlite3_syscall_ptr)mmap, 0 }, -#define osMmap ((void*(*)(void*,size_t,int,int,int,off_t))aSyscall[21].pCurrent) - - { "munmap", (sqlite3_syscall_ptr)munmap, 0 }, -#define osMunmap ((void*(*)(void*,size_t))aSyscall[22].pCurrent) - -#if HAVE_MREMAP - { "mremap", (sqlite3_syscall_ptr)mremap, 0 }, -#else - { "mremap", (sqlite3_syscall_ptr)0, 0 }, -#endif -#define osMremap ((void*(*)(void*,size_t,size_t,int,...))aSyscall[23].pCurrent) -#endif - - { "getpagesize", (sqlite3_syscall_ptr)unixGetpagesize, 0 }, -#define osGetpagesize ((int(*)(void))aSyscall[24].pCurrent) - -}; /* End of the overrideable system calls */ - -/* -** This is the xSetSystemCall() method of sqlite3_vfs for all of the -** "unix" VFSes. Return SQLITE_OK opon successfully updating the -** system call pointer, or SQLITE_NOTFOUND if there is no configurable -** system call named zName. 
-*/ -static int unixSetSystemCall( - sqlite3_vfs *pNotUsed, /* The VFS pointer. Not used */ - const char *zName, /* Name of system call to override */ - sqlite3_syscall_ptr pNewFunc /* Pointer to new system call value */ -){ - unsigned int i; - int rc = SQLITE_NOTFOUND; - - UNUSED_PARAMETER(pNotUsed); - if( zName==0 ){ - /* If no zName is given, restore all system calls to their default - ** settings and return NULL - */ - rc = SQLITE_OK; - for(i=0; i=SQLITE_MINIMUM_FILE_DESCRIPTOR ) break; - osClose(fd); - sqlite3_log(SQLITE_WARNING, - "attempt to open \"%s\" as file descriptor %d", z, fd); - fd = -1; - if( osOpen("/dev/null", f, m)<0 ) break; - } - if( fd>=0 ){ - if( m!=0 ){ - struct stat statbuf; - if( osFstat(fd, &statbuf)==0 - && statbuf.st_size==0 - && (statbuf.st_mode&0777)!=m - ){ - osFchmod(fd, m); - } - } -#if defined(FD_CLOEXEC) && (!defined(O_CLOEXEC) || O_CLOEXEC==0) - osFcntl(fd, F_SETFD, osFcntl(fd, F_GETFD, 0) | FD_CLOEXEC); -#endif - } - return fd; -} - -/* -** Helper functions to obtain and relinquish the global mutex. The -** global mutex is used to protect the unixInodeInfo and -** vxworksFileId objects used by this file, all of which may be -** shared by multiple threads. -** -** Function unixMutexHeld() is used to assert() that the global mutex -** is held when required. This function is only used as part of assert() -** statements. e.g. -** -** unixEnterMutex() -** assert( unixMutexHeld() ); -** unixEnterLeave() -*/ -static void unixEnterMutex(void){ - sqlite3_mutex_enter(sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_MASTER)); -} -static void unixLeaveMutex(void){ - sqlite3_mutex_leave(sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_MASTER)); -} -#ifdef SQLITE_DEBUG -static int unixMutexHeld(void) { - return sqlite3_mutex_held(sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_MASTER)); -} -#endif - - -#if defined(SQLITE_TEST) && defined(SQLITE_DEBUG) -/* -** Helper function for printing out trace information from debugging -** binaries. This returns the string represetation of the supplied -** integer lock-type. -*/ -static const char *azFileLock(int eFileLock){ - switch( eFileLock ){ - case NO_LOCK: return "NONE"; - case SHARED_LOCK: return "SHARED"; - case RESERVED_LOCK: return "RESERVED"; - case PENDING_LOCK: return "PENDING"; - case EXCLUSIVE_LOCK: return "EXCLUSIVE"; - } - return "ERROR"; -} -#endif - -#ifdef SQLITE_LOCK_TRACE -/* -** Print out information about all locking operations. -** -** This routine is used for troubleshooting locks on multithreaded -** platforms. Enable by compiling with the -DSQLITE_LOCK_TRACE -** command-line option on the compiler. This code is normally -** turned off. 
-*/ -static int lockTrace(int fd, int op, struct flock *p){ - char *zOpName, *zType; - int s; - int savedErrno; - if( op==F_GETLK ){ - zOpName = "GETLK"; - }else if( op==F_SETLK ){ - zOpName = "SETLK"; - }else{ - s = osFcntl(fd, op, p); - sqlite3DebugPrintf("fcntl unknown %d %d %d\n", fd, op, s); - return s; - } - if( p->l_type==F_RDLCK ){ - zType = "RDLCK"; - }else if( p->l_type==F_WRLCK ){ - zType = "WRLCK"; - }else if( p->l_type==F_UNLCK ){ - zType = "UNLCK"; - }else{ - assert( 0 ); - } - assert( p->l_whence==SEEK_SET ); - s = osFcntl(fd, op, p); - savedErrno = errno; - sqlite3DebugPrintf("fcntl %d %d %s %s %d %d %d %d\n", - threadid, fd, zOpName, zType, (int)p->l_start, (int)p->l_len, - (int)p->l_pid, s); - if( s==(-1) && op==F_SETLK && (p->l_type==F_RDLCK || p->l_type==F_WRLCK) ){ - struct flock l2; - l2 = *p; - osFcntl(fd, F_GETLK, &l2); - if( l2.l_type==F_RDLCK ){ - zType = "RDLCK"; - }else if( l2.l_type==F_WRLCK ){ - zType = "WRLCK"; - }else if( l2.l_type==F_UNLCK ){ - zType = "UNLCK"; - }else{ - assert( 0 ); - } - sqlite3DebugPrintf("fcntl-failure-reason: %s %d %d %d\n", - zType, (int)l2.l_start, (int)l2.l_len, (int)l2.l_pid); - } - errno = savedErrno; - return s; -} -#undef osFcntl -#define osFcntl lockTrace -#endif /* SQLITE_LOCK_TRACE */ - -/* -** Retry ftruncate() calls that fail due to EINTR -*/ -static int robust_ftruncate(int h, sqlite3_int64 sz){ - int rc; - do{ rc = osFtruncate(h,sz); }while( rc<0 && errno==EINTR ); - return rc; -} - -/* -** This routine translates a standard POSIX errno code into something -** useful to the clients of the sqlite3 functions. Specifically, it is -** intended to translate a variety of "try again" errors into SQLITE_BUSY -** and a variety of "please close the file descriptor NOW" errors into -** SQLITE_IOERR -** -** Errors during initialization of locks, or file system support for locks, -** should handle ENOLCK, ENOTSUP, EOPNOTSUPP separately. -*/ -static int sqliteErrorFromPosixError(int posixError, int sqliteIOErr) { - switch (posixError) { -#if 0 - /* At one point this code was not commented out. In theory, this branch - ** should never be hit, as this function should only be called after - ** a locking-related function (i.e. fcntl()) has returned non-zero with - ** the value of errno as the first argument. Since a system call has failed, - ** errno should be non-zero. - ** - ** Despite this, if errno really is zero, we still don't want to return - ** SQLITE_OK. The system call failed, and *some* SQLite error should be - ** propagated back to the caller. Commenting this branch out means errno==0 - ** will be handled by the "default:" case below. - */ - case 0: - return SQLITE_OK; -#endif - - case EAGAIN: - case ETIMEDOUT: - case EBUSY: - case EINTR: - case ENOLCK: - /* random NFS retry error, unless during file system support - * introspection, in which it actually means what it says */ - return SQLITE_BUSY; - - case EACCES: - /* EACCES is like EAGAIN during locking operations, but not any other time*/ - if( (sqliteIOErr == SQLITE_IOERR_LOCK) || - (sqliteIOErr == SQLITE_IOERR_UNLOCK) || - (sqliteIOErr == SQLITE_IOERR_RDLOCK) || - (sqliteIOErr == SQLITE_IOERR_CHECKRESERVEDLOCK) ){ - return SQLITE_BUSY; - } - /* else fall through */ - case EPERM: - return SQLITE_PERM; - - /* EDEADLK is only possible if a call to fcntl(F_SETLKW) is made. And - ** this module never makes such a call. And the code in SQLite itself - ** asserts that SQLITE_IOERR_BLOCKED is never returned. For these reasons - ** this case is also commented out. 
If the system does set errno to EDEADLK, - ** the default SQLITE_IOERR_XXX code will be returned. */ -#if 0 - case EDEADLK: - return SQLITE_IOERR_BLOCKED; -#endif - -#if EOPNOTSUPP!=ENOTSUP - case EOPNOTSUPP: - /* something went terribly awry, unless during file system support - * introspection, in which it actually means what it says */ -#endif -#ifdef ENOTSUP - case ENOTSUP: - /* invalid fd, unless during file system support introspection, in which - * it actually means what it says */ -#endif - case EIO: - case EBADF: - case EINVAL: - case ENOTCONN: - case ENODEV: - case ENXIO: - case ENOENT: -#ifdef ESTALE /* ESTALE is not defined on Interix systems */ - case ESTALE: -#endif - case ENOSYS: - /* these should force the client to close the file and reconnect */ - - default: - return sqliteIOErr; - } -} - - -/****************************************************************************** -****************** Begin Unique File ID Utility Used By VxWorks *************** -** -** On most versions of unix, we can get a unique ID for a file by concatenating -** the device number and the inode number. But this does not work on VxWorks. -** On VxWorks, a unique file id must be based on the canonical filename. -** -** A pointer to an instance of the following structure can be used as a -** unique file ID in VxWorks. Each instance of this structure contains -** a copy of the canonical filename. There is also a reference count. -** The structure is reclaimed when the number of pointers to it drops to -** zero. -** -** There are never very many files open at one time and lookups are not -** a performance-critical path, so it is sufficient to put these -** structures on a linked list. -*/ -struct vxworksFileId { - struct vxworksFileId *pNext; /* Next in a list of them all */ - int nRef; /* Number of references to this one */ - int nName; /* Length of the zCanonicalName[] string */ - char *zCanonicalName; /* Canonical filename */ -}; - -#if OS_VXWORKS -/* -** All unique filenames are held on a linked list headed by this -** variable: -*/ -static struct vxworksFileId *vxworksFileList = 0; - -/* -** Simplify a filename into its canonical form -** by making the following changes: -** -** * removing any trailing and duplicate / -** * convert /./ into just / -** * convert /A/../ where A is any simple name into just / -** -** Changes are made in-place. Return the new name length. -** -** The original filename is in z[0..n-1]. Return the number of -** characters in the simplified name. -*/ -static int vxworksSimplifyName(char *z, int n){ - int i, j; - while( n>1 && z[n-1]=='/' ){ n--; } - for(i=j=0; i0 && z[j-1]!='/' ){ j--; } - if( j>0 ){ j--; } - i += 2; - continue; - } - } - z[j++] = z[i]; - } - z[j] = 0; - return j; -} - -/* -** Find a unique file ID for the given absolute pathname. Return -** a pointer to the vxworksFileId object. This pointer is the unique -** file ID. -** -** The nRef field of the vxworksFileId object is incremented before -** the object is returned. A new vxworksFileId object is created -** and added to the global list if necessary. -** -** If a memory allocation error occurs, return NULL. 
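The canonicalisation rules listed a little further up (drop trailing and duplicate '/', collapse "/./", fold "/A/../") are compact enough to show end to end. A rough standalone sketch written to that description, not a copy of the original routine; the driver and its test path are hypothetical.

#include <stdio.h>
#include <string.h>

/* Simplify a path in place per the three rules above; returns new length. */
static int simplify_name(char *z, int n){
  int i, j;
  while( n>1 && z[n-1]=='/' ){ n--; }            /* trailing '/'        */
  for(i=j=0; i<n; i++){
    if( z[i]=='/' ){
      if( z[i+1]=='/' ) continue;                /* duplicate '/'       */
      if( z[i+1]=='.' && i+2<n && z[i+2]=='/' ){
        i += 1;                                  /* "/./"   -> "/"      */
        continue;
      }
      if( z[i+1]=='.' && z[i+2]=='.' && i+3<n && z[i+3]=='/' ){
        while( j>0 && z[j-1]!='/' ){ j--; }      /* "/A/../" -> "/"     */
        if( j>0 ){ j--; }
        i += 2;
        continue;
      }
    }
    z[j++] = z[i];
  }
  z[j] = 0;
  return j;
}

int main(void){
  char z[] = "/a/./b//c/../d/";
  simplify_name(z, (int)strlen(z));
  printf("%s\n", z);                             /* prints: /a/b/d */
  return 0;
}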
-*/ -static struct vxworksFileId *vxworksFindFileId(const char *zAbsoluteName){ - struct vxworksFileId *pNew; /* search key and new file ID */ - struct vxworksFileId *pCandidate; /* For looping over existing file IDs */ - int n; /* Length of zAbsoluteName string */ - - assert( zAbsoluteName[0]=='/' ); - n = (int)strlen(zAbsoluteName); - pNew = sqlite3_malloc( sizeof(*pNew) + (n+1) ); - if( pNew==0 ) return 0; - pNew->zCanonicalName = (char*)&pNew[1]; - memcpy(pNew->zCanonicalName, zAbsoluteName, n+1); - n = vxworksSimplifyName(pNew->zCanonicalName, n); - - /* Search for an existing entry that matching the canonical name. - ** If found, increment the reference count and return a pointer to - ** the existing file ID. - */ - unixEnterMutex(); - for(pCandidate=vxworksFileList; pCandidate; pCandidate=pCandidate->pNext){ - if( pCandidate->nName==n - && memcmp(pCandidate->zCanonicalName, pNew->zCanonicalName, n)==0 - ){ - sqlite3_free(pNew); - pCandidate->nRef++; - unixLeaveMutex(); - return pCandidate; - } - } - - /* No match was found. We will make a new file ID */ - pNew->nRef = 1; - pNew->nName = n; - pNew->pNext = vxworksFileList; - vxworksFileList = pNew; - unixLeaveMutex(); - return pNew; -} - -/* -** Decrement the reference count on a vxworksFileId object. Free -** the object when the reference count reaches zero. -*/ -static void vxworksReleaseFileId(struct vxworksFileId *pId){ - unixEnterMutex(); - assert( pId->nRef>0 ); - pId->nRef--; - if( pId->nRef==0 ){ - struct vxworksFileId **pp; - for(pp=&vxworksFileList; *pp && *pp!=pId; pp = &((*pp)->pNext)){} - assert( *pp==pId ); - *pp = pId->pNext; - sqlite3_free(pId); - } - unixLeaveMutex(); -} -#endif /* OS_VXWORKS */ -/*************** End of Unique File ID Utility Used By VxWorks **************** -******************************************************************************/ - - -/****************************************************************************** -*************************** Posix Advisory Locking **************************** -** -** POSIX advisory locks are broken by design. ANSI STD 1003.1 (1996) -** section 6.5.2.2 lines 483 through 490 specify that when a process -** sets or clears a lock, that operation overrides any prior locks set -** by the same process. It does not explicitly say so, but this implies -** that it overrides locks set by the same process using a different -** file descriptor. Consider this test case: -** -** int fd1 = open("./file1", O_RDWR|O_CREAT, 0644); -** int fd2 = open("./file2", O_RDWR|O_CREAT, 0644); -** -** Suppose ./file1 and ./file2 are really the same file (because -** one is a hard or symbolic link to the other) then if you set -** an exclusive lock on fd1, then try to get an exclusive lock -** on fd2, it works. I would have expected the second lock to -** fail since there was already a lock on the file due to fd1. -** But not so. Since both locks came from the same process, the -** second overrides the first, even though they were on different -** file descriptors opened on different file names. -** -** This means that we cannot use POSIX locks to synchronize file access -** among competing threads of the same process. POSIX locks will work fine -** to synchronize access for threads in separate processes, but not -** threads within the same process. -** -** To work around the problem, SQLite has to manage file locks internally -** on its own. 
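The surprising behaviour described above is easy to reproduce outside SQLite. A small self-contained demonstration (the demo.db filename is arbitrary): both F_SETLK calls succeed because they come from the same process, and closing either descriptor silently releases every lock the process holds on the file.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void){
  int fd1 = open("demo.db", O_RDWR|O_CREAT, 0644);
  int fd2 = open("demo.db", O_RDWR, 0644);     /* same inode, second fd */
  struct flock l;
  memset(&l, 0, sizeof(l));
  l.l_type = F_WRLCK;                          /* exclusive write lock   */
  l.l_whence = SEEK_SET;                       /* l_start=l_len=0: whole file */
  printf("lock via fd1: %d\n", fcntl(fd1, F_SETLK, &l));  /* prints 0    */
  printf("lock via fd2: %d\n", fcntl(fd2, F_SETLK, &l));  /* also 0      */
  close(fd2);           /* drops this process's locks on the whole file  */
  close(fd1);
  unlink("demo.db");
  return 0;
}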
Whenever a new database is opened, we have to find the -** specific inode of the database file (the inode is determined by the -** st_dev and st_ino fields of the stat structure that fstat() fills in) -** and check for locks already existing on that inode. When locks are -** created or removed, we have to look at our own internal record of the -** locks to see if another thread has previously set a lock on that same -** inode. -** -** (Aside: The use of inode numbers as unique IDs does not work on VxWorks. -** For VxWorks, we have to use the alternative unique ID system based on -** canonical filename and implemented in the previous division.) -** -** The sqlite3_file structure for POSIX is no longer just an integer file -** descriptor. It is now a structure that holds the integer file -** descriptor and a pointer to a structure that describes the internal -** locks on the corresponding inode. There is one locking structure -** per inode, so if the same inode is opened twice, both unixFile structures -** point to the same locking structure. The locking structure keeps -** a reference count (so we will know when to delete it) and a "cnt" -** field that tells us its internal lock status. cnt==0 means the -** file is unlocked. cnt==-1 means the file has an exclusive lock. -** cnt>0 means there are cnt shared locks on the file. -** -** Any attempt to lock or unlock a file first checks the locking -** structure. The fcntl() system call is only invoked to set a -** POSIX lock if the internal lock structure transitions between -** a locked and an unlocked state. -** -** But wait: there are yet more problems with POSIX advisory locks. -** -** If you close a file descriptor that points to a file that has locks, -** all locks on that file that are owned by the current process are -** released. To work around this problem, each unixInodeInfo object -** maintains a count of the number of pending locks on tha inode. -** When an attempt is made to close an unixFile, if there are -** other unixFile open on the same inode that are holding locks, the call -** to close() the file descriptor is deferred until all of the locks clear. -** The unixInodeInfo structure keeps a list of file descriptors that need to -** be closed and that list is walked (and cleared) when the last lock -** clears. -** -** Yet another problem: LinuxThreads do not play well with posix locks. -** -** Many older versions of linux use the LinuxThreads library which is -** not posix compliant. Under LinuxThreads, a lock created by thread -** A cannot be modified or overridden by a different thread B. -** Only thread A can modify the lock. Locking behavior is correct -** if the appliation uses the newer Native Posix Thread Library (NPTL) -** on linux - with NPTL a lock created by thread A can override locks -** in thread B. But there is no way to know at compile-time which -** threading library is being used. So there is no way to know at -** compile-time whether or not thread A can override locks on thread B. -** One has to do a run-time check to discover the behavior of the -** current process. -** -** SQLite used to support LinuxThreads. But support for LinuxThreads -** was dropped beginning with version 3.7.0. SQLite will still work with -** LinuxThreads provided that (1) there is no more than one connection -** per database file in the same process and (2) database connections -** do not move across threads. -*/ - -/* -** An instance of the following structure serves as the key used -** to locate a particular unixInodeInfo object. 
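On everything except VxWorks, the lookup key defined just below boils down to the (st_dev, st_ino) pair that fstat() reports. A minimal sketch of reading that identity directly (demo.db is arbitrary):

#include <fcntl.h>
#include <stdio.h>
#include <sys/stat.h>
#include <unistd.h>

int main(void){
  int fd = open("demo.db", O_RDWR|O_CREAT, 0644);
  struct stat st;
  if( fd>=0 && fstat(fd, &st)==0 ){
    /* Two descriptors with equal (dev, ino) refer to the same file,
    ** regardless of the path used to open them. */
    printf("dev=%llu ino=%llu\n",
           (unsigned long long)st.st_dev,
           (unsigned long long)st.st_ino);
  }
  if( fd>=0 ) close(fd);
  unlink("demo.db");
  return 0;
}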
-*/ -struct unixFileId { - dev_t dev; /* Device number */ -#if OS_VXWORKS - struct vxworksFileId *pId; /* Unique file ID for vxworks. */ -#else - ino_t ino; /* Inode number */ -#endif -}; - -/* -** An instance of the following structure is allocated for each open -** inode. Or, on LinuxThreads, there is one of these structures for -** each inode opened by each thread. -** -** A single inode can have multiple file descriptors, so each unixFile -** structure contains a pointer to an instance of this object and this -** object keeps a count of the number of unixFile pointing to it. -*/ -struct unixInodeInfo { - struct unixFileId fileId; /* The lookup key */ - int nShared; /* Number of SHARED locks held */ - unsigned char eFileLock; /* One of SHARED_LOCK, RESERVED_LOCK etc. */ - unsigned char bProcessLock; /* An exclusive process lock is held */ - int nRef; /* Number of pointers to this structure */ - unixShmNode *pShmNode; /* Shared memory associated with this inode */ - int nLock; /* Number of outstanding file locks */ - UnixUnusedFd *pUnused; /* Unused file descriptors to close */ - unixInodeInfo *pNext; /* List of all unixInodeInfo objects */ - unixInodeInfo *pPrev; /* .... doubly linked */ -#if SQLITE_ENABLE_LOCKING_STYLE - unsigned long long sharedByte; /* for AFP simulated shared lock */ -#endif -#if OS_VXWORKS - sem_t *pSem; /* Named POSIX semaphore */ - char aSemName[MAX_PATHNAME+2]; /* Name of that semaphore */ -#endif -}; - -/* -** A lists of all unixInodeInfo objects. -*/ -static unixInodeInfo *inodeList = 0; - -/* -** -** This function - unixLogError_x(), is only ever called via the macro -** unixLogError(). -** -** It is invoked after an error occurs in an OS function and errno has been -** set. It logs a message using sqlite3_log() containing the current value of -** errno and, if possible, the human-readable equivalent from strerror() or -** strerror_r(). -** -** The first argument passed to the macro should be the error code that -** will be returned to SQLite (e.g. SQLITE_IOERR_DELETE, SQLITE_CANTOPEN). -** The two subsequent arguments should be the name of the OS function that -** failed (e.g. "unlink", "open") and the associated file-system path, -** if any. -*/ -#define unixLogError(a,b,c) unixLogErrorAtLine(a,b,c,__LINE__) -static int unixLogErrorAtLine( - int errcode, /* SQLite error code */ - const char *zFunc, /* Name of OS function that failed */ - const char *zPath, /* File path associated with error */ - int iLine /* Source line number where error occurred */ -){ - char *zErr; /* Message from strerror() or equivalent */ - int iErrno = errno; /* Saved syscall error number */ - - /* If this is not a threadsafe build (SQLITE_THREADSAFE==0), then use - ** the strerror() function to obtain the human-readable error message - ** equivalent to errno. Otherwise, use strerror_r(). - */ -#if SQLITE_THREADSAFE && defined(HAVE_STRERROR_R) - char aErr[80]; - memset(aErr, 0, sizeof(aErr)); - zErr = aErr; - - /* If STRERROR_R_CHAR_P (set by autoconf scripts) or __USE_GNU is defined, - ** assume that the system provides the GNU version of strerror_r() that - ** returns a pointer to a buffer containing the error message. That pointer - ** may point to aErr[], or it may point to some static storage somewhere. - ** Otherwise, assume that the system provides the POSIX version of - ** strerror_r(), which always writes an error message into aErr[]. 
- ** - ** If the code incorrectly assumes that it is the POSIX version that is - ** available, the error message will often be an empty string. Not a - ** huge problem. Incorrectly concluding that the GNU version is available - ** could lead to a segfault though. - */ -#if defined(STRERROR_R_CHAR_P) || defined(__USE_GNU) - zErr = -# endif - strerror_r(iErrno, aErr, sizeof(aErr)-1); - -#elif SQLITE_THREADSAFE - /* This is a threadsafe build, but strerror_r() is not available. */ - zErr = ""; -#else - /* Non-threadsafe build, use strerror(). */ - zErr = strerror(iErrno); -#endif - - if( zPath==0 ) zPath = ""; - sqlite3_log(errcode, - "os_unix.c:%d: (%d) %s(%s) - %s", - iLine, iErrno, zFunc, zPath, zErr - ); - - return errcode; -} - -/* -** Close a file descriptor. -** -** We assume that close() almost always works, since it is only in a -** very sick application or on a very sick platform that it might fail. -** If it does fail, simply leak the file descriptor, but do log the -** error. -** -** Note that it is not safe to retry close() after EINTR since the -** file descriptor might have already been reused by another thread. -** So we don't even try to recover from an EINTR. Just log the error -** and move on. -*/ -static void robust_close(unixFile *pFile, int h, int lineno){ - if( osClose(h) ){ - unixLogErrorAtLine(SQLITE_IOERR_CLOSE, "close", - pFile ? pFile->zPath : 0, lineno); - } -} - -/* -** Close all file descriptors accumuated in the unixInodeInfo->pUnused list. -*/ -static void closePendingFds(unixFile *pFile){ - unixInodeInfo *pInode = pFile->pInode; - UnixUnusedFd *p; - UnixUnusedFd *pNext; - for(p=pInode->pUnused; p; p=pNext){ - pNext = p->pNext; - robust_close(pFile, p->fd, __LINE__); - sqlite3_free(p); - } - pInode->pUnused = 0; -} - -/* -** Release a unixInodeInfo structure previously allocated by findInodeInfo(). -** -** The mutex entered using the unixEnterMutex() function must be held -** when this function is called. -*/ -static void releaseInodeInfo(unixFile *pFile){ - unixInodeInfo *pInode = pFile->pInode; - assert( unixMutexHeld() ); - if( ALWAYS(pInode) ){ - pInode->nRef--; - if( pInode->nRef==0 ){ - assert( pInode->pShmNode==0 ); - closePendingFds(pFile); - if( pInode->pPrev ){ - assert( pInode->pPrev->pNext==pInode ); - pInode->pPrev->pNext = pInode->pNext; - }else{ - assert( inodeList==pInode ); - inodeList = pInode->pNext; - } - if( pInode->pNext ){ - assert( pInode->pNext->pPrev==pInode ); - pInode->pNext->pPrev = pInode->pPrev; - } - sqlite3_free(pInode); - } - } -} - -/* -** Given a file descriptor, locate the unixInodeInfo object that -** describes that file descriptor. Create a new one if necessary. The -** return value might be uninitialized if an error occurs. -** -** The mutex entered using the unixEnterMutex() function must be held -** when this function is called. -** -** Return an appropriate error code. -*/ -static int findInodeInfo( - unixFile *pFile, /* Unix file with file desc used in the key */ - unixInodeInfo **ppInode /* Return the unixInodeInfo object here */ -){ - int rc; /* System call return code */ - int fd; /* The file descriptor for pFile */ - struct unixFileId fileId; /* Lookup key for the unixInodeInfo */ - struct stat statbuf; /* Low-level file information */ - unixInodeInfo *pInode = 0; /* Candidate unixInodeInfo object */ - - assert( unixMutexHeld() ); - - /* Get low-level information about the file that we can used to - ** create a unique name for the file. 
- */ - fd = pFile->h; - rc = osFstat(fd, &statbuf); - if( rc!=0 ){ - pFile->lastErrno = errno; -#ifdef EOVERFLOW - if( pFile->lastErrno==EOVERFLOW ) return SQLITE_NOLFS; -#endif - return SQLITE_IOERR; - } - -#ifdef __APPLE__ - /* On OS X on an msdos filesystem, the inode number is reported - ** incorrectly for zero-size files. See ticket #3260. To work - ** around this problem (we consider it a bug in OS X, not SQLite) - ** we always increase the file size to 1 by writing a single byte - ** prior to accessing the inode number. The one byte written is - ** an ASCII 'S' character which also happens to be the first byte - ** in the header of every SQLite database. In this way, if there - ** is a race condition such that another thread has already populated - ** the first page of the database, no damage is done. - */ - if( statbuf.st_size==0 && (pFile->fsFlags & SQLITE_FSFLAGS_IS_MSDOS)!=0 ){ - do{ rc = osWrite(fd, "S", 1); }while( rc<0 && errno==EINTR ); - if( rc!=1 ){ - pFile->lastErrno = errno; - return SQLITE_IOERR; - } - rc = osFstat(fd, &statbuf); - if( rc!=0 ){ - pFile->lastErrno = errno; - return SQLITE_IOERR; - } - } -#endif - - memset(&fileId, 0, sizeof(fileId)); - fileId.dev = statbuf.st_dev; -#if OS_VXWORKS - fileId.pId = pFile->pId; -#else - fileId.ino = statbuf.st_ino; -#endif - pInode = inodeList; - while( pInode && memcmp(&fileId, &pInode->fileId, sizeof(fileId)) ){ - pInode = pInode->pNext; - } - if( pInode==0 ){ - pInode = sqlite3_malloc( sizeof(*pInode) ); - if( pInode==0 ){ - return SQLITE_NOMEM; - } - memset(pInode, 0, sizeof(*pInode)); - memcpy(&pInode->fileId, &fileId, sizeof(fileId)); - pInode->nRef = 1; - pInode->pNext = inodeList; - pInode->pPrev = 0; - if( inodeList ) inodeList->pPrev = pInode; - inodeList = pInode; - }else{ - pInode->nRef++; - } - *ppInode = pInode; - return SQLITE_OK; -} - -/* -** Return TRUE if pFile has been renamed or unlinked since it was first opened. -*/ -static int fileHasMoved(unixFile *pFile){ - struct stat buf; - return pFile->pInode!=0 && - (osStat(pFile->zPath, &buf)!=0 || buf.st_ino!=pFile->pInode->fileId.ino); -} - - -/* -** Check a unixFile that is a database. Verify the following: -** -** (1) There is exactly one hard link on the file -** (2) The file is not a symbolic link -** (3) The file has not been renamed or unlinked -** -** Issue sqlite3_log(SQLITE_WARNING,...) messages if anything is not right. -*/ -static void verifyDbFile(unixFile *pFile){ - struct stat buf; - int rc; - if( pFile->ctrlFlags & UNIXFILE_WARNED ){ - /* One or more of the following warnings have already been issued. Do not - ** repeat them so as not to clutter the error log */ - return; - } - rc = osFstat(pFile->h, &buf); - if( rc!=0 ){ - sqlite3_log(SQLITE_WARNING, "cannot fstat db file %s", pFile->zPath); - pFile->ctrlFlags |= UNIXFILE_WARNED; - return; - } - if( buf.st_nlink==0 && (pFile->ctrlFlags & UNIXFILE_DELETE)==0 ){ - sqlite3_log(SQLITE_WARNING, "file unlinked while open: %s", pFile->zPath); - pFile->ctrlFlags |= UNIXFILE_WARNED; - return; - } - if( buf.st_nlink>1 ){ - sqlite3_log(SQLITE_WARNING, "multiple links to file: %s", pFile->zPath); - pFile->ctrlFlags |= UNIXFILE_WARNED; - return; - } - if( fileHasMoved(pFile) ){ - sqlite3_log(SQLITE_WARNING, "file renamed while open: %s", pFile->zPath); - pFile->ctrlFlags |= UNIXFILE_WARNED; - return; - } -} - - -/* -** This routine checks if there is a RESERVED lock held on the specified -** file by this or any other process. 
If such a lock is held, set *pResOut -** to a non-zero value otherwise *pResOut is set to zero. The return value -** is set to SQLITE_OK unless an I/O error occurs during lock checking. -*/ -static int unixCheckReservedLock(sqlite3_file *id, int *pResOut){ - int rc = SQLITE_OK; - int reserved = 0; - unixFile *pFile = (unixFile*)id; - - SimulateIOError( return SQLITE_IOERR_CHECKRESERVEDLOCK; ); - - assert( pFile ); - unixEnterMutex(); /* Because pFile->pInode is shared across threads */ - - /* Check if a thread in this process holds such a lock */ - if( pFile->pInode->eFileLock>SHARED_LOCK ){ - reserved = 1; - } - - /* Otherwise see if some other process holds it. - */ -#ifndef __DJGPP__ - if( !reserved && !pFile->pInode->bProcessLock ){ - struct flock lock; - lock.l_whence = SEEK_SET; - lock.l_start = RESERVED_BYTE; - lock.l_len = 1; - lock.l_type = F_WRLCK; - if( osFcntl(pFile->h, F_GETLK, &lock) ){ - rc = SQLITE_IOERR_CHECKRESERVEDLOCK; - pFile->lastErrno = errno; - } else if( lock.l_type!=F_UNLCK ){ - reserved = 1; - } - } -#endif - - unixLeaveMutex(); - OSTRACE(("TEST WR-LOCK %d %d %d (unix)\n", pFile->h, rc, reserved)); - - *pResOut = reserved; - return rc; -} - -/* -** Attempt to set a system-lock on the file pFile. The lock is -** described by pLock. -** -** If the pFile was opened read/write from unix-excl, then the only lock -** ever obtained is an exclusive lock, and it is obtained exactly once -** the first time any lock is attempted. All subsequent system locking -** operations become no-ops. Locking operations still happen internally, -** in order to coordinate access between separate database connections -** within this process, but all of that is handled in memory and the -** operating system does not participate. -** -** This function is a pass-through to fcntl(F_SETLK) if pFile is using -** any VFS other than "unix-excl" or if pFile is opened on "unix-excl" -** and is read-only. -** -** Zero is returned if the call completes successfully, or -1 if a call -** to fcntl() fails. In this case, errno is set appropriately (by fcntl()). -*/ -static int unixFileLock(unixFile *pFile, struct flock *pLock){ - int rc; - unixInodeInfo *pInode = pFile->pInode; - assert( unixMutexHeld() ); - assert( pInode!=0 ); - if( ((pFile->ctrlFlags & UNIXFILE_EXCL)!=0 || pInode->bProcessLock) - && ((pFile->ctrlFlags & UNIXFILE_RDONLY)==0) - ){ - if( pInode->bProcessLock==0 ){ - struct flock lock; - assert( pInode->nLock==0 ); - lock.l_whence = SEEK_SET; - lock.l_start = SHARED_FIRST; - lock.l_len = SHARED_SIZE; - lock.l_type = F_WRLCK; - rc = osFcntl(pFile->h, F_SETLK, &lock); - if( rc<0 ) return rc; - pInode->bProcessLock = 1; - pInode->nLock++; - }else{ - rc = 0; - } - }else{ - rc = osFcntl(pFile->h, F_SETLK, pLock); - } - return rc; -} - -/* -** Lock the file with the lock specified by parameter eFileLock - one -** of the following: -** -** (1) SHARED_LOCK -** (2) RESERVED_LOCK -** (3) PENDING_LOCK -** (4) EXCLUSIVE_LOCK -** -** Sometimes when requesting one lock state, additional lock states -** are inserted in between. The locking might fail on one of the later -** transitions leaving the lock state different from what it started but -** still short of its goal. The following chart shows the allowed -** transitions and the inserted intermediate states: -** -** UNLOCKED -> SHARED -** SHARED -> RESERVED -** SHARED -> (PENDING) -> EXCLUSIVE -** RESERVED -> (PENDING) -> EXCLUSIVE -** PENDING -> EXCLUSIVE -** -** This routine will only increase a lock. 
Use the sqlite3OsUnlock() -** routine to lower a locking level. -*/ -static int unixLock(sqlite3_file *id, int eFileLock){ - /* The following describes the implementation of the various locks and - ** lock transitions in terms of the POSIX advisory shared and exclusive - ** lock primitives (called read-locks and write-locks below, to avoid - ** confusion with SQLite lock names). The algorithms are complicated - ** slightly in order to be compatible with windows systems simultaneously - ** accessing the same database file, in case that is ever required. - ** - ** Symbols defined in os.h indentify the 'pending byte' and the 'reserved - ** byte', each single bytes at well known offsets, and the 'shared byte - ** range', a range of 510 bytes at a well known offset. - ** - ** To obtain a SHARED lock, a read-lock is obtained on the 'pending - ** byte'. If this is successful, a random byte from the 'shared byte - ** range' is read-locked and the lock on the 'pending byte' released. - ** - ** A process may only obtain a RESERVED lock after it has a SHARED lock. - ** A RESERVED lock is implemented by grabbing a write-lock on the - ** 'reserved byte'. - ** - ** A process may only obtain a PENDING lock after it has obtained a - ** SHARED lock. A PENDING lock is implemented by obtaining a write-lock - ** on the 'pending byte'. This ensures that no new SHARED locks can be - ** obtained, but existing SHARED locks are allowed to persist. A process - ** does not have to obtain a RESERVED lock on the way to a PENDING lock. - ** This property is used by the algorithm for rolling back a journal file - ** after a crash. - ** - ** An EXCLUSIVE lock, obtained after a PENDING lock is held, is - ** implemented by obtaining a write-lock on the entire 'shared byte - ** range'. Since all other locks require a read-lock on one of the bytes - ** within this range, this ensures that no other locks are held on the - ** database. - ** - ** The reason a single byte cannot be used instead of the 'shared byte - ** range' is that some versions of windows do not support read-locks. By - ** locking a random byte from a range, concurrent SHARED locks may exist - ** even if the locking primitive used is always a write-lock. - */ - int rc = SQLITE_OK; - unixFile *pFile = (unixFile*)id; - unixInodeInfo *pInode; - struct flock lock; - int tErrno = 0; - - assert( pFile ); - OSTRACE(("LOCK %d %s was %s(%s,%d) pid=%d (unix)\n", pFile->h, - azFileLock(eFileLock), azFileLock(pFile->eFileLock), - azFileLock(pFile->pInode->eFileLock), pFile->pInode->nShared , getpid())); - - /* If there is already a lock of this type or more restrictive on the - ** unixFile, do nothing. Don't use the end_lock: exit path, as - ** unixEnterMutex() hasn't been called yet. - */ - if( pFile->eFileLock>=eFileLock ){ - OSTRACE(("LOCK %d %s ok (already held) (unix)\n", pFile->h, - azFileLock(eFileLock))); - return SQLITE_OK; - } - - /* Make sure the locking sequence is correct. - ** (1) We never move from unlocked to anything higher than shared lock. - ** (2) SQLite never explicitly requests a pendig lock. - ** (3) A shared lock is always held when a reserve lock is requested. 
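The byte-range protocol spelled out above maps each SQLite lock state onto fcntl() locks at fixed offsets. A sketch of the SHARED acquisition sequence, assuming the conventional offsets (PENDING_BYTE defaults to 0x40000000 in os.h); like the unix implementation below, it read-locks the whole shared range rather than a single random byte.

#include <fcntl.h>
#include <string.h>

#define PENDING_BYTE   0x40000000
#define RESERVED_BYTE  (PENDING_BYTE+1)
#define SHARED_FIRST   (PENDING_BYTE+2)
#define SHARED_SIZE    510

/* Returns 0 on success, non-zero if the read-locks could not be obtained. */
static int take_shared_lock(int fd){
  struct flock l;
  int rc;
  memset(&l, 0, sizeof(l));
  l.l_whence = SEEK_SET;

  /* Step 1: momentarily read-lock the pending byte. */
  l.l_type = F_RDLCK; l.l_start = PENDING_BYTE; l.l_len = 1;
  if( fcntl(fd, F_SETLK, &l) ) return -1;

  /* Step 2: read-lock the shared byte range. */
  l.l_start = SHARED_FIRST; l.l_len = SHARED_SIZE;
  rc = fcntl(fd, F_SETLK, &l);

  /* Step 3: release the pending byte again. */
  l.l_type = F_UNLCK; l.l_start = PENDING_BYTE; l.l_len = 1;
  fcntl(fd, F_SETLK, &l);
  return rc;
}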
- */ - assert( pFile->eFileLock!=NO_LOCK || eFileLock==SHARED_LOCK ); - assert( eFileLock!=PENDING_LOCK ); - assert( eFileLock!=RESERVED_LOCK || pFile->eFileLock==SHARED_LOCK ); - - /* This mutex is needed because pFile->pInode is shared across threads - */ - unixEnterMutex(); - pInode = pFile->pInode; - - /* If some thread using this PID has a lock via a different unixFile* - ** handle that precludes the requested lock, return BUSY. - */ - if( (pFile->eFileLock!=pInode->eFileLock && - (pInode->eFileLock>=PENDING_LOCK || eFileLock>SHARED_LOCK)) - ){ - rc = SQLITE_BUSY; - goto end_lock; - } - - /* If a SHARED lock is requested, and some thread using this PID already - ** has a SHARED or RESERVED lock, then increment reference counts and - ** return SQLITE_OK. - */ - if( eFileLock==SHARED_LOCK && - (pInode->eFileLock==SHARED_LOCK || pInode->eFileLock==RESERVED_LOCK) ){ - assert( eFileLock==SHARED_LOCK ); - assert( pFile->eFileLock==0 ); - assert( pInode->nShared>0 ); - pFile->eFileLock = SHARED_LOCK; - pInode->nShared++; - pInode->nLock++; - goto end_lock; - } - - - /* A PENDING lock is needed before acquiring a SHARED lock and before - ** acquiring an EXCLUSIVE lock. For the SHARED lock, the PENDING will - ** be released. - */ - lock.l_len = 1L; - lock.l_whence = SEEK_SET; - if( eFileLock==SHARED_LOCK - || (eFileLock==EXCLUSIVE_LOCK && pFile->eFileLocklastErrno = tErrno; - } - goto end_lock; - } - } - - - /* If control gets to this point, then actually go ahead and make - ** operating system calls for the specified lock. - */ - if( eFileLock==SHARED_LOCK ){ - assert( pInode->nShared==0 ); - assert( pInode->eFileLock==0 ); - assert( rc==SQLITE_OK ); - - /* Now get the read-lock */ - lock.l_start = SHARED_FIRST; - lock.l_len = SHARED_SIZE; - if( unixFileLock(pFile, &lock) ){ - tErrno = errno; - rc = sqliteErrorFromPosixError(tErrno, SQLITE_IOERR_LOCK); - } - - /* Drop the temporary PENDING lock */ - lock.l_start = PENDING_BYTE; - lock.l_len = 1L; - lock.l_type = F_UNLCK; - if( unixFileLock(pFile, &lock) && rc==SQLITE_OK ){ - /* This could happen with a network mount */ - tErrno = errno; - rc = SQLITE_IOERR_UNLOCK; - } - - if( rc ){ - if( rc!=SQLITE_BUSY ){ - pFile->lastErrno = tErrno; - } - goto end_lock; - }else{ - pFile->eFileLock = SHARED_LOCK; - pInode->nLock++; - pInode->nShared = 1; - } - }else if( eFileLock==EXCLUSIVE_LOCK && pInode->nShared>1 ){ - /* We are trying for an exclusive lock but another thread in this - ** same process is still holding a shared lock. */ - rc = SQLITE_BUSY; - }else{ - /* The request was for a RESERVED or EXCLUSIVE lock. It is - ** assumed that there is a SHARED or greater lock on the file - ** already. - */ - assert( 0!=pFile->eFileLock ); - lock.l_type = F_WRLCK; - - assert( eFileLock==RESERVED_LOCK || eFileLock==EXCLUSIVE_LOCK ); - if( eFileLock==RESERVED_LOCK ){ - lock.l_start = RESERVED_BYTE; - lock.l_len = 1L; - }else{ - lock.l_start = SHARED_FIRST; - lock.l_len = SHARED_SIZE; - } - - if( unixFileLock(pFile, &lock) ){ - tErrno = errno; - rc = sqliteErrorFromPosixError(tErrno, SQLITE_IOERR_LOCK); - if( rc!=SQLITE_BUSY ){ - pFile->lastErrno = tErrno; - } - } - } - - -#ifdef SQLITE_DEBUG - /* Set up the transaction-counter change checking flags when - ** transitioning from a SHARED to a RESERVED lock. The change - ** from SHARED to RESERVED marks the beginning of a normal - ** write operation (not a hot journal rollback). 
- */ - if( rc==SQLITE_OK - && pFile->eFileLock<=SHARED_LOCK - && eFileLock==RESERVED_LOCK - ){ - pFile->transCntrChng = 0; - pFile->dbUpdate = 0; - pFile->inNormalWrite = 1; - } -#endif - - - if( rc==SQLITE_OK ){ - pFile->eFileLock = eFileLock; - pInode->eFileLock = eFileLock; - }else if( eFileLock==EXCLUSIVE_LOCK ){ - pFile->eFileLock = PENDING_LOCK; - pInode->eFileLock = PENDING_LOCK; - } - -end_lock: - unixLeaveMutex(); - OSTRACE(("LOCK %d %s %s (unix)\n", pFile->h, azFileLock(eFileLock), - rc==SQLITE_OK ? "ok" : "failed")); - return rc; -} - -/* -** Add the file descriptor used by file handle pFile to the corresponding -** pUnused list. -*/ -static void setPendingFd(unixFile *pFile){ - unixInodeInfo *pInode = pFile->pInode; - UnixUnusedFd *p = pFile->pUnused; - p->pNext = pInode->pUnused; - pInode->pUnused = p; - pFile->h = -1; - pFile->pUnused = 0; -} - -/* -** Lower the locking level on file descriptor pFile to eFileLock. eFileLock -** must be either NO_LOCK or SHARED_LOCK. -** -** If the locking level of the file descriptor is already at or below -** the requested locking level, this routine is a no-op. -** -** If handleNFSUnlock is true, then on downgrading an EXCLUSIVE_LOCK to SHARED -** the byte range is divided into 2 parts and the first part is unlocked then -** set to a read lock, then the other part is simply unlocked. This works -** around a bug in BSD NFS lockd (also seen on MacOSX 10.3+) that fails to -** remove the write lock on a region when a read lock is set. -*/ -static int posixUnlock(sqlite3_file *id, int eFileLock, int handleNFSUnlock){ - unixFile *pFile = (unixFile*)id; - unixInodeInfo *pInode; - struct flock lock; - int rc = SQLITE_OK; - - assert( pFile ); - OSTRACE(("UNLOCK %d %d was %d(%d,%d) pid=%d (unix)\n", pFile->h, eFileLock, - pFile->eFileLock, pFile->pInode->eFileLock, pFile->pInode->nShared, - getpid())); - - assert( eFileLock<=SHARED_LOCK ); - if( pFile->eFileLock<=eFileLock ){ - return SQLITE_OK; - } - unixEnterMutex(); - pInode = pFile->pInode; - assert( pInode->nShared!=0 ); - if( pFile->eFileLock>SHARED_LOCK ){ - assert( pInode->eFileLock==pFile->eFileLock ); - -#ifdef SQLITE_DEBUG - /* When reducing a lock such that other processes can start - ** reading the database file again, make sure that the - ** transaction counter was updated if any part of the database - ** file changed. If the transaction counter is not updated, - ** other connections to the same file might not realize that - ** the file has changed and hence might not know to flush their - ** cache. The use of a stale cache can lead to database corruption. - */ - pFile->inNormalWrite = 0; -#endif - - /* downgrading to a shared lock on NFS involves clearing the write lock - ** before establishing the readlock - to avoid a race condition we downgrade - ** the lock in 2 blocks, so that part of the range will be covered by a - ** write lock until the rest is covered by a read lock: - ** 1: [WWWWW] - ** 2: [....W] - ** 3: [RRRRW] - ** 4: [RRRR.] 
- */ - if( eFileLock==SHARED_LOCK ){ - -#if !defined(__APPLE__) || !SQLITE_ENABLE_LOCKING_STYLE - (void)handleNFSUnlock; - assert( handleNFSUnlock==0 ); -#endif -#if defined(__APPLE__) && SQLITE_ENABLE_LOCKING_STYLE - if( handleNFSUnlock ){ - int tErrno; /* Error code from system call errors */ - off_t divSize = SHARED_SIZE - 1; - - lock.l_type = F_UNLCK; - lock.l_whence = SEEK_SET; - lock.l_start = SHARED_FIRST; - lock.l_len = divSize; - if( unixFileLock(pFile, &lock)==(-1) ){ - tErrno = errno; - rc = SQLITE_IOERR_UNLOCK; - if( IS_LOCK_ERROR(rc) ){ - pFile->lastErrno = tErrno; - } - goto end_unlock; - } - lock.l_type = F_RDLCK; - lock.l_whence = SEEK_SET; - lock.l_start = SHARED_FIRST; - lock.l_len = divSize; - if( unixFileLock(pFile, &lock)==(-1) ){ - tErrno = errno; - rc = sqliteErrorFromPosixError(tErrno, SQLITE_IOERR_RDLOCK); - if( IS_LOCK_ERROR(rc) ){ - pFile->lastErrno = tErrno; - } - goto end_unlock; - } - lock.l_type = F_UNLCK; - lock.l_whence = SEEK_SET; - lock.l_start = SHARED_FIRST+divSize; - lock.l_len = SHARED_SIZE-divSize; - if( unixFileLock(pFile, &lock)==(-1) ){ - tErrno = errno; - rc = SQLITE_IOERR_UNLOCK; - if( IS_LOCK_ERROR(rc) ){ - pFile->lastErrno = tErrno; - } - goto end_unlock; - } - }else -#endif /* defined(__APPLE__) && SQLITE_ENABLE_LOCKING_STYLE */ - { - lock.l_type = F_RDLCK; - lock.l_whence = SEEK_SET; - lock.l_start = SHARED_FIRST; - lock.l_len = SHARED_SIZE; - if( unixFileLock(pFile, &lock) ){ - /* In theory, the call to unixFileLock() cannot fail because another - ** process is holding an incompatible lock. If it does, this - ** indicates that the other process is not following the locking - ** protocol. If this happens, return SQLITE_IOERR_RDLOCK. Returning - ** SQLITE_BUSY would confuse the upper layer (in practice it causes - ** an assert to fail). */ - rc = SQLITE_IOERR_RDLOCK; - pFile->lastErrno = errno; - goto end_unlock; - } - } - } - lock.l_type = F_UNLCK; - lock.l_whence = SEEK_SET; - lock.l_start = PENDING_BYTE; - lock.l_len = 2L; assert( PENDING_BYTE+1==RESERVED_BYTE ); - if( unixFileLock(pFile, &lock)==0 ){ - pInode->eFileLock = SHARED_LOCK; - }else{ - rc = SQLITE_IOERR_UNLOCK; - pFile->lastErrno = errno; - goto end_unlock; - } - } - if( eFileLock==NO_LOCK ){ - /* Decrement the shared lock counter. Release the lock using an - ** OS call only when all threads in this same process have released - ** the lock. - */ - pInode->nShared--; - if( pInode->nShared==0 ){ - lock.l_type = F_UNLCK; - lock.l_whence = SEEK_SET; - lock.l_start = lock.l_len = 0L; - if( unixFileLock(pFile, &lock)==0 ){ - pInode->eFileLock = NO_LOCK; - }else{ - rc = SQLITE_IOERR_UNLOCK; - pFile->lastErrno = errno; - pInode->eFileLock = NO_LOCK; - pFile->eFileLock = NO_LOCK; - } - } - - /* Decrement the count of locks against this same file. When the - ** count reaches zero, close any other file descriptors whose close - ** was deferred because of outstanding locks. - */ - pInode->nLock--; - assert( pInode->nLock>=0 ); - if( pInode->nLock==0 ){ - closePendingFds(pFile); - } - } - -end_unlock: - unixLeaveMutex(); - if( rc==SQLITE_OK ) pFile->eFileLock = eFileLock; - return rc; -} - -/* -** Lower the locking level on file descriptor pFile to eFileLock. eFileLock -** must be either NO_LOCK or SHARED_LOCK. -** -** If the locking level of the file descriptor is already at or below -** the requested locking level, this routine is a no-op. 
-*/ -static int unixUnlock(sqlite3_file *id, int eFileLock){ -#if SQLITE_MAX_MMAP_SIZE>0 - assert( eFileLock==SHARED_LOCK || ((unixFile *)id)->nFetchOut==0 ); -#endif - return posixUnlock(id, eFileLock, 0); -} - -#if SQLITE_MAX_MMAP_SIZE>0 -static int unixMapfile(unixFile *pFd, i64 nByte); -static void unixUnmapfile(unixFile *pFd); -#endif - -/* -** This function performs the parts of the "close file" operation -** common to all locking schemes. It closes the directory and file -** handles, if they are valid, and sets all fields of the unixFile -** structure to 0. -** -** It is *not* necessary to hold the mutex when this routine is called, -** even on VxWorks. A mutex will be acquired on VxWorks by the -** vxworksReleaseFileId() routine. -*/ -static int closeUnixFile(sqlite3_file *id){ - unixFile *pFile = (unixFile*)id; -#if SQLITE_MAX_MMAP_SIZE>0 - unixUnmapfile(pFile); -#endif - if( pFile->h>=0 ){ - robust_close(pFile, pFile->h, __LINE__); - pFile->h = -1; - } -#if OS_VXWORKS - if( pFile->pId ){ - if( pFile->ctrlFlags & UNIXFILE_DELETE ){ - osUnlink(pFile->pId->zCanonicalName); - } - vxworksReleaseFileId(pFile->pId); - pFile->pId = 0; - } -#endif - OSTRACE(("CLOSE %-3d\n", pFile->h)); - OpenCounter(-1); - sqlite3_free(pFile->pUnused); - memset(pFile, 0, sizeof(unixFile)); - return SQLITE_OK; -} - -/* -** Close a file. -*/ -static int unixClose(sqlite3_file *id){ - int rc = SQLITE_OK; - unixFile *pFile = (unixFile *)id; - verifyDbFile(pFile); - unixUnlock(id, NO_LOCK); - unixEnterMutex(); - - /* unixFile.pInode is always valid here. Otherwise, a different close - ** routine (e.g. nolockClose()) would be called instead. - */ - assert( pFile->pInode->nLock>0 || pFile->pInode->bProcessLock==0 ); - if( ALWAYS(pFile->pInode) && pFile->pInode->nLock ){ - /* If there are outstanding locks, do not actually close the file just - ** yet because that would clear those locks. Instead, add the file - ** descriptor to pInode->pUnused list. It will be automatically closed - ** when the last lock is cleared. - */ - setPendingFd(pFile); - } - releaseInodeInfo(pFile); - rc = closeUnixFile(id); - unixLeaveMutex(); - return rc; -} - -/************** End of the posix advisory lock implementation ***************** -******************************************************************************/ - -/****************************************************************************** -****************************** No-op Locking ********************************** -** -** Of the various locking implementations available, this is by far the -** simplest: locking is ignored. No attempt is made to lock the database -** file for reading or writing. -** -** This locking mode is appropriate for use on read-only databases -** (ex: databases that are burned into CD-ROM, for example.) It can -** also be used if the application employs some external mechanism to -** prevent simultaneous access of the same database by two or more -** database connections. But there is a serious risk of database -** corruption if this locking mode is used in situations where multiple -** database connections are accessing the same database file at the same -** time and one or more of those connections are writing. 
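This scheme is reachable from application code as the "unix-none" VFS, registered elsewhere in this file. A minimal usage sketch for the read-only case described above (fixed.db is arbitrary):

#include <sqlite3.h>
#include <stdio.h>

int main(void){
  sqlite3 *db = 0;
  /* Read-only media (or an external mutex) makes OS locking unnecessary. */
  int rc = sqlite3_open_v2("fixed.db", &db, SQLITE_OPEN_READONLY,
                           "unix-none");
  if( rc!=SQLITE_OK ){
    fprintf(stderr, "open failed: %s\n", sqlite3_errmsg(db));
  }
  sqlite3_close(db);
  return rc!=SQLITE_OK;
}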
-*/ - -static int nolockCheckReservedLock(sqlite3_file *NotUsed, int *pResOut){ - UNUSED_PARAMETER(NotUsed); - *pResOut = 0; - return SQLITE_OK; -} -static int nolockLock(sqlite3_file *NotUsed, int NotUsed2){ - UNUSED_PARAMETER2(NotUsed, NotUsed2); - return SQLITE_OK; -} -static int nolockUnlock(sqlite3_file *NotUsed, int NotUsed2){ - UNUSED_PARAMETER2(NotUsed, NotUsed2); - return SQLITE_OK; -} - -/* -** Close the file. -*/ -static int nolockClose(sqlite3_file *id) { - return closeUnixFile(id); -} - -/******************* End of the no-op lock implementation ********************* -******************************************************************************/ - -/****************************************************************************** -************************* Begin dot-file Locking ****************************** -** -** The dotfile locking implementation uses the existence of separate lock -** files (really a directory) to control access to the database. This works -** on just about every filesystem imaginable. But there are serious downsides: -** -** (1) There is zero concurrency. A single reader blocks all other -** connections from reading or writing the database. -** -** (2) An application crash or power loss can leave stale lock files -** sitting around that need to be cleared manually. -** -** Nevertheless, a dotlock is an appropriate locking mode for use if no -** other locking strategy is available. -** -** Dotfile locking works by creating a subdirectory in the same directory as -** the database and with the same name but with a ".lock" extension added. -** The existence of a lock directory implies an EXCLUSIVE lock. All other -** lock types (SHARED, RESERVED, PENDING) are mapped into EXCLUSIVE. -*/ - -/* -** The file suffix added to the data base filename in order to create the -** lock directory. -*/ -#define DOTLOCK_SUFFIX ".lock" - -/* -** This routine checks if there is a RESERVED lock held on the specified -** file by this or any other process. If such a lock is held, set *pResOut -** to a non-zero value otherwise *pResOut is set to zero. The return value -** is set to SQLITE_OK unless an I/O error occurs during lock checking. -** -** In dotfile locking, either a lock exists or it does not. So in this -** variation of CheckReservedLock(), *pResOut is set to true if any lock -** is held on the file and false if the file is unlocked. -*/ -static int dotlockCheckReservedLock(sqlite3_file *id, int *pResOut) { - int rc = SQLITE_OK; - int reserved = 0; - unixFile *pFile = (unixFile*)id; - - SimulateIOError( return SQLITE_IOERR_CHECKRESERVEDLOCK; ); - - assert( pFile ); - - /* Check if a thread in this process holds such a lock */ - if( pFile->eFileLock>SHARED_LOCK ){ - /* Either this connection or some other connection in the same process - ** holds a lock on the file. No need to check further. */ - reserved = 1; - }else{ - /* The lock is held if and only if the lockfile exists */ - const char *zLockFile = (const char*)pFile->lockingContext; - reserved = osAccess(zLockFile, 0)==0; - } - OSTRACE(("TEST WR-LOCK %d %d %d (dotlock)\n", pFile->h, rc, reserved)); - *pResOut = reserved; - return rc; -} - -/* -** Lock the file with the lock specified by parameter eFileLock - one -** of the following: -** -** (1) SHARED_LOCK -** (2) RESERVED_LOCK -** (3) PENDING_LOCK -** (4) EXCLUSIVE_LOCK -** -** Sometimes when requesting one lock state, additional lock states -** are inserted in between. 
The locking might fail on one of the later -** transitions leaving the lock state different from what it started but -** still short of its goal. The following chart shows the allowed -** transitions and the inserted intermediate states: -** -** UNLOCKED -> SHARED -** SHARED -> RESERVED -** SHARED -> (PENDING) -> EXCLUSIVE -** RESERVED -> (PENDING) -> EXCLUSIVE -** PENDING -> EXCLUSIVE -** -** This routine will only increase a lock. Use the sqlite3OsUnlock() -** routine to lower a locking level. -** -** With dotfile locking, we really only support state (4): EXCLUSIVE. -** But we track the other locking levels internally. -*/ -static int dotlockLock(sqlite3_file *id, int eFileLock) { - unixFile *pFile = (unixFile*)id; - char *zLockFile = (char *)pFile->lockingContext; - int rc = SQLITE_OK; - - - /* If we have any lock, then the lock file already exists. All we have - ** to do is adjust our internal record of the lock level. - */ - if( pFile->eFileLock > NO_LOCK ){ - pFile->eFileLock = eFileLock; - /* Always update the timestamp on the old file */ -#ifdef HAVE_UTIME - utime(zLockFile, NULL); -#else - utimes(zLockFile, NULL); -#endif - return SQLITE_OK; - } - - /* grab an exclusive lock */ - rc = osMkdir(zLockFile, 0777); - if( rc<0 ){ - /* failed to open/create the lock directory */ - int tErrno = errno; - if( EEXIST == tErrno ){ - rc = SQLITE_BUSY; - } else { - rc = sqliteErrorFromPosixError(tErrno, SQLITE_IOERR_LOCK); - if( IS_LOCK_ERROR(rc) ){ - pFile->lastErrno = tErrno; - } - } - return rc; - } - - /* got it, set the type and return ok */ - pFile->eFileLock = eFileLock; - return rc; -} - -/* -** Lower the locking level on file descriptor pFile to eFileLock. eFileLock -** must be either NO_LOCK or SHARED_LOCK. -** -** If the locking level of the file descriptor is already at or below -** the requested locking level, this routine is a no-op. -** -** When the locking level reaches NO_LOCK, delete the lock file. -*/ -static int dotlockUnlock(sqlite3_file *id, int eFileLock) { - unixFile *pFile = (unixFile*)id; - char *zLockFile = (char *)pFile->lockingContext; - int rc; - - assert( pFile ); - OSTRACE(("UNLOCK %d %d was %d pid=%d (dotlock)\n", pFile->h, eFileLock, - pFile->eFileLock, getpid())); - assert( eFileLock<=SHARED_LOCK ); - - /* no-op if possible */ - if( pFile->eFileLock==eFileLock ){ - return SQLITE_OK; - } - - /* To downgrade to shared, simply update our internal notion of the - ** lock state. No need to mess with the file on disk. - */ - if( eFileLock==SHARED_LOCK ){ - pFile->eFileLock = SHARED_LOCK; - return SQLITE_OK; - } - - /* To fully unlock the database, delete the lock file */ - assert( eFileLock==NO_LOCK ); - rc = osRmdir(zLockFile); - if( rc<0 && errno==ENOTDIR ) rc = osUnlink(zLockFile); - if( rc<0 ){ - int tErrno = errno; - rc = 0; - if( ENOENT != tErrno ){ - rc = SQLITE_IOERR_UNLOCK; - } - if( IS_LOCK_ERROR(rc) ){ - pFile->lastErrno = tErrno; - } - return rc; - } - pFile->eFileLock = NO_LOCK; - return SQLITE_OK; -} - -/* -** Close a file. Make sure the lock has been released before closing. 
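Stripped of the SQLite plumbing, the dot-file scheme above reduces to an mkdir()/rmdir() pair: mkdir() is atomic, so exactly one process can create the lock directory. A standalone sketch; the demo.db.lock name simply mirrors the DOTLOCK_SUFFIX convention.

#include <errno.h>
#include <stdio.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>

int main(void){
  const char *zLock = "demo.db.lock";
  if( mkdir(zLock, 0777)==0 ){
    printf("exclusive lock acquired\n");
    /* ... read or write the database ... */
    rmdir(zLock);                    /* release */
  }else if( errno==EEXIST ){
    printf("busy: another process holds the lock\n");
  }else{
    perror("mkdir");
  }
  return 0;
}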
-*/ -static int dotlockClose(sqlite3_file *id) { - int rc = SQLITE_OK; - if( id ){ - unixFile *pFile = (unixFile*)id; - dotlockUnlock(id, NO_LOCK); - sqlite3_free(pFile->lockingContext); - rc = closeUnixFile(id); - } - return rc; -} -/****************** End of the dot-file lock implementation ******************* -******************************************************************************/ - -/****************************************************************************** -************************** Begin flock Locking ******************************** -** -** Use the flock() system call to do file locking. -** -** flock() locking is like dot-file locking in that the various -** fine-grain locking levels supported by SQLite are collapsed into -** a single exclusive lock. In other words, SHARED, RESERVED, and -** PENDING locks are the same thing as an EXCLUSIVE lock. SQLite -** still works when you do this, but concurrency is reduced since -** only a single process can be reading the database at a time. -** -** Omit this section if SQLITE_ENABLE_LOCKING_STYLE is turned off or if -** compiling for VXWORKS. -*/ -#if SQLITE_ENABLE_LOCKING_STYLE && !OS_VXWORKS - -/* -** Retry flock() calls that fail with EINTR -*/ -#ifdef EINTR -static int robust_flock(int fd, int op){ - int rc; - do{ rc = flock(fd,op); }while( rc<0 && errno==EINTR ); - return rc; -} -#else -# define robust_flock(a,b) flock(a,b) -#endif - - -/* -** This routine checks if there is a RESERVED lock held on the specified -** file by this or any other process. If such a lock is held, set *pResOut -** to a non-zero value otherwise *pResOut is set to zero. The return value -** is set to SQLITE_OK unless an I/O error occurs during lock checking. -*/ -static int flockCheckReservedLock(sqlite3_file *id, int *pResOut){ - int rc = SQLITE_OK; - int reserved = 0; - unixFile *pFile = (unixFile*)id; - - SimulateIOError( return SQLITE_IOERR_CHECKRESERVEDLOCK; ); - - assert( pFile ); - - /* Check if a thread in this process holds such a lock */ - if( pFile->eFileLock>SHARED_LOCK ){ - reserved = 1; - } - - /* Otherwise see if some other process holds it. */ - if( !reserved ){ - /* attempt to get the lock */ - int lrc = robust_flock(pFile->h, LOCK_EX | LOCK_NB); - if( !lrc ){ - /* got the lock, unlock it */ - lrc = robust_flock(pFile->h, LOCK_UN); - if ( lrc ) { - int tErrno = errno; - /* unlock failed with an error */ - lrc = SQLITE_IOERR_UNLOCK; - if( IS_LOCK_ERROR(lrc) ){ - pFile->lastErrno = tErrno; - rc = lrc; - } - } - } else { - int tErrno = errno; - reserved = 1; - /* someone else might have it reserved */ - lrc = sqliteErrorFromPosixError(tErrno, SQLITE_IOERR_LOCK); - if( IS_LOCK_ERROR(lrc) ){ - pFile->lastErrno = tErrno; - rc = lrc; - } - } - } - OSTRACE(("TEST WR-LOCK %d %d %d (flock)\n", pFile->h, rc, reserved)); - -#ifdef SQLITE_IGNORE_FLOCK_LOCK_ERRORS - if( (rc & SQLITE_IOERR) == SQLITE_IOERR ){ - rc = SQLITE_OK; - reserved=1; - } -#endif /* SQLITE_IGNORE_FLOCK_LOCK_ERRORS */ - *pResOut = reserved; - return rc; -} - -/* -** Lock the file with the lock specified by parameter eFileLock - one -** of the following: -** -** (1) SHARED_LOCK -** (2) RESERVED_LOCK -** (3) PENDING_LOCK -** (4) EXCLUSIVE_LOCK -** -** Sometimes when requesting one lock state, additional lock states -** are inserted in between. The locking might fail on one of the later -** transitions leaving the lock state different from what it started but -** still short of its goal. 
The following chart shows the allowed -** transitions and the inserted intermediate states: -** -** UNLOCKED -> SHARED -** SHARED -> RESERVED -** SHARED -> (PENDING) -> EXCLUSIVE -** RESERVED -> (PENDING) -> EXCLUSIVE -** PENDING -> EXCLUSIVE -** -** flock() only really support EXCLUSIVE locks. We track intermediate -** lock states in the sqlite3_file structure, but all locks SHARED or -** above are really EXCLUSIVE locks and exclude all other processes from -** access the file. -** -** This routine will only increase a lock. Use the sqlite3OsUnlock() -** routine to lower a locking level. -*/ -static int flockLock(sqlite3_file *id, int eFileLock) { - int rc = SQLITE_OK; - unixFile *pFile = (unixFile*)id; - - assert( pFile ); - - /* if we already have a lock, it is exclusive. - ** Just adjust level and punt on outta here. */ - if (pFile->eFileLock > NO_LOCK) { - pFile->eFileLock = eFileLock; - return SQLITE_OK; - } - - /* grab an exclusive lock */ - - if (robust_flock(pFile->h, LOCK_EX | LOCK_NB)) { - int tErrno = errno; - /* didn't get, must be busy */ - rc = sqliteErrorFromPosixError(tErrno, SQLITE_IOERR_LOCK); - if( IS_LOCK_ERROR(rc) ){ - pFile->lastErrno = tErrno; - } - } else { - /* got it, set the type and return ok */ - pFile->eFileLock = eFileLock; - } - OSTRACE(("LOCK %d %s %s (flock)\n", pFile->h, azFileLock(eFileLock), - rc==SQLITE_OK ? "ok" : "failed")); -#ifdef SQLITE_IGNORE_FLOCK_LOCK_ERRORS - if( (rc & SQLITE_IOERR) == SQLITE_IOERR ){ - rc = SQLITE_BUSY; - } -#endif /* SQLITE_IGNORE_FLOCK_LOCK_ERRORS */ - return rc; -} - - -/* -** Lower the locking level on file descriptor pFile to eFileLock. eFileLock -** must be either NO_LOCK or SHARED_LOCK. -** -** If the locking level of the file descriptor is already at or below -** the requested locking level, this routine is a no-op. -*/ -static int flockUnlock(sqlite3_file *id, int eFileLock) { - unixFile *pFile = (unixFile*)id; - - assert( pFile ); - OSTRACE(("UNLOCK %d %d was %d pid=%d (flock)\n", pFile->h, eFileLock, - pFile->eFileLock, getpid())); - assert( eFileLock<=SHARED_LOCK ); - - /* no-op if possible */ - if( pFile->eFileLock==eFileLock ){ - return SQLITE_OK; - } - - /* shared can just be set because we always have an exclusive */ - if (eFileLock==SHARED_LOCK) { - pFile->eFileLock = eFileLock; - return SQLITE_OK; - } - - /* no, really, unlock. */ - if( robust_flock(pFile->h, LOCK_UN) ){ -#ifdef SQLITE_IGNORE_FLOCK_LOCK_ERRORS - return SQLITE_OK; -#endif /* SQLITE_IGNORE_FLOCK_LOCK_ERRORS */ - return SQLITE_IOERR_UNLOCK; - }else{ - pFile->eFileLock = NO_LOCK; - return SQLITE_OK; - } -} - -/* -** Close a file. -*/ -static int flockClose(sqlite3_file *id) { - int rc = SQLITE_OK; - if( id ){ - flockUnlock(id, NO_LOCK); - rc = closeUnixFile(id); - } - return rc; -} - -#endif /* SQLITE_ENABLE_LOCKING_STYLE && !OS_VXWORK */ - -/******************* End of the flock lock implementation ********************* -******************************************************************************/ - -/****************************************************************************** -************************ Begin Named Semaphore Locking ************************ -** -** Named semaphore locking is only supported on VxWorks. -** -** Semaphore locking is like dot-lock and flock in that it really only -** supports EXCLUSIVE locking. Only a single process can read or write -** the database file at a time. This reduces potential concurrency, but -** makes the lock implementation much easier. 
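Although this path is only compiled for VxWorks, the pattern is plain POSIX named semaphores: sem_trywait() either takes the single slot or reports that someone else holds it. A sketch using the standard calls (the semaphore name is arbitrary; link with -pthread where required):

#include <fcntl.h>
#include <semaphore.h>
#include <stdio.h>

int main(void){
  /* One-slot semaphore shared by name across processes. */
  sem_t *pSem = sem_open("/demo-db-sem", O_CREAT, 0666, 1);
  if( pSem==SEM_FAILED ){ perror("sem_open"); return 1; }
  if( sem_trywait(pSem)==0 ){
    printf("exclusive lock acquired\n");
    /* ... read or write the database ... */
    sem_post(pSem);                  /* release */
  }else{
    printf("busy\n");
  }
  sem_close(pSem);
  return 0;
}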
-*/ -#if OS_VXWORKS - -/* -** This routine checks if there is a RESERVED lock held on the specified -** file by this or any other process. If such a lock is held, set *pResOut -** to a non-zero value otherwise *pResOut is set to zero. The return value -** is set to SQLITE_OK unless an I/O error occurs during lock checking. -*/ -static int semCheckReservedLock(sqlite3_file *id, int *pResOut) { - int rc = SQLITE_OK; - int reserved = 0; - unixFile *pFile = (unixFile*)id; - - SimulateIOError( return SQLITE_IOERR_CHECKRESERVEDLOCK; ); - - assert( pFile ); - - /* Check if a thread in this process holds such a lock */ - if( pFile->eFileLock>SHARED_LOCK ){ - reserved = 1; - } - - /* Otherwise see if some other process holds it. */ - if( !reserved ){ - sem_t *pSem = pFile->pInode->pSem; - struct stat statBuf; - - if( sem_trywait(pSem)==-1 ){ - int tErrno = errno; - if( EAGAIN != tErrno ){ - rc = sqliteErrorFromPosixError(tErrno, SQLITE_IOERR_CHECKRESERVEDLOCK); - pFile->lastErrno = tErrno; - } else { - /* someone else has the lock when we are in NO_LOCK */ - reserved = (pFile->eFileLock < SHARED_LOCK); - } - }else{ - /* we could have it if we want it */ - sem_post(pSem); - } - } - OSTRACE(("TEST WR-LOCK %d %d %d (sem)\n", pFile->h, rc, reserved)); - - *pResOut = reserved; - return rc; -} - -/* -** Lock the file with the lock specified by parameter eFileLock - one -** of the following: -** -** (1) SHARED_LOCK -** (2) RESERVED_LOCK -** (3) PENDING_LOCK -** (4) EXCLUSIVE_LOCK -** -** Sometimes when requesting one lock state, additional lock states -** are inserted in between. The locking might fail on one of the later -** transitions leaving the lock state different from what it started but -** still short of its goal. The following chart shows the allowed -** transitions and the inserted intermediate states: -** -** UNLOCKED -> SHARED -** SHARED -> RESERVED -** SHARED -> (PENDING) -> EXCLUSIVE -** RESERVED -> (PENDING) -> EXCLUSIVE -** PENDING -> EXCLUSIVE -** -** Semaphore locks only really support EXCLUSIVE locks. We track intermediate -** lock states in the sqlite3_file structure, but all locks SHARED or -** above are really EXCLUSIVE locks and exclude all other processes from -** access the file. -** -** This routine will only increase a lock. Use the sqlite3OsUnlock() -** routine to lower a locking level. -*/ -static int semLock(sqlite3_file *id, int eFileLock) { - unixFile *pFile = (unixFile*)id; - int fd; - sem_t *pSem = pFile->pInode->pSem; - int rc = SQLITE_OK; - - /* if we already have a lock, it is exclusive. - ** Just adjust level and punt on outta here. */ - if (pFile->eFileLock > NO_LOCK) { - pFile->eFileLock = eFileLock; - rc = SQLITE_OK; - goto sem_end_lock; - } - - /* lock semaphore now but bail out when already locked. */ - if( sem_trywait(pSem)==-1 ){ - rc = SQLITE_BUSY; - goto sem_end_lock; - } - - /* got it, set the type and return ok */ - pFile->eFileLock = eFileLock; - - sem_end_lock: - return rc; -} - -/* -** Lower the locking level on file descriptor pFile to eFileLock. eFileLock -** must be either NO_LOCK or SHARED_LOCK. -** -** If the locking level of the file descriptor is already at or below -** the requested locking level, this routine is a no-op. 
-*/ -static int semUnlock(sqlite3_file *id, int eFileLock) { - unixFile *pFile = (unixFile*)id; - sem_t *pSem = pFile->pInode->pSem; - - assert( pFile ); - assert( pSem ); - OSTRACE(("UNLOCK %d %d was %d pid=%d (sem)\n", pFile->h, eFileLock, - pFile->eFileLock, getpid())); - assert( eFileLock<=SHARED_LOCK ); - - /* no-op if possible */ - if( pFile->eFileLock==eFileLock ){ - return SQLITE_OK; - } - - /* shared can just be set because we always have an exclusive */ - if (eFileLock==SHARED_LOCK) { - pFile->eFileLock = eFileLock; - return SQLITE_OK; - } - - /* no, really unlock. */ - if ( sem_post(pSem)==-1 ) { - int rc, tErrno = errno; - rc = sqliteErrorFromPosixError(tErrno, SQLITE_IOERR_UNLOCK); - if( IS_LOCK_ERROR(rc) ){ - pFile->lastErrno = tErrno; - } - return rc; - } - pFile->eFileLock = NO_LOCK; - return SQLITE_OK; -} - -/* - ** Close a file. - */ -static int semClose(sqlite3_file *id) { - if( id ){ - unixFile *pFile = (unixFile*)id; - semUnlock(id, NO_LOCK); - assert( pFile ); - unixEnterMutex(); - releaseInodeInfo(pFile); - unixLeaveMutex(); - closeUnixFile(id); - } - return SQLITE_OK; -} - -#endif /* OS_VXWORKS */ -/* -** Named semaphore locking is only available on VxWorks. -** -*************** End of the named semaphore lock implementation **************** -******************************************************************************/ - - -/****************************************************************************** -*************************** Begin AFP Locking ********************************* -** -** AFP is the Apple Filing Protocol. AFP is a network filesystem found -** on Apple Macintosh computers - both OS9 and OSX. -** -** Third-party implementations of AFP are available. But this code here -** only works on OSX. -*/ - -#if defined(__APPLE__) && SQLITE_ENABLE_LOCKING_STYLE -/* -** The afpLockingContext structure contains all afp lock specific state -*/ -typedef struct afpLockingContext afpLockingContext; -struct afpLockingContext { - int reserved; - const char *dbPath; /* Name of the open file */ -}; - -struct ByteRangeLockPB2 -{ - unsigned long long offset; /* offset to first byte to lock */ - unsigned long long length; /* nbr of bytes to lock */ - unsigned long long retRangeStart; /* nbr of 1st byte locked if successful */ - unsigned char unLockFlag; /* 1 = unlock, 0 = lock */ - unsigned char startEndFlag; /* 1=rel to end of fork, 0=rel to start */ - int fd; /* file desc to assoc this lock with */ -}; - -#define afpfsByteRangeLock2FSCTL _IOWR('z', 23, struct ByteRangeLockPB2) - -/* -** This is a utility for setting or clearing a bit-range lock on an -** AFP filesystem. -** -** Return SQLITE_OK on success, SQLITE_BUSY on failure. -*/ -static int afpSetLock( - const char *path, /* Name of the file to be locked or unlocked */ - unixFile *pFile, /* Open file descriptor on path */ - unsigned long long offset, /* First byte to be locked */ - unsigned long long length, /* Number of bytes to lock */ - int setLockFlag /* True to set lock. False to clear lock */ -){ - struct ByteRangeLockPB2 pb; - int err; - - pb.unLockFlag = setLockFlag ? 
0 : 1; - pb.startEndFlag = 0; - pb.offset = offset; - pb.length = length; - pb.fd = pFile->h; - - OSTRACE(("AFPSETLOCK [%s] for %d%s in range %llx:%llx\n", - (setLockFlag?"ON":"OFF"), pFile->h, (pb.fd==-1?"[testval-1]":""), - offset, length)); - err = fsctl(path, afpfsByteRangeLock2FSCTL, &pb, 0); - if ( err==-1 ) { - int rc; - int tErrno = errno; - OSTRACE(("AFPSETLOCK failed to fsctl() '%s' %d %s\n", - path, tErrno, strerror(tErrno))); -#ifdef SQLITE_IGNORE_AFP_LOCK_ERRORS - rc = SQLITE_BUSY; -#else - rc = sqliteErrorFromPosixError(tErrno, - setLockFlag ? SQLITE_IOERR_LOCK : SQLITE_IOERR_UNLOCK); -#endif /* SQLITE_IGNORE_AFP_LOCK_ERRORS */ - if( IS_LOCK_ERROR(rc) ){ - pFile->lastErrno = tErrno; - } - return rc; - } else { - return SQLITE_OK; - } -} - -/* -** This routine checks if there is a RESERVED lock held on the specified -** file by this or any other process. If such a lock is held, set *pResOut -** to a non-zero value otherwise *pResOut is set to zero. The return value -** is set to SQLITE_OK unless an I/O error occurs during lock checking. -*/ -static int afpCheckReservedLock(sqlite3_file *id, int *pResOut){ - int rc = SQLITE_OK; - int reserved = 0; - unixFile *pFile = (unixFile*)id; - afpLockingContext *context; - - SimulateIOError( return SQLITE_IOERR_CHECKRESERVEDLOCK; ); - - assert( pFile ); - context = (afpLockingContext *) pFile->lockingContext; - if( context->reserved ){ - *pResOut = 1; - return SQLITE_OK; - } - unixEnterMutex(); /* Because pFile->pInode is shared across threads */ - - /* Check if a thread in this process holds such a lock */ - if( pFile->pInode->eFileLock>SHARED_LOCK ){ - reserved = 1; - } - - /* Otherwise see if some other process holds it. - */ - if( !reserved ){ - /* lock the RESERVED byte */ - int lrc = afpSetLock(context->dbPath, pFile, RESERVED_BYTE, 1,1); - if( SQLITE_OK==lrc ){ - /* if we succeeded in taking the reserved lock, unlock it to restore - ** the original state */ - lrc = afpSetLock(context->dbPath, pFile, RESERVED_BYTE, 1, 0); - } else { - /* if we failed to get the lock then someone else must have it */ - reserved = 1; - } - if( IS_LOCK_ERROR(lrc) ){ - rc=lrc; - } - } - - unixLeaveMutex(); - OSTRACE(("TEST WR-LOCK %d %d %d (afp)\n", pFile->h, rc, reserved)); - - *pResOut = reserved; - return rc; -} - -/* -** Lock the file with the lock specified by parameter eFileLock - one -** of the following: -** -** (1) SHARED_LOCK -** (2) RESERVED_LOCK -** (3) PENDING_LOCK -** (4) EXCLUSIVE_LOCK -** -** Sometimes when requesting one lock state, additional lock states -** are inserted in between. The locking might fail on one of the later -** transitions leaving the lock state different from what it started but -** still short of its goal. The following chart shows the allowed -** transitions and the inserted intermediate states: -** -** UNLOCKED -> SHARED -** SHARED -> RESERVED -** SHARED -> (PENDING) -> EXCLUSIVE -** RESERVED -> (PENDING) -> EXCLUSIVE -** PENDING -> EXCLUSIVE -** -** This routine will only increase a lock. Use the sqlite3OsUnlock() -** routine to lower a locking level. 
-*/ -static int afpLock(sqlite3_file *id, int eFileLock){ - int rc = SQLITE_OK; - unixFile *pFile = (unixFile*)id; - unixInodeInfo *pInode = pFile->pInode; - afpLockingContext *context = (afpLockingContext *) pFile->lockingContext; - - assert( pFile ); - OSTRACE(("LOCK %d %s was %s(%s,%d) pid=%d (afp)\n", pFile->h, - azFileLock(eFileLock), azFileLock(pFile->eFileLock), - azFileLock(pInode->eFileLock), pInode->nShared , getpid())); - - /* If there is already a lock of this type or more restrictive on the - ** unixFile, do nothing. Don't use the afp_end_lock: exit path, as - ** unixEnterMutex() hasn't been called yet. - */ - if( pFile->eFileLock>=eFileLock ){ - OSTRACE(("LOCK %d %s ok (already held) (afp)\n", pFile->h, - azFileLock(eFileLock))); - return SQLITE_OK; - } - - /* Make sure the locking sequence is correct - ** (1) We never move from unlocked to anything higher than shared lock. - ** (2) SQLite never explicitly requests a pendig lock. - ** (3) A shared lock is always held when a reserve lock is requested. - */ - assert( pFile->eFileLock!=NO_LOCK || eFileLock==SHARED_LOCK ); - assert( eFileLock!=PENDING_LOCK ); - assert( eFileLock!=RESERVED_LOCK || pFile->eFileLock==SHARED_LOCK ); - - /* This mutex is needed because pFile->pInode is shared across threads - */ - unixEnterMutex(); - pInode = pFile->pInode; - - /* If some thread using this PID has a lock via a different unixFile* - ** handle that precludes the requested lock, return BUSY. - */ - if( (pFile->eFileLock!=pInode->eFileLock && - (pInode->eFileLock>=PENDING_LOCK || eFileLock>SHARED_LOCK)) - ){ - rc = SQLITE_BUSY; - goto afp_end_lock; - } - - /* If a SHARED lock is requested, and some thread using this PID already - ** has a SHARED or RESERVED lock, then increment reference counts and - ** return SQLITE_OK. - */ - if( eFileLock==SHARED_LOCK && - (pInode->eFileLock==SHARED_LOCK || pInode->eFileLock==RESERVED_LOCK) ){ - assert( eFileLock==SHARED_LOCK ); - assert( pFile->eFileLock==0 ); - assert( pInode->nShared>0 ); - pFile->eFileLock = SHARED_LOCK; - pInode->nShared++; - pInode->nLock++; - goto afp_end_lock; - } - - /* A PENDING lock is needed before acquiring a SHARED lock and before - ** acquiring an EXCLUSIVE lock. For the SHARED lock, the PENDING will - ** be released. - */ - if( eFileLock==SHARED_LOCK - || (eFileLock==EXCLUSIVE_LOCK && pFile->eFileLockdbPath, pFile, PENDING_BYTE, 1, 1); - if (failed) { - rc = failed; - goto afp_end_lock; - } - } - - /* If control gets to this point, then actually go ahead and make - ** operating system calls for the specified lock. - */ - if( eFileLock==SHARED_LOCK ){ - int lrc1, lrc2, lrc1Errno = 0; - long lk, mask; - - assert( pInode->nShared==0 ); - assert( pInode->eFileLock==0 ); - - mask = (sizeof(long)==8) ? 
LARGEST_INT64 : 0x7fffffff; - /* Now get the read-lock SHARED_LOCK */ - /* note that the quality of the randomness doesn't matter that much */ - lk = random(); - pInode->sharedByte = (lk & mask)%(SHARED_SIZE - 1); - lrc1 = afpSetLock(context->dbPath, pFile, - SHARED_FIRST+pInode->sharedByte, 1, 1); - if( IS_LOCK_ERROR(lrc1) ){ - lrc1Errno = pFile->lastErrno; - } - /* Drop the temporary PENDING lock */ - lrc2 = afpSetLock(context->dbPath, pFile, PENDING_BYTE, 1, 0); - - if( IS_LOCK_ERROR(lrc1) ) { - pFile->lastErrno = lrc1Errno; - rc = lrc1; - goto afp_end_lock; - } else if( IS_LOCK_ERROR(lrc2) ){ - rc = lrc2; - goto afp_end_lock; - } else if( lrc1 != SQLITE_OK ) { - rc = lrc1; - } else { - pFile->eFileLock = SHARED_LOCK; - pInode->nLock++; - pInode->nShared = 1; - } - }else if( eFileLock==EXCLUSIVE_LOCK && pInode->nShared>1 ){ - /* We are trying for an exclusive lock but another thread in this - ** same process is still holding a shared lock. */ - rc = SQLITE_BUSY; - }else{ - /* The request was for a RESERVED or EXCLUSIVE lock. It is - ** assumed that there is a SHARED or greater lock on the file - ** already. - */ - int failed = 0; - assert( 0!=pFile->eFileLock ); - if (eFileLock >= RESERVED_LOCK && pFile->eFileLock < RESERVED_LOCK) { - /* Acquire a RESERVED lock */ - failed = afpSetLock(context->dbPath, pFile, RESERVED_BYTE, 1,1); - if( !failed ){ - context->reserved = 1; - } - } - if (!failed && eFileLock == EXCLUSIVE_LOCK) { - /* Acquire an EXCLUSIVE lock */ - - /* Remove the shared lock before trying the range. we'll need to - ** reestablish the shared lock if we can't get the afpUnlock - */ - if( !(failed = afpSetLock(context->dbPath, pFile, SHARED_FIRST + - pInode->sharedByte, 1, 0)) ){ - int failed2 = SQLITE_OK; - /* now attemmpt to get the exclusive lock range */ - failed = afpSetLock(context->dbPath, pFile, SHARED_FIRST, - SHARED_SIZE, 1); - if( failed && (failed2 = afpSetLock(context->dbPath, pFile, - SHARED_FIRST + pInode->sharedByte, 1, 1)) ){ - /* Can't reestablish the shared lock. Sqlite can't deal, this is - ** a critical I/O error - */ - rc = ((failed & SQLITE_IOERR) == SQLITE_IOERR) ? failed2 : - SQLITE_IOERR_LOCK; - goto afp_end_lock; - } - }else{ - rc = failed; - } - } - if( failed ){ - rc = failed; - } - } - - if( rc==SQLITE_OK ){ - pFile->eFileLock = eFileLock; - pInode->eFileLock = eFileLock; - }else if( eFileLock==EXCLUSIVE_LOCK ){ - pFile->eFileLock = PENDING_LOCK; - pInode->eFileLock = PENDING_LOCK; - } - -afp_end_lock: - unixLeaveMutex(); - OSTRACE(("LOCK %d %s %s (afp)\n", pFile->h, azFileLock(eFileLock), - rc==SQLITE_OK ? "ok" : "failed")); - return rc; -} - -/* -** Lower the locking level on file descriptor pFile to eFileLock. eFileLock -** must be either NO_LOCK or SHARED_LOCK. -** -** If the locking level of the file descriptor is already at or below -** the requested locking level, this routine is a no-op. 
-*/ -static int afpUnlock(sqlite3_file *id, int eFileLock) { - int rc = SQLITE_OK; - unixFile *pFile = (unixFile*)id; - unixInodeInfo *pInode; - afpLockingContext *context = (afpLockingContext *) pFile->lockingContext; - int skipShared = 0; -#ifdef SQLITE_TEST - int h = pFile->h; -#endif - - assert( pFile ); - OSTRACE(("UNLOCK %d %d was %d(%d,%d) pid=%d (afp)\n", pFile->h, eFileLock, - pFile->eFileLock, pFile->pInode->eFileLock, pFile->pInode->nShared, - getpid())); - - assert( eFileLock<=SHARED_LOCK ); - if( pFile->eFileLock<=eFileLock ){ - return SQLITE_OK; - } - unixEnterMutex(); - pInode = pFile->pInode; - assert( pInode->nShared!=0 ); - if( pFile->eFileLock>SHARED_LOCK ){ - assert( pInode->eFileLock==pFile->eFileLock ); - SimulateIOErrorBenign(1); - SimulateIOError( h=(-1) ) - SimulateIOErrorBenign(0); - -#ifdef SQLITE_DEBUG - /* When reducing a lock such that other processes can start - ** reading the database file again, make sure that the - ** transaction counter was updated if any part of the database - ** file changed. If the transaction counter is not updated, - ** other connections to the same file might not realize that - ** the file has changed and hence might not know to flush their - ** cache. The use of a stale cache can lead to database corruption. - */ - assert( pFile->inNormalWrite==0 - || pFile->dbUpdate==0 - || pFile->transCntrChng==1 ); - pFile->inNormalWrite = 0; -#endif - - if( pFile->eFileLock==EXCLUSIVE_LOCK ){ - rc = afpSetLock(context->dbPath, pFile, SHARED_FIRST, SHARED_SIZE, 0); - if( rc==SQLITE_OK && (eFileLock==SHARED_LOCK || pInode->nShared>1) ){ - /* only re-establish the shared lock if necessary */ - int sharedLockByte = SHARED_FIRST+pInode->sharedByte; - rc = afpSetLock(context->dbPath, pFile, sharedLockByte, 1, 1); - } else { - skipShared = 1; - } - } - if( rc==SQLITE_OK && pFile->eFileLock>=PENDING_LOCK ){ - rc = afpSetLock(context->dbPath, pFile, PENDING_BYTE, 1, 0); - } - if( rc==SQLITE_OK && pFile->eFileLock>=RESERVED_LOCK && context->reserved ){ - rc = afpSetLock(context->dbPath, pFile, RESERVED_BYTE, 1, 0); - if( !rc ){ - context->reserved = 0; - } - } - if( rc==SQLITE_OK && (eFileLock==SHARED_LOCK || pInode->nShared>1)){ - pInode->eFileLock = SHARED_LOCK; - } - } - if( rc==SQLITE_OK && eFileLock==NO_LOCK ){ - - /* Decrement the shared lock counter. Release the lock using an - ** OS call only when all threads in this same process have released - ** the lock. - */ - unsigned long long sharedLockByte = SHARED_FIRST+pInode->sharedByte; - pInode->nShared--; - if( pInode->nShared==0 ){ - SimulateIOErrorBenign(1); - SimulateIOError( h=(-1) ) - SimulateIOErrorBenign(0); - if( !skipShared ){ - rc = afpSetLock(context->dbPath, pFile, sharedLockByte, 1, 0); - } - if( !rc ){ - pInode->eFileLock = NO_LOCK; - pFile->eFileLock = NO_LOCK; - } - } - if( rc==SQLITE_OK ){ - pInode->nLock--; - assert( pInode->nLock>=0 ); - if( pInode->nLock==0 ){ - closePendingFds(pFile); - } - } - } - - unixLeaveMutex(); - if( rc==SQLITE_OK ) pFile->eFileLock = eFileLock; - return rc; -} - -/* -** Close a file & cleanup AFP specific locking context -*/ -static int afpClose(sqlite3_file *id) { - int rc = SQLITE_OK; - if( id ){ - unixFile *pFile = (unixFile*)id; - afpUnlock(id, NO_LOCK); - unixEnterMutex(); - if( pFile->pInode && pFile->pInode->nLock ){ - /* If there are outstanding locks, do not actually close the file just - ** yet because that would clear those locks. Instead, add the file - ** descriptor to pInode->aPending. 
It will be automatically closed when - ** the last lock is cleared. - */ - setPendingFd(pFile); - } - releaseInodeInfo(pFile); - sqlite3_free(pFile->lockingContext); - rc = closeUnixFile(id); - unixLeaveMutex(); - } - return rc; -} - -#endif /* defined(__APPLE__) && SQLITE_ENABLE_LOCKING_STYLE */ -/* -** The code above is the AFP lock implementation. The code is specific -** to MacOSX and does not work on other unix platforms. No alternative -** is available. If you don't compile for a mac, then the "unix-afp" -** VFS is not available. -** -********************* End of the AFP lock implementation ********************** -******************************************************************************/ - -/****************************************************************************** -*************************** Begin NFS Locking ********************************/ - -#if defined(__APPLE__) && SQLITE_ENABLE_LOCKING_STYLE -/* - ** Lower the locking level on file descriptor pFile to eFileLock. eFileLock - ** must be either NO_LOCK or SHARED_LOCK. - ** - ** If the locking level of the file descriptor is already at or below - ** the requested locking level, this routine is a no-op. - */ -static int nfsUnlock(sqlite3_file *id, int eFileLock){ - return posixUnlock(id, eFileLock, 1); -} - -#endif /* defined(__APPLE__) && SQLITE_ENABLE_LOCKING_STYLE */ -/* -** The code above is the NFS lock implementation. The code is specific -** to MacOSX and does not work on other unix platforms. No alternative -** is available. -** -********************* End of the NFS lock implementation ********************** -******************************************************************************/ - -/****************************************************************************** -**************** Non-locking sqlite3_file methods ***************************** -** -** The next division contains implementations for all methods of the -** sqlite3_file object other than the locking methods. The locking -** methods were defined in divisions above (one locking method per -** division). Those methods that are common to all locking modes -** are gather together into this division. -*/ - -/* -** Seek to the offset passed as the second argument, then read cnt -** bytes into pBuf. Return the number of bytes actually read. -** -** NB: If you define USE_PREAD or USE_PREAD64, then it might also -** be necessary to define _XOPEN_SOURCE to be 500. This varies from -** one system to another. Since SQLite does not define USE_PREAD -** any any form by default, we will not attempt to define _XOPEN_SOURCE. -** See tickets #2741 and #2681. -** -** To avoid stomping the errno value on a failed read the lastErrno value -** is set before returning. 
-*/ -static int seekAndRead(unixFile *id, sqlite3_int64 offset, void *pBuf, int cnt){ - int got; - int prior = 0; -#if (!defined(USE_PREAD) && !defined(USE_PREAD64)) - i64 newOffset; -#endif - TIMER_START; - assert( cnt==(cnt&0x1ffff) ); - assert( id->h>2 ); - cnt &= 0x1ffff; - do{ -#if defined(USE_PREAD) - got = osPread(id->h, pBuf, cnt, offset); - SimulateIOError( got = -1 ); -#elif defined(USE_PREAD64) - got = osPread64(id->h, pBuf, cnt, offset); - SimulateIOError( got = -1 ); -#else - newOffset = lseek(id->h, offset, SEEK_SET); - SimulateIOError( newOffset-- ); - if( newOffset!=offset ){ - if( newOffset == -1 ){ - ((unixFile*)id)->lastErrno = errno; - }else{ - ((unixFile*)id)->lastErrno = 0; - } - return -1; - } - got = osRead(id->h, pBuf, cnt); -#endif - if( got==cnt ) break; - if( got<0 ){ - if( errno==EINTR ){ got = 1; continue; } - prior = 0; - ((unixFile*)id)->lastErrno = errno; - break; - }else if( got>0 ){ - cnt -= got; - offset += got; - prior += got; - pBuf = (void*)(got + (char*)pBuf); - } - }while( got>0 ); - TIMER_END; - OSTRACE(("READ %-3d %5d %7lld %llu\n", - id->h, got+prior, offset-prior, TIMER_ELAPSED)); - return got+prior; -} - -/* -** Read data from a file into a buffer. Return SQLITE_OK if all -** bytes were read successfully and SQLITE_IOERR if anything goes -** wrong. -*/ -static int unixRead( - sqlite3_file *id, - void *pBuf, - int amt, - sqlite3_int64 offset -){ - unixFile *pFile = (unixFile *)id; - int got; - assert( id ); - assert( offset>=0 ); - assert( amt>0 ); - - /* If this is a database file (not a journal, master-journal or temp - ** file), the bytes in the locking range should never be read or written. */ -#if 0 - assert( pFile->pUnused==0 - || offset>=PENDING_BYTE+512 - || offset+amt<=PENDING_BYTE - ); -#endif - -#if SQLITE_MAX_MMAP_SIZE>0 - /* Deal with as much of this read request as possible by transfering - ** data from the memory mapping using memcpy(). */ - if( offset<pFile->mmapSize ){ - if( offset+amt <= pFile->mmapSize ){ - memcpy(pBuf, &((u8 *)(pFile->pMapRegion))[offset], amt); - return SQLITE_OK; - }else{ - int nCopy = pFile->mmapSize - offset; - memcpy(pBuf, &((u8 *)(pFile->pMapRegion))[offset], nCopy); - pBuf = &((u8 *)pBuf)[nCopy]; - amt -= nCopy; - offset += nCopy; - } - } -#endif - - got = seekAndRead(pFile, offset, pBuf, amt); - if( got==amt ){ - return SQLITE_OK; - }else if( got<0 ){ - /* lastErrno set by seekAndRead */ - return SQLITE_IOERR_READ; - }else{ - pFile->lastErrno = 0; /* not a system error */ - /* Unread parts of the buffer must be zero-filled */ - memset(&((char*)pBuf)[got], 0, amt-got); - return SQLITE_IOERR_SHORT_READ; - } -} - -/* -** Attempt to seek the file-descriptor passed as the first argument to -** absolute offset iOff, then attempt to write nBuf bytes of data from -** pBuf to it. If an error occurs, return -1 and set *piErrno. Otherwise, -** return the actual number of bytes written (which may be less than -** nBuf). 
-*/ -static int seekAndWriteFd( - int fd, /* File descriptor to write to */ - i64 iOff, /* File offset to begin writing at */ - const void *pBuf, /* Copy data from this buffer to the file */ - int nBuf, /* Size of buffer pBuf in bytes */ - int *piErrno /* OUT: Error number if error occurs */ -){ - int rc = 0; /* Value returned by system call */ - - assert( nBuf==(nBuf&0x1ffff) ); - assert( fd>2 ); - nBuf &= 0x1ffff; - TIMER_START; - -#if defined(USE_PREAD) - do{ rc = osPwrite(fd, pBuf, nBuf, iOff); }while( rc<0 && errno==EINTR ); -#elif defined(USE_PREAD64) - do{ rc = osPwrite64(fd, pBuf, nBuf, iOff);}while( rc<0 && errno==EINTR); -#else - do{ - i64 iSeek = lseek(fd, iOff, SEEK_SET); - SimulateIOError( iSeek-- ); - - if( iSeek!=iOff ){ - if( piErrno ) *piErrno = (iSeek==-1 ? errno : 0); - return -1; - } - rc = osWrite(fd, pBuf, nBuf); - }while( rc<0 && errno==EINTR ); -#endif - - TIMER_END; - OSTRACE(("WRITE %-3d %5d %7lld %llu\n", fd, rc, iOff, TIMER_ELAPSED)); - - if( rc<0 && piErrno ) *piErrno = errno; - return rc; -} - - -/* -** Seek to the offset in id->offset then read cnt bytes into pBuf. -** Return the number of bytes actually read. Update the offset. -** -** To avoid stomping the errno value on a failed write the lastErrno value -** is set before returning. -*/ -static int seekAndWrite(unixFile *id, i64 offset, const void *pBuf, int cnt){ - return seekAndWriteFd(id->h, offset, pBuf, cnt, &id->lastErrno); -} - - -/* -** Write data from a buffer into a file. Return SQLITE_OK on success -** or some other error code on failure. -*/ -static int unixWrite( - sqlite3_file *id, - const void *pBuf, - int amt, - sqlite3_int64 offset -){ - unixFile *pFile = (unixFile*)id; - int wrote = 0; - assert( id ); - assert( amt>0 ); - - /* If this is a database file (not a journal, master-journal or temp - ** file), the bytes in the locking range should never be read or written. */ -#if 0 - assert( pFile->pUnused==0 - || offset>=PENDING_BYTE+512 - || offset+amt<=PENDING_BYTE - ); -#endif - -#ifdef SQLITE_DEBUG - /* If we are doing a normal write to a database file (as opposed to - ** doing a hot-journal rollback or a write to some file other than a - ** normal database file) then record the fact that the database - ** has changed. If the transaction counter is modified, record that - ** fact too. - */ - if( pFile->inNormalWrite ){ - pFile->dbUpdate = 1; /* The database has been modified */ - if( offset<=24 && offset+amt>=27 ){ - int rc; - char oldCntr[4]; - SimulateIOErrorBenign(1); - rc = seekAndRead(pFile, 24, oldCntr, 4); - SimulateIOErrorBenign(0); - if( rc!=4 || memcmp(oldCntr, &((char*)pBuf)[24-offset], 4)!=0 ){ - pFile->transCntrChng = 1; /* The transaction counter has changed */ - } - } - } -#endif - -#if SQLITE_MAX_MMAP_SIZE>0 - /* Deal with as much of this write request as possible by transfering - ** data from the memory mapping using memcpy(). 
*/ - if( offset<pFile->mmapSize ){ - if( offset+amt <= pFile->mmapSize ){ - memcpy(&((u8 *)(pFile->pMapRegion))[offset], pBuf, amt); - return SQLITE_OK; - }else{ - int nCopy = pFile->mmapSize - offset; - memcpy(&((u8 *)(pFile->pMapRegion))[offset], pBuf, nCopy); - pBuf = &((u8 *)pBuf)[nCopy]; - amt -= nCopy; - offset += nCopy; - } - } -#endif - - while( amt>0 && (wrote = seekAndWrite(pFile, offset, pBuf, amt))>0 ){ - amt -= wrote; - offset += wrote; - pBuf = &((char*)pBuf)[wrote]; - } - SimulateIOError(( wrote=(-1), amt=1 )); - SimulateDiskfullError(( wrote=0, amt=1 )); - - if( amt>0 ){ - if( wrote<0 && pFile->lastErrno!=ENOSPC ){ - /* lastErrno set by seekAndWrite */ - return SQLITE_IOERR_WRITE; - }else{ - pFile->lastErrno = 0; /* not a system error */ - return SQLITE_FULL; - } - } - - return SQLITE_OK; -} - -#ifdef SQLITE_TEST -/* -** Count the number of fullsyncs and normal syncs. This is used to test -** that syncs and fullsyncs are occurring at the right times. -*/ -SQLITE_API int sqlite3_sync_count = 0; -SQLITE_API int sqlite3_fullsync_count = 0; -#endif - -/* -** We do not trust systems to provide a working fdatasync(). Some do. -** Others do no. To be safe, we will stick with the (slightly slower) -** fsync(). If you know that your system does support fdatasync() correctly, -** then simply compile with -Dfdatasync=fdatasync -*/ -#if !defined(fdatasync) -# define fdatasync fsync -#endif - -/* -** Define HAVE_FULLFSYNC to 0 or 1 depending on whether or not -** the F_FULLFSYNC macro is defined. F_FULLFSYNC is currently -** only available on Mac OS X. But that could change. -*/ -#ifdef F_FULLFSYNC -# define HAVE_FULLFSYNC 1 -#else -# define HAVE_FULLFSYNC 0 -#endif - - -/* -** The fsync() system call does not work as advertised on many -** unix systems. The following procedure is an attempt to make -** it work better. -** -** The SQLITE_NO_SYNC macro disables all fsync()s. This is useful -** for testing when we want to run through the test suite quickly. -** You are strongly advised *not* to deploy with SQLITE_NO_SYNC -** enabled, however, since with SQLITE_NO_SYNC enabled, an OS crash -** or power failure will likely corrupt the database file. -** -** SQLite sets the dataOnly flag if the size of the file is unchanged. -** The idea behind dataOnly is that it should only write the file content -** to disk, not the inode. We only set dataOnly if the file size is -** unchanged since the file size is part of the inode. However, -** Ted Ts'o tells us that fdatasync() will also write the inode if the -** file size has changed. The only real difference between fdatasync() -** and fsync(), Ted tells us, is that fdatasync() will not flush the -** inode if the mtime or owner or other inode attributes have changed. -** We only care about the file size, not the other file attributes, so -** as far as SQLite is concerned, an fdatasync() is always adequate. -** So, we always use fdatasync() if it is available, regardless of -** the value of the dataOnly flag. -*/ -static int full_fsync(int fd, int fullSync, int dataOnly){ - int rc; - - /* The following "ifdef/elif/else/" block has the same structure as - ** the one below. It is replicated here solely to avoid cluttering - ** up the real code with the UNUSED_PARAMETER() macros. 
- */ -#ifdef SQLITE_NO_SYNC - UNUSED_PARAMETER(fd); - UNUSED_PARAMETER(fullSync); - UNUSED_PARAMETER(dataOnly); -#elif HAVE_FULLFSYNC - UNUSED_PARAMETER(dataOnly); -#else - UNUSED_PARAMETER(fullSync); - UNUSED_PARAMETER(dataOnly); -#endif - - /* Record the number of times that we do a normal fsync() and - ** FULLSYNC. This is used during testing to verify that this procedure - ** gets called with the correct arguments. - */ -#ifdef SQLITE_TEST - if( fullSync ) sqlite3_fullsync_count++; - sqlite3_sync_count++; -#endif - - /* If we compiled with the SQLITE_NO_SYNC flag, then syncing is a - ** no-op - */ -#ifdef SQLITE_NO_SYNC - rc = SQLITE_OK; -#elif HAVE_FULLFSYNC - if( fullSync ){ - rc = osFcntl(fd, F_FULLFSYNC, 0); - }else{ - rc = 1; - } - /* If the FULLFSYNC failed, fall back to attempting an fsync(). - ** It shouldn't be possible for fullfsync to fail on the local - ** file system (on OSX), so failure indicates that FULLFSYNC - ** isn't supported for this file system. So, attempt an fsync - ** and (for now) ignore the overhead of a superfluous fcntl call. - ** It'd be better to detect fullfsync support once and avoid - ** the fcntl call every time sync is called. - */ - if( rc ) rc = fsync(fd); - -#elif defined(__APPLE__) - /* fdatasync() on HFS+ doesn't yet flush the file size if it changed correctly - ** so currently we default to the macro that redefines fdatasync to fsync - */ - rc = fsync(fd); -#else - rc = fdatasync(fd); -#if OS_VXWORKS - if( rc==-1 && errno==ENOTSUP ){ - rc = fsync(fd); - } -#endif /* OS_VXWORKS */ -#endif /* ifdef SQLITE_NO_SYNC elif HAVE_FULLFSYNC */ - - if( OS_VXWORKS && rc!= -1 ){ - rc = 0; - } - return rc; -} - -/* -** Open a file descriptor to the directory containing file zFilename. -** If successful, *pFd is set to the opened file descriptor and -** SQLITE_OK is returned. If an error occurs, either SQLITE_NOMEM -** or SQLITE_CANTOPEN is returned and *pFd is set to an undefined -** value. -** -** The directory file descriptor is used for only one thing - to -** fsync() a directory to make sure file creation and deletion events -** are flushed to disk. Such fsyncs are not needed on newer -** journaling filesystems, but are required on older filesystems. -** -** This routine can be overridden using the xSetSysCall interface. -** The ability to override this routine was added in support of the -** chromium sandbox. Opening a directory is a security risk (we are -** told) so making it overrideable allows the chromium sandbox to -** replace this routine with a harmless no-op. To make this routine -** a no-op, replace it with a stub that returns SQLITE_OK but leaves -** *pFd set to a negative number. -** -** If SQLITE_OK is returned, the caller is responsible for closing -** the file descriptor *pFd using close(). -*/ -static int openDirectory(const char *zFilename, int *pFd){ - int ii; - int fd = -1; - char zDirname[MAX_PATHNAME+1]; - - sqlite3_snprintf(MAX_PATHNAME, zDirname, "%s", zFilename); - for(ii=(int)strlen(zDirname); ii>1 && zDirname[ii]!='/'; ii--); - if( ii>0 ){ - zDirname[ii] = '\0'; - fd = robust_open(zDirname, O_RDONLY|O_BINARY, 0); - if( fd>=0 ){ - OSTRACE(("OPENDIR %-3d %s\n", fd, zDirname)); - } - } - *pFd = fd; - return (fd>=0?SQLITE_OK:unixLogError(SQLITE_CANTOPEN_BKPT, "open", zDirname)); -} - -/* -** Make sure all writes to a particular file are committed to disk. -** -** If dataOnly==0 then both the file itself and its metadata (file -** size, access time, etc) are synced. If dataOnly!=0 then only the -** file data is synced. 
-** -** Under Unix, also make sure that the directory entry for the file -** has been created by fsync-ing the directory that contains the file. -** If we do not do this and we encounter a power failure, the directory -** entry for the journal might not exist after we reboot. The next -** SQLite to access the file will not know that the journal exists (because -** the directory entry for the journal was never created) and the transaction -** will not roll back - possibly leading to database corruption. -*/ -static int unixSync(sqlite3_file *id, int flags){ - int rc; - unixFile *pFile = (unixFile*)id; - - int isDataOnly = (flags&SQLITE_SYNC_DATAONLY); - int isFullsync = (flags&0x0F)==SQLITE_SYNC_FULL; - - /* Check that one of SQLITE_SYNC_NORMAL or FULL was passed */ - assert((flags&0x0F)==SQLITE_SYNC_NORMAL - || (flags&0x0F)==SQLITE_SYNC_FULL - ); - - /* Unix cannot, but some systems may return SQLITE_FULL from here. This - ** line is to test that doing so does not cause any problems. - */ - SimulateDiskfullError( return SQLITE_FULL ); - - assert( pFile ); - OSTRACE(("SYNC %-3d\n", pFile->h)); - rc = full_fsync(pFile->h, isFullsync, isDataOnly); - SimulateIOError( rc=1 ); - if( rc ){ - pFile->lastErrno = errno; - return unixLogError(SQLITE_IOERR_FSYNC, "full_fsync", pFile->zPath); - } - - /* Also fsync the directory containing the file if the DIRSYNC flag - ** is set. This is a one-time occurrence. Many systems (examples: AIX) - ** are unable to fsync a directory, so ignore errors on the fsync. - */ - if( pFile->ctrlFlags & UNIXFILE_DIRSYNC ){ - int dirfd; - OSTRACE(("DIRSYNC %s (have_fullfsync=%d fullsync=%d)\n", pFile->zPath, - HAVE_FULLFSYNC, isFullsync)); - rc = osOpenDirectory(pFile->zPath, &dirfd); - if( rc==SQLITE_OK && dirfd>=0 ){ - full_fsync(dirfd, 0, 0); - robust_close(pFile, dirfd, __LINE__); - }else if( rc==SQLITE_CANTOPEN ){ - rc = SQLITE_OK; - } - pFile->ctrlFlags &= ~UNIXFILE_DIRSYNC; - } - return rc; -} - -/* -** Truncate an open file to a specified size -*/ -static int unixTruncate(sqlite3_file *id, i64 nByte){ - unixFile *pFile = (unixFile *)id; - int rc; - assert( pFile ); - SimulateIOError( return SQLITE_IOERR_TRUNCATE ); - - /* If the user has configured a chunk-size for this file, truncate the - ** file so that it consists of an integer number of chunks (i.e. the - ** actual file size after the operation may be larger than the requested - ** size). - */ - if( pFile->szChunk>0 ){ - nByte = ((nByte + pFile->szChunk - 1)/pFile->szChunk) * pFile->szChunk; - } - - rc = robust_ftruncate(pFile->h, (off_t)nByte); - if( rc ){ - pFile->lastErrno = errno; - return unixLogError(SQLITE_IOERR_TRUNCATE, "ftruncate", pFile->zPath); - }else{ -#ifdef SQLITE_DEBUG - /* If we are doing a normal write to a database file (as opposed to - ** doing a hot-journal rollback or a write to some file other than a - ** normal database file) and we truncate the file to zero length, - ** that effectively updates the change counter. This might happen - ** when restoring a database using the backup API from a zero-length - ** source. - */ - if( pFile->inNormalWrite && nByte==0 ){ - pFile->transCntrChng = 1; - } -#endif - -#if SQLITE_MAX_MMAP_SIZE>0 - /* If the file was just truncated to a size smaller than the currently - ** mapped region, reduce the effective mapping size as well. SQLite will - ** use read() and write() to access data beyond this point from now on. 
- */ - if( nBytemmapSize ){ - pFile->mmapSize = nByte; - } -#endif - - return SQLITE_OK; - } -} - -/* -** Determine the current size of a file in bytes -*/ -static int unixFileSize(sqlite3_file *id, i64 *pSize){ - int rc; - struct stat buf; - assert( id ); - rc = osFstat(((unixFile*)id)->h, &buf); - SimulateIOError( rc=1 ); - if( rc!=0 ){ - ((unixFile*)id)->lastErrno = errno; - return SQLITE_IOERR_FSTAT; - } - *pSize = buf.st_size; - - /* When opening a zero-size database, the findInodeInfo() procedure - ** writes a single byte into that file in order to work around a bug - ** in the OS-X msdos filesystem. In order to avoid problems with upper - ** layers, we need to report this file size as zero even though it is - ** really 1. Ticket #3260. - */ - if( *pSize==1 ) *pSize = 0; - - - return SQLITE_OK; -} - -#if SQLITE_ENABLE_LOCKING_STYLE && defined(__APPLE__) -/* -** Handler for proxy-locking file-control verbs. Defined below in the -** proxying locking division. -*/ -static int proxyFileControl(sqlite3_file*,int,void*); -#endif - -/* -** This function is called to handle the SQLITE_FCNTL_SIZE_HINT -** file-control operation. Enlarge the database to nBytes in size -** (rounded up to the next chunk-size). If the database is already -** nBytes or larger, this routine is a no-op. -*/ -static int fcntlSizeHint(unixFile *pFile, i64 nByte){ - if( pFile->szChunk>0 ){ - i64 nSize; /* Required file size */ - struct stat buf; /* Used to hold return values of fstat() */ - - if( osFstat(pFile->h, &buf) ) return SQLITE_IOERR_FSTAT; - - nSize = ((nByte+pFile->szChunk-1) / pFile->szChunk) * pFile->szChunk; - if( nSize>(i64)buf.st_size ){ - -#if defined(HAVE_POSIX_FALLOCATE) && HAVE_POSIX_FALLOCATE - /* The code below is handling the return value of osFallocate() - ** correctly. posix_fallocate() is defined to "returns zero on success, - ** or an error number on failure". See the manpage for details. */ - int err; - do{ - err = osFallocate(pFile->h, buf.st_size, nSize-buf.st_size); - }while( err==EINTR ); - if( err ) return SQLITE_IOERR_WRITE; -#else - /* If the OS does not have posix_fallocate(), fake it. First use - ** ftruncate() to set the file size, then write a single byte to - ** the last byte in each block within the extended region. This - ** is the same technique used by glibc to implement posix_fallocate() - ** on systems that do not have a real fallocate() system call. - */ - int nBlk = buf.st_blksize; /* File-system block size */ - i64 iWrite; /* Next offset to write to */ - - if( robust_ftruncate(pFile->h, nSize) ){ - pFile->lastErrno = errno; - return unixLogError(SQLITE_IOERR_TRUNCATE, "ftruncate", pFile->zPath); - } - iWrite = ((buf.st_size + 2*nBlk - 1)/nBlk)*nBlk-1; - while( iWrite0 - if( pFile->mmapSizeMax>0 && nByte>pFile->mmapSize ){ - int rc; - if( pFile->szChunk<=0 ){ - if( robust_ftruncate(pFile->h, nByte) ){ - pFile->lastErrno = errno; - return unixLogError(SQLITE_IOERR_TRUNCATE, "ftruncate", pFile->zPath); - } - } - - rc = unixMapfile(pFile, nByte); - return rc; - } -#endif - - return SQLITE_OK; -} - -/* -** If *pArg is inititially negative then this is a query. Set *pArg to -** 1 or 0 depending on whether or not bit mask of pFile->ctrlFlags is set. -** -** If *pArg is 0 or 1, then clear or set the mask bit of pFile->ctrlFlags. 
-*/ -static void unixModeBit(unixFile *pFile, unsigned char mask, int *pArg){ - if( *pArg<0 ){ - *pArg = (pFile->ctrlFlags & mask)!=0; - }else if( (*pArg)==0 ){ - pFile->ctrlFlags &= ~mask; - }else{ - pFile->ctrlFlags |= mask; - } -} - -/* Forward declaration */ -static int unixGetTempname(int nBuf, char *zBuf); - -/* -** Information and control of an open file handle. -*/ -static int unixFileControl(sqlite3_file *id, int op, void *pArg){ - unixFile *pFile = (unixFile*)id; - switch( op ){ - case SQLITE_FCNTL_LOCKSTATE: { - *(int*)pArg = pFile->eFileLock; - return SQLITE_OK; - } - case SQLITE_LAST_ERRNO: { - *(int*)pArg = pFile->lastErrno; - return SQLITE_OK; - } - case SQLITE_FCNTL_CHUNK_SIZE: { - pFile->szChunk = *(int *)pArg; - return SQLITE_OK; - } - case SQLITE_FCNTL_SIZE_HINT: { - int rc; - SimulateIOErrorBenign(1); - rc = fcntlSizeHint(pFile, *(i64 *)pArg); - SimulateIOErrorBenign(0); - return rc; - } - case SQLITE_FCNTL_PERSIST_WAL: { - unixModeBit(pFile, UNIXFILE_PERSIST_WAL, (int*)pArg); - return SQLITE_OK; - } - case SQLITE_FCNTL_POWERSAFE_OVERWRITE: { - unixModeBit(pFile, UNIXFILE_PSOW, (int*)pArg); - return SQLITE_OK; - } - case SQLITE_FCNTL_VFSNAME: { - *(char**)pArg = sqlite3_mprintf("%s", pFile->pVfs->zName); - return SQLITE_OK; - } - case SQLITE_FCNTL_TEMPFILENAME: { - char *zTFile = sqlite3_malloc( pFile->pVfs->mxPathname ); - if( zTFile ){ - unixGetTempname(pFile->pVfs->mxPathname, zTFile); - *(char**)pArg = zTFile; - } - return SQLITE_OK; - } - case SQLITE_FCNTL_HAS_MOVED: { - *(int*)pArg = fileHasMoved(pFile); - return SQLITE_OK; - } -#if SQLITE_MAX_MMAP_SIZE>0 - case SQLITE_FCNTL_MMAP_SIZE: { - i64 newLimit = *(i64*)pArg; - int rc = SQLITE_OK; - if( newLimit>sqlite3GlobalConfig.mxMmap ){ - newLimit = sqlite3GlobalConfig.mxMmap; - } - *(i64*)pArg = pFile->mmapSizeMax; - if( newLimit>=0 && newLimit!=pFile->mmapSizeMax && pFile->nFetchOut==0 ){ - pFile->mmapSizeMax = newLimit; - if( pFile->mmapSize>0 ){ - unixUnmapfile(pFile); - rc = unixMapfile(pFile, -1); - } - } - return rc; - } -#endif -#ifdef SQLITE_DEBUG - /* The pager calls this method to signal that it has done - ** a rollback and that the database is therefore unchanged and - ** it hence it is OK for the transaction change counter to be - ** unchanged. - */ - case SQLITE_FCNTL_DB_UNCHANGED: { - ((unixFile*)id)->dbUpdate = 0; - return SQLITE_OK; - } -#endif -#if SQLITE_ENABLE_LOCKING_STYLE && defined(__APPLE__) - case SQLITE_SET_LOCKPROXYFILE: - case SQLITE_GET_LOCKPROXYFILE: { - return proxyFileControl(id,op,pArg); - } -#endif /* SQLITE_ENABLE_LOCKING_STYLE && defined(__APPLE__) */ - } - return SQLITE_NOTFOUND; -} - -/* -** Return the sector size in bytes of the underlying block device for -** the specified file. This is almost always 512 bytes, but may be -** larger for some devices. -** -** SQLite code assumes this function cannot fail. It also assumes that -** if two files are created in the same file-system directory (i.e. -** a database and its journal file) that the sector size will be the -** same for both. -*/ -#ifndef __QNXNTO__ -static int unixSectorSize(sqlite3_file *NotUsed){ - UNUSED_PARAMETER(NotUsed); - return SQLITE_DEFAULT_SECTOR_SIZE; -} -#endif - -/* -** The following version of unixSectorSize() is optimized for QNX. 
-*/ -#ifdef __QNXNTO__ -#include <sys/dcmd_blk.h> -#include <sys/statvfs.h> -static int unixSectorSize(sqlite3_file *id){ - unixFile *pFile = (unixFile*)id; - if( pFile->sectorSize == 0 ){ - struct statvfs fsInfo; - - /* Set defaults for non-supported filesystems */ - pFile->sectorSize = SQLITE_DEFAULT_SECTOR_SIZE; - pFile->deviceCharacteristics = 0; - if( fstatvfs(pFile->h, &fsInfo) == -1 ) { - return pFile->sectorSize; - } - - if( !strcmp(fsInfo.f_basetype, "tmp") ) { - pFile->sectorSize = fsInfo.f_bsize; - pFile->deviceCharacteristics = - SQLITE_IOCAP_ATOMIC4K | /* All ram filesystem writes are atomic */ - SQLITE_IOCAP_SAFE_APPEND | /* growing the file does not occur until - ** the write succeeds */ - SQLITE_IOCAP_SEQUENTIAL | /* The ram filesystem has no write behind - ** so it is ordered */ - 0; - }else if( strstr(fsInfo.f_basetype, "etfs") ){ - pFile->sectorSize = fsInfo.f_bsize; - pFile->deviceCharacteristics = - /* etfs cluster size writes are atomic */ - (pFile->sectorSize / 512 * SQLITE_IOCAP_ATOMIC512) | - SQLITE_IOCAP_SAFE_APPEND | /* growing the file does not occur until - ** the write succeeds */ - SQLITE_IOCAP_SEQUENTIAL | /* The ram filesystem has no write behind - ** so it is ordered */ - 0; - }else if( !strcmp(fsInfo.f_basetype, "qnx6") ){ - pFile->sectorSize = fsInfo.f_bsize; - pFile->deviceCharacteristics = - SQLITE_IOCAP_ATOMIC | /* All filesystem writes are atomic */ - SQLITE_IOCAP_SAFE_APPEND | /* growing the file does not occur until - ** the write succeeds */ - SQLITE_IOCAP_SEQUENTIAL | /* The ram filesystem has no write behind - ** so it is ordered */ - 0; - }else if( !strcmp(fsInfo.f_basetype, "qnx4") ){ - pFile->sectorSize = fsInfo.f_bsize; - pFile->deviceCharacteristics = - /* full bitset of atomics from max sector size and smaller */ - ((pFile->sectorSize / 512 * SQLITE_IOCAP_ATOMIC512) << 1) - 2 | - SQLITE_IOCAP_SEQUENTIAL | /* The ram filesystem has no write behind - ** so it is ordered */ - 0; - }else if( strstr(fsInfo.f_basetype, "dos") ){ - pFile->sectorSize = fsInfo.f_bsize; - pFile->deviceCharacteristics = - /* full bitset of atomics from max sector size and smaller */ - ((pFile->sectorSize / 512 * SQLITE_IOCAP_ATOMIC512) << 1) - 2 | - SQLITE_IOCAP_SEQUENTIAL | /* The ram filesystem has no write behind - ** so it is ordered */ - 0; - }else{ - pFile->deviceCharacteristics = - SQLITE_IOCAP_ATOMIC512 | /* blocks are atomic */ - SQLITE_IOCAP_SAFE_APPEND | /* growing the file does not occur until - ** the write succeeds */ - 0; - } - } - /* Last chance verification. If the sector size isn't a multiple of 512 - ** then it isn't valid.*/ - if( pFile->sectorSize % 512 != 0 ){ - pFile->deviceCharacteristics = 0; - pFile->sectorSize = SQLITE_DEFAULT_SECTOR_SIZE; - } - return pFile->sectorSize; -} -#endif /* __QNXNTO__ */ - -/* -** Return the device characteristics for the file. -** -** This VFS is set up to return SQLITE_IOCAP_POWERSAFE_OVERWRITE by default. -** However, that choice is contraversial since technically the underlying -** file system does not always provide powersafe overwrites. (In other -** words, after a power-loss event, parts of the file that were never -** written might end up being altered.) However, non-PSOW behavior is very, -** very rare. And asserting PSOW makes a large reduction in the amount -** of required I/O for journaling, since a lot of padding is eliminated. -** Hence, while POWERSAFE_OVERWRITE is on by default, there is a file-control -** available to turn it off and URI query parameter available to turn it off. 
-*/ -static int unixDeviceCharacteristics(sqlite3_file *id){ - unixFile *p = (unixFile*)id; - int rc = 0; -#ifdef __QNXNTO__ - if( p->sectorSize==0 ) unixSectorSize(id); - rc = p->deviceCharacteristics; -#endif - if( p->ctrlFlags & UNIXFILE_PSOW ){ - rc |= SQLITE_IOCAP_POWERSAFE_OVERWRITE; - } - return rc; -} - -#ifndef SQLITE_OMIT_WAL - - -/* -** Object used to represent an shared memory buffer. -** -** When multiple threads all reference the same wal-index, each thread -** has its own unixShm object, but they all point to a single instance -** of this unixShmNode object. In other words, each wal-index is opened -** only once per process. -** -** Each unixShmNode object is connected to a single unixInodeInfo object. -** We could coalesce this object into unixInodeInfo, but that would mean -** every open file that does not use shared memory (in other words, most -** open files) would have to carry around this extra information. So -** the unixInodeInfo object contains a pointer to this unixShmNode object -** and the unixShmNode object is created only when needed. -** -** unixMutexHeld() must be true when creating or destroying -** this object or while reading or writing the following fields: -** -** nRef -** -** The following fields are read-only after the object is created: -** -** fid -** zFilename -** -** Either unixShmNode.mutex must be held or unixShmNode.nRef==0 and -** unixMutexHeld() is true when reading or writing any other field -** in this structure. -*/ -struct unixShmNode { - unixInodeInfo *pInode; /* unixInodeInfo that owns this SHM node */ - sqlite3_mutex *mutex; /* Mutex to access this object */ - char *zFilename; /* Name of the mmapped file */ - int h; /* Open file descriptor */ - int szRegion; /* Size of shared-memory regions */ - u16 nRegion; /* Size of array apRegion */ - u8 isReadonly; /* True if read-only */ - char **apRegion; /* Array of mapped shared-memory regions */ - int nRef; /* Number of unixShm objects pointing to this */ - unixShm *pFirst; /* All unixShm objects pointing to this */ -#ifdef SQLITE_DEBUG - u8 exclMask; /* Mask of exclusive locks held */ - u8 sharedMask; /* Mask of shared locks held */ - u8 nextShmId; /* Next available unixShm.id value */ -#endif -}; - -/* -** Structure used internally by this VFS to record the state of an -** open shared memory connection. -** -** The following fields are initialized when this object is created and -** are read-only thereafter: -** -** unixShm.pFile -** unixShm.id -** -** All other fields are read/write. The unixShm.pFile->mutex must be held -** while accessing any read/write fields. -*/ -struct unixShm { - unixShmNode *pShmNode; /* The underlying unixShmNode object */ - unixShm *pNext; /* Next unixShm with the same unixShmNode */ - u8 hasMutex; /* True if holding the unixShmNode mutex */ - u8 id; /* Id of this connection within its unixShmNode */ - u16 sharedMask; /* Mask of shared locks held */ - u16 exclMask; /* Mask of exclusive locks held */ -}; - -/* -** Constants used for locking -*/ -#define UNIX_SHM_BASE ((22+SQLITE_SHM_NLOCK)*4) /* first lock byte */ -#define UNIX_SHM_DMS (UNIX_SHM_BASE+SQLITE_SHM_NLOCK) /* deadman switch */ - -/* -** Apply posix advisory locks for all bytes from ofst through ofst+n-1. -** -** Locks block if the mask is exactly UNIX_SHM_C and are non-blocking -** otherwise. 
-*/ -static int unixShmSystemLock( - unixShmNode *pShmNode, /* Apply locks to this open shared-memory segment */ - int lockType, /* F_UNLCK, F_RDLCK, or F_WRLCK */ - int ofst, /* First byte of the locking range */ - int n /* Number of bytes to lock */ -){ - struct flock f; /* The posix advisory locking structure */ - int rc = SQLITE_OK; /* Result code form fcntl() */ - - /* Access to the unixShmNode object is serialized by the caller */ - assert( sqlite3_mutex_held(pShmNode->mutex) || pShmNode->nRef==0 ); - - /* Shared locks never span more than one byte */ - assert( n==1 || lockType!=F_RDLCK ); - - /* Locks are within range */ - assert( n>=1 && nh>=0 ){ - /* Initialize the locking parameters */ - memset(&f, 0, sizeof(f)); - f.l_type = lockType; - f.l_whence = SEEK_SET; - f.l_start = ofst; - f.l_len = n; - - rc = osFcntl(pShmNode->h, F_SETLK, &f); - rc = (rc!=(-1)) ? SQLITE_OK : SQLITE_BUSY; - } - - /* Update the global lock state and do debug tracing */ -#ifdef SQLITE_DEBUG - { u16 mask; - OSTRACE(("SHM-LOCK ")); - mask = ofst>31 ? 0xffff : (1<<(ofst+n)) - (1<exclMask &= ~mask; - pShmNode->sharedMask &= ~mask; - }else if( lockType==F_RDLCK ){ - OSTRACE(("read-lock %d ok", ofst)); - pShmNode->exclMask &= ~mask; - pShmNode->sharedMask |= mask; - }else{ - assert( lockType==F_WRLCK ); - OSTRACE(("write-lock %d ok", ofst)); - pShmNode->exclMask |= mask; - pShmNode->sharedMask &= ~mask; - } - }else{ - if( lockType==F_UNLCK ){ - OSTRACE(("unlock %d failed", ofst)); - }else if( lockType==F_RDLCK ){ - OSTRACE(("read-lock failed")); - }else{ - assert( lockType==F_WRLCK ); - OSTRACE(("write-lock %d failed", ofst)); - } - } - OSTRACE((" - afterwards %03x,%03x\n", - pShmNode->sharedMask, pShmNode->exclMask)); - } -#endif - - return rc; -} - -/* -** Return the system page size. -** -** This function should not be called directly by other code in this file. -** Instead, it should be called via macro osGetpagesize(). -*/ -static int unixGetpagesize(void){ -#if defined(_BSD_SOURCE) - return getpagesize(); -#else - return (int)sysconf(_SC_PAGESIZE); -#endif -} - -/* -** Return the minimum number of 32KB shm regions that should be mapped at -** a time, assuming that each mapping must be an integer multiple of the -** current system page-size. -** -** Usually, this is 1. The exception seems to be systems that are configured -** to use 64KB pages - in this case each mapping must cover at least two -** shm regions. -*/ -static int unixShmRegionPerMap(void){ - int shmsz = 32*1024; /* SHM region size */ - int pgsz = osGetpagesize(); /* System page size */ - assert( ((pgsz-1)&pgsz)==0 ); /* Page size must be a power of 2 */ - if( pgszpInode->pShmNode; - assert( unixMutexHeld() ); - if( p && p->nRef==0 ){ - int nShmPerMap = unixShmRegionPerMap(); - int i; - assert( p->pInode==pFd->pInode ); - sqlite3_mutex_free(p->mutex); - for(i=0; inRegion; i+=nShmPerMap){ - if( p->h>=0 ){ - osMunmap(p->apRegion[i], p->szRegion); - }else{ - sqlite3_free(p->apRegion[i]); - } - } - sqlite3_free(p->apRegion); - if( p->h>=0 ){ - robust_close(pFd, p->h, __LINE__); - p->h = -1; - } - p->pInode->pShmNode = 0; - sqlite3_free(p); - } -} - -/* -** Open a shared-memory area associated with open database file pDbFd. -** This particular implementation uses mmapped files. -** -** The file used to implement shared-memory is in the same directory -** as the open database file and has the same name as the open database -** file with the "-shm" suffix added. 
For example, if the database file -** is "/home/user1/config.db" then the file that is created and mmapped -** for shared memory will be called "/home/user1/config.db-shm". -** -** Another approach to is to use files in /dev/shm or /dev/tmp or an -** some other tmpfs mount. But if a file in a different directory -** from the database file is used, then differing access permissions -** or a chroot() might cause two different processes on the same -** database to end up using different files for shared memory - -** meaning that their memory would not really be shared - resulting -** in database corruption. Nevertheless, this tmpfs file usage -** can be enabled at compile-time using -DSQLITE_SHM_DIRECTORY="/dev/shm" -** or the equivalent. The use of the SQLITE_SHM_DIRECTORY compile-time -** option results in an incompatible build of SQLite; builds of SQLite -** that with differing SQLITE_SHM_DIRECTORY settings attempt to use the -** same database file at the same time, database corruption will likely -** result. The SQLITE_SHM_DIRECTORY compile-time option is considered -** "unsupported" and may go away in a future SQLite release. -** -** When opening a new shared-memory file, if no other instances of that -** file are currently open, in this process or in other processes, then -** the file must be truncated to zero length or have its header cleared. -** -** If the original database file (pDbFd) is using the "unix-excl" VFS -** that means that an exclusive lock is held on the database file and -** that no other processes are able to read or write the database. In -** that case, we do not really need shared memory. No shared memory -** file is created. The shared memory will be simulated with heap memory. -*/ -static int unixOpenSharedMemory(unixFile *pDbFd){ - struct unixShm *p = 0; /* The connection to be opened */ - struct unixShmNode *pShmNode; /* The underlying mmapped file */ - int rc; /* Result code */ - unixInodeInfo *pInode; /* The inode of fd */ - char *zShmFilename; /* Name of the file used for SHM */ - int nShmFilename; /* Size of the SHM filename in bytes */ - - /* Allocate space for the new unixShm object. */ - p = sqlite3_malloc( sizeof(*p) ); - if( p==0 ) return SQLITE_NOMEM; - memset(p, 0, sizeof(*p)); - assert( pDbFd->pShm==0 ); - - /* Check to see if a unixShmNode object already exists. Reuse an existing - ** one if present. Create a new one if necessary. - */ - unixEnterMutex(); - pInode = pDbFd->pInode; - pShmNode = pInode->pShmNode; - if( pShmNode==0 ){ - struct stat sStat; /* fstat() info for database file */ - - /* Call fstat() to figure out the permissions on the database file. If - ** a new *-shm file is created, an attempt will be made to create it - ** with the same permissions. 
- */ - if( osFstat(pDbFd->h, &sStat) && pInode->bProcessLock==0 ){ - rc = SQLITE_IOERR_FSTAT; - goto shm_open_err; - } - -#ifdef SQLITE_SHM_DIRECTORY - nShmFilename = sizeof(SQLITE_SHM_DIRECTORY) + 31; -#else - nShmFilename = 6 + (int)strlen(pDbFd->zPath); -#endif - pShmNode = sqlite3_malloc( sizeof(*pShmNode) + nShmFilename ); - if( pShmNode==0 ){ - rc = SQLITE_NOMEM; - goto shm_open_err; - } - memset(pShmNode, 0, sizeof(*pShmNode)+nShmFilename); - zShmFilename = pShmNode->zFilename = (char*)&pShmNode[1]; -#ifdef SQLITE_SHM_DIRECTORY - sqlite3_snprintf(nShmFilename, zShmFilename, - SQLITE_SHM_DIRECTORY "/sqlite-shm-%x-%x", - (u32)sStat.st_ino, (u32)sStat.st_dev); -#else - sqlite3_snprintf(nShmFilename, zShmFilename, "%s-shm", pDbFd->zPath); - sqlite3FileSuffix3(pDbFd->zPath, zShmFilename); -#endif - pShmNode->h = -1; - pDbFd->pInode->pShmNode = pShmNode; - pShmNode->pInode = pDbFd->pInode; - pShmNode->mutex = sqlite3_mutex_alloc(SQLITE_MUTEX_FAST); - if( pShmNode->mutex==0 ){ - rc = SQLITE_NOMEM; - goto shm_open_err; - } - - if( pInode->bProcessLock==0 ){ - int openFlags = O_RDWR | O_CREAT; - if( sqlite3_uri_boolean(pDbFd->zPath, "readonly_shm", 0) ){ - openFlags = O_RDONLY; - pShmNode->isReadonly = 1; - } - pShmNode->h = robust_open(zShmFilename, openFlags, (sStat.st_mode&0777)); - if( pShmNode->h<0 ){ - rc = unixLogError(SQLITE_CANTOPEN_BKPT, "open", zShmFilename); - goto shm_open_err; - } - - /* If this process is running as root, make sure that the SHM file - ** is owned by the same user that owns the original database. Otherwise, - ** the original owner will not be able to connect. - */ - osFchown(pShmNode->h, sStat.st_uid, sStat.st_gid); - - /* Check to see if another process is holding the dead-man switch. - ** If not, truncate the file to zero length. - */ - rc = SQLITE_OK; - if( unixShmSystemLock(pShmNode, F_WRLCK, UNIX_SHM_DMS, 1)==SQLITE_OK ){ - if( robust_ftruncate(pShmNode->h, 0) ){ - rc = unixLogError(SQLITE_IOERR_SHMOPEN, "ftruncate", zShmFilename); - } - } - if( rc==SQLITE_OK ){ - rc = unixShmSystemLock(pShmNode, F_RDLCK, UNIX_SHM_DMS, 1); - } - if( rc ) goto shm_open_err; - } - } - - /* Make the new connection a child of the unixShmNode */ - p->pShmNode = pShmNode; -#ifdef SQLITE_DEBUG - p->id = pShmNode->nextShmId++; -#endif - pShmNode->nRef++; - pDbFd->pShm = p; - unixLeaveMutex(); - - /* The reference count on pShmNode has already been incremented under - ** the cover of the unixEnterMutex() mutex and the pointer from the - ** new (struct unixShm) object to the pShmNode has been set. All that is - ** left to do is to link the new object into the linked list starting - ** at pShmNode->pFirst. This must be done while holding the pShmNode->mutex - ** mutex. - */ - sqlite3_mutex_enter(pShmNode->mutex); - p->pNext = pShmNode->pFirst; - pShmNode->pFirst = p; - sqlite3_mutex_leave(pShmNode->mutex); - return SQLITE_OK; - - /* Jump here on any error */ -shm_open_err: - unixShmPurge(pDbFd); /* This call frees pShmNode if required */ - sqlite3_free(p); - unixLeaveMutex(); - return rc; -} - -/* -** This function is called to obtain a pointer to region iRegion of the -** shared-memory associated with the database file fd. Shared-memory regions -** are numbered starting from zero. Each shared-memory region is szRegion -** bytes in size. -** -** If an error occurs, an error code is returned and *pp is set to NULL. 
-** -** Otherwise, if the bExtend parameter is 0 and the requested shared-memory -** region has not been allocated (by any client, including one running in a -** separate process), then *pp is set to NULL and SQLITE_OK returned. If -** bExtend is non-zero and the requested shared-memory region has not yet -** been allocated, it is allocated by this function. -** -** If the shared-memory region has already been allocated or is allocated by -** this call as described above, then it is mapped into this processes -** address space (if it is not already), *pp is set to point to the mapped -** memory and SQLITE_OK returned. -*/ -static int unixShmMap( - sqlite3_file *fd, /* Handle open on database file */ - int iRegion, /* Region to retrieve */ - int szRegion, /* Size of regions */ - int bExtend, /* True to extend file if necessary */ - void volatile **pp /* OUT: Mapped memory */ -){ - unixFile *pDbFd = (unixFile*)fd; - unixShm *p; - unixShmNode *pShmNode; - int rc = SQLITE_OK; - int nShmPerMap = unixShmRegionPerMap(); - int nReqRegion; - - /* If the shared-memory file has not yet been opened, open it now. */ - if( pDbFd->pShm==0 ){ - rc = unixOpenSharedMemory(pDbFd); - if( rc!=SQLITE_OK ) return rc; - } - - p = pDbFd->pShm; - pShmNode = p->pShmNode; - sqlite3_mutex_enter(pShmNode->mutex); - assert( szRegion==pShmNode->szRegion || pShmNode->nRegion==0 ); - assert( pShmNode->pInode==pDbFd->pInode ); - assert( pShmNode->h>=0 || pDbFd->pInode->bProcessLock==1 ); - assert( pShmNode->h<0 || pDbFd->pInode->bProcessLock==0 ); - - /* Minimum number of regions required to be mapped. */ - nReqRegion = ((iRegion+nShmPerMap) / nShmPerMap) * nShmPerMap; - - if( pShmNode->nRegionszRegion = szRegion; - - if( pShmNode->h>=0 ){ - /* The requested region is not mapped into this processes address space. - ** Check to see if it has been allocated (i.e. if the wal-index file is - ** large enough to contain the requested region). - */ - if( osFstat(pShmNode->h, &sStat) ){ - rc = SQLITE_IOERR_SHMSIZE; - goto shmpage_out; - } - - if( sStat.st_sizeh, iPg*pgsz + pgsz-1, "", 1, 0)!=1 ){ - const char *zFile = pShmNode->zFilename; - rc = unixLogError(SQLITE_IOERR_SHMSIZE, "write", zFile); - goto shmpage_out; - } - } - } - } - } - - /* Map the requested memory region into this processes address space. */ - apNew = (char **)sqlite3_realloc( - pShmNode->apRegion, nReqRegion*sizeof(char *) - ); - if( !apNew ){ - rc = SQLITE_IOERR_NOMEM; - goto shmpage_out; - } - pShmNode->apRegion = apNew; - while( pShmNode->nRegionh>=0 ){ - pMem = osMmap(0, nMap, - pShmNode->isReadonly ? PROT_READ : PROT_READ|PROT_WRITE, - MAP_SHARED, pShmNode->h, szRegion*(i64)pShmNode->nRegion - ); - if( pMem==MAP_FAILED ){ - rc = unixLogError(SQLITE_IOERR_SHMMAP, "mmap", pShmNode->zFilename); - goto shmpage_out; - } - }else{ - pMem = sqlite3_malloc(szRegion); - if( pMem==0 ){ - rc = SQLITE_NOMEM; - goto shmpage_out; - } - memset(pMem, 0, szRegion); - } - - for(i=0; iapRegion[pShmNode->nRegion+i] = &((char*)pMem)[szRegion*i]; - } - pShmNode->nRegion += nShmPerMap; - } - } - -shmpage_out: - if( pShmNode->nRegion>iRegion ){ - *pp = pShmNode->apRegion[iRegion]; - }else{ - *pp = 0; - } - if( pShmNode->isReadonly && rc==SQLITE_OK ) rc = SQLITE_READONLY; - sqlite3_mutex_leave(pShmNode->mutex); - return rc; -} - -/* -** Change the lock state for a shared-memory segment. -** -** Note that the relationship between SHAREd and EXCLUSIVE locks is a little -** different here than in posix. 
In xShmLock(), one can go from unlocked -** to shared and back or from unlocked to exclusive and back. But one may -** not go from shared to exclusive or from exclusive to shared. -*/ -static int unixShmLock( - sqlite3_file *fd, /* Database file holding the shared memory */ - int ofst, /* First lock to acquire or release */ - int n, /* Number of locks to acquire or release */ - int flags /* What to do with the lock */ -){ - unixFile *pDbFd = (unixFile*)fd; /* Connection holding shared memory */ - unixShm *p = pDbFd->pShm; /* The shared memory being locked */ - unixShm *pX; /* For looping over all siblings */ - unixShmNode *pShmNode = p->pShmNode; /* The underlying file iNode */ - int rc = SQLITE_OK; /* Result code */ - u16 mask; /* Mask of locks to take or release */ - - assert( pShmNode==pDbFd->pInode->pShmNode ); - assert( pShmNode->pInode==pDbFd->pInode ); - assert( ofst>=0 && ofst+n<=SQLITE_SHM_NLOCK ); - assert( n>=1 ); - assert( flags==(SQLITE_SHM_LOCK | SQLITE_SHM_SHARED) - || flags==(SQLITE_SHM_LOCK | SQLITE_SHM_EXCLUSIVE) - || flags==(SQLITE_SHM_UNLOCK | SQLITE_SHM_SHARED) - || flags==(SQLITE_SHM_UNLOCK | SQLITE_SHM_EXCLUSIVE) ); - assert( n==1 || (flags & SQLITE_SHM_EXCLUSIVE)!=0 ); - assert( pShmNode->h>=0 || pDbFd->pInode->bProcessLock==1 ); - assert( pShmNode->h<0 || pDbFd->pInode->bProcessLock==0 ); - - mask = (1<<(ofst+n)) - (1<1 || mask==(1<mutex); - if( flags & SQLITE_SHM_UNLOCK ){ - u16 allMask = 0; /* Mask of locks held by siblings */ - - /* See if any siblings hold this same lock */ - for(pX=pShmNode->pFirst; pX; pX=pX->pNext){ - if( pX==p ) continue; - assert( (pX->exclMask & (p->exclMask|p->sharedMask))==0 ); - allMask |= pX->sharedMask; - } - - /* Unlock the system-level locks */ - if( (mask & allMask)==0 ){ - rc = unixShmSystemLock(pShmNode, F_UNLCK, ofst+UNIX_SHM_BASE, n); - }else{ - rc = SQLITE_OK; - } - - /* Undo the local locks */ - if( rc==SQLITE_OK ){ - p->exclMask &= ~mask; - p->sharedMask &= ~mask; - } - }else if( flags & SQLITE_SHM_SHARED ){ - u16 allShared = 0; /* Union of locks held by connections other than "p" */ - - /* Find out which shared locks are already held by sibling connections. - ** If any sibling already holds an exclusive lock, go ahead and return - ** SQLITE_BUSY. - */ - for(pX=pShmNode->pFirst; pX; pX=pX->pNext){ - if( (pX->exclMask & mask)!=0 ){ - rc = SQLITE_BUSY; - break; - } - allShared |= pX->sharedMask; - } - - /* Get shared locks at the system level, if necessary */ - if( rc==SQLITE_OK ){ - if( (allShared & mask)==0 ){ - rc = unixShmSystemLock(pShmNode, F_RDLCK, ofst+UNIX_SHM_BASE, n); - }else{ - rc = SQLITE_OK; - } - } - - /* Get the local shared locks */ - if( rc==SQLITE_OK ){ - p->sharedMask |= mask; - } - }else{ - /* Make sure no sibling connections hold locks that will block this - ** lock. If any do, return SQLITE_BUSY right away. - */ - for(pX=pShmNode->pFirst; pX; pX=pX->pNext){ - if( (pX->exclMask & mask)!=0 || (pX->sharedMask & mask)!=0 ){ - rc = SQLITE_BUSY; - break; - } - } - - /* Get the exclusive locks at the system level. Then if successful - ** also mark the local connection as being locked. - */ - if( rc==SQLITE_OK ){ - rc = unixShmSystemLock(pShmNode, F_WRLCK, ofst+UNIX_SHM_BASE, n); - if( rc==SQLITE_OK ){ - assert( (p->sharedMask & mask)==0 ); - p->exclMask |= mask; - } - } - } - sqlite3_mutex_leave(pShmNode->mutex); - OSTRACE(("SHM-LOCK shmid-%d, pid-%d got %03x,%03x\n", - p->id, getpid(), p->sharedMask, p->exclMask)); - return rc; -} - -/* -** Implement a memory barrier or memory fence on shared memory. 
-** -** All loads and stores begun before the barrier must complete before -** any load or store begun after the barrier. -*/ -static void unixShmBarrier( - sqlite3_file *fd /* Database file holding the shared memory */ -){ - UNUSED_PARAMETER(fd); - unixEnterMutex(); - unixLeaveMutex(); -} - -/* -** Close a connection to shared-memory. Delete the underlying -** storage if deleteFlag is true. -** -** If there is no shared memory associated with the connection then this -** routine is a harmless no-op. -*/ -static int unixShmUnmap( - sqlite3_file *fd, /* The underlying database file */ - int deleteFlag /* Delete shared-memory if true */ -){ - unixShm *p; /* The connection to be closed */ - unixShmNode *pShmNode; /* The underlying shared-memory file */ - unixShm **pp; /* For looping over sibling connections */ - unixFile *pDbFd; /* The underlying database file */ - - pDbFd = (unixFile*)fd; - p = pDbFd->pShm; - if( p==0 ) return SQLITE_OK; - pShmNode = p->pShmNode; - - assert( pShmNode==pDbFd->pInode->pShmNode ); - assert( pShmNode->pInode==pDbFd->pInode ); - - /* Remove connection p from the set of connections associated - ** with pShmNode */ - sqlite3_mutex_enter(pShmNode->mutex); - for(pp=&pShmNode->pFirst; (*pp)!=p; pp = &(*pp)->pNext){} - *pp = p->pNext; - - /* Free the connection p */ - sqlite3_free(p); - pDbFd->pShm = 0; - sqlite3_mutex_leave(pShmNode->mutex); - - /* If pShmNode->nRef has reached 0, then close the underlying - ** shared-memory file, too */ - unixEnterMutex(); - assert( pShmNode->nRef>0 ); - pShmNode->nRef--; - if( pShmNode->nRef==0 ){ - if( deleteFlag && pShmNode->h>=0 ) osUnlink(pShmNode->zFilename); - unixShmPurge(pDbFd); - } - unixLeaveMutex(); - - return SQLITE_OK; -} - - -#else -# define unixShmMap 0 -# define unixShmLock 0 -# define unixShmBarrier 0 -# define unixShmUnmap 0 -#endif /* #ifndef SQLITE_OMIT_WAL */ - -#if SQLITE_MAX_MMAP_SIZE>0 -/* -** If it is currently memory mapped, unmap file pFd. -*/ -static void unixUnmapfile(unixFile *pFd){ - assert( pFd->nFetchOut==0 ); - if( pFd->pMapRegion ){ - osMunmap(pFd->pMapRegion, pFd->mmapSizeActual); - pFd->pMapRegion = 0; - pFd->mmapSize = 0; - pFd->mmapSizeActual = 0; - } -} - -/* -** Attempt to set the size of the memory mapping maintained by file -** descriptor pFd to nNew bytes. Any existing mapping is discarded. -** -** If successful, this function sets the following variables: -** -** unixFile.pMapRegion -** unixFile.mmapSize -** unixFile.mmapSizeActual -** -** If unsuccessful, an error message is logged via sqlite3_log() and -** the three variables above are zeroed. In this case SQLite should -** continue accessing the database using the xRead() and xWrite() -** methods. 
-*/ -static void unixRemapfile( - unixFile *pFd, /* File descriptor object */ - i64 nNew /* Required mapping size */ -){ - const char *zErr = "mmap"; - int h = pFd->h; /* File descriptor open on db file */ - u8 *pOrig = (u8 *)pFd->pMapRegion; /* Pointer to current file mapping */ - i64 nOrig = pFd->mmapSizeActual; /* Size of pOrig region in bytes */ - u8 *pNew = 0; /* Location of new mapping */ - int flags = PROT_READ; /* Flags to pass to mmap() */ - - assert( pFd->nFetchOut==0 ); - assert( nNew>pFd->mmapSize ); - assert( nNew<=pFd->mmapSizeMax ); - assert( nNew>0 ); - assert( pFd->mmapSizeActual>=pFd->mmapSize ); - assert( MAP_FAILED!=0 ); - - if( (pFd->ctrlFlags & UNIXFILE_RDONLY)==0 ) flags |= PROT_WRITE; - - if( pOrig ){ -#if HAVE_MREMAP - i64 nReuse = pFd->mmapSize; -#else - const int szSyspage = osGetpagesize(); - i64 nReuse = (pFd->mmapSize & ~(szSyspage-1)); -#endif - u8 *pReq = &pOrig[nReuse]; - - /* Unmap any pages of the existing mapping that cannot be reused. */ - if( nReuse!=nOrig ){ - osMunmap(pReq, nOrig-nReuse); - } - -#if HAVE_MREMAP - pNew = osMremap(pOrig, nReuse, nNew, MREMAP_MAYMOVE); - zErr = "mremap"; -#else - pNew = osMmap(pReq, nNew-nReuse, flags, MAP_SHARED, h, nReuse); - if( pNew!=MAP_FAILED ){ - if( pNew!=pReq ){ - osMunmap(pNew, nNew - nReuse); - pNew = 0; - }else{ - pNew = pOrig; - } - } -#endif - - /* The attempt to extend the existing mapping failed. Free it. */ - if( pNew==MAP_FAILED || pNew==0 ){ - osMunmap(pOrig, nReuse); - } - } - - /* If pNew is still NULL, try to create an entirely new mapping. */ - if( pNew==0 ){ - pNew = osMmap(0, nNew, flags, MAP_SHARED, h, 0); - } - - if( pNew==MAP_FAILED ){ - pNew = 0; - nNew = 0; - unixLogError(SQLITE_OK, zErr, pFd->zPath); - - /* If the mmap() above failed, assume that all subsequent mmap() calls - ** will probably fail too. Fall back to using xRead/xWrite exclusively - ** in this case. */ - pFd->mmapSizeMax = 0; - } - pFd->pMapRegion = (void *)pNew; - pFd->mmapSize = pFd->mmapSizeActual = nNew; -} - -/* -** Memory map or remap the file opened by file-descriptor pFd (if the file -** is already mapped, the existing mapping is replaced by the new). Or, if -** there already exists a mapping for this file, and there are still -** outstanding xFetch() references to it, this function is a no-op. -** -** If parameter nByte is non-negative, then it is the requested size of -** the mapping to create. Otherwise, if nByte is less than zero, then the -** requested size is the size of the file on disk. The actual size of the -** created mapping is either the requested size or the value configured -** using SQLITE_FCNTL_MMAP_LIMIT, whichever is smaller. -** -** SQLITE_OK is returned if no error occurs (even if the mapping is not -** recreated as a result of outstanding references) or an SQLite error -** code otherwise. -*/ -static int unixMapfile(unixFile *pFd, i64 nByte){ - i64 nMap = nByte; - int rc; - - assert( nMap>=0 || pFd->nFetchOut==0 ); - if( pFd->nFetchOut>0 ) return SQLITE_OK; - - if( nMap<0 ){ - struct stat statbuf; /* Low-level file information */ - rc = osFstat(pFd->h, &statbuf); - if( rc!=SQLITE_OK ){ - return SQLITE_IOERR_FSTAT; - } - nMap = statbuf.st_size; - } - if( nMap>pFd->mmapSizeMax ){ - nMap = pFd->mmapSizeMax; - } - - if( nMap!=pFd->mmapSize ){ - if( nMap>0 ){ - unixRemapfile(pFd, nMap); - }else{ - unixUnmapfile(pFd); - } - } - - return SQLITE_OK; -} -#endif /* SQLITE_MAX_MMAP_SIZE>0 */ - -/* -** If possible, return a pointer to a mapping of file fd starting at offset -** iOff. 
The mapping must be valid for at least nAmt bytes. -** -** If such a pointer can be obtained, store it in *pp and return SQLITE_OK. -** Or, if one cannot but no error occurs, set *pp to 0 and return SQLITE_OK. -** Finally, if an error does occur, return an SQLite error code. The final -** value of *pp is undefined in this case. -** -** If this function does return a pointer, the caller must eventually -** release the reference by calling unixUnfetch(). -*/ -static int unixFetch(sqlite3_file *fd, i64 iOff, int nAmt, void **pp){ -#if SQLITE_MAX_MMAP_SIZE>0 - unixFile *pFd = (unixFile *)fd; /* The underlying database file */ -#endif - *pp = 0; - -#if SQLITE_MAX_MMAP_SIZE>0 - if( pFd->mmapSizeMax>0 ){ - if( pFd->pMapRegion==0 ){ - int rc = unixMapfile(pFd, -1); - if( rc!=SQLITE_OK ) return rc; - } - if( pFd->mmapSize >= iOff+nAmt ){ - *pp = &((u8 *)pFd->pMapRegion)[iOff]; - pFd->nFetchOut++; - } - } -#endif - return SQLITE_OK; -} - -/* -** If the third argument is non-NULL, then this function releases a -** reference obtained by an earlier call to unixFetch(). The second -** argument passed to this function must be the same as the corresponding -** argument that was passed to the unixFetch() invocation. -** -** Or, if the third argument is NULL, then this function is being called -** to inform the VFS layer that, according to POSIX, any existing mapping -** may now be invalid and should be unmapped. -*/ -static int unixUnfetch(sqlite3_file *fd, i64 iOff, void *p){ -#if SQLITE_MAX_MMAP_SIZE>0 - unixFile *pFd = (unixFile *)fd; /* The underlying database file */ - UNUSED_PARAMETER(iOff); - - /* If p==0 (unmap the entire file) then there must be no outstanding - ** xFetch references. Or, if p!=0 (meaning it is an xFetch reference), - ** then there must be at least one outstanding. */ - assert( (p==0)==(pFd->nFetchOut==0) ); - - /* If p!=0, it must match the iOff value. */ - assert( p==0 || p==&((u8 *)pFd->pMapRegion)[iOff] ); - - if( p ){ - pFd->nFetchOut--; - }else{ - unixUnmapfile(pFd); - } - - assert( pFd->nFetchOut>=0 ); -#else - UNUSED_PARAMETER(fd); - UNUSED_PARAMETER(p); - UNUSED_PARAMETER(iOff); -#endif - return SQLITE_OK; -} - -/* -** Here ends the implementation of all sqlite3_file methods. -** -********************** End sqlite3_file Methods ******************************* -******************************************************************************/ - -/* -** This division contains definitions of sqlite3_io_methods objects that -** implement various file locking strategies. It also contains definitions -** of "finder" functions. A finder-function is used to locate the appropriate -** sqlite3_io_methods object for a particular database file. The pAppData -** field of the sqlite3_vfs VFS objects are initialized to be pointers to -** the correct finder-function for that VFS. -** -** Most finder functions return a pointer to a fixed sqlite3_io_methods -** object. The only interesting finder-function is autolockIoFinder, which -** looks at the filesystem type and tries to guess the best locking -** strategy from that. -** -** For finder-funtion F, two objects are created: -** -** (1) The real finder-function named "FImpt()". -** -** (2) A constant pointer to this function named just "F". -** -** -** A pointer to the F pointer is used as the pAppData value for VFS -** objects. We have to do this instead of letting pAppData point -** directly at the finder-function since C90 rules prevent a void* -** from be cast into a function pointer. 
-** -** -** Each instance of this macro generates two objects: -** -** * A constant sqlite3_io_methods object call METHOD that has locking -** methods CLOSE, LOCK, UNLOCK, CKRESLOCK. -** -** * An I/O method finder function called FINDER that returns a pointer -** to the METHOD object in the previous bullet. -*/ -#define IOMETHODS(FINDER, METHOD, VERSION, CLOSE, LOCK, UNLOCK, CKLOCK) \ -static const sqlite3_io_methods METHOD = { \ - VERSION, /* iVersion */ \ - CLOSE, /* xClose */ \ - unixRead, /* xRead */ \ - unixWrite, /* xWrite */ \ - unixTruncate, /* xTruncate */ \ - unixSync, /* xSync */ \ - unixFileSize, /* xFileSize */ \ - LOCK, /* xLock */ \ - UNLOCK, /* xUnlock */ \ - CKLOCK, /* xCheckReservedLock */ \ - unixFileControl, /* xFileControl */ \ - unixSectorSize, /* xSectorSize */ \ - unixDeviceCharacteristics, /* xDeviceCapabilities */ \ - unixShmMap, /* xShmMap */ \ - unixShmLock, /* xShmLock */ \ - unixShmBarrier, /* xShmBarrier */ \ - unixShmUnmap, /* xShmUnmap */ \ - unixFetch, /* xFetch */ \ - unixUnfetch, /* xUnfetch */ \ -}; \ -static const sqlite3_io_methods *FINDER##Impl(const char *z, unixFile *p){ \ - UNUSED_PARAMETER(z); UNUSED_PARAMETER(p); \ - return &METHOD; \ -} \ -static const sqlite3_io_methods *(*const FINDER)(const char*,unixFile *p) \ - = FINDER##Impl; - -/* -** Here are all of the sqlite3_io_methods objects for each of the -** locking strategies. Functions that return pointers to these methods -** are also created. -*/ -IOMETHODS( - posixIoFinder, /* Finder function name */ - posixIoMethods, /* sqlite3_io_methods object name */ - 3, /* shared memory and mmap are enabled */ - unixClose, /* xClose method */ - unixLock, /* xLock method */ - unixUnlock, /* xUnlock method */ - unixCheckReservedLock /* xCheckReservedLock method */ -) -IOMETHODS( - nolockIoFinder, /* Finder function name */ - nolockIoMethods, /* sqlite3_io_methods object name */ - 1, /* shared memory is disabled */ - nolockClose, /* xClose method */ - nolockLock, /* xLock method */ - nolockUnlock, /* xUnlock method */ - nolockCheckReservedLock /* xCheckReservedLock method */ -) -IOMETHODS( - dotlockIoFinder, /* Finder function name */ - dotlockIoMethods, /* sqlite3_io_methods object name */ - 1, /* shared memory is disabled */ - dotlockClose, /* xClose method */ - dotlockLock, /* xLock method */ - dotlockUnlock, /* xUnlock method */ - dotlockCheckReservedLock /* xCheckReservedLock method */ -) - -#if SQLITE_ENABLE_LOCKING_STYLE && !OS_VXWORKS -IOMETHODS( - flockIoFinder, /* Finder function name */ - flockIoMethods, /* sqlite3_io_methods object name */ - 1, /* shared memory is disabled */ - flockClose, /* xClose method */ - flockLock, /* xLock method */ - flockUnlock, /* xUnlock method */ - flockCheckReservedLock /* xCheckReservedLock method */ -) -#endif - -#if OS_VXWORKS -IOMETHODS( - semIoFinder, /* Finder function name */ - semIoMethods, /* sqlite3_io_methods object name */ - 1, /* shared memory is disabled */ - semClose, /* xClose method */ - semLock, /* xLock method */ - semUnlock, /* xUnlock method */ - semCheckReservedLock /* xCheckReservedLock method */ -) -#endif - -#if defined(__APPLE__) && SQLITE_ENABLE_LOCKING_STYLE -IOMETHODS( - afpIoFinder, /* Finder function name */ - afpIoMethods, /* sqlite3_io_methods object name */ - 1, /* shared memory is disabled */ - afpClose, /* xClose method */ - afpLock, /* xLock method */ - afpUnlock, /* xUnlock method */ - afpCheckReservedLock /* xCheckReservedLock method */ -) -#endif - -/* -** The proxy locking method is a "super-method" in the sense that it 
-** opens secondary file descriptors for the conch and lock files and -** it uses proxy, dot-file, AFP, and flock() locking methods on those -** secondary files. For this reason, the division that implements -** proxy locking is located much further down in the file. But we need -** to go ahead and define the sqlite3_io_methods and finder function -** for proxy locking here. So we forward declare the I/O methods. -*/ -#if defined(__APPLE__) && SQLITE_ENABLE_LOCKING_STYLE -static int proxyClose(sqlite3_file*); -static int proxyLock(sqlite3_file*, int); -static int proxyUnlock(sqlite3_file*, int); -static int proxyCheckReservedLock(sqlite3_file*, int*); -IOMETHODS( - proxyIoFinder, /* Finder function name */ - proxyIoMethods, /* sqlite3_io_methods object name */ - 1, /* shared memory is disabled */ - proxyClose, /* xClose method */ - proxyLock, /* xLock method */ - proxyUnlock, /* xUnlock method */ - proxyCheckReservedLock /* xCheckReservedLock method */ -) -#endif - -/* nfs lockd on OSX 10.3+ doesn't clear write locks when a read lock is set */ -#if defined(__APPLE__) && SQLITE_ENABLE_LOCKING_STYLE -IOMETHODS( - nfsIoFinder, /* Finder function name */ - nfsIoMethods, /* sqlite3_io_methods object name */ - 1, /* shared memory is disabled */ - unixClose, /* xClose method */ - unixLock, /* xLock method */ - nfsUnlock, /* xUnlock method */ - unixCheckReservedLock /* xCheckReservedLock method */ -) -#endif - -#if defined(__APPLE__) && SQLITE_ENABLE_LOCKING_STYLE -/* -** This "finder" function attempts to determine the best locking strategy -** for the database file "filePath". It then returns the sqlite3_io_methods -** object that implements that strategy. -** -** This is for MacOSX only. -*/ -static const sqlite3_io_methods *autolockIoFinderImpl( - const char *filePath, /* name of the database file */ - unixFile *pNew /* open file object for the database file */ -){ - static const struct Mapping { - const char *zFilesystem; /* Filesystem type name */ - const sqlite3_io_methods *pMethods; /* Appropriate locking method */ - } aMap[] = { - { "hfs", &posixIoMethods }, - { "ufs", &posixIoMethods }, - { "afpfs", &afpIoMethods }, - { "smbfs", &afpIoMethods }, - { "webdav", &nolockIoMethods }, - { 0, 0 } - }; - int i; - struct statfs fsInfo; - struct flock lockInfo; - - if( !filePath ){ - /* If filePath==NULL that means we are dealing with a transient file - ** that does not need to be locked. */ - return &nolockIoMethods; - } - if( statfs(filePath, &fsInfo) != -1 ){ - if( fsInfo.f_flags & MNT_RDONLY ){ - return &nolockIoMethods; - } - for(i=0; aMap[i].zFilesystem; i++){ - if( strcmp(fsInfo.f_fstypename, aMap[i].zFilesystem)==0 ){ - return aMap[i].pMethods; - } - } - } - - /* Default case. Handles, amongst others, "nfs". - ** Test byte-range lock using fcntl(). If the call succeeds, - ** assume that the file-system supports POSIX style locks. - */ - lockInfo.l_len = 1; - lockInfo.l_start = 0; - lockInfo.l_whence = SEEK_SET; - lockInfo.l_type = F_RDLCK; - if( osFcntl(pNew->h, F_GETLK, &lockInfo)!=-1 ) { - if( strcmp(fsInfo.f_fstypename, "nfs")==0 ){ - return &nfsIoMethods; - } else { - return &posixIoMethods; - } - }else{ - return &dotlockIoMethods; - } -} -static const sqlite3_io_methods - *(*const autolockIoFinder)(const char*,unixFile*) = autolockIoFinderImpl; - -#endif /* defined(__APPLE__) && SQLITE_ENABLE_LOCKING_STYLE */ - -#if OS_VXWORKS && SQLITE_ENABLE_LOCKING_STYLE -/* -** This "finder" function attempts to determine the best locking strategy -** for the database file "filePath". 
It then returns the sqlite3_io_methods -** object that implements that strategy. -** -** This is for VXWorks only. -*/ -static const sqlite3_io_methods *autolockIoFinderImpl( - const char *filePath, /* name of the database file */ - unixFile *pNew /* the open file object */ -){ - struct flock lockInfo; - - if( !filePath ){ - /* If filePath==NULL that means we are dealing with a transient file - ** that does not need to be locked. */ - return &nolockIoMethods; - } - - /* Test if fcntl() is supported and use POSIX style locks. - ** Otherwise fall back to the named semaphore method. - */ - lockInfo.l_len = 1; - lockInfo.l_start = 0; - lockInfo.l_whence = SEEK_SET; - lockInfo.l_type = F_RDLCK; - if( osFcntl(pNew->h, F_GETLK, &lockInfo)!=-1 ) { - return &posixIoMethods; - }else{ - return &semIoMethods; - } -} -static const sqlite3_io_methods - *(*const autolockIoFinder)(const char*,unixFile*) = autolockIoFinderImpl; - -#endif /* OS_VXWORKS && SQLITE_ENABLE_LOCKING_STYLE */ - -/* -** An abstract type for a pointer to a IO method finder function: -*/ -typedef const sqlite3_io_methods *(*finder_type)(const char*,unixFile*); - - -/**************************************************************************** -**************************** sqlite3_vfs methods **************************** -** -** This division contains the implementation of methods on the -** sqlite3_vfs object. -*/ - -/* -** Initialize the contents of the unixFile structure pointed to by pId. -*/ -static int fillInUnixFile( - sqlite3_vfs *pVfs, /* Pointer to vfs object */ - int h, /* Open file descriptor of file being opened */ - sqlite3_file *pId, /* Write to the unixFile structure here */ - const char *zFilename, /* Name of the file being opened */ - int ctrlFlags /* Zero or more UNIXFILE_* values */ -){ - const sqlite3_io_methods *pLockingStyle; - unixFile *pNew = (unixFile *)pId; - int rc = SQLITE_OK; - - assert( pNew->pInode==NULL ); - - /* Usually the path zFilename should not be a relative pathname. The - ** exception is when opening the proxy "conch" file in builds that - ** include the special Apple locking styles. - */ -#if defined(__APPLE__) && SQLITE_ENABLE_LOCKING_STYLE - assert( zFilename==0 || zFilename[0]=='/' - || pVfs->pAppData==(void*)&autolockIoFinder ); -#else - assert( zFilename==0 || zFilename[0]=='/' ); -#endif - - /* No locking occurs in temporary files */ - assert( zFilename!=0 || (ctrlFlags & UNIXFILE_NOLOCK)!=0 ); - - OSTRACE(("OPEN %-3d %s\n", h, zFilename)); - pNew->h = h; - pNew->pVfs = pVfs; - pNew->zPath = zFilename; - pNew->ctrlFlags = (u8)ctrlFlags; -#if SQLITE_MAX_MMAP_SIZE>0 - pNew->mmapSizeMax = sqlite3GlobalConfig.szMmap; -#endif - if( sqlite3_uri_boolean(((ctrlFlags & UNIXFILE_URI) ? 
zFilename : 0), - "psow", SQLITE_POWERSAFE_OVERWRITE) ){ - pNew->ctrlFlags |= UNIXFILE_PSOW; - } - if( strcmp(pVfs->zName,"unix-excl")==0 ){ - pNew->ctrlFlags |= UNIXFILE_EXCL; - } - -#if OS_VXWORKS - pNew->pId = vxworksFindFileId(zFilename); - if( pNew->pId==0 ){ - ctrlFlags |= UNIXFILE_NOLOCK; - rc = SQLITE_NOMEM; - } -#endif - - if( ctrlFlags & UNIXFILE_NOLOCK ){ - pLockingStyle = &nolockIoMethods; - }else{ - pLockingStyle = (**(finder_type*)pVfs->pAppData)(zFilename, pNew); -#if SQLITE_ENABLE_LOCKING_STYLE - /* Cache zFilename in the locking context (AFP and dotlock override) for - ** proxyLock activation is possible (remote proxy is based on db name) - ** zFilename remains valid until file is closed, to support */ - pNew->lockingContext = (void*)zFilename; -#endif - } - - if( pLockingStyle == &posixIoMethods -#if defined(__APPLE__) && SQLITE_ENABLE_LOCKING_STYLE - || pLockingStyle == &nfsIoMethods -#endif - ){ - unixEnterMutex(); - rc = findInodeInfo(pNew, &pNew->pInode); - if( rc!=SQLITE_OK ){ - /* If an error occurred in findInodeInfo(), close the file descriptor - ** immediately, before releasing the mutex. findInodeInfo() may fail - ** in two scenarios: - ** - ** (a) A call to fstat() failed. - ** (b) A malloc failed. - ** - ** Scenario (b) may only occur if the process is holding no other - ** file descriptors open on the same file. If there were other file - ** descriptors on this file, then no malloc would be required by - ** findInodeInfo(). If this is the case, it is quite safe to close - ** handle h - as it is guaranteed that no posix locks will be released - ** by doing so. - ** - ** If scenario (a) caused the error then things are not so safe. The - ** implicit assumption here is that if fstat() fails, things are in - ** such bad shape that dropping a lock or two doesn't matter much. - */ - robust_close(pNew, h, __LINE__); - h = -1; - } - unixLeaveMutex(); - } - -#if SQLITE_ENABLE_LOCKING_STYLE && defined(__APPLE__) - else if( pLockingStyle == &afpIoMethods ){ - /* AFP locking uses the file path so it needs to be included in - ** the afpLockingContext. - */ - afpLockingContext *pCtx; - pNew->lockingContext = pCtx = sqlite3_malloc( sizeof(*pCtx) ); - if( pCtx==0 ){ - rc = SQLITE_NOMEM; - }else{ - /* NB: zFilename exists and remains valid until the file is closed - ** according to requirement F11141. So we do not need to make a - ** copy of the filename. 
*/ - pCtx->dbPath = zFilename; - pCtx->reserved = 0; - srandomdev(); - unixEnterMutex(); - rc = findInodeInfo(pNew, &pNew->pInode); - if( rc!=SQLITE_OK ){ - sqlite3_free(pNew->lockingContext); - robust_close(pNew, h, __LINE__); - h = -1; - } - unixLeaveMutex(); - } - } -#endif - - else if( pLockingStyle == &dotlockIoMethods ){ - /* Dotfile locking uses the file path so it needs to be included in - ** the dotlockLockingContext - */ - char *zLockFile; - int nFilename; - assert( zFilename!=0 ); - nFilename = (int)strlen(zFilename) + 6; - zLockFile = (char *)sqlite3_malloc(nFilename); - if( zLockFile==0 ){ - rc = SQLITE_NOMEM; - }else{ - sqlite3_snprintf(nFilename, zLockFile, "%s" DOTLOCK_SUFFIX, zFilename); - } - pNew->lockingContext = zLockFile; - } - -#if OS_VXWORKS - else if( pLockingStyle == &semIoMethods ){ - /* Named semaphore locking uses the file path so it needs to be - ** included in the semLockingContext - */ - unixEnterMutex(); - rc = findInodeInfo(pNew, &pNew->pInode); - if( (rc==SQLITE_OK) && (pNew->pInode->pSem==NULL) ){ - char *zSemName = pNew->pInode->aSemName; - int n; - sqlite3_snprintf(MAX_PATHNAME, zSemName, "/%s.sem", - pNew->pId->zCanonicalName); - for( n=1; zSemName[n]; n++ ) - if( zSemName[n]=='/' ) zSemName[n] = '_'; - pNew->pInode->pSem = sem_open(zSemName, O_CREAT, 0666, 1); - if( pNew->pInode->pSem == SEM_FAILED ){ - rc = SQLITE_NOMEM; - pNew->pInode->aSemName[0] = '\0'; - } - } - unixLeaveMutex(); - } -#endif - - pNew->lastErrno = 0; -#if OS_VXWORKS - if( rc!=SQLITE_OK ){ - if( h>=0 ) robust_close(pNew, h, __LINE__); - h = -1; - osUnlink(zFilename); - pNew->ctrlFlags |= UNIXFILE_DELETE; - } -#endif - if( rc!=SQLITE_OK ){ - if( h>=0 ) robust_close(pNew, h, __LINE__); - }else{ - pNew->pMethod = pLockingStyle; - OpenCounter(+1); - verifyDbFile(pNew); - } - return rc; -} - -/* -** Return the name of a directory in which to put temporary files. -** If no suitable temporary file directory can be found, return NULL. -*/ -static const char *unixTempFileDir(void){ - static const char *azDirs[] = { - 0, - 0, - 0, - "/var/tmp", - "/usr/tmp", - "/tmp", - 0 /* List terminator */ - }; - unsigned int i; - struct stat buf; - const char *zDir = 0; - - azDirs[0] = sqlite3_temp_directory; - if( !azDirs[1] ) azDirs[1] = getenv("SQLITE_TMPDIR"); - if( !azDirs[2] ) azDirs[2] = getenv("TMPDIR"); - for(i=0; imxPathname bytes. -*/ -static int unixGetTempname(int nBuf, char *zBuf){ - static const unsigned char zChars[] = - "abcdefghijklmnopqrstuvwxyz" - "ABCDEFGHIJKLMNOPQRSTUVWXYZ" - "0123456789"; - unsigned int i, j; - const char *zDir; - - /* It's odd to simulate an io-error here, but really this is just - ** using the io-error infrastructure to test that SQLite handles this - ** function failing. - */ - SimulateIOError( return SQLITE_IOERR ); - - zDir = unixTempFileDir(); - if( zDir==0 ) zDir = "."; - - /* Check that the output buffer is large enough for the temporary file - ** name. If it is not, return SQLITE_ERROR. - */ - if( (strlen(zDir) + strlen(SQLITE_TEMP_FILE_PREFIX) + 18) >= (size_t)nBuf ){ - return SQLITE_ERROR; - } - - do{ - sqlite3_snprintf(nBuf-18, zBuf, "%s/"SQLITE_TEMP_FILE_PREFIX, zDir); - j = (int)strlen(zBuf); - sqlite3_randomness(15, &zBuf[j]); - for(i=0; i<15; i++, j++){ - zBuf[j] = (char)zChars[ ((unsigned char)zBuf[j])%(sizeof(zChars)-1) ]; - } - zBuf[j] = 0; - zBuf[j+1] = 0; - }while( osAccess(zBuf,0)==0 ); - return SQLITE_OK; -} - -#if SQLITE_ENABLE_LOCKING_STYLE && defined(__APPLE__) -/* -** Routine to transform a unixFile into a proxy-locking unixFile. 
-** Implementation in the proxy-lock division, but used by unixOpen() -** if SQLITE_PREFER_PROXY_LOCKING is defined. -*/ -static int proxyTransformUnixFile(unixFile*, const char*); -#endif - -/* -** Search for an unused file descriptor that was opened on the database -** file (not a journal or master-journal file) identified by pathname -** zPath with SQLITE_OPEN_XXX flags matching those passed as the second -** argument to this function. -** -** Such a file descriptor may exist if a database connection was closed -** but the associated file descriptor could not be closed because some -** other file descriptor open on the same file is holding a file-lock. -** Refer to comments in the unixClose() function and the lengthy comment -** describing "Posix Advisory Locking" at the start of this file for -** further details. Also, ticket #4018. -** -** If a suitable file descriptor is found, then it is returned. If no -** such file descriptor is located, -1 is returned. -*/ -static UnixUnusedFd *findReusableFd(const char *zPath, int flags){ - UnixUnusedFd *pUnused = 0; - - /* Do not search for an unused file descriptor on vxworks. Not because - ** vxworks would not benefit from the change (it might, we're not sure), - ** but because no way to test it is currently available. It is better - ** not to risk breaking vxworks support for the sake of such an obscure - ** feature. */ -#if !OS_VXWORKS - struct stat sStat; /* Results of stat() call */ - - /* A stat() call may fail for various reasons. If this happens, it is - ** almost certain that an open() call on the same path will also fail. - ** For this reason, if an error occurs in the stat() call here, it is - ** ignored and -1 is returned. The caller will try to open a new file - ** descriptor on the same path, fail, and return an error to SQLite. - ** - ** Even if a subsequent open() call does succeed, the consequences of - ** not searching for a resusable file descriptor are not dire. */ - if( 0==osStat(zPath, &sStat) ){ - unixInodeInfo *pInode; - - unixEnterMutex(); - pInode = inodeList; - while( pInode && (pInode->fileId.dev!=sStat.st_dev - || pInode->fileId.ino!=sStat.st_ino) ){ - pInode = pInode->pNext; - } - if( pInode ){ - UnixUnusedFd **pp; - for(pp=&pInode->pUnused; *pp && (*pp)->flags!=flags; pp=&((*pp)->pNext)); - pUnused = *pp; - if( pUnused ){ - *pp = pUnused->pNext; - } - } - unixLeaveMutex(); - } -#endif /* if !OS_VXWORKS */ - return pUnused; -} - -/* -** This function is called by unixOpen() to determine the unix permissions -** to create new files with. If no error occurs, then SQLITE_OK is returned -** and a value suitable for passing as the third argument to open(2) is -** written to *pMode. If an IO error occurs, an SQLite error code is -** returned and the value of *pMode is not modified. -** -** In most cases cases, this routine sets *pMode to 0, which will become -** an indication to robust_open() to create the file using -** SQLITE_DEFAULT_FILE_PERMISSIONS adjusted by the umask. -** But if the file being opened is a WAL or regular journal file, then -** this function queries the file-system for the permissions on the -** corresponding database file and sets *pMode to this value. Whenever -** possible, WAL and journal files are created using the same permissions -** as the associated database file. -** -** If the SQLITE_ENABLE_8_3_NAMES option is enabled, then the -** original filename is unavailable. 
But 8_3_NAMES is only used for -** FAT filesystems and permissions do not matter there, so just use -** the default permissions. -*/ -static int findCreateFileMode( - const char *zPath, /* Path of file (possibly) being created */ - int flags, /* Flags passed as 4th argument to xOpen() */ - mode_t *pMode, /* OUT: Permissions to open file with */ - uid_t *pUid, /* OUT: uid to set on the file */ - gid_t *pGid /* OUT: gid to set on the file */ -){ - int rc = SQLITE_OK; /* Return Code */ - *pMode = 0; - *pUid = 0; - *pGid = 0; - if( flags & (SQLITE_OPEN_WAL|SQLITE_OPEN_MAIN_JOURNAL) ){ - char zDb[MAX_PATHNAME+1]; /* Database file path */ - int nDb; /* Number of valid bytes in zDb */ - struct stat sStat; /* Output of stat() on database file */ - - /* zPath is a path to a WAL or journal file. The following block derives - ** the path to the associated database file from zPath. This block handles - ** the following naming conventions: - ** - ** "-journal" - ** "-wal" - ** "-journalNN" - ** "-walNN" - ** - ** where NN is a decimal number. The NN naming schemes are - ** used by the test_multiplex.c module. - */ - nDb = sqlite3Strlen30(zPath) - 1; -#ifdef SQLITE_ENABLE_8_3_NAMES - while( nDb>0 && sqlite3Isalnum(zPath[nDb]) ) nDb--; - if( nDb==0 || zPath[nDb]!='-' ) return SQLITE_OK; -#else - while( zPath[nDb]!='-' ){ - assert( nDb>0 ); - assert( zPath[nDb]!='\n' ); - nDb--; - } -#endif - memcpy(zDb, zPath, nDb); - zDb[nDb] = '\0'; - - if( 0==osStat(zDb, &sStat) ){ - *pMode = sStat.st_mode & 0777; - *pUid = sStat.st_uid; - *pGid = sStat.st_gid; - }else{ - rc = SQLITE_IOERR_FSTAT; - } - }else if( flags & SQLITE_OPEN_DELETEONCLOSE ){ - *pMode = 0600; - } - return rc; -} - -/* -** Open the file zPath. -** -** Previously, the SQLite OS layer used three functions in place of this -** one: -** -** sqlite3OsOpenReadWrite(); -** sqlite3OsOpenReadOnly(); -** sqlite3OsOpenExclusive(); -** -** These calls correspond to the following combinations of flags: -** -** ReadWrite() -> (READWRITE | CREATE) -** ReadOnly() -> (READONLY) -** OpenExclusive() -> (READWRITE | CREATE | EXCLUSIVE) -** -** The old OpenExclusive() accepted a boolean argument - "delFlag". If -** true, the file was configured to be automatically deleted when the -** file handle closed. To achieve the same effect using this new -** interface, add the DELETEONCLOSE flag to those specified above for -** OpenExclusive(). 
-*/ -static int unixOpen( - sqlite3_vfs *pVfs, /* The VFS for which this is the xOpen method */ - const char *zPath, /* Pathname of file to be opened */ - sqlite3_file *pFile, /* The file descriptor to be filled in */ - int flags, /* Input flags to control the opening */ - int *pOutFlags /* Output flags returned to SQLite core */ -){ - unixFile *p = (unixFile *)pFile; - int fd = -1; /* File descriptor returned by open() */ - int openFlags = 0; /* Flags to pass to open() */ - int eType = flags&0xFFFFFF00; /* Type of file to open */ - int noLock; /* True to omit locking primitives */ - int rc = SQLITE_OK; /* Function Return Code */ - int ctrlFlags = 0; /* UNIXFILE_* flags */ - - int isExclusive = (flags & SQLITE_OPEN_EXCLUSIVE); - int isDelete = (flags & SQLITE_OPEN_DELETEONCLOSE); - int isCreate = (flags & SQLITE_OPEN_CREATE); - int isReadonly = (flags & SQLITE_OPEN_READONLY); - int isReadWrite = (flags & SQLITE_OPEN_READWRITE); -#if SQLITE_ENABLE_LOCKING_STYLE - int isAutoProxy = (flags & SQLITE_OPEN_AUTOPROXY); -#endif -#if defined(__APPLE__) || SQLITE_ENABLE_LOCKING_STYLE - struct statfs fsInfo; -#endif - - /* If creating a master or main-file journal, this function will open - ** a file-descriptor on the directory too. The first time unixSync() - ** is called the directory file descriptor will be fsync()ed and close()d. - */ - int syncDir = (isCreate && ( - eType==SQLITE_OPEN_MASTER_JOURNAL - || eType==SQLITE_OPEN_MAIN_JOURNAL - || eType==SQLITE_OPEN_WAL - )); - - /* If argument zPath is a NULL pointer, this function is required to open - ** a temporary file. Use this buffer to store the file name in. - */ - char zTmpname[MAX_PATHNAME+2]; - const char *zName = zPath; - - /* Check the following statements are true: - ** - ** (a) Exactly one of the READWRITE and READONLY flags must be set, and - ** (b) if CREATE is set, then READWRITE must also be set, and - ** (c) if EXCLUSIVE is set, then CREATE must also be set. - ** (d) if DELETEONCLOSE is set, then CREATE must also be set. - */ - assert((isReadonly==0 || isReadWrite==0) && (isReadWrite || isReadonly)); - assert(isCreate==0 || isReadWrite); - assert(isExclusive==0 || isCreate); - assert(isDelete==0 || isCreate); - - /* The main DB, main journal, WAL file and master journal are never - ** automatically deleted. Nor are they ever temporary files. */ - assert( (!isDelete && zName) || eType!=SQLITE_OPEN_MAIN_DB ); - assert( (!isDelete && zName) || eType!=SQLITE_OPEN_MAIN_JOURNAL ); - assert( (!isDelete && zName) || eType!=SQLITE_OPEN_MASTER_JOURNAL ); - assert( (!isDelete && zName) || eType!=SQLITE_OPEN_WAL ); - - /* Assert that the upper layer has set one of the "file-type" flags. */ - assert( eType==SQLITE_OPEN_MAIN_DB || eType==SQLITE_OPEN_TEMP_DB - || eType==SQLITE_OPEN_MAIN_JOURNAL || eType==SQLITE_OPEN_TEMP_JOURNAL - || eType==SQLITE_OPEN_SUBJOURNAL || eType==SQLITE_OPEN_MASTER_JOURNAL - || eType==SQLITE_OPEN_TRANSIENT_DB || eType==SQLITE_OPEN_WAL - ); - - /* Detect a pid change and reset the PRNG. There is a race condition - ** here such that two or more threads all trying to open databases at - ** the same instant might all reset the PRNG. But multiple resets - ** are harmless. 
- */ - if( randomnessPid!=getpid() ){ - randomnessPid = getpid(); - sqlite3_randomness(0,0); - } - - memset(p, 0, sizeof(unixFile)); - - if( eType==SQLITE_OPEN_MAIN_DB ){ - UnixUnusedFd *pUnused; - pUnused = findReusableFd(zName, flags); - if( pUnused ){ - fd = pUnused->fd; - }else{ - pUnused = sqlite3_malloc(sizeof(*pUnused)); - if( !pUnused ){ - return SQLITE_NOMEM; - } - } - p->pUnused = pUnused; - - /* Database filenames are double-zero terminated if they are not - ** URIs with parameters. Hence, they can always be passed into - ** sqlite3_uri_parameter(). */ - assert( (flags & SQLITE_OPEN_URI) || zName[strlen(zName)+1]==0 ); - - }else if( !zName ){ - /* If zName is NULL, the upper layer is requesting a temp file. */ - assert(isDelete && !syncDir); - rc = unixGetTempname(MAX_PATHNAME+2, zTmpname); - if( rc!=SQLITE_OK ){ - return rc; - } - zName = zTmpname; - - /* Generated temporary filenames are always double-zero terminated - ** for use by sqlite3_uri_parameter(). */ - assert( zName[strlen(zName)+1]==0 ); - } - - /* Determine the value of the flags parameter passed to POSIX function - ** open(). These must be calculated even if open() is not called, as - ** they may be stored as part of the file handle and used by the - ** 'conch file' locking functions later on. */ - if( isReadonly ) openFlags |= O_RDONLY; - if( isReadWrite ) openFlags |= O_RDWR; - if( isCreate ) openFlags |= O_CREAT; - if( isExclusive ) openFlags |= (O_EXCL|O_NOFOLLOW); - openFlags |= (O_LARGEFILE|O_BINARY); - - if( fd<0 ){ - mode_t openMode; /* Permissions to create file with */ - uid_t uid; /* Userid for the file */ - gid_t gid; /* Groupid for the file */ - rc = findCreateFileMode(zName, flags, &openMode, &uid, &gid); - if( rc!=SQLITE_OK ){ - assert( !p->pUnused ); - assert( eType==SQLITE_OPEN_WAL || eType==SQLITE_OPEN_MAIN_JOURNAL ); - return rc; - } - fd = robust_open(zName, openFlags, openMode); - OSTRACE(("OPENX %-3d %s 0%o\n", fd, zName, openFlags)); - if( fd<0 && errno!=EISDIR && isReadWrite && !isExclusive ){ - /* Failed to open the file for read/write access. Try read-only. */ - flags &= ~(SQLITE_OPEN_READWRITE|SQLITE_OPEN_CREATE); - openFlags &= ~(O_RDWR|O_CREAT); - flags |= SQLITE_OPEN_READONLY; - openFlags |= O_RDONLY; - isReadonly = 1; - fd = robust_open(zName, openFlags, openMode); - } - if( fd<0 ){ - rc = unixLogError(SQLITE_CANTOPEN_BKPT, "open", zName); - goto open_finished; - } - - /* If this process is running as root and if creating a new rollback - ** journal or WAL file, set the ownership of the journal or WAL to be - ** the same as the original database. 
- */ - if( flags & (SQLITE_OPEN_WAL|SQLITE_OPEN_MAIN_JOURNAL) ){ - osFchown(fd, uid, gid); - } - } - assert( fd>=0 ); - if( pOutFlags ){ - *pOutFlags = flags; - } - - if( p->pUnused ){ - p->pUnused->fd = fd; - p->pUnused->flags = flags; - } - - if( isDelete ){ -#if OS_VXWORKS - zPath = zName; -#else - osUnlink(zName); -#endif - } -#if SQLITE_ENABLE_LOCKING_STYLE - else{ - p->openFlags = openFlags; - } -#endif - - noLock = eType!=SQLITE_OPEN_MAIN_DB; - - -#if defined(__APPLE__) || SQLITE_ENABLE_LOCKING_STYLE - if( fstatfs(fd, &fsInfo) == -1 ){ - ((unixFile*)pFile)->lastErrno = errno; - robust_close(p, fd, __LINE__); - return SQLITE_IOERR_ACCESS; - } - if (0 == strncmp("msdos", fsInfo.f_fstypename, 5)) { - ((unixFile*)pFile)->fsFlags |= SQLITE_FSFLAGS_IS_MSDOS; - } -#endif - - /* Set up appropriate ctrlFlags */ - if( isDelete ) ctrlFlags |= UNIXFILE_DELETE; - if( isReadonly ) ctrlFlags |= UNIXFILE_RDONLY; - if( noLock ) ctrlFlags |= UNIXFILE_NOLOCK; - if( syncDir ) ctrlFlags |= UNIXFILE_DIRSYNC; - if( flags & SQLITE_OPEN_URI ) ctrlFlags |= UNIXFILE_URI; - -#if SQLITE_ENABLE_LOCKING_STYLE -#if SQLITE_PREFER_PROXY_LOCKING - isAutoProxy = 1; -#endif - if( isAutoProxy && (zPath!=NULL) && (!noLock) && pVfs->xOpen ){ - char *envforce = getenv("SQLITE_FORCE_PROXY_LOCKING"); - int useProxy = 0; - - /* SQLITE_FORCE_PROXY_LOCKING==1 means force always use proxy, 0 means - ** never use proxy, NULL means use proxy for non-local files only. */ - if( envforce!=NULL ){ - useProxy = atoi(envforce)>0; - }else{ - if( statfs(zPath, &fsInfo) == -1 ){ - /* In theory, the close(fd) call is sub-optimal. If the file opened - ** with fd is a database file, and there are other connections open - ** on that file that are currently holding advisory locks on it, - ** then the call to close() will cancel those locks. In practice, - ** we're assuming that statfs() doesn't fail very often. At least - ** not while other file descriptors opened by the same process on - ** the same file are working. */ - p->lastErrno = errno; - robust_close(p, fd, __LINE__); - rc = SQLITE_IOERR_ACCESS; - goto open_finished; - } - useProxy = !(fsInfo.f_flags&MNT_LOCAL); - } - if( useProxy ){ - rc = fillInUnixFile(pVfs, fd, pFile, zPath, ctrlFlags); - if( rc==SQLITE_OK ){ - rc = proxyTransformUnixFile((unixFile*)pFile, ":auto:"); - if( rc!=SQLITE_OK ){ - /* Use unixClose to clean up the resources added in fillInUnixFile - ** and clear all the structure's references. Specifically, - ** pFile->pMethods will be NULL so sqlite3OsClose will be a no-op - */ - unixClose(pFile); - return rc; - } - } - goto open_finished; - } - } -#endif - - rc = fillInUnixFile(pVfs, fd, pFile, zPath, ctrlFlags); - -open_finished: - if( rc!=SQLITE_OK ){ - sqlite3_free(p->pUnused); - } - return rc; -} - - -/* -** Delete the file at zPath. If the dirSync argument is true, fsync() -** the directory after deleting the file. 
-*/ -static int unixDelete( - sqlite3_vfs *NotUsed, /* VFS containing this as the xDelete method */ - const char *zPath, /* Name of file to be deleted */ - int dirSync /* If true, fsync() directory after deleting file */ -){ - int rc = SQLITE_OK; - UNUSED_PARAMETER(NotUsed); - SimulateIOError(return SQLITE_IOERR_DELETE); - if( osUnlink(zPath)==(-1) ){ - if( errno==ENOENT ){ - rc = SQLITE_IOERR_DELETE_NOENT; - }else{ - rc = unixLogError(SQLITE_IOERR_DELETE, "unlink", zPath); - } - return rc; - } -#ifndef SQLITE_DISABLE_DIRSYNC - if( (dirSync & 1)!=0 ){ - int fd; - rc = osOpenDirectory(zPath, &fd); - if( rc==SQLITE_OK ){ -#if OS_VXWORKS - if( fsync(fd)==-1 ) -#else - if( fsync(fd) ) -#endif - { - rc = unixLogError(SQLITE_IOERR_DIR_FSYNC, "fsync", zPath); - } - robust_close(0, fd, __LINE__); - }else if( rc==SQLITE_CANTOPEN ){ - rc = SQLITE_OK; - } - } -#endif - return rc; -} - -/* -** Test the existence of or access permissions of file zPath. The -** test performed depends on the value of flags: -** -** SQLITE_ACCESS_EXISTS: Return 1 if the file exists -** SQLITE_ACCESS_READWRITE: Return 1 if the file is read and writable. -** SQLITE_ACCESS_READONLY: Return 1 if the file is readable. -** -** Otherwise return 0. -*/ -static int unixAccess( - sqlite3_vfs *NotUsed, /* The VFS containing this xAccess method */ - const char *zPath, /* Path of the file to examine */ - int flags, /* What do we want to learn about the zPath file? */ - int *pResOut /* Write result boolean here */ -){ - int amode = 0; - UNUSED_PARAMETER(NotUsed); - SimulateIOError( return SQLITE_IOERR_ACCESS; ); - switch( flags ){ - case SQLITE_ACCESS_EXISTS: - amode = F_OK; - break; - case SQLITE_ACCESS_READWRITE: - amode = W_OK|R_OK; - break; - case SQLITE_ACCESS_READ: - amode = R_OK; - break; - - default: - assert(!"Invalid flags argument"); - } - *pResOut = (osAccess(zPath, amode)==0); - if( flags==SQLITE_ACCESS_EXISTS && *pResOut ){ - struct stat buf; - if( 0==osStat(zPath, &buf) && buf.st_size==0 ){ - *pResOut = 0; - } - } - return SQLITE_OK; -} - - -/* -** Turn a relative pathname into a full pathname. The relative path -** is stored as a nul-terminated string in the buffer pointed to by -** zPath. -** -** zOut points to a buffer of at least sqlite3_vfs.mxPathname bytes -** (in this case, MAX_PATHNAME bytes). The full-path is written to -** this buffer before returning. -*/ -static int unixFullPathname( - sqlite3_vfs *pVfs, /* Pointer to vfs object */ - const char *zPath, /* Possibly relative input path */ - int nOut, /* Size of output buffer in bytes */ - char *zOut /* Output buffer */ -){ - - /* It's odd to simulate an io-error here, but really this is just - ** using the io-error infrastructure to test that SQLite handles this - ** function failing. This function could fail if, for example, the - ** current working directory has been unlinked. - */ - SimulateIOError( return SQLITE_ERROR ); - - assert( pVfs->mxPathname==MAX_PATHNAME ); - UNUSED_PARAMETER(pVfs); - - zOut[nOut-1] = '\0'; - if( zPath[0]=='/' ){ - sqlite3_snprintf(nOut, zOut, "%s", zPath); - }else{ - int nCwd; - if( osGetcwd(zOut, nOut-1)==0 ){ - return unixLogError(SQLITE_CANTOPEN_BKPT, "getcwd", zPath); - } - nCwd = (int)strlen(zOut); - sqlite3_snprintf(nOut-nCwd, &zOut[nCwd], "/%s", zPath); - } - return SQLITE_OK; -} - - -#ifndef SQLITE_OMIT_LOAD_EXTENSION -/* -** Interfaces for opening a shared library, finding entry points -** within the shared library, and closing the shared library. 
-*/ -#include -static void *unixDlOpen(sqlite3_vfs *NotUsed, const char *zFilename){ - UNUSED_PARAMETER(NotUsed); - return dlopen(zFilename, RTLD_NOW | RTLD_GLOBAL); -} - -/* -** SQLite calls this function immediately after a call to unixDlSym() or -** unixDlOpen() fails (returns a null pointer). If a more detailed error -** message is available, it is written to zBufOut. If no error message -** is available, zBufOut is left unmodified and SQLite uses a default -** error message. -*/ -static void unixDlError(sqlite3_vfs *NotUsed, int nBuf, char *zBufOut){ - const char *zErr; - UNUSED_PARAMETER(NotUsed); - unixEnterMutex(); - zErr = dlerror(); - if( zErr ){ - sqlite3_snprintf(nBuf, zBufOut, "%s", zErr); - } - unixLeaveMutex(); -} -static void (*unixDlSym(sqlite3_vfs *NotUsed, void *p, const char*zSym))(void){ - /* - ** GCC with -pedantic-errors says that C90 does not allow a void* to be - ** cast into a pointer to a function. And yet the library dlsym() routine - ** returns a void* which is really a pointer to a function. So how do we - ** use dlsym() with -pedantic-errors? - ** - ** Variable x below is defined to be a pointer to a function taking - ** parameters void* and const char* and returning a pointer to a function. - ** We initialize x by assigning it a pointer to the dlsym() function. - ** (That assignment requires a cast.) Then we call the function that - ** x points to. - ** - ** This work-around is unlikely to work correctly on any system where - ** you really cannot cast a function pointer into void*. But then, on the - ** other hand, dlsym() will not work on such a system either, so we have - ** not really lost anything. - */ - void (*(*x)(void*,const char*))(void); - UNUSED_PARAMETER(NotUsed); - x = (void(*(*)(void*,const char*))(void))dlsym; - return (*x)(p, zSym); -} -static void unixDlClose(sqlite3_vfs *NotUsed, void *pHandle){ - UNUSED_PARAMETER(NotUsed); - dlclose(pHandle); -} -#else /* if SQLITE_OMIT_LOAD_EXTENSION is defined: */ - #define unixDlOpen 0 - #define unixDlError 0 - #define unixDlSym 0 - #define unixDlClose 0 -#endif - -/* -** Write nBuf bytes of random data to the supplied buffer zBuf. -*/ -static int unixRandomness(sqlite3_vfs *NotUsed, int nBuf, char *zBuf){ - UNUSED_PARAMETER(NotUsed); - assert((size_t)nBuf>=(sizeof(time_t)+sizeof(int))); - - /* We have to initialize zBuf to prevent valgrind from reporting - ** errors. The reports issued by valgrind are incorrect - we would - ** prefer that the randomness be increased by making use of the - ** uninitialized space in zBuf - but valgrind errors tend to worry - ** some users. Rather than argue, it seems easier just to initialize - ** the whole array and silence valgrind, even if that means less randomness - ** in the random seed. - ** - ** When testing, initializing zBuf[] to zero is all we do. That means - ** that we always use the same random number sequence. This makes the - ** tests repeatable. - */ - memset(zBuf, 0, nBuf); - randomnessPid = getpid(); -#if !defined(SQLITE_TEST) - { - int fd, got; - fd = robust_open("/dev/urandom", O_RDONLY, 0); - if( fd<0 ){ - time_t t; - time(&t); - memcpy(zBuf, &t, sizeof(t)); - memcpy(&zBuf[sizeof(t)], &randomnessPid, sizeof(randomnessPid)); - assert( sizeof(t)+sizeof(randomnessPid)<=(size_t)nBuf ); - nBuf = sizeof(t) + sizeof(randomnessPid); - }else{ - do{ got = osRead(fd, zBuf, nBuf); }while( got<0 && errno==EINTR ); - robust_close(0, fd, __LINE__); - } - } -#endif - return nBuf; -} - - -/* -** Sleep for a little while. Return the amount of time slept. 
-** The argument is the number of microseconds we want to sleep. -** The return value is the number of microseconds of sleep actually -** requested from the underlying operating system, a number which -** might be greater than or equal to the argument, but not less -** than the argument. -*/ -static int unixSleep(sqlite3_vfs *NotUsed, int microseconds){ -#if OS_VXWORKS - struct timespec sp; - - sp.tv_sec = microseconds / 1000000; - sp.tv_nsec = (microseconds % 1000000) * 1000; - nanosleep(&sp, NULL); - UNUSED_PARAMETER(NotUsed); - return microseconds; -#elif defined(HAVE_USLEEP) && HAVE_USLEEP - usleep(microseconds); - UNUSED_PARAMETER(NotUsed); - return microseconds; -#else - int seconds = (microseconds+999999)/1000000; - sleep(seconds); - UNUSED_PARAMETER(NotUsed); - return seconds*1000000; -#endif -} - -/* -** The following variable, if set to a non-zero value, is interpreted as -** the number of seconds since 1970 and is used to set the result of -** sqlite3OsCurrentTime() during testing. -*/ -#ifdef SQLITE_TEST -SQLITE_API int sqlite3_current_time = 0; /* Fake system time in seconds since 1970. */ -#endif - -/* -** Find the current time (in Universal Coordinated Time). Write into *piNow -** the current time and date as a Julian Day number times 86_400_000. In -** other words, write into *piNow the number of milliseconds since the Julian -** epoch of noon in Greenwich on November 24, 4714 B.C according to the -** proleptic Gregorian calendar. -** -** On success, return SQLITE_OK. Return SQLITE_ERROR if the time and date -** cannot be found. -*/ -static int unixCurrentTimeInt64(sqlite3_vfs *NotUsed, sqlite3_int64 *piNow){ - static const sqlite3_int64 unixEpoch = 24405875*(sqlite3_int64)8640000; - int rc = SQLITE_OK; -#if defined(NO_GETTOD) - time_t t; - time(&t); - *piNow = ((sqlite3_int64)t)*1000 + unixEpoch; -#elif OS_VXWORKS - struct timespec sNow; - clock_gettime(CLOCK_REALTIME, &sNow); - *piNow = unixEpoch + 1000*(sqlite3_int64)sNow.tv_sec + sNow.tv_nsec/1000000; -#else - struct timeval sNow; - if( gettimeofday(&sNow, 0)==0 ){ - *piNow = unixEpoch + 1000*(sqlite3_int64)sNow.tv_sec + sNow.tv_usec/1000; - }else{ - rc = SQLITE_ERROR; - } -#endif - -#ifdef SQLITE_TEST - if( sqlite3_current_time ){ - *piNow = 1000*(sqlite3_int64)sqlite3_current_time + unixEpoch; - } -#endif - UNUSED_PARAMETER(NotUsed); - return rc; -} - -/* -** Find the current time (in Universal Coordinated Time). Write the -** current time and date as a Julian Day number into *prNow and -** return 0. Return 1 if the time and date cannot be found. -*/ -static int unixCurrentTime(sqlite3_vfs *NotUsed, double *prNow){ - sqlite3_int64 i = 0; - int rc; - UNUSED_PARAMETER(NotUsed); - rc = unixCurrentTimeInt64(0, &i); - *prNow = i/86400000.0; - return rc; -} - -/* -** We added the xGetLastError() method with the intention of providing -** better low-level error messages when operating-system problems come up -** during SQLite operation. But so far, none of that has been implemented -** in the core. So this routine is never called. For now, it is merely -** a place-holder. 
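A short worked check of the unixEpoch constant used by unixCurrentTimeInt64() above, since the 24405875*8640000 factoring is easy to misread: the Unix epoch (1970-01-01 00:00:00 UTC) falls on Julian day 2440587.5, so in milliseconds since the Julian epoch it is 2440587.5 * 86400000 = 210866760000000, which is exactly 24405875 * 8640000. The sketch below only illustrates the arithmetic.

#include <stdio.h>
#include <time.h>

int main(void){
  long long unixEpoch = 24405875LL * 8640000LL;          /* 210866760000000 ms */
  long long now = unixEpoch + (long long)time(0)*1000;   /* same formula as the NO_GETTOD branch */
  printf("unixEpoch         = %lld ms\n", unixEpoch);
  printf("xCurrentTimeInt64 ~ %lld ms\n", now);
  printf("as a Julian Day   = %.6f\n", now / 86400000.0);
  return 0;
}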
-*/ -static int unixGetLastError(sqlite3_vfs *NotUsed, int NotUsed2, char *NotUsed3){ - UNUSED_PARAMETER(NotUsed); - UNUSED_PARAMETER(NotUsed2); - UNUSED_PARAMETER(NotUsed3); - return 0; -} - - -/* -************************ End of sqlite3_vfs methods *************************** -******************************************************************************/ - -/****************************************************************************** -************************** Begin Proxy Locking ******************************** -** -** Proxy locking is a "uber-locking-method" in this sense: It uses the -** other locking methods on secondary lock files. Proxy locking is a -** meta-layer over top of the primitive locking implemented above. For -** this reason, the division that implements of proxy locking is deferred -** until late in the file (here) after all of the other I/O methods have -** been defined - so that the primitive locking methods are available -** as services to help with the implementation of proxy locking. -** -**** -** -** The default locking schemes in SQLite use byte-range locks on the -** database file to coordinate safe, concurrent access by multiple readers -** and writers [http://sqlite.org/lockingv3.html]. The five file locking -** states (UNLOCKED, PENDING, SHARED, RESERVED, EXCLUSIVE) are implemented -** as POSIX read & write locks over fixed set of locations (via fsctl), -** on AFP and SMB only exclusive byte-range locks are available via fsctl -** with _IOWR('z', 23, struct ByteRangeLockPB2) to track the same 5 states. -** To simulate a F_RDLCK on the shared range, on AFP a randomly selected -** address in the shared range is taken for a SHARED lock, the entire -** shared range is taken for an EXCLUSIVE lock): -** -** PENDING_BYTE 0x40000000 -** RESERVED_BYTE 0x40000001 -** SHARED_RANGE 0x40000002 -> 0x40000200 -** -** This works well on the local file system, but shows a nearly 100x -** slowdown in read performance on AFP because the AFP client disables -** the read cache when byte-range locks are present. Enabling the read -** cache exposes a cache coherency problem that is present on all OS X -** supported network file systems. NFS and AFP both observe the -** close-to-open semantics for ensuring cache coherency -** [http://nfs.sourceforge.net/#faq_a8], which does not effectively -** address the requirements for concurrent database access by multiple -** readers and writers -** [http://www.nabble.com/SQLite-on-NFS-cache-coherency-td15655701.html]. -** -** To address the performance and cache coherency issues, proxy file locking -** changes the way database access is controlled by limiting access to a -** single host at a time and moving file locks off of the database file -** and onto a proxy file on the local file system. -** -** -** Using proxy locks -** ----------------- -** -** C APIs -** -** sqlite3_file_control(db, dbname, SQLITE_SET_LOCKPROXYFILE, -** | ":auto:"); -** sqlite3_file_control(db, dbname, SQLITE_GET_LOCKPROXYFILE, &); -** -** -** SQL pragmas -** -** PRAGMA [database.]lock_proxy_file= | :auto: -** PRAGMA [database.]lock_proxy_file -** -** Specifying ":auto:" means that if there is a conch file with a matching -** host ID in it, the proxy path in the conch file will be used, otherwise -** a proxy path based on the user's temp dir -** (via confstr(_CS_DARWIN_USER_TEMP_DIR,...)) will be used and the -** actual proxy file name is generated from the name and path of the -** database file. 
For example: -** -** For database path "/Users/me/foo.db" -** The lock path will be "/sqliteplocks/_Users_me_foo.db:auto:") -** -** Once a lock proxy is configured for a database connection, it can not -** be removed, however it may be switched to a different proxy path via -** the above APIs (assuming the conch file is not being held by another -** connection or process). -** -** -** How proxy locking works -** ----------------------- -** -** Proxy file locking relies primarily on two new supporting files: -** -** * conch file to limit access to the database file to a single host -** at a time -** -** * proxy file to act as a proxy for the advisory locks normally -** taken on the database -** -** The conch file - to use a proxy file, sqlite must first "hold the conch" -** by taking an sqlite-style shared lock on the conch file, reading the -** contents and comparing the host's unique host ID (see below) and lock -** proxy path against the values stored in the conch. The conch file is -** stored in the same directory as the database file and the file name -** is patterned after the database file name as ".-conch". -** If the conch file does not exist, or it's contents do not match the -** host ID and/or proxy path, then the lock is escalated to an exclusive -** lock and the conch file contents is updated with the host ID and proxy -** path and the lock is downgraded to a shared lock again. If the conch -** is held by another process (with a shared lock), the exclusive lock -** will fail and SQLITE_BUSY is returned. -** -** The proxy file - a single-byte file used for all advisory file locks -** normally taken on the database file. This allows for safe sharing -** of the database file for multiple readers and writers on the same -** host (the conch ensures that they all use the same local lock file). -** -** Requesting the lock proxy does not immediately take the conch, it is -** only taken when the first request to lock database file is made. -** This matches the semantics of the traditional locking behavior, where -** opening a connection to a database file does not take a lock on it. -** The shared lock and an open file descriptor are maintained until -** the connection to the database is closed. -** -** The proxy file and the lock file are never deleted so they only need -** to be created the first time they are used. -** -** Configuration options -** --------------------- -** -** SQLITE_PREFER_PROXY_LOCKING -** -** Database files accessed on non-local file systems are -** automatically configured for proxy locking, lock files are -** named automatically using the same logic as -** PRAGMA lock_proxy_file=":auto:" -** -** SQLITE_PROXY_DEBUG -** -** Enables the logging of error messages during host id file -** retrieval and creation -** -** LOCKPROXYDIR -** -** Overrides the default directory used for lock proxy files that -** are named automatically via the ":auto:" setting -** -** SQLITE_DEFAULT_PROXYDIR_PERMISSIONS -** -** Permissions to use when creating a directory for storing the -** lock proxy files, only used when LOCKPROXYDIR is not set. -** -** -** As mentioned above, when compiled with SQLITE_PREFER_PROXY_LOCKING, -** setting the environment variable SQLITE_FORCE_PROXY_LOCKING to 1 will -** force proxy locking to be used for every database file opened, and 0 -** will force automatic proxy locking to be disabled for all database -** files (explicity calling the SQLITE_SET_LOCKPROXYFILE pragma or -** sqlite_file_control API is not affected by SQLITE_FORCE_PROXY_LOCKING). 
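To make the C API usage described above concrete, a hedged sketch of setting and reading back the lock proxy path through sqlite3_file_control(). It only has an effect on MacOSX builds with SQLITE_ENABLE_LOCKING_STYLE; on other builds these opcodes are expected to be reported as unsupported. The database path matches the example above and is purely illustrative.

#include <stdio.h>
#include <sqlite3.h>

int main(void){
  sqlite3 *db;
  const char *zProxy = 0;
  if( sqlite3_open("/Users/me/foo.db", &db)!=SQLITE_OK ) return 1;
  /* Equivalent to: PRAGMA main.lock_proxy_file=':auto:'; */
  sqlite3_file_control(db, "main", SQLITE_SET_LOCKPROXYFILE, (void*)":auto:");
  sqlite3_file_control(db, "main", SQLITE_GET_LOCKPROXYFILE, &zProxy);
  printf("lock proxy: %s\n", zProxy ? zProxy : "(not set)");
  sqlite3_close(db);
  return 0;
}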
-*/ - -/* -** Proxy locking is only available on MacOSX -*/ -#if defined(__APPLE__) && SQLITE_ENABLE_LOCKING_STYLE - -/* -** The proxyLockingContext has the path and file structures for the remote -** and local proxy files in it -*/ -typedef struct proxyLockingContext proxyLockingContext; -struct proxyLockingContext { - unixFile *conchFile; /* Open conch file */ - char *conchFilePath; /* Name of the conch file */ - unixFile *lockProxy; /* Open proxy lock file */ - char *lockProxyPath; /* Name of the proxy lock file */ - char *dbPath; /* Name of the open file */ - int conchHeld; /* 1 if the conch is held, -1 if lockless */ - void *oldLockingContext; /* Original lockingcontext to restore on close */ - sqlite3_io_methods const *pOldMethod; /* Original I/O methods for close */ -}; - -/* -** The proxy lock file path for the database at dbPath is written into lPath, -** which must point to valid, writable memory large enough for a maxLen length -** file path. -*/ -static int proxyGetLockPath(const char *dbPath, char *lPath, size_t maxLen){ - int len; - int dbLen; - int i; - -#ifdef LOCKPROXYDIR - len = strlcpy(lPath, LOCKPROXYDIR, maxLen); -#else -# ifdef _CS_DARWIN_USER_TEMP_DIR - { - if( !confstr(_CS_DARWIN_USER_TEMP_DIR, lPath, maxLen) ){ - OSTRACE(("GETLOCKPATH failed %s errno=%d pid=%d\n", - lPath, errno, getpid())); - return SQLITE_IOERR_LOCK; - } - len = strlcat(lPath, "sqliteplocks", maxLen); - } -# else - len = strlcpy(lPath, "/tmp/", maxLen); -# endif -#endif - - if( lPath[len-1]!='/' ){ - len = strlcat(lPath, "/", maxLen); - } - - /* transform the db path to a unique cache name */ - dbLen = (int)strlen(dbPath); - for( i=0; i 0) ){ - /* only mkdir if leaf dir != "." or "/" or ".." */ - if( i-start>2 || (i-start==1 && buf[start] != '.' && buf[start] != '/') - || (i-start==2 && buf[start] != '.' && buf[start+1] != '.') ){ - buf[i]='\0'; - if( osMkdir(buf, SQLITE_DEFAULT_PROXYDIR_PERMISSIONS) ){ - int err=errno; - if( err!=EEXIST ) { - OSTRACE(("CREATELOCKPATH FAILED creating %s, " - "'%s' proxy lock path=%s pid=%d\n", - buf, strerror(err), lockPath, getpid())); - return err; - } - } - } - start=i+1; - } - buf[i] = lockPath[i]; - } - OSTRACE(("CREATELOCKPATH proxy lock path=%s pid=%d\n", lockPath, getpid())); - return 0; -} - -/* -** Create a new VFS file descriptor (stored in memory obtained from -** sqlite3_malloc) and open the file named "path" in the file descriptor. -** -** The caller is responsible not only for closing the file descriptor -** but also for freeing the memory associated with the file descriptor. -*/ -static int proxyCreateUnixFile( - const char *path, /* path for the new unixFile */ - unixFile **ppFile, /* unixFile created and returned by ref */ - int islockfile /* if non zero missing dirs will be created */ -) { - int fd = -1; - unixFile *pNew; - int rc = SQLITE_OK; - int openFlags = O_RDWR | O_CREAT; - sqlite3_vfs dummyVfs; - int terrno = 0; - UnixUnusedFd *pUnused = NULL; - - /* 1. first try to open/create the file - ** 2. if that fails, and this is a lock file (not-conch), try creating - ** the parent directories and then try again. - ** 3. 
if that fails, try to open the file read-only - ** otherwise return BUSY (if lock file) or CANTOPEN for the conch file - */ - pUnused = findReusableFd(path, openFlags); - if( pUnused ){ - fd = pUnused->fd; - }else{ - pUnused = sqlite3_malloc(sizeof(*pUnused)); - if( !pUnused ){ - return SQLITE_NOMEM; - } - } - if( fd<0 ){ - fd = robust_open(path, openFlags, 0); - terrno = errno; - if( fd<0 && errno==ENOENT && islockfile ){ - if( proxyCreateLockPath(path) == SQLITE_OK ){ - fd = robust_open(path, openFlags, 0); - } - } - } - if( fd<0 ){ - openFlags = O_RDONLY; - fd = robust_open(path, openFlags, 0); - terrno = errno; - } - if( fd<0 ){ - if( islockfile ){ - return SQLITE_BUSY; - } - switch (terrno) { - case EACCES: - return SQLITE_PERM; - case EIO: - return SQLITE_IOERR_LOCK; /* even though it is the conch */ - default: - return SQLITE_CANTOPEN_BKPT; - } - } - - pNew = (unixFile *)sqlite3_malloc(sizeof(*pNew)); - if( pNew==NULL ){ - rc = SQLITE_NOMEM; - goto end_create_proxy; - } - memset(pNew, 0, sizeof(unixFile)); - pNew->openFlags = openFlags; - memset(&dummyVfs, 0, sizeof(dummyVfs)); - dummyVfs.pAppData = (void*)&autolockIoFinder; - dummyVfs.zName = "dummy"; - pUnused->fd = fd; - pUnused->flags = openFlags; - pNew->pUnused = pUnused; - - rc = fillInUnixFile(&dummyVfs, fd, (sqlite3_file*)pNew, path, 0); - if( rc==SQLITE_OK ){ - *ppFile = pNew; - return SQLITE_OK; - } -end_create_proxy: - robust_close(pNew, fd, __LINE__); - sqlite3_free(pNew); - sqlite3_free(pUnused); - return rc; -} - -#ifdef SQLITE_TEST -/* simulate multiple hosts by creating unique hostid file paths */ -SQLITE_API int sqlite3_hostid_num = 0; -#endif - -#define PROXY_HOSTIDLEN 16 /* conch file host id length */ - -/* Not always defined in the headers as it ought to be */ -extern int gethostuuid(uuid_t id, const struct timespec *wait); - -/* get the host ID via gethostuuid(), pHostID must point to PROXY_HOSTIDLEN -** bytes of writable memory. -*/ -static int proxyGetHostID(unsigned char *pHostID, int *pError){ - assert(PROXY_HOSTIDLEN == sizeof(uuid_t)); - memset(pHostID, 0, PROXY_HOSTIDLEN); -#if defined(__MAX_OS_X_VERSION_MIN_REQUIRED)\ - && __MAC_OS_X_VERSION_MIN_REQUIRED<1050 - { - static const struct timespec timeout = {1, 0}; /* 1 sec timeout */ - if( gethostuuid(pHostID, &timeout) ){ - int err = errno; - if( pError ){ - *pError = err; - } - return SQLITE_IOERR; - } - } -#else - UNUSED_PARAMETER(pError); -#endif -#ifdef SQLITE_TEST - /* simulate multiple hosts by creating unique hostid file paths */ - if( sqlite3_hostid_num != 0){ - pHostID[0] = (char)(pHostID[0] + (char)(sqlite3_hostid_num & 0xFF)); - } -#endif - - return SQLITE_OK; -} - -/* The conch file contains the header, host id and lock file path - */ -#define PROXY_CONCHVERSION 2 /* 1-byte header, 16-byte host id, path */ -#define PROXY_HEADERLEN 1 /* conch file header length */ -#define PROXY_PATHINDEX (PROXY_HEADERLEN+PROXY_HOSTIDLEN) -#define PROXY_MAXCONCHLEN (PROXY_HEADERLEN+PROXY_HOSTIDLEN+MAXPATHLEN) - -/* -** Takes an open conch file, copies the contents to a new path and then moves -** it back. The newly created file's file descriptor is assigned to the -** conch file structure and finally the original conch file descriptor is -** closed. Returns zero if successful. 
-*/ -static int proxyBreakConchLock(unixFile *pFile, uuid_t myHostID){ - proxyLockingContext *pCtx = (proxyLockingContext *)pFile->lockingContext; - unixFile *conchFile = pCtx->conchFile; - char tPath[MAXPATHLEN]; - char buf[PROXY_MAXCONCHLEN]; - char *cPath = pCtx->conchFilePath; - size_t readLen = 0; - size_t pathLen = 0; - char errmsg[64] = ""; - int fd = -1; - int rc = -1; - UNUSED_PARAMETER(myHostID); - - /* create a new path by replace the trailing '-conch' with '-break' */ - pathLen = strlcpy(tPath, cPath, MAXPATHLEN); - if( pathLen>MAXPATHLEN || pathLen<6 || - (strlcpy(&tPath[pathLen-5], "break", 6) != 5) ){ - sqlite3_snprintf(sizeof(errmsg),errmsg,"path error (len %d)",(int)pathLen); - goto end_breaklock; - } - /* read the conch content */ - readLen = osPread(conchFile->h, buf, PROXY_MAXCONCHLEN, 0); - if( readLenh, __LINE__); - conchFile->h = fd; - conchFile->openFlags = O_RDWR | O_CREAT; - -end_breaklock: - if( rc ){ - if( fd>=0 ){ - osUnlink(tPath); - robust_close(pFile, fd, __LINE__); - } - fprintf(stderr, "failed to break stale lock on %s, %s\n", cPath, errmsg); - } - return rc; -} - -/* Take the requested lock on the conch file and break a stale lock if the -** host id matches. -*/ -static int proxyConchLock(unixFile *pFile, uuid_t myHostID, int lockType){ - proxyLockingContext *pCtx = (proxyLockingContext *)pFile->lockingContext; - unixFile *conchFile = pCtx->conchFile; - int rc = SQLITE_OK; - int nTries = 0; - struct timespec conchModTime; - - memset(&conchModTime, 0, sizeof(conchModTime)); - do { - rc = conchFile->pMethod->xLock((sqlite3_file*)conchFile, lockType); - nTries ++; - if( rc==SQLITE_BUSY ){ - /* If the lock failed (busy): - * 1st try: get the mod time of the conch, wait 0.5s and try again. - * 2nd try: fail if the mod time changed or host id is different, wait - * 10 sec and try again - * 3rd try: break the lock unless the mod time has changed. - */ - struct stat buf; - if( osFstat(conchFile->h, &buf) ){ - pFile->lastErrno = errno; - return SQLITE_IOERR_LOCK; - } - - if( nTries==1 ){ - conchModTime = buf.st_mtimespec; - usleep(500000); /* wait 0.5 sec and try the lock again*/ - continue; - } - - assert( nTries>1 ); - if( conchModTime.tv_sec != buf.st_mtimespec.tv_sec || - conchModTime.tv_nsec != buf.st_mtimespec.tv_nsec ){ - return SQLITE_BUSY; - } - - if( nTries==2 ){ - char tBuf[PROXY_MAXCONCHLEN]; - int len = osPread(conchFile->h, tBuf, PROXY_MAXCONCHLEN, 0); - if( len<0 ){ - pFile->lastErrno = errno; - return SQLITE_IOERR_LOCK; - } - if( len>PROXY_PATHINDEX && tBuf[0]==(char)PROXY_CONCHVERSION){ - /* don't break the lock if the host id doesn't match */ - if( 0!=memcmp(&tBuf[PROXY_HEADERLEN], myHostID, PROXY_HOSTIDLEN) ){ - return SQLITE_BUSY; - } - }else{ - /* don't break the lock on short read or a version mismatch */ - return SQLITE_BUSY; - } - usleep(10000000); /* wait 10 sec and try the lock again */ - continue; - } - - assert( nTries==3 ); - if( 0==proxyBreakConchLock(pFile, myHostID) ){ - rc = SQLITE_OK; - if( lockType==EXCLUSIVE_LOCK ){ - rc = conchFile->pMethod->xLock((sqlite3_file*)conchFile, SHARED_LOCK); - } - if( !rc ){ - rc = conchFile->pMethod->xLock((sqlite3_file*)conchFile, lockType); - } - } - } - } while( rc==SQLITE_BUSY && nTries<3 ); - - return rc; -} - -/* Takes the conch by taking a shared lock and read the contents conch, if -** lockPath is non-NULL, the host ID and lock file path must match. 
A NULL -** lockPath means that the lockPath in the conch file will be used if the -** host IDs match, or a new lock path will be generated automatically -** and written to the conch file. -*/ -static int proxyTakeConch(unixFile *pFile){ - proxyLockingContext *pCtx = (proxyLockingContext *)pFile->lockingContext; - - if( pCtx->conchHeld!=0 ){ - return SQLITE_OK; - }else{ - unixFile *conchFile = pCtx->conchFile; - uuid_t myHostID; - int pError = 0; - char readBuf[PROXY_MAXCONCHLEN]; - char lockPath[MAXPATHLEN]; - char *tempLockPath = NULL; - int rc = SQLITE_OK; - int createConch = 0; - int hostIdMatch = 0; - int readLen = 0; - int tryOldLockPath = 0; - int forceNewLockPath = 0; - - OSTRACE(("TAKECONCH %d for %s pid=%d\n", conchFile->h, - (pCtx->lockProxyPath ? pCtx->lockProxyPath : ":auto:"), getpid())); - - rc = proxyGetHostID(myHostID, &pError); - if( (rc&0xff)==SQLITE_IOERR ){ - pFile->lastErrno = pError; - goto end_takeconch; - } - rc = proxyConchLock(pFile, myHostID, SHARED_LOCK); - if( rc!=SQLITE_OK ){ - goto end_takeconch; - } - /* read the existing conch file */ - readLen = seekAndRead((unixFile*)conchFile, 0, readBuf, PROXY_MAXCONCHLEN); - if( readLen<0 ){ - /* I/O error: lastErrno set by seekAndRead */ - pFile->lastErrno = conchFile->lastErrno; - rc = SQLITE_IOERR_READ; - goto end_takeconch; - }else if( readLen<=(PROXY_HEADERLEN+PROXY_HOSTIDLEN) || - readBuf[0]!=(char)PROXY_CONCHVERSION ){ - /* a short read or version format mismatch means we need to create a new - ** conch file. - */ - createConch = 1; - } - /* if the host id matches and the lock path already exists in the conch - ** we'll try to use the path there, if we can't open that path, we'll - ** retry with a new auto-generated path - */ - do { /* in case we need to try again for an :auto: named lock file */ - - if( !createConch && !forceNewLockPath ){ - hostIdMatch = !memcmp(&readBuf[PROXY_HEADERLEN], myHostID, - PROXY_HOSTIDLEN); - /* if the conch has data compare the contents */ - if( !pCtx->lockProxyPath ){ - /* for auto-named local lock file, just check the host ID and we'll - ** use the local lock file path that's already in there - */ - if( hostIdMatch ){ - size_t pathLen = (readLen - PROXY_PATHINDEX); - - if( pathLen>=MAXPATHLEN ){ - pathLen=MAXPATHLEN-1; - } - memcpy(lockPath, &readBuf[PROXY_PATHINDEX], pathLen); - lockPath[pathLen] = 0; - tempLockPath = lockPath; - tryOldLockPath = 1; - /* create a copy of the lock path if the conch is taken */ - goto end_takeconch; - } - }else if( hostIdMatch - && !strncmp(pCtx->lockProxyPath, &readBuf[PROXY_PATHINDEX], - readLen-PROXY_PATHINDEX) - ){ - /* conch host and lock path match */ - goto end_takeconch; - } - } - - /* if the conch isn't writable and doesn't match, we can't take it */ - if( (conchFile->openFlags&O_RDWR) == 0 ){ - rc = SQLITE_BUSY; - goto end_takeconch; - } - - /* either the conch didn't match or we need to create a new one */ - if( !pCtx->lockProxyPath ){ - proxyGetLockPath(pCtx->dbPath, lockPath, MAXPATHLEN); - tempLockPath = lockPath; - /* create a copy of the lock path _only_ if the conch is taken */ - } - - /* update conch with host and path (this will fail if other process - ** has a shared lock already), if the host id matches, use the big - ** stick. - */ - futimes(conchFile->h, NULL); - if( hostIdMatch && !createConch ){ - if( conchFile->pInode && conchFile->pInode->nShared>1 ){ - /* We are trying for an exclusive lock but another thread in this - ** same process is still holding a shared lock. 
*/ - rc = SQLITE_BUSY; - } else { - rc = proxyConchLock(pFile, myHostID, EXCLUSIVE_LOCK); - } - }else{ - rc = conchFile->pMethod->xLock((sqlite3_file*)conchFile, EXCLUSIVE_LOCK); - } - if( rc==SQLITE_OK ){ - char writeBuffer[PROXY_MAXCONCHLEN]; - int writeSize = 0; - - writeBuffer[0] = (char)PROXY_CONCHVERSION; - memcpy(&writeBuffer[PROXY_HEADERLEN], myHostID, PROXY_HOSTIDLEN); - if( pCtx->lockProxyPath!=NULL ){ - strlcpy(&writeBuffer[PROXY_PATHINDEX], pCtx->lockProxyPath, MAXPATHLEN); - }else{ - strlcpy(&writeBuffer[PROXY_PATHINDEX], tempLockPath, MAXPATHLEN); - } - writeSize = PROXY_PATHINDEX + strlen(&writeBuffer[PROXY_PATHINDEX]); - robust_ftruncate(conchFile->h, writeSize); - rc = unixWrite((sqlite3_file *)conchFile, writeBuffer, writeSize, 0); - fsync(conchFile->h); - /* If we created a new conch file (not just updated the contents of a - ** valid conch file), try to match the permissions of the database - */ - if( rc==SQLITE_OK && createConch ){ - struct stat buf; - int err = osFstat(pFile->h, &buf); - if( err==0 ){ - mode_t cmode = buf.st_mode&(S_IRUSR|S_IWUSR | S_IRGRP|S_IWGRP | - S_IROTH|S_IWOTH); - /* try to match the database file R/W permissions, ignore failure */ -#ifndef SQLITE_PROXY_DEBUG - osFchmod(conchFile->h, cmode); -#else - do{ - rc = osFchmod(conchFile->h, cmode); - }while( rc==(-1) && errno==EINTR ); - if( rc!=0 ){ - int code = errno; - fprintf(stderr, "fchmod %o FAILED with %d %s\n", - cmode, code, strerror(code)); - } else { - fprintf(stderr, "fchmod %o SUCCEDED\n",cmode); - } - }else{ - int code = errno; - fprintf(stderr, "STAT FAILED[%d] with %d %s\n", - err, code, strerror(code)); -#endif - } - } - } - conchFile->pMethod->xUnlock((sqlite3_file*)conchFile, SHARED_LOCK); - - end_takeconch: - OSTRACE(("TRANSPROXY: CLOSE %d\n", pFile->h)); - if( rc==SQLITE_OK && pFile->openFlags ){ - int fd; - if( pFile->h>=0 ){ - robust_close(pFile, pFile->h, __LINE__); - } - pFile->h = -1; - fd = robust_open(pCtx->dbPath, pFile->openFlags, 0); - OSTRACE(("TRANSPROXY: OPEN %d\n", fd)); - if( fd>=0 ){ - pFile->h = fd; - }else{ - rc=SQLITE_CANTOPEN_BKPT; /* SQLITE_BUSY? proxyTakeConch called - during locking */ - } - } - if( rc==SQLITE_OK && !pCtx->lockProxy ){ - char *path = tempLockPath ? tempLockPath : pCtx->lockProxyPath; - rc = proxyCreateUnixFile(path, &pCtx->lockProxy, 1); - if( rc!=SQLITE_OK && rc!=SQLITE_NOMEM && tryOldLockPath ){ - /* we couldn't create the proxy lock file with the old lock file path - ** so try again via auto-naming - */ - forceNewLockPath = 1; - tryOldLockPath = 0; - continue; /* go back to the do {} while start point, try again */ - } - } - if( rc==SQLITE_OK ){ - /* Need to make a copy of path if we extracted the value - ** from the conch file or the path was allocated on the stack - */ - if( tempLockPath ){ - pCtx->lockProxyPath = sqlite3DbStrDup(0, tempLockPath); - if( !pCtx->lockProxyPath ){ - rc = SQLITE_NOMEM; - } - } - } - if( rc==SQLITE_OK ){ - pCtx->conchHeld = 1; - - if( pCtx->lockProxy->pMethod == &afpIoMethods ){ - afpLockingContext *afpCtx; - afpCtx = (afpLockingContext *)pCtx->lockProxy->lockingContext; - afpCtx->dbPath = pCtx->lockProxyPath; - } - } else { - conchFile->pMethod->xUnlock((sqlite3_file*)conchFile, NO_LOCK); - } - OSTRACE(("TAKECONCH %d %s\n", conchFile->h, - rc==SQLITE_OK?"ok":"failed")); - return rc; - } while (1); /* in case we need to retry the :auto: lock file - - ** we should never get here except via the 'continue' call. */ - } -} - -/* -** If pFile holds a lock on a conch file, then release that lock. 
-*/ -static int proxyReleaseConch(unixFile *pFile){ - int rc = SQLITE_OK; /* Subroutine return code */ - proxyLockingContext *pCtx; /* The locking context for the proxy lock */ - unixFile *conchFile; /* Name of the conch file */ - - pCtx = (proxyLockingContext *)pFile->lockingContext; - conchFile = pCtx->conchFile; - OSTRACE(("RELEASECONCH %d for %s pid=%d\n", conchFile->h, - (pCtx->lockProxyPath ? pCtx->lockProxyPath : ":auto:"), - getpid())); - if( pCtx->conchHeld>0 ){ - rc = conchFile->pMethod->xUnlock((sqlite3_file*)conchFile, NO_LOCK); - } - pCtx->conchHeld = 0; - OSTRACE(("RELEASECONCH %d %s\n", conchFile->h, - (rc==SQLITE_OK ? "ok" : "failed"))); - return rc; -} - -/* -** Given the name of a database file, compute the name of its conch file. -** Store the conch filename in memory obtained from sqlite3_malloc(). -** Make *pConchPath point to the new name. Return SQLITE_OK on success -** or SQLITE_NOMEM if unable to obtain memory. -** -** The caller is responsible for ensuring that the allocated memory -** space is eventually freed. -** -** *pConchPath is set to NULL if a memory allocation error occurs. -*/ -static int proxyCreateConchPathname(char *dbPath, char **pConchPath){ - int i; /* Loop counter */ - int len = (int)strlen(dbPath); /* Length of database filename - dbPath */ - char *conchPath; /* buffer in which to construct conch name */ - - /* Allocate space for the conch filename and initialize the name to - ** the name of the original database file. */ - *pConchPath = conchPath = (char *)sqlite3_malloc(len + 8); - if( conchPath==0 ){ - return SQLITE_NOMEM; - } - memcpy(conchPath, dbPath, len+1); - - /* now insert a "." before the last / character */ - for( i=(len-1); i>=0; i-- ){ - if( conchPath[i]=='/' ){ - i++; - break; - } - } - conchPath[i]='.'; - while ( ilockingContext; - char *oldPath = pCtx->lockProxyPath; - int rc = SQLITE_OK; - - if( pFile->eFileLock!=NO_LOCK ){ - return SQLITE_BUSY; - } - - /* nothing to do if the path is NULL, :auto: or matches the existing path */ - if( !path || path[0]=='\0' || !strcmp(path, ":auto:") || - (oldPath && !strncmp(oldPath, path, MAXPATHLEN)) ){ - return SQLITE_OK; - }else{ - unixFile *lockProxy = pCtx->lockProxy; - pCtx->lockProxy=NULL; - pCtx->conchHeld = 0; - if( lockProxy!=NULL ){ - rc=lockProxy->pMethod->xClose((sqlite3_file *)lockProxy); - if( rc ) return rc; - sqlite3_free(lockProxy); - } - sqlite3_free(oldPath); - pCtx->lockProxyPath = sqlite3DbStrDup(0, path); - } - - return rc; -} - -/* -** pFile is a file that has been opened by a prior xOpen call. dbPath -** is a string buffer at least MAXPATHLEN+1 characters in size. -** -** This routine find the filename associated with pFile and writes it -** int dbPath. 
-*/ -static int proxyGetDbPathForUnixFile(unixFile *pFile, char *dbPath){ -#if defined(__APPLE__) - if( pFile->pMethod == &afpIoMethods ){ - /* afp style keeps a reference to the db path in the filePath field - ** of the struct */ - assert( (int)strlen((char*)pFile->lockingContext)<=MAXPATHLEN ); - strlcpy(dbPath, ((afpLockingContext *)pFile->lockingContext)->dbPath, MAXPATHLEN); - } else -#endif - if( pFile->pMethod == &dotlockIoMethods ){ - /* dot lock style uses the locking context to store the dot lock - ** file path */ - int len = strlen((char *)pFile->lockingContext) - strlen(DOTLOCK_SUFFIX); - memcpy(dbPath, (char *)pFile->lockingContext, len + 1); - }else{ - /* all other styles use the locking context to store the db file path */ - assert( strlen((char*)pFile->lockingContext)<=MAXPATHLEN ); - strlcpy(dbPath, (char *)pFile->lockingContext, MAXPATHLEN); - } - return SQLITE_OK; -} - -/* -** Takes an already filled in unix file and alters it so all file locking -** will be performed on the local proxy lock file. The following fields -** are preserved in the locking context so that they can be restored and -** the unix structure properly cleaned up at close time: -** ->lockingContext -** ->pMethod -*/ -static int proxyTransformUnixFile(unixFile *pFile, const char *path) { - proxyLockingContext *pCtx; - char dbPath[MAXPATHLEN+1]; /* Name of the database file */ - char *lockPath=NULL; - int rc = SQLITE_OK; - - if( pFile->eFileLock!=NO_LOCK ){ - return SQLITE_BUSY; - } - proxyGetDbPathForUnixFile(pFile, dbPath); - if( !path || path[0]=='\0' || !strcmp(path, ":auto:") ){ - lockPath=NULL; - }else{ - lockPath=(char *)path; - } - - OSTRACE(("TRANSPROXY %d for %s pid=%d\n", pFile->h, - (lockPath ? lockPath : ":auto:"), getpid())); - - pCtx = sqlite3_malloc( sizeof(*pCtx) ); - if( pCtx==0 ){ - return SQLITE_NOMEM; - } - memset(pCtx, 0, sizeof(*pCtx)); - - rc = proxyCreateConchPathname(dbPath, &pCtx->conchFilePath); - if( rc==SQLITE_OK ){ - rc = proxyCreateUnixFile(pCtx->conchFilePath, &pCtx->conchFile, 0); - if( rc==SQLITE_CANTOPEN && ((pFile->openFlags&O_RDWR) == 0) ){ - /* if (a) the open flags are not O_RDWR, (b) the conch isn't there, and - ** (c) the file system is read-only, then enable no-locking access. - ** Ugh, since O_RDONLY==0x0000 we test for !O_RDWR since unixOpen asserts - ** that openFlags will have only one of O_RDONLY or O_RDWR. - */ - struct statfs fsInfo; - struct stat conchInfo; - int goLockless = 0; - - if( osStat(pCtx->conchFilePath, &conchInfo) == -1 ) { - int err = errno; - if( (err==ENOENT) && (statfs(dbPath, &fsInfo) != -1) ){ - goLockless = (fsInfo.f_flags&MNT_RDONLY) == MNT_RDONLY; - } - } - if( goLockless ){ - pCtx->conchHeld = -1; /* read only FS/ lockless */ - rc = SQLITE_OK; - } - } - } - if( rc==SQLITE_OK && lockPath ){ - pCtx->lockProxyPath = sqlite3DbStrDup(0, lockPath); - } - - if( rc==SQLITE_OK ){ - pCtx->dbPath = sqlite3DbStrDup(0, dbPath); - if( pCtx->dbPath==NULL ){ - rc = SQLITE_NOMEM; - } - } - if( rc==SQLITE_OK ){ - /* all memory is allocated, proxys are created and assigned, - ** switch the locking context and pMethod then return. 
- */ - pCtx->oldLockingContext = pFile->lockingContext; - pFile->lockingContext = pCtx; - pCtx->pOldMethod = pFile->pMethod; - pFile->pMethod = &proxyIoMethods; - }else{ - if( pCtx->conchFile ){ - pCtx->conchFile->pMethod->xClose((sqlite3_file *)pCtx->conchFile); - sqlite3_free(pCtx->conchFile); - } - sqlite3DbFree(0, pCtx->lockProxyPath); - sqlite3_free(pCtx->conchFilePath); - sqlite3_free(pCtx); - } - OSTRACE(("TRANSPROXY %d %s\n", pFile->h, - (rc==SQLITE_OK ? "ok" : "failed"))); - return rc; -} - - -/* -** This routine handles sqlite3_file_control() calls that are specific -** to proxy locking. -*/ -static int proxyFileControl(sqlite3_file *id, int op, void *pArg){ - switch( op ){ - case SQLITE_GET_LOCKPROXYFILE: { - unixFile *pFile = (unixFile*)id; - if( pFile->pMethod == &proxyIoMethods ){ - proxyLockingContext *pCtx = (proxyLockingContext*)pFile->lockingContext; - proxyTakeConch(pFile); - if( pCtx->lockProxyPath ){ - *(const char **)pArg = pCtx->lockProxyPath; - }else{ - *(const char **)pArg = ":auto: (not held)"; - } - } else { - *(const char **)pArg = NULL; - } - return SQLITE_OK; - } - case SQLITE_SET_LOCKPROXYFILE: { - unixFile *pFile = (unixFile*)id; - int rc = SQLITE_OK; - int isProxyStyle = (pFile->pMethod == &proxyIoMethods); - if( pArg==NULL || (const char *)pArg==0 ){ - if( isProxyStyle ){ - /* turn off proxy locking - not supported */ - rc = SQLITE_ERROR /*SQLITE_PROTOCOL? SQLITE_MISUSE?*/; - }else{ - /* turn off proxy locking - already off - NOOP */ - rc = SQLITE_OK; - } - }else{ - const char *proxyPath = (const char *)pArg; - if( isProxyStyle ){ - proxyLockingContext *pCtx = - (proxyLockingContext*)pFile->lockingContext; - if( !strcmp(pArg, ":auto:") - || (pCtx->lockProxyPath && - !strncmp(pCtx->lockProxyPath, proxyPath, MAXPATHLEN)) - ){ - rc = SQLITE_OK; - }else{ - rc = switchLockProxyPath(pFile, proxyPath); - } - }else{ - /* turn on proxy file locking */ - rc = proxyTransformUnixFile(pFile, proxyPath); - } - } - return rc; - } - default: { - assert( 0 ); /* The call assures that only valid opcodes are sent */ - } - } - /*NOTREACHED*/ - return SQLITE_ERROR; -} - -/* -** Within this division (the proxying locking implementation) the procedures -** above this point are all utilities. The lock-related methods of the -** proxy-locking sqlite3_io_method object follow. -*/ - - -/* -** This routine checks if there is a RESERVED lock held on the specified -** file by this or any other process. If such a lock is held, set *pResOut -** to a non-zero value otherwise *pResOut is set to zero. The return value -** is set to SQLITE_OK unless an I/O error occurs during lock checking. -*/ -static int proxyCheckReservedLock(sqlite3_file *id, int *pResOut) { - unixFile *pFile = (unixFile*)id; - int rc = proxyTakeConch(pFile); - if( rc==SQLITE_OK ){ - proxyLockingContext *pCtx = (proxyLockingContext *)pFile->lockingContext; - if( pCtx->conchHeld>0 ){ - unixFile *proxy = pCtx->lockProxy; - return proxy->pMethod->xCheckReservedLock((sqlite3_file*)proxy, pResOut); - }else{ /* conchHeld < 0 is lockless */ - pResOut=0; - } - } - return rc; -} - -/* -** Lock the file with the lock specified by parameter eFileLock - one -** of the following: -** -** (1) SHARED_LOCK -** (2) RESERVED_LOCK -** (3) PENDING_LOCK -** (4) EXCLUSIVE_LOCK -** -** Sometimes when requesting one lock state, additional lock states -** are inserted in between. The locking might fail on one of the later -** transitions leaving the lock state different from what it started but -** still short of its goal. 
The following chart shows the allowed -** transitions and the inserted intermediate states: -** -** UNLOCKED -> SHARED -** SHARED -> RESERVED -** SHARED -> (PENDING) -> EXCLUSIVE -** RESERVED -> (PENDING) -> EXCLUSIVE -** PENDING -> EXCLUSIVE -** -** This routine will only increase a lock. Use the sqlite3OsUnlock() -** routine to lower a locking level. -*/ -static int proxyLock(sqlite3_file *id, int eFileLock) { - unixFile *pFile = (unixFile*)id; - int rc = proxyTakeConch(pFile); - if( rc==SQLITE_OK ){ - proxyLockingContext *pCtx = (proxyLockingContext *)pFile->lockingContext; - if( pCtx->conchHeld>0 ){ - unixFile *proxy = pCtx->lockProxy; - rc = proxy->pMethod->xLock((sqlite3_file*)proxy, eFileLock); - pFile->eFileLock = proxy->eFileLock; - }else{ - /* conchHeld < 0 is lockless */ - } - } - return rc; -} - - -/* -** Lower the locking level on file descriptor pFile to eFileLock. eFileLock -** must be either NO_LOCK or SHARED_LOCK. -** -** If the locking level of the file descriptor is already at or below -** the requested locking level, this routine is a no-op. -*/ -static int proxyUnlock(sqlite3_file *id, int eFileLock) { - unixFile *pFile = (unixFile*)id; - int rc = proxyTakeConch(pFile); - if( rc==SQLITE_OK ){ - proxyLockingContext *pCtx = (proxyLockingContext *)pFile->lockingContext; - if( pCtx->conchHeld>0 ){ - unixFile *proxy = pCtx->lockProxy; - rc = proxy->pMethod->xUnlock((sqlite3_file*)proxy, eFileLock); - pFile->eFileLock = proxy->eFileLock; - }else{ - /* conchHeld < 0 is lockless */ - } - } - return rc; -} - -/* -** Close a file that uses proxy locks. -*/ -static int proxyClose(sqlite3_file *id) { - if( id ){ - unixFile *pFile = (unixFile*)id; - proxyLockingContext *pCtx = (proxyLockingContext *)pFile->lockingContext; - unixFile *lockProxy = pCtx->lockProxy; - unixFile *conchFile = pCtx->conchFile; - int rc = SQLITE_OK; - - if( lockProxy ){ - rc = lockProxy->pMethod->xUnlock((sqlite3_file*)lockProxy, NO_LOCK); - if( rc ) return rc; - rc = lockProxy->pMethod->xClose((sqlite3_file*)lockProxy); - if( rc ) return rc; - sqlite3_free(lockProxy); - pCtx->lockProxy = 0; - } - if( conchFile ){ - if( pCtx->conchHeld ){ - rc = proxyReleaseConch(pFile); - if( rc ) return rc; - } - rc = conchFile->pMethod->xClose((sqlite3_file*)conchFile); - if( rc ) return rc; - sqlite3_free(conchFile); - } - sqlite3DbFree(0, pCtx->lockProxyPath); - sqlite3_free(pCtx->conchFilePath); - sqlite3DbFree(0, pCtx->dbPath); - /* restore the original locking context and pMethod then close it */ - pFile->lockingContext = pCtx->oldLockingContext; - pFile->pMethod = pCtx->pOldMethod; - sqlite3_free(pCtx); - return pFile->pMethod->xClose(id); - } - return SQLITE_OK; -} - - - -#endif /* defined(__APPLE__) && SQLITE_ENABLE_LOCKING_STYLE */ -/* -** The proxy locking style is intended for use with AFP filesystems. -** And since AFP is only supported on MacOSX, the proxy locking is also -** restricted to MacOSX. -** -** -******************* End of the proxy lock implementation ********************** -******************************************************************************/ - -/* -** Initialize the operating system interface. -** -** This routine registers all VFS implementations for unix-like operating -** systems. This routine, and the sqlite3_os_end() routine that follows, -** should be the only routines in this file that are visible from other -** files. -** -** This routine is called once during SQLite initialization and by a -** single thread. 
The memory allocation and mutex subsystems have not -** necessarily been initialized when this routine is called, and so they -** should not be used. -*/ -SQLITE_API int sqlite3_os_init(void){ - /* - ** The following macro defines an initializer for an sqlite3_vfs object. - ** The name of the VFS is NAME. The pAppData is a pointer to a pointer - ** to the "finder" function. (pAppData is a pointer to a pointer because - ** silly C90 rules prohibit a void* from being cast to a function pointer - ** and so we have to go through the intermediate pointer to avoid problems - ** when compiling with -pedantic-errors on GCC.) - ** - ** The FINDER parameter to this macro is the name of the pointer to the - ** finder-function. The finder-function returns a pointer to the - ** sqlite_io_methods object that implements the desired locking - ** behaviors. See the division above that contains the IOMETHODS - ** macro for addition information on finder-functions. - ** - ** Most finders simply return a pointer to a fixed sqlite3_io_methods - ** object. But the "autolockIoFinder" available on MacOSX does a little - ** more than that; it looks at the filesystem type that hosts the - ** database file and tries to choose an locking method appropriate for - ** that filesystem time. - */ - #define UNIXVFS(VFSNAME, FINDER) { \ - 3, /* iVersion */ \ - sizeof(unixFile), /* szOsFile */ \ - MAX_PATHNAME, /* mxPathname */ \ - 0, /* pNext */ \ - VFSNAME, /* zName */ \ - (void*)&FINDER, /* pAppData */ \ - unixOpen, /* xOpen */ \ - unixDelete, /* xDelete */ \ - unixAccess, /* xAccess */ \ - unixFullPathname, /* xFullPathname */ \ - unixDlOpen, /* xDlOpen */ \ - unixDlError, /* xDlError */ \ - unixDlSym, /* xDlSym */ \ - unixDlClose, /* xDlClose */ \ - unixRandomness, /* xRandomness */ \ - unixSleep, /* xSleep */ \ - unixCurrentTime, /* xCurrentTime */ \ - unixGetLastError, /* xGetLastError */ \ - unixCurrentTimeInt64, /* xCurrentTimeInt64 */ \ - unixSetSystemCall, /* xSetSystemCall */ \ - unixGetSystemCall, /* xGetSystemCall */ \ - unixNextSystemCall, /* xNextSystemCall */ \ - } - - /* - ** All default VFSes for unix are contained in the following array. - ** - ** Note that the sqlite3_vfs.pNext field of the VFS object is modified - ** by the SQLite core when the VFS is registered. So the following - ** array cannot be const. - */ - static sqlite3_vfs aVfs[] = { -#if SQLITE_ENABLE_LOCKING_STYLE && (OS_VXWORKS || defined(__APPLE__)) - UNIXVFS("unix", autolockIoFinder ), -#else - UNIXVFS("unix", posixIoFinder ), -#endif - UNIXVFS("unix-none", nolockIoFinder ), - UNIXVFS("unix-dotfile", dotlockIoFinder ), - UNIXVFS("unix-excl", posixIoFinder ), -#if OS_VXWORKS - UNIXVFS("unix-namedsem", semIoFinder ), -#endif -#if SQLITE_ENABLE_LOCKING_STYLE - UNIXVFS("unix-posix", posixIoFinder ), -#if !OS_VXWORKS - UNIXVFS("unix-flock", flockIoFinder ), -#endif -#endif -#if SQLITE_ENABLE_LOCKING_STYLE && defined(__APPLE__) - UNIXVFS("unix-afp", afpIoFinder ), - UNIXVFS("unix-nfs", nfsIoFinder ), - UNIXVFS("unix-proxy", proxyIoFinder ), -#endif - }; - unsigned int i; /* Loop counter */ - - /* Double-check that the aSyscall[] array has been constructed - ** correctly. See ticket [bb3a86e890c8e96ab] */ - assert( ArraySize(aSyscall)==25 ); - - /* Register all VFSes defined in the aVfs[] array */ - for(i=0; i<(sizeof(aVfs)/sizeof(sqlite3_vfs)); i++){ - sqlite3_vfs_register(&aVfs[i], i==0); - } - return SQLITE_OK; -} - -/* -** Shutdown the operating system interface. 
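For orientation, a small sketch (illustrative only) that lists the VFS names registered by sqlite3_os_init() above by walking the pNext chain from the default VFS. On a plain unix build it should print the default "unix" VFS first, followed by the other built-in variants such as unix-none, unix-dotfile and unix-excl (order of the non-default entries may vary).

#include <stdio.h>
#include <sqlite3.h>

int main(void){
  sqlite3_vfs *p, *pDflt;
  sqlite3_initialize();            /* runs sqlite3_os_init() and registers aVfs[] */
  pDflt = sqlite3_vfs_find(0);     /* head of the list is the default VFS */
  for(p=pDflt; p; p=p->pNext){
    printf("%s%s\n", p->zName, p==pDflt ? "  (default)" : "");
  }
  return 0;
}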
-** -** Some operating systems might need to do some cleanup in this routine, -** to release dynamically allocated objects. But not on unix. -** This routine is a no-op for unix. -*/ -SQLITE_API int sqlite3_os_end(void){ - return SQLITE_OK; -} - -#endif /* SQLITE_OS_UNIX */ - -/************** End of os_unix.c *********************************************/ -/************** Begin file os_win.c ******************************************/ -/* -** 2004 May 22 -** -** The author disclaims copyright to this source code. In place of -** a legal notice, here is a blessing: -** -** May you do good and not evil. -** May you find forgiveness for yourself and forgive others. -** May you share freely, never taking more than you give. -** -****************************************************************************** -** -** This file contains code that is specific to Windows. -*/ -#if SQLITE_OS_WIN /* This file is used for Windows only */ - -/* -** Include code that is common to all os_*.c files -*/ -/************** Include os_common.h in the middle of os_win.c ****************/ -/************** Begin file os_common.h ***************************************/ -/* -** 2004 May 22 -** -** The author disclaims copyright to this source code. In place of -** a legal notice, here is a blessing: -** -** May you do good and not evil. -** May you find forgiveness for yourself and forgive others. -** May you share freely, never taking more than you give. -** -****************************************************************************** -** -** This file contains macros and a little bit of code that is common to -** all of the platform-specific files (os_*.c) and is #included into those -** files. -** -** This file should be #included by the os_*.c files only. It is not a -** general purpose header file. -*/ -#ifndef _OS_COMMON_H_ -#define _OS_COMMON_H_ - -/* -** At least two bugs have slipped in because we changed the MEMORY_DEBUG -** macro to SQLITE_DEBUG and some older makefiles have not yet made the -** switch. The following code should catch this problem at compile-time. -*/ -#ifdef MEMORY_DEBUG -# error "The MEMORY_DEBUG macro is obsolete. Use SQLITE_DEBUG instead." -#endif - -#if defined(SQLITE_TEST) && defined(SQLITE_DEBUG) -# ifndef SQLITE_DEBUG_OS_TRACE -# define SQLITE_DEBUG_OS_TRACE 0 -# endif - int sqlite3OSTrace = SQLITE_DEBUG_OS_TRACE; -# define OSTRACE(X) if( sqlite3OSTrace ) sqlite3DebugPrintf X -#else -# define OSTRACE(X) -#endif - -/* -** Macros for performance tracing. Normally turned off. Only works -** on i486 hardware. -*/ -#ifdef SQLITE_PERFORMANCE_TRACE - -/* -** hwtime.h contains inline assembler code for implementing -** high-performance timing routines. -*/ -/************** Include hwtime.h in the middle of os_common.h ****************/ -/************** Begin file hwtime.h ******************************************/ -/* -** 2008 May 27 -** -** The author disclaims copyright to this source code. In place of -** a legal notice, here is a blessing: -** -** May you do good and not evil. -** May you find forgiveness for yourself and forgive others. -** May you share freely, never taking more than you give. -** -****************************************************************************** -** -** This file contains inline asm code for retrieving "high-performance" -** counters for x86 class CPUs. -*/ -#ifndef _HWTIME_H_ -#define _HWTIME_H_ - -/* -** The following routine only works on pentium-class (or newer) processors. 
-** It uses the RDTSC opcode to read the cycle count value out of the -** processor and returns that value. This can be used for high-res -** profiling. -*/ -#if (defined(__GNUC__) || defined(_MSC_VER)) && \ - (defined(i386) || defined(__i386__) || defined(_M_IX86)) - - #if defined(__GNUC__) - - __inline__ sqlite_uint64 sqlite3Hwtime(void){ - unsigned int lo, hi; - __asm__ __volatile__ ("rdtsc" : "=a" (lo), "=d" (hi)); - return (sqlite_uint64)hi << 32 | lo; - } - - #elif defined(_MSC_VER) - - __declspec(naked) __inline sqlite_uint64 __cdecl sqlite3Hwtime(void){ - __asm { - rdtsc - ret ; return value at EDX:EAX - } - } - - #endif - -#elif (defined(__GNUC__) && defined(__x86_64__)) - - __inline__ sqlite_uint64 sqlite3Hwtime(void){ - unsigned long val; - __asm__ __volatile__ ("rdtsc" : "=A" (val)); - return val; - } - -#elif (defined(__GNUC__) && defined(__ppc__)) - - __inline__ sqlite_uint64 sqlite3Hwtime(void){ - unsigned long long retval; - unsigned long junk; - __asm__ __volatile__ ("\n\ - 1: mftbu %1\n\ - mftb %L0\n\ - mftbu %0\n\ - cmpw %0,%1\n\ - bne 1b" - : "=r" (retval), "=r" (junk)); - return retval; - } - -#else - - #error Need implementation of sqlite3Hwtime() for your platform. - - /* - ** To compile without implementing sqlite3Hwtime() for your platform, - ** you can remove the above #error and use the following - ** stub function. You will lose timing support for many - ** of the debugging and testing utilities, but it should at - ** least compile and run. - */ -SQLITE_PRIVATE sqlite_uint64 sqlite3Hwtime(void){ return ((sqlite_uint64)0); } - -#endif - -#endif /* !defined(_HWTIME_H_) */ - -/************** End of hwtime.h **********************************************/ -/************** Continuing where we left off in os_common.h ******************/ - -static sqlite_uint64 g_start; -static sqlite_uint64 g_elapsed; -#define TIMER_START g_start=sqlite3Hwtime() -#define TIMER_END g_elapsed=sqlite3Hwtime()-g_start -#define TIMER_ELAPSED g_elapsed -#else -#define TIMER_START -#define TIMER_END -#define TIMER_ELAPSED ((sqlite_uint64)0) -#endif - -/* -** If we compile with the SQLITE_TEST macro set, then the following block -** of code will give us the ability to simulate a disk I/O error. This -** is used for testing the I/O recovery logic. 
-*/ -#ifdef SQLITE_TEST -SQLITE_API int sqlite3_io_error_hit = 0; /* Total number of I/O Errors */ -SQLITE_API int sqlite3_io_error_hardhit = 0; /* Number of non-benign errors */ -SQLITE_API int sqlite3_io_error_pending = 0; /* Count down to first I/O error */ -SQLITE_API int sqlite3_io_error_persist = 0; /* True if I/O errors persist */ -SQLITE_API int sqlite3_io_error_benign = 0; /* True if errors are benign */ -SQLITE_API int sqlite3_diskfull_pending = 0; -SQLITE_API int sqlite3_diskfull = 0; -#define SimulateIOErrorBenign(X) sqlite3_io_error_benign=(X) -#define SimulateIOError(CODE) \ - if( (sqlite3_io_error_persist && sqlite3_io_error_hit) \ - || sqlite3_io_error_pending-- == 1 ) \ - { local_ioerr(); CODE; } -static void local_ioerr(){ - IOTRACE(("IOERR\n")); - sqlite3_io_error_hit++; - if( !sqlite3_io_error_benign ) sqlite3_io_error_hardhit++; -} -#define SimulateDiskfullError(CODE) \ - if( sqlite3_diskfull_pending ){ \ - if( sqlite3_diskfull_pending == 1 ){ \ - local_ioerr(); \ - sqlite3_diskfull = 1; \ - sqlite3_io_error_hit = 1; \ - CODE; \ - }else{ \ - sqlite3_diskfull_pending--; \ - } \ - } -#else -#define SimulateIOErrorBenign(X) -#define SimulateIOError(A) -#define SimulateDiskfullError(A) -#endif - -/* -** When testing, keep a count of the number of open files. -*/ -#ifdef SQLITE_TEST -SQLITE_API int sqlite3_open_file_count = 0; -#define OpenCounter(X) sqlite3_open_file_count+=(X) -#else -#define OpenCounter(X) -#endif - -#endif /* !defined(_OS_COMMON_H_) */ - -/************** End of os_common.h *******************************************/ -/************** Continuing where we left off in os_win.c *********************/ - -/* -** Include the header file for the Windows VFS. -*/ - -/* -** Compiling and using WAL mode requires several APIs that are only -** available in Windows platforms based on the NT kernel. -*/ -#if !SQLITE_OS_WINNT && !defined(SQLITE_OMIT_WAL) -# error "WAL mode requires support from the Windows NT kernel, compile\ - with SQLITE_OMIT_WAL." -#endif - -/* -** Are most of the Win32 ANSI APIs available (i.e. with certain exceptions -** based on the sub-platform)? -*/ -#if !SQLITE_OS_WINCE && !SQLITE_OS_WINRT && !defined(SQLITE_WIN32_NO_ANSI) -# define SQLITE_WIN32_HAS_ANSI -#endif - -/* -** Are most of the Win32 Unicode APIs available (i.e. with certain exceptions -** based on the sub-platform)? -*/ -#if (SQLITE_OS_WINCE || SQLITE_OS_WINNT || SQLITE_OS_WINRT) && \ - !defined(SQLITE_WIN32_NO_WIDE) -# define SQLITE_WIN32_HAS_WIDE -#endif - -/* -** Make sure at least one set of Win32 APIs is available. -*/ -#if !defined(SQLITE_WIN32_HAS_ANSI) && !defined(SQLITE_WIN32_HAS_WIDE) -# error "At least one of SQLITE_WIN32_HAS_ANSI and SQLITE_WIN32_HAS_WIDE\ - must be defined." -#endif - -/* -** Define the required Windows SDK version constants if they are not -** already available. -*/ -#ifndef NTDDI_WIN8 -# define NTDDI_WIN8 0x06020000 -#endif - -#ifndef NTDDI_WINBLUE -# define NTDDI_WINBLUE 0x06030000 -#endif - -/* -** Check if the GetVersionEx[AW] functions should be considered deprecated -** and avoid using them in that case. It should be noted here that if the -** value of the SQLITE_WIN32_GETVERSIONEX pre-processor macro is zero -** (whether via this block or via being manually specified), that implies -** the underlying operating system will always be based on the Windows NT -** Kernel. 
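Returning to the SQLITE_TEST error-injection counters defined above (sqlite3_io_error_pending and friends), a hedged sketch of how a test harness built with SQLITE_TEST might arm them so that the N-th simulated I/O operation fails. runWithInjectedError and the op callback are illustrative names, not part of the original file.

#include <sqlite3.h>

/* Available only when the library is compiled with SQLITE_TEST. */
extern int sqlite3_io_error_pending;   /* countdown to the first injected error */
extern int sqlite3_io_error_persist;   /* keep failing after the first hit */
extern int sqlite3_io_error_hit;       /* number of injected errors so far */

static int runWithInjectedError(int nDelay, int (*op)(void)){
  int rc;
  sqlite3_io_error_hit = 0;
  sqlite3_io_error_persist = 0;
  sqlite3_io_error_pending = nDelay;   /* SimulateIOError fires on the nDelay-th call */
  rc = op();                           /* operation under test, e.g. a VACUUM */
  sqlite3_io_error_pending = 0;        /* disarm so later I/O is unaffected */
  return rc;
}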
-*/ -#ifndef SQLITE_WIN32_GETVERSIONEX -# if defined(NTDDI_VERSION) && NTDDI_VERSION >= NTDDI_WINBLUE -# define SQLITE_WIN32_GETVERSIONEX 0 -# else -# define SQLITE_WIN32_GETVERSIONEX 1 -# endif -#endif - -/* -** This constant should already be defined (in the "WinDef.h" SDK file). -*/ -#ifndef MAX_PATH -# define MAX_PATH (260) -#endif - -/* -** Maximum pathname length (in chars) for Win32. This should normally be -** MAX_PATH. -*/ -#ifndef SQLITE_WIN32_MAX_PATH_CHARS -# define SQLITE_WIN32_MAX_PATH_CHARS (MAX_PATH) -#endif - -/* -** This constant should already be defined (in the "WinNT.h" SDK file). -*/ -#ifndef UNICODE_STRING_MAX_CHARS -# define UNICODE_STRING_MAX_CHARS (32767) -#endif - -/* -** Maximum pathname length (in chars) for WinNT. This should normally be -** UNICODE_STRING_MAX_CHARS. -*/ -#ifndef SQLITE_WINNT_MAX_PATH_CHARS -# define SQLITE_WINNT_MAX_PATH_CHARS (UNICODE_STRING_MAX_CHARS) -#endif - -/* -** Maximum pathname length (in bytes) for Win32. The MAX_PATH macro is in -** characters, so we allocate 4 bytes per character assuming worst-case of -** 4-bytes-per-character for UTF8. -*/ -#ifndef SQLITE_WIN32_MAX_PATH_BYTES -# define SQLITE_WIN32_MAX_PATH_BYTES (SQLITE_WIN32_MAX_PATH_CHARS*4) -#endif - -/* -** Maximum pathname length (in bytes) for WinNT. This should normally be -** UNICODE_STRING_MAX_CHARS * sizeof(WCHAR). -*/ -#ifndef SQLITE_WINNT_MAX_PATH_BYTES -# define SQLITE_WINNT_MAX_PATH_BYTES \ - (sizeof(WCHAR) * SQLITE_WINNT_MAX_PATH_CHARS) -#endif - -/* -** Maximum error message length (in chars) for WinRT. -*/ -#ifndef SQLITE_WIN32_MAX_ERRMSG_CHARS -# define SQLITE_WIN32_MAX_ERRMSG_CHARS (1024) -#endif - -/* -** Returns non-zero if the character should be treated as a directory -** separator. -*/ -#ifndef winIsDirSep -# define winIsDirSep(a) (((a) == '/') || ((a) == '\\')) -#endif - -/* -** This macro is used when a local variable is set to a value that is -** [sometimes] not used by the code (e.g. via conditional compilation). -*/ -#ifndef UNUSED_VARIABLE_VALUE -# define UNUSED_VARIABLE_VALUE(x) (void)(x) -#endif - -/* -** Returns the character that should be used as the directory separator. -*/ -#ifndef winGetDirSep -# define winGetDirSep() '\\' -#endif - -/* -** Do we need to manually define the Win32 file mapping APIs for use with WAL -** mode (e.g. these APIs are available in the Windows CE SDK; however, they -** are not present in the header file)? -*/ -#if SQLITE_WIN32_FILEMAPPING_API && !defined(SQLITE_OMIT_WAL) -/* -** Two of the file mapping APIs are different under WinRT. Figure out which -** set we need. -*/ -#if SQLITE_OS_WINRT -WINBASEAPI HANDLE WINAPI CreateFileMappingFromApp(HANDLE, \ - LPSECURITY_ATTRIBUTES, ULONG, ULONG64, LPCWSTR); - -WINBASEAPI LPVOID WINAPI MapViewOfFileFromApp(HANDLE, ULONG, ULONG64, SIZE_T); -#else -#if defined(SQLITE_WIN32_HAS_ANSI) -WINBASEAPI HANDLE WINAPI CreateFileMappingA(HANDLE, LPSECURITY_ATTRIBUTES, \ - DWORD, DWORD, DWORD, LPCSTR); -#endif /* defined(SQLITE_WIN32_HAS_ANSI) */ - -#if defined(SQLITE_WIN32_HAS_WIDE) -WINBASEAPI HANDLE WINAPI CreateFileMappingW(HANDLE, LPSECURITY_ATTRIBUTES, \ - DWORD, DWORD, DWORD, LPCWSTR); -#endif /* defined(SQLITE_WIN32_HAS_WIDE) */ - -WINBASEAPI LPVOID WINAPI MapViewOfFile(HANDLE, DWORD, DWORD, DWORD, SIZE_T); -#endif /* SQLITE_OS_WINRT */ - -/* -** This file mapping API is common to both Win32 and WinRT. 
-*/ -WINBASEAPI BOOL WINAPI UnmapViewOfFile(LPCVOID); -#endif /* SQLITE_WIN32_FILEMAPPING_API && !defined(SQLITE_OMIT_WAL) */ - -/* -** Some Microsoft compilers lack this definition. -*/ -#ifndef INVALID_FILE_ATTRIBUTES -# define INVALID_FILE_ATTRIBUTES ((DWORD)-1) -#endif - -#ifndef FILE_FLAG_MASK -# define FILE_FLAG_MASK (0xFF3C0000) -#endif - -#ifndef FILE_ATTRIBUTE_MASK -# define FILE_ATTRIBUTE_MASK (0x0003FFF7) -#endif - -#ifndef SQLITE_OMIT_WAL -/* Forward references to structures used for WAL */ -typedef struct winShm winShm; /* A connection to shared-memory */ -typedef struct winShmNode winShmNode; /* A region of shared-memory */ -#endif - -/* -** WinCE lacks native support for file locking so we have to fake it -** with some code of our own. -*/ -#if SQLITE_OS_WINCE -typedef struct winceLock { - int nReaders; /* Number of reader locks obtained */ - BOOL bPending; /* Indicates a pending lock has been obtained */ - BOOL bReserved; /* Indicates a reserved lock has been obtained */ - BOOL bExclusive; /* Indicates an exclusive lock has been obtained */ -} winceLock; -#endif - -/* -** The winFile structure is a subclass of sqlite3_file* specific to the win32 -** portability layer. -*/ -typedef struct winFile winFile; -struct winFile { - const sqlite3_io_methods *pMethod; /*** Must be first ***/ - sqlite3_vfs *pVfs; /* The VFS used to open this file */ - HANDLE h; /* Handle for accessing the file */ - u8 locktype; /* Type of lock currently held on this file */ - short sharedLockByte; /* Randomly chosen byte used as a shared lock */ - u8 ctrlFlags; /* Flags. See WINFILE_* below */ - DWORD lastErrno; /* The Windows errno from the last I/O error */ -#ifndef SQLITE_OMIT_WAL - winShm *pShm; /* Instance of shared memory on this file */ -#endif - const char *zPath; /* Full pathname of this file */ - int szChunk; /* Chunk size configured by FCNTL_CHUNK_SIZE */ -#if SQLITE_OS_WINCE - LPWSTR zDeleteOnClose; /* Name of file to delete when closing */ - HANDLE hMutex; /* Mutex used to control access to shared lock */ - HANDLE hShared; /* Shared memory segment used for locking */ - winceLock local; /* Locks obtained by this instance of winFile */ - winceLock *shared; /* Global shared lock memory for the file */ -#endif -#if SQLITE_MAX_MMAP_SIZE>0 - int nFetchOut; /* Number of outstanding xFetch references */ - HANDLE hMap; /* Handle for accessing memory mapping */ - void *pMapRegion; /* Area memory mapped */ - sqlite3_int64 mmapSize; /* Usable size of mapped region */ - sqlite3_int64 mmapSizeActual; /* Actual size of mapped region */ - sqlite3_int64 mmapSizeMax; /* Configured FCNTL_MMAP_SIZE value */ -#endif -}; - -/* -** Allowed values for winFile.ctrlFlags -*/ -#define WINFILE_RDONLY 0x02 /* Connection is read only */ -#define WINFILE_PERSIST_WAL 0x04 /* Persistent WAL mode */ -#define WINFILE_PSOW 0x10 /* SQLITE_IOCAP_POWERSAFE_OVERWRITE */ - -/* - * The size of the buffer used by sqlite3_win32_write_debug(). - */ -#ifndef SQLITE_WIN32_DBG_BUF_SIZE -# define SQLITE_WIN32_DBG_BUF_SIZE ((int)(4096-sizeof(DWORD))) -#endif - -/* - * The value used with sqlite3_win32_set_directory() to specify that - * the data directory should be changed. - */ -#ifndef SQLITE_WIN32_DATA_DIRECTORY_TYPE -# define SQLITE_WIN32_DATA_DIRECTORY_TYPE (1) -#endif - -/* - * The value used with sqlite3_win32_set_directory() to specify that - * the temporary directory should be changed. 
- */ -#ifndef SQLITE_WIN32_TEMP_DIRECTORY_TYPE -# define SQLITE_WIN32_TEMP_DIRECTORY_TYPE (2) -#endif - -/* - * If compiled with SQLITE_WIN32_MALLOC on Windows, we will use the - * various Win32 API heap functions instead of our own. - */ -#ifdef SQLITE_WIN32_MALLOC - -/* - * If this is non-zero, an isolated heap will be created by the native Win32 - * allocator subsystem; otherwise, the default process heap will be used. This - * setting has no effect when compiling for WinRT. By default, this is enabled - * and an isolated heap will be created to store all allocated data. - * - ****************************************************************************** - * WARNING: It is important to note that when this setting is non-zero and the - * winMemShutdown function is called (e.g. by the sqlite3_shutdown - * function), all data that was allocated using the isolated heap will - * be freed immediately and any attempt to access any of that freed - * data will almost certainly result in an immediate access violation. - ****************************************************************************** - */ -#ifndef SQLITE_WIN32_HEAP_CREATE -# define SQLITE_WIN32_HEAP_CREATE (TRUE) -#endif - -/* - * The initial size of the Win32-specific heap. This value may be zero. - */ -#ifndef SQLITE_WIN32_HEAP_INIT_SIZE -# define SQLITE_WIN32_HEAP_INIT_SIZE ((SQLITE_DEFAULT_CACHE_SIZE) * \ - (SQLITE_DEFAULT_PAGE_SIZE) + 4194304) -#endif - -/* - * The maximum size of the Win32-specific heap. This value may be zero. - */ -#ifndef SQLITE_WIN32_HEAP_MAX_SIZE -# define SQLITE_WIN32_HEAP_MAX_SIZE (0) -#endif - -/* - * The extra flags to use in calls to the Win32 heap APIs. This value may be - * zero for the default behavior. - */ -#ifndef SQLITE_WIN32_HEAP_FLAGS -# define SQLITE_WIN32_HEAP_FLAGS (0) -#endif - - -/* -** The winMemData structure stores information required by the Win32-specific -** sqlite3_mem_methods implementation. -*/ -typedef struct winMemData winMemData; -struct winMemData { -#ifndef NDEBUG - u32 magic1; /* Magic number to detect structure corruption. */ -#endif - HANDLE hHeap; /* The handle to our heap. */ - BOOL bOwned; /* Do we own the heap (i.e. destroy it on shutdown)? */ -#ifndef NDEBUG - u32 magic2; /* Magic number to detect structure corruption. */ -#endif -}; - -#ifndef NDEBUG -#define WINMEM_MAGIC1 0x42b2830b -#define WINMEM_MAGIC2 0xbd4d7cf4 -#endif - -static struct winMemData win_mem_data = { -#ifndef NDEBUG - WINMEM_MAGIC1, -#endif - NULL, FALSE -#ifndef NDEBUG - ,WINMEM_MAGIC2 -#endif -}; - -#ifndef NDEBUG -#define winMemAssertMagic1() assert( win_mem_data.magic1==WINMEM_MAGIC1 ) -#define winMemAssertMagic2() assert( win_mem_data.magic2==WINMEM_MAGIC2 ) -#define winMemAssertMagic() winMemAssertMagic1(); winMemAssertMagic2(); -#else -#define winMemAssertMagic() -#endif - -#define winMemGetDataPtr() &win_mem_data -#define winMemGetHeap() win_mem_data.hHeap -#define winMemGetOwned() win_mem_data.bOwned - -static void *winMemMalloc(int nBytes); -static void winMemFree(void *pPrior); -static void *winMemRealloc(void *pPrior, int nBytes); -static int winMemSize(void *p); -static int winMemRoundup(int n); -static int winMemInit(void *pAppData); -static void winMemShutdown(void *pAppData); - -SQLITE_PRIVATE const sqlite3_mem_methods *sqlite3MemGetWin32(void); -#endif /* SQLITE_WIN32_MALLOC */ - -/* -** The following variable is (normally) set once and never changes -** thereafter. It records whether the operating system is Win9x -** or WinNT. -** -** 0: Operating system unknown. 
-** 1: Operating system is Win9x. -** 2: Operating system is WinNT. -** -** In order to facilitate testing on a WinNT system, the test fixture -** can manually set this value to 1 to emulate Win98 behavior. -*/ -#ifdef SQLITE_TEST -SQLITE_API int sqlite3_os_type = 0; -#elif !SQLITE_OS_WINCE && !SQLITE_OS_WINRT && \ - defined(SQLITE_WIN32_HAS_ANSI) && defined(SQLITE_WIN32_HAS_WIDE) -static int sqlite3_os_type = 0; -#endif - -#ifndef SYSCALL -# define SYSCALL sqlite3_syscall_ptr -#endif - -/* -** This function is not available on Windows CE or WinRT. - */ - -#if SQLITE_OS_WINCE || SQLITE_OS_WINRT -# define osAreFileApisANSI() 1 -#endif - -/* -** Many system calls are accessed through pointer-to-functions so that -** they may be overridden at runtime to facilitate fault injection during -** testing and sandboxing. The following array holds the names and pointers -** to all overrideable system calls. -*/ -static struct win_syscall { - const char *zName; /* Name of the system call */ - sqlite3_syscall_ptr pCurrent; /* Current value of the system call */ - sqlite3_syscall_ptr pDefault; /* Default value */ -} aSyscall[] = { -#if !SQLITE_OS_WINCE && !SQLITE_OS_WINRT - { "AreFileApisANSI", (SYSCALL)AreFileApisANSI, 0 }, -#else - { "AreFileApisANSI", (SYSCALL)0, 0 }, -#endif - -#ifndef osAreFileApisANSI -#define osAreFileApisANSI ((BOOL(WINAPI*)(VOID))aSyscall[0].pCurrent) -#endif - -#if SQLITE_OS_WINCE && defined(SQLITE_WIN32_HAS_WIDE) - { "CharLowerW", (SYSCALL)CharLowerW, 0 }, -#else - { "CharLowerW", (SYSCALL)0, 0 }, -#endif - -#define osCharLowerW ((LPWSTR(WINAPI*)(LPWSTR))aSyscall[1].pCurrent) - -#if SQLITE_OS_WINCE && defined(SQLITE_WIN32_HAS_WIDE) - { "CharUpperW", (SYSCALL)CharUpperW, 0 }, -#else - { "CharUpperW", (SYSCALL)0, 0 }, -#endif - -#define osCharUpperW ((LPWSTR(WINAPI*)(LPWSTR))aSyscall[2].pCurrent) - - { "CloseHandle", (SYSCALL)CloseHandle, 0 }, - -#define osCloseHandle ((BOOL(WINAPI*)(HANDLE))aSyscall[3].pCurrent) - -#if defined(SQLITE_WIN32_HAS_ANSI) - { "CreateFileA", (SYSCALL)CreateFileA, 0 }, -#else - { "CreateFileA", (SYSCALL)0, 0 }, -#endif - -#define osCreateFileA ((HANDLE(WINAPI*)(LPCSTR,DWORD,DWORD, \ - LPSECURITY_ATTRIBUTES,DWORD,DWORD,HANDLE))aSyscall[4].pCurrent) - -#if !SQLITE_OS_WINRT && defined(SQLITE_WIN32_HAS_WIDE) - { "CreateFileW", (SYSCALL)CreateFileW, 0 }, -#else - { "CreateFileW", (SYSCALL)0, 0 }, -#endif - -#define osCreateFileW ((HANDLE(WINAPI*)(LPCWSTR,DWORD,DWORD, \ - LPSECURITY_ATTRIBUTES,DWORD,DWORD,HANDLE))aSyscall[5].pCurrent) - -#if (!SQLITE_OS_WINRT && defined(SQLITE_WIN32_HAS_ANSI) && \ - !defined(SQLITE_OMIT_WAL)) - { "CreateFileMappingA", (SYSCALL)CreateFileMappingA, 0 }, -#else - { "CreateFileMappingA", (SYSCALL)0, 0 }, -#endif - -#define osCreateFileMappingA ((HANDLE(WINAPI*)(HANDLE,LPSECURITY_ATTRIBUTES, \ - DWORD,DWORD,DWORD,LPCSTR))aSyscall[6].pCurrent) - -#if SQLITE_OS_WINCE || (!SQLITE_OS_WINRT && defined(SQLITE_WIN32_HAS_WIDE) && \ - !defined(SQLITE_OMIT_WAL)) - { "CreateFileMappingW", (SYSCALL)CreateFileMappingW, 0 }, -#else - { "CreateFileMappingW", (SYSCALL)0, 0 }, -#endif - -#define osCreateFileMappingW ((HANDLE(WINAPI*)(HANDLE,LPSECURITY_ATTRIBUTES, \ - DWORD,DWORD,DWORD,LPCWSTR))aSyscall[7].pCurrent) - -#if !SQLITE_OS_WINRT && defined(SQLITE_WIN32_HAS_WIDE) - { "CreateMutexW", (SYSCALL)CreateMutexW, 0 }, -#else - { "CreateMutexW", (SYSCALL)0, 0 }, -#endif - -#define osCreateMutexW ((HANDLE(WINAPI*)(LPSECURITY_ATTRIBUTES,BOOL, \ - LPCWSTR))aSyscall[8].pCurrent) - -#if defined(SQLITE_WIN32_HAS_ANSI) - { "DeleteFileA", 
(SYSCALL)DeleteFileA, 0 }, -#else - { "DeleteFileA", (SYSCALL)0, 0 }, -#endif - -#define osDeleteFileA ((BOOL(WINAPI*)(LPCSTR))aSyscall[9].pCurrent) - -#if defined(SQLITE_WIN32_HAS_WIDE) - { "DeleteFileW", (SYSCALL)DeleteFileW, 0 }, -#else - { "DeleteFileW", (SYSCALL)0, 0 }, -#endif - -#define osDeleteFileW ((BOOL(WINAPI*)(LPCWSTR))aSyscall[10].pCurrent) - -#if SQLITE_OS_WINCE - { "FileTimeToLocalFileTime", (SYSCALL)FileTimeToLocalFileTime, 0 }, -#else - { "FileTimeToLocalFileTime", (SYSCALL)0, 0 }, -#endif - -#define osFileTimeToLocalFileTime ((BOOL(WINAPI*)(CONST FILETIME*, \ - LPFILETIME))aSyscall[11].pCurrent) - -#if SQLITE_OS_WINCE - { "FileTimeToSystemTime", (SYSCALL)FileTimeToSystemTime, 0 }, -#else - { "FileTimeToSystemTime", (SYSCALL)0, 0 }, -#endif - -#define osFileTimeToSystemTime ((BOOL(WINAPI*)(CONST FILETIME*, \ - LPSYSTEMTIME))aSyscall[12].pCurrent) - - { "FlushFileBuffers", (SYSCALL)FlushFileBuffers, 0 }, - -#define osFlushFileBuffers ((BOOL(WINAPI*)(HANDLE))aSyscall[13].pCurrent) - -#if defined(SQLITE_WIN32_HAS_ANSI) - { "FormatMessageA", (SYSCALL)FormatMessageA, 0 }, -#else - { "FormatMessageA", (SYSCALL)0, 0 }, -#endif - -#define osFormatMessageA ((DWORD(WINAPI*)(DWORD,LPCVOID,DWORD,DWORD,LPSTR, \ - DWORD,va_list*))aSyscall[14].pCurrent) - -#if defined(SQLITE_WIN32_HAS_WIDE) - { "FormatMessageW", (SYSCALL)FormatMessageW, 0 }, -#else - { "FormatMessageW", (SYSCALL)0, 0 }, -#endif - -#define osFormatMessageW ((DWORD(WINAPI*)(DWORD,LPCVOID,DWORD,DWORD,LPWSTR, \ - DWORD,va_list*))aSyscall[15].pCurrent) - -#if !defined(SQLITE_OMIT_LOAD_EXTENSION) - { "FreeLibrary", (SYSCALL)FreeLibrary, 0 }, -#else - { "FreeLibrary", (SYSCALL)0, 0 }, -#endif - -#define osFreeLibrary ((BOOL(WINAPI*)(HMODULE))aSyscall[16].pCurrent) - - { "GetCurrentProcessId", (SYSCALL)GetCurrentProcessId, 0 }, - -#define osGetCurrentProcessId ((DWORD(WINAPI*)(VOID))aSyscall[17].pCurrent) - -#if !SQLITE_OS_WINCE && defined(SQLITE_WIN32_HAS_ANSI) - { "GetDiskFreeSpaceA", (SYSCALL)GetDiskFreeSpaceA, 0 }, -#else - { "GetDiskFreeSpaceA", (SYSCALL)0, 0 }, -#endif - -#define osGetDiskFreeSpaceA ((BOOL(WINAPI*)(LPCSTR,LPDWORD,LPDWORD,LPDWORD, \ - LPDWORD))aSyscall[18].pCurrent) - -#if !SQLITE_OS_WINCE && !SQLITE_OS_WINRT && defined(SQLITE_WIN32_HAS_WIDE) - { "GetDiskFreeSpaceW", (SYSCALL)GetDiskFreeSpaceW, 0 }, -#else - { "GetDiskFreeSpaceW", (SYSCALL)0, 0 }, -#endif - -#define osGetDiskFreeSpaceW ((BOOL(WINAPI*)(LPCWSTR,LPDWORD,LPDWORD,LPDWORD, \ - LPDWORD))aSyscall[19].pCurrent) - -#if defined(SQLITE_WIN32_HAS_ANSI) - { "GetFileAttributesA", (SYSCALL)GetFileAttributesA, 0 }, -#else - { "GetFileAttributesA", (SYSCALL)0, 0 }, -#endif - -#define osGetFileAttributesA ((DWORD(WINAPI*)(LPCSTR))aSyscall[20].pCurrent) - -#if !SQLITE_OS_WINRT && defined(SQLITE_WIN32_HAS_WIDE) - { "GetFileAttributesW", (SYSCALL)GetFileAttributesW, 0 }, -#else - { "GetFileAttributesW", (SYSCALL)0, 0 }, -#endif - -#define osGetFileAttributesW ((DWORD(WINAPI*)(LPCWSTR))aSyscall[21].pCurrent) - -#if defined(SQLITE_WIN32_HAS_WIDE) - { "GetFileAttributesExW", (SYSCALL)GetFileAttributesExW, 0 }, -#else - { "GetFileAttributesExW", (SYSCALL)0, 0 }, -#endif - -#define osGetFileAttributesExW ((BOOL(WINAPI*)(LPCWSTR,GET_FILEEX_INFO_LEVELS, \ - LPVOID))aSyscall[22].pCurrent) - -#if !SQLITE_OS_WINRT - { "GetFileSize", (SYSCALL)GetFileSize, 0 }, -#else - { "GetFileSize", (SYSCALL)0, 0 }, -#endif - -#define osGetFileSize ((DWORD(WINAPI*)(HANDLE,LPDWORD))aSyscall[23].pCurrent) - -#if !SQLITE_OS_WINCE && defined(SQLITE_WIN32_HAS_ANSI) - { "GetFullPathNameA", 
(SYSCALL)GetFullPathNameA, 0 }, -#else - { "GetFullPathNameA", (SYSCALL)0, 0 }, -#endif - -#define osGetFullPathNameA ((DWORD(WINAPI*)(LPCSTR,DWORD,LPSTR, \ - LPSTR*))aSyscall[24].pCurrent) - -#if !SQLITE_OS_WINCE && !SQLITE_OS_WINRT && defined(SQLITE_WIN32_HAS_WIDE) - { "GetFullPathNameW", (SYSCALL)GetFullPathNameW, 0 }, -#else - { "GetFullPathNameW", (SYSCALL)0, 0 }, -#endif - -#define osGetFullPathNameW ((DWORD(WINAPI*)(LPCWSTR,DWORD,LPWSTR, \ - LPWSTR*))aSyscall[25].pCurrent) - - { "GetLastError", (SYSCALL)GetLastError, 0 }, - -#define osGetLastError ((DWORD(WINAPI*)(VOID))aSyscall[26].pCurrent) - -#if !defined(SQLITE_OMIT_LOAD_EXTENSION) -#if SQLITE_OS_WINCE - /* The GetProcAddressA() routine is only available on Windows CE. */ - { "GetProcAddressA", (SYSCALL)GetProcAddressA, 0 }, -#else - /* All other Windows platforms expect GetProcAddress() to take - ** an ANSI string regardless of the _UNICODE setting */ - { "GetProcAddressA", (SYSCALL)GetProcAddress, 0 }, -#endif -#else - { "GetProcAddressA", (SYSCALL)0, 0 }, -#endif - -#define osGetProcAddressA ((FARPROC(WINAPI*)(HMODULE, \ - LPCSTR))aSyscall[27].pCurrent) - -#if !SQLITE_OS_WINRT - { "GetSystemInfo", (SYSCALL)GetSystemInfo, 0 }, -#else - { "GetSystemInfo", (SYSCALL)0, 0 }, -#endif - -#define osGetSystemInfo ((VOID(WINAPI*)(LPSYSTEM_INFO))aSyscall[28].pCurrent) - - { "GetSystemTime", (SYSCALL)GetSystemTime, 0 }, - -#define osGetSystemTime ((VOID(WINAPI*)(LPSYSTEMTIME))aSyscall[29].pCurrent) - -#if !SQLITE_OS_WINCE - { "GetSystemTimeAsFileTime", (SYSCALL)GetSystemTimeAsFileTime, 0 }, -#else - { "GetSystemTimeAsFileTime", (SYSCALL)0, 0 }, -#endif - -#define osGetSystemTimeAsFileTime ((VOID(WINAPI*)( \ - LPFILETIME))aSyscall[30].pCurrent) - -#if defined(SQLITE_WIN32_HAS_ANSI) - { "GetTempPathA", (SYSCALL)GetTempPathA, 0 }, -#else - { "GetTempPathA", (SYSCALL)0, 0 }, -#endif - -#define osGetTempPathA ((DWORD(WINAPI*)(DWORD,LPSTR))aSyscall[31].pCurrent) - -#if !SQLITE_OS_WINRT && defined(SQLITE_WIN32_HAS_WIDE) - { "GetTempPathW", (SYSCALL)GetTempPathW, 0 }, -#else - { "GetTempPathW", (SYSCALL)0, 0 }, -#endif - -#define osGetTempPathW ((DWORD(WINAPI*)(DWORD,LPWSTR))aSyscall[32].pCurrent) - -#if !SQLITE_OS_WINRT - { "GetTickCount", (SYSCALL)GetTickCount, 0 }, -#else - { "GetTickCount", (SYSCALL)0, 0 }, -#endif - -#define osGetTickCount ((DWORD(WINAPI*)(VOID))aSyscall[33].pCurrent) - -#if defined(SQLITE_WIN32_HAS_ANSI) && defined(SQLITE_WIN32_GETVERSIONEX) && \ - SQLITE_WIN32_GETVERSIONEX - { "GetVersionExA", (SYSCALL)GetVersionExA, 0 }, -#else - { "GetVersionExA", (SYSCALL)0, 0 }, -#endif - -#define osGetVersionExA ((BOOL(WINAPI*)( \ - LPOSVERSIONINFOA))aSyscall[34].pCurrent) - -#if !SQLITE_OS_WINRT && defined(SQLITE_WIN32_HAS_WIDE) && \ - defined(SQLITE_WIN32_GETVERSIONEX) && SQLITE_WIN32_GETVERSIONEX - { "GetVersionExW", (SYSCALL)GetVersionExW, 0 }, -#else - { "GetVersionExW", (SYSCALL)0, 0 }, -#endif - -#define osGetVersionExW ((BOOL(WINAPI*)( \ - LPOSVERSIONINFOW))aSyscall[35].pCurrent) - - { "HeapAlloc", (SYSCALL)HeapAlloc, 0 }, - -#define osHeapAlloc ((LPVOID(WINAPI*)(HANDLE,DWORD, \ - SIZE_T))aSyscall[36].pCurrent) - -#if !SQLITE_OS_WINRT - { "HeapCreate", (SYSCALL)HeapCreate, 0 }, -#else - { "HeapCreate", (SYSCALL)0, 0 }, -#endif - -#define osHeapCreate ((HANDLE(WINAPI*)(DWORD,SIZE_T, \ - SIZE_T))aSyscall[37].pCurrent) - -#if !SQLITE_OS_WINRT - { "HeapDestroy", (SYSCALL)HeapDestroy, 0 }, -#else - { "HeapDestroy", (SYSCALL)0, 0 }, -#endif - -#define osHeapDestroy ((BOOL(WINAPI*)(HANDLE))aSyscall[38].pCurrent) - - { "HeapFree", 
(SYSCALL)HeapFree, 0 }, - -#define osHeapFree ((BOOL(WINAPI*)(HANDLE,DWORD,LPVOID))aSyscall[39].pCurrent) - - { "HeapReAlloc", (SYSCALL)HeapReAlloc, 0 }, - -#define osHeapReAlloc ((LPVOID(WINAPI*)(HANDLE,DWORD,LPVOID, \ - SIZE_T))aSyscall[40].pCurrent) - - { "HeapSize", (SYSCALL)HeapSize, 0 }, - -#define osHeapSize ((SIZE_T(WINAPI*)(HANDLE,DWORD, \ - LPCVOID))aSyscall[41].pCurrent) - -#if !SQLITE_OS_WINRT - { "HeapValidate", (SYSCALL)HeapValidate, 0 }, -#else - { "HeapValidate", (SYSCALL)0, 0 }, -#endif - -#define osHeapValidate ((BOOL(WINAPI*)(HANDLE,DWORD, \ - LPCVOID))aSyscall[42].pCurrent) - -#if !SQLITE_OS_WINCE && !SQLITE_OS_WINRT - { "HeapCompact", (SYSCALL)HeapCompact, 0 }, -#else - { "HeapCompact", (SYSCALL)0, 0 }, -#endif - -#define osHeapCompact ((UINT(WINAPI*)(HANDLE,DWORD))aSyscall[43].pCurrent) - -#if defined(SQLITE_WIN32_HAS_ANSI) && !defined(SQLITE_OMIT_LOAD_EXTENSION) - { "LoadLibraryA", (SYSCALL)LoadLibraryA, 0 }, -#else - { "LoadLibraryA", (SYSCALL)0, 0 }, -#endif - -#define osLoadLibraryA ((HMODULE(WINAPI*)(LPCSTR))aSyscall[44].pCurrent) - -#if !SQLITE_OS_WINRT && defined(SQLITE_WIN32_HAS_WIDE) && \ - !defined(SQLITE_OMIT_LOAD_EXTENSION) - { "LoadLibraryW", (SYSCALL)LoadLibraryW, 0 }, -#else - { "LoadLibraryW", (SYSCALL)0, 0 }, -#endif - -#define osLoadLibraryW ((HMODULE(WINAPI*)(LPCWSTR))aSyscall[45].pCurrent) - -#if !SQLITE_OS_WINRT - { "LocalFree", (SYSCALL)LocalFree, 0 }, -#else - { "LocalFree", (SYSCALL)0, 0 }, -#endif - -#define osLocalFree ((HLOCAL(WINAPI*)(HLOCAL))aSyscall[46].pCurrent) - -#if !SQLITE_OS_WINCE && !SQLITE_OS_WINRT - { "LockFile", (SYSCALL)LockFile, 0 }, -#else - { "LockFile", (SYSCALL)0, 0 }, -#endif - -#ifndef osLockFile -#define osLockFile ((BOOL(WINAPI*)(HANDLE,DWORD,DWORD,DWORD, \ - DWORD))aSyscall[47].pCurrent) -#endif - -#if !SQLITE_OS_WINCE - { "LockFileEx", (SYSCALL)LockFileEx, 0 }, -#else - { "LockFileEx", (SYSCALL)0, 0 }, -#endif - -#ifndef osLockFileEx -#define osLockFileEx ((BOOL(WINAPI*)(HANDLE,DWORD,DWORD,DWORD,DWORD, \ - LPOVERLAPPED))aSyscall[48].pCurrent) -#endif - -#if SQLITE_OS_WINCE || (!SQLITE_OS_WINRT && !defined(SQLITE_OMIT_WAL)) - { "MapViewOfFile", (SYSCALL)MapViewOfFile, 0 }, -#else - { "MapViewOfFile", (SYSCALL)0, 0 }, -#endif - -#define osMapViewOfFile ((LPVOID(WINAPI*)(HANDLE,DWORD,DWORD,DWORD, \ - SIZE_T))aSyscall[49].pCurrent) - - { "MultiByteToWideChar", (SYSCALL)MultiByteToWideChar, 0 }, - -#define osMultiByteToWideChar ((int(WINAPI*)(UINT,DWORD,LPCSTR,int,LPWSTR, \ - int))aSyscall[50].pCurrent) - - { "QueryPerformanceCounter", (SYSCALL)QueryPerformanceCounter, 0 }, - -#define osQueryPerformanceCounter ((BOOL(WINAPI*)( \ - LARGE_INTEGER*))aSyscall[51].pCurrent) - - { "ReadFile", (SYSCALL)ReadFile, 0 }, - -#define osReadFile ((BOOL(WINAPI*)(HANDLE,LPVOID,DWORD,LPDWORD, \ - LPOVERLAPPED))aSyscall[52].pCurrent) - - { "SetEndOfFile", (SYSCALL)SetEndOfFile, 0 }, - -#define osSetEndOfFile ((BOOL(WINAPI*)(HANDLE))aSyscall[53].pCurrent) - -#if !SQLITE_OS_WINRT - { "SetFilePointer", (SYSCALL)SetFilePointer, 0 }, -#else - { "SetFilePointer", (SYSCALL)0, 0 }, -#endif - -#define osSetFilePointer ((DWORD(WINAPI*)(HANDLE,LONG,PLONG, \ - DWORD))aSyscall[54].pCurrent) - -#if !SQLITE_OS_WINRT - { "Sleep", (SYSCALL)Sleep, 0 }, -#else - { "Sleep", (SYSCALL)0, 0 }, -#endif - -#define osSleep ((VOID(WINAPI*)(DWORD))aSyscall[55].pCurrent) - - { "SystemTimeToFileTime", (SYSCALL)SystemTimeToFileTime, 0 }, - -#define osSystemTimeToFileTime ((BOOL(WINAPI*)(CONST SYSTEMTIME*, \ - LPFILETIME))aSyscall[56].pCurrent) - -#if !SQLITE_OS_WINCE && 
!SQLITE_OS_WINRT - { "UnlockFile", (SYSCALL)UnlockFile, 0 }, -#else - { "UnlockFile", (SYSCALL)0, 0 }, -#endif - -#ifndef osUnlockFile -#define osUnlockFile ((BOOL(WINAPI*)(HANDLE,DWORD,DWORD,DWORD, \ - DWORD))aSyscall[57].pCurrent) -#endif - -#if !SQLITE_OS_WINCE - { "UnlockFileEx", (SYSCALL)UnlockFileEx, 0 }, -#else - { "UnlockFileEx", (SYSCALL)0, 0 }, -#endif - -#define osUnlockFileEx ((BOOL(WINAPI*)(HANDLE,DWORD,DWORD,DWORD, \ - LPOVERLAPPED))aSyscall[58].pCurrent) - -#if SQLITE_OS_WINCE || !defined(SQLITE_OMIT_WAL) - { "UnmapViewOfFile", (SYSCALL)UnmapViewOfFile, 0 }, -#else - { "UnmapViewOfFile", (SYSCALL)0, 0 }, -#endif - -#define osUnmapViewOfFile ((BOOL(WINAPI*)(LPCVOID))aSyscall[59].pCurrent) - - { "WideCharToMultiByte", (SYSCALL)WideCharToMultiByte, 0 }, - -#define osWideCharToMultiByte ((int(WINAPI*)(UINT,DWORD,LPCWSTR,int,LPSTR,int, \ - LPCSTR,LPBOOL))aSyscall[60].pCurrent) - - { "WriteFile", (SYSCALL)WriteFile, 0 }, - -#define osWriteFile ((BOOL(WINAPI*)(HANDLE,LPCVOID,DWORD,LPDWORD, \ - LPOVERLAPPED))aSyscall[61].pCurrent) - -#if SQLITE_OS_WINRT - { "CreateEventExW", (SYSCALL)CreateEventExW, 0 }, -#else - { "CreateEventExW", (SYSCALL)0, 0 }, -#endif - -#define osCreateEventExW ((HANDLE(WINAPI*)(LPSECURITY_ATTRIBUTES,LPCWSTR, \ - DWORD,DWORD))aSyscall[62].pCurrent) - -#if !SQLITE_OS_WINRT - { "WaitForSingleObject", (SYSCALL)WaitForSingleObject, 0 }, -#else - { "WaitForSingleObject", (SYSCALL)0, 0 }, -#endif - -#define osWaitForSingleObject ((DWORD(WINAPI*)(HANDLE, \ - DWORD))aSyscall[63].pCurrent) - -#if SQLITE_OS_WINRT - { "WaitForSingleObjectEx", (SYSCALL)WaitForSingleObjectEx, 0 }, -#else - { "WaitForSingleObjectEx", (SYSCALL)0, 0 }, -#endif - -#define osWaitForSingleObjectEx ((DWORD(WINAPI*)(HANDLE,DWORD, \ - BOOL))aSyscall[64].pCurrent) - -#if SQLITE_OS_WINRT - { "SetFilePointerEx", (SYSCALL)SetFilePointerEx, 0 }, -#else - { "SetFilePointerEx", (SYSCALL)0, 0 }, -#endif - -#define osSetFilePointerEx ((BOOL(WINAPI*)(HANDLE,LARGE_INTEGER, \ - PLARGE_INTEGER,DWORD))aSyscall[65].pCurrent) - -#if SQLITE_OS_WINRT - { "GetFileInformationByHandleEx", (SYSCALL)GetFileInformationByHandleEx, 0 }, -#else - { "GetFileInformationByHandleEx", (SYSCALL)0, 0 }, -#endif - -#define osGetFileInformationByHandleEx ((BOOL(WINAPI*)(HANDLE, \ - FILE_INFO_BY_HANDLE_CLASS,LPVOID,DWORD))aSyscall[66].pCurrent) - -#if SQLITE_OS_WINRT && !defined(SQLITE_OMIT_WAL) - { "MapViewOfFileFromApp", (SYSCALL)MapViewOfFileFromApp, 0 }, -#else - { "MapViewOfFileFromApp", (SYSCALL)0, 0 }, -#endif - -#define osMapViewOfFileFromApp ((LPVOID(WINAPI*)(HANDLE,ULONG,ULONG64, \ - SIZE_T))aSyscall[67].pCurrent) - -#if SQLITE_OS_WINRT - { "CreateFile2", (SYSCALL)CreateFile2, 0 }, -#else - { "CreateFile2", (SYSCALL)0, 0 }, -#endif - -#define osCreateFile2 ((HANDLE(WINAPI*)(LPCWSTR,DWORD,DWORD,DWORD, \ - LPCREATEFILE2_EXTENDED_PARAMETERS))aSyscall[68].pCurrent) - -#if SQLITE_OS_WINRT && !defined(SQLITE_OMIT_LOAD_EXTENSION) - { "LoadPackagedLibrary", (SYSCALL)LoadPackagedLibrary, 0 }, -#else - { "LoadPackagedLibrary", (SYSCALL)0, 0 }, -#endif - -#define osLoadPackagedLibrary ((HMODULE(WINAPI*)(LPCWSTR, \ - DWORD))aSyscall[69].pCurrent) - -#if SQLITE_OS_WINRT - { "GetTickCount64", (SYSCALL)GetTickCount64, 0 }, -#else - { "GetTickCount64", (SYSCALL)0, 0 }, -#endif - -#define osGetTickCount64 ((ULONGLONG(WINAPI*)(VOID))aSyscall[70].pCurrent) - -#if SQLITE_OS_WINRT - { "GetNativeSystemInfo", (SYSCALL)GetNativeSystemInfo, 0 }, -#else - { "GetNativeSystemInfo", (SYSCALL)0, 0 }, -#endif - -#define osGetNativeSystemInfo 
((VOID(WINAPI*)( \ - LPSYSTEM_INFO))aSyscall[71].pCurrent) - -#if defined(SQLITE_WIN32_HAS_ANSI) - { "OutputDebugStringA", (SYSCALL)OutputDebugStringA, 0 }, -#else - { "OutputDebugStringA", (SYSCALL)0, 0 }, -#endif - -#define osOutputDebugStringA ((VOID(WINAPI*)(LPCSTR))aSyscall[72].pCurrent) - -#if defined(SQLITE_WIN32_HAS_WIDE) - { "OutputDebugStringW", (SYSCALL)OutputDebugStringW, 0 }, -#else - { "OutputDebugStringW", (SYSCALL)0, 0 }, -#endif - -#define osOutputDebugStringW ((VOID(WINAPI*)(LPCWSTR))aSyscall[73].pCurrent) - - { "GetProcessHeap", (SYSCALL)GetProcessHeap, 0 }, - -#define osGetProcessHeap ((HANDLE(WINAPI*)(VOID))aSyscall[74].pCurrent) - -#if SQLITE_OS_WINRT && !defined(SQLITE_OMIT_WAL) - { "CreateFileMappingFromApp", (SYSCALL)CreateFileMappingFromApp, 0 }, -#else - { "CreateFileMappingFromApp", (SYSCALL)0, 0 }, -#endif - -#define osCreateFileMappingFromApp ((HANDLE(WINAPI*)(HANDLE, \ - LPSECURITY_ATTRIBUTES,ULONG,ULONG64,LPCWSTR))aSyscall[75].pCurrent) - -}; /* End of the overrideable system calls */ - -/* -** This is the xSetSystemCall() method of sqlite3_vfs for all of the -** "win32" VFSes. Return SQLITE_OK opon successfully updating the -** system call pointer, or SQLITE_NOTFOUND if there is no configurable -** system call named zName. -*/ -static int winSetSystemCall( - sqlite3_vfs *pNotUsed, /* The VFS pointer. Not used */ - const char *zName, /* Name of system call to override */ - sqlite3_syscall_ptr pNewFunc /* Pointer to new system call value */ -){ - unsigned int i; - int rc = SQLITE_NOTFOUND; - - UNUSED_PARAMETER(pNotUsed); - if( zName==0 ){ - /* If no zName is given, restore all system calls to their default - ** settings and return NULL - */ - rc = SQLITE_OK; - for(i=0; i0 ){ - memset(zDbgBuf, 0, SQLITE_WIN32_DBG_BUF_SIZE); - memcpy(zDbgBuf, zBuf, nMin); - osOutputDebugStringA(zDbgBuf); - }else{ - osOutputDebugStringA(zBuf); - } -#elif defined(SQLITE_WIN32_HAS_WIDE) - memset(zDbgBuf, 0, SQLITE_WIN32_DBG_BUF_SIZE); - if ( osMultiByteToWideChar( - osAreFileApisANSI() ? CP_ACP : CP_OEMCP, 0, zBuf, - nMin, (LPWSTR)zDbgBuf, SQLITE_WIN32_DBG_BUF_SIZE/sizeof(WCHAR))<=0 ){ - return; - } - osOutputDebugStringW((LPCWSTR)zDbgBuf); -#else - if( nMin>0 ){ - memset(zDbgBuf, 0, SQLITE_WIN32_DBG_BUF_SIZE); - memcpy(zDbgBuf, zBuf, nMin); - fprintf(stderr, "%s", zDbgBuf); - }else{ - fprintf(stderr, "%s", zBuf); - } -#endif -} - -/* -** The following routine suspends the current thread for at least ms -** milliseconds. This is equivalent to the Win32 Sleep() interface. -*/ -#if SQLITE_OS_WINRT -static HANDLE sleepObj = NULL; -#endif - -SQLITE_API void sqlite3_win32_sleep(DWORD milliseconds){ -#if SQLITE_OS_WINRT - if ( sleepObj==NULL ){ - sleepObj = osCreateEventExW(NULL, NULL, CREATE_EVENT_MANUAL_RESET, - SYNCHRONIZE); - } - assert( sleepObj!=NULL ); - osWaitForSingleObjectEx(sleepObj, milliseconds, FALSE); -#else - osSleep(milliseconds); -#endif -} - -/* -** Return true (non-zero) if we are running under WinNT, Win2K, WinXP, -** or WinCE. Return false (zero) for Win95, Win98, or WinME. -** -** Here is an interesting observation: Win95, Win98, and WinME lack -** the LockFileEx() API. But we can still statically link against that -** API as long as we don't call it when running Win95/98/ME. A call to -** this routine is used to determine if the host is Win95/98/ME or -** WinNT/2K/XP so that we will know whether or not we can safely call -** the LockFileEx() API. 
-*/ - -#if !defined(SQLITE_WIN32_GETVERSIONEX) || !SQLITE_WIN32_GETVERSIONEX -# define osIsNT() (1) -#elif SQLITE_OS_WINCE || SQLITE_OS_WINRT || !defined(SQLITE_WIN32_HAS_ANSI) -# define osIsNT() (1) -#elif !defined(SQLITE_WIN32_HAS_WIDE) -# define osIsNT() (0) -#else - static int osIsNT(void){ - if( sqlite3_os_type==0 ){ -#if defined(NTDDI_VERSION) && NTDDI_VERSION >= NTDDI_WIN8 - OSVERSIONINFOW sInfo; - sInfo.dwOSVersionInfoSize = sizeof(sInfo); - osGetVersionExW(&sInfo); -#else - OSVERSIONINFOA sInfo; - sInfo.dwOSVersionInfoSize = sizeof(sInfo); - osGetVersionExA(&sInfo); -#endif - sqlite3_os_type = sInfo.dwPlatformId==VER_PLATFORM_WIN32_NT ? 2 : 1; - } - return sqlite3_os_type==2; - } -#endif - -#ifdef SQLITE_WIN32_MALLOC -/* -** Allocate nBytes of memory. -*/ -static void *winMemMalloc(int nBytes){ - HANDLE hHeap; - void *p; - - winMemAssertMagic(); - hHeap = winMemGetHeap(); - assert( hHeap!=0 ); - assert( hHeap!=INVALID_HANDLE_VALUE ); -#if !SQLITE_OS_WINRT && defined(SQLITE_WIN32_MALLOC_VALIDATE) - assert( osHeapValidate(hHeap, SQLITE_WIN32_HEAP_FLAGS, NULL) ); -#endif - assert( nBytes>=0 ); - p = osHeapAlloc(hHeap, SQLITE_WIN32_HEAP_FLAGS, (SIZE_T)nBytes); - if( !p ){ - sqlite3_log(SQLITE_NOMEM, "failed to HeapAlloc %u bytes (%lu), heap=%p", - nBytes, osGetLastError(), (void*)hHeap); - } - return p; -} - -/* -** Free memory. -*/ -static void winMemFree(void *pPrior){ - HANDLE hHeap; - - winMemAssertMagic(); - hHeap = winMemGetHeap(); - assert( hHeap!=0 ); - assert( hHeap!=INVALID_HANDLE_VALUE ); -#if !SQLITE_OS_WINRT && defined(SQLITE_WIN32_MALLOC_VALIDATE) - assert( osHeapValidate(hHeap, SQLITE_WIN32_HEAP_FLAGS, pPrior) ); -#endif - if( !pPrior ) return; /* Passing NULL to HeapFree is undefined. */ - if( !osHeapFree(hHeap, SQLITE_WIN32_HEAP_FLAGS, pPrior) ){ - sqlite3_log(SQLITE_NOMEM, "failed to HeapFree block %p (%lu), heap=%p", - pPrior, osGetLastError(), (void*)hHeap); - } -} - -/* -** Change the size of an existing memory allocation -*/ -static void *winMemRealloc(void *pPrior, int nBytes){ - HANDLE hHeap; - void *p; - - winMemAssertMagic(); - hHeap = winMemGetHeap(); - assert( hHeap!=0 ); - assert( hHeap!=INVALID_HANDLE_VALUE ); -#if !SQLITE_OS_WINRT && defined(SQLITE_WIN32_MALLOC_VALIDATE) - assert( osHeapValidate(hHeap, SQLITE_WIN32_HEAP_FLAGS, pPrior) ); -#endif - assert( nBytes>=0 ); - if( !pPrior ){ - p = osHeapAlloc(hHeap, SQLITE_WIN32_HEAP_FLAGS, (SIZE_T)nBytes); - }else{ - p = osHeapReAlloc(hHeap, SQLITE_WIN32_HEAP_FLAGS, pPrior, (SIZE_T)nBytes); - } - if( !p ){ - sqlite3_log(SQLITE_NOMEM, "failed to %s %u bytes (%lu), heap=%p", - pPrior ? "HeapReAlloc" : "HeapAlloc", nBytes, osGetLastError(), - (void*)hHeap); - } - return p; -} - -/* -** Return the size of an outstanding allocation, in bytes. -*/ -static int winMemSize(void *p){ - HANDLE hHeap; - SIZE_T n; - - winMemAssertMagic(); - hHeap = winMemGetHeap(); - assert( hHeap!=0 ); - assert( hHeap!=INVALID_HANDLE_VALUE ); -#if !SQLITE_OS_WINRT && defined(SQLITE_WIN32_MALLOC_VALIDATE) - assert( osHeapValidate(hHeap, SQLITE_WIN32_HEAP_FLAGS, p) ); -#endif - if( !p ) return 0; - n = osHeapSize(hHeap, SQLITE_WIN32_HEAP_FLAGS, p); - if( n==(SIZE_T)-1 ){ - sqlite3_log(SQLITE_NOMEM, "failed to HeapSize block %p (%lu), heap=%p", - p, osGetLastError(), (void*)hHeap); - return 0; - } - return (int)n; -} - -/* -** Round up a request size to the next valid allocation size. -*/ -static int winMemRoundup(int n){ - return n; -} - -/* -** Initialize this module. 
-*/ -static int winMemInit(void *pAppData){ - winMemData *pWinMemData = (winMemData *)pAppData; - - if( !pWinMemData ) return SQLITE_ERROR; - assert( pWinMemData->magic1==WINMEM_MAGIC1 ); - assert( pWinMemData->magic2==WINMEM_MAGIC2 ); - -#if !SQLITE_OS_WINRT && SQLITE_WIN32_HEAP_CREATE - if( !pWinMemData->hHeap ){ - DWORD dwInitialSize = SQLITE_WIN32_HEAP_INIT_SIZE; - DWORD dwMaximumSize = (DWORD)sqlite3GlobalConfig.nHeap; - if( dwMaximumSize==0 ){ - dwMaximumSize = SQLITE_WIN32_HEAP_MAX_SIZE; - }else if( dwInitialSize>dwMaximumSize ){ - dwInitialSize = dwMaximumSize; - } - pWinMemData->hHeap = osHeapCreate(SQLITE_WIN32_HEAP_FLAGS, - dwInitialSize, dwMaximumSize); - if( !pWinMemData->hHeap ){ - sqlite3_log(SQLITE_NOMEM, - "failed to HeapCreate (%lu), flags=%u, initSize=%lu, maxSize=%lu", - osGetLastError(), SQLITE_WIN32_HEAP_FLAGS, dwInitialSize, - dwMaximumSize); - return SQLITE_NOMEM; - } - pWinMemData->bOwned = TRUE; - assert( pWinMemData->bOwned ); - } -#else - pWinMemData->hHeap = osGetProcessHeap(); - if( !pWinMemData->hHeap ){ - sqlite3_log(SQLITE_NOMEM, - "failed to GetProcessHeap (%lu)", osGetLastError()); - return SQLITE_NOMEM; - } - pWinMemData->bOwned = FALSE; - assert( !pWinMemData->bOwned ); -#endif - assert( pWinMemData->hHeap!=0 ); - assert( pWinMemData->hHeap!=INVALID_HANDLE_VALUE ); -#if !SQLITE_OS_WINRT && defined(SQLITE_WIN32_MALLOC_VALIDATE) - assert( osHeapValidate(pWinMemData->hHeap, SQLITE_WIN32_HEAP_FLAGS, NULL) ); -#endif - return SQLITE_OK; -} - -/* -** Deinitialize this module. -*/ -static void winMemShutdown(void *pAppData){ - winMemData *pWinMemData = (winMemData *)pAppData; - - if( !pWinMemData ) return; - assert( pWinMemData->magic1==WINMEM_MAGIC1 ); - assert( pWinMemData->magic2==WINMEM_MAGIC2 ); - - if( pWinMemData->hHeap ){ - assert( pWinMemData->hHeap!=INVALID_HANDLE_VALUE ); -#if !SQLITE_OS_WINRT && defined(SQLITE_WIN32_MALLOC_VALIDATE) - assert( osHeapValidate(pWinMemData->hHeap, SQLITE_WIN32_HEAP_FLAGS, NULL) ); -#endif - if( pWinMemData->bOwned ){ - if( !osHeapDestroy(pWinMemData->hHeap) ){ - sqlite3_log(SQLITE_NOMEM, "failed to HeapDestroy (%lu), heap=%p", - osGetLastError(), (void*)pWinMemData->hHeap); - } - pWinMemData->bOwned = FALSE; - } - pWinMemData->hHeap = NULL; - } -} - -/* -** Populate the low-level memory allocation function pointers in -** sqlite3GlobalConfig.m with pointers to the routines in this file. The -** arguments specify the block of memory to manage. -** -** This routine is only called by sqlite3_config(), and therefore -** is not required to be threadsafe (it is not). -*/ -SQLITE_PRIVATE const sqlite3_mem_methods *sqlite3MemGetWin32(void){ - static const sqlite3_mem_methods winMemMethods = { - winMemMalloc, - winMemFree, - winMemRealloc, - winMemSize, - winMemRoundup, - winMemInit, - winMemShutdown, - &win_mem_data - }; - return &winMemMethods; -} - -SQLITE_PRIVATE void sqlite3MemSetDefault(void){ - sqlite3_config(SQLITE_CONFIG_MALLOC, sqlite3MemGetWin32()); -} -#endif /* SQLITE_WIN32_MALLOC */ - -/* -** Convert a UTF-8 string to Microsoft Unicode (UTF-16?). -** -** Space to hold the returned string is obtained from malloc. 
-*/ -static LPWSTR winUtf8ToUnicode(const char *zFilename){ - int nChar; - LPWSTR zWideFilename; - - nChar = osMultiByteToWideChar(CP_UTF8, 0, zFilename, -1, NULL, 0); - if( nChar==0 ){ - return 0; - } - zWideFilename = sqlite3MallocZero( nChar*sizeof(zWideFilename[0]) ); - if( zWideFilename==0 ){ - return 0; - } - nChar = osMultiByteToWideChar(CP_UTF8, 0, zFilename, -1, zWideFilename, - nChar); - if( nChar==0 ){ - sqlite3_free(zWideFilename); - zWideFilename = 0; - } - return zWideFilename; -} - -/* -** Convert Microsoft Unicode to UTF-8. Space to hold the returned string is -** obtained from sqlite3_malloc(). -*/ -static char *winUnicodeToUtf8(LPCWSTR zWideFilename){ - int nByte; - char *zFilename; - - nByte = osWideCharToMultiByte(CP_UTF8, 0, zWideFilename, -1, 0, 0, 0, 0); - if( nByte == 0 ){ - return 0; - } - zFilename = sqlite3MallocZero( nByte ); - if( zFilename==0 ){ - return 0; - } - nByte = osWideCharToMultiByte(CP_UTF8, 0, zWideFilename, -1, zFilename, nByte, - 0, 0); - if( nByte == 0 ){ - sqlite3_free(zFilename); - zFilename = 0; - } - return zFilename; -} - -/* -** Convert an ANSI string to Microsoft Unicode, based on the -** current codepage settings for file apis. -** -** Space to hold the returned string is obtained -** from sqlite3_malloc. -*/ -static LPWSTR winMbcsToUnicode(const char *zFilename){ - int nByte; - LPWSTR zMbcsFilename; - int codepage = osAreFileApisANSI() ? CP_ACP : CP_OEMCP; - - nByte = osMultiByteToWideChar(codepage, 0, zFilename, -1, NULL, - 0)*sizeof(WCHAR); - if( nByte==0 ){ - return 0; - } - zMbcsFilename = sqlite3MallocZero( nByte*sizeof(zMbcsFilename[0]) ); - if( zMbcsFilename==0 ){ - return 0; - } - nByte = osMultiByteToWideChar(codepage, 0, zFilename, -1, zMbcsFilename, - nByte); - if( nByte==0 ){ - sqlite3_free(zMbcsFilename); - zMbcsFilename = 0; - } - return zMbcsFilename; -} - -/* -** Convert Microsoft Unicode to multi-byte character string, based on the -** user's ANSI codepage. -** -** Space to hold the returned string is obtained from -** sqlite3_malloc(). -*/ -static char *winUnicodeToMbcs(LPCWSTR zWideFilename){ - int nByte; - char *zFilename; - int codepage = osAreFileApisANSI() ? CP_ACP : CP_OEMCP; - - nByte = osWideCharToMultiByte(codepage, 0, zWideFilename, -1, 0, 0, 0, 0); - if( nByte == 0 ){ - return 0; - } - zFilename = sqlite3MallocZero( nByte ); - if( zFilename==0 ){ - return 0; - } - nByte = osWideCharToMultiByte(codepage, 0, zWideFilename, -1, zFilename, - nByte, 0, 0); - if( nByte == 0 ){ - sqlite3_free(zFilename); - zFilename = 0; - } - return zFilename; -} - -/* -** Convert multibyte character string to UTF-8. Space to hold the -** returned string is obtained from sqlite3_malloc(). -*/ -SQLITE_API char *sqlite3_win32_mbcs_to_utf8(const char *zFilename){ - char *zFilenameUtf8; - LPWSTR zTmpWide; - - zTmpWide = winMbcsToUnicode(zFilename); - if( zTmpWide==0 ){ - return 0; - } - zFilenameUtf8 = winUnicodeToUtf8(zTmpWide); - sqlite3_free(zTmpWide); - return zFilenameUtf8; -} - -/* -** Convert UTF-8 to multibyte character string. Space to hold the -** returned string is obtained from sqlite3_malloc(). -*/ -SQLITE_API char *sqlite3_win32_utf8_to_mbcs(const char *zFilename){ - char *zFilenameMbcs; - LPWSTR zTmpWide; - - zTmpWide = winUtf8ToUnicode(zFilename); - if( zTmpWide==0 ){ - return 0; - } - zFilenameMbcs = winUnicodeToMbcs(zTmpWide); - sqlite3_free(zTmpWide); - return zFilenameMbcs; -} - -/* -** This function sets the data directory or the temporary directory based on -** the provided arguments. 
The type argument must be 1 in order to set the -** data directory or 2 in order to set the temporary directory. The zValue -** argument is the name of the directory to use. The return value will be -** SQLITE_OK if successful. -*/ -SQLITE_API int sqlite3_win32_set_directory(DWORD type, LPCWSTR zValue){ - char **ppDirectory = 0; -#ifndef SQLITE_OMIT_AUTOINIT - int rc = sqlite3_initialize(); - if( rc ) return rc; -#endif - if( type==SQLITE_WIN32_DATA_DIRECTORY_TYPE ){ - ppDirectory = &sqlite3_data_directory; - }else if( type==SQLITE_WIN32_TEMP_DIRECTORY_TYPE ){ - ppDirectory = &sqlite3_temp_directory; - } - assert( !ppDirectory || type==SQLITE_WIN32_DATA_DIRECTORY_TYPE - || type==SQLITE_WIN32_TEMP_DIRECTORY_TYPE - ); - assert( !ppDirectory || sqlite3MemdebugHasType(*ppDirectory, MEMTYPE_HEAP) ); - if( ppDirectory ){ - char *zValueUtf8 = 0; - if( zValue && zValue[0] ){ - zValueUtf8 = winUnicodeToUtf8(zValue); - if ( zValueUtf8==0 ){ - return SQLITE_NOMEM; - } - } - sqlite3_free(*ppDirectory); - *ppDirectory = zValueUtf8; - return SQLITE_OK; - } - return SQLITE_ERROR; -} - -/* -** The return value of winGetLastErrorMsg -** is zero if the error message fits in the buffer, or non-zero -** otherwise (if the message was truncated). -*/ -static int winGetLastErrorMsg(DWORD lastErrno, int nBuf, char *zBuf){ - /* FormatMessage returns 0 on failure. Otherwise it - ** returns the number of TCHARs written to the output - ** buffer, excluding the terminating null char. - */ - DWORD dwLen = 0; - char *zOut = 0; - - if( osIsNT() ){ -#if SQLITE_OS_WINRT - WCHAR zTempWide[SQLITE_WIN32_MAX_ERRMSG_CHARS+1]; - dwLen = osFormatMessageW(FORMAT_MESSAGE_FROM_SYSTEM | - FORMAT_MESSAGE_IGNORE_INSERTS, - NULL, - lastErrno, - 0, - zTempWide, - SQLITE_WIN32_MAX_ERRMSG_CHARS, - 0); -#else - LPWSTR zTempWide = NULL; - dwLen = osFormatMessageW(FORMAT_MESSAGE_ALLOCATE_BUFFER | - FORMAT_MESSAGE_FROM_SYSTEM | - FORMAT_MESSAGE_IGNORE_INSERTS, - NULL, - lastErrno, - 0, - (LPWSTR) &zTempWide, - 0, - 0); -#endif - if( dwLen > 0 ){ - /* allocate a buffer and convert to UTF8 */ - sqlite3BeginBenignMalloc(); - zOut = winUnicodeToUtf8(zTempWide); - sqlite3EndBenignMalloc(); -#if !SQLITE_OS_WINRT - /* free the system buffer allocated by FormatMessage */ - osLocalFree(zTempWide); -#endif - } - } -#ifdef SQLITE_WIN32_HAS_ANSI - else{ - char *zTemp = NULL; - dwLen = osFormatMessageA(FORMAT_MESSAGE_ALLOCATE_BUFFER | - FORMAT_MESSAGE_FROM_SYSTEM | - FORMAT_MESSAGE_IGNORE_INSERTS, - NULL, - lastErrno, - 0, - (LPSTR) &zTemp, - 0, - 0); - if( dwLen > 0 ){ - /* allocate a buffer and convert to UTF8 */ - sqlite3BeginBenignMalloc(); - zOut = sqlite3_win32_mbcs_to_utf8(zTemp); - sqlite3EndBenignMalloc(); - /* free the system buffer allocated by FormatMessage */ - osLocalFree(zTemp); - } - } -#endif - if( 0 == dwLen ){ - sqlite3_snprintf(nBuf, zBuf, "OsError 0x%lx (%lu)", lastErrno, lastErrno); - }else{ - /* copy a maximum of nBuf chars to output buffer */ - sqlite3_snprintf(nBuf, zBuf, "%s", zOut); - /* free the UTF8 buffer */ - sqlite3_free(zOut); - } - return 0; -} - -/* -** -** This function - winLogErrorAtLine() - is only ever called via the macro -** winLogError(). -** -** This routine is invoked after an error occurs in an OS function. -** It logs a message using sqlite3_log() containing the current value of -** error code and, if possible, the human-readable equivalent from -** FormatMessage. -** -** The first argument passed to the macro should be the error code that -** will be returned to SQLite (e.g. 
SQLITE_IOERR_DELETE, SQLITE_CANTOPEN). -** The two subsequent arguments should be the name of the OS function that -** failed and the associated file-system path, if any. -*/ -#define winLogError(a,b,c,d) winLogErrorAtLine(a,b,c,d,__LINE__) -static int winLogErrorAtLine( - int errcode, /* SQLite error code */ - DWORD lastErrno, /* Win32 last error */ - const char *zFunc, /* Name of OS function that failed */ - const char *zPath, /* File path associated with error */ - int iLine /* Source line number where error occurred */ -){ - char zMsg[500]; /* Human readable error text */ - int i; /* Loop counter */ - - zMsg[0] = 0; - winGetLastErrorMsg(lastErrno, sizeof(zMsg), zMsg); - assert( errcode!=SQLITE_OK ); - if( zPath==0 ) zPath = ""; - for(i=0; zMsg[i] && zMsg[i]!='\r' && zMsg[i]!='\n'; i++){} - zMsg[i] = 0; - sqlite3_log(errcode, - "os_win.c:%d: (%lu) %s(%s) - %s", - iLine, lastErrno, zFunc, zPath, zMsg - ); - - return errcode; -} - -/* -** The number of times that a ReadFile(), WriteFile(), and DeleteFile() -** will be retried following a locking error - probably caused by -** antivirus software. Also the initial delay before the first retry. -** The delay increases linearly with each retry. -*/ -#ifndef SQLITE_WIN32_IOERR_RETRY -# define SQLITE_WIN32_IOERR_RETRY 10 -#endif -#ifndef SQLITE_WIN32_IOERR_RETRY_DELAY -# define SQLITE_WIN32_IOERR_RETRY_DELAY 25 -#endif -static int winIoerrRetry = SQLITE_WIN32_IOERR_RETRY; -static int winIoerrRetryDelay = SQLITE_WIN32_IOERR_RETRY_DELAY; - -/* -** The "winIoerrCanRetry1" macro is used to determine if a particular I/O -** error code obtained via GetLastError() is eligible to be retried. It -** must accept the error code DWORD as its only argument and should return -** non-zero if the error code is transient in nature and the operation -** responsible for generating the original error might succeed upon being -** retried. The argument to this macro should be a variable. -** -** Additionally, a macro named "winIoerrCanRetry2" may be defined. If it -** is defined, it will be consulted only when the macro "winIoerrCanRetry1" -** returns zero. The "winIoerrCanRetry2" macro is completely optional and -** may be used to include additional error codes in the set that should -** result in the failing I/O operation being retried by the caller. If -** defined, the "winIoerrCanRetry2" macro must exhibit external semantics -** identical to those of the "winIoerrCanRetry1" macro. -*/ -#if !defined(winIoerrCanRetry1) -#define winIoerrCanRetry1(a) (((a)==ERROR_ACCESS_DENIED) || \ - ((a)==ERROR_SHARING_VIOLATION) || \ - ((a)==ERROR_LOCK_VIOLATION) || \ - ((a)==ERROR_DEV_NOT_EXIST) || \ - ((a)==ERROR_NETNAME_DELETED) || \ - ((a)==ERROR_SEM_TIMEOUT) || \ - ((a)==ERROR_NETWORK_UNREACHABLE)) -#endif - -/* -** If a ReadFile() or WriteFile() error occurs, invoke this routine -** to see if it should be retried. Return TRUE to retry. Return FALSE -** to give up with an error. -*/ -static int winRetryIoerr(int *pnRetry, DWORD *pError){ - DWORD e = osGetLastError(); - if( *pnRetry>=winIoerrRetry ){ - if( pError ){ - *pError = e; - } - return 0; - } - if( winIoerrCanRetry1(e) ){ - sqlite3_win32_sleep(winIoerrRetryDelay*(1+*pnRetry)); - ++*pnRetry; - return 1; - } -#if defined(winIoerrCanRetry2) - else if( winIoerrCanRetry2(e) ){ - sqlite3_win32_sleep(winIoerrRetryDelay*(1+*pnRetry)); - ++*pnRetry; - return 1; - } -#endif - if( pError ){ - *pError = e; - } - return 0; -} - -/* -** Log a I/O error retry episode. 
-*/ -static void winLogIoerr(int nRetry){ - if( nRetry ){ - sqlite3_log(SQLITE_IOERR, - "delayed %dms for lock/sharing conflict", - winIoerrRetryDelay*nRetry*(nRetry+1)/2 - ); - } -} - -#if SQLITE_OS_WINCE -/************************************************************************* -** This section contains code for WinCE only. -*/ -#if !defined(SQLITE_MSVC_LOCALTIME_API) || !SQLITE_MSVC_LOCALTIME_API -/* -** The MSVC CRT on Windows CE may not have a localtime() function. So -** create a substitute. -*/ -/* #include <time.h> */ -struct tm *__cdecl localtime(const time_t *t) -{ - static struct tm y; - FILETIME uTm, lTm; - SYSTEMTIME pTm; - sqlite3_int64 t64; - t64 = *t; - t64 = (t64 + 11644473600)*10000000; - uTm.dwLowDateTime = (DWORD)(t64 & 0xFFFFFFFF); - uTm.dwHighDateTime= (DWORD)(t64 >> 32); - osFileTimeToLocalFileTime(&uTm,&lTm); - osFileTimeToSystemTime(&lTm,&pTm); - y.tm_year = pTm.wYear - 1900; - y.tm_mon = pTm.wMonth - 1; - y.tm_wday = pTm.wDayOfWeek; - y.tm_mday = pTm.wDay; - y.tm_hour = pTm.wHour; - y.tm_min = pTm.wMinute; - y.tm_sec = pTm.wSecond; - return &y; -} -#endif - -#define HANDLE_TO_WINFILE(a) (winFile*)&((char*)a)[-(int)offsetof(winFile,h)] - -/* -** Acquire a lock on the handle h -*/ -static void winceMutexAcquire(HANDLE h){ - DWORD dwErr; - do { - dwErr = osWaitForSingleObject(h, INFINITE); - } while (dwErr != WAIT_OBJECT_0 && dwErr != WAIT_ABANDONED); -} -/* -** Release a lock acquired by winceMutexAcquire() -*/ -#define winceMutexRelease(h) ReleaseMutex(h) - -/* -** Create the mutex and shared memory used for locking in the file -** descriptor pFile -*/ -static int winceCreateLock(const char *zFilename, winFile *pFile){ - LPWSTR zTok; - LPWSTR zName; - DWORD lastErrno; - BOOL bLogged = FALSE; - BOOL bInit = TRUE; - - zName = winUtf8ToUnicode(zFilename); - if( zName==0 ){ - /* out of memory */ - return SQLITE_IOERR_NOMEM; - } - - /* Initialize the local lockdata */ - memset(&pFile->local, 0, sizeof(pFile->local)); - - /* Replace the backslashes from the filename and lowercase it - ** to derive a mutex name. */ - zTok = osCharLowerW(zName); - for (;*zTok;zTok++){ - if (*zTok == '\\') *zTok = '_'; - } - - /* Create/open the named mutex */ - pFile->hMutex = osCreateMutexW(NULL, FALSE, zName); - if (!pFile->hMutex){ - pFile->lastErrno = osGetLastError(); - sqlite3_free(zName); - return winLogError(SQLITE_IOERR, pFile->lastErrno, - "winceCreateLock1", zFilename); - } - - /* Acquire the mutex before continuing */ - winceMutexAcquire(pFile->hMutex); - - /* Since the names of named mutexes, semaphores, file mappings etc are - ** case-sensitive, take advantage of that by uppercasing the mutex name - ** and using that as the shared filemapping name. - */ - osCharUpperW(zName); - pFile->hShared = osCreateFileMappingW(INVALID_HANDLE_VALUE, NULL, - PAGE_READWRITE, 0, sizeof(winceLock), - zName); - - /* Set a flag that indicates we're the first to create the memory so it - ** must be zero-initialized */ - lastErrno = osGetLastError(); - if (lastErrno == ERROR_ALREADY_EXISTS){ - bInit = FALSE; - } - - sqlite3_free(zName); - - /* If we succeeded in making the shared memory handle, map it.
*/ - if( pFile->hShared ){ - pFile->shared = (winceLock*)osMapViewOfFile(pFile->hShared, - FILE_MAP_READ|FILE_MAP_WRITE, 0, 0, sizeof(winceLock)); - /* If mapping failed, close the shared memory handle and erase it */ - if( !pFile->shared ){ - pFile->lastErrno = osGetLastError(); - winLogError(SQLITE_IOERR, pFile->lastErrno, - "winceCreateLock2", zFilename); - bLogged = TRUE; - osCloseHandle(pFile->hShared); - pFile->hShared = NULL; - } - } - - /* If shared memory could not be created, then close the mutex and fail */ - if( pFile->hShared==NULL ){ - if( !bLogged ){ - pFile->lastErrno = lastErrno; - winLogError(SQLITE_IOERR, pFile->lastErrno, - "winceCreateLock3", zFilename); - bLogged = TRUE; - } - winceMutexRelease(pFile->hMutex); - osCloseHandle(pFile->hMutex); - pFile->hMutex = NULL; - return SQLITE_IOERR; - } - - /* Initialize the shared memory if we're supposed to */ - if( bInit ){ - memset(pFile->shared, 0, sizeof(winceLock)); - } - - winceMutexRelease(pFile->hMutex); - return SQLITE_OK; -} - -/* -** Destroy the part of winFile that deals with wince locks -*/ -static void winceDestroyLock(winFile *pFile){ - if (pFile->hMutex){ - /* Acquire the mutex */ - winceMutexAcquire(pFile->hMutex); - - /* The following blocks should probably assert in debug mode, but they - are to cleanup in case any locks remained open */ - if (pFile->local.nReaders){ - pFile->shared->nReaders --; - } - if (pFile->local.bReserved){ - pFile->shared->bReserved = FALSE; - } - if (pFile->local.bPending){ - pFile->shared->bPending = FALSE; - } - if (pFile->local.bExclusive){ - pFile->shared->bExclusive = FALSE; - } - - /* De-reference and close our copy of the shared memory handle */ - osUnmapViewOfFile(pFile->shared); - osCloseHandle(pFile->hShared); - - /* Done with the mutex */ - winceMutexRelease(pFile->hMutex); - osCloseHandle(pFile->hMutex); - pFile->hMutex = NULL; - } -} - -/* -** An implementation of the LockFile() API of Windows for CE -*/ -static BOOL winceLockFile( - LPHANDLE phFile, - DWORD dwFileOffsetLow, - DWORD dwFileOffsetHigh, - DWORD nNumberOfBytesToLockLow, - DWORD nNumberOfBytesToLockHigh -){ - winFile *pFile = HANDLE_TO_WINFILE(phFile); - BOOL bReturn = FALSE; - - UNUSED_PARAMETER(dwFileOffsetHigh); - UNUSED_PARAMETER(nNumberOfBytesToLockHigh); - - if (!pFile->hMutex) return TRUE; - winceMutexAcquire(pFile->hMutex); - - /* Wanting an exclusive lock? */ - if (dwFileOffsetLow == (DWORD)SHARED_FIRST - && nNumberOfBytesToLockLow == (DWORD)SHARED_SIZE){ - if (pFile->shared->nReaders == 0 && pFile->shared->bExclusive == 0){ - pFile->shared->bExclusive = TRUE; - pFile->local.bExclusive = TRUE; - bReturn = TRUE; - } - } - - /* Want a read-only lock? */ - else if (dwFileOffsetLow == (DWORD)SHARED_FIRST && - nNumberOfBytesToLockLow == 1){ - if (pFile->shared->bExclusive == 0){ - pFile->local.nReaders ++; - if (pFile->local.nReaders == 1){ - pFile->shared->nReaders ++; - } - bReturn = TRUE; - } - } - - /* Want a pending lock? */ - else if (dwFileOffsetLow == (DWORD)PENDING_BYTE - && nNumberOfBytesToLockLow == 1){ - /* If no pending lock has been acquired, then acquire it */ - if (pFile->shared->bPending == 0) { - pFile->shared->bPending = TRUE; - pFile->local.bPending = TRUE; - bReturn = TRUE; - } - } - - /* Want a reserved lock? 
*/ - else if (dwFileOffsetLow == (DWORD)RESERVED_BYTE - && nNumberOfBytesToLockLow == 1){ - if (pFile->shared->bReserved == 0) { - pFile->shared->bReserved = TRUE; - pFile->local.bReserved = TRUE; - bReturn = TRUE; - } - } - - winceMutexRelease(pFile->hMutex); - return bReturn; -} - -/* -** An implementation of the UnlockFile API of Windows for CE -*/ -static BOOL winceUnlockFile( - LPHANDLE phFile, - DWORD dwFileOffsetLow, - DWORD dwFileOffsetHigh, - DWORD nNumberOfBytesToUnlockLow, - DWORD nNumberOfBytesToUnlockHigh -){ - winFile *pFile = HANDLE_TO_WINFILE(phFile); - BOOL bReturn = FALSE; - - UNUSED_PARAMETER(dwFileOffsetHigh); - UNUSED_PARAMETER(nNumberOfBytesToUnlockHigh); - - if (!pFile->hMutex) return TRUE; - winceMutexAcquire(pFile->hMutex); - - /* Releasing a reader lock or an exclusive lock */ - if (dwFileOffsetLow == (DWORD)SHARED_FIRST){ - /* Did we have an exclusive lock? */ - if (pFile->local.bExclusive){ - assert(nNumberOfBytesToUnlockLow == (DWORD)SHARED_SIZE); - pFile->local.bExclusive = FALSE; - pFile->shared->bExclusive = FALSE; - bReturn = TRUE; - } - - /* Did we just have a reader lock? */ - else if (pFile->local.nReaders){ - assert(nNumberOfBytesToUnlockLow == (DWORD)SHARED_SIZE - || nNumberOfBytesToUnlockLow == 1); - pFile->local.nReaders --; - if (pFile->local.nReaders == 0) - { - pFile->shared->nReaders --; - } - bReturn = TRUE; - } - } - - /* Releasing a pending lock */ - else if (dwFileOffsetLow == (DWORD)PENDING_BYTE - && nNumberOfBytesToUnlockLow == 1){ - if (pFile->local.bPending){ - pFile->local.bPending = FALSE; - pFile->shared->bPending = FALSE; - bReturn = TRUE; - } - } - /* Releasing a reserved lock */ - else if (dwFileOffsetLow == (DWORD)RESERVED_BYTE - && nNumberOfBytesToUnlockLow == 1){ - if (pFile->local.bReserved) { - pFile->local.bReserved = FALSE; - pFile->shared->bReserved = FALSE; - bReturn = TRUE; - } - } - - winceMutexRelease(pFile->hMutex); - return bReturn; -} -/* -** End of the special code for wince -*****************************************************************************/ -#endif /* SQLITE_OS_WINCE */ - -/* -** Lock a file region. -*/ -static BOOL winLockFile( - LPHANDLE phFile, - DWORD flags, - DWORD offsetLow, - DWORD offsetHigh, - DWORD numBytesLow, - DWORD numBytesHigh -){ -#if SQLITE_OS_WINCE - /* - ** NOTE: Windows CE is handled differently here due its lack of the Win32 - ** API LockFile. - */ - return winceLockFile(phFile, offsetLow, offsetHigh, - numBytesLow, numBytesHigh); -#else - if( osIsNT() ){ - OVERLAPPED ovlp; - memset(&ovlp, 0, sizeof(OVERLAPPED)); - ovlp.Offset = offsetLow; - ovlp.OffsetHigh = offsetHigh; - return osLockFileEx(*phFile, flags, 0, numBytesLow, numBytesHigh, &ovlp); - }else{ - return osLockFile(*phFile, offsetLow, offsetHigh, numBytesLow, - numBytesHigh); - } -#endif -} - -/* -** Unlock a file region. - */ -static BOOL winUnlockFile( - LPHANDLE phFile, - DWORD offsetLow, - DWORD offsetHigh, - DWORD numBytesLow, - DWORD numBytesHigh -){ -#if SQLITE_OS_WINCE - /* - ** NOTE: Windows CE is handled differently here due its lack of the Win32 - ** API UnlockFile. 
- */ - return winceUnlockFile(phFile, offsetLow, offsetHigh, - numBytesLow, numBytesHigh); -#else - if( osIsNT() ){ - OVERLAPPED ovlp; - memset(&ovlp, 0, sizeof(OVERLAPPED)); - ovlp.Offset = offsetLow; - ovlp.OffsetHigh = offsetHigh; - return osUnlockFileEx(*phFile, 0, numBytesLow, numBytesHigh, &ovlp); - }else{ - return osUnlockFile(*phFile, offsetLow, offsetHigh, numBytesLow, - numBytesHigh); - } -#endif -} - -/***************************************************************************** -** The next group of routines implement the I/O methods specified -** by the sqlite3_io_methods object. -******************************************************************************/ - -/* -** Some Microsoft compilers lack this definition. -*/ -#ifndef INVALID_SET_FILE_POINTER -# define INVALID_SET_FILE_POINTER ((DWORD)-1) -#endif - -/* -** Move the current position of the file handle passed as the first -** argument to offset iOffset within the file. If successful, return 0. -** Otherwise, set pFile->lastErrno and return non-zero. -*/ -static int winSeekFile(winFile *pFile, sqlite3_int64 iOffset){ -#if !SQLITE_OS_WINRT - LONG upperBits; /* Most sig. 32 bits of new offset */ - LONG lowerBits; /* Least sig. 32 bits of new offset */ - DWORD dwRet; /* Value returned by SetFilePointer() */ - DWORD lastErrno; /* Value returned by GetLastError() */ - - OSTRACE(("SEEK file=%p, offset=%lld\n", pFile->h, iOffset)); - - upperBits = (LONG)((iOffset>>32) & 0x7fffffff); - lowerBits = (LONG)(iOffset & 0xffffffff); - - /* API oddity: If successful, SetFilePointer() returns a dword - ** containing the lower 32-bits of the new file-offset. Or, if it fails, - ** it returns INVALID_SET_FILE_POINTER. However according to MSDN, - ** INVALID_SET_FILE_POINTER may also be a valid new offset. So to determine - ** whether an error has actually occurred, it is also necessary to call - ** GetLastError(). - */ - dwRet = osSetFilePointer(pFile->h, lowerBits, &upperBits, FILE_BEGIN); - - if( (dwRet==INVALID_SET_FILE_POINTER - && ((lastErrno = osGetLastError())!=NO_ERROR)) ){ - pFile->lastErrno = lastErrno; - winLogError(SQLITE_IOERR_SEEK, pFile->lastErrno, - "winSeekFile", pFile->zPath); - OSTRACE(("SEEK file=%p, rc=SQLITE_IOERR_SEEK\n", pFile->h)); - return 1; - } - - OSTRACE(("SEEK file=%p, rc=SQLITE_OK\n", pFile->h)); - return 0; -#else - /* - ** Same as above, except that this implementation works for WinRT. - */ - - LARGE_INTEGER x; /* The new offset */ - BOOL bRet; /* Value returned by SetFilePointerEx() */ - - x.QuadPart = iOffset; - bRet = osSetFilePointerEx(pFile->h, x, 0, FILE_BEGIN); - - if(!bRet){ - pFile->lastErrno = osGetLastError(); - winLogError(SQLITE_IOERR_SEEK, pFile->lastErrno, - "winSeekFile", pFile->zPath); - OSTRACE(("SEEK file=%p, rc=SQLITE_IOERR_SEEK\n", pFile->h)); - return 1; - } - - OSTRACE(("SEEK file=%p, rc=SQLITE_OK\n", pFile->h)); - return 0; -#endif -} - -#if SQLITE_MAX_MMAP_SIZE>0 -/* Forward references to VFS helper methods used for memory mapped files */ -static int winMapfile(winFile*, sqlite3_int64); -static int winUnmapfile(winFile*); -#endif - -/* -** Close a file. -** -** It is reported that an attempt to close a handle might sometimes -** fail. This is a very unreasonable result, but Windows is notorious -** for being unreasonable so I do not doubt that it might happen. If -** the close fails, we pause for 100 milliseconds and try again. As -** many as MX_CLOSE_ATTEMPT attempts to close the handle are made before -** giving up and returning an error. 
-*/ -#define MX_CLOSE_ATTEMPT 3 -static int winClose(sqlite3_file *id){ - int rc, cnt = 0; - winFile *pFile = (winFile*)id; - - assert( id!=0 ); -#ifndef SQLITE_OMIT_WAL - assert( pFile->pShm==0 ); -#endif - assert( pFile->h!=NULL && pFile->h!=INVALID_HANDLE_VALUE ); - OSTRACE(("CLOSE file=%p\n", pFile->h)); - -#if SQLITE_MAX_MMAP_SIZE>0 - winUnmapfile(pFile); -#endif - - do{ - rc = osCloseHandle(pFile->h); - /* SimulateIOError( rc=0; cnt=MX_CLOSE_ATTEMPT; ); */ - }while( rc==0 && ++cnt < MX_CLOSE_ATTEMPT && (sqlite3_win32_sleep(100), 1) ); -#if SQLITE_OS_WINCE -#define WINCE_DELETION_ATTEMPTS 3 - winceDestroyLock(pFile); - if( pFile->zDeleteOnClose ){ - int cnt = 0; - while( - osDeleteFileW(pFile->zDeleteOnClose)==0 - && osGetFileAttributesW(pFile->zDeleteOnClose)!=0xffffffff - && cnt++ < WINCE_DELETION_ATTEMPTS - ){ - sqlite3_win32_sleep(100); /* Wait a little before trying again */ - } - sqlite3_free(pFile->zDeleteOnClose); - } -#endif - if( rc ){ - pFile->h = NULL; - } - OpenCounter(-1); - OSTRACE(("CLOSE file=%p, rc=%s\n", pFile->h, rc ? "ok" : "failed")); - return rc ? SQLITE_OK - : winLogError(SQLITE_IOERR_CLOSE, osGetLastError(), - "winClose", pFile->zPath); -} - -/* -** Read data from a file into a buffer. Return SQLITE_OK if all -** bytes were read successfully and SQLITE_IOERR if anything goes -** wrong. -*/ -static int winRead( - sqlite3_file *id, /* File to read from */ - void *pBuf, /* Write content into this buffer */ - int amt, /* Number of bytes to read */ - sqlite3_int64 offset /* Begin reading at this offset */ -){ -#if !SQLITE_OS_WINCE - OVERLAPPED overlapped; /* The offset for ReadFile. */ -#endif - winFile *pFile = (winFile*)id; /* file handle */ - DWORD nRead; /* Number of bytes actually read from file */ - int nRetry = 0; /* Number of retrys */ - - assert( id!=0 ); - assert( amt>0 ); - assert( offset>=0 ); - SimulateIOError(return SQLITE_IOERR_READ); - OSTRACE(("READ file=%p, buffer=%p, amount=%d, offset=%lld, lock=%d\n", - pFile->h, pBuf, amt, offset, pFile->locktype)); - -#if SQLITE_MAX_MMAP_SIZE>0 - /* Deal with as much of this read request as possible by transfering - ** data from the memory mapping using memcpy(). 
*/ - if( offset<pFile->mmapSize ){ - if( offset+amt <= pFile->mmapSize ){ - memcpy(pBuf, &((u8 *)(pFile->pMapRegion))[offset], amt); - OSTRACE(("READ-MMAP file=%p, rc=SQLITE_OK\n", pFile->h)); - return SQLITE_OK; - }else{ - int nCopy = (int)(pFile->mmapSize - offset); - memcpy(pBuf, &((u8 *)(pFile->pMapRegion))[offset], nCopy); - pBuf = &((u8 *)pBuf)[nCopy]; - amt -= nCopy; - offset += nCopy; - } - } -#endif - -#if SQLITE_OS_WINCE - if( winSeekFile(pFile, offset) ){ - OSTRACE(("READ file=%p, rc=SQLITE_FULL\n", pFile->h)); - return SQLITE_FULL; - } - while( !osReadFile(pFile->h, pBuf, amt, &nRead, 0) ){ -#else - memset(&overlapped, 0, sizeof(OVERLAPPED)); - overlapped.Offset = (LONG)(offset & 0xffffffff); - overlapped.OffsetHigh = (LONG)((offset>>32) & 0x7fffffff); - while( !osReadFile(pFile->h, pBuf, amt, &nRead, &overlapped) && - osGetLastError()!=ERROR_HANDLE_EOF ){ -#endif - DWORD lastErrno; - if( winRetryIoerr(&nRetry, &lastErrno) ) continue; - pFile->lastErrno = lastErrno; - OSTRACE(("READ file=%p, rc=SQLITE_IOERR_READ\n", pFile->h)); - return winLogError(SQLITE_IOERR_READ, pFile->lastErrno, - "winRead", pFile->zPath); - } - winLogIoerr(nRetry); - if( nRead<(DWORD)amt ){ - /* Unread parts of the buffer must be zero-filled */ - memset(&((char*)pBuf)[nRead], 0, amt-nRead); - OSTRACE(("READ file=%p, rc=SQLITE_IOERR_SHORT_READ\n", pFile->h)); - return SQLITE_IOERR_SHORT_READ; - } - - OSTRACE(("READ file=%p, rc=SQLITE_OK\n", pFile->h)); - return SQLITE_OK; -} - -/* -** Write data from a buffer into a file. Return SQLITE_OK on success -** or some other error code on failure. -*/ -static int winWrite( - sqlite3_file *id, /* File to write into */ - const void *pBuf, /* The bytes to be written */ - int amt, /* Number of bytes to write */ - sqlite3_int64 offset /* Offset into the file to begin writing at */ -){ - int rc = 0; /* True if error has occurred, else false */ - winFile *pFile = (winFile*)id; /* File handle */ - int nRetry = 0; /* Number of retries */ - - assert( amt>0 ); - assert( pFile ); - SimulateIOError(return SQLITE_IOERR_WRITE); - SimulateDiskfullError(return SQLITE_FULL); - - OSTRACE(("WRITE file=%p, buffer=%p, amount=%d, offset=%lld, lock=%d\n", - pFile->h, pBuf, amt, offset, pFile->locktype)); - -#if SQLITE_MAX_MMAP_SIZE>0 - /* Deal with as much of this write request as possible by transfering - ** data from the memory mapping using memcpy(). */ - if( offset<pFile->mmapSize ){ - if( offset+amt <= pFile->mmapSize ){ - memcpy(&((u8 *)(pFile->pMapRegion))[offset], pBuf, amt); - OSTRACE(("WRITE-MMAP file=%p, rc=SQLITE_OK\n", pFile->h)); - return SQLITE_OK; - }else{ - int nCopy = (int)(pFile->mmapSize - offset); - memcpy(&((u8 *)(pFile->pMapRegion))[offset], pBuf, nCopy); - pBuf = &((u8 *)pBuf)[nCopy]; - amt -= nCopy; - offset += nCopy; - } - } -#endif - -#if SQLITE_OS_WINCE - rc = winSeekFile(pFile, offset); - if( rc==0 ){ -#else - { -#endif -#if !SQLITE_OS_WINCE - OVERLAPPED overlapped; /* The offset for WriteFile. 
*/ -#endif - u8 *aRem = (u8 *)pBuf; /* Data yet to be written */ - int nRem = amt; /* Number of bytes yet to be written */ - DWORD nWrite; /* Bytes written by each WriteFile() call */ - DWORD lastErrno = NO_ERROR; /* Value returned by GetLastError() */ - -#if !SQLITE_OS_WINCE - memset(&overlapped, 0, sizeof(OVERLAPPED)); - overlapped.Offset = (LONG)(offset & 0xffffffff); - overlapped.OffsetHigh = (LONG)((offset>>32) & 0x7fffffff); -#endif - - while( nRem>0 ){ -#if SQLITE_OS_WINCE - if( !osWriteFile(pFile->h, aRem, nRem, &nWrite, 0) ){ -#else - if( !osWriteFile(pFile->h, aRem, nRem, &nWrite, &overlapped) ){ -#endif - if( winRetryIoerr(&nRetry, &lastErrno) ) continue; - break; - } - assert( nWrite==0 || nWrite<=(DWORD)nRem ); - if( nWrite==0 || nWrite>(DWORD)nRem ){ - lastErrno = osGetLastError(); - break; - } -#if !SQLITE_OS_WINCE - offset += nWrite; - overlapped.Offset = (LONG)(offset & 0xffffffff); - overlapped.OffsetHigh = (LONG)((offset>>32) & 0x7fffffff); -#endif - aRem += nWrite; - nRem -= nWrite; - } - if( nRem>0 ){ - pFile->lastErrno = lastErrno; - rc = 1; - } - } - - if( rc ){ - if( ( pFile->lastErrno==ERROR_HANDLE_DISK_FULL ) - || ( pFile->lastErrno==ERROR_DISK_FULL )){ - OSTRACE(("WRITE file=%p, rc=SQLITE_FULL\n", pFile->h)); - return winLogError(SQLITE_FULL, pFile->lastErrno, - "winWrite1", pFile->zPath); - } - OSTRACE(("WRITE file=%p, rc=SQLITE_IOERR_WRITE\n", pFile->h)); - return winLogError(SQLITE_IOERR_WRITE, pFile->lastErrno, - "winWrite2", pFile->zPath); - }else{ - winLogIoerr(nRetry); - } - OSTRACE(("WRITE file=%p, rc=SQLITE_OK\n", pFile->h)); - return SQLITE_OK; -} - -/* -** Truncate an open file to a specified size -*/ -static int winTruncate(sqlite3_file *id, sqlite3_int64 nByte){ - winFile *pFile = (winFile*)id; /* File handle object */ - int rc = SQLITE_OK; /* Return code for this function */ - DWORD lastErrno; - - assert( pFile ); - SimulateIOError(return SQLITE_IOERR_TRUNCATE); - OSTRACE(("TRUNCATE file=%p, size=%lld, lock=%d\n", - pFile->h, nByte, pFile->locktype)); - - /* If the user has configured a chunk-size for this file, truncate the - ** file so that it consists of an integer number of chunks (i.e. the - ** actual file size after the operation may be larger than the requested - ** size). - */ - if( pFile->szChunk>0 ){ - nByte = ((nByte + pFile->szChunk - 1)/pFile->szChunk) * pFile->szChunk; - } - - /* SetEndOfFile() returns non-zero when successful, or zero when it fails. */ - if( winSeekFile(pFile, nByte) ){ - rc = winLogError(SQLITE_IOERR_TRUNCATE, pFile->lastErrno, - "winTruncate1", pFile->zPath); - }else if( 0==osSetEndOfFile(pFile->h) && - ((lastErrno = osGetLastError())!=ERROR_USER_MAPPED_FILE) ){ - pFile->lastErrno = lastErrno; - rc = winLogError(SQLITE_IOERR_TRUNCATE, pFile->lastErrno, - "winTruncate2", pFile->zPath); - } - -#if SQLITE_MAX_MMAP_SIZE>0 - /* If the file was truncated to a size smaller than the currently - ** mapped region, reduce the effective mapping size as well. SQLite will - ** use read() and write() to access data beyond this point from now on. - */ - if( pFile->pMapRegion && nBytemmapSize ){ - pFile->mmapSize = nByte; - } -#endif - - OSTRACE(("TRUNCATE file=%p, rc=%s\n", pFile->h, sqlite3ErrName(rc))); - return rc; -} - -#ifdef SQLITE_TEST -/* -** Count the number of fullsyncs and normal syncs. This is used to test -** that syncs and fullsyncs are occuring at the right times. 
-*/ -SQLITE_API int sqlite3_sync_count = 0; -SQLITE_API int sqlite3_fullsync_count = 0; -#endif - -/* -** Make sure all writes to a particular file are committed to disk. -*/ -static int winSync(sqlite3_file *id, int flags){ -#ifndef SQLITE_NO_SYNC - /* - ** Used only when SQLITE_NO_SYNC is not defined. - */ - BOOL rc; -#endif -#if !defined(NDEBUG) || !defined(SQLITE_NO_SYNC) || \ - (defined(SQLITE_TEST) && defined(SQLITE_DEBUG)) - /* - ** Used when SQLITE_NO_SYNC is not defined and by the assert() and/or - ** OSTRACE() macros. - */ - winFile *pFile = (winFile*)id; -#else - UNUSED_PARAMETER(id); -#endif - - assert( pFile ); - /* Check that one of SQLITE_SYNC_NORMAL or FULL was passed */ - assert((flags&0x0F)==SQLITE_SYNC_NORMAL - || (flags&0x0F)==SQLITE_SYNC_FULL - ); - - /* Unix cannot, but some systems may return SQLITE_FULL from here. This - ** line is to test that doing so does not cause any problems. - */ - SimulateDiskfullError( return SQLITE_FULL ); - - OSTRACE(("SYNC file=%p, flags=%x, lock=%d\n", - pFile->h, flags, pFile->locktype)); - -#ifndef SQLITE_TEST - UNUSED_PARAMETER(flags); -#else - if( (flags&0x0F)==SQLITE_SYNC_FULL ){ - sqlite3_fullsync_count++; - } - sqlite3_sync_count++; -#endif - - /* If we compiled with the SQLITE_NO_SYNC flag, then syncing is a - ** no-op - */ -#ifdef SQLITE_NO_SYNC - OSTRACE(("SYNC-NOP file=%p, rc=SQLITE_OK\n", pFile->h)); - return SQLITE_OK; -#else - rc = osFlushFileBuffers(pFile->h); - SimulateIOError( rc=FALSE ); - if( rc ){ - OSTRACE(("SYNC file=%p, rc=SQLITE_OK\n", pFile->h)); - return SQLITE_OK; - }else{ - pFile->lastErrno = osGetLastError(); - OSTRACE(("SYNC file=%p, rc=SQLITE_IOERR_FSYNC\n", pFile->h)); - return winLogError(SQLITE_IOERR_FSYNC, pFile->lastErrno, - "winSync", pFile->zPath); - } -#endif -} - -/* -** Determine the current size of a file in bytes -*/ -static int winFileSize(sqlite3_file *id, sqlite3_int64 *pSize){ - winFile *pFile = (winFile*)id; - int rc = SQLITE_OK; - - assert( id!=0 ); - assert( pSize!=0 ); - SimulateIOError(return SQLITE_IOERR_FSTAT); - OSTRACE(("SIZE file=%p, pSize=%p\n", pFile->h, pSize)); - -#if SQLITE_OS_WINRT - { - FILE_STANDARD_INFO info; - if( osGetFileInformationByHandleEx(pFile->h, FileStandardInfo, - &info, sizeof(info)) ){ - *pSize = info.EndOfFile.QuadPart; - }else{ - pFile->lastErrno = osGetLastError(); - rc = winLogError(SQLITE_IOERR_FSTAT, pFile->lastErrno, - "winFileSize", pFile->zPath); - } - } -#else - { - DWORD upperBits; - DWORD lowerBits; - DWORD lastErrno; - - lowerBits = osGetFileSize(pFile->h, &upperBits); - *pSize = (((sqlite3_int64)upperBits)<<32) + lowerBits; - if( (lowerBits == INVALID_FILE_SIZE) - && ((lastErrno = osGetLastError())!=NO_ERROR) ){ - pFile->lastErrno = lastErrno; - rc = winLogError(SQLITE_IOERR_FSTAT, pFile->lastErrno, - "winFileSize", pFile->zPath); - } - } -#endif - OSTRACE(("SIZE file=%p, pSize=%p, *pSize=%lld, rc=%s\n", - pFile->h, pSize, *pSize, sqlite3ErrName(rc))); - return rc; -} - -/* -** LOCKFILE_FAIL_IMMEDIATELY is undefined on some Windows systems. -*/ -#ifndef LOCKFILE_FAIL_IMMEDIATELY -# define LOCKFILE_FAIL_IMMEDIATELY 1 -#endif - -#ifndef LOCKFILE_EXCLUSIVE_LOCK -# define LOCKFILE_EXCLUSIVE_LOCK 2 -#endif - -/* -** Historically, SQLite has used both the LockFile and LockFileEx functions. -** When the LockFile function was used, it was always expected to fail -** immediately if the lock could not be obtained. Also, it always expected to -** obtain an exclusive lock. 
These flags are used with the LockFileEx function -** and reflect those expectations; therefore, they should not be changed. -*/ -#ifndef SQLITE_LOCKFILE_FLAGS -# define SQLITE_LOCKFILE_FLAGS (LOCKFILE_FAIL_IMMEDIATELY | \ - LOCKFILE_EXCLUSIVE_LOCK) -#endif - -/* -** Currently, SQLite never calls the LockFileEx function without wanting the -** call to fail immediately if the lock cannot be obtained. -*/ -#ifndef SQLITE_LOCKFILEEX_FLAGS -# define SQLITE_LOCKFILEEX_FLAGS (LOCKFILE_FAIL_IMMEDIATELY) -#endif - -/* -** Acquire a reader lock. -** Different API routines are called depending on whether or not this -** is Win9x or WinNT. -*/ -static int winGetReadLock(winFile *pFile){ - int res; - OSTRACE(("READ-LOCK file=%p, lock=%d\n", pFile->h, pFile->locktype)); - if( osIsNT() ){ -#if SQLITE_OS_WINCE - /* - ** NOTE: Windows CE is handled differently here due its lack of the Win32 - ** API LockFileEx. - */ - res = winceLockFile(&pFile->h, SHARED_FIRST, 0, 1, 0); -#else - res = winLockFile(&pFile->h, SQLITE_LOCKFILEEX_FLAGS, SHARED_FIRST, 0, - SHARED_SIZE, 0); -#endif - } -#ifdef SQLITE_WIN32_HAS_ANSI - else{ - int lk; - sqlite3_randomness(sizeof(lk), &lk); - pFile->sharedLockByte = (short)((lk & 0x7fffffff)%(SHARED_SIZE - 1)); - res = winLockFile(&pFile->h, SQLITE_LOCKFILE_FLAGS, - SHARED_FIRST+pFile->sharedLockByte, 0, 1, 0); - } -#endif - if( res == 0 ){ - pFile->lastErrno = osGetLastError(); - /* No need to log a failure to lock */ - } - OSTRACE(("READ-LOCK file=%p, result=%d\n", pFile->h, res)); - return res; -} - -/* -** Undo a readlock -*/ -static int winUnlockReadLock(winFile *pFile){ - int res; - DWORD lastErrno; - OSTRACE(("READ-UNLOCK file=%p, lock=%d\n", pFile->h, pFile->locktype)); - if( osIsNT() ){ - res = winUnlockFile(&pFile->h, SHARED_FIRST, 0, SHARED_SIZE, 0); - } -#ifdef SQLITE_WIN32_HAS_ANSI - else{ - res = winUnlockFile(&pFile->h, SHARED_FIRST+pFile->sharedLockByte, 0, 1, 0); - } -#endif - if( res==0 && ((lastErrno = osGetLastError())!=ERROR_NOT_LOCKED) ){ - pFile->lastErrno = lastErrno; - winLogError(SQLITE_IOERR_UNLOCK, pFile->lastErrno, - "winUnlockReadLock", pFile->zPath); - } - OSTRACE(("READ-UNLOCK file=%p, result=%d\n", pFile->h, res)); - return res; -} - -/* -** Lock the file with the lock specified by parameter locktype - one -** of the following: -** -** (1) SHARED_LOCK -** (2) RESERVED_LOCK -** (3) PENDING_LOCK -** (4) EXCLUSIVE_LOCK -** -** Sometimes when requesting one lock state, additional lock states -** are inserted in between. The locking might fail on one of the later -** transitions leaving the lock state different from what it started but -** still short of its goal. The following chart shows the allowed -** transitions and the inserted intermediate states: -** -** UNLOCKED -> SHARED -** SHARED -> RESERVED -** SHARED -> (PENDING) -> EXCLUSIVE -** RESERVED -> (PENDING) -> EXCLUSIVE -** PENDING -> EXCLUSIVE -** -** This routine will only increase a lock. The winUnlock() routine -** erases all locks at once and returns us immediately to locking level 0. -** It is not possible to lower the locking level one step at a time. You -** must go straight to locking level 0. 
-*/ -static int winLock(sqlite3_file *id, int locktype){ - int rc = SQLITE_OK; /* Return code from subroutines */ - int res = 1; /* Result of a Windows lock call */ - int newLocktype; /* Set pFile->locktype to this value before exiting */ - int gotPendingLock = 0;/* True if we acquired a PENDING lock this time */ - winFile *pFile = (winFile*)id; - DWORD lastErrno = NO_ERROR; - - assert( id!=0 ); - OSTRACE(("LOCK file=%p, oldLock=%d(%d), newLock=%d\n", - pFile->h, pFile->locktype, pFile->sharedLockByte, locktype)); - - /* If there is already a lock of this type or more restrictive on the - ** OsFile, do nothing. Don't use the end_lock: exit path, as - ** sqlite3OsEnterMutex() hasn't been called yet. - */ - if( pFile->locktype>=locktype ){ - OSTRACE(("LOCK-HELD file=%p, rc=SQLITE_OK\n", pFile->h)); - return SQLITE_OK; - } - - /* Make sure the locking sequence is correct - */ - assert( pFile->locktype!=NO_LOCK || locktype==SHARED_LOCK ); - assert( locktype!=PENDING_LOCK ); - assert( locktype!=RESERVED_LOCK || pFile->locktype==SHARED_LOCK ); - - /* Lock the PENDING_LOCK byte if we need to acquire a PENDING lock or - ** a SHARED lock. If we are acquiring a SHARED lock, the acquisition of - ** the PENDING_LOCK byte is temporary. - */ - newLocktype = pFile->locktype; - if( (pFile->locktype==NO_LOCK) - || ( (locktype==EXCLUSIVE_LOCK) - && (pFile->locktype==RESERVED_LOCK)) - ){ - int cnt = 3; - while( cnt-->0 && (res = winLockFile(&pFile->h, SQLITE_LOCKFILE_FLAGS, - PENDING_BYTE, 0, 1, 0))==0 ){ - /* Try 3 times to get the pending lock. This is needed to work - ** around problems caused by indexing and/or anti-virus software on - ** Windows systems. - ** If you are using this code as a model for alternative VFSes, do not - ** copy this retry logic. It is a hack intended for Windows only. - */ - lastErrno = osGetLastError(); - OSTRACE(("LOCK-PENDING-FAIL file=%p, count=%d, result=%d\n", - pFile->h, cnt, res)); - if( lastErrno==ERROR_INVALID_HANDLE ){ - pFile->lastErrno = lastErrno; - rc = SQLITE_IOERR_LOCK; - OSTRACE(("LOCK-FAIL file=%p, count=%d, rc=%s\n", - pFile->h, cnt, sqlite3ErrName(rc))); - return rc; - } - if( cnt ) sqlite3_win32_sleep(1); - } - gotPendingLock = res; - if( !res ){ - lastErrno = osGetLastError(); - } - } - - /* Acquire a shared lock - */ - if( locktype==SHARED_LOCK && res ){ - assert( pFile->locktype==NO_LOCK ); - res = winGetReadLock(pFile); - if( res ){ - newLocktype = SHARED_LOCK; - }else{ - lastErrno = osGetLastError(); - } - } - - /* Acquire a RESERVED lock - */ - if( locktype==RESERVED_LOCK && res ){ - assert( pFile->locktype==SHARED_LOCK ); - res = winLockFile(&pFile->h, SQLITE_LOCKFILE_FLAGS, RESERVED_BYTE, 0, 1, 0); - if( res ){ - newLocktype = RESERVED_LOCK; - }else{ - lastErrno = osGetLastError(); - } - } - - /* Acquire a PENDING lock - */ - if( locktype==EXCLUSIVE_LOCK && res ){ - newLocktype = PENDING_LOCK; - gotPendingLock = 0; - } - - /* Acquire an EXCLUSIVE lock - */ - if( locktype==EXCLUSIVE_LOCK && res ){ - assert( pFile->locktype>=SHARED_LOCK ); - res = winUnlockReadLock(pFile); - res = winLockFile(&pFile->h, SQLITE_LOCKFILE_FLAGS, SHARED_FIRST, 0, - SHARED_SIZE, 0); - if( res ){ - newLocktype = EXCLUSIVE_LOCK; - }else{ - lastErrno = osGetLastError(); - winGetReadLock(pFile); - } - } - - /* If we are holding a PENDING lock that ought to be released, then - ** release it now. 
- */ - if( gotPendingLock && locktype==SHARED_LOCK ){ - winUnlockFile(&pFile->h, PENDING_BYTE, 0, 1, 0); - } - - /* Update the state of the lock has held in the file descriptor then - ** return the appropriate result code. - */ - if( res ){ - rc = SQLITE_OK; - }else{ - pFile->lastErrno = lastErrno; - rc = SQLITE_BUSY; - OSTRACE(("LOCK-FAIL file=%p, wanted=%d, got=%d\n", - pFile->h, locktype, newLocktype)); - } - pFile->locktype = (u8)newLocktype; - OSTRACE(("LOCK file=%p, lock=%d, rc=%s\n", - pFile->h, pFile->locktype, sqlite3ErrName(rc))); - return rc; -} - -/* -** This routine checks if there is a RESERVED lock held on the specified -** file by this or any other process. If such a lock is held, return -** non-zero, otherwise zero. -*/ -static int winCheckReservedLock(sqlite3_file *id, int *pResOut){ - int res; - winFile *pFile = (winFile*)id; - - SimulateIOError( return SQLITE_IOERR_CHECKRESERVEDLOCK; ); - OSTRACE(("TEST-WR-LOCK file=%p, pResOut=%p\n", pFile->h, pResOut)); - - assert( id!=0 ); - if( pFile->locktype>=RESERVED_LOCK ){ - res = 1; - OSTRACE(("TEST-WR-LOCK file=%p, result=%d (local)\n", pFile->h, res)); - }else{ - res = winLockFile(&pFile->h, SQLITE_LOCKFILEEX_FLAGS,RESERVED_BYTE, 0, 1, 0); - if( res ){ - winUnlockFile(&pFile->h, RESERVED_BYTE, 0, 1, 0); - } - res = !res; - OSTRACE(("TEST-WR-LOCK file=%p, result=%d (remote)\n", pFile->h, res)); - } - *pResOut = res; - OSTRACE(("TEST-WR-LOCK file=%p, pResOut=%p, *pResOut=%d, rc=SQLITE_OK\n", - pFile->h, pResOut, *pResOut)); - return SQLITE_OK; -} - -/* -** Lower the locking level on file descriptor id to locktype. locktype -** must be either NO_LOCK or SHARED_LOCK. -** -** If the locking level of the file descriptor is already at or below -** the requested locking level, this routine is a no-op. -** -** It is not possible for this routine to fail if the second argument -** is NO_LOCK. If the second argument is SHARED_LOCK then this routine -** might return SQLITE_IOERR; -*/ -static int winUnlock(sqlite3_file *id, int locktype){ - int type; - winFile *pFile = (winFile*)id; - int rc = SQLITE_OK; - assert( pFile!=0 ); - assert( locktype<=SHARED_LOCK ); - OSTRACE(("UNLOCK file=%p, oldLock=%d(%d), newLock=%d\n", - pFile->h, pFile->locktype, pFile->sharedLockByte, locktype)); - type = pFile->locktype; - if( type>=EXCLUSIVE_LOCK ){ - winUnlockFile(&pFile->h, SHARED_FIRST, 0, SHARED_SIZE, 0); - if( locktype==SHARED_LOCK && !winGetReadLock(pFile) ){ - /* This should never happen. We should always be able to - ** reacquire the read lock */ - rc = winLogError(SQLITE_IOERR_UNLOCK, osGetLastError(), - "winUnlock", pFile->zPath); - } - } - if( type>=RESERVED_LOCK ){ - winUnlockFile(&pFile->h, RESERVED_BYTE, 0, 1, 0); - } - if( locktype==NO_LOCK && type>=SHARED_LOCK ){ - winUnlockReadLock(pFile); - } - if( type>=PENDING_LOCK ){ - winUnlockFile(&pFile->h, PENDING_BYTE, 0, 1, 0); - } - pFile->locktype = (u8)locktype; - OSTRACE(("UNLOCK file=%p, lock=%d, rc=%s\n", - pFile->h, pFile->locktype, sqlite3ErrName(rc))); - return rc; -} - -/* -** If *pArg is inititially negative then this is a query. Set *pArg to -** 1 or 0 depending on whether or not bit mask of pFile->ctrlFlags is set. -** -** If *pArg is 0 or 1, then clear or set the mask bit of pFile->ctrlFlags. 
-*/ -static void winModeBit(winFile *pFile, unsigned char mask, int *pArg){ - if( *pArg<0 ){ - *pArg = (pFile->ctrlFlags & mask)!=0; - }else if( (*pArg)==0 ){ - pFile->ctrlFlags &= ~mask; - }else{ - pFile->ctrlFlags |= mask; - } -} - -/* Forward references to VFS helper methods used for temporary files */ -static int winGetTempname(sqlite3_vfs *, char **); -static int winIsDir(const void *); -static BOOL winIsDriveLetterAndColon(const char *); - -/* -** Control and query of the open file handle. -*/ -static int winFileControl(sqlite3_file *id, int op, void *pArg){ - winFile *pFile = (winFile*)id; - OSTRACE(("FCNTL file=%p, op=%d, pArg=%p\n", pFile->h, op, pArg)); - switch( op ){ - case SQLITE_FCNTL_LOCKSTATE: { - *(int*)pArg = pFile->locktype; - OSTRACE(("FCNTL file=%p, rc=SQLITE_OK\n", pFile->h)); - return SQLITE_OK; - } - case SQLITE_LAST_ERRNO: { - *(int*)pArg = (int)pFile->lastErrno; - OSTRACE(("FCNTL file=%p, rc=SQLITE_OK\n", pFile->h)); - return SQLITE_OK; - } - case SQLITE_FCNTL_CHUNK_SIZE: { - pFile->szChunk = *(int *)pArg; - OSTRACE(("FCNTL file=%p, rc=SQLITE_OK\n", pFile->h)); - return SQLITE_OK; - } - case SQLITE_FCNTL_SIZE_HINT: { - if( pFile->szChunk>0 ){ - sqlite3_int64 oldSz; - int rc = winFileSize(id, &oldSz); - if( rc==SQLITE_OK ){ - sqlite3_int64 newSz = *(sqlite3_int64*)pArg; - if( newSz>oldSz ){ - SimulateIOErrorBenign(1); - rc = winTruncate(id, newSz); - SimulateIOErrorBenign(0); - } - } - OSTRACE(("FCNTL file=%p, rc=%s\n", pFile->h, sqlite3ErrName(rc))); - return rc; - } - OSTRACE(("FCNTL file=%p, rc=SQLITE_OK\n", pFile->h)); - return SQLITE_OK; - } - case SQLITE_FCNTL_PERSIST_WAL: { - winModeBit(pFile, WINFILE_PERSIST_WAL, (int*)pArg); - OSTRACE(("FCNTL file=%p, rc=SQLITE_OK\n", pFile->h)); - return SQLITE_OK; - } - case SQLITE_FCNTL_POWERSAFE_OVERWRITE: { - winModeBit(pFile, WINFILE_PSOW, (int*)pArg); - OSTRACE(("FCNTL file=%p, rc=SQLITE_OK\n", pFile->h)); - return SQLITE_OK; - } - case SQLITE_FCNTL_VFSNAME: { - *(char**)pArg = sqlite3_mprintf("%s", pFile->pVfs->zName); - OSTRACE(("FCNTL file=%p, rc=SQLITE_OK\n", pFile->h)); - return SQLITE_OK; - } - case SQLITE_FCNTL_WIN32_AV_RETRY: { - int *a = (int*)pArg; - if( a[0]>0 ){ - winIoerrRetry = a[0]; - }else{ - a[0] = winIoerrRetry; - } - if( a[1]>0 ){ - winIoerrRetryDelay = a[1]; - }else{ - a[1] = winIoerrRetryDelay; - } - OSTRACE(("FCNTL file=%p, rc=SQLITE_OK\n", pFile->h)); - return SQLITE_OK; - } -#ifdef SQLITE_TEST - case SQLITE_FCNTL_WIN32_SET_HANDLE: { - LPHANDLE phFile = (LPHANDLE)pArg; - HANDLE hOldFile = pFile->h; - pFile->h = *phFile; - *phFile = hOldFile; - OSTRACE(("FCNTL oldFile=%p, newFile=%p, rc=SQLITE_OK\n", - hOldFile, pFile->h)); - return SQLITE_OK; - } -#endif - case SQLITE_FCNTL_TEMPFILENAME: { - char *zTFile = 0; - int rc = winGetTempname(pFile->pVfs, &zTFile); - if( rc==SQLITE_OK ){ - *(char**)pArg = zTFile; - } - OSTRACE(("FCNTL file=%p, rc=%s\n", pFile->h, sqlite3ErrName(rc))); - return rc; - } -#if SQLITE_MAX_MMAP_SIZE>0 - case SQLITE_FCNTL_MMAP_SIZE: { - i64 newLimit = *(i64*)pArg; - int rc = SQLITE_OK; - if( newLimit>sqlite3GlobalConfig.mxMmap ){ - newLimit = sqlite3GlobalConfig.mxMmap; - } - *(i64*)pArg = pFile->mmapSizeMax; - if( newLimit>=0 && newLimit!=pFile->mmapSizeMax && pFile->nFetchOut==0 ){ - pFile->mmapSizeMax = newLimit; - if( pFile->mmapSize>0 ){ - winUnmapfile(pFile); - rc = winMapfile(pFile, -1); - } - } - OSTRACE(("FCNTL file=%p, rc=%s\n", pFile->h, sqlite3ErrName(rc))); - return rc; - } -#endif - } - OSTRACE(("FCNTL file=%p, rc=SQLITE_NOTFOUND\n", pFile->h)); - return 
SQLITE_NOTFOUND; -} - -/* -** Return the sector size in bytes of the underlying block device for -** the specified file. This is almost always 512 bytes, but may be -** larger for some devices. -** -** SQLite code assumes this function cannot fail. It also assumes that -** if two files are created in the same file-system directory (i.e. -** a database and its journal file) that the sector size will be the -** same for both. -*/ -static int winSectorSize(sqlite3_file *id){ - (void)id; - return SQLITE_DEFAULT_SECTOR_SIZE; -} - -/* -** Return a vector of device characteristics. -*/ -static int winDeviceCharacteristics(sqlite3_file *id){ - winFile *p = (winFile*)id; - return SQLITE_IOCAP_UNDELETABLE_WHEN_OPEN | - ((p->ctrlFlags & WINFILE_PSOW)?SQLITE_IOCAP_POWERSAFE_OVERWRITE:0); -} - -/* -** Windows will only let you create file view mappings -** on allocation size granularity boundaries. -** During sqlite3_os_init() we do a GetSystemInfo() -** to get the granularity size. -*/ -static SYSTEM_INFO winSysInfo; - -#ifndef SQLITE_OMIT_WAL - -/* -** Helper functions to obtain and relinquish the global mutex. The -** global mutex is used to protect the winLockInfo objects used by -** this file, all of which may be shared by multiple threads. -** -** Function winShmMutexHeld() is used to assert() that the global mutex -** is held when required. This function is only used as part of assert() -** statements. e.g. -** -** winShmEnterMutex() -** assert( winShmMutexHeld() ); -** winShmLeaveMutex() -*/ -static void winShmEnterMutex(void){ - sqlite3_mutex_enter(sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_MASTER)); -} -static void winShmLeaveMutex(void){ - sqlite3_mutex_leave(sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_MASTER)); -} -#ifndef NDEBUG -static int winShmMutexHeld(void) { - return sqlite3_mutex_held(sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_MASTER)); -} -#endif - -/* -** Object used to represent a single file opened and mmapped to provide -** shared memory. When multiple threads all reference the same -** log-summary, each thread has its own winFile object, but they all -** point to a single instance of this object. In other words, each -** log-summary is opened only once per process. -** -** winShmMutexHeld() must be true when creating or destroying -** this object or while reading or writing the following fields: -** -** nRef -** pNext -** -** The following fields are read-only after the object is created: -** -** fid -** zFilename -** -** Either winShmNode.mutex must be held or winShmNode.nRef==0 and -** winShmMutexHeld() is true when reading or writing any other field -** in this structure. -** -*/ -struct winShmNode { - sqlite3_mutex *mutex; /* Mutex to access this object */ - char *zFilename; /* Name of the file */ - winFile hFile; /* File handle from winOpen */ - - int szRegion; /* Size of shared-memory regions */ - int nRegion; /* Size of array apRegion */ - struct ShmRegion { - HANDLE hMap; /* File handle from CreateFileMapping */ - void *pMap; - } *aRegion; - DWORD lastErrno; /* The Windows errno from the last I/O error */ - - int nRef; /* Number of winShm objects pointing to this */ - winShm *pFirst; /* All winShm objects pointing to this */ - winShmNode *pNext; /* Next in list of all winShmNode objects */ -#ifdef SQLITE_DEBUG - u8 nextShmId; /* Next available winShm.id value */ -#endif -}; - -/* -** A global array of all winShmNode objects. -** -** The winShmMutexHeld() must be true while reading or writing this list. 
-*/ -static winShmNode *winShmNodeList = 0; - -/* -** Structure used internally by this VFS to record the state of an -** open shared memory connection. -** -** The following fields are initialized when this object is created and -** are read-only thereafter: -** -** winShm.pShmNode -** winShm.id -** -** All other fields are read/write. The winShm.pShmNode->mutex must be held -** while accessing any read/write fields. -*/ -struct winShm { - winShmNode *pShmNode; /* The underlying winShmNode object */ - winShm *pNext; /* Next winShm with the same winShmNode */ - u8 hasMutex; /* True if holding the winShmNode mutex */ - u16 sharedMask; /* Mask of shared locks held */ - u16 exclMask; /* Mask of exclusive locks held */ -#ifdef SQLITE_DEBUG - u8 id; /* Id of this connection with its winShmNode */ -#endif -}; - -/* -** Constants used for locking -*/ -#define WIN_SHM_BASE ((22+SQLITE_SHM_NLOCK)*4) /* first lock byte */ -#define WIN_SHM_DMS (WIN_SHM_BASE+SQLITE_SHM_NLOCK) /* deadman switch */ - -/* -** Apply advisory locks for all n bytes beginning at ofst. -*/ -#define _SHM_UNLCK 1 -#define _SHM_RDLCK 2 -#define _SHM_WRLCK 3 -static int winShmSystemLock( - winShmNode *pFile, /* Apply locks to this open shared-memory segment */ - int lockType, /* _SHM_UNLCK, _SHM_RDLCK, or _SHM_WRLCK */ - int ofst, /* Offset to first byte to be locked/unlocked */ - int nByte /* Number of bytes to lock or unlock */ -){ - int rc = 0; /* Result code form Lock/UnlockFileEx() */ - - /* Access to the winShmNode object is serialized by the caller */ - assert( sqlite3_mutex_held(pFile->mutex) || pFile->nRef==0 ); - - OSTRACE(("SHM-LOCK file=%p, lock=%d, offset=%d, size=%d\n", - pFile->hFile.h, lockType, ofst, nByte)); - - /* Release/Acquire the system-level lock */ - if( lockType==_SHM_UNLCK ){ - rc = winUnlockFile(&pFile->hFile.h, ofst, 0, nByte, 0); - }else{ - /* Initialize the locking parameters */ - DWORD dwFlags = LOCKFILE_FAIL_IMMEDIATELY; - if( lockType == _SHM_WRLCK ) dwFlags |= LOCKFILE_EXCLUSIVE_LOCK; - rc = winLockFile(&pFile->hFile.h, dwFlags, ofst, 0, nByte, 0); - } - - if( rc!= 0 ){ - rc = SQLITE_OK; - }else{ - pFile->lastErrno = osGetLastError(); - rc = SQLITE_BUSY; - } - - OSTRACE(("SHM-LOCK file=%p, func=%s, errno=%lu, rc=%s\n", - pFile->hFile.h, (lockType == _SHM_UNLCK) ? "winUnlockFile" : - "winLockFile", pFile->lastErrno, sqlite3ErrName(rc))); - - return rc; -} - -/* Forward references to VFS methods */ -static int winOpen(sqlite3_vfs*,const char*,sqlite3_file*,int,int*); -static int winDelete(sqlite3_vfs *,const char*,int); - -/* -** Purge the winShmNodeList list of all entries with winShmNode.nRef==0. -** -** This is not a VFS shared-memory method; it is a utility function called -** by VFS shared-memory methods. -*/ -static void winShmPurge(sqlite3_vfs *pVfs, int deleteFlag){ - winShmNode **pp; - winShmNode *p; - assert( winShmMutexHeld() ); - OSTRACE(("SHM-PURGE pid=%lu, deleteFlag=%d\n", - osGetCurrentProcessId(), deleteFlag)); - pp = &winShmNodeList; - while( (p = *pp)!=0 ){ - if( p->nRef==0 ){ - int i; - if( p->mutex ){ sqlite3_mutex_free(p->mutex); } - for(i=0; i<p->nRegion; i++){ - BOOL bRc = osUnmapViewOfFile(p->aRegion[i].pMap); - OSTRACE(("SHM-PURGE-UNMAP pid=%lu, region=%d, rc=%s\n", - osGetCurrentProcessId(), i, bRc ? "ok" : "failed")); - UNUSED_VARIABLE_VALUE(bRc); - bRc = osCloseHandle(p->aRegion[i].hMap); - OSTRACE(("SHM-PURGE-CLOSE pid=%lu, region=%d, rc=%s\n", - osGetCurrentProcessId(), i, bRc ? 
"ok" : "failed")); - UNUSED_VARIABLE_VALUE(bRc); - } - if( p->hFile.h!=NULL && p->hFile.h!=INVALID_HANDLE_VALUE ){ - SimulateIOErrorBenign(1); - winClose((sqlite3_file *)&p->hFile); - SimulateIOErrorBenign(0); - } - if( deleteFlag ){ - SimulateIOErrorBenign(1); - sqlite3BeginBenignMalloc(); - winDelete(pVfs, p->zFilename, 0); - sqlite3EndBenignMalloc(); - SimulateIOErrorBenign(0); - } - *pp = p->pNext; - sqlite3_free(p->aRegion); - sqlite3_free(p); - }else{ - pp = &p->pNext; - } - } -} - -/* -** Open the shared-memory area associated with database file pDbFd. -** -** When opening a new shared-memory file, if no other instances of that -** file are currently open, in this process or in other processes, then -** the file must be truncated to zero length or have its header cleared. -*/ -static int winOpenSharedMemory(winFile *pDbFd){ - struct winShm *p; /* The connection to be opened */ - struct winShmNode *pShmNode = 0; /* The underlying mmapped file */ - int rc; /* Result code */ - struct winShmNode *pNew; /* Newly allocated winShmNode */ - int nName; /* Size of zName in bytes */ - - assert( pDbFd->pShm==0 ); /* Not previously opened */ - - /* Allocate space for the new sqlite3_shm object. Also speculatively - ** allocate space for a new winShmNode and filename. - */ - p = sqlite3MallocZero( sizeof(*p) ); - if( p==0 ) return SQLITE_IOERR_NOMEM; - nName = sqlite3Strlen30(pDbFd->zPath); - pNew = sqlite3MallocZero( sizeof(*pShmNode) + nName + 17 ); - if( pNew==0 ){ - sqlite3_free(p); - return SQLITE_IOERR_NOMEM; - } - pNew->zFilename = (char*)&pNew[1]; - sqlite3_snprintf(nName+15, pNew->zFilename, "%s-shm", pDbFd->zPath); - sqlite3FileSuffix3(pDbFd->zPath, pNew->zFilename); - - /* Look to see if there is an existing winShmNode that can be used. - ** If no matching winShmNode currently exists, create a new one. - */ - winShmEnterMutex(); - for(pShmNode = winShmNodeList; pShmNode; pShmNode=pShmNode->pNext){ - /* TBD need to come up with better match here. Perhaps - ** use FILE_ID_BOTH_DIR_INFO Structure. - */ - if( sqlite3StrICmp(pShmNode->zFilename, pNew->zFilename)==0 ) break; - } - if( pShmNode ){ - sqlite3_free(pNew); - }else{ - pShmNode = pNew; - pNew = 0; - ((winFile*)(&pShmNode->hFile))->h = INVALID_HANDLE_VALUE; - pShmNode->pNext = winShmNodeList; - winShmNodeList = pShmNode; - - pShmNode->mutex = sqlite3_mutex_alloc(SQLITE_MUTEX_FAST); - if( pShmNode->mutex==0 ){ - rc = SQLITE_IOERR_NOMEM; - goto shm_open_err; - } - - rc = winOpen(pDbFd->pVfs, - pShmNode->zFilename, /* Name of the file (UTF-8) */ - (sqlite3_file*)&pShmNode->hFile, /* File handle here */ - SQLITE_OPEN_WAL | SQLITE_OPEN_READWRITE | SQLITE_OPEN_CREATE, - 0); - if( SQLITE_OK!=rc ){ - goto shm_open_err; - } - - /* Check to see if another process is holding the dead-man switch. - ** If not, truncate the file to zero length. 
- */ - if( winShmSystemLock(pShmNode, _SHM_WRLCK, WIN_SHM_DMS, 1)==SQLITE_OK ){ - rc = winTruncate((sqlite3_file *)&pShmNode->hFile, 0); - if( rc!=SQLITE_OK ){ - rc = winLogError(SQLITE_IOERR_SHMOPEN, osGetLastError(), - "winOpenShm", pDbFd->zPath); - } - } - if( rc==SQLITE_OK ){ - winShmSystemLock(pShmNode, _SHM_UNLCK, WIN_SHM_DMS, 1); - rc = winShmSystemLock(pShmNode, _SHM_RDLCK, WIN_SHM_DMS, 1); - } - if( rc ) goto shm_open_err; - } - - /* Make the new connection a child of the winShmNode */ - p->pShmNode = pShmNode; -#ifdef SQLITE_DEBUG - p->id = pShmNode->nextShmId++; -#endif - pShmNode->nRef++; - pDbFd->pShm = p; - winShmLeaveMutex(); - - /* The reference count on pShmNode has already been incremented under - ** the cover of the winShmEnterMutex() mutex and the pointer from the - ** new (struct winShm) object to the pShmNode has been set. All that is - ** left to do is to link the new object into the linked list starting - ** at pShmNode->pFirst. This must be done while holding the pShmNode->mutex - ** mutex. - */ - sqlite3_mutex_enter(pShmNode->mutex); - p->pNext = pShmNode->pFirst; - pShmNode->pFirst = p; - sqlite3_mutex_leave(pShmNode->mutex); - return SQLITE_OK; - - /* Jump here on any error */ -shm_open_err: - winShmSystemLock(pShmNode, _SHM_UNLCK, WIN_SHM_DMS, 1); - winShmPurge(pDbFd->pVfs, 0); /* This call frees pShmNode if required */ - sqlite3_free(p); - sqlite3_free(pNew); - winShmLeaveMutex(); - return rc; -} - -/* -** Close a connection to shared-memory. Delete the underlying -** storage if deleteFlag is true. -*/ -static int winShmUnmap( - sqlite3_file *fd, /* Database holding shared memory */ - int deleteFlag /* Delete after closing if true */ -){ - winFile *pDbFd; /* Database holding shared-memory */ - winShm *p; /* The connection to be closed */ - winShmNode *pShmNode; /* The underlying shared-memory file */ - winShm **pp; /* For looping over sibling connections */ - - pDbFd = (winFile*)fd; - p = pDbFd->pShm; - if( p==0 ) return SQLITE_OK; - pShmNode = p->pShmNode; - - /* Remove connection p from the set of connections associated - ** with pShmNode */ - sqlite3_mutex_enter(pShmNode->mutex); - for(pp=&pShmNode->pFirst; (*pp)!=p; pp = &(*pp)->pNext){} - *pp = p->pNext; - - /* Free the connection p */ - sqlite3_free(p); - pDbFd->pShm = 0; - sqlite3_mutex_leave(pShmNode->mutex); - - /* If pShmNode->nRef has reached 0, then close the underlying - ** shared-memory file, too */ - winShmEnterMutex(); - assert( pShmNode->nRef>0 ); - pShmNode->nRef--; - if( pShmNode->nRef==0 ){ - winShmPurge(pDbFd->pVfs, deleteFlag); - } - winShmLeaveMutex(); - - return SQLITE_OK; -} - -/* -** Change the lock state for a shared-memory segment. 
-*/ -static int winShmLock( - sqlite3_file *fd, /* Database file holding the shared memory */ - int ofst, /* First lock to acquire or release */ - int n, /* Number of locks to acquire or release */ - int flags /* What to do with the lock */ -){ - winFile *pDbFd = (winFile*)fd; /* Connection holding shared memory */ - winShm *p = pDbFd->pShm; /* The shared memory being locked */ - winShm *pX; /* For looping over all siblings */ - winShmNode *pShmNode = p->pShmNode; - int rc = SQLITE_OK; /* Result code */ - u16 mask; /* Mask of locks to take or release */ - - assert( ofst>=0 && ofst+n<=SQLITE_SHM_NLOCK ); - assert( n>=1 ); - assert( flags==(SQLITE_SHM_LOCK | SQLITE_SHM_SHARED) - || flags==(SQLITE_SHM_LOCK | SQLITE_SHM_EXCLUSIVE) - || flags==(SQLITE_SHM_UNLOCK | SQLITE_SHM_SHARED) - || flags==(SQLITE_SHM_UNLOCK | SQLITE_SHM_EXCLUSIVE) ); - assert( n==1 || (flags & SQLITE_SHM_EXCLUSIVE)!=0 ); - - mask = (u16)((1U<<(ofst+n)) - (1U<<ofst)); - assert( n>1 || mask==(1<<ofst) ); - sqlite3_mutex_enter(pShmNode->mutex); - if( flags & SQLITE_SHM_UNLOCK ){ - u16 allMask = 0; /* Mask of locks held by siblings */ - - /* See if any siblings hold this same lock */ - for(pX=pShmNode->pFirst; pX; pX=pX->pNext){ - if( pX==p ) continue; - assert( (pX->exclMask & (p->exclMask|p->sharedMask))==0 ); - allMask |= pX->sharedMask; - } - - /* Unlock the system-level locks */ - if( (mask & allMask)==0 ){ - rc = winShmSystemLock(pShmNode, _SHM_UNLCK, ofst+WIN_SHM_BASE, n); - }else{ - rc = SQLITE_OK; - } - - /* Undo the local locks */ - if( rc==SQLITE_OK ){ - p->exclMask &= ~mask; - p->sharedMask &= ~mask; - } - }else if( flags & SQLITE_SHM_SHARED ){ - u16 allShared = 0; /* Union of locks held by connections other than "p" */ - - /* Find out which shared locks are already held by sibling connections. - ** If any sibling already holds an exclusive lock, go ahead and return - ** SQLITE_BUSY. - */ - for(pX=pShmNode->pFirst; pX; pX=pX->pNext){ - if( (pX->exclMask & mask)!=0 ){ - rc = SQLITE_BUSY; - break; - } - allShared |= pX->sharedMask; - } - - /* Get shared locks at the system level, if necessary */ - if( rc==SQLITE_OK ){ - if( (allShared & mask)==0 ){ - rc = winShmSystemLock(pShmNode, _SHM_RDLCK, ofst+WIN_SHM_BASE, n); - }else{ - rc = SQLITE_OK; - } - } - - /* Get the local shared locks */ - if( rc==SQLITE_OK ){ - p->sharedMask |= mask; - } - }else{ - /* Make sure no sibling connections hold locks that will block this - ** lock. If any do, return SQLITE_BUSY right away. - */ - for(pX=pShmNode->pFirst; pX; pX=pX->pNext){ - if( (pX->exclMask & mask)!=0 || (pX->sharedMask & mask)!=0 ){ - rc = SQLITE_BUSY; - break; - } - } - - /* Get the exclusive locks at the system level. Then if successful - ** also mark the local connection as being locked. - */ - if( rc==SQLITE_OK ){ - rc = winShmSystemLock(pShmNode, _SHM_WRLCK, ofst+WIN_SHM_BASE, n); - if( rc==SQLITE_OK ){ - assert( (p->sharedMask & mask)==0 ); - p->exclMask |= mask; - } - } - } - sqlite3_mutex_leave(pShmNode->mutex); - OSTRACE(("SHM-LOCK pid=%lu, id=%d, sharedMask=%03x, exclMask=%03x, rc=%s\n", - osGetCurrentProcessId(), p->id, p->sharedMask, p->exclMask, - sqlite3ErrName(rc))); - return rc; -} - -/* -** Implement a memory barrier or memory fence on shared memory. -** -** All loads and stores begun before the barrier must complete before -** any load or store begun after the barrier. 
-*/ -static void winShmBarrier( - sqlite3_file *fd /* Database holding the shared memory */ -){ - UNUSED_PARAMETER(fd); - /* MemoryBarrier(); // does not work -- do not know why not */ - winShmEnterMutex(); - winShmLeaveMutex(); -} - -/* -** This function is called to obtain a pointer to region iRegion of the -** shared-memory associated with the database file fd. Shared-memory regions -** are numbered starting from zero. Each shared-memory region is szRegion -** bytes in size. -** -** If an error occurs, an error code is returned and *pp is set to NULL. -** -** Otherwise, if the isWrite parameter is 0 and the requested shared-memory -** region has not been allocated (by any client, including one running in a -** separate process), then *pp is set to NULL and SQLITE_OK returned. If -** isWrite is non-zero and the requested shared-memory region has not yet -** been allocated, it is allocated by this function. -** -** If the shared-memory region has already been allocated or is allocated by -** this call as described above, then it is mapped into this processes -** address space (if it is not already), *pp is set to point to the mapped -** memory and SQLITE_OK returned. -*/ -static int winShmMap( - sqlite3_file *fd, /* Handle open on database file */ - int iRegion, /* Region to retrieve */ - int szRegion, /* Size of regions */ - int isWrite, /* True to extend file if necessary */ - void volatile **pp /* OUT: Mapped memory */ -){ - winFile *pDbFd = (winFile*)fd; - winShm *p = pDbFd->pShm; - winShmNode *pShmNode; - int rc = SQLITE_OK; - - if( !p ){ - rc = winOpenSharedMemory(pDbFd); - if( rc!=SQLITE_OK ) return rc; - p = pDbFd->pShm; - } - pShmNode = p->pShmNode; - - sqlite3_mutex_enter(pShmNode->mutex); - assert( szRegion==pShmNode->szRegion || pShmNode->nRegion==0 ); - - if( pShmNode->nRegion<=iRegion ){ - struct ShmRegion *apNew; /* New aRegion[] array */ - int nByte = (iRegion+1)*szRegion; /* Minimum required file size */ - sqlite3_int64 sz; /* Current size of wal-index file */ - - pShmNode->szRegion = szRegion; - - /* The requested region is not mapped into this processes address space. - ** Check to see if it has been allocated (i.e. if the wal-index file is - ** large enough to contain the requested region). - */ - rc = winFileSize((sqlite3_file *)&pShmNode->hFile, &sz); - if( rc!=SQLITE_OK ){ - rc = winLogError(SQLITE_IOERR_SHMSIZE, osGetLastError(), - "winShmMap1", pDbFd->zPath); - goto shmpage_out; - } - - if( sz<nByte ){ - /* The requested memory region does not exist. If isWrite is set to - ** zero, exit early. *pp will be set to NULL and SQLITE_OK returned. - ** - ** Alternatively, if isWrite is non-zero, use ftruncate() to allocate - ** the requested memory region. - */ - if( !isWrite ){ - rc = SQLITE_OK; - goto shmpage_out; - } - rc = winTruncate((sqlite3_file *)&pShmNode->hFile, nByte); - if( rc!=SQLITE_OK ){ - rc = winLogError(SQLITE_IOERR_SHMSIZE, osGetLastError(), - "winShmMap2", pDbFd->zPath); - goto shmpage_out; - } - } - - /* Map the requested memory region into this processes address space. */ - apNew = (struct ShmRegion *)sqlite3_realloc( - pShmNode->aRegion, (iRegion+1)*sizeof(apNew[0]) - ); - if( !apNew ){ - rc = SQLITE_IOERR_NOMEM; - goto shmpage_out; - } - pShmNode->aRegion = apNew; - - while( pShmNode->nRegion<=iRegion ){ - HANDLE hMap = NULL; /* file-mapping handle */ - void *pMap = 0; /* Mapped memory region */ - -#if SQLITE_OS_WINRT - hMap = osCreateFileMappingFromApp(pShmNode->hFile.h, - NULL, PAGE_READWRITE, nByte, NULL - ); -#elif defined(SQLITE_WIN32_HAS_WIDE) - hMap = osCreateFileMappingW(pShmNode->hFile.h, - NULL, PAGE_READWRITE, 0, nByte, NULL - ); -#elif defined(SQLITE_WIN32_HAS_ANSI) - hMap = osCreateFileMappingA(pShmNode->hFile.h, - NULL, PAGE_READWRITE, 0, nByte, NULL - ); -#endif - OSTRACE(("SHM-MAP-CREATE pid=%lu, region=%d, size=%d, rc=%s\n", - osGetCurrentProcessId(), pShmNode->nRegion, nByte, - hMap ? 
"ok" : "failed")); - if( hMap ){ - int iOffset = pShmNode->nRegion*szRegion; - int iOffsetShift = iOffset % winSysInfo.dwAllocationGranularity; -#if SQLITE_OS_WINRT - pMap = osMapViewOfFileFromApp(hMap, FILE_MAP_WRITE | FILE_MAP_READ, - iOffset - iOffsetShift, szRegion + iOffsetShift - ); -#else - pMap = osMapViewOfFile(hMap, FILE_MAP_WRITE | FILE_MAP_READ, - 0, iOffset - iOffsetShift, szRegion + iOffsetShift - ); -#endif - OSTRACE(("SHM-MAP-MAP pid=%lu, region=%d, offset=%d, size=%d, rc=%s\n", - osGetCurrentProcessId(), pShmNode->nRegion, iOffset, - szRegion, pMap ? "ok" : "failed")); - } - if( !pMap ){ - pShmNode->lastErrno = osGetLastError(); - rc = winLogError(SQLITE_IOERR_SHMMAP, pShmNode->lastErrno, - "winShmMap3", pDbFd->zPath); - if( hMap ) osCloseHandle(hMap); - goto shmpage_out; - } - - pShmNode->aRegion[pShmNode->nRegion].pMap = pMap; - pShmNode->aRegion[pShmNode->nRegion].hMap = hMap; - pShmNode->nRegion++; - } - } - -shmpage_out: - if( pShmNode->nRegion>iRegion ){ - int iOffset = iRegion*szRegion; - int iOffsetShift = iOffset % winSysInfo.dwAllocationGranularity; - char *p = (char *)pShmNode->aRegion[iRegion].pMap; - *pp = (void *)&p[iOffsetShift]; - }else{ - *pp = 0; - } - sqlite3_mutex_leave(pShmNode->mutex); - return rc; -} - -#else -# define winShmMap 0 -# define winShmLock 0 -# define winShmBarrier 0 -# define winShmUnmap 0 -#endif /* #ifndef SQLITE_OMIT_WAL */ - -/* -** Cleans up the mapped region of the specified file, if any. -*/ -#if SQLITE_MAX_MMAP_SIZE>0 -static int winUnmapfile(winFile *pFile){ - assert( pFile!=0 ); - OSTRACE(("UNMAP-FILE pid=%lu, pFile=%p, hMap=%p, pMapRegion=%p, " - "mmapSize=%lld, mmapSizeActual=%lld, mmapSizeMax=%lld\n", - osGetCurrentProcessId(), pFile, pFile->hMap, pFile->pMapRegion, - pFile->mmapSize, pFile->mmapSizeActual, pFile->mmapSizeMax)); - if( pFile->pMapRegion ){ - if( !osUnmapViewOfFile(pFile->pMapRegion) ){ - pFile->lastErrno = osGetLastError(); - OSTRACE(("UNMAP-FILE pid=%lu, pFile=%p, pMapRegion=%p, " - "rc=SQLITE_IOERR_MMAP\n", osGetCurrentProcessId(), pFile, - pFile->pMapRegion)); - return winLogError(SQLITE_IOERR_MMAP, pFile->lastErrno, - "winUnmapfile1", pFile->zPath); - } - pFile->pMapRegion = 0; - pFile->mmapSize = 0; - pFile->mmapSizeActual = 0; - } - if( pFile->hMap!=NULL ){ - if( !osCloseHandle(pFile->hMap) ){ - pFile->lastErrno = osGetLastError(); - OSTRACE(("UNMAP-FILE pid=%lu, pFile=%p, hMap=%p, rc=SQLITE_IOERR_MMAP\n", - osGetCurrentProcessId(), pFile, pFile->hMap)); - return winLogError(SQLITE_IOERR_MMAP, pFile->lastErrno, - "winUnmapfile2", pFile->zPath); - } - pFile->hMap = NULL; - } - OSTRACE(("UNMAP-FILE pid=%lu, pFile=%p, rc=SQLITE_OK\n", - osGetCurrentProcessId(), pFile)); - return SQLITE_OK; -} - -/* -** Memory map or remap the file opened by file-descriptor pFd (if the file -** is already mapped, the existing mapping is replaced by the new). Or, if -** there already exists a mapping for this file, and there are still -** outstanding xFetch() references to it, this function is a no-op. -** -** If parameter nByte is non-negative, then it is the requested size of -** the mapping to create. Otherwise, if nByte is less than zero, then the -** requested size is the size of the file on disk. The actual size of the -** created mapping is either the requested size or the value configured -** using SQLITE_FCNTL_MMAP_SIZE, whichever is smaller. -** -** SQLITE_OK is returned if no error occurs (even if the mapping is not -** recreated as a result of outstanding references) or an SQLite error -** code otherwise. 
-*/ -static int winMapfile(winFile *pFd, sqlite3_int64 nByte){ - sqlite3_int64 nMap = nByte; - int rc; - - assert( nMap>=0 || pFd->nFetchOut==0 ); - OSTRACE(("MAP-FILE pid=%lu, pFile=%p, size=%lld\n", - osGetCurrentProcessId(), pFd, nByte)); - - if( pFd->nFetchOut>0 ) return SQLITE_OK; - - if( nMap<0 ){ - rc = winFileSize((sqlite3_file*)pFd, &nMap); - if( rc ){ - OSTRACE(("MAP-FILE pid=%lu, pFile=%p, rc=SQLITE_IOERR_FSTAT\n", - osGetCurrentProcessId(), pFd)); - return SQLITE_IOERR_FSTAT; - } - } - if( nMap>pFd->mmapSizeMax ){ - nMap = pFd->mmapSizeMax; - } - nMap &= ~(sqlite3_int64)(winSysInfo.dwPageSize - 1); - - if( nMap==0 && pFd->mmapSize>0 ){ - winUnmapfile(pFd); - } - if( nMap!=pFd->mmapSize ){ - void *pNew = 0; - DWORD protect = PAGE_READONLY; - DWORD flags = FILE_MAP_READ; - - winUnmapfile(pFd); - if( (pFd->ctrlFlags & WINFILE_RDONLY)==0 ){ - protect = PAGE_READWRITE; - flags |= FILE_MAP_WRITE; - } -#if SQLITE_OS_WINRT - pFd->hMap = osCreateFileMappingFromApp(pFd->h, NULL, protect, nMap, NULL); -#elif defined(SQLITE_WIN32_HAS_WIDE) - pFd->hMap = osCreateFileMappingW(pFd->h, NULL, protect, - (DWORD)((nMap>>32) & 0xffffffff), - (DWORD)(nMap & 0xffffffff), NULL); -#elif defined(SQLITE_WIN32_HAS_ANSI) - pFd->hMap = osCreateFileMappingA(pFd->h, NULL, protect, - (DWORD)((nMap>>32) & 0xffffffff), - (DWORD)(nMap & 0xffffffff), NULL); -#endif - if( pFd->hMap==NULL ){ - pFd->lastErrno = osGetLastError(); - rc = winLogError(SQLITE_IOERR_MMAP, pFd->lastErrno, - "winMapfile1", pFd->zPath); - /* Log the error, but continue normal operation using xRead/xWrite */ - OSTRACE(("MAP-FILE-CREATE pid=%lu, pFile=%p, rc=%s\n", - osGetCurrentProcessId(), pFd, sqlite3ErrName(rc))); - return SQLITE_OK; - } - assert( (nMap % winSysInfo.dwPageSize)==0 ); - assert( sizeof(SIZE_T)==sizeof(sqlite3_int64) || nMap<=0xffffffff ); -#if SQLITE_OS_WINRT - pNew = osMapViewOfFileFromApp(pFd->hMap, flags, 0, (SIZE_T)nMap); -#else - pNew = osMapViewOfFile(pFd->hMap, flags, 0, 0, (SIZE_T)nMap); -#endif - if( pNew==NULL ){ - osCloseHandle(pFd->hMap); - pFd->hMap = NULL; - pFd->lastErrno = osGetLastError(); - rc = winLogError(SQLITE_IOERR_MMAP, pFd->lastErrno, - "winMapfile2", pFd->zPath); - /* Log the error, but continue normal operation using xRead/xWrite */ - OSTRACE(("MAP-FILE-MAP pid=%lu, pFile=%p, rc=%s\n", - osGetCurrentProcessId(), pFd, sqlite3ErrName(rc))); - return SQLITE_OK; - } - pFd->pMapRegion = pNew; - pFd->mmapSize = nMap; - pFd->mmapSizeActual = nMap; - } - - OSTRACE(("MAP-FILE pid=%lu, pFile=%p, rc=SQLITE_OK\n", - osGetCurrentProcessId(), pFd)); - return SQLITE_OK; -} -#endif /* SQLITE_MAX_MMAP_SIZE>0 */ - -/* -** If possible, return a pointer to a mapping of file fd starting at offset -** iOff. The mapping must be valid for at least nAmt bytes. -** -** If such a pointer can be obtained, store it in *pp and return SQLITE_OK. -** Or, if one cannot but no error occurs, set *pp to 0 and return SQLITE_OK. -** Finally, if an error does occur, return an SQLite error code. The final -** value of *pp is undefined in this case. -** -** If this function does return a pointer, the caller must eventually -** release the reference by calling winUnfetch(). 
-*/ -static int winFetch(sqlite3_file *fd, i64 iOff, int nAmt, void **pp){ -#if SQLITE_MAX_MMAP_SIZE>0 - winFile *pFd = (winFile*)fd; /* The underlying database file */ -#endif - *pp = 0; - - OSTRACE(("FETCH pid=%lu, pFile=%p, offset=%lld, amount=%d, pp=%p\n", - osGetCurrentProcessId(), fd, iOff, nAmt, pp)); - -#if SQLITE_MAX_MMAP_SIZE>0 - if( pFd->mmapSizeMax>0 ){ - if( pFd->pMapRegion==0 ){ - int rc = winMapfile(pFd, -1); - if( rc!=SQLITE_OK ){ - OSTRACE(("FETCH pid=%lu, pFile=%p, rc=%s\n", - osGetCurrentProcessId(), pFd, sqlite3ErrName(rc))); - return rc; - } - } - if( pFd->mmapSize >= iOff+nAmt ){ - *pp = &((u8 *)pFd->pMapRegion)[iOff]; - pFd->nFetchOut++; - } - } -#endif - - OSTRACE(("FETCH pid=%lu, pFile=%p, pp=%p, *pp=%p, rc=SQLITE_OK\n", - osGetCurrentProcessId(), fd, pp, *pp)); - return SQLITE_OK; -} - -/* -** If the third argument is non-NULL, then this function releases a -** reference obtained by an earlier call to winFetch(). The second -** argument passed to this function must be the same as the corresponding -** argument that was passed to the winFetch() invocation. -** -** Or, if the third argument is NULL, then this function is being called -** to inform the VFS layer that, according to POSIX, any existing mapping -** may now be invalid and should be unmapped. -*/ -static int winUnfetch(sqlite3_file *fd, i64 iOff, void *p){ -#if SQLITE_MAX_MMAP_SIZE>0 - winFile *pFd = (winFile*)fd; /* The underlying database file */ - - /* If p==0 (unmap the entire file) then there must be no outstanding - ** xFetch references. Or, if p!=0 (meaning it is an xFetch reference), - ** then there must be at least one outstanding. */ - assert( (p==0)==(pFd->nFetchOut==0) ); - - /* If p!=0, it must match the iOff value. */ - assert( p==0 || p==&((u8 *)pFd->pMapRegion)[iOff] ); - - OSTRACE(("UNFETCH pid=%lu, pFile=%p, offset=%lld, p=%p\n", - osGetCurrentProcessId(), pFd, iOff, p)); - - if( p ){ - pFd->nFetchOut--; - }else{ - /* FIXME: If Windows truly always prevents truncating or deleting a - ** file while a mapping is held, then the following winUnmapfile() call - ** is unnecessary can can be omitted - potentially improving - ** performance. */ - winUnmapfile(pFd); - } - - assert( pFd->nFetchOut>=0 ); -#endif - - OSTRACE(("UNFETCH pid=%lu, pFile=%p, rc=SQLITE_OK\n", - osGetCurrentProcessId(), fd)); - return SQLITE_OK; -} - -/* -** Here ends the implementation of all sqlite3_file methods. -** -********************** End sqlite3_file Methods ******************************* -******************************************************************************/ - -/* -** This vector defines all the methods that can operate on an -** sqlite3_file for win32. 
-*/ -static const sqlite3_io_methods winIoMethod = { - 3, /* iVersion */ - winClose, /* xClose */ - winRead, /* xRead */ - winWrite, /* xWrite */ - winTruncate, /* xTruncate */ - winSync, /* xSync */ - winFileSize, /* xFileSize */ - winLock, /* xLock */ - winUnlock, /* xUnlock */ - winCheckReservedLock, /* xCheckReservedLock */ - winFileControl, /* xFileControl */ - winSectorSize, /* xSectorSize */ - winDeviceCharacteristics, /* xDeviceCharacteristics */ - winShmMap, /* xShmMap */ - winShmLock, /* xShmLock */ - winShmBarrier, /* xShmBarrier */ - winShmUnmap, /* xShmUnmap */ - winFetch, /* xFetch */ - winUnfetch /* xUnfetch */ -}; - -/**************************************************************************** -**************************** sqlite3_vfs methods **************************** -** -** This division contains the implementation of methods on the -** sqlite3_vfs object. -*/ - -#if defined(__CYGWIN__) -/* -** Convert a filename from whatever the underlying operating system -** supports for filenames into UTF-8. Space to hold the result is -** obtained from malloc and must be freed by the calling function. -*/ -static char *winConvertToUtf8Filename(const void *zFilename){ - char *zConverted = 0; - if( osIsNT() ){ - zConverted = winUnicodeToUtf8(zFilename); - } -#ifdef SQLITE_WIN32_HAS_ANSI - else{ - zConverted = sqlite3_win32_mbcs_to_utf8(zFilename); - } -#endif - /* caller will handle out of memory */ - return zConverted; -} -#endif - -/* -** Convert a UTF-8 filename into whatever form the underlying -** operating system wants filenames in. Space to hold the result -** is obtained from malloc and must be freed by the calling -** function. -*/ -static void *winConvertFromUtf8Filename(const char *zFilename){ - void *zConverted = 0; - if( osIsNT() ){ - zConverted = winUtf8ToUnicode(zFilename); - } -#ifdef SQLITE_WIN32_HAS_ANSI - else{ - zConverted = sqlite3_win32_utf8_to_mbcs(zFilename); - } -#endif - /* caller will handle out of memory */ - return zConverted; -} - -/* -** This function returns non-zero if the specified UTF-8 string buffer -** ends with a directory separator character or one was successfully -** added to it. -*/ -static int winMakeEndInDirSep(int nBuf, char *zBuf){ - if( zBuf ){ - int nLen = sqlite3Strlen30(zBuf); - if( nLen>0 ){ - if( winIsDirSep(zBuf[nLen-1]) ){ - return 1; - }else if( nLen+1mxPathname; nBuf = nMax + 2; - zBuf = sqlite3MallocZero( nBuf ); - if( !zBuf ){ - OSTRACE(("TEMP-FILENAME rc=SQLITE_IOERR_NOMEM\n")); - return SQLITE_IOERR_NOMEM; - } - - /* Figure out the effective temporary directory. First, check if one - ** has been explicitly set by the application; otherwise, use the one - ** configured by the operating system. 
- */ - nDir = nMax - (nPre + 15); - assert( nDir>0 ); - if( sqlite3_temp_directory ){ - int nDirLen = sqlite3Strlen30(sqlite3_temp_directory); - if( nDirLen>0 ){ - if( !winIsDirSep(sqlite3_temp_directory[nDirLen-1]) ){ - nDirLen++; - } - if( nDirLen>nDir ){ - sqlite3_free(zBuf); - OSTRACE(("TEMP-FILENAME rc=SQLITE_ERROR\n")); - return winLogError(SQLITE_ERROR, 0, "winGetTempname1", 0); - } - sqlite3_snprintf(nMax, zBuf, "%s", sqlite3_temp_directory); - } - } -#if defined(__CYGWIN__) - else{ - static const char *azDirs[] = { - 0, /* getenv("SQLITE_TMPDIR") */ - 0, /* getenv("TMPDIR") */ - 0, /* getenv("TMP") */ - 0, /* getenv("TEMP") */ - 0, /* getenv("USERPROFILE") */ - "/var/tmp", - "/usr/tmp", - "/tmp", - ".", - 0 /* List terminator */ - }; - unsigned int i; - const char *zDir = 0; - - if( !azDirs[0] ) azDirs[0] = getenv("SQLITE_TMPDIR"); - if( !azDirs[1] ) azDirs[1] = getenv("TMPDIR"); - if( !azDirs[2] ) azDirs[2] = getenv("TMP"); - if( !azDirs[3] ) azDirs[3] = getenv("TEMP"); - if( !azDirs[4] ) azDirs[4] = getenv("USERPROFILE"); - for(i=0; i/etilqs_XXXXXXXXXXXXXXX\0\0" - ** - ** If not, return SQLITE_ERROR. The number 17 is used here in order to - ** account for the space used by the 15 character random suffix and the - ** two trailing NUL characters. The final directory separator character - ** has already added if it was not already present. - */ - nLen = sqlite3Strlen30(zBuf); - if( (nLen + nPre + 17) > nBuf ){ - sqlite3_free(zBuf); - OSTRACE(("TEMP-FILENAME rc=SQLITE_ERROR\n")); - return winLogError(SQLITE_ERROR, 0, "winGetTempname5", 0); - } - - sqlite3_snprintf(nBuf-16-nLen, zBuf+nLen, SQLITE_TEMP_FILE_PREFIX); - - j = sqlite3Strlen30(zBuf); - sqlite3_randomness(15, &zBuf[j]); - for(i=0; i<15; i++, j++){ - zBuf[j] = (char)zChars[ ((unsigned char)zBuf[j])%(sizeof(zChars)-1) ]; - } - zBuf[j] = 0; - zBuf[j+1] = 0; - *pzBuf = zBuf; - - OSTRACE(("TEMP-FILENAME name=%s, rc=SQLITE_OK\n", zBuf)); - return SQLITE_OK; -} - -/* -** Return TRUE if the named file is really a directory. Return false if -** it is something other than a directory, or if there is any kind of memory -** allocation failure. -*/ -static int winIsDir(const void *zConverted){ - DWORD attr; - int rc = 0; - DWORD lastErrno; - - if( osIsNT() ){ - int cnt = 0; - WIN32_FILE_ATTRIBUTE_DATA sAttrData; - memset(&sAttrData, 0, sizeof(sAttrData)); - while( !(rc = osGetFileAttributesExW((LPCWSTR)zConverted, - GetFileExInfoStandard, - &sAttrData)) && winRetryIoerr(&cnt, &lastErrno) ){} - if( !rc ){ - return 0; /* Invalid name? */ - } - attr = sAttrData.dwFileAttributes; -#if SQLITE_OS_WINCE==0 - }else{ - attr = osGetFileAttributesA((char*)zConverted); -#endif - } - return (attr!=INVALID_FILE_ATTRIBUTES) && (attr&FILE_ATTRIBUTE_DIRECTORY); -} - -/* -** Open a file. -*/ -static int winOpen( - sqlite3_vfs *pVfs, /* Used to get maximum path name length */ - const char *zName, /* Name of the file (UTF-8) */ - sqlite3_file *id, /* Write the SQLite file handle here */ - int flags, /* Open mode flags */ - int *pOutFlags /* Status return flags */ -){ - HANDLE h; - DWORD lastErrno = 0; - DWORD dwDesiredAccess; - DWORD dwShareMode; - DWORD dwCreationDisposition; - DWORD dwFlagsAndAttributes = 0; -#if SQLITE_OS_WINCE - int isTemp = 0; -#endif - winFile *pFile = (winFile*)id; - void *zConverted; /* Filename in OS encoding */ - const char *zUtf8Name = zName; /* Filename in UTF-8 encoding */ - int cnt = 0; - - /* If argument zPath is a NULL pointer, this function is required to open - ** a temporary file. 
Use this buffer to store the file name in. - */ - char *zTmpname = 0; /* For temporary filename, if necessary. */ - - int rc = SQLITE_OK; /* Function Return Code */ -#if !defined(NDEBUG) || SQLITE_OS_WINCE - int eType = flags&0xFFFFFF00; /* Type of file to open */ -#endif - - int isExclusive = (flags & SQLITE_OPEN_EXCLUSIVE); - int isDelete = (flags & SQLITE_OPEN_DELETEONCLOSE); - int isCreate = (flags & SQLITE_OPEN_CREATE); - int isReadonly = (flags & SQLITE_OPEN_READONLY); - int isReadWrite = (flags & SQLITE_OPEN_READWRITE); - -#ifndef NDEBUG - int isOpenJournal = (isCreate && ( - eType==SQLITE_OPEN_MASTER_JOURNAL - || eType==SQLITE_OPEN_MAIN_JOURNAL - || eType==SQLITE_OPEN_WAL - )); -#endif - - OSTRACE(("OPEN name=%s, pFile=%p, flags=%x, pOutFlags=%p\n", - zUtf8Name, id, flags, pOutFlags)); - - /* Check the following statements are true: - ** - ** (a) Exactly one of the READWRITE and READONLY flags must be set, and - ** (b) if CREATE is set, then READWRITE must also be set, and - ** (c) if EXCLUSIVE is set, then CREATE must also be set. - ** (d) if DELETEONCLOSE is set, then CREATE must also be set. - */ - assert((isReadonly==0 || isReadWrite==0) && (isReadWrite || isReadonly)); - assert(isCreate==0 || isReadWrite); - assert(isExclusive==0 || isCreate); - assert(isDelete==0 || isCreate); - - /* The main DB, main journal, WAL file and master journal are never - ** automatically deleted. Nor are they ever temporary files. */ - assert( (!isDelete && zName) || eType!=SQLITE_OPEN_MAIN_DB ); - assert( (!isDelete && zName) || eType!=SQLITE_OPEN_MAIN_JOURNAL ); - assert( (!isDelete && zName) || eType!=SQLITE_OPEN_MASTER_JOURNAL ); - assert( (!isDelete && zName) || eType!=SQLITE_OPEN_WAL ); - - /* Assert that the upper layer has set one of the "file-type" flags. */ - assert( eType==SQLITE_OPEN_MAIN_DB || eType==SQLITE_OPEN_TEMP_DB - || eType==SQLITE_OPEN_MAIN_JOURNAL || eType==SQLITE_OPEN_TEMP_JOURNAL - || eType==SQLITE_OPEN_SUBJOURNAL || eType==SQLITE_OPEN_MASTER_JOURNAL - || eType==SQLITE_OPEN_TRANSIENT_DB || eType==SQLITE_OPEN_WAL - ); - - assert( pFile!=0 ); - memset(pFile, 0, sizeof(winFile)); - pFile->h = INVALID_HANDLE_VALUE; - -#if SQLITE_OS_WINRT - if( !zUtf8Name && !sqlite3_temp_directory ){ - sqlite3_log(SQLITE_ERROR, - "sqlite3_temp_directory variable should be set for WinRT"); - } -#endif - - /* If the second argument to this function is NULL, generate a - ** temporary file name to use - */ - if( !zUtf8Name ){ - assert( isDelete && !isOpenJournal ); - rc = winGetTempname(pVfs, &zTmpname); - if( rc!=SQLITE_OK ){ - OSTRACE(("OPEN name=%s, rc=%s", zUtf8Name, sqlite3ErrName(rc))); - return rc; - } - zUtf8Name = zTmpname; - } - - /* Database filenames are double-zero terminated if they are not - ** URIs with parameters. Hence, they can always be passed into - ** sqlite3_uri_parameter(). - */ - assert( (eType!=SQLITE_OPEN_MAIN_DB) || (flags & SQLITE_OPEN_URI) || - zUtf8Name[sqlite3Strlen30(zUtf8Name)+1]==0 ); - - /* Convert the filename to the system encoding. 
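The flag invariants (a)-(d) asserted above can be stated as a tiny standalone checker. The OPEN_* constants below mirror the corresponding SQLITE_OPEN_* bit values; the function name is made up for the example.

#include <stdio.h>

#define OPEN_READONLY      0x01
#define OPEN_READWRITE     0x02
#define OPEN_CREATE        0x04
#define OPEN_DELETEONCLOSE 0x08
#define OPEN_EXCLUSIVE     0x10

/* Return 1 if the combination satisfies invariants (a) through (d). */
static int openFlagsOk(int f){
  int ro  = (f & OPEN_READONLY)!=0;
  int rw  = (f & OPEN_READWRITE)!=0;
  int cr  = (f & OPEN_CREATE)!=0;
  int ex  = (f & OPEN_EXCLUSIVE)!=0;
  int del = (f & OPEN_DELETEONCLOSE)!=0;
  if( ro==rw )     return 0;   /* (a) exactly one of READONLY/READWRITE */
  if( cr && !rw )  return 0;   /* (b) CREATE implies READWRITE          */
  if( ex && !cr )  return 0;   /* (c) EXCLUSIVE implies CREATE          */
  if( del && !cr ) return 0;   /* (d) DELETEONCLOSE implies CREATE      */
  return 1;
}

int main(void){
  printf("%d\n", openFlagsOk(OPEN_READWRITE|OPEN_CREATE));  /* 1          */
  printf("%d\n", openFlagsOk(OPEN_READONLY|OPEN_CREATE));   /* 0, by (b)  */
  return 0;
}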
*/ - zConverted = winConvertFromUtf8Filename(zUtf8Name); - if( zConverted==0 ){ - sqlite3_free(zTmpname); - OSTRACE(("OPEN name=%s, rc=SQLITE_IOERR_NOMEM", zUtf8Name)); - return SQLITE_IOERR_NOMEM; - } - - if( winIsDir(zConverted) ){ - sqlite3_free(zConverted); - sqlite3_free(zTmpname); - OSTRACE(("OPEN name=%s, rc=SQLITE_CANTOPEN_ISDIR", zUtf8Name)); - return SQLITE_CANTOPEN_ISDIR; - } - - if( isReadWrite ){ - dwDesiredAccess = GENERIC_READ | GENERIC_WRITE; - }else{ - dwDesiredAccess = GENERIC_READ; - } - - /* SQLITE_OPEN_EXCLUSIVE is used to make sure that a new file is - ** created. SQLite doesn't use it to indicate "exclusive access" - ** as it is usually understood. - */ - if( isExclusive ){ - /* Creates a new file, only if it does not already exist. */ - /* If the file exists, it fails. */ - dwCreationDisposition = CREATE_NEW; - }else if( isCreate ){ - /* Open existing file, or create if it doesn't exist */ - dwCreationDisposition = OPEN_ALWAYS; - }else{ - /* Opens a file, only if it exists. */ - dwCreationDisposition = OPEN_EXISTING; - } - - dwShareMode = FILE_SHARE_READ | FILE_SHARE_WRITE; - - if( isDelete ){ -#if SQLITE_OS_WINCE - dwFlagsAndAttributes = FILE_ATTRIBUTE_HIDDEN; - isTemp = 1; -#else - dwFlagsAndAttributes = FILE_ATTRIBUTE_TEMPORARY - | FILE_ATTRIBUTE_HIDDEN - | FILE_FLAG_DELETE_ON_CLOSE; -#endif - }else{ - dwFlagsAndAttributes = FILE_ATTRIBUTE_NORMAL; - } - /* Reports from the internet are that performance is always - ** better if FILE_FLAG_RANDOM_ACCESS is used. Ticket #2699. */ -#if SQLITE_OS_WINCE - dwFlagsAndAttributes |= FILE_FLAG_RANDOM_ACCESS; -#endif - - if( osIsNT() ){ -#if SQLITE_OS_WINRT - CREATEFILE2_EXTENDED_PARAMETERS extendedParameters; - extendedParameters.dwSize = sizeof(CREATEFILE2_EXTENDED_PARAMETERS); - extendedParameters.dwFileAttributes = - dwFlagsAndAttributes & FILE_ATTRIBUTE_MASK; - extendedParameters.dwFileFlags = dwFlagsAndAttributes & FILE_FLAG_MASK; - extendedParameters.dwSecurityQosFlags = SECURITY_ANONYMOUS; - extendedParameters.lpSecurityAttributes = NULL; - extendedParameters.hTemplateFile = NULL; - while( (h = osCreateFile2((LPCWSTR)zConverted, - dwDesiredAccess, - dwShareMode, - dwCreationDisposition, - &extendedParameters))==INVALID_HANDLE_VALUE && - winRetryIoerr(&cnt, &lastErrno) ){ - /* Noop */ - } -#else - while( (h = osCreateFileW((LPCWSTR)zConverted, - dwDesiredAccess, - dwShareMode, NULL, - dwCreationDisposition, - dwFlagsAndAttributes, - NULL))==INVALID_HANDLE_VALUE && - winRetryIoerr(&cnt, &lastErrno) ){ - /* Noop */ - } -#endif - } -#ifdef SQLITE_WIN32_HAS_ANSI - else{ - while( (h = osCreateFileA((LPCSTR)zConverted, - dwDesiredAccess, - dwShareMode, NULL, - dwCreationDisposition, - dwFlagsAndAttributes, - NULL))==INVALID_HANDLE_VALUE && - winRetryIoerr(&cnt, &lastErrno) ){ - /* Noop */ - } - } -#endif - winLogIoerr(cnt); - - OSTRACE(("OPEN file=%p, name=%s, access=%lx, rc=%s\n", h, zUtf8Name, - dwDesiredAccess, (h==INVALID_HANDLE_VALUE) ? 
"failed" : "ok")); - - if( h==INVALID_HANDLE_VALUE ){ - pFile->lastErrno = lastErrno; - winLogError(SQLITE_CANTOPEN, pFile->lastErrno, "winOpen", zUtf8Name); - sqlite3_free(zConverted); - sqlite3_free(zTmpname); - if( isReadWrite && !isExclusive ){ - return winOpen(pVfs, zName, id, - ((flags|SQLITE_OPEN_READONLY) & - ~(SQLITE_OPEN_CREATE|SQLITE_OPEN_READWRITE)), - pOutFlags); - }else{ - return SQLITE_CANTOPEN_BKPT; - } - } - - if( pOutFlags ){ - if( isReadWrite ){ - *pOutFlags = SQLITE_OPEN_READWRITE; - }else{ - *pOutFlags = SQLITE_OPEN_READONLY; - } - } - - OSTRACE(("OPEN file=%p, name=%s, access=%lx, pOutFlags=%p, *pOutFlags=%d, " - "rc=%s\n", h, zUtf8Name, dwDesiredAccess, pOutFlags, pOutFlags ? - *pOutFlags : 0, (h==INVALID_HANDLE_VALUE) ? "failed" : "ok")); - -#if SQLITE_OS_WINCE - if( isReadWrite && eType==SQLITE_OPEN_MAIN_DB - && (rc = winceCreateLock(zName, pFile))!=SQLITE_OK - ){ - osCloseHandle(h); - sqlite3_free(zConverted); - sqlite3_free(zTmpname); - OSTRACE(("OPEN-CE-LOCK name=%s, rc=%s\n", zName, sqlite3ErrName(rc))); - return rc; - } - if( isTemp ){ - pFile->zDeleteOnClose = zConverted; - }else -#endif - { - sqlite3_free(zConverted); - } - - sqlite3_free(zTmpname); - pFile->pMethod = &winIoMethod; - pFile->pVfs = pVfs; - pFile->h = h; - if( isReadonly ){ - pFile->ctrlFlags |= WINFILE_RDONLY; - } - if( sqlite3_uri_boolean(zName, "psow", SQLITE_POWERSAFE_OVERWRITE) ){ - pFile->ctrlFlags |= WINFILE_PSOW; - } - pFile->lastErrno = NO_ERROR; - pFile->zPath = zName; -#if SQLITE_MAX_MMAP_SIZE>0 - pFile->hMap = NULL; - pFile->pMapRegion = 0; - pFile->mmapSize = 0; - pFile->mmapSizeActual = 0; - pFile->mmapSizeMax = sqlite3GlobalConfig.szMmap; -#endif - - OpenCounter(+1); - return rc; -} - -/* -** Delete the named file. -** -** Note that Windows does not allow a file to be deleted if some other -** process has it open. Sometimes a virus scanner or indexing program -** will open a journal file shortly after it is created in order to do -** whatever it does. While this other process is holding the -** file open, we will be unable to delete it. To work around this -** problem, we delay 100 milliseconds and try to delete again. Up -** to MX_DELETION_ATTEMPTs deletion attempts are run before giving -** up and returning an error. -*/ -static int winDelete( - sqlite3_vfs *pVfs, /* Not used on win32 */ - const char *zFilename, /* Name of file to delete */ - int syncDir /* Not used on win32 */ -){ - int cnt = 0; - int rc; - DWORD attr; - DWORD lastErrno = 0; - void *zConverted; - UNUSED_PARAMETER(pVfs); - UNUSED_PARAMETER(syncDir); - - SimulateIOError(return SQLITE_IOERR_DELETE); - OSTRACE(("DELETE name=%s, syncDir=%d\n", zFilename, syncDir)); - - zConverted = winConvertFromUtf8Filename(zFilename); - if( zConverted==0 ){ - OSTRACE(("DELETE name=%s, rc=SQLITE_IOERR_NOMEM\n", zFilename)); - return SQLITE_IOERR_NOMEM; - } - if( osIsNT() ){ - do { -#if SQLITE_OS_WINRT - WIN32_FILE_ATTRIBUTE_DATA sAttrData; - memset(&sAttrData, 0, sizeof(sAttrData)); - if ( osGetFileAttributesExW(zConverted, GetFileExInfoStandard, - &sAttrData) ){ - attr = sAttrData.dwFileAttributes; - }else{ - lastErrno = osGetLastError(); - if( lastErrno==ERROR_FILE_NOT_FOUND - || lastErrno==ERROR_PATH_NOT_FOUND ){ - rc = SQLITE_IOERR_DELETE_NOENT; /* Already gone? 
*/ - }else{ - rc = SQLITE_ERROR; - } - break; - } -#else - attr = osGetFileAttributesW(zConverted); -#endif - if ( attr==INVALID_FILE_ATTRIBUTES ){ - lastErrno = osGetLastError(); - if( lastErrno==ERROR_FILE_NOT_FOUND - || lastErrno==ERROR_PATH_NOT_FOUND ){ - rc = SQLITE_IOERR_DELETE_NOENT; /* Already gone? */ - }else{ - rc = SQLITE_ERROR; - } - break; - } - if ( attr&FILE_ATTRIBUTE_DIRECTORY ){ - rc = SQLITE_ERROR; /* Files only. */ - break; - } - if ( osDeleteFileW(zConverted) ){ - rc = SQLITE_OK; /* Deleted OK. */ - break; - } - if ( !winRetryIoerr(&cnt, &lastErrno) ){ - rc = SQLITE_ERROR; /* No more retries. */ - break; - } - } while(1); - } -#ifdef SQLITE_WIN32_HAS_ANSI - else{ - do { - attr = osGetFileAttributesA(zConverted); - if ( attr==INVALID_FILE_ATTRIBUTES ){ - lastErrno = osGetLastError(); - if( lastErrno==ERROR_FILE_NOT_FOUND - || lastErrno==ERROR_PATH_NOT_FOUND ){ - rc = SQLITE_IOERR_DELETE_NOENT; /* Already gone? */ - }else{ - rc = SQLITE_ERROR; - } - break; - } - if ( attr&FILE_ATTRIBUTE_DIRECTORY ){ - rc = SQLITE_ERROR; /* Files only. */ - break; - } - if ( osDeleteFileA(zConverted) ){ - rc = SQLITE_OK; /* Deleted OK. */ - break; - } - if ( !winRetryIoerr(&cnt, &lastErrno) ){ - rc = SQLITE_ERROR; /* No more retries. */ - break; - } - } while(1); - } -#endif - if( rc && rc!=SQLITE_IOERR_DELETE_NOENT ){ - rc = winLogError(SQLITE_IOERR_DELETE, lastErrno, "winDelete", zFilename); - }else{ - winLogIoerr(cnt); - } - sqlite3_free(zConverted); - OSTRACE(("DELETE name=%s, rc=%s\n", zFilename, sqlite3ErrName(rc))); - return rc; -} - -/* -** Check the existence and status of a file. -*/ -static int winAccess( - sqlite3_vfs *pVfs, /* Not used on win32 */ - const char *zFilename, /* Name of file to check */ - int flags, /* Type of test to make on this file */ - int *pResOut /* OUT: Result */ -){ - DWORD attr; - int rc = 0; - DWORD lastErrno = 0; - void *zConverted; - UNUSED_PARAMETER(pVfs); - - SimulateIOError( return SQLITE_IOERR_ACCESS; ); - OSTRACE(("ACCESS name=%s, flags=%x, pResOut=%p\n", - zFilename, flags, pResOut)); - - zConverted = winConvertFromUtf8Filename(zFilename); - if( zConverted==0 ){ - OSTRACE(("ACCESS name=%s, rc=SQLITE_IOERR_NOMEM\n", zFilename)); - return SQLITE_IOERR_NOMEM; - } - if( osIsNT() ){ - int cnt = 0; - WIN32_FILE_ATTRIBUTE_DATA sAttrData; - memset(&sAttrData, 0, sizeof(sAttrData)); - while( !(rc = osGetFileAttributesExW((LPCWSTR)zConverted, - GetFileExInfoStandard, - &sAttrData)) && winRetryIoerr(&cnt, &lastErrno) ){} - if( rc ){ - /* For an SQLITE_ACCESS_EXISTS query, treat a zero-length file - ** as if it does not exist. 
- */ - if( flags==SQLITE_ACCESS_EXISTS - && sAttrData.nFileSizeHigh==0 - && sAttrData.nFileSizeLow==0 ){ - attr = INVALID_FILE_ATTRIBUTES; - }else{ - attr = sAttrData.dwFileAttributes; - } - }else{ - winLogIoerr(cnt); - if( lastErrno!=ERROR_FILE_NOT_FOUND && lastErrno!=ERROR_PATH_NOT_FOUND ){ - sqlite3_free(zConverted); - return winLogError(SQLITE_IOERR_ACCESS, lastErrno, "winAccess", - zFilename); - }else{ - attr = INVALID_FILE_ATTRIBUTES; - } - } - } -#ifdef SQLITE_WIN32_HAS_ANSI - else{ - attr = osGetFileAttributesA((char*)zConverted); - } -#endif - sqlite3_free(zConverted); - switch( flags ){ - case SQLITE_ACCESS_READ: - case SQLITE_ACCESS_EXISTS: - rc = attr!=INVALID_FILE_ATTRIBUTES; - break; - case SQLITE_ACCESS_READWRITE: - rc = attr!=INVALID_FILE_ATTRIBUTES && - (attr & FILE_ATTRIBUTE_READONLY)==0; - break; - default: - assert(!"Invalid flags argument"); - } - *pResOut = rc; - OSTRACE(("ACCESS name=%s, pResOut=%p, *pResOut=%d, rc=SQLITE_OK\n", - zFilename, pResOut, *pResOut)); - return SQLITE_OK; -} - -/* -** Returns non-zero if the specified path name starts with a drive letter -** followed by a colon character. -*/ -static BOOL winIsDriveLetterAndColon( - const char *zPathname -){ - return ( sqlite3Isalpha(zPathname[0]) && zPathname[1]==':' ); -} - -/* -** Returns non-zero if the specified path name should be used verbatim. If -** non-zero is returned from this function, the calling function must simply -** use the provided path name verbatim -OR- resolve it into a full path name -** using the GetFullPathName Win32 API function (if available). -*/ -static BOOL winIsVerbatimPathname( - const char *zPathname -){ - /* - ** If the path name starts with a forward slash or a backslash, it is either - ** a legal UNC name, a volume relative path, or an absolute path name in the - ** "Unix" format on Windows. There is no easy way to differentiate between - ** the final two cases; therefore, we return the safer return value of TRUE - ** so that callers of this function will simply use it verbatim. - */ - if ( winIsDirSep(zPathname[0]) ){ - return TRUE; - } - - /* - ** If the path name starts with a letter and a colon it is either a volume - ** relative path or an absolute path. Callers of this function must not - ** attempt to treat it as a relative path name (i.e. they should simply use - ** it verbatim). - */ - if ( winIsDriveLetterAndColon(zPathname) ){ - return TRUE; - } - - /* - ** If we get to this point, the path name should almost certainly be a purely - ** relative one (i.e. not a UNC name, not absolute, and not volume relative). - */ - return FALSE; -} - -/* -** Turn a relative pathname into a full pathname. Write the full -** pathname into zOut[]. zOut[] will be at least pVfs->mxPathname -** bytes in size. -*/ -static int winFullPathname( - sqlite3_vfs *pVfs, /* Pointer to vfs object */ - const char *zRelative, /* Possibly relative input path */ - int nFull, /* Size of output buffer in bytes */ - char *zFull /* Output buffer */ -){ - -#if defined(__CYGWIN__) - SimulateIOError( return SQLITE_ERROR ); - UNUSED_PARAMETER(nFull); - assert( nFull>=pVfs->mxPathname ); - if ( sqlite3_data_directory && !winIsVerbatimPathname(zRelative) ){ - /* - ** NOTE: We are dealing with a relative path name and the data - ** directory has been set. Therefore, use it as the basis - ** for converting the relative path name to an absolute - ** one by prepending the data directory and a slash. 
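The verbatim-path rules described above reduce to two character tests; a minimal standalone illustration (function names are made up, not from the file):

#include <ctype.h>
#include <stdio.h>

static int isDirSep(char c){ return c=='/' || c=='\\'; }

/* A name is taken verbatim when it starts with a directory separator
** (UNC, volume-relative or "Unix"-style absolute) or with a drive
** letter followed by a colon; everything else is treated as relative. */
static int isVerbatim(const char *z){
  return isDirSep(z[0]) || (isalpha((unsigned char)z[0]) && z[1]==':');
}

int main(void){
  printf("%d\n", isVerbatim("C:\\data\\main.db"));     /* 1: drive + colon   */
  printf("%d\n", isVerbatim("\\\\host\\share\\a.db")); /* 1: leading sep     */
  printf("%d\n", isVerbatim("subdir/main.db"));        /* 0: purely relative */
  return 0;
}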
- */ - char *zOut = sqlite3MallocZero( pVfs->mxPathname+1 ); - if( !zOut ){ - return SQLITE_IOERR_NOMEM; - } - if( cygwin_conv_path( - (osIsNT() ? CCP_POSIX_TO_WIN_W : CCP_POSIX_TO_WIN_A) | - CCP_RELATIVE, zRelative, zOut, pVfs->mxPathname+1)<0 ){ - sqlite3_free(zOut); - return winLogError(SQLITE_CANTOPEN_CONVPATH, (DWORD)errno, - "winFullPathname1", zRelative); - }else{ - char *zUtf8 = winConvertToUtf8Filename(zOut); - if( !zUtf8 ){ - sqlite3_free(zOut); - return SQLITE_IOERR_NOMEM; - } - sqlite3_snprintf(MIN(nFull, pVfs->mxPathname), zFull, "%s%c%s", - sqlite3_data_directory, winGetDirSep(), zUtf8); - sqlite3_free(zUtf8); - sqlite3_free(zOut); - } - }else{ - char *zOut = sqlite3MallocZero( pVfs->mxPathname+1 ); - if( !zOut ){ - return SQLITE_IOERR_NOMEM; - } - if( cygwin_conv_path( - (osIsNT() ? CCP_POSIX_TO_WIN_W : CCP_POSIX_TO_WIN_A), - zRelative, zOut, pVfs->mxPathname+1)<0 ){ - sqlite3_free(zOut); - return winLogError(SQLITE_CANTOPEN_CONVPATH, (DWORD)errno, - "winFullPathname2", zRelative); - }else{ - char *zUtf8 = winConvertToUtf8Filename(zOut); - if( !zUtf8 ){ - sqlite3_free(zOut); - return SQLITE_IOERR_NOMEM; - } - sqlite3_snprintf(MIN(nFull, pVfs->mxPathname), zFull, "%s", zUtf8); - sqlite3_free(zUtf8); - sqlite3_free(zOut); - } - } - return SQLITE_OK; -#endif - -#if (SQLITE_OS_WINCE || SQLITE_OS_WINRT) && !defined(__CYGWIN__) - SimulateIOError( return SQLITE_ERROR ); - /* WinCE has no concept of a relative pathname, or so I am told. */ - /* WinRT has no way to convert a relative path to an absolute one. */ - if ( sqlite3_data_directory && !winIsVerbatimPathname(zRelative) ){ - /* - ** NOTE: We are dealing with a relative path name and the data - ** directory has been set. Therefore, use it as the basis - ** for converting the relative path name to an absolute - ** one by prepending the data directory and a backslash. - */ - sqlite3_snprintf(MIN(nFull, pVfs->mxPathname), zFull, "%s%c%s", - sqlite3_data_directory, winGetDirSep(), zRelative); - }else{ - sqlite3_snprintf(MIN(nFull, pVfs->mxPathname), zFull, "%s", zRelative); - } - return SQLITE_OK; -#endif - -#if !SQLITE_OS_WINCE && !SQLITE_OS_WINRT && !defined(__CYGWIN__) - DWORD nByte; - void *zConverted; - char *zOut; - - /* If this path name begins with "/X:", where "X" is any alphabetic - ** character, discard the initial "/" from the pathname. - */ - if( zRelative[0]=='/' && winIsDriveLetterAndColon(zRelative+1) ){ - zRelative++; - } - - /* It's odd to simulate an io-error here, but really this is just - ** using the io-error infrastructure to test that SQLite handles this - ** function failing. This function could fail if, for example, the - ** current working directory has been unlinked. - */ - SimulateIOError( return SQLITE_ERROR ); - if ( sqlite3_data_directory && !winIsVerbatimPathname(zRelative) ){ - /* - ** NOTE: We are dealing with a relative path name and the data - ** directory has been set. Therefore, use it as the basis - ** for converting the relative path name to an absolute - ** one by prepending the data directory and a backslash. 
- */ - sqlite3_snprintf(MIN(nFull, pVfs->mxPathname), zFull, "%s%c%s", - sqlite3_data_directory, winGetDirSep(), zRelative); - return SQLITE_OK; - } - zConverted = winConvertFromUtf8Filename(zRelative); - if( zConverted==0 ){ - return SQLITE_IOERR_NOMEM; - } - if( osIsNT() ){ - LPWSTR zTemp; - nByte = osGetFullPathNameW((LPCWSTR)zConverted, 0, 0, 0); - if( nByte==0 ){ - sqlite3_free(zConverted); - return winLogError(SQLITE_CANTOPEN_FULLPATH, osGetLastError(), - "winFullPathname1", zRelative); - } - nByte += 3; - zTemp = sqlite3MallocZero( nByte*sizeof(zTemp[0]) ); - if( zTemp==0 ){ - sqlite3_free(zConverted); - return SQLITE_IOERR_NOMEM; - } - nByte = osGetFullPathNameW((LPCWSTR)zConverted, nByte, zTemp, 0); - if( nByte==0 ){ - sqlite3_free(zConverted); - sqlite3_free(zTemp); - return winLogError(SQLITE_CANTOPEN_FULLPATH, osGetLastError(), - "winFullPathname2", zRelative); - } - sqlite3_free(zConverted); - zOut = winUnicodeToUtf8(zTemp); - sqlite3_free(zTemp); - } -#ifdef SQLITE_WIN32_HAS_ANSI - else{ - char *zTemp; - nByte = osGetFullPathNameA((char*)zConverted, 0, 0, 0); - if( nByte==0 ){ - sqlite3_free(zConverted); - return winLogError(SQLITE_CANTOPEN_FULLPATH, osGetLastError(), - "winFullPathname3", zRelative); - } - nByte += 3; - zTemp = sqlite3MallocZero( nByte*sizeof(zTemp[0]) ); - if( zTemp==0 ){ - sqlite3_free(zConverted); - return SQLITE_IOERR_NOMEM; - } - nByte = osGetFullPathNameA((char*)zConverted, nByte, zTemp, 0); - if( nByte==0 ){ - sqlite3_free(zConverted); - sqlite3_free(zTemp); - return winLogError(SQLITE_CANTOPEN_FULLPATH, osGetLastError(), - "winFullPathname4", zRelative); - } - sqlite3_free(zConverted); - zOut = sqlite3_win32_mbcs_to_utf8(zTemp); - sqlite3_free(zTemp); - } -#endif - if( zOut ){ - sqlite3_snprintf(MIN(nFull, pVfs->mxPathname), zFull, "%s", zOut); - sqlite3_free(zOut); - return SQLITE_OK; - }else{ - return SQLITE_IOERR_NOMEM; - } -#endif -} - -#ifndef SQLITE_OMIT_LOAD_EXTENSION -/* -** Interfaces for opening a shared library, finding entry points -** within the shared library, and closing the shared library. 
-*/ -static void *winDlOpen(sqlite3_vfs *pVfs, const char *zFilename){ - HANDLE h; -#if defined(__CYGWIN__) - int nFull = pVfs->mxPathname+1; - char *zFull = sqlite3MallocZero( nFull ); - void *zConverted = 0; - if( zFull==0 ){ - OSTRACE(("DLOPEN name=%s, handle=%p\n", zFilename, (void*)0)); - return 0; - } - if( winFullPathname(pVfs, zFilename, nFull, zFull)!=SQLITE_OK ){ - sqlite3_free(zFull); - OSTRACE(("DLOPEN name=%s, handle=%p\n", zFilename, (void*)0)); - return 0; - } - zConverted = winConvertFromUtf8Filename(zFull); - sqlite3_free(zFull); -#else - void *zConverted = winConvertFromUtf8Filename(zFilename); - UNUSED_PARAMETER(pVfs); -#endif - if( zConverted==0 ){ - OSTRACE(("DLOPEN name=%s, handle=%p\n", zFilename, (void*)0)); - return 0; - } - if( osIsNT() ){ -#if SQLITE_OS_WINRT - h = osLoadPackagedLibrary((LPCWSTR)zConverted, 0); -#else - h = osLoadLibraryW((LPCWSTR)zConverted); -#endif - } -#ifdef SQLITE_WIN32_HAS_ANSI - else{ - h = osLoadLibraryA((char*)zConverted); - } -#endif - OSTRACE(("DLOPEN name=%s, handle=%p\n", zFilename, (void*)h)); - sqlite3_free(zConverted); - return (void*)h; -} -static void winDlError(sqlite3_vfs *pVfs, int nBuf, char *zBufOut){ - UNUSED_PARAMETER(pVfs); - winGetLastErrorMsg(osGetLastError(), nBuf, zBufOut); -} -static void (*winDlSym(sqlite3_vfs *pVfs,void *pH,const char *zSym))(void){ - FARPROC proc; - UNUSED_PARAMETER(pVfs); - proc = osGetProcAddressA((HANDLE)pH, zSym); - OSTRACE(("DLSYM handle=%p, symbol=%s, address=%p\n", - (void*)pH, zSym, (void*)proc)); - return (void(*)(void))proc; -} -static void winDlClose(sqlite3_vfs *pVfs, void *pHandle){ - UNUSED_PARAMETER(pVfs); - osFreeLibrary((HANDLE)pHandle); - OSTRACE(("DLCLOSE handle=%p\n", (void*)pHandle)); -} -#else /* if SQLITE_OMIT_LOAD_EXTENSION is defined: */ - #define winDlOpen 0 - #define winDlError 0 - #define winDlSym 0 - #define winDlClose 0 -#endif - - -/* -** Write up to nBuf bytes of randomness into zBuf. -*/ -static int winRandomness(sqlite3_vfs *pVfs, int nBuf, char *zBuf){ - int n = 0; - UNUSED_PARAMETER(pVfs); -#if defined(SQLITE_TEST) - n = nBuf; - memset(zBuf, 0, nBuf); -#else - if( sizeof(SYSTEMTIME)<=nBuf-n ){ - SYSTEMTIME x; - osGetSystemTime(&x); - memcpy(&zBuf[n], &x, sizeof(x)); - n += sizeof(x); - } - if( sizeof(DWORD)<=nBuf-n ){ - DWORD pid = osGetCurrentProcessId(); - memcpy(&zBuf[n], &pid, sizeof(pid)); - n += sizeof(pid); - } -#if SQLITE_OS_WINRT - if( sizeof(ULONGLONG)<=nBuf-n ){ - ULONGLONG cnt = osGetTickCount64(); - memcpy(&zBuf[n], &cnt, sizeof(cnt)); - n += sizeof(cnt); - } -#else - if( sizeof(DWORD)<=nBuf-n ){ - DWORD cnt = osGetTickCount(); - memcpy(&zBuf[n], &cnt, sizeof(cnt)); - n += sizeof(cnt); - } -#endif - if( sizeof(LARGE_INTEGER)<=nBuf-n ){ - LARGE_INTEGER i; - osQueryPerformanceCounter(&i); - memcpy(&zBuf[n], &i, sizeof(i)); - n += sizeof(i); - } -#endif - return n; -} - - -/* -** Sleep for a little while. Return the amount of time slept. -*/ -static int winSleep(sqlite3_vfs *pVfs, int microsec){ - sqlite3_win32_sleep((microsec+999)/1000); - UNUSED_PARAMETER(pVfs); - return ((microsec+999)/1000)*1000; -} - -/* -** The following variable, if set to a non-zero value, is interpreted as -** the number of seconds since 1970 and is used to set the result of -** sqlite3OsCurrentTime() during testing. -*/ -#ifdef SQLITE_TEST -SQLITE_API int sqlite3_current_time = 0; /* Fake system time in seconds since 1970. */ -#endif - -/* -** Find the current time (in Universal Coordinated Time). 
Write into *piNow -** the current time and date as a Julian Day number times 86_400_000. In -** other words, write into *piNow the number of milliseconds since the Julian -** epoch of noon in Greenwich on November 24, 4714 B.C according to the -** proleptic Gregorian calendar. -** -** On success, return SQLITE_OK. Return SQLITE_ERROR if the time and date -** cannot be found. -*/ -static int winCurrentTimeInt64(sqlite3_vfs *pVfs, sqlite3_int64 *piNow){ - /* FILETIME structure is a 64-bit value representing the number of - 100-nanosecond intervals since January 1, 1601 (= JD 2305813.5). - */ - FILETIME ft; - static const sqlite3_int64 winFiletimeEpoch = 23058135*(sqlite3_int64)8640000; -#ifdef SQLITE_TEST - static const sqlite3_int64 unixEpoch = 24405875*(sqlite3_int64)8640000; -#endif - /* 2^32 - to avoid use of LL and warnings in gcc */ - static const sqlite3_int64 max32BitValue = - (sqlite3_int64)2000000000 + (sqlite3_int64)2000000000 + - (sqlite3_int64)294967296; - -#if SQLITE_OS_WINCE - SYSTEMTIME time; - osGetSystemTime(&time); - /* if SystemTimeToFileTime() fails, it returns zero. */ - if (!osSystemTimeToFileTime(&time,&ft)){ - return SQLITE_ERROR; - } -#else - osGetSystemTimeAsFileTime( &ft ); -#endif - - *piNow = winFiletimeEpoch + - ((((sqlite3_int64)ft.dwHighDateTime)*max32BitValue) + - (sqlite3_int64)ft.dwLowDateTime)/(sqlite3_int64)10000; - -#ifdef SQLITE_TEST - if( sqlite3_current_time ){ - *piNow = 1000*(sqlite3_int64)sqlite3_current_time + unixEpoch; - } -#endif - UNUSED_PARAMETER(pVfs); - return SQLITE_OK; -} - -/* -** Find the current time (in Universal Coordinated Time). Write the -** current time and date as a Julian Day number into *prNow and -** return 0. Return 1 if the time and date cannot be found. -*/ -static int winCurrentTime(sqlite3_vfs *pVfs, double *prNow){ - int rc; - sqlite3_int64 i; - rc = winCurrentTimeInt64(pVfs, &i); - if( !rc ){ - *prNow = i/86400000.0; - } - return rc; -} - -/* -** The idea is that this function works like a combination of -** GetLastError() and FormatMessage() on Windows (or errno and -** strerror_r() on Unix). After an error is returned by an OS -** function, SQLite calls this function with zBuf pointing to -** a buffer of nBuf bytes. The OS layer should populate the -** buffer with a nul-terminated UTF-8 encoded error message -** describing the last IO error to have occurred within the calling -** thread. -** -** If the error message is too large for the supplied buffer, -** it should be truncated. The return value of xGetLastError -** is zero if the error message fits in the buffer, or non-zero -** otherwise (if the message was truncated). If non-zero is returned, -** then it is not necessary to include the nul-terminator character -** in the output buffer. -** -** Not supplying an error message will have no adverse effect -** on SQLite. It is fine to have an implementation that never -** returns an error message: -** -** int xGetLastError(sqlite3_vfs *pVfs, int nBuf, char *zBuf){ -** assert(zBuf[0]=='\0'); -** return 0; -** } -** -** However if an error message is supplied, it will be incorporated -** by sqlite into the error message available to the user using -** sqlite3_errmsg(), possibly making IO errors easier to debug. -*/ -static int winGetLastError(sqlite3_vfs *pVfs, int nBuf, char *zBuf){ - UNUSED_PARAMETER(pVfs); - return winGetLastErrorMsg(osGetLastError(), nBuf, zBuf); -} - -/* -** Initialize and deinitialize the operating system interface. 
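A quick standalone check of the epoch arithmetic in winCurrentTimeInt64() above: the Julian Day of 1601-01-01 is 2305813.5, so in milliseconds the FILETIME epoch is 2305813.5 * 86,400,000 = 23058135 * 8,640,000, and every 10,000 FILETIME ticks (100 ns each) add one millisecond.

#include <stdio.h>
#include <stdint.h>

int main(void){
  int64_t winFiletimeEpoch = 23058135LL * 8640000LL;  /* 199,222,286,400,000 ms  */
  int64_t ft = 10000LL * 1000LL * 60LL;   /* a FILETIME one minute past 1601     */
  int64_t julianMs = winFiletimeEpoch + ft/10000;
  printf("%lld\n", (long long)julianMs);  /* 199222286460000 = epoch + 60,000 ms */
  return 0;
}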
-*/ -SQLITE_API int sqlite3_os_init(void){ - static sqlite3_vfs winVfs = { - 3, /* iVersion */ - sizeof(winFile), /* szOsFile */ - SQLITE_WIN32_MAX_PATH_BYTES, /* mxPathname */ - 0, /* pNext */ - "win32", /* zName */ - 0, /* pAppData */ - winOpen, /* xOpen */ - winDelete, /* xDelete */ - winAccess, /* xAccess */ - winFullPathname, /* xFullPathname */ - winDlOpen, /* xDlOpen */ - winDlError, /* xDlError */ - winDlSym, /* xDlSym */ - winDlClose, /* xDlClose */ - winRandomness, /* xRandomness */ - winSleep, /* xSleep */ - winCurrentTime, /* xCurrentTime */ - winGetLastError, /* xGetLastError */ - winCurrentTimeInt64, /* xCurrentTimeInt64 */ - winSetSystemCall, /* xSetSystemCall */ - winGetSystemCall, /* xGetSystemCall */ - winNextSystemCall, /* xNextSystemCall */ - }; -#if defined(SQLITE_WIN32_HAS_WIDE) - static sqlite3_vfs winLongPathVfs = { - 3, /* iVersion */ - sizeof(winFile), /* szOsFile */ - SQLITE_WINNT_MAX_PATH_BYTES, /* mxPathname */ - 0, /* pNext */ - "win32-longpath", /* zName */ - 0, /* pAppData */ - winOpen, /* xOpen */ - winDelete, /* xDelete */ - winAccess, /* xAccess */ - winFullPathname, /* xFullPathname */ - winDlOpen, /* xDlOpen */ - winDlError, /* xDlError */ - winDlSym, /* xDlSym */ - winDlClose, /* xDlClose */ - winRandomness, /* xRandomness */ - winSleep, /* xSleep */ - winCurrentTime, /* xCurrentTime */ - winGetLastError, /* xGetLastError */ - winCurrentTimeInt64, /* xCurrentTimeInt64 */ - winSetSystemCall, /* xSetSystemCall */ - winGetSystemCall, /* xGetSystemCall */ - winNextSystemCall, /* xNextSystemCall */ - }; -#endif - - /* Double-check that the aSyscall[] array has been constructed - ** correctly. See ticket [bb3a86e890c8e96ab] */ - assert( ArraySize(aSyscall)==76 ); - - /* get memory map allocation granularity */ - memset(&winSysInfo, 0, sizeof(SYSTEM_INFO)); -#if SQLITE_OS_WINRT - osGetNativeSystemInfo(&winSysInfo); -#else - osGetSystemInfo(&winSysInfo); -#endif - assert( winSysInfo.dwAllocationGranularity>0 ); - assert( winSysInfo.dwPageSize>0 ); - - sqlite3_vfs_register(&winVfs, 1); - -#if defined(SQLITE_WIN32_HAS_WIDE) - sqlite3_vfs_register(&winLongPathVfs, 0); -#endif - - return SQLITE_OK; -} - -SQLITE_API int sqlite3_os_end(void){ -#if SQLITE_OS_WINRT - if( sleepObj!=NULL ){ - osCloseHandle(sleepObj); - sleepObj = NULL; - } -#endif - return SQLITE_OK; -} - -#endif /* SQLITE_OS_WIN */ - -/************** End of os_win.c **********************************************/ -/************** Begin file bitvec.c ******************************************/ -/* -** 2008 February 16 -** -** The author disclaims copyright to this source code. In place of -** a legal notice, here is a blessing: -** -** May you do good and not evil. -** May you find forgiveness for yourself and forgive others. -** May you share freely, never taking more than you give. -** -************************************************************************* -** This file implements an object that represents a fixed-length -** bitmap. Bits are numbered starting with 1. -** -** A bitmap is used to record which pages of a database file have been -** journalled during a transaction, or which pages have the "dont-write" -** property. Usually only a few pages are meet either condition. -** So the bitmap is usually sparse and has low cardinality. -** But sometimes (for example when during a DROP of a large table) most -** or all of the pages in a database can get journalled. In those cases, -** the bitmap becomes dense with high cardinality. The algorithm needs -** to handle both cases well. 
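For reference, the non-default VFS registered above can be requested by name through the public API. A minimal sketch, assuming a Windows build in which "win32-longpath" was registered (SQLITE_WIN32_HAS_WIDE) and using an illustrative filename:

#include <stdio.h>
#include "sqlite3.h"

int main(void){
  sqlite3 *db = 0;
  int rc = sqlite3_open_v2("example.db", &db,
                           SQLITE_OPEN_READWRITE|SQLITE_OPEN_CREATE,
                           "win32-longpath");   /* 4th argument picks the VFS */
  if( rc!=SQLITE_OK ){
    fprintf(stderr, "open failed: %s\n", db ? sqlite3_errmsg(db) : "out of memory");
  }
  sqlite3_close(db);
  return rc;
}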
-** -** The size of the bitmap is fixed when the object is created. -** -** All bits are clear when the bitmap is created. Individual bits -** may be set or cleared one at a time. -** -** Test operations are about 100 times more common that set operations. -** Clear operations are exceedingly rare. There are usually between -** 5 and 500 set operations per Bitvec object, though the number of sets can -** sometimes grow into tens of thousands or larger. The size of the -** Bitvec object is the number of pages in the database file at the -** start of a transaction, and is thus usually less than a few thousand, -** but can be as large as 2 billion for a really big database. -*/ - -/* Size of the Bitvec structure in bytes. */ -#define BITVEC_SZ 512 - -/* Round the union size down to the nearest pointer boundary, since that's how -** it will be aligned within the Bitvec struct. */ -#define BITVEC_USIZE (((BITVEC_SZ-(3*sizeof(u32)))/sizeof(Bitvec*))*sizeof(Bitvec*)) - -/* Type of the array "element" for the bitmap representation. -** Should be a power of 2, and ideally, evenly divide into BITVEC_USIZE. -** Setting this to the "natural word" size of your CPU may improve -** performance. */ -#define BITVEC_TELEM u8 -/* Size, in bits, of the bitmap element. */ -#define BITVEC_SZELEM 8 -/* Number of elements in a bitmap array. */ -#define BITVEC_NELEM (BITVEC_USIZE/sizeof(BITVEC_TELEM)) -/* Number of bits in the bitmap array. */ -#define BITVEC_NBIT (BITVEC_NELEM*BITVEC_SZELEM) - -/* Number of u32 values in hash table. */ -#define BITVEC_NINT (BITVEC_USIZE/sizeof(u32)) -/* Maximum number of entries in hash table before -** sub-dividing and re-hashing. */ -#define BITVEC_MXHASH (BITVEC_NINT/2) -/* Hashing function for the aHash representation. -** Empirical testing showed that the *37 multiplier -** (an arbitrary prime)in the hash function provided -** no fewer collisions than the no-op *1. */ -#define BITVEC_HASH(X) (((X)*1)%BITVEC_NINT) - -#define BITVEC_NPTR (BITVEC_USIZE/sizeof(Bitvec *)) - - -/* -** A bitmap is an instance of the following structure. -** -** This bitmap records the existence of zero or more bits -** with values between 1 and iSize, inclusive. -** -** There are three possible representations of the bitmap. -** If iSize<=BITVEC_NBIT, then Bitvec.u.aBitmap[] is a straight -** bitmap. The least significant bit is bit 1. -** -** If iSize>BITVEC_NBIT and iDivisor==0 then Bitvec.u.aHash[] is -** a hash table that will hold up to BITVEC_MXHASH distinct values. -** -** Otherwise, the value i is redirected into one of BITVEC_NPTR -** sub-bitmaps pointed to by Bitvec.u.apSub[]. Each subbitmap -** handles up to iDivisor separate values of i. apSub[0] holds -** values between 1 and iDivisor. apSub[1] holds values between -** iDivisor+1 and 2*iDivisor. apSub[N] holds values between -** N*iDivisor+1 and (N+1)*iDivisor. Each subbitmap is normalized -** to hold deal with values between 1 and iDivisor. -*/ -struct Bitvec { - u32 iSize; /* Maximum bit index. Max iSize is 4,294,967,296. */ - u32 nSet; /* Number of bits that are set - only valid for aHash - ** element. Max is BITVEC_NINT. For BITVEC_SZ of 512, - ** this would be 125. */ - u32 iDivisor; /* Number of bits handled by each apSub[] entry. */ - /* Should >=0 for apSub element. */ - /* Max iDivisor is max(u32) / BITVEC_NPTR + 1. */ - /* For a BITVEC_SZ of 512, this would be 34,359,739. 
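The figures quoted above can be re-derived from the BITVEC_* macros. The short program below assumes 4-byte pointers and a 4-byte u32, which is the configuration the quoted numbers (125 hash slots, maximum iDivisor 34,359,739) correspond to.

#include <stdio.h>

int main(void){
  unsigned sz    = 512;                              /* BITVEC_SZ            */
  unsigned szPtr = 4, szU32 = 4;                     /* assumed sizes        */
  unsigned usize = ((sz - 3*szU32)/szPtr)*szPtr;     /* BITVEC_USIZE  = 500  */
  unsigned nbit  = usize*8;                          /* BITVEC_NBIT   = 4000 */
  unsigned nint  = usize/szU32;                      /* BITVEC_NINT   = 125  */
  unsigned nptr  = usize/szPtr;                      /* BITVEC_NPTR   = 125  */
  printf("usize=%u nbit=%u nint=%u mxhash=%u maxDivisor=%u\n",
         usize, nbit, nint, nint/2, 0xffffffffU/nptr + 1);
  /* usize=500 nbit=4000 nint=125 mxhash=62 maxDivisor=34359739 */
  return 0;
}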
*/ - union { - BITVEC_TELEM aBitmap[BITVEC_NELEM]; /* Bitmap representation */ - u32 aHash[BITVEC_NINT]; /* Hash table representation */ - Bitvec *apSub[BITVEC_NPTR]; /* Recursive representation */ - } u; -}; - -/* -** Create a new bitmap object able to handle bits between 0 and iSize, -** inclusive. Return a pointer to the new object. Return NULL if -** malloc fails. -*/ -SQLITE_PRIVATE Bitvec *sqlite3BitvecCreate(u32 iSize){ - Bitvec *p; - assert( sizeof(*p)==BITVEC_SZ ); - p = sqlite3MallocZero( sizeof(*p) ); - if( p ){ - p->iSize = iSize; - } - return p; -} - -/* -** Check to see if the i-th bit is set. Return true or false. -** If p is NULL (if the bitmap has not been created) or if -** i is out of range, then return false. -*/ -SQLITE_PRIVATE int sqlite3BitvecTest(Bitvec *p, u32 i){ - if( p==0 ) return 0; - if( i>p->iSize || i==0 ) return 0; - i--; - while( p->iDivisor ){ - u32 bin = i/p->iDivisor; - i = i%p->iDivisor; - p = p->u.apSub[bin]; - if (!p) { - return 0; - } - } - if( p->iSize<=BITVEC_NBIT ){ - return (p->u.aBitmap[i/BITVEC_SZELEM] & (1<<(i&(BITVEC_SZELEM-1))))!=0; - } else{ - u32 h = BITVEC_HASH(i++); - while( p->u.aHash[h] ){ - if( p->u.aHash[h]==i ) return 1; - h = (h+1) % BITVEC_NINT; - } - return 0; - } -} - -/* -** Set the i-th bit. Return 0 on success and an error code if -** anything goes wrong. -** -** This routine might cause sub-bitmaps to be allocated. Failing -** to get the memory needed to hold the sub-bitmap is the only -** that can go wrong with an insert, assuming p and i are valid. -** -** The calling function must ensure that p is a valid Bitvec object -** and that the value for "i" is within range of the Bitvec object. -** Otherwise the behavior is undefined. -*/ -SQLITE_PRIVATE int sqlite3BitvecSet(Bitvec *p, u32 i){ - u32 h; - if( p==0 ) return SQLITE_OK; - assert( i>0 ); - assert( i<=p->iSize ); - i--; - while((p->iSize > BITVEC_NBIT) && p->iDivisor) { - u32 bin = i/p->iDivisor; - i = i%p->iDivisor; - if( p->u.apSub[bin]==0 ){ - p->u.apSub[bin] = sqlite3BitvecCreate( p->iDivisor ); - if( p->u.apSub[bin]==0 ) return SQLITE_NOMEM; - } - p = p->u.apSub[bin]; - } - if( p->iSize<=BITVEC_NBIT ){ - p->u.aBitmap[i/BITVEC_SZELEM] |= 1 << (i&(BITVEC_SZELEM-1)); - return SQLITE_OK; - } - h = BITVEC_HASH(i++); - /* if there wasn't a hash collision, and this doesn't */ - /* completely fill the hash, then just add it without */ - /* worring about sub-dividing and re-hashing. */ - if( !p->u.aHash[h] ){ - if (p->nSet<(BITVEC_NINT-1)) { - goto bitvec_set_end; - } else { - goto bitvec_set_rehash; - } - } - /* there was a collision, check to see if it's already */ - /* in hash, if not, try to find a spot for it */ - do { - if( p->u.aHash[h]==i ) return SQLITE_OK; - h++; - if( h>=BITVEC_NINT ) h = 0; - } while( p->u.aHash[h] ); - /* we didn't find it in the hash. h points to the first */ - /* available free spot. check to see if this is going to */ - /* make our hash too "full". */ -bitvec_set_rehash: - if( p->nSet>=BITVEC_MXHASH ){ - unsigned int j; - int rc; - u32 *aiValues = sqlite3StackAllocRaw(0, sizeof(p->u.aHash)); - if( aiValues==0 ){ - return SQLITE_NOMEM; - }else{ - memcpy(aiValues, p->u.aHash, sizeof(p->u.aHash)); - memset(p->u.apSub, 0, sizeof(p->u.apSub)); - p->iDivisor = (p->iSize + BITVEC_NPTR - 1)/BITVEC_NPTR; - rc = sqlite3BitvecSet(p, i); - for(j=0; jnSet++; - p->u.aHash[h] = i; - return SQLITE_OK; -} - -/* -** Clear the i-th bit. 
-** -** pBuf must be a pointer to at least BITVEC_SZ bytes of temporary storage -** that BitvecClear can use to rebuild its hash table. -*/ -SQLITE_PRIVATE void sqlite3BitvecClear(Bitvec *p, u32 i, void *pBuf){ - if( p==0 ) return; - assert( i>0 ); - i--; - while( p->iDivisor ){ - u32 bin = i/p->iDivisor; - i = i%p->iDivisor; - p = p->u.apSub[bin]; - if (!p) { - return; - } - } - if( p->iSize<=BITVEC_NBIT ){ - p->u.aBitmap[i/BITVEC_SZELEM] &= ~(1 << (i&(BITVEC_SZELEM-1))); - }else{ - unsigned int j; - u32 *aiValues = pBuf; - memcpy(aiValues, p->u.aHash, sizeof(p->u.aHash)); - memset(p->u.aHash, 0, sizeof(p->u.aHash)); - p->nSet = 0; - for(j=0; j<BITVEC_NINT; j++){ - if( aiValues[j] ){ - u32 h = BITVEC_HASH(aiValues[j]-1); - p->nSet++; - while( p->u.aHash[h] ){ - h++; - if( h>=BITVEC_NINT ) h = 0; - } - p->u.aHash[h] = aiValues[j]; - } - } - } -} - -/* -** Destroy a bitmap object. Reclaim all memory used. -*/ -SQLITE_PRIVATE void sqlite3BitvecDestroy(Bitvec *p){ - if( p==0 ) return; - if( p->iDivisor ){ - unsigned int i; - for(i=0; i<BITVEC_NPTR; i++){ - sqlite3BitvecDestroy(p->u.apSub[i]); - } - } - sqlite3_free(p); -} - -/* -** Return the value of the iSize parameter specified when Bitvec *p -** was created. -*/ -SQLITE_PRIVATE u32 sqlite3BitvecSize(Bitvec *p){ - return p->iSize; -} - -#ifndef SQLITE_OMIT_BUILTIN_TEST -/* -** Let V[] be an array of unsigned characters sufficient to hold -** up to N bits. Let I be an integer between 0 and N. 0<=I<N. Then the following -** macros can be used to set, clear, or test individual bits within V. -*/ -#define SETBIT(V,I) V[I>>3] |= (1<<(I&7)) -#define CLEARBIT(V,I) V[I>>3] &= ~(1<<(I&7)) -#define TESTBIT(V,I) (V[I>>3]&(1<<(I&7)))!=0 - -/* -** This routine runs an extensive test of the Bitvec code. -** -** The input is an array of integers that acts as a program -** to test the Bitvec. The integers are opcodes followed -** by 0, 1, or 3 operands, depending on the opcode. Another -** opcode follows immediately after the last operand. -** -** There are 6 opcodes numbered from 0 through 5. 0 is the -** "halt" opcode and causes the test to end. -** -** 0 Halt and return the number of errors -** 1 N S X Set N bits beginning with S and incrementing by X -** 2 N S X Clear N bits beginning with S and incrementing by X -** 3 N Set N randomly chosen bits -** 4 N Clear N randomly chosen bits -** 5 N S X Set N bits from S increment X in array only, not in bitvec -** -** Opcodes 1 through 4 perform set and clear operations -** on both a Bitvec object and on a linear array of bits obtained from malloc. -** Opcode 5 works on the linear array only, not on the Bitvec. -** Opcode 5 is used to deliberately induce a fault in order to -** confirm that error detection works. -** -** At the conclusion of the test the linear array is compared -** against the Bitvec object. If there are any differences, -** an error is returned. If they are the same, zero is returned. -** -** If a memory allocation error occurs, return -1.
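As an illustration of the opcode encoding just described, a hypothetical test program might look like the array below (layout per step: opcode, then N, then S and X for opcodes 1, 2 and 5). It is only meant to show the encoding; actually driving sqlite3BitvecBuiltinTest() requires the internal test harness.

int aOp[] = {
  1, 4, 1, 1,   /* opcode 1: set N=4 bits, starting at S=1, stepping by X=1 */
  2, 1, 2, 0,   /* opcode 2: clear N=1 bit at S=2 (X unused when N==1)      */
  3, 2,         /* opcode 3: set 2 randomly chosen bits                     */
  0             /* halt: compare the Bitvec against the reference array and */
};              /* return 0 if they agree                                   */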
-*/ -SQLITE_PRIVATE int sqlite3BitvecBuiltinTest(int sz, int *aOp){ - Bitvec *pBitvec = 0; - unsigned char *pV = 0; - int rc = -1; - int i, nx, pc, op; - void *pTmpSpace; - - /* Allocate the Bitvec to be tested and a linear array of - ** bits to act as the reference */ - pBitvec = sqlite3BitvecCreate( sz ); - pV = sqlite3MallocZero( (sz+7)/8 + 1 ); - pTmpSpace = sqlite3_malloc(BITVEC_SZ); - if( pBitvec==0 || pV==0 || pTmpSpace==0 ) goto bitvec_end; - - /* NULL pBitvec tests */ - sqlite3BitvecSet(0, 1); - sqlite3BitvecClear(0, 1, pTmpSpace); - - /* Run the program */ - pc = 0; - while( (op = aOp[pc])!=0 ){ - switch( op ){ - case 1: - case 2: - case 5: { - nx = 4; - i = aOp[pc+2] - 1; - aOp[pc+2] += aOp[pc+3]; - break; - } - case 3: - case 4: - default: { - nx = 2; - sqlite3_randomness(sizeof(i), &i); - break; - } - } - if( (--aOp[pc+1]) > 0 ) nx = 0; - pc += nx; - i = (i & 0x7fffffff)%sz; - if( (op & 1)!=0 ){ - SETBIT(pV, (i+1)); - if( op!=5 ){ - if( sqlite3BitvecSet(pBitvec, i+1) ) goto bitvec_end; - } - }else{ - CLEARBIT(pV, (i+1)); - sqlite3BitvecClear(pBitvec, i+1, pTmpSpace); - } - } - - /* Test to make sure the linear array exactly matches the - ** Bitvec object. Start with the assumption that they do - ** match (rc==0). Change rc to non-zero if a discrepancy - ** is found. - */ - rc = sqlite3BitvecTest(0,0) + sqlite3BitvecTest(pBitvec, sz+1) - + sqlite3BitvecTest(pBitvec, 0) - + (sqlite3BitvecSize(pBitvec) - sz); - for(i=1; i<=sz; i++){ - if( (TESTBIT(pV,i))!=sqlite3BitvecTest(pBitvec,i) ){ - rc = i; - break; - } - } - - /* Free allocated structure */ -bitvec_end: - sqlite3_free(pTmpSpace); - sqlite3_free(pV); - sqlite3BitvecDestroy(pBitvec); - return rc; -} -#endif /* SQLITE_OMIT_BUILTIN_TEST */ - -/************** End of bitvec.c **********************************************/ -/************** Begin file pcache.c ******************************************/ -/* -** 2008 August 05 -** -** The author disclaims copyright to this source code. In place of -** a legal notice, here is a blessing: -** -** May you do good and not evil. -** May you find forgiveness for yourself and forgive others. -** May you share freely, never taking more than you give. -** -************************************************************************* -** This file implements that page cache. -*/ - -/* -** A complete page cache is an instance of this structure. -*/ -struct PCache { - PgHdr *pDirty, *pDirtyTail; /* List of dirty pages in LRU order */ - PgHdr *pSynced; /* Last synced page in dirty page list */ - int nRef; /* Number of referenced pages */ - int szCache; /* Configured cache size */ - int szPage; /* Size of every page in this cache */ - int szExtra; /* Size of extra space for each page */ - u8 bPurgeable; /* True if pages are on backing store */ - u8 eCreate; /* eCreate value for for xFetch() */ - int (*xStress)(void*,PgHdr*); /* Call to try make a page clean */ - void *pStress; /* Argument to xStress */ - sqlite3_pcache *pCache; /* Pluggable cache module */ - PgHdr *pPage1; /* Reference to page 1 */ -}; - -/* -** Some of the assert() macros in this code are too expensive to run -** even during normal debugging. Use them only rarely on long-running -** tests. Enable the expensive asserts using the -** -DSQLITE_ENABLE_EXPENSIVE_ASSERT=1 compile-time option. 
-*/ -#ifdef SQLITE_ENABLE_EXPENSIVE_ASSERT -# define expensive_assert(X) assert(X) -#else -# define expensive_assert(X) -#endif - -/********************************** Linked List Management ********************/ - -#if !defined(NDEBUG) && defined(SQLITE_ENABLE_EXPENSIVE_ASSERT) -/* -** Check that the pCache->pSynced variable is set correctly. If it -** is not, either fail an assert or return zero. Otherwise, return -** non-zero. This is only used in debugging builds, as follows: -** -** expensive_assert( pcacheCheckSynced(pCache) ); -*/ -static int pcacheCheckSynced(PCache *pCache){ - PgHdr *p; - for(p=pCache->pDirtyTail; p!=pCache->pSynced; p=p->pDirtyPrev){ - assert( p->nRef || (p->flags&PGHDR_NEED_SYNC) ); - } - return (p==0 || p->nRef || (p->flags&PGHDR_NEED_SYNC)==0); -} -#endif /* !NDEBUG && SQLITE_ENABLE_EXPENSIVE_ASSERT */ - -/* -** Remove page pPage from the list of dirty pages. -*/ -static void pcacheRemoveFromDirtyList(PgHdr *pPage){ - PCache *p = pPage->pCache; - - assert( pPage->pDirtyNext || pPage==p->pDirtyTail ); - assert( pPage->pDirtyPrev || pPage==p->pDirty ); - - /* Update the PCache1.pSynced variable if necessary. */ - if( p->pSynced==pPage ){ - PgHdr *pSynced = pPage->pDirtyPrev; - while( pSynced && (pSynced->flags&PGHDR_NEED_SYNC) ){ - pSynced = pSynced->pDirtyPrev; - } - p->pSynced = pSynced; - } - - if( pPage->pDirtyNext ){ - pPage->pDirtyNext->pDirtyPrev = pPage->pDirtyPrev; - }else{ - assert( pPage==p->pDirtyTail ); - p->pDirtyTail = pPage->pDirtyPrev; - } - if( pPage->pDirtyPrev ){ - pPage->pDirtyPrev->pDirtyNext = pPage->pDirtyNext; - }else{ - assert( pPage==p->pDirty ); - p->pDirty = pPage->pDirtyNext; - if( p->pDirty==0 && p->bPurgeable ){ - assert( p->eCreate==1 ); - p->eCreate = 2; - } - } - pPage->pDirtyNext = 0; - pPage->pDirtyPrev = 0; - - expensive_assert( pcacheCheckSynced(p) ); -} - -/* -** Add page pPage to the head of the dirty list (PCache1.pDirty is set to -** pPage). -*/ -static void pcacheAddToDirtyList(PgHdr *pPage){ - PCache *p = pPage->pCache; - - assert( pPage->pDirtyNext==0 && pPage->pDirtyPrev==0 && p->pDirty!=pPage ); - - pPage->pDirtyNext = p->pDirty; - if( pPage->pDirtyNext ){ - assert( pPage->pDirtyNext->pDirtyPrev==0 ); - pPage->pDirtyNext->pDirtyPrev = pPage; - }else if( p->bPurgeable ){ - assert( p->eCreate==2 ); - p->eCreate = 1; - } - p->pDirty = pPage; - if( !p->pDirtyTail ){ - p->pDirtyTail = pPage; - } - if( !p->pSynced && 0==(pPage->flags&PGHDR_NEED_SYNC) ){ - p->pSynced = pPage; - } - expensive_assert( pcacheCheckSynced(p) ); -} - -/* -** Wrapper around the pluggable caches xUnpin method. If the cache is -** being used for an in-memory database, this function is a no-op. -*/ -static void pcacheUnpin(PgHdr *p){ - PCache *pCache = p->pCache; - if( pCache->bPurgeable ){ - if( p->pgno==1 ){ - pCache->pPage1 = 0; - } - sqlite3GlobalConfig.pcache2.xUnpin(pCache->pCache, p->pPage, 0); - } -} - -/*************************************************** General Interfaces ****** -** -** Initialize and shutdown the page cache subsystem. Neither of these -** functions are threadsafe. -*/ -SQLITE_PRIVATE int sqlite3PcacheInitialize(void){ - if( sqlite3GlobalConfig.pcache2.xInit==0 ){ - /* IMPLEMENTATION-OF: R-26801-64137 If the xInit() method is NULL, then the - ** built-in default page cache is used instead of the application defined - ** page cache. 
*/ - sqlite3PCacheSetDefault(); - } - return sqlite3GlobalConfig.pcache2.xInit(sqlite3GlobalConfig.pcache2.pArg); -} -SQLITE_PRIVATE void sqlite3PcacheShutdown(void){ - if( sqlite3GlobalConfig.pcache2.xShutdown ){ - /* IMPLEMENTATION-OF: R-26000-56589 The xShutdown() method may be NULL. */ - sqlite3GlobalConfig.pcache2.xShutdown(sqlite3GlobalConfig.pcache2.pArg); - } -} - -/* -** Return the size in bytes of a PCache object. -*/ -SQLITE_PRIVATE int sqlite3PcacheSize(void){ return sizeof(PCache); } - -/* -** Create a new PCache object. Storage space to hold the object -** has already been allocated and is passed in as the p pointer. -** The caller discovers how much space needs to be allocated by -** calling sqlite3PcacheSize(). -*/ -SQLITE_PRIVATE void sqlite3PcacheOpen( - int szPage, /* Size of every page */ - int szExtra, /* Extra space associated with each page */ - int bPurgeable, /* True if pages are on backing store */ - int (*xStress)(void*,PgHdr*),/* Call to try to make pages clean */ - void *pStress, /* Argument to xStress */ - PCache *p /* Preallocated space for the PCache */ -){ - memset(p, 0, sizeof(PCache)); - p->szPage = szPage; - p->szExtra = szExtra; - p->bPurgeable = bPurgeable; - p->eCreate = 2; - p->xStress = xStress; - p->pStress = pStress; - p->szCache = 100; -} - -/* -** Change the page size for PCache object. The caller must ensure that there -** are no outstanding page references when this function is called. -*/ -SQLITE_PRIVATE void sqlite3PcacheSetPageSize(PCache *pCache, int szPage){ - assert( pCache->nRef==0 && pCache->pDirty==0 ); - if( pCache->pCache ){ - sqlite3GlobalConfig.pcache2.xDestroy(pCache->pCache); - pCache->pCache = 0; - pCache->pPage1 = 0; - } - pCache->szPage = szPage; -} - -/* -** Compute the number of pages of cache requested. -*/ -static int numberOfCachePages(PCache *p){ - if( p->szCache>=0 ){ - return p->szCache; - }else{ - return (int)((-1024*(i64)p->szCache)/(p->szPage+p->szExtra)); - } -} - -/* -** Try to obtain a page from the cache. -*/ -SQLITE_PRIVATE int sqlite3PcacheFetch( - PCache *pCache, /* Obtain the page from this cache */ - Pgno pgno, /* Page number to obtain */ - int createFlag, /* If true, create page if it does not exist already */ - PgHdr **ppPage /* Write the page here */ -){ - sqlite3_pcache_page *pPage; - PgHdr *pPgHdr = 0; - int eCreate; - - assert( pCache!=0 ); - assert( createFlag==1 || createFlag==0 ); - assert( pgno>0 ); - - /* If the pluggable cache (sqlite3_pcache*) has not been allocated, - ** allocate it now. - */ - if( !pCache->pCache ){ - sqlite3_pcache *p; - if( !createFlag ){ - *ppPage = 0; - return SQLITE_OK; - } - p = sqlite3GlobalConfig.pcache2.xCreate( - pCache->szPage, pCache->szExtra + sizeof(PgHdr), pCache->bPurgeable - ); - if( !p ){ - return SQLITE_NOMEM; - } - sqlite3GlobalConfig.pcache2.xCachesize(p, numberOfCachePages(pCache)); - pCache->pCache = p; - } - - /* eCreate defines what to do if the page does not exist. - ** 0 Do not allocate a new page. (createFlag==0) - ** 1 Allocate a new page if doing so is inexpensive. - ** (createFlag==1 AND bPurgeable AND pDirty) - ** 2 Allocate a new page even it doing so is difficult. - ** (createFlag==1 AND !(bPurgeable AND pDirty) - */ - eCreate = createFlag==0 ? 0 : pCache->eCreate; - assert( (createFlag*(1+(!pCache->bPurgeable||!pCache->pDirty)))==eCreate ); - pPage = sqlite3GlobalConfig.pcache2.xFetch(pCache->pCache, pgno, eCreate); - if( !pPage && eCreate==1 ){ - PgHdr *pPg; - - /* Find a dirty page to write-out and recycle. 
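A worked example of the negative-szCache convention handled by numberOfCachePages() above: a non-negative value is a page count, while a negative value is a memory budget in KiB that gets divided by the per-page footprint. The helper name is illustrative.

#include <stdio.h>
typedef long long i64;

static int cachePages(int szCache, int szPage, int szExtra){
  if( szCache>=0 ) return szCache;
  return (int)((-1024*(i64)szCache)/(szPage+szExtra));
}

int main(void){
  printf("%d\n", cachePages(2000, 1024, 76));   /* 2000: literal page count    */
  printf("%d\n", cachePages(-2000, 1024, 76));  /* 1861: 2,048,000 B / 1,100 B */
  return 0;
}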
First try to find a - ** page that does not require a journal-sync (one with PGHDR_NEED_SYNC - ** cleared), but if that is not possible settle for any other - ** unreferenced dirty page. - */ - expensive_assert( pcacheCheckSynced(pCache) ); - for(pPg=pCache->pSynced; - pPg && (pPg->nRef || (pPg->flags&PGHDR_NEED_SYNC)); - pPg=pPg->pDirtyPrev - ); - pCache->pSynced = pPg; - if( !pPg ){ - for(pPg=pCache->pDirtyTail; pPg && pPg->nRef; pPg=pPg->pDirtyPrev); - } - if( pPg ){ - int rc; -#ifdef SQLITE_LOG_CACHE_SPILL - sqlite3_log(SQLITE_FULL, - "spill page %d making room for %d - cache used: %d/%d", - pPg->pgno, pgno, - sqlite3GlobalConfig.pcache.xPagecount(pCache->pCache), - numberOfCachePages(pCache)); -#endif - rc = pCache->xStress(pCache->pStress, pPg); - if( rc!=SQLITE_OK && rc!=SQLITE_BUSY ){ - return rc; - } - } - - pPage = sqlite3GlobalConfig.pcache2.xFetch(pCache->pCache, pgno, 2); - } - - if( pPage ){ - pPgHdr = (PgHdr *)pPage->pExtra; - - if( !pPgHdr->pPage ){ - memset(pPgHdr, 0, sizeof(PgHdr)); - pPgHdr->pPage = pPage; - pPgHdr->pData = pPage->pBuf; - pPgHdr->pExtra = (void *)&pPgHdr[1]; - memset(pPgHdr->pExtra, 0, pCache->szExtra); - pPgHdr->pCache = pCache; - pPgHdr->pgno = pgno; - } - assert( pPgHdr->pCache==pCache ); - assert( pPgHdr->pgno==pgno ); - assert( pPgHdr->pData==pPage->pBuf ); - assert( pPgHdr->pExtra==(void *)&pPgHdr[1] ); - - if( 0==pPgHdr->nRef ){ - pCache->nRef++; - } - pPgHdr->nRef++; - if( pgno==1 ){ - pCache->pPage1 = pPgHdr; - } - } - *ppPage = pPgHdr; - return (pPgHdr==0 && eCreate) ? SQLITE_NOMEM : SQLITE_OK; -} - -/* -** Decrement the reference count on a page. If the page is clean and the -** reference count drops to 0, then it is made elible for recycling. -*/ -SQLITE_PRIVATE void sqlite3PcacheRelease(PgHdr *p){ - assert( p->nRef>0 ); - p->nRef--; - if( p->nRef==0 ){ - PCache *pCache = p->pCache; - pCache->nRef--; - if( (p->flags&PGHDR_DIRTY)==0 ){ - pcacheUnpin(p); - }else{ - /* Move the page to the head of the dirty list. */ - pcacheRemoveFromDirtyList(p); - pcacheAddToDirtyList(p); - } - } -} - -/* -** Increase the reference count of a supplied page by 1. -*/ -SQLITE_PRIVATE void sqlite3PcacheRef(PgHdr *p){ - assert(p->nRef>0); - p->nRef++; -} - -/* -** Drop a page from the cache. There must be exactly one reference to the -** page. This function deletes that reference, so after it returns the -** page pointed to by p is invalid. -*/ -SQLITE_PRIVATE void sqlite3PcacheDrop(PgHdr *p){ - PCache *pCache; - assert( p->nRef==1 ); - if( p->flags&PGHDR_DIRTY ){ - pcacheRemoveFromDirtyList(p); - } - pCache = p->pCache; - pCache->nRef--; - if( p->pgno==1 ){ - pCache->pPage1 = 0; - } - sqlite3GlobalConfig.pcache2.xUnpin(pCache->pCache, p->pPage, 1); -} - -/* -** Make sure the page is marked as dirty. If it isn't dirty already, -** make it so. -*/ -SQLITE_PRIVATE void sqlite3PcacheMakeDirty(PgHdr *p){ - p->flags &= ~PGHDR_DONT_WRITE; - assert( p->nRef>0 ); - if( 0==(p->flags & PGHDR_DIRTY) ){ - p->flags |= PGHDR_DIRTY; - pcacheAddToDirtyList( p); - } -} - -/* -** Make sure the page is marked as clean. If it isn't clean already, -** make it so. -*/ -SQLITE_PRIVATE void sqlite3PcacheMakeClean(PgHdr *p){ - if( (p->flags & PGHDR_DIRTY) ){ - pcacheRemoveFromDirtyList(p); - p->flags &= ~(PGHDR_DIRTY|PGHDR_NEED_SYNC); - if( p->nRef==0 ){ - pcacheUnpin(p); - } - } -} - -/* -** Make every page in the cache clean. 
-*/ -SQLITE_PRIVATE void sqlite3PcacheCleanAll(PCache *pCache){ - PgHdr *p; - while( (p = pCache->pDirty)!=0 ){ - sqlite3PcacheMakeClean(p); - } -} - -/* -** Clear the PGHDR_NEED_SYNC flag from all dirty pages. -*/ -SQLITE_PRIVATE void sqlite3PcacheClearSyncFlags(PCache *pCache){ - PgHdr *p; - for(p=pCache->pDirty; p; p=p->pDirtyNext){ - p->flags &= ~PGHDR_NEED_SYNC; - } - pCache->pSynced = pCache->pDirtyTail; -} - -/* -** Change the page number of page p to newPgno. -*/ -SQLITE_PRIVATE void sqlite3PcacheMove(PgHdr *p, Pgno newPgno){ - PCache *pCache = p->pCache; - assert( p->nRef>0 ); - assert( newPgno>0 ); - sqlite3GlobalConfig.pcache2.xRekey(pCache->pCache, p->pPage, p->pgno,newPgno); - p->pgno = newPgno; - if( (p->flags&PGHDR_DIRTY) && (p->flags&PGHDR_NEED_SYNC) ){ - pcacheRemoveFromDirtyList(p); - pcacheAddToDirtyList(p); - } -} - -/* -** Drop every cache entry whose page number is greater than "pgno". The -** caller must ensure that there are no outstanding references to any pages -** other than page 1 with a page number greater than pgno. -** -** If there is a reference to page 1 and the pgno parameter passed to this -** function is 0, then the data area associated with page 1 is zeroed, but -** the page object is not dropped. -*/ -SQLITE_PRIVATE void sqlite3PcacheTruncate(PCache *pCache, Pgno pgno){ - if( pCache->pCache ){ - PgHdr *p; - PgHdr *pNext; - for(p=pCache->pDirty; p; p=pNext){ - pNext = p->pDirtyNext; - /* This routine never gets called with a positive pgno except right - ** after sqlite3PcacheCleanAll(). So if there are dirty pages, - ** it must be that pgno==0. - */ - assert( p->pgno>0 ); - if( ALWAYS(p->pgno>pgno) ){ - assert( p->flags&PGHDR_DIRTY ); - sqlite3PcacheMakeClean(p); - } - } - if( pgno==0 && pCache->pPage1 ){ - memset(pCache->pPage1->pData, 0, pCache->szPage); - pgno = 1; - } - sqlite3GlobalConfig.pcache2.xTruncate(pCache->pCache, pgno+1); - } -} - -/* -** Close a cache. -*/ -SQLITE_PRIVATE void sqlite3PcacheClose(PCache *pCache){ - if( pCache->pCache ){ - sqlite3GlobalConfig.pcache2.xDestroy(pCache->pCache); - } -} - -/* -** Discard the contents of the cache. -*/ -SQLITE_PRIVATE void sqlite3PcacheClear(PCache *pCache){ - sqlite3PcacheTruncate(pCache, 0); -} - -/* -** Merge two lists of pages connected by pDirty and in pgno order. -** Do not bother fixing the pDirtyPrev pointers. -*/ -static PgHdr *pcacheMergeDirtyList(PgHdr *pA, PgHdr *pB){ - PgHdr result, *pTail; - pTail = &result; - while( pA && pB ){ - if( pA->pgno<pB->pgno ){ - pTail->pDirty = pA; - pTail = pA; - pA = pA->pDirty; - }else{ - pTail->pDirty = pB; - pTail = pB; - pB = pB->pDirty; - } - } - if( pA ){ - pTail->pDirty = pA; - }else if( pB ){ - pTail->pDirty = pB; - }else{ - pTail->pDirty = 0; - } - return result.pDirty; -} - -/* -** Sort the list of pages in ascending order by pgno. Pages are -** connected by pDirty pointers. The pDirtyPrev pointers are -** corrupted by this sort. -** -** Since there cannot be more than 2^31 distinct pages in a database, -** there cannot be more than 31 buckets required by the merge sorter. -** One extra bucket is added to catch overflow in case something -** ever changes to make the previous sentence incorrect.
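The bucket scheme described above is easier to see on a plain singly linked list of ints; a minimal standalone sketch (types and names are illustrative, not SQLite's): bucket i holds a sorted run of length 2^i, runs are carried upward like binary addition, and the buckets are merged together at the end.

#include <stdio.h>

typedef struct Node { int v; struct Node *next; } Node;

static Node *mergeSorted(Node *a, Node *b){
  Node head, *t = &head;
  while( a && b ){
    if( a->v < b->v ){ t->next = a; t = a; a = a->next; }
    else             { t->next = b; t = b; b = b->next; }
  }
  t->next = a ? a : b;
  return head.next;
}

#define NBUCKET 32
static Node *sortList(Node *in){
  Node *a[NBUCKET] = {0}, *p;
  int i;
  while( in ){
    p = in; in = in->next; p->next = 0;
    for(i=0; i<NBUCKET-1 && a[i]; i++){ p = mergeSorted(a[i], p); a[i] = 0; }
    a[i] = a[i] ? mergeSorted(a[i], p) : p;
  }
  for(p=0, i=0; i<NBUCKET; i++) p = mergeSorted(p, a[i]);
  return p;
}

int main(void){
  Node n3 = {3,0}, n1 = {1,&n3}, n2 = {2,&n1};         /* list: 2 -> 1 -> 3 */
  for(Node *q=sortList(&n2); q; q=q->next) printf("%d ", q->v);
  printf("\n");                                        /* prints: 1 2 3     */
  return 0;
}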
-*/ -#define N_SORT_BUCKET 32 -static PgHdr *pcacheSortDirtyList(PgHdr *pIn){ - PgHdr *a[N_SORT_BUCKET], *p; - int i; - memset(a, 0, sizeof(a)); - while( pIn ){ - p = pIn; - pIn = p->pDirty; - p->pDirty = 0; - for(i=0; ALWAYS(ipDirty; p; p=p->pDirtyNext){ - p->pDirty = p->pDirtyNext; - } - return pcacheSortDirtyList(pCache->pDirty); -} - -/* -** Return the total number of referenced pages held by the cache. -*/ -SQLITE_PRIVATE int sqlite3PcacheRefCount(PCache *pCache){ - return pCache->nRef; -} - -/* -** Return the number of references to the page supplied as an argument. -*/ -SQLITE_PRIVATE int sqlite3PcachePageRefcount(PgHdr *p){ - return p->nRef; -} - -/* -** Return the total number of pages in the cache. -*/ -SQLITE_PRIVATE int sqlite3PcachePagecount(PCache *pCache){ - int nPage = 0; - if( pCache->pCache ){ - nPage = sqlite3GlobalConfig.pcache2.xPagecount(pCache->pCache); - } - return nPage; -} - -#ifdef SQLITE_TEST -/* -** Get the suggested cache-size value. -*/ -SQLITE_PRIVATE int sqlite3PcacheGetCachesize(PCache *pCache){ - return numberOfCachePages(pCache); -} -#endif - -/* -** Set the suggested cache-size value. -*/ -SQLITE_PRIVATE void sqlite3PcacheSetCachesize(PCache *pCache, int mxPage){ - pCache->szCache = mxPage; - if( pCache->pCache ){ - sqlite3GlobalConfig.pcache2.xCachesize(pCache->pCache, - numberOfCachePages(pCache)); - } -} - -/* -** Free up as much memory as possible from the page cache. -*/ -SQLITE_PRIVATE void sqlite3PcacheShrink(PCache *pCache){ - if( pCache->pCache ){ - sqlite3GlobalConfig.pcache2.xShrink(pCache->pCache); - } -} - -#if defined(SQLITE_CHECK_PAGES) || defined(SQLITE_DEBUG) -/* -** For all dirty pages currently in the cache, invoke the specified -** callback. This is only used if the SQLITE_CHECK_PAGES macro is -** defined. -*/ -SQLITE_PRIVATE void sqlite3PcacheIterateDirty(PCache *pCache, void (*xIter)(PgHdr *)){ - PgHdr *pDirty; - for(pDirty=pCache->pDirty; pDirty; pDirty=pDirty->pDirtyNext){ - xIter(pDirty); - } -} -#endif - -/************** End of pcache.c **********************************************/ -/************** Begin file pcache1.c *****************************************/ -/* -** 2008 November 05 -** -** The author disclaims copyright to this source code. In place of -** a legal notice, here is a blessing: -** -** May you do good and not evil. -** May you find forgiveness for yourself and forgive others. -** May you share freely, never taking more than you give. -** -************************************************************************* -** -** This file implements the default page cache implementation (the -** sqlite3_pcache interface). It also contains part of the implementation -** of the SQLITE_CONFIG_PAGECACHE and sqlite3_release_memory() features. -** If the default page cache implementation is overriden, then neither of -** these two features are available. -*/ - - -typedef struct PCache1 PCache1; -typedef struct PgHdr1 PgHdr1; -typedef struct PgFreeslot PgFreeslot; -typedef struct PGroup PGroup; - -/* Each page cache (or PCache) belongs to a PGroup. A PGroup is a set -** of one or more PCaches that are able to recycle each others unpinned -** pages when they are under memory pressure. A PGroup is an instance of -** the following object. -** -** This page cache implementation works in one of two modes: -** -** (1) Every PCache is the sole member of its own PGroup. There is -** one PGroup per PCache. -** -** (2) There is a single global PGroup that all PCaches are a member -** of. 
-** -** Mode 1 uses more memory (since PCache instances are not able to rob -** unused pages from other PCaches) but it also operates without a mutex, -** and is therefore often faster. Mode 2 requires a mutex in order to be -** threadsafe, but recycles pages more efficiently. -** -** For mode (1), PGroup.mutex is NULL. For mode (2) there is only a single -** PGroup which is the pcache1.grp global variable and its mutex is -** SQLITE_MUTEX_STATIC_LRU. -*/ -struct PGroup { - sqlite3_mutex *mutex; /* MUTEX_STATIC_LRU or NULL */ - unsigned int nMaxPage; /* Sum of nMax for purgeable caches */ - unsigned int nMinPage; /* Sum of nMin for purgeable caches */ - unsigned int mxPinned; /* nMaxpage + 10 - nMinPage */ - unsigned int nCurrentPage; /* Number of purgeable pages allocated */ - PgHdr1 *pLruHead, *pLruTail; /* LRU list of unpinned pages */ -}; - -/* Each page cache is an instance of the following object. Every -** open database file (including each in-memory database and each -** temporary or transient database) has a single page cache which -** is an instance of this object. -** -** Pointers to structures of this type are cast and returned as -** opaque sqlite3_pcache* handles. -*/ -struct PCache1 { - /* Cache configuration parameters. Page size (szPage) and the purgeable - ** flag (bPurgeable) are set when the cache is created. nMax may be - ** modified at any time by a call to the pcache1Cachesize() method. - ** The PGroup mutex must be held when accessing nMax. - */ - PGroup *pGroup; /* PGroup this cache belongs to */ - int szPage; /* Size of allocated pages in bytes */ - int szExtra; /* Size of extra space in bytes */ - int bPurgeable; /* True if cache is purgeable */ - unsigned int nMin; /* Minimum number of pages reserved */ - unsigned int nMax; /* Configured "cache_size" value */ - unsigned int n90pct; /* nMax*9/10 */ - unsigned int iMaxKey; /* Largest key seen since xTruncate() */ - - /* Hash table of all pages. The following variables may only be accessed - ** when the accessor is holding the PGroup mutex. - */ - unsigned int nRecyclable; /* Number of pages in the LRU list */ - unsigned int nPage; /* Total number of pages in apHash */ - unsigned int nHash; /* Number of slots in apHash[] */ - PgHdr1 **apHash; /* Hash table for fast lookup by key */ -}; - -/* -** Each cache entry is represented by an instance of the following -** structure. Unless SQLITE_PCACHE_SEPARATE_HEADER is defined, a buffer of -** PgHdr1.pCache->szPage bytes is allocated directly before this structure -** in memory. -*/ -struct PgHdr1 { - sqlite3_pcache_page page; - unsigned int iKey; /* Key value (page number) */ - u8 isPinned; /* Page in use, not on the LRU list */ - PgHdr1 *pNext; /* Next in hash table chain */ - PCache1 *pCache; /* Cache that currently owns this page */ - PgHdr1 *pLruNext; /* Next in LRU list of unpinned pages */ - PgHdr1 *pLruPrev; /* Previous in LRU list of unpinned pages */ -}; - -/* -** Free slots in the allocator used to divide up the buffer provided using -** the SQLITE_CONFIG_PAGECACHE mechanism. -*/ -struct PgFreeslot { - PgFreeslot *pNext; /* Next free slot */ -}; - -/* -** Global data used by this cache. -*/ -static SQLITE_WSD struct PCacheGlobal { - PGroup grp; /* The global PGroup for mode (2) */ - - /* Variables related to SQLITE_CONFIG_PAGECACHE settings. The - ** szSlot, nSlot, pStart, pEnd, nReserve, and isInit values are all - ** fixed at sqlite3_initialize() time and do not require mutex protection. - ** The nFreeSlot and pFree values do require mutex protection. 
- */ - int isInit; /* True if initialized */ - int szSlot; /* Size of each free slot */ - int nSlot; /* The number of pcache slots */ - int nReserve; /* Try to keep nFreeSlot above this */ - void *pStart, *pEnd; /* Bounds of pagecache malloc range */ - /* Above requires no mutex. Use mutex below for variable that follow. */ - sqlite3_mutex *mutex; /* Mutex for accessing the following: */ - PgFreeslot *pFree; /* Free page blocks */ - int nFreeSlot; /* Number of unused pcache slots */ - /* The following value requires a mutex to change. We skip the mutex on - ** reading because (1) most platforms read a 32-bit integer atomically and - ** (2) even if an incorrect value is read, no great harm is done since this - ** is really just an optimization. */ - int bUnderPressure; /* True if low on PAGECACHE memory */ -} pcache1_g; - -/* -** All code in this file should access the global structure above via the -** alias "pcache1". This ensures that the WSD emulation is used when -** compiling for systems that do not support real WSD. -*/ -#define pcache1 (GLOBAL(struct PCacheGlobal, pcache1_g)) - -/* -** Macros to enter and leave the PCache LRU mutex. -*/ -#define pcache1EnterMutex(X) sqlite3_mutex_enter((X)->mutex) -#define pcache1LeaveMutex(X) sqlite3_mutex_leave((X)->mutex) - -/******************************************************************************/ -/******** Page Allocation/SQLITE_CONFIG_PCACHE Related Functions **************/ - -/* -** This function is called during initialization if a static buffer is -** supplied to use for the page-cache by passing the SQLITE_CONFIG_PAGECACHE -** verb to sqlite3_config(). Parameter pBuf points to an allocation large -** enough to contain 'n' buffers of 'sz' bytes each. -** -** This routine is called from sqlite3_initialize() and so it is guaranteed -** to be serialized already. There is no need for further mutexing. -*/ -SQLITE_PRIVATE void sqlite3PCacheBufferSetup(void *pBuf, int sz, int n){ - if( pcache1.isInit ){ - PgFreeslot *p; - sz = ROUNDDOWN8(sz); - pcache1.szSlot = sz; - pcache1.nSlot = pcache1.nFreeSlot = n; - pcache1.nReserve = n>90 ? 10 : (n/10 + 1); - pcache1.pStart = pBuf; - pcache1.pFree = 0; - pcache1.bUnderPressure = 0; - while( n-- ){ - p = (PgFreeslot*)pBuf; - p->pNext = pcache1.pFree; - pcache1.pFree = p; - pBuf = (void*)&((char*)pBuf)[sz]; - } - pcache1.pEnd = pBuf; - } -} - -/* -** Malloc function used within this file to allocate space from the buffer -** configured using sqlite3_config(SQLITE_CONFIG_PAGECACHE) option. If no -** such buffer exists or there is no space left in it, this function falls -** back to sqlite3Malloc(). -** -** Multiple threads can run this routine at the same time. Global variables -** in pcache1 need to be protected via mutex. -*/ -static void *pcache1Alloc(int nByte){ - void *p = 0; - assert( sqlite3_mutex_notheld(pcache1.grp.mutex) ); - sqlite3StatusSet(SQLITE_STATUS_PAGECACHE_SIZE, nByte); - if( nByte<=pcache1.szSlot ){ - sqlite3_mutex_enter(pcache1.mutex); - p = (PgHdr1 *)pcache1.pFree; - if( p ){ - pcache1.pFree = pcache1.pFree->pNext; - pcache1.nFreeSlot--; - pcache1.bUnderPressure = pcache1.nFreeSlot=0 ); - sqlite3StatusAdd(SQLITE_STATUS_PAGECACHE_USED, 1); - } - sqlite3_mutex_leave(pcache1.mutex); - } - if( p==0 ){ - /* Memory is not available in the SQLITE_CONFIG_PAGECACHE pool. Get - ** it from sqlite3Malloc instead. 
- */ - p = sqlite3Malloc(nByte); -#ifndef SQLITE_DISABLE_PAGECACHE_OVERFLOW_STATS - if( p ){ - int sz = sqlite3MallocSize(p); - sqlite3_mutex_enter(pcache1.mutex); - sqlite3StatusAdd(SQLITE_STATUS_PAGECACHE_OVERFLOW, sz); - sqlite3_mutex_leave(pcache1.mutex); - } -#endif - sqlite3MemdebugSetType(p, MEMTYPE_PCACHE); - } - return p; -} - -/* -** Free an allocated buffer obtained from pcache1Alloc(). -*/ -static int pcache1Free(void *p){ - int nFreed = 0; - if( p==0 ) return 0; - if( p>=pcache1.pStart && ppNext = pcache1.pFree; - pcache1.pFree = pSlot; - pcache1.nFreeSlot++; - pcache1.bUnderPressure = pcache1.nFreeSlot=pcache1.pStart && ppGroup->mutex) ); - pcache1LeaveMutex(pCache->pGroup); -#ifdef SQLITE_PCACHE_SEPARATE_HEADER - pPg = pcache1Alloc(pCache->szPage); - p = sqlite3Malloc(sizeof(PgHdr1) + pCache->szExtra); - if( !pPg || !p ){ - pcache1Free(pPg); - sqlite3_free(p); - pPg = 0; - } -#else - pPg = pcache1Alloc(sizeof(PgHdr1) + pCache->szPage + pCache->szExtra); - p = (PgHdr1 *)&((u8 *)pPg)[pCache->szPage]; -#endif - pcache1EnterMutex(pCache->pGroup); - - if( pPg ){ - p->page.pBuf = pPg; - p->page.pExtra = &p[1]; - if( pCache->bPurgeable ){ - pCache->pGroup->nCurrentPage++; - } - return p; - } - return 0; -} - -/* -** Free a page object allocated by pcache1AllocPage(). -** -** The pointer is allowed to be NULL, which is prudent. But it turns out -** that the current implementation happens to never call this routine -** with a NULL pointer, so we mark the NULL test with ALWAYS(). -*/ -static void pcache1FreePage(PgHdr1 *p){ - if( ALWAYS(p) ){ - PCache1 *pCache = p->pCache; - assert( sqlite3_mutex_held(p->pCache->pGroup->mutex) ); - pcache1Free(p->page.pBuf); -#ifdef SQLITE_PCACHE_SEPARATE_HEADER - sqlite3_free(p); -#endif - if( pCache->bPurgeable ){ - pCache->pGroup->nCurrentPage--; - } - } -} - -/* -** Malloc function used by SQLite to obtain space from the buffer configured -** using sqlite3_config(SQLITE_CONFIG_PAGECACHE) option. If no such buffer -** exists, this function falls back to sqlite3Malloc(). -*/ -SQLITE_PRIVATE void *sqlite3PageMalloc(int sz){ - return pcache1Alloc(sz); -} - -/* -** Free an allocated buffer obtained from sqlite3PageMalloc(). -*/ -SQLITE_PRIVATE void sqlite3PageFree(void *p){ - pcache1Free(p); -} - - -/* -** Return true if it desirable to avoid allocating a new page cache -** entry. -** -** If memory was allocated specifically to the page cache using -** SQLITE_CONFIG_PAGECACHE but that memory has all been used, then -** it is desirable to avoid allocating a new page cache entry because -** presumably SQLITE_CONFIG_PAGECACHE was suppose to be sufficient -** for all page cache needs and we should not need to spill the -** allocation onto the heap. -** -** Or, the heap is used for all page cache memory but the heap is -** under memory pressure, then again it is desirable to avoid -** allocating a new page cache entry in order to avoid stressing -** the heap even further. -*/ -static int pcache1UnderMemoryPressure(PCache1 *pCache){ - if( pcache1.nSlot && (pCache->szPage+pCache->szExtra)<=pcache1.szSlot ){ - return pcache1.bUnderPressure; - }else{ - return sqlite3HeapNearlyFull(); - } -} - -/******************************************************************************/ -/******** General Implementation Functions ************************************/ - -/* -** This function is used to resize the hash table used by the cache passed -** as the first argument. -** -** The PCache mutex must be held when this function is called. 
-*/
-static int pcache1ResizeHash(PCache1 *p){
-  PgHdr1 **apNew;
-  unsigned int nNew;
-  unsigned int i;
-
-  assert( sqlite3_mutex_held(p->pGroup->mutex) );
-
-  nNew = p->nHash*2;
-  if( nNew<256 ){
-    nNew = 256;
-  }
-
-  pcache1LeaveMutex(p->pGroup);
-  if( p->nHash ){ sqlite3BeginBenignMalloc(); }
-  apNew = (PgHdr1 **)sqlite3MallocZero(sizeof(PgHdr1 *)*nNew);
-  if( p->nHash ){ sqlite3EndBenignMalloc(); }
-  pcache1EnterMutex(p->pGroup);
-  if( apNew ){
-    for(i=0; i<p->nHash; i++){
-      PgHdr1 *pPage;
-      PgHdr1 *pNext = p->apHash[i];
-      while( (pPage = pNext)!=0 ){
-        unsigned int h = pPage->iKey % nNew;
-        pNext = pPage->pNext;
-        pPage->pNext = apNew[h];
-        apNew[h] = pPage;
-      }
-    }
-    sqlite3_free(p->apHash);
-    p->apHash = apNew;
-    p->nHash = nNew;
-  }
-
-  return (p->apHash ? SQLITE_OK : SQLITE_NOMEM);
-}
-
-/*
-** This function is used internally to remove the page pPage from the
-** PGroup LRU list, if is part of it. If pPage is not part of the PGroup
-** LRU list, then this function is a no-op.
-**
-** The PGroup mutex must be held when this function is called.
-*/
-static void pcache1PinPage(PgHdr1 *pPage){
-  PCache1 *pCache;
-  PGroup *pGroup;
-
-  assert( pPage!=0 );
-  assert( pPage->isPinned==0 );
-  pCache = pPage->pCache;
-  pGroup = pCache->pGroup;
-  assert( pPage->pLruNext || pPage==pGroup->pLruTail );
-  assert( pPage->pLruPrev || pPage==pGroup->pLruHead );
-  assert( sqlite3_mutex_held(pGroup->mutex) );
-  if( pPage->pLruPrev ){
-    pPage->pLruPrev->pLruNext = pPage->pLruNext;
-  }else{
-    pGroup->pLruHead = pPage->pLruNext;
-  }
-  if( pPage->pLruNext ){
-    pPage->pLruNext->pLruPrev = pPage->pLruPrev;
-  }else{
-    pGroup->pLruTail = pPage->pLruPrev;
-  }
-  pPage->pLruNext = 0;
-  pPage->pLruPrev = 0;
-  pPage->isPinned = 1;
-  pCache->nRecyclable--;
-}
-
-
-/*
-** Remove the page supplied as an argument from the hash table
-** (PCache1.apHash structure) that it is currently stored in.
-**
-** The PGroup mutex must be held when this function is called.
-*/
-static void pcache1RemoveFromHash(PgHdr1 *pPage){
-  unsigned int h;
-  PCache1 *pCache = pPage->pCache;
-  PgHdr1 **pp;
-
-  assert( sqlite3_mutex_held(pCache->pGroup->mutex) );
-  h = pPage->iKey % pCache->nHash;
-  for(pp=&pCache->apHash[h]; (*pp)!=pPage; pp=&(*pp)->pNext);
-  *pp = (*pp)->pNext;
-
-  pCache->nPage--;
-}
-
-/*
-** If there are currently more than nMaxPage pages allocated, try
-** to recycle pages to reduce the number allocated to nMaxPage.
-*/
-static void pcache1EnforceMaxPage(PGroup *pGroup){
-  assert( sqlite3_mutex_held(pGroup->mutex) );
-  while( pGroup->nCurrentPage>pGroup->nMaxPage && pGroup->pLruTail ){
-    PgHdr1 *p = pGroup->pLruTail;
-    assert( p->pCache->pGroup==pGroup );
-    assert( p->isPinned==0 );
-    pcache1PinPage(p);
-    pcache1RemoveFromHash(p);
-    pcache1FreePage(p);
-  }
-}
-
-/*
-** Discard all pages from cache pCache with a page number (key value)
-** greater than or equal to iLimit. Any pinned pages that meet this
-** criteria are unpinned before they are discarded.
-**
-** The PCache mutex must be held when this function is called.
-*/ -static void pcache1TruncateUnsafe( - PCache1 *pCache, /* The cache to truncate */ - unsigned int iLimit /* Drop pages with this pgno or larger */ -){ - TESTONLY( unsigned int nPage = 0; ) /* To assert pCache->nPage is correct */ - unsigned int h; - assert( sqlite3_mutex_held(pCache->pGroup->mutex) ); - for(h=0; hnHash; h++){ - PgHdr1 **pp = &pCache->apHash[h]; - PgHdr1 *pPage; - while( (pPage = *pp)!=0 ){ - if( pPage->iKey>=iLimit ){ - pCache->nPage--; - *pp = pPage->pNext; - if( !pPage->isPinned ) pcache1PinPage(pPage); - pcache1FreePage(pPage); - }else{ - pp = &pPage->pNext; - TESTONLY( nPage++; ) - } - } - } - assert( pCache->nPage==nPage ); -} - -/******************************************************************************/ -/******** sqlite3_pcache Methods **********************************************/ - -/* -** Implementation of the sqlite3_pcache.xInit method. -*/ -static int pcache1Init(void *NotUsed){ - UNUSED_PARAMETER(NotUsed); - assert( pcache1.isInit==0 ); - memset(&pcache1, 0, sizeof(pcache1)); - if( sqlite3GlobalConfig.bCoreMutex ){ - pcache1.grp.mutex = sqlite3_mutex_alloc(SQLITE_MUTEX_STATIC_LRU); - pcache1.mutex = sqlite3_mutex_alloc(SQLITE_MUTEX_STATIC_PMEM); - } - pcache1.grp.mxPinned = 10; - pcache1.isInit = 1; - return SQLITE_OK; -} - -/* -** Implementation of the sqlite3_pcache.xShutdown method. -** Note that the static mutex allocated in xInit does -** not need to be freed. -*/ -static void pcache1Shutdown(void *NotUsed){ - UNUSED_PARAMETER(NotUsed); - assert( pcache1.isInit!=0 ); - memset(&pcache1, 0, sizeof(pcache1)); -} - -/* -** Implementation of the sqlite3_pcache.xCreate method. -** -** Allocate a new cache. -*/ -static sqlite3_pcache *pcache1Create(int szPage, int szExtra, int bPurgeable){ - PCache1 *pCache; /* The newly created page cache */ - PGroup *pGroup; /* The group the new page cache will belong to */ - int sz; /* Bytes of memory required to allocate the new cache */ - - /* - ** The separateCache variable is true if each PCache has its own private - ** PGroup. In other words, separateCache is true for mode (1) where no - ** mutexing is required. - ** - ** * Always use a unified cache (mode-2) if ENABLE_MEMORY_MANAGEMENT - ** - ** * Always use a unified cache in single-threaded applications - ** - ** * Otherwise (if multi-threaded and ENABLE_MEMORY_MANAGEMENT is off) - ** use separate caches (mode-1) - */ -#if defined(SQLITE_ENABLE_MEMORY_MANAGEMENT) || SQLITE_THREADSAFE==0 - const int separateCache = 0; -#else - int separateCache = sqlite3GlobalConfig.bCoreMutex>0; -#endif - - assert( (szPage & (szPage-1))==0 && szPage>=512 && szPage<=65536 ); - assert( szExtra < 300 ); - - sz = sizeof(PCache1) + sizeof(PGroup)*separateCache; - pCache = (PCache1 *)sqlite3MallocZero(sz); - if( pCache ){ - if( separateCache ){ - pGroup = (PGroup*)&pCache[1]; - pGroup->mxPinned = 10; - }else{ - pGroup = &pcache1.grp; - } - pCache->pGroup = pGroup; - pCache->szPage = szPage; - pCache->szExtra = szExtra; - pCache->bPurgeable = (bPurgeable ? 1 : 0); - if( bPurgeable ){ - pCache->nMin = 10; - pcache1EnterMutex(pGroup); - pGroup->nMinPage += pCache->nMin; - pGroup->mxPinned = pGroup->nMaxPage + 10 - pGroup->nMinPage; - pcache1LeaveMutex(pGroup); - } - } - return (sqlite3_pcache *)pCache; -} - -/* -** Implementation of the sqlite3_pcache.xCachesize method. -** -** Configure the cache_size limit for a cache. 
-*/ -static void pcache1Cachesize(sqlite3_pcache *p, int nMax){ - PCache1 *pCache = (PCache1 *)p; - if( pCache->bPurgeable ){ - PGroup *pGroup = pCache->pGroup; - pcache1EnterMutex(pGroup); - pGroup->nMaxPage += (nMax - pCache->nMax); - pGroup->mxPinned = pGroup->nMaxPage + 10 - pGroup->nMinPage; - pCache->nMax = nMax; - pCache->n90pct = pCache->nMax*9/10; - pcache1EnforceMaxPage(pGroup); - pcache1LeaveMutex(pGroup); - } -} - -/* -** Implementation of the sqlite3_pcache.xShrink method. -** -** Free up as much memory as possible. -*/ -static void pcache1Shrink(sqlite3_pcache *p){ - PCache1 *pCache = (PCache1*)p; - if( pCache->bPurgeable ){ - PGroup *pGroup = pCache->pGroup; - int savedMaxPage; - pcache1EnterMutex(pGroup); - savedMaxPage = pGroup->nMaxPage; - pGroup->nMaxPage = 0; - pcache1EnforceMaxPage(pGroup); - pGroup->nMaxPage = savedMaxPage; - pcache1LeaveMutex(pGroup); - } -} - -/* -** Implementation of the sqlite3_pcache.xPagecount method. -*/ -static int pcache1Pagecount(sqlite3_pcache *p){ - int n; - PCache1 *pCache = (PCache1*)p; - pcache1EnterMutex(pCache->pGroup); - n = pCache->nPage; - pcache1LeaveMutex(pCache->pGroup); - return n; -} - -/* -** Implementation of the sqlite3_pcache.xFetch method. -** -** Fetch a page by key value. -** -** Whether or not a new page may be allocated by this function depends on -** the value of the createFlag argument. 0 means do not allocate a new -** page. 1 means allocate a new page if space is easily available. 2 -** means to try really hard to allocate a new page. -** -** For a non-purgeable cache (a cache used as the storage for an in-memory -** database) there is really no difference between createFlag 1 and 2. So -** the calling function (pcache.c) will never have a createFlag of 1 on -** a non-purgeable cache. -** -** There are three different approaches to obtaining space for a page, -** depending on the value of parameter createFlag (which may be 0, 1 or 2). -** -** 1. Regardless of the value of createFlag, the cache is searched for a -** copy of the requested page. If one is found, it is returned. -** -** 2. If createFlag==0 and the page is not already in the cache, NULL is -** returned. -** -** 3. If createFlag is 1, and the page is not already in the cache, then -** return NULL (do not allocate a new page) if any of the following -** conditions are true: -** -** (a) the number of pages pinned by the cache is greater than -** PCache1.nMax, or -** -** (b) the number of pages pinned by the cache is greater than -** the sum of nMax for all purgeable caches, less the sum of -** nMin for all other purgeable caches, or -** -** 4. If none of the first three conditions apply and the cache is marked -** as purgeable, and if one of the following is true: -** -** (a) The number of pages allocated for the cache is already -** PCache1.nMax, or -** -** (b) The number of pages allocated for all purgeable caches is -** already equal to or greater than the sum of nMax for all -** purgeable caches, -** -** (c) The system is under memory pressure and wants to avoid -** unnecessary pages cache entry allocations -** -** then attempt to recycle a page from the LRU list. If it is the right -** size, return the recycled buffer. Otherwise, free the buffer and -** proceed to step 5. -** -** 5. Otherwise, allocate and return a new page buffer. 
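(Editor's note: the five steps above can be condensed into a small decision function. The sketch below is not SQLite's code: the Cache struct, its fields, and fetchPolicy() are invented, and the thresholds are simplified, e.g. the real code also consults the PGroup-wide mxPinned limit and only recycles when an LRU page of the right size exists. It is meant only to show the order in which createFlag (0, 1 or 2) drives the decisions.)

typedef struct Cache Cache;
struct Cache {
  unsigned nPage;          /* pages currently held by this cache */
  unsigned nRecyclable;    /* unpinned pages on the LRU list */
  unsigned nMax, n90pct;   /* configured limit and 90% of it */
  int bPurgeable;          /* cache may shed pages under pressure */
  int bUnderPressure;      /* heap or PAGECACHE pool is running low */
};

/* Return 0 = give the caller NULL, 1 = try to recycle an LRU page,
** 2 = allocate a fresh page.  Step 1 (hash lookup) is assumed to have
** already missed. */
static int fetchPolicy(Cache *p, int createFlag){
  unsigned nPinned = p->nPage - p->nRecyclable;
  if( createFlag==0 ) return 0;                               /* step 2 */
  if( createFlag==1 && (nPinned>=p->n90pct || p->bUnderPressure) ){
    return 0;                                                 /* step 3 */
  }
  if( p->bPurgeable && (p->nPage+1>=p->nMax || p->bUnderPressure) ){
    return 1;                                                 /* step 4 */
  }
  return 2;                                                   /* step 5 */
}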
-*/ -static sqlite3_pcache_page *pcache1Fetch( - sqlite3_pcache *p, - unsigned int iKey, - int createFlag -){ - unsigned int nPinned; - PCache1 *pCache = (PCache1 *)p; - PGroup *pGroup; - PgHdr1 *pPage = 0; - - assert( offsetof(PgHdr1,page)==0 ); - assert( pCache->bPurgeable || createFlag!=1 ); - assert( pCache->bPurgeable || pCache->nMin==0 ); - assert( pCache->bPurgeable==0 || pCache->nMin==10 ); - assert( pCache->nMin==0 || pCache->bPurgeable ); - pcache1EnterMutex(pGroup = pCache->pGroup); - - /* Step 1: Search the hash table for an existing entry. */ - if( pCache->nHash>0 ){ - unsigned int h = iKey % pCache->nHash; - for(pPage=pCache->apHash[h]; pPage&&pPage->iKey!=iKey; pPage=pPage->pNext); - } - - /* Step 2: Abort if no existing page is found and createFlag is 0 */ - if( pPage ){ - if( !pPage->isPinned ) pcache1PinPage(pPage); - goto fetch_out; - } - if( createFlag==0 ){ - goto fetch_out; - } - - /* The pGroup local variable will normally be initialized by the - ** pcache1EnterMutex() macro above. But if SQLITE_MUTEX_OMIT is defined, - ** then pcache1EnterMutex() is a no-op, so we have to initialize the - ** local variable here. Delaying the initialization of pGroup is an - ** optimization: The common case is to exit the module before reaching - ** this point. - */ -#ifdef SQLITE_MUTEX_OMIT - pGroup = pCache->pGroup; -#endif - - /* Step 3: Abort if createFlag is 1 but the cache is nearly full */ - assert( pCache->nPage >= pCache->nRecyclable ); - nPinned = pCache->nPage - pCache->nRecyclable; - assert( pGroup->mxPinned == pGroup->nMaxPage + 10 - pGroup->nMinPage ); - assert( pCache->n90pct == pCache->nMax*9/10 ); - if( createFlag==1 && ( - nPinned>=pGroup->mxPinned - || nPinned>=pCache->n90pct - || pcache1UnderMemoryPressure(pCache) - )){ - goto fetch_out; - } - - if( pCache->nPage>=pCache->nHash && pcache1ResizeHash(pCache) ){ - goto fetch_out; - } - assert( pCache->nHash>0 && pCache->apHash ); - - /* Step 4. Try to recycle a page. */ - if( pCache->bPurgeable && pGroup->pLruTail && ( - (pCache->nPage+1>=pCache->nMax) - || pGroup->nCurrentPage>=pGroup->nMaxPage - || pcache1UnderMemoryPressure(pCache) - )){ - PCache1 *pOther; - pPage = pGroup->pLruTail; - assert( pPage->isPinned==0 ); - pcache1RemoveFromHash(pPage); - pcache1PinPage(pPage); - pOther = pPage->pCache; - - /* We want to verify that szPage and szExtra are the same for pOther - ** and pCache. Assert that we can verify this by comparing sums. */ - assert( (pCache->szPage & (pCache->szPage-1))==0 && pCache->szPage>=512 ); - assert( pCache->szExtra<512 ); - assert( (pOther->szPage & (pOther->szPage-1))==0 && pOther->szPage>=512 ); - assert( pOther->szExtra<512 ); - - if( pOther->szPage+pOther->szExtra != pCache->szPage+pCache->szExtra ){ - pcache1FreePage(pPage); - pPage = 0; - }else{ - pGroup->nCurrentPage -= (pOther->bPurgeable - pCache->bPurgeable); - } - } - - /* Step 5. If a usable page buffer has still not been found, - ** attempt to allocate a new one. 
- */ - if( !pPage ){ - if( createFlag==1 ) sqlite3BeginBenignMalloc(); - pPage = pcache1AllocPage(pCache); - if( createFlag==1 ) sqlite3EndBenignMalloc(); - } - - if( pPage ){ - unsigned int h = iKey % pCache->nHash; - pCache->nPage++; - pPage->iKey = iKey; - pPage->pNext = pCache->apHash[h]; - pPage->pCache = pCache; - pPage->pLruPrev = 0; - pPage->pLruNext = 0; - pPage->isPinned = 1; - *(void **)pPage->page.pExtra = 0; - pCache->apHash[h] = pPage; - } - -fetch_out: - if( pPage && iKey>pCache->iMaxKey ){ - pCache->iMaxKey = iKey; - } - pcache1LeaveMutex(pGroup); - return (sqlite3_pcache_page*)pPage; -} - - -/* -** Implementation of the sqlite3_pcache.xUnpin method. -** -** Mark a page as unpinned (eligible for asynchronous recycling). -*/ -static void pcache1Unpin( - sqlite3_pcache *p, - sqlite3_pcache_page *pPg, - int reuseUnlikely -){ - PCache1 *pCache = (PCache1 *)p; - PgHdr1 *pPage = (PgHdr1 *)pPg; - PGroup *pGroup = pCache->pGroup; - - assert( pPage->pCache==pCache ); - pcache1EnterMutex(pGroup); - - /* It is an error to call this function if the page is already - ** part of the PGroup LRU list. - */ - assert( pPage->pLruPrev==0 && pPage->pLruNext==0 ); - assert( pGroup->pLruHead!=pPage && pGroup->pLruTail!=pPage ); - assert( pPage->isPinned==1 ); - - if( reuseUnlikely || pGroup->nCurrentPage>pGroup->nMaxPage ){ - pcache1RemoveFromHash(pPage); - pcache1FreePage(pPage); - }else{ - /* Add the page to the PGroup LRU list. */ - if( pGroup->pLruHead ){ - pGroup->pLruHead->pLruPrev = pPage; - pPage->pLruNext = pGroup->pLruHead; - pGroup->pLruHead = pPage; - }else{ - pGroup->pLruTail = pPage; - pGroup->pLruHead = pPage; - } - pCache->nRecyclable++; - pPage->isPinned = 0; - } - - pcache1LeaveMutex(pCache->pGroup); -} - -/* -** Implementation of the sqlite3_pcache.xRekey method. -*/ -static void pcache1Rekey( - sqlite3_pcache *p, - sqlite3_pcache_page *pPg, - unsigned int iOld, - unsigned int iNew -){ - PCache1 *pCache = (PCache1 *)p; - PgHdr1 *pPage = (PgHdr1 *)pPg; - PgHdr1 **pp; - unsigned int h; - assert( pPage->iKey==iOld ); - assert( pPage->pCache==pCache ); - - pcache1EnterMutex(pCache->pGroup); - - h = iOld%pCache->nHash; - pp = &pCache->apHash[h]; - while( (*pp)!=pPage ){ - pp = &(*pp)->pNext; - } - *pp = pPage->pNext; - - h = iNew%pCache->nHash; - pPage->iKey = iNew; - pPage->pNext = pCache->apHash[h]; - pCache->apHash[h] = pPage; - if( iNew>pCache->iMaxKey ){ - pCache->iMaxKey = iNew; - } - - pcache1LeaveMutex(pCache->pGroup); -} - -/* -** Implementation of the sqlite3_pcache.xTruncate method. -** -** Discard all unpinned pages in the cache with a page number equal to -** or greater than parameter iLimit. Any pinned pages with a page number -** equal to or greater than iLimit are implicitly unpinned. -*/ -static void pcache1Truncate(sqlite3_pcache *p, unsigned int iLimit){ - PCache1 *pCache = (PCache1 *)p; - pcache1EnterMutex(pCache->pGroup); - if( iLimit<=pCache->iMaxKey ){ - pcache1TruncateUnsafe(pCache, iLimit); - pCache->iMaxKey = iLimit-1; - } - pcache1LeaveMutex(pCache->pGroup); -} - -/* -** Implementation of the sqlite3_pcache.xDestroy method. -** -** Destroy a cache allocated using pcache1Create(). 
-*/ -static void pcache1Destroy(sqlite3_pcache *p){ - PCache1 *pCache = (PCache1 *)p; - PGroup *pGroup = pCache->pGroup; - assert( pCache->bPurgeable || (pCache->nMax==0 && pCache->nMin==0) ); - pcache1EnterMutex(pGroup); - pcache1TruncateUnsafe(pCache, 0); - assert( pGroup->nMaxPage >= pCache->nMax ); - pGroup->nMaxPage -= pCache->nMax; - assert( pGroup->nMinPage >= pCache->nMin ); - pGroup->nMinPage -= pCache->nMin; - pGroup->mxPinned = pGroup->nMaxPage + 10 - pGroup->nMinPage; - pcache1EnforceMaxPage(pGroup); - pcache1LeaveMutex(pGroup); - sqlite3_free(pCache->apHash); - sqlite3_free(pCache); -} - -/* -** This function is called during initialization (sqlite3_initialize()) to -** install the default pluggable cache module, assuming the user has not -** already provided an alternative. -*/ -SQLITE_PRIVATE void sqlite3PCacheSetDefault(void){ - static const sqlite3_pcache_methods2 defaultMethods = { - 1, /* iVersion */ - 0, /* pArg */ - pcache1Init, /* xInit */ - pcache1Shutdown, /* xShutdown */ - pcache1Create, /* xCreate */ - pcache1Cachesize, /* xCachesize */ - pcache1Pagecount, /* xPagecount */ - pcache1Fetch, /* xFetch */ - pcache1Unpin, /* xUnpin */ - pcache1Rekey, /* xRekey */ - pcache1Truncate, /* xTruncate */ - pcache1Destroy, /* xDestroy */ - pcache1Shrink /* xShrink */ - }; - sqlite3_config(SQLITE_CONFIG_PCACHE2, &defaultMethods); -} - -#ifdef SQLITE_ENABLE_MEMORY_MANAGEMENT -/* -** This function is called to free superfluous dynamically allocated memory -** held by the pager system. Memory in use by any SQLite pager allocated -** by the current thread may be sqlite3_free()ed. -** -** nReq is the number of bytes of memory required. Once this much has -** been released, the function returns. The return value is the total number -** of bytes of memory released. -*/ -SQLITE_PRIVATE int sqlite3PcacheReleaseMemory(int nReq){ - int nFree = 0; - assert( sqlite3_mutex_notheld(pcache1.grp.mutex) ); - assert( sqlite3_mutex_notheld(pcache1.mutex) ); - if( pcache1.pStart==0 ){ - PgHdr1 *p; - pcache1EnterMutex(&pcache1.grp); - while( (nReq<0 || nFreepage.pBuf); -#ifdef SQLITE_PCACHE_SEPARATE_HEADER - nFree += sqlite3MemSize(p); -#endif - assert( p->isPinned==0 ); - pcache1PinPage(p); - pcache1RemoveFromHash(p); - pcache1FreePage(p); - } - pcache1LeaveMutex(&pcache1.grp); - } - return nFree; -} -#endif /* SQLITE_ENABLE_MEMORY_MANAGEMENT */ - -#ifdef SQLITE_TEST -/* -** This function is used by test procedures to inspect the internal state -** of the global cache. -*/ -SQLITE_PRIVATE void sqlite3PcacheStats( - int *pnCurrent, /* OUT: Total number of pages cached */ - int *pnMax, /* OUT: Global maximum cache size */ - int *pnMin, /* OUT: Sum of PCache1.nMin for purgeable caches */ - int *pnRecyclable /* OUT: Total number of pages available for recycling */ -){ - PgHdr1 *p; - int nRecyclable = 0; - for(p=pcache1.grp.pLruHead; p; p=p->pLruNext){ - assert( p->isPinned==0 ); - nRecyclable++; - } - *pnCurrent = pcache1.grp.nCurrentPage; - *pnMax = (int)pcache1.grp.nMaxPage; - *pnMin = (int)pcache1.grp.nMinPage; - *pnRecyclable = nRecyclable; -} -#endif - -/************** End of pcache1.c *********************************************/ -/************** Begin file rowset.c ******************************************/ -/* -** 2008 December 3 -** -** The author disclaims copyright to this source code. In place of -** a legal notice, here is a blessing: -** -** May you do good and not evil. -** May you find forgiveness for yourself and forgive others. 
-** May you share freely, never taking more than you give. -** -************************************************************************* -** -** This module implements an object we call a "RowSet". -** -** The RowSet object is a collection of rowids. Rowids -** are inserted into the RowSet in an arbitrary order. Inserts -** can be intermixed with tests to see if a given rowid has been -** previously inserted into the RowSet. -** -** After all inserts are finished, it is possible to extract the -** elements of the RowSet in sorted order. Once this extraction -** process has started, no new elements may be inserted. -** -** Hence, the primitive operations for a RowSet are: -** -** CREATE -** INSERT -** TEST -** SMALLEST -** DESTROY -** -** The CREATE and DESTROY primitives are the constructor and destructor, -** obviously. The INSERT primitive adds a new element to the RowSet. -** TEST checks to see if an element is already in the RowSet. SMALLEST -** extracts the least value from the RowSet. -** -** The INSERT primitive might allocate additional memory. Memory is -** allocated in chunks so most INSERTs do no allocation. There is an -** upper bound on the size of allocated memory. No memory is freed -** until DESTROY. -** -** The TEST primitive includes a "batch" number. The TEST primitive -** will only see elements that were inserted before the last change -** in the batch number. In other words, if an INSERT occurs between -** two TESTs where the TESTs have the same batch nubmer, then the -** value added by the INSERT will not be visible to the second TEST. -** The initial batch number is zero, so if the very first TEST contains -** a non-zero batch number, it will see all prior INSERTs. -** -** No INSERTs may occurs after a SMALLEST. An assertion will fail if -** that is attempted. -** -** The cost of an INSERT is roughly constant. (Sometime new memory -** has to be allocated on an INSERT.) The cost of a TEST with a new -** batch number is O(NlogN) where N is the number of elements in the RowSet. -** The cost of a TEST using the same batch number is O(logN). The cost -** of the first SMALLEST is O(NlogN). Second and subsequent SMALLEST -** primitives are constant time. The cost of DESTROY is O(N). -** -** There is an added cost of O(N) when switching between TEST and -** SMALLEST primitives. -*/ - - -/* -** Target size for allocation chunks. -*/ -#define ROWSET_ALLOCATION_SIZE 1024 - -/* -** The number of rowset entries per allocation chunk. -*/ -#define ROWSET_ENTRY_PER_CHUNK \ - ((ROWSET_ALLOCATION_SIZE-8)/sizeof(struct RowSetEntry)) - -/* -** Each entry in a RowSet is an instance of the following object. -** -** This same object is reused to store a linked list of trees of RowSetEntry -** objects. In that alternative use, pRight points to the next entry -** in the list, pLeft points to the tree, and v is unused. The -** RowSet.pForest value points to the head of this forest list. -*/ -struct RowSetEntry { - i64 v; /* ROWID value for this entry */ - struct RowSetEntry *pRight; /* Right subtree (larger entries) or list */ - struct RowSetEntry *pLeft; /* Left subtree (smaller entries) */ -}; - -/* -** RowSetEntry objects are allocated in large chunks (instances of the -** following structure) to reduce memory allocation overhead. The -** chunks are kept on a linked list so that they can be deallocated -** when the RowSet is destroyed. 
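(Editor's note: the chunked allocation strategy described above, and used by rowSetEntryAlloc() further down, can be shown with a standalone sketch. This is not SQLite's code; the Pool/Chunk/Entry names and the chunk size are invented. The point is that most allocations are a pointer bump into the current chunk, and all memory is released in one pass when the set is destroyed.)

#include <stdlib.h>

#define ENTRIES_PER_CHUNK 64

typedef struct Entry { long long v; struct Entry *pRight, *pLeft; } Entry;
typedef struct Chunk { struct Chunk *pNextChunk; Entry aEntry[ENTRIES_PER_CHUNK]; } Chunk;
typedef struct Pool  { Chunk *pChunk; Entry *pFresh; unsigned nFresh; } Pool;

/* Return one uninitialized Entry, allocating a new chunk only when the
** current one is exhausted.  Returns NULL on OOM. */
static Entry *poolAlloc(Pool *p){
  if( p->nFresh==0 ){
    Chunk *pNew = malloc(sizeof(*pNew));
    if( pNew==0 ) return 0;
    pNew->pNextChunk = p->pChunk;
    p->pChunk = pNew;
    p->pFresh = pNew->aEntry;
    p->nFresh = ENTRIES_PER_CHUNK;
  }
  p->nFresh--;
  return p->pFresh++;
}

/* Release every chunk at once; individual entries are never freed. */
static void poolClear(Pool *p){
  Chunk *pChunk, *pNext;
  for(pChunk=p->pChunk; pChunk; pChunk=pNext){
    pNext = pChunk->pNextChunk;
    free(pChunk);
  }
  p->pChunk = 0;
  p->pFresh = 0;
  p->nFresh = 0;
}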
-*/ -struct RowSetChunk { - struct RowSetChunk *pNextChunk; /* Next chunk on list of them all */ - struct RowSetEntry aEntry[ROWSET_ENTRY_PER_CHUNK]; /* Allocated entries */ -}; - -/* -** A RowSet in an instance of the following structure. -** -** A typedef of this structure if found in sqliteInt.h. -*/ -struct RowSet { - struct RowSetChunk *pChunk; /* List of all chunk allocations */ - sqlite3 *db; /* The database connection */ - struct RowSetEntry *pEntry; /* List of entries using pRight */ - struct RowSetEntry *pLast; /* Last entry on the pEntry list */ - struct RowSetEntry *pFresh; /* Source of new entry objects */ - struct RowSetEntry *pForest; /* List of binary trees of entries */ - u16 nFresh; /* Number of objects on pFresh */ - u16 rsFlags; /* Various flags */ - int iBatch; /* Current insert batch */ -}; - -/* -** Allowed values for RowSet.rsFlags -*/ -#define ROWSET_SORTED 0x01 /* True if RowSet.pEntry is sorted */ -#define ROWSET_NEXT 0x02 /* True if sqlite3RowSetNext() has been called */ - -/* -** Turn bulk memory into a RowSet object. N bytes of memory -** are available at pSpace. The db pointer is used as a memory context -** for any subsequent allocations that need to occur. -** Return a pointer to the new RowSet object. -** -** It must be the case that N is sufficient to make a Rowset. If not -** an assertion fault occurs. -** -** If N is larger than the minimum, use the surplus as an initial -** allocation of entries available to be filled. -*/ -SQLITE_PRIVATE RowSet *sqlite3RowSetInit(sqlite3 *db, void *pSpace, unsigned int N){ - RowSet *p; - assert( N >= ROUND8(sizeof(*p)) ); - p = pSpace; - p->pChunk = 0; - p->db = db; - p->pEntry = 0; - p->pLast = 0; - p->pForest = 0; - p->pFresh = (struct RowSetEntry*)(ROUND8(sizeof(*p)) + (char*)p); - p->nFresh = (u16)((N - ROUND8(sizeof(*p)))/sizeof(struct RowSetEntry)); - p->rsFlags = ROWSET_SORTED; - p->iBatch = 0; - return p; -} - -/* -** Deallocate all chunks from a RowSet. This frees all memory that -** the RowSet has allocated over its lifetime. This routine is -** the destructor for the RowSet. -*/ -SQLITE_PRIVATE void sqlite3RowSetClear(RowSet *p){ - struct RowSetChunk *pChunk, *pNextChunk; - for(pChunk=p->pChunk; pChunk; pChunk = pNextChunk){ - pNextChunk = pChunk->pNextChunk; - sqlite3DbFree(p->db, pChunk); - } - p->pChunk = 0; - p->nFresh = 0; - p->pEntry = 0; - p->pLast = 0; - p->pForest = 0; - p->rsFlags = ROWSET_SORTED; -} - -/* -** Allocate a new RowSetEntry object that is associated with the -** given RowSet. Return a pointer to the new and completely uninitialized -** objected. -** -** In an OOM situation, the RowSet.db->mallocFailed flag is set and this -** routine returns NULL. -*/ -static struct RowSetEntry *rowSetEntryAlloc(RowSet *p){ - assert( p!=0 ); - if( p->nFresh==0 ){ - struct RowSetChunk *pNew; - pNew = sqlite3DbMallocRaw(p->db, sizeof(*pNew)); - if( pNew==0 ){ - return 0; - } - pNew->pNextChunk = p->pChunk; - p->pChunk = pNew; - p->pFresh = pNew->aEntry; - p->nFresh = ROWSET_ENTRY_PER_CHUNK; - } - p->nFresh--; - return p->pFresh++; -} - -/* -** Insert a new value into a RowSet. -** -** The mallocFailed flag of the database connection is set if a -** memory allocation fails. 
-*/ -SQLITE_PRIVATE void sqlite3RowSetInsert(RowSet *p, i64 rowid){ - struct RowSetEntry *pEntry; /* The new entry */ - struct RowSetEntry *pLast; /* The last prior entry */ - - /* This routine is never called after sqlite3RowSetNext() */ - assert( p!=0 && (p->rsFlags & ROWSET_NEXT)==0 ); - - pEntry = rowSetEntryAlloc(p); - if( pEntry==0 ) return; - pEntry->v = rowid; - pEntry->pRight = 0; - pLast = p->pLast; - if( pLast ){ - if( (p->rsFlags & ROWSET_SORTED)!=0 && rowid<=pLast->v ){ - p->rsFlags &= ~ROWSET_SORTED; - } - pLast->pRight = pEntry; - }else{ - p->pEntry = pEntry; - } - p->pLast = pEntry; -} - -/* -** Merge two lists of RowSetEntry objects. Remove duplicates. -** -** The input lists are connected via pRight pointers and are -** assumed to each already be in sorted order. -*/ -static struct RowSetEntry *rowSetEntryMerge( - struct RowSetEntry *pA, /* First sorted list to be merged */ - struct RowSetEntry *pB /* Second sorted list to be merged */ -){ - struct RowSetEntry head; - struct RowSetEntry *pTail; - - pTail = &head; - while( pA && pB ){ - assert( pA->pRight==0 || pA->v<=pA->pRight->v ); - assert( pB->pRight==0 || pB->v<=pB->pRight->v ); - if( pA->vv ){ - pTail->pRight = pA; - pA = pA->pRight; - pTail = pTail->pRight; - }else if( pB->vv ){ - pTail->pRight = pB; - pB = pB->pRight; - pTail = pTail->pRight; - }else{ - pA = pA->pRight; - } - } - if( pA ){ - assert( pA->pRight==0 || pA->v<=pA->pRight->v ); - pTail->pRight = pA; - }else{ - assert( pB==0 || pB->pRight==0 || pB->v<=pB->pRight->v ); - pTail->pRight = pB; - } - return head.pRight; -} - -/* -** Sort all elements on the list of RowSetEntry objects into order of -** increasing v. -*/ -static struct RowSetEntry *rowSetEntrySort(struct RowSetEntry *pIn){ - unsigned int i; - struct RowSetEntry *pNext, *aBucket[40]; - - memset(aBucket, 0, sizeof(aBucket)); - while( pIn ){ - pNext = pIn->pRight; - pIn->pRight = 0; - for(i=0; aBucket[i]; i++){ - pIn = rowSetEntryMerge(aBucket[i], pIn); - aBucket[i] = 0; - } - aBucket[i] = pIn; - pIn = pNext; - } - pIn = 0; - for(i=0; ipLeft ){ - struct RowSetEntry *p; - rowSetTreeToList(pIn->pLeft, ppFirst, &p); - p->pRight = pIn; - }else{ - *ppFirst = pIn; - } - if( pIn->pRight ){ - rowSetTreeToList(pIn->pRight, &pIn->pRight, ppLast); - }else{ - *ppLast = pIn; - } - assert( (*ppLast)->pRight==0 ); -} - - -/* -** Convert a sorted list of elements (connected by pRight) into a binary -** tree with depth of iDepth. A depth of 1 means the tree contains a single -** node taken from the head of *ppList. A depth of 2 means a tree with -** three nodes. And so forth. -** -** Use as many entries from the input list as required and update the -** *ppList to point to the unused elements of the list. If the input -** list contains too few elements, then construct an incomplete tree -** and leave *ppList set to NULL. -** -** Return a pointer to the root of the constructed binary tree. -*/ -static struct RowSetEntry *rowSetNDeepTree( - struct RowSetEntry **ppList, - int iDepth -){ - struct RowSetEntry *p; /* Root of the new tree */ - struct RowSetEntry *pLeft; /* Left subtree */ - if( *ppList==0 ){ - return 0; - } - if( iDepth==1 ){ - p = *ppList; - *ppList = p->pRight; - p->pLeft = p->pRight = 0; - return p; - } - pLeft = rowSetNDeepTree(ppList, iDepth-1); - p = *ppList; - if( p==0 ){ - return pLeft; - } - p->pLeft = pLeft; - *ppList = p->pRight; - p->pRight = rowSetNDeepTree(ppList, iDepth-1); - return p; -} - -/* -** Convert a sorted list of elements into a binary tree. 
Make the tree -** as deep as it needs to be in order to contain the entire list. -*/ -static struct RowSetEntry *rowSetListToTree(struct RowSetEntry *pList){ - int iDepth; /* Depth of the tree so far */ - struct RowSetEntry *p; /* Current tree root */ - struct RowSetEntry *pLeft; /* Left subtree */ - - assert( pList!=0 ); - p = pList; - pList = p->pRight; - p->pLeft = p->pRight = 0; - for(iDepth=1; pList; iDepth++){ - pLeft = p; - p = pList; - pList = p->pRight; - p->pLeft = pLeft; - p->pRight = rowSetNDeepTree(&pList, iDepth); - } - return p; -} - -/* -** Take all the entries on p->pEntry and on the trees in p->pForest and -** sort them all together into one big ordered list on p->pEntry. -** -** This routine should only be called once in the life of a RowSet. -*/ -static void rowSetToList(RowSet *p){ - - /* This routine is called only once */ - assert( p!=0 && (p->rsFlags & ROWSET_NEXT)==0 ); - - if( (p->rsFlags & ROWSET_SORTED)==0 ){ - p->pEntry = rowSetEntrySort(p->pEntry); - } - - /* While this module could theoretically support it, sqlite3RowSetNext() - ** is never called after sqlite3RowSetText() for the same RowSet. So - ** there is never a forest to deal with. Should this change, simply - ** remove the assert() and the #if 0. */ - assert( p->pForest==0 ); -#if 0 - while( p->pForest ){ - struct RowSetEntry *pTree = p->pForest->pLeft; - if( pTree ){ - struct RowSetEntry *pHead, *pTail; - rowSetTreeToList(pTree, &pHead, &pTail); - p->pEntry = rowSetEntryMerge(p->pEntry, pHead); - } - p->pForest = p->pForest->pRight; - } -#endif - p->rsFlags |= ROWSET_NEXT; /* Verify this routine is never called again */ -} - -/* -** Extract the smallest element from the RowSet. -** Write the element into *pRowid. Return 1 on success. Return -** 0 if the RowSet is already empty. -** -** After this routine has been called, the sqlite3RowSetInsert() -** routine may not be called again. -*/ -SQLITE_PRIVATE int sqlite3RowSetNext(RowSet *p, i64 *pRowid){ - assert( p!=0 ); - - /* Merge the forest into a single sorted list on first call */ - if( (p->rsFlags & ROWSET_NEXT)==0 ) rowSetToList(p); - - /* Return the next entry on the list */ - if( p->pEntry ){ - *pRowid = p->pEntry->v; - p->pEntry = p->pEntry->pRight; - if( p->pEntry==0 ){ - sqlite3RowSetClear(p); - } - return 1; - }else{ - return 0; - } -} - -/* -** Check to see if element iRowid was inserted into the rowset as -** part of any insert batch prior to iBatch. Return 1 or 0. -** -** If this is the first test of a new batch and if there exist entires -** on pRowSet->pEntry, then sort those entires into the forest at -** pRowSet->pForest so that they can be tested. 
-*/ -SQLITE_PRIVATE int sqlite3RowSetTest(RowSet *pRowSet, int iBatch, sqlite3_int64 iRowid){ - struct RowSetEntry *p, *pTree; - - /* This routine is never called after sqlite3RowSetNext() */ - assert( pRowSet!=0 && (pRowSet->rsFlags & ROWSET_NEXT)==0 ); - - /* Sort entries into the forest on the first test of a new batch - */ - if( iBatch!=pRowSet->iBatch ){ - p = pRowSet->pEntry; - if( p ){ - struct RowSetEntry **ppPrevTree = &pRowSet->pForest; - if( (pRowSet->rsFlags & ROWSET_SORTED)==0 ){ - p = rowSetEntrySort(p); - } - for(pTree = pRowSet->pForest; pTree; pTree=pTree->pRight){ - ppPrevTree = &pTree->pRight; - if( pTree->pLeft==0 ){ - pTree->pLeft = rowSetListToTree(p); - break; - }else{ - struct RowSetEntry *pAux, *pTail; - rowSetTreeToList(pTree->pLeft, &pAux, &pTail); - pTree->pLeft = 0; - p = rowSetEntryMerge(pAux, p); - } - } - if( pTree==0 ){ - *ppPrevTree = pTree = rowSetEntryAlloc(pRowSet); - if( pTree ){ - pTree->v = 0; - pTree->pRight = 0; - pTree->pLeft = rowSetListToTree(p); - } - } - pRowSet->pEntry = 0; - pRowSet->pLast = 0; - pRowSet->rsFlags |= ROWSET_SORTED; - } - pRowSet->iBatch = iBatch; - } - - /* Test to see if the iRowid value appears anywhere in the forest. - ** Return 1 if it does and 0 if not. - */ - for(pTree = pRowSet->pForest; pTree; pTree=pTree->pRight){ - p = pTree->pLeft; - while( p ){ - if( p->vpRight; - }else if( p->v>iRowid ){ - p = p->pLeft; - }else{ - return 1; - } - } - } - return 0; -} - -/************** End of rowset.c **********************************************/ -/************** Begin file pager.c *******************************************/ -/* -** 2001 September 15 -** -** The author disclaims copyright to this source code. In place of -** a legal notice, here is a blessing: -** -** May you do good and not evil. -** May you find forgiveness for yourself and forgive others. -** May you share freely, never taking more than you give. -** -************************************************************************* -** This is the implementation of the page cache subsystem or "pager". -** -** The pager is used to access a database disk file. It implements -** atomic commit and rollback through the use of a journal file that -** is separate from the database file. The pager also implements file -** locking to prevent two processes from writing the same database -** file simultaneously, or one process from reading the database while -** another is writing. -*/ -#ifndef SQLITE_OMIT_DISKIO -/************** Include wal.h in the middle of pager.c ***********************/ -/************** Begin file wal.h *********************************************/ -/* -** 2010 February 1 -** -** The author disclaims copyright to this source code. In place of -** a legal notice, here is a blessing: -** -** May you do good and not evil. -** May you find forgiveness for yourself and forgive others. -** May you share freely, never taking more than you give. -** -************************************************************************* -** This header file defines the interface to the write-ahead logging -** system. Refer to the comments below and the header comment attached to -** the implementation of each function in log.c for further details. 
-*/ - -#ifndef _WAL_H_ -#define _WAL_H_ - - -/* Additional values that can be added to the sync_flags argument of -** sqlite3WalFrames(): -*/ -#define WAL_SYNC_TRANSACTIONS 0x20 /* Sync at the end of each transaction */ -#define SQLITE_SYNC_MASK 0x13 /* Mask off the SQLITE_SYNC_* values */ - -#ifdef SQLITE_OMIT_WAL -# define sqlite3WalOpen(x,y,z) 0 -# define sqlite3WalLimit(x,y) -# define sqlite3WalClose(w,x,y,z) 0 -# define sqlite3WalBeginReadTransaction(y,z) 0 -# define sqlite3WalEndReadTransaction(z) -# define sqlite3WalDbsize(y) 0 -# define sqlite3WalBeginWriteTransaction(y) 0 -# define sqlite3WalEndWriteTransaction(x) 0 -# define sqlite3WalUndo(x,y,z) 0 -# define sqlite3WalSavepoint(y,z) -# define sqlite3WalSavepointUndo(y,z) 0 -# define sqlite3WalFrames(u,v,w,x,y,z) 0 -# define sqlite3WalCheckpoint(r,s,t,u,v,w,x,y,z) 0 -# define sqlite3WalCallback(z) 0 -# define sqlite3WalExclusiveMode(y,z) 0 -# define sqlite3WalHeapMemory(z) 0 -# define sqlite3WalFramesize(z) 0 -# define sqlite3WalFindFrame(x,y,z) 0 -#else - -#define WAL_SAVEPOINT_NDATA 4 - -/* Connection to a write-ahead log (WAL) file. -** There is one object of this type for each pager. -*/ -typedef struct Wal Wal; - -/* Open and close a connection to a write-ahead log. */ -SQLITE_PRIVATE int sqlite3WalOpen(sqlite3_vfs*, sqlite3_file*, const char *, int, i64, Wal**); -SQLITE_PRIVATE int sqlite3WalClose(Wal *pWal, int sync_flags, int, u8 *); - -/* Set the limiting size of a WAL file. */ -SQLITE_PRIVATE void sqlite3WalLimit(Wal*, i64); - -/* Used by readers to open (lock) and close (unlock) a snapshot. A -** snapshot is like a read-transaction. It is the state of the database -** at an instant in time. sqlite3WalOpenSnapshot gets a read lock and -** preserves the current state even if the other threads or processes -** write to or checkpoint the WAL. sqlite3WalCloseSnapshot() closes the -** transaction and releases the lock. -*/ -SQLITE_PRIVATE int sqlite3WalBeginReadTransaction(Wal *pWal, int *); -SQLITE_PRIVATE void sqlite3WalEndReadTransaction(Wal *pWal); - -/* Read a page from the write-ahead log, if it is present. */ -SQLITE_PRIVATE int sqlite3WalFindFrame(Wal *, Pgno, u32 *); -SQLITE_PRIVATE int sqlite3WalReadFrame(Wal *, u32, int, u8 *); - -/* If the WAL is not empty, return the size of the database. */ -SQLITE_PRIVATE Pgno sqlite3WalDbsize(Wal *pWal); - -/* Obtain or release the WRITER lock. */ -SQLITE_PRIVATE int sqlite3WalBeginWriteTransaction(Wal *pWal); -SQLITE_PRIVATE int sqlite3WalEndWriteTransaction(Wal *pWal); - -/* Undo any frames written (but not committed) to the log */ -SQLITE_PRIVATE int sqlite3WalUndo(Wal *pWal, int (*xUndo)(void *, Pgno), void *pUndoCtx); - -/* Return an integer that records the current (uncommitted) write -** position in the WAL */ -SQLITE_PRIVATE void sqlite3WalSavepoint(Wal *pWal, u32 *aWalData); - -/* Move the write position of the WAL back to iFrame. Called in -** response to a ROLLBACK TO command. */ -SQLITE_PRIVATE int sqlite3WalSavepointUndo(Wal *pWal, u32 *aWalData); - -/* Write a frame or frames to the log. 
*/ -SQLITE_PRIVATE int sqlite3WalFrames(Wal *pWal, int, PgHdr *, Pgno, int, int); - -/* Copy pages from the log to the database file */ -SQLITE_PRIVATE int sqlite3WalCheckpoint( - Wal *pWal, /* Write-ahead log connection */ - int eMode, /* One of PASSIVE, FULL and RESTART */ - int (*xBusy)(void*), /* Function to call when busy */ - void *pBusyArg, /* Context argument for xBusyHandler */ - int sync_flags, /* Flags to sync db file with (or 0) */ - int nBuf, /* Size of buffer nBuf */ - u8 *zBuf, /* Temporary buffer to use */ - int *pnLog, /* OUT: Number of frames in WAL */ - int *pnCkpt /* OUT: Number of backfilled frames in WAL */ -); - -/* Return the value to pass to a sqlite3_wal_hook callback, the -** number of frames in the WAL at the point of the last commit since -** sqlite3WalCallback() was called. If no commits have occurred since -** the last call, then return 0. -*/ -SQLITE_PRIVATE int sqlite3WalCallback(Wal *pWal); - -/* Tell the wal layer that an EXCLUSIVE lock has been obtained (or released) -** by the pager layer on the database file. -*/ -SQLITE_PRIVATE int sqlite3WalExclusiveMode(Wal *pWal, int op); - -/* Return true if the argument is non-NULL and the WAL module is using -** heap-memory for the wal-index. Otherwise, if the argument is NULL or the -** WAL module is using shared-memory, return false. -*/ -SQLITE_PRIVATE int sqlite3WalHeapMemory(Wal *pWal); - -#ifdef SQLITE_ENABLE_ZIPVFS -/* If the WAL file is not empty, return the number of bytes of content -** stored in each frame (i.e. the db page-size when the WAL was created). -*/ -SQLITE_PRIVATE int sqlite3WalFramesize(Wal *pWal); -#endif - -#endif /* ifndef SQLITE_OMIT_WAL */ -#endif /* _WAL_H_ */ - -/************** End of wal.h *************************************************/ -/************** Continuing where we left off in pager.c **********************/ - - -/******************* NOTES ON THE DESIGN OF THE PAGER ************************ -** -** This comment block describes invariants that hold when using a rollback -** journal. These invariants do not apply for journal_mode=WAL, -** journal_mode=MEMORY, or journal_mode=OFF. -** -** Within this comment block, a page is deemed to have been synced -** automatically as soon as it is written when PRAGMA synchronous=OFF. -** Otherwise, the page is not synced until the xSync method of the VFS -** is called successfully on the file containing the page. -** -** Definition: A page of the database file is said to be "overwriteable" if -** one or more of the following are true about the page: -** -** (a) The original content of the page as it was at the beginning of -** the transaction has been written into the rollback journal and -** synced. -** -** (b) The page was a freelist leaf page at the start of the transaction. -** -** (c) The page number is greater than the largest page that existed in -** the database file at the start of the transaction. -** -** (1) A page of the database file is never overwritten unless one of the -** following are true: -** -** (a) The page and all other pages on the same sector are overwriteable. -** -** (b) The atomic page write optimization is enabled, and the entire -** transaction other than the update of the transaction sequence -** number consists of a single page change. -** -** (2) The content of a page written into the rollback journal exactly matches -** both the content in the database when the rollback journal was written -** and the content in the database at the beginning of the current -** transaction. 
-** -** (3) Writes to the database file are an integer multiple of the page size -** in length and are aligned on a page boundary. -** -** (4) Reads from the database file are either aligned on a page boundary and -** an integer multiple of the page size in length or are taken from the -** first 100 bytes of the database file. -** -** (5) All writes to the database file are synced prior to the rollback journal -** being deleted, truncated, or zeroed. -** -** (6) If a master journal file is used, then all writes to the database file -** are synced prior to the master journal being deleted. -** -** Definition: Two databases (or the same database at two points it time) -** are said to be "logically equivalent" if they give the same answer to -** all queries. Note in particular the content of freelist leaf -** pages can be changed arbitarily without effecting the logical equivalence -** of the database. -** -** (7) At any time, if any subset, including the empty set and the total set, -** of the unsynced changes to a rollback journal are removed and the -** journal is rolled back, the resulting database file will be logical -** equivalent to the database file at the beginning of the transaction. -** -** (8) When a transaction is rolled back, the xTruncate method of the VFS -** is called to restore the database file to the same size it was at -** the beginning of the transaction. (In some VFSes, the xTruncate -** method is a no-op, but that does not change the fact the SQLite will -** invoke it.) -** -** (9) Whenever the database file is modified, at least one bit in the range -** of bytes from 24 through 39 inclusive will be changed prior to releasing -** the EXCLUSIVE lock, thus signaling other connections on the same -** database to flush their caches. -** -** (10) The pattern of bits in bytes 24 through 39 shall not repeat in less -** than one billion transactions. -** -** (11) A database file is well-formed at the beginning and at the conclusion -** of every transaction. -** -** (12) An EXCLUSIVE lock is held on the database file when writing to -** the database file. -** -** (13) A SHARED lock is held on the database file while reading any -** content out of the database file. -** -******************************************************************************/ - -/* -** Macros for troubleshooting. Normally turned off -*/ -#if 0 -int sqlite3PagerTrace=1; /* True to enable tracing */ -#define sqlite3DebugPrintf printf -#define PAGERTRACE(X) if( sqlite3PagerTrace ){ sqlite3DebugPrintf X; } -#else -#define PAGERTRACE(X) -#endif - -/* -** The following two macros are used within the PAGERTRACE() macros above -** to print out file-descriptors. -** -** PAGERID() takes a pointer to a Pager struct as its argument. The -** associated file-descriptor is returned. FILEHANDLEID() takes an sqlite3_file -** struct as its argument. -*/ -#define PAGERID(p) ((int)(p->fd)) -#define FILEHANDLEID(fd) ((int)fd) - -/* -** The Pager.eState variable stores the current 'state' of a pager. A -** pager may be in any one of the seven states shown in the following -** state diagram. 
-** -** OPEN <------+------+ -** | | | -** V | | -** +---------> READER-------+ | -** | | | -** | V | -** |<-------WRITER_LOCKED------> ERROR -** | | ^ -** | V | -** |<------WRITER_CACHEMOD-------->| -** | | | -** | V | -** |<-------WRITER_DBMOD---------->| -** | | | -** | V | -** +<------WRITER_FINISHED-------->+ -** -** -** List of state transitions and the C [function] that performs each: -** -** OPEN -> READER [sqlite3PagerSharedLock] -** READER -> OPEN [pager_unlock] -** -** READER -> WRITER_LOCKED [sqlite3PagerBegin] -** WRITER_LOCKED -> WRITER_CACHEMOD [pager_open_journal] -** WRITER_CACHEMOD -> WRITER_DBMOD [syncJournal] -** WRITER_DBMOD -> WRITER_FINISHED [sqlite3PagerCommitPhaseOne] -** WRITER_*** -> READER [pager_end_transaction] -** -** WRITER_*** -> ERROR [pager_error] -** ERROR -> OPEN [pager_unlock] -** -** -** OPEN: -** -** The pager starts up in this state. Nothing is guaranteed in this -** state - the file may or may not be locked and the database size is -** unknown. The database may not be read or written. -** -** * No read or write transaction is active. -** * Any lock, or no lock at all, may be held on the database file. -** * The dbSize, dbOrigSize and dbFileSize variables may not be trusted. -** -** READER: -** -** In this state all the requirements for reading the database in -** rollback (non-WAL) mode are met. Unless the pager is (or recently -** was) in exclusive-locking mode, a user-level read transaction is -** open. The database size is known in this state. -** -** A connection running with locking_mode=normal enters this state when -** it opens a read-transaction on the database and returns to state -** OPEN after the read-transaction is completed. However a connection -** running in locking_mode=exclusive (including temp databases) remains in -** this state even after the read-transaction is closed. The only way -** a locking_mode=exclusive connection can transition from READER to OPEN -** is via the ERROR state (see below). -** -** * A read transaction may be active (but a write-transaction cannot). -** * A SHARED or greater lock is held on the database file. -** * The dbSize variable may be trusted (even if a user-level read -** transaction is not active). The dbOrigSize and dbFileSize variables -** may not be trusted at this point. -** * If the database is a WAL database, then the WAL connection is open. -** * Even if a read-transaction is not open, it is guaranteed that -** there is no hot-journal in the file-system. -** -** WRITER_LOCKED: -** -** The pager moves to this state from READER when a write-transaction -** is first opened on the database. In WRITER_LOCKED state, all locks -** required to start a write-transaction are held, but no actual -** modifications to the cache or database have taken place. -** -** In rollback mode, a RESERVED or (if the transaction was opened with -** BEGIN EXCLUSIVE) EXCLUSIVE lock is obtained on the database file when -** moving to this state, but the journal file is not written to or opened -** to in this state. If the transaction is committed or rolled back while -** in WRITER_LOCKED state, all that is required is to unlock the database -** file. -** -** IN WAL mode, WalBeginWriteTransaction() is called to lock the log file. -** If the connection is running with locking_mode=exclusive, an attempt -** is made to obtain an EXCLUSIVE lock on the database file. -** -** * A write transaction is active. -** * If the connection is open in rollback-mode, a RESERVED or greater -** lock is held on the database file. 
-** * If the connection is open in WAL-mode, a WAL write transaction -** is open (i.e. sqlite3WalBeginWriteTransaction() has been successfully -** called). -** * The dbSize, dbOrigSize and dbFileSize variables are all valid. -** * The contents of the pager cache have not been modified. -** * The journal file may or may not be open. -** * Nothing (not even the first header) has been written to the journal. -** -** WRITER_CACHEMOD: -** -** A pager moves from WRITER_LOCKED state to this state when a page is -** first modified by the upper layer. In rollback mode the journal file -** is opened (if it is not already open) and a header written to the -** start of it. The database file on disk has not been modified. -** -** * A write transaction is active. -** * A RESERVED or greater lock is held on the database file. -** * The journal file is open and the first header has been written -** to it, but the header has not been synced to disk. -** * The contents of the page cache have been modified. -** -** WRITER_DBMOD: -** -** The pager transitions from WRITER_CACHEMOD into WRITER_DBMOD state -** when it modifies the contents of the database file. WAL connections -** never enter this state (since they do not modify the database file, -** just the log file). -** -** * A write transaction is active. -** * An EXCLUSIVE or greater lock is held on the database file. -** * The journal file is open and the first header has been written -** and synced to disk. -** * The contents of the page cache have been modified (and possibly -** written to disk). -** -** WRITER_FINISHED: -** -** It is not possible for a WAL connection to enter this state. -** -** A rollback-mode pager changes to WRITER_FINISHED state from WRITER_DBMOD -** state after the entire transaction has been successfully written into the -** database file. In this state the transaction may be committed simply -** by finalizing the journal file. Once in WRITER_FINISHED state, it is -** not possible to modify the database further. At this point, the upper -** layer must either commit or rollback the transaction. -** -** * A write transaction is active. -** * An EXCLUSIVE or greater lock is held on the database file. -** * All writing and syncing of journal and database data has finished. -** If no error occurred, all that remains is to finalize the journal to -** commit the transaction. If an error did occur, the caller will need -** to rollback the transaction. -** -** ERROR: -** -** The ERROR state is entered when an IO or disk-full error (including -** SQLITE_IOERR_NOMEM) occurs at a point in the code that makes it -** difficult to be sure that the in-memory pager state (cache contents, -** db size etc.) are consistent with the contents of the file-system. -** -** Temporary pager files may enter the ERROR state, but in-memory pagers -** cannot. -** -** For example, if an IO error occurs while performing a rollback, -** the contents of the page-cache may be left in an inconsistent state. -** At this point it would be dangerous to change back to READER state -** (as usually happens after a rollback). Any subsequent readers might -** report database corruption (due to the inconsistent cache), and if -** they upgrade to writers, they may inadvertently corrupt the database -** file. To avoid this hazard, the pager switches into the ERROR state -** instead of READER following such an error. -** -** Once it has entered the ERROR state, any attempt to use the pager -** to read or write data returns an error. 
Eventually, once all -** outstanding transactions have been abandoned, the pager is able to -** transition back to OPEN state, discarding the contents of the -** page-cache and any other in-memory state at the same time. Everything -** is reloaded from disk (and, if necessary, hot-journal rollback peformed) -** when a read-transaction is next opened on the pager (transitioning -** the pager into READER state). At that point the system has recovered -** from the error. -** -** Specifically, the pager jumps into the ERROR state if: -** -** 1. An error occurs while attempting a rollback. This happens in -** function sqlite3PagerRollback(). -** -** 2. An error occurs while attempting to finalize a journal file -** following a commit in function sqlite3PagerCommitPhaseTwo(). -** -** 3. An error occurs while attempting to write to the journal or -** database file in function pagerStress() in order to free up -** memory. -** -** In other cases, the error is returned to the b-tree layer. The b-tree -** layer then attempts a rollback operation. If the error condition -** persists, the pager enters the ERROR state via condition (1) above. -** -** Condition (3) is necessary because it can be triggered by a read-only -** statement executed within a transaction. In this case, if the error -** code were simply returned to the user, the b-tree layer would not -** automatically attempt a rollback, as it assumes that an error in a -** read-only statement cannot leave the pager in an internally inconsistent -** state. -** -** * The Pager.errCode variable is set to something other than SQLITE_OK. -** * There are one or more outstanding references to pages (after the -** last reference is dropped the pager should move back to OPEN state). -** * The pager is not an in-memory pager. -** -** -** Notes: -** -** * A pager is never in WRITER_DBMOD or WRITER_FINISHED state if the -** connection is open in WAL mode. A WAL connection is always in one -** of the first four states. -** -** * Normally, a connection open in exclusive mode is never in PAGER_OPEN -** state. There are two exceptions: immediately after exclusive-mode has -** been turned on (and before any read or write transactions are -** executed), and when the pager is leaving the "error state". -** -** * See also: assert_pager_state(). -*/ -#define PAGER_OPEN 0 -#define PAGER_READER 1 -#define PAGER_WRITER_LOCKED 2 -#define PAGER_WRITER_CACHEMOD 3 -#define PAGER_WRITER_DBMOD 4 -#define PAGER_WRITER_FINISHED 5 -#define PAGER_ERROR 6 - -/* -** The Pager.eLock variable is almost always set to one of the -** following locking-states, according to the lock currently held on -** the database file: NO_LOCK, SHARED_LOCK, RESERVED_LOCK or EXCLUSIVE_LOCK. -** This variable is kept up to date as locks are taken and released by -** the pagerLockDb() and pagerUnlockDb() wrappers. -** -** If the VFS xLock() or xUnlock() returns an error other than SQLITE_BUSY -** (i.e. one of the SQLITE_IOERR subtypes), it is not clear whether or not -** the operation was successful. In these circumstances pagerLockDb() and -** pagerUnlockDb() take a conservative approach - eLock is always updated -** when unlocking the file, and only updated when locking the file if the -** VFS call is successful. This way, the Pager.eLock variable may be set -** to a less exclusive (lower) value than the lock that is actually held -** at the system level, but it is never set to a more exclusive value. -** -** This is usually safe. 
If an xUnlock fails or appears to fail, there may -** be a few redundant xLock() calls or a lock may be held for longer than -** required, but nothing really goes wrong. -** -** The exception is when the database file is unlocked as the pager moves -** from ERROR to OPEN state. At this point there may be a hot-journal file -** in the file-system that needs to be rolled back (as part of a OPEN->SHARED -** transition, by the same pager or any other). If the call to xUnlock() -** fails at this point and the pager is left holding an EXCLUSIVE lock, this -** can confuse the call to xCheckReservedLock() call made later as part -** of hot-journal detection. -** -** xCheckReservedLock() is defined as returning true "if there is a RESERVED -** lock held by this process or any others". So xCheckReservedLock may -** return true because the caller itself is holding an EXCLUSIVE lock (but -** doesn't know it because of a previous error in xUnlock). If this happens -** a hot-journal may be mistaken for a journal being created by an active -** transaction in another process, causing SQLite to read from the database -** without rolling it back. -** -** To work around this, if a call to xUnlock() fails when unlocking the -** database in the ERROR state, Pager.eLock is set to UNKNOWN_LOCK. It -** is only changed back to a real locking state after a successful call -** to xLock(EXCLUSIVE). Also, the code to do the OPEN->SHARED state transition -** omits the check for a hot-journal if Pager.eLock is set to UNKNOWN_LOCK -** lock. Instead, it assumes a hot-journal exists and obtains an EXCLUSIVE -** lock on the database file before attempting to roll it back. See function -** PagerSharedLock() for more detail. -** -** Pager.eLock may only be set to UNKNOWN_LOCK when the pager is in -** PAGER_OPEN state. -*/ -#define UNKNOWN_LOCK (EXCLUSIVE_LOCK+1) - -/* -** A macro used for invoking the codec if there is one -*/ -#ifdef SQLITE_HAS_CODEC -# define CODEC1(P,D,N,X,E) \ - if( P->xCodec && P->xCodec(P->pCodec,D,N,X)==0 ){ E; } -# define CODEC2(P,D,N,X,E,O) \ - if( P->xCodec==0 ){ O=(char*)D; }else \ - if( (O=(char*)(P->xCodec(P->pCodec,D,N,X)))==0 ){ E; } -#else -# define CODEC1(P,D,N,X,E) /* NO-OP */ -# define CODEC2(P,D,N,X,E,O) O=(char*)D -#endif - -/* -** The maximum allowed sector size. 64KiB. If the xSectorsize() method -** returns a value larger than this, then MAX_SECTOR_SIZE is used instead. -** This could conceivably cause corruption following a power failure on -** such a system. This is currently an undocumented limit. -*/ -#define MAX_SECTOR_SIZE 0x10000 - -/* -** An instance of the following structure is allocated for each active -** savepoint and statement transaction in the system. All such structures -** are stored in the Pager.aSavepoint[] array, which is allocated and -** resized using sqlite3Realloc(). -** -** When a savepoint is created, the PagerSavepoint.iHdrOffset field is -** set to 0. If a journal-header is written into the main journal while -** the savepoint is active, then iHdrOffset is set to the byte offset -** immediately following the last journal record written into the main -** journal before the journal-header. This is required during savepoint -** rollback (see pagerPlaybackSavepoint()). 
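The pager state diagram and the transition table above (OPEN -> READER, READER -> WRITER_LOCKED, the WRITER_* phases, and the ERROR escape hatch) can be condensed into a small standalone check. The sketch below is illustrative only: it reuses the same numeric values as the PAGER_* #defines that follow, but transition_ok() and the example transitions are not part of the pager itself.

#include <stdio.h>

/* Same values as the PAGER_* state #defines in the pager source. */
#define PAGER_OPEN            0
#define PAGER_READER          1
#define PAGER_WRITER_LOCKED   2
#define PAGER_WRITER_CACHEMOD 3
#define PAGER_WRITER_DBMOD    4
#define PAGER_WRITER_FINISHED 5
#define PAGER_ERROR           6

/* Return 1 if (from -> to) is one of the transitions listed in the
** state diagram, 0 otherwise. */
static int transition_ok(int from, int to){
  int writer = (from>=PAGER_WRITER_LOCKED && from<=PAGER_WRITER_FINISHED);
  switch( from ){
    case PAGER_OPEN:   return to==PAGER_READER;                /* sqlite3PagerSharedLock */
    case PAGER_READER: return to==PAGER_OPEN                   /* pager_unlock */
                           || to==PAGER_WRITER_LOCKED;         /* sqlite3PagerBegin */
    case PAGER_ERROR:  return to==PAGER_OPEN;                  /* pager_unlock */
    default: break;
  }
  if( writer ){
    if( to==PAGER_READER || to==PAGER_ERROR ) return 1;        /* end of txn / error */
    return to==from+1 && to<=PAGER_WRITER_FINISHED;            /* next write phase */
  }
  return 0;
}

int main(void){
  printf("%d\n", transition_ok(PAGER_READER, PAGER_WRITER_LOCKED));  /* 1 */
  printf("%d\n", transition_ok(PAGER_WRITER_DBMOD, PAGER_READER));   /* 1 */
  printf("%d\n", transition_ok(PAGER_OPEN, PAGER_WRITER_LOCKED));    /* 0 */
  return 0;
}

Note in particular that the only way out of ERROR is back to OPEN, and that WAL connections never reach WRITER_DBMOD or WRITER_FINISHED, exactly as the notes above describe.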
-*/ -typedef struct PagerSavepoint PagerSavepoint; -struct PagerSavepoint { - i64 iOffset; /* Starting offset in main journal */ - i64 iHdrOffset; /* See above */ - Bitvec *pInSavepoint; /* Set of pages in this savepoint */ - Pgno nOrig; /* Original number of pages in file */ - Pgno iSubRec; /* Index of first record in sub-journal */ -#ifndef SQLITE_OMIT_WAL - u32 aWalData[WAL_SAVEPOINT_NDATA]; /* WAL savepoint context */ -#endif -}; - -/* -** Bits of the Pager.doNotSpill flag. See further description below. -*/ -#define SPILLFLAG_OFF 0x01 /* Never spill cache. Set via pragma */ -#define SPILLFLAG_ROLLBACK 0x02 /* Current rolling back, so do not spill */ -#define SPILLFLAG_NOSYNC 0x04 /* Spill is ok, but do not sync */ - -/* -** A open page cache is an instance of struct Pager. A description of -** some of the more important member variables follows: -** -** eState -** -** The current 'state' of the pager object. See the comment and state -** diagram above for a description of the pager state. -** -** eLock -** -** For a real on-disk database, the current lock held on the database file - -** NO_LOCK, SHARED_LOCK, RESERVED_LOCK or EXCLUSIVE_LOCK. -** -** For a temporary or in-memory database (neither of which require any -** locks), this variable is always set to EXCLUSIVE_LOCK. Since such -** databases always have Pager.exclusiveMode==1, this tricks the pager -** logic into thinking that it already has all the locks it will ever -** need (and no reason to release them). -** -** In some (obscure) circumstances, this variable may also be set to -** UNKNOWN_LOCK. See the comment above the #define of UNKNOWN_LOCK for -** details. -** -** changeCountDone -** -** This boolean variable is used to make sure that the change-counter -** (the 4-byte header field at byte offset 24 of the database file) is -** not updated more often than necessary. -** -** It is set to true when the change-counter field is updated, which -** can only happen if an exclusive lock is held on the database file. -** It is cleared (set to false) whenever an exclusive lock is -** relinquished on the database file. Each time a transaction is committed, -** The changeCountDone flag is inspected. If it is true, the work of -** updating the change-counter is omitted for the current transaction. -** -** This mechanism means that when running in exclusive mode, a connection -** need only update the change-counter once, for the first transaction -** committed. -** -** setMaster -** -** When PagerCommitPhaseOne() is called to commit a transaction, it may -** (or may not) specify a master-journal name to be written into the -** journal file before it is synced to disk. -** -** Whether or not a journal file contains a master-journal pointer affects -** the way in which the journal file is finalized after the transaction is -** committed or rolled back when running in "journal_mode=PERSIST" mode. -** If a journal file does not contain a master-journal pointer, it is -** finalized by overwriting the first journal header with zeroes. If -** it does contain a master-journal pointer the journal file is finalized -** by truncating it to zero bytes, just as if the connection were -** running in "journal_mode=truncate" mode. -** -** Journal files that contain master journal pointers cannot be finalized -** simply by overwriting the first journal-header with zeroes, as the -** master journal pointer could interfere with hot-journal rollback of any -** subsequently interrupted transaction that reuses the journal file. 
-** -** The flag is cleared as soon as the journal file is finalized (either -** by PagerCommitPhaseTwo or PagerRollback). If an IO error prevents the -** journal file from being successfully finalized, the setMaster flag -** is cleared anyway (and the pager will move to ERROR state). -** -** doNotSpill -** -** This variables control the behavior of cache-spills (calls made by -** the pcache module to the pagerStress() routine to write cached data -** to the file-system in order to free up memory). -** -** When bits SPILLFLAG_OFF or SPILLFLAG_ROLLBACK of doNotSpill are set, -** writing to the database from pagerStress() is disabled altogether. -** The SPILLFLAG_ROLLBACK case is done in a very obscure case that -** comes up during savepoint rollback that requires the pcache module -** to allocate a new page to prevent the journal file from being written -** while it is being traversed by code in pager_playback(). The SPILLFLAG_OFF -** case is a user preference. -** -** If the SPILLFLAG_NOSYNC bit is set, writing to the database from pagerStress() -** is permitted, but syncing the journal file is not. This flag is set -** by sqlite3PagerWrite() when the file-system sector-size is larger than -** the database page-size in order to prevent a journal sync from happening -** in between the journalling of two pages on the same sector. -** -** subjInMemory -** -** This is a boolean variable. If true, then any required sub-journal -** is opened as an in-memory journal file. If false, then in-memory -** sub-journals are only used for in-memory pager files. -** -** This variable is updated by the upper layer each time a new -** write-transaction is opened. -** -** dbSize, dbOrigSize, dbFileSize -** -** Variable dbSize is set to the number of pages in the database file. -** It is valid in PAGER_READER and higher states (all states except for -** OPEN and ERROR). -** -** dbSize is set based on the size of the database file, which may be -** larger than the size of the database (the value stored at offset -** 28 of the database header by the btree). If the size of the file -** is not an integer multiple of the page-size, the value stored in -** dbSize is rounded down (i.e. a 5KB file with 2K page-size has dbSize==2). -** Except, any file that is greater than 0 bytes in size is considered -** to have at least one page. (i.e. a 1KB file with 2K page-size leads -** to dbSize==1). -** -** During a write-transaction, if pages with page-numbers greater than -** dbSize are modified in the cache, dbSize is updated accordingly. -** Similarly, if the database is truncated using PagerTruncateImage(), -** dbSize is updated. -** -** Variables dbOrigSize and dbFileSize are valid in states -** PAGER_WRITER_LOCKED and higher. dbOrigSize is a copy of the dbSize -** variable at the start of the transaction. It is used during rollback, -** and to determine whether or not pages need to be journalled before -** being modified. -** -** Throughout a write-transaction, dbFileSize contains the size of -** the file on disk in pages. It is set to a copy of dbSize when the -** write-transaction is first opened, and updated when VFS calls are made -** to write or truncate the database file on disk. -** -** The only reason the dbFileSize variable is required is to suppress -** unnecessary calls to xTruncate() after committing a transaction. If, -** when a transaction is committed, the dbFileSize variable indicates -** that the database file is larger than the database image (Pager.dbSize), -** pager_truncate() is called. 
The pager_truncate() call uses xFilesize() -** to measure the database file on disk, and then truncates it if required. -** dbFileSize is not used when rolling back a transaction. In this case -** pager_truncate() is called unconditionally (which means there may be -** a call to xFilesize() that is not strictly required). In either case, -** pager_truncate() may cause the file to become smaller or larger. -** -** dbHintSize -** -** The dbHintSize variable is used to limit the number of calls made to -** the VFS xFileControl(FCNTL_SIZE_HINT) method. -** -** dbHintSize is set to a copy of the dbSize variable when a -** write-transaction is opened (at the same time as dbFileSize and -** dbOrigSize). If the xFileControl(FCNTL_SIZE_HINT) method is called, -** dbHintSize is increased to the number of pages that correspond to the -** size-hint passed to the method call. See pager_write_pagelist() for -** details. -** -** errCode -** -** The Pager.errCode variable is only ever used in PAGER_ERROR state. It -** is set to zero in all other states. In PAGER_ERROR state, Pager.errCode -** is always set to SQLITE_FULL, SQLITE_IOERR or one of the SQLITE_IOERR_XXX -** sub-codes. -*/ -struct Pager { - sqlite3_vfs *pVfs; /* OS functions to use for IO */ - u8 exclusiveMode; /* Boolean. True if locking_mode==EXCLUSIVE */ - u8 journalMode; /* One of the PAGER_JOURNALMODE_* values */ - u8 useJournal; /* Use a rollback journal on this file */ - u8 noSync; /* Do not sync the journal if true */ - u8 fullSync; /* Do extra syncs of the journal for robustness */ - u8 ckptSyncFlags; /* SYNC_NORMAL or SYNC_FULL for checkpoint */ - u8 walSyncFlags; /* SYNC_NORMAL or SYNC_FULL for wal writes */ - u8 syncFlags; /* SYNC_NORMAL or SYNC_FULL otherwise */ - u8 tempFile; /* zFilename is a temporary or immutable file */ - u8 noLock; /* Do not lock (except in WAL mode) */ - u8 readOnly; /* True for a read-only database */ - u8 memDb; /* True to inhibit all file I/O */ - - /************************************************************************** - ** The following block contains those class members that change during - ** routine opertion. Class members not in this block are either fixed - ** when the pager is first created or else only change when there is a - ** significant mode change (such as changing the page_size, locking_mode, - ** or the journal_mode). From another view, these class members describe - ** the "state" of the pager, while other class members describe the - ** "configuration" of the pager. - */ - u8 eState; /* Pager state (OPEN, READER, WRITER_LOCKED..) 
*/ - u8 eLock; /* Current lock held on database file */ - u8 changeCountDone; /* Set after incrementing the change-counter */ - u8 setMaster; /* True if a m-j name has been written to jrnl */ - u8 doNotSpill; /* Do not spill the cache when non-zero */ - u8 subjInMemory; /* True to use in-memory sub-journals */ - Pgno dbSize; /* Number of pages in the database */ - Pgno dbOrigSize; /* dbSize before the current transaction */ - Pgno dbFileSize; /* Number of pages in the database file */ - Pgno dbHintSize; /* Value passed to FCNTL_SIZE_HINT call */ - int errCode; /* One of several kinds of errors */ - int nRec; /* Pages journalled since last j-header written */ - u32 cksumInit; /* Quasi-random value added to every checksum */ - u32 nSubRec; /* Number of records written to sub-journal */ - Bitvec *pInJournal; /* One bit for each page in the database file */ - sqlite3_file *fd; /* File descriptor for database */ - sqlite3_file *jfd; /* File descriptor for main journal */ - sqlite3_file *sjfd; /* File descriptor for sub-journal */ - i64 journalOff; /* Current write offset in the journal file */ - i64 journalHdr; /* Byte offset to previous journal header */ - sqlite3_backup *pBackup; /* Pointer to list of ongoing backup processes */ - PagerSavepoint *aSavepoint; /* Array of active savepoints */ - int nSavepoint; /* Number of elements in aSavepoint[] */ - char dbFileVers[16]; /* Changes whenever database file changes */ - - u8 bUseFetch; /* True to use xFetch() */ - int nMmapOut; /* Number of mmap pages currently outstanding */ - sqlite3_int64 szMmap; /* Desired maximum mmap size */ - PgHdr *pMmapFreelist; /* List of free mmap page headers (pDirty) */ - /* - ** End of the routinely-changing class members - ***************************************************************************/ - - u16 nExtra; /* Add this many bytes to each in-memory page */ - i16 nReserve; /* Number of unused bytes at end of each page */ - u32 vfsFlags; /* Flags for sqlite3_vfs.xOpen() */ - u32 sectorSize; /* Assumed sector size during rollback */ - int pageSize; /* Number of bytes in a page */ - Pgno mxPgno; /* Maximum allowed size of the database */ - i64 journalSizeLimit; /* Size limit for persistent journal files */ - char *zFilename; /* Name of the database file */ - char *zJournal; /* Name of the journal file */ - int (*xBusyHandler)(void*); /* Function to call when busy */ - void *pBusyHandlerArg; /* Context argument for xBusyHandler */ - int aStat[3]; /* Total cache hits, misses and writes */ -#ifdef SQLITE_TEST - int nRead; /* Database pages read */ -#endif - void (*xReiniter)(DbPage*); /* Call this routine when reloading pages */ -#ifdef SQLITE_HAS_CODEC - void *(*xCodec)(void*,void*,Pgno,int); /* Routine for en/decoding data */ - void (*xCodecSizeChng)(void*,int,int); /* Notify of page size changes */ - void (*xCodecFree)(void*); /* Destructor for the codec */ - void *pCodec; /* First argument to xCodec... methods */ -#endif - char *pTmpSpace; /* Pager.pageSize bytes of space for tmp use */ - PCache *pPCache; /* Pointer to page cache object */ -#ifndef SQLITE_OMIT_WAL - Wal *pWal; /* Write-ahead log used by "journal_mode=wal" */ - char *zWal; /* File name for write-ahead log */ -#endif -}; - -/* -** Indexes for use with Pager.aStat[]. The Pager.aStat[] array contains -** the values accessed by passing SQLITE_DBSTATUS_CACHE_HIT, CACHE_MISS -** or CACHE_WRITE to sqlite3_db_status(). 
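Those three Pager.aStat[] slots surface through the public sqlite3_db_status() interface mentioned above. A minimal sketch of reading them follows; the build line and the specific statements are assumptions for illustration, and the printed counts will vary, but the SQLITE_DBSTATUS_CACHE_HIT/MISS/WRITE opcodes are the documented way to observe these counters from application code.

/* Assumed build: cc dbstatus_demo.c -lsqlite3 */
#include <stdio.h>
#include <sqlite3.h>

int main(void){
  sqlite3 *db;
  int cur, hiwtr;
  if( sqlite3_open(":memory:", &db)!=SQLITE_OK ) return 1;
  sqlite3_exec(db, "CREATE TABLE t(x); INSERT INTO t VALUES(1);", 0, 0, 0);
  sqlite3_exec(db, "SELECT x FROM t;", 0, 0, 0);

  /* Read the counters the pager accumulates in Pager.aStat[]. */
  sqlite3_db_status(db, SQLITE_DBSTATUS_CACHE_HIT,   &cur, &hiwtr, 0);
  printf("cache hits:   %d\n", cur);
  sqlite3_db_status(db, SQLITE_DBSTATUS_CACHE_MISS,  &cur, &hiwtr, 0);
  printf("cache misses: %d\n", cur);
  sqlite3_db_status(db, SQLITE_DBSTATUS_CACHE_WRITE, &cur, &hiwtr, 0);
  printf("cache writes: %d\n", cur);

  sqlite3_close(db);
  return 0;
}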
-*/ -#define PAGER_STAT_HIT 0 -#define PAGER_STAT_MISS 1 -#define PAGER_STAT_WRITE 2 - -/* -** The following global variables hold counters used for -** testing purposes only. These variables do not exist in -** a non-testing build. These variables are not thread-safe. -*/ -#ifdef SQLITE_TEST -SQLITE_API int sqlite3_pager_readdb_count = 0; /* Number of full pages read from DB */ -SQLITE_API int sqlite3_pager_writedb_count = 0; /* Number of full pages written to DB */ -SQLITE_API int sqlite3_pager_writej_count = 0; /* Number of pages written to journal */ -# define PAGER_INCR(v) v++ -#else -# define PAGER_INCR(v) -#endif - - - -/* -** Journal files begin with the following magic string. The data -** was obtained from /dev/random. It is used only as a sanity check. -** -** Since version 2.8.0, the journal format contains additional sanity -** checking information. If the power fails while the journal is being -** written, semi-random garbage data might appear in the journal -** file after power is restored. If an attempt is then made -** to roll the journal back, the database could be corrupted. The additional -** sanity checking data is an attempt to discover the garbage in the -** journal and ignore it. -** -** The sanity checking information for the new journal format consists -** of a 32-bit checksum on each page of data. The checksum covers both -** the page number and the pPager->pageSize bytes of data for the page. -** This cksum is initialized to a 32-bit random value that appears in the -** journal file right after the header. The random initializer is important, -** because garbage data that appears at the end of a journal is likely -** data that was once in other files that have now been deleted. If the -** garbage data came from an obsolete journal file, the checksums might -** be correct. But by initializing the checksum to random value which -** is different for every journal, we minimize that risk. -*/ -static const unsigned char aJournalMagic[] = { - 0xd9, 0xd5, 0x05, 0xf9, 0x20, 0xa1, 0x63, 0xd7, -}; - -/* -** The size of the of each page record in the journal is given by -** the following macro. -*/ -#define JOURNAL_PG_SZ(pPager) ((pPager->pageSize) + 8) - -/* -** The journal header size for this pager. This is usually the same -** size as a single disk sector. See also setSectorSize(). -*/ -#define JOURNAL_HDR_SZ(pPager) (pPager->sectorSize) - -/* -** The macro MEMDB is true if we are dealing with an in-memory database. -** We do this as a macro so that if the SQLITE_OMIT_MEMORYDB macro is set, -** the value of MEMDB will be a constant and the compiler will optimize -** out code that would never execute. -*/ -#ifdef SQLITE_OMIT_MEMORYDB -# define MEMDB 0 -#else -# define MEMDB pPager->memDb -#endif - -/* -** The macro USEFETCH is true if we are allowed to use the xFetch and xUnfetch -** interfaces to access the database using memory-mapped I/O. -*/ -#if SQLITE_MAX_MMAP_SIZE>0 -# define USEFETCH(x) ((x)->bUseFetch) -#else -# define USEFETCH(x) 0 -#endif - -/* -** The maximum legal page number is (2^31 - 1). -*/ -#define PAGER_MAX_PGNO 2147483647 - -/* -** The argument to this macro is a file descriptor (type sqlite3_file*). -** Return 0 if it is not open, or non-zero (but not 1) if it is. -** -** This is so that expressions can be written as: -** -** if( isOpen(pPager->jfd) ){ ... -** -** instead of -** -** if( pPager->jfd->pMethods ){ ... 
-*/ -#define isOpen(pFd) ((pFd)->pMethods) - -/* -** Return true if this pager uses a write-ahead log instead of the usual -** rollback journal. Otherwise false. -*/ -#ifndef SQLITE_OMIT_WAL -static int pagerUseWal(Pager *pPager){ - return (pPager->pWal!=0); -} -#else -# define pagerUseWal(x) 0 -# define pagerRollbackWal(x) 0 -# define pagerWalFrames(v,w,x,y) 0 -# define pagerOpenWalIfPresent(z) SQLITE_OK -# define pagerBeginReadTransaction(z) SQLITE_OK -#endif - -#ifndef NDEBUG -/* -** Usage: -** -** assert( assert_pager_state(pPager) ); -** -** This function runs many asserts to try to find inconsistencies in -** the internal state of the Pager object. -*/ -static int assert_pager_state(Pager *p){ - Pager *pPager = p; - - /* State must be valid. */ - assert( p->eState==PAGER_OPEN - || p->eState==PAGER_READER - || p->eState==PAGER_WRITER_LOCKED - || p->eState==PAGER_WRITER_CACHEMOD - || p->eState==PAGER_WRITER_DBMOD - || p->eState==PAGER_WRITER_FINISHED - || p->eState==PAGER_ERROR - ); - - /* Regardless of the current state, a temp-file connection always behaves - ** as if it has an exclusive lock on the database file. It never updates - ** the change-counter field, so the changeCountDone flag is always set. - */ - assert( p->tempFile==0 || p->eLock==EXCLUSIVE_LOCK ); - assert( p->tempFile==0 || pPager->changeCountDone ); - - /* If the useJournal flag is clear, the journal-mode must be "OFF". - ** And if the journal-mode is "OFF", the journal file must not be open. - */ - assert( p->journalMode==PAGER_JOURNALMODE_OFF || p->useJournal ); - assert( p->journalMode!=PAGER_JOURNALMODE_OFF || !isOpen(p->jfd) ); - - /* Check that MEMDB implies noSync. And an in-memory journal. Since - ** this means an in-memory pager performs no IO at all, it cannot encounter - ** either SQLITE_IOERR or SQLITE_FULL during rollback or while finalizing - ** a journal file. (although the in-memory journal implementation may - ** return SQLITE_IOERR_NOMEM while the journal file is being written). It - ** is therefore not possible for an in-memory pager to enter the ERROR - ** state. - */ - if( MEMDB ){ - assert( p->noSync ); - assert( p->journalMode==PAGER_JOURNALMODE_OFF - || p->journalMode==PAGER_JOURNALMODE_MEMORY - ); - assert( p->eState!=PAGER_ERROR && p->eState!=PAGER_OPEN ); - assert( pagerUseWal(p)==0 ); - } - - /* If changeCountDone is set, a RESERVED lock or greater must be held - ** on the file. - */ - assert( pPager->changeCountDone==0 || pPager->eLock>=RESERVED_LOCK ); - assert( p->eLock!=PENDING_LOCK ); - - switch( p->eState ){ - case PAGER_OPEN: - assert( !MEMDB ); - assert( pPager->errCode==SQLITE_OK ); - assert( sqlite3PcacheRefCount(pPager->pPCache)==0 || pPager->tempFile ); - break; - - case PAGER_READER: - assert( pPager->errCode==SQLITE_OK ); - assert( p->eLock!=UNKNOWN_LOCK ); - assert( p->eLock>=SHARED_LOCK ); - break; - - case PAGER_WRITER_LOCKED: - assert( p->eLock!=UNKNOWN_LOCK ); - assert( pPager->errCode==SQLITE_OK ); - if( !pagerUseWal(pPager) ){ - assert( p->eLock>=RESERVED_LOCK ); - } - assert( pPager->dbSize==pPager->dbOrigSize ); - assert( pPager->dbOrigSize==pPager->dbFileSize ); - assert( pPager->dbOrigSize==pPager->dbHintSize ); - assert( pPager->setMaster==0 ); - break; - - case PAGER_WRITER_CACHEMOD: - assert( p->eLock!=UNKNOWN_LOCK ); - assert( pPager->errCode==SQLITE_OK ); - if( !pagerUseWal(pPager) ){ - /* It is possible that if journal_mode=wal here that neither the - ** journal file nor the WAL file are open. 
This happens during - ** a rollback transaction that switches from journal_mode=off - ** to journal_mode=wal. - */ - assert( p->eLock>=RESERVED_LOCK ); - assert( isOpen(p->jfd) - || p->journalMode==PAGER_JOURNALMODE_OFF - || p->journalMode==PAGER_JOURNALMODE_WAL - ); - } - assert( pPager->dbOrigSize==pPager->dbFileSize ); - assert( pPager->dbOrigSize==pPager->dbHintSize ); - break; - - case PAGER_WRITER_DBMOD: - assert( p->eLock==EXCLUSIVE_LOCK ); - assert( pPager->errCode==SQLITE_OK ); - assert( !pagerUseWal(pPager) ); - assert( p->eLock>=EXCLUSIVE_LOCK ); - assert( isOpen(p->jfd) - || p->journalMode==PAGER_JOURNALMODE_OFF - || p->journalMode==PAGER_JOURNALMODE_WAL - ); - assert( pPager->dbOrigSize<=pPager->dbHintSize ); - break; - - case PAGER_WRITER_FINISHED: - assert( p->eLock==EXCLUSIVE_LOCK ); - assert( pPager->errCode==SQLITE_OK ); - assert( !pagerUseWal(pPager) ); - assert( isOpen(p->jfd) - || p->journalMode==PAGER_JOURNALMODE_OFF - || p->journalMode==PAGER_JOURNALMODE_WAL - ); - break; - - case PAGER_ERROR: - /* There must be at least one outstanding reference to the pager if - ** in ERROR state. Otherwise the pager should have already dropped - ** back to OPEN state. - */ - assert( pPager->errCode!=SQLITE_OK ); - assert( sqlite3PcacheRefCount(pPager->pPCache)>0 ); - break; - } - - return 1; -} -#endif /* ifndef NDEBUG */ - -#ifdef SQLITE_DEBUG -/* -** Return a pointer to a human readable string in a static buffer -** containing the state of the Pager object passed as an argument. This -** is intended to be used within debuggers. For example, as an alternative -** to "print *pPager" in gdb: -** -** (gdb) printf "%s", print_pager_state(pPager) -*/ -static char *print_pager_state(Pager *p){ - static char zRet[1024]; - - sqlite3_snprintf(1024, zRet, - "Filename: %s\n" - "State: %s errCode=%d\n" - "Lock: %s\n" - "Locking mode: locking_mode=%s\n" - "Journal mode: journal_mode=%s\n" - "Backing store: tempFile=%d memDb=%d useJournal=%d\n" - "Journal: journalOff=%lld journalHdr=%lld\n" - "Size: dbsize=%d dbOrigSize=%d dbFileSize=%d\n" - , p->zFilename - , p->eState==PAGER_OPEN ? "OPEN" : - p->eState==PAGER_READER ? "READER" : - p->eState==PAGER_WRITER_LOCKED ? "WRITER_LOCKED" : - p->eState==PAGER_WRITER_CACHEMOD ? "WRITER_CACHEMOD" : - p->eState==PAGER_WRITER_DBMOD ? "WRITER_DBMOD" : - p->eState==PAGER_WRITER_FINISHED ? "WRITER_FINISHED" : - p->eState==PAGER_ERROR ? "ERROR" : "?error?" - , (int)p->errCode - , p->eLock==NO_LOCK ? "NO_LOCK" : - p->eLock==RESERVED_LOCK ? "RESERVED" : - p->eLock==EXCLUSIVE_LOCK ? "EXCLUSIVE" : - p->eLock==SHARED_LOCK ? "SHARED" : - p->eLock==UNKNOWN_LOCK ? "UNKNOWN" : "?error?" - , p->exclusiveMode ? "exclusive" : "normal" - , p->journalMode==PAGER_JOURNALMODE_MEMORY ? "memory" : - p->journalMode==PAGER_JOURNALMODE_OFF ? "off" : - p->journalMode==PAGER_JOURNALMODE_DELETE ? "delete" : - p->journalMode==PAGER_JOURNALMODE_PERSIST ? "persist" : - p->journalMode==PAGER_JOURNALMODE_TRUNCATE ? "truncate" : - p->journalMode==PAGER_JOURNALMODE_WAL ? "wal" : "?error?" - , (int)p->tempFile, (int)p->memDb, (int)p->useJournal - , p->journalOff, p->journalHdr - , (int)p->dbSize, (int)p->dbOrigSize, (int)p->dbFileSize - ); - - return zRet; -} -#endif - -/* -** Return true if it is necessary to write page *pPg into the sub-journal. 
-** A page needs to be written into the sub-journal if there exists one -** or more open savepoints for which: -** -** * The page-number is less than or equal to PagerSavepoint.nOrig, and -** * The bit corresponding to the page-number is not set in -** PagerSavepoint.pInSavepoint. -*/ -static int subjRequiresPage(PgHdr *pPg){ - Pager *pPager = pPg->pPager; - PagerSavepoint *p; - Pgno pgno = pPg->pgno; - int i; - for(i=0; inSavepoint; i++){ - p = &pPager->aSavepoint[i]; - if( p->nOrig>=pgno && 0==sqlite3BitvecTest(p->pInSavepoint, pgno) ){ - return 1; - } - } - return 0; -} - -/* -** Return true if the page is already in the journal file. -*/ -static int pageInJournal(Pager *pPager, PgHdr *pPg){ - return sqlite3BitvecTest(pPager->pInJournal, pPg->pgno); -} - -/* -** Read a 32-bit integer from the given file descriptor. Store the integer -** that is read in *pRes. Return SQLITE_OK if everything worked, or an -** error code is something goes wrong. -** -** All values are stored on disk as big-endian. -*/ -static int read32bits(sqlite3_file *fd, i64 offset, u32 *pRes){ - unsigned char ac[4]; - int rc = sqlite3OsRead(fd, ac, sizeof(ac), offset); - if( rc==SQLITE_OK ){ - *pRes = sqlite3Get4byte(ac); - } - return rc; -} - -/* -** Write a 32-bit integer into a string buffer in big-endian byte order. -*/ -#define put32bits(A,B) sqlite3Put4byte((u8*)A,B) - - -/* -** Write a 32-bit integer into the given file descriptor. Return SQLITE_OK -** on success or an error code is something goes wrong. -*/ -static int write32bits(sqlite3_file *fd, i64 offset, u32 val){ - char ac[4]; - put32bits(ac, val); - return sqlite3OsWrite(fd, ac, 4, offset); -} - -/* -** Unlock the database file to level eLock, which must be either NO_LOCK -** or SHARED_LOCK. Regardless of whether or not the call to xUnlock() -** succeeds, set the Pager.eLock variable to match the (attempted) new lock. -** -** Except, if Pager.eLock is set to UNKNOWN_LOCK when this function is -** called, do not modify it. See the comment above the #define of -** UNKNOWN_LOCK for an explanation of this. -*/ -static int pagerUnlockDb(Pager *pPager, int eLock){ - int rc = SQLITE_OK; - - assert( !pPager->exclusiveMode || pPager->eLock==eLock ); - assert( eLock==NO_LOCK || eLock==SHARED_LOCK ); - assert( eLock!=NO_LOCK || pagerUseWal(pPager)==0 ); - if( isOpen(pPager->fd) ){ - assert( pPager->eLock>=eLock ); - rc = pPager->noLock ? SQLITE_OK : sqlite3OsUnlock(pPager->fd, eLock); - if( pPager->eLock!=UNKNOWN_LOCK ){ - pPager->eLock = (u8)eLock; - } - IOTRACE(("UNLOCK %p %d\n", pPager, eLock)) - } - return rc; -} - -/* -** Lock the database file to level eLock, which must be either SHARED_LOCK, -** RESERVED_LOCK or EXCLUSIVE_LOCK. If the caller is successful, set the -** Pager.eLock variable to the new locking state. -** -** Except, if Pager.eLock is set to UNKNOWN_LOCK when this function is -** called, do not modify it unless the new locking state is EXCLUSIVE_LOCK. -** See the comment above the #define of UNKNOWN_LOCK for an explanation -** of this. -*/ -static int pagerLockDb(Pager *pPager, int eLock){ - int rc = SQLITE_OK; - - assert( eLock==SHARED_LOCK || eLock==RESERVED_LOCK || eLock==EXCLUSIVE_LOCK ); - if( pPager->eLockeLock==UNKNOWN_LOCK ){ - rc = pPager->noLock ? 
SQLITE_OK : sqlite3OsLock(pPager->fd, eLock); - if( rc==SQLITE_OK && (pPager->eLock!=UNKNOWN_LOCK||eLock==EXCLUSIVE_LOCK) ){ - pPager->eLock = (u8)eLock; - IOTRACE(("LOCK %p %d\n", pPager, eLock)) - } - } - return rc; -} - -/* -** This function determines whether or not the atomic-write optimization -** can be used with this pager. The optimization can be used if: -** -** (a) the value returned by OsDeviceCharacteristics() indicates that -** a database page may be written atomically, and -** (b) the value returned by OsSectorSize() is less than or equal -** to the page size. -** -** The optimization is also always enabled for temporary files. It is -** an error to call this function if pPager is opened on an in-memory -** database. -** -** If the optimization cannot be used, 0 is returned. If it can be used, -** then the value returned is the size of the journal file when it -** contains rollback data for exactly one page. -*/ -#ifdef SQLITE_ENABLE_ATOMIC_WRITE -static int jrnlBufferSize(Pager *pPager){ - assert( !MEMDB ); - if( !pPager->tempFile ){ - int dc; /* Device characteristics */ - int nSector; /* Sector size */ - int szPage; /* Page size */ - - assert( isOpen(pPager->fd) ); - dc = sqlite3OsDeviceCharacteristics(pPager->fd); - nSector = pPager->sectorSize; - szPage = pPager->pageSize; - - assert(SQLITE_IOCAP_ATOMIC512==(512>>8)); - assert(SQLITE_IOCAP_ATOMIC64K==(65536>>8)); - if( 0==(dc&(SQLITE_IOCAP_ATOMIC|(szPage>>8)) || nSector>szPage) ){ - return 0; - } - } - - return JOURNAL_HDR_SZ(pPager) + JOURNAL_PG_SZ(pPager); -} -#endif - -/* -** If SQLITE_CHECK_PAGES is defined then we do some sanity checking -** on the cache using a hash function. This is used for testing -** and debugging only. -*/ -#ifdef SQLITE_CHECK_PAGES -/* -** Return a 32-bit hash of the page data for pPage. -*/ -static u32 pager_datahash(int nByte, unsigned char *pData){ - u32 hash = 0; - int i; - for(i=0; ipPager->pageSize, (unsigned char *)pPage->pData); -} -static void pager_set_pagehash(PgHdr *pPage){ - pPage->pageHash = pager_pagehash(pPage); -} - -/* -** The CHECK_PAGE macro takes a PgHdr* as an argument. If SQLITE_CHECK_PAGES -** is defined, and NDEBUG is not defined, an assert() statement checks -** that the page is either dirty or still matches the calculated page-hash. -*/ -#define CHECK_PAGE(x) checkPage(x) -static void checkPage(PgHdr *pPg){ - Pager *pPager = pPg->pPager; - assert( pPager->eState!=PAGER_ERROR ); - assert( (pPg->flags&PGHDR_DIRTY) || pPg->pageHash==pager_pagehash(pPg) ); -} - -#else -#define pager_datahash(X,Y) 0 -#define pager_pagehash(X) 0 -#define pager_set_pagehash(X) -#define CHECK_PAGE(x) -#endif /* SQLITE_CHECK_PAGES */ - -/* -** When this is called the journal file for pager pPager must be open. -** This function attempts to read a master journal file name from the -** end of the file and, if successful, copies it into memory supplied -** by the caller. See comments above writeMasterJournal() for the format -** used to store a master journal file name at the end of a journal file. -** -** zMaster must point to a buffer of at least nMaster bytes allocated by -** the caller. This should be sqlite3_vfs.mxPathname+1 (to ensure there is -** enough space to write the master journal name). If the master journal -** name in the journal is longer than nMaster bytes (including a -** nul-terminator), then this is handled as if no master journal name -** were present in the journal. 
-** -** If a master journal file name is present at the end of the journal -** file, then it is copied into the buffer pointed to by zMaster. A -** nul-terminator byte is appended to the buffer following the master -** journal file name. -** -** If it is determined that no master journal file name is present -** zMaster[0] is set to 0 and SQLITE_OK returned. -** -** If an error occurs while reading from the journal file, an SQLite -** error code is returned. -*/ -static int readMasterJournal(sqlite3_file *pJrnl, char *zMaster, u32 nMaster){ - int rc; /* Return code */ - u32 len; /* Length in bytes of master journal name */ - i64 szJ; /* Total size in bytes of journal file pJrnl */ - u32 cksum; /* MJ checksum value read from journal */ - u32 u; /* Unsigned loop counter */ - unsigned char aMagic[8]; /* A buffer to hold the magic header */ - zMaster[0] = '\0'; - - if( SQLITE_OK!=(rc = sqlite3OsFileSize(pJrnl, &szJ)) - || szJ<16 - || SQLITE_OK!=(rc = read32bits(pJrnl, szJ-16, &len)) - || len>=nMaster - || len==0 - || SQLITE_OK!=(rc = read32bits(pJrnl, szJ-12, &cksum)) - || SQLITE_OK!=(rc = sqlite3OsRead(pJrnl, aMagic, 8, szJ-8)) - || memcmp(aMagic, aJournalMagic, 8) - || SQLITE_OK!=(rc = sqlite3OsRead(pJrnl, zMaster, len, szJ-16-len)) - ){ - return rc; - } - - /* See if the checksum matches the master journal name */ - for(u=0; ujournalOff, assuming a sector -** size of pPager->sectorSize bytes. -** -** i.e for a sector size of 512: -** -** Pager.journalOff Return value -** --------------------------------------- -** 0 0 -** 512 512 -** 100 512 -** 2000 2048 -** -*/ -static i64 journalHdrOffset(Pager *pPager){ - i64 offset = 0; - i64 c = pPager->journalOff; - if( c ){ - offset = ((c-1)/JOURNAL_HDR_SZ(pPager) + 1) * JOURNAL_HDR_SZ(pPager); - } - assert( offset%JOURNAL_HDR_SZ(pPager)==0 ); - assert( offset>=c ); - assert( (offset-c)jfd) ); - if( pPager->journalOff ){ - const i64 iLimit = pPager->journalSizeLimit; /* Local cache of jsl */ - - IOTRACE(("JZEROHDR %p\n", pPager)) - if( doTruncate || iLimit==0 ){ - rc = sqlite3OsTruncate(pPager->jfd, 0); - }else{ - static const char zeroHdr[28] = {0}; - rc = sqlite3OsWrite(pPager->jfd, zeroHdr, sizeof(zeroHdr), 0); - } - if( rc==SQLITE_OK && !pPager->noSync ){ - rc = sqlite3OsSync(pPager->jfd, SQLITE_SYNC_DATAONLY|pPager->syncFlags); - } - - /* At this point the transaction is committed but the write lock - ** is still held on the file. If there is a size limit configured for - ** the persistent journal and the journal file currently consumes more - ** space than that limit allows for, truncate it now. There is no need - ** to sync the file following this operation. - */ - if( rc==SQLITE_OK && iLimit>0 ){ - i64 sz; - rc = sqlite3OsFileSize(pPager->jfd, &sz); - if( rc==SQLITE_OK && sz>iLimit ){ - rc = sqlite3OsTruncate(pPager->jfd, iLimit); - } - } - } - return rc; -} - -/* -** The journal file must be open when this routine is called. A journal -** header (JOURNAL_HDR_SZ bytes) is written into the journal file at the -** current location. -** -** The format for the journal header is as follows: -** - 8 bytes: Magic identifying journal format. -** - 4 bytes: Number of records in journal, or -1 no-sync mode is on. -** - 4 bytes: Random number used for page hash. -** - 4 bytes: Initial database page count. -** - 4 bytes: Sector size used by the process that wrote this journal. -** - 4 bytes: Database page size. -** -** Followed by (JOURNAL_HDR_SZ - 28) bytes of unused space. 
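The 28-byte journal header described above, plus the sector-boundary rounding used by journalHdrOffset(), are easy to see in a standalone sketch. Everything below is illustrative: the geometry (512-byte sectors, 1024-byte pages), the database size of 2 pages, and the 0x12345678 checksum seed are example values rather than anything the real writeJournalHdr() produces, while the field order, the big-endian encoding, and the magic bytes follow the comments in this file.

#include <stdio.h>
#include <string.h>
#include <stdint.h>

/* Big-endian serialization: all on-disk values in the journal are big-endian. */
static void put32(uint8_t *p, uint32_t v){
  p[0]=(uint8_t)(v>>24); p[1]=(uint8_t)(v>>16); p[2]=(uint8_t)(v>>8); p[3]=(uint8_t)v;
}

/* Round an offset up to the next sector boundary (cf. journalHdrOffset()). */
static int64_t hdr_offset(int64_t off, int64_t sectorSize){
  return off ? ((off-1)/sectorSize + 1)*sectorSize : 0;
}

int main(void){
  const uint32_t sectorSize = 512;   /* so JOURNAL_HDR_SZ == 512 in this example */
  const uint32_t pageSize   = 1024;
  static const uint8_t magic[8] = {0xd9,0xd5,0x05,0xf9,0x20,0xa1,0x63,0xd7};
  uint8_t hdr[512];
  memset(hdr, 0, sizeof(hdr));           /* unused tail of the header sector */

  memcpy(&hdr[0], magic, 8);             /* 8 bytes: journal magic                     */
  put32(&hdr[8],  0);                    /* 4 bytes: nRec (0 now, patched or 0xffffffff
                                         **          in no-sync / safe-append mode)   */
  put32(&hdr[12], 0x12345678);           /* 4 bytes: random checksum initializer       */
  put32(&hdr[16], 2);                    /* 4 bytes: database size in pages            */
  put32(&hdr[20], sectorSize);           /* 4 bytes: sector size                       */
  put32(&hdr[24], pageSize);             /* 4 bytes: page size                         */

  /* Each page record that follows is a 4-byte page number plus the page image
  ** plus a 4-byte checksum: pageSize+8 bytes, i.e. JOURNAL_PG_SZ. */
  printf("header uses 28 of %u bytes; page record is %u bytes\n",
         (unsigned)sectorSize, (unsigned)(pageSize + 8));
  printf("next header after offset 100 goes at %lld\n",
         (long long)hdr_offset(100, sectorSize));
  return 0;
}

Running it shows the 100 -> 512 rounding from the journalHdrOffset() table, and why the atomic-write journal size for a single page is JOURNAL_HDR_SZ + JOURNAL_PG_SZ.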
-*/ -static int writeJournalHdr(Pager *pPager){ - int rc = SQLITE_OK; /* Return code */ - char *zHeader = pPager->pTmpSpace; /* Temporary space used to build header */ - u32 nHeader = (u32)pPager->pageSize;/* Size of buffer pointed to by zHeader */ - u32 nWrite; /* Bytes of header sector written */ - int ii; /* Loop counter */ - - assert( isOpen(pPager->jfd) ); /* Journal file must be open. */ - - if( nHeader>JOURNAL_HDR_SZ(pPager) ){ - nHeader = JOURNAL_HDR_SZ(pPager); - } - - /* If there are active savepoints and any of them were created - ** since the most recent journal header was written, update the - ** PagerSavepoint.iHdrOffset fields now. - */ - for(ii=0; iinSavepoint; ii++){ - if( pPager->aSavepoint[ii].iHdrOffset==0 ){ - pPager->aSavepoint[ii].iHdrOffset = pPager->journalOff; - } - } - - pPager->journalHdr = pPager->journalOff = journalHdrOffset(pPager); - - /* - ** Write the nRec Field - the number of page records that follow this - ** journal header. Normally, zero is written to this value at this time. - ** After the records are added to the journal (and the journal synced, - ** if in full-sync mode), the zero is overwritten with the true number - ** of records (see syncJournal()). - ** - ** A faster alternative is to write 0xFFFFFFFF to the nRec field. When - ** reading the journal this value tells SQLite to assume that the - ** rest of the journal file contains valid page records. This assumption - ** is dangerous, as if a failure occurred whilst writing to the journal - ** file it may contain some garbage data. There are two scenarios - ** where this risk can be ignored: - ** - ** * When the pager is in no-sync mode. Corruption can follow a - ** power failure in this case anyway. - ** - ** * When the SQLITE_IOCAP_SAFE_APPEND flag is set. This guarantees - ** that garbage data is never appended to the journal file. - */ - assert( isOpen(pPager->fd) || pPager->noSync ); - if( pPager->noSync || (pPager->journalMode==PAGER_JOURNALMODE_MEMORY) - || (sqlite3OsDeviceCharacteristics(pPager->fd)&SQLITE_IOCAP_SAFE_APPEND) - ){ - memcpy(zHeader, aJournalMagic, sizeof(aJournalMagic)); - put32bits(&zHeader[sizeof(aJournalMagic)], 0xffffffff); - }else{ - memset(zHeader, 0, sizeof(aJournalMagic)+4); - } - - /* The random check-hash initializer */ - sqlite3_randomness(sizeof(pPager->cksumInit), &pPager->cksumInit); - put32bits(&zHeader[sizeof(aJournalMagic)+4], pPager->cksumInit); - /* The initial database size */ - put32bits(&zHeader[sizeof(aJournalMagic)+8], pPager->dbOrigSize); - /* The assumed sector size for this process */ - put32bits(&zHeader[sizeof(aJournalMagic)+12], pPager->sectorSize); - - /* The page size */ - put32bits(&zHeader[sizeof(aJournalMagic)+16], pPager->pageSize); - - /* Initializing the tail of the buffer is not necessary. Everything - ** works find if the following memset() is omitted. But initializing - ** the memory prevents valgrind from complaining, so we are willing to - ** take the performance hit. - */ - memset(&zHeader[sizeof(aJournalMagic)+20], 0, - nHeader-(sizeof(aJournalMagic)+20)); - - /* In theory, it is only necessary to write the 28 bytes that the - ** journal header consumes to the journal file here. Then increment the - ** Pager.journalOff variable by JOURNAL_HDR_SZ so that the next - ** record is written to the following sector (leaving a gap in the file - ** that will be implicitly filled in by the OS). 
- ** - ** However it has been discovered that on some systems this pattern can - ** be significantly slower than contiguously writing data to the file, - ** even if that means explicitly writing data to the block of - ** (JOURNAL_HDR_SZ - 28) bytes that will not be used. So that is what - ** is done. - ** - ** The loop is required here in case the sector-size is larger than the - ** database page size. Since the zHeader buffer is only Pager.pageSize - ** bytes in size, more than one call to sqlite3OsWrite() may be required - ** to populate the entire journal header sector. - */ - for(nWrite=0; rc==SQLITE_OK&&nWritejournalHdr, nHeader)) - rc = sqlite3OsWrite(pPager->jfd, zHeader, nHeader, pPager->journalOff); - assert( pPager->journalHdr <= pPager->journalOff ); - pPager->journalOff += nHeader; - } - - return rc; -} - -/* -** The journal file must be open when this is called. A journal header file -** (JOURNAL_HDR_SZ bytes) is read from the current location in the journal -** file. The current location in the journal file is given by -** pPager->journalOff. See comments above function writeJournalHdr() for -** a description of the journal header format. -** -** If the header is read successfully, *pNRec is set to the number of -** page records following this header and *pDbSize is set to the size of the -** database before the transaction began, in pages. Also, pPager->cksumInit -** is set to the value read from the journal header. SQLITE_OK is returned -** in this case. -** -** If the journal header file appears to be corrupted, SQLITE_DONE is -** returned and *pNRec and *PDbSize are undefined. If JOURNAL_HDR_SZ bytes -** cannot be read from the journal file an error code is returned. -*/ -static int readJournalHdr( - Pager *pPager, /* Pager object */ - int isHot, - i64 journalSize, /* Size of the open journal file in bytes */ - u32 *pNRec, /* OUT: Value read from the nRec field */ - u32 *pDbSize /* OUT: Value of original database size field */ -){ - int rc; /* Return code */ - unsigned char aMagic[8]; /* A buffer to hold the magic header */ - i64 iHdrOff; /* Offset of journal header being read */ - - assert( isOpen(pPager->jfd) ); /* Journal file must be open. */ - - /* Advance Pager.journalOff to the start of the next sector. If the - ** journal file is too small for there to be a header stored at this - ** point, return SQLITE_DONE. - */ - pPager->journalOff = journalHdrOffset(pPager); - if( pPager->journalOff+JOURNAL_HDR_SZ(pPager) > journalSize ){ - return SQLITE_DONE; - } - iHdrOff = pPager->journalOff; - - /* Read in the first 8 bytes of the journal header. If they do not match - ** the magic string found at the start of each journal header, return - ** SQLITE_DONE. If an IO error occurs, return an error code. Otherwise, - ** proceed. - */ - if( isHot || iHdrOff!=pPager->journalHdr ){ - rc = sqlite3OsRead(pPager->jfd, aMagic, sizeof(aMagic), iHdrOff); - if( rc ){ - return rc; - } - if( memcmp(aMagic, aJournalMagic, sizeof(aMagic))!=0 ){ - return SQLITE_DONE; - } - } - - /* Read the first three 32-bit fields of the journal header: The nRec - ** field, the checksum-initializer and the database size at the start - ** of the transaction. Return an error code if anything goes wrong. 
- */ - if( SQLITE_OK!=(rc = read32bits(pPager->jfd, iHdrOff+8, pNRec)) - || SQLITE_OK!=(rc = read32bits(pPager->jfd, iHdrOff+12, &pPager->cksumInit)) - || SQLITE_OK!=(rc = read32bits(pPager->jfd, iHdrOff+16, pDbSize)) - ){ - return rc; - } - - if( pPager->journalOff==0 ){ - u32 iPageSize; /* Page-size field of journal header */ - u32 iSectorSize; /* Sector-size field of journal header */ - - /* Read the page-size and sector-size journal header fields. */ - if( SQLITE_OK!=(rc = read32bits(pPager->jfd, iHdrOff+20, &iSectorSize)) - || SQLITE_OK!=(rc = read32bits(pPager->jfd, iHdrOff+24, &iPageSize)) - ){ - return rc; - } - - /* Versions of SQLite prior to 3.5.8 set the page-size field of the - ** journal header to zero. In this case, assume that the Pager.pageSize - ** variable is already set to the correct page size. - */ - if( iPageSize==0 ){ - iPageSize = pPager->pageSize; - } - - /* Check that the values read from the page-size and sector-size fields - ** are within range. To be 'in range', both values need to be a power - ** of two greater than or equal to 512 or 32, and not greater than their - ** respective compile time maximum limits. - */ - if( iPageSize<512 || iSectorSize<32 - || iPageSize>SQLITE_MAX_PAGE_SIZE || iSectorSize>MAX_SECTOR_SIZE - || ((iPageSize-1)&iPageSize)!=0 || ((iSectorSize-1)&iSectorSize)!=0 - ){ - /* If the either the page-size or sector-size in the journal-header is - ** invalid, then the process that wrote the journal-header must have - ** crashed before the header was synced. In this case stop reading - ** the journal file here. - */ - return SQLITE_DONE; - } - - /* Update the page-size to match the value read from the journal. - ** Use a testcase() macro to make sure that malloc failure within - ** PagerSetPagesize() is tested. - */ - rc = sqlite3PagerSetPagesize(pPager, &iPageSize, -1); - testcase( rc!=SQLITE_OK ); - - /* Update the assumed sector-size to match the value used by - ** the process that created this journal. If this journal was - ** created by a process other than this one, then this routine - ** is being called from within pager_playback(). The local value - ** of Pager.sectorSize is restored at the end of that routine. - */ - pPager->sectorSize = iSectorSize; - } - - pPager->journalOff += JOURNAL_HDR_SZ(pPager); - return rc; -} - - -/* -** Write the supplied master journal name into the journal file for pager -** pPager at the current location. The master journal name must be the last -** thing written to a journal file. If the pager is in full-sync mode, the -** journal file descriptor is advanced to the next sector boundary before -** anything is written. The format is: -** -** + 4 bytes: PAGER_MJ_PGNO. -** + N bytes: Master journal filename in utf-8. -** + 4 bytes: N (length of master journal name in bytes, no nul-terminator). -** + 4 bytes: Master journal name checksum. -** + 8 bytes: aJournalMagic[]. -** -** The master journal page checksum is the sum of the bytes in the master -** journal name, where each byte is interpreted as a signed 8-bit integer. -** -** If zMaster is a NULL pointer (occurs for a single database transaction), -** this call is a no-op. 
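The master-journal trailer format just described is small enough to work through directly. The sketch below computes the name checksum and the number of bytes the journal grows by; the name "main.db-mjXXXXXX" is a hypothetical example, and mj_cksum() is a standalone illustration of the rule stated above (sum of the name's bytes, each taken as a signed 8-bit integer), not the writeMasterJournal() code itself.

#include <stdio.h>
#include <stdint.h>
#include <string.h>

/* Checksum of the master-journal name, per the comment above. */
static uint32_t mj_cksum(const char *zMaster){
  uint32_t cksum = 0;
  size_t i;
  for(i=0; zMaster[i]; i++){
    cksum += (uint32_t)(int8_t)zMaster[i];
  }
  return cksum;
}

int main(void){
  const char *zMaster = "main.db-mjXXXXXX";   /* hypothetical master-journal name */
  size_t n = strlen(zMaster);

  /* Trailer appended to the journal:
  **   4 bytes  PAGER_MJ_PGNO
  **   n bytes  master-journal name (UTF-8)
  **   4 bytes  n
  **   4 bytes  checksum
  **   8 bytes  aJournalMagic
  ** so the journal grows by n + 20 bytes. */
  printf("name length %zu, checksum 0x%08x, record size %zu bytes\n",
         n, mj_cksum(zMaster), n + 20);
  return 0;
}

The n + 20 figure matches the journalOff adjustment made after the trailer is written, and the checksum lets readMasterJournal() reject a name whose sectors were only partially written.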
-*/ -static int writeMasterJournal(Pager *pPager, const char *zMaster){ - int rc; /* Return code */ - int nMaster; /* Length of string zMaster */ - i64 iHdrOff; /* Offset of header in journal file */ - i64 jrnlSize; /* Size of journal file on disk */ - u32 cksum = 0; /* Checksum of string zMaster */ - - assert( pPager->setMaster==0 ); - assert( !pagerUseWal(pPager) ); - - if( !zMaster - || pPager->journalMode==PAGER_JOURNALMODE_MEMORY - || !isOpen(pPager->jfd) - ){ - return SQLITE_OK; - } - pPager->setMaster = 1; - assert( pPager->journalHdr <= pPager->journalOff ); - - /* Calculate the length in bytes and the checksum of zMaster */ - for(nMaster=0; zMaster[nMaster]; nMaster++){ - cksum += zMaster[nMaster]; - } - - /* If in full-sync mode, advance to the next disk sector before writing - ** the master journal name. This is in case the previous page written to - ** the journal has already been synced. - */ - if( pPager->fullSync ){ - pPager->journalOff = journalHdrOffset(pPager); - } - iHdrOff = pPager->journalOff; - - /* Write the master journal data to the end of the journal file. If - ** an error occurs, return the error code to the caller. - */ - if( (0 != (rc = write32bits(pPager->jfd, iHdrOff, PAGER_MJ_PGNO(pPager)))) - || (0 != (rc = sqlite3OsWrite(pPager->jfd, zMaster, nMaster, iHdrOff+4))) - || (0 != (rc = write32bits(pPager->jfd, iHdrOff+4+nMaster, nMaster))) - || (0 != (rc = write32bits(pPager->jfd, iHdrOff+4+nMaster+4, cksum))) - || (0 != (rc = sqlite3OsWrite(pPager->jfd, aJournalMagic, 8, iHdrOff+4+nMaster+8))) - ){ - return rc; - } - pPager->journalOff += (nMaster+20); - - /* If the pager is in peristent-journal mode, then the physical - ** journal-file may extend past the end of the master-journal name - ** and 8 bytes of magic data just written to the file. This is - ** dangerous because the code to rollback a hot-journal file - ** will not be able to find the master-journal name to determine - ** whether or not the journal is hot. - ** - ** Easiest thing to do in this scenario is to truncate the journal - ** file to the required size. - */ - if( SQLITE_OK==(rc = sqlite3OsFileSize(pPager->jfd, &jrnlSize)) - && jrnlSize>pPager->journalOff - ){ - rc = sqlite3OsTruncate(pPager->jfd, pPager->journalOff); - } - return rc; -} - -/* -** Find a page in the hash table given its page number. Return -** a pointer to the page or NULL if the requested page is not -** already in memory. -*/ -static PgHdr *pager_lookup(Pager *pPager, Pgno pgno){ - PgHdr *p = 0; /* Return value */ - - /* It is not possible for a call to PcacheFetch() with createFlag==0 to - ** fail, since no attempt to allocate dynamic memory will be made. - */ - (void)sqlite3PcacheFetch(pPager->pPCache, pgno, 0, &p); - return p; -} - -/* -** Discard the entire contents of the in-memory page-cache. -*/ -static void pager_reset(Pager *pPager){ - sqlite3BackupRestart(pPager->pBackup); - sqlite3PcacheClear(pPager->pPCache); -} - -/* -** Free all structures in the Pager.aSavepoint[] array and set both -** Pager.aSavepoint and Pager.nSavepoint to zero. Close the sub-journal -** if it is open and the pager is not in exclusive mode. 
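For illustration only (nothing below is SQLite code; the helper names mj_name_cksum and mj_record_size are invented for this sketch), the master-journal record that writeMasterJournal() appends can be sized and checksummed as described in the format comment above:

  #include <stddef.h>
  #include <stdint.h>
  #include <string.h>

  /* Sketch: checksum of the master-journal name, i.e. the sum of its bytes
  ** with each byte read as a signed 8-bit integer, per the format above. */
  static uint32_t mj_name_cksum(const char *zMaster){
    uint32_t cksum = 0;
    int i;
    for(i=0; zMaster[i]; i++){
      cksum += (signed char)zMaster[i];
    }
    return cksum;
  }

  /* Sketch: total bytes appended for a name of N bytes = 4 (PAGER_MJ_PGNO)
  ** + N (name) + 4 (N) + 4 (checksum) + 8 (magic) = N+20, which matches
  ** the journalOff += (nMaster+20) bookkeeping in writeMasterJournal(). */
  static size_t mj_record_size(const char *zMaster){
    return strlen(zMaster) + 20;
  }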
-*/ -static void releaseAllSavepoints(Pager *pPager){ - int ii; /* Iterator for looping through Pager.aSavepoint */ - for(ii=0; ii<pPager->nSavepoint; ii++){ - sqlite3BitvecDestroy(pPager->aSavepoint[ii].pInSavepoint); - } - if( !pPager->exclusiveMode || sqlite3IsMemJournal(pPager->sjfd) ){ - sqlite3OsClose(pPager->sjfd); - } - sqlite3_free(pPager->aSavepoint); - pPager->aSavepoint = 0; - pPager->nSavepoint = 0; - pPager->nSubRec = 0; -} - -/* -** Set the bit number pgno in the PagerSavepoint.pInSavepoint -** bitvecs of all open savepoints. Return SQLITE_OK if successful -** or SQLITE_NOMEM if a malloc failure occurs. -*/ -static int addToSavepointBitvecs(Pager *pPager, Pgno pgno){ - int ii; /* Loop counter */ - int rc = SQLITE_OK; /* Result code */ - - for(ii=0; ii<pPager->nSavepoint; ii++){ - PagerSavepoint *p = &pPager->aSavepoint[ii]; - if( pgno<=p->nOrig ){ - rc |= sqlite3BitvecSet(p->pInSavepoint, pgno); - testcase( rc==SQLITE_NOMEM ); - assert( rc==SQLITE_OK || rc==SQLITE_NOMEM ); - } - } - return rc; -} - -/* -** This function is a no-op if the pager is in exclusive mode and not -** in the ERROR state. Otherwise, it switches the pager to PAGER_OPEN -** state. -** -** If the pager is not in exclusive-access mode, the database file is -** completely unlocked. If the file is unlocked and the file-system does -** not exhibit the UNDELETABLE_WHEN_OPEN property, the journal file is -** closed (if it is open). -** -** If the pager is in ERROR state when this function is called, the -** contents of the pager cache are discarded before switching back to -** the OPEN state. Regardless of whether the pager is in exclusive-mode -** or not, any journal file left in the file-system will be treated -** as a hot-journal and rolled back the next time a read-transaction -** is opened (by this or by any other connection). -*/ -static void pager_unlock(Pager *pPager){ - - assert( pPager->eState==PAGER_READER - || pPager->eState==PAGER_OPEN - || pPager->eState==PAGER_ERROR - ); - - sqlite3BitvecDestroy(pPager->pInJournal); - pPager->pInJournal = 0; - releaseAllSavepoints(pPager); - - if( pagerUseWal(pPager) ){ - assert( !isOpen(pPager->jfd) ); - sqlite3WalEndReadTransaction(pPager->pWal); - pPager->eState = PAGER_OPEN; - }else if( !pPager->exclusiveMode ){ - int rc; /* Error code returned by pagerUnlockDb() */ - int iDc = isOpen(pPager->fd)?sqlite3OsDeviceCharacteristics(pPager->fd):0; - - /* If the operating system supports deletion of open files, then - ** close the journal file when dropping the database lock. Otherwise - ** another connection with journal_mode=delete might delete the file - ** out from under us. - */ - assert( (PAGER_JOURNALMODE_MEMORY & 5)!=1 ); - assert( (PAGER_JOURNALMODE_OFF & 5)!=1 ); - assert( (PAGER_JOURNALMODE_WAL & 5)!=1 ); - assert( (PAGER_JOURNALMODE_DELETE & 5)!=1 ); - assert( (PAGER_JOURNALMODE_TRUNCATE & 5)==1 ); - assert( (PAGER_JOURNALMODE_PERSIST & 5)==1 ); - if( 0==(iDc & SQLITE_IOCAP_UNDELETABLE_WHEN_OPEN) - || 1!=(pPager->journalMode & 5) - ){ - sqlite3OsClose(pPager->jfd); - } - - /* If the pager is in the ERROR state and the call to unlock the database - ** file fails, set the current lock to UNKNOWN_LOCK. See the comment - ** above the #define for UNKNOWN_LOCK for an explanation of why this - ** is necessary. - */ - rc = pagerUnlockDb(pPager, NO_LOCK); - if( rc!=SQLITE_OK && pPager->eState==PAGER_ERROR ){ - pPager->eLock = UNKNOWN_LOCK; - } - - /* The pager state may be changed from PAGER_ERROR to PAGER_OPEN here - ** without clearing the error code.
This is intentional - the error - ** code is cleared and the cache reset in the block below. - */ - assert( pPager->errCode || pPager->eState!=PAGER_ERROR ); - pPager->changeCountDone = 0; - pPager->eState = PAGER_OPEN; - } - - /* If Pager.errCode is set, the contents of the pager cache cannot be - ** trusted. Now that there are no outstanding references to the pager, - ** it can safely move back to PAGER_OPEN state. This happens in both - ** normal and exclusive-locking mode. - */ - if( pPager->errCode ){ - assert( !MEMDB ); - pager_reset(pPager); - pPager->changeCountDone = pPager->tempFile; - pPager->eState = PAGER_OPEN; - pPager->errCode = SQLITE_OK; - if( USEFETCH(pPager) ) sqlite3OsUnfetch(pPager->fd, 0, 0); - } - - pPager->journalOff = 0; - pPager->journalHdr = 0; - pPager->setMaster = 0; -} - -/* -** This function is called whenever an IOERR or FULL error that requires -** the pager to transition into the ERROR state may ahve occurred. -** The first argument is a pointer to the pager structure, the second -** the error-code about to be returned by a pager API function. The -** value returned is a copy of the second argument to this function. -** -** If the second argument is SQLITE_FULL, SQLITE_IOERR or one of the -** IOERR sub-codes, the pager enters the ERROR state and the error code -** is stored in Pager.errCode. While the pager remains in the ERROR state, -** all major API calls on the Pager will immediately return Pager.errCode. -** -** The ERROR state indicates that the contents of the pager-cache -** cannot be trusted. This state can be cleared by completely discarding -** the contents of the pager-cache. If a transaction was active when -** the persistent error occurred, then the rollback journal may need -** to be replayed to restore the contents of the database file (as if -** it were a hot-journal). -*/ -static int pager_error(Pager *pPager, int rc){ - int rc2 = rc & 0xff; - assert( rc==SQLITE_OK || !MEMDB ); - assert( - pPager->errCode==SQLITE_FULL || - pPager->errCode==SQLITE_OK || - (pPager->errCode & 0xff)==SQLITE_IOERR - ); - if( rc2==SQLITE_FULL || rc2==SQLITE_IOERR ){ - pPager->errCode = rc; - pPager->eState = PAGER_ERROR; - } - return rc; -} - -static int pager_truncate(Pager *pPager, Pgno nPage); - -/* -** This routine ends a transaction. A transaction is usually ended by -** either a COMMIT or a ROLLBACK operation. This routine may be called -** after rollback of a hot-journal, or if an error occurs while opening -** the journal file or writing the very first journal-header of a -** database transaction. -** -** This routine is never called in PAGER_ERROR state. If it is called -** in PAGER_NONE or PAGER_SHARED state and the lock held is less -** exclusive than a RESERVED lock, it is a no-op. -** -** Otherwise, any active savepoints are released. -** -** If the journal file is open, then it is "finalized". Once a journal -** file has been finalized it is not possible to use it to roll back a -** transaction. Nor will it be considered to be a hot-journal by this -** or any other database connection. Exactly how a journal is finalized -** depends on whether or not the pager is running in exclusive mode and -** the current journal-mode (Pager.journalMode value), as follows: -** -** journalMode==MEMORY -** Journal file descriptor is simply closed. This destroys an -** in-memory journal. -** -** journalMode==TRUNCATE -** Journal file is truncated to zero bytes in size. -** -** journalMode==PERSIST -** The first 28 bytes of the journal file are zeroed. 
This invalidates -** the first journal header in the file, and hence the entire journal -** file. An invalid journal file cannot be rolled back. -** -** journalMode==DELETE -** The journal file is closed and deleted using sqlite3OsDelete(). -** -** If the pager is running in exclusive mode, this method of finalizing -** the journal file is never used. Instead, if the journalMode is -** DELETE and the pager is in exclusive mode, the method described under -** journalMode==PERSIST is used instead. -** -** After the journal is finalized, the pager moves to PAGER_READER state. -** If running in non-exclusive rollback mode, the lock on the file is -** downgraded to a SHARED_LOCK. -** -** SQLITE_OK is returned if no error occurs. If an error occurs during -** any of the IO operations to finalize the journal file or unlock the -** database then the IO error code is returned to the user. If the -** operation to finalize the journal file fails, then the code still -** tries to unlock the database file if not in exclusive mode. If the -** unlock operation fails as well, then the first error code related -** to the first error encountered (the journal finalization one) is -** returned. -*/ -static int pager_end_transaction(Pager *pPager, int hasMaster, int bCommit){ - int rc = SQLITE_OK; /* Error code from journal finalization operation */ - int rc2 = SQLITE_OK; /* Error code from db file unlock operation */ - - /* Do nothing if the pager does not have an open write transaction - ** or at least a RESERVED lock. This function may be called when there - ** is no write-transaction active but a RESERVED or greater lock is - ** held under two circumstances: - ** - ** 1. After a successful hot-journal rollback, it is called with - ** eState==PAGER_NONE and eLock==EXCLUSIVE_LOCK. - ** - ** 2. If a connection with locking_mode=exclusive holding an EXCLUSIVE - ** lock switches back to locking_mode=normal and then executes a - ** read-transaction, this function is called with eState==PAGER_READER - ** and eLock==EXCLUSIVE_LOCK when the read-transaction is closed. - */ - assert( assert_pager_state(pPager) ); - assert( pPager->eState!=PAGER_ERROR ); - if( pPager->eState<PAGER_WRITER_LOCKED && pPager->eLock<RESERVED_LOCK ){ - return SQLITE_OK; - } - - releaseAllSavepoints(pPager); - assert( isOpen(pPager->jfd) || pPager->pInJournal==0 ); - if( isOpen(pPager->jfd) ){ - assert( !pagerUseWal(pPager) ); - - /* Finalize the journal file. */ - if( sqlite3IsMemJournal(pPager->jfd) ){ - assert( pPager->journalMode==PAGER_JOURNALMODE_MEMORY ); - sqlite3OsClose(pPager->jfd); - }else if( pPager->journalMode==PAGER_JOURNALMODE_TRUNCATE ){ - if( pPager->journalOff==0 ){ - rc = SQLITE_OK; - }else{ - rc = sqlite3OsTruncate(pPager->jfd, 0); - } - pPager->journalOff = 0; - }else if( pPager->journalMode==PAGER_JOURNALMODE_PERSIST - || (pPager->exclusiveMode && pPager->journalMode!=PAGER_JOURNALMODE_WAL) - ){ - rc = zeroJournalHdr(pPager, hasMaster); - pPager->journalOff = 0; - }else{ - /* This branch may be executed with Pager.journalMode==MEMORY if - ** a hot-journal was just rolled back. In this case the journal - ** file should be closed and deleted. If this connection writes to - ** the database file, it will do so using an in-memory journal.
- */ - int bDelete = (!pPager->tempFile && sqlite3JournalExists(pPager->jfd)); - assert( pPager->journalMode==PAGER_JOURNALMODE_DELETE - || pPager->journalMode==PAGER_JOURNALMODE_MEMORY - || pPager->journalMode==PAGER_JOURNALMODE_WAL - ); - sqlite3OsClose(pPager->jfd); - if( bDelete ){ - rc = sqlite3OsDelete(pPager->pVfs, pPager->zJournal, 0); - } - } - } - -#ifdef SQLITE_CHECK_PAGES - sqlite3PcacheIterateDirty(pPager->pPCache, pager_set_pagehash); - if( pPager->dbSize==0 && sqlite3PcacheRefCount(pPager->pPCache)>0 ){ - PgHdr *p = pager_lookup(pPager, 1); - if( p ){ - p->pageHash = 0; - sqlite3PagerUnrefNotNull(p); - } - } -#endif - - sqlite3BitvecDestroy(pPager->pInJournal); - pPager->pInJournal = 0; - pPager->nRec = 0; - sqlite3PcacheCleanAll(pPager->pPCache); - sqlite3PcacheTruncate(pPager->pPCache, pPager->dbSize); - - if( pagerUseWal(pPager) ){ - /* Drop the WAL write-lock, if any. Also, if the connection was in - ** locking_mode=exclusive mode but is no longer, drop the EXCLUSIVE - ** lock held on the database file. - */ - rc2 = sqlite3WalEndWriteTransaction(pPager->pWal); - assert( rc2==SQLITE_OK ); - }else if( rc==SQLITE_OK && bCommit && pPager->dbFileSize>pPager->dbSize ){ - /* This branch is taken when committing a transaction in rollback-journal - ** mode if the database file on disk is larger than the database image. - ** At this point the journal has been finalized and the transaction - ** successfully committed, but the EXCLUSIVE lock is still held on the - ** file. So it is safe to truncate the database file to its minimum - ** required size. */ - assert( pPager->eLock==EXCLUSIVE_LOCK ); - rc = pager_truncate(pPager, pPager->dbSize); - } - - if( rc==SQLITE_OK && bCommit && isOpen(pPager->fd) ){ - rc = sqlite3OsFileControl(pPager->fd, SQLITE_FCNTL_COMMIT_PHASETWO, 0); - if( rc==SQLITE_NOTFOUND ) rc = SQLITE_OK; - } - - if( !pPager->exclusiveMode - && (!pagerUseWal(pPager) || sqlite3WalExclusiveMode(pPager->pWal, 0)) - ){ - rc2 = pagerUnlockDb(pPager, SHARED_LOCK); - pPager->changeCountDone = 0; - } - pPager->eState = PAGER_READER; - pPager->setMaster = 0; - - return (rc==SQLITE_OK?rc2:rc); -} - -/* -** Execute a rollback if a transaction is active and unlock the -** database file. -** -** If the pager has already entered the ERROR state, do not attempt -** the rollback at this time. Instead, pager_unlock() is called. The -** call to pager_unlock() will discard all in-memory pages, unlock -** the database file and move the pager back to OPEN state. If this -** means that there is a hot-journal left in the file-system, the next -** connection to obtain a shared lock on the pager (which may be this one) -** will roll it back. -** -** If the pager has not already entered the ERROR state, but an IO or -** malloc error occurs during a rollback, then this will itself cause -** the pager to enter the ERROR state. Which will be cleared by the -** call to pager_unlock(), as described above. -*/ -static void pagerUnlockAndRollback(Pager *pPager){ - if( pPager->eState!=PAGER_ERROR && pPager->eState!=PAGER_OPEN ){ - assert( assert_pager_state(pPager) ); - if( pPager->eState>=PAGER_WRITER_LOCKED ){ - sqlite3BeginBenignMalloc(); - sqlite3PagerRollback(pPager); - sqlite3EndBenignMalloc(); - }else if( !pPager->exclusiveMode ){ - assert( pPager->eState==PAGER_READER ); - pager_end_transaction(pPager, 0, 0); - } - } - pager_unlock(pPager); -} - -/* -** Parameter aData must point to a buffer of pPager->pageSize bytes -** of data. 
Compute and return a checksum based ont the contents of the -** page of data and the current value of pPager->cksumInit. -** -** This is not a real checksum. It is really just the sum of the -** random initial value (pPager->cksumInit) and every 200th byte -** of the page data, starting with byte offset (pPager->pageSize%200). -** Each byte is interpreted as an 8-bit unsigned integer. -** -** Changing the formula used to compute this checksum results in an -** incompatible journal file format. -** -** If journal corruption occurs due to a power failure, the most likely -** scenario is that one end or the other of the record will be changed. -** It is much less likely that the two ends of the journal record will be -** correct and the middle be corrupt. Thus, this "checksum" scheme, -** though fast and simple, catches the mostly likely kind of corruption. -*/ -static u32 pager_cksum(Pager *pPager, const u8 *aData){ - u32 cksum = pPager->cksumInit; /* Checksum value to return */ - int i = pPager->pageSize-200; /* Loop counter */ - while( i>0 ){ - cksum += aData[i]; - i -= 200; - } - return cksum; -} - -/* -** Report the current page size and number of reserved bytes back -** to the codec. -*/ -#ifdef SQLITE_HAS_CODEC -static void pagerReportSize(Pager *pPager){ - if( pPager->xCodecSizeChng ){ - pPager->xCodecSizeChng(pPager->pCodec, pPager->pageSize, - (int)pPager->nReserve); - } -} -#else -# define pagerReportSize(X) /* No-op if we do not support a codec */ -#endif - -/* -** Read a single page from either the journal file (if isMainJrnl==1) or -** from the sub-journal (if isMainJrnl==0) and playback that page. -** The page begins at offset *pOffset into the file. The *pOffset -** value is increased to the start of the next page in the journal. -** -** The main rollback journal uses checksums - the statement journal does -** not. -** -** If the page number of the page record read from the (sub-)journal file -** is greater than the current value of Pager.dbSize, then playback is -** skipped and SQLITE_OK is returned. -** -** If pDone is not NULL, then it is a record of pages that have already -** been played back. If the page at *pOffset has already been played back -** (if the corresponding pDone bit is set) then skip the playback. -** Make sure the pDone bit corresponding to the *pOffset page is set -** prior to returning. -** -** If the page record is successfully read from the (sub-)journal file -** and played back, then SQLITE_OK is returned. If an IO error occurs -** while reading the record from the (sub-)journal file or while writing -** to the database file, then the IO error code is returned. If data -** is successfully read from the (sub-)journal file but appears to be -** corrupted, SQLITE_DONE is returned. Data is considered corrupted in -** two circumstances: -** -** * If the record page-number is illegal (0 or PAGER_MJ_PGNO), or -** * If the record is being rolled back from the main journal file -** and the checksum field does not match the record content. -** -** Neither of these two scenarios are possible during a savepoint rollback. -** -** If this is a savepoint rollback, then memory may have to be dynamically -** allocated by this function. If this is the case and an allocation fails, -** SQLITE_NOMEM is returned. -*/ -static int pager_playback_one_page( - Pager *pPager, /* The pager being played back */ - i64 *pOffset, /* Offset of record to playback */ - Bitvec *pDone, /* Bitvec of pages already played back */ - int isMainJrnl, /* 1 -> main journal. 0 -> sub-journal. 
*/ - int isSavepnt /* True for a savepoint rollback */ -){ - int rc; - PgHdr *pPg; /* An existing page in the cache */ - Pgno pgno; /* The page number of a page in journal */ - u32 cksum; /* Checksum used for sanity checking */ - char *aData; /* Temporary storage for the page */ - sqlite3_file *jfd; /* The file descriptor for the journal file */ - int isSynced; /* True if journal page is synced */ - - assert( (isMainJrnl&~1)==0 ); /* isMainJrnl is 0 or 1 */ - assert( (isSavepnt&~1)==0 ); /* isSavepnt is 0 or 1 */ - assert( isMainJrnl || pDone ); /* pDone always used on sub-journals */ - assert( isSavepnt || pDone==0 ); /* pDone never used on non-savepoint */ - - aData = pPager->pTmpSpace; - assert( aData ); /* Temp storage must have already been allocated */ - assert( pagerUseWal(pPager)==0 || (!isMainJrnl && isSavepnt) ); - - /* Either the state is greater than PAGER_WRITER_CACHEMOD (a transaction - ** or savepoint rollback done at the request of the caller) or this is - ** a hot-journal rollback. If it is a hot-journal rollback, the pager - ** is in state OPEN and holds an EXCLUSIVE lock. Hot-journal rollback - ** only reads from the main journal, not the sub-journal. - */ - assert( pPager->eState>=PAGER_WRITER_CACHEMOD - || (pPager->eState==PAGER_OPEN && pPager->eLock==EXCLUSIVE_LOCK) - ); - assert( pPager->eState>=PAGER_WRITER_CACHEMOD || isMainJrnl ); - - /* Read the page number and page data from the journal or sub-journal - ** file. Return an error code to the caller if an IO error occurs. - */ - jfd = isMainJrnl ? pPager->jfd : pPager->sjfd; - rc = read32bits(jfd, *pOffset, &pgno); - if( rc!=SQLITE_OK ) return rc; - rc = sqlite3OsRead(jfd, (u8*)aData, pPager->pageSize, (*pOffset)+4); - if( rc!=SQLITE_OK ) return rc; - *pOffset += pPager->pageSize + 4 + isMainJrnl*4; - - /* Sanity checking on the page. This is more important that I originally - ** thought. If a power failure occurs while the journal is being written, - ** it could cause invalid data to be written into the journal. We need to - ** detect this invalid data (with high probability) and ignore it. - */ - if( pgno==0 || pgno==PAGER_MJ_PGNO(pPager) ){ - assert( !isSavepnt ); - return SQLITE_DONE; - } - if( pgno>(Pgno)pPager->dbSize || sqlite3BitvecTest(pDone, pgno) ){ - return SQLITE_OK; - } - if( isMainJrnl ){ - rc = read32bits(jfd, (*pOffset)-4, &cksum); - if( rc ) return rc; - if( !isSavepnt && pager_cksum(pPager, (u8*)aData)!=cksum ){ - return SQLITE_DONE; - } - } - - /* If this page has already been played by before during the current - ** rollback, then don't bother to play it back again. - */ - if( pDone && (rc = sqlite3BitvecSet(pDone, pgno))!=SQLITE_OK ){ - return rc; - } - - /* When playing back page 1, restore the nReserve setting - */ - if( pgno==1 && pPager->nReserve!=((u8*)aData)[20] ){ - pPager->nReserve = ((u8*)aData)[20]; - pagerReportSize(pPager); - } - - /* If the pager is in CACHEMOD state, then there must be a copy of this - ** page in the pager cache. In this case just update the pager cache, - ** not the database file. The page is left marked dirty in this case. - ** - ** An exception to the above rule: If the database is in no-sync mode - ** and a page is moved during an incremental vacuum then the page may - ** not be in the pager cache. Later: if a malloc() or IO error occurs - ** during a Movepage() call, then the page may not be in the cache - ** either. So the condition described in the above paragraph is not - ** assert()able. 
- ** - ** If in WRITER_DBMOD, WRITER_FINISHED or OPEN state, then we update the - ** pager cache if it exists and the main file. The page is then marked - ** not dirty. Since this code is only executed in PAGER_OPEN state for - ** a hot-journal rollback, it is guaranteed that the page-cache is empty - ** if the pager is in OPEN state. - ** - ** Ticket #1171: The statement journal might contain page content that is - ** different from the page content at the start of the transaction. - ** This occurs when a page is changed prior to the start of a statement - ** then changed again within the statement. When rolling back such a - ** statement we must not write to the original database unless we know - ** for certain that original page contents are synced into the main rollback - ** journal. Otherwise, a power loss might leave modified data in the - ** database file without an entry in the rollback journal that can - ** restore the database to its original form. Two conditions must be - ** met before writing to the database files. (1) the database must be - ** locked. (2) we know that the original page content is fully synced - ** in the main journal either because the page is not in cache or else - ** the page is marked as needSync==0. - ** - ** 2008-04-14: When attempting to vacuum a corrupt database file, it - ** is possible to fail a statement on a database that does not yet exist. - ** Do not attempt to write if database file has never been opened. - */ - if( pagerUseWal(pPager) ){ - pPg = 0; - }else{ - pPg = pager_lookup(pPager, pgno); - } - assert( pPg || !MEMDB ); - assert( pPager->eState!=PAGER_OPEN || pPg==0 ); - PAGERTRACE(("PLAYBACK %d page %d hash(%08x) %s\n", - PAGERID(pPager), pgno, pager_datahash(pPager->pageSize, (u8*)aData), - (isMainJrnl?"main-journal":"sub-journal") - )); - if( isMainJrnl ){ - isSynced = pPager->noSync || (*pOffset <= pPager->journalHdr); - }else{ - isSynced = (pPg==0 || 0==(pPg->flags & PGHDR_NEED_SYNC)); - } - if( isOpen(pPager->fd) - && (pPager->eState>=PAGER_WRITER_DBMOD || pPager->eState==PAGER_OPEN) - && isSynced - ){ - i64 ofst = (pgno-1)*(i64)pPager->pageSize; - testcase( !isSavepnt && pPg!=0 && (pPg->flags&PGHDR_NEED_SYNC)!=0 ); - assert( !pagerUseWal(pPager) ); - rc = sqlite3OsWrite(pPager->fd, (u8 *)aData, pPager->pageSize, ofst); - if( pgno>pPager->dbFileSize ){ - pPager->dbFileSize = pgno; - } - if( pPager->pBackup ){ - CODEC1(pPager, aData, pgno, 3, rc=SQLITE_NOMEM); - sqlite3BackupUpdate(pPager->pBackup, pgno, (u8*)aData); - CODEC2(pPager, aData, pgno, 7, rc=SQLITE_NOMEM, aData); - } - }else if( !isMainJrnl && pPg==0 ){ - /* If this is a rollback of a savepoint and data was not written to - ** the database and the page is not in-memory, there is a potential - ** problem. When the page is next fetched by the b-tree layer, it - ** will be read from the database file, which may or may not be - ** current. - ** - ** There are a couple of different ways this can happen. All are quite - ** obscure. When running in synchronous mode, this can only happen - ** if the page is on the free-list at the start of the transaction, then - ** populated, then moved using sqlite3PagerMovepage(). - ** - ** The solution is to add an in-memory page to the cache containing - ** the data just read from the sub-journal. Mark the page as dirty - ** and if the pager requires a journal-sync, then mark the page as - ** requiring a journal-sync before it is written. 
- */ - assert( isSavepnt ); - assert( (pPager->doNotSpill & SPILLFLAG_ROLLBACK)==0 ); - pPager->doNotSpill |= SPILLFLAG_ROLLBACK; - rc = sqlite3PagerAcquire(pPager, pgno, &pPg, 1); - assert( (pPager->doNotSpill & SPILLFLAG_ROLLBACK)!=0 ); - pPager->doNotSpill &= ~SPILLFLAG_ROLLBACK; - if( rc!=SQLITE_OK ) return rc; - pPg->flags &= ~PGHDR_NEED_READ; - sqlite3PcacheMakeDirty(pPg); - } - if( pPg ){ - /* No page should ever be explicitly rolled back that is in use, except - ** for page 1 which is held in use in order to keep the lock on the - ** database active. However such a page may be rolled back as a result - ** of an internal error resulting in an automatic call to - ** sqlite3PagerRollback(). - */ - void *pData; - pData = pPg->pData; - memcpy(pData, (u8*)aData, pPager->pageSize); - pPager->xReiniter(pPg); - if( isMainJrnl && (!isSavepnt || *pOffset<=pPager->journalHdr) ){ - /* If the contents of this page were just restored from the main - ** journal file, then its content must be as they were when the - ** transaction was first opened. In this case we can mark the page - ** as clean, since there will be no need to write it out to the - ** database. - ** - ** There is one exception to this rule. If the page is being rolled - ** back as part of a savepoint (or statement) rollback from an - ** unsynced portion of the main journal file, then it is not safe - ** to mark the page as clean. This is because marking the page as - ** clean will clear the PGHDR_NEED_SYNC flag. Since the page is - ** already in the journal file (recorded in Pager.pInJournal) and - ** the PGHDR_NEED_SYNC flag is cleared, if the page is written to - ** again within this transaction, it will be marked as dirty but - ** the PGHDR_NEED_SYNC flag will not be set. It could then potentially - ** be written out into the database file before its journal file - ** segment is synced. If a crash occurs during or following this, - ** database corruption may ensue. - */ - assert( !pagerUseWal(pPager) ); - sqlite3PcacheMakeClean(pPg); - } - pager_set_pagehash(pPg); - - /* If this was page 1, then restore the value of Pager.dbFileVers. - ** Do this before any decoding. */ - if( pgno==1 ){ - memcpy(&pPager->dbFileVers, &((u8*)pData)[24],sizeof(pPager->dbFileVers)); - } - - /* Decode the page just read from disk */ - CODEC1(pPager, pData, pPg->pgno, 3, rc=SQLITE_NOMEM); - sqlite3PcacheRelease(pPg); - } - return rc; -} - -/* -** Parameter zMaster is the name of a master journal file. A single journal -** file that referred to the master journal file has just been rolled back. -** This routine checks if it is possible to delete the master journal file, -** and does so if it is. -** -** Argument zMaster may point to Pager.pTmpSpace. So that buffer is not -** available for use within this function. -** -** When a master journal file is created, it is populated with the names -** of all of its child journals, one after another, formatted as utf-8 -** encoded text. The end of each child journal file is marked with a -** nul-terminator byte (0x00). i.e. the entire contents of a master journal -** file for a transaction involving two databases might be: -** -** "/home/bill/a.db-journal\x00/home/bill/b.db-journal\x00" -** -** A master journal file may only be deleted once all of its child -** journals have been rolled back. -** -** This function reads the contents of the master-journal file into -** memory and loops through each of the child journal names. 
For -** each child journal, it checks if: -** -** * if the child journal exists, and if so -** * if the child journal contains a reference to master journal -** file zMaster -** -** If a child journal can be found that matches both of the criteria -** above, this function returns without doing anything. Otherwise, if -** no such child journal can be found, file zMaster is deleted from -** the file-system using sqlite3OsDelete(). -** -** If an IO error occurs within this function, an error code is returned. This -** function allocates memory by calling sqlite3Malloc(). If an allocation -** fails, SQLITE_NOMEM is returned. Otherwise, if no IO or malloc errors -** occur, SQLITE_OK is returned. -** -** TODO: This function allocates a single block of memory to load -** the entire contents of the master journal file. This could be -** a couple of kilobytes or so - potentially larger than the page -** size. -*/ -static int pager_delmaster(Pager *pPager, const char *zMaster){ - sqlite3_vfs *pVfs = pPager->pVfs; - int rc; /* Return code */ - sqlite3_file *pMaster; /* Malloc'd master-journal file descriptor */ - sqlite3_file *pJournal; /* Malloc'd child-journal file descriptor */ - char *zMasterJournal = 0; /* Contents of master journal file */ - i64 nMasterJournal; /* Size of master journal file */ - char *zJournal; /* Pointer to one journal within MJ file */ - char *zMasterPtr; /* Space to hold MJ filename from a journal file */ - int nMasterPtr; /* Amount of space allocated to zMasterPtr[] */ - - /* Allocate space for both the pJournal and pMaster file descriptors. - ** If successful, open the master journal file for reading. - */ - pMaster = (sqlite3_file *)sqlite3MallocZero(pVfs->szOsFile * 2); - pJournal = (sqlite3_file *)(((u8 *)pMaster) + pVfs->szOsFile); - if( !pMaster ){ - rc = SQLITE_NOMEM; - }else{ - const int flags = (SQLITE_OPEN_READONLY|SQLITE_OPEN_MASTER_JOURNAL); - rc = sqlite3OsOpen(pVfs, zMaster, pMaster, flags, 0); - } - if( rc!=SQLITE_OK ) goto delmaster_out; - - /* Load the entire master journal file into space obtained from - ** sqlite3_malloc() and pointed to by zMasterJournal. Also obtain - ** sufficient space (in zMasterPtr) to hold the names of master - ** journal files extracted from regular rollback-journals. - */ - rc = sqlite3OsFileSize(pMaster, &nMasterJournal); - if( rc!=SQLITE_OK ) goto delmaster_out; - nMasterPtr = pVfs->mxPathname+1; - zMasterJournal = sqlite3Malloc((int)nMasterJournal + nMasterPtr + 1); - if( !zMasterJournal ){ - rc = SQLITE_NOMEM; - goto delmaster_out; - } - zMasterPtr = &zMasterJournal[nMasterJournal+1]; - rc = sqlite3OsRead(pMaster, zMasterJournal, (int)nMasterJournal, 0); - if( rc!=SQLITE_OK ) goto delmaster_out; - zMasterJournal[nMasterJournal] = 0; - - zJournal = zMasterJournal; - while( (zJournal-zMasterJournal)<nMasterJournal ){ - int exists; - rc = sqlite3OsAccess(pVfs, zJournal, SQLITE_ACCESS_EXISTS, &exists); - if( rc!=SQLITE_OK ){ - goto delmaster_out; - } - if( exists ){ - /* One of the journals pointed to by the master journal exists. - ** Open it and check if it points at the master journal. If - ** so, return without deleting the master journal file. - */ - int c; - int flags = (SQLITE_OPEN_READONLY|SQLITE_OPEN_MAIN_JOURNAL); - rc = sqlite3OsOpen(pVfs, zJournal, pJournal, flags, 0); - if( rc!=SQLITE_OK ){ - goto delmaster_out; - } - - rc = readMasterJournal(pJournal, zMasterPtr, nMasterPtr); - sqlite3OsClose(pJournal); - if( rc!=SQLITE_OK ){ - goto delmaster_out; - } - - c = zMasterPtr[0]!=0 && strcmp(zMasterPtr, zMaster)==0; - if( c ){ - /* We have a match. Do not delete the master journal file. */ - goto delmaster_out; - } - } - zJournal += (sqlite3Strlen30(zJournal)+1); - } - - sqlite3OsClose(pMaster); - rc = sqlite3OsDelete(pVfs, zMaster, 0); - -delmaster_out: - sqlite3_free(zMasterJournal); - if( pMaster ){ - sqlite3OsClose(pMaster); - sqlite3_free(pMaster); - } - return rc; -} - -/* -** This function is used to change the actual size of the database -** file in the file-system. This only happens when the database is -** truncated as part of a transaction commit. -** -** If the main database file is not open, or the pager is not in either -** DBMOD or OPEN state, this function is a no-op. Otherwise, the size -** of the file is changed to nPage pages (nPage*pPager->pageSize bytes). -** If the file on disk is currently larger than nPage pages, then use the VFS -** xTruncate() method to truncate it. -** -** Or, it might be the case that the file on disk is smaller than -** nPage pages. Some operating system implementations can get confused if -** you try to truncate a file to some size that is larger than it -** currently is, so detect this case and write a single zero byte to -** the end of the new file instead. -** -** If successful, return SQLITE_OK. If an IO error occurs while modifying -** the database file, return the error code to the caller.
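As a rough standalone sketch (visit_child_journals and its callback are invented for illustration, not SQLite APIs), the walk that pager_delmaster() performs over the nul-separated child-journal names in the master-journal image looks like this:

  #include <stddef.h>
  #include <string.h>

  /* Sketch: visit each nul-terminated child journal name stored back to back
  ** in a master-journal image of nBuf bytes, e.g.
  ** "/home/bill/a.db-journal\0/home/bill/b.db-journal\0". */
  static void visit_child_journals(const char *zBuf, size_t nBuf,
                                   void (*xVisit)(const char *zName)){
    size_t i = 0;
    while( i<nBuf && zBuf[i] ){
      xVisit(&zBuf[i]);
      i += strlen(&zBuf[i]) + 1;  /* step over the name and its terminator */
    }
  }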
-*/ -static int pager_truncate(Pager *pPager, Pgno nPage){ - int rc = SQLITE_OK; - assert( pPager->eState!=PAGER_ERROR ); - assert( pPager->eState!=PAGER_READER ); - - if( isOpen(pPager->fd) - && (pPager->eState>=PAGER_WRITER_DBMOD || pPager->eState==PAGER_OPEN) - ){ - i64 currentSize, newSize; - int szPage = pPager->pageSize; - assert( pPager->eLock==EXCLUSIVE_LOCK ); - /* TODO: Is it safe to use Pager.dbFileSize here? */ - rc = sqlite3OsFileSize(pPager->fd, &currentSize); - newSize = szPage*(i64)nPage; - if( rc==SQLITE_OK && currentSize!=newSize ){ - if( currentSize>newSize ){ - rc = sqlite3OsTruncate(pPager->fd, newSize); - }else if( (currentSize+szPage)<=newSize ){ - char *pTmp = pPager->pTmpSpace; - memset(pTmp, 0, szPage); - testcase( (newSize-szPage) == currentSize ); - testcase( (newSize-szPage) > currentSize ); - rc = sqlite3OsWrite(pPager->fd, pTmp, szPage, newSize-szPage); - } - if( rc==SQLITE_OK ){ - pPager->dbFileSize = nPage; - } - } - } - return rc; -} - -/* -** Return a sanitized version of the sector-size of OS file pFile. The -** return value is guaranteed to lie between 32 and MAX_SECTOR_SIZE. -*/ -SQLITE_PRIVATE int sqlite3SectorSize(sqlite3_file *pFile){ - int iRet = sqlite3OsSectorSize(pFile); - if( iRet<32 ){ - iRet = 512; - }else if( iRet>MAX_SECTOR_SIZE ){ - assert( MAX_SECTOR_SIZE>=512 ); - iRet = MAX_SECTOR_SIZE; - } - return iRet; -} - -/* -** Set the value of the Pager.sectorSize variable for the given -** pager based on the value returned by the xSectorSize method -** of the open database file. The sector size will be used -** to determine the size and alignment of journal header and -** master journal pointers within created journal files. -** -** For temporary files the effective sector size is always 512 bytes. -** -** Otherwise, for non-temporary files, the effective sector size is -** the value returned by the xSectorSize() method rounded up to 32 if -** it is less than 32, or rounded down to MAX_SECTOR_SIZE if it -** is greater than MAX_SECTOR_SIZE. -** -** If the file has the SQLITE_IOCAP_POWERSAFE_OVERWRITE property, then set -** the effective sector size to its minimum value (512). The purpose of -** pPager->sectorSize is to define the "blast radius" of bytes that -** might change if a crash occurs while writing to a single byte in -** that range. But with POWERSAFE_OVERWRITE, the blast radius is zero -** (that is what POWERSAFE_OVERWRITE means), so we minimize the sector -** size. For backwards compatibility of the rollback journal file format, -** we cannot reduce the effective sector size below 512. -*/ -static void setSectorSize(Pager *pPager){ - assert( isOpen(pPager->fd) || pPager->tempFile ); - - if( pPager->tempFile - || (sqlite3OsDeviceCharacteristics(pPager->fd) & - SQLITE_IOCAP_POWERSAFE_OVERWRITE)!=0 - ){ - /* Sector size doesn't matter for temporary files. Also, the file - ** may not have been opened yet, in which case the OsSectorSize() - ** call will segfault. */ - pPager->sectorSize = 512; - }else{ - pPager->sectorSize = sqlite3SectorSize(pPager->fd); - } -} - -/* -** Playback the journal and thus restore the database file to -** the state it was in before we started making changes. -** -** The journal file format is as follows: -** -** (1) 8 byte prefix. A copy of aJournalMagic[]. -** (2) 4 byte big-endian integer which is the number of valid page records -** in the journal. If this value is 0xffffffff, then compute the -** number of page records from the journal size.
-** (3) 4 byte big-endian integer which is the initial value for the -** sanity checksum. -** (4) 4 byte integer which is the number of pages to truncate the -** database to during a rollback. -** (5) 4 byte big-endian integer which is the sector size. The header -** is this many bytes in size. -** (6) 4 byte big-endian integer which is the page size. -** (7) zero padding out to the next sector size. -** (8) Zero or more pages instances, each as follows: -** + 4 byte page number. -** + pPager->pageSize bytes of data. -** + 4 byte checksum -** -** When we speak of the journal header, we mean the first 7 items above. -** Each entry in the journal is an instance of the 8th item. -** -** Call the value from the second bullet "nRec". nRec is the number of -** valid page entries in the journal. In most cases, you can compute the -** value of nRec from the size of the journal file. But if a power -** failure occurred while the journal was being written, it could be the -** case that the size of the journal file had already been increased but -** the extra entries had not yet made it safely to disk. In such a case, -** the value of nRec computed from the file size would be too large. For -** that reason, we always use the nRec value in the header. -** -** If the nRec value is 0xffffffff it means that nRec should be computed -** from the file size. This value is used when the user selects the -** no-sync option for the journal. A power failure could lead to corruption -** in this case. But for things like temporary table (which will be -** deleted when the power is restored) we don't care. -** -** If the file opened as the journal file is not a well-formed -** journal file then all pages up to the first corrupted page are rolled -** back (or no pages if the journal header is corrupted). The journal file -** is then deleted and SQLITE_OK returned, just as if no corruption had -** been encountered. -** -** If an I/O or malloc() error occurs, the journal-file is not deleted -** and an error code is returned. -** -** The isHot parameter indicates that we are trying to rollback a journal -** that might be a hot journal. Or, it could be that the journal is -** preserved because of JOURNALMODE_PERSIST or JOURNALMODE_TRUNCATE. -** If the journal really is hot, reset the pager cache prior rolling -** back any content. If the journal is merely persistent, no reset is -** needed. -*/ -static int pager_playback(Pager *pPager, int isHot){ - sqlite3_vfs *pVfs = pPager->pVfs; - i64 szJ; /* Size of the journal file in bytes */ - u32 nRec; /* Number of Records in the journal */ - u32 u; /* Unsigned loop counter */ - Pgno mxPg = 0; /* Size of the original file in pages */ - int rc; /* Result code of a subroutine */ - int res = 1; /* Value returned by sqlite3OsAccess() */ - char *zMaster = 0; /* Name of master journal file if any */ - int needPagerReset; /* True to reset page prior to first page rollback */ - int nPlayback = 0; /* Total number of pages restored from journal */ - - /* Figure out how many records are in the journal. Abort early if - ** the journal is empty. - */ - assert( isOpen(pPager->jfd) ); - rc = sqlite3OsFileSize(pPager->jfd, &szJ); - if( rc!=SQLITE_OK ){ - goto end_playback; - } - - /* Read the master journal name from the journal, if it is present. - ** If a master journal file name is specified, but the file is not - ** present on disk, then the journal is not hot and does not need to be - ** played back. 
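A small arithmetic sketch of the nRec rule described above (records_from_journal_size is an invented name; the header and record sizes are passed in as plain parameters rather than the JOURNAL_HDR_SZ/JOURNAL_PG_SZ macros): when the header stores 0xffffffff, the record count is derived from the journal size, each record being a 4-byte page number, pageSize bytes of data and a 4-byte checksum.

  #include <stdint.h>

  /* Sketch: derive the number of page records that follow a journal header
  ** when the nRec field was written as 0xffffffff (no-sync journals). */
  static uint32_t records_from_journal_size(int64_t szJournal,  /* journal file size */
                                            int64_t szHeader,   /* sector-aligned header size */
                                            int32_t pageSize){
    int64_t szRecord = (int64_t)pageSize + 8;  /* 4-byte pgno + page data + 4-byte cksum */
    if( szJournal<=szHeader ) return 0;
    return (uint32_t)((szJournal - szHeader)/szRecord);
  }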
- ** - ** TODO: Technically the following is an error because it assumes that - ** buffer Pager.pTmpSpace is (mxPathname+1) bytes or larger. i.e. that - ** (pPager->pageSize >= pPager->pVfs->mxPathname+1). Using os_unix.c, - ** mxPathname is 512, which is the same as the minimum allowable value - ** for pageSize. - */ - zMaster = pPager->pTmpSpace; - rc = readMasterJournal(pPager->jfd, zMaster, pPager->pVfs->mxPathname+1); - if( rc==SQLITE_OK && zMaster[0] ){ - rc = sqlite3OsAccess(pVfs, zMaster, SQLITE_ACCESS_EXISTS, &res); - } - zMaster = 0; - if( rc!=SQLITE_OK || !res ){ - goto end_playback; - } - pPager->journalOff = 0; - needPagerReset = isHot; - - /* This loop terminates either when a readJournalHdr() or - ** pager_playback_one_page() call returns SQLITE_DONE or an IO error - ** occurs. - */ - while( 1 ){ - /* Read the next journal header from the journal file. If there are - ** not enough bytes left in the journal file for a complete header, or - ** it is corrupted, then a process must have failed while writing it. - ** This indicates nothing more needs to be rolled back. - */ - rc = readJournalHdr(pPager, isHot, szJ, &nRec, &mxPg); - if( rc!=SQLITE_OK ){ - if( rc==SQLITE_DONE ){ - rc = SQLITE_OK; - } - goto end_playback; - } - - /* If nRec is 0xffffffff, then this journal was created by a process - ** working in no-sync mode. This means that the rest of the journal - ** file consists of pages, there are no more journal headers. Compute - ** the value of nRec based on this assumption. - */ - if( nRec==0xffffffff ){ - assert( pPager->journalOff==JOURNAL_HDR_SZ(pPager) ); - nRec = (int)((szJ - JOURNAL_HDR_SZ(pPager))/JOURNAL_PG_SZ(pPager)); - } - - /* If nRec is 0 and this rollback is of a transaction created by this - ** process and if this is the final header in the journal, then it means - ** that this part of the journal was being filled but has not yet been - ** synced to disk. Compute the number of pages based on the remaining - ** size of the file. - ** - ** The third term of the test was added to fix ticket #2565. - ** When rolling back a hot journal, nRec==0 always means that the next - ** chunk of the journal contains zero pages to be rolled back. But - ** when doing a ROLLBACK and the nRec==0 chunk is the last chunk in - ** the journal, it means that the journal might contain additional - ** pages that need to be rolled back and that the number of pages - ** should be computed based on the journal file size. - */ - if( nRec==0 && !isHot && - pPager->journalHdr+JOURNAL_HDR_SZ(pPager)==pPager->journalOff ){ - nRec = (int)((szJ - pPager->journalOff) / JOURNAL_PG_SZ(pPager)); - } - - /* If this is the first header read from the journal, truncate the - ** database file back to its original size. - */ - if( pPager->journalOff==JOURNAL_HDR_SZ(pPager) ){ - rc = pager_truncate(pPager, mxPg); - if( rc!=SQLITE_OK ){ - goto end_playback; - } - pPager->dbSize = mxPg; - } - - /* Copy original pages out of the journal and back into the - ** database file and/or page cache. - */ - for(u=0; u<nRec; u++){ - - if( needPagerReset ){ - pager_reset(pPager); - needPagerReset = 0; - } - rc = pager_playback_one_page(pPager,&pPager->journalOff,0,1,0); - if( rc==SQLITE_OK ){ - nPlayback++; - }else{ - if( rc==SQLITE_DONE ){ - pPager->journalOff = szJ; - break; - }else if( rc==SQLITE_IOERR_SHORT_READ ){ - /* If the journal has been truncated, simply stop reading and - ** processing the journal. This might happen if the journal was - ** not completely written and synced prior to a crash. In that - ** case, the database should have never been written in the - ** first place so it is OK to simply abandon the rollback.
*/ - rc = SQLITE_OK; - goto end_playback; - }else{ - /* If we are unable to rollback, quit and return the error - ** code. This will cause the pager to enter the error state - ** so that no further harm will be done. Perhaps the next - ** process to come along will be able to rollback the database. - */ - goto end_playback; - } - } - } - } - /*NOTREACHED*/ - assert( 0 ); - -end_playback: - /* Following a rollback, the database file should be back in its original - ** state prior to the start of the transaction, so invoke the - ** SQLITE_FCNTL_DB_UNCHANGED file-control method to disable the - ** assertion that the transaction counter was modified. - */ -#ifdef SQLITE_DEBUG - if( pPager->fd->pMethods ){ - sqlite3OsFileControlHint(pPager->fd,SQLITE_FCNTL_DB_UNCHANGED,0); - } -#endif - - /* If this playback is happening automatically as a result of an IO or - ** malloc error that occurred after the change-counter was updated but - ** before the transaction was committed, then the change-counter - ** modification may just have been reverted. If this happens in exclusive - ** mode, then subsequent transactions performed by the connection will not - ** update the change-counter at all. This may lead to cache inconsistency - ** problems for other processes at some point in the future. So, just - ** in case this has happened, clear the changeCountDone flag now. - */ - pPager->changeCountDone = pPager->tempFile; - - if( rc==SQLITE_OK ){ - zMaster = pPager->pTmpSpace; - rc = readMasterJournal(pPager->jfd, zMaster, pPager->pVfs->mxPathname+1); - testcase( rc!=SQLITE_OK ); - } - if( rc==SQLITE_OK - && (pPager->eState>=PAGER_WRITER_DBMOD || pPager->eState==PAGER_OPEN) - ){ - rc = sqlite3PagerSync(pPager, 0); - } - if( rc==SQLITE_OK ){ - rc = pager_end_transaction(pPager, zMaster[0]!='\0', 0); - testcase( rc!=SQLITE_OK ); - } - if( rc==SQLITE_OK && zMaster[0] && res ){ - /* If there was a master journal and this routine will return success, - ** see if it is possible to delete the master journal. - */ - rc = pager_delmaster(pPager, zMaster); - testcase( rc!=SQLITE_OK ); - } - if( isHot && nPlayback ){ - sqlite3_log(SQLITE_NOTICE_RECOVER_ROLLBACK, "recovered %d pages from %s", - nPlayback, pPager->zJournal); - } - - /* The Pager.sectorSize variable may have been updated while rolling - ** back a journal created by a process with a different sector size - ** value. Reset it to the correct value for this process. - */ - setSectorSize(pPager); - return rc; -} - - -/* -** Read the content for page pPg out of the database file and into -** pPg->pData. A shared lock or greater must be held on the database -** file before this function is called. -** -** If page 1 is read, then the value of Pager.dbFileVers[] is set to -** the value read from the database file. -** -** If an IO error occurs, then the IO error is returned to the caller. -** Otherwise, SQLITE_OK is returned. -*/ -static int readDbPage(PgHdr *pPg, u32 iFrame){ - Pager *pPager = pPg->pPager; /* Pager object associated with page pPg */ - Pgno pgno = pPg->pgno; /* Page number to read */ - int rc = SQLITE_OK; /* Return code */ - int pgsz = pPager->pageSize; /* Number of bytes to read */ - - assert( pPager->eState>=PAGER_READER && !MEMDB ); - assert( isOpen(pPager->fd) ); - -#ifndef SQLITE_OMIT_WAL - if( iFrame ){ - /* Try to pull the page from the write-ahead log. 
*/ - rc = sqlite3WalReadFrame(pPager->pWal, iFrame, pgsz, pPg->pData); - }else -#endif - { - i64 iOffset = (pgno-1)*(i64)pPager->pageSize; - rc = sqlite3OsRead(pPager->fd, pPg->pData, pgsz, iOffset); - if( rc==SQLITE_IOERR_SHORT_READ ){ - rc = SQLITE_OK; - } - } - - if( pgno==1 ){ - if( rc ){ - /* If the read is unsuccessful, set the dbFileVers[] to something - ** that will never be a valid file version. dbFileVers[] is a copy - ** of bytes 24..39 of the database. Bytes 28..31 should always be - ** zero or the size of the database in page. Bytes 32..35 and 35..39 - ** should be page numbers which are never 0xffffffff. So filling - ** pPager->dbFileVers[] with all 0xff bytes should suffice. - ** - ** For an encrypted database, the situation is more complex: bytes - ** 24..39 of the database are white noise. But the probability of - ** white noising equaling 16 bytes of 0xff is vanishingly small so - ** we should still be ok. - */ - memset(pPager->dbFileVers, 0xff, sizeof(pPager->dbFileVers)); - }else{ - u8 *dbFileVers = &((u8*)pPg->pData)[24]; - memcpy(&pPager->dbFileVers, dbFileVers, sizeof(pPager->dbFileVers)); - } - } - CODEC1(pPager, pPg->pData, pgno, 3, rc = SQLITE_NOMEM); - - PAGER_INCR(sqlite3_pager_readdb_count); - PAGER_INCR(pPager->nRead); - IOTRACE(("PGIN %p %d\n", pPager, pgno)); - PAGERTRACE(("FETCH %d page %d hash(%08x)\n", - PAGERID(pPager), pgno, pager_pagehash(pPg))); - - return rc; -} - -/* -** Update the value of the change-counter at offsets 24 and 92 in -** the header and the sqlite version number at offset 96. -** -** This is an unconditional update. See also the pager_incr_changecounter() -** routine which only updates the change-counter if the update is actually -** needed, as determined by the pPager->changeCountDone state variable. -*/ -static void pager_write_changecounter(PgHdr *pPg){ - u32 change_counter; - - /* Increment the value just read and write it back to byte 24. */ - change_counter = sqlite3Get4byte((u8*)pPg->pPager->dbFileVers)+1; - put32bits(((char*)pPg->pData)+24, change_counter); - - /* Also store the SQLite version number in bytes 96..99 and in - ** bytes 92..95 store the change counter for which the version number - ** is valid. */ - put32bits(((char*)pPg->pData)+92, change_counter); - put32bits(((char*)pPg->pData)+96, SQLITE_VERSION_NUMBER); -} - -#ifndef SQLITE_OMIT_WAL -/* -** This function is invoked once for each page that has already been -** written into the log file when a WAL transaction is rolled back. -** Parameter iPg is the page number of said page. The pCtx argument -** is actually a pointer to the Pager structure. -** -** If page iPg is present in the cache, and has no outstanding references, -** it is discarded. Otherwise, if there are one or more outstanding -** references, the page content is reloaded from the database. If the -** attempt to reload content from the database is required and fails, -** return an SQLite error code. Otherwise, SQLITE_OK. 
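For illustration (put32, get32 and bump_change_counter are invented stand-ins, not SQLite's put32bits or sqlite3Get4byte), the big-endian header updates that pager_write_changecounter() is described as making to a page-1 image use the offsets quoted above:

  #include <stdint.h>

  static void put32(uint8_t *p, uint32_t v){
    p[0] = (uint8_t)(v>>24); p[1] = (uint8_t)(v>>16);
    p[2] = (uint8_t)(v>>8);  p[3] = (uint8_t)v;
  }
  static uint32_t get32(const uint8_t *p){
    return ((uint32_t)p[0]<<24)|((uint32_t)p[1]<<16)|((uint32_t)p[2]<<8)|(uint32_t)p[3];
  }

  /* Sketch: bump the change counter at offset 24, mirror it at offset 92,
  ** and record a version number at offset 96 of a page-1 image (the real
  ** code reads the old counter from Pager.dbFileVers rather than the page). */
  static void bump_change_counter(uint8_t *aPage1, uint32_t versionNumber){
    uint32_t counter = get32(&aPage1[24]) + 1;
    put32(&aPage1[24], counter);
    put32(&aPage1[92], counter);
    put32(&aPage1[96], versionNumber);
  }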
-*/ -static int pagerUndoCallback(void *pCtx, Pgno iPg){ - int rc = SQLITE_OK; - Pager *pPager = (Pager *)pCtx; - PgHdr *pPg; - - assert( pagerUseWal(pPager) ); - pPg = sqlite3PagerLookup(pPager, iPg); - if( pPg ){ - if( sqlite3PcachePageRefcount(pPg)==1 ){ - sqlite3PcacheDrop(pPg); - }else{ - u32 iFrame = 0; - rc = sqlite3WalFindFrame(pPager->pWal, pPg->pgno, &iFrame); - if( rc==SQLITE_OK ){ - rc = readDbPage(pPg, iFrame); - } - if( rc==SQLITE_OK ){ - pPager->xReiniter(pPg); - } - sqlite3PagerUnrefNotNull(pPg); - } - } - - /* Normally, if a transaction is rolled back, any backup processes are - ** updated as data is copied out of the rollback journal and into the - ** database. This is not generally possible with a WAL database, as - ** rollback involves simply truncating the log file. Therefore, if one - ** or more frames have already been written to the log (and therefore - ** also copied into the backup databases) as part of this transaction, - ** the backups must be restarted. - */ - sqlite3BackupRestart(pPager->pBackup); - - return rc; -} - -/* -** This function is called to rollback a transaction on a WAL database. -*/ -static int pagerRollbackWal(Pager *pPager){ - int rc; /* Return Code */ - PgHdr *pList; /* List of dirty pages to revert */ - - /* For all pages in the cache that are currently dirty or have already - ** been written (but not committed) to the log file, do one of the - ** following: - ** - ** + Discard the cached page (if refcount==0), or - ** + Reload page content from the database (if refcount>0). - */ - pPager->dbSize = pPager->dbOrigSize; - rc = sqlite3WalUndo(pPager->pWal, pagerUndoCallback, (void *)pPager); - pList = sqlite3PcacheDirtyList(pPager->pPCache); - while( pList && rc==SQLITE_OK ){ - PgHdr *pNext = pList->pDirty; - rc = pagerUndoCallback((void *)pPager, pList->pgno); - pList = pNext; - } - - return rc; -} - -/* -** This function is a wrapper around sqlite3WalFrames(). As well as logging -** the contents of the list of pages headed by pList (connected by pDirty), -** this function notifies any active backup processes that the pages have -** changed. -** -** The list of pages passed into this routine is always sorted by page number. -** Hence, if page 1 appears anywhere on the list, it will be the first page. -*/ -static int pagerWalFrames( - Pager *pPager, /* Pager object */ - PgHdr *pList, /* List of frames to log */ - Pgno nTruncate, /* Database size after this commit */ - int isCommit /* True if this is a commit */ -){ - int rc; /* Return code */ - int nList; /* Number of pages in pList */ -#if defined(SQLITE_DEBUG) || defined(SQLITE_CHECK_PAGES) - PgHdr *p; /* For looping over pages */ -#endif - - assert( pPager->pWal ); - assert( pList ); -#ifdef SQLITE_DEBUG - /* Verify that the page list is in accending order */ - for(p=pList; p && p->pDirty; p=p->pDirty){ - assert( p->pgno < p->pDirty->pgno ); - } -#endif - - assert( pList->pDirty==0 || isCommit ); - if( isCommit ){ - /* If a WAL transaction is being committed, there is no point in writing - ** any pages with page numbers greater than nTruncate into the WAL file. - ** They will never be read by any client. So remove them from the pDirty - ** list here. 
*/ - PgHdr *p; - PgHdr **ppNext = &pList; - nList = 0; - for(p=pList; (*ppNext = p)!=0; p=p->pDirty){ - if( p->pgno<=nTruncate ){ - ppNext = &p->pDirty; - nList++; - } - } - assert( pList ); - }else{ - nList = 1; - } - pPager->aStat[PAGER_STAT_WRITE] += nList; - - if( pList->pgno==1 ) pager_write_changecounter(pList); - rc = sqlite3WalFrames(pPager->pWal, - pPager->pageSize, pList, nTruncate, isCommit, pPager->walSyncFlags - ); - if( rc==SQLITE_OK && pPager->pBackup ){ - PgHdr *p; - for(p=pList; p; p=p->pDirty){ - sqlite3BackupUpdate(pPager->pBackup, p->pgno, (u8 *)p->pData); - } - } - -#ifdef SQLITE_CHECK_PAGES - pList = sqlite3PcacheDirtyList(pPager->pPCache); - for(p=pList; p; p=p->pDirty){ - pager_set_pagehash(p); - } -#endif - - return rc; -} - -/* -** Begin a read transaction on the WAL. -** -** This routine used to be called "pagerOpenSnapshot()" because it essentially -** makes a snapshot of the database at the current point in time and preserves -** that snapshot for use by the reader in spite of concurrently changes by -** other writers or checkpointers. -*/ -static int pagerBeginReadTransaction(Pager *pPager){ - int rc; /* Return code */ - int changed = 0; /* True if cache must be reset */ - - assert( pagerUseWal(pPager) ); - assert( pPager->eState==PAGER_OPEN || pPager->eState==PAGER_READER ); - - /* sqlite3WalEndReadTransaction() was not called for the previous - ** transaction in locking_mode=EXCLUSIVE. So call it now. If we - ** are in locking_mode=NORMAL and EndRead() was previously called, - ** the duplicate call is harmless. - */ - sqlite3WalEndReadTransaction(pPager->pWal); - - rc = sqlite3WalBeginReadTransaction(pPager->pWal, &changed); - if( rc!=SQLITE_OK || changed ){ - pager_reset(pPager); - if( USEFETCH(pPager) ) sqlite3OsUnfetch(pPager->fd, 0, 0); - } - - return rc; -} -#endif - -/* -** This function is called as part of the transition from PAGER_OPEN -** to PAGER_READER state to determine the size of the database file -** in pages (assuming the page size currently stored in Pager.pageSize). -** -** If no error occurs, SQLITE_OK is returned and the size of the database -** in pages is stored in *pnPage. Otherwise, an error code (perhaps -** SQLITE_IOERR_FSTAT) is returned and *pnPage is left unmodified. -*/ -static int pagerPagecount(Pager *pPager, Pgno *pnPage){ - Pgno nPage; /* Value to return via *pnPage */ - - /* Query the WAL sub-system for the database size. The WalDbsize() - ** function returns zero if the WAL is not open (i.e. Pager.pWal==0), or - ** if the database size is not available. The database size is not - ** available from the WAL sub-system if the log file is empty or - ** contains no valid committed transactions. - */ - assert( pPager->eState==PAGER_OPEN ); - assert( pPager->eLock>=SHARED_LOCK ); - nPage = sqlite3WalDbsize(pPager->pWal); - - /* If the database size was not available from the WAL sub-system, - ** determine it based on the size of the database file. If the size - ** of the database file is not an integer multiple of the page-size, - ** round down to the nearest page. Except, any file larger than 0 - ** bytes in size is considered to contain at least one page. 
- */ - if( nPage==0 ){ - i64 n = 0; /* Size of db file in bytes */ - assert( isOpen(pPager->fd) || pPager->tempFile ); - if( isOpen(pPager->fd) ){ - int rc = sqlite3OsFileSize(pPager->fd, &n); - if( rc!=SQLITE_OK ){ - return rc; - } - } - nPage = (Pgno)((n+pPager->pageSize-1) / pPager->pageSize); - } - - /* If the current number of pages in the file is greater than the - ** configured maximum pager number, increase the allowed limit so - ** that the file can be read. - */ - if( nPage>pPager->mxPgno ){ - pPager->mxPgno = (Pgno)nPage; - } - - *pnPage = nPage; - return SQLITE_OK; -} - -#ifndef SQLITE_OMIT_WAL -/* -** Check if the *-wal file that corresponds to the database opened by pPager -** exists if the database is not empy, or verify that the *-wal file does -** not exist (by deleting it) if the database file is empty. -** -** If the database is not empty and the *-wal file exists, open the pager -** in WAL mode. If the database is empty or if no *-wal file exists and -** if no error occurs, make sure Pager.journalMode is not set to -** PAGER_JOURNALMODE_WAL. -** -** Return SQLITE_OK or an error code. -** -** The caller must hold a SHARED lock on the database file to call this -** function. Because an EXCLUSIVE lock on the db file is required to delete -** a WAL on a none-empty database, this ensures there is no race condition -** between the xAccess() below and an xDelete() being executed by some -** other connection. -*/ -static int pagerOpenWalIfPresent(Pager *pPager){ - int rc = SQLITE_OK; - assert( pPager->eState==PAGER_OPEN ); - assert( pPager->eLock>=SHARED_LOCK ); - - if( !pPager->tempFile ){ - int isWal; /* True if WAL file exists */ - Pgno nPage; /* Size of the database file */ - - rc = pagerPagecount(pPager, &nPage); - if( rc ) return rc; - if( nPage==0 ){ - rc = sqlite3OsDelete(pPager->pVfs, pPager->zWal, 0); - if( rc==SQLITE_IOERR_DELETE_NOENT ) rc = SQLITE_OK; - isWal = 0; - }else{ - rc = sqlite3OsAccess( - pPager->pVfs, pPager->zWal, SQLITE_ACCESS_EXISTS, &isWal - ); - } - if( rc==SQLITE_OK ){ - if( isWal ){ - testcase( sqlite3PcachePagecount(pPager->pPCache)==0 ); - rc = sqlite3PagerOpenWal(pPager, 0); - }else if( pPager->journalMode==PAGER_JOURNALMODE_WAL ){ - pPager->journalMode = PAGER_JOURNALMODE_DELETE; - } - } - } - return rc; -} -#endif - -/* -** Playback savepoint pSavepoint. Or, if pSavepoint==NULL, then playback -** the entire master journal file. The case pSavepoint==NULL occurs when -** a ROLLBACK TO command is invoked on a SAVEPOINT that is a transaction -** savepoint. -** -** When pSavepoint is not NULL (meaning a non-transaction savepoint is -** being rolled back), then the rollback consists of up to three stages, -** performed in the order specified: -** -** * Pages are played back from the main journal starting at byte -** offset PagerSavepoint.iOffset and continuing to -** PagerSavepoint.iHdrOffset, or to the end of the main journal -** file if PagerSavepoint.iHdrOffset is zero. -** -** * If PagerSavepoint.iHdrOffset is not zero, then pages are played -** back starting from the journal header immediately following -** PagerSavepoint.iHdrOffset to the end of the main journal file. -** -** * Pages are then played back from the sub-journal file, starting -** with the PagerSavepoint.iSubRec and continuing to the end of -** the journal file. -** -** Throughout the rollback process, each time a page is rolled back, the -** corresponding bit is set in a bitvec structure (variable pDone in the -** implementation below). 
This is used to ensure that a page is only -** rolled back the first time it is encountered in either journal. -** -** If pSavepoint is NULL, then pages are only played back from the main -** journal file. There is no need for a bitvec in this case. -** -** In either case, before playback commences the Pager.dbSize variable -** is reset to the value that it held at the start of the savepoint -** (or transaction). No page with a page-number greater than this value -** is played back. If one is encountered it is simply skipped. -*/ -static int pagerPlaybackSavepoint(Pager *pPager, PagerSavepoint *pSavepoint){ - i64 szJ; /* Effective size of the main journal */ - i64 iHdrOff; /* End of first segment of main-journal records */ - int rc = SQLITE_OK; /* Return code */ - Bitvec *pDone = 0; /* Bitvec to ensure pages played back only once */ - - assert( pPager->eState!=PAGER_ERROR ); - assert( pPager->eState>=PAGER_WRITER_LOCKED ); - - /* Allocate a bitvec to use to store the set of pages rolled back */ - if( pSavepoint ){ - pDone = sqlite3BitvecCreate(pSavepoint->nOrig); - if( !pDone ){ - return SQLITE_NOMEM; - } - } - - /* Set the database size back to the value it was before the savepoint - ** being reverted was opened. - */ - pPager->dbSize = pSavepoint ? pSavepoint->nOrig : pPager->dbOrigSize; - pPager->changeCountDone = pPager->tempFile; - - if( !pSavepoint && pagerUseWal(pPager) ){ - return pagerRollbackWal(pPager); - } - - /* Use pPager->journalOff as the effective size of the main rollback - ** journal. The actual file might be larger than this in - ** PAGER_JOURNALMODE_TRUNCATE or PAGER_JOURNALMODE_PERSIST. But anything - ** past pPager->journalOff is off-limits to us. - */ - szJ = pPager->journalOff; - assert( pagerUseWal(pPager)==0 || szJ==0 ); - - /* Begin by rolling back records from the main journal starting at - ** PagerSavepoint.iOffset and continuing to the next journal header. - ** There might be records in the main journal that have a page number - ** greater than the current database size (pPager->dbSize) but those - ** will be skipped automatically. Pages are added to pDone as they - ** are played back. - */ - if( pSavepoint && !pagerUseWal(pPager) ){ - iHdrOff = pSavepoint->iHdrOffset ? pSavepoint->iHdrOffset : szJ; - pPager->journalOff = pSavepoint->iOffset; - while( rc==SQLITE_OK && pPager->journalOff<iHdrOff ){ - rc = pager_playback_one_page(pPager, &pPager->journalOff, pDone, 1, 1); - } - assert( rc!=SQLITE_DONE ); - }else{ - pPager->journalOff = 0; - } - - /* Continue rolling back records out of the main journal starting at - ** the first journal header seen and continuing until the effective end - ** of the main journal file. Continue to skip out-of-range pages and - ** continue adding pages rolled back to pDone. - */ - while( rc==SQLITE_OK && pPager->journalOff<szJ ){ - u32 ii; /* Loop counter */ - u32 nJRec = 0; /* Number of Journal Records */ - u32 dummy; - rc = readJournalHdr(pPager, 0, szJ, &nJRec, &dummy); - assert( rc!=SQLITE_DONE ); - - /* - ** The "pPager->journalHdr+JOURNAL_HDR_SZ(pPager)==pPager->journalOff" - ** test is related to ticket #2565. See the discussion in the - ** pager_playback() function for additional information. - */ - if( nJRec==0 - && pPager->journalHdr+JOURNAL_HDR_SZ(pPager)==pPager->journalOff - ){ - nJRec = (u32)((szJ - pPager->journalOff)/JOURNAL_PG_SZ(pPager)); - } - for(ii=0; rc==SQLITE_OK && ii<nJRec && pPager->journalOff<szJ; ii++){ - rc = pager_playback_one_page(pPager, &pPager->journalOff, pDone, 1, 1); - } - assert( rc!=SQLITE_DONE ); - } - assert( rc!=SQLITE_OK || pPager->journalOff>=szJ ); - - /* Finally, rollback pages from the sub-journal. Page that were - ** previously rolled back out of the main journal (and are hence in pDone) - ** will be skipped. Out-of-range pages are also skipped. 
- */ - if( pSavepoint ){ - u32 ii; /* Loop counter */ - i64 offset = (i64)pSavepoint->iSubRec*(4+pPager->pageSize); - - if( pagerUseWal(pPager) ){ - rc = sqlite3WalSavepointUndo(pPager->pWal, pSavepoint->aWalData); - } - for(ii=pSavepoint->iSubRec; rc==SQLITE_OK && ii<pPager->nSubRec; ii++){ - assert( offset==(i64)ii*(4+pPager->pageSize) ); - rc = pager_playback_one_page(pPager, &offset, pDone, 0, 1); - } - assert( rc!=SQLITE_DONE ); - } - - sqlite3BitvecDestroy(pDone); - if( rc==SQLITE_OK ){ - pPager->journalOff = szJ; - } - - return rc; -} - -/* -** Change the maximum number of in-memory pages that are allowed. -*/ -SQLITE_PRIVATE void sqlite3PagerSetCachesize(Pager *pPager, int mxPage){ - sqlite3PcacheSetCachesize(pPager->pPCache, mxPage); -} - -/* -** Invoke SQLITE_FCNTL_MMAP_SIZE based on the current value of szMmap. -*/ -static void pagerFixMaplimit(Pager *pPager){ -#if SQLITE_MAX_MMAP_SIZE>0 - sqlite3_file *fd = pPager->fd; - if( isOpen(fd) && fd->pMethods->iVersion>=3 ){ - sqlite3_int64 sz; - sz = pPager->szMmap; - pPager->bUseFetch = (sz>0); - sqlite3OsFileControlHint(pPager->fd, SQLITE_FCNTL_MMAP_SIZE, &sz); - } -#endif -} - -/* -** Change the maximum size of any memory mapping made of the database file. -*/ -SQLITE_PRIVATE void sqlite3PagerSetMmapLimit(Pager *pPager, sqlite3_int64 szMmap){ - pPager->szMmap = szMmap; - pagerFixMaplimit(pPager); -} - -/* -** Free as much memory as possible from the pager. -*/ -SQLITE_PRIVATE void sqlite3PagerShrink(Pager *pPager){ - sqlite3PcacheShrink(pPager->pPCache); -} - -/* -** Adjust settings of the pager to those specified in the pgFlags parameter. -** -** The "level" in pgFlags & PAGER_SYNCHRONOUS_MASK sets the robustness -** of the database to damage due to OS crashes or power failures by -** changing the number of syncs()s when writing the journals. -** There are three levels: -** -** OFF sqlite3OsSync() is never called. This is the default -** for temporary and transient files. -** -** NORMAL The journal is synced once before writes begin on the -** database. This is normally adequate protection, but -** it is theoretically possible, though very unlikely, -** that an inopertune power failure could leave the journal -** in a state which would cause damage to the database -** when it is rolled back. -** -** FULL The journal is synced twice before writes begin on the -** database (with some additional information - the nRec field -** of the journal header - being written in between the two -** syncs). If we assume that writing a -** single disk sector is atomic, then this mode provides -** assurance that the journal will not be corrupted to the -** point of causing damage to the database during rollback. -** -** The above is for a rollback-journal mode. For WAL mode, OFF continues -** to mean that no syncs ever occur. NORMAL means that the WAL is synced -** prior to the start of checkpoint and that the database file is synced -** at the conclusion of the checkpoint if the entire content of the WAL -** was written back into the database. But no sync operations occur for -** an ordinary commit in NORMAL mode with WAL. FULL means that the WAL -** file is synced following each commit operation, in addition to the -** syncs associated with NORMAL. -** -** Do not confuse synchronous=FULL with SQLITE_SYNC_FULL. The -** SQLITE_SYNC_FULL macro means to use the MacOSX-style full-fsync -** using fcntl(F_FULLFSYNC). SQLITE_SYNC_NORMAL means to do an -** ordinary fsync() call. 
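/*
** The OFF/NORMAL/FULL levels described above are what the user-visible
** "PRAGMA synchronous" setting selects.  A minimal sketch using the public
** C API; the file name and the absent error handling are placeholders.
*/
#include <stdio.h>
#include <sqlite3.h>

int main(void){
  sqlite3 *db;
  if( sqlite3_open("demo.db", &db)!=SQLITE_OK ) return 1;

  /* NORMAL: the journal is synced once before the database is written. */
  sqlite3_exec(db, "PRAGMA synchronous=NORMAL;", 0, 0, 0);

  /* FULL: an extra journal sync (and, in WAL mode, a sync per commit). */
  sqlite3_exec(db, "PRAGMA synchronous=FULL;", 0, 0, 0);

  sqlite3_close(db);
  return 0;
}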
There is no difference between SQLITE_SYNC_FULL -** and SQLITE_SYNC_NORMAL on platforms other than MacOSX. But the -** synchronous=FULL versus synchronous=NORMAL setting determines when -** the xSync primitive is called and is relevant to all platforms. -** -** Numeric values associated with these states are OFF==1, NORMAL=2, -** and FULL=3. -*/ -#ifndef SQLITE_OMIT_PAGER_PRAGMAS -SQLITE_PRIVATE void sqlite3PagerSetFlags( - Pager *pPager, /* The pager to set safety level for */ - unsigned pgFlags /* Various flags */ -){ - unsigned level = pgFlags & PAGER_SYNCHRONOUS_MASK; - assert( level>=1 && level<=3 ); - pPager->noSync = (level==1 || pPager->tempFile) ?1:0; - pPager->fullSync = (level==3 && !pPager->tempFile) ?1:0; - if( pPager->noSync ){ - pPager->syncFlags = 0; - pPager->ckptSyncFlags = 0; - }else if( pgFlags & PAGER_FULLFSYNC ){ - pPager->syncFlags = SQLITE_SYNC_FULL; - pPager->ckptSyncFlags = SQLITE_SYNC_FULL; - }else if( pgFlags & PAGER_CKPT_FULLFSYNC ){ - pPager->syncFlags = SQLITE_SYNC_NORMAL; - pPager->ckptSyncFlags = SQLITE_SYNC_FULL; - }else{ - pPager->syncFlags = SQLITE_SYNC_NORMAL; - pPager->ckptSyncFlags = SQLITE_SYNC_NORMAL; - } - pPager->walSyncFlags = pPager->syncFlags; - if( pPager->fullSync ){ - pPager->walSyncFlags |= WAL_SYNC_TRANSACTIONS; - } - if( pgFlags & PAGER_CACHESPILL ){ - pPager->doNotSpill &= ~SPILLFLAG_OFF; - }else{ - pPager->doNotSpill |= SPILLFLAG_OFF; - } -} -#endif - -/* -** The following global variable is incremented whenever the library -** attempts to open a temporary file. This information is used for -** testing and analysis only. -*/ -#ifdef SQLITE_TEST -SQLITE_API int sqlite3_opentemp_count = 0; -#endif - -/* -** Open a temporary file. -** -** Write the file descriptor into *pFile. Return SQLITE_OK on success -** or some other error code if we fail. The OS will automatically -** delete the temporary file when it is closed. -** -** The flags passed to the VFS layer xOpen() call are those specified -** by parameter vfsFlags ORed with the following: -** -** SQLITE_OPEN_READWRITE -** SQLITE_OPEN_CREATE -** SQLITE_OPEN_EXCLUSIVE -** SQLITE_OPEN_DELETEONCLOSE -*/ -static int pagerOpentemp( - Pager *pPager, /* The pager object */ - sqlite3_file *pFile, /* Write the file descriptor here */ - int vfsFlags /* Flags passed through to the VFS */ -){ - int rc; /* Return code */ - -#ifdef SQLITE_TEST - sqlite3_opentemp_count++; /* Used for testing and analysis only */ -#endif - - vfsFlags |= SQLITE_OPEN_READWRITE | SQLITE_OPEN_CREATE | - SQLITE_OPEN_EXCLUSIVE | SQLITE_OPEN_DELETEONCLOSE; - rc = sqlite3OsOpen(pPager->pVfs, 0, pFile, vfsFlags, 0); - assert( rc!=SQLITE_OK || isOpen(pFile) ); - return rc; -} - -/* -** Set the busy handler function. -** -** The pager invokes the busy-handler if sqlite3OsLock() returns -** SQLITE_BUSY when trying to upgrade from no-lock to a SHARED lock, -** or when trying to upgrade from a RESERVED lock to an EXCLUSIVE -** lock. It does *not* invoke the busy handler when upgrading from -** SHARED to RESERVED, or when upgrading from SHARED to EXCLUSIVE -** (which occurs during hot-journal rollback). Summary: -** -** Transition | Invokes xBusyHandler -** -------------------------------------------------------- -** NO_LOCK -> SHARED_LOCK | Yes -** SHARED_LOCK -> RESERVED_LOCK | No -** SHARED_LOCK -> EXCLUSIVE_LOCK | No -** RESERVED_LOCK -> EXCLUSIVE_LOCK | Yes -** -** If the busy-handler callback returns non-zero, the lock is -** retried. 
If it returns zero, then the SQLITE_BUSY error is -** returned to the caller of the pager API function. -*/ -SQLITE_PRIVATE void sqlite3PagerSetBusyhandler( - Pager *pPager, /* Pager object */ - int (*xBusyHandler)(void *), /* Pointer to busy-handler function */ - void *pBusyHandlerArg /* Argument to pass to xBusyHandler */ -){ - pPager->xBusyHandler = xBusyHandler; - pPager->pBusyHandlerArg = pBusyHandlerArg; - - if( isOpen(pPager->fd) ){ - void **ap = (void **)&pPager->xBusyHandler; - assert( ((int(*)(void *))(ap[0]))==xBusyHandler ); - assert( ap[1]==pBusyHandlerArg ); - sqlite3OsFileControlHint(pPager->fd, SQLITE_FCNTL_BUSYHANDLER, (void *)ap); - } -} - -/* -** Change the page size used by the Pager object. The new page size -** is passed in *pPageSize. -** -** If the pager is in the error state when this function is called, it -** is a no-op. The value returned is the error state error code (i.e. -** one of SQLITE_IOERR, an SQLITE_IOERR_xxx sub-code or SQLITE_FULL). -** -** Otherwise, if all of the following are true: -** -** * the new page size (value of *pPageSize) is valid (a power -** of two between 512 and SQLITE_MAX_PAGE_SIZE, inclusive), and -** -** * there are no outstanding page references, and -** -** * the database is either not an in-memory database or it is -** an in-memory database that currently consists of zero pages. -** -** then the pager object page size is set to *pPageSize. -** -** If the page size is changed, then this function uses sqlite3PagerMalloc() -** to obtain a new Pager.pTmpSpace buffer. If this allocation attempt -** fails, SQLITE_NOMEM is returned and the page size remains unchanged. -** In all other cases, SQLITE_OK is returned. -** -** If the page size is not changed, either because one of the enumerated -** conditions above is not true, the pager was in error state when this -** function was called, or because the memory allocation attempt failed, -** then *pPageSize is set to the old, retained page size before returning. -*/ -SQLITE_PRIVATE int sqlite3PagerSetPagesize(Pager *pPager, u32 *pPageSize, int nReserve){ - int rc = SQLITE_OK; - - /* It is not possible to do a full assert_pager_state() here, as this - ** function may be called from within PagerOpen(), before the state - ** of the Pager object is internally consistent. - ** - ** At one point this function returned an error if the pager was in - ** PAGER_ERROR state. But since PAGER_ERROR state guarantees that - ** there is at least one outstanding page reference, this function - ** is a no-op for that case anyhow. 
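/*
** At the application level the busy-handler described above is installed
** with sqlite3_busy_handler() (sqlite3_busy_timeout() is the pre-packaged
** variant).  A minimal sketch: retry up to ten times with a short sleep,
** then give up so the caller sees SQLITE_BUSY.  The retry limit and the
** 100 ms back-off are arbitrary placeholders.
*/
#include <sqlite3.h>
#include <unistd.h>

static int retryBusy(void *pArg, int nPrior){
  (void)pArg;
  if( nPrior>=10 ) return 0;  /* returning zero surfaces SQLITE_BUSY    */
  usleep(100*1000);           /* wait 100 ms before the lock is retried */
  return 1;                   /* non-zero means: try the lock again     */
}

static int installBusyHandler(sqlite3 *db){
  return sqlite3_busy_handler(db, retryBusy, 0);
}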
- */ - - u32 pageSize = *pPageSize; - assert( pageSize==0 || (pageSize>=512 && pageSize<=SQLITE_MAX_PAGE_SIZE) ); - if( (pPager->memDb==0 || pPager->dbSize==0) - && sqlite3PcacheRefCount(pPager->pPCache)==0 - && pageSize && pageSize!=(u32)pPager->pageSize - ){ - char *pNew = NULL; /* New temp space */ - i64 nByte = 0; - - if( pPager->eState>PAGER_OPEN && isOpen(pPager->fd) ){ - rc = sqlite3OsFileSize(pPager->fd, &nByte); - } - if( rc==SQLITE_OK ){ - pNew = (char *)sqlite3PageMalloc(pageSize); - if( !pNew ) rc = SQLITE_NOMEM; - } - - if( rc==SQLITE_OK ){ - pager_reset(pPager); - pPager->dbSize = (Pgno)((nByte+pageSize-1)/pageSize); - pPager->pageSize = pageSize; - sqlite3PageFree(pPager->pTmpSpace); - pPager->pTmpSpace = pNew; - sqlite3PcacheSetPageSize(pPager->pPCache, pageSize); - } - } - - *pPageSize = pPager->pageSize; - if( rc==SQLITE_OK ){ - if( nReserve<0 ) nReserve = pPager->nReserve; - assert( nReserve>=0 && nReserve<1000 ); - pPager->nReserve = (i16)nReserve; - pagerReportSize(pPager); - pagerFixMaplimit(pPager); - } - return rc; -} - -/* -** Return a pointer to the "temporary page" buffer held internally -** by the pager. This is a buffer that is big enough to hold the -** entire content of a database page. This buffer is used internally -** during rollback and will be overwritten whenever a rollback -** occurs. But other modules are free to use it too, as long as -** no rollbacks are happening. -*/ -SQLITE_PRIVATE void *sqlite3PagerTempSpace(Pager *pPager){ - return pPager->pTmpSpace; -} - -/* -** Attempt to set the maximum database page count if mxPage is positive. -** Make no changes if mxPage is zero or negative. And never reduce the -** maximum page count below the current size of the database. -** -** Regardless of mxPage, return the current maximum page count. -*/ -SQLITE_PRIVATE int sqlite3PagerMaxPageCount(Pager *pPager, int mxPage){ - if( mxPage>0 ){ - pPager->mxPgno = mxPage; - } - assert( pPager->eState!=PAGER_OPEN ); /* Called only by OP_MaxPgcnt */ - assert( pPager->mxPgno>=pPager->dbSize ); /* OP_MaxPgcnt enforces this */ - return pPager->mxPgno; -} - -/* -** The following set of routines are used to disable the simulated -** I/O error mechanism. These routines are used to avoid simulated -** errors in places where we do not care about errors. -** -** Unless -DSQLITE_TEST=1 is used, these routines are all no-ops -** and generate no code. -*/ -#ifdef SQLITE_TEST -SQLITE_API extern int sqlite3_io_error_pending; -SQLITE_API extern int sqlite3_io_error_hit; -static int saved_cnt; -void disable_simulated_io_errors(void){ - saved_cnt = sqlite3_io_error_pending; - sqlite3_io_error_pending = -1; -} -void enable_simulated_io_errors(void){ - sqlite3_io_error_pending = saved_cnt; -} -#else -# define disable_simulated_io_errors() -# define enable_simulated_io_errors() -#endif - -/* -** Read the first N bytes from the beginning of the file into memory -** that pDest points to. -** -** If the pager was opened on a transient file (zFilename==""), or -** opened on a file less than N bytes in size, the output buffer is -** zeroed and SQLITE_OK returned. The rationale for this is that this -** function is used to read database headers, and a new transient or -** zero sized database has a header than consists entirely of zeroes. -** -** If any IO error apart from SQLITE_IOERR_SHORT_READ is encountered, -** the error code is returned to the caller and the contents of the -** output buffer undefined. 
-*/ -SQLITE_PRIVATE int sqlite3PagerReadFileheader(Pager *pPager, int N, unsigned char *pDest){ - int rc = SQLITE_OK; - memset(pDest, 0, N); - assert( isOpen(pPager->fd) || pPager->tempFile ); - - /* This routine is only called by btree immediately after creating - ** the Pager object. There has not been an opportunity to transition - ** to WAL mode yet. - */ - assert( !pagerUseWal(pPager) ); - - if( isOpen(pPager->fd) ){ - IOTRACE(("DBHDR %p 0 %d\n", pPager, N)) - rc = sqlite3OsRead(pPager->fd, pDest, N, 0); - if( rc==SQLITE_IOERR_SHORT_READ ){ - rc = SQLITE_OK; - } - } - return rc; -} - -/* -** This function may only be called when a read-transaction is open on -** the pager. It returns the total number of pages in the database. -** -** However, if the file is between 1 and <page-size> bytes in size, then -** this is considered a 1 page file. -*/ -SQLITE_PRIVATE void sqlite3PagerPagecount(Pager *pPager, int *pnPage){ - assert( pPager->eState>=PAGER_READER ); - assert( pPager->eState!=PAGER_WRITER_FINISHED ); - *pnPage = (int)pPager->dbSize; -} - - -/* -** Try to obtain a lock of type locktype on the database file. If -** a similar or greater lock is already held, this function is a no-op -** (returning SQLITE_OK immediately). -** -** Otherwise, attempt to obtain the lock using sqlite3OsLock(). Invoke -** the busy callback if the lock is currently not available. Repeat -** until the busy callback returns false or until the attempt to -** obtain the lock succeeds. -** -** Return SQLITE_OK on success and an error code if we cannot obtain -** the lock. If the lock is obtained successfully, set the Pager.state -** variable to locktype before returning. -*/ -static int pager_wait_on_lock(Pager *pPager, int locktype){ - int rc; /* Return code */ - - /* Check that this is either a no-op (because the requested lock is - ** already held, or one of the transistions that the busy-handler - ** may be invoked during, according to the comment above - ** sqlite3PagerSetBusyhandler(). - */ - assert( (pPager->eLock>=locktype) - || (pPager->eLock==NO_LOCK && locktype==SHARED_LOCK) - || (pPager->eLock==RESERVED_LOCK && locktype==EXCLUSIVE_LOCK) - ); - - do { - rc = pagerLockDb(pPager, locktype); - }while( rc==SQLITE_BUSY && pPager->xBusyHandler(pPager->pBusyHandlerArg) ); - return rc; -} - -/* -** Function assertTruncateConstraint(pPager) checks that one of the -** following is true for all dirty pages currently in the page-cache: -** -** a) The page number is less than or equal to the size of the -** current database image, in pages, OR -** -** b) if the page content were written at this time, it would not -** be necessary to write the current content out to the sub-journal -** (as determined by function subjRequiresPage()). -** -** If the condition asserted by this function were not true, and the -** dirty page were to be discarded from the cache via the pagerStress() -** routine, pagerStress() would not write the current page content to -** the database file. If a savepoint transaction were rolled back after -** this happened, the correct behavior would be to restore the current -** content of the page. However, since this content is not present in either -** the database file or the portion of the rollback journal and -** sub-journal rolled back the content could not be restored and the -** database image would become corrupt. It is therefore fortunate that -** this circumstance cannot arise. 
-*/ -#if defined(SQLITE_DEBUG) -static void assertTruncateConstraintCb(PgHdr *pPg){ - assert( pPg->flags&PGHDR_DIRTY ); - assert( !subjRequiresPage(pPg) || pPg->pgno<=pPg->pPager->dbSize ); -} -static void assertTruncateConstraint(Pager *pPager){ - sqlite3PcacheIterateDirty(pPager->pPCache, assertTruncateConstraintCb); -} -#else -# define assertTruncateConstraint(pPager) -#endif - -/* -** Truncate the in-memory database file image to nPage pages. This -** function does not actually modify the database file on disk. It -** just sets the internal state of the pager object so that the -** truncation will be done when the current transaction is committed. -** -** This function is only called right before committing a transaction. -** Once this function has been called, the transaction must either be -** rolled back or committed. It is not safe to call this function and -** then continue writing to the database. -*/ -SQLITE_PRIVATE void sqlite3PagerTruncateImage(Pager *pPager, Pgno nPage){ - assert( pPager->dbSize>=nPage ); - assert( pPager->eState>=PAGER_WRITER_CACHEMOD ); - pPager->dbSize = nPage; - - /* At one point the code here called assertTruncateConstraint() to - ** ensure that all pages being truncated away by this operation are, - ** if one or more savepoints are open, present in the savepoint - ** journal so that they can be restored if the savepoint is rolled - ** back. This is no longer necessary as this function is now only - ** called right before committing a transaction. So although the - ** Pager object may still have open savepoints (Pager.nSavepoint!=0), - ** they cannot be rolled back. So the assertTruncateConstraint() call - ** is no longer correct. */ -} - - -/* -** This function is called before attempting a hot-journal rollback. It -** syncs the journal file to disk, then sets pPager->journalHdr to the -** size of the journal file so that the pager_playback() routine knows -** that the entire journal file has been synced. -** -** Syncing a hot-journal to disk before attempting to roll it back ensures -** that if a power-failure occurs during the rollback, the process that -** attempts rollback following system recovery sees the same journal -** content as this process. -** -** If everything goes as planned, SQLITE_OK is returned. Otherwise, -** an SQLite error code. -*/ -static int pagerSyncHotJournal(Pager *pPager){ - int rc = SQLITE_OK; - if( !pPager->noSync ){ - rc = sqlite3OsSync(pPager->jfd, SQLITE_SYNC_NORMAL); - } - if( rc==SQLITE_OK ){ - rc = sqlite3OsFileSize(pPager->jfd, &pPager->journalHdr); - } - return rc; -} - -/* -** Obtain a reference to a memory mapped page object for page number pgno. -** The new object will use the pointer pData, obtained from xFetch(). -** If successful, set *ppPage to point to the new page reference -** and return SQLITE_OK. Otherwise, return an SQLite error code and set -** *ppPage to zero. -** -** Page references obtained by calling this function should be released -** by calling pagerReleaseMapPage(). 
-*/ -static int pagerAcquireMapPage( - Pager *pPager, /* Pager object */ - Pgno pgno, /* Page number */ - void *pData, /* xFetch()'d data for this page */ - PgHdr **ppPage /* OUT: Acquired page object */ -){ - PgHdr *p; /* Memory mapped page to return */ - - if( pPager->pMmapFreelist ){ - *ppPage = p = pPager->pMmapFreelist; - pPager->pMmapFreelist = p->pDirty; - p->pDirty = 0; - memset(p->pExtra, 0, pPager->nExtra); - }else{ - *ppPage = p = (PgHdr *)sqlite3MallocZero(sizeof(PgHdr) + pPager->nExtra); - if( p==0 ){ - sqlite3OsUnfetch(pPager->fd, (i64)(pgno-1) * pPager->pageSize, pData); - return SQLITE_NOMEM; - } - p->pExtra = (void *)&p[1]; - p->flags = PGHDR_MMAP; - p->nRef = 1; - p->pPager = pPager; - } - - assert( p->pExtra==(void *)&p[1] ); - assert( p->pPage==0 ); - assert( p->flags==PGHDR_MMAP ); - assert( p->pPager==pPager ); - assert( p->nRef==1 ); - - p->pgno = pgno; - p->pData = pData; - pPager->nMmapOut++; - - return SQLITE_OK; -} - -/* -** Release a reference to page pPg. pPg must have been returned by an -** earlier call to pagerAcquireMapPage(). -*/ -static void pagerReleaseMapPage(PgHdr *pPg){ - Pager *pPager = pPg->pPager; - pPager->nMmapOut--; - pPg->pDirty = pPager->pMmapFreelist; - pPager->pMmapFreelist = pPg; - - assert( pPager->fd->pMethods->iVersion>=3 ); - sqlite3OsUnfetch(pPager->fd, (i64)(pPg->pgno-1)*pPager->pageSize, pPg->pData); -} - -/* -** Free all PgHdr objects stored in the Pager.pMmapFreelist list. -*/ -static void pagerFreeMapHdrs(Pager *pPager){ - PgHdr *p; - PgHdr *pNext; - for(p=pPager->pMmapFreelist; p; p=pNext){ - pNext = p->pDirty; - sqlite3_free(p); - } -} - - -/* -** Shutdown the page cache. Free all memory and close all files. -** -** If a transaction was in progress when this routine is called, that -** transaction is rolled back. All outstanding pages are invalidated -** and their memory is freed. Any attempt to use a page associated -** with this page cache after this function returns will likely -** result in a coredump. -** -** This function always succeeds. If a transaction is active an attempt -** is made to roll it back. If an error occurs during the rollback -** a hot journal may be left in the filesystem but no error is returned -** to the caller. -*/ -SQLITE_PRIVATE int sqlite3PagerClose(Pager *pPager){ - u8 *pTmp = (u8 *)pPager->pTmpSpace; - - assert( assert_pager_state(pPager) ); - disable_simulated_io_errors(); - sqlite3BeginBenignMalloc(); - pagerFreeMapHdrs(pPager); - /* pPager->errCode = 0; */ - pPager->exclusiveMode = 0; -#ifndef SQLITE_OMIT_WAL - sqlite3WalClose(pPager->pWal, pPager->ckptSyncFlags, pPager->pageSize, pTmp); - pPager->pWal = 0; -#endif - pager_reset(pPager); - if( MEMDB ){ - pager_unlock(pPager); - }else{ - /* If it is open, sync the journal file before calling UnlockAndRollback. - ** If this is not done, then an unsynced portion of the open journal - ** file may be played back into the database. If a power failure occurs - ** while this is happening, the database could become corrupt. - ** - ** If an error occurs while trying to sync the journal, shift the pager - ** into the ERROR state. This causes UnlockAndRollback to unlock the - ** database and close the journal file without attempting to roll it - ** back or finalize it. The next database user will have to do hot-journal - ** rollback before accessing the database file. 
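/*
** pagerAcquireMapPage() and pagerReleaseMapPage() above recycle page
** objects through a singly-linked freelist threaded through a pointer
** that is unused while the object is live.  A generic sketch of that
** allocate-or-reuse pattern; Node is a hypothetical stand-in for PgHdr.
*/
#include <stdlib.h>
#include <string.h>

typedef struct Node Node;
struct Node {
  Node *pNextFree;      /* Freelist link, meaningless while in use */
  char payload[64];     /* Per-object state                        */
};

static Node *freeList = 0;

static Node *acquireNode(void){
  Node *p = freeList;
  if( p ){
    freeList = p->pNextFree;          /* Reuse the freelist head        */
    memset(p, 0, sizeof(*p));
  }else{
    p = calloc(1, sizeof(*p));        /* Otherwise allocate a fresh one */
  }
  return p;
}

static void releaseNode(Node *p){
  p->pNextFree = freeList;            /* Push back onto the freelist    */
  freeList = p;
}

static void drainFreeList(void){      /* Mirror of pagerFreeMapHdrs()   */
  while( freeList ){
    Node *pNext = freeList->pNextFree;
    free(freeList);
    freeList = pNext;
  }
}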
- */ - if( isOpen(pPager->jfd) ){ - pager_error(pPager, pagerSyncHotJournal(pPager)); - } - pagerUnlockAndRollback(pPager); - } - sqlite3EndBenignMalloc(); - enable_simulated_io_errors(); - PAGERTRACE(("CLOSE %d\n", PAGERID(pPager))); - IOTRACE(("CLOSE %p\n", pPager)) - sqlite3OsClose(pPager->jfd); - sqlite3OsClose(pPager->fd); - sqlite3PageFree(pTmp); - sqlite3PcacheClose(pPager->pPCache); - -#ifdef SQLITE_HAS_CODEC - if( pPager->xCodecFree ) pPager->xCodecFree(pPager->pCodec); -#endif - - assert( !pPager->aSavepoint && !pPager->pInJournal ); - assert( !isOpen(pPager->jfd) && !isOpen(pPager->sjfd) ); - - sqlite3_free(pPager); - return SQLITE_OK; -} - -#if !defined(NDEBUG) || defined(SQLITE_TEST) -/* -** Return the page number for page pPg. -*/ -SQLITE_PRIVATE Pgno sqlite3PagerPagenumber(DbPage *pPg){ - return pPg->pgno; -} -#endif - -/* -** Increment the reference count for page pPg. -*/ -SQLITE_PRIVATE void sqlite3PagerRef(DbPage *pPg){ - sqlite3PcacheRef(pPg); -} - -/* -** Sync the journal. In other words, make sure all the pages that have -** been written to the journal have actually reached the surface of the -** disk and can be restored in the event of a hot-journal rollback. -** -** If the Pager.noSync flag is set, then this function is a no-op. -** Otherwise, the actions required depend on the journal-mode and the -** device characteristics of the file-system, as follows: -** -** * If the journal file is an in-memory journal file, no action need -** be taken. -** -** * Otherwise, if the device does not support the SAFE_APPEND property, -** then the nRec field of the most recently written journal header -** is updated to contain the number of journal records that have -** been written following it. If the pager is operating in full-sync -** mode, then the journal file is synced before this field is updated. -** -** * If the device does not support the SEQUENTIAL property, then -** journal file is synced. -** -** Or, in pseudo-code: -** -** if( NOT <in-memory journal> ){ -** if( NOT SAFE_APPEND ){ -** if( <full-sync mode> ) xSync(<journal file>); -** <update nRec field> -** } -** if( NOT SEQUENTIAL ) xSync(<journal file>); -** } -** -** If successful, this routine clears the PGHDR_NEED_SYNC flag of every -** page currently held in memory before returning SQLITE_OK. If an IO -** error is encountered, then the IO error code is returned to the caller. -*/ -static int syncJournal(Pager *pPager, int newHdr){ - int rc; /* Return code */ - - assert( pPager->eState==PAGER_WRITER_CACHEMOD - || pPager->eState==PAGER_WRITER_DBMOD - ); - assert( assert_pager_state(pPager) ); - assert( !pagerUseWal(pPager) ); - - rc = sqlite3PagerExclusiveLock(pPager); - if( rc!=SQLITE_OK ) return rc; - - if( !pPager->noSync ){ - assert( !pPager->tempFile ); - if( isOpen(pPager->jfd) && pPager->journalMode!=PAGER_JOURNALMODE_MEMORY ){ - const int iDc = sqlite3OsDeviceCharacteristics(pPager->fd); - assert( isOpen(pPager->jfd) ); - - if( 0==(iDc&SQLITE_IOCAP_SAFE_APPEND) ){ - /* This block deals with an obscure problem. If the last connection - ** that wrote to this database was operating in persistent-journal - ** mode, then the journal file may at this point actually be larger - ** than Pager.journalOff bytes. 
If the next thing in the journal - ** file happens to be a journal-header (written as part of the - ** previous connection's transaction), and a crash or power-failure - ** occurs after nRec is updated but before this connection writes - ** anything else to the journal file (or commits/rolls back its - ** transaction), then SQLite may become confused when doing the - ** hot-journal rollback following recovery. It may roll back all - ** of this connections data, then proceed to rolling back the old, - ** out-of-date data that follows it. Database corruption. - ** - ** To work around this, if the journal file does appear to contain - ** a valid header following Pager.journalOff, then write a 0x00 - ** byte to the start of it to prevent it from being recognized. - ** - ** Variable iNextHdrOffset is set to the offset at which this - ** problematic header will occur, if it exists. aMagic is used - ** as a temporary buffer to inspect the first couple of bytes of - ** the potential journal header. - */ - i64 iNextHdrOffset; - u8 aMagic[8]; - u8 zHeader[sizeof(aJournalMagic)+4]; - - memcpy(zHeader, aJournalMagic, sizeof(aJournalMagic)); - put32bits(&zHeader[sizeof(aJournalMagic)], pPager->nRec); - - iNextHdrOffset = journalHdrOffset(pPager); - rc = sqlite3OsRead(pPager->jfd, aMagic, 8, iNextHdrOffset); - if( rc==SQLITE_OK && 0==memcmp(aMagic, aJournalMagic, 8) ){ - static const u8 zerobyte = 0; - rc = sqlite3OsWrite(pPager->jfd, &zerobyte, 1, iNextHdrOffset); - } - if( rc!=SQLITE_OK && rc!=SQLITE_IOERR_SHORT_READ ){ - return rc; - } - - /* Write the nRec value into the journal file header. If in - ** full-synchronous mode, sync the journal first. This ensures that - ** all data has really hit the disk before nRec is updated to mark - ** it as a candidate for rollback. - ** - ** This is not required if the persistent media supports the - ** SAFE_APPEND property. Because in this case it is not possible - ** for garbage data to be appended to the file, the nRec field - ** is populated with 0xFFFFFFFF when the journal header is written - ** and never needs to be updated. - */ - if( pPager->fullSync && 0==(iDc&SQLITE_IOCAP_SEQUENTIAL) ){ - PAGERTRACE(("SYNC journal of %d\n", PAGERID(pPager))); - IOTRACE(("JSYNC %p\n", pPager)) - rc = sqlite3OsSync(pPager->jfd, pPager->syncFlags); - if( rc!=SQLITE_OK ) return rc; - } - IOTRACE(("JHDR %p %lld\n", pPager, pPager->journalHdr)); - rc = sqlite3OsWrite( - pPager->jfd, zHeader, sizeof(zHeader), pPager->journalHdr - ); - if( rc!=SQLITE_OK ) return rc; - } - if( 0==(iDc&SQLITE_IOCAP_SEQUENTIAL) ){ - PAGERTRACE(("SYNC journal of %d\n", PAGERID(pPager))); - IOTRACE(("JSYNC %p\n", pPager)) - rc = sqlite3OsSync(pPager->jfd, pPager->syncFlags| - (pPager->syncFlags==SQLITE_SYNC_FULL?SQLITE_SYNC_DATAONLY:0) - ); - if( rc!=SQLITE_OK ) return rc; - } - - pPager->journalHdr = pPager->journalOff; - if( newHdr && 0==(iDc&SQLITE_IOCAP_SAFE_APPEND) ){ - pPager->nRec = 0; - rc = writeJournalHdr(pPager); - if( rc!=SQLITE_OK ) return rc; - } - }else{ - pPager->journalHdr = pPager->journalOff; - } - } - - /* Unless the pager is in noSync mode, the journal file was just - ** successfully synced. Either way, clear the PGHDR_NEED_SYNC flag on - ** all pages. - */ - sqlite3PcacheClearSyncFlags(pPager->pPCache); - pPager->eState = PAGER_WRITER_DBMOD; - assert( assert_pager_state(pPager) ); - return SQLITE_OK; -} - -/* -** The argument is the first in a linked list of dirty pages connected -** by the PgHdr.pDirty pointer. 
This function writes each one of the -** in-memory pages in the list to the database file. The argument may -** be NULL, representing an empty list. In this case this function is -** a no-op. -** -** The pager must hold at least a RESERVED lock when this function -** is called. Before writing anything to the database file, this lock -** is upgraded to an EXCLUSIVE lock. If the lock cannot be obtained, -** SQLITE_BUSY is returned and no data is written to the database file. -** -** If the pager is a temp-file pager and the actual file-system file -** is not yet open, it is created and opened before any data is -** written out. -** -** Once the lock has been upgraded and, if necessary, the file opened, -** the pages are written out to the database file in list order. Writing -** a page is skipped if it meets either of the following criteria: -** -** * The page number is greater than Pager.dbSize, or -** * The PGHDR_DONT_WRITE flag is set on the page. -** -** If writing out a page causes the database file to grow, Pager.dbFileSize -** is updated accordingly. If page 1 is written out, then the value cached -** in Pager.dbFileVers[] is updated to match the new value stored in -** the database file. -** -** If everything is successful, SQLITE_OK is returned. If an IO error -** occurs, an IO error code is returned. Or, if the EXCLUSIVE lock cannot -** be obtained, SQLITE_BUSY is returned. -*/ -static int pager_write_pagelist(Pager *pPager, PgHdr *pList){ - int rc = SQLITE_OK; /* Return code */ - - /* This function is only called for rollback pagers in WRITER_DBMOD state. */ - assert( !pagerUseWal(pPager) ); - assert( pPager->eState==PAGER_WRITER_DBMOD ); - assert( pPager->eLock==EXCLUSIVE_LOCK ); - - /* If the file is a temp-file has not yet been opened, open it now. It - ** is not possible for rc to be other than SQLITE_OK if this branch - ** is taken, as pager_wait_on_lock() is a no-op for temp-files. - */ - if( !isOpen(pPager->fd) ){ - assert( pPager->tempFile && rc==SQLITE_OK ); - rc = pagerOpentemp(pPager, pPager->fd, pPager->vfsFlags); - } - - /* Before the first write, give the VFS a hint of what the final - ** file size will be. - */ - assert( rc!=SQLITE_OK || isOpen(pPager->fd) ); - if( rc==SQLITE_OK - && pPager->dbHintSize<pPager->dbSize - && (pList->pDirty || pList->pgno>pPager->dbHintSize) - ){ - sqlite3_int64 szFile = pPager->pageSize * (sqlite3_int64)pPager->dbSize; - sqlite3OsFileControlHint(pPager->fd, SQLITE_FCNTL_SIZE_HINT, &szFile); - pPager->dbHintSize = pPager->dbSize; - } - - while( rc==SQLITE_OK && pList ){ - Pgno pgno = pList->pgno; - - /* If there are dirty pages in the page cache with page numbers greater - ** than Pager.dbSize, this means sqlite3PagerTruncateImage() was called to - ** make the file smaller (presumably by auto-vacuum code). Do not write - ** any such pages to the file. - ** - ** Also, do not write out any page that has the PGHDR_DONT_WRITE flag - ** set (set by sqlite3PagerDontWrite()). - */ - if( pgno<=pPager->dbSize && 0==(pList->flags&PGHDR_DONT_WRITE) ){ - i64 offset = (pgno-1)*(i64)pPager->pageSize; /* Offset to write */ - char *pData; /* Data to write */ - - assert( (pList->flags&PGHDR_NEED_SYNC)==0 ); - if( pList->pgno==1 ) pager_write_changecounter(pList); - - /* Encode the database */ - CODEC2(pPager, pList->pData, pgno, 6, return SQLITE_NOMEM, pData); - - /* Write out the page data. 
*/ - rc = sqlite3OsWrite(pPager->fd, pData, pPager->pageSize, offset); - - /* If page 1 was just written, update Pager.dbFileVers to match - ** the value now stored in the database file. If writing this - ** page caused the database file to grow, update dbFileSize. - */ - if( pgno==1 ){ - memcpy(&pPager->dbFileVers, &pData[24], sizeof(pPager->dbFileVers)); - } - if( pgno>pPager->dbFileSize ){ - pPager->dbFileSize = pgno; - } - pPager->aStat[PAGER_STAT_WRITE]++; - - /* Update any backup objects copying the contents of this pager. */ - sqlite3BackupUpdate(pPager->pBackup, pgno, (u8*)pList->pData); - - PAGERTRACE(("STORE %d page %d hash(%08x)\n", - PAGERID(pPager), pgno, pager_pagehash(pList))); - IOTRACE(("PGOUT %p %d\n", pPager, pgno)); - PAGER_INCR(sqlite3_pager_writedb_count); - }else{ - PAGERTRACE(("NOSTORE %d page %d\n", PAGERID(pPager), pgno)); - } - pager_set_pagehash(pList); - pList = pList->pDirty; - } - - return rc; -} - -/* -** Ensure that the sub-journal file is open. If it is already open, this -** function is a no-op. -** -** SQLITE_OK is returned if everything goes according to plan. An -** SQLITE_IOERR_XXX error code is returned if a call to sqlite3OsOpen() -** fails. -*/ -static int openSubJournal(Pager *pPager){ - int rc = SQLITE_OK; - if( !isOpen(pPager->sjfd) ){ - if( pPager->journalMode==PAGER_JOURNALMODE_MEMORY || pPager->subjInMemory ){ - sqlite3MemJournalOpen(pPager->sjfd); - }else{ - rc = pagerOpentemp(pPager, pPager->sjfd, SQLITE_OPEN_SUBJOURNAL); - } - } - return rc; -} - -/* -** Append a record of the current state of page pPg to the sub-journal. -** It is the callers responsibility to use subjRequiresPage() to check -** that it is really required before calling this function. -** -** If successful, set the bit corresponding to pPg->pgno in the bitvecs -** for all open savepoints before returning. -** -** This function returns SQLITE_OK if everything is successful, an IO -** error code if the attempt to write to the sub-journal fails, or -** SQLITE_NOMEM if a malloc fails while setting a bit in a savepoint -** bitvec. -*/ -static int subjournalPage(PgHdr *pPg){ - int rc = SQLITE_OK; - Pager *pPager = pPg->pPager; - if( pPager->journalMode!=PAGER_JOURNALMODE_OFF ){ - - /* Open the sub-journal, if it has not already been opened */ - assert( pPager->useJournal ); - assert( isOpen(pPager->jfd) || pagerUseWal(pPager) ); - assert( isOpen(pPager->sjfd) || pPager->nSubRec==0 ); - assert( pagerUseWal(pPager) - || pageInJournal(pPager, pPg) - || pPg->pgno>pPager->dbOrigSize - ); - rc = openSubJournal(pPager); - - /* If the sub-journal was opened successfully (or was already open), - ** write the journal record into the file. */ - if( rc==SQLITE_OK ){ - void *pData = pPg->pData; - i64 offset = (i64)pPager->nSubRec*(4+pPager->pageSize); - char *pData2; - - CODEC2(pPager, pData, pPg->pgno, 7, return SQLITE_NOMEM, pData2); - PAGERTRACE(("STMT-JOURNAL %d page %d\n", PAGERID(pPager), pPg->pgno)); - rc = write32bits(pPager->sjfd, offset, pPg->pgno); - if( rc==SQLITE_OK ){ - rc = sqlite3OsWrite(pPager->sjfd, pData2, pPager->pageSize, offset+4); - } - } - } - if( rc==SQLITE_OK ){ - pPager->nSubRec++; - assert( pPager->nSavepoint>0 ); - rc = addToSavepointBitvecs(pPager, pPg->pgno); - } - return rc; -} - -/* -** This function is called by the pcache layer when it has reached some -** soft memory limit. The first argument is a pointer to a Pager object -** (cast as a void*). The pager is always 'purgeable' (not an in-memory -** database). 
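/*
** Both pager_write_pagelist() and subjournalPage() above derive a byte
** offset from the page number: (pgno-1)*pageSize in the database file, and
** recNo*(4+pageSize) in the sub-journal (a 4-byte page number followed by
** the page image).  A sketch of the database-file case with plain POSIX
** pwrite() instead of the VFS layer; the file descriptor and the page
** buffer are assumed to come from elsewhere.
*/
#include <sys/types.h>
#include <unistd.h>

static int writeDbPage(int fd, const void *pData, int pageSize, unsigned pgno){
  off_t offset = (off_t)(pgno-1) * pageSize;   /* page 1 lives at offset 0 */
  return pwrite(fd, pData, (size_t)pageSize, offset)==(ssize_t)pageSize
         ? 0 : -1;
}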
The second argument is a reference to a page that is -** currently dirty but has no outstanding references. The page -** is always associated with the Pager object passed as the first -** argument. -** -** The job of this function is to make pPg clean by writing its contents -** out to the database file, if possible. This may involve syncing the -** journal file. -** -** If successful, sqlite3PcacheMakeClean() is called on the page and -** SQLITE_OK returned. If an IO error occurs while trying to make the -** page clean, the IO error code is returned. If the page cannot be -** made clean for some other reason, but no error occurs, then SQLITE_OK -** is returned by sqlite3PcacheMakeClean() is not called. -*/ -static int pagerStress(void *p, PgHdr *pPg){ - Pager *pPager = (Pager *)p; - int rc = SQLITE_OK; - - assert( pPg->pPager==pPager ); - assert( pPg->flags&PGHDR_DIRTY ); - - /* The doNotSpill NOSYNC bit is set during times when doing a sync of - ** journal (and adding a new header) is not allowed. This occurs - ** during calls to sqlite3PagerWrite() while trying to journal multiple - ** pages belonging to the same sector. - ** - ** The doNotSpill ROLLBACK and OFF bits inhibits all cache spilling - ** regardless of whether or not a sync is required. This is set during - ** a rollback or by user request, respectively. - ** - ** Spilling is also prohibited when in an error state since that could - ** lead to database corruption. In the current implementaton it - ** is impossible for sqlite3PcacheFetch() to be called with createFlag==1 - ** while in the error state, hence it is impossible for this routine to - ** be called in the error state. Nevertheless, we include a NEVER() - ** test for the error state as a safeguard against future changes. - */ - if( NEVER(pPager->errCode) ) return SQLITE_OK; - testcase( pPager->doNotSpill & SPILLFLAG_ROLLBACK ); - testcase( pPager->doNotSpill & SPILLFLAG_OFF ); - testcase( pPager->doNotSpill & SPILLFLAG_NOSYNC ); - if( pPager->doNotSpill - && ((pPager->doNotSpill & (SPILLFLAG_ROLLBACK|SPILLFLAG_OFF))!=0 - || (pPg->flags & PGHDR_NEED_SYNC)!=0) - ){ - return SQLITE_OK; - } - - pPg->pDirty = 0; - if( pagerUseWal(pPager) ){ - /* Write a single frame for this page to the log. */ - if( subjRequiresPage(pPg) ){ - rc = subjournalPage(pPg); - } - if( rc==SQLITE_OK ){ - rc = pagerWalFrames(pPager, pPg, 0, 0); - } - }else{ - - /* Sync the journal file if required. */ - if( pPg->flags&PGHDR_NEED_SYNC - || pPager->eState==PAGER_WRITER_CACHEMOD - ){ - rc = syncJournal(pPager, 1); - } - - /* If the page number of this page is larger than the current size of - ** the database image, it may need to be written to the sub-journal. - ** This is because the call to pager_write_pagelist() below will not - ** actually write data to the file in this case. - ** - ** Consider the following sequence of events: - ** - ** BEGIN; - ** <journal page X> - ** <modify page X> - ** SAVEPOINT sp; - ** <shrink database file to Y pages> - ** pagerStress(page X) - ** ROLLBACK TO sp; - ** - ** If (X>Y), then when pagerStress is called page X will not be written - ** out to the database file, but will be dropped from the cache. Then, - ** following the "ROLLBACK TO sp" statement, reading page X will read - ** data from the database file. This will be the copy of page X as it - ** was when the transaction started, not as it was when "SAVEPOINT sp" - ** was executed. 
- ** - ** The solution is to write the current data for page X into the - ** sub-journal file now (if it is not already there), so that it will - ** be restored to its current value when the "ROLLBACK TO sp" is - ** executed. - */ - if( NEVER( - rc==SQLITE_OK && pPg->pgno>pPager->dbSize && subjRequiresPage(pPg) - ) ){ - rc = subjournalPage(pPg); - } - - /* Write the contents of the page out to the database file. */ - if( rc==SQLITE_OK ){ - assert( (pPg->flags&PGHDR_NEED_SYNC)==0 ); - rc = pager_write_pagelist(pPager, pPg); - } - } - - /* Mark the page as clean. */ - if( rc==SQLITE_OK ){ - PAGERTRACE(("STRESS %d page %d\n", PAGERID(pPager), pPg->pgno)); - sqlite3PcacheMakeClean(pPg); - } - - return pager_error(pPager, rc); -} - - -/* -** Allocate and initialize a new Pager object and put a pointer to it -** in *ppPager. The pager should eventually be freed by passing it -** to sqlite3PagerClose(). -** -** The zFilename argument is the path to the database file to open. -** If zFilename is NULL then a randomly-named temporary file is created -** and used as the file to be cached. Temporary files are be deleted -** automatically when they are closed. If zFilename is ":memory:" then -** all information is held in cache. It is never written to disk. -** This can be used to implement an in-memory database. -** -** The nExtra parameter specifies the number of bytes of space allocated -** along with each page reference. This space is available to the user -** via the sqlite3PagerGetExtra() API. -** -** The flags argument is used to specify properties that affect the -** operation of the pager. It should be passed some bitwise combination -** of the PAGER_* flags. -** -** The vfsFlags parameter is a bitmask to pass to the flags parameter -** of the xOpen() method of the supplied VFS when opening files. -** -** If the pager object is allocated and the specified file opened -** successfully, SQLITE_OK is returned and *ppPager set to point to -** the new pager object. If an error occurs, *ppPager is set to NULL -** and error code returned. This function may return SQLITE_NOMEM -** (sqlite3Malloc() is used to allocate memory), SQLITE_CANTOPEN or -** various SQLITE_IO_XXX errors. -*/ -SQLITE_PRIVATE int sqlite3PagerOpen( - sqlite3_vfs *pVfs, /* The virtual file system to use */ - Pager **ppPager, /* OUT: Return the Pager structure here */ - const char *zFilename, /* Name of the database file to open */ - int nExtra, /* Extra bytes append to each in-memory page */ - int flags, /* flags controlling this file */ - int vfsFlags, /* flags passed through to sqlite3_vfs.xOpen() */ - void (*xReinit)(DbPage*) /* Function to reinitialize pages */ -){ - u8 *pPtr; - Pager *pPager = 0; /* Pager object to allocate and return */ - int rc = SQLITE_OK; /* Return code */ - int tempFile = 0; /* True for temp files (incl. 
in-memory files) */ - int memDb = 0; /* True if this is an in-memory file */ - int readOnly = 0; /* True if this is a read-only file */ - int journalFileSize; /* Bytes to allocate for each journal fd */ - char *zPathname = 0; /* Full path to database file */ - int nPathname = 0; /* Number of bytes in zPathname */ - int useJournal = (flags & PAGER_OMIT_JOURNAL)==0; /* False to omit journal */ - int pcacheSize = sqlite3PcacheSize(); /* Bytes to allocate for PCache */ - u32 szPageDflt = SQLITE_DEFAULT_PAGE_SIZE; /* Default page size */ - const char *zUri = 0; /* URI args to copy */ - int nUri = 0; /* Number of bytes of URI args at *zUri */ - - /* Figure out how much space is required for each journal file-handle - ** (there are two of them, the main journal and the sub-journal). This - ** is the maximum space required for an in-memory journal file handle - ** and a regular journal file-handle. Note that a "regular journal-handle" - ** may be a wrapper capable of caching the first portion of the journal - ** file in memory to implement the atomic-write optimization (see - ** source file journal.c). - */ - if( sqlite3JournalSize(pVfs)>sqlite3MemJournalSize() ){ - journalFileSize = ROUND8(sqlite3JournalSize(pVfs)); - }else{ - journalFileSize = ROUND8(sqlite3MemJournalSize()); - } - - /* Set the output variable to NULL in case an error occurs. */ - *ppPager = 0; - -#ifndef SQLITE_OMIT_MEMORYDB - if( flags & PAGER_MEMORY ){ - memDb = 1; - if( zFilename && zFilename[0] ){ - zPathname = sqlite3DbStrDup(0, zFilename); - if( zPathname==0 ) return SQLITE_NOMEM; - nPathname = sqlite3Strlen30(zPathname); - zFilename = 0; - } - } -#endif - - /* Compute and store the full pathname in an allocated buffer pointed - ** to by zPathname, length nPathname. Or, if this is a temporary file, - ** leave both nPathname and zPathname set to 0. - */ - if( zFilename && zFilename[0] ){ - const char *z; - nPathname = pVfs->mxPathname+1; - zPathname = sqlite3DbMallocRaw(0, nPathname*2); - if( zPathname==0 ){ - return SQLITE_NOMEM; - } - zPathname[0] = 0; /* Make sure initialized even if FullPathname() fails */ - rc = sqlite3OsFullPathname(pVfs, zFilename, nPathname, zPathname); - nPathname = sqlite3Strlen30(zPathname); - z = zUri = &zFilename[sqlite3Strlen30(zFilename)+1]; - while( *z ){ - z += sqlite3Strlen30(z)+1; - z += sqlite3Strlen30(z)+1; - } - nUri = (int)(&z[1] - zUri); - assert( nUri>=0 ); - if( rc==SQLITE_OK && nPathname+8>pVfs->mxPathname ){ - /* This branch is taken when the journal path required by - ** the database being opened will be more than pVfs->mxPathname - ** bytes in length. This means the database cannot be opened, - ** as it will not be possible to open the journal file or even - ** check for a hot-journal before reading. - */ - rc = SQLITE_CANTOPEN_BKPT; - } - if( rc!=SQLITE_OK ){ - sqlite3DbFree(0, zPathname); - return rc; - } - } - - /* Allocate memory for the Pager structure, PCache object, the - ** three file descriptors, the database file name and the journal - ** file name. 
The layout in memory is as follows: - ** - ** Pager object (sizeof(Pager) bytes) - ** PCache object (sqlite3PcacheSize() bytes) - ** Database file handle (pVfs->szOsFile bytes) - ** Sub-journal file handle (journalFileSize bytes) - ** Main journal file handle (journalFileSize bytes) - ** Database file name (nPathname+1 bytes) - ** Journal file name (nPathname+8+1 bytes) - */ - pPtr = (u8 *)sqlite3MallocZero( - ROUND8(sizeof(*pPager)) + /* Pager structure */ - ROUND8(pcacheSize) + /* PCache object */ - ROUND8(pVfs->szOsFile) + /* The main db file */ - journalFileSize * 2 + /* The two journal files */ - nPathname + 1 + nUri + /* zFilename */ - nPathname + 8 + 2 /* zJournal */ -#ifndef SQLITE_OMIT_WAL - + nPathname + 4 + 2 /* zWal */ -#endif - ); - assert( EIGHT_BYTE_ALIGNMENT(SQLITE_INT_TO_PTR(journalFileSize)) ); - if( !pPtr ){ - sqlite3DbFree(0, zPathname); - return SQLITE_NOMEM; - } - pPager = (Pager*)(pPtr); - pPager->pPCache = (PCache*)(pPtr += ROUND8(sizeof(*pPager))); - pPager->fd = (sqlite3_file*)(pPtr += ROUND8(pcacheSize)); - pPager->sjfd = (sqlite3_file*)(pPtr += ROUND8(pVfs->szOsFile)); - pPager->jfd = (sqlite3_file*)(pPtr += journalFileSize); - pPager->zFilename = (char*)(pPtr += journalFileSize); - assert( EIGHT_BYTE_ALIGNMENT(pPager->jfd) ); - - /* Fill in the Pager.zFilename and Pager.zJournal buffers, if required. */ - if( zPathname ){ - assert( nPathname>0 ); - pPager->zJournal = (char*)(pPtr += nPathname + 1 + nUri); - memcpy(pPager->zFilename, zPathname, nPathname); - if( nUri ) memcpy(&pPager->zFilename[nPathname+1], zUri, nUri); - memcpy(pPager->zJournal, zPathname, nPathname); - memcpy(&pPager->zJournal[nPathname], "-journal\000", 8+2); - sqlite3FileSuffix3(pPager->zFilename, pPager->zJournal); -#ifndef SQLITE_OMIT_WAL - pPager->zWal = &pPager->zJournal[nPathname+8+1]; - memcpy(pPager->zWal, zPathname, nPathname); - memcpy(&pPager->zWal[nPathname], "-wal\000", 4+1); - sqlite3FileSuffix3(pPager->zFilename, pPager->zWal); -#endif - sqlite3DbFree(0, zPathname); - } - pPager->pVfs = pVfs; - pPager->vfsFlags = vfsFlags; - - /* Open the pager file. - */ - if( zFilename && zFilename[0] ){ - int fout = 0; /* VFS flags returned by xOpen() */ - rc = sqlite3OsOpen(pVfs, pPager->zFilename, pPager->fd, vfsFlags, &fout); - assert( !memDb ); - readOnly = (fout&SQLITE_OPEN_READONLY); - - /* If the file was successfully opened for read/write access, - ** choose a default page size in case we have to create the - ** database file. The default page size is the maximum of: - ** - ** + SQLITE_DEFAULT_PAGE_SIZE, - ** + The value returned by sqlite3OsSectorSize() - ** + The largest page size that can be written atomically. 
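/*
** sqlite3PagerOpen() above performs one allocation and carves the Pager,
** the PCache, the file handles and the name buffers out of it at 8-byte
** aligned offsets, so freeing the Pager releases everything at once.  A
** generic sketch of that single-allocation pattern with hypothetical
** structures; ROUND8 is the same alignment macro used above.
*/
#include <stdlib.h>

#define ROUND8(x) (((x)+7)&~7)

typedef struct Outer { int a; } Outer;
typedef struct Inner { double b; } Inner;

static Outer *allocTogether(int nName, Inner **ppInner, char **pzName){
  unsigned char *p = calloc(1,
      ROUND8(sizeof(Outer)) + ROUND8(sizeof(Inner)) + nName + 1);
  if( p==0 ) return 0;
  *ppInner = (Inner*)&p[ROUND8(sizeof(Outer))];
  *pzName  = (char*)&p[ROUND8(sizeof(Outer)) + ROUND8(sizeof(Inner))];
  return (Outer*)p;        /* free() of the return value releases it all */
}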
- */ - if( rc==SQLITE_OK ){ - int iDc = sqlite3OsDeviceCharacteristics(pPager->fd); - if( !readOnly ){ - setSectorSize(pPager); - assert(SQLITE_DEFAULT_PAGE_SIZE<=SQLITE_MAX_DEFAULT_PAGE_SIZE); - if( szPageDflt<(u32)pPager->sectorSize ){ - if( pPager->sectorSize>SQLITE_MAX_DEFAULT_PAGE_SIZE ){ - szPageDflt = SQLITE_MAX_DEFAULT_PAGE_SIZE; - }else{ - szPageDflt = (u32)pPager->sectorSize; - } - } -#ifdef SQLITE_ENABLE_ATOMIC_WRITE - { - int ii; - assert(SQLITE_IOCAP_ATOMIC512==(512>>8)); - assert(SQLITE_IOCAP_ATOMIC64K==(65536>>8)); - assert(SQLITE_MAX_DEFAULT_PAGE_SIZE<=65536); - for(ii=szPageDflt; ii<=SQLITE_MAX_DEFAULT_PAGE_SIZE; ii=ii*2){ - if( iDc&(SQLITE_IOCAP_ATOMIC|(ii>>8)) ){ - szPageDflt = ii; - } - } - } -#endif - } - pPager->noLock = sqlite3_uri_boolean(zFilename, "nolock", 0); - if( (iDc & SQLITE_IOCAP_IMMUTABLE)!=0 - || sqlite3_uri_boolean(zFilename, "immutable", 0) ){ - vfsFlags |= SQLITE_OPEN_READONLY; - goto act_like_temp_file; - } - } - }else{ - /* If a temporary file is requested, it is not opened immediately. - ** In this case we accept the default page size and delay actually - ** opening the file until the first call to OsWrite(). - ** - ** This branch is also run for an in-memory database. An in-memory - ** database is the same as a temp-file that is never written out to - ** disk and uses an in-memory rollback journal. - ** - ** This branch also runs for files marked as immutable. - */ -act_like_temp_file: - tempFile = 1; - pPager->eState = PAGER_READER; /* Pretend we already have a lock */ - pPager->eLock = EXCLUSIVE_LOCK; /* Pretend we are in EXCLUSIVE locking mode */ - pPager->noLock = 1; /* Do no locking */ - readOnly = (vfsFlags&SQLITE_OPEN_READONLY); - } - - /* The following call to PagerSetPagesize() serves to set the value of - ** Pager.pageSize and to allocate the Pager.pTmpSpace buffer. - */ - if( rc==SQLITE_OK ){ - assert( pPager->memDb==0 ); - rc = sqlite3PagerSetPagesize(pPager, &szPageDflt, -1); - testcase( rc!=SQLITE_OK ); - } - - /* If an error occurred in either of the blocks above, free the - ** Pager structure and close the file. - */ - if( rc!=SQLITE_OK ){ - assert( !pPager->pTmpSpace ); - sqlite3OsClose(pPager->fd); - sqlite3_free(pPager); - return rc; - } - - /* Initialize the PCache object. 
*/ - assert( nExtra<1000 ); - nExtra = ROUND8(nExtra); - sqlite3PcacheOpen(szPageDflt, nExtra, !memDb, - !memDb?pagerStress:0, (void *)pPager, pPager->pPCache); - - PAGERTRACE(("OPEN %d %s\n", FILEHANDLEID(pPager->fd), pPager->zFilename)); - IOTRACE(("OPEN %p %s\n", pPager, pPager->zFilename)) - - pPager->useJournal = (u8)useJournal; - /* pPager->stmtOpen = 0; */ - /* pPager->stmtInUse = 0; */ - /* pPager->nRef = 0; */ - /* pPager->stmtSize = 0; */ - /* pPager->stmtJSize = 0; */ - /* pPager->nPage = 0; */ - pPager->mxPgno = SQLITE_MAX_PAGE_COUNT; - /* pPager->state = PAGER_UNLOCK; */ - /* pPager->errMask = 0; */ - pPager->tempFile = (u8)tempFile; - assert( tempFile==PAGER_LOCKINGMODE_NORMAL - || tempFile==PAGER_LOCKINGMODE_EXCLUSIVE ); - assert( PAGER_LOCKINGMODE_EXCLUSIVE==1 ); - pPager->exclusiveMode = (u8)tempFile; - pPager->changeCountDone = pPager->tempFile; - pPager->memDb = (u8)memDb; - pPager->readOnly = (u8)readOnly; - assert( useJournal || pPager->tempFile ); - pPager->noSync = pPager->tempFile; - if( pPager->noSync ){ - assert( pPager->fullSync==0 ); - assert( pPager->syncFlags==0 ); - assert( pPager->walSyncFlags==0 ); - assert( pPager->ckptSyncFlags==0 ); - }else{ - pPager->fullSync = 1; - pPager->syncFlags = SQLITE_SYNC_NORMAL; - pPager->walSyncFlags = SQLITE_SYNC_NORMAL | WAL_SYNC_TRANSACTIONS; - pPager->ckptSyncFlags = SQLITE_SYNC_NORMAL; - } - /* pPager->pFirst = 0; */ - /* pPager->pFirstSynced = 0; */ - /* pPager->pLast = 0; */ - pPager->nExtra = (u16)nExtra; - pPager->journalSizeLimit = SQLITE_DEFAULT_JOURNAL_SIZE_LIMIT; - assert( isOpen(pPager->fd) || tempFile ); - setSectorSize(pPager); - if( !useJournal ){ - pPager->journalMode = PAGER_JOURNALMODE_OFF; - }else if( memDb ){ - pPager->journalMode = PAGER_JOURNALMODE_MEMORY; - } - /* pPager->xBusyHandler = 0; */ - /* pPager->pBusyHandlerArg = 0; */ - pPager->xReiniter = xReinit; - /* memset(pPager->aHash, 0, sizeof(pPager->aHash)); */ - /* pPager->szMmap = SQLITE_DEFAULT_MMAP_SIZE // will be set by btree.c */ - - *ppPager = pPager; - return SQLITE_OK; -} - - -/* Verify that the database file has not be deleted or renamed out from -** under the pager. Return SQLITE_OK if the database is still were it ought -** to be on disk. Return non-zero (SQLITE_READONLY_DBMOVED or some other error -** code from sqlite3OsAccess()) if the database has gone missing. -*/ -static int databaseIsUnmoved(Pager *pPager){ - int bHasMoved = 0; - int rc; - - if( pPager->tempFile ) return SQLITE_OK; - if( pPager->dbSize==0 ) return SQLITE_OK; - assert( pPager->zFilename && pPager->zFilename[0] ); - rc = sqlite3OsFileControl(pPager->fd, SQLITE_FCNTL_HAS_MOVED, &bHasMoved); - if( rc==SQLITE_NOTFOUND ){ - /* If the HAS_MOVED file-control is unimplemented, assume that the file - ** has not been moved. That is the historical behavior of SQLite: prior to - ** version 3.8.3, it never checked */ - rc = SQLITE_OK; - }else if( rc==SQLITE_OK && bHasMoved ){ - rc = SQLITE_READONLY_DBMOVED; - } - return rc; -} - - -/* -** This function is called after transitioning from PAGER_UNLOCK to -** PAGER_SHARED state. It tests if there is a hot journal present in -** the file-system for the given pager. A hot journal is one that -** needs to be played back. 
According to this function, a hot-journal -** file exists if the following criteria are met: -** -** * The journal file exists in the file system, and -** * No process holds a RESERVED or greater lock on the database file, and -** * The database file itself is greater than 0 bytes in size, and -** * The first byte of the journal file exists and is not 0x00. -** -** If the current size of the database file is 0 but a journal file -** exists, that is probably an old journal left over from a prior -** database with the same name. In this case the journal file is -** just deleted using OsDelete, *pExists is set to 0 and SQLITE_OK -** is returned. -** -** This routine does not check if there is a master journal filename -** at the end of the file. If there is, and that master journal file -** does not exist, then the journal file is not really hot. In this -** case this routine will return a false-positive. The pager_playback() -** routine will discover that the journal file is not really hot and -** will not roll it back. -** -** If a hot-journal file is found to exist, *pExists is set to 1 and -** SQLITE_OK returned. If no hot-journal file is present, *pExists is -** set to 0 and SQLITE_OK returned. If an IO error occurs while trying -** to determine whether or not a hot-journal file exists, the IO error -** code is returned and the value of *pExists is undefined. -*/ -static int hasHotJournal(Pager *pPager, int *pExists){ - sqlite3_vfs * const pVfs = pPager->pVfs; - int rc = SQLITE_OK; /* Return code */ - int exists = 1; /* True if a journal file is present */ - int jrnlOpen = !!isOpen(pPager->jfd); - - assert( pPager->useJournal ); - assert( isOpen(pPager->fd) ); - assert( pPager->eState==PAGER_OPEN ); - - assert( jrnlOpen==0 || ( sqlite3OsDeviceCharacteristics(pPager->jfd) & - SQLITE_IOCAP_UNDELETABLE_WHEN_OPEN - )); - - *pExists = 0; - if( !jrnlOpen ){ - rc = sqlite3OsAccess(pVfs, pPager->zJournal, SQLITE_ACCESS_EXISTS, &exists); - } - if( rc==SQLITE_OK && exists ){ - int locked = 0; /* True if some process holds a RESERVED lock */ - - /* Race condition here: Another process might have been holding the - ** the RESERVED lock and have a journal open at the sqlite3OsAccess() - ** call above, but then delete the journal and drop the lock before - ** we get to the following sqlite3OsCheckReservedLock() call. If that - ** is the case, this routine might think there is a hot journal when - ** in fact there is none. This results in a false-positive which will - ** be dealt with by the playback routine. Ticket #3883. - */ - rc = sqlite3OsCheckReservedLock(pPager->fd, &locked); - if( rc==SQLITE_OK && !locked ){ - Pgno nPage; /* Number of pages in database file */ - - rc = pagerPagecount(pPager, &nPage); - if( rc==SQLITE_OK ){ - /* If the database is zero pages in size, that means that either (1) the - ** journal is a remnant from a prior database with the same name where - ** the database file but not the journal was deleted, or (2) the initial - ** transaction that populates a new database is being rolled back. - ** In either case, the journal file can be deleted. However, take care - ** not to delete the journal file if it is already open due to - ** journal_mode=PERSIST. 
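The four criteria above can be restated as a small standalone predicate. The helper below is an illustrative sketch only, with invented parameter names; it is not part of the SQLite sources.

#include <stdint.h>

/* Illustrative only: the hasHotJournal() decision, reduced to a pure
** predicate over the facts it gathers from the file-system and locks. */
static int looksLikeHotJournal(
  int journalExists,          /* the -journal file is present on disk */
  int reservedLockHeld,       /* some process holds RESERVED or better */
  int64_t dbSizeInPages,      /* current size of the database, in pages */
  uint8_t firstJournalByte    /* first byte read from the journal file */
){
  return journalExists
      && !reservedLockHeld
      && dbSizeInPages>0
      && firstJournalByte!=0;
}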
- */ - if( nPage==0 && !jrnlOpen ){ - sqlite3BeginBenignMalloc(); - if( pagerLockDb(pPager, RESERVED_LOCK)==SQLITE_OK ){ - sqlite3OsDelete(pVfs, pPager->zJournal, 0); - if( !pPager->exclusiveMode ) pagerUnlockDb(pPager, SHARED_LOCK); - } - sqlite3EndBenignMalloc(); - }else{ - /* The journal file exists and no other connection has a reserved - ** or greater lock on the database file. Now check that there is - ** at least one non-zero bytes at the start of the journal file. - ** If there is, then we consider this journal to be hot. If not, - ** it can be ignored. - */ - if( !jrnlOpen ){ - int f = SQLITE_OPEN_READONLY|SQLITE_OPEN_MAIN_JOURNAL; - rc = sqlite3OsOpen(pVfs, pPager->zJournal, pPager->jfd, f, &f); - } - if( rc==SQLITE_OK ){ - u8 first = 0; - rc = sqlite3OsRead(pPager->jfd, (void *)&first, 1, 0); - if( rc==SQLITE_IOERR_SHORT_READ ){ - rc = SQLITE_OK; - } - if( !jrnlOpen ){ - sqlite3OsClose(pPager->jfd); - } - *pExists = (first!=0); - }else if( rc==SQLITE_CANTOPEN ){ - /* If we cannot open the rollback journal file in order to see if - ** its has a zero header, that might be due to an I/O error, or - ** it might be due to the race condition described above and in - ** ticket #3883. Either way, assume that the journal is hot. - ** This might be a false positive. But if it is, then the - ** automatic journal playback and recovery mechanism will deal - ** with it under an EXCLUSIVE lock where we do not need to - ** worry so much with race conditions. - */ - *pExists = 1; - rc = SQLITE_OK; - } - } - } - } - } - - return rc; -} - -/* -** This function is called to obtain a shared lock on the database file. -** It is illegal to call sqlite3PagerAcquire() until after this function -** has been successfully called. If a shared-lock is already held when -** this function is called, it is a no-op. -** -** The following operations are also performed by this function. -** -** 1) If the pager is currently in PAGER_OPEN state (no lock held -** on the database file), then an attempt is made to obtain a -** SHARED lock on the database file. Immediately after obtaining -** the SHARED lock, the file-system is checked for a hot-journal, -** which is played back if present. Following any hot-journal -** rollback, the contents of the cache are validated by checking -** the 'change-counter' field of the database file header and -** discarded if they are found to be invalid. -** -** 2) If the pager is running in exclusive-mode, and there are currently -** no outstanding references to any pages, and is in the error state, -** then an attempt is made to clear the error state by discarding -** the contents of the page cache and rolling back any open journal -** file. -** -** If everything is successful, SQLITE_OK is returned. If an IO error -** occurs while locking the database, checking for a hot-journal file or -** rolling back a journal file, the IO error code is returned. -*/ -SQLITE_PRIVATE int sqlite3PagerSharedLock(Pager *pPager){ - int rc = SQLITE_OK; /* Return code */ - - /* This routine is only called from b-tree and only when there are no - ** outstanding pages. This implies that the pager state should either - ** be OPEN or READER. READER is only possible if the pager is or was in - ** exclusive access mode. 
- */ - assert( sqlite3PcacheRefCount(pPager->pPCache)==0 ); - assert( assert_pager_state(pPager) ); - assert( pPager->eState==PAGER_OPEN || pPager->eState==PAGER_READER ); - if( NEVER(MEMDB && pPager->errCode) ){ return pPager->errCode; } - - if( !pagerUseWal(pPager) && pPager->eState==PAGER_OPEN ){ - int bHotJournal = 1; /* True if there exists a hot journal-file */ - - assert( !MEMDB ); - - rc = pager_wait_on_lock(pPager, SHARED_LOCK); - if( rc!=SQLITE_OK ){ - assert( pPager->eLock==NO_LOCK || pPager->eLock==UNKNOWN_LOCK ); - goto failed; - } - - /* If a journal file exists, and there is no RESERVED lock on the - ** database file, then it either needs to be played back or deleted. - */ - if( pPager->eLock<=SHARED_LOCK ){ - rc = hasHotJournal(pPager, &bHotJournal); - } - if( rc!=SQLITE_OK ){ - goto failed; - } - if( bHotJournal ){ - if( pPager->readOnly ){ - rc = SQLITE_READONLY_ROLLBACK; - goto failed; - } - - /* Get an EXCLUSIVE lock on the database file. At this point it is - ** important that a RESERVED lock is not obtained on the way to the - ** EXCLUSIVE lock. If it were, another process might open the - ** database file, detect the RESERVED lock, and conclude that the - ** database is safe to read while this process is still rolling the - ** hot-journal back. - ** - ** Because the intermediate RESERVED lock is not requested, any - ** other process attempting to access the database file will get to - ** this point in the code and fail to obtain its own EXCLUSIVE lock - ** on the database file. - ** - ** Unless the pager is in locking_mode=exclusive mode, the lock is - ** downgraded to SHARED_LOCK before this function returns. - */ - rc = pagerLockDb(pPager, EXCLUSIVE_LOCK); - if( rc!=SQLITE_OK ){ - goto failed; - } - - /* If it is not already open and the file exists on disk, open the - ** journal for read/write access. Write access is required because - ** in exclusive-access mode the file descriptor will be kept open - ** and possibly used for a transaction later on. Also, write-access - ** is usually required to finalize the journal in journal_mode=persist - ** mode (and also for journal_mode=truncate on some systems). - ** - ** If the journal does not exist, it usually means that some - ** other connection managed to get in and roll it back before - ** this connection obtained the exclusive lock above. Or, it - ** may mean that the pager was in the error-state when this - ** function was called and the journal file does not exist. - */ - if( !isOpen(pPager->jfd) ){ - sqlite3_vfs * const pVfs = pPager->pVfs; - int bExists; /* True if journal file exists */ - rc = sqlite3OsAccess( - pVfs, pPager->zJournal, SQLITE_ACCESS_EXISTS, &bExists); - if( rc==SQLITE_OK && bExists ){ - int fout = 0; - int f = SQLITE_OPEN_READWRITE|SQLITE_OPEN_MAIN_JOURNAL; - assert( !pPager->tempFile ); - rc = sqlite3OsOpen(pVfs, pPager->zJournal, pPager->jfd, f, &fout); - assert( rc!=SQLITE_OK || isOpen(pPager->jfd) ); - if( rc==SQLITE_OK && fout&SQLITE_OPEN_READONLY ){ - rc = SQLITE_CANTOPEN_BKPT; - sqlite3OsClose(pPager->jfd); - } - } - } - - /* Playback and delete the journal. Drop the database write - ** lock and reacquire the read lock. Purge the cache before - ** playing back the hot-journal so that we don't end up with - ** an inconsistent cache. Sync the hot journal before playing - ** it back since the process that crashed and left the hot journal - ** probably did not sync it and we are required to always sync - ** the journal before playing it back. 
- */ - if( isOpen(pPager->jfd) ){ - assert( rc==SQLITE_OK ); - rc = pagerSyncHotJournal(pPager); - if( rc==SQLITE_OK ){ - rc = pager_playback(pPager, 1); - pPager->eState = PAGER_OPEN; - } - }else if( !pPager->exclusiveMode ){ - pagerUnlockDb(pPager, SHARED_LOCK); - } - - if( rc!=SQLITE_OK ){ - /* This branch is taken if an error occurs while trying to open - ** or roll back a hot-journal while holding an EXCLUSIVE lock. The - ** pager_unlock() routine will be called before returning to unlock - ** the file. If the unlock attempt fails, then Pager.eLock must be - ** set to UNKNOWN_LOCK (see the comment above the #define for - ** UNKNOWN_LOCK above for an explanation). - ** - ** In order to get pager_unlock() to do this, set Pager.eState to - ** PAGER_ERROR now. This is not actually counted as a transition - ** to ERROR state in the state diagram at the top of this file, - ** since we know that the same call to pager_unlock() will very - ** shortly transition the pager object to the OPEN state. Calling - ** assert_pager_state() would fail now, as it should not be possible - ** to be in ERROR state when there are zero outstanding page - ** references. - */ - pager_error(pPager, rc); - goto failed; - } - - assert( pPager->eState==PAGER_OPEN ); - assert( (pPager->eLock==SHARED_LOCK) - || (pPager->exclusiveMode && pPager->eLock>SHARED_LOCK) - ); - } - - if( !pPager->tempFile && ( - pPager->pBackup - || sqlite3PcachePagecount(pPager->pPCache)>0 - || USEFETCH(pPager) - )){ - /* The shared-lock has just been acquired on the database file - ** and there are already pages in the cache (from a previous - ** read or write transaction). Check to see if the database - ** has been modified. If the database has changed, flush the - ** cache. - ** - ** Database changes is detected by looking at 15 bytes beginning - ** at offset 24 into the file. The first 4 of these 16 bytes are - ** a 32-bit counter that is incremented with each change. The - ** other bytes change randomly with each file change when - ** a codec is in use. - ** - ** There is a vanishingly small chance that a change will not be - ** detected. The chance of an undetected change is so small that - ** it can be neglected. - */ - Pgno nPage = 0; - char dbFileVers[sizeof(pPager->dbFileVers)]; - - rc = pagerPagecount(pPager, &nPage); - if( rc ) goto failed; - - if( nPage>0 ){ - IOTRACE(("CKVERS %p %d\n", pPager, sizeof(dbFileVers))); - rc = sqlite3OsRead(pPager->fd, &dbFileVers, sizeof(dbFileVers), 24); - if( rc!=SQLITE_OK && rc!=SQLITE_IOERR_SHORT_READ ){ - goto failed; - } - }else{ - memset(dbFileVers, 0, sizeof(dbFileVers)); - } - - if( memcmp(pPager->dbFileVers, dbFileVers, sizeof(dbFileVers))!=0 ){ - pager_reset(pPager); - - /* Unmap the database file. It is possible that external processes - ** may have truncated the database file and then extended it back - ** to its original size while this process was not holding a lock. - ** In this case there may exist a Pager.pMap mapping that appears - ** to be the right size but is not actually valid. Avoid this - ** possibility by unmapping the db here. */ - if( USEFETCH(pPager) ){ - sqlite3OsUnfetch(pPager->fd, 0, 0); - } - } - } - - /* If there is a WAL file in the file-system, open this database in WAL - ** mode. Otherwise, the following function call is a no-op. 
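The change-detection test described above can be sketched in isolation. The helper below is illustrative, assuming a 16-byte version stamp cached from byte offset 24 and a caller-supplied read callback; neither the helper nor the callback type is an SQLite interface.

#include <stdint.h>
#include <string.h>

typedef int (*read_at_fn)(void *pFile, void *pBuf, int nByte, int64_t iOfst);

/* Illustrative only: compare the cached 16-byte stamp at offset 24 with
** what is currently on disk; any mismatch means the page cache is stale. */
static int cacheIsStale(void *pFile, read_at_fn xReadAt, const uint8_t aCached[16]){
  uint8_t aNow[16];
  if( xReadAt(pFile, aNow, (int)sizeof(aNow), 24)!=0 ){
    return 1;  /* in this sketch, treat a failed read as "assume stale" */
  }
  return memcmp(aCached, aNow, sizeof(aNow))!=0;
}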
- */ - rc = pagerOpenWalIfPresent(pPager); -#ifndef SQLITE_OMIT_WAL - assert( pPager->pWal==0 || rc==SQLITE_OK ); -#endif - } - - if( pagerUseWal(pPager) ){ - assert( rc==SQLITE_OK ); - rc = pagerBeginReadTransaction(pPager); - } - - if( pPager->eState==PAGER_OPEN && rc==SQLITE_OK ){ - rc = pagerPagecount(pPager, &pPager->dbSize); - } - - failed: - if( rc!=SQLITE_OK ){ - assert( !MEMDB ); - pager_unlock(pPager); - assert( pPager->eState==PAGER_OPEN ); - }else{ - pPager->eState = PAGER_READER; - } - return rc; -} - -/* -** If the reference count has reached zero, rollback any active -** transaction and unlock the pager. -** -** Except, in locking_mode=EXCLUSIVE when there is nothing to in -** the rollback journal, the unlock is not performed and there is -** nothing to rollback, so this routine is a no-op. -*/ -static void pagerUnlockIfUnused(Pager *pPager){ - if( pPager->nMmapOut==0 && (sqlite3PcacheRefCount(pPager->pPCache)==0) ){ - pagerUnlockAndRollback(pPager); - } -} - -/* -** Acquire a reference to page number pgno in pager pPager (a page -** reference has type DbPage*). If the requested reference is -** successfully obtained, it is copied to *ppPage and SQLITE_OK returned. -** -** If the requested page is already in the cache, it is returned. -** Otherwise, a new page object is allocated and populated with data -** read from the database file. In some cases, the pcache module may -** choose not to allocate a new page object and may reuse an existing -** object with no outstanding references. -** -** The extra data appended to a page is always initialized to zeros the -** first time a page is loaded into memory. If the page requested is -** already in the cache when this function is called, then the extra -** data is left as it was when the page object was last used. -** -** If the database image is smaller than the requested page or if a -** non-zero value is passed as the noContent parameter and the -** requested page is not already stored in the cache, then no -** actual disk read occurs. In this case the memory image of the -** page is initialized to all zeros. -** -** If noContent is true, it means that we do not care about the contents -** of the page. This occurs in two scenarios: -** -** a) When reading a free-list leaf page from the database, and -** -** b) When a savepoint is being rolled back and we need to load -** a new page into the cache to be filled with the data read -** from the savepoint journal. -** -** If noContent is true, then the data returned is zeroed instead of -** being read from the database. Additionally, the bits corresponding -** to pgno in Pager.pInJournal (bitvec of pages already written to the -** journal file) and the PagerSavepoint.pInSavepoint bitvecs of any open -** savepoints are set. This means if the page is made writable at any -** point in the future, using a call to sqlite3PagerWrite(), its contents -** will not be journaled. This saves IO. -** -** The acquisition might fail for several reasons. In all cases, -** an appropriate error code is returned and *ppPage is set to NULL. -** -** See also sqlite3PagerLookup(). Both this routine and Lookup() attempt -** to find a page in the in-memory cache first. If the page is not already -** in memory, this routine goes to disk to read it in whereas Lookup() -** just returns 0. This routine acquires a read-lock the first time it -** has to go to disk, and could also playback an old journal if necessary. -** Since Lookup() never goes to disk, it never has to deal with locks -** or journal files. 
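A typical caller drives the page-reference interfaces documented here roughly as follows. The wrapper below is a sketch only; it relies on the declarations that appear later in this file and does minimal error handling.

/* Illustrative only: probe the cache first, fall back to a full fetch,
** and always release the reference when done. */
static int touchOnePage(Pager *pPager, Pgno pgno){
  int rc = SQLITE_OK;
  DbPage *pPage = sqlite3PagerLookup(pPager, pgno);     /* cache-only, never reads disk */
  if( pPage==0 ){
    rc = sqlite3PagerAcquire(pPager, pgno, &pPage, 0);  /* may read from disk or WAL */
    if( rc!=SQLITE_OK ) return rc;
  }
  /* ... inspect the page contents here ... */
  sqlite3PagerUnrefNotNull(pPage);
  return rc;
}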
-*/ -SQLITE_PRIVATE int sqlite3PagerAcquire( - Pager *pPager, /* The pager open on the database file */ - Pgno pgno, /* Page number to fetch */ - DbPage **ppPage, /* Write a pointer to the page here */ - int flags /* PAGER_GET_XXX flags */ -){ - int rc = SQLITE_OK; - PgHdr *pPg = 0; - u32 iFrame = 0; /* Frame to read from WAL file */ - const int noContent = (flags & PAGER_GET_NOCONTENT); - - /* It is acceptable to use a read-only (mmap) page for any page except - ** page 1 if there is no write-transaction open or the ACQUIRE_READONLY - ** flag was specified by the caller. And so long as the db is not a - ** temporary or in-memory database. */ - const int bMmapOk = (pgno!=1 && USEFETCH(pPager) - && (pPager->eState==PAGER_READER || (flags & PAGER_GET_READONLY)) -#ifdef SQLITE_HAS_CODEC - && pPager->xCodec==0 -#endif - ); - - assert( pPager->eState>=PAGER_READER ); - assert( assert_pager_state(pPager) ); - assert( noContent==0 || bMmapOk==0 ); - - if( pgno==0 ){ - return SQLITE_CORRUPT_BKPT; - } - - /* If the pager is in the error state, return an error immediately. - ** Otherwise, request the page from the PCache layer. */ - if( pPager->errCode!=SQLITE_OK ){ - rc = pPager->errCode; - }else{ - - if( bMmapOk && pagerUseWal(pPager) ){ - rc = sqlite3WalFindFrame(pPager->pWal, pgno, &iFrame); - if( rc!=SQLITE_OK ) goto pager_acquire_err; - } - - if( bMmapOk && iFrame==0 ){ - void *pData = 0; - - rc = sqlite3OsFetch(pPager->fd, - (i64)(pgno-1) * pPager->pageSize, pPager->pageSize, &pData - ); - - if( rc==SQLITE_OK && pData ){ - if( pPager->eState>PAGER_READER ){ - (void)sqlite3PcacheFetch(pPager->pPCache, pgno, 0, &pPg); - } - if( pPg==0 ){ - rc = pagerAcquireMapPage(pPager, pgno, pData, &pPg); - }else{ - sqlite3OsUnfetch(pPager->fd, (i64)(pgno-1)*pPager->pageSize, pData); - } - if( pPg ){ - assert( rc==SQLITE_OK ); - *ppPage = pPg; - return SQLITE_OK; - } - } - if( rc!=SQLITE_OK ){ - goto pager_acquire_err; - } - } - - rc = sqlite3PcacheFetch(pPager->pPCache, pgno, 1, ppPage); - } - - if( rc!=SQLITE_OK ){ - /* Either the call to sqlite3PcacheFetch() returned an error or the - ** pager was already in the error-state when this function was called. - ** Set pPg to 0 and jump to the exception handler. */ - pPg = 0; - goto pager_acquire_err; - } - assert( (*ppPage)->pgno==pgno ); - assert( (*ppPage)->pPager==pPager || (*ppPage)->pPager==0 ); - - if( (*ppPage)->pPager && !noContent ){ - /* In this case the pcache already contains an initialized copy of - ** the page. Return without further ado. */ - assert( pgno<=PAGER_MAX_PGNO && pgno!=PAGER_MJ_PGNO(pPager) ); - pPager->aStat[PAGER_STAT_HIT]++; - return SQLITE_OK; - - }else{ - /* The pager cache has created a new page. Its content needs to - ** be initialized. */ - - pPg = *ppPage; - pPg->pPager = pPager; - - /* The maximum page number is 2^31. Return SQLITE_CORRUPT if a page - ** number greater than this, or the unused locking-page, is requested. */ - if( pgno>PAGER_MAX_PGNO || pgno==PAGER_MJ_PGNO(pPager) ){ - rc = SQLITE_CORRUPT_BKPT; - goto pager_acquire_err; - } - - if( MEMDB || pPager->dbSize<pgno || noContent || !isOpen(pPager->fd) ){ - if( pgno>pPager->mxPgno ){ - rc = SQLITE_FULL; - goto pager_acquire_err; - } - if( noContent ){ - /* Failure to set the bits in the InJournal bit-vectors is benign. - ** It merely means that we might do some extra work to journal a - ** page that does not need to be journaled. Nevertheless, be sure - ** to test the case where a malloc error occurs while trying to set - ** a bit in a bit vector.
- */ - sqlite3BeginBenignMalloc(); - if( pgno<=pPager->dbOrigSize ){ - TESTONLY( rc = ) sqlite3BitvecSet(pPager->pInJournal, pgno); - testcase( rc==SQLITE_NOMEM ); - } - TESTONLY( rc = ) addToSavepointBitvecs(pPager, pgno); - testcase( rc==SQLITE_NOMEM ); - sqlite3EndBenignMalloc(); - } - memset(pPg->pData, 0, pPager->pageSize); - IOTRACE(("ZERO %p %d\n", pPager, pgno)); - }else{ - if( pagerUseWal(pPager) && bMmapOk==0 ){ - rc = sqlite3WalFindFrame(pPager->pWal, pgno, &iFrame); - if( rc!=SQLITE_OK ) goto pager_acquire_err; - } - assert( pPg->pPager==pPager ); - pPager->aStat[PAGER_STAT_MISS]++; - rc = readDbPage(pPg, iFrame); - if( rc!=SQLITE_OK ){ - goto pager_acquire_err; - } - } - pager_set_pagehash(pPg); - } - - return SQLITE_OK; - -pager_acquire_err: - assert( rc!=SQLITE_OK ); - if( pPg ){ - sqlite3PcacheDrop(pPg); - } - pagerUnlockIfUnused(pPager); - - *ppPage = 0; - return rc; -} - -/* -** Acquire a page if it is already in the in-memory cache. Do -** not read the page from disk. Return a pointer to the page, -** or 0 if the page is not in cache. -** -** See also sqlite3PagerGet(). The difference between this routine -** and sqlite3PagerGet() is that _get() will go to the disk and read -** in the page if the page is not already in cache. This routine -** returns NULL if the page is not in cache or if a disk I/O error -** has ever happened. -*/ -SQLITE_PRIVATE DbPage *sqlite3PagerLookup(Pager *pPager, Pgno pgno){ - PgHdr *pPg = 0; - assert( pPager!=0 ); - assert( pgno!=0 ); - assert( pPager->pPCache!=0 ); - assert( pPager->eState>=PAGER_READER && pPager->eState!=PAGER_ERROR ); - sqlite3PcacheFetch(pPager->pPCache, pgno, 0, &pPg); - return pPg; -} - -/* -** Release a page reference. -** -** If the number of references to the page drop to zero, then the -** page is added to the LRU list. When all references to all pages -** are released, a rollback occurs and the lock on the database is -** removed. -*/ -SQLITE_PRIVATE void sqlite3PagerUnrefNotNull(DbPage *pPg){ - Pager *pPager; - assert( pPg!=0 ); - pPager = pPg->pPager; - if( pPg->flags & PGHDR_MMAP ){ - pagerReleaseMapPage(pPg); - }else{ - sqlite3PcacheRelease(pPg); - } - pagerUnlockIfUnused(pPager); -} -SQLITE_PRIVATE void sqlite3PagerUnref(DbPage *pPg){ - if( pPg ) sqlite3PagerUnrefNotNull(pPg); -} - -/* -** This function is called at the start of every write transaction. -** There must already be a RESERVED or EXCLUSIVE lock on the database -** file when this routine is called. -** -** Open the journal file for pager pPager and write a journal header -** to the start of it. If there are active savepoints, open the sub-journal -** as well. This function is only used when the journal file is being -** opened to write a rollback log for a transaction. It is not used -** when opening a hot journal file to roll it back. -** -** If the journal file is already open (as it may be in exclusive mode), -** then this function just writes a journal header to the start of the -** already open file. -** -** Whether or not the journal file is opened by this function, the -** Pager.pInJournal bitvec structure is allocated. -** -** Return SQLITE_OK if everything is successful. Otherwise, return -** SQLITE_NOMEM if the attempt to allocate Pager.pInJournal fails, or -** an IO error code if opening or writing the journal file fails. 
-*/ -static int pager_open_journal(Pager *pPager){ - int rc = SQLITE_OK; /* Return code */ - sqlite3_vfs * const pVfs = pPager->pVfs; /* Local cache of vfs pointer */ - - assert( pPager->eState==PAGER_WRITER_LOCKED ); - assert( assert_pager_state(pPager) ); - assert( pPager->pInJournal==0 ); - - /* If already in the error state, this function is a no-op. But on - ** the other hand, this routine is never called if we are already in - ** an error state. */ - if( NEVER(pPager->errCode) ) return pPager->errCode; - - if( !pagerUseWal(pPager) && pPager->journalMode!=PAGER_JOURNALMODE_OFF ){ - pPager->pInJournal = sqlite3BitvecCreate(pPager->dbSize); - if( pPager->pInJournal==0 ){ - return SQLITE_NOMEM; - } - - /* Open the journal file if it is not already open. */ - if( !isOpen(pPager->jfd) ){ - if( pPager->journalMode==PAGER_JOURNALMODE_MEMORY ){ - sqlite3MemJournalOpen(pPager->jfd); - }else{ - const int flags = /* VFS flags to open journal file */ - SQLITE_OPEN_READWRITE|SQLITE_OPEN_CREATE| - (pPager->tempFile ? - (SQLITE_OPEN_DELETEONCLOSE|SQLITE_OPEN_TEMP_JOURNAL): - (SQLITE_OPEN_MAIN_JOURNAL) - ); - - /* Verify that the database still has the same name as it did when - ** it was originally opened. */ - rc = databaseIsUnmoved(pPager); - if( rc==SQLITE_OK ){ -#ifdef SQLITE_ENABLE_ATOMIC_WRITE - rc = sqlite3JournalOpen( - pVfs, pPager->zJournal, pPager->jfd, flags, jrnlBufferSize(pPager) - ); -#else - rc = sqlite3OsOpen(pVfs, pPager->zJournal, pPager->jfd, flags, 0); -#endif - } - } - assert( rc!=SQLITE_OK || isOpen(pPager->jfd) ); - } - - - /* Write the first journal header to the journal file and open - ** the sub-journal if necessary. - */ - if( rc==SQLITE_OK ){ - /* TODO: Check if all of these are really required. */ - pPager->nRec = 0; - pPager->journalOff = 0; - pPager->setMaster = 0; - pPager->journalHdr = 0; - rc = writeJournalHdr(pPager); - } - } - - if( rc!=SQLITE_OK ){ - sqlite3BitvecDestroy(pPager->pInJournal); - pPager->pInJournal = 0; - }else{ - assert( pPager->eState==PAGER_WRITER_LOCKED ); - pPager->eState = PAGER_WRITER_CACHEMOD; - } - - return rc; -} - -/* -** Begin a write-transaction on the specified pager object. If a -** write-transaction has already been opened, this function is a no-op. -** -** If the exFlag argument is false, then acquire at least a RESERVED -** lock on the database file. If exFlag is true, then acquire at least -** an EXCLUSIVE lock. If such a lock is already held, no locking -** functions need be called. -** -** If the subjInMemory argument is non-zero, then any sub-journal opened -** within this transaction will be opened as an in-memory file. This -** has no effect if the sub-journal is already opened (as it may be when -** running in exclusive mode) or if the transaction does not require a -** sub-journal. If the subjInMemory argument is zero, then any required -** sub-journal is implemented in-memory if pPager is an in-memory database, -** or using a temporary file otherwise. -*/ -SQLITE_PRIVATE int sqlite3PagerBegin(Pager *pPager, int exFlag, int subjInMemory){ - int rc = SQLITE_OK; - - if( pPager->errCode ) return pPager->errCode; - assert( pPager->eState>=PAGER_READER && pPager->eState<PAGER_ERROR ); - pPager->subjInMemory = (u8)subjInMemory; - - if( ALWAYS(pPager->eState==PAGER_READER) ){ - assert( pPager->pInJournal==0 ); - - if( pagerUseWal(pPager) ){ - /* If the pager is configured to use locking_mode=exclusive, and an - ** exclusive lock on the database is not already held, obtain it now.
- */ - if( pPager->exclusiveMode && sqlite3WalExclusiveMode(pPager->pWal, -1) ){ - rc = pagerLockDb(pPager, EXCLUSIVE_LOCK); - if( rc!=SQLITE_OK ){ - return rc; - } - sqlite3WalExclusiveMode(pPager->pWal, 1); - } - - /* Grab the write lock on the log file. If successful, upgrade to - ** PAGER_RESERVED state. Otherwise, return an error code to the caller. - ** The busy-handler is not invoked if another connection already - ** holds the write-lock. If possible, the upper layer will call it. - */ - rc = sqlite3WalBeginWriteTransaction(pPager->pWal); - }else{ - /* Obtain a RESERVED lock on the database file. If the exFlag parameter - ** is true, then immediately upgrade this to an EXCLUSIVE lock. The - ** busy-handler callback can be used when upgrading to the EXCLUSIVE - ** lock, but not when obtaining the RESERVED lock. - */ - rc = pagerLockDb(pPager, RESERVED_LOCK); - if( rc==SQLITE_OK && exFlag ){ - rc = pager_wait_on_lock(pPager, EXCLUSIVE_LOCK); - } - } - - if( rc==SQLITE_OK ){ - /* Change to WRITER_LOCKED state. - ** - ** WAL mode sets Pager.eState to PAGER_WRITER_LOCKED or CACHEMOD - ** when it has an open transaction, but never to DBMOD or FINISHED. - ** This is because in those states the code to roll back savepoint - ** transactions may copy data from the sub-journal into the database - ** file as well as into the page cache. Which would be incorrect in - ** WAL mode. - */ - pPager->eState = PAGER_WRITER_LOCKED; - pPager->dbHintSize = pPager->dbSize; - pPager->dbFileSize = pPager->dbSize; - pPager->dbOrigSize = pPager->dbSize; - pPager->journalOff = 0; - } - - assert( rc==SQLITE_OK || pPager->eState==PAGER_READER ); - assert( rc!=SQLITE_OK || pPager->eState==PAGER_WRITER_LOCKED ); - assert( assert_pager_state(pPager) ); - } - - PAGERTRACE(("TRANSACTION %d\n", PAGERID(pPager))); - return rc; -} - -/* -** Mark a single data page as writeable. The page is written into the -** main journal or sub-journal as required. If the page is written into -** one of the journals, the corresponding bit is set in the -** Pager.pInJournal bitvec and the PagerSavepoint.pInSavepoint bitvecs -** of any open savepoints as appropriate. -*/ -static int pager_write(PgHdr *pPg){ - Pager *pPager = pPg->pPager; - int rc = SQLITE_OK; - int inJournal; - - /* This routine is not called unless a write-transaction has already - ** been started. The journal file may or may not be open at this point. - ** It is never called in the ERROR state. - */ - assert( pPager->eState==PAGER_WRITER_LOCKED - || pPager->eState==PAGER_WRITER_CACHEMOD - || pPager->eState==PAGER_WRITER_DBMOD - ); - assert( assert_pager_state(pPager) ); - assert( pPager->errCode==0 ); - assert( pPager->readOnly==0 ); - - CHECK_PAGE(pPg); - - /* The journal file needs to be opened. Higher level routines have already - ** obtained the necessary locks to begin the write-transaction, but the - ** rollback journal might not yet be open. Open it now if this is the case. - ** - ** This is done before calling sqlite3PcacheMakeDirty() on the page. - ** Otherwise, if it were done after calling sqlite3PcacheMakeDirty(), then - ** an error might occur and the pager would end up in WRITER_LOCKED state - ** with pages marked as dirty in the cache. - */ - if( pPager->eState==PAGER_WRITER_LOCKED ){ - rc = pager_open_journal(pPager); - if( rc!=SQLITE_OK ) return rc; - } - assert( pPager->eState>=PAGER_WRITER_CACHEMOD ); - assert( assert_pager_state(pPager) ); - - /* Mark the page as dirty. 
If the page has already been written - ** to the journal then we can return right away. - */ - sqlite3PcacheMakeDirty(pPg); - inJournal = pageInJournal(pPager, pPg); - if( inJournal && (pPager->nSavepoint==0 || !subjRequiresPage(pPg)) ){ - assert( !pagerUseWal(pPager) ); - }else{ - - /* The transaction journal now exists and we have a RESERVED or an - ** EXCLUSIVE lock on the main database file. Write the current page to - ** the transaction journal if it is not there already. - */ - if( !inJournal && !pagerUseWal(pPager) ){ - assert( pagerUseWal(pPager)==0 ); - if( pPg->pgno<=pPager->dbOrigSize && isOpen(pPager->jfd) ){ - u32 cksum; - char *pData2; - i64 iOff = pPager->journalOff; - - /* We should never write to the journal file the page that - ** contains the database locks. The following assert verifies - ** that we do not. */ - assert( pPg->pgno!=PAGER_MJ_PGNO(pPager) ); - - assert( pPager->journalHdr<=pPager->journalOff ); - CODEC2(pPager, pPg->pData, pPg->pgno, 7, return SQLITE_NOMEM, pData2); - cksum = pager_cksum(pPager, (u8*)pData2); - - /* Even if an IO or diskfull error occurs while journalling the - ** page in the block above, set the need-sync flag for the page. - ** Otherwise, when the transaction is rolled back, the logic in - ** playback_one_page() will think that the page needs to be restored - ** in the database file. And if an IO error occurs while doing so, - ** then corruption may follow. - */ - pPg->flags |= PGHDR_NEED_SYNC; - - rc = write32bits(pPager->jfd, iOff, pPg->pgno); - if( rc!=SQLITE_OK ) return rc; - rc = sqlite3OsWrite(pPager->jfd, pData2, pPager->pageSize, iOff+4); - if( rc!=SQLITE_OK ) return rc; - rc = write32bits(pPager->jfd, iOff+pPager->pageSize+4, cksum); - if( rc!=SQLITE_OK ) return rc; - - IOTRACE(("JOUT %p %d %lld %d\n", pPager, pPg->pgno, - pPager->journalOff, pPager->pageSize)); - PAGER_INCR(sqlite3_pager_writej_count); - PAGERTRACE(("JOURNAL %d page %d needSync=%d hash(%08x)\n", - PAGERID(pPager), pPg->pgno, - ((pPg->flags&PGHDR_NEED_SYNC)?1:0), pager_pagehash(pPg))); - - pPager->journalOff += 8 + pPager->pageSize; - pPager->nRec++; - assert( pPager->pInJournal!=0 ); - rc = sqlite3BitvecSet(pPager->pInJournal, pPg->pgno); - testcase( rc==SQLITE_NOMEM ); - assert( rc==SQLITE_OK || rc==SQLITE_NOMEM ); - rc |= addToSavepointBitvecs(pPager, pPg->pgno); - if( rc!=SQLITE_OK ){ - assert( rc==SQLITE_NOMEM ); - return rc; - } - }else{ - if( pPager->eState!=PAGER_WRITER_DBMOD ){ - pPg->flags |= PGHDR_NEED_SYNC; - } - PAGERTRACE(("APPEND %d page %d needSync=%d\n", - PAGERID(pPager), pPg->pgno, - ((pPg->flags&PGHDR_NEED_SYNC)?1:0))); - } - } - - /* If the statement journal is open and the page is not in it, - ** then write the current page to the statement journal. Note that - ** the statement journal format differs from the standard journal format - ** in that it omits the checksums and the header. - */ - if( pPager->nSavepoint>0 && subjRequiresPage(pPg) ){ - rc = subjournalPage(pPg); - } - } - - /* Update the database size and return. - */ - if( pPager->dbSize<pPg->pgno ){ - pPager->dbSize = pPg->pgno; - } - return rc; -} - -/* -** Mark a data page as writeable. This routine must be called before -** making changes to a page. The caller must check the return value -** of this function and be careful not to change any page data unless -** this routine returns SQLITE_OK. -** -** The difference between this function and pager_write() is that this -** function also deals with the special case where 2 or more pages -** fit on a single disk sector.
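For reference, each record that pager_write() appends to the rollback journal has the shape implied by the offsets above: a 4-byte big-endian page number, the page image, then a 4-byte checksum, which is why journalOff advances by pageSize+8 per page. The helper below only computes offsets and is an illustrative sketch, not part of SQLite.

#include <stdint.h>

typedef struct JournalRecordOffsets JournalRecordOffsets;
struct JournalRecordOffsets {
  int64_t iPgno;    /* 4-byte big-endian page number             */
  int64_t iData;    /* pageSize bytes of page content            */
  int64_t iCksum;   /* 4-byte checksum covering the page content */
  int64_t iNext;    /* offset at which the next record begins    */
};

/* Illustrative only: where the pieces of one journal record live, given
** the record's starting offset and the database page size. */
static JournalRecordOffsets journalRecordAt(int64_t iOff, int pageSize){
  JournalRecordOffsets r;
  r.iPgno  = iOff;
  r.iData  = iOff + 4;
  r.iCksum = iOff + 4 + pageSize;
  r.iNext  = iOff + 8 + pageSize;
  return r;
}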
In this case all co-resident pages - ** must have been written to the journal file before returning. - ** - ** If an error occurs, SQLITE_NOMEM or an IO error code is returned - ** as appropriate. Otherwise, SQLITE_OK. -*/ -SQLITE_PRIVATE int sqlite3PagerWrite(DbPage *pDbPage){ - int rc = SQLITE_OK; - - PgHdr *pPg = pDbPage; - Pager *pPager = pPg->pPager; - - assert( (pPg->flags & PGHDR_MMAP)==0 ); - assert( pPager->eState>=PAGER_WRITER_LOCKED ); - assert( pPager->eState!=PAGER_ERROR ); - assert( assert_pager_state(pPager) ); - - if( pPager->sectorSize > (u32)pPager->pageSize ){ - Pgno nPageCount; /* Total number of pages in database file */ - Pgno pg1; /* First page of the sector pPg is located on. */ - int nPage = 0; /* Number of pages starting at pg1 to journal */ - int ii; /* Loop counter */ - int needSync = 0; /* True if any page has PGHDR_NEED_SYNC */ - Pgno nPagePerSector = (pPager->sectorSize/pPager->pageSize); - - /* Set the doNotSpill NOSYNC bit to 1. This is because we cannot allow - ** a journal header to be written between the pages journaled by - ** this function. - */ - assert( !MEMDB ); - assert( (pPager->doNotSpill & SPILLFLAG_NOSYNC)==0 ); - pPager->doNotSpill |= SPILLFLAG_NOSYNC; - - /* This trick assumes that both the page-size and sector-size are - ** an integer power of 2. It sets variable pg1 to the identifier - ** of the first page of the sector pPg is located on. - */ - pg1 = ((pPg->pgno-1) & ~(nPagePerSector-1)) + 1; - - nPageCount = pPager->dbSize; - if( pPg->pgno>nPageCount ){ - nPage = (pPg->pgno - pg1)+1; - }else if( (pg1+nPagePerSector-1)>nPageCount ){ - nPage = nPageCount+1-pg1; - }else{ - nPage = nPagePerSector; - } - assert(nPage>0); - assert(pg1<=pPg->pgno); - assert((pg1+nPage)>pPg->pgno); - - for(ii=0; ii<nPage; ii++){ - Pgno pg = pg1+ii; - PgHdr *pPage; - if( pg==pPg->pgno || !sqlite3BitvecTest(pPager->pInJournal, pg) ){ - if( pg!=PAGER_MJ_PGNO(pPager) ){ - rc = sqlite3PagerGet(pPager, pg, &pPage); - if( rc==SQLITE_OK ){ - rc = pager_write(pPage); - if( pPage->flags&PGHDR_NEED_SYNC ){ - needSync = 1; - } - sqlite3PagerUnrefNotNull(pPage); - } - } - }else if( (pPage = pager_lookup(pPager, pg))!=0 ){ - if( pPage->flags&PGHDR_NEED_SYNC ){ - needSync = 1; - } - sqlite3PagerUnrefNotNull(pPage); - } - } - - /* If the PGHDR_NEED_SYNC flag is set for any of the nPage pages - ** starting at pg1, then it needs to be set for all of them. Because - ** writing to any of these nPage pages may damage the others, the - ** journal file must contain sync()ed copies of all of them - ** before any of them can be written out to the database file. - */ - if( rc==SQLITE_OK && needSync ){ - assert( !MEMDB ); - for(ii=0; ii<nPage; ii++){ - PgHdr *pPage = pager_lookup(pPager, pg1+ii); - if( pPage ){ - pPage->flags |= PGHDR_NEED_SYNC; - sqlite3PagerUnrefNotNull(pPage); - } - } - } - - assert( (pPager->doNotSpill & SPILLFLAG_NOSYNC)!=0 ); - pPager->doNotSpill &= ~SPILLFLAG_NOSYNC; - }else{ - rc = pager_write(pDbPage); - } - return rc; -} - -/* -** Return TRUE if the page given in the argument was previously passed -** to sqlite3PagerWrite(). In other words, return TRUE if it is ok -** to change the content of the page. -*/ -#ifndef NDEBUG -SQLITE_PRIVATE int sqlite3PagerIswriteable(DbPage *pPg){ - return pPg->flags&PGHDR_DIRTY; -} -#endif - -/* -** A call to this routine tells the pager that it is not necessary to -** write the information on page pPg back to the disk, even though -** that page might be marked as dirty. This happens, for example, when -** the page has been added as a leaf of the freelist and so its -** content no longer matters.
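The sector-spanning logic above hinges on one piece of arithmetic: finding the first page of the sector that holds a given page. Below is a standalone sketch of that calculation, assuming both sizes are powers of two as the original comment requires; it is illustrative and not part of the SQLite sources.

#include <assert.h>

/* Illustrative only: first page of the sector containing page pgno.
** With 1 KiB pages on a 4 KiB sector, pages 5..8 share a sector, so
** firstPageOfSector(7, 4096, 1024)==5 and all four pages are journaled
** together before any of them is written back to the database file. */
static unsigned firstPageOfSector(unsigned pgno, unsigned sectorSize, unsigned pageSize){
  unsigned nPagePerSector = sectorSize/pageSize;
  assert( nPagePerSector>0 && (nPagePerSector & (nPagePerSector-1))==0 );
  return ((pgno-1) & ~(nPagePerSector-1)) + 1;
}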
-** -** The overlying software layer calls this routine when all of the data -** on the given page is unused. The pager marks the page as clean so -** that it does not get written to disk. -** -** Tests show that this optimization can quadruple the speed of large -** DELETE operations. -*/ -SQLITE_PRIVATE void sqlite3PagerDontWrite(PgHdr *pPg){ - Pager *pPager = pPg->pPager; - if( (pPg->flags&PGHDR_DIRTY) && pPager->nSavepoint==0 ){ - PAGERTRACE(("DONT_WRITE page %d of %d\n", pPg->pgno, PAGERID(pPager))); - IOTRACE(("CLEAN %p %d\n", pPager, pPg->pgno)) - pPg->flags |= PGHDR_DONT_WRITE; - pager_set_pagehash(pPg); - } -} - -/* -** This routine is called to increment the value of the database file -** change-counter, stored as a 4-byte big-endian integer starting at -** byte offset 24 of the pager file. The secondary change counter at -** 92 is also updated, as is the SQLite version number at offset 96. -** -** But this only happens if the pPager->changeCountDone flag is false. -** To avoid excess churning of page 1, the update only happens once. -** See also the pager_write_changecounter() routine that does an -** unconditional update of the change counters. -** -** If the isDirectMode flag is zero, then this is done by calling -** sqlite3PagerWrite() on page 1, then modifying the contents of the -** page data. In this case the file will be updated when the current -** transaction is committed. -** -** The isDirectMode flag may only be non-zero if the library was compiled -** with the SQLITE_ENABLE_ATOMIC_WRITE macro defined. In this case, -** if isDirect is non-zero, then the database file is updated directly -** by writing an updated version of page 1 using a call to the -** sqlite3OsWrite() function. -*/ -static int pager_incr_changecounter(Pager *pPager, int isDirectMode){ - int rc = SQLITE_OK; - - assert( pPager->eState==PAGER_WRITER_CACHEMOD - || pPager->eState==PAGER_WRITER_DBMOD - ); - assert( assert_pager_state(pPager) ); - - /* Declare and initialize constant integer 'isDirect'. If the - ** atomic-write optimization is enabled in this build, then isDirect - ** is initialized to the value passed as the isDirectMode parameter - ** to this function. Otherwise, it is always set to zero. - ** - ** The idea is that if the atomic-write optimization is not - ** enabled at compile time, the compiler can omit the tests of - ** 'isDirect' below, as well as the block enclosed in the - ** "if( isDirect )" condition. - */ -#ifndef SQLITE_ENABLE_ATOMIC_WRITE -# define DIRECT_MODE 0 - assert( isDirectMode==0 ); - UNUSED_PARAMETER(isDirectMode); -#else -# define DIRECT_MODE isDirectMode -#endif - - if( !pPager->changeCountDone && ALWAYS(pPager->dbSize>0) ){ - PgHdr *pPgHdr; /* Reference to page 1 */ - - assert( !pPager->tempFile && isOpen(pPager->fd) ); - - /* Open page 1 of the file for writing. */ - rc = sqlite3PagerGet(pPager, 1, &pPgHdr); - assert( pPgHdr==0 || rc==SQLITE_OK ); - - /* If page one was fetched successfully, and this function is not - ** operating in direct-mode, make page 1 writable. When not in - ** direct mode, page 1 is always held in cache and hence the PagerGet() - ** above is always successful - hence the ALWAYS on rc==SQLITE_OK. - */ - if( !DIRECT_MODE && ALWAYS(rc==SQLITE_OK) ){ - rc = sqlite3PagerWrite(pPgHdr); - } - - if( rc==SQLITE_OK ){ - /* Actually do the update of the change counter */ - pager_write_changecounter(pPgHdr); - - /* If running in direct mode, write the contents of page 1 to the file. 
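The three header fields mentioned above live at fixed byte offsets in page 1. The constants below summarize the layout described in this comment; the names are an editor's sketch, not identifiers from the SQLite sources.

/* Illustrative only: byte offsets within the database header (page 1)
** touched by the change-counter update described above. */
enum {
  SKETCH_OFFSET_CHANGE_COUNTER = 24,  /* 4-byte big-endian file change counter */
  SKETCH_OFFSET_VERSION_VALID  = 92,  /* secondary change counter              */
  SKETCH_OFFSET_SQLITE_VERSION = 96   /* SQLite version number of last writer  */
};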
*/ - if( DIRECT_MODE ){ - const void *zBuf; - assert( pPager->dbFileSize>0 ); - CODEC2(pPager, pPgHdr->pData, 1, 6, rc=SQLITE_NOMEM, zBuf); - if( rc==SQLITE_OK ){ - rc = sqlite3OsWrite(pPager->fd, zBuf, pPager->pageSize, 0); - pPager->aStat[PAGER_STAT_WRITE]++; - } - if( rc==SQLITE_OK ){ - /* Update the pager's copy of the change-counter. Otherwise, the - ** next time a read transaction is opened the cache will be - ** flushed (as the change-counter values will not match). */ - const void *pCopy = (const void *)&((const char *)zBuf)[24]; - memcpy(&pPager->dbFileVers, pCopy, sizeof(pPager->dbFileVers)); - pPager->changeCountDone = 1; - } - }else{ - pPager->changeCountDone = 1; - } - } - - /* Release the page reference. */ - sqlite3PagerUnref(pPgHdr); - } - return rc; -} - -/* -** Sync the database file to disk. This is a no-op for in-memory databases -** or pages with the Pager.noSync flag set. -** -** If successful, or if called on a pager for which it is a no-op, this -** function returns SQLITE_OK. Otherwise, an IO error code is returned. -*/ -SQLITE_PRIVATE int sqlite3PagerSync(Pager *pPager, const char *zMaster){ - int rc = SQLITE_OK; - - if( isOpen(pPager->fd) ){ - void *pArg = (void*)zMaster; - rc = sqlite3OsFileControl(pPager->fd, SQLITE_FCNTL_SYNC, pArg); - if( rc==SQLITE_NOTFOUND ) rc = SQLITE_OK; - } - if( rc==SQLITE_OK && !pPager->noSync ){ - assert( !MEMDB ); - rc = sqlite3OsSync(pPager->fd, pPager->syncFlags); - } - return rc; -} - -/* -** This function may only be called while a write-transaction is active in -** rollback. If the connection is in WAL mode, this call is a no-op. -** Otherwise, if the connection does not already have an EXCLUSIVE lock on -** the database file, an attempt is made to obtain one. -** -** If the EXCLUSIVE lock is already held or the attempt to obtain it is -** successful, or the connection is in WAL mode, SQLITE_OK is returned. -** Otherwise, either SQLITE_BUSY or an SQLITE_IOERR_XXX error code is -** returned. -*/ -SQLITE_PRIVATE int sqlite3PagerExclusiveLock(Pager *pPager){ - int rc = SQLITE_OK; - assert( pPager->eState==PAGER_WRITER_CACHEMOD - || pPager->eState==PAGER_WRITER_DBMOD - || pPager->eState==PAGER_WRITER_LOCKED - ); - assert( assert_pager_state(pPager) ); - if( 0==pagerUseWal(pPager) ){ - rc = pager_wait_on_lock(pPager, EXCLUSIVE_LOCK); - } - return rc; -} - -/* -** Sync the database file for the pager pPager. zMaster points to the name -** of a master journal file that should be written into the individual -** journal file. zMaster may be NULL, which is interpreted as no master -** journal (a single database transaction). -** -** This routine ensures that: -** -** * The database file change-counter is updated, -** * the journal is synced (unless the atomic-write optimization is used), -** * all dirty pages are written to the database file, -** * the database file is truncated (if required), and -** * the database file synced. -** -** The only thing that remains to commit the transaction is to finalize -** (delete, truncate or zero the first part of) the journal file (or -** delete the master journal file if specified). -** -** Note that if zMaster==NULL, this does not overwrite a previous value -** passed to an sqlite3PagerCommitPhaseOne() call. -** -** If the final parameter - noSync - is true, then the database file itself -** is not synced. The caller must call sqlite3PagerSync() directly to -** sync the database file before calling CommitPhaseTwo() to delete the -** journal file in this case. 
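Callers are expected to drive the two commit phases in sequence, falling back to rollback on failure. The wrapper below is a sketch of that calling pattern using the interfaces declared in this file; the zero arguments (no EXCLUSIVE lock up front, no master journal, noSync off) describe one plausible single-database configuration, not the only one.

/* Illustrative only: one write transaction from begin to durable commit. */
static int commitOnePageChange(Pager *pPager, DbPage *pPg){
  int rc = sqlite3PagerBegin(pPager, 0, 0);          /* take at least RESERVED */
  if( rc==SQLITE_OK ) rc = sqlite3PagerWrite(pPg);   /* journal + mark dirty */
  /* ... modify the page image here ... */
  if( rc==SQLITE_OK ) rc = sqlite3PagerCommitPhaseOne(pPager, 0, 0); /* sync journal, write pages */
  if( rc==SQLITE_OK ) rc = sqlite3PagerCommitPhaseTwo(pPager);       /* finalize the journal */
  if( rc!=SQLITE_OK ) sqlite3PagerRollback(pPager);
  return rc;
}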
-*/ -SQLITE_PRIVATE int sqlite3PagerCommitPhaseOne( - Pager *pPager, /* Pager object */ - const char *zMaster, /* If not NULL, the master journal name */ - int noSync /* True to omit the xSync on the db file */ -){ - int rc = SQLITE_OK; /* Return code */ - - assert( pPager->eState==PAGER_WRITER_LOCKED - || pPager->eState==PAGER_WRITER_CACHEMOD - || pPager->eState==PAGER_WRITER_DBMOD - || pPager->eState==PAGER_ERROR - ); - assert( assert_pager_state(pPager) ); - - /* If a prior error occurred, report that error again. */ - if( NEVER(pPager->errCode) ) return pPager->errCode; - - PAGERTRACE(("DATABASE SYNC: File=%s zMaster=%s nSize=%d\n", - pPager->zFilename, zMaster, pPager->dbSize)); - - /* If no database changes have been made, return early. */ - if( pPager->eState<PAGER_WRITER_CACHEMOD ) return SQLITE_OK; - - if( MEMDB ){ - /* If this is an in-memory db, or no pages have been written to, or this - ** function has already been called, it is mostly a no-op. However, any - ** backup in progress needs to be restarted. - */ - sqlite3BackupRestart(pPager->pBackup); - }else{ - if( pagerUseWal(pPager) ){ - PgHdr *pList = sqlite3PcacheDirtyList(pPager->pPCache); - PgHdr *pPageOne = 0; - if( pList==0 ){ - /* Must have at least one page for the WAL commit flag. - ** Ticket [2d1a5c67dfc2363e44f29d9bbd57f] 2011-05-18 */ - rc = sqlite3PagerGet(pPager, 1, &pPageOne); - pList = pPageOne; - pList->pDirty = 0; - } - assert( rc==SQLITE_OK ); - if( ALWAYS(pList) ){ - rc = pagerWalFrames(pPager, pList, pPager->dbSize, 1); - } - sqlite3PagerUnref(pPageOne); - if( rc==SQLITE_OK ){ - sqlite3PcacheCleanAll(pPager->pPCache); - } - }else{ - /* The following block updates the change-counter. Exactly how it - ** does this depends on whether or not the atomic-update optimization - ** was enabled at compile time, and if this transaction meets the - ** runtime criteria to use the operation: - ** - ** * The file-system supports the atomic-write property for - ** blocks of size page-size, and - ** * This commit is not part of a multi-file transaction, and - ** * Exactly one page has been modified and store in the journal file. - ** - ** If the optimization was not enabled at compile time, then the - ** pager_incr_changecounter() function is called to update the change - ** counter in 'indirect-mode'. If the optimization is compiled in but - ** is not applicable to this transaction, call sqlite3JournalCreate() - ** to make sure the journal file has actually been created, then call - ** pager_incr_changecounter() to update the change-counter in indirect - ** mode. - ** - ** Otherwise, if the optimization is both enabled and applicable, - ** then call pager_incr_changecounter() to update the change-counter - ** in 'direct' mode. In this case the journal file will never be - ** created for this transaction. - */ - #ifdef SQLITE_ENABLE_ATOMIC_WRITE - PgHdr *pPg; - assert( isOpen(pPager->jfd) - || pPager->journalMode==PAGER_JOURNALMODE_OFF - || pPager->journalMode==PAGER_JOURNALMODE_WAL - ); - if( !zMaster && isOpen(pPager->jfd) - && pPager->journalOff==jrnlBufferSize(pPager) - && pPager->dbSize>=pPager->dbOrigSize - && (0==(pPg = sqlite3PcacheDirtyList(pPager->pPCache)) || 0==pPg->pDirty) - ){ - /* Update the db file change counter via the direct-write method. The - ** following call will modify the in-memory representation of page 1 - ** to include the updated change counter and then write page 1 - ** directly to the database file. Because of the atomic-write - ** property of the host file-system, this is safe.
- */ - rc = pager_incr_changecounter(pPager, 1); - }else{ - rc = sqlite3JournalCreate(pPager->jfd); - if( rc==SQLITE_OK ){ - rc = pager_incr_changecounter(pPager, 0); - } - } - #else - rc = pager_incr_changecounter(pPager, 0); - #endif - if( rc!=SQLITE_OK ) goto commit_phase_one_exit; - - /* Write the master journal name into the journal file. If a master - ** journal file name has already been written to the journal file, - ** or if zMaster is NULL (no master journal), then this call is a no-op. - */ - rc = writeMasterJournal(pPager, zMaster); - if( rc!=SQLITE_OK ) goto commit_phase_one_exit; - - /* Sync the journal file and write all dirty pages to the database. - ** If the atomic-update optimization is being used, this sync will not - ** create the journal file or perform any real IO. - ** - ** Because the change-counter page was just modified, unless the - ** atomic-update optimization is used it is almost certain that the - ** journal requires a sync here. However, in locking_mode=exclusive - ** on a system under memory pressure it is just possible that this is - ** not the case. In this case it is likely enough that the redundant - ** xSync() call will be changed to a no-op by the OS anyhow. - */ - rc = syncJournal(pPager, 0); - if( rc!=SQLITE_OK ) goto commit_phase_one_exit; - - rc = pager_write_pagelist(pPager,sqlite3PcacheDirtyList(pPager->pPCache)); - if( rc!=SQLITE_OK ){ - assert( rc!=SQLITE_IOERR_BLOCKED ); - goto commit_phase_one_exit; - } - sqlite3PcacheCleanAll(pPager->pPCache); - - /* If the file on disk is smaller than the database image, use - ** pager_truncate to grow the file here. This can happen if the database - ** image was extended as part of the current transaction and then the - ** last page in the db image moved to the free-list. In this case the - ** last page is never written out to disk, leaving the database file - ** undersized. Fix this now if it is the case. */ - if( pPager->dbSize>pPager->dbFileSize ){ - Pgno nNew = pPager->dbSize - (pPager->dbSize==PAGER_MJ_PGNO(pPager)); - assert( pPager->eState==PAGER_WRITER_DBMOD ); - rc = pager_truncate(pPager, nNew); - if( rc!=SQLITE_OK ) goto commit_phase_one_exit; - } - - /* Finally, sync the database file. */ - if( !noSync ){ - rc = sqlite3PagerSync(pPager, zMaster); - } - IOTRACE(("DBSYNC %p\n", pPager)) - } - } - -commit_phase_one_exit: - if( rc==SQLITE_OK && !pagerUseWal(pPager) ){ - pPager->eState = PAGER_WRITER_FINISHED; - } - return rc; -} - - -/* -** When this function is called, the database file has been completely -** updated to reflect the changes made by the current transaction and -** synced to disk. The journal file still exists in the file-system -** though, and if a failure occurs at this point it will eventually -** be used as a hot-journal and the current transaction rolled back. -** -** This function finalizes the journal file, either by deleting, -** truncating or partially zeroing it, so that it cannot be used -** for hot-journal rollback. Once this is done the transaction is -** irrevocably committed. -** -** If an error occurs, an IO error code is returned and the pager -** moves into the error state. Otherwise, SQLITE_OK is returned. -*/ -SQLITE_PRIVATE int sqlite3PagerCommitPhaseTwo(Pager *pPager){ - int rc = SQLITE_OK; /* Return code */ - - /* This routine should not be called if a prior error has occurred. - ** But if (due to a coding error elsewhere in the system) it does get - ** called, just return the same error code without doing anything. 
*/ - if( NEVER(pPager->errCode) ) return pPager->errCode; - - assert( pPager->eState==PAGER_WRITER_LOCKED - || pPager->eState==PAGER_WRITER_FINISHED - || (pagerUseWal(pPager) && pPager->eState==PAGER_WRITER_CACHEMOD) - ); - assert( assert_pager_state(pPager) ); - - /* An optimization. If the database was not actually modified during - ** this transaction, the pager is running in exclusive-mode and is - ** using persistent journals, then this function is a no-op. - ** - ** The start of the journal file currently contains a single journal - ** header with the nRec field set to 0. If such a journal is used as - ** a hot-journal during hot-journal rollback, 0 changes will be made - ** to the database file. So there is no need to zero the journal - ** header. Since the pager is in exclusive mode, there is no need - ** to drop any locks either. - */ - if( pPager->eState==PAGER_WRITER_LOCKED - && pPager->exclusiveMode - && pPager->journalMode==PAGER_JOURNALMODE_PERSIST - ){ - assert( pPager->journalOff==JOURNAL_HDR_SZ(pPager) || !pPager->journalOff ); - pPager->eState = PAGER_READER; - return SQLITE_OK; - } - - PAGERTRACE(("COMMIT %d\n", PAGERID(pPager))); - rc = pager_end_transaction(pPager, pPager->setMaster, 1); - return pager_error(pPager, rc); -} - -/* -** If a write transaction is open, then all changes made within the -** transaction are reverted and the current write-transaction is closed. -** The pager falls back to PAGER_READER state if successful, or PAGER_ERROR -** state if an error occurs. -** -** If the pager is already in PAGER_ERROR state when this function is called, -** it returns Pager.errCode immediately. No work is performed in this case. -** -** Otherwise, in rollback mode, this function performs two functions: -** -** 1) It rolls back the journal file, restoring all database file and -** in-memory cache pages to the state they were in when the transaction -** was opened, and -** -** 2) It finalizes the journal file, so that it is not used for hot -** rollback at any point in the future. -** -** Finalization of the journal file (task 2) is only performed if the -** rollback is successful. -** -** In WAL mode, all cache-entries containing data modified within the -** current transaction are either expelled from the cache or reverted to -** their pre-transaction state by re-reading data from the database or -** WAL files. The WAL transaction is then closed. -*/ -SQLITE_PRIVATE int sqlite3PagerRollback(Pager *pPager){ - int rc = SQLITE_OK; /* Return code */ - PAGERTRACE(("ROLLBACK %d\n", PAGERID(pPager))); - - /* PagerRollback() is a no-op if called in READER or OPEN state. If - ** the pager is already in the ERROR state, the rollback is not - ** attempted here. Instead, the error code is returned to the caller. - */ - assert( assert_pager_state(pPager) ); - if( pPager->eState==PAGER_ERROR ) return pPager->errCode; - if( pPager->eState<=PAGER_READER ) return SQLITE_OK; - - if( pagerUseWal(pPager) ){ - int rc2; - rc = sqlite3PagerSavepoint(pPager, SAVEPOINT_ROLLBACK, -1); - rc2 = pager_end_transaction(pPager, pPager->setMaster, 0); - if( rc==SQLITE_OK ) rc = rc2; - }else if( !isOpen(pPager->jfd) || pPager->eState==PAGER_WRITER_LOCKED ){ - int eState = pPager->eState; - rc = pager_end_transaction(pPager, 0, 0); - if( !MEMDB && eState>PAGER_WRITER_LOCKED ){ - /* This can happen using journal_mode=off. Move the pager to the error - ** state to indicate that the contents of the cache may not be trusted. - ** Any active readers will get SQLITE_ABORT. 
- */ - pPager->errCode = SQLITE_ABORT; - pPager->eState = PAGER_ERROR; - return rc; - } - }else{ - rc = pager_playback(pPager, 0); - } - - assert( pPager->eState==PAGER_READER || rc!=SQLITE_OK ); - assert( rc==SQLITE_OK || rc==SQLITE_FULL || rc==SQLITE_CORRUPT - || rc==SQLITE_NOMEM || (rc&0xFF)==SQLITE_IOERR - || rc==SQLITE_CANTOPEN - ); - - /* If an error occurs during a ROLLBACK, we can no longer trust the pager - ** cache. So call pager_error() on the way out to make any error persistent. - */ - return pager_error(pPager, rc); -} - -/* -** Return TRUE if the database file is opened read-only. Return FALSE -** if the database is (in theory) writable. -*/ -SQLITE_PRIVATE u8 sqlite3PagerIsreadonly(Pager *pPager){ - return pPager->readOnly; -} - -/* -** Return the number of references to the pager. -*/ -SQLITE_PRIVATE int sqlite3PagerRefcount(Pager *pPager){ - return sqlite3PcacheRefCount(pPager->pPCache); -} - -/* -** Return the approximate number of bytes of memory currently -** used by the pager and its associated cache. -*/ -SQLITE_PRIVATE int sqlite3PagerMemUsed(Pager *pPager){ - int perPageSize = pPager->pageSize + pPager->nExtra + sizeof(PgHdr) - + 5*sizeof(void*); - return perPageSize*sqlite3PcachePagecount(pPager->pPCache) - + sqlite3MallocSize(pPager) - + pPager->pageSize; -} - -/* -** Return the number of references to the specified page. -*/ -SQLITE_PRIVATE int sqlite3PagerPageRefcount(DbPage *pPage){ - return sqlite3PcachePageRefcount(pPage); -} - -#ifdef SQLITE_TEST -/* -** This routine is used for testing and analysis only. -*/ -SQLITE_PRIVATE int *sqlite3PagerStats(Pager *pPager){ - static int a[11]; - a[0] = sqlite3PcacheRefCount(pPager->pPCache); - a[1] = sqlite3PcachePagecount(pPager->pPCache); - a[2] = sqlite3PcacheGetCachesize(pPager->pPCache); - a[3] = pPager->eState==PAGER_OPEN ? -1 : (int) pPager->dbSize; - a[4] = pPager->eState; - a[5] = pPager->errCode; - a[6] = pPager->aStat[PAGER_STAT_HIT]; - a[7] = pPager->aStat[PAGER_STAT_MISS]; - a[8] = 0; /* Used to be pPager->nOvfl */ - a[9] = pPager->nRead; - a[10] = pPager->aStat[PAGER_STAT_WRITE]; - return a; -} -#endif - -/* -** Parameter eStat must be either SQLITE_DBSTATUS_CACHE_HIT or -** SQLITE_DBSTATUS_CACHE_MISS. Before returning, *pnVal is incremented by the -** current cache hit or miss count, according to the value of eStat. If the -** reset parameter is non-zero, the cache hit or miss count is zeroed before -** returning. -*/ -SQLITE_PRIVATE void sqlite3PagerCacheStat(Pager *pPager, int eStat, int reset, int *pnVal){ - - assert( eStat==SQLITE_DBSTATUS_CACHE_HIT - || eStat==SQLITE_DBSTATUS_CACHE_MISS - || eStat==SQLITE_DBSTATUS_CACHE_WRITE - ); - - assert( SQLITE_DBSTATUS_CACHE_HIT+1==SQLITE_DBSTATUS_CACHE_MISS ); - assert( SQLITE_DBSTATUS_CACHE_HIT+2==SQLITE_DBSTATUS_CACHE_WRITE ); - assert( PAGER_STAT_HIT==0 && PAGER_STAT_MISS==1 && PAGER_STAT_WRITE==2 ); - - *pnVal += pPager->aStat[eStat - SQLITE_DBSTATUS_CACHE_HIT]; - if( reset ){ - pPager->aStat[eStat - SQLITE_DBSTATUS_CACHE_HIT] = 0; - } -} - -/* -** Return true if this is an in-memory pager. -*/ -SQLITE_PRIVATE int sqlite3PagerIsMemdb(Pager *pPager){ - return MEMDB; -} - -/* -** Check that there are at least nSavepoint savepoints open. If there are -** currently less than nSavepoints open, then open one or more savepoints -** to make up the difference. If the number of savepoints is already -** equal to nSavepoint, then this function is a no-op. -** -** If a memory allocation fails, SQLITE_NOMEM is returned. 
If an error -** occurs while opening the sub-journal file, then an IO error code is -** returned. Otherwise, SQLITE_OK. -*/ -SQLITE_PRIVATE int sqlite3PagerOpenSavepoint(Pager *pPager, int nSavepoint){ - int rc = SQLITE_OK; /* Return code */ - int nCurrent = pPager->nSavepoint; /* Current number of savepoints */ - - assert( pPager->eState>=PAGER_WRITER_LOCKED ); - assert( assert_pager_state(pPager) ); - - if( nSavepoint>nCurrent && pPager->useJournal ){ - int ii; /* Iterator variable */ - PagerSavepoint *aNew; /* New Pager.aSavepoint array */ - - /* Grow the Pager.aSavepoint array using realloc(). Return SQLITE_NOMEM - ** if the allocation fails. Otherwise, zero the new portion in case a - ** malloc failure occurs while populating it in the for(...) loop below. - */ - aNew = (PagerSavepoint *)sqlite3Realloc( - pPager->aSavepoint, sizeof(PagerSavepoint)*nSavepoint - ); - if( !aNew ){ - return SQLITE_NOMEM; - } - memset(&aNew[nCurrent], 0, (nSavepoint-nCurrent) * sizeof(PagerSavepoint)); - pPager->aSavepoint = aNew; - - /* Populate the PagerSavepoint structures just allocated. */ - for(ii=nCurrent; ii<nSavepoint; ii++){ - aNew[ii].nOrig = pPager->dbSize; - if( isOpen(pPager->jfd) && pPager->journalOff>0 ){ - aNew[ii].iOffset = pPager->journalOff; - }else{ - aNew[ii].iOffset = JOURNAL_HDR_SZ(pPager); - } - aNew[ii].iSubRec = pPager->nSubRec; - aNew[ii].pInSavepoint = sqlite3BitvecCreate(pPager->dbSize); - if( !aNew[ii].pInSavepoint ){ - return SQLITE_NOMEM; - } - if( pagerUseWal(pPager) ){ - sqlite3WalSavepoint(pPager->pWal, aNew[ii].aWalData); - } - pPager->nSavepoint = ii+1; - } - assert( pPager->nSavepoint==nSavepoint ); - assertTruncateConstraint(pPager); - } - - return rc; -} - -/* -** This function is called to rollback or release (commit) a savepoint. -** The savepoint to release or rollback need not be the most recently -** created savepoint. -** -** Parameter op is always either SAVEPOINT_ROLLBACK or SAVEPOINT_RELEASE. -** If it is SAVEPOINT_RELEASE, then release and destroy the savepoint with -** index iSavepoint. If it is SAVEPOINT_ROLLBACK, then rollback all changes -** that have occurred since the specified savepoint was created. -** -** The savepoint to rollback or release is identified by parameter -** iSavepoint. A value of 0 means to operate on the outermost savepoint -** (the first created). A value of (Pager.nSavepoint-1) means operate -** on the most recently created savepoint. If iSavepoint is greater than -** (Pager.nSavepoint-1), then this function is a no-op. -** -** If a negative value is passed to this function, then the current -** transaction is rolled back. This is different to calling -** sqlite3PagerRollback() because this function does not terminate -** the transaction or unlock the database, it just restores the -** contents of the database to its original state. -** -** In any case, all savepoints with an index greater than iSavepoint -** are destroyed. If this is a release operation (op==SAVEPOINT_RELEASE), -** then savepoint iSavepoint is also destroyed. -** -** This function may return SQLITE_NOMEM if a memory allocation fails, -** or an IO error code if an IO error occurs while rolling back a -** savepoint. If no errors occur, SQLITE_OK is returned.
-*/ -SQLITE_PRIVATE int sqlite3PagerSavepoint(Pager *pPager, int op, int iSavepoint){ - int rc = pPager->errCode; /* Return code */ - - assert( op==SAVEPOINT_RELEASE || op==SAVEPOINT_ROLLBACK ); - assert( iSavepoint>=0 || op==SAVEPOINT_ROLLBACK ); - - if( rc==SQLITE_OK && iSavepoint<pPager->nSavepoint ){ - int ii; /* Iterator variable */ - int nNew; /* Number of remaining savepoints after this op. */ - - /* Figure out how many savepoints will still be active after this - ** operation. Store this value in nNew. Then free resources associated - ** with any savepoints that are destroyed by this operation. - */ - nNew = iSavepoint + (( op==SAVEPOINT_RELEASE ) ? 0 : 1); - for(ii=nNew; ii<pPager->nSavepoint; ii++){ - sqlite3BitvecDestroy(pPager->aSavepoint[ii].pInSavepoint); - } - pPager->nSavepoint = nNew; - - /* If this is a release of the outermost savepoint, truncate - ** the sub-journal to zero bytes in size. */ - if( op==SAVEPOINT_RELEASE ){ - if( nNew==0 && isOpen(pPager->sjfd) ){ - /* Only truncate if it is an in-memory sub-journal. */ - if( sqlite3IsMemJournal(pPager->sjfd) ){ - rc = sqlite3OsTruncate(pPager->sjfd, 0); - assert( rc==SQLITE_OK ); - } - pPager->nSubRec = 0; - } - } - /* Else this is a rollback operation, playback the specified savepoint. - ** If this is a temp-file, it is possible that the journal file has - ** not yet been opened. In this case there have been no changes to - ** the database file, so the playback operation can be skipped. - */ - else if( pagerUseWal(pPager) || isOpen(pPager->jfd) ){ - PagerSavepoint *pSavepoint = (nNew==0)?0:&pPager->aSavepoint[nNew-1]; - rc = pagerPlaybackSavepoint(pPager, pSavepoint); - assert(rc!=SQLITE_DONE); - } - } - - return rc; -} - -/* -** Return the full pathname of the database file. -** -** Except, if the pager is in-memory only, then return an empty string if -** nullIfMemDb is true. This routine is called with nullIfMemDb==1 when -** used to report the filename to the user, for compatibility with legacy -** behavior. But when the Btree needs to know the filename for matching to -** shared cache, it uses nullIfMemDb==0 so that in-memory databases can -** participate in shared-cache. -*/ -SQLITE_PRIVATE const char *sqlite3PagerFilename(Pager *pPager, int nullIfMemDb){ - return (nullIfMemDb && pPager->memDb) ? "" : pPager->zFilename; -} - -/* -** Return the VFS structure for the pager. -*/ -SQLITE_PRIVATE const sqlite3_vfs *sqlite3PagerVfs(Pager *pPager){ - return pPager->pVfs; -} - -/* -** Return the file handle for the database file associated -** with the pager. This might return NULL if the file has -** not yet been opened. -*/ -SQLITE_PRIVATE sqlite3_file *sqlite3PagerFile(Pager *pPager){ - return pPager->fd; -} - -/* -** Return the full pathname of the journal file. -*/ -SQLITE_PRIVATE const char *sqlite3PagerJournalname(Pager *pPager){ - return pPager->zJournal; -} - -/* -** Return true if fsync() calls are disabled for this pager. Return FALSE -** if fsync()s are executed normally. -*/ -SQLITE_PRIVATE int sqlite3PagerNosync(Pager *pPager){ - return pPager->noSync; -} - -#ifdef SQLITE_HAS_CODEC -/* -** Set or retrieve the codec for this pager -*/ -SQLITE_PRIVATE void sqlite3PagerSetCodec( - Pager *pPager, - void *(*xCodec)(void*,void*,Pgno,int), - void (*xCodecSizeChng)(void*,int,int), - void (*xCodecFree)(void*), - void *pCodec -){ - if( pPager->xCodecFree ) pPager->xCodecFree(pPager->pCodec); - pPager->xCodec = pPager->memDb ?
0 : xCodec; - pPager->xCodecSizeChng = xCodecSizeChng; - pPager->xCodecFree = xCodecFree; - pPager->pCodec = pCodec; - pagerReportSize(pPager); -} -SQLITE_PRIVATE void *sqlite3PagerGetCodec(Pager *pPager){ - return pPager->pCodec; -} - -/* -** This function is called by the wal module when writing page content -** into the log file. -** -** This function returns a pointer to a buffer containing the encrypted -** page content. If a malloc fails, this function may return NULL. -*/ -SQLITE_PRIVATE void *sqlite3PagerCodec(PgHdr *pPg){ - void *aData = 0; - CODEC2(pPg->pPager, pPg->pData, pPg->pgno, 6, return 0, aData); - return aData; -} - -/* -** Return the current pager state -*/ -SQLITE_PRIVATE int sqlite3PagerState(Pager *pPager){ - return pPager->eState; -} -#endif /* SQLITE_HAS_CODEC */ - -#ifndef SQLITE_OMIT_AUTOVACUUM -/* -** Move the page pPg to location pgno in the file. -** -** There must be no references to the page previously located at -** pgno (which we call pPgOld) though that page is allowed to be -** in cache. If the page previously located at pgno is not already -** in the rollback journal, it is not put there by by this routine. -** -** References to the page pPg remain valid. Updating any -** meta-data associated with pPg (i.e. data stored in the nExtra bytes -** allocated along with the page) is the responsibility of the caller. -** -** A transaction must be active when this routine is called. It used to be -** required that a statement transaction was not active, but this restriction -** has been removed (CREATE INDEX needs to move a page when a statement -** transaction is active). -** -** If the fourth argument, isCommit, is non-zero, then this page is being -** moved as part of a database reorganization just before the transaction -** is being committed. In this case, it is guaranteed that the database page -** pPg refers to will not be written to again within this transaction. -** -** This function may return SQLITE_NOMEM or an IO error code if an error -** occurs. Otherwise, it returns SQLITE_OK. -*/ -SQLITE_PRIVATE int sqlite3PagerMovepage(Pager *pPager, DbPage *pPg, Pgno pgno, int isCommit){ - PgHdr *pPgOld; /* The page being overwritten. */ - Pgno needSyncPgno = 0; /* Old value of pPg->pgno, if sync is required */ - int rc; /* Return code */ - Pgno origPgno; /* The original page number */ - - assert( pPg->nRef>0 ); - assert( pPager->eState==PAGER_WRITER_CACHEMOD - || pPager->eState==PAGER_WRITER_DBMOD - ); - assert( assert_pager_state(pPager) ); - - /* In order to be able to rollback, an in-memory database must journal - ** the page we are moving from. - */ - if( MEMDB ){ - rc = sqlite3PagerWrite(pPg); - if( rc ) return rc; - } - - /* If the page being moved is dirty and has not been saved by the latest - ** savepoint, then save the current contents of the page into the - ** sub-journal now. This is required to handle the following scenario: - ** - ** BEGIN; - ** - ** SAVEPOINT one; - ** - ** ROLLBACK TO one; - ** - ** If page X were not written to the sub-journal here, it would not - ** be possible to restore its contents when the "ROLLBACK TO one" - ** statement were is processed. - ** - ** subjournalPage() may need to allocate space to store pPg->pgno into - ** one or more savepoint bitvecs. This is the reason this function - ** may return SQLITE_NOMEM. 
- */ - if( pPg->flags&PGHDR_DIRTY - && subjRequiresPage(pPg) - && SQLITE_OK!=(rc = subjournalPage(pPg)) - ){ - return rc; - } - - PAGERTRACE(("MOVE %d page %d (needSync=%d) moves to %d\n", - PAGERID(pPager), pPg->pgno, (pPg->flags&PGHDR_NEED_SYNC)?1:0, pgno)); - IOTRACE(("MOVE %p %d %d\n", pPager, pPg->pgno, pgno)) - - /* If the journal needs to be sync()ed before page pPg->pgno can - ** be written to, store pPg->pgno in local variable needSyncPgno. - ** - ** If the isCommit flag is set, there is no need to remember that - ** the journal needs to be sync()ed before database page pPg->pgno - ** can be written to. The caller has already promised not to write to it. - */ - if( (pPg->flags&PGHDR_NEED_SYNC) && !isCommit ){ - needSyncPgno = pPg->pgno; - assert( pPager->journalMode==PAGER_JOURNALMODE_OFF || - pageInJournal(pPager, pPg) || pPg->pgno>pPager->dbOrigSize ); - assert( pPg->flags&PGHDR_DIRTY ); - } - - /* If the cache contains a page with page-number pgno, remove it - ** from its hash chain. Also, if the PGHDR_NEED_SYNC flag was set for - ** page pgno before the 'move' operation, it needs to be retained - ** for the page moved there. - */ - pPg->flags &= ~PGHDR_NEED_SYNC; - pPgOld = pager_lookup(pPager, pgno); - assert( !pPgOld || pPgOld->nRef==1 ); - if( pPgOld ){ - pPg->flags |= (pPgOld->flags&PGHDR_NEED_SYNC); - if( MEMDB ){ - /* Do not discard pages from an in-memory database since we might - ** need to rollback later. Just move the page out of the way. */ - sqlite3PcacheMove(pPgOld, pPager->dbSize+1); - }else{ - sqlite3PcacheDrop(pPgOld); - } - } - - origPgno = pPg->pgno; - sqlite3PcacheMove(pPg, pgno); - sqlite3PcacheMakeDirty(pPg); - - /* For an in-memory database, make sure the original page continues - ** to exist, in case the transaction needs to roll back. Use pPgOld - ** as the original page since it has already been allocated. - */ - if( MEMDB ){ - assert( pPgOld ); - sqlite3PcacheMove(pPgOld, origPgno); - sqlite3PagerUnrefNotNull(pPgOld); - } - - if( needSyncPgno ){ - /* If needSyncPgno is non-zero, then the journal file needs to be - ** sync()ed before any data is written to database file page needSyncPgno. - ** Currently, no such page exists in the page-cache and the - ** "is journaled" bitvec flag has been set. This needs to be remedied by - ** loading the page into the pager-cache and setting the PGHDR_NEED_SYNC - ** flag. - ** - ** If the attempt to load the page into the page-cache fails, (due - ** to a malloc() or IO failure), clear the bit in the pInJournal[] - ** array. Otherwise, if the page is loaded and written again in - ** this transaction, it may be written to the database file before - ** it is synced into the journal file. This way, it may end up in - ** the journal file twice, but that is not a problem. - */ - PgHdr *pPgHdr; - rc = sqlite3PagerGet(pPager, needSyncPgno, &pPgHdr); - if( rc!=SQLITE_OK ){ - if( needSyncPgno<=pPager->dbOrigSize ){ - assert( pPager->pTmpSpace!=0 ); - sqlite3BitvecClear(pPager->pInJournal, needSyncPgno, pPager->pTmpSpace); - } - return rc; - } - pPgHdr->flags |= PGHDR_NEED_SYNC; - sqlite3PcacheMakeDirty(pPgHdr); - sqlite3PagerUnrefNotNull(pPgHdr); - } - - return SQLITE_OK; -} -#endif - -/* -** Return a pointer to the data for the specified page. -*/ -SQLITE_PRIVATE void *sqlite3PagerGetData(DbPage *pPg){ - assert( pPg->nRef>0 || pPg->pPager->memDb ); - return pPg->pData; -} - -/* -** Return a pointer to the Pager.nExtra bytes of "extra" space -** allocated along with the specified page. 
-*/ -SQLITE_PRIVATE void *sqlite3PagerGetExtra(DbPage *pPg){ - return pPg->pExtra; -} - -/* -** Get/set the locking-mode for this pager. Parameter eMode must be one -** of PAGER_LOCKINGMODE_QUERY, PAGER_LOCKINGMODE_NORMAL or -** PAGER_LOCKINGMODE_EXCLUSIVE. If the parameter is not _QUERY, then -** the locking-mode is set to the value specified. -** -** The returned value is either PAGER_LOCKINGMODE_NORMAL or -** PAGER_LOCKINGMODE_EXCLUSIVE, indicating the current (possibly updated) -** locking-mode. -*/ -SQLITE_PRIVATE int sqlite3PagerLockingMode(Pager *pPager, int eMode){ - assert( eMode==PAGER_LOCKINGMODE_QUERY - || eMode==PAGER_LOCKINGMODE_NORMAL - || eMode==PAGER_LOCKINGMODE_EXCLUSIVE ); - assert( PAGER_LOCKINGMODE_QUERY<0 ); - assert( PAGER_LOCKINGMODE_NORMAL>=0 && PAGER_LOCKINGMODE_EXCLUSIVE>=0 ); - assert( pPager->exclusiveMode || 0==sqlite3WalHeapMemory(pPager->pWal) ); - if( eMode>=0 && !pPager->tempFile && !sqlite3WalHeapMemory(pPager->pWal) ){ - pPager->exclusiveMode = (u8)eMode; - } - return (int)pPager->exclusiveMode; -} - -/* -** Set the journal-mode for this pager. Parameter eMode must be one of: -** -** PAGER_JOURNALMODE_DELETE -** PAGER_JOURNALMODE_TRUNCATE -** PAGER_JOURNALMODE_PERSIST -** PAGER_JOURNALMODE_OFF -** PAGER_JOURNALMODE_MEMORY -** PAGER_JOURNALMODE_WAL -** -** The journalmode is set to the value specified if the change is allowed. -** The change may be disallowed for the following reasons: -** -** * An in-memory database can only have its journal_mode set to _OFF -** or _MEMORY. -** -** * Temporary databases cannot have _WAL journalmode. -** -** The returned indicate the current (possibly updated) journal-mode. -*/ -SQLITE_PRIVATE int sqlite3PagerSetJournalMode(Pager *pPager, int eMode){ - u8 eOld = pPager->journalMode; /* Prior journalmode */ - -#ifdef SQLITE_DEBUG - /* The print_pager_state() routine is intended to be used by the debugger - ** only. We invoke it once here to suppress a compiler warning. */ - print_pager_state(pPager); -#endif - - - /* The eMode parameter is always valid */ - assert( eMode==PAGER_JOURNALMODE_DELETE - || eMode==PAGER_JOURNALMODE_TRUNCATE - || eMode==PAGER_JOURNALMODE_PERSIST - || eMode==PAGER_JOURNALMODE_OFF - || eMode==PAGER_JOURNALMODE_WAL - || eMode==PAGER_JOURNALMODE_MEMORY ); - - /* This routine is only called from the OP_JournalMode opcode, and - ** the logic there will never allow a temporary file to be changed - ** to WAL mode. - */ - assert( pPager->tempFile==0 || eMode!=PAGER_JOURNALMODE_WAL ); - - /* Do allow the journalmode of an in-memory database to be set to - ** anything other than MEMORY or OFF - */ - if( MEMDB ){ - assert( eOld==PAGER_JOURNALMODE_MEMORY || eOld==PAGER_JOURNALMODE_OFF ); - if( eMode!=PAGER_JOURNALMODE_MEMORY && eMode!=PAGER_JOURNALMODE_OFF ){ - eMode = eOld; - } - } - - if( eMode!=eOld ){ - - /* Change the journal mode. */ - assert( pPager->eState!=PAGER_ERROR ); - pPager->journalMode = (u8)eMode; - - /* When transistioning from TRUNCATE or PERSIST to any other journal - ** mode except WAL, unless the pager is in locking_mode=exclusive mode, - ** delete the journal file. 
- */ - assert( (PAGER_JOURNALMODE_TRUNCATE & 5)==1 ); - assert( (PAGER_JOURNALMODE_PERSIST & 5)==1 ); - assert( (PAGER_JOURNALMODE_DELETE & 5)==0 ); - assert( (PAGER_JOURNALMODE_MEMORY & 5)==4 ); - assert( (PAGER_JOURNALMODE_OFF & 5)==0 ); - assert( (PAGER_JOURNALMODE_WAL & 5)==5 ); - - assert( isOpen(pPager->fd) || pPager->exclusiveMode ); - if( !pPager->exclusiveMode && (eOld & 5)==1 && (eMode & 1)==0 ){ - - /* In this case we would like to delete the journal file. If it is - ** not possible, then that is not a problem. Deleting the journal file - ** here is an optimization only. - ** - ** Before deleting the journal file, obtain a RESERVED lock on the - ** database file. This ensures that the journal file is not deleted - ** while it is in use by some other client. - */ - sqlite3OsClose(pPager->jfd); - if( pPager->eLock>=RESERVED_LOCK ){ - sqlite3OsDelete(pPager->pVfs, pPager->zJournal, 0); - }else{ - int rc = SQLITE_OK; - int state = pPager->eState; - assert( state==PAGER_OPEN || state==PAGER_READER ); - if( state==PAGER_OPEN ){ - rc = sqlite3PagerSharedLock(pPager); - } - if( pPager->eState==PAGER_READER ){ - assert( rc==SQLITE_OK ); - rc = pagerLockDb(pPager, RESERVED_LOCK); - } - if( rc==SQLITE_OK ){ - sqlite3OsDelete(pPager->pVfs, pPager->zJournal, 0); - } - if( rc==SQLITE_OK && state==PAGER_READER ){ - pagerUnlockDb(pPager, SHARED_LOCK); - }else if( state==PAGER_OPEN ){ - pager_unlock(pPager); - } - assert( state==pPager->eState ); - } - } - } - - /* Return the new journal mode */ - return (int)pPager->journalMode; -} - -/* -** Return the current journal mode. -*/ -SQLITE_PRIVATE int sqlite3PagerGetJournalMode(Pager *pPager){ - return (int)pPager->journalMode; -} - -/* -** Return TRUE if the pager is in a state where it is OK to change the -** journalmode. Journalmode changes can only happen when the database -** is unmodified. -*/ -SQLITE_PRIVATE int sqlite3PagerOkToChangeJournalMode(Pager *pPager){ - assert( assert_pager_state(pPager) ); - if( pPager->eState>=PAGER_WRITER_CACHEMOD ) return 0; - if( NEVER(isOpen(pPager->jfd) && pPager->journalOff>0) ) return 0; - return 1; -} - -/* -** Get/set the size-limit used for persistent journal files. -** -** Setting the size limit to -1 means no limit is enforced. -** An attempt to set a limit smaller than -1 is a no-op. -*/ -SQLITE_PRIVATE i64 sqlite3PagerJournalSizeLimit(Pager *pPager, i64 iLimit){ - if( iLimit>=-1 ){ - pPager->journalSizeLimit = iLimit; - sqlite3WalLimit(pPager->pWal, iLimit); - } - return pPager->journalSizeLimit; -} - -/* -** Return a pointer to the pPager->pBackup variable. The backup module -** in backup.c maintains the content of this variable. This module -** uses it opaquely as an argument to sqlite3BackupRestart() and -** sqlite3BackupUpdate() only. -*/ -SQLITE_PRIVATE sqlite3_backup **sqlite3PagerBackupPtr(Pager *pPager){ - return &pPager->pBackup; -} - -#ifndef SQLITE_OMIT_VACUUM -/* -** Unless this is an in-memory or temporary database, clear the pager cache. -*/ -SQLITE_PRIVATE void sqlite3PagerClearCache(Pager *pPager){ - if( !MEMDB && pPager->tempFile==0 ) pager_reset(pPager); -} -#endif - -#ifndef SQLITE_OMIT_WAL -/* -** This function is called when the user invokes "PRAGMA wal_checkpoint", -** "PRAGMA wal_blocking_checkpoint" or calls the sqlite3_wal_checkpoint() -** or wal_blocking_checkpoint() API functions. -** -** Parameter eMode is one of SQLITE_CHECKPOINT_PASSIVE, FULL or RESTART. 
-*/ -SQLITE_PRIVATE int sqlite3PagerCheckpoint(Pager *pPager, int eMode, int *pnLog, int *pnCkpt){ - int rc = SQLITE_OK; - if( pPager->pWal ){ - rc = sqlite3WalCheckpoint(pPager->pWal, eMode, - pPager->xBusyHandler, pPager->pBusyHandlerArg, - pPager->ckptSyncFlags, pPager->pageSize, (u8 *)pPager->pTmpSpace, - pnLog, pnCkpt - ); - } - return rc; -} - -SQLITE_PRIVATE int sqlite3PagerWalCallback(Pager *pPager){ - return sqlite3WalCallback(pPager->pWal); -} - -/* -** Return true if the underlying VFS for the given pager supports the -** primitives necessary for write-ahead logging. -*/ -SQLITE_PRIVATE int sqlite3PagerWalSupported(Pager *pPager){ - const sqlite3_io_methods *pMethods = pPager->fd->pMethods; - return pPager->exclusiveMode || (pMethods->iVersion>=2 && pMethods->xShmMap); -} - -/* -** Attempt to take an exclusive lock on the database file. If a PENDING lock -** is obtained instead, immediately release it. -*/ -static int pagerExclusiveLock(Pager *pPager){ - int rc; /* Return code */ - - assert( pPager->eLock==SHARED_LOCK || pPager->eLock==EXCLUSIVE_LOCK ); - rc = pagerLockDb(pPager, EXCLUSIVE_LOCK); - if( rc!=SQLITE_OK ){ - /* If the attempt to grab the exclusive lock failed, release the - ** pending lock that may have been obtained instead. */ - pagerUnlockDb(pPager, SHARED_LOCK); - } - - return rc; -} - -/* -** Call sqlite3WalOpen() to open the WAL handle. If the pager is in -** exclusive-locking mode when this function is called, take an EXCLUSIVE -** lock on the database file and use heap-memory to store the wal-index -** in. Otherwise, use the normal shared-memory. -*/ -static int pagerOpenWal(Pager *pPager){ - int rc = SQLITE_OK; - - assert( pPager->pWal==0 && pPager->tempFile==0 ); - assert( pPager->eLock==SHARED_LOCK || pPager->eLock==EXCLUSIVE_LOCK ); - - /* If the pager is already in exclusive-mode, the WAL module will use - ** heap-memory for the wal-index instead of the VFS shared-memory - ** implementation. Take the exclusive lock now, before opening the WAL - ** file, to make sure this is safe. - */ - if( pPager->exclusiveMode ){ - rc = pagerExclusiveLock(pPager); - } - - /* Open the connection to the log file. If this operation fails, - ** (e.g. due to malloc() failure), return an error code. - */ - if( rc==SQLITE_OK ){ - rc = sqlite3WalOpen(pPager->pVfs, - pPager->fd, pPager->zWal, pPager->exclusiveMode, - pPager->journalSizeLimit, &pPager->pWal - ); - } - pagerFixMaplimit(pPager); - - return rc; -} - - -/* -** The caller must be holding a SHARED lock on the database file to call -** this function. -** -** If the pager passed as the first argument is open on a real database -** file (not a temp file or an in-memory database), and the WAL file -** is not already open, make an attempt to open it now. If successful, -** return SQLITE_OK. If an error occurs or the VFS used by the pager does -** not support the xShmXXX() methods, return an error code. *pbOpen is -** not modified in either case. -** -** If the pager is open on a temp-file (or in-memory database), or if -** the WAL file is already open, set *pbOpen to 1 and return SQLITE_OK -** without doing anything. 
-*/ -SQLITE_PRIVATE int sqlite3PagerOpenWal( - Pager *pPager, /* Pager object */ - int *pbOpen /* OUT: Set to true if call is a no-op */ -){ - int rc = SQLITE_OK; /* Return code */ - - assert( assert_pager_state(pPager) ); - assert( pPager->eState==PAGER_OPEN || pbOpen ); - assert( pPager->eState==PAGER_READER || !pbOpen ); - assert( pbOpen==0 || *pbOpen==0 ); - assert( pbOpen!=0 || (!pPager->tempFile && !pPager->pWal) ); - - if( !pPager->tempFile && !pPager->pWal ){ - if( !sqlite3PagerWalSupported(pPager) ) return SQLITE_CANTOPEN; - - /* Close any rollback journal previously open */ - sqlite3OsClose(pPager->jfd); - - rc = pagerOpenWal(pPager); - if( rc==SQLITE_OK ){ - pPager->journalMode = PAGER_JOURNALMODE_WAL; - pPager->eState = PAGER_OPEN; - } - }else{ - *pbOpen = 1; - } - - return rc; -} - -/* -** This function is called to close the connection to the log file prior -** to switching from WAL to rollback mode. -** -** Before closing the log file, this function attempts to take an -** EXCLUSIVE lock on the database file. If this cannot be obtained, an -** error (SQLITE_BUSY) is returned and the log connection is not closed. -** If successful, the EXCLUSIVE lock is not released before returning. -*/ -SQLITE_PRIVATE int sqlite3PagerCloseWal(Pager *pPager){ - int rc = SQLITE_OK; - - assert( pPager->journalMode==PAGER_JOURNALMODE_WAL ); - - /* If the log file is not already open, but does exist in the file-system, - ** it may need to be checkpointed before the connection can switch to - ** rollback mode. Open it now so this can happen. - */ - if( !pPager->pWal ){ - int logexists = 0; - rc = pagerLockDb(pPager, SHARED_LOCK); - if( rc==SQLITE_OK ){ - rc = sqlite3OsAccess( - pPager->pVfs, pPager->zWal, SQLITE_ACCESS_EXISTS, &logexists - ); - } - if( rc==SQLITE_OK && logexists ){ - rc = pagerOpenWal(pPager); - } - } - - /* Checkpoint and close the log. Because an EXCLUSIVE lock is held on - ** the database file, the log and log-summary files will be deleted. - */ - if( rc==SQLITE_OK && pPager->pWal ){ - rc = pagerExclusiveLock(pPager); - if( rc==SQLITE_OK ){ - rc = sqlite3WalClose(pPager->pWal, pPager->ckptSyncFlags, - pPager->pageSize, (u8*)pPager->pTmpSpace); - pPager->pWal = 0; - pagerFixMaplimit(pPager); - } - } - return rc; -} - -#endif /* !SQLITE_OMIT_WAL */ - -#ifdef SQLITE_ENABLE_ZIPVFS -/* -** A read-lock must be held on the pager when this function is called. If -** the pager is in WAL mode and the WAL file currently contains one or more -** frames, return the size in bytes of the page images stored within the -** WAL frames. Otherwise, if this is not a WAL database or the WAL file -** is empty, return 0. -*/ -SQLITE_PRIVATE int sqlite3PagerWalFramesize(Pager *pPager){ - assert( pPager->eState==PAGER_READER ); - return sqlite3WalFramesize(pPager->pWal); -} -#endif - -#endif /* SQLITE_OMIT_DISKIO */ - -/************** End of pager.c ***********************************************/ -/************** Begin file wal.c *********************************************/ -/* -** 2010 February 1 -** -** The author disclaims copyright to this source code. In place of -** a legal notice, here is a blessing: -** -** May you do good and not evil. -** May you find forgiveness for yourself and forgive others. -** May you share freely, never taking more than you give. -** -************************************************************************* -** -** This file contains the implementation of a write-ahead log (WAL) used in -** "journal_mode=WAL" mode. 
-** -** WRITE-AHEAD LOG (WAL) FILE FORMAT -** -** A WAL file consists of a header followed by zero or more "frames". -** Each frame records the revised content of a single page from the -** database file. All changes to the database are recorded by writing -** frames into the WAL. Transactions commit when a frame is written that -** contains a commit marker. A single WAL can and usually does record -** multiple transactions. Periodically, the content of the WAL is -** transferred back into the database file in an operation called a -** "checkpoint". -** -** A single WAL file can be used multiple times. In other words, the -** WAL can fill up with frames and then be checkpointed and then new -** frames can overwrite the old ones. A WAL always grows from beginning -** toward the end. Checksums and counters attached to each frame are -** used to determine which frames within the WAL are valid and which -** are leftovers from prior checkpoints. -** -** The WAL header is 32 bytes in size and consists of the following eight -** big-endian 32-bit unsigned integer values: -** -** 0: Magic number. 0x377f0682 or 0x377f0683 -** 4: File format version. Currently 3007000 -** 8: Database page size. Example: 1024 -** 12: Checkpoint sequence number -** 16: Salt-1, random integer incremented with each checkpoint -** 20: Salt-2, a different random integer changing with each ckpt -** 24: Checksum-1 (first part of checksum for first 24 bytes of header). -** 28: Checksum-2 (second part of checksum for first 24 bytes of header). -** -** Immediately following the wal-header are zero or more frames. Each -** frame consists of a 24-byte frame-header followed by a <page-size> bytes -** of page data. The frame-header is six big-endian 32-bit unsigned -** integer values, as follows: -** -** 0: Page number. -** 4: For commit records, the size of the database image in pages -** after the commit. For all other records, zero. -** 8: Salt-1 (copied from the header) -** 12: Salt-2 (copied from the header) -** 16: Checksum-1. -** 20: Checksum-2. -** -** A frame is considered valid if and only if the following conditions are -** true: -** -** (1) The salt-1 and salt-2 values in the frame-header match -** salt values in the wal-header -** -** (2) The checksum values in the final 8 bytes of the frame-header -** exactly match the checksum computed consecutively on the -** WAL header and the first 8 bytes and the content of all frames -** up to and including the current frame. -** -** The checksum is computed using 32-bit big-endian integers if the -** magic number in the first 4 bytes of the WAL is 0x377f0683 and it -** is computed using little-endian if the magic number is 0x377f0682. -** The checksum values are always stored in the frame header in a -** big-endian format regardless of which byte order is used to compute -** the checksum. The checksum is computed by interpreting the input as -** an even number of unsigned 32-bit integers: x[0] through x[N]. The -** algorithm used for the checksum is as follows: -** -** for i from 0 to n-1 step 2: -** s0 += x[i] + s1; -** s1 += x[i+1] + s0; -** endfor -** -** Note that s0 and s1 are both weighted checksums using fibonacci weights -** in reverse order (the largest fibonacci weight occurs on the first element -** of the sequence being summed.) The s1 value spans all 32-bit -** terms of the sequence whereas s0 omits the final term. -** -** On a checkpoint, the WAL is first VFS.xSync-ed, then valid content of the -** WAL is transferred into the database, then the database is VFS.xSync-ed.
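/*
** [Illustrative sketch -- not part of the vendored sqlite3.c removed by this
** diff.] The checksum recurrence described in the header comment above,
** restated as a tiny standalone function over native-endian 32-bit words.
** The name and signature are invented for the sketch; the real routine in
** this file is walChecksumBytes(), which also handles the byte-swapped case
** selected by the WAL magic number.
*/
#include <stddef.h>
#include <stdint.h>

static void wal_cksum_sketch(const uint32_t *x, size_t n, uint32_t out[2]){
  uint32_t s0 = 0, s1 = 0;       /* running checksum pair */
  size_t i;
  for(i=0; i+1<n; i+=2){         /* n is assumed to be even */
    s0 += x[i]   + s1;
    s1 += x[i+1] + s0;
  }
  out[0] = s0;                   /* stored big-endian on disk */
  out[1] = s1;
}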
-** The VFS.xSync operations serve as write barriers - all writes launched -** before the xSync must complete before any write that launches after the -** xSync begins. -** -** After each checkpoint, the salt-1 value is incremented and the salt-2 -** value is randomized. This prevents old and new frames in the WAL from -** being considered valid at the same time and being checkpointing together -** following a crash. -** -** READER ALGORITHM -** -** To read a page from the database (call it page number P), a reader -** first checks the WAL to see if it contains page P. If so, then the -** last valid instance of page P that is a followed by a commit frame -** or is a commit frame itself becomes the value read. If the WAL -** contains no copies of page P that are valid and which are a commit -** frame or are followed by a commit frame, then page P is read from -** the database file. -** -** To start a read transaction, the reader records the index of the last -** valid frame in the WAL. The reader uses this recorded "mxFrame" value -** for all subsequent read operations. New transactions can be appended -** to the WAL, but as long as the reader uses its original mxFrame value -** and ignores the newly appended content, it will see a consistent snapshot -** of the database from a single point in time. This technique allows -** multiple concurrent readers to view different versions of the database -** content simultaneously. -** -** The reader algorithm in the previous paragraphs works correctly, but -** because frames for page P can appear anywhere within the WAL, the -** reader has to scan the entire WAL looking for page P frames. If the -** WAL is large (multiple megabytes is typical) that scan can be slow, -** and read performance suffers. To overcome this problem, a separate -** data structure called the wal-index is maintained to expedite the -** search for frames of a particular page. -** -** WAL-INDEX FORMAT -** -** Conceptually, the wal-index is shared memory, though VFS implementations -** might choose to implement the wal-index using a mmapped file. Because -** the wal-index is shared memory, SQLite does not support journal_mode=WAL -** on a network filesystem. All users of the database must be able to -** share memory. -** -** The wal-index is transient. After a crash, the wal-index can (and should -** be) reconstructed from the original WAL file. In fact, the VFS is required -** to either truncate or zero the header of the wal-index when the last -** connection to it closes. Because the wal-index is transient, it can -** use an architecture-specific format; it does not have to be cross-platform. -** Hence, unlike the database and WAL file formats which store all values -** as big endian, the wal-index can store multi-byte values in the native -** byte order of the host computer. -** -** The purpose of the wal-index is to answer this question quickly: Given -** a page number P and a maximum frame index M, return the index of the -** last frame in the wal before frame M for page P in the WAL, or return -** NULL if there are no frames for page P in the WAL prior to M. -** -** The wal-index consists of a header region, followed by an one or -** more index blocks. -** -** The wal-index header contains the total number of frames within the WAL -** in the mxFrame field. -** -** Each index block except for the first contains information on -** HASHTABLE_NPAGE frames. The first index block contains information on -** HASHTABLE_NPAGE_ONE frames. 
The values of HASHTABLE_NPAGE_ONE and -** HASHTABLE_NPAGE are selected so that together the wal-index header and -** first index block are the same size as all other index blocks in the -** wal-index. -** -** Each index block contains two sections, a page-mapping that contains the -** database page number associated with each wal frame, and a hash-table -** that allows readers to query an index block for a specific page number. -** The page-mapping is an array of HASHTABLE_NPAGE (or HASHTABLE_NPAGE_ONE -** for the first index block) 32-bit page numbers. The first entry in the -** first index-block contains the database page number corresponding to the -** first frame in the WAL file. The first entry in the second index block -** in the WAL file corresponds to the (HASHTABLE_NPAGE_ONE+1)th frame in -** the log, and so on. -** -** The last index block in a wal-index usually contains less than the full -** complement of HASHTABLE_NPAGE (or HASHTABLE_NPAGE_ONE) page-numbers, -** depending on the contents of the WAL file. This does not change the -** allocated size of the page-mapping array - the page-mapping array merely -** contains unused entries. -** -** Even without using the hash table, the last frame for page P -** can be found by scanning the page-mapping sections of each index block -** starting with the last index block and moving toward the first, and -** within each index block, starting at the end and moving toward the -** beginning. The first entry that equals P corresponds to the frame -** holding the content for that page. -** -** The hash table consists of HASHTABLE_NSLOT 16-bit unsigned integers. -** HASHTABLE_NSLOT = 2*HASHTABLE_NPAGE, and there is one entry in the -** hash table for each page number in the mapping section, so the hash -** table is never more than half full. The expected number of collisions -** prior to finding a match is 1. Each entry of the hash table is an -** 1-based index of an entry in the mapping section of the same -** index block. Let K be the 1-based index of the largest entry in -** the mapping section. (For index blocks other than the last, K will -** always be exactly HASHTABLE_NPAGE (4096) and for the last index block -** K will be (mxFrame%HASHTABLE_NPAGE).) Unused slots of the hash table -** contain a value of 0. -** -** To look for page P in the hash table, first compute a hash iKey on -** P as follows: -** -** iKey = (P * 383) % HASHTABLE_NSLOT -** -** Then start scanning entries of the hash table, starting with iKey -** (wrapping around to the beginning when the end of the hash table is -** reached) until an unused hash slot is found. Let the first unused slot -** be at index iUnused. (iUnused might be less than iKey if there was -** wrap-around.) Because the hash table is never more than half full, -** the search is guaranteed to eventually hit an unused entry. Let -** iMax be the value between iKey and iUnused, closest to iUnused, -** where aHash[iMax]==P. If there is no iMax entry (if there exists -** no hash slot such that aHash[i]==p) then page P is not in the -** current index block. Otherwise the iMax-th mapping entry of the -** current index block corresponds to the last entry that references -** page P. -** -** A hash search begins with the last index block and moves toward the -** first index block, looking for entries corresponding to page P. 
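/*
** [Illustrative sketch -- not part of the vendored sqlite3.c removed by this
** diff.] The probe sequence described above, for a single index block.
** aHash[] holds HASHTABLE_NSLOT slots; a value of 0 marks an unused slot and
** any other value is a 1-based index into the block's page-mapping array
** aPgno[] (aPgno[i] being the page number of the block's i-th frame). The
** constants are restated locally and the helper name is invented for the
** sketch.
*/
#include <stdint.h>

#define SKETCH_NPAGE 4096              /* HASHTABLE_NPAGE */
#define SKETCH_NSLOT (2*SKETCH_NPAGE)  /* HASHTABLE_NSLOT */

static int wal_hash_probe_sketch(const uint16_t *aHash,
                                 const uint32_t *aPgno, uint32_t P){
  int iKey = (int)((P * 383u) % SKETCH_NSLOT); /* iKey = (P * 383) % NSLOT */
  int iMax = 0;                                /* last match seen so far */
  while( aHash[iKey] ){                        /* stop at first unused slot */
    if( aPgno[aHash[iKey]]==P ) iMax = aHash[iKey];
    iKey = (iKey+1) & (SKETCH_NSLOT-1);        /* wrap around */
  }
  return iMax;  /* 0 if this block holds no frame for page P */
}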
On -** average, only two or three slots in each index block need to be -** examined in order to either find the last entry for page P, or to -** establish that no such entry exists in the block. Each index block -** holds over 4000 entries. So two or three index blocks are sufficient -** to cover a typical 10 megabyte WAL file, assuming 1K pages. 8 or 10 -** comparisons (on average) suffice to either locate a frame in the -** WAL or to establish that the frame does not exist in the WAL. This -** is much faster than scanning the entire 10MB WAL. -** -** Note that entries are added in order of increasing K. Hence, one -** reader might be using some value K0 and a second reader that started -** at a later time (after additional transactions were added to the WAL -** and to the wal-index) might be using a different value K1, where K1>K0. -** Both readers can use the same hash table and mapping section to get -** the correct result. There may be entries in the hash table with -** K>K0 but to the first reader, those entries will appear to be unused -** slots in the hash table and so the first reader will get an answer as -** if no values greater than K0 had ever been inserted into the hash table -** in the first place - which is what reader one wants. Meanwhile, the -** second reader using K1 will see additional values that were inserted -** later, which is exactly what reader two wants. -** -** When a rollback occurs, the value of K is decreased. Hash table entries -** that correspond to frames greater than the new K value are removed -** from the hash table at this point. -*/ -#ifndef SQLITE_OMIT_WAL - - -/* -** Trace output macros -*/ -#if defined(SQLITE_TEST) && defined(SQLITE_DEBUG) -SQLITE_PRIVATE int sqlite3WalTrace = 0; -# define WALTRACE(X) if(sqlite3WalTrace) sqlite3DebugPrintf X -#else -# define WALTRACE(X) -#endif - -/* -** The maximum (and only) versions of the wal and wal-index formats -** that may be interpreted by this version of SQLite. -** -** If a client begins recovering a WAL file and finds that (a) the checksum -** values in the wal-header are correct and (b) the version field is not -** WAL_MAX_VERSION, recovery fails and SQLite returns SQLITE_CANTOPEN. -** -** Similarly, if a client successfully reads a wal-index header (i.e. the -** checksum test is successful) and finds that the version field is not -** WALINDEX_MAX_VERSION, then no read-transaction is opened and SQLite -** returns SQLITE_CANTOPEN. -*/ -#define WAL_MAX_VERSION 3007000 -#define WALINDEX_MAX_VERSION 3007000 - -/* -** Indices of various locking bytes. WAL_NREADER is the number -** of available reader locks and should be at least 3. -*/ -#define WAL_WRITE_LOCK 0 -#define WAL_ALL_BUT_WRITE 1 -#define WAL_CKPT_LOCK 1 -#define WAL_RECOVER_LOCK 2 -#define WAL_READ_LOCK(I) (3+(I)) -#define WAL_NREADER (SQLITE_SHM_NLOCK-3) - - -/* Object declarations */ -typedef struct WalIndexHdr WalIndexHdr; -typedef struct WalIterator WalIterator; -typedef struct WalCkptInfo WalCkptInfo; - - -/* -** The following object holds a copy of the wal-index header content. -** -** The actual header in the wal-index consists of two copies of this -** object. -** -** The szPage value can be any power of 2 between 512 and 32768, inclusive. -** Or it can be 1 to represent a 65536-byte page. The latter case was -** added in 3.7.1 when support for 64K pages was added. 
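/*
** [Illustrative sketch -- not part of the vendored sqlite3.c removed by this
** diff.] The szPage convention just described, written out: a stored value
** of 1 stands in for a 65536-byte page (which does not fit in the 16-bit
** field), while every other legal value is the page size itself. The helper
** name is invented for the sketch.
*/
#include <stdint.h>

static uint32_t wal_szpage_bytes_sketch(uint16_t szPage){
  return (szPage==1) ? 65536u : (uint32_t)szPage;
}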
-*/ -struct WalIndexHdr { - u32 iVersion; /* Wal-index version */ - u32 unused; /* Unused (padding) field */ - u32 iChange; /* Counter incremented each transaction */ - u8 isInit; /* 1 when initialized */ - u8 bigEndCksum; /* True if checksums in WAL are big-endian */ - u16 szPage; /* Database page size in bytes. 1==64K */ - u32 mxFrame; /* Index of last valid frame in the WAL */ - u32 nPage; /* Size of database in pages */ - u32 aFrameCksum[2]; /* Checksum of last frame in log */ - u32 aSalt[2]; /* Two salt values copied from WAL header */ - u32 aCksum[2]; /* Checksum over all prior fields */ -}; - -/* -** A copy of the following object occurs in the wal-index immediately -** following the second copy of the WalIndexHdr. This object stores -** information used by checkpoint. -** -** nBackfill is the number of frames in the WAL that have been written -** back into the database. (We call the act of moving content from WAL to -** database "backfilling".) The nBackfill number is never greater than -** WalIndexHdr.mxFrame. nBackfill can only be increased by threads -** holding the WAL_CKPT_LOCK lock (which includes a recovery thread). -** However, a WAL_WRITE_LOCK thread can move the value of nBackfill from -** mxFrame back to zero when the WAL is reset. -** -** There is one entry in aReadMark[] for each reader lock. If a reader -** holds read-lock K, then the value in aReadMark[K] is no greater than -** the mxFrame for that reader. The value READMARK_NOT_USED (0xffffffff) -** for any aReadMark[] means that entry is unused. aReadMark[0] is -** a special case; its value is never used and it exists as a place-holder -** to avoid having to offset aReadMark[] indexs by one. Readers holding -** WAL_READ_LOCK(0) always ignore the entire WAL and read all content -** directly from the database. -** -** The value of aReadMark[K] may only be changed by a thread that -** is holding an exclusive lock on WAL_READ_LOCK(K). Thus, the value of -** aReadMark[K] cannot changed while there is a reader is using that mark -** since the reader will be holding a shared lock on WAL_READ_LOCK(K). -** -** The checkpointer may only transfer frames from WAL to database where -** the frame numbers are less than or equal to every aReadMark[] that is -** in use (that is, every aReadMark[j] for which there is a corresponding -** WAL_READ_LOCK(j)). New readers (usually) pick the aReadMark[] with the -** largest value and will increase an unused aReadMark[] to mxFrame if there -** is not already an aReadMark[] equal to mxFrame. The exception to the -** previous sentence is when nBackfill equals mxFrame (meaning that everything -** in the WAL has been backfilled into the database) then new readers -** will choose aReadMark[0] which has value 0 and hence such reader will -** get all their all content directly from the database file and ignore -** the WAL. -** -** Writers normally append new frames to the end of the WAL. However, -** if nBackfill equals mxFrame (meaning that all WAL content has been -** written back into the database) and if no readers are using the WAL -** (in other words, if there are no WAL_READ_LOCK(i) where i>0) then -** the writer will first "reset" the WAL back to the beginning and start -** writing new content beginning at frame 1. -** -** We assume that 32-bit loads are atomic and so no locks are needed in -** order to read from any aReadMark[] entries. 
-*/ -struct WalCkptInfo { - u32 nBackfill; /* Number of WAL frames backfilled into DB */ - u32 aReadMark[WAL_NREADER]; /* Reader marks */ -}; -#define READMARK_NOT_USED 0xffffffff - - -/* A block of WALINDEX_LOCK_RESERVED bytes beginning at -** WALINDEX_LOCK_OFFSET is reserved for locks. Since some systems -** only support mandatory file-locks, we do not read or write data -** from the region of the file on which locks are applied. -*/ -#define WALINDEX_LOCK_OFFSET (sizeof(WalIndexHdr)*2 + sizeof(WalCkptInfo)) -#define WALINDEX_LOCK_RESERVED 16 -#define WALINDEX_HDR_SIZE (WALINDEX_LOCK_OFFSET+WALINDEX_LOCK_RESERVED) - -/* Size of header before each frame in wal */ -#define WAL_FRAME_HDRSIZE 24 - -/* Size of write ahead log header, including checksum. */ -/* #define WAL_HDRSIZE 24 */ -#define WAL_HDRSIZE 32 - -/* WAL magic value. Either this value, or the same value with the least -** significant bit also set (WAL_MAGIC | 0x00000001) is stored in 32-bit -** big-endian format in the first 4 bytes of a WAL file. -** -** If the LSB is set, then the checksums for each frame within the WAL -** file are calculated by treating all data as an array of 32-bit -** big-endian words. Otherwise, they are calculated by interpreting -** all data as 32-bit little-endian words. -*/ -#define WAL_MAGIC 0x377f0682 - -/* -** Return the offset of frame iFrame in the write-ahead log file, -** assuming a database page size of szPage bytes. The offset returned -** is to the start of the write-ahead log frame-header. -*/ -#define walFrameOffset(iFrame, szPage) ( \ - WAL_HDRSIZE + ((iFrame)-1)*(i64)((szPage)+WAL_FRAME_HDRSIZE) \ -) - -/* -** An open write-ahead log file is represented by an instance of the -** following object. -*/ -struct Wal { - sqlite3_vfs *pVfs; /* The VFS used to create pDbFd */ - sqlite3_file *pDbFd; /* File handle for the database file */ - sqlite3_file *pWalFd; /* File handle for WAL file */ - u32 iCallback; /* Value to pass to log callback (or 0) */ - i64 mxWalSize; /* Truncate WAL to this size upon reset */ - int nWiData; /* Size of array apWiData */ - int szFirstBlock; /* Size of first block written to WAL file */ - volatile u32 **apWiData; /* Pointer to wal-index content in memory */ - u32 szPage; /* Database page size */ - i16 readLock; /* Which read lock is being held. -1 for none */ - u8 syncFlags; /* Flags to use to sync header writes */ - u8 exclusiveMode; /* Non-zero if connection is in exclusive mode */ - u8 writeLock; /* True if in a write transaction */ - u8 ckptLock; /* True if holding a checkpoint lock */ - u8 readOnly; /* WAL_RDWR, WAL_RDONLY, or WAL_SHM_RDONLY */ - u8 truncateOnCommit; /* True to truncate WAL file on commit */ - u8 syncHeader; /* Fsync the WAL header if true */ - u8 padToSectorBoundary; /* Pad transactions out to the next sector */ - WalIndexHdr hdr; /* Wal-index header for current transaction */ - const char *zWalName; /* Name of WAL file */ - u32 nCkpt; /* Checkpoint sequence counter in the wal-header */ -#ifdef SQLITE_DEBUG - u8 lockError; /* True if a locking error has occurred */ -#endif -}; - -/* -** Candidate values for Wal.exclusiveMode. 
-*/ -#define WAL_NORMAL_MODE 0 -#define WAL_EXCLUSIVE_MODE 1 -#define WAL_HEAPMEMORY_MODE 2 - -/* -** Possible values for WAL.readOnly -*/ -#define WAL_RDWR 0 /* Normal read/write connection */ -#define WAL_RDONLY 1 /* The WAL file is readonly */ -#define WAL_SHM_RDONLY 2 /* The SHM file is readonly */ - -/* -** Each page of the wal-index mapping contains a hash-table made up of -** an array of HASHTABLE_NSLOT elements of the following type. -*/ -typedef u16 ht_slot; - -/* -** This structure is used to implement an iterator that loops through -** all frames in the WAL in database page order. Where two or more frames -** correspond to the same database page, the iterator visits only the -** frame most recently written to the WAL (in other words, the frame with -** the largest index). -** -** The internals of this structure are only accessed by: -** -** walIteratorInit() - Create a new iterator, -** walIteratorNext() - Step an iterator, -** walIteratorFree() - Free an iterator. -** -** This functionality is used by the checkpoint code (see walCheckpoint()). -*/ -struct WalIterator { - int iPrior; /* Last result returned from the iterator */ - int nSegment; /* Number of entries in aSegment[] */ - struct WalSegment { - int iNext; /* Next slot in aIndex[] not yet returned */ - ht_slot *aIndex; /* i0, i1, i2... such that aPgno[iN] ascend */ - u32 *aPgno; /* Array of page numbers. */ - int nEntry; /* Nr. of entries in aPgno[] and aIndex[] */ - int iZero; /* Frame number associated with aPgno[0] */ - } aSegment[1]; /* One for every 32KB page in the wal-index */ -}; - -/* -** Define the parameters of the hash tables in the wal-index file. There -** is a hash-table following every HASHTABLE_NPAGE page numbers in the -** wal-index. -** -** Changing any of these constants will alter the wal-index format and -** create incompatibilities. -*/ -#define HASHTABLE_NPAGE 4096 /* Must be power of 2 */ -#define HASHTABLE_HASH_1 383 /* Should be prime */ -#define HASHTABLE_NSLOT (HASHTABLE_NPAGE*2) /* Must be a power of 2 */ - -/* -** The block of page numbers associated with the first hash-table in a -** wal-index is smaller than usual. This is so that there is a complete -** hash-table on each aligned 32KB page of the wal-index. -*/ -#define HASHTABLE_NPAGE_ONE (HASHTABLE_NPAGE - (WALINDEX_HDR_SIZE/sizeof(u32))) - -/* The wal-index is divided into pages of WALINDEX_PGSZ bytes each. */ -#define WALINDEX_PGSZ ( \ - sizeof(ht_slot)*HASHTABLE_NSLOT + HASHTABLE_NPAGE*sizeof(u32) \ -) - -/* -** Obtain a pointer to the iPage'th page of the wal-index. The wal-index -** is broken into pages of WALINDEX_PGSZ bytes. Wal-index pages are -** numbered from zero. -** -** If this call is successful, *ppPage is set to point to the wal-index -** page and SQLITE_OK is returned. If an error (an OOM or VFS error) occurs, -** then an SQLite error code is returned and *ppPage is set to 0. 
-*/ -static int walIndexPage(Wal *pWal, int iPage, volatile u32 **ppPage){ - int rc = SQLITE_OK; - - /* Enlarge the pWal->apWiData[] array if required */ - if( pWal->nWiData<=iPage ){ - int nByte = sizeof(u32*)*(iPage+1); - volatile u32 **apNew; - apNew = (volatile u32 **)sqlite3_realloc((void *)pWal->apWiData, nByte); - if( !apNew ){ - *ppPage = 0; - return SQLITE_NOMEM; - } - memset((void*)&apNew[pWal->nWiData], 0, - sizeof(u32*)*(iPage+1-pWal->nWiData)); - pWal->apWiData = apNew; - pWal->nWiData = iPage+1; - } - - /* Request a pointer to the required page from the VFS */ - if( pWal->apWiData[iPage]==0 ){ - if( pWal->exclusiveMode==WAL_HEAPMEMORY_MODE ){ - pWal->apWiData[iPage] = (u32 volatile *)sqlite3MallocZero(WALINDEX_PGSZ); - if( !pWal->apWiData[iPage] ) rc = SQLITE_NOMEM; - }else{ - rc = sqlite3OsShmMap(pWal->pDbFd, iPage, WALINDEX_PGSZ, - pWal->writeLock, (void volatile **)&pWal->apWiData[iPage] - ); - if( rc==SQLITE_READONLY ){ - pWal->readOnly |= WAL_SHM_RDONLY; - rc = SQLITE_OK; - } - } - } - - *ppPage = pWal->apWiData[iPage]; - assert( iPage==0 || *ppPage || rc!=SQLITE_OK ); - return rc; -} - -/* -** Return a pointer to the WalCkptInfo structure in the wal-index. -*/ -static volatile WalCkptInfo *walCkptInfo(Wal *pWal){ - assert( pWal->nWiData>0 && pWal->apWiData[0] ); - return (volatile WalCkptInfo*)&(pWal->apWiData[0][sizeof(WalIndexHdr)/2]); -} - -/* -** Return a pointer to the WalIndexHdr structure in the wal-index. -*/ -static volatile WalIndexHdr *walIndexHdr(Wal *pWal){ - assert( pWal->nWiData>0 && pWal->apWiData[0] ); - return (volatile WalIndexHdr*)pWal->apWiData[0]; -} - -/* -** The argument to this macro must be of type u32. On a little-endian -** architecture, it returns the u32 value that results from interpreting -** the 4 bytes as a big-endian value. On a big-endian architecture, it -** returns the value that would be produced by intepreting the 4 bytes -** of the input value as a little-endian integer. -*/ -#define BYTESWAP32(x) ( \ - (((x)&0x000000FF)<<24) + (((x)&0x0000FF00)<<8) \ - + (((x)&0x00FF0000)>>8) + (((x)&0xFF000000)>>24) \ -) - -/* -** Generate or extend an 8 byte checksum based on the data in -** array aByte[] and the initial values of aIn[0] and aIn[1] (or -** initial values of 0 and 0 if aIn==NULL). -** -** The checksum is written back into aOut[] before returning. -** -** nByte must be a positive multiple of 8. -*/ -static void walChecksumBytes( - int nativeCksum, /* True for native byte-order, false for non-native */ - u8 *a, /* Content to be checksummed */ - int nByte, /* Bytes of content in a[]. Must be a multiple of 8. */ - const u32 *aIn, /* Initial checksum value input */ - u32 *aOut /* OUT: Final checksum value output */ -){ - u32 s1, s2; - u32 *aData = (u32 *)a; - u32 *aEnd = (u32 *)&a[nByte]; - - if( aIn ){ - s1 = aIn[0]; - s2 = aIn[1]; - }else{ - s1 = s2 = 0; - } - - assert( nByte>=8 ); - assert( (nByte&0x00000007)==0 ); - - if( nativeCksum ){ - do { - s1 += *aData++ + s2; - s2 += *aData++ + s1; - }while( aData<aEnd ); - }else{ - do { - s1 += BYTESWAP32(aData[0]) + s2; - s2 += BYTESWAP32(aData[1]) + s1; - aData += 2; - }while( aData<aEnd ); - } - - aOut[0] = s1; - aOut[1] = s2; -} - -static void walShmBarrier(Wal *pWal){ - if( pWal->exclusiveMode!=WAL_HEAPMEMORY_MODE ){ - sqlite3OsShmBarrier(pWal->pDbFd); - } -} - -/* -** Write the header information in pWal->hdr into the wal-index. -** -** The checksum on pWal->hdr is updated before it is written.
-*/ -static void walIndexWriteHdr(Wal *pWal){ - volatile WalIndexHdr *aHdr = walIndexHdr(pWal); - const int nCksum = offsetof(WalIndexHdr, aCksum); - - assert( pWal->writeLock ); - pWal->hdr.isInit = 1; - pWal->hdr.iVersion = WALINDEX_MAX_VERSION; - walChecksumBytes(1, (u8*)&pWal->hdr, nCksum, 0, pWal->hdr.aCksum); - memcpy((void *)&aHdr[1], (void *)&pWal->hdr, sizeof(WalIndexHdr)); - walShmBarrier(pWal); - memcpy((void *)&aHdr[0], (void *)&pWal->hdr, sizeof(WalIndexHdr)); -} - -/* -** This function encodes a single frame header and writes it to a buffer -** supplied by the caller. A frame-header is made up of a series of -** 4-byte big-endian integers, as follows: -** -** 0: Page number. -** 4: For commit records, the size of the database image in pages -** after the commit. For all other records, zero. -** 8: Salt-1 (copied from the wal-header) -** 12: Salt-2 (copied from the wal-header) -** 16: Checksum-1. -** 20: Checksum-2. -*/ -static void walEncodeFrame( - Wal *pWal, /* The write-ahead log */ - u32 iPage, /* Database page number for frame */ - u32 nTruncate, /* New db size (or 0 for non-commit frames) */ - u8 *aData, /* Pointer to page data */ - u8 *aFrame /* OUT: Write encoded frame here */ -){ - int nativeCksum; /* True for native byte-order checksums */ - u32 *aCksum = pWal->hdr.aFrameCksum; - assert( WAL_FRAME_HDRSIZE==24 ); - sqlite3Put4byte(&aFrame[0], iPage); - sqlite3Put4byte(&aFrame[4], nTruncate); - memcpy(&aFrame[8], pWal->hdr.aSalt, 8); - - nativeCksum = (pWal->hdr.bigEndCksum==SQLITE_BIGENDIAN); - walChecksumBytes(nativeCksum, aFrame, 8, aCksum, aCksum); - walChecksumBytes(nativeCksum, aData, pWal->szPage, aCksum, aCksum); - - sqlite3Put4byte(&aFrame[16], aCksum[0]); - sqlite3Put4byte(&aFrame[20], aCksum[1]); -} - -/* -** Check to see if the frame with header in aFrame[] and content -** in aData[] is valid. If it is a valid frame, fill *piPage and -** *pnTruncate and return true. Return if the frame is not valid. -*/ -static int walDecodeFrame( - Wal *pWal, /* The write-ahead log */ - u32 *piPage, /* OUT: Database page number for frame */ - u32 *pnTruncate, /* OUT: New db size (or 0 if not commit) */ - u8 *aData, /* Pointer to page data (for checksum) */ - u8 *aFrame /* Frame data */ -){ - int nativeCksum; /* True for native byte-order checksums */ - u32 *aCksum = pWal->hdr.aFrameCksum; - u32 pgno; /* Page number of the frame */ - assert( WAL_FRAME_HDRSIZE==24 ); - - /* A frame is only valid if the salt values in the frame-header - ** match the salt values in the wal-header. - */ - if( memcmp(&pWal->hdr.aSalt, &aFrame[8], 8)!=0 ){ - return 0; - } - - /* A frame is only valid if the page number is creater than zero. - */ - pgno = sqlite3Get4byte(&aFrame[0]); - if( pgno==0 ){ - return 0; - } - - /* A frame is only valid if a checksum of the WAL header, - ** all prior frams, the first 16 bytes of this frame-header, - ** and the frame-data matches the checksum in the last 8 - ** bytes of this frame-header. - */ - nativeCksum = (pWal->hdr.bigEndCksum==SQLITE_BIGENDIAN); - walChecksumBytes(nativeCksum, aFrame, 8, aCksum, aCksum); - walChecksumBytes(nativeCksum, aData, pWal->szPage, aCksum, aCksum); - if( aCksum[0]!=sqlite3Get4byte(&aFrame[16]) - || aCksum[1]!=sqlite3Get4byte(&aFrame[20]) - ){ - /* Checksum failed. */ - return 0; - } - - /* If we reach this point, the frame is valid. Return the page number - ** and the new database size. 
- */ - *piPage = pgno; - *pnTruncate = sqlite3Get4byte(&aFrame[4]); - return 1; -} - - -#if defined(SQLITE_TEST) && defined(SQLITE_DEBUG) -/* -** Names of locks. This routine is used to provide debugging output and is not -** a part of an ordinary build. -*/ -static const char *walLockName(int lockIdx){ - if( lockIdx==WAL_WRITE_LOCK ){ - return "WRITE-LOCK"; - }else if( lockIdx==WAL_CKPT_LOCK ){ - return "CKPT-LOCK"; - }else if( lockIdx==WAL_RECOVER_LOCK ){ - return "RECOVER-LOCK"; - }else{ - static char zName[15]; - sqlite3_snprintf(sizeof(zName), zName, "READ-LOCK[%d]", - lockIdx-WAL_READ_LOCK(0)); - return zName; - } -} -#endif /*defined(SQLITE_TEST) || defined(SQLITE_DEBUG) */ - - -/* -** Set or release locks on the WAL. Locks are either shared or exclusive. -** A lock cannot be moved directly between shared and exclusive - it must go -** through the unlocked state first. -** -** In locking_mode=EXCLUSIVE, all of these routines become no-ops. -*/ -static int walLockShared(Wal *pWal, int lockIdx){ - int rc; - if( pWal->exclusiveMode ) return SQLITE_OK; - rc = sqlite3OsShmLock(pWal->pDbFd, lockIdx, 1, - SQLITE_SHM_LOCK | SQLITE_SHM_SHARED); - WALTRACE(("WAL%p: acquire SHARED-%s %s\n", pWal, - walLockName(lockIdx), rc ? "failed" : "ok")); - VVA_ONLY( pWal->lockError = (u8)(rc!=SQLITE_OK && rc!=SQLITE_BUSY); ) - return rc; -} -static void walUnlockShared(Wal *pWal, int lockIdx){ - if( pWal->exclusiveMode ) return; - (void)sqlite3OsShmLock(pWal->pDbFd, lockIdx, 1, - SQLITE_SHM_UNLOCK | SQLITE_SHM_SHARED); - WALTRACE(("WAL%p: release SHARED-%s\n", pWal, walLockName(lockIdx))); -} -static int walLockExclusive(Wal *pWal, int lockIdx, int n){ - int rc; - if( pWal->exclusiveMode ) return SQLITE_OK; - rc = sqlite3OsShmLock(pWal->pDbFd, lockIdx, n, - SQLITE_SHM_LOCK | SQLITE_SHM_EXCLUSIVE); - WALTRACE(("WAL%p: acquire EXCLUSIVE-%s cnt=%d %s\n", pWal, - walLockName(lockIdx), n, rc ? "failed" : "ok")); - VVA_ONLY( pWal->lockError = (u8)(rc!=SQLITE_OK && rc!=SQLITE_BUSY); ) - return rc; -} -static void walUnlockExclusive(Wal *pWal, int lockIdx, int n){ - if( pWal->exclusiveMode ) return; - (void)sqlite3OsShmLock(pWal->pDbFd, lockIdx, n, - SQLITE_SHM_UNLOCK | SQLITE_SHM_EXCLUSIVE); - WALTRACE(("WAL%p: release EXCLUSIVE-%s cnt=%d\n", pWal, - walLockName(lockIdx), n)); -} - -/* -** Compute a hash on a page number. The resulting hash value must land -** between 0 and (HASHTABLE_NSLOT-1). The walHashNext() function advances -** the hash to the next value in the event of a collision. -*/ -static int walHash(u32 iPage){ - assert( iPage>0 ); - assert( (HASHTABLE_NSLOT & (HASHTABLE_NSLOT-1))==0 ); - return (iPage*HASHTABLE_HASH_1) & (HASHTABLE_NSLOT-1); -} -static int walNextHash(int iPriorHash){ - return (iPriorHash+1)&(HASHTABLE_NSLOT-1); -} - -/* -** Return pointers to the hash table and page number array stored on -** page iHash of the wal-index. The wal-index is broken into 32KB pages -** numbered starting from 0. -** -** Set output variable *paHash to point to the start of the hash table -** in the wal-index file. Set *piZero to one less than the frame -** number of the first frame indexed by this hash table. If a -** slot in the hash table is set to N, it refers to frame number -** (*piZero+N) in the log. -** -** Finally, set *paPgno so that *paPgno[1] is the page number of the -** first frame indexed by the hash table, frame (*piZero+1). 
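walHash() and walNextHash() above implement a multiplicative hash of the page number followed by linear probing within a power-of-two table. Below is a self-contained sketch of that scheme; the constant values are stand-ins for HASHTABLE_HASH_1 and HASHTABLE_NSLOT, which are defined earlier in the amalgamation and do not appear in this hunk.

#include <stdint.h>

#define SKETCH_NSLOT  8192   /* stand-in for HASHTABLE_NSLOT; must be a power of two */
#define SKETCH_HASH_1 383    /* stand-in for HASHTABLE_HASH_1 */

/* First probe position for a page number. */
static int sketch_hash(uint32_t iPage){
  return (int)((iPage*SKETCH_HASH_1) & (SKETCH_NSLOT-1));
}

/* Next probe position after a collision (simple linear probing). */
static int sketch_next_hash(int iPriorHash){
  return (iPriorHash+1) & (SKETCH_NSLOT-1);
}

Keeping the slot count a power of two lets the mask replace a modulo, which matters because this probe loop runs on every page lookup a reader performs.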
-*/ -static int walHashGet( - Wal *pWal, /* WAL handle */ - int iHash, /* Find the iHash'th table */ - volatile ht_slot **paHash, /* OUT: Pointer to hash index */ - volatile u32 **paPgno, /* OUT: Pointer to page number array */ - u32 *piZero /* OUT: Frame associated with *paPgno[0] */ -){ - int rc; /* Return code */ - volatile u32 *aPgno; - - rc = walIndexPage(pWal, iHash, &aPgno); - assert( rc==SQLITE_OK || iHash>0 ); - - if( rc==SQLITE_OK ){ - u32 iZero; - volatile ht_slot *aHash; - - aHash = (volatile ht_slot *)&aPgno[HASHTABLE_NPAGE]; - if( iHash==0 ){ - aPgno = &aPgno[WALINDEX_HDR_SIZE/sizeof(u32)]; - iZero = 0; - }else{ - iZero = HASHTABLE_NPAGE_ONE + (iHash-1)*HASHTABLE_NPAGE; - } - - *paPgno = &aPgno[-1]; - *paHash = aHash; - *piZero = iZero; - } - return rc; -} - -/* -** Return the number of the wal-index page that contains the hash-table -** and page-number array that contain entries corresponding to WAL frame -** iFrame. The wal-index is broken up into 32KB pages. Wal-index pages -** are numbered starting from 0. -*/ -static int walFramePage(u32 iFrame){ - int iHash = (iFrame+HASHTABLE_NPAGE-HASHTABLE_NPAGE_ONE-1) / HASHTABLE_NPAGE; - assert( (iHash==0 || iFrame>HASHTABLE_NPAGE_ONE) - && (iHash>=1 || iFrame<=HASHTABLE_NPAGE_ONE) - && (iHash<=1 || iFrame>(HASHTABLE_NPAGE_ONE+HASHTABLE_NPAGE)) - && (iHash>=2 || iFrame<=HASHTABLE_NPAGE_ONE+HASHTABLE_NPAGE) - && (iHash<=2 || iFrame>(HASHTABLE_NPAGE_ONE+2*HASHTABLE_NPAGE)) - ); - return iHash; -} - -/* -** Return the page number associated with frame iFrame in this WAL. -*/ -static u32 walFramePgno(Wal *pWal, u32 iFrame){ - int iHash = walFramePage(iFrame); - if( iHash==0 ){ - return pWal->apWiData[0][WALINDEX_HDR_SIZE/sizeof(u32) + iFrame - 1]; - } - return pWal->apWiData[iHash][(iFrame-1-HASHTABLE_NPAGE_ONE)%HASHTABLE_NPAGE]; -} - -/* -** Remove entries from the hash table that point to WAL slots greater -** than pWal->hdr.mxFrame. -** -** This function is called whenever pWal->hdr.mxFrame is decreased due -** to a rollback or savepoint. -** -** At most only the hash table containing pWal->hdr.mxFrame needs to be -** updated. Any later hash tables will be automatically cleared when -** pWal->hdr.mxFrame advances to the point where those hash tables are -** actually needed. -*/ -static void walCleanupHash(Wal *pWal){ - volatile ht_slot *aHash = 0; /* Pointer to hash table to clear */ - volatile u32 *aPgno = 0; /* Page number array for hash table */ - u32 iZero = 0; /* frame == (aHash[x]+iZero) */ - int iLimit = 0; /* Zero values greater than this */ - int nByte; /* Number of bytes to zero in aPgno[] */ - int i; /* Used to iterate through aHash[] */ - - assert( pWal->writeLock ); - testcase( pWal->hdr.mxFrame==HASHTABLE_NPAGE_ONE-1 ); - testcase( pWal->hdr.mxFrame==HASHTABLE_NPAGE_ONE ); - testcase( pWal->hdr.mxFrame==HASHTABLE_NPAGE_ONE+1 ); - - if( pWal->hdr.mxFrame==0 ) return; - - /* Obtain pointers to the hash-table and page-number array containing - ** the entry that corresponds to frame pWal->hdr.mxFrame. It is guaranteed - ** that the page said hash-table and array reside on is already mapped. - */ - assert( pWal->nWiData>walFramePage(pWal->hdr.mxFrame) ); - assert( pWal->apWiData[walFramePage(pWal->hdr.mxFrame)] ); - walHashGet(pWal, walFramePage(pWal->hdr.mxFrame), &aHash, &aPgno, &iZero); - - /* Zero all hash-table entries that correspond to frame numbers greater - ** than pWal->hdr.mxFrame. 
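walFramePage() above maps a frame number onto a wal-index page, accounting for the fact that page 0 also carries the wal-index header and therefore indexes fewer frames than later pages. The sketch below restates that arithmetic with illustrative constants standing in for HASHTABLE_NPAGE and HASHTABLE_NPAGE_ONE, which are defined earlier in the file.

#include <stdint.h>

#define SKETCH_NPAGE      4096   /* frames indexed per later wal-index page (illustrative) */
#define SKETCH_NPAGE_ONE  4062   /* frames indexed by page 0 (illustrative; smaller because
                                 ** page 0 also holds the wal-index header) */

/* Which wal-index page holds the hash entry for frame iFrame (1-based)? */
static int frame_page_sketch(uint32_t iFrame){
  if( iFrame<=SKETCH_NPAGE_ONE ) return 0;
  return 1 + (int)((iFrame - SKETCH_NPAGE_ONE - 1) / SKETCH_NPAGE);
}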
- */ - iLimit = pWal->hdr.mxFrame - iZero; - assert( iLimit>0 ); - for(i=0; iiLimit ){ - aHash[i] = 0; - } - } - - /* Zero the entries in the aPgno array that correspond to frames with - ** frame numbers greater than pWal->hdr.mxFrame. - */ - nByte = (int)((char *)aHash - (char *)&aPgno[iLimit+1]); - memset((void *)&aPgno[iLimit+1], 0, nByte); - -#ifdef SQLITE_ENABLE_EXPENSIVE_ASSERT - /* Verify that the every entry in the mapping region is still reachable - ** via the hash table even after the cleanup. - */ - if( iLimit ){ - int i; /* Loop counter */ - int iKey; /* Hash key */ - for(i=1; i<=iLimit; i++){ - for(iKey=walHash(aPgno[i]); aHash[iKey]; iKey=walNextHash(iKey)){ - if( aHash[iKey]==i ) break; - } - assert( aHash[iKey]==i ); - } - } -#endif /* SQLITE_ENABLE_EXPENSIVE_ASSERT */ -} - - -/* -** Set an entry in the wal-index that will map database page number -** pPage into WAL frame iFrame. -*/ -static int walIndexAppend(Wal *pWal, u32 iFrame, u32 iPage){ - int rc; /* Return code */ - u32 iZero = 0; /* One less than frame number of aPgno[1] */ - volatile u32 *aPgno = 0; /* Page number array */ - volatile ht_slot *aHash = 0; /* Hash table */ - - rc = walHashGet(pWal, walFramePage(iFrame), &aHash, &aPgno, &iZero); - - /* Assuming the wal-index file was successfully mapped, populate the - ** page number array and hash table entry. - */ - if( rc==SQLITE_OK ){ - int iKey; /* Hash table key */ - int idx; /* Value to write to hash-table slot */ - int nCollide; /* Number of hash collisions */ - - idx = iFrame - iZero; - assert( idx <= HASHTABLE_NSLOT/2 + 1 ); - - /* If this is the first entry to be added to this hash-table, zero the - ** entire hash table and aPgno[] array before proceding. - */ - if( idx==1 ){ - int nByte = (int)((u8 *)&aHash[HASHTABLE_NSLOT] - (u8 *)&aPgno[1]); - memset((void*)&aPgno[1], 0, nByte); - } - - /* If the entry in aPgno[] is already set, then the previous writer - ** must have exited unexpectedly in the middle of a transaction (after - ** writing one or more dirty pages to the WAL to free up memory). - ** Remove the remnants of that writers uncommitted transaction from - ** the hash-table before writing any new entries. - */ - if( aPgno[idx] ){ - walCleanupHash(pWal); - assert( !aPgno[idx] ); - } - - /* Write the aPgno[] array entry and the hash-table slot. */ - nCollide = idx; - for(iKey=walHash(iPage); aHash[iKey]; iKey=walNextHash(iKey)){ - if( (nCollide--)==0 ) return SQLITE_CORRUPT_BKPT; - } - aPgno[idx] = iPage; - aHash[iKey] = (ht_slot)idx; - -#ifdef SQLITE_ENABLE_EXPENSIVE_ASSERT - /* Verify that the number of entries in the hash table exactly equals - ** the number of entries in the mapping region. 
- */ - { - int i; /* Loop counter */ - int nEntry = 0; /* Number of entries in the hash table */ - for(i=0; ickptLock==1 || pWal->ckptLock==0 ); - assert( WAL_ALL_BUT_WRITE==WAL_WRITE_LOCK+1 ); - assert( WAL_CKPT_LOCK==WAL_ALL_BUT_WRITE ); - assert( pWal->writeLock ); - iLock = WAL_ALL_BUT_WRITE + pWal->ckptLock; - nLock = SQLITE_SHM_NLOCK - iLock; - rc = walLockExclusive(pWal, iLock, nLock); - if( rc ){ - return rc; - } - WALTRACE(("WAL%p: recovery begin...\n", pWal)); - - memset(&pWal->hdr, 0, sizeof(WalIndexHdr)); - - rc = sqlite3OsFileSize(pWal->pWalFd, &nSize); - if( rc!=SQLITE_OK ){ - goto recovery_error; - } - - if( nSize>WAL_HDRSIZE ){ - u8 aBuf[WAL_HDRSIZE]; /* Buffer to load WAL header into */ - u8 *aFrame = 0; /* Malloc'd buffer to load entire frame */ - int szFrame; /* Number of bytes in buffer aFrame[] */ - u8 *aData; /* Pointer to data part of aFrame buffer */ - int iFrame; /* Index of last frame read */ - i64 iOffset; /* Next offset to read from log file */ - int szPage; /* Page size according to the log */ - u32 magic; /* Magic value read from WAL header */ - u32 version; /* Magic value read from WAL header */ - int isValid; /* True if this frame is valid */ - - /* Read in the WAL header. */ - rc = sqlite3OsRead(pWal->pWalFd, aBuf, WAL_HDRSIZE, 0); - if( rc!=SQLITE_OK ){ - goto recovery_error; - } - - /* If the database page size is not a power of two, or is greater than - ** SQLITE_MAX_PAGE_SIZE, conclude that the WAL file contains no valid - ** data. Similarly, if the 'magic' value is invalid, ignore the whole - ** WAL file. - */ - magic = sqlite3Get4byte(&aBuf[0]); - szPage = sqlite3Get4byte(&aBuf[8]); - if( (magic&0xFFFFFFFE)!=WAL_MAGIC - || szPage&(szPage-1) - || szPage>SQLITE_MAX_PAGE_SIZE - || szPage<512 - ){ - goto finished; - } - pWal->hdr.bigEndCksum = (u8)(magic&0x00000001); - pWal->szPage = szPage; - pWal->nCkpt = sqlite3Get4byte(&aBuf[12]); - memcpy(&pWal->hdr.aSalt, &aBuf[16], 8); - - /* Verify that the WAL header checksum is correct */ - walChecksumBytes(pWal->hdr.bigEndCksum==SQLITE_BIGENDIAN, - aBuf, WAL_HDRSIZE-2*4, 0, pWal->hdr.aFrameCksum - ); - if( pWal->hdr.aFrameCksum[0]!=sqlite3Get4byte(&aBuf[24]) - || pWal->hdr.aFrameCksum[1]!=sqlite3Get4byte(&aBuf[28]) - ){ - goto finished; - } - - /* Verify that the version number on the WAL format is one that - ** are able to understand */ - version = sqlite3Get4byte(&aBuf[4]); - if( version!=WAL_MAX_VERSION ){ - rc = SQLITE_CANTOPEN_BKPT; - goto finished; - } - - /* Malloc a buffer to read frames into. */ - szFrame = szPage + WAL_FRAME_HDRSIZE; - aFrame = (u8 *)sqlite3_malloc(szFrame); - if( !aFrame ){ - rc = SQLITE_NOMEM; - goto recovery_error; - } - aData = &aFrame[WAL_FRAME_HDRSIZE]; - - /* Read all frames from the log file. */ - iFrame = 0; - for(iOffset=WAL_HDRSIZE; (iOffset+szFrame)<=nSize; iOffset+=szFrame){ - u32 pgno; /* Database page number for frame */ - u32 nTruncate; /* dbsize field from frame header */ - - /* Read and decode the next log frame. */ - iFrame++; - rc = sqlite3OsRead(pWal->pWalFd, aFrame, szFrame, iOffset); - if( rc!=SQLITE_OK ) break; - isValid = walDecodeFrame(pWal, &pgno, &nTruncate, aData, aFrame); - if( !isValid ) break; - rc = walIndexAppend(pWal, iFrame, pgno); - if( rc!=SQLITE_OK ) break; - - /* If nTruncate is non-zero, this is a commit record. 
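Recovery above starts by sanity-checking the 32-byte WAL file header: a magic number whose low bit selects the checksum byte order, a page size that must be a power of two within bounds, and a header checksum. The sketch below covers only the magic and page-size checks; the magic constant is an illustrative stand-in for WAL_MAGIC, defined earlier in the file, and the header checksum verification is omitted.

#include <stdint.h>

#define SKETCH_WAL_MAGIC 0x377f0682u  /* stand-in for WAL_MAGIC; low bit = big-endian checksums */

static uint32_t get32be_sketch(const uint8_t *p){
  return ((uint32_t)p[0]<<24)|((uint32_t)p[1]<<16)|((uint32_t)p[2]<<8)|(uint32_t)p[3];
}

/* Return 1 if the leading bytes of a WAL header pass the basic checks
** performed above, 0 otherwise. */
static int wal_hdr_plausible_sketch(const uint8_t aBuf[32], uint32_t mxPageSize){
  uint32_t magic  = get32be_sketch(&aBuf[0]);
  uint32_t szPage = get32be_sketch(&aBuf[8]);
  if( (magic & 0xFFFFFFFEu)!=SKETCH_WAL_MAGIC ) return 0;
  if( szPage<512 || szPage>mxPageSize ) return 0;
  if( szPage & (szPage-1) ) return 0;    /* not a power of two */
  return 1;
}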
*/ - if( nTruncate ){ - pWal->hdr.mxFrame = iFrame; - pWal->hdr.nPage = nTruncate; - pWal->hdr.szPage = (u16)((szPage&0xff00) | (szPage>>16)); - testcase( szPage<=32768 ); - testcase( szPage>=65536 ); - aFrameCksum[0] = pWal->hdr.aFrameCksum[0]; - aFrameCksum[1] = pWal->hdr.aFrameCksum[1]; - } - } - - sqlite3_free(aFrame); - } - -finished: - if( rc==SQLITE_OK ){ - volatile WalCkptInfo *pInfo; - int i; - pWal->hdr.aFrameCksum[0] = aFrameCksum[0]; - pWal->hdr.aFrameCksum[1] = aFrameCksum[1]; - walIndexWriteHdr(pWal); - - /* Reset the checkpoint-header. This is safe because this thread is - ** currently holding locks that exclude all other readers, writers and - ** checkpointers. - */ - pInfo = walCkptInfo(pWal); - pInfo->nBackfill = 0; - pInfo->aReadMark[0] = 0; - for(i=1; iaReadMark[i] = READMARK_NOT_USED; - if( pWal->hdr.mxFrame ) pInfo->aReadMark[1] = pWal->hdr.mxFrame; - - /* If more than one frame was recovered from the log file, report an - ** event via sqlite3_log(). This is to help with identifying performance - ** problems caused by applications routinely shutting down without - ** checkpointing the log file. - */ - if( pWal->hdr.nPage ){ - sqlite3_log(SQLITE_NOTICE_RECOVER_WAL, - "recovered %d frames from WAL file %s", - pWal->hdr.mxFrame, pWal->zWalName - ); - } - } - -recovery_error: - WALTRACE(("WAL%p: recovery %s\n", pWal, rc ? "failed" : "ok")); - walUnlockExclusive(pWal, iLock, nLock); - return rc; -} - -/* -** Close an open wal-index. -*/ -static void walIndexClose(Wal *pWal, int isDelete){ - if( pWal->exclusiveMode==WAL_HEAPMEMORY_MODE ){ - int i; - for(i=0; inWiData; i++){ - sqlite3_free((void *)pWal->apWiData[i]); - pWal->apWiData[i] = 0; - } - }else{ - sqlite3OsShmUnmap(pWal->pDbFd, isDelete); - } -} - -/* -** Open a connection to the WAL file zWalName. The database file must -** already be opened on connection pDbFd. The buffer that zWalName points -** to must remain valid for the lifetime of the returned Wal* handle. -** -** A SHARED lock should be held on the database file when this function -** is called. The purpose of this SHARED lock is to prevent any other -** client from unlinking the WAL or wal-index file. If another process -** were to do this just after this client opened one of these files, the -** system would be badly broken. -** -** If the log file is successfully opened, SQLITE_OK is returned and -** *ppWal is set to point to a new WAL handle. If an error occurs, -** an SQLite error code is returned and *ppWal is left unmodified. -*/ -SQLITE_PRIVATE int sqlite3WalOpen( - sqlite3_vfs *pVfs, /* vfs module to open wal and wal-index */ - sqlite3_file *pDbFd, /* The open database file */ - const char *zWalName, /* Name of the WAL file */ - int bNoShm, /* True to run in heap-memory mode */ - i64 mxWalSize, /* Truncate WAL to this size on reset */ - Wal **ppWal /* OUT: Allocated Wal handle */ -){ - int rc; /* Return Code */ - Wal *pRet; /* Object to allocate and return */ - int flags; /* Flags passed to OsOpen() */ - - assert( zWalName && zWalName[0] ); - assert( pDbFd ); - - /* In the amalgamation, the os_unix.c and os_win.c source files come before - ** this source file. Verify that the #defines of the locking byte offsets - ** in os_unix.c and os_win.c agree with the WALINDEX_LOCK_OFFSET value. - */ -#ifdef WIN_SHM_BASE - assert( WIN_SHM_BASE==WALINDEX_LOCK_OFFSET ); -#endif -#ifdef UNIX_SHM_BASE - assert( UNIX_SHM_BASE==WALINDEX_LOCK_OFFSET ); -#endif - - - /* Allocate an instance of struct Wal to return. 
*/ - *ppWal = 0; - pRet = (Wal*)sqlite3MallocZero(sizeof(Wal) + pVfs->szOsFile); - if( !pRet ){ - return SQLITE_NOMEM; - } - - pRet->pVfs = pVfs; - pRet->pWalFd = (sqlite3_file *)&pRet[1]; - pRet->pDbFd = pDbFd; - pRet->readLock = -1; - pRet->mxWalSize = mxWalSize; - pRet->zWalName = zWalName; - pRet->syncHeader = 1; - pRet->padToSectorBoundary = 1; - pRet->exclusiveMode = (bNoShm ? WAL_HEAPMEMORY_MODE: WAL_NORMAL_MODE); - - /* Open file handle on the write-ahead log file. */ - flags = (SQLITE_OPEN_READWRITE|SQLITE_OPEN_CREATE|SQLITE_OPEN_WAL); - rc = sqlite3OsOpen(pVfs, zWalName, pRet->pWalFd, flags, &flags); - if( rc==SQLITE_OK && flags&SQLITE_OPEN_READONLY ){ - pRet->readOnly = WAL_RDONLY; - } - - if( rc!=SQLITE_OK ){ - walIndexClose(pRet, 0); - sqlite3OsClose(pRet->pWalFd); - sqlite3_free(pRet); - }else{ - int iDC = sqlite3OsDeviceCharacteristics(pDbFd); - if( iDC & SQLITE_IOCAP_SEQUENTIAL ){ pRet->syncHeader = 0; } - if( iDC & SQLITE_IOCAP_POWERSAFE_OVERWRITE ){ - pRet->padToSectorBoundary = 0; - } - *ppWal = pRet; - WALTRACE(("WAL%d: opened\n", pRet)); - } - return rc; -} - -/* -** Change the size to which the WAL file is trucated on each reset. -*/ -SQLITE_PRIVATE void sqlite3WalLimit(Wal *pWal, i64 iLimit){ - if( pWal ) pWal->mxWalSize = iLimit; -} - -/* -** Find the smallest page number out of all pages held in the WAL that -** has not been returned by any prior invocation of this method on the -** same WalIterator object. Write into *piFrame the frame index where -** that page was last written into the WAL. Write into *piPage the page -** number. -** -** Return 0 on success. If there are no pages in the WAL with a page -** number larger than *piPage, then return 1. -*/ -static int walIteratorNext( - WalIterator *p, /* Iterator */ - u32 *piPage, /* OUT: The page number of the next page */ - u32 *piFrame /* OUT: Wal frame index of next page */ -){ - u32 iMin; /* Result pgno must be greater than iMin */ - u32 iRet = 0xFFFFFFFF; /* 0xffffffff is never a valid page number */ - int i; /* For looping through segments */ - - iMin = p->iPrior; - assert( iMin<0xffffffff ); - for(i=p->nSegment-1; i>=0; i--){ - struct WalSegment *pSegment = &p->aSegment[i]; - while( pSegment->iNextnEntry ){ - u32 iPg = pSegment->aPgno[pSegment->aIndex[pSegment->iNext]]; - if( iPg>iMin ){ - if( iPgiZero + pSegment->aIndex[pSegment->iNext]; - } - break; - } - pSegment->iNext++; - } - } - - *piPage = p->iPrior = iRet; - return (iRet==0xFFFFFFFF); -} - -/* -** This function merges two sorted lists into a single sorted list. -** -** aLeft[] and aRight[] are arrays of indices. The sort key is -** aContent[aLeft[]] and aContent[aRight[]]. Upon entry, the following -** is guaranteed for all J0 && nRight>0 ); - while( iRight=nRight || aContent[aLeft[iLeft]]=nLeft || aContent[aLeft[iLeft]]>dbpage ); - assert( iRight>=nRight || aContent[aRight[iRight]]>dbpage ); - } - - *paRight = aLeft; - *pnRight = iOut; - memcpy(aLeft, aTmp, sizeof(aTmp[0])*iOut); -} - -/* -** Sort the elements in list aList using aContent[] as the sort key. -** Remove elements with duplicate keys, preferring to keep the -** larger aList[] values. -** -** The aList[] entries are indices into aContent[]. 
The values in -** aList[] are to be sorted so that for all J0 ); - assert( HASHTABLE_NPAGE==(1<<(ArraySize(aSub)-1)) ); - - for(iList=0; iListaList && p->nList<=(1<aList==&aList[iList&~((2<aList, p->nList, &aMerge, &nMerge, aBuffer); - } - aSub[iSub].aList = aMerge; - aSub[iSub].nList = nMerge; - } - - for(iSub++; iSubnList<=(1<aList==&aList[nList&~((2<aList, p->nList, &aMerge, &nMerge, aBuffer); - } - } - assert( aMerge==aList ); - *pnList = nMerge; - -#ifdef SQLITE_DEBUG - { - int i; - for(i=1; i<*pnList; i++){ - assert( aContent[aList[i]] > aContent[aList[i-1]] ); - } - } -#endif -} - -/* -** Free an iterator allocated by walIteratorInit(). -*/ -static void walIteratorFree(WalIterator *p){ - sqlite3ScratchFree(p); -} - -/* -** Construct a WalInterator object that can be used to loop over all -** pages in the WAL in ascending order. The caller must hold the checkpoint -** lock. -** -** On success, make *pp point to the newly allocated WalInterator object -** return SQLITE_OK. Otherwise, return an error code. If this routine -** returns an error, the value of *pp is undefined. -** -** The calling routine should invoke walIteratorFree() to destroy the -** WalIterator object when it has finished with it. -*/ -static int walIteratorInit(Wal *pWal, WalIterator **pp){ - WalIterator *p; /* Return value */ - int nSegment; /* Number of segments to merge */ - u32 iLast; /* Last frame in log */ - int nByte; /* Number of bytes to allocate */ - int i; /* Iterator variable */ - ht_slot *aTmp; /* Temp space used by merge-sort */ - int rc = SQLITE_OK; /* Return Code */ - - /* This routine only runs while holding the checkpoint lock. And - ** it only runs if there is actually content in the log (mxFrame>0). - */ - assert( pWal->ckptLock && pWal->hdr.mxFrame>0 ); - iLast = pWal->hdr.mxFrame; - - /* Allocate space for the WalIterator object. */ - nSegment = walFramePage(iLast) + 1; - nByte = sizeof(WalIterator) - + (nSegment-1)*sizeof(struct WalSegment) - + iLast*sizeof(ht_slot); - p = (WalIterator *)sqlite3ScratchMalloc(nByte); - if( !p ){ - return SQLITE_NOMEM; - } - memset(p, 0, nByte); - p->nSegment = nSegment; - - /* Allocate temporary space used by the merge-sort routine. This block - ** of memory will be freed before this function returns. - */ - aTmp = (ht_slot *)sqlite3ScratchMalloc( - sizeof(ht_slot) * (iLast>HASHTABLE_NPAGE?HASHTABLE_NPAGE:iLast) - ); - if( !aTmp ){ - rc = SQLITE_NOMEM; - } - - for(i=0; rc==SQLITE_OK && iaSegment[p->nSegment])[iZero]; - iZero++; - - for(j=0; jaSegment[i].iZero = iZero; - p->aSegment[i].nEntry = nEntry; - p->aSegment[i].aIndex = aIndex; - p->aSegment[i].aPgno = (u32 *)aPgno; - } - } - sqlite3ScratchFree(aTmp); - - if( rc!=SQLITE_OK ){ - walIteratorFree(p); - } - *pp = p; - return rc; -} - -/* -** Attempt to obtain the exclusive WAL lock defined by parameters lockIdx and -** n. If the attempt fails and parameter xBusy is not NULL, then it is a -** busy-handler function. Invoke it and retry the lock until either the -** lock is successfully obtained or the busy-handler returns 0. -*/ -static int walBusyLock( - Wal *pWal, /* WAL connection */ - int (*xBusy)(void*), /* Function to call when busy */ - void *pBusyArg, /* Context argument for xBusyHandler */ - int lockIdx, /* Offset of first byte to lock */ - int n /* Number of bytes to lock */ -){ - int rc; - do { - rc = walLockExclusive(pWal, lockIdx, n); - }while( xBusy && rc==SQLITE_BUSY && xBusy(pBusyArg) ); - return rc; -} - -/* -** The cache of the wal-index header must be valid to call this function. 
-** Return the page-size in bytes used by the database. -*/ -static int walPagesize(Wal *pWal){ - return (pWal->hdr.szPage&0xfe00) + ((pWal->hdr.szPage&0x0001)<<16); -} - -/* -** Copy as much content as we can from the WAL back into the database file -** in response to an sqlite3_wal_checkpoint() request or the equivalent. -** -** The amount of information copies from WAL to database might be limited -** by active readers. This routine will never overwrite a database page -** that a concurrent reader might be using. -** -** All I/O barrier operations (a.k.a fsyncs) occur in this routine when -** SQLite is in WAL-mode in synchronous=NORMAL. That means that if -** checkpoints are always run by a background thread or background -** process, foreground threads will never block on a lengthy fsync call. -** -** Fsync is called on the WAL before writing content out of the WAL and -** into the database. This ensures that if the new content is persistent -** in the WAL and can be recovered following a power-loss or hard reset. -** -** Fsync is also called on the database file if (and only if) the entire -** WAL content is copied into the database file. This second fsync makes -** it safe to delete the WAL since the new content will persist in the -** database file. -** -** This routine uses and updates the nBackfill field of the wal-index header. -** This is the only routine tha will increase the value of nBackfill. -** (A WAL reset or recovery will revert nBackfill to zero, but not increase -** its value.) -** -** The caller must be holding sufficient locks to ensure that no other -** checkpoint is running (in any other thread or process) at the same -** time. -*/ -static int walCheckpoint( - Wal *pWal, /* Wal connection */ - int eMode, /* One of PASSIVE, FULL or RESTART */ - int (*xBusyCall)(void*), /* Function to call when busy */ - void *pBusyArg, /* Context argument for xBusyHandler */ - int sync_flags, /* Flags for OsSync() (or 0) */ - u8 *zBuf /* Temporary buffer to use */ -){ - int rc; /* Return code */ - int szPage; /* Database page-size */ - WalIterator *pIter = 0; /* Wal iterator context */ - u32 iDbpage = 0; /* Next database page to write */ - u32 iFrame = 0; /* Wal frame containing data for iDbpage */ - u32 mxSafeFrame; /* Max frame that can be backfilled */ - u32 mxPage; /* Max database page to write */ - int i; /* Loop counter */ - volatile WalCkptInfo *pInfo; /* The checkpoint status information */ - int (*xBusy)(void*) = 0; /* Function to call when waiting for locks */ - - szPage = walPagesize(pWal); - testcase( szPage<=32768 ); - testcase( szPage>=65536 ); - pInfo = walCkptInfo(pWal); - if( pInfo->nBackfill>=pWal->hdr.mxFrame ) return SQLITE_OK; - - /* Allocate the iterator */ - rc = walIteratorInit(pWal, &pIter); - if( rc!=SQLITE_OK ){ - return rc; - } - assert( pIter ); - - if( eMode!=SQLITE_CHECKPOINT_PASSIVE ) xBusy = xBusyCall; - - /* Compute in mxSafeFrame the index of the last frame of the WAL that is - ** safe to write into the database. Frames beyond mxSafeFrame might - ** overwrite database pages that are in use by active readers and thus - ** cannot be backfilled from the WAL. - */ - mxSafeFrame = pWal->hdr.mxFrame; - mxPage = pWal->hdr.nPage; - for(i=1; iaReadMark[i]; - if( mxSafeFrame>y ){ - assert( y<=pWal->hdr.mxFrame ); - rc = walBusyLock(pWal, xBusy, pBusyArg, WAL_READ_LOCK(i), 1); - if( rc==SQLITE_OK ){ - pInfo->aReadMark[i] = (i==1 ? 
mxSafeFrame : READMARK_NOT_USED); - walUnlockExclusive(pWal, WAL_READ_LOCK(i), 1); - }else if( rc==SQLITE_BUSY ){ - mxSafeFrame = y; - xBusy = 0; - }else{ - goto walcheckpoint_out; - } - } - } - - if( pInfo->nBackfillnBackfill; - - /* Sync the WAL to disk */ - if( sync_flags ){ - rc = sqlite3OsSync(pWal->pWalFd, sync_flags); - } - - /* If the database may grow as a result of this checkpoint, hint - ** about the eventual size of the db file to the VFS layer. - */ - if( rc==SQLITE_OK ){ - i64 nReq = ((i64)mxPage * szPage); - rc = sqlite3OsFileSize(pWal->pDbFd, &nSize); - if( rc==SQLITE_OK && nSizepDbFd, SQLITE_FCNTL_SIZE_HINT, &nReq); - } - } - - - /* Iterate through the contents of the WAL, copying data to the db file. */ - while( rc==SQLITE_OK && 0==walIteratorNext(pIter, &iDbpage, &iFrame) ){ - i64 iOffset; - assert( walFramePgno(pWal, iFrame)==iDbpage ); - if( iFrame<=nBackfill || iFrame>mxSafeFrame || iDbpage>mxPage ) continue; - iOffset = walFrameOffset(iFrame, szPage) + WAL_FRAME_HDRSIZE; - /* testcase( IS_BIG_INT(iOffset) ); // requires a 4GiB WAL file */ - rc = sqlite3OsRead(pWal->pWalFd, zBuf, szPage, iOffset); - if( rc!=SQLITE_OK ) break; - iOffset = (iDbpage-1)*(i64)szPage; - testcase( IS_BIG_INT(iOffset) ); - rc = sqlite3OsWrite(pWal->pDbFd, zBuf, szPage, iOffset); - if( rc!=SQLITE_OK ) break; - } - - /* If work was actually accomplished... */ - if( rc==SQLITE_OK ){ - if( mxSafeFrame==walIndexHdr(pWal)->mxFrame ){ - i64 szDb = pWal->hdr.nPage*(i64)szPage; - testcase( IS_BIG_INT(szDb) ); - rc = sqlite3OsTruncate(pWal->pDbFd, szDb); - if( rc==SQLITE_OK && sync_flags ){ - rc = sqlite3OsSync(pWal->pDbFd, sync_flags); - } - } - if( rc==SQLITE_OK ){ - pInfo->nBackfill = mxSafeFrame; - } - } - - /* Release the reader lock held while backfilling */ - walUnlockExclusive(pWal, WAL_READ_LOCK(0), 1); - } - - if( rc==SQLITE_BUSY ){ - /* Reset the return code so as not to report a checkpoint failure - ** just because there are active readers. */ - rc = SQLITE_OK; - } - - /* If this is an SQLITE_CHECKPOINT_RESTART operation, and the entire wal - ** file has been copied into the database file, then block until all - ** readers have finished using the wal file. This ensures that the next - ** process to write to the database restarts the wal file. - */ - if( rc==SQLITE_OK && eMode!=SQLITE_CHECKPOINT_PASSIVE ){ - assert( pWal->writeLock ); - if( pInfo->nBackfillhdr.mxFrame ){ - rc = SQLITE_BUSY; - }else if( eMode==SQLITE_CHECKPOINT_RESTART ){ - assert( mxSafeFrame==pWal->hdr.mxFrame ); - rc = walBusyLock(pWal, xBusy, pBusyArg, WAL_READ_LOCK(1), WAL_NREADER-1); - if( rc==SQLITE_OK ){ - walUnlockExclusive(pWal, WAL_READ_LOCK(1), WAL_NREADER-1); - } - } - } - - walcheckpoint_out: - walIteratorFree(pIter); - return rc; -} - -/* -** If the WAL file is currently larger than nMax bytes in size, truncate -** it to exactly nMax bytes. If an error occurs while doing so, ignore it. -*/ -static void walLimitSize(Wal *pWal, i64 nMax){ - i64 sz; - int rx; - sqlite3BeginBenignMalloc(); - rx = sqlite3OsFileSize(pWal->pWalFd, &sz); - if( rx==SQLITE_OK && (sz > nMax ) ){ - rx = sqlite3OsTruncate(pWal->pWalFd, nMax); - } - sqlite3EndBenignMalloc(); - if( rx ){ - sqlite3_log(rx, "cannot limit WAL size: %s", pWal->zWalName); - } -} - -/* -** Close a connection to a log file. 
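The checkpoint logic above may only copy a frame back into the database if no active reader could still need the earlier image of that page, so mxSafeFrame is clamped by the read marks of readers whose locks cannot be taken. The sketch below shows a much simplified version of that clamping; the real code first tries to advance each mark under an exclusive lock and only lowers mxSafeFrame when that lock is busy.

#include <stdint.h>

#define SKETCH_NOT_USED 0xffffffffu   /* stand-in for READMARK_NOT_USED */

/* Clamp the highest frame that may be backfilled by the marks of readers
** still pinned below it (simplified: no lock upgrades attempted). */
static uint32_t mx_safe_frame_sketch(const uint32_t *aReadMark, int nReader,
                                     uint32_t mxFrame){
  uint32_t mxSafe = mxFrame;
  int i;
  for(i=1; i<nReader; i++){
    uint32_t y = aReadMark[i];
    if( y!=SKETCH_NOT_USED && y<mxSafe ) mxSafe = y;
  }
  return mxSafe;
}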
-*/ -SQLITE_PRIVATE int sqlite3WalClose( - Wal *pWal, /* Wal to close */ - int sync_flags, /* Flags to pass to OsSync() (or 0) */ - int nBuf, - u8 *zBuf /* Buffer of at least nBuf bytes */ -){ - int rc = SQLITE_OK; - if( pWal ){ - int isDelete = 0; /* True to unlink wal and wal-index files */ - - /* If an EXCLUSIVE lock can be obtained on the database file (using the - ** ordinary, rollback-mode locking methods, this guarantees that the - ** connection associated with this log file is the only connection to - ** the database. In this case checkpoint the database and unlink both - ** the wal and wal-index files. - ** - ** The EXCLUSIVE lock is not released before returning. - */ - rc = sqlite3OsLock(pWal->pDbFd, SQLITE_LOCK_EXCLUSIVE); - if( rc==SQLITE_OK ){ - if( pWal->exclusiveMode==WAL_NORMAL_MODE ){ - pWal->exclusiveMode = WAL_EXCLUSIVE_MODE; - } - rc = sqlite3WalCheckpoint( - pWal, SQLITE_CHECKPOINT_PASSIVE, 0, 0, sync_flags, nBuf, zBuf, 0, 0 - ); - if( rc==SQLITE_OK ){ - int bPersist = -1; - sqlite3OsFileControlHint( - pWal->pDbFd, SQLITE_FCNTL_PERSIST_WAL, &bPersist - ); - if( bPersist!=1 ){ - /* Try to delete the WAL file if the checkpoint completed and - ** fsyned (rc==SQLITE_OK) and if we are not in persistent-wal - ** mode (!bPersist) */ - isDelete = 1; - }else if( pWal->mxWalSize>=0 ){ - /* Try to truncate the WAL file to zero bytes if the checkpoint - ** completed and fsynced (rc==SQLITE_OK) and we are in persistent - ** WAL mode (bPersist) and if the PRAGMA journal_size_limit is a - ** non-negative value (pWal->mxWalSize>=0). Note that we truncate - ** to zero bytes as truncating to the journal_size_limit might - ** leave a corrupt WAL file on disk. */ - walLimitSize(pWal, 0); - } - } - } - - walIndexClose(pWal, isDelete); - sqlite3OsClose(pWal->pWalFd); - if( isDelete ){ - sqlite3BeginBenignMalloc(); - sqlite3OsDelete(pWal->pVfs, pWal->zWalName, 0); - sqlite3EndBenignMalloc(); - } - WALTRACE(("WAL%p: closed\n", pWal)); - sqlite3_free((void *)pWal->apWiData); - sqlite3_free(pWal); - } - return rc; -} - -/* -** Try to read the wal-index header. Return 0 on success and 1 if -** there is a problem. -** -** The wal-index is in shared memory. Another thread or process might -** be writing the header at the same time this procedure is trying to -** read it, which might result in inconsistency. A dirty read is detected -** by verifying that both copies of the header are the same and also by -** a checksum on the header. -** -** If and only if the read is consistent and the header is different from -** pWal->hdr, then pWal->hdr is updated to the content of the new header -** and *pChanged is set to 1. -** -** If the checksum cannot be verified return non-zero. If the header -** is read successfully and the checksum verified, return zero. -*/ -static int walIndexTryHdr(Wal *pWal, int *pChanged){ - u32 aCksum[2]; /* Checksum on the header content */ - WalIndexHdr h1, h2; /* Two copies of the header content */ - WalIndexHdr volatile *aHdr; /* Header in shared memory */ - - /* The first page of the wal-index must be mapped at this point. */ - assert( pWal->nWiData>0 && pWal->apWiData[0] ); - - /* Read the header. This might happen concurrently with a write to the - ** same area of shared memory on a different CPU in a SMP, - ** meaning it is possible that an inconsistent snapshot is read - ** from the file. If this happens, return non-zero. - ** - ** There are two copies of the header at the beginning of the wal-index. - ** When reading, read [0] first then [1]. 
Writes are in the reverse order. - ** Memory barriers are used to prevent the compiler or the hardware from - ** reordering the reads and writes. - */ - aHdr = walIndexHdr(pWal); - memcpy(&h1, (void *)&aHdr[0], sizeof(h1)); - walShmBarrier(pWal); - memcpy(&h2, (void *)&aHdr[1], sizeof(h2)); - - if( memcmp(&h1, &h2, sizeof(h1))!=0 ){ - return 1; /* Dirty read */ - } - if( h1.isInit==0 ){ - return 1; /* Malformed header - probably all zeros */ - } - walChecksumBytes(1, (u8*)&h1, sizeof(h1)-sizeof(h1.aCksum), 0, aCksum); - if( aCksum[0]!=h1.aCksum[0] || aCksum[1]!=h1.aCksum[1] ){ - return 1; /* Checksum does not match */ - } - - if( memcmp(&pWal->hdr, &h1, sizeof(WalIndexHdr)) ){ - *pChanged = 1; - memcpy(&pWal->hdr, &h1, sizeof(WalIndexHdr)); - pWal->szPage = (pWal->hdr.szPage&0xfe00) + ((pWal->hdr.szPage&0x0001)<<16); - testcase( pWal->szPage<=32768 ); - testcase( pWal->szPage>=65536 ); - } - - /* The header was successfully read. Return zero. */ - return 0; -} - -/* -** Read the wal-index header from the wal-index and into pWal->hdr. -** If the wal-header appears to be corrupt, try to reconstruct the -** wal-index from the WAL before returning. -** -** Set *pChanged to 1 if the wal-index header value in pWal->hdr is -** changed by this opertion. If pWal->hdr is unchanged, set *pChanged -** to 0. -** -** If the wal-index header is successfully read, return SQLITE_OK. -** Otherwise an SQLite error code. -*/ -static int walIndexReadHdr(Wal *pWal, int *pChanged){ - int rc; /* Return code */ - int badHdr; /* True if a header read failed */ - volatile u32 *page0; /* Chunk of wal-index containing header */ - - /* Ensure that page 0 of the wal-index (the page that contains the - ** wal-index header) is mapped. Return early if an error occurs here. - */ - assert( pChanged ); - rc = walIndexPage(pWal, 0, &page0); - if( rc!=SQLITE_OK ){ - return rc; - }; - assert( page0 || pWal->writeLock==0 ); - - /* If the first page of the wal-index has been mapped, try to read the - ** wal-index header immediately, without holding any lock. This usually - ** works, but may fail if the wal-index header is corrupt or currently - ** being modified by another thread or process. - */ - badHdr = (page0 ? walIndexTryHdr(pWal, pChanged) : 1); - - /* If the first attempt failed, it might have been due to a race - ** with a writer. So get a WRITE lock and try again. - */ - assert( badHdr==0 || pWal->writeLock==0 ); - if( badHdr ){ - if( pWal->readOnly & WAL_SHM_RDONLY ){ - if( SQLITE_OK==(rc = walLockShared(pWal, WAL_WRITE_LOCK)) ){ - walUnlockShared(pWal, WAL_WRITE_LOCK); - rc = SQLITE_READONLY_RECOVERY; - } - }else if( SQLITE_OK==(rc = walLockExclusive(pWal, WAL_WRITE_LOCK, 1)) ){ - pWal->writeLock = 1; - if( SQLITE_OK==(rc = walIndexPage(pWal, 0, &page0)) ){ - badHdr = walIndexTryHdr(pWal, pChanged); - if( badHdr ){ - /* If the wal-index header is still malformed even while holding - ** a WRITE lock, it can only mean that the header is corrupted and - ** needs to be reconstructed. So run recovery to do exactly that. - */ - rc = walIndexRecover(pWal); - *pChanged = 1; - } - } - pWal->writeLock = 0; - walUnlockExclusive(pWal, WAL_WRITE_LOCK, 1); - } - } - - /* If the header is read successfully, check the version number to make - ** sure the wal-index was not constructed with some future format that - ** this version of SQLite cannot understand. 
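walIndexTryHdr() above detects torn reads of the shared wal-index header by keeping two copies of it and trusting a read only when both copies agree and the embedded checksum verifies. Below is a self-contained sketch of that pattern using a hypothetical fixed-size header; the memory barrier between the two reads and the update of pWal->hdr are omitted.

#include <stdint.h>
#include <string.h>

/* Hypothetical header: ten body words followed by a two-word checksum. */
typedef struct HdrSketch { uint32_t body[10]; uint32_t aCksum[2]; } HdrSketch;

static void hdr_cksum_sketch(const HdrSketch *p, uint32_t aOut[2]){
  uint32_t s1 = 0, s2 = 0;
  int i;
  for(i=0; i<10; i+=2){
    s1 += p->body[i]   + s2;
    s2 += p->body[i+1] + s1;
  }
  aOut[0] = s1; aOut[1] = s2;
}

/* Return 0 and copy the header into *pOut on a clean read, 1 on a dirty
** read (copies disagree or checksum mismatch), mirroring walIndexTryHdr(). */
static int try_read_hdr_sketch(const HdrSketch aShared[2], HdrSketch *pOut){
  HdrSketch h1, h2;
  uint32_t aCk[2];
  memcpy(&h1, &aShared[0], sizeof(h1));   /* read copy [0] first... */
  memcpy(&h2, &aShared[1], sizeof(h2));   /* ...then copy [1]       */
  if( memcmp(&h1, &h2, sizeof(h1))!=0 ) return 1;
  hdr_cksum_sketch(&h1, aCk);
  if( aCk[0]!=h1.aCksum[0] || aCk[1]!=h1.aCksum[1] ) return 1;
  *pOut = h1;
  return 0;
}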
- */ - if( badHdr==0 && pWal->hdr.iVersion!=WALINDEX_MAX_VERSION ){ - rc = SQLITE_CANTOPEN_BKPT; - } - - return rc; -} - -/* -** This is the value that walTryBeginRead returns when it needs to -** be retried. -*/ -#define WAL_RETRY (-1) - -/* -** Attempt to start a read transaction. This might fail due to a race or -** other transient condition. When that happens, it returns WAL_RETRY to -** indicate to the caller that it is safe to retry immediately. -** -** On success return SQLITE_OK. On a permanent failure (such an -** I/O error or an SQLITE_BUSY because another process is running -** recovery) return a positive error code. -** -** The useWal parameter is true to force the use of the WAL and disable -** the case where the WAL is bypassed because it has been completely -** checkpointed. If useWal==0 then this routine calls walIndexReadHdr() -** to make a copy of the wal-index header into pWal->hdr. If the -** wal-index header has changed, *pChanged is set to 1 (as an indication -** to the caller that the local paget cache is obsolete and needs to be -** flushed.) When useWal==1, the wal-index header is assumed to already -** be loaded and the pChanged parameter is unused. -** -** The caller must set the cnt parameter to the number of prior calls to -** this routine during the current read attempt that returned WAL_RETRY. -** This routine will start taking more aggressive measures to clear the -** race conditions after multiple WAL_RETRY returns, and after an excessive -** number of errors will ultimately return SQLITE_PROTOCOL. The -** SQLITE_PROTOCOL return indicates that some other process has gone rogue -** and is not honoring the locking protocol. There is a vanishingly small -** chance that SQLITE_PROTOCOL could be returned because of a run of really -** bad luck when there is lots of contention for the wal-index, but that -** possibility is so small that it can be safely neglected, we believe. -** -** On success, this routine obtains a read lock on -** WAL_READ_LOCK(pWal->readLock). The pWal->readLock integer is -** in the range 0 <= pWal->readLock < WAL_NREADER. If pWal->readLock==(-1) -** that means the Wal does not hold any read lock. The reader must not -** access any database page that is modified by a WAL frame up to and -** including frame number aReadMark[pWal->readLock]. The reader will -** use WAL frames up to and including pWal->hdr.mxFrame if pWal->readLock>0 -** Or if pWal->readLock==0, then the reader will ignore the WAL -** completely and get all content directly from the database file. -** If the useWal parameter is 1 then the WAL will never be ignored and -** this routine will always set pWal->readLock>0 on success. -** When the read transaction is completed, the caller must release the -** lock on WAL_READ_LOCK(pWal->readLock) and set pWal->readLock to -1. -** -** This routine uses the nBackfill and aReadMark[] fields of the header -** to select a particular WAL_READ_LOCK() that strives to let the -** checkpoint process do as much work as possible. This routine might -** update values of the aReadMark[] array in the header, but if it does -** so it takes care to hold an exclusive lock on the corresponding -** WAL_READ_LOCK() while changing values. 
-*/ -static int walTryBeginRead(Wal *pWal, int *pChanged, int useWal, int cnt){ - volatile WalCkptInfo *pInfo; /* Checkpoint information in wal-index */ - u32 mxReadMark; /* Largest aReadMark[] value */ - int mxI; /* Index of largest aReadMark[] value */ - int i; /* Loop counter */ - int rc = SQLITE_OK; /* Return code */ - - assert( pWal->readLock<0 ); /* Not currently locked */ - - /* Take steps to avoid spinning forever if there is a protocol error. - ** - ** Circumstances that cause a RETRY should only last for the briefest - ** instances of time. No I/O or other system calls are done while the - ** locks are held, so the locks should not be held for very long. But - ** if we are unlucky, another process that is holding a lock might get - ** paged out or take a page-fault that is time-consuming to resolve, - ** during the few nanoseconds that it is holding the lock. In that case, - ** it might take longer than normal for the lock to free. - ** - ** After 5 RETRYs, we begin calling sqlite3OsSleep(). The first few - ** calls to sqlite3OsSleep() have a delay of 1 microsecond. Really this - ** is more of a scheduler yield than an actual delay. But on the 10th - ** an subsequent retries, the delays start becoming longer and longer, - ** so that on the 100th (and last) RETRY we delay for 21 milliseconds. - ** The total delay time before giving up is less than 1 second. - */ - if( cnt>5 ){ - int nDelay = 1; /* Pause time in microseconds */ - if( cnt>100 ){ - VVA_ONLY( pWal->lockError = 1; ) - return SQLITE_PROTOCOL; - } - if( cnt>=10 ) nDelay = (cnt-9)*238; /* Max delay 21ms. Total delay 996ms */ - sqlite3OsSleep(pWal->pVfs, nDelay); - } - - if( !useWal ){ - rc = walIndexReadHdr(pWal, pChanged); - if( rc==SQLITE_BUSY ){ - /* If there is not a recovery running in another thread or process - ** then convert BUSY errors to WAL_RETRY. If recovery is known to - ** be running, convert BUSY to BUSY_RECOVERY. There is a race here - ** which might cause WAL_RETRY to be returned even if BUSY_RECOVERY - ** would be technically correct. But the race is benign since with - ** WAL_RETRY this routine will be called again and will probably be - ** right on the second iteration. - */ - if( pWal->apWiData[0]==0 ){ - /* This branch is taken when the xShmMap() method returns SQLITE_BUSY. - ** We assume this is a transient condition, so return WAL_RETRY. The - ** xShmMap() implementation used by the default unix and win32 VFS - ** modules may return SQLITE_BUSY due to a race condition in the - ** code that determines whether or not the shared-memory region - ** must be zeroed before the requested page is returned. - */ - rc = WAL_RETRY; - }else if( SQLITE_OK==(rc = walLockShared(pWal, WAL_RECOVER_LOCK)) ){ - walUnlockShared(pWal, WAL_RECOVER_LOCK); - rc = WAL_RETRY; - }else if( rc==SQLITE_BUSY ){ - rc = SQLITE_BUSY_RECOVERY; - } - } - if( rc!=SQLITE_OK ){ - return rc; - } - } - - pInfo = walCkptInfo(pWal); - if( !useWal && pInfo->nBackfill==pWal->hdr.mxFrame ){ - /* The WAL has been completely backfilled (or it is empty). - ** and can be safely ignored. - */ - rc = walLockShared(pWal, WAL_READ_LOCK(0)); - walShmBarrier(pWal); - if( rc==SQLITE_OK ){ - if( memcmp((void *)walIndexHdr(pWal), &pWal->hdr, sizeof(WalIndexHdr)) ){ - /* It is not safe to allow the reader to continue here if frames - ** may have been appended to the log before READ_LOCK(0) was obtained. - ** When holding READ_LOCK(0), the reader ignores the entire log file, - ** which implies that the database file contains a trustworthy - ** snapshoT. 
Since holding READ_LOCK(0) prevents a checkpoint from - ** happening, this is usually correct. - ** - ** However, if frames have been appended to the log (or if the log - ** is wrapped and written for that matter) before the READ_LOCK(0) - ** is obtained, that is not necessarily true. A checkpointer may - ** have started to backfill the appended frames but crashed before - ** it finished. Leaving a corrupt image in the database file. - */ - walUnlockShared(pWal, WAL_READ_LOCK(0)); - return WAL_RETRY; - } - pWal->readLock = 0; - return SQLITE_OK; - }else if( rc!=SQLITE_BUSY ){ - return rc; - } - } - - /* If we get this far, it means that the reader will want to use - ** the WAL to get at content from recent commits. The job now is - ** to select one of the aReadMark[] entries that is closest to - ** but not exceeding pWal->hdr.mxFrame and lock that entry. - */ - mxReadMark = 0; - mxI = 0; - for(i=1; iaReadMark[i]; - if( mxReadMark<=thisMark && thisMark<=pWal->hdr.mxFrame ){ - assert( thisMark!=READMARK_NOT_USED ); - mxReadMark = thisMark; - mxI = i; - } - } - /* There was once an "if" here. The extra "{" is to preserve indentation. */ - { - if( (pWal->readOnly & WAL_SHM_RDONLY)==0 - && (mxReadMarkhdr.mxFrame || mxI==0) - ){ - for(i=1; iaReadMark[i] = pWal->hdr.mxFrame; - mxI = i; - walUnlockExclusive(pWal, WAL_READ_LOCK(i), 1); - break; - }else if( rc!=SQLITE_BUSY ){ - return rc; - } - } - } - if( mxI==0 ){ - assert( rc==SQLITE_BUSY || (pWal->readOnly & WAL_SHM_RDONLY)!=0 ); - return rc==SQLITE_BUSY ? WAL_RETRY : SQLITE_READONLY_CANTLOCK; - } - - rc = walLockShared(pWal, WAL_READ_LOCK(mxI)); - if( rc ){ - return rc==SQLITE_BUSY ? WAL_RETRY : rc; - } - /* Now that the read-lock has been obtained, check that neither the - ** value in the aReadMark[] array or the contents of the wal-index - ** header have changed. - ** - ** It is necessary to check that the wal-index header did not change - ** between the time it was read and when the shared-lock was obtained - ** on WAL_READ_LOCK(mxI) was obtained to account for the possibility - ** that the log file may have been wrapped by a writer, or that frames - ** that occur later in the log than pWal->hdr.mxFrame may have been - ** copied into the database by a checkpointer. If either of these things - ** happened, then reading the database with the current value of - ** pWal->hdr.mxFrame risks reading a corrupted snapshot. So, retry - ** instead. - ** - ** This does not guarantee that the copy of the wal-index header is up to - ** date before proceeding. That would not be possible without somehow - ** blocking writers. It only guarantees that a dangerous checkpoint or - ** log-wrap (either of which would require an exclusive lock on - ** WAL_READ_LOCK(mxI)) has not occurred since the snapshot was valid. - */ - walShmBarrier(pWal); - if( pInfo->aReadMark[mxI]!=mxReadMark - || memcmp((void *)walIndexHdr(pWal), &pWal->hdr, sizeof(WalIndexHdr)) - ){ - walUnlockShared(pWal, WAL_READ_LOCK(mxI)); - return WAL_RETRY; - }else{ - assert( mxReadMark<=pWal->hdr.mxFrame ); - pWal->readLock = (i16)mxI; - } - } - return rc; -} - -/* -** Begin a read transaction on the database. -** -** This routine used to be called sqlite3OpenSnapshot() and with good reason: -** it takes a snapshot of the state of the WAL and wal-index for the current -** instant in time. The current thread will continue to use this snapshot. -** Other threads might append new content to the WAL and wal-index but -** that extra content is ignored by the current thread. 
-** -** If the database contents have changes since the previous read -** transaction, then *pChanged is set to 1 before returning. The -** Pager layer will use this to know that is cache is stale and -** needs to be flushed. -*/ -SQLITE_PRIVATE int sqlite3WalBeginReadTransaction(Wal *pWal, int *pChanged){ - int rc; /* Return code */ - int cnt = 0; /* Number of TryBeginRead attempts */ - - do{ - rc = walTryBeginRead(pWal, pChanged, 0, ++cnt); - }while( rc==WAL_RETRY ); - testcase( (rc&0xff)==SQLITE_BUSY ); - testcase( (rc&0xff)==SQLITE_IOERR ); - testcase( rc==SQLITE_PROTOCOL ); - testcase( rc==SQLITE_OK ); - return rc; -} - -/* -** Finish with a read transaction. All this does is release the -** read-lock. -*/ -SQLITE_PRIVATE void sqlite3WalEndReadTransaction(Wal *pWal){ - sqlite3WalEndWriteTransaction(pWal); - if( pWal->readLock>=0 ){ - walUnlockShared(pWal, WAL_READ_LOCK(pWal->readLock)); - pWal->readLock = -1; - } -} - -/* -** Search the wal file for page pgno. If found, set *piRead to the frame that -** contains the page. Otherwise, if pgno is not in the wal file, set *piRead -** to zero. -** -** Return SQLITE_OK if successful, or an error code if an error occurs. If an -** error does occur, the final value of *piRead is undefined. -*/ -SQLITE_PRIVATE int sqlite3WalFindFrame( - Wal *pWal, /* WAL handle */ - Pgno pgno, /* Database page number to read data for */ - u32 *piRead /* OUT: Frame number (or zero) */ -){ - u32 iRead = 0; /* If !=0, WAL frame to return data from */ - u32 iLast = pWal->hdr.mxFrame; /* Last page in WAL for this reader */ - int iHash; /* Used to loop through N hash tables */ - - /* This routine is only be called from within a read transaction. */ - assert( pWal->readLock>=0 || pWal->lockError ); - - /* If the "last page" field of the wal-index header snapshot is 0, then - ** no data will be read from the wal under any circumstances. Return early - ** in this case as an optimization. Likewise, if pWal->readLock==0, - ** then the WAL is ignored by the reader so return early, as if the - ** WAL were empty. - */ - if( iLast==0 || pWal->readLock==0 ){ - *piRead = 0; - return SQLITE_OK; - } - - /* Search the hash table or tables for an entry matching page number - ** pgno. Each iteration of the following for() loop searches one - ** hash table (each hash table indexes up to HASHTABLE_NPAGE frames). - ** - ** This code might run concurrently to the code in walIndexAppend() - ** that adds entries to the wal-index (and possibly to this hash - ** table). This means the value just read from the hash - ** slot (aHash[iKey]) may have been added before or after the - ** current read transaction was opened. Values added after the - ** read transaction was opened may have been written incorrectly - - ** i.e. these slots may contain garbage data. However, we assume - ** that any slots written before the current read transaction was - ** opened remain unmodified. - ** - ** For the reasons above, the if(...) condition featured in the inner - ** loop of the following block is more stringent that would be required - ** if we had exclusive access to the hash-table: - ** - ** (aPgno[iFrame]==pgno): - ** This condition filters out normal hash-table collisions. - ** - ** (iFrame<=iLast): - ** This condition filters out entries that were added to the hash - ** table after the current read-transaction had started. 
- */ - for(iHash=walFramePage(iLast); iHash>=0 && iRead==0; iHash--){ - volatile ht_slot *aHash; /* Pointer to hash table */ - volatile u32 *aPgno; /* Pointer to array of page numbers */ - u32 iZero; /* Frame number corresponding to aPgno[0] */ - int iKey; /* Hash slot index */ - int nCollide; /* Number of hash collisions remaining */ - int rc; /* Error code */ - - rc = walHashGet(pWal, iHash, &aHash, &aPgno, &iZero); - if( rc!=SQLITE_OK ){ - return rc; - } - nCollide = HASHTABLE_NSLOT; - for(iKey=walHash(pgno); aHash[iKey]; iKey=walNextHash(iKey)){ - u32 iFrame = aHash[iKey] + iZero; - if( iFrame<=iLast && aPgno[aHash[iKey]]==pgno ){ - /* assert( iFrame>iRead ); -- not true if there is corruption */ - iRead = iFrame; - } - if( (nCollide--)==0 ){ - return SQLITE_CORRUPT_BKPT; - } - } - } - -#ifdef SQLITE_ENABLE_EXPENSIVE_ASSERT - /* If expensive assert() statements are available, do a linear search - ** of the wal-index file content. Make sure the results agree with the - ** result obtained using the hash indexes above. */ - { - u32 iRead2 = 0; - u32 iTest; - for(iTest=iLast; iTest>0; iTest--){ - if( walFramePgno(pWal, iTest)==pgno ){ - iRead2 = iTest; - break; - } - } - assert( iRead==iRead2 ); - } -#endif - - *piRead = iRead; - return SQLITE_OK; -} - -/* -** Read the contents of frame iRead from the wal file into buffer pOut -** (which is nOut bytes in size). Return SQLITE_OK if successful, or an -** error code otherwise. -*/ -SQLITE_PRIVATE int sqlite3WalReadFrame( - Wal *pWal, /* WAL handle */ - u32 iRead, /* Frame to read */ - int nOut, /* Size of buffer pOut in bytes */ - u8 *pOut /* Buffer to write page data to */ -){ - int sz; - i64 iOffset; - sz = pWal->hdr.szPage; - sz = (sz&0xfe00) + ((sz&0x0001)<<16); - testcase( sz<=32768 ); - testcase( sz>=65536 ); - iOffset = walFrameOffset(iRead, sz) + WAL_FRAME_HDRSIZE; - /* testcase( IS_BIG_INT(iOffset) ); // requires a 4GiB WAL */ - return sqlite3OsRead(pWal->pWalFd, pOut, (nOut>sz ? sz : nOut), iOffset); -} - -/* -** Return the size of the database in pages (or zero, if unknown). -*/ -SQLITE_PRIVATE Pgno sqlite3WalDbsize(Wal *pWal){ - if( pWal && ALWAYS(pWal->readLock>=0) ){ - return pWal->hdr.nPage; - } - return 0; -} - - -/* -** This function starts a write transaction on the WAL. -** -** A read transaction must have already been started by a prior call -** to sqlite3WalBeginReadTransaction(). -** -** If another thread or process has written into the database since -** the read transaction was started, then it is not possible for this -** thread to write as doing so would cause a fork. So this routine -** returns SQLITE_BUSY in that case and no write transaction is started. -** -** There can only be a single writer active at a time. -*/ -SQLITE_PRIVATE int sqlite3WalBeginWriteTransaction(Wal *pWal){ - int rc; - - /* Cannot start a write transaction without first holding a read - ** transaction. */ - assert( pWal->readLock>=0 ); - - if( pWal->readOnly ){ - return SQLITE_READONLY; - } - - /* Only one writer allowed at a time. Get the write lock. Return - ** SQLITE_BUSY if unable. - */ - rc = walLockExclusive(pWal, WAL_WRITE_LOCK, 1); - if( rc ){ - return rc; - } - pWal->writeLock = 1; - - /* If another connection has written to the database file since the - ** time the read transaction on this connection was started, then - ** the write is disallowed. 
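The hash-table walk in sqlite3WalFindFrame() above is equivalent to the linear cross-check compiled in under SQLITE_ENABLE_EXPENSIVE_ASSERT: take the newest frame at or below the reader's snapshot (iLast) that wrote the requested page. That equivalence is restated in the short sketch below, where aFramePgno is a hypothetical 1-based array mapping frame numbers to page numbers.

#include <stdint.h>

/* Return the newest frame <= iLast that contains page pgno, or 0 if the
** page is not in the WAL.  The real lookup uses the per-segment hash
** tables instead of this linear scan. */
static uint32_t find_frame_sketch(const uint32_t *aFramePgno, uint32_t iLast,
                                  uint32_t pgno){
  uint32_t f, iRead = 0;
  for(f=1; f<=iLast; f++){
    if( aFramePgno[f]==pgno ) iRead = f;
  }
  return iRead;
}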
- */ - if( memcmp(&pWal->hdr, (void *)walIndexHdr(pWal), sizeof(WalIndexHdr))!=0 ){ - walUnlockExclusive(pWal, WAL_WRITE_LOCK, 1); - pWal->writeLock = 0; - rc = SQLITE_BUSY_SNAPSHOT; - } - - return rc; -} - -/* -** End a write transaction. The commit has already been done. This -** routine merely releases the lock. -*/ -SQLITE_PRIVATE int sqlite3WalEndWriteTransaction(Wal *pWal){ - if( pWal->writeLock ){ - walUnlockExclusive(pWal, WAL_WRITE_LOCK, 1); - pWal->writeLock = 0; - pWal->truncateOnCommit = 0; - } - return SQLITE_OK; -} - -/* -** If any data has been written (but not committed) to the log file, this -** function moves the write-pointer back to the start of the transaction. -** -** Additionally, the callback function is invoked for each frame written -** to the WAL since the start of the transaction. If the callback returns -** other than SQLITE_OK, it is not invoked again and the error code is -** returned to the caller. -** -** Otherwise, if the callback function does not return an error, this -** function returns SQLITE_OK. -*/ -SQLITE_PRIVATE int sqlite3WalUndo(Wal *pWal, int (*xUndo)(void *, Pgno), void *pUndoCtx){ - int rc = SQLITE_OK; - if( ALWAYS(pWal->writeLock) ){ - Pgno iMax = pWal->hdr.mxFrame; - Pgno iFrame; - - /* Restore the clients cache of the wal-index header to the state it - ** was in before the client began writing to the database. - */ - memcpy(&pWal->hdr, (void *)walIndexHdr(pWal), sizeof(WalIndexHdr)); - - for(iFrame=pWal->hdr.mxFrame+1; - ALWAYS(rc==SQLITE_OK) && iFrame<=iMax; - iFrame++ - ){ - /* This call cannot fail. Unless the page for which the page number - ** is passed as the second argument is (a) in the cache and - ** (b) has an outstanding reference, then xUndo is either a no-op - ** (if (a) is false) or simply expels the page from the cache (if (b) - ** is false). - ** - ** If the upper layer is doing a rollback, it is guaranteed that there - ** are no outstanding references to any page other than page 1. And - ** page 1 is never written to the log until the transaction is - ** committed. As a result, the call to xUndo may not fail. - */ - assert( walFramePgno(pWal, iFrame)!=1 ); - rc = xUndo(pUndoCtx, walFramePgno(pWal, iFrame)); - } - if( iMax!=pWal->hdr.mxFrame ) walCleanupHash(pWal); - } - assert( rc==SQLITE_OK ); - return rc; -} - -/* -** Argument aWalData must point to an array of WAL_SAVEPOINT_NDATA u32 -** values. This function populates the array with values required to -** "rollback" the write position of the WAL handle back to the current -** point in the event of a savepoint rollback (via WalSavepointUndo()). -*/ -SQLITE_PRIVATE void sqlite3WalSavepoint(Wal *pWal, u32 *aWalData){ - assert( pWal->writeLock ); - aWalData[0] = pWal->hdr.mxFrame; - aWalData[1] = pWal->hdr.aFrameCksum[0]; - aWalData[2] = pWal->hdr.aFrameCksum[1]; - aWalData[3] = pWal->nCkpt; -} - -/* -** Move the write position of the WAL back to the point identified by -** the values in the aWalData[] array. aWalData must point to an array -** of WAL_SAVEPOINT_NDATA u32 values that has been previously populated -** by a call to WalSavepoint(). -*/ -SQLITE_PRIVATE int sqlite3WalSavepointUndo(Wal *pWal, u32 *aWalData){ - int rc = SQLITE_OK; - - assert( pWal->writeLock ); - assert( aWalData[3]!=pWal->nCkpt || aWalData[0]<=pWal->hdr.mxFrame ); - - if( aWalData[3]!=pWal->nCkpt ){ - /* This savepoint was opened immediately after the write-transaction - ** was started. Right after that, the writer decided to wrap around - ** to the start of the log. 
Update the savepoint values to match. - */ - aWalData[0] = 0; - aWalData[3] = pWal->nCkpt; - } - - if( aWalData[0]hdr.mxFrame ){ - pWal->hdr.mxFrame = aWalData[0]; - pWal->hdr.aFrameCksum[0] = aWalData[1]; - pWal->hdr.aFrameCksum[1] = aWalData[2]; - walCleanupHash(pWal); - } - - return rc; -} - - -/* -** This function is called just before writing a set of frames to the log -** file (see sqlite3WalFrames()). It checks to see if, instead of appending -** to the current log file, it is possible to overwrite the start of the -** existing log file with the new frames (i.e. "reset" the log). If so, -** it sets pWal->hdr.mxFrame to 0. Otherwise, pWal->hdr.mxFrame is left -** unchanged. -** -** SQLITE_OK is returned if no error is encountered (regardless of whether -** or not pWal->hdr.mxFrame is modified). An SQLite error code is returned -** if an error occurs. -*/ -static int walRestartLog(Wal *pWal){ - int rc = SQLITE_OK; - int cnt; - - if( pWal->readLock==0 ){ - volatile WalCkptInfo *pInfo = walCkptInfo(pWal); - assert( pInfo->nBackfill==pWal->hdr.mxFrame ); - if( pInfo->nBackfill>0 ){ - u32 salt1; - sqlite3_randomness(4, &salt1); - rc = walLockExclusive(pWal, WAL_READ_LOCK(1), WAL_NREADER-1); - if( rc==SQLITE_OK ){ - /* If all readers are using WAL_READ_LOCK(0) (in other words if no - ** readers are currently using the WAL), then the transactions - ** frames will overwrite the start of the existing log. Update the - ** wal-index header to reflect this. - ** - ** In theory it would be Ok to update the cache of the header only - ** at this point. But updating the actual wal-index header is also - ** safe and means there is no special case for sqlite3WalUndo() - ** to handle if this transaction is rolled back. - */ - int i; /* Loop counter */ - u32 *aSalt = pWal->hdr.aSalt; /* Big-endian salt values */ - - pWal->nCkpt++; - pWal->hdr.mxFrame = 0; - sqlite3Put4byte((u8*)&aSalt[0], 1 + sqlite3Get4byte((u8*)&aSalt[0])); - aSalt[1] = salt1; - walIndexWriteHdr(pWal); - pInfo->nBackfill = 0; - pInfo->aReadMark[1] = 0; - for(i=2; iaReadMark[i] = READMARK_NOT_USED; - assert( pInfo->aReadMark[0]==0 ); - walUnlockExclusive(pWal, WAL_READ_LOCK(1), WAL_NREADER-1); - }else if( rc!=SQLITE_BUSY ){ - return rc; - } - } - walUnlockShared(pWal, WAL_READ_LOCK(0)); - pWal->readLock = -1; - cnt = 0; - do{ - int notUsed; - rc = walTryBeginRead(pWal, ¬Used, 1, ++cnt); - }while( rc==WAL_RETRY ); - assert( (rc&0xff)!=SQLITE_BUSY ); /* BUSY not possible when useWal==1 */ - testcase( (rc&0xff)==SQLITE_IOERR ); - testcase( rc==SQLITE_PROTOCOL ); - testcase( rc==SQLITE_OK ); - } - return rc; -} - -/* -** Information about the current state of the WAL file and where -** the next fsync should occur - passed from sqlite3WalFrames() into -** walWriteToLog(). -*/ -typedef struct WalWriter { - Wal *pWal; /* The complete WAL information */ - sqlite3_file *pFd; /* The WAL file to which we write */ - sqlite3_int64 iSyncPoint; /* Fsync at this offset */ - int syncFlags; /* Flags for the fsync */ - int szPage; /* Size of one page */ -} WalWriter; - -/* -** Write iAmt bytes of content into the WAL file beginning at iOffset. -** Do a sync when crossing the p->iSyncPoint boundary. -** -** In other words, if iSyncPoint is in between iOffset and iOffset+iAmt, -** first write the part before iSyncPoint, then sync, then write the -** rest. 
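sqlite3WalSavepoint() and sqlite3WalSavepointUndo() above are the WAL-side half of SQL savepoints: opening a savepoint records mxFrame, the running frame checksum, and the checkpoint sequence, and rolling back rewinds the in-memory view of the log to that point. A hedged application-level sketch of when that machinery is exercised; the database file name, table, and helper are illustrative, not part of this diff:

```c
#include <stdio.h>
#include <sqlite3.h>

/* Run one SQL statement and report errors (illustrative helper). */
static void run(sqlite3 *db, const char *zSql){
  char *zErr = 0;
  if( sqlite3_exec(db, zSql, 0, 0, &zErr)!=SQLITE_OK ){
    fprintf(stderr, "error: %s\n", zErr);
    sqlite3_free(zErr);
  }
}

int main(void){
  sqlite3 *db;
  if( sqlite3_open("demo.db", &db)!=SQLITE_OK ) return 1;
  run(db, "PRAGMA journal_mode=WAL;");
  run(db, "CREATE TABLE IF NOT EXISTS t(x);");
  run(db, "BEGIN;");
  run(db, "INSERT INTO t VALUES(1);");  /* frames appended to the WAL      */
  run(db, "SAVEPOINT sp1;");            /* WAL write position captured     */
  run(db, "INSERT INTO t VALUES(2);");  /* more frames after the savepoint */
  run(db, "ROLLBACK TO sp1;");          /* WAL write position rewound      */
  run(db, "COMMIT;");                   /* only the first insert survives  */
  sqlite3_close(db);
  return 0;
}
```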
-*/ -static int walWriteToLog( - WalWriter *p, /* WAL to write to */ - void *pContent, /* Content to be written */ - int iAmt, /* Number of bytes to write */ - sqlite3_int64 iOffset /* Start writing at this offset */ -){ - int rc; - if( iOffsetiSyncPoint && iOffset+iAmt>=p->iSyncPoint ){ - int iFirstAmt = (int)(p->iSyncPoint - iOffset); - rc = sqlite3OsWrite(p->pFd, pContent, iFirstAmt, iOffset); - if( rc ) return rc; - iOffset += iFirstAmt; - iAmt -= iFirstAmt; - pContent = (void*)(iFirstAmt + (char*)pContent); - assert( p->syncFlags & (SQLITE_SYNC_NORMAL|SQLITE_SYNC_FULL) ); - rc = sqlite3OsSync(p->pFd, p->syncFlags & SQLITE_SYNC_MASK); - if( iAmt==0 || rc ) return rc; - } - rc = sqlite3OsWrite(p->pFd, pContent, iAmt, iOffset); - return rc; -} - -/* -** Write out a single frame of the WAL -*/ -static int walWriteOneFrame( - WalWriter *p, /* Where to write the frame */ - PgHdr *pPage, /* The page of the frame to be written */ - int nTruncate, /* The commit flag. Usually 0. >0 for commit */ - sqlite3_int64 iOffset /* Byte offset at which to write */ -){ - int rc; /* Result code from subfunctions */ - void *pData; /* Data actually written */ - u8 aFrame[WAL_FRAME_HDRSIZE]; /* Buffer to assemble frame-header in */ -#if defined(SQLITE_HAS_CODEC) - if( (pData = sqlite3PagerCodec(pPage))==0 ) return SQLITE_NOMEM; -#else - pData = pPage->pData; -#endif - walEncodeFrame(p->pWal, pPage->pgno, nTruncate, pData, aFrame); - rc = walWriteToLog(p, aFrame, sizeof(aFrame), iOffset); - if( rc ) return rc; - /* Write the page data */ - rc = walWriteToLog(p, pData, p->szPage, iOffset+sizeof(aFrame)); - return rc; -} - -/* -** Write a set of frames to the log. The caller must hold the write-lock -** on the log file (obtained using sqlite3WalBeginWriteTransaction()). -*/ -SQLITE_PRIVATE int sqlite3WalFrames( - Wal *pWal, /* Wal handle to write to */ - int szPage, /* Database page-size in bytes */ - PgHdr *pList, /* List of dirty pages to write */ - Pgno nTruncate, /* Database size after this commit */ - int isCommit, /* True if this is a commit */ - int sync_flags /* Flags to pass to OsSync() (or 0) */ -){ - int rc; /* Used to catch return codes */ - u32 iFrame; /* Next frame address */ - PgHdr *p; /* Iterator to run through pList with. */ - PgHdr *pLast = 0; /* Last frame in list */ - int nExtra = 0; /* Number of extra copies of last page */ - int szFrame; /* The size of a single frame */ - i64 iOffset; /* Next byte to write in WAL file */ - WalWriter w; /* The writer */ - - assert( pList ); - assert( pWal->writeLock ); - - /* If this frame set completes a transaction, then nTruncate>0. If - ** nTruncate==0 then this frame set does not complete the transaction. */ - assert( (isCommit!=0)==(nTruncate!=0) ); - -#if defined(SQLITE_TEST) && defined(SQLITE_DEBUG) - { int cnt; for(cnt=0, p=pList; p; p=p->pDirty, cnt++){} - WALTRACE(("WAL%p: frame write begin. %d frames. mxFrame=%d. %s\n", - pWal, cnt, pWal->hdr.mxFrame, isCommit ? "Commit" : "Spill")); - } -#endif - - /* See if it is possible to write these frames into the start of the - ** log file, instead of appending to it at pWal->hdr.mxFrame. - */ - if( SQLITE_OK!=(rc = walRestartLog(pWal)) ){ - return rc; - } - - /* If this is the first frame written into the log, write the WAL - ** header to the start of the WAL file. See comments at the top of - ** this source file for a description of the WAL header format. 
- */ - iFrame = pWal->hdr.mxFrame; - if( iFrame==0 ){ - u8 aWalHdr[WAL_HDRSIZE]; /* Buffer to assemble wal-header in */ - u32 aCksum[2]; /* Checksum for wal-header */ - - sqlite3Put4byte(&aWalHdr[0], (WAL_MAGIC | SQLITE_BIGENDIAN)); - sqlite3Put4byte(&aWalHdr[4], WAL_MAX_VERSION); - sqlite3Put4byte(&aWalHdr[8], szPage); - sqlite3Put4byte(&aWalHdr[12], pWal->nCkpt); - if( pWal->nCkpt==0 ) sqlite3_randomness(8, pWal->hdr.aSalt); - memcpy(&aWalHdr[16], pWal->hdr.aSalt, 8); - walChecksumBytes(1, aWalHdr, WAL_HDRSIZE-2*4, 0, aCksum); - sqlite3Put4byte(&aWalHdr[24], aCksum[0]); - sqlite3Put4byte(&aWalHdr[28], aCksum[1]); - - pWal->szPage = szPage; - pWal->hdr.bigEndCksum = SQLITE_BIGENDIAN; - pWal->hdr.aFrameCksum[0] = aCksum[0]; - pWal->hdr.aFrameCksum[1] = aCksum[1]; - pWal->truncateOnCommit = 1; - - rc = sqlite3OsWrite(pWal->pWalFd, aWalHdr, sizeof(aWalHdr), 0); - WALTRACE(("WAL%p: wal-header write %s\n", pWal, rc ? "failed" : "ok")); - if( rc!=SQLITE_OK ){ - return rc; - } - - /* Sync the header (unless SQLITE_IOCAP_SEQUENTIAL is true or unless - ** all syncing is turned off by PRAGMA synchronous=OFF). Otherwise - ** an out-of-order write following a WAL restart could result in - ** database corruption. See the ticket: - ** - ** http://localhost:591/sqlite/info/ff5be73dee - */ - if( pWal->syncHeader && sync_flags ){ - rc = sqlite3OsSync(pWal->pWalFd, sync_flags & SQLITE_SYNC_MASK); - if( rc ) return rc; - } - } - assert( (int)pWal->szPage==szPage ); - - /* Setup information needed to write frames into the WAL */ - w.pWal = pWal; - w.pFd = pWal->pWalFd; - w.iSyncPoint = 0; - w.syncFlags = sync_flags; - w.szPage = szPage; - iOffset = walFrameOffset(iFrame+1, szPage); - szFrame = szPage + WAL_FRAME_HDRSIZE; - - /* Write all frames into the log file exactly once */ - for(p=pList; p; p=p->pDirty){ - int nDbSize; /* 0 normally. Positive == commit flag */ - iFrame++; - assert( iOffset==walFrameOffset(iFrame, szPage) ); - nDbSize = (isCommit && p->pDirty==0) ? nTruncate : 0; - rc = walWriteOneFrame(&w, p, nDbSize, iOffset); - if( rc ) return rc; - pLast = p; - iOffset += szFrame; - } - - /* If this is the end of a transaction, then we might need to pad - ** the transaction and/or sync the WAL file. - ** - ** Padding and syncing only occur if this set of frames complete a - ** transaction and if PRAGMA synchronous=FULL. If synchronous==NORMAL - ** or synchonous==OFF, then no padding or syncing are needed. - ** - ** If SQLITE_IOCAP_POWERSAFE_OVERWRITE is defined, then padding is not - ** needed and only the sync is done. If padding is needed, then the - ** final frame is repeated (with its commit mark) until the next sector - ** boundary is crossed. Only the part of the WAL prior to the last - ** sector boundary is synced; the part of the last frame that extends - ** past the sector boundary is written after the sync. - */ - if( isCommit && (sync_flags & WAL_SYNC_TRANSACTIONS)!=0 ){ - if( pWal->padToSectorBoundary ){ - int sectorSize = sqlite3SectorSize(pWal->pWalFd); - w.iSyncPoint = ((iOffset+sectorSize-1)/sectorSize)*sectorSize; - while( iOffsettruncateOnCommit && pWal->mxWalSize>=0 ){ - i64 sz = pWal->mxWalSize; - if( walFrameOffset(iFrame+nExtra+1, szPage)>pWal->mxWalSize ){ - sz = walFrameOffset(iFrame+nExtra+1, szPage); - } - walLimitSize(pWal, sz); - pWal->truncateOnCommit = 0; - } - - /* Append data to the wal-index. 
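The block above assembles the 32-byte WAL file header: magic word, format version, page size, checkpoint sequence, 8 bytes of salt, and a checksum over the first 24 bytes. A reader-side sketch of the same layout, with the byte offsets taken from that code (checksum verification omitted for brevity):

```c
#include <stdint.h>
#include <string.h>

/* Big-endian 32-bit read, equivalent to sqlite3Get4byte(). */
static uint32_t be32(const uint8_t *p){
  return ((uint32_t)p[0]<<24) | ((uint32_t)p[1]<<16)
       | ((uint32_t)p[2]<<8)  |  (uint32_t)p[3];
}

typedef struct WalHdrFields {
  uint32_t magic;      /* offset  0: WAL magic; low bit = checksum order */
  uint32_t version;    /* offset  4: WAL format version                  */
  uint32_t szPage;     /* offset  8: database page size                  */
  uint32_t nCkpt;      /* offset 12: checkpoint sequence number          */
  uint8_t  aSalt[8];   /* offset 16: random salt, repeated in each frame */
  uint32_t aCksum[2];  /* offset 24: checksum over bytes 0..23           */
} WalHdrFields;

/* Decode a 32-byte WAL header into its fields. */
static void decodeWalHdr(const uint8_t *a, WalHdrFields *p){
  p->magic     = be32(&a[0]);
  p->version   = be32(&a[4]);
  p->szPage    = be32(&a[8]);
  p->nCkpt     = be32(&a[12]);
  memcpy(p->aSalt, &a[16], 8);
  p->aCksum[0] = be32(&a[24]);
  p->aCksum[1] = be32(&a[28]);
}
```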
It is not necessary to lock the - ** wal-index to do this as the SQLITE_SHM_WRITE lock held on the wal-index - ** guarantees that there are no other writers, and no data that may - ** be in use by existing readers is being overwritten. - */ - iFrame = pWal->hdr.mxFrame; - for(p=pList; p && rc==SQLITE_OK; p=p->pDirty){ - iFrame++; - rc = walIndexAppend(pWal, iFrame, p->pgno); - } - while( rc==SQLITE_OK && nExtra>0 ){ - iFrame++; - nExtra--; - rc = walIndexAppend(pWal, iFrame, pLast->pgno); - } - - if( rc==SQLITE_OK ){ - /* Update the private copy of the header. */ - pWal->hdr.szPage = (u16)((szPage&0xff00) | (szPage>>16)); - testcase( szPage<=32768 ); - testcase( szPage>=65536 ); - pWal->hdr.mxFrame = iFrame; - if( isCommit ){ - pWal->hdr.iChange++; - pWal->hdr.nPage = nTruncate; - } - /* If this is a commit, update the wal-index header too. */ - if( isCommit ){ - walIndexWriteHdr(pWal); - pWal->iCallback = iFrame; - } - } - - WALTRACE(("WAL%p: frame write %s\n", pWal, rc ? "failed" : "ok")); - return rc; -} - -/* -** This routine is called to implement sqlite3_wal_checkpoint() and -** related interfaces. -** -** Obtain a CHECKPOINT lock and then backfill as much information as -** we can from WAL into the database. -** -** If parameter xBusy is not NULL, it is a pointer to a busy-handler -** callback. In this case this function runs a blocking checkpoint. -*/ -SQLITE_PRIVATE int sqlite3WalCheckpoint( - Wal *pWal, /* Wal connection */ - int eMode, /* PASSIVE, FULL or RESTART */ - int (*xBusy)(void*), /* Function to call when busy */ - void *pBusyArg, /* Context argument for xBusyHandler */ - int sync_flags, /* Flags to sync db file with (or 0) */ - int nBuf, /* Size of temporary buffer */ - u8 *zBuf, /* Temporary buffer to use */ - int *pnLog, /* OUT: Number of frames in WAL */ - int *pnCkpt /* OUT: Number of backfilled frames in WAL */ -){ - int rc; /* Return code */ - int isChanged = 0; /* True if a new wal-index header is loaded */ - int eMode2 = eMode; /* Mode to pass to walCheckpoint() */ - - assert( pWal->ckptLock==0 ); - assert( pWal->writeLock==0 ); - - if( pWal->readOnly ) return SQLITE_READONLY; - WALTRACE(("WAL%p: checkpoint begins\n", pWal)); - rc = walLockExclusive(pWal, WAL_CKPT_LOCK, 1); - if( rc ){ - /* Usually this is SQLITE_BUSY meaning that another thread or process - ** is already running a checkpoint, or maybe a recovery. But it might - ** also be SQLITE_IOERR. */ - return rc; - } - pWal->ckptLock = 1; - - /* If this is a blocking-checkpoint, then obtain the write-lock as well - ** to prevent any writers from running while the checkpoint is underway. - ** This has to be done before the call to walIndexReadHdr() below. - ** - ** If the writer lock cannot be obtained, then a passive checkpoint is - ** run instead. Since the checkpointer is not holding the writer lock, - ** there is no point in blocking waiting for any readers. Assuming no - ** other error occurs, this function will return SQLITE_BUSY to the caller. - */ - if( eMode!=SQLITE_CHECKPOINT_PASSIVE ){ - rc = walBusyLock(pWal, xBusy, pBusyArg, WAL_WRITE_LOCK, 1); - if( rc==SQLITE_OK ){ - pWal->writeLock = 1; - }else if( rc==SQLITE_BUSY ){ - eMode2 = SQLITE_CHECKPOINT_PASSIVE; - rc = SQLITE_OK; - } - } - - /* Read the wal-index header. */ - if( rc==SQLITE_OK ){ - rc = walIndexReadHdr(pWal, &isChanged); - if( isChanged && pWal->pDbFd->pMethods->iVersion>=3 ){ - sqlite3OsUnfetch(pWal->pDbFd, 0, 0); - } - } - - /* Copy data from the log to the database file. 
*/ - if( rc==SQLITE_OK ){ - if( pWal->hdr.mxFrame && walPagesize(pWal)!=nBuf ){ - rc = SQLITE_CORRUPT_BKPT; - }else{ - rc = walCheckpoint(pWal, eMode2, xBusy, pBusyArg, sync_flags, zBuf); - } - - /* If no error occurred, set the output variables. */ - if( rc==SQLITE_OK || rc==SQLITE_BUSY ){ - if( pnLog ) *pnLog = (int)pWal->hdr.mxFrame; - if( pnCkpt ) *pnCkpt = (int)(walCkptInfo(pWal)->nBackfill); - } - } - - if( isChanged ){ - /* If a new wal-index header was loaded before the checkpoint was - ** performed, then the pager-cache associated with pWal is now - ** out of date. So zero the cached wal-index header to ensure that - ** next time the pager opens a snapshot on this database it knows that - ** the cache needs to be reset. - */ - memset(&pWal->hdr, 0, sizeof(WalIndexHdr)); - } - - /* Release the locks. */ - sqlite3WalEndWriteTransaction(pWal); - walUnlockExclusive(pWal, WAL_CKPT_LOCK, 1); - pWal->ckptLock = 0; - WALTRACE(("WAL%p: checkpoint %s\n", pWal, rc ? "failed" : "ok")); - return (rc==SQLITE_OK && eMode!=eMode2 ? SQLITE_BUSY : rc); -} - -/* Return the value to pass to a sqlite3_wal_hook callback, the -** number of frames in the WAL at the point of the last commit since -** sqlite3WalCallback() was called. If no commits have occurred since -** the last call, then return 0. -*/ -SQLITE_PRIVATE int sqlite3WalCallback(Wal *pWal){ - u32 ret = 0; - if( pWal ){ - ret = pWal->iCallback; - pWal->iCallback = 0; - } - return (int)ret; -} - -/* -** This function is called to change the WAL subsystem into or out -** of locking_mode=EXCLUSIVE. -** -** If op is zero, then attempt to change from locking_mode=EXCLUSIVE -** into locking_mode=NORMAL. This means that we must acquire a lock -** on the pWal->readLock byte. If the WAL is already in locking_mode=NORMAL -** or if the acquisition of the lock fails, then return 0. If the -** transition out of exclusive-mode is successful, return 1. This -** operation must occur while the pager is still holding the exclusive -** lock on the main database file. -** -** If op is one, then change from locking_mode=NORMAL into -** locking_mode=EXCLUSIVE. This means that the pWal->readLock must -** be released. Return 1 if the transition is made and 0 if the -** WAL is already in exclusive-locking mode - meaning that this -** routine is a no-op. The pager must already hold the exclusive lock -** on the main database file before invoking this operation. -** -** If op is negative, then do a dry-run of the op==1 case but do -** not actually change anything. The pager uses this to see if it -** should acquire the database exclusive lock prior to invoking -** the op==1 case. -*/ -SQLITE_PRIVATE int sqlite3WalExclusiveMode(Wal *pWal, int op){ - int rc; - assert( pWal->writeLock==0 ); - assert( pWal->exclusiveMode!=WAL_HEAPMEMORY_MODE || op==-1 ); - - /* pWal->readLock is usually set, but might be -1 if there was a - ** prior error while attempting to acquire are read-lock. This cannot - ** happen if the connection is actually in exclusive mode (as no xShmLock - ** locks are taken in this case). Nor should the pager attempt to - ** upgrade to exclusive-mode following such an error. 
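The eMode, pnLog, and pnCkpt parameters of sqlite3WalCheckpoint() surface in the public API as sqlite3_wal_checkpoint_v2(), and the frame count produced by sqlite3WalCallback() is what a sqlite3_wal_hook() callback receives after each commit; this is essentially how the built-in auto-checkpoint hook operates. A usage sketch along those lines; the file name and the 1000-frame threshold are illustrative:

```c
#include <stdio.h>
#include <sqlite3.h>

/* Invoked after each commit in WAL mode; nFrame is the current number of
** frames in the WAL, i.e. the value returned by sqlite3WalCallback(). */
static int walHook(void *pArg, sqlite3 *db, const char *zDb, int nFrame){
  (void)pArg;
  if( nFrame>1000 ){                       /* illustrative threshold */
    int nLog = 0, nCkpt = 0;
    int rc = sqlite3_wal_checkpoint_v2(db, zDb,
                 SQLITE_CHECKPOINT_PASSIVE, &nLog, &nCkpt);
    printf("checkpoint rc=%d, %d frames in WAL, %d backfilled\n",
           rc, nLog, nCkpt);
  }
  return SQLITE_OK;
}

int main(void){
  sqlite3 *db;
  if( sqlite3_open("demo.db", &db)!=SQLITE_OK ) return 1;
  sqlite3_exec(db, "PRAGMA journal_mode=WAL;", 0, 0, 0);
  sqlite3_wal_hook(db, walHook, 0);
  /* ... application work ... */
  sqlite3_close(db);
  return 0;
}
```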
- */ - assert( pWal->readLock>=0 || pWal->lockError ); - assert( pWal->readLock>=0 || (op<=0 && pWal->exclusiveMode==0) ); - - if( op==0 ){ - if( pWal->exclusiveMode ){ - pWal->exclusiveMode = 0; - if( walLockShared(pWal, WAL_READ_LOCK(pWal->readLock))!=SQLITE_OK ){ - pWal->exclusiveMode = 1; - } - rc = pWal->exclusiveMode==0; - }else{ - /* Already in locking_mode=NORMAL */ - rc = 0; - } - }else if( op>0 ){ - assert( pWal->exclusiveMode==0 ); - assert( pWal->readLock>=0 ); - walUnlockShared(pWal, WAL_READ_LOCK(pWal->readLock)); - pWal->exclusiveMode = 1; - rc = 1; - }else{ - rc = pWal->exclusiveMode==0; - } - return rc; -} - -/* -** Return true if the argument is non-NULL and the WAL module is using -** heap-memory for the wal-index. Otherwise, if the argument is NULL or the -** WAL module is using shared-memory, return false. -*/ -SQLITE_PRIVATE int sqlite3WalHeapMemory(Wal *pWal){ - return (pWal && pWal->exclusiveMode==WAL_HEAPMEMORY_MODE ); -} - -#ifdef SQLITE_ENABLE_ZIPVFS -/* -** If the argument is not NULL, it points to a Wal object that holds a -** read-lock. This function returns the database page-size if it is known, -** or zero if it is not (or if pWal is NULL). -*/ -SQLITE_PRIVATE int sqlite3WalFramesize(Wal *pWal){ - assert( pWal==0 || pWal->readLock>=0 ); - return (pWal ? pWal->szPage : 0); -} -#endif - -#endif /* #ifndef SQLITE_OMIT_WAL */ - -/************** End of wal.c *************************************************/ -/************** Begin file btmutex.c *****************************************/ -/* -** 2007 August 27 -** -** The author disclaims copyright to this source code. In place of -** a legal notice, here is a blessing: -** -** May you do good and not evil. -** May you find forgiveness for yourself and forgive others. -** May you share freely, never taking more than you give. -** -************************************************************************* -** -** This file contains code used to implement mutexes on Btree objects. -** This code really belongs in btree.c. But btree.c is getting too -** big and we want to break it down some. This packaged seemed like -** a good breakout. -*/ -/************** Include btreeInt.h in the middle of btmutex.c ****************/ -/************** Begin file btreeInt.h ****************************************/ -/* -** 2004 April 6 -** -** The author disclaims copyright to this source code. In place of -** a legal notice, here is a blessing: -** -** May you do good and not evil. -** May you find forgiveness for yourself and forgive others. -** May you share freely, never taking more than you give. -** -************************************************************************* -** This file implements a external (disk-based) database using BTrees. -** For a detailed discussion of BTrees, refer to -** -** Donald E. Knuth, THE ART OF COMPUTER PROGRAMMING, Volume 3: -** "Sorting And Searching", pages 473-480. Addison-Wesley -** Publishing Company, Reading, Massachusetts. -** -** The basic idea is that each page of the file contains N database -** entries and N+1 pointers to subpages. -** -** ---------------------------------------------------------------- -** | Ptr(0) | Key(0) | Ptr(1) | Key(1) | ... | Key(N-1) | Ptr(N) | -** ---------------------------------------------------------------- -** -** All of the keys on the page that Ptr(0) points to have values less -** than Key(0). All of the keys on page Ptr(1) and its subpages have -** values greater than Key(0) and less than Key(1). 
All of the keys -** on Ptr(N) and its subpages have values greater than Key(N-1). And -** so forth. -** -** Finding a particular key requires reading O(log(M)) pages from the -** disk where M is the number of entries in the tree. -** -** In this implementation, a single file can hold one or more separate -** BTrees. Each BTree is identified by the index of its root page. The -** key and data for any entry are combined to form the "payload". A -** fixed amount of payload can be carried directly on the database -** page. If the payload is larger than the preset amount then surplus -** bytes are stored on overflow pages. The payload for an entry -** and the preceding pointer are combined to form a "Cell". Each -** page has a small header which contains the Ptr(N) pointer and other -** information such as the size of key and data. -** -** FORMAT DETAILS -** -** The file is divided into pages. The first page is called page 1, -** the second is page 2, and so forth. A page number of zero indicates -** "no such page". The page size can be any power of 2 between 512 and 65536. -** Each page can be either a btree page, a freelist page, an overflow -** page, or a pointer-map page. -** -** The first page is always a btree page. The first 100 bytes of the first -** page contain a special header (the "file header") that describes the file. -** The format of the file header is as follows: -** -** OFFSET SIZE DESCRIPTION -** 0 16 Header string: "SQLite format 3\000" -** 16 2 Page size in bytes. (1 means 65536) -** 18 1 File format write version -** 19 1 File format read version -** 20 1 Bytes of unused space at the end of each page -** 21 1 Max embedded payload fraction (must be 64) -** 22 1 Min embedded payload fraction (must be 32) -** 23 1 Min leaf payload fraction (must be 32) -** 24 4 File change counter -** 28 4 Reserved for future use -** 32 4 First freelist page -** 36 4 Number of freelist pages in the file -** 40 60 15 4-byte meta values passed to higher layers -** -** 40 4 Schema cookie -** 44 4 File format of schema layer -** 48 4 Size of page cache -** 52 4 Largest root-page (auto/incr_vacuum) -** 56 4 1=UTF-8 2=UTF16le 3=UTF16be -** 60 4 User version -** 64 4 Incremental vacuum mode -** 68 4 Application-ID -** 72 20 unused -** 92 4 The version-valid-for number -** 96 4 SQLITE_VERSION_NUMBER -** -** All of the integer values are big-endian (most significant byte first). -** -** The file change counter is incremented when the database is changed -** This counter allows other processes to know when the file has changed -** and thus when they need to flush their cache. -** -** The max embedded payload fraction is the amount of the total usable -** space in a page that can be consumed by a single cell for standard -** B-tree (non-LEAFDATA) tables. A value of 255 means 100%. The default -** is to limit the maximum cell size so that at least 4 cells will fit -** on one page. Thus the default max embedded payload fraction is 64. -** -** If the payload for a cell is larger than the max payload, then extra -** payload is spilled to overflow pages. Once an overflow page is allocated, -** as many bytes as possible are moved into the overflow pages without letting -** the cell size drop below the min embedded payload fraction. -** -** The min leaf payload fraction is like the min embedded payload fraction -** except that it applies to leaf nodes in a LEAFDATA tree. The maximum -** payload fraction for a LEAFDATA tree is always 100% (or 255) and it -** not specified in the header. 
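The 100-byte file header described in the table above can be decoded with nothing more than big-endian reads at the listed offsets. A small sketch that prints a few of those fields; it expects the first 100 bytes of page 1, and the file name used in main() is illustrative:

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Big-endian helpers matching the get2byte()/get4byte() macros. */
static uint32_t rd2(const uint8_t *p){ return ((uint32_t)p[0]<<8) | p[1]; }
static uint32_t rd4(const uint8_t *p){
  return ((uint32_t)p[0]<<24) | ((uint32_t)p[1]<<16)
       | ((uint32_t)p[2]<<8)  |  (uint32_t)p[3];
}

/* Decode a few fields of the 100-byte file header from page 1, using the
** offsets in the table above. aHdr must hold at least 100 bytes. */
static int dumpDbHeader(const uint8_t *aHdr){
  uint32_t szPage;
  if( memcmp(aHdr, "SQLite format 3", 16)!=0 ) return 1;  /* bad magic */
  szPage = rd2(&aHdr[16]);
  if( szPage==1 ) szPage = 65536;            /* "1 means 65536" */
  printf("page size:        %u\n", szPage);
  printf("change counter:   %u\n", rd4(&aHdr[24]));
  printf("freelist head:    %u\n", rd4(&aHdr[32]));
  printf("freelist pages:   %u\n", rd4(&aHdr[36]));
  printf("schema cookie:    %u\n", rd4(&aHdr[40]));
  printf("text encoding:    %u\n", rd4(&aHdr[56]));      /* 1=UTF-8 */
  printf("user version:     %u\n", rd4(&aHdr[60]));
  return 0;
}

int main(void){
  uint8_t aHdr[100];
  FILE *f = fopen("demo.db", "rb");          /* illustrative path */
  size_t n = f ? fread(aHdr, 1, sizeof(aHdr), f) : 0;
  if( f ) fclose(f);
  return n==sizeof(aHdr) ? dumpDbHeader(aHdr) : 1;
}
```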
-** -** Each btree pages is divided into three sections: The header, the -** cell pointer array, and the cell content area. Page 1 also has a 100-byte -** file header that occurs before the page header. -** -** |----------------| -** | file header | 100 bytes. Page 1 only. -** |----------------| -** | page header | 8 bytes for leaves. 12 bytes for interior nodes -** |----------------| -** | cell pointer | | 2 bytes per cell. Sorted order. -** | array | | Grows downward -** | | v -** |----------------| -** | unallocated | -** | space | -** |----------------| ^ Grows upwards -** | cell content | | Arbitrary order interspersed with freeblocks. -** | area | | and free space fragments. -** |----------------| -** -** The page headers looks like this: -** -** OFFSET SIZE DESCRIPTION -** 0 1 Flags. 1: intkey, 2: zerodata, 4: leafdata, 8: leaf -** 1 2 byte offset to the first freeblock -** 3 2 number of cells on this page -** 5 2 first byte of the cell content area -** 7 1 number of fragmented free bytes -** 8 4 Right child (the Ptr(N) value). Omitted on leaves. -** -** The flags define the format of this btree page. The leaf flag means that -** this page has no children. The zerodata flag means that this page carries -** only keys and no data. The intkey flag means that the key is a integer -** which is stored in the key size entry of the cell header rather than in -** the payload area. -** -** The cell pointer array begins on the first byte after the page header. -** The cell pointer array contains zero or more 2-byte numbers which are -** offsets from the beginning of the page to the cell content in the cell -** content area. The cell pointers occur in sorted order. The system strives -** to keep free space after the last cell pointer so that new cells can -** be easily added without having to defragment the page. -** -** Cell content is stored at the very end of the page and grows toward the -** beginning of the page. -** -** Unused space within the cell content area is collected into a linked list of -** freeblocks. Each freeblock is at least 4 bytes in size. The byte offset -** to the first freeblock is given in the header. Freeblocks occur in -** increasing order. Because a freeblock must be at least 4 bytes in size, -** any group of 3 or fewer unused bytes in the cell content area cannot -** exist on the freeblock chain. A group of 3 or fewer free bytes is called -** a fragment. The total number of bytes in all fragments is recorded. -** in the page header at offset 7. -** -** SIZE DESCRIPTION -** 2 Byte offset of the next freeblock -** 2 Bytes in this freeblock -** -** Cells are of variable length. Cells are stored in the cell content area at -** the end of the page. Pointers to the cells are in the cell pointer array -** that immediately follows the page header. Cells is not necessarily -** contiguous or in order, but cell pointers are contiguous and in order. -** -** Cell content makes use of variable length integers. A variable -** length integer is 1 to 9 bytes where the lower 7 bits of each -** byte are used. The integer consists of all bytes that have bit 8 set and -** the first byte with bit 8 clear. The most significant byte of the integer -** appears first. A variable-length integer may not be more than 9 bytes long. -** As a special case, all 8 bytes of the 9th byte are used as data. This -** allows a 64-bit integer to be encoded in 9 bytes. 
-** -** 0x00 becomes 0x00000000 -** 0x7f becomes 0x0000007f -** 0x81 0x00 becomes 0x00000080 -** 0x82 0x00 becomes 0x00000100 -** 0x80 0x7f becomes 0x0000007f -** 0x8a 0x91 0xd1 0xac 0x78 becomes 0x12345678 -** 0x81 0x81 0x81 0x81 0x01 becomes 0x10204081 -** -** Variable length integers are used for rowids and to hold the number of -** bytes of key and data in a btree cell. -** -** The content of a cell looks like this: -** -** SIZE DESCRIPTION -** 4 Page number of the left child. Omitted if leaf flag is set. -** var Number of bytes of data. Omitted if the zerodata flag is set. -** var Number of bytes of key. Or the key itself if intkey flag is set. -** * Payload -** 4 First page of the overflow chain. Omitted if no overflow -** -** Overflow pages form a linked list. Each page except the last is completely -** filled with data (pagesize - 4 bytes). The last page can have as little -** as 1 byte of data. -** -** SIZE DESCRIPTION -** 4 Page number of next overflow page -** * Data -** -** Freelist pages come in two subtypes: trunk pages and leaf pages. The -** file header points to the first in a linked list of trunk page. Each trunk -** page points to multiple leaf pages. The content of a leaf page is -** unspecified. A trunk page looks like this: -** -** SIZE DESCRIPTION -** 4 Page number of next trunk page -** 4 Number of leaf pointers on this page -** * zero or more pages numbers of leaves -*/ - - -/* The following value is the maximum cell size assuming a maximum page -** size give above. -*/ -#define MX_CELL_SIZE(pBt) ((int)(pBt->pageSize-8)) - -/* The maximum number of cells on a single page of the database. This -** assumes a minimum cell size of 6 bytes (4 bytes for the cell itself -** plus 2 bytes for the index to the cell in the page header). Such -** small cells will be rare, but they are possible. -*/ -#define MX_CELL(pBt) ((pBt->pageSize-8)/6) - -/* Forward declarations */ -typedef struct MemPage MemPage; -typedef struct BtLock BtLock; - -/* -** This is a magic string that appears at the beginning of every -** SQLite database in order to identify the file as a real database. -** -** You can change this value at compile-time by specifying a -** -DSQLITE_FILE_HEADER="..." on the compiler command-line. The -** header must be exactly 16 bytes including the zero-terminator so -** the string itself should be 15 characters long. If you change -** the header, then your custom library will not be able to read -** databases generated by the standard tools and the standard tools -** will not be able to read databases created by your custom library. -*/ -#ifndef SQLITE_FILE_HEADER /* 123456789 123456 */ -# define SQLITE_FILE_HEADER "SQLite format 3" -#endif - -/* -** Page type flags. An ORed combination of these flags appear as the -** first byte of on-disk image of every BTree page. -*/ -#define PTF_INTKEY 0x01 -#define PTF_ZERODATA 0x02 -#define PTF_LEAFDATA 0x04 -#define PTF_LEAF 0x08 - -/* -** As each page of the file is loaded into memory, an instance of the following -** structure is appended and initialized to zero. This structure stores -** information about the page that is decoded from the raw file page. -** -** The pParent field points back to the parent page. This allows us to -** walk up the BTree from any leaf to the root. Care must be taken to -** unref() the parent page pointer when this page is no longer referenced. -** The pageDestructor() routine handles that chore. -** -** Access to all fields of this structure is controlled by the mutex -** stored in MemPage.pBt->mutex. 
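Cell sizes, rowids, and key lengths use the variable-length integer encoding described above: big-endian, seven payload bits per byte with the high bit as a continuation flag, and all eight bits of a ninth byte used as data so a 64-bit value always fits in nine bytes. A minimal decoder sketch checked against the worked examples above (the amalgamation's own routines for this are sqlite3GetVarint()/sqlite3PutVarint()):

```c
#include <stdint.h>
#include <assert.h>

/* Decode a big-endian base-128 varint of at most 9 bytes from p.
** Returns the number of bytes consumed and stores the value in *pVal. */
static int varintGet(const uint8_t *p, uint64_t *pVal){
  uint64_t v = 0;
  int i;
  for(i=0; i<8; i++){
    v = (v<<7) | (p[i] & 0x7f);
    if( (p[i] & 0x80)==0 ){
      *pVal = v;
      return i+1;
    }
  }
  v = (v<<8) | p[8];   /* 9th byte: all eight bits are data */
  *pVal = v;
  return 9;
}

int main(void){
  static const uint8_t a[] = { 0x7f };
  static const uint8_t b[] = { 0x81, 0x00 };
  static const uint8_t c[] = { 0x81, 0x81, 0x81, 0x81, 0x01 };
  uint64_t v;
  assert( varintGet(a, &v)==1 && v==0x7f );
  assert( varintGet(b, &v)==2 && v==0x80 );
  assert( varintGet(c, &v)==5 && v==0x10204081 );
  return 0;
}
```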
-*/ -struct MemPage { - u8 isInit; /* True if previously initialized. MUST BE FIRST! */ - u8 nOverflow; /* Number of overflow cell bodies in aCell[] */ - u8 intKey; /* True if intkey flag is set */ - u8 leaf; /* True if leaf flag is set */ - u8 hasData; /* True if this page stores data */ - u8 hdrOffset; /* 100 for page 1. 0 otherwise */ - u8 childPtrSize; /* 0 if leaf==1. 4 if leaf==0 */ - u8 max1bytePayload; /* min(maxLocal,127) */ - u16 maxLocal; /* Copy of BtShared.maxLocal or BtShared.maxLeaf */ - u16 minLocal; /* Copy of BtShared.minLocal or BtShared.minLeaf */ - u16 cellOffset; /* Index in aData of first cell pointer */ - u16 nFree; /* Number of free bytes on the page */ - u16 nCell; /* Number of cells on this page, local and ovfl */ - u16 maskPage; /* Mask for page offset */ - u16 aiOvfl[5]; /* Insert the i-th overflow cell before the aiOvfl-th - ** non-overflow cell */ - u8 *apOvfl[5]; /* Pointers to the body of overflow cells */ - BtShared *pBt; /* Pointer to BtShared that this page is part of */ - u8 *aData; /* Pointer to disk image of the page data */ - u8 *aDataEnd; /* One byte past the end of usable data */ - u8 *aCellIdx; /* The cell index area */ - DbPage *pDbPage; /* Pager page handle */ - Pgno pgno; /* Page number for this page */ -}; - -/* -** The in-memory image of a disk page has the auxiliary information appended -** to the end. EXTRA_SIZE is the number of bytes of space needed to hold -** that extra information. -*/ -#define EXTRA_SIZE sizeof(MemPage) - -/* -** A linked list of the following structures is stored at BtShared.pLock. -** Locks are added (or upgraded from READ_LOCK to WRITE_LOCK) when a cursor -** is opened on the table with root page BtShared.iTable. Locks are removed -** from this list when a transaction is committed or rolled back, or when -** a btree handle is closed. -*/ -struct BtLock { - Btree *pBtree; /* Btree handle holding this lock */ - Pgno iTable; /* Root page of table */ - u8 eLock; /* READ_LOCK or WRITE_LOCK */ - BtLock *pNext; /* Next in BtShared.pLock list */ -}; - -/* Candidate values for BtLock.eLock */ -#define READ_LOCK 1 -#define WRITE_LOCK 2 - -/* A Btree handle -** -** A database connection contains a pointer to an instance of -** this object for every database file that it has open. This structure -** is opaque to the database connection. The database connection cannot -** see the internals of this structure and only deals with pointers to -** this structure. -** -** For some database files, the same underlying database cache might be -** shared between multiple connections. In that case, each connection -** has it own instance of this object. But each instance of this object -** points to the same BtShared object. The database cache and the -** schema associated with the database file are all contained within -** the BtShared object. -** -** All fields in this structure are accessed under sqlite3.mutex. -** The pBt pointer itself may not be changed while there exists cursors -** in the referenced BtShared that point back to this Btree since those -** cursors have to go through this Btree to find their BtShared and -** they often do so without holding sqlite3.mutex. 
-*/ -struct Btree { - sqlite3 *db; /* The database connection holding this btree */ - BtShared *pBt; /* Sharable content of this btree */ - u8 inTrans; /* TRANS_NONE, TRANS_READ or TRANS_WRITE */ - u8 sharable; /* True if we can share pBt with another db */ - u8 locked; /* True if db currently has pBt locked */ - int wantToLock; /* Number of nested calls to sqlite3BtreeEnter() */ - int nBackup; /* Number of backup operations reading this btree */ - Btree *pNext; /* List of other sharable Btrees from the same db */ - Btree *pPrev; /* Back pointer of the same list */ -#ifndef SQLITE_OMIT_SHARED_CACHE - BtLock lock; /* Object used to lock page 1 */ -#endif -}; - -/* -** Btree.inTrans may take one of the following values. -** -** If the shared-data extension is enabled, there may be multiple users -** of the Btree structure. At most one of these may open a write transaction, -** but any number may have active read transactions. -*/ -#define TRANS_NONE 0 -#define TRANS_READ 1 -#define TRANS_WRITE 2 - -/* -** An instance of this object represents a single database file. -** -** A single database file can be in use at the same time by two -** or more database connections. When two or more connections are -** sharing the same database file, each connection has it own -** private Btree object for the file and each of those Btrees points -** to this one BtShared object. BtShared.nRef is the number of -** connections currently sharing this database file. -** -** Fields in this structure are accessed under the BtShared.mutex -** mutex, except for nRef and pNext which are accessed under the -** global SQLITE_MUTEX_STATIC_MASTER mutex. The pPager field -** may not be modified once it is initially set as long as nRef>0. -** The pSchema field may be set once under BtShared.mutex and -** thereafter is unchanged as long as nRef>0. -** -** isPending: -** -** If a BtShared client fails to obtain a write-lock on a database -** table (because there exists one or more read-locks on the table), -** the shared-cache enters 'pending-lock' state and isPending is -** set to true. -** -** The shared-cache leaves the 'pending lock' state when either of -** the following occur: -** -** 1) The current writer (BtShared.pWriter) concludes its transaction, OR -** 2) The number of locks held by other connections drops to zero. -** -** while in the 'pending-lock' state, no connection may start a new -** transaction. -** -** This feature is included to help prevent writer-starvation. -*/ -struct BtShared { - Pager *pPager; /* The page cache */ - sqlite3 *db; /* Database connection currently using this Btree */ - BtCursor *pCursor; /* A list of all open cursors */ - MemPage *pPage1; /* First page of the database */ - u8 openFlags; /* Flags to sqlite3BtreeOpen() */ -#ifndef SQLITE_OMIT_AUTOVACUUM - u8 autoVacuum; /* True if auto-vacuum is enabled */ - u8 incrVacuum; /* True if incr-vacuum is enabled */ - u8 bDoTruncate; /* True to truncate db on commit */ -#endif - u8 inTransaction; /* Transaction state */ - u8 max1bytePayload; /* Maximum first byte of cell for a 1-byte payload */ - u16 btsFlags; /* Boolean parameters. 
See BTS_* macros below */ - u16 maxLocal; /* Maximum local payload in non-LEAFDATA tables */ - u16 minLocal; /* Minimum local payload in non-LEAFDATA tables */ - u16 maxLeaf; /* Maximum local payload in a LEAFDATA table */ - u16 minLeaf; /* Minimum local payload in a LEAFDATA table */ - u32 pageSize; /* Total number of bytes on a page */ - u32 usableSize; /* Number of usable bytes on each page */ - int nTransaction; /* Number of open transactions (read + write) */ - u32 nPage; /* Number of pages in the database */ - void *pSchema; /* Pointer to space allocated by sqlite3BtreeSchema() */ - void (*xFreeSchema)(void*); /* Destructor for BtShared.pSchema */ - sqlite3_mutex *mutex; /* Non-recursive mutex required to access this object */ - Bitvec *pHasContent; /* Set of pages moved to free-list this transaction */ -#ifndef SQLITE_OMIT_SHARED_CACHE - int nRef; /* Number of references to this structure */ - BtShared *pNext; /* Next on a list of sharable BtShared structs */ - BtLock *pLock; /* List of locks held on this shared-btree struct */ - Btree *pWriter; /* Btree with currently open write transaction */ -#endif - u8 *pTmpSpace; /* BtShared.pageSize bytes of space for tmp use */ -}; - -/* -** Allowed values for BtShared.btsFlags -*/ -#define BTS_READ_ONLY 0x0001 /* Underlying file is readonly */ -#define BTS_PAGESIZE_FIXED 0x0002 /* Page size can no longer be changed */ -#define BTS_SECURE_DELETE 0x0004 /* PRAGMA secure_delete is enabled */ -#define BTS_INITIALLY_EMPTY 0x0008 /* Database was empty at trans start */ -#define BTS_NO_WAL 0x0010 /* Do not open write-ahead-log files */ -#define BTS_EXCLUSIVE 0x0020 /* pWriter has an exclusive lock */ -#define BTS_PENDING 0x0040 /* Waiting for read-locks to clear */ - -/* -** An instance of the following structure is used to hold information -** about a cell. The parseCellPtr() function fills in this structure -** based on information extract from the raw disk page. -*/ -typedef struct CellInfo CellInfo; -struct CellInfo { - i64 nKey; /* The key for INTKEY tables, or number of bytes in key */ - u8 *pCell; /* Pointer to the start of cell content */ - u32 nData; /* Number of bytes of data */ - u32 nPayload; /* Total amount of payload */ - u16 nHeader; /* Size of the cell content header in bytes */ - u16 nLocal; /* Amount of payload held locally */ - u16 iOverflow; /* Offset to overflow page number. Zero if no overflow */ - u16 nSize; /* Size of the cell content on the main b-tree page */ -}; - -/* -** Maximum depth of an SQLite B-Tree structure. Any B-Tree deeper than -** this will be declared corrupt. This value is calculated based on a -** maximum database size of 2^31 pages a minimum fanout of 2 for a -** root-node and 3 for all other internal nodes. -** -** If a tree that appears to be taller than this is encountered, it is -** assumed that the database is corrupt. -*/ -#define BTCURSOR_MAX_DEPTH 20 - -/* -** A cursor is a pointer to a particular entry within a particular -** b-tree within a database file. -** -** The entry is identified by its MemPage and the index in -** MemPage.aCell[] of the entry. -** -** A single database file can be shared by two more database connections, -** but cursors cannot be shared. Each cursor is associated with a -** particular database connection identified BtCursor.pBtree.db. -** -** Fields in this structure are accessed under the BtShared.mutex -** found at self->pBt->mutex. 
-*/ -struct BtCursor { - Btree *pBtree; /* The Btree to which this cursor belongs */ - BtShared *pBt; /* The BtShared this cursor points to */ - BtCursor *pNext, *pPrev; /* Forms a linked list of all cursors */ - struct KeyInfo *pKeyInfo; /* Argument passed to comparison function */ - Pgno *aOverflow; /* Cache of overflow page locations */ - CellInfo info; /* A parse of the cell we are pointing at */ - i64 nKey; /* Size of pKey, or last integer key */ - void *pKey; /* Saved key that was cursor last known position */ - Pgno pgnoRoot; /* The root page of this tree */ - int nOvflAlloc; /* Allocated size of aOverflow[] array */ - int skipNext; /* Prev() is noop if negative. Next() is noop if positive */ - u8 curFlags; /* zero or more BTCF_* flags defined below */ - u8 eState; /* One of the CURSOR_XXX constants (see below) */ - u8 hints; /* As configured by CursorSetHints() */ - i16 iPage; /* Index of current page in apPage */ - u16 aiIdx[BTCURSOR_MAX_DEPTH]; /* Current index in apPage[i] */ - MemPage *apPage[BTCURSOR_MAX_DEPTH]; /* Pages from root to current page */ -}; - -/* -** Legal values for BtCursor.curFlags -*/ -#define BTCF_WriteFlag 0x01 /* True if a write cursor */ -#define BTCF_ValidNKey 0x02 /* True if info.nKey is valid */ -#define BTCF_ValidOvfl 0x04 /* True if aOverflow is valid */ -#define BTCF_AtLast 0x08 /* Cursor is pointing ot the last entry */ -#define BTCF_Incrblob 0x10 /* True if an incremental I/O handle */ - -/* -** Potential values for BtCursor.eState. -** -** CURSOR_INVALID: -** Cursor does not point to a valid entry. This can happen (for example) -** because the table is empty or because BtreeCursorFirst() has not been -** called. -** -** CURSOR_VALID: -** Cursor points to a valid entry. getPayload() etc. may be called. -** -** CURSOR_SKIPNEXT: -** Cursor is valid except that the Cursor.skipNext field is non-zero -** indicating that the next sqlite3BtreeNext() or sqlite3BtreePrevious() -** operation should be a no-op. -** -** CURSOR_REQUIRESEEK: -** The table that this cursor was opened on still exists, but has been -** modified since the cursor was last used. The cursor position is saved -** in variables BtCursor.pKey and BtCursor.nKey. When a cursor is in -** this state, restoreCursorPosition() can be called to attempt to -** seek the cursor to the saved position. -** -** CURSOR_FAULT: -** A unrecoverable error (an I/O error or a malloc failure) has occurred -** on a different connection that shares the BtShared cache with this -** cursor. The error has left the cache in an inconsistent state. -** Do nothing else with this cursor. Any attempt to use the cursor -** should return the error code stored in BtCursor.skip -*/ -#define CURSOR_INVALID 0 -#define CURSOR_VALID 1 -#define CURSOR_SKIPNEXT 2 -#define CURSOR_REQUIRESEEK 3 -#define CURSOR_FAULT 4 - -/* -** The database page the PENDING_BYTE occupies. This page is never used. -*/ -# define PENDING_BYTE_PAGE(pBt) PAGER_MJ_PGNO(pBt) - -/* -** These macros define the location of the pointer-map entry for a -** database page. The first argument to each is the number of usable -** bytes on each page of the database (often 1024). The second is the -** page number to look up in the pointer map. -** -** PTRMAP_PAGENO returns the database page number of the pointer-map -** page that stores the required pointer. PTRMAP_PTROFFSET returns -** the offset of the requested map entry. -** -** If the pgno argument passed to PTRMAP_PAGENO is a pointer-map page, -** then pgno is returned. 
So (pgno==PTRMAP_PAGENO(pgsz, pgno)) can be -** used to test if pgno is a pointer-map page. PTRMAP_ISPAGE implements -** this test. -*/ -#define PTRMAP_PAGENO(pBt, pgno) ptrmapPageno(pBt, pgno) -#define PTRMAP_PTROFFSET(pgptrmap, pgno) (5*(pgno-pgptrmap-1)) -#define PTRMAP_ISPAGE(pBt, pgno) (PTRMAP_PAGENO((pBt),(pgno))==(pgno)) - -/* -** The pointer map is a lookup table that identifies the parent page for -** each child page in the database file. The parent page is the page that -** contains a pointer to the child. Every page in the database contains -** 0 or 1 parent pages. (In this context 'database page' refers -** to any page that is not part of the pointer map itself.) Each pointer map -** entry consists of a single byte 'type' and a 4 byte parent page number. -** The PTRMAP_XXX identifiers below are the valid types. -** -** The purpose of the pointer map is to facility moving pages from one -** position in the file to another as part of autovacuum. When a page -** is moved, the pointer in its parent must be updated to point to the -** new location. The pointer map is used to locate the parent page quickly. -** -** PTRMAP_ROOTPAGE: The database page is a root-page. The page-number is not -** used in this case. -** -** PTRMAP_FREEPAGE: The database page is an unused (free) page. The page-number -** is not used in this case. -** -** PTRMAP_OVERFLOW1: The database page is the first page in a list of -** overflow pages. The page number identifies the page that -** contains the cell with a pointer to this overflow page. -** -** PTRMAP_OVERFLOW2: The database page is the second or later page in a list of -** overflow pages. The page-number identifies the previous -** page in the overflow page list. -** -** PTRMAP_BTREE: The database page is a non-root btree page. The page number -** identifies the parent page in the btree. -*/ -#define PTRMAP_ROOTPAGE 1 -#define PTRMAP_FREEPAGE 2 -#define PTRMAP_OVERFLOW1 3 -#define PTRMAP_OVERFLOW2 4 -#define PTRMAP_BTREE 5 - -/* A bunch of assert() statements to check the transaction state variables -** of handle p (type Btree*) are internally consistent. -*/ -#define btreeIntegrity(p) \ - assert( p->pBt->inTransaction!=TRANS_NONE || p->pBt->nTransaction==0 ); \ - assert( p->pBt->inTransaction>=p->inTrans ); - - -/* -** The ISAUTOVACUUM macro is used within balance_nonroot() to determine -** if the database supports auto-vacuum or not. Because it is used -** within an expression that is an argument to another macro -** (sqliteMallocRaw), it is not possible to use conditional compilation. -** So, this macro is defined instead. -*/ -#ifndef SQLITE_OMIT_AUTOVACUUM -#define ISAUTOVACUUM (pBt->autoVacuum) -#else -#define ISAUTOVACUUM 0 -#endif - - -/* -** This structure is passed around through all the sanity checking routines -** in order to keep track of some global state information. -** -** The aRef[] array is allocated so that there is 1 bit for each page in -** the database. As the integrity-check proceeds, for each page used in -** the database the corresponding bit is set. This allows integrity-check to -** detect pages that are used twice and orphaned pages (both of which -** indicate corruption). -*/ -typedef struct IntegrityCk IntegrityCk; -struct IntegrityCk { - BtShared *pBt; /* The tree being checked out */ - Pager *pPager; /* The associated pager. 
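Each pointer-map entry is five bytes, a type byte plus a four-byte big-endian parent page number, so one pointer-map page covers usableSize/5 database pages. A sketch of the arithmetic behind PTRMAP_PAGENO() and PTRMAP_PTROFFSET(), assuming the first pointer-map page is page 2 and ignoring the PENDING_BYTE adjustment that the real ptrmapPageno() also applies:

```c
#include <stdint.h>
#include <stdio.h>

typedef uint32_t Pgno;

/* Pointer-map page that holds the entry for database page pgno. The first
** pointer-map page is page 2; each map page holds usableSize/5 entries and
** is immediately followed by the pages it describes. */
static Pgno ptrmapPageFor(uint32_t usableSize, Pgno pgno){
  Pgno nPagesPerMapPage = (usableSize/5) + 1;  /* map page + described pages */
  Pgno iGroup = (pgno - 2) / nPagesPerMapPage;
  return iGroup*nPagesPerMapPage + 2;
}

/* Byte offset of pgno's 5-byte entry within its pointer-map page,
** mirroring PTRMAP_PTROFFSET(pgptrmap, pgno). */
static uint32_t ptrmapEntryOffset(Pgno pgPtrmap, Pgno pgno){
  return 5*(pgno - pgPtrmap - 1);
}

int main(void){
  uint32_t usable = 4096;
  Pgno pg = 100;
  Pgno map = ptrmapPageFor(usable, pg);
  printf("page %u -> map page %u, entry offset %u\n",
         pg, map, ptrmapEntryOffset(map, pg));
  return 0;
}
```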
Also accessible by pBt->pPager */ - u8 *aPgRef; /* 1 bit per page in the db (see above) */ - Pgno nPage; /* Number of pages in the database */ - int mxErr; /* Stop accumulating errors when this reaches zero */ - int nErr; /* Number of messages written to zErrMsg so far */ - int mallocFailed; /* A memory allocation error has occurred */ - StrAccum errMsg; /* Accumulate the error message text here */ -}; - -/* -** Routines to read or write a two- and four-byte big-endian integer values. -*/ -#define get2byte(x) ((x)[0]<<8 | (x)[1]) -#define put2byte(p,v) ((p)[0] = (u8)((v)>>8), (p)[1] = (u8)(v)) -#define get4byte sqlite3Get4byte -#define put4byte sqlite3Put4byte - -/************** End of btreeInt.h ********************************************/ -/************** Continuing where we left off in btmutex.c ********************/ -#ifndef SQLITE_OMIT_SHARED_CACHE -#if SQLITE_THREADSAFE - -/* -** Obtain the BtShared mutex associated with B-Tree handle p. Also, -** set BtShared.db to the database handle associated with p and the -** p->locked boolean to true. -*/ -static void lockBtreeMutex(Btree *p){ - assert( p->locked==0 ); - assert( sqlite3_mutex_notheld(p->pBt->mutex) ); - assert( sqlite3_mutex_held(p->db->mutex) ); - - sqlite3_mutex_enter(p->pBt->mutex); - p->pBt->db = p->db; - p->locked = 1; -} - -/* -** Release the BtShared mutex associated with B-Tree handle p and -** clear the p->locked boolean. -*/ -static void unlockBtreeMutex(Btree *p){ - BtShared *pBt = p->pBt; - assert( p->locked==1 ); - assert( sqlite3_mutex_held(pBt->mutex) ); - assert( sqlite3_mutex_held(p->db->mutex) ); - assert( p->db==pBt->db ); - - sqlite3_mutex_leave(pBt->mutex); - p->locked = 0; -} - -/* -** Enter a mutex on the given BTree object. -** -** If the object is not sharable, then no mutex is ever required -** and this routine is a no-op. The underlying mutex is non-recursive. -** But we keep a reference count in Btree.wantToLock so the behavior -** of this interface is recursive. -** -** To avoid deadlocks, multiple Btrees are locked in the same order -** by all database connections. The p->pNext is a list of other -** Btrees belonging to the same database connection as the p Btree -** which need to be locked after p. If we cannot get a lock on -** p, then first unlock all of the others on p->pNext, then wait -** for the lock to become available on p, then relock all of the -** subsequent Btrees that desire a lock. -*/ -SQLITE_PRIVATE void sqlite3BtreeEnter(Btree *p){ - Btree *pLater; - - /* Some basic sanity checking on the Btree. The list of Btrees - ** connected by pNext and pPrev should be in sorted order by - ** Btree.pBt value. All elements of the list should belong to - ** the same connection. Only shared Btrees are on the list. */ - assert( p->pNext==0 || p->pNext->pBt>p->pBt ); - assert( p->pPrev==0 || p->pPrev->pBtpBt ); - assert( p->pNext==0 || p->pNext->db==p->db ); - assert( p->pPrev==0 || p->pPrev->db==p->db ); - assert( p->sharable || (p->pNext==0 && p->pPrev==0) ); - - /* Check for locking consistency */ - assert( !p->locked || p->wantToLock>0 ); - assert( p->sharable || p->wantToLock==0 ); - - /* We should already hold a lock on the database connection */ - assert( sqlite3_mutex_held(p->db->mutex) ); - - /* Unless the database is sharable and unlocked, then BtShared.db - ** should already be set correctly. 
*/ - assert( (p->locked==0 && p->sharable) || p->pBt->db==p->db ); - - if( !p->sharable ) return; - p->wantToLock++; - if( p->locked ) return; - - /* In most cases, we should be able to acquire the lock we - ** want without having to go throught the ascending lock - ** procedure that follows. Just be sure not to block. - */ - if( sqlite3_mutex_try(p->pBt->mutex)==SQLITE_OK ){ - p->pBt->db = p->db; - p->locked = 1; - return; - } - - /* To avoid deadlock, first release all locks with a larger - ** BtShared address. Then acquire our lock. Then reacquire - ** the other BtShared locks that we used to hold in ascending - ** order. - */ - for(pLater=p->pNext; pLater; pLater=pLater->pNext){ - assert( pLater->sharable ); - assert( pLater->pNext==0 || pLater->pNext->pBt>pLater->pBt ); - assert( !pLater->locked || pLater->wantToLock>0 ); - if( pLater->locked ){ - unlockBtreeMutex(pLater); - } - } - lockBtreeMutex(p); - for(pLater=p->pNext; pLater; pLater=pLater->pNext){ - if( pLater->wantToLock ){ - lockBtreeMutex(pLater); - } - } -} - -/* -** Exit the recursive mutex on a Btree. -*/ -SQLITE_PRIVATE void sqlite3BtreeLeave(Btree *p){ - if( p->sharable ){ - assert( p->wantToLock>0 ); - p->wantToLock--; - if( p->wantToLock==0 ){ - unlockBtreeMutex(p); - } - } -} - -#ifndef NDEBUG -/* -** Return true if the BtShared mutex is held on the btree, or if the -** B-Tree is not marked as sharable. -** -** This routine is used only from within assert() statements. -*/ -SQLITE_PRIVATE int sqlite3BtreeHoldsMutex(Btree *p){ - assert( p->sharable==0 || p->locked==0 || p->wantToLock>0 ); - assert( p->sharable==0 || p->locked==0 || p->db==p->pBt->db ); - assert( p->sharable==0 || p->locked==0 || sqlite3_mutex_held(p->pBt->mutex) ); - assert( p->sharable==0 || p->locked==0 || sqlite3_mutex_held(p->db->mutex) ); - - return (p->sharable==0 || p->locked); -} -#endif - - -#ifndef SQLITE_OMIT_INCRBLOB -/* -** Enter and leave a mutex on a Btree given a cursor owned by that -** Btree. These entry points are used by incremental I/O and can be -** omitted if that module is not used. -*/ -SQLITE_PRIVATE void sqlite3BtreeEnterCursor(BtCursor *pCur){ - sqlite3BtreeEnter(pCur->pBtree); -} -SQLITE_PRIVATE void sqlite3BtreeLeaveCursor(BtCursor *pCur){ - sqlite3BtreeLeave(pCur->pBtree); -} -#endif /* SQLITE_OMIT_INCRBLOB */ - - -/* -** Enter the mutex on every Btree associated with a database -** connection. This is needed (for example) prior to parsing -** a statement since we will be comparing table and column names -** against all schemas and we do not want those schemas being -** reset out from under us. -** -** There is a corresponding leave-all procedures. -** -** Enter the mutexes in accending order by BtShared pointer address -** to avoid the possibility of deadlock when two threads with -** two or more btrees in common both try to lock all their btrees -** at the same instant. -*/ -SQLITE_PRIVATE void sqlite3BtreeEnterAll(sqlite3 *db){ - int i; - Btree *p; - assert( sqlite3_mutex_held(db->mutex) ); - for(i=0; inDb; i++){ - p = db->aDb[i].pBt; - if( p ) sqlite3BtreeEnter(p); - } -} -SQLITE_PRIVATE void sqlite3BtreeLeaveAll(sqlite3 *db){ - int i; - Btree *p; - assert( sqlite3_mutex_held(db->mutex) ); - for(i=0; inDb; i++){ - p = db->aDb[i].pBt; - if( p ) sqlite3BtreeLeave(p); - } -} - -/* -** Return true if a particular Btree requires a lock. Return FALSE if -** no lock is ever required since it is not sharable. 
-*/ -SQLITE_PRIVATE int sqlite3BtreeSharable(Btree *p){ - return p->sharable; -} - -#ifndef NDEBUG -/* -** Return true if the current thread holds the database connection -** mutex and all required BtShared mutexes. -** -** This routine is used inside assert() statements only. -*/ -SQLITE_PRIVATE int sqlite3BtreeHoldsAllMutexes(sqlite3 *db){ - int i; - if( !sqlite3_mutex_held(db->mutex) ){ - return 0; - } - for(i=0; inDb; i++){ - Btree *p; - p = db->aDb[i].pBt; - if( p && p->sharable && - (p->wantToLock==0 || !sqlite3_mutex_held(p->pBt->mutex)) ){ - return 0; - } - } - return 1; -} -#endif /* NDEBUG */ - -#ifndef NDEBUG -/* -** Return true if the correct mutexes are held for accessing the -** db->aDb[iDb].pSchema structure. The mutexes required for schema -** access are: -** -** (1) The mutex on db -** (2) if iDb!=1, then the mutex on db->aDb[iDb].pBt. -** -** If pSchema is not NULL, then iDb is computed from pSchema and -** db using sqlite3SchemaToIndex(). -*/ -SQLITE_PRIVATE int sqlite3SchemaMutexHeld(sqlite3 *db, int iDb, Schema *pSchema){ - Btree *p; - assert( db!=0 ); - if( pSchema ) iDb = sqlite3SchemaToIndex(db, pSchema); - assert( iDb>=0 && iDbnDb ); - if( !sqlite3_mutex_held(db->mutex) ) return 0; - if( iDb==1 ) return 1; - p = db->aDb[iDb].pBt; - assert( p!=0 ); - return p->sharable==0 || p->locked==1; -} -#endif /* NDEBUG */ - -#else /* SQLITE_THREADSAFE>0 above. SQLITE_THREADSAFE==0 below */ -/* -** The following are special cases for mutex enter routines for use -** in single threaded applications that use shared cache. Except for -** these two routines, all mutex operations are no-ops in that case and -** are null #defines in btree.h. -** -** If shared cache is disabled, then all btree mutex routines, including -** the ones below, are no-ops and are null #defines in btree.h. -*/ - -SQLITE_PRIVATE void sqlite3BtreeEnter(Btree *p){ - p->pBt->db = p->db; -} -SQLITE_PRIVATE void sqlite3BtreeEnterAll(sqlite3 *db){ - int i; - for(i=0; inDb; i++){ - Btree *p = db->aDb[i].pBt; - if( p ){ - p->pBt->db = p->db; - } - } -} -#endif /* if SQLITE_THREADSAFE */ -#endif /* ifndef SQLITE_OMIT_SHARED_CACHE */ - -/************** End of btmutex.c *********************************************/ -/************** Begin file btree.c *******************************************/ -/* -** 2004 April 6 -** -** The author disclaims copyright to this source code. In place of -** a legal notice, here is a blessing: -** -** May you do good and not evil. -** May you find forgiveness for yourself and forgive others. -** May you share freely, never taking more than you give. -** -************************************************************************* -** This file implements a external (disk-based) database using BTrees. -** See the header comment on "btreeInt.h" for additional information. -** Including a description of file format and an overview of operation. -*/ - -/* -** The header string that appears at the beginning of every -** SQLite database. -*/ -static const char zMagicHeader[] = SQLITE_FILE_HEADER; - -/* -** Set this global variable to 1 to enable tracing using the TRACE -** macro. -*/ -#if 0 -int sqlite3BtreeTrace=1; /* True to enable tracing */ -# define TRACE(X) if(sqlite3BtreeTrace){printf X;fflush(stdout);} -#else -# define TRACE(X) -#endif - -/* -** Extract a 2-byte big-endian integer from an array of unsigned bytes. -** But if the value is zero, make it 65536. -** -** This routine is used to extract the "offset to cell content area" value -** from the header of a btree page. 
If the page size is 65536 and the page -** is empty, the offset should be 65536, but the 2-byte value stores zero. -** This routine makes the necessary adjustment to 65536. -*/ -#define get2byteNotZero(X) (((((int)get2byte(X))-1)&0xffff)+1) - -/* -** Values passed as the 5th argument to allocateBtreePage() -*/ -#define BTALLOC_ANY 0 /* Allocate any page */ -#define BTALLOC_EXACT 1 /* Allocate exact page if possible */ -#define BTALLOC_LE 2 /* Allocate any page <= the parameter */ - -/* -** Macro IfNotOmitAV(x) returns (x) if SQLITE_OMIT_AUTOVACUUM is not -** defined, or 0 if it is. For example: -** -** bIncrVacuum = IfNotOmitAV(pBtShared->incrVacuum); -*/ -#ifndef SQLITE_OMIT_AUTOVACUUM -#define IfNotOmitAV(expr) (expr) -#else -#define IfNotOmitAV(expr) 0 -#endif - -#ifndef SQLITE_OMIT_SHARED_CACHE -/* -** A list of BtShared objects that are eligible for participation -** in shared cache. This variable has file scope during normal builds, -** but the test harness needs to access it so we make it global for -** test builds. -** -** Access to this variable is protected by SQLITE_MUTEX_STATIC_MASTER. -*/ -#ifdef SQLITE_TEST -SQLITE_PRIVATE BtShared *SQLITE_WSD sqlite3SharedCacheList = 0; -#else -static BtShared *SQLITE_WSD sqlite3SharedCacheList = 0; -#endif -#endif /* SQLITE_OMIT_SHARED_CACHE */ - -#ifndef SQLITE_OMIT_SHARED_CACHE -/* -** Enable or disable the shared pager and schema features. -** -** This routine has no effect on existing database connections. -** The shared cache setting effects only future calls to -** sqlite3_open(), sqlite3_open16(), or sqlite3_open_v2(). -*/ -SQLITE_API int sqlite3_enable_shared_cache(int enable){ - sqlite3GlobalConfig.sharedCacheEnabled = enable; - return SQLITE_OK; -} -#endif - - - -#ifdef SQLITE_OMIT_SHARED_CACHE - /* - ** The functions querySharedCacheTableLock(), setSharedCacheTableLock(), - ** and clearAllSharedCacheTableLocks() - ** manipulate entries in the BtShared.pLock linked list used to store - ** shared-cache table level locks. If the library is compiled with the - ** shared-cache feature disabled, then there is only ever one user - ** of each BtShared structure and so this locking is not necessary. - ** So define the lock related functions as no-ops. - */ - #define querySharedCacheTableLock(a,b,c) SQLITE_OK - #define setSharedCacheTableLock(a,b,c) SQLITE_OK - #define clearAllSharedCacheTableLocks(a) - #define downgradeAllSharedCacheTableLocks(a) - #define hasSharedCacheTableLock(a,b,c,d) 1 - #define hasReadConflicts(a, b) 0 -#endif - -#ifndef SQLITE_OMIT_SHARED_CACHE - -#ifdef SQLITE_DEBUG -/* -**** This function is only used as part of an assert() statement. *** -** -** Check to see if pBtree holds the required locks to read or write to the -** table with root page iRoot. Return 1 if it does and 0 if not. -** -** For example, when writing to a table with root-page iRoot via -** Btree connection pBtree: -** -** assert( hasSharedCacheTableLock(pBtree, iRoot, 0, WRITE_LOCK) ); -** -** When writing to an index that resides in a sharable database, the -** caller should have first obtained a lock specifying the root page of -** the corresponding table. This makes things a bit more complicated, -** as this module treats each table as a separate structure. To determine -** the table corresponding to the index being written, this -** function has to search through the database schema. -** -** Instead of a lock on the table/index rooted at page iRoot, the caller may -** hold a write-lock on the schema table (root page 1). 
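A small worked demonstration of the get2byteNotZero() adjustment defined above; the helper name is local to this example and only restates the macro as a function.

#include <assert.h>

/* ((v-1)&0xffff)+1 maps the stored value 0 back to 65536 and leaves
** 1..65535 untouched, which is exactly what a 16-bit header field that
** can legitimately hold 65536 needs. */
static int two_byte_not_zero(unsigned v){
  return (((int)v - 1) & 0xffff) + 1;
}

static void two_byte_not_zero_demo(void){
  assert( two_byte_not_zero(0) == 65536 );      /* empty 64 KiB page */
  assert( two_byte_not_zero(1) == 1 );
  assert( two_byte_not_zero(65535) == 65535 );
}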
This is also -** acceptable. -*/ -static int hasSharedCacheTableLock( - Btree *pBtree, /* Handle that must hold lock */ - Pgno iRoot, /* Root page of b-tree */ - int isIndex, /* True if iRoot is the root of an index b-tree */ - int eLockType /* Required lock type (READ_LOCK or WRITE_LOCK) */ -){ - Schema *pSchema = (Schema *)pBtree->pBt->pSchema; - Pgno iTab = 0; - BtLock *pLock; - - /* If this database is not shareable, or if the client is reading - ** and has the read-uncommitted flag set, then no lock is required. - ** Return true immediately. - */ - if( (pBtree->sharable==0) - || (eLockType==READ_LOCK && (pBtree->db->flags & SQLITE_ReadUncommitted)) - ){ - return 1; - } - - /* If the client is reading or writing an index and the schema is - ** not loaded, then it is too difficult to actually check to see if - ** the correct locks are held. So do not bother - just return true. - ** This case does not come up very often anyhow. - */ - if( isIndex && (!pSchema || (pSchema->flags&DB_SchemaLoaded)==0) ){ - return 1; - } - - /* Figure out the root-page that the lock should be held on. For table - ** b-trees, this is just the root page of the b-tree being read or - ** written. For index b-trees, it is the root page of the associated - ** table. */ - if( isIndex ){ - HashElem *p; - for(p=sqliteHashFirst(&pSchema->idxHash); p; p=sqliteHashNext(p)){ - Index *pIdx = (Index *)sqliteHashData(p); - if( pIdx->tnum==(int)iRoot ){ - iTab = pIdx->pTable->tnum; - } - } - }else{ - iTab = iRoot; - } - - /* Search for the required lock. Either a write-lock on root-page iTab, a - ** write-lock on the schema table, or (if the client is reading) a - ** read-lock on iTab will suffice. Return 1 if any of these are found. */ - for(pLock=pBtree->pBt->pLock; pLock; pLock=pLock->pNext){ - if( pLock->pBtree==pBtree - && (pLock->iTable==iTab || (pLock->eLock==WRITE_LOCK && pLock->iTable==1)) - && pLock->eLock>=eLockType - ){ - return 1; - } - } - - /* Failed to find the required lock. */ - return 0; -} -#endif /* SQLITE_DEBUG */ - -#ifdef SQLITE_DEBUG -/* -**** This function may be used as part of assert() statements only. **** -** -** Return true if it would be illegal for pBtree to write into the -** table or index rooted at iRoot because other shared connections are -** simultaneously reading that same table or index. -** -** It is illegal for pBtree to write if some other Btree object that -** shares the same BtShared object is currently reading or writing -** the iRoot table. Except, if the other Btree object has the -** read-uncommitted flag set, then it is OK for the other object to -** have a read cursor. -** -** For example, before writing to any part of the table or index -** rooted at page iRoot, one should call: -** -** assert( !hasReadConflicts(pBtree, iRoot) ); -*/ -static int hasReadConflicts(Btree *pBtree, Pgno iRoot){ - BtCursor *p; - for(p=pBtree->pBt->pCursor; p; p=p->pNext){ - if( p->pgnoRoot==iRoot - && p->pBtree!=pBtree - && 0==(p->pBtree->db->flags & SQLITE_ReadUncommitted) - ){ - return 1; - } - } - return 0; -} -#endif /* #ifdef SQLITE_DEBUG */ - -/* -** Query to see if Btree handle p may obtain a lock of type eLock -** (READ_LOCK or WRITE_LOCK) on the table with root-page iTab. Return -** SQLITE_OK if the lock may be obtained (by calling -** setSharedCacheTableLock()), or SQLITE_LOCKED if not. 
-*/ -static int querySharedCacheTableLock(Btree *p, Pgno iTab, u8 eLock){ - BtShared *pBt = p->pBt; - BtLock *pIter; - - assert( sqlite3BtreeHoldsMutex(p) ); - assert( eLock==READ_LOCK || eLock==WRITE_LOCK ); - assert( p->db!=0 ); - assert( !(p->db->flags&SQLITE_ReadUncommitted)||eLock==WRITE_LOCK||iTab==1 ); - - /* If requesting a write-lock, then the Btree must have an open write - ** transaction on this file. And, obviously, for this to be so there - ** must be an open write transaction on the file itself. - */ - assert( eLock==READ_LOCK || (p==pBt->pWriter && p->inTrans==TRANS_WRITE) ); - assert( eLock==READ_LOCK || pBt->inTransaction==TRANS_WRITE ); - - /* This routine is a no-op if the shared-cache is not enabled */ - if( !p->sharable ){ - return SQLITE_OK; - } - - /* If some other connection is holding an exclusive lock, the - ** requested lock may not be obtained. - */ - if( pBt->pWriter!=p && (pBt->btsFlags & BTS_EXCLUSIVE)!=0 ){ - sqlite3ConnectionBlocked(p->db, pBt->pWriter->db); - return SQLITE_LOCKED_SHAREDCACHE; - } - - for(pIter=pBt->pLock; pIter; pIter=pIter->pNext){ - /* The condition (pIter->eLock!=eLock) in the following if(...) - ** statement is a simplification of: - ** - ** (eLock==WRITE_LOCK || pIter->eLock==WRITE_LOCK) - ** - ** since we know that if eLock==WRITE_LOCK, then no other connection - ** may hold a WRITE_LOCK on any table in this file (since there can - ** only be a single writer). - */ - assert( pIter->eLock==READ_LOCK || pIter->eLock==WRITE_LOCK ); - assert( eLock==READ_LOCK || pIter->pBtree==p || pIter->eLock==READ_LOCK); - if( pIter->pBtree!=p && pIter->iTable==iTab && pIter->eLock!=eLock ){ - sqlite3ConnectionBlocked(p->db, pIter->pBtree->db); - if( eLock==WRITE_LOCK ){ - assert( p==pBt->pWriter ); - pBt->btsFlags |= BTS_PENDING; - } - return SQLITE_LOCKED_SHAREDCACHE; - } - } - return SQLITE_OK; -} -#endif /* !SQLITE_OMIT_SHARED_CACHE */ - -#ifndef SQLITE_OMIT_SHARED_CACHE -/* -** Add a lock on the table with root-page iTable to the shared-btree used -** by Btree handle p. Parameter eLock must be either READ_LOCK or -** WRITE_LOCK. -** -** This function assumes the following: -** -** (a) The specified Btree object p is connected to a sharable -** database (one with the BtShared.sharable flag set), and -** -** (b) No other Btree objects hold a lock that conflicts -** with the requested lock (i.e. querySharedCacheTableLock() has -** already been called and returned SQLITE_OK). -** -** SQLITE_OK is returned if the lock is added successfully. SQLITE_NOMEM -** is returned if a malloc attempt fails. -*/ -static int setSharedCacheTableLock(Btree *p, Pgno iTable, u8 eLock){ - BtShared *pBt = p->pBt; - BtLock *pLock = 0; - BtLock *pIter; - - assert( sqlite3BtreeHoldsMutex(p) ); - assert( eLock==READ_LOCK || eLock==WRITE_LOCK ); - assert( p->db!=0 ); - - /* A connection with the read-uncommitted flag set will never try to - ** obtain a read-lock using this function. The only read-lock obtained - ** by a connection in read-uncommitted mode is on the sqlite_master - ** table, and that lock is obtained in BtreeBeginTrans(). */ - assert( 0==(p->db->flags&SQLITE_ReadUncommitted) || eLock==WRITE_LOCK ); - - /* This function should only be called on a sharable b-tree after it - ** has been determined that no other b-tree holds a conflicting lock. */ - assert( p->sharable ); - assert( SQLITE_OK==querySharedCacheTableLock(p, iTable, eLock) ); - - /* First search the list for an existing lock on this table. 
*/ - for(pIter=pBt->pLock; pIter; pIter=pIter->pNext){ - if( pIter->iTable==iTable && pIter->pBtree==p ){ - pLock = pIter; - break; - } - } - - /* If the above search did not find a BtLock struct associating Btree p - ** with table iTable, allocate one and link it into the list. - */ - if( !pLock ){ - pLock = (BtLock *)sqlite3MallocZero(sizeof(BtLock)); - if( !pLock ){ - return SQLITE_NOMEM; - } - pLock->iTable = iTable; - pLock->pBtree = p; - pLock->pNext = pBt->pLock; - pBt->pLock = pLock; - } - - /* Set the BtLock.eLock variable to the maximum of the current lock - ** and the requested lock. This means if a write-lock was already held - ** and a read-lock requested, we don't incorrectly downgrade the lock. - */ - assert( WRITE_LOCK>READ_LOCK ); - if( eLock>pLock->eLock ){ - pLock->eLock = eLock; - } - - return SQLITE_OK; -} -#endif /* !SQLITE_OMIT_SHARED_CACHE */ - -#ifndef SQLITE_OMIT_SHARED_CACHE -/* -** Release all the table locks (locks obtained via calls to -** the setSharedCacheTableLock() procedure) held by Btree object p. -** -** This function assumes that Btree p has an open read or write -** transaction. If it does not, then the BTS_PENDING flag -** may be incorrectly cleared. -*/ -static void clearAllSharedCacheTableLocks(Btree *p){ - BtShared *pBt = p->pBt; - BtLock **ppIter = &pBt->pLock; - - assert( sqlite3BtreeHoldsMutex(p) ); - assert( p->sharable || 0==*ppIter ); - assert( p->inTrans>0 ); - - while( *ppIter ){ - BtLock *pLock = *ppIter; - assert( (pBt->btsFlags & BTS_EXCLUSIVE)==0 || pBt->pWriter==pLock->pBtree ); - assert( pLock->pBtree->inTrans>=pLock->eLock ); - if( pLock->pBtree==p ){ - *ppIter = pLock->pNext; - assert( pLock->iTable!=1 || pLock==&p->lock ); - if( pLock->iTable!=1 ){ - sqlite3_free(pLock); - } - }else{ - ppIter = &pLock->pNext; - } - } - - assert( (pBt->btsFlags & BTS_PENDING)==0 || pBt->pWriter ); - if( pBt->pWriter==p ){ - pBt->pWriter = 0; - pBt->btsFlags &= ~(BTS_EXCLUSIVE|BTS_PENDING); - }else if( pBt->nTransaction==2 ){ - /* This function is called when Btree p is concluding its - ** transaction. If there currently exists a writer, and p is not - ** that writer, then the number of locks held by connections other - ** than the writer must be about to drop to zero. In this case - ** set the BTS_PENDING flag to 0. - ** - ** If there is not currently a writer, then BTS_PENDING must - ** be zero already. So this next line is harmless in that case. - */ - pBt->btsFlags &= ~BTS_PENDING; - } -} - -/* -** This function changes all write-locks held by Btree p into read-locks. -*/ -static void downgradeAllSharedCacheTableLocks(Btree *p){ - BtShared *pBt = p->pBt; - if( pBt->pWriter==p ){ - BtLock *pLock; - pBt->pWriter = 0; - pBt->btsFlags &= ~(BTS_EXCLUSIVE|BTS_PENDING); - for(pLock=pBt->pLock; pLock; pLock=pLock->pNext){ - assert( pLock->eLock==READ_LOCK || pLock->pBtree==p ); - pLock->eLock = READ_LOCK; - } - } -} - -#endif /* SQLITE_OMIT_SHARED_CACHE */ - -static void releasePage(MemPage *pPage); /* Forward reference */ - -/* -***** This routine is used inside of assert() only **** -** -** Verify that the cursor holds the mutex on its BtShared -*/ -#ifdef SQLITE_DEBUG -static int cursorHoldsMutex(BtCursor *p){ - return sqlite3_mutex_held(p->pBt->mutex); -} -#endif - -/* -** Invalidate the overflow cache of the cursor passed as the first argument. -** on the shared btree structure pBt. 
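The lock bookkeeping in setSharedCacheTableLock() above keeps at most one BtLock per (connection, table) pair and only ever raises the stored lock level, so a later read request can never downgrade a write lock already held. A simplified, self-contained sketch of that find-or-create-then-upgrade pattern follows; all names are invented for the example.

#include <stdlib.h>

#define EX_READ_LOCK  1
#define EX_WRITE_LOCK 2

typedef struct ExLock ExLock;
struct ExLock {
  unsigned iTable;   /* root page of the locked table */
  int eLock;         /* EX_READ_LOCK or EX_WRITE_LOCK */
  ExLock *pNext;
};

/* Find an existing lock on iTable or prepend a new one, then raise the
** stored level to the maximum of the old and requested levels. */
static int ex_set_table_lock(ExLock **ppList, unsigned iTable, int eLock){
  ExLock *p;
  for(p=*ppList; p; p=p->pNext){
    if( p->iTable==iTable ) break;
  }
  if( p==0 ){
    p = calloc(1, sizeof(*p));
    if( p==0 ) return -1;            /* out of memory */
    p->iTable = iTable;
    p->pNext = *ppList;
    *ppList = p;
  }
  if( eLock > p->eLock ) p->eLock = eLock;
  return 0;
}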
-*/ -#define invalidateOverflowCache(pCur) (pCur->curFlags &= ~BTCF_ValidOvfl) - -/* -** Invalidate the overflow page-list cache for all cursors opened -** on the shared btree structure pBt. -*/ -static void invalidateAllOverflowCache(BtShared *pBt){ - BtCursor *p; - assert( sqlite3_mutex_held(pBt->mutex) ); - for(p=pBt->pCursor; p; p=p->pNext){ - invalidateOverflowCache(p); - } -} - -#ifndef SQLITE_OMIT_INCRBLOB -/* -** This function is called before modifying the contents of a table -** to invalidate any incrblob cursors that are open on the -** row or one of the rows being modified. -** -** If argument isClearTable is true, then the entire contents of the -** table is about to be deleted. In this case invalidate all incrblob -** cursors open on any row within the table with root-page pgnoRoot. -** -** Otherwise, if argument isClearTable is false, then the row with -** rowid iRow is being replaced or deleted. In this case invalidate -** only those incrblob cursors open on that specific row. -*/ -static void invalidateIncrblobCursors( - Btree *pBtree, /* The database file to check */ - i64 iRow, /* The rowid that might be changing */ - int isClearTable /* True if all rows are being deleted */ -){ - BtCursor *p; - BtShared *pBt = pBtree->pBt; - assert( sqlite3BtreeHoldsMutex(pBtree) ); - for(p=pBt->pCursor; p; p=p->pNext){ - if( (p->curFlags & BTCF_Incrblob)!=0 && (isClearTable || p->info.nKey==iRow) ){ - p->eState = CURSOR_INVALID; - } - } -} - -#else - /* Stub function when INCRBLOB is omitted */ - #define invalidateIncrblobCursors(x,y,z) -#endif /* SQLITE_OMIT_INCRBLOB */ - -/* -** Set bit pgno of the BtShared.pHasContent bitvec. This is called -** when a page that previously contained data becomes a free-list leaf -** page. -** -** The BtShared.pHasContent bitvec exists to work around an obscure -** bug caused by the interaction of two useful IO optimizations surrounding -** free-list leaf pages: -** -** 1) When all data is deleted from a page and the page becomes -** a free-list leaf page, the page is not written to the database -** (as free-list leaf pages contain no meaningful data). Sometimes -** such a page is not even journalled (as it will not be modified, -** why bother journalling it?). -** -** 2) When a free-list leaf page is reused, its content is not read -** from the database or written to the journal file (why should it -** be, if it is not at all meaningful?). -** -** By themselves, these optimizations work fine and provide a handy -** performance boost to bulk delete or insert operations. However, if -** a page is moved to the free-list and then reused within the same -** transaction, a problem comes up. If the page is not journalled when -** it is moved to the free-list and it is also not journalled when it -** is extracted from the free-list and reused, then the original data -** may be lost. In the event of a rollback, it may not be possible -** to restore the database to its original configuration. -** -** The solution is the BtShared.pHasContent bitvec. Whenever a page is -** moved to become a free-list leaf page, the corresponding bit is -** set in the bitvec. Whenever a leaf page is extracted from the free-list, -** optimization 2 above is omitted if the corresponding bit is already -** set in BtShared.pHasContent. The contents of the bitvec are cleared -** at the end of every transaction. 
-*/ -static int btreeSetHasContent(BtShared *pBt, Pgno pgno){ - int rc = SQLITE_OK; - if( !pBt->pHasContent ){ - assert( pgno<=pBt->nPage ); - pBt->pHasContent = sqlite3BitvecCreate(pBt->nPage); - if( !pBt->pHasContent ){ - rc = SQLITE_NOMEM; - } - } - if( rc==SQLITE_OK && pgno<=sqlite3BitvecSize(pBt->pHasContent) ){ - rc = sqlite3BitvecSet(pBt->pHasContent, pgno); - } - return rc; -} - -/* -** Query the BtShared.pHasContent vector. -** -** This function is called when a free-list leaf page is removed from the -** free-list for reuse. It returns false if it is safe to retrieve the -** page from the pager layer with the 'no-content' flag set. True otherwise. -*/ -static int btreeGetHasContent(BtShared *pBt, Pgno pgno){ - Bitvec *p = pBt->pHasContent; - return (p && (pgno>sqlite3BitvecSize(p) || sqlite3BitvecTest(p, pgno))); -} - -/* -** Clear (destroy) the BtShared.pHasContent bitvec. This should be -** invoked at the conclusion of each write-transaction. -*/ -static void btreeClearHasContent(BtShared *pBt){ - sqlite3BitvecDestroy(pBt->pHasContent); - pBt->pHasContent = 0; -} - -/* -** Release all of the apPage[] pages for a cursor. -*/ -static void btreeReleaseAllCursorPages(BtCursor *pCur){ - int i; - for(i=0; i<=pCur->iPage; i++){ - releasePage(pCur->apPage[i]); - pCur->apPage[i] = 0; - } - pCur->iPage = -1; -} - - -/* -** Save the current cursor position in the variables BtCursor.nKey -** and BtCursor.pKey. The cursor's state is set to CURSOR_REQUIRESEEK. -** -** The caller must ensure that the cursor is valid (has eState==CURSOR_VALID) -** prior to calling this routine. -*/ -static int saveCursorPosition(BtCursor *pCur){ - int rc; - - assert( CURSOR_VALID==pCur->eState ); - assert( 0==pCur->pKey ); - assert( cursorHoldsMutex(pCur) ); - - rc = sqlite3BtreeKeySize(pCur, &pCur->nKey); - assert( rc==SQLITE_OK ); /* KeySize() cannot fail */ - - /* If this is an intKey table, then the above call to BtreeKeySize() - ** stores the integer key in pCur->nKey. In this case this value is - ** all that is required. Otherwise, if pCur is not open on an intKey - ** table, then malloc space for and store the pCur->nKey bytes of key - ** data. - */ - if( 0==pCur->apPage[0]->intKey ){ - void *pKey = sqlite3Malloc( (int)pCur->nKey ); - if( pKey ){ - rc = sqlite3BtreeKey(pCur, 0, (int)pCur->nKey, pKey); - if( rc==SQLITE_OK ){ - pCur->pKey = pKey; - }else{ - sqlite3_free(pKey); - } - }else{ - rc = SQLITE_NOMEM; - } - } - assert( !pCur->apPage[0]->intKey || !pCur->pKey ); - - if( rc==SQLITE_OK ){ - btreeReleaseAllCursorPages(pCur); - pCur->eState = CURSOR_REQUIRESEEK; - } - - invalidateOverflowCache(pCur); - return rc; -} - -/* -** Save the positions of all cursors (except pExcept) that are open on -** the table with root-page iRoot. Usually, this is called just before cursor -** pExcept is used to modify the table (BtreeDelete() or BtreeInsert()). -*/ -static int saveAllCursors(BtShared *pBt, Pgno iRoot, BtCursor *pExcept){ - BtCursor *p; - assert( sqlite3_mutex_held(pBt->mutex) ); - assert( pExcept==0 || pExcept->pBt==pBt ); - for(p=pBt->pCursor; p; p=p->pNext){ - if( p!=pExcept && (0==iRoot || p->pgnoRoot==iRoot) ){ - if( p->eState==CURSOR_VALID ){ - int rc = saveCursorPosition(p); - if( SQLITE_OK!=rc ){ - return rc; - } - }else{ - testcase( p->iPage>0 ); - btreeReleaseAllCursorPages(p); - } - } - } - return SQLITE_OK; -} - -/* -** Clear the current cursor position. 
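A simplified stand-in for the BtShared.pHasContent mechanism described above, using a plain fixed-size bit array instead of the sqlite3Bitvec object; all names and the size limit are invented for the example. The point is the same: remember which pages became free-list leaves during the current transaction, consult that memory before applying the no-content optimization, and forget everything when the transaction ends.

#include <string.h>

#define EX_MAX_PAGE 4096                          /* pages are numbered 1..EX_MAX_PAGE */
static unsigned char exHasContent[EX_MAX_PAGE/8 + 1];

static void ex_set_has_content(unsigned pgno){    /* page just became a free-list leaf */
  exHasContent[pgno>>3] |= (unsigned char)(1 << (pgno & 7));
}

/* Nonzero means the page held real data earlier in this transaction,
** i.e. it is NOT safe to fetch it with the no-content optimization. */
static int ex_get_has_content(unsigned pgno){
  return (exHasContent[pgno>>3] >> (pgno & 7)) & 1;
}

static void ex_clear_has_content(void){           /* end of every transaction */
  memset(exHasContent, 0, sizeof(exHasContent));
}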
-*/ -SQLITE_PRIVATE void sqlite3BtreeClearCursor(BtCursor *pCur){ - assert( cursorHoldsMutex(pCur) ); - sqlite3_free(pCur->pKey); - pCur->pKey = 0; - pCur->eState = CURSOR_INVALID; -} - -/* -** In this version of BtreeMoveto, pKey is a packed index record -** such as is generated by the OP_MakeRecord opcode. Unpack the -** record and then call BtreeMovetoUnpacked() to do the work. -*/ -static int btreeMoveto( - BtCursor *pCur, /* Cursor open on the btree to be searched */ - const void *pKey, /* Packed key if the btree is an index */ - i64 nKey, /* Integer key for tables. Size of pKey for indices */ - int bias, /* Bias search to the high end */ - int *pRes /* Write search results here */ -){ - int rc; /* Status code */ - UnpackedRecord *pIdxKey; /* Unpacked index key */ - char aSpace[200]; /* Temp space for pIdxKey - to avoid a malloc */ - char *pFree = 0; - - if( pKey ){ - assert( nKey==(i64)(int)nKey ); - pIdxKey = sqlite3VdbeAllocUnpackedRecord( - pCur->pKeyInfo, aSpace, sizeof(aSpace), &pFree - ); - if( pIdxKey==0 ) return SQLITE_NOMEM; - sqlite3VdbeRecordUnpack(pCur->pKeyInfo, (int)nKey, pKey, pIdxKey); - if( pIdxKey->nField==0 ){ - sqlite3DbFree(pCur->pKeyInfo->db, pFree); - return SQLITE_CORRUPT_BKPT; - } - }else{ - pIdxKey = 0; - } - rc = sqlite3BtreeMovetoUnpacked(pCur, pIdxKey, nKey, bias, pRes); - if( pFree ){ - sqlite3DbFree(pCur->pKeyInfo->db, pFree); - } - return rc; -} - -/* -** Restore the cursor to the position it was in (or as close to as possible) -** when saveCursorPosition() was called. Note that this call deletes the -** saved position info stored by saveCursorPosition(), so there can be -** at most one effective restoreCursorPosition() call after each -** saveCursorPosition(). -*/ -static int btreeRestoreCursorPosition(BtCursor *pCur){ - int rc; - assert( cursorHoldsMutex(pCur) ); - assert( pCur->eState>=CURSOR_REQUIRESEEK ); - if( pCur->eState==CURSOR_FAULT ){ - return pCur->skipNext; - } - pCur->eState = CURSOR_INVALID; - rc = btreeMoveto(pCur, pCur->pKey, pCur->nKey, 0, &pCur->skipNext); - if( rc==SQLITE_OK ){ - sqlite3_free(pCur->pKey); - pCur->pKey = 0; - assert( pCur->eState==CURSOR_VALID || pCur->eState==CURSOR_INVALID ); - if( pCur->skipNext && pCur->eState==CURSOR_VALID ){ - pCur->eState = CURSOR_SKIPNEXT; - } - } - return rc; -} - -#define restoreCursorPosition(p) \ - (p->eState>=CURSOR_REQUIRESEEK ? \ - btreeRestoreCursorPosition(p) : \ - SQLITE_OK) - -/* -** Determine whether or not a cursor has moved from the position it -** was last placed at. Cursors can move when the row they are pointing -** at is deleted out from under them. -** -** This routine returns an error code if something goes wrong. The -** integer *pHasMoved is set as follows: -** -** 0: The cursor is unchanged -** 1: The cursor is still pointing at the same row, but the pointers -** returned by sqlite3BtreeKeyFetch() or sqlite3BtreeDataFetch() -** might now be invalid because of a balance() or other change to the -** b-tree. -** 2: The cursor is no longer pointing to the row. The row might have -** been deleted out from under the cursor. 
-*/ -SQLITE_PRIVATE int sqlite3BtreeCursorHasMoved(BtCursor *pCur, int *pHasMoved){ - int rc; - - if( pCur->eState==CURSOR_VALID ){ - *pHasMoved = 0; - return SQLITE_OK; - } - rc = restoreCursorPosition(pCur); - if( rc ){ - *pHasMoved = 2; - return rc; - } - if( pCur->eState!=CURSOR_VALID || NEVER(pCur->skipNext!=0) ){ - *pHasMoved = 2; - }else{ - *pHasMoved = 1; - } - return SQLITE_OK; -} - -#ifndef SQLITE_OMIT_AUTOVACUUM -/* -** Given a page number of a regular database page, return the page -** number for the pointer-map page that contains the entry for the -** input page number. -** -** Return 0 (not a valid page) for pgno==1 since there is -** no pointer map associated with page 1. The integrity_check logic -** requires that ptrmapPageno(*,1)!=1. -*/ -static Pgno ptrmapPageno(BtShared *pBt, Pgno pgno){ - int nPagesPerMapPage; - Pgno iPtrMap, ret; - assert( sqlite3_mutex_held(pBt->mutex) ); - if( pgno<2 ) return 0; - nPagesPerMapPage = (pBt->usableSize/5)+1; - iPtrMap = (pgno-2)/nPagesPerMapPage; - ret = (iPtrMap*nPagesPerMapPage) + 2; - if( ret==PENDING_BYTE_PAGE(pBt) ){ - ret++; - } - return ret; -} - -/* -** Write an entry into the pointer map. -** -** This routine updates the pointer map entry for page number 'key' -** so that it maps to type 'eType' and parent page number 'pgno'. -** -** If *pRC is initially non-zero (non-SQLITE_OK) then this routine is -** a no-op. If an error occurs, the appropriate error code is written -** into *pRC. -*/ -static void ptrmapPut(BtShared *pBt, Pgno key, u8 eType, Pgno parent, int *pRC){ - DbPage *pDbPage; /* The pointer map page */ - u8 *pPtrmap; /* The pointer map data */ - Pgno iPtrmap; /* The pointer map page number */ - int offset; /* Offset in pointer map page */ - int rc; /* Return code from subfunctions */ - - if( *pRC ) return; - - assert( sqlite3_mutex_held(pBt->mutex) ); - /* The master-journal page number must never be used as a pointer map page */ - assert( 0==PTRMAP_ISPAGE(pBt, PENDING_BYTE_PAGE(pBt)) ); - - assert( pBt->autoVacuum ); - if( key==0 ){ - *pRC = SQLITE_CORRUPT_BKPT; - return; - } - iPtrmap = PTRMAP_PAGENO(pBt, key); - rc = sqlite3PagerGet(pBt->pPager, iPtrmap, &pDbPage); - if( rc!=SQLITE_OK ){ - *pRC = rc; - return; - } - offset = PTRMAP_PTROFFSET(iPtrmap, key); - if( offset<0 ){ - *pRC = SQLITE_CORRUPT_BKPT; - goto ptrmap_exit; - } - assert( offset <= (int)pBt->usableSize-5 ); - pPtrmap = (u8 *)sqlite3PagerGetData(pDbPage); - - if( eType!=pPtrmap[offset] || get4byte(&pPtrmap[offset+1])!=parent ){ - TRACE(("PTRMAP_UPDATE: %d->(%d,%d)\n", key, eType, parent)); - *pRC= rc = sqlite3PagerWrite(pDbPage); - if( rc==SQLITE_OK ){ - pPtrmap[offset] = eType; - put4byte(&pPtrmap[offset+1], parent); - } - } - -ptrmap_exit: - sqlite3PagerUnref(pDbPage); -} - -/* -** Read an entry from the pointer map. -** -** This routine retrieves the pointer map entry for page 'key', writing -** the type and parent page number to *pEType and *pPgno respectively. -** An error code is returned if something goes wrong, otherwise SQLITE_OK. 
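A standalone sketch of the pointer-map arithmetic used by ptrmapPageno() above, with the PENDING_BYTE special case omitted for brevity. Each pointer-map entry is 5 bytes, so with a usable page size of 1024 bytes one map page covers itself plus 204 following pages; the helper name is invented for the example.

#include <assert.h>

static unsigned ex_ptrmap_pageno(unsigned usableSize, unsigned pgno){
  unsigned nPerMap = usableSize/5 + 1;     /* entries per map page, plus the map page itself */
  if( pgno < 2 ) return 0;                 /* page 1 has no pointer-map entry */
  return ((pgno - 2)/nPerMap)*nPerMap + 2;
}

static void ex_ptrmap_demo(void){
  assert( ex_ptrmap_pageno(1024, 3)   == 2 );    /* covered by map page 2        */
  assert( ex_ptrmap_pageno(1024, 206) == 2 );    /* last page map page 2 covers  */
  assert( ex_ptrmap_pageno(1024, 207) == 207 );  /* first page of the next group */
}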
-*/ -static int ptrmapGet(BtShared *pBt, Pgno key, u8 *pEType, Pgno *pPgno){ - DbPage *pDbPage; /* The pointer map page */ - int iPtrmap; /* Pointer map page index */ - u8 *pPtrmap; /* Pointer map page data */ - int offset; /* Offset of entry in pointer map */ - int rc; - - assert( sqlite3_mutex_held(pBt->mutex) ); - - iPtrmap = PTRMAP_PAGENO(pBt, key); - rc = sqlite3PagerGet(pBt->pPager, iPtrmap, &pDbPage); - if( rc!=0 ){ - return rc; - } - pPtrmap = (u8 *)sqlite3PagerGetData(pDbPage); - - offset = PTRMAP_PTROFFSET(iPtrmap, key); - if( offset<0 ){ - sqlite3PagerUnref(pDbPage); - return SQLITE_CORRUPT_BKPT; - } - assert( offset <= (int)pBt->usableSize-5 ); - assert( pEType!=0 ); - *pEType = pPtrmap[offset]; - if( pPgno ) *pPgno = get4byte(&pPtrmap[offset+1]); - - sqlite3PagerUnref(pDbPage); - if( *pEType<1 || *pEType>5 ) return SQLITE_CORRUPT_BKPT; - return SQLITE_OK; -} - -#else /* if defined SQLITE_OMIT_AUTOVACUUM */ - #define ptrmapPut(w,x,y,z,rc) - #define ptrmapGet(w,x,y,z) SQLITE_OK - #define ptrmapPutOvflPtr(x, y, rc) -#endif - -/* -** Given a btree page and a cell index (0 means the first cell on -** the page, 1 means the second cell, and so forth) return a pointer -** to the cell content. -** -** This routine works only for pages that do not contain overflow cells. -*/ -#define findCell(P,I) \ - ((P)->aData + ((P)->maskPage & get2byte(&(P)->aCellIdx[2*(I)]))) -#define findCellv2(D,M,O,I) (D+(M&get2byte(D+(O+2*(I))))) - - -/* -** This a more complex version of findCell() that works for -** pages that do contain overflow cells. -*/ -static u8 *findOverflowCell(MemPage *pPage, int iCell){ - int i; - assert( sqlite3_mutex_held(pPage->pBt->mutex) ); - for(i=pPage->nOverflow-1; i>=0; i--){ - int k; - k = pPage->aiOvfl[i]; - if( k<=iCell ){ - if( k==iCell ){ - return pPage->apOvfl[i]; - } - iCell--; - } - } - return findCell(pPage, iCell); -} - -/* -** Parse a cell content block and fill in the CellInfo structure. There -** are two versions of this function. btreeParseCell() takes a -** cell index as the second argument and btreeParseCellPtr() -** takes a pointer to the body of the cell as its second argument. -** -** Within this file, the parseCell() macro can be called instead of -** btreeParseCellPtr(). Using some compilers, this will be faster. -*/ -static void btreeParseCellPtr( - MemPage *pPage, /* Page containing the cell */ - u8 *pCell, /* Pointer to the cell text. */ - CellInfo *pInfo /* Fill in this structure */ -){ - u16 n; /* Number bytes in cell content header */ - u32 nPayload; /* Number of bytes of cell payload */ - - assert( sqlite3_mutex_held(pPage->pBt->mutex) ); - - pInfo->pCell = pCell; - assert( pPage->leaf==0 || pPage->leaf==1 ); - n = pPage->childPtrSize; - assert( n==4-4*pPage->leaf ); - if( pPage->intKey ){ - if( pPage->hasData ){ - assert( n==0 ); - n = getVarint32(pCell, nPayload); - }else{ - nPayload = 0; - } - n += getVarint(&pCell[n], (u64*)&pInfo->nKey); - pInfo->nData = nPayload; - }else{ - pInfo->nData = 0; - n += getVarint32(&pCell[n], nPayload); - pInfo->nKey = nPayload; - } - pInfo->nPayload = nPayload; - pInfo->nHeader = n; - testcase( nPayload==pPage->maxLocal ); - testcase( nPayload==pPage->maxLocal+1 ); - if( likely(nPayload<=pPage->maxLocal) ){ - /* This is the (easy) common case where the entire payload fits - ** on the local page. No overflow is required. 
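What the findCell() macro above computes, written out as a plain function with invented names: the cell pointer array holds 2-byte big-endian offsets into the page, one per cell, and masking with maskPage (pageSize minus 1) keeps the offset inside the page even if the stored value is damaged.

static unsigned char *ex_find_cell(unsigned char *aData,      /* start of the page   */
                                   unsigned char *aCellIdx,   /* cell pointer array  */
                                   unsigned maskPage,         /* pageSize - 1        */
                                   int iCell){
  unsigned ofst = (unsigned)(aCellIdx[2*iCell] << 8) | aCellIdx[2*iCell + 1];
  return aData + (maskPage & ofst);
}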
- */ - if( (pInfo->nSize = (u16)(n+nPayload))<4 ) pInfo->nSize = 4; - pInfo->nLocal = (u16)nPayload; - pInfo->iOverflow = 0; - }else{ - /* If the payload will not fit completely on the local page, we have - ** to decide how much to store locally and how much to spill onto - ** overflow pages. The strategy is to minimize the amount of unused - ** space on overflow pages while keeping the amount of local storage - ** in between minLocal and maxLocal. - ** - ** Warning: changing the way overflow payload is distributed in any - ** way will result in an incompatible file format. - */ - int minLocal; /* Minimum amount of payload held locally */ - int maxLocal; /* Maximum amount of payload held locally */ - int surplus; /* Overflow payload available for local storage */ - - minLocal = pPage->minLocal; - maxLocal = pPage->maxLocal; - surplus = minLocal + (nPayload - minLocal)%(pPage->pBt->usableSize - 4); - testcase( surplus==maxLocal ); - testcase( surplus==maxLocal+1 ); - if( surplus <= maxLocal ){ - pInfo->nLocal = (u16)surplus; - }else{ - pInfo->nLocal = (u16)minLocal; - } - pInfo->iOverflow = (u16)(pInfo->nLocal + n); - pInfo->nSize = pInfo->iOverflow + 4; - } -} -#define parseCell(pPage, iCell, pInfo) \ - btreeParseCellPtr((pPage), findCell((pPage), (iCell)), (pInfo)) -static void btreeParseCell( - MemPage *pPage, /* Page containing the cell */ - int iCell, /* The cell index. First cell is 0 */ - CellInfo *pInfo /* Fill in this structure */ -){ - parseCell(pPage, iCell, pInfo); -} - -/* -** Compute the total number of bytes that a Cell needs in the cell -** data area of the btree-page. The return number includes the cell -** data header and the local payload, but not any overflow page or -** the space used by the cell pointer. -*/ -static u16 cellSizePtr(MemPage *pPage, u8 *pCell){ - u8 *pIter = &pCell[pPage->childPtrSize]; - u32 nSize; - -#ifdef SQLITE_DEBUG - /* The value returned by this function should always be the same as - ** the (CellInfo.nSize) value found by doing a full parse of the - ** cell. If SQLITE_DEBUG is defined, an assert() at the bottom of - ** this function verifies that this invariant is not violated. */ - CellInfo debuginfo; - btreeParseCellPtr(pPage, pCell, &debuginfo); -#endif - - if( pPage->intKey ){ - u8 *pEnd; - if( pPage->hasData ){ - pIter += getVarint32(pIter, nSize); - }else{ - nSize = 0; - } - - /* pIter now points at the 64-bit integer key value, a variable length - ** integer. The following block moves pIter to point at the first byte - ** past the end of the key value. */ - pEnd = &pIter[9]; - while( (*pIter++)&0x80 && pItermaxLocal ); - testcase( nSize==pPage->maxLocal+1 ); - if( nSize>pPage->maxLocal ){ - int minLocal = pPage->minLocal; - nSize = minLocal + (nSize - minLocal) % (pPage->pBt->usableSize - 4); - testcase( nSize==pPage->maxLocal ); - testcase( nSize==pPage->maxLocal+1 ); - if( nSize>pPage->maxLocal ){ - nSize = minLocal; - } - nSize += 4; - } - nSize += (u32)(pIter - pCell); - - /* The minimum size of any cell is 4 bytes. */ - if( nSize<4 ){ - nSize = 4; - } - - assert( nSize==debuginfo.nSize ); - return (u16)nSize; -} - -#ifdef SQLITE_DEBUG -/* This variation on cellSizePtr() is used inside of assert() statements -** only. */ -static u16 cellSize(MemPage *pPage, int iCell){ - return cellSizePtr(pPage, findCell(pPage, iCell)); -} -#endif - -#ifndef SQLITE_OMIT_AUTOVACUUM -/* -** If the cell pCell, part of page pPage contains a pointer -** to an overflow page, insert an entry into the pointer-map -** for the overflow page. 
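The local/overflow split computed above can be condensed into the following self-contained helper, which mirrors the surplus formula from btreeParseCellPtr(): leave the last overflow page as full as possible (the modulo term) while keeping the local portion between minLocal and maxLocal. The function name is invented for the example.

static unsigned ex_local_payload(unsigned nPayload, unsigned minLocal,
                                 unsigned maxLocal, unsigned usableSize){
  unsigned surplus;
  if( nPayload <= maxLocal ) return nPayload;            /* fits locally, no overflow */
  surplus = minLocal + (nPayload - minLocal) % (usableSize - 4);
  return (surplus <= maxLocal) ? surplus : minLocal;     /* never exceed maxLocal */
}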
-*/ -static void ptrmapPutOvflPtr(MemPage *pPage, u8 *pCell, int *pRC){ - CellInfo info; - if( *pRC ) return; - assert( pCell!=0 ); - btreeParseCellPtr(pPage, pCell, &info); - assert( (info.nData+(pPage->intKey?0:info.nKey))==info.nPayload ); - if( info.iOverflow ){ - Pgno ovfl = get4byte(&pCell[info.iOverflow]); - ptrmapPut(pPage->pBt, ovfl, PTRMAP_OVERFLOW1, pPage->pgno, pRC); - } -} -#endif - - -/* -** Defragment the page given. All Cells are moved to the -** end of the page and all free space is collected into one -** big FreeBlk that occurs in between the header and cell -** pointer array and the cell content area. -*/ -static int defragmentPage(MemPage *pPage){ - int i; /* Loop counter */ - int pc; /* Address of a i-th cell */ - int hdr; /* Offset to the page header */ - int size; /* Size of a cell */ - int usableSize; /* Number of usable bytes on a page */ - int cellOffset; /* Offset to the cell pointer array */ - int cbrk; /* Offset to the cell content area */ - int nCell; /* Number of cells on the page */ - unsigned char *data; /* The page data */ - unsigned char *temp; /* Temp area for cell content */ - int iCellFirst; /* First allowable cell index */ - int iCellLast; /* Last possible cell index */ - - - assert( sqlite3PagerIswriteable(pPage->pDbPage) ); - assert( pPage->pBt!=0 ); - assert( pPage->pBt->usableSize <= SQLITE_MAX_PAGE_SIZE ); - assert( pPage->nOverflow==0 ); - assert( sqlite3_mutex_held(pPage->pBt->mutex) ); - temp = sqlite3PagerTempSpace(pPage->pBt->pPager); - data = pPage->aData; - hdr = pPage->hdrOffset; - cellOffset = pPage->cellOffset; - nCell = pPage->nCell; - assert( nCell==get2byte(&data[hdr+3]) ); - usableSize = pPage->pBt->usableSize; - cbrk = get2byte(&data[hdr+5]); - memcpy(&temp[cbrk], &data[cbrk], usableSize - cbrk); - cbrk = usableSize; - iCellFirst = cellOffset + 2*nCell; - iCellLast = usableSize - 4; - for(i=0; iiCellLast ){ - return SQLITE_CORRUPT_BKPT; - } -#endif - assert( pc>=iCellFirst && pc<=iCellLast ); - size = cellSizePtr(pPage, &temp[pc]); - cbrk -= size; -#if defined(SQLITE_ENABLE_OVERSIZE_CELL_CHECK) - if( cbrkusableSize ){ - return SQLITE_CORRUPT_BKPT; - } -#endif - assert( cbrk+size<=usableSize && cbrk>=iCellFirst ); - testcase( cbrk+size==usableSize ); - testcase( pc+size==usableSize ); - memcpy(&data[cbrk], &temp[pc], size); - put2byte(pAddr, cbrk); - } - assert( cbrk>=iCellFirst ); - put2byte(&data[hdr+5], cbrk); - data[hdr+1] = 0; - data[hdr+2] = 0; - data[hdr+7] = 0; - memset(&data[iCellFirst], 0, cbrk-iCellFirst); - assert( sqlite3PagerIswriteable(pPage->pDbPage) ); - if( cbrk-iCellFirst!=pPage->nFree ){ - return SQLITE_CORRUPT_BKPT; - } - return SQLITE_OK; -} - -/* -** Allocate nByte bytes of space from within the B-Tree page passed -** as the first argument. Write into *pIdx the index into pPage->aData[] -** of the first byte of allocated space. Return either SQLITE_OK or -** an error code (usually SQLITE_CORRUPT). -** -** The caller guarantees that there is sufficient space to make the -** allocation. This routine might need to defragment in order to bring -** all the space together, however. This routine will avoid using -** the first two bytes past the cell pointer area since presumably this -** allocation is being made in order to insert a new cell, so we will -** also end up needing a new cell pointer. 
-*/ -static int allocateSpace(MemPage *pPage, int nByte, int *pIdx){ - const int hdr = pPage->hdrOffset; /* Local cache of pPage->hdrOffset */ - u8 * const data = pPage->aData; /* Local cache of pPage->aData */ - int nFrag; /* Number of fragmented bytes on pPage */ - int top; /* First byte of cell content area */ - int gap; /* First byte of gap between cell pointers and cell content */ - int rc; /* Integer return code */ - int usableSize; /* Usable size of the page */ - - assert( sqlite3PagerIswriteable(pPage->pDbPage) ); - assert( pPage->pBt ); - assert( sqlite3_mutex_held(pPage->pBt->mutex) ); - assert( nByte>=0 ); /* Minimum cell size is 4 */ - assert( pPage->nFree>=nByte ); - assert( pPage->nOverflow==0 ); - usableSize = pPage->pBt->usableSize; - assert( nByte < usableSize-8 ); - - nFrag = data[hdr+7]; - assert( pPage->cellOffset == hdr + 12 - 4*pPage->leaf ); - gap = pPage->cellOffset + 2*pPage->nCell; - top = get2byteNotZero(&data[hdr+5]); - if( gap>top ) return SQLITE_CORRUPT_BKPT; - testcase( gap+2==top ); - testcase( gap+1==top ); - testcase( gap==top ); - - if( nFrag>=60 ){ - /* Always defragment highly fragmented pages */ - rc = defragmentPage(pPage); - if( rc ) return rc; - top = get2byteNotZero(&data[hdr+5]); - }else if( gap+2<=top ){ - /* Search the freelist looking for a free slot big enough to satisfy - ** the request. The allocation is made from the first free slot in - ** the list that is large enough to accommodate it. - */ - int pc, addr; - for(addr=hdr+1; (pc = get2byte(&data[addr]))>0; addr=pc){ - int size; /* Size of the free slot */ - if( pc>usableSize-4 || pc=nByte ){ - int x = size - nByte; - testcase( x==4 ); - testcase( x==3 ); - if( x<4 ){ - /* Remove the slot from the free-list. Update the number of - ** fragmented bytes within the page. */ - memcpy(&data[addr], &data[pc], 2); - data[hdr+7] = (u8)(nFrag + x); - }else if( size+pc > usableSize ){ - return SQLITE_CORRUPT_BKPT; - }else{ - /* The slot remains on the free-list. Reduce its size to account - ** for the portion used by the new allocation. */ - put2byte(&data[pc+2], x); - } - *pIdx = pc + x; - return SQLITE_OK; - } - } - } - - /* Check to make sure there is enough space in the gap to satisfy - ** the allocation. If not, defragment. - */ - testcase( gap+2+nByte==top ); - if( gap+2+nByte>top ){ - rc = defragmentPage(pPage); - if( rc ) return rc; - top = get2byteNotZero(&data[hdr+5]); - assert( gap+nByte<=top ); - } - - - /* Allocate memory from the gap in between the cell pointer array - ** and the cell content area. The btreeInitPage() call has already - ** validated the freelist. Given that the freelist is valid, there - ** is no way that the allocation can extend off the end of the page. - ** The assert() below verifies the previous sentence. - */ - top -= nByte; - put2byte(&data[hdr+5], top); - assert( top+nByte <= (int)pPage->pBt->usableSize ); - *pIdx = top; - return SQLITE_OK; -} - -/* -** Return a section of the pPage->aData to the freelist. -** The first byte of the new free block is pPage->aDisk[start] -** and the size of the block is "size" bytes. -** -** Most of the effort here is involved in coalesing adjacent -** free blocks into a single big free block. 
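A simplified sketch of the first-fit rule that allocateSpace() applies to the freeblock chain, expressed over a plain array of slots instead of the on-page linked list; names are invented for the example. The allocation is carved from the tail of the chosen slot, and a remainder smaller than 4 bytes (too small to be a freeblock) is consumed and counted as fragmentation.

typedef struct ExSlot { int offset; int size; } ExSlot;

static int ex_first_fit(ExSlot *aSlot, int nSlot, int nByte, int *pnFrag){
  int i;
  for(i=0; i<nSlot; i++){
    if( aSlot[i].size >= nByte ){
      int rest = aSlot[i].size - nByte;
      if( rest < 4 ){
        *pnFrag += rest;               /* slot removed; leftover bytes become fragments */
        aSlot[i].size = 0;
        return aSlot[i].offset + rest; /* allocation starts after the fragment */
      }
      aSlot[i].size = rest;            /* shrink slot; allocate from its end */
      return aSlot[i].offset + rest;
    }
  }
  return -1;                           /* no slot large enough */
}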
-*/ -static int freeSpace(MemPage *pPage, int start, int size){ - int addr, pbegin, hdr; - int iLast; /* Largest possible freeblock offset */ - unsigned char *data = pPage->aData; - - assert( pPage->pBt!=0 ); - assert( sqlite3PagerIswriteable(pPage->pDbPage) ); - assert( start>=pPage->hdrOffset+6+pPage->childPtrSize ); - assert( (start + size) <= (int)pPage->pBt->usableSize ); - assert( sqlite3_mutex_held(pPage->pBt->mutex) ); - assert( size>=0 ); /* Minimum cell size is 4 */ - - if( pPage->pBt->btsFlags & BTS_SECURE_DELETE ){ - /* Overwrite deleted information with zeros when the secure_delete - ** option is enabled */ - memset(&data[start], 0, size); - } - - /* Add the space back into the linked list of freeblocks. Note that - ** even though the freeblock list was checked by btreeInitPage(), - ** btreeInitPage() did not detect overlapping cells or - ** freeblocks that overlapped cells. Nor does it detect when the - ** cell content area exceeds the value in the page header. If these - ** situations arise, then subsequent insert operations might corrupt - ** the freelist. So we do need to check for corruption while scanning - ** the freelist. - */ - hdr = pPage->hdrOffset; - addr = hdr + 1; - iLast = pPage->pBt->usableSize - 4; - assert( start<=iLast ); - while( (pbegin = get2byte(&data[addr]))0 ){ - if( pbeginiLast ){ - return SQLITE_CORRUPT_BKPT; - } - assert( pbegin>addr || pbegin==0 ); - put2byte(&data[addr], start); - put2byte(&data[start], pbegin); - put2byte(&data[start+2], size); - pPage->nFree = pPage->nFree + (u16)size; - - /* Coalesce adjacent free blocks */ - addr = hdr + 1; - while( (pbegin = get2byte(&data[addr]))>0 ){ - int pnext, psize, x; - assert( pbegin>addr ); - assert( pbegin <= (int)pPage->pBt->usableSize-4 ); - pnext = get2byte(&data[pbegin]); - psize = get2byte(&data[pbegin+2]); - if( pbegin + psize + 3 >= pnext && pnext>0 ){ - int frag = pnext - (pbegin+psize); - if( (frag<0) || (frag>(int)data[hdr+7]) ){ - return SQLITE_CORRUPT_BKPT; - } - data[hdr+7] -= (u8)frag; - x = get2byte(&data[pnext]); - put2byte(&data[pbegin], x); - x = pnext + get2byte(&data[pnext+2]) - pbegin; - put2byte(&data[pbegin+2], x); - }else{ - addr = pbegin; - } - } - - /* If the cell content area begins with a freeblock, remove it. */ - if( data[hdr+1]==data[hdr+5] && data[hdr+2]==data[hdr+6] ){ - int top; - pbegin = get2byte(&data[hdr+1]); - memcpy(&data[hdr+1], &data[pbegin], 2); - top = get2byte(&data[hdr+5]) + get2byte(&data[pbegin+2]); - put2byte(&data[hdr+5], top); - } - assert( sqlite3PagerIswriteable(pPage->pDbPage) ); - return SQLITE_OK; -} - -/* -** Decode the flags byte (the first byte of the header) for a page -** and initialize fields of the MemPage structure accordingly. -** -** Only the following combinations are supported. Anything different -** indicates a corrupt database files: -** -** PTF_ZERODATA -** PTF_ZERODATA | PTF_LEAF -** PTF_LEAFDATA | PTF_INTKEY -** PTF_LEAFDATA | PTF_INTKEY | PTF_LEAF -*/ -static int decodeFlags(MemPage *pPage, int flagByte){ - BtShared *pBt; /* A copy of pPage->pBt */ - - assert( pPage->hdrOffset==(pPage->pgno==1 ? 
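The coalescing step of freeSpace() can be illustrated with a self-contained helper over (start, size) pairs rather than on-page offsets; names are invented for the example. Two freeblocks merge when the gap between them is at most 3 bytes, since such a gap is too small to stand as a freeblock of its own; the gap bytes stop counting as fragments and become part of the merged block.

typedef struct ExBlk { int start; int size; } ExBlk;

static int ex_try_merge(ExBlk *pA, const ExBlk *pB, int *pnFrag){
  int gap = pB->start - (pA->start + pA->size);
  if( gap >= 0 && gap <= 3 ){
    *pnFrag -= gap;                              /* gap absorbed into the block */
    pA->size = (pB->start + pB->size) - pA->start;
    return 1;                                    /* merged */
  }
  return 0;                                      /* blocks stay separate */
}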
100 : 0) ); - assert( sqlite3_mutex_held(pPage->pBt->mutex) ); - pPage->leaf = (u8)(flagByte>>3); assert( PTF_LEAF == 1<<3 ); - flagByte &= ~PTF_LEAF; - pPage->childPtrSize = 4-4*pPage->leaf; - pBt = pPage->pBt; - if( flagByte==(PTF_LEAFDATA | PTF_INTKEY) ){ - pPage->intKey = 1; - pPage->hasData = pPage->leaf; - pPage->maxLocal = pBt->maxLeaf; - pPage->minLocal = pBt->minLeaf; - }else if( flagByte==PTF_ZERODATA ){ - pPage->intKey = 0; - pPage->hasData = 0; - pPage->maxLocal = pBt->maxLocal; - pPage->minLocal = pBt->minLocal; - }else{ - return SQLITE_CORRUPT_BKPT; - } - pPage->max1bytePayload = pBt->max1bytePayload; - return SQLITE_OK; -} - -/* -** Initialize the auxiliary information for a disk block. -** -** Return SQLITE_OK on success. If we see that the page does -** not contain a well-formed database page, then return -** SQLITE_CORRUPT. Note that a return of SQLITE_OK does not -** guarantee that the page is well-formed. It only shows that -** we failed to detect any corruption. -*/ -static int btreeInitPage(MemPage *pPage){ - - assert( pPage->pBt!=0 ); - assert( sqlite3_mutex_held(pPage->pBt->mutex) ); - assert( pPage->pgno==sqlite3PagerPagenumber(pPage->pDbPage) ); - assert( pPage == sqlite3PagerGetExtra(pPage->pDbPage) ); - assert( pPage->aData == sqlite3PagerGetData(pPage->pDbPage) ); - - if( !pPage->isInit ){ - u16 pc; /* Address of a freeblock within pPage->aData[] */ - u8 hdr; /* Offset to beginning of page header */ - u8 *data; /* Equal to pPage->aData */ - BtShared *pBt; /* The main btree structure */ - int usableSize; /* Amount of usable space on each page */ - u16 cellOffset; /* Offset from start of page to first cell pointer */ - int nFree; /* Number of unused bytes on the page */ - int top; /* First byte of the cell content area */ - int iCellFirst; /* First allowable cell or freeblock offset */ - int iCellLast; /* Last possible cell or freeblock offset */ - - pBt = pPage->pBt; - - hdr = pPage->hdrOffset; - data = pPage->aData; - if( decodeFlags(pPage, data[hdr]) ) return SQLITE_CORRUPT_BKPT; - assert( pBt->pageSize>=512 && pBt->pageSize<=65536 ); - pPage->maskPage = (u16)(pBt->pageSize - 1); - pPage->nOverflow = 0; - usableSize = pBt->usableSize; - pPage->cellOffset = cellOffset = hdr + 12 - 4*pPage->leaf; - pPage->aDataEnd = &data[usableSize]; - pPage->aCellIdx = &data[cellOffset]; - top = get2byteNotZero(&data[hdr+5]); - pPage->nCell = get2byte(&data[hdr+3]); - if( pPage->nCell>MX_CELL(pBt) ){ - /* To many cells for a single page. The page must be corrupt */ - return SQLITE_CORRUPT_BKPT; - } - testcase( pPage->nCell==MX_CELL(pBt) ); - - /* A malformed database page might cause us to read past the end - ** of page when parsing a cell. - ** - ** The following block of code checks early to see if a cell extends - ** past the end of a page boundary and causes SQLITE_CORRUPT to be - ** returned if it does. 
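For reference, the four legal flag bytes that decodeFlags() above accepts, spelled out numerically. The PTF_* values assumed here (INTKEY=0x01, ZERODATA=0x02, LEAFDATA=0x04, LEAF=0x08) come from btreeInt.h, which is not shown in this hunk, so treat them as an assumption of the example.

#include <assert.h>

static void ex_flag_byte_demo(void){
  assert( (0x02)           ==  2 );   /* interior index page: ZERODATA              */
  assert( (0x02|0x08)      == 10 );   /* leaf index page:     ZERODATA|LEAF         */
  assert( (0x04|0x01)      ==  5 );   /* interior table page: LEAFDATA|INTKEY       */
  assert( (0x04|0x01|0x08) == 13 );   /* leaf table page:     LEAFDATA|INTKEY|LEAF  */
  assert( (13 >> 3)==1 && (5 >> 3)==0 );  /* the leaf flag is bit 3, as decodeFlags uses */
}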
- */ - iCellFirst = cellOffset + 2*pPage->nCell; - iCellLast = usableSize - 4; -#if defined(SQLITE_ENABLE_OVERSIZE_CELL_CHECK) - { - int i; /* Index into the cell pointer array */ - int sz; /* Size of a cell */ - - if( !pPage->leaf ) iCellLast--; - for(i=0; inCell; i++){ - pc = get2byte(&data[cellOffset+i*2]); - testcase( pc==iCellFirst ); - testcase( pc==iCellLast ); - if( pciCellLast ){ - return SQLITE_CORRUPT_BKPT; - } - sz = cellSizePtr(pPage, &data[pc]); - testcase( pc+sz==usableSize ); - if( pc+sz>usableSize ){ - return SQLITE_CORRUPT_BKPT; - } - } - if( !pPage->leaf ) iCellLast++; - } -#endif - - /* Compute the total free space on the page */ - pc = get2byte(&data[hdr+1]); - nFree = data[hdr+7] + top; - while( pc>0 ){ - u16 next, size; - if( pciCellLast ){ - /* Start of free block is off the page */ - return SQLITE_CORRUPT_BKPT; - } - next = get2byte(&data[pc]); - size = get2byte(&data[pc+2]); - if( (next>0 && next<=pc+size+3) || pc+size>usableSize ){ - /* Free blocks must be in ascending order. And the last byte of - ** the free-block must lie on the database page. */ - return SQLITE_CORRUPT_BKPT; - } - nFree = nFree + size; - pc = next; - } - - /* At this point, nFree contains the sum of the offset to the start - ** of the cell-content area plus the number of free bytes within - ** the cell-content area. If this is greater than the usable-size - ** of the page, then the page must be corrupted. This check also - ** serves to verify that the offset to the start of the cell-content - ** area, according to the page header, lies within the page. - */ - if( nFree>usableSize ){ - return SQLITE_CORRUPT_BKPT; - } - pPage->nFree = (u16)(nFree - iCellFirst); - pPage->isInit = 1; - } - return SQLITE_OK; -} - -/* -** Set up a raw page so that it looks like a database page holding -** no entries. -*/ -static void zeroPage(MemPage *pPage, int flags){ - unsigned char *data = pPage->aData; - BtShared *pBt = pPage->pBt; - u8 hdr = pPage->hdrOffset; - u16 first; - - assert( sqlite3PagerPagenumber(pPage->pDbPage)==pPage->pgno ); - assert( sqlite3PagerGetExtra(pPage->pDbPage) == (void*)pPage ); - assert( sqlite3PagerGetData(pPage->pDbPage) == data ); - assert( sqlite3PagerIswriteable(pPage->pDbPage) ); - assert( sqlite3_mutex_held(pBt->mutex) ); - if( pBt->btsFlags & BTS_SECURE_DELETE ){ - memset(&data[hdr], 0, pBt->usableSize - hdr); - } - data[hdr] = (char)flags; - first = hdr + ((flags&PTF_LEAF)==0 ? 12 : 8); - memset(&data[hdr+1], 0, 4); - data[hdr+7] = 0; - put2byte(&data[hdr+5], pBt->usableSize); - pPage->nFree = (u16)(pBt->usableSize - first); - decodeFlags(pPage, flags); - pPage->cellOffset = first; - pPage->aDataEnd = &data[pBt->usableSize]; - pPage->aCellIdx = &data[first]; - pPage->nOverflow = 0; - assert( pBt->pageSize>=512 && pBt->pageSize<=65536 ); - pPage->maskPage = (u16)(pBt->pageSize - 1); - pPage->nCell = 0; - pPage->isInit = 1; -} - - -/* -** Convert a DbPage obtained from the pager into a MemPage used by -** the btree layer. -*/ -static MemPage *btreePageFromDbPage(DbPage *pDbPage, Pgno pgno, BtShared *pBt){ - MemPage *pPage = (MemPage*)sqlite3PagerGetExtra(pDbPage); - pPage->aData = sqlite3PagerGetData(pDbPage); - pPage->pDbPage = pDbPage; - pPage->pBt = pBt; - pPage->pgno = pgno; - pPage->hdrOffset = pPage->pgno==1 ? 100 : 0; - return pPage; -} - -/* -** Get a page from the pager. Initialize the MemPage.pBt and -** MemPage.aData elements if needed. -** -** If the noContent flag is set, it means that we do not care about -** the content of the page at this time. 
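A worked numeric example of the free-space computation in btreeInitPage() above, for a hypothetical leaf table page; all concrete numbers are invented for the illustration. Free space is the fragment count plus the offset to the cell content area plus the chained freeblock sizes, checked against the usable size and then reduced by the end of the cell pointer array.

#include <assert.h>

static void ex_page_free_space_demo(void){
  int usableSize = 1024;
  int iCellFirst = 8 + 2*2;            /* leaf cellOffset (12-4) + 2 bytes per cell, 2 cells */
  int top        = 900;                /* start of the cell content area                     */
  int nFrag      = 0;                  /* fragmented bytes (header byte 7)                   */
  int nFreeBlk   = 24;                 /* total size of chained freeblocks                   */
  int nFree      = nFrag + top + nFreeBlk;
  assert( nFree <= usableSize );       /* otherwise the page is corrupt                      */
  assert( nFree - iCellFirst == 912 ); /* the value that ends up in MemPage.nFree            */
}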
So do not go to the disk -** to fetch the content. Just fill in the content with zeros for now. -** If in the future we call sqlite3PagerWrite() on this page, that -** means we have started to be concerned about content and the disk -** read should occur at that point. -*/ -static int btreeGetPage( - BtShared *pBt, /* The btree */ - Pgno pgno, /* Number of the page to fetch */ - MemPage **ppPage, /* Return the page in this parameter */ - int flags /* PAGER_GET_NOCONTENT or PAGER_GET_READONLY */ -){ - int rc; - DbPage *pDbPage; - - assert( flags==0 || flags==PAGER_GET_NOCONTENT || flags==PAGER_GET_READONLY ); - assert( sqlite3_mutex_held(pBt->mutex) ); - rc = sqlite3PagerAcquire(pBt->pPager, pgno, (DbPage**)&pDbPage, flags); - if( rc ) return rc; - *ppPage = btreePageFromDbPage(pDbPage, pgno, pBt); - return SQLITE_OK; -} - -/* -** Retrieve a page from the pager cache. If the requested page is not -** already in the pager cache return NULL. Initialize the MemPage.pBt and -** MemPage.aData elements if needed. -*/ -static MemPage *btreePageLookup(BtShared *pBt, Pgno pgno){ - DbPage *pDbPage; - assert( sqlite3_mutex_held(pBt->mutex) ); - pDbPage = sqlite3PagerLookup(pBt->pPager, pgno); - if( pDbPage ){ - return btreePageFromDbPage(pDbPage, pgno, pBt); - } - return 0; -} - -/* -** Return the size of the database file in pages. If there is any kind of -** error, return ((unsigned int)-1). -*/ -static Pgno btreePagecount(BtShared *pBt){ - return pBt->nPage; -} -SQLITE_PRIVATE u32 sqlite3BtreeLastPage(Btree *p){ - assert( sqlite3BtreeHoldsMutex(p) ); - assert( ((p->pBt->nPage)&0x8000000)==0 ); - return (int)btreePagecount(p->pBt); -} - -/* -** Get a page from the pager and initialize it. This routine is just a -** convenience wrapper around separate calls to btreeGetPage() and -** btreeInitPage(). -** -** If an error occurs, then the value *ppPage is set to is undefined. It -** may remain unchanged, or it may be set to an invalid value. -*/ -static int getAndInitPage( - BtShared *pBt, /* The database file */ - Pgno pgno, /* Number of the page to get */ - MemPage **ppPage, /* Write the page pointer here */ - int bReadonly /* PAGER_GET_READONLY or 0 */ -){ - int rc; - assert( sqlite3_mutex_held(pBt->mutex) ); - assert( bReadonly==PAGER_GET_READONLY || bReadonly==0 ); - - if( pgno>btreePagecount(pBt) ){ - rc = SQLITE_CORRUPT_BKPT; - }else{ - rc = btreeGetPage(pBt, pgno, ppPage, bReadonly); - if( rc==SQLITE_OK && (*ppPage)->isInit==0 ){ - rc = btreeInitPage(*ppPage); - if( rc!=SQLITE_OK ){ - releasePage(*ppPage); - } - } - } - - testcase( pgno==0 ); - assert( pgno!=0 || rc==SQLITE_CORRUPT ); - return rc; -} - -/* -** Release a MemPage. This should be called once for each prior -** call to btreeGetPage. -*/ -static void releasePage(MemPage *pPage){ - if( pPage ){ - assert( pPage->aData ); - assert( pPage->pBt ); - assert( pPage->pDbPage!=0 ); - assert( sqlite3PagerGetExtra(pPage->pDbPage) == (void*)pPage ); - assert( sqlite3PagerGetData(pPage->pDbPage)==pPage->aData ); - assert( sqlite3_mutex_held(pPage->pBt->mutex) ); - sqlite3PagerUnrefNotNull(pPage->pDbPage); - } -} - -/* -** During a rollback, when the pager reloads information into the cache -** so that the cache is restored to its original state at the start of -** the transaction, for each page restored this routine is called. -** -** This routine needs to reset the extra data section at the end of the -** page to agree with the restored data. 
-*/ -static void pageReinit(DbPage *pData){ - MemPage *pPage; - pPage = (MemPage *)sqlite3PagerGetExtra(pData); - assert( sqlite3PagerPageRefcount(pData)>0 ); - if( pPage->isInit ){ - assert( sqlite3_mutex_held(pPage->pBt->mutex) ); - pPage->isInit = 0; - if( sqlite3PagerPageRefcount(pData)>1 ){ - /* pPage might not be a btree page; it might be an overflow page - ** or ptrmap page or a free page. In those cases, the following - ** call to btreeInitPage() will likely return SQLITE_CORRUPT. - ** But no harm is done by this. And it is very important that - ** btreeInitPage() be called on every btree page so we make - ** the call for every page that comes in for re-initing. */ - btreeInitPage(pPage); - } - } -} - -/* -** Invoke the busy handler for a btree. -*/ -static int btreeInvokeBusyHandler(void *pArg){ - BtShared *pBt = (BtShared*)pArg; - assert( pBt->db ); - assert( sqlite3_mutex_held(pBt->db->mutex) ); - return sqlite3InvokeBusyHandler(&pBt->db->busyHandler); -} - -/* -** Open a database file. -** -** zFilename is the name of the database file. If zFilename is NULL -** then an ephemeral database is created. The ephemeral database might -** be exclusively in memory, or it might use a disk-based memory cache. -** Either way, the ephemeral database will be automatically deleted -** when sqlite3BtreeClose() is called. -** -** If zFilename is ":memory:" then an in-memory database is created -** that is automatically destroyed when it is closed. -** -** The "flags" parameter is a bitmask that might contain bits like -** BTREE_OMIT_JOURNAL and/or BTREE_MEMORY. -** -** If the database is already opened in the same database connection -** and we are in shared cache mode, then the open will fail with an -** SQLITE_CONSTRAINT error. We cannot allow two or more BtShared -** objects in the same database connection since doing so will lead -** to problems with locking. -*/ -SQLITE_PRIVATE int sqlite3BtreeOpen( - sqlite3_vfs *pVfs, /* VFS to use for this b-tree */ - const char *zFilename, /* Name of the file containing the BTree database */ - sqlite3 *db, /* Associated database handle */ - Btree **ppBtree, /* Pointer to new Btree object written here */ - int flags, /* Options */ - int vfsFlags /* Flags passed through to sqlite3_vfs.xOpen() */ -){ - BtShared *pBt = 0; /* Shared part of btree structure */ - Btree *p; /* Handle to return */ - sqlite3_mutex *mutexOpen = 0; /* Prevents a race condition. Ticket #3537 */ - int rc = SQLITE_OK; /* Result code from this function */ - u8 nReserve; /* Byte of unused space on each page */ - unsigned char zDbHeader[100]; /* Database header content */ - - /* True if opening an ephemeral, temporary database */ - const int isTempDb = zFilename==0 || zFilename[0]==0; - - /* Set the variable isMemdb to true for an in-memory database, or - ** false for a file-based database. 
- */ -#ifdef SQLITE_OMIT_MEMORYDB - const int isMemdb = 0; -#else - const int isMemdb = (zFilename && strcmp(zFilename, ":memory:")==0) - || (isTempDb && sqlite3TempInMemory(db)) - || (vfsFlags & SQLITE_OPEN_MEMORY)!=0; -#endif - - assert( db!=0 ); - assert( pVfs!=0 ); - assert( sqlite3_mutex_held(db->mutex) ); - assert( (flags&0xff)==flags ); /* flags fit in 8 bits */ - - /* Only a BTREE_SINGLE database can be BTREE_UNORDERED */ - assert( (flags & BTREE_UNORDERED)==0 || (flags & BTREE_SINGLE)!=0 ); - - /* A BTREE_SINGLE database is always a temporary and/or ephemeral */ - assert( (flags & BTREE_SINGLE)==0 || isTempDb ); - - if( isMemdb ){ - flags |= BTREE_MEMORY; - } - if( (vfsFlags & SQLITE_OPEN_MAIN_DB)!=0 && (isMemdb || isTempDb) ){ - vfsFlags = (vfsFlags & ~SQLITE_OPEN_MAIN_DB) | SQLITE_OPEN_TEMP_DB; - } - p = sqlite3MallocZero(sizeof(Btree)); - if( !p ){ - return SQLITE_NOMEM; - } - p->inTrans = TRANS_NONE; - p->db = db; -#ifndef SQLITE_OMIT_SHARED_CACHE - p->lock.pBtree = p; - p->lock.iTable = 1; -#endif - -#if !defined(SQLITE_OMIT_SHARED_CACHE) && !defined(SQLITE_OMIT_DISKIO) - /* - ** If this Btree is a candidate for shared cache, try to find an - ** existing BtShared object that we can share with - */ - if( isTempDb==0 && (isMemdb==0 || (vfsFlags&SQLITE_OPEN_URI)!=0) ){ - if( vfsFlags & SQLITE_OPEN_SHAREDCACHE ){ - int nFullPathname = pVfs->mxPathname+1; - char *zFullPathname = sqlite3Malloc(nFullPathname); - MUTEX_LOGIC( sqlite3_mutex *mutexShared; ) - p->sharable = 1; - if( !zFullPathname ){ - sqlite3_free(p); - return SQLITE_NOMEM; - } - if( isMemdb ){ - memcpy(zFullPathname, zFilename, sqlite3Strlen30(zFilename)+1); - }else{ - rc = sqlite3OsFullPathname(pVfs, zFilename, - nFullPathname, zFullPathname); - if( rc ){ - sqlite3_free(zFullPathname); - sqlite3_free(p); - return rc; - } - } -#if SQLITE_THREADSAFE - mutexOpen = sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_OPEN); - sqlite3_mutex_enter(mutexOpen); - mutexShared = sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_MASTER); - sqlite3_mutex_enter(mutexShared); -#endif - for(pBt=GLOBAL(BtShared*,sqlite3SharedCacheList); pBt; pBt=pBt->pNext){ - assert( pBt->nRef>0 ); - if( 0==strcmp(zFullPathname, sqlite3PagerFilename(pBt->pPager, 0)) - && sqlite3PagerVfs(pBt->pPager)==pVfs ){ - int iDb; - for(iDb=db->nDb-1; iDb>=0; iDb--){ - Btree *pExisting = db->aDb[iDb].pBt; - if( pExisting && pExisting->pBt==pBt ){ - sqlite3_mutex_leave(mutexShared); - sqlite3_mutex_leave(mutexOpen); - sqlite3_free(zFullPathname); - sqlite3_free(p); - return SQLITE_CONSTRAINT; - } - } - p->pBt = pBt; - pBt->nRef++; - break; - } - } - sqlite3_mutex_leave(mutexShared); - sqlite3_free(zFullPathname); - } -#ifdef SQLITE_DEBUG - else{ - /* In debug mode, we mark all persistent databases as sharable - ** even when they are not. This exercises the locking code and - ** gives more opportunity for asserts(sqlite3_mutex_held()) - ** statements to find locking problems. - */ - p->sharable = 1; - } -#endif - } -#endif - if( pBt==0 ){ - /* - ** The following asserts make sure that structures used by the btree are - ** the right size. This is to guard against size changes that result - ** when compiling on a different architecture. 
- */ - assert( sizeof(i64)==8 || sizeof(i64)==4 ); - assert( sizeof(u64)==8 || sizeof(u64)==4 ); - assert( sizeof(u32)==4 ); - assert( sizeof(u16)==2 ); - assert( sizeof(Pgno)==4 ); - - pBt = sqlite3MallocZero( sizeof(*pBt) ); - if( pBt==0 ){ - rc = SQLITE_NOMEM; - goto btree_open_out; - } - rc = sqlite3PagerOpen(pVfs, &pBt->pPager, zFilename, - EXTRA_SIZE, flags, vfsFlags, pageReinit); - if( rc==SQLITE_OK ){ - sqlite3PagerSetMmapLimit(pBt->pPager, db->szMmap); - rc = sqlite3PagerReadFileheader(pBt->pPager,sizeof(zDbHeader),zDbHeader); - } - if( rc!=SQLITE_OK ){ - goto btree_open_out; - } - pBt->openFlags = (u8)flags; - pBt->db = db; - sqlite3PagerSetBusyhandler(pBt->pPager, btreeInvokeBusyHandler, pBt); - p->pBt = pBt; - - pBt->pCursor = 0; - pBt->pPage1 = 0; - if( sqlite3PagerIsreadonly(pBt->pPager) ) pBt->btsFlags |= BTS_READ_ONLY; -#ifdef SQLITE_SECURE_DELETE - pBt->btsFlags |= BTS_SECURE_DELETE; -#endif - pBt->pageSize = (zDbHeader[16]<<8) | (zDbHeader[17]<<16); - if( pBt->pageSize<512 || pBt->pageSize>SQLITE_MAX_PAGE_SIZE - || ((pBt->pageSize-1)&pBt->pageSize)!=0 ){ - pBt->pageSize = 0; -#ifndef SQLITE_OMIT_AUTOVACUUM - /* If the magic name ":memory:" will create an in-memory database, then - ** leave the autoVacuum mode at 0 (do not auto-vacuum), even if - ** SQLITE_DEFAULT_AUTOVACUUM is true. On the other hand, if - ** SQLITE_OMIT_MEMORYDB has been defined, then ":memory:" is just a - ** regular file-name. In this case the auto-vacuum applies as per normal. - */ - if( zFilename && !isMemdb ){ - pBt->autoVacuum = (SQLITE_DEFAULT_AUTOVACUUM ? 1 : 0); - pBt->incrVacuum = (SQLITE_DEFAULT_AUTOVACUUM==2 ? 1 : 0); - } -#endif - nReserve = 0; - }else{ - nReserve = zDbHeader[20]; - pBt->btsFlags |= BTS_PAGESIZE_FIXED; -#ifndef SQLITE_OMIT_AUTOVACUUM - pBt->autoVacuum = (get4byte(&zDbHeader[36 + 4*4])?1:0); - pBt->incrVacuum = (get4byte(&zDbHeader[36 + 7*4])?1:0); -#endif - } - rc = sqlite3PagerSetPagesize(pBt->pPager, &pBt->pageSize, nReserve); - if( rc ) goto btree_open_out; - pBt->usableSize = pBt->pageSize - nReserve; - assert( (pBt->pageSize & 7)==0 ); /* 8-byte alignment of pageSize */ - -#if !defined(SQLITE_OMIT_SHARED_CACHE) && !defined(SQLITE_OMIT_DISKIO) - /* Add the new BtShared object to the linked list sharable BtShareds. - */ - if( p->sharable ){ - MUTEX_LOGIC( sqlite3_mutex *mutexShared; ) - pBt->nRef = 1; - MUTEX_LOGIC( mutexShared = sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_MASTER);) - if( SQLITE_THREADSAFE && sqlite3GlobalConfig.bCoreMutex ){ - pBt->mutex = sqlite3MutexAlloc(SQLITE_MUTEX_FAST); - if( pBt->mutex==0 ){ - rc = SQLITE_NOMEM; - db->mallocFailed = 0; - goto btree_open_out; - } - } - sqlite3_mutex_enter(mutexShared); - pBt->pNext = GLOBAL(BtShared*,sqlite3SharedCacheList); - GLOBAL(BtShared*,sqlite3SharedCacheList) = pBt; - sqlite3_mutex_leave(mutexShared); - } -#endif - } - -#if !defined(SQLITE_OMIT_SHARED_CACHE) && !defined(SQLITE_OMIT_DISKIO) - /* If the new Btree uses a sharable pBtShared, then link the new - ** Btree into the list of all sharable Btrees for the same connection. - ** The list is kept in ascending order by pBt address. 
-*/ - if( p->sharable ){ - int i; - Btree *pSib; - for(i=0; i<db->nDb; i++){ - if( (pSib = db->aDb[i].pBt)!=0 && pSib->sharable ){ - while( pSib->pPrev ){ pSib = pSib->pPrev; } - if( p->pBt<pSib->pBt ){ - p->pNext = pSib; - p->pPrev = 0; - pSib->pPrev = p; - }else{ - while( pSib->pNext && pSib->pNext->pBt<p->pBt ){ - pSib = pSib->pNext; - } - p->pNext = pSib->pNext; - p->pPrev = pSib; - if( p->pNext ){ - p->pNext->pPrev = p; - } - pSib->pNext = p; - } - break; - } - } - } -#endif - *ppBtree = p; - -btree_open_out: - if( rc!=SQLITE_OK ){ - if( pBt && pBt->pPager ){ - sqlite3PagerClose(pBt->pPager); - } - sqlite3_free(pBt); - sqlite3_free(p); - *ppBtree = 0; - }else{ - /* If the B-Tree was successfully opened, set the pager-cache size to the - ** default value. Except, when opening on an existing shared pager-cache, - ** do not change the pager-cache size. - */ - if( sqlite3BtreeSchema(p, 0, 0)==0 ){ - sqlite3PagerSetCachesize(p->pBt->pPager, SQLITE_DEFAULT_CACHE_SIZE); - } - } - if( mutexOpen ){ - assert( sqlite3_mutex_held(mutexOpen) ); - sqlite3_mutex_leave(mutexOpen); - } - return rc; -} - -/* -** Decrement the BtShared.nRef counter. When it reaches zero, -** remove the BtShared structure from the sharing list. Return -** true if the BtShared.nRef counter reaches zero and return -** false if it is still positive. -*/ -static int removeFromSharingList(BtShared *pBt){ -#ifndef SQLITE_OMIT_SHARED_CACHE - MUTEX_LOGIC( sqlite3_mutex *pMaster; ) - BtShared *pList; - int removed = 0; - - assert( sqlite3_mutex_notheld(pBt->mutex) ); - MUTEX_LOGIC( pMaster = sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_MASTER); ) - sqlite3_mutex_enter(pMaster); - pBt->nRef--; - if( pBt->nRef<=0 ){ - if( GLOBAL(BtShared*,sqlite3SharedCacheList)==pBt ){ - GLOBAL(BtShared*,sqlite3SharedCacheList) = pBt->pNext; - }else{ - pList = GLOBAL(BtShared*,sqlite3SharedCacheList); - while( ALWAYS(pList) && pList->pNext!=pBt ){ - pList=pList->pNext; - } - if( ALWAYS(pList) ){ - pList->pNext = pBt->pNext; - } - } - if( SQLITE_THREADSAFE ){ - sqlite3_mutex_free(pBt->mutex); - } - removed = 1; - } - sqlite3_mutex_leave(pMaster); - return removed; -#else - return 1; -#endif -} - -/* -** Make sure pBt->pTmpSpace points to an allocation of -** MX_CELL_SIZE(pBt) bytes. -*/ -static void allocateTempSpace(BtShared *pBt){ - if( !pBt->pTmpSpace ){ - pBt->pTmpSpace = sqlite3PageMalloc( pBt->pageSize ); - - /* One of the uses of pBt->pTmpSpace is to format cells before - ** inserting them into a leaf page (function fillInCell()). If - ** a cell is less than 4 bytes in size, it is rounded up to 4 bytes - ** by the various routines that manipulate binary cells. Which - ** can mean that fillInCell() only initializes the first 2 or 3 - ** bytes of pTmpSpace, but that the first 4 bytes are copied from - ** it into a database page. This is not actually a problem, but it - ** does cause a valgrind error when the 1 or 2 bytes of unitialized - ** data is passed to system call write(). So to avoid this error, - ** zero the first 4 bytes of temp space here. */ - if( pBt->pTmpSpace ) memset(pBt->pTmpSpace, 0, 4); - } -} - -/* -** Free the pBt->pTmpSpace allocation -*/ -static void freeTempSpace(BtShared *pBt){ - sqlite3PageFree( pBt->pTmpSpace); - pBt->pTmpSpace = 0; -} - -/* -** Close an open database and invalidate all cursors. -*/ -SQLITE_PRIVATE int sqlite3BtreeClose(Btree *p){ - BtShared *pBt = p->pBt; - BtCursor *pCur; - - /* Close all cursors opened via this handle.
*/ - assert( sqlite3_mutex_held(p->db->mutex) ); - sqlite3BtreeEnter(p); - pCur = pBt->pCursor; - while( pCur ){ - BtCursor *pTmp = pCur; - pCur = pCur->pNext; - if( pTmp->pBtree==p ){ - sqlite3BtreeCloseCursor(pTmp); - } - } - - /* Rollback any active transaction and free the handle structure. - ** The call to sqlite3BtreeRollback() drops any table-locks held by - ** this handle. - */ - sqlite3BtreeRollback(p, SQLITE_OK); - sqlite3BtreeLeave(p); - - /* If there are still other outstanding references to the shared-btree - ** structure, return now. The remainder of this procedure cleans - ** up the shared-btree. - */ - assert( p->wantToLock==0 && p->locked==0 ); - if( !p->sharable || removeFromSharingList(pBt) ){ - /* The pBt is no longer on the sharing list, so we can access - ** it without having to hold the mutex. - ** - ** Clean out and delete the BtShared object. - */ - assert( !pBt->pCursor ); - sqlite3PagerClose(pBt->pPager); - if( pBt->xFreeSchema && pBt->pSchema ){ - pBt->xFreeSchema(pBt->pSchema); - } - sqlite3DbFree(0, pBt->pSchema); - freeTempSpace(pBt); - sqlite3_free(pBt); - } - -#ifndef SQLITE_OMIT_SHARED_CACHE - assert( p->wantToLock==0 ); - assert( p->locked==0 ); - if( p->pPrev ) p->pPrev->pNext = p->pNext; - if( p->pNext ) p->pNext->pPrev = p->pPrev; -#endif - - sqlite3_free(p); - return SQLITE_OK; -} - -/* -** Change the limit on the number of pages allowed in the cache. -** -** The maximum number of cache pages is set to the absolute -** value of mxPage. If mxPage is negative, the pager will -** operate asynchronously - it will not stop to do fsync()s -** to insure data is written to the disk surface before -** continuing. Transactions still work if synchronous is off, -** and the database cannot be corrupted if this program -** crashes. But if the operating system crashes or there is -** an abrupt power failure when synchronous is off, the database -** could be left in an inconsistent and unrecoverable state. -** Synchronous is on by default so database corruption is not -** normally a worry. -*/ -SQLITE_PRIVATE int sqlite3BtreeSetCacheSize(Btree *p, int mxPage){ - BtShared *pBt = p->pBt; - assert( sqlite3_mutex_held(p->db->mutex) ); - sqlite3BtreeEnter(p); - sqlite3PagerSetCachesize(pBt->pPager, mxPage); - sqlite3BtreeLeave(p); - return SQLITE_OK; -} - -#if SQLITE_MAX_MMAP_SIZE>0 -/* -** Change the limit on the amount of the database file that may be -** memory mapped. -*/ -SQLITE_PRIVATE int sqlite3BtreeSetMmapLimit(Btree *p, sqlite3_int64 szMmap){ - BtShared *pBt = p->pBt; - assert( sqlite3_mutex_held(p->db->mutex) ); - sqlite3BtreeEnter(p); - sqlite3PagerSetMmapLimit(pBt->pPager, szMmap); - sqlite3BtreeLeave(p); - return SQLITE_OK; -} -#endif /* SQLITE_MAX_MMAP_SIZE>0 */ - -/* -** Change the way data is synced to disk in order to increase or decrease -** how well the database resists damage due to OS crashes and power -** failures. Level 1 is the same as asynchronous (no syncs() occur and -** there is a high probability of damage) Level 2 is the default. There -** is a very low but non-zero probability of damage. Level 3 reduces the -** probability of damage to near zero but with a write performance reduction. 
-*/ -#ifndef SQLITE_OMIT_PAGER_PRAGMAS -SQLITE_PRIVATE int sqlite3BtreeSetPagerFlags( - Btree *p, /* The btree to set the safety level on */ - unsigned pgFlags /* Various PAGER_* flags */ -){ - BtShared *pBt = p->pBt; - assert( sqlite3_mutex_held(p->db->mutex) ); - sqlite3BtreeEnter(p); - sqlite3PagerSetFlags(pBt->pPager, pgFlags); - sqlite3BtreeLeave(p); - return SQLITE_OK; -} -#endif - -/* -** Return TRUE if the given btree is set to safety level 1. In other -** words, return TRUE if no sync() occurs on the disk files. -*/ -SQLITE_PRIVATE int sqlite3BtreeSyncDisabled(Btree *p){ - BtShared *pBt = p->pBt; - int rc; - assert( sqlite3_mutex_held(p->db->mutex) ); - sqlite3BtreeEnter(p); - assert( pBt && pBt->pPager ); - rc = sqlite3PagerNosync(pBt->pPager); - sqlite3BtreeLeave(p); - return rc; -} - -/* -** Change the default pages size and the number of reserved bytes per page. -** Or, if the page size has already been fixed, return SQLITE_READONLY -** without changing anything. -** -** The page size must be a power of 2 between 512 and 65536. If the page -** size supplied does not meet this constraint then the page size is not -** changed. -** -** Page sizes are constrained to be a power of two so that the region -** of the database file used for locking (beginning at PENDING_BYTE, -** the first byte past the 1GB boundary, 0x40000000) needs to occur -** at the beginning of a page. -** -** If parameter nReserve is less than zero, then the number of reserved -** bytes per page is left unchanged. -** -** If the iFix!=0 then the BTS_PAGESIZE_FIXED flag is set so that the page size -** and autovacuum mode can no longer be changed. -*/ -SQLITE_PRIVATE int sqlite3BtreeSetPageSize(Btree *p, int pageSize, int nReserve, int iFix){ - int rc = SQLITE_OK; - BtShared *pBt = p->pBt; - assert( nReserve>=-1 && nReserve<=255 ); - sqlite3BtreeEnter(p); - if( pBt->btsFlags & BTS_PAGESIZE_FIXED ){ - sqlite3BtreeLeave(p); - return SQLITE_READONLY; - } - if( nReserve<0 ){ - nReserve = pBt->pageSize - pBt->usableSize; - } - assert( nReserve>=0 && nReserve<=255 ); - if( pageSize>=512 && pageSize<=SQLITE_MAX_PAGE_SIZE && - ((pageSize-1)&pageSize)==0 ){ - assert( (pageSize & 7)==0 ); - assert( !pBt->pPage1 && !pBt->pCursor ); - pBt->pageSize = (u32)pageSize; - freeTempSpace(pBt); - } - rc = sqlite3PagerSetPagesize(pBt->pPager, &pBt->pageSize, nReserve); - pBt->usableSize = pBt->pageSize - (u16)nReserve; - if( iFix ) pBt->btsFlags |= BTS_PAGESIZE_FIXED; - sqlite3BtreeLeave(p); - return rc; -} - -/* -** Return the currently defined page size -*/ -SQLITE_PRIVATE int sqlite3BtreeGetPageSize(Btree *p){ - return p->pBt->pageSize; -} - -#if defined(SQLITE_HAS_CODEC) || defined(SQLITE_DEBUG) -/* -** This function is similar to sqlite3BtreeGetReserve(), except that it -** may only be called if it is guaranteed that the b-tree mutex is already -** held. -** -** This is useful in one special case in the backup API code where it is -** known that the shared b-tree mutex is held, but the mutex on the -** database handle that owns *p is not. In this case if sqlite3BtreeEnter() -** were to be called, it might collide with some other operation on the -** database handle that owns *p, causing undefined behavior. 
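The power-of-two test used by the page-size routine above, ((pageSize-1)&pageSize)==0, is worth seeing in isolation. A stand-alone sketch with a hypothetical helper name that accepts exactly the sizes this code accepts:

#include <stdio.h>

/* Hypothetical helper mirroring the check in sqlite3BtreeSetPageSize():
** accept only powers of two between 512 and SQLITE_MAX_PAGE_SIZE (65536). */
static int validPageSize(int pageSize){
  return pageSize>=512 && pageSize<=65536 && ((pageSize-1)&pageSize)==0;
}

int main(void){
  int aSz[] = {256, 512, 1000, 4096, 65536};
  int i;
  for(i=0; i<5; i++){
    printf("%6d -> %s\n", aSz[i], validPageSize(aSz[i]) ? "accepted" : "rejected");
  }
  return 0;
}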
-*/ -SQLITE_PRIVATE int sqlite3BtreeGetReserveNoMutex(Btree *p){ - assert( sqlite3_mutex_held(p->pBt->mutex) ); - return p->pBt->pageSize - p->pBt->usableSize; -} -#endif /* SQLITE_HAS_CODEC || SQLITE_DEBUG */ - -#if !defined(SQLITE_OMIT_PAGER_PRAGMAS) || !defined(SQLITE_OMIT_VACUUM) -/* -** Return the number of bytes of space at the end of every page that -** are intentually left unused. This is the "reserved" space that is -** sometimes used by extensions. -*/ -SQLITE_PRIVATE int sqlite3BtreeGetReserve(Btree *p){ - int n; - sqlite3BtreeEnter(p); - n = p->pBt->pageSize - p->pBt->usableSize; - sqlite3BtreeLeave(p); - return n; -} - -/* -** Set the maximum page count for a database if mxPage is positive. -** No changes are made if mxPage is 0 or negative. -** Regardless of the value of mxPage, return the maximum page count. -*/ -SQLITE_PRIVATE int sqlite3BtreeMaxPageCount(Btree *p, int mxPage){ - int n; - sqlite3BtreeEnter(p); - n = sqlite3PagerMaxPageCount(p->pBt->pPager, mxPage); - sqlite3BtreeLeave(p); - return n; -} - -/* -** Set the BTS_SECURE_DELETE flag if newFlag is 0 or 1. If newFlag is -1, -** then make no changes. Always return the value of the BTS_SECURE_DELETE -** setting after the change. -*/ -SQLITE_PRIVATE int sqlite3BtreeSecureDelete(Btree *p, int newFlag){ - int b; - if( p==0 ) return 0; - sqlite3BtreeEnter(p); - if( newFlag>=0 ){ - p->pBt->btsFlags &= ~BTS_SECURE_DELETE; - if( newFlag ) p->pBt->btsFlags |= BTS_SECURE_DELETE; - } - b = (p->pBt->btsFlags & BTS_SECURE_DELETE)!=0; - sqlite3BtreeLeave(p); - return b; -} -#endif /* !defined(SQLITE_OMIT_PAGER_PRAGMAS) || !defined(SQLITE_OMIT_VACUUM) */ - -/* -** Change the 'auto-vacuum' property of the database. If the 'autoVacuum' -** parameter is non-zero, then auto-vacuum mode is enabled. If zero, it -** is disabled. The default value for the auto-vacuum property is -** determined by the SQLITE_DEFAULT_AUTOVACUUM macro. -*/ -SQLITE_PRIVATE int sqlite3BtreeSetAutoVacuum(Btree *p, int autoVacuum){ -#ifdef SQLITE_OMIT_AUTOVACUUM - return SQLITE_READONLY; -#else - BtShared *pBt = p->pBt; - int rc = SQLITE_OK; - u8 av = (u8)autoVacuum; - - sqlite3BtreeEnter(p); - if( (pBt->btsFlags & BTS_PAGESIZE_FIXED)!=0 && (av ?1:0)!=pBt->autoVacuum ){ - rc = SQLITE_READONLY; - }else{ - pBt->autoVacuum = av ?1:0; - pBt->incrVacuum = av==2 ?1:0; - } - sqlite3BtreeLeave(p); - return rc; -#endif -} - -/* -** Return the value of the 'auto-vacuum' property. If auto-vacuum is -** enabled 1 is returned. Otherwise 0. -*/ -SQLITE_PRIVATE int sqlite3BtreeGetAutoVacuum(Btree *p){ -#ifdef SQLITE_OMIT_AUTOVACUUM - return BTREE_AUTOVACUUM_NONE; -#else - int rc; - sqlite3BtreeEnter(p); - rc = ( - (!p->pBt->autoVacuum)?BTREE_AUTOVACUUM_NONE: - (!p->pBt->incrVacuum)?BTREE_AUTOVACUUM_FULL: - BTREE_AUTOVACUUM_INCR - ); - sqlite3BtreeLeave(p); - return rc; -#endif -} - - -/* -** Get a reference to pPage1 of the database file. This will -** also acquire a readlock on that file. -** -** SQLITE_OK is returned on success. If the file is not a -** well-formed database file, then SQLITE_CORRUPT is returned. -** SQLITE_BUSY is returned if the database is locked. SQLITE_NOMEM -** is returned if we run out of memory. 
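From an application, the auto-vacuum property handled by the two routines above is normally set with PRAGMA auto_vacuum before the first table is created; afterwards the setting is fixed unless the database is rebuilt with VACUUM. A minimal sketch using the public API (the file name is illustrative):

#include <sqlite3.h>

int main(void){
  sqlite3 *db;
  if( sqlite3_open("demo.db", &db)!=SQLITE_OK ) return 1;
  /* 0 = NONE, 1 = FULL, 2 = INCREMENTAL */
  sqlite3_exec(db, "PRAGMA auto_vacuum=INCREMENTAL;", 0, 0, 0);
  sqlite3_exec(db, "CREATE TABLE IF NOT EXISTS t(x);", 0, 0, 0);
  sqlite3_close(db);
  return 0;
}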
-*/ -static int lockBtree(BtShared *pBt){ - int rc; /* Result code from subfunctions */ - MemPage *pPage1; /* Page 1 of the database file */ - int nPage; /* Number of pages in the database */ - int nPageFile = 0; /* Number of pages in the database file */ - int nPageHeader; /* Number of pages in the database according to hdr */ - - assert( sqlite3_mutex_held(pBt->mutex) ); - assert( pBt->pPage1==0 ); - rc = sqlite3PagerSharedLock(pBt->pPager); - if( rc!=SQLITE_OK ) return rc; - rc = btreeGetPage(pBt, 1, &pPage1, 0); - if( rc!=SQLITE_OK ) return rc; - - /* Do some checking to help insure the file we opened really is - ** a valid database file. - */ - nPage = nPageHeader = get4byte(28+(u8*)pPage1->aData); - sqlite3PagerPagecount(pBt->pPager, &nPageFile); - if( nPage==0 || memcmp(24+(u8*)pPage1->aData, 92+(u8*)pPage1->aData,4)!=0 ){ - nPage = nPageFile; - } - if( nPage>0 ){ - u32 pageSize; - u32 usableSize; - u8 *page1 = pPage1->aData; - rc = SQLITE_NOTADB; - if( memcmp(page1, zMagicHeader, 16)!=0 ){ - goto page1_init_failed; - } - -#ifdef SQLITE_OMIT_WAL - if( page1[18]>1 ){ - pBt->btsFlags |= BTS_READ_ONLY; - } - if( page1[19]>1 ){ - goto page1_init_failed; - } -#else - if( page1[18]>2 ){ - pBt->btsFlags |= BTS_READ_ONLY; - } - if( page1[19]>2 ){ - goto page1_init_failed; - } - - /* If the write version is set to 2, this database should be accessed - ** in WAL mode. If the log is not already open, open it now. Then - ** return SQLITE_OK and return without populating BtShared.pPage1. - ** The caller detects this and calls this function again. This is - ** required as the version of page 1 currently in the page1 buffer - ** may not be the latest version - there may be a newer one in the log - ** file. - */ - if( page1[19]==2 && (pBt->btsFlags & BTS_NO_WAL)==0 ){ - int isOpen = 0; - rc = sqlite3PagerOpenWal(pBt->pPager, &isOpen); - if( rc!=SQLITE_OK ){ - goto page1_init_failed; - }else if( isOpen==0 ){ - releasePage(pPage1); - return SQLITE_OK; - } - rc = SQLITE_NOTADB; - } -#endif - - /* The maximum embedded fraction must be exactly 25%. And the minimum - ** embedded fraction must be 12.5% for both leaf-data and non-leaf-data. - ** The original design allowed these amounts to vary, but as of - ** version 3.6.0, we require them to be fixed. - */ - if( memcmp(&page1[21], "\100\040\040",3)!=0 ){ - goto page1_init_failed; - } - pageSize = (page1[16]<<8) | (page1[17]<<16); - if( ((pageSize-1)&pageSize)!=0 - || pageSize>SQLITE_MAX_PAGE_SIZE - || pageSize<=256 - ){ - goto page1_init_failed; - } - assert( (pageSize & 7)==0 ); - usableSize = pageSize - page1[20]; - if( (u32)pageSize!=pBt->pageSize ){ - /* After reading the first page of the database assuming a page size - ** of BtShared.pageSize, we have discovered that the page-size is - ** actually pageSize. Unlock the database, leave pBt->pPage1 at - ** zero and return SQLITE_OK. The caller will call this function - ** again with the correct page-size. 
- */ - releasePage(pPage1); - pBt->usableSize = usableSize; - pBt->pageSize = pageSize; - freeTempSpace(pBt); - rc = sqlite3PagerSetPagesize(pBt->pPager, &pBt->pageSize, - pageSize-usableSize); - return rc; - } - if( (pBt->db->flags & SQLITE_RecoveryMode)==0 && nPage>nPageFile ){ - rc = SQLITE_CORRUPT_BKPT; - goto page1_init_failed; - } - if( usableSize<480 ){ - goto page1_init_failed; - } - pBt->pageSize = pageSize; - pBt->usableSize = usableSize; -#ifndef SQLITE_OMIT_AUTOVACUUM - pBt->autoVacuum = (get4byte(&page1[36 + 4*4])?1:0); - pBt->incrVacuum = (get4byte(&page1[36 + 7*4])?1:0); -#endif - } - - /* maxLocal is the maximum amount of payload to store locally for - ** a cell. Make sure it is small enough so that at least minFanout - ** cells can will fit on one page. We assume a 10-byte page header. - ** Besides the payload, the cell must store: - ** 2-byte pointer to the cell - ** 4-byte child pointer - ** 9-byte nKey value - ** 4-byte nData value - ** 4-byte overflow page pointer - ** So a cell consists of a 2-byte pointer, a header which is as much as - ** 17 bytes long, 0 to N bytes of payload, and an optional 4 byte overflow - ** page pointer. - */ - pBt->maxLocal = (u16)((pBt->usableSize-12)*64/255 - 23); - pBt->minLocal = (u16)((pBt->usableSize-12)*32/255 - 23); - pBt->maxLeaf = (u16)(pBt->usableSize - 35); - pBt->minLeaf = (u16)((pBt->usableSize-12)*32/255 - 23); - if( pBt->maxLocal>127 ){ - pBt->max1bytePayload = 127; - }else{ - pBt->max1bytePayload = (u8)pBt->maxLocal; - } - assert( pBt->maxLeaf + 23 <= MX_CELL_SIZE(pBt) ); - pBt->pPage1 = pPage1; - pBt->nPage = nPage; - return SQLITE_OK; - -page1_init_failed: - releasePage(pPage1); - pBt->pPage1 = 0; - return rc; -} - -#ifndef NDEBUG -/* -** Return the number of cursors open on pBt. This is for use -** in assert() expressions, so it is only compiled if NDEBUG is not -** defined. -** -** Only write cursors are counted if wrOnly is true. If wrOnly is -** false then all cursors are counted. -** -** For the purposes of this routine, a cursor is any cursor that -** is capable of reading or writing to the databse. Cursors that -** have been tripped into the CURSOR_FAULT state are not counted. -*/ -static int countValidCursors(BtShared *pBt, int wrOnly){ - BtCursor *pCur; - int r = 0; - for(pCur=pBt->pCursor; pCur; pCur=pCur->pNext){ - if( (wrOnly==0 || (pCur->curFlags & BTCF_WriteFlag)!=0) - && pCur->eState!=CURSOR_FAULT ) r++; - } - return r; -} -#endif - -/* -** If there are no outstanding cursors and we are not in the middle -** of a transaction but there is a read lock on the database, then -** this routine unrefs the first page of the database file which -** has the effect of releasing the read lock. -** -** If there is a transaction in progress, this routine is a no-op. -*/ -static void unlockBtreeIfUnused(BtShared *pBt){ - assert( sqlite3_mutex_held(pBt->mutex) ); - assert( countValidCursors(pBt,0)==0 || pBt->inTransaction>TRANS_NONE ); - if( pBt->inTransaction==TRANS_NONE && pBt->pPage1!=0 ){ - assert( pBt->pPage1->aData ); - assert( sqlite3PagerRefcount(pBt->pPager)==1 ); - assert( pBt->pPage1->aData ); - releasePage(pBt->pPage1); - pBt->pPage1 = 0; - } -} - -/* -** If pBt points to an empty file then convert that empty file -** into a new empty database by initializing the first page of -** the database. 
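The sanity checks in lockBtree() operate on the 100-byte header at the start of every SQLite database. The sketch below reads that header directly and decodes the same fields (magic string at offset 0, big-endian page size at offset 16 where a stored value of 1 means 65536, reserved-bytes count at offset 20); it relies only on the documented file format:

#include <stdio.h>
#include <string.h>

int main(int argc, char **argv){
  unsigned char hdr[100];
  FILE *f = argc>1 ? fopen(argv[1], "rb") : 0;
  if( !f ){
    fprintf(stderr, "usage: %s database-file\n", argv[0]);
    return 1;
  }
  if( fread(hdr, 1, sizeof(hdr), f)!=sizeof(hdr) ){
    fclose(f);
    fprintf(stderr, "cannot read 100-byte header\n");
    return 1;
  }
  fclose(f);
  if( memcmp(hdr, "SQLite format 3", 16)!=0 ){  /* 16 bytes incl. the NUL */
    fprintf(stderr, "not a SQLite database\n");
    return 1;
  }
  unsigned pageSize = (hdr[16]<<8) | (hdr[17]<<16);  /* same decoding as above */
  printf("page size %u, reserved bytes per page %d\n", pageSize, hdr[20]);
  return 0;
}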
-*/ -static int newDatabase(BtShared *pBt){ - MemPage *pP1; - unsigned char *data; - int rc; - - assert( sqlite3_mutex_held(pBt->mutex) ); - if( pBt->nPage>0 ){ - return SQLITE_OK; - } - pP1 = pBt->pPage1; - assert( pP1!=0 ); - data = pP1->aData; - rc = sqlite3PagerWrite(pP1->pDbPage); - if( rc ) return rc; - memcpy(data, zMagicHeader, sizeof(zMagicHeader)); - assert( sizeof(zMagicHeader)==16 ); - data[16] = (u8)((pBt->pageSize>>8)&0xff); - data[17] = (u8)((pBt->pageSize>>16)&0xff); - data[18] = 1; - data[19] = 1; - assert( pBt->usableSize<=pBt->pageSize && pBt->usableSize+255>=pBt->pageSize); - data[20] = (u8)(pBt->pageSize - pBt->usableSize); - data[21] = 64; - data[22] = 32; - data[23] = 32; - memset(&data[24], 0, 100-24); - zeroPage(pP1, PTF_INTKEY|PTF_LEAF|PTF_LEAFDATA ); - pBt->btsFlags |= BTS_PAGESIZE_FIXED; -#ifndef SQLITE_OMIT_AUTOVACUUM - assert( pBt->autoVacuum==1 || pBt->autoVacuum==0 ); - assert( pBt->incrVacuum==1 || pBt->incrVacuum==0 ); - put4byte(&data[36 + 4*4], pBt->autoVacuum); - put4byte(&data[36 + 7*4], pBt->incrVacuum); -#endif - pBt->nPage = 1; - data[31] = 1; - return SQLITE_OK; -} - -/* -** Initialize the first page of the database file (creating a database -** consisting of a single page and no schema objects). Return SQLITE_OK -** if successful, or an SQLite error code otherwise. -*/ -SQLITE_PRIVATE int sqlite3BtreeNewDb(Btree *p){ - int rc; - sqlite3BtreeEnter(p); - p->pBt->nPage = 0; - rc = newDatabase(p->pBt); - sqlite3BtreeLeave(p); - return rc; -} - -/* -** Attempt to start a new transaction. A write-transaction -** is started if the second argument is nonzero, otherwise a read- -** transaction. If the second argument is 2 or more and exclusive -** transaction is started, meaning that no other process is allowed -** to access the database. A preexisting transaction may not be -** upgraded to exclusive by calling this routine a second time - the -** exclusivity flag only works for a new transaction. -** -** A write-transaction must be started before attempting any -** changes to the database. None of the following routines -** will work unless a transaction is started first: -** -** sqlite3BtreeCreateTable() -** sqlite3BtreeCreateIndex() -** sqlite3BtreeClearTable() -** sqlite3BtreeDropTable() -** sqlite3BtreeInsert() -** sqlite3BtreeDelete() -** sqlite3BtreeUpdateMeta() -** -** If an initial attempt to acquire the lock fails because of lock contention -** and the database was previously unlocked, then invoke the busy handler -** if there is one. But if there was previously a read-lock, do not -** invoke the busy handler - just return SQLITE_BUSY. SQLITE_BUSY is -** returned when there is already a read-lock in order to avoid a deadlock. -** -** Suppose there are two processes A and B. A has a read lock and B has -** a reserved lock. B tries to promote to exclusive but is blocked because -** of A's read lock. A tries to promote to reserved but is blocked by B. -** One or the other of the two processes must give way or there can be -** no progress. By returning SQLITE_BUSY and not invoking the busy callback -** when A already has a read lock, we encourage A to give up and let B -** proceed. -*/ -SQLITE_PRIVATE int sqlite3BtreeBeginTrans(Btree *p, int wrflag){ - sqlite3 *pBlock = 0; - BtShared *pBt = p->pBt; - int rc = SQLITE_OK; - - sqlite3BtreeEnter(p); - btreeIntegrity(p); - - /* If the btree is already in a write-transaction, or it - ** is already in a read-transaction and a read-transaction - ** is requested, this is a no-op. 
- */ - if( p->inTrans==TRANS_WRITE || (p->inTrans==TRANS_READ && !wrflag) ){ - goto trans_begun; - } - assert( pBt->inTransaction==TRANS_WRITE || IfNotOmitAV(pBt->bDoTruncate)==0 ); - - /* Write transactions are not possible on a read-only database */ - if( (pBt->btsFlags & BTS_READ_ONLY)!=0 && wrflag ){ - rc = SQLITE_READONLY; - goto trans_begun; - } - -#ifndef SQLITE_OMIT_SHARED_CACHE - /* If another database handle has already opened a write transaction - ** on this shared-btree structure and a second write transaction is - ** requested, return SQLITE_LOCKED. - */ - if( (wrflag && pBt->inTransaction==TRANS_WRITE) - || (pBt->btsFlags & BTS_PENDING)!=0 - ){ - pBlock = pBt->pWriter->db; - }else if( wrflag>1 ){ - BtLock *pIter; - for(pIter=pBt->pLock; pIter; pIter=pIter->pNext){ - if( pIter->pBtree!=p ){ - pBlock = pIter->pBtree->db; - break; - } - } - } - if( pBlock ){ - sqlite3ConnectionBlocked(p->db, pBlock); - rc = SQLITE_LOCKED_SHAREDCACHE; - goto trans_begun; - } -#endif - - /* Any read-only or read-write transaction implies a read-lock on - ** page 1. So if some other shared-cache client already has a write-lock - ** on page 1, the transaction cannot be opened. */ - rc = querySharedCacheTableLock(p, MASTER_ROOT, READ_LOCK); - if( SQLITE_OK!=rc ) goto trans_begun; - - pBt->btsFlags &= ~BTS_INITIALLY_EMPTY; - if( pBt->nPage==0 ) pBt->btsFlags |= BTS_INITIALLY_EMPTY; - do { - /* Call lockBtree() until either pBt->pPage1 is populated or - ** lockBtree() returns something other than SQLITE_OK. lockBtree() - ** may return SQLITE_OK but leave pBt->pPage1 set to 0 if after - ** reading page 1 it discovers that the page-size of the database - ** file is not pBt->pageSize. In this case lockBtree() will update - ** pBt->pageSize to the page-size of the file on disk. - */ - while( pBt->pPage1==0 && SQLITE_OK==(rc = lockBtree(pBt)) ); - - if( rc==SQLITE_OK && wrflag ){ - if( (pBt->btsFlags & BTS_READ_ONLY)!=0 ){ - rc = SQLITE_READONLY; - }else{ - rc = sqlite3PagerBegin(pBt->pPager,wrflag>1,sqlite3TempInMemory(p->db)); - if( rc==SQLITE_OK ){ - rc = newDatabase(pBt); - } - } - } - - if( rc!=SQLITE_OK ){ - unlockBtreeIfUnused(pBt); - } - }while( (rc&0xFF)==SQLITE_BUSY && pBt->inTransaction==TRANS_NONE && - btreeInvokeBusyHandler(pBt) ); - - if( rc==SQLITE_OK ){ - if( p->inTrans==TRANS_NONE ){ - pBt->nTransaction++; -#ifndef SQLITE_OMIT_SHARED_CACHE - if( p->sharable ){ - assert( p->lock.pBtree==p && p->lock.iTable==1 ); - p->lock.eLock = READ_LOCK; - p->lock.pNext = pBt->pLock; - pBt->pLock = &p->lock; - } -#endif - } - p->inTrans = (wrflag?TRANS_WRITE:TRANS_READ); - if( p->inTrans>pBt->inTransaction ){ - pBt->inTransaction = p->inTrans; - } - if( wrflag ){ - MemPage *pPage1 = pBt->pPage1; -#ifndef SQLITE_OMIT_SHARED_CACHE - assert( !pBt->pWriter ); - pBt->pWriter = p; - pBt->btsFlags &= ~BTS_EXCLUSIVE; - if( wrflag>1 ) pBt->btsFlags |= BTS_EXCLUSIVE; -#endif - - /* If the db-size header field is incorrect (as it may be if an old - ** client has been writing the database file), update it now. Doing - ** this sooner rather than later means the database size can safely - ** re-read the database size from page 1 if a savepoint or transaction - ** rollback occurs within the transaction. - */ - if( pBt->nPage!=get4byte(&pPage1->aData[28]) ){ - rc = sqlite3PagerWrite(pPage1->pDbPage); - if( rc==SQLITE_OK ){ - put4byte(&pPage1->aData[28], pBt->nPage); - } - } - } - } - - -trans_begun: - if( rc==SQLITE_OK && wrflag ){ - /* This call makes sure that the pager has the correct number of - ** open savepoints. 
If the second parameter is greater than 0 and - ** the sub-journal is not already open, then it will be opened here. - */ - rc = sqlite3PagerOpenSavepoint(pBt->pPager, p->db->nSavepoint); - } - - btreeIntegrity(p); - sqlite3BtreeLeave(p); - return rc; -} - -#ifndef SQLITE_OMIT_AUTOVACUUM - -/* -** Set the pointer-map entries for all children of page pPage. Also, if -** pPage contains cells that point to overflow pages, set the pointer -** map entries for the overflow pages as well. -*/ -static int setChildPtrmaps(MemPage *pPage){ - int i; /* Counter variable */ - int nCell; /* Number of cells in page pPage */ - int rc; /* Return code */ - BtShared *pBt = pPage->pBt; - u8 isInitOrig = pPage->isInit; - Pgno pgno = pPage->pgno; - - assert( sqlite3_mutex_held(pPage->pBt->mutex) ); - rc = btreeInitPage(pPage); - if( rc!=SQLITE_OK ){ - goto set_child_ptrmaps_out; - } - nCell = pPage->nCell; - - for(i=0; i<nCell; i++){ - u8 *pCell = findCell(pPage, i); - - ptrmapPutOvflPtr(pPage, pCell, &rc); - - if( !pPage->leaf ){ - Pgno childPgno = get4byte(pCell); - ptrmapPut(pBt, childPgno, PTRMAP_BTREE, pgno, &rc); - } - } - - if( !pPage->leaf ){ - Pgno childPgno = get4byte(&pPage->aData[pPage->hdrOffset+8]); - ptrmapPut(pBt, childPgno, PTRMAP_BTREE, pgno, &rc); - } - -set_child_ptrmaps_out: - pPage->isInit = isInitOrig; - return rc; -} - -/* -** Somewhere on pPage is a pointer to page iFrom. Modify this pointer so -** that it points to iTo. Parameter eType describes the type of pointer to -** be modified, as follows: -** -** PTRMAP_BTREE: pPage is a btree-page. The pointer points at a child -** page of pPage. -** -** PTRMAP_OVERFLOW1: pPage is a btree-page. The pointer points at an overflow -** page pointed to by one of the cells on pPage. -** -** PTRMAP_OVERFLOW2: pPage is an overflow-page. The pointer points at the next -** overflow page in the list. -*/ -static int modifyPagePointer(MemPage *pPage, Pgno iFrom, Pgno iTo, u8 eType){ - assert( sqlite3_mutex_held(pPage->pBt->mutex) ); - assert( sqlite3PagerIswriteable(pPage->pDbPage) ); - if( eType==PTRMAP_OVERFLOW2 ){ - /* The pointer is always the first 4 bytes of the page in this case. */ - if( get4byte(pPage->aData)!=iFrom ){ - return SQLITE_CORRUPT_BKPT; - } - put4byte(pPage->aData, iTo); - }else{ - u8 isInitOrig = pPage->isInit; - int i; - int nCell; - - btreeInitPage(pPage); - nCell = pPage->nCell; - - for(i=0; i<nCell; i++){ - u8 *pCell = findCell(pPage, i); - if( eType==PTRMAP_OVERFLOW1 ){ - CellInfo info; - btreeParseCellPtr(pPage, pCell, &info); - if( info.iOverflow - && pCell+info.iOverflow+3<=pPage->aData+pPage->maskPage - && iFrom==get4byte(&pCell[info.iOverflow]) - ){ - put4byte(&pCell[info.iOverflow], iTo); - break; - } - }else{ - if( get4byte(pCell)==iFrom ){ - put4byte(pCell, iTo); - break; - } - } - } - - if( i==nCell ){ - if( eType!=PTRMAP_BTREE || - get4byte(&pPage->aData[pPage->hdrOffset+8])!=iFrom ){ - return SQLITE_CORRUPT_BKPT; - } - put4byte(&pPage->aData[pPage->hdrOffset+8], iTo); - } - - pPage->isInit = isInitOrig; - } - return SQLITE_OK; -} - - -/* -** Move the open database page pDbPage to location iFreePage in the -** database. The pDbPage reference remains valid. -** -** The isCommit flag indicates that there is no need to remember that -** the journal needs to be sync()ed before database page pDbPage->pgno -** can be written to. The caller has already promised not to write to that -** page.
-*/ -static int relocatePage( - BtShared *pBt, /* Btree */ - MemPage *pDbPage, /* Open page to move */ - u8 eType, /* Pointer map 'type' entry for pDbPage */ - Pgno iPtrPage, /* Pointer map 'page-no' entry for pDbPage */ - Pgno iFreePage, /* The location to move pDbPage to */ - int isCommit /* isCommit flag passed to sqlite3PagerMovepage */ -){ - MemPage *pPtrPage; /* The page that contains a pointer to pDbPage */ - Pgno iDbPage = pDbPage->pgno; - Pager *pPager = pBt->pPager; - int rc; - - assert( eType==PTRMAP_OVERFLOW2 || eType==PTRMAP_OVERFLOW1 || - eType==PTRMAP_BTREE || eType==PTRMAP_ROOTPAGE ); - assert( sqlite3_mutex_held(pBt->mutex) ); - assert( pDbPage->pBt==pBt ); - - /* Move page iDbPage from its current location to page number iFreePage */ - TRACE(("AUTOVACUUM: Moving %d to free page %d (ptr page %d type %d)\n", - iDbPage, iFreePage, iPtrPage, eType)); - rc = sqlite3PagerMovepage(pPager, pDbPage->pDbPage, iFreePage, isCommit); - if( rc!=SQLITE_OK ){ - return rc; - } - pDbPage->pgno = iFreePage; - - /* If pDbPage was a btree-page, then it may have child pages and/or cells - ** that point to overflow pages. The pointer map entries for all these - ** pages need to be changed. - ** - ** If pDbPage is an overflow page, then the first 4 bytes may store a - ** pointer to a subsequent overflow page. If this is the case, then - ** the pointer map needs to be updated for the subsequent overflow page. - */ - if( eType==PTRMAP_BTREE || eType==PTRMAP_ROOTPAGE ){ - rc = setChildPtrmaps(pDbPage); - if( rc!=SQLITE_OK ){ - return rc; - } - }else{ - Pgno nextOvfl = get4byte(pDbPage->aData); - if( nextOvfl!=0 ){ - ptrmapPut(pBt, nextOvfl, PTRMAP_OVERFLOW2, iFreePage, &rc); - if( rc!=SQLITE_OK ){ - return rc; - } - } - } - - /* Fix the database pointer on page iPtrPage that pointed at iDbPage so - ** that it points at iFreePage. Also fix the pointer map entry for - ** iPtrPage. - */ - if( eType!=PTRMAP_ROOTPAGE ){ - rc = btreeGetPage(pBt, iPtrPage, &pPtrPage, 0); - if( rc!=SQLITE_OK ){ - return rc; - } - rc = sqlite3PagerWrite(pPtrPage->pDbPage); - if( rc!=SQLITE_OK ){ - releasePage(pPtrPage); - return rc; - } - rc = modifyPagePointer(pPtrPage, iDbPage, iFreePage, eType); - releasePage(pPtrPage); - if( rc==SQLITE_OK ){ - ptrmapPut(pBt, iFreePage, eType, iPtrPage, &rc); - } - } - return rc; -} - -/* Forward declaration required by incrVacuumStep(). */ -static int allocateBtreePage(BtShared *, MemPage **, Pgno *, Pgno, u8); - -/* -** Perform a single step of an incremental-vacuum. If successful, return -** SQLITE_OK. If there is no work to do (and therefore no point in -** calling this function again), return SQLITE_DONE. Or, if an error -** occurs, return some other error code. -** -** More specificly, this function attempts to re-organize the database so -** that the last page of the file currently in use is no longer in use. -** -** Parameter nFin is the number of pages that this database would contain -** were this function called until it returns SQLITE_DONE. -** -** If the bCommit parameter is non-zero, this function assumes that the -** caller will keep calling incrVacuumStep() until it returns SQLITE_DONE -** or an error. bCommit is passed true for an auto-vacuum-on-commmit -** operation, or false for an incremental vacuum. 
-*/ -static int incrVacuumStep(BtShared *pBt, Pgno nFin, Pgno iLastPg, int bCommit){ - Pgno nFreeList; /* Number of pages still on the free-list */ - int rc; - - assert( sqlite3_mutex_held(pBt->mutex) ); - assert( iLastPg>nFin ); - - if( !PTRMAP_ISPAGE(pBt, iLastPg) && iLastPg!=PENDING_BYTE_PAGE(pBt) ){ - u8 eType; - Pgno iPtrPage; - - nFreeList = get4byte(&pBt->pPage1->aData[36]); - if( nFreeList==0 ){ - return SQLITE_DONE; - } - - rc = ptrmapGet(pBt, iLastPg, &eType, &iPtrPage); - if( rc!=SQLITE_OK ){ - return rc; - } - if( eType==PTRMAP_ROOTPAGE ){ - return SQLITE_CORRUPT_BKPT; - } - - if( eType==PTRMAP_FREEPAGE ){ - if( bCommit==0 ){ - /* Remove the page from the files free-list. This is not required - ** if bCommit is non-zero. In that case, the free-list will be - ** truncated to zero after this function returns, so it doesn't - ** matter if it still contains some garbage entries. - */ - Pgno iFreePg; - MemPage *pFreePg; - rc = allocateBtreePage(pBt, &pFreePg, &iFreePg, iLastPg, BTALLOC_EXACT); - if( rc!=SQLITE_OK ){ - return rc; - } - assert( iFreePg==iLastPg ); - releasePage(pFreePg); - } - } else { - Pgno iFreePg; /* Index of free page to move pLastPg to */ - MemPage *pLastPg; - u8 eMode = BTALLOC_ANY; /* Mode parameter for allocateBtreePage() */ - Pgno iNear = 0; /* nearby parameter for allocateBtreePage() */ - - rc = btreeGetPage(pBt, iLastPg, &pLastPg, 0); - if( rc!=SQLITE_OK ){ - return rc; - } - - /* If bCommit is zero, this loop runs exactly once and page pLastPg - ** is swapped with the first free page pulled off the free list. - ** - ** On the other hand, if bCommit is greater than zero, then keep - ** looping until a free-page located within the first nFin pages - ** of the file is found. - */ - if( bCommit==0 ){ - eMode = BTALLOC_LE; - iNear = nFin; - } - do { - MemPage *pFreePg; - rc = allocateBtreePage(pBt, &pFreePg, &iFreePg, iNear, eMode); - if( rc!=SQLITE_OK ){ - releasePage(pLastPg); - return rc; - } - releasePage(pFreePg); - }while( bCommit && iFreePg>nFin ); - assert( iFreePg<iLastPg ); - - rc = relocatePage(pBt, pLastPg, eType, iPtrPage, iFreePg, bCommit); - releasePage(pLastPg); - if( rc!=SQLITE_OK ){ - return rc; - } - } - } - - if( bCommit==0 ){ - do { - iLastPg--; - }while( iLastPg==PENDING_BYTE_PAGE(pBt) || PTRMAP_ISPAGE(pBt, iLastPg) ); - pBt->bDoTruncate = 1; - pBt->nPage = iLastPg; - } - return SQLITE_OK; -} - -/* -** The database opened by the first argument is an auto-vacuum database -** nOrig pages in size containing nFree free pages. Return the expected -** size of the database in pages following an auto-vacuum operation. -*/ -static Pgno finalDbSize(BtShared *pBt, Pgno nOrig, Pgno nFree){ - int nEntry; /* Number of entries on one ptrmap page */ - Pgno nPtrmap; /* Number of PtrMap pages to be freed */ - Pgno nFin; /* Return value */ - - nEntry = pBt->usableSize/5; - nPtrmap = (nFree-nOrig+PTRMAP_PAGENO(pBt, nOrig)+nEntry)/nEntry; - nFin = nOrig - nFree - nPtrmap; - if( nOrig>PENDING_BYTE_PAGE(pBt) && nFin<PENDING_BYTE_PAGE(pBt) ){ - nFin--; - } - while( PTRMAP_ISPAGE(pBt, nFin) || nFin==PENDING_BYTE_PAGE(pBt) ){ - nFin--; - } - - return nFin; -} - -/* -** A write-transaction must be opened before calling this function. -** It performs a single unit of work towards an incremental vacuum. -** -** If the incremental vacuum is finished after this function has run, -** SQLITE_DONE is returned. If it is not finished, but no error occurred, -** SQLITE_OK is returned. Otherwise an SQLite error code. -*/ -SQLITE_PRIVATE int sqlite3BtreeIncrVacuum(Btree *p){ - int rc; - BtShared *pBt = p->pBt; - - sqlite3BtreeEnter(p); - assert( pBt->inTransaction==TRANS_WRITE && p->inTrans==TRANS_WRITE ); - if( !pBt->autoVacuum ){ - rc = SQLITE_DONE; - }else{ - Pgno nOrig = btreePagecount(pBt); - Pgno nFree = get4byte(&pBt->pPage1->aData[36]); - Pgno nFin = finalDbSize(pBt, nOrig, nFree); - - if( nOrig<nFin ){ - rc = SQLITE_CORRUPT_BKPT; - }else if( nFree>0 ){ - rc = saveAllCursors(pBt, 0, 0); - if( rc==SQLITE_OK ){ - invalidateAllOverflowCache(pBt); - rc = incrVacuumStep(pBt, nFin, nOrig, 0); - } - if( rc==SQLITE_OK ){ - rc = sqlite3PagerWrite(pBt->pPage1->pDbPage); - put4byte(&pBt->pPage1->aData[28], pBt->nPage); - } - }else{ - rc = SQLITE_DONE; - } - } - sqlite3BtreeLeave(p); - return rc; -} - -/* -** This routine is called prior to sqlite3PagerCommit when a transaction -** is committed for an auto-vacuum database.
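When a database uses auto_vacuum=INCREMENTAL, the stepping logic above is driven from SQL with PRAGMA incremental_vacuum(N), which frees up to N pages from the freelist per call. A brief sketch (error handling omitted):

#include <sqlite3.h>

/* Reclaim up to 100 free pages from an incrementally-vacuumed database. */
static int reclaim_pages(sqlite3 *db){
  return sqlite3_exec(db, "PRAGMA incremental_vacuum(100);", 0, 0, 0);
}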
-** -** If SQLITE_OK is returned, then *pnTrunc is set to the number of pages -** the database file should be truncated to during the commit process. -** i.e. the database has been reorganized so that only the first *pnTrunc -** pages are in use. -*/ -static int autoVacuumCommit(BtShared *pBt){ - int rc = SQLITE_OK; - Pager *pPager = pBt->pPager; - VVA_ONLY( int nRef = sqlite3PagerRefcount(pPager) ); - - assert( sqlite3_mutex_held(pBt->mutex) ); - invalidateAllOverflowCache(pBt); - assert(pBt->autoVacuum); - if( !pBt->incrVacuum ){ - Pgno nFin; /* Number of pages in database after autovacuuming */ - Pgno nFree; /* Number of pages on the freelist initially */ - Pgno iFree; /* The next page to be freed */ - Pgno nOrig; /* Database size before freeing */ - - nOrig = btreePagecount(pBt); - if( PTRMAP_ISPAGE(pBt, nOrig) || nOrig==PENDING_BYTE_PAGE(pBt) ){ - /* It is not possible to create a database for which the final page - ** is either a pointer-map page or the pending-byte page. If one - ** is encountered, this indicates corruption. - */ - return SQLITE_CORRUPT_BKPT; - } - - nFree = get4byte(&pBt->pPage1->aData[36]); - nFin = finalDbSize(pBt, nOrig, nFree); - if( nFin>nOrig ) return SQLITE_CORRUPT_BKPT; - if( nFin<nOrig ){ - rc = saveAllCursors(pBt, 0, 0); - } - for(iFree=nOrig; iFree>nFin && rc==SQLITE_OK; iFree--){ - rc = incrVacuumStep(pBt, nFin, iFree, 1); - } - if( (rc==SQLITE_DONE || rc==SQLITE_OK) && nFree>0 ){ - rc = sqlite3PagerWrite(pBt->pPage1->pDbPage); - put4byte(&pBt->pPage1->aData[32], 0); - put4byte(&pBt->pPage1->aData[36], 0); - put4byte(&pBt->pPage1->aData[28], nFin); - pBt->bDoTruncate = 1; - pBt->nPage = nFin; - } - if( rc!=SQLITE_OK ){ - sqlite3PagerRollback(pPager); - } - } - - assert( nRef>=sqlite3PagerRefcount(pPager) ); - return rc; -} - -#else /* ifndef SQLITE_OMIT_AUTOVACUUM */ -# define setChildPtrmaps(x) SQLITE_OK -#endif - -/* -** This routine does the first phase of a two-phase commit. This routine -** causes a rollback journal to be created (if it does not already exist) -** and populated with enough information so that if a power loss occurs -** the database can be restored to its original state by playing back -** the journal. Then the contents of the journal are flushed out to -** the disk. After the journal is safely on oxide, the changes to the -** database are written into the database file and flushed to oxide. -** At the end of this call, the rollback journal still exists on the -** disk and we are still holding all locks, so the transaction has not -** committed. See sqlite3BtreeCommitPhaseTwo() for the second phase of the -** commit process. -** -** This call is a no-op if no write-transaction is currently active on pBt. -** -** Otherwise, sync the database file for the btree pBt. zMaster points to -** the name of a master journal file that should be written into the -** individual journal file, or is NULL, indicating no master journal file -** (single database transaction). -** -** When this is called, the master journal should already have been -** created, populated with this journal pointer and synced to disk. -** -** Once this is routine has returned, the only thing required to commit -** the write-transaction for this database file is to delete the journal.
-*/ -SQLITE_PRIVATE int sqlite3BtreeCommitPhaseOne(Btree *p, const char *zMaster){ - int rc = SQLITE_OK; - if( p->inTrans==TRANS_WRITE ){ - BtShared *pBt = p->pBt; - sqlite3BtreeEnter(p); -#ifndef SQLITE_OMIT_AUTOVACUUM - if( pBt->autoVacuum ){ - rc = autoVacuumCommit(pBt); - if( rc!=SQLITE_OK ){ - sqlite3BtreeLeave(p); - return rc; - } - } - if( pBt->bDoTruncate ){ - sqlite3PagerTruncateImage(pBt->pPager, pBt->nPage); - } -#endif - rc = sqlite3PagerCommitPhaseOne(pBt->pPager, zMaster, 0); - sqlite3BtreeLeave(p); - } - return rc; -} - -/* -** This function is called from both BtreeCommitPhaseTwo() and BtreeRollback() -** at the conclusion of a transaction. -*/ -static void btreeEndTransaction(Btree *p){ - BtShared *pBt = p->pBt; - sqlite3 *db = p->db; - assert( sqlite3BtreeHoldsMutex(p) ); - -#ifndef SQLITE_OMIT_AUTOVACUUM - pBt->bDoTruncate = 0; -#endif - if( p->inTrans>TRANS_NONE && db->nVdbeRead>1 ){ - /* If there are other active statements that belong to this database - ** handle, downgrade to a read-only transaction. The other statements - ** may still be reading from the database. */ - downgradeAllSharedCacheTableLocks(p); - p->inTrans = TRANS_READ; - }else{ - /* If the handle had any kind of transaction open, decrement the - ** transaction count of the shared btree. If the transaction count - ** reaches 0, set the shared state to TRANS_NONE. The unlockBtreeIfUnused() - ** call below will unlock the pager. */ - if( p->inTrans!=TRANS_NONE ){ - clearAllSharedCacheTableLocks(p); - pBt->nTransaction--; - if( 0==pBt->nTransaction ){ - pBt->inTransaction = TRANS_NONE; - } - } - - /* Set the current transaction state to TRANS_NONE and unlock the - ** pager if this call closed the only read or write transaction. */ - p->inTrans = TRANS_NONE; - unlockBtreeIfUnused(pBt); - } - - btreeIntegrity(p); -} - -/* -** Commit the transaction currently in progress. -** -** This routine implements the second phase of a 2-phase commit. The -** sqlite3BtreeCommitPhaseOne() routine does the first phase and should -** be invoked prior to calling this routine. The sqlite3BtreeCommitPhaseOne() -** routine did all the work of writing information out to disk and flushing the -** contents so that they are written onto the disk platter. All this -** routine has to do is delete or truncate or zero the header in the -** the rollback journal (which causes the transaction to commit) and -** drop locks. -** -** Normally, if an error occurs while the pager layer is attempting to -** finalize the underlying journal file, this function returns an error and -** the upper layer will attempt a rollback. However, if the second argument -** is non-zero then this b-tree transaction is part of a multi-file -** transaction. In this case, the transaction has already been committed -** (by deleting a master journal file) and the caller will ignore this -** functions return code. So, even if an error occurs in the pager layer, -** reset the b-tree objects internal state to indicate that the write -** transaction has been closed. This is quite safe, as the pager will have -** transitioned to the error state. -** -** This will release the write lock on the database file. If there -** are no active cursors, it also releases the read lock. 
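The zMaster argument described above is what makes a COMMIT spanning several attached database files atomic: each rollback journal records the master-journal name, and the commit point is the deletion of that master journal. From the application side this is ordinary SQL; the sketch below assumes a table named accounts exists in both files (the names are illustrative):

#include <sqlite3.h>

/* Move 10 units between rows stored in two different database files.
** SQLite uses a master journal so the two updates commit or fail together. */
static int transfer(sqlite3 *db){
  int rc = sqlite3_exec(db, "ATTACH DATABASE 'second.db' AS aux;", 0, 0, 0);
  if( rc!=SQLITE_OK ) return rc;
  return sqlite3_exec(db,
    "BEGIN;"
    "UPDATE main.accounts SET balance = balance - 10 WHERE id = 1;"
    "UPDATE aux.accounts  SET balance = balance + 10 WHERE id = 2;"
    "COMMIT;", 0, 0, 0);
}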
-*/ -SQLITE_PRIVATE int sqlite3BtreeCommitPhaseTwo(Btree *p, int bCleanup){ - - if( p->inTrans==TRANS_NONE ) return SQLITE_OK; - sqlite3BtreeEnter(p); - btreeIntegrity(p); - - /* If the handle has a write-transaction open, commit the shared-btrees - ** transaction and set the shared state to TRANS_READ. - */ - if( p->inTrans==TRANS_WRITE ){ - int rc; - BtShared *pBt = p->pBt; - assert( pBt->inTransaction==TRANS_WRITE ); - assert( pBt->nTransaction>0 ); - rc = sqlite3PagerCommitPhaseTwo(pBt->pPager); - if( rc!=SQLITE_OK && bCleanup==0 ){ - sqlite3BtreeLeave(p); - return rc; - } - pBt->inTransaction = TRANS_READ; - btreeClearHasContent(pBt); - } - - btreeEndTransaction(p); - sqlite3BtreeLeave(p); - return SQLITE_OK; -} - -/* -** Do both phases of a commit. -*/ -SQLITE_PRIVATE int sqlite3BtreeCommit(Btree *p){ - int rc; - sqlite3BtreeEnter(p); - rc = sqlite3BtreeCommitPhaseOne(p, 0); - if( rc==SQLITE_OK ){ - rc = sqlite3BtreeCommitPhaseTwo(p, 0); - } - sqlite3BtreeLeave(p); - return rc; -} - -/* -** This routine sets the state to CURSOR_FAULT and the error -** code to errCode for every cursor on BtShared that pBtree -** references. -** -** Every cursor is tripped, including cursors that belong -** to other database connections that happen to be sharing -** the cache with pBtree. -** -** This routine gets called when a rollback occurs. -** All cursors using the same cache must be tripped -** to prevent them from trying to use the btree after -** the rollback. The rollback may have deleted tables -** or moved root pages, so it is not sufficient to -** save the state of the cursor. The cursor must be -** invalidated. -*/ -SQLITE_PRIVATE void sqlite3BtreeTripAllCursors(Btree *pBtree, int errCode){ - BtCursor *p; - if( pBtree==0 ) return; - sqlite3BtreeEnter(pBtree); - for(p=pBtree->pBt->pCursor; p; p=p->pNext){ - int i; - sqlite3BtreeClearCursor(p); - p->eState = CURSOR_FAULT; - p->skipNext = errCode; - for(i=0; i<=p->iPage; i++){ - releasePage(p->apPage[i]); - p->apPage[i] = 0; - } - } - sqlite3BtreeLeave(pBtree); -} - -/* -** Rollback the transaction in progress. All cursors will be -** invalided by this operation. Any attempt to use a cursor -** that was open at the beginning of this operation will result -** in an error. -** -** This will release the write lock on the database file. If there -** are no active cursors, it also releases the read lock. -*/ -SQLITE_PRIVATE int sqlite3BtreeRollback(Btree *p, int tripCode){ - int rc; - BtShared *pBt = p->pBt; - MemPage *pPage1; - - sqlite3BtreeEnter(p); - if( tripCode==SQLITE_OK ){ - rc = tripCode = saveAllCursors(pBt, 0, 0); - }else{ - rc = SQLITE_OK; - } - if( tripCode ){ - sqlite3BtreeTripAllCursors(p, tripCode); - } - btreeIntegrity(p); - - if( p->inTrans==TRANS_WRITE ){ - int rc2; - - assert( TRANS_WRITE==pBt->inTransaction ); - rc2 = sqlite3PagerRollback(pBt->pPager); - if( rc2!=SQLITE_OK ){ - rc = rc2; - } - - /* The rollback may have destroyed the pPage1->aData value. So - ** call btreeGetPage() on page 1 again to make - ** sure pPage1->aData is set correctly. 
*/ - if( btreeGetPage(pBt, 1, &pPage1, 0)==SQLITE_OK ){ - int nPage = get4byte(28+(u8*)pPage1->aData); - testcase( nPage==0 ); - if( nPage==0 ) sqlite3PagerPagecount(pBt->pPager, &nPage); - testcase( pBt->nPage!=nPage ); - pBt->nPage = nPage; - releasePage(pPage1); - } - assert( countValidCursors(pBt, 1)==0 ); - pBt->inTransaction = TRANS_READ; - btreeClearHasContent(pBt); - } - - btreeEndTransaction(p); - sqlite3BtreeLeave(p); - return rc; -} - -/* -** Start a statement subtransaction. The subtransaction can can be rolled -** back independently of the main transaction. You must start a transaction -** before starting a subtransaction. The subtransaction is ended automatically -** if the main transaction commits or rolls back. -** -** Statement subtransactions are used around individual SQL statements -** that are contained within a BEGIN...COMMIT block. If a constraint -** error occurs within the statement, the effect of that one statement -** can be rolled back without having to rollback the entire transaction. -** -** A statement sub-transaction is implemented as an anonymous savepoint. The -** value passed as the second parameter is the total number of savepoints, -** including the new anonymous savepoint, open on the B-Tree. i.e. if there -** are no active savepoints and no other statement-transactions open, -** iStatement is 1. This anonymous savepoint can be released or rolled back -** using the sqlite3BtreeSavepoint() function. -*/ -SQLITE_PRIVATE int sqlite3BtreeBeginStmt(Btree *p, int iStatement){ - int rc; - BtShared *pBt = p->pBt; - sqlite3BtreeEnter(p); - assert( p->inTrans==TRANS_WRITE ); - assert( (pBt->btsFlags & BTS_READ_ONLY)==0 ); - assert( iStatement>0 ); - assert( iStatement>p->db->nSavepoint ); - assert( pBt->inTransaction==TRANS_WRITE ); - /* At the pager level, a statement transaction is a savepoint with - ** an index greater than all savepoints created explicitly using - ** SQL statements. It is illegal to open, release or rollback any - ** such savepoints while the statement transaction savepoint is active. - */ - rc = sqlite3PagerOpenSavepoint(pBt->pPager, iStatement); - sqlite3BtreeLeave(p); - return rc; -} - -/* -** The second argument to this function, op, is always SAVEPOINT_ROLLBACK -** or SAVEPOINT_RELEASE. This function either releases or rolls back the -** savepoint identified by parameter iSavepoint, depending on the value -** of op. -** -** Normally, iSavepoint is greater than or equal to zero. However, if op is -** SAVEPOINT_ROLLBACK, then iSavepoint may also be -1. In this case the -** contents of the entire transaction are rolled back. This is different -** from a normal transaction rollback, as no locks are released and the -** transaction remains open. -*/ -SQLITE_PRIVATE int sqlite3BtreeSavepoint(Btree *p, int op, int iSavepoint){ - int rc = SQLITE_OK; - if( p && p->inTrans==TRANS_WRITE ){ - BtShared *pBt = p->pBt; - assert( op==SAVEPOINT_RELEASE || op==SAVEPOINT_ROLLBACK ); - assert( iSavepoint>=0 || (iSavepoint==-1 && op==SAVEPOINT_ROLLBACK) ); - sqlite3BtreeEnter(p); - rc = sqlite3PagerSavepoint(pBt->pPager, op, iSavepoint); - if( rc==SQLITE_OK ){ - if( iSavepoint<0 && (pBt->btsFlags & BTS_INITIALLY_EMPTY)!=0 ){ - pBt->nPage = 0; - } - rc = newDatabase(pBt); - pBt->nPage = get4byte(28 + pBt->pPage1->aData); - - /* The database size was written into the offset 28 of the header - ** when the transaction started, so we know that the value at offset - ** 28 is nonzero. 
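The anonymous savepoints that sqlite3BtreeBeginStmt() creates correspond to named savepoints at the SQL level; a failure inside a savepoint can be rolled back without abandoning the enclosing transaction. A short sketch (table and savepoint names are illustrative):

#include <sqlite3.h>

static void partial_rollback(sqlite3 *db){
  sqlite3_exec(db, "BEGIN;", 0, 0, 0);
  sqlite3_exec(db, "INSERT INTO t VALUES(1);", 0, 0, 0);
  sqlite3_exec(db, "SAVEPOINT sp1;", 0, 0, 0);
  sqlite3_exec(db, "INSERT INTO t VALUES(2);", 0, 0, 0);
  /* Undo only the work done since the savepoint, keep the first insert */
  sqlite3_exec(db, "ROLLBACK TO sp1;", 0, 0, 0);
  sqlite3_exec(db, "RELEASE sp1;", 0, 0, 0);
  sqlite3_exec(db, "COMMIT;", 0, 0, 0);
}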
*/ - assert( pBt->nPage>0 ); - } - sqlite3BtreeLeave(p); - } - return rc; -} - -/* -** Create a new cursor for the BTree whose root is on the page -** iTable. If a read-only cursor is requested, it is assumed that -** the caller already has at least a read-only transaction open -** on the database already. If a write-cursor is requested, then -** the caller is assumed to have an open write transaction. -** -** If wrFlag==0, then the cursor can only be used for reading. -** If wrFlag==1, then the cursor can be used for reading or for -** writing if other conditions for writing are also met. These -** are the conditions that must be met in order for writing to -** be allowed: -** -** 1: The cursor must have been opened with wrFlag==1 -** -** 2: Other database connections that share the same pager cache -** but which are not in the READ_UNCOMMITTED state may not have -** cursors open with wrFlag==0 on the same table. Otherwise -** the changes made by this write cursor would be visible to -** the read cursors in the other database connection. -** -** 3: The database must be writable (not on read-only media) -** -** 4: There must be an active transaction. -** -** No checking is done to make sure that page iTable really is the -** root page of a b-tree. If it is not, then the cursor acquired -** will not work correctly. -** -** It is assumed that the sqlite3BtreeCursorZero() has been called -** on pCur to initialize the memory space prior to invoking this routine. -*/ -static int btreeCursor( - Btree *p, /* The btree */ - int iTable, /* Root page of table to open */ - int wrFlag, /* 1 to write. 0 read-only */ - struct KeyInfo *pKeyInfo, /* First arg to comparison function */ - BtCursor *pCur /* Space for new cursor */ -){ - BtShared *pBt = p->pBt; /* Shared b-tree handle */ - - assert( sqlite3BtreeHoldsMutex(p) ); - assert( wrFlag==0 || wrFlag==1 ); - - /* The following assert statements verify that if this is a sharable - ** b-tree database, the connection is holding the required table locks, - ** and that no other connection has any open cursor that conflicts with - ** this lock. */ - assert( hasSharedCacheTableLock(p, iTable, pKeyInfo!=0, wrFlag+1) ); - assert( wrFlag==0 || !hasReadConflicts(p, iTable) ); - - /* Assert that the caller has opened the required transaction. */ - assert( p->inTrans>TRANS_NONE ); - assert( wrFlag==0 || p->inTrans==TRANS_WRITE ); - assert( pBt->pPage1 && pBt->pPage1->aData ); - - if( NEVER(wrFlag && (pBt->btsFlags & BTS_READ_ONLY)!=0) ){ - return SQLITE_READONLY; - } - if( iTable==1 && btreePagecount(pBt)==0 ){ - assert( wrFlag==0 ); - iTable = 0; - } - - /* Now that no other errors can occur, finish filling in the BtCursor - ** variables and link the cursor into the BtShared list. */ - pCur->pgnoRoot = (Pgno)iTable; - pCur->iPage = -1; - pCur->pKeyInfo = pKeyInfo; - pCur->pBtree = p; - pCur->pBt = pBt; - assert( wrFlag==0 || wrFlag==BTCF_WriteFlag ); - pCur->curFlags = wrFlag; - pCur->pNext = pBt->pCursor; - if( pCur->pNext ){ - pCur->pNext->pPrev = pCur; - } - pBt->pCursor = pCur; - pCur->eState = CURSOR_INVALID; - return SQLITE_OK; -} -SQLITE_PRIVATE int sqlite3BtreeCursor( - Btree *p, /* The btree */ - int iTable, /* Root page of table to open */ - int wrFlag, /* 1 to write. 
0 read-only */ - struct KeyInfo *pKeyInfo, /* First arg to xCompare() */ - BtCursor *pCur /* Write new cursor here */ -){ - int rc; - sqlite3BtreeEnter(p); - rc = btreeCursor(p, iTable, wrFlag, pKeyInfo, pCur); - sqlite3BtreeLeave(p); - return rc; -} - -/* -** Return the size of a BtCursor object in bytes. -** -** This interfaces is needed so that users of cursors can preallocate -** sufficient storage to hold a cursor. The BtCursor object is opaque -** to users so they cannot do the sizeof() themselves - they must call -** this routine. -*/ -SQLITE_PRIVATE int sqlite3BtreeCursorSize(void){ - return ROUND8(sizeof(BtCursor)); -} - -/* -** Initialize memory that will be converted into a BtCursor object. -** -** The simple approach here would be to memset() the entire object -** to zero. But it turns out that the apPage[] and aiIdx[] arrays -** do not need to be zeroed and they are large, so we can save a lot -** of run-time by skipping the initialization of those elements. -*/ -SQLITE_PRIVATE void sqlite3BtreeCursorZero(BtCursor *p){ - memset(p, 0, offsetof(BtCursor, iPage)); -} - -/* -** Close a cursor. The read lock on the database file is released -** when the last cursor is closed. -*/ -SQLITE_PRIVATE int sqlite3BtreeCloseCursor(BtCursor *pCur){ - Btree *pBtree = pCur->pBtree; - if( pBtree ){ - int i; - BtShared *pBt = pCur->pBt; - sqlite3BtreeEnter(pBtree); - sqlite3BtreeClearCursor(pCur); - if( pCur->pPrev ){ - pCur->pPrev->pNext = pCur->pNext; - }else{ - pBt->pCursor = pCur->pNext; - } - if( pCur->pNext ){ - pCur->pNext->pPrev = pCur->pPrev; - } - for(i=0; i<=pCur->iPage; i++){ - releasePage(pCur->apPage[i]); - } - unlockBtreeIfUnused(pBt); - sqlite3DbFree(pBtree->db, pCur->aOverflow); - /* sqlite3_free(pCur); */ - sqlite3BtreeLeave(pBtree); - } - return SQLITE_OK; -} - -/* -** Make sure the BtCursor* given in the argument has a valid -** BtCursor.info structure. If it is not already valid, call -** btreeParseCell() to fill it in. -** -** BtCursor.info is a cache of the information in the current cell. -** Using this cache reduces the number of calls to btreeParseCell(). -** -** 2007-06-25: There is a bug in some versions of MSVC that cause the -** compiler to crash when getCellInfo() is implemented as a macro. -** But there is a measureable speed advantage to using the macro on gcc -** (when less compiler optimizations like -Os or -O0 are used and the -** compiler is not doing agressive inlining.) So we use a real function -** for MSVC and a macro for everything else. Ticket #2457. -*/ -#ifndef NDEBUG - static void assertCellInfo(BtCursor *pCur){ - CellInfo info; - int iPage = pCur->iPage; - memset(&info, 0, sizeof(info)); - btreeParseCell(pCur->apPage[iPage], pCur->aiIdx[iPage], &info); - assert( CORRUPT_DB || memcmp(&info, &pCur->info, sizeof(info))==0 ); - } -#else - #define assertCellInfo(x) -#endif -#ifdef _MSC_VER - /* Use a real function in MSVC to work around bugs in that compiler. 
*/ - static void getCellInfo(BtCursor *pCur){ - if( pCur->info.nSize==0 ){ - int iPage = pCur->iPage; - btreeParseCell(pCur->apPage[iPage],pCur->aiIdx[iPage],&pCur->info); - pCur->curFlags |= BTCF_ValidNKey; - }else{ - assertCellInfo(pCur); - } - } -#else /* if not _MSC_VER */ - /* Use a macro in all other compilers so that the function is inlined */ -#define getCellInfo(pCur) \ - if( pCur->info.nSize==0 ){ \ - int iPage = pCur->iPage; \ - btreeParseCell(pCur->apPage[iPage],pCur->aiIdx[iPage],&pCur->info); \ - pCur->curFlags |= BTCF_ValidNKey; \ - }else{ \ - assertCellInfo(pCur); \ - } -#endif /* _MSC_VER */ - -#ifndef NDEBUG /* The next routine used only within assert() statements */ -/* -** Return true if the given BtCursor is valid. A valid cursor is one -** that is currently pointing to a row in a (non-empty) table. -** This is a verification routine is used only within assert() statements. -*/ -SQLITE_PRIVATE int sqlite3BtreeCursorIsValid(BtCursor *pCur){ - return pCur && pCur->eState==CURSOR_VALID; -} -#endif /* NDEBUG */ - -/* -** Set *pSize to the size of the buffer needed to hold the value of -** the key for the current entry. If the cursor is not pointing -** to a valid entry, *pSize is set to 0. -** -** For a table with the INTKEY flag set, this routine returns the key -** itself, not the number of bytes in the key. -** -** The caller must position the cursor prior to invoking this routine. -** -** This routine cannot fail. It always returns SQLITE_OK. -*/ -SQLITE_PRIVATE int sqlite3BtreeKeySize(BtCursor *pCur, i64 *pSize){ - assert( cursorHoldsMutex(pCur) ); - assert( pCur->eState==CURSOR_INVALID || pCur->eState==CURSOR_VALID ); - if( pCur->eState!=CURSOR_VALID ){ - *pSize = 0; - }else{ - getCellInfo(pCur); - *pSize = pCur->info.nKey; - } - return SQLITE_OK; -} - -/* -** Set *pSize to the number of bytes of data in the entry the -** cursor currently points to. -** -** The caller must guarantee that the cursor is pointing to a non-NULL -** valid entry. In other words, the calling procedure must guarantee -** that the cursor has Cursor.eState==CURSOR_VALID. -** -** Failure is not possible. This function always returns SQLITE_OK. -** It might just as well be a procedure (returning void) but we continue -** to return an integer result code for historical reasons. -*/ -SQLITE_PRIVATE int sqlite3BtreeDataSize(BtCursor *pCur, u32 *pSize){ - assert( cursorHoldsMutex(pCur) ); - assert( pCur->eState==CURSOR_VALID ); - getCellInfo(pCur); - *pSize = pCur->info.nData; - return SQLITE_OK; -} - -/* -** Given the page number of an overflow page in the database (parameter -** ovfl), this function finds the page number of the next page in the -** linked list of overflow pages. If possible, it uses the auto-vacuum -** pointer-map data instead of reading the content of page ovfl to do so. -** -** If an error occurs an SQLite error code is returned. Otherwise: -** -** The page number of the next overflow page in the linked list is -** written to *pPgnoNext. If page ovfl is the last page in its linked -** list, *pPgnoNext is set to zero. -** -** If ppPage is not NULL, and a reference to the MemPage object corresponding -** to page number pOvfl was obtained, then *ppPage is set to point to that -** reference. It is the responsibility of the caller to call releasePage() -** on *ppPage to free the reference. In no reference was obtained (because -** the pointer-map was used to obtain the value for *pPgnoNext), then -** *ppPage is set to zero. 
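/*
** Illustrative sketch -- not from sqlite3.c, added as an aside to the removed
** hunk above.  The comment above describes overflow pages as a singly linked
** list: the first four bytes of each overflow page hold the big-endian page
** number of the next page, and zero terminates the chain.  The standalone
** helpers below walk such a chain over a hypothetical in-memory page array;
** PAGE_SIZE, aPages[], and the demo* names are assumptions made for this
** sketch, not SQLite APIs.
*/
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 512                     /* hypothetical page size */
static uint8_t aPages[8][PAGE_SIZE];      /* hypothetical pages, 1-based numbering */

static uint32_t demoGet4(const uint8_t *p){          /* big-endian decode */
  return ((uint32_t)p[0]<<24)|((uint32_t)p[1]<<16)|((uint32_t)p[2]<<8)|p[3];
}
static void demoPut4(uint8_t *p, uint32_t v){        /* big-endian encode */
  p[0] = (uint8_t)(v>>24); p[1] = (uint8_t)(v>>16);
  p[2] = (uint8_t)(v>>8);  p[3] = (uint8_t)v;
}

/* Count the pages on the overflow chain that starts at page pgno. */
static int demoChainLength(uint32_t pgno){
  int n = 0;
  while( pgno>=1 && pgno<=8 ){
    n++;
    pgno = demoGet4(aPages[pgno-1]);      /* next page number lives in bytes 0..3 */
  }
  return n;
}

int main(void){
  demoPut4(aPages[2], 7);                 /* page 3 -> page 7 */
  demoPut4(aPages[6], 0);                 /* page 7 ends the chain */
  printf("%d\n", demoChainLength(3));     /* prints 2 */
  return 0;
}
/* A real traversal goes through the pager, and getOverflowPage() above can
** also guess the next page from the auto-vacuum pointer map, but the
** linked-list shape is the same. */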
-*/ -static int getOverflowPage( - BtShared *pBt, /* The database file */ - Pgno ovfl, /* Current overflow page number */ - MemPage **ppPage, /* OUT: MemPage handle (may be NULL) */ - Pgno *pPgnoNext /* OUT: Next overflow page number */ -){ - Pgno next = 0; - MemPage *pPage = 0; - int rc = SQLITE_OK; - - assert( sqlite3_mutex_held(pBt->mutex) ); - assert(pPgnoNext); - -#ifndef SQLITE_OMIT_AUTOVACUUM - /* Try to find the next page in the overflow list using the - ** autovacuum pointer-map pages. Guess that the next page in - ** the overflow list is page number (ovfl+1). If that guess turns - ** out to be wrong, fall back to loading the data of page - ** number ovfl to determine the next page number. - */ - if( pBt->autoVacuum ){ - Pgno pgno; - Pgno iGuess = ovfl+1; - u8 eType; - - while( PTRMAP_ISPAGE(pBt, iGuess) || iGuess==PENDING_BYTE_PAGE(pBt) ){ - iGuess++; - } - - if( iGuess<=btreePagecount(pBt) ){ - rc = ptrmapGet(pBt, iGuess, &eType, &pgno); - if( rc==SQLITE_OK && eType==PTRMAP_OVERFLOW2 && pgno==ovfl ){ - next = iGuess; - rc = SQLITE_DONE; - } - } - } -#endif - - assert( next==0 || rc==SQLITE_DONE ); - if( rc==SQLITE_OK ){ - rc = btreeGetPage(pBt, ovfl, &pPage, (ppPage==0) ? PAGER_GET_READONLY : 0); - assert( rc==SQLITE_OK || pPage==0 ); - if( rc==SQLITE_OK ){ - next = get4byte(pPage->aData); - } - } - - *pPgnoNext = next; - if( ppPage ){ - *ppPage = pPage; - }else{ - releasePage(pPage); - } - return (rc==SQLITE_DONE ? SQLITE_OK : rc); -} - -/* -** Copy data from a buffer to a page, or from a page to a buffer. -** -** pPayload is a pointer to data stored on database page pDbPage. -** If argument eOp is false, then nByte bytes of data are copied -** from pPayload to the buffer pointed at by pBuf. If eOp is true, -** then sqlite3PagerWrite() is called on pDbPage and nByte bytes -** of data are copied from the buffer pBuf to pPayload. -** -** SQLITE_OK is returned on success, otherwise an error code. -*/ -static int copyPayload( - void *pPayload, /* Pointer to page data */ - void *pBuf, /* Pointer to buffer */ - int nByte, /* Number of bytes to copy */ - int eOp, /* 0 -> copy from page, 1 -> copy to page */ - DbPage *pDbPage /* Page containing pPayload */ -){ - if( eOp ){ - /* Copy data from buffer to page (a write operation) */ - int rc = sqlite3PagerWrite(pDbPage); - if( rc!=SQLITE_OK ){ - return rc; - } - memcpy(pPayload, pBuf, nByte); - }else{ - /* Copy data from page to buffer (a read operation) */ - memcpy(pBuf, pPayload, nByte); - } - return SQLITE_OK; -} - -/* -** This function is used to read or overwrite payload information -** for the entry that the pCur cursor is pointing to. The eOp -** argument is interpreted as follows: -** -** 0: The operation is a read. Populate the overflow cache. -** 1: The operation is a write. Populate the overflow cache. -** 2: The operation is a read. Do not populate the overflow cache. -** -** A total of "amt" bytes are read or written beginning at "offset". -** Data is read to or from the buffer pBuf. -** -** The content being read or written might appear on the main page -** or be scattered out on multiple overflow pages. -** -** If the current cursor entry uses one or more overflow pages and the -** eOp argument is not 2, this function may allocate space for and lazily -** popluates the overflow page-list cache array (BtCursor.aOverflow). -** Subsequent calls use this cache to make seeking to the supplied offset -** more efficient. 
-** -** Once an overflow page-list cache has been allocated, it may be -** invalidated if some other cursor writes to the same table, or if -** the cursor is moved to a different row. Additionally, in auto-vacuum -** mode, the following events may invalidate an overflow page-list cache. -** -** * An incremental vacuum, -** * A commit in auto_vacuum="full" mode, -** * Creating a table (may require moving an overflow page). -*/ -static int accessPayload( - BtCursor *pCur, /* Cursor pointing to entry to read from */ - u32 offset, /* Begin reading this far into payload */ - u32 amt, /* Read this many bytes */ - unsigned char *pBuf, /* Write the bytes into this buffer */ - int eOp /* zero to read. non-zero to write. */ -){ - unsigned char *aPayload; - int rc = SQLITE_OK; - u32 nKey; - int iIdx = 0; - MemPage *pPage = pCur->apPage[pCur->iPage]; /* Btree page of current entry */ - BtShared *pBt = pCur->pBt; /* Btree this cursor belongs to */ -#ifdef SQLITE_DIRECT_OVERFLOW_READ - int bEnd; /* True if reading to end of data */ -#endif - - assert( pPage ); - assert( pCur->eState==CURSOR_VALID ); - assert( pCur->aiIdx[pCur->iPage]nCell ); - assert( cursorHoldsMutex(pCur) ); - assert( eOp!=2 || offset==0 ); /* Always start from beginning for eOp==2 */ - - getCellInfo(pCur); - aPayload = pCur->info.pCell + pCur->info.nHeader; - nKey = (pPage->intKey ? 0 : (int)pCur->info.nKey); -#ifdef SQLITE_DIRECT_OVERFLOW_READ - bEnd = (offset+amt==nKey+pCur->info.nData); -#endif - - if( NEVER(offset+amt > nKey+pCur->info.nData) - || &aPayload[pCur->info.nLocal] > &pPage->aData[pBt->usableSize] - ){ - /* Trying to read or write past the end of the data is an error */ - return SQLITE_CORRUPT_BKPT; - } - - /* Check if data must be read/written to/from the btree page itself. */ - if( offsetinfo.nLocal ){ - int a = amt; - if( a+offset>pCur->info.nLocal ){ - a = pCur->info.nLocal - offset; - } - rc = copyPayload(&aPayload[offset], pBuf, a, (eOp & 0x01), pPage->pDbPage); - offset = 0; - pBuf += a; - amt -= a; - }else{ - offset -= pCur->info.nLocal; - } - - if( rc==SQLITE_OK && amt>0 ){ - const u32 ovflSize = pBt->usableSize - 4; /* Bytes content per ovfl page */ - Pgno nextPage; - - nextPage = get4byte(&aPayload[pCur->info.nLocal]); - - /* If the BtCursor.aOverflow[] has not been allocated, allocate it now. - ** Except, do not allocate aOverflow[] for eOp==2. - ** - ** The aOverflow[] array is sized at one entry for each overflow page - ** in the overflow chain. The page number of the first overflow page is - ** stored in aOverflow[0], etc. A value of 0 in the aOverflow[] array - ** means "not yet known" (the cache is lazily populated). - */ - if( eOp!=2 && (pCur->curFlags & BTCF_ValidOvfl)==0 ){ - int nOvfl = (pCur->info.nPayload-pCur->info.nLocal+ovflSize-1)/ovflSize; - if( nOvfl>pCur->nOvflAlloc ){ - Pgno *aNew = (Pgno*)sqlite3DbRealloc( - pCur->pBtree->db, pCur->aOverflow, nOvfl*2*sizeof(Pgno) - ); - if( aNew==0 ){ - rc = SQLITE_NOMEM; - }else{ - pCur->nOvflAlloc = nOvfl*2; - pCur->aOverflow = aNew; - } - } - if( rc==SQLITE_OK ){ - memset(pCur->aOverflow, 0, nOvfl*sizeof(Pgno)); - pCur->curFlags |= BTCF_ValidOvfl; - } - } - - /* If the overflow page-list cache has been allocated and the - ** entry for the first required overflow page is valid, skip - ** directly to it. 
- */ - if( (pCur->curFlags & BTCF_ValidOvfl)!=0 && pCur->aOverflow[offset/ovflSize] ){ - iIdx = (offset/ovflSize); - nextPage = pCur->aOverflow[iIdx]; - offset = (offset%ovflSize); - } - - for( ; rc==SQLITE_OK && amt>0 && nextPage; iIdx++){ - - /* If required, populate the overflow page-list cache. */ - if( (pCur->curFlags & BTCF_ValidOvfl)!=0 ){ - assert(!pCur->aOverflow[iIdx] || pCur->aOverflow[iIdx]==nextPage); - pCur->aOverflow[iIdx] = nextPage; - } - - if( offset>=ovflSize ){ - /* The only reason to read this page is to obtain the page - ** number for the next page in the overflow chain. The page - ** data is not required. So first try to lookup the overflow - ** page-list cache, if any, then fall back to the getOverflowPage() - ** function. - ** - ** Note that the aOverflow[] array must be allocated because eOp!=2 - ** here. If eOp==2, then offset==0 and this branch is never taken. - */ - assert( eOp!=2 ); - assert( pCur->curFlags & BTCF_ValidOvfl ); - if( pCur->aOverflow[iIdx+1] ){ - nextPage = pCur->aOverflow[iIdx+1]; - }else{ - rc = getOverflowPage(pBt, nextPage, 0, &nextPage); - } - offset -= ovflSize; - }else{ - /* Need to read this page properly. It contains some of the - ** range of data that is being read (eOp==0) or written (eOp!=0). - */ -#ifdef SQLITE_DIRECT_OVERFLOW_READ - sqlite3_file *fd; -#endif - int a = amt; - if( a + offset > ovflSize ){ - a = ovflSize - offset; - } - -#ifdef SQLITE_DIRECT_OVERFLOW_READ - /* If all the following are true: - ** - ** 1) this is a read operation, and - ** 2) data is required from the start of this overflow page, and - ** 3) the database is file-backed, and - ** 4) there is no open write-transaction, and - ** 5) the database is not a WAL database, - ** 6) all data from the page is being read. - ** - ** then data can be read directly from the database file into the - ** output buffer, bypassing the page-cache altogether. This speeds - ** up loading large records that span many overflow pages. - */ - if( (eOp&0x01)==0 /* (1) */ - && offset==0 /* (2) */ - && (bEnd || a==ovflSize) /* (6) */ - && pBt->inTransaction==TRANS_READ /* (4) */ - && (fd = sqlite3PagerFile(pBt->pPager))->pMethods /* (3) */ - && pBt->pPage1->aData[19]==0x01 /* (5) */ - ){ - u8 aSave[4]; - u8 *aWrite = &pBuf[-4]; - memcpy(aSave, aWrite, 4); - rc = sqlite3OsRead(fd, aWrite, a+4, (i64)pBt->pageSize*(nextPage-1)); - nextPage = get4byte(aWrite); - memcpy(aWrite, aSave, 4); - }else -#endif - - { - DbPage *pDbPage; - rc = sqlite3PagerAcquire(pBt->pPager, nextPage, &pDbPage, - ((eOp&0x01)==0 ? PAGER_GET_READONLY : 0) - ); - if( rc==SQLITE_OK ){ - aPayload = sqlite3PagerGetData(pDbPage); - nextPage = get4byte(aPayload); - rc = copyPayload(&aPayload[offset+4], pBuf, a, (eOp&0x01), pDbPage); - sqlite3PagerUnref(pDbPage); - offset = 0; - } - } - amt -= a; - pBuf += a; - } - } - } - - if( rc==SQLITE_OK && amt>0 ){ - return SQLITE_CORRUPT_BKPT; - } - return rc; -} - -/* -** Read part of the key associated with cursor pCur. Exactly -** "amt" bytes will be transfered into pBuf[]. The transfer -** begins at "offset". -** -** The caller must ensure that pCur is pointing to a valid row -** in the table. -** -** Return SQLITE_OK on success or an error code if anything goes -** wrong. An error is returned if "offset+amt" is larger than -** the available payload. 
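/*
** Illustrative sketch -- not from sqlite3.c, added as an aside to the removed
** hunk above.  accessPayload() sizes its aOverflow[] cache at one entry per
** overflow page and can jump straight to the page holding a requested byte
** offset.  With nLocal payload bytes stored on the b-tree page itself and
** ovflSize = usableSize-4 payload bytes per overflow page, the arithmetic
** reduces to the helpers below; the demo* names are assumptions for the
** sketch only.
*/
#include <stdio.h>
#include <stdint.h>

/* How many overflow pages does a payload of nPayload bytes need?  Mirrors
** (nPayload - nLocal + ovflSize - 1)/ovflSize from the code above. */
static uint32_t demoOvflCount(uint32_t nPayload, uint32_t nLocal, uint32_t ovflSize){
  return nPayload<=nLocal ? 0 : (nPayload - nLocal + ovflSize - 1)/ovflSize;
}

/* For a byte offset beyond the local payload: which overflow page holds it
** (an index into the aOverflow[]-style cache) and how far into that page? */
static void demoLocate(uint32_t offset, uint32_t nLocal, uint32_t ovflSize,
                       uint32_t *pIdx, uint32_t *pWithin){
  uint32_t rel = offset - nLocal;        /* caller ensures offset >= nLocal */
  *pIdx = rel/ovflSize;
  *pWithin = rel%ovflSize;
}

int main(void){
  uint32_t idx, within;
  demoLocate(5000, 500, 1020, &idx, &within);
  printf("%u pages, offset 5000 -> overflow page %u, byte %u\n",
         demoOvflCount(5000, 500, 1020), idx, within);   /* 5 pages, page 4 byte 420 */
  return 0;
}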
-*/ -SQLITE_PRIVATE int sqlite3BtreeKey(BtCursor *pCur, u32 offset, u32 amt, void *pBuf){ - assert( cursorHoldsMutex(pCur) ); - assert( pCur->eState==CURSOR_VALID ); - assert( pCur->iPage>=0 && pCur->apPage[pCur->iPage] ); - assert( pCur->aiIdx[pCur->iPage]apPage[pCur->iPage]->nCell ); - return accessPayload(pCur, offset, amt, (unsigned char*)pBuf, 0); -} - -/* -** Read part of the data associated with cursor pCur. Exactly -** "amt" bytes will be transfered into pBuf[]. The transfer -** begins at "offset". -** -** Return SQLITE_OK on success or an error code if anything goes -** wrong. An error is returned if "offset+amt" is larger than -** the available payload. -*/ -SQLITE_PRIVATE int sqlite3BtreeData(BtCursor *pCur, u32 offset, u32 amt, void *pBuf){ - int rc; - -#ifndef SQLITE_OMIT_INCRBLOB - if ( pCur->eState==CURSOR_INVALID ){ - return SQLITE_ABORT; - } -#endif - - assert( cursorHoldsMutex(pCur) ); - rc = restoreCursorPosition(pCur); - if( rc==SQLITE_OK ){ - assert( pCur->eState==CURSOR_VALID ); - assert( pCur->iPage>=0 && pCur->apPage[pCur->iPage] ); - assert( pCur->aiIdx[pCur->iPage]apPage[pCur->iPage]->nCell ); - rc = accessPayload(pCur, offset, amt, pBuf, 0); - } - return rc; -} - -/* -** Return a pointer to payload information from the entry that the -** pCur cursor is pointing to. The pointer is to the beginning of -** the key if index btrees (pPage->intKey==0) and is the data for -** table btrees (pPage->intKey==1). The number of bytes of available -** key/data is written into *pAmt. If *pAmt==0, then the value -** returned will not be a valid pointer. -** -** This routine is an optimization. It is common for the entire key -** and data to fit on the local page and for there to be no overflow -** pages. When that is so, this routine can be used to access the -** key and data without making a copy. If the key and/or data spills -** onto overflow pages, then accessPayload() must be used to reassemble -** the key/data and copy it into a preallocated buffer. -** -** The pointer returned by this routine looks directly into the cached -** page of the database. The data might change or move the next time -** any btree routine is called. -*/ -static const void *fetchPayload( - BtCursor *pCur, /* Cursor pointing to entry to read from */ - u32 *pAmt /* Write the number of available bytes here */ -){ - assert( pCur!=0 && pCur->iPage>=0 && pCur->apPage[pCur->iPage]); - assert( pCur->eState==CURSOR_VALID ); - assert( sqlite3_mutex_held(pCur->pBtree->db->mutex) ); - assert( cursorHoldsMutex(pCur) ); - assert( pCur->aiIdx[pCur->iPage]apPage[pCur->iPage]->nCell ); - assert( pCur->info.nSize>0 ); - *pAmt = pCur->info.nLocal; - return (void*)(pCur->info.pCell + pCur->info.nHeader); -} - - -/* -** For the entry that cursor pCur is point to, return as -** many bytes of the key or data as are available on the local -** b-tree page. Write the number of available bytes into *pAmt. -** -** The pointer returned is ephemeral. The key/data may move -** or be destroyed on the next call to any Btree routine, -** including calls from other threads against the same cache. -** Hence, a mutex on the BtShared should be held prior to calling -** this routine. -** -** These routines is used to get quick access to key and data -** in the common case where no overflow pages are used. 
-*/ -SQLITE_PRIVATE const void *sqlite3BtreeKeyFetch(BtCursor *pCur, u32 *pAmt){ - return fetchPayload(pCur, pAmt); -} -SQLITE_PRIVATE const void *sqlite3BtreeDataFetch(BtCursor *pCur, u32 *pAmt){ - return fetchPayload(pCur, pAmt); -} - - -/* -** Move the cursor down to a new child page. The newPgno argument is the -** page number of the child page to move to. -** -** This function returns SQLITE_CORRUPT if the page-header flags field of -** the new child page does not match the flags field of the parent (i.e. -** if an intkey page appears to be the parent of a non-intkey page, or -** vice-versa). -*/ -static int moveToChild(BtCursor *pCur, u32 newPgno){ - int rc; - int i = pCur->iPage; - MemPage *pNewPage; - BtShared *pBt = pCur->pBt; - - assert( cursorHoldsMutex(pCur) ); - assert( pCur->eState==CURSOR_VALID ); - assert( pCur->iPageiPage>=0 ); - if( pCur->iPage>=(BTCURSOR_MAX_DEPTH-1) ){ - return SQLITE_CORRUPT_BKPT; - } - rc = getAndInitPage(pBt, newPgno, &pNewPage, - (pCur->curFlags & BTCF_WriteFlag)==0 ? PAGER_GET_READONLY : 0); - if( rc ) return rc; - pCur->apPage[i+1] = pNewPage; - pCur->aiIdx[i+1] = 0; - pCur->iPage++; - - pCur->info.nSize = 0; - pCur->curFlags &= ~(BTCF_ValidNKey|BTCF_ValidOvfl); - if( pNewPage->nCell<1 || pNewPage->intKey!=pCur->apPage[i]->intKey ){ - return SQLITE_CORRUPT_BKPT; - } - return SQLITE_OK; -} - -#if 0 -/* -** Page pParent is an internal (non-leaf) tree page. This function -** asserts that page number iChild is the left-child if the iIdx'th -** cell in page pParent. Or, if iIdx is equal to the total number of -** cells in pParent, that page number iChild is the right-child of -** the page. -*/ -static void assertParentIndex(MemPage *pParent, int iIdx, Pgno iChild){ - assert( iIdx<=pParent->nCell ); - if( iIdx==pParent->nCell ){ - assert( get4byte(&pParent->aData[pParent->hdrOffset+8])==iChild ); - }else{ - assert( get4byte(findCell(pParent, iIdx))==iChild ); - } -} -#else -# define assertParentIndex(x,y,z) -#endif - -/* -** Move the cursor up to the parent page. -** -** pCur->idx is set to the cell index that contains the pointer -** to the page we are coming from. If we are coming from the -** right-most child page then pCur->idx is set to one more than -** the largest cell index. -*/ -static void moveToParent(BtCursor *pCur){ - assert( cursorHoldsMutex(pCur) ); - assert( pCur->eState==CURSOR_VALID ); - assert( pCur->iPage>0 ); - assert( pCur->apPage[pCur->iPage] ); - - /* UPDATE: It is actually possible for the condition tested by the assert - ** below to be untrue if the database file is corrupt. This can occur if - ** one cursor has modified page pParent while a reference to it is held - ** by a second cursor. Which can only happen if a single page is linked - ** into more than one b-tree structure in a corrupt database. */ -#if 0 - assertParentIndex( - pCur->apPage[pCur->iPage-1], - pCur->aiIdx[pCur->iPage-1], - pCur->apPage[pCur->iPage]->pgno - ); -#endif - testcase( pCur->aiIdx[pCur->iPage-1] > pCur->apPage[pCur->iPage-1]->nCell ); - - releasePage(pCur->apPage[pCur->iPage]); - pCur->iPage--; - pCur->info.nSize = 0; - pCur->curFlags &= ~(BTCF_ValidNKey|BTCF_ValidOvfl); -} - -/* -** Move the cursor to point to the root page of its b-tree structure. -** -** If the table has a virtual root page, then the cursor is moved to point -** to the virtual root page instead of the actual root page. A table has a -** virtual root page when the actual root page contains no cells and a -** single child page. This can only happen with the table rooted at page 1. 
-** -** If the b-tree structure is empty, the cursor state is set to -** CURSOR_INVALID. Otherwise, the cursor is set to point to the first -** cell located on the root (or virtual root) page and the cursor state -** is set to CURSOR_VALID. -** -** If this function returns successfully, it may be assumed that the -** page-header flags indicate that the [virtual] root-page is the expected -** kind of b-tree page (i.e. if when opening the cursor the caller did not -** specify a KeyInfo structure the flags byte is set to 0x05 or 0x0D, -** indicating a table b-tree, or if the caller did specify a KeyInfo -** structure the flags byte is set to 0x02 or 0x0A, indicating an index -** b-tree). -*/ -static int moveToRoot(BtCursor *pCur){ - MemPage *pRoot; - int rc = SQLITE_OK; - - assert( cursorHoldsMutex(pCur) ); - assert( CURSOR_INVALID < CURSOR_REQUIRESEEK ); - assert( CURSOR_VALID < CURSOR_REQUIRESEEK ); - assert( CURSOR_FAULT > CURSOR_REQUIRESEEK ); - if( pCur->eState>=CURSOR_REQUIRESEEK ){ - if( pCur->eState==CURSOR_FAULT ){ - assert( pCur->skipNext!=SQLITE_OK ); - return pCur->skipNext; - } - sqlite3BtreeClearCursor(pCur); - } - - if( pCur->iPage>=0 ){ - while( pCur->iPage ) releasePage(pCur->apPage[pCur->iPage--]); - }else if( pCur->pgnoRoot==0 ){ - pCur->eState = CURSOR_INVALID; - return SQLITE_OK; - }else{ - rc = getAndInitPage(pCur->pBtree->pBt, pCur->pgnoRoot, &pCur->apPage[0], - (pCur->curFlags & BTCF_WriteFlag)==0 ? PAGER_GET_READONLY : 0); - if( rc!=SQLITE_OK ){ - pCur->eState = CURSOR_INVALID; - return rc; - } - pCur->iPage = 0; - } - pRoot = pCur->apPage[0]; - assert( pRoot->pgno==pCur->pgnoRoot ); - - /* If pCur->pKeyInfo is not NULL, then the caller that opened this cursor - ** expected to open it on an index b-tree. Otherwise, if pKeyInfo is - ** NULL, the caller expects a table b-tree. If this is not the case, - ** return an SQLITE_CORRUPT error. - ** - ** Earlier versions of SQLite assumed that this test could not fail - ** if the root page was already loaded when this function was called (i.e. - ** if pCur->iPage>=0). But this is not so if the database is corrupted - ** in such a way that page pRoot is linked into a second b-tree table - ** (or the freelist). */ - assert( pRoot->intKey==1 || pRoot->intKey==0 ); - if( pRoot->isInit==0 || (pCur->pKeyInfo==0)!=pRoot->intKey ){ - return SQLITE_CORRUPT_BKPT; - } - - pCur->aiIdx[0] = 0; - pCur->info.nSize = 0; - pCur->curFlags &= ~(BTCF_AtLast|BTCF_ValidNKey|BTCF_ValidOvfl); - - if( pRoot->nCell>0 ){ - pCur->eState = CURSOR_VALID; - }else if( !pRoot->leaf ){ - Pgno subpage; - if( pRoot->pgno!=1 ) return SQLITE_CORRUPT_BKPT; - subpage = get4byte(&pRoot->aData[pRoot->hdrOffset+8]); - pCur->eState = CURSOR_VALID; - rc = moveToChild(pCur, subpage); - }else{ - pCur->eState = CURSOR_INVALID; - } - return rc; -} - -/* -** Move the cursor down to the left-most leaf entry beneath the -** entry to which it is currently pointing. -** -** The left-most leaf is the one with the smallest key - the first -** in ascending order. -*/ -static int moveToLeftmost(BtCursor *pCur){ - Pgno pgno; - int rc = SQLITE_OK; - MemPage *pPage; - - assert( cursorHoldsMutex(pCur) ); - assert( pCur->eState==CURSOR_VALID ); - while( rc==SQLITE_OK && !(pPage = pCur->apPage[pCur->iPage])->leaf ){ - assert( pCur->aiIdx[pCur->iPage]nCell ); - pgno = get4byte(findCell(pPage, pCur->aiIdx[pCur->iPage])); - rc = moveToChild(pCur, pgno); - } - return rc; -} - -/* -** Move the cursor down to the right-most leaf entry beneath the -** page to which it is currently pointing. 
Notice the difference -** between moveToLeftmost() and moveToRightmost(). moveToLeftmost() -** finds the left-most entry beneath the *entry* whereas moveToRightmost() -** finds the right-most entry beneath the *page*. -** -** The right-most entry is the one with the largest key - the last -** key in ascending order. -*/ -static int moveToRightmost(BtCursor *pCur){ - Pgno pgno; - int rc = SQLITE_OK; - MemPage *pPage = 0; - - assert( cursorHoldsMutex(pCur) ); - assert( pCur->eState==CURSOR_VALID ); - while( rc==SQLITE_OK && !(pPage = pCur->apPage[pCur->iPage])->leaf ){ - pgno = get4byte(&pPage->aData[pPage->hdrOffset+8]); - pCur->aiIdx[pCur->iPage] = pPage->nCell; - rc = moveToChild(pCur, pgno); - } - if( rc==SQLITE_OK ){ - pCur->aiIdx[pCur->iPage] = pPage->nCell-1; - pCur->info.nSize = 0; - pCur->curFlags &= ~BTCF_ValidNKey; - } - return rc; -} - -/* Move the cursor to the first entry in the table. Return SQLITE_OK -** on success. Set *pRes to 0 if the cursor actually points to something -** or set *pRes to 1 if the table is empty. -*/ -SQLITE_PRIVATE int sqlite3BtreeFirst(BtCursor *pCur, int *pRes){ - int rc; - - assert( cursorHoldsMutex(pCur) ); - assert( sqlite3_mutex_held(pCur->pBtree->db->mutex) ); - rc = moveToRoot(pCur); - if( rc==SQLITE_OK ){ - if( pCur->eState==CURSOR_INVALID ){ - assert( pCur->pgnoRoot==0 || pCur->apPage[pCur->iPage]->nCell==0 ); - *pRes = 1; - }else{ - assert( pCur->apPage[pCur->iPage]->nCell>0 ); - *pRes = 0; - rc = moveToLeftmost(pCur); - } - } - return rc; -} - -/* Move the cursor to the last entry in the table. Return SQLITE_OK -** on success. Set *pRes to 0 if the cursor actually points to something -** or set *pRes to 1 if the table is empty. -*/ -SQLITE_PRIVATE int sqlite3BtreeLast(BtCursor *pCur, int *pRes){ - int rc; - - assert( cursorHoldsMutex(pCur) ); - assert( sqlite3_mutex_held(pCur->pBtree->db->mutex) ); - - /* If the cursor already points to the last entry, this is a no-op. */ - if( CURSOR_VALID==pCur->eState && (pCur->curFlags & BTCF_AtLast)!=0 ){ -#ifdef SQLITE_DEBUG - /* This block serves to assert() that the cursor really does point - ** to the last entry in the b-tree. */ - int ii; - for(ii=0; iiiPage; ii++){ - assert( pCur->aiIdx[ii]==pCur->apPage[ii]->nCell ); - } - assert( pCur->aiIdx[pCur->iPage]==pCur->apPage[pCur->iPage]->nCell-1 ); - assert( pCur->apPage[pCur->iPage]->leaf ); -#endif - return SQLITE_OK; - } - - rc = moveToRoot(pCur); - if( rc==SQLITE_OK ){ - if( CURSOR_INVALID==pCur->eState ){ - assert( pCur->pgnoRoot==0 || pCur->apPage[pCur->iPage]->nCell==0 ); - *pRes = 1; - }else{ - assert( pCur->eState==CURSOR_VALID ); - *pRes = 0; - rc = moveToRightmost(pCur); - if( rc==SQLITE_OK ){ - pCur->curFlags |= BTCF_AtLast; - }else{ - pCur->curFlags &= ~BTCF_AtLast; - } - - } - } - return rc; -} - -/* Move the cursor so that it points to an entry near the key -** specified by pIdxKey or intKey. Return a success code. -** -** For INTKEY tables, the intKey parameter is used. pIdxKey -** must be NULL. For index tables, pIdxKey is used and intKey -** is ignored. -** -** If an exact match is not found, then the cursor is always -** left pointing at a leaf page which would hold the entry if it -** were present. The cursor might point to an entry that comes -** before or after the key. -** -** An integer is written into *pRes which is the result of -** comparing the key with the entry to which the cursor is -** pointing. 
The meaning of the integer written into -** *pRes is as follows: -** -** *pRes<0 The cursor is left pointing at an entry that -** is smaller than intKey/pIdxKey or if the table is empty -** and the cursor is therefore left point to nothing. -** -** *pRes==0 The cursor is left pointing at an entry that -** exactly matches intKey/pIdxKey. -** -** *pRes>0 The cursor is left pointing at an entry that -** is larger than intKey/pIdxKey. -** -*/ -SQLITE_PRIVATE int sqlite3BtreeMovetoUnpacked( - BtCursor *pCur, /* The cursor to be moved */ - UnpackedRecord *pIdxKey, /* Unpacked index key */ - i64 intKey, /* The table key */ - int biasRight, /* If true, bias the search to the high end */ - int *pRes /* Write search results here */ -){ - int rc; - RecordCompare xRecordCompare; - - assert( cursorHoldsMutex(pCur) ); - assert( sqlite3_mutex_held(pCur->pBtree->db->mutex) ); - assert( pRes ); - assert( (pIdxKey==0)==(pCur->pKeyInfo==0) ); - - /* If the cursor is already positioned at the point we are trying - ** to move to, then just return without doing any work */ - if( pCur->eState==CURSOR_VALID && (pCur->curFlags & BTCF_ValidNKey)!=0 - && pCur->apPage[0]->intKey - ){ - if( pCur->info.nKey==intKey ){ - *pRes = 0; - return SQLITE_OK; - } - if( (pCur->curFlags & BTCF_AtLast)!=0 && pCur->info.nKey<intKey ){ - *pRes = -1; - return SQLITE_OK; - } - } - - if( pIdxKey ){ - xRecordCompare = sqlite3VdbeFindCompare(pIdxKey); - pIdxKey->isCorrupt = 0; - assert( pIdxKey->default_rc==1 - || pIdxKey->default_rc==0 - || pIdxKey->default_rc==-1 - ); - }else{ - xRecordCompare = 0; /* All keys are integers */ - } - - rc = moveToRoot(pCur); - if( rc ){ - return rc; - } - assert( pCur->pgnoRoot==0 || pCur->apPage[pCur->iPage] ); - assert( pCur->pgnoRoot==0 || pCur->apPage[pCur->iPage]->isInit ); - assert( pCur->eState==CURSOR_INVALID || pCur->apPage[pCur->iPage]->nCell>0 ); - if( pCur->eState==CURSOR_INVALID ){ - *pRes = -1; - assert( pCur->pgnoRoot==0 || pCur->apPage[pCur->iPage]->nCell==0 ); - return SQLITE_OK; - } - assert( pCur->apPage[0]->intKey || pIdxKey ); - for(;;){ - int lwr, upr, idx, c; - Pgno chldPg; - MemPage *pPage = pCur->apPage[pCur->iPage]; - u8 *pCell; /* Pointer to current cell in pPage */ - - /* pPage->nCell must be greater than zero. If this is the root-page - ** the cursor would have been INVALID above and this for(;;) loop - ** not run. If this is not the root-page, then the moveToChild() routine - ** would have already detected db corruption. Similarly, pPage must - ** be the right kind (index or table) of b-tree page. Otherwise - ** a moveToChild() or moveToRoot() call would have detected corruption. */ - assert( pPage->nCell>0 ); - assert( pPage->intKey==(pIdxKey==0) ); - lwr = 0; - upr = pPage->nCell-1; - assert( biasRight==0 || biasRight==1 ); - idx = upr>>(1-biasRight); /* idx = biasRight ?
upr : (lwr+upr)/2; */ - pCur->aiIdx[pCur->iPage] = (u16)idx; - if( xRecordCompare==0 ){ - for(;;){ - i64 nCellKey; - pCell = findCell(pPage, idx) + pPage->childPtrSize; - if( pPage->hasData ){ - while( 0x80 <= *(pCell++) ){ - if( pCell>=pPage->aDataEnd ) return SQLITE_CORRUPT_BKPT; - } - } - getVarint(pCell, (u64*)&nCellKey); - if( nCellKeyupr ){ c = -1; break; } - }else if( nCellKey>intKey ){ - upr = idx-1; - if( lwr>upr ){ c = +1; break; } - }else{ - assert( nCellKey==intKey ); - pCur->curFlags |= BTCF_ValidNKey; - pCur->info.nKey = nCellKey; - pCur->aiIdx[pCur->iPage] = (u16)idx; - if( !pPage->leaf ){ - lwr = idx; - goto moveto_next_layer; - }else{ - *pRes = 0; - rc = SQLITE_OK; - goto moveto_finish; - } - } - assert( lwr+upr>=0 ); - idx = (lwr+upr)>>1; /* idx = (lwr+upr)/2; */ - } - }else{ - for(;;){ - int nCell; - pCell = findCell(pPage, idx) + pPage->childPtrSize; - - /* The maximum supported page-size is 65536 bytes. This means that - ** the maximum number of record bytes stored on an index B-Tree - ** page is less than 16384 bytes and may be stored as a 2-byte - ** varint. This information is used to attempt to avoid parsing - ** the entire cell by checking for the cases where the record is - ** stored entirely within the b-tree page by inspecting the first - ** 2 bytes of the cell. - */ - nCell = pCell[0]; - if( nCell<=pPage->max1bytePayload ){ - /* This branch runs if the record-size field of the cell is a - ** single byte varint and the record fits entirely on the main - ** b-tree page. */ - testcase( pCell+nCell+1==pPage->aDataEnd ); - c = xRecordCompare(nCell, (void*)&pCell[1], pIdxKey, 0); - }else if( !(pCell[1] & 0x80) - && (nCell = ((nCell&0x7f)<<7) + pCell[1])<=pPage->maxLocal - ){ - /* The record-size field is a 2 byte varint and the record - ** fits entirely on the main b-tree page. */ - testcase( pCell+nCell+2==pPage->aDataEnd ); - c = xRecordCompare(nCell, (void*)&pCell[2], pIdxKey, 0); - }else{ - /* The record flows over onto one or more overflow pages. In - ** this case the whole cell needs to be parsed, a buffer allocated - ** and accessPayload() used to retrieve the record into the - ** buffer before VdbeRecordCompare() can be called. 
*/ - void *pCellKey; - u8 * const pCellBody = pCell - pPage->childPtrSize; - btreeParseCellPtr(pPage, pCellBody, &pCur->info); - nCell = (int)pCur->info.nKey; - pCellKey = sqlite3Malloc( nCell ); - if( pCellKey==0 ){ - rc = SQLITE_NOMEM; - goto moveto_finish; - } - pCur->aiIdx[pCur->iPage] = (u16)idx; - rc = accessPayload(pCur, 0, nCell, (unsigned char*)pCellKey, 2); - if( rc ){ - sqlite3_free(pCellKey); - goto moveto_finish; - } - c = xRecordCompare(nCell, pCellKey, pIdxKey, 0); - sqlite3_free(pCellKey); - } - assert( pIdxKey->isCorrupt==0 || c==0 ); - if( c<0 ){ - lwr = idx+1; - }else if( c>0 ){ - upr = idx-1; - }else{ - assert( c==0 ); - *pRes = 0; - rc = SQLITE_OK; - pCur->aiIdx[pCur->iPage] = (u16)idx; - if( pIdxKey->isCorrupt ) rc = SQLITE_CORRUPT; - goto moveto_finish; - } - if( lwr>upr ) break; - assert( lwr+upr>=0 ); - idx = (lwr+upr)>>1; /* idx = (lwr+upr)/2 */ - } - } - assert( lwr==upr+1 || (pPage->intKey && !pPage->leaf) ); - assert( pPage->isInit ); - if( pPage->leaf ){ - assert( pCur->aiIdx[pCur->iPage]apPage[pCur->iPage]->nCell ); - pCur->aiIdx[pCur->iPage] = (u16)idx; - *pRes = c; - rc = SQLITE_OK; - goto moveto_finish; - } -moveto_next_layer: - if( lwr>=pPage->nCell ){ - chldPg = get4byte(&pPage->aData[pPage->hdrOffset+8]); - }else{ - chldPg = get4byte(findCell(pPage, lwr)); - } - pCur->aiIdx[pCur->iPage] = (u16)lwr; - rc = moveToChild(pCur, chldPg); - if( rc ) break; - } -moveto_finish: - pCur->info.nSize = 0; - pCur->curFlags &= ~(BTCF_ValidNKey|BTCF_ValidOvfl); - return rc; -} - - -/* -** Return TRUE if the cursor is not pointing at an entry of the table. -** -** TRUE will be returned after a call to sqlite3BtreeNext() moves -** past the last entry in the table or sqlite3BtreePrev() moves past -** the first entry. TRUE is also returned if the table is empty. -*/ -SQLITE_PRIVATE int sqlite3BtreeEof(BtCursor *pCur){ - /* TODO: What if the cursor is in CURSOR_REQUIRESEEK but all table entries - ** have been deleted? This API will need to change to return an error code - ** as well as the boolean result value. - */ - return (CURSOR_VALID!=pCur->eState); -} - -/* -** Advance the cursor to the next entry in the database. If -** successful then set *pRes=0. If the cursor -** was already pointing to the last entry in the database before -** this routine was called, then set *pRes=1. -** -** The calling function will set *pRes to 0 or 1. The initial *pRes value -** will be 1 if the cursor being stepped corresponds to an SQL index and -** if this routine could have been skipped if that SQL index had been -** a unique index. Otherwise the caller will have set *pRes to zero. -** Zero is the common case. The btree implementation is free to use the -** initial *pRes value as a hint to improve performance, but the current -** SQLite btree implementation does not. (Note that the comdb2 btree -** implementation does use this hint, however.) 
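/*
** Illustrative sketch -- not from sqlite3.c, added as an aside to the removed
** hunk above.  The search loop above begins its binary search at
** idx = upr>>(1-biasRight): with biasRight==0 that is the midpoint, with
** biasRight==1 it is the last cell, which favours the common append-at-the-end
** case.  The standalone search below over a sorted int array shows the same
** probe sequence; it is a plain binary search, not SQLite's cell-comparison
** logic, and demoBiasedSearch() is a name assumed for the sketch.
*/
#include <stdio.h>

/* Binary search with a biased first probe.  Returns the index of key in the
** sorted array a[0..n-1], or -1 if key is absent. */
static int demoBiasedSearch(const int *a, int n, int key, int biasRight){
  int lwr = 0, upr = n-1;
  if( upr<0 ) return -1;
  int idx = upr >> (1-biasRight);     /* last element, or the midpoint */
  while( lwr<=upr ){
    if( a[idx]<key ){
      lwr = idx+1;
    }else if( a[idx]>key ){
      upr = idx-1;
    }else{
      return idx;
    }
    idx = (lwr+upr)/2;                /* subsequent probes: plain midpoint */
  }
  return -1;
}

int main(void){
  int a[] = {2, 3, 5, 8, 13, 21};
  printf("%d %d\n", demoBiasedSearch(a, 6, 21, 1),   /* hits on the first probe */
                    demoBiasedSearch(a, 6, 4, 0));   /* not found -> -1 */
  return 0;
}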
-*/ -SQLITE_PRIVATE int sqlite3BtreeNext(BtCursor *pCur, int *pRes){ - int rc; - int idx; - MemPage *pPage; - - assert( cursorHoldsMutex(pCur) ); - assert( pRes!=0 ); - assert( *pRes==0 || *pRes==1 ); - assert( pCur->skipNext==0 || pCur->eState!=CURSOR_VALID ); - if( pCur->eState!=CURSOR_VALID ){ - invalidateOverflowCache(pCur); - rc = restoreCursorPosition(pCur); - if( rc!=SQLITE_OK ){ - *pRes = 0; - return rc; - } - if( CURSOR_INVALID==pCur->eState ){ - *pRes = 1; - return SQLITE_OK; - } - if( pCur->skipNext ){ - assert( pCur->eState==CURSOR_VALID || pCur->eState==CURSOR_SKIPNEXT ); - pCur->eState = CURSOR_VALID; - if( pCur->skipNext>0 ){ - pCur->skipNext = 0; - *pRes = 0; - return SQLITE_OK; - } - pCur->skipNext = 0; - } - } - - pPage = pCur->apPage[pCur->iPage]; - idx = ++pCur->aiIdx[pCur->iPage]; - assert( pPage->isInit ); - - /* If the database file is corrupt, it is possible for the value of idx - ** to be invalid here. This can only occur if a second cursor modifies - ** the page while cursor pCur is holding a reference to it. Which can - ** only happen if the database is corrupt in such a way as to link the - ** page into more than one b-tree structure. */ - testcase( idx>pPage->nCell ); - - pCur->info.nSize = 0; - pCur->curFlags &= ~(BTCF_ValidNKey|BTCF_ValidOvfl); - if( idx>=pPage->nCell ){ - if( !pPage->leaf ){ - rc = moveToChild(pCur, get4byte(&pPage->aData[pPage->hdrOffset+8])); - if( rc ){ - *pRes = 0; - return rc; - } - rc = moveToLeftmost(pCur); - *pRes = 0; - return rc; - } - do{ - if( pCur->iPage==0 ){ - *pRes = 1; - pCur->eState = CURSOR_INVALID; - return SQLITE_OK; - } - moveToParent(pCur); - pPage = pCur->apPage[pCur->iPage]; - }while( pCur->aiIdx[pCur->iPage]>=pPage->nCell ); - *pRes = 0; - if( pPage->intKey ){ - rc = sqlite3BtreeNext(pCur, pRes); - }else{ - rc = SQLITE_OK; - } - return rc; - } - *pRes = 0; - if( pPage->leaf ){ - return SQLITE_OK; - } - rc = moveToLeftmost(pCur); - return rc; -} - - -/* -** Step the cursor to the back to the previous entry in the database. If -** successful then set *pRes=0. If the cursor -** was already pointing to the first entry in the database before -** this routine was called, then set *pRes=1. -** -** The calling function will set *pRes to 0 or 1. The initial *pRes value -** will be 1 if the cursor being stepped corresponds to an SQL index and -** if this routine could have been skipped if that SQL index had been -** a unique index. Otherwise the caller will have set *pRes to zero. -** Zero is the common case. The btree implementation is free to use the -** initial *pRes value as a hint to improve performance, but the current -** SQLite btree implementation does not. (Note that the comdb2 btree -** implementation does use this hint, however.) 
-*/ -SQLITE_PRIVATE int sqlite3BtreePrevious(BtCursor *pCur, int *pRes){ - int rc; - MemPage *pPage; - - assert( cursorHoldsMutex(pCur) ); - assert( pRes!=0 ); - assert( *pRes==0 || *pRes==1 ); - assert( pCur->skipNext==0 || pCur->eState!=CURSOR_VALID ); - pCur->curFlags &= ~(BTCF_AtLast|BTCF_ValidOvfl); - if( pCur->eState!=CURSOR_VALID ){ - if( ALWAYS(pCur->eState>=CURSOR_REQUIRESEEK) ){ - rc = btreeRestoreCursorPosition(pCur); - if( rc!=SQLITE_OK ){ - *pRes = 0; - return rc; - } - } - if( CURSOR_INVALID==pCur->eState ){ - *pRes = 1; - return SQLITE_OK; - } - if( pCur->skipNext ){ - assert( pCur->eState==CURSOR_VALID || pCur->eState==CURSOR_SKIPNEXT ); - pCur->eState = CURSOR_VALID; - if( pCur->skipNext<0 ){ - pCur->skipNext = 0; - *pRes = 0; - return SQLITE_OK; - } - pCur->skipNext = 0; - } - } - - pPage = pCur->apPage[pCur->iPage]; - assert( pPage->isInit ); - if( !pPage->leaf ){ - int idx = pCur->aiIdx[pCur->iPage]; - rc = moveToChild(pCur, get4byte(findCell(pPage, idx))); - if( rc ){ - *pRes = 0; - return rc; - } - rc = moveToRightmost(pCur); - }else{ - while( pCur->aiIdx[pCur->iPage]==0 ){ - if( pCur->iPage==0 ){ - pCur->eState = CURSOR_INVALID; - *pRes = 1; - return SQLITE_OK; - } - moveToParent(pCur); - } - pCur->info.nSize = 0; - pCur->curFlags &= ~(BTCF_ValidNKey|BTCF_ValidOvfl); - - pCur->aiIdx[pCur->iPage]--; - pPage = pCur->apPage[pCur->iPage]; - if( pPage->intKey && !pPage->leaf ){ - rc = sqlite3BtreePrevious(pCur, pRes); - }else{ - rc = SQLITE_OK; - } - } - *pRes = 0; - return rc; -} - -/* -** Allocate a new page from the database file. -** -** The new page is marked as dirty. (In other words, sqlite3PagerWrite() -** has already been called on the new page.) The new page has also -** been referenced and the calling routine is responsible for calling -** sqlite3PagerUnref() on the new page when it is done. -** -** SQLITE_OK is returned on success. Any other return value indicates -** an error. *ppPage and *pPgno are undefined in the event of an error. -** Do not invoke sqlite3PagerUnref() on *ppPage if an error is returned. -** -** If the "nearby" parameter is not 0, then an effort is made to -** locate a page close to the page number "nearby". This can be used in an -** attempt to keep related pages close to each other in the database file, -** which in turn can make database access faster. -** -** If the eMode parameter is BTALLOC_EXACT and the nearby page exists -** anywhere on the free-list, then it is guaranteed to be returned. If -** eMode is BTALLOC_LT then the page returned will be less than or equal -** to nearby if any such page exists. If eMode is BTALLOC_ANY then there -** are no restrictions on which page is returned. 
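/*
** Illustrative sketch -- not from sqlite3.c, added as an aside to the removed
** hunk above.  allocateBtreePage() works through two fixed header fields on
** page 1 (byte offset 32: page number of the first free-list trunk page;
** offset 36: total count of free pages) and through the trunk-page layout
** (bytes 0..3: next trunk page, bytes 4..7: number of leaf entries k,
** bytes 8..8+4k-1: leaf page numbers).  The helpers below only decode those
** fields from raw page buffers; DemoFreelistInfo and the demo* names are
** assumptions for the sketch.
*/
#include <stdint.h>

static uint32_t demoGet4byte(const uint8_t *p){
  return ((uint32_t)p[0]<<24) | ((uint32_t)p[1]<<16)
       | ((uint32_t)p[2]<<8)  |  (uint32_t)p[3];
}

typedef struct DemoFreelistInfo {
  uint32_t nFree;       /* total free pages (page 1, offset 36) */
  uint32_t iFirstTrunk; /* first trunk page number (page 1, offset 32) */
} DemoFreelistInfo;

static DemoFreelistInfo demoReadFreelistHeader(const uint8_t *aPage1){
  DemoFreelistInfo fi;
  fi.iFirstTrunk = demoGet4byte(&aPage1[32]);
  fi.nFree = demoGet4byte(&aPage1[36]);
  return fi;
}

/* Decode one trunk page: *pNextTrunk receives the next trunk page number,
** the return value is k, the count of leaf page numbers stored at
** bytes 8, 12, 16, ...; aTrunk[8+4*i] holds leaf i. */
static uint32_t demoReadTrunk(const uint8_t *aTrunk, uint32_t *pNextTrunk){
  *pNextTrunk = demoGet4byte(&aTrunk[0]);
  return demoGet4byte(&aTrunk[4]);
}

int main(void){
  uint8_t aPage1[100] = {0}, aTrunk[100] = {0};
  uint32_t next;
  /* pretend page 1 says: first trunk = page 6, 3 free pages in total */
  aPage1[32+3] = 6;  aPage1[36+3] = 3;   /* big-endian, low byte last */
  DemoFreelistInfo fi = demoReadFreelistHeader(aPage1);
  uint32_t k = demoReadTrunk(aTrunk, &next);
  return (fi.iFirstTrunk==6 && fi.nFree==3 && k==0 && next==0) ? 0 : 1;
}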
-*/ -static int allocateBtreePage( - BtShared *pBt, /* The btree */ - MemPage **ppPage, /* Store pointer to the allocated page here */ - Pgno *pPgno, /* Store the page number here */ - Pgno nearby, /* Search for a page near this one */ - u8 eMode /* BTALLOC_EXACT, BTALLOC_LT, or BTALLOC_ANY */ -){ - MemPage *pPage1; - int rc; - u32 n; /* Number of pages on the freelist */ - u32 k; /* Number of leaves on the trunk of the freelist */ - MemPage *pTrunk = 0; - MemPage *pPrevTrunk = 0; - Pgno mxPage; /* Total size of the database file */ - - assert( sqlite3_mutex_held(pBt->mutex) ); - assert( eMode==BTALLOC_ANY || (nearby>0 && IfNotOmitAV(pBt->autoVacuum)) ); - pPage1 = pBt->pPage1; - mxPage = btreePagecount(pBt); - n = get4byte(&pPage1->aData[36]); - testcase( n==mxPage-1 ); - if( n>=mxPage ){ - return SQLITE_CORRUPT_BKPT; - } - if( n>0 ){ - /* There are pages on the freelist. Reuse one of those pages. */ - Pgno iTrunk; - u8 searchList = 0; /* If the free-list must be searched for 'nearby' */ - - /* If eMode==BTALLOC_EXACT and a query of the pointer-map - ** shows that the page 'nearby' is somewhere on the free-list, then - ** the entire-list will be searched for that page. - */ -#ifndef SQLITE_OMIT_AUTOVACUUM - if( eMode==BTALLOC_EXACT ){ - if( nearby<=mxPage ){ - u8 eType; - assert( nearby>0 ); - assert( pBt->autoVacuum ); - rc = ptrmapGet(pBt, nearby, &eType, 0); - if( rc ) return rc; - if( eType==PTRMAP_FREEPAGE ){ - searchList = 1; - } - } - }else if( eMode==BTALLOC_LE ){ - searchList = 1; - } -#endif - - /* Decrement the free-list count by 1. Set iTrunk to the index of the - ** first free-list trunk page. iPrevTrunk is initially 1. - */ - rc = sqlite3PagerWrite(pPage1->pDbPage); - if( rc ) return rc; - put4byte(&pPage1->aData[36], n-1); - - /* The code within this loop is run only once if the 'searchList' variable - ** is not true. Otherwise, it runs once for each trunk-page on the - ** free-list until the page 'nearby' is located (eMode==BTALLOC_EXACT) - ** or until a page less than 'nearby' is located (eMode==BTALLOC_LT) - */ - do { - pPrevTrunk = pTrunk; - if( pPrevTrunk ){ - iTrunk = get4byte(&pPrevTrunk->aData[0]); - }else{ - iTrunk = get4byte(&pPage1->aData[32]); - } - testcase( iTrunk==mxPage ); - if( iTrunk>mxPage ){ - rc = SQLITE_CORRUPT_BKPT; - }else{ - rc = btreeGetPage(pBt, iTrunk, &pTrunk, 0); - } - if( rc ){ - pTrunk = 0; - goto end_allocate_page; - } - assert( pTrunk!=0 ); - assert( pTrunk->aData!=0 ); - - k = get4byte(&pTrunk->aData[4]); /* # of leaves on this trunk page */ - if( k==0 && !searchList ){ - /* The trunk has no leaves and the list is not being searched. - ** So extract the trunk page itself and use it as the newly - ** allocated page */ - assert( pPrevTrunk==0 ); - rc = sqlite3PagerWrite(pTrunk->pDbPage); - if( rc ){ - goto end_allocate_page; - } - *pPgno = iTrunk; - memcpy(&pPage1->aData[32], &pTrunk->aData[0], 4); - *ppPage = pTrunk; - pTrunk = 0; - TRACE(("ALLOCATE: %d trunk - %d free pages left\n", *pPgno, n-1)); - }else if( k>(u32)(pBt->usableSize/4 - 2) ){ - /* Value of k is out of range. 
Database corruption */ - rc = SQLITE_CORRUPT_BKPT; - goto end_allocate_page; -#ifndef SQLITE_OMIT_AUTOVACUUM - }else if( searchList - && (nearby==iTrunk || (iTrunkpDbPage); - if( rc ){ - goto end_allocate_page; - } - if( k==0 ){ - if( !pPrevTrunk ){ - memcpy(&pPage1->aData[32], &pTrunk->aData[0], 4); - }else{ - rc = sqlite3PagerWrite(pPrevTrunk->pDbPage); - if( rc!=SQLITE_OK ){ - goto end_allocate_page; - } - memcpy(&pPrevTrunk->aData[0], &pTrunk->aData[0], 4); - } - }else{ - /* The trunk page is required by the caller but it contains - ** pointers to free-list leaves. The first leaf becomes a trunk - ** page in this case. - */ - MemPage *pNewTrunk; - Pgno iNewTrunk = get4byte(&pTrunk->aData[8]); - if( iNewTrunk>mxPage ){ - rc = SQLITE_CORRUPT_BKPT; - goto end_allocate_page; - } - testcase( iNewTrunk==mxPage ); - rc = btreeGetPage(pBt, iNewTrunk, &pNewTrunk, 0); - if( rc!=SQLITE_OK ){ - goto end_allocate_page; - } - rc = sqlite3PagerWrite(pNewTrunk->pDbPage); - if( rc!=SQLITE_OK ){ - releasePage(pNewTrunk); - goto end_allocate_page; - } - memcpy(&pNewTrunk->aData[0], &pTrunk->aData[0], 4); - put4byte(&pNewTrunk->aData[4], k-1); - memcpy(&pNewTrunk->aData[8], &pTrunk->aData[12], (k-1)*4); - releasePage(pNewTrunk); - if( !pPrevTrunk ){ - assert( sqlite3PagerIswriteable(pPage1->pDbPage) ); - put4byte(&pPage1->aData[32], iNewTrunk); - }else{ - rc = sqlite3PagerWrite(pPrevTrunk->pDbPage); - if( rc ){ - goto end_allocate_page; - } - put4byte(&pPrevTrunk->aData[0], iNewTrunk); - } - } - pTrunk = 0; - TRACE(("ALLOCATE: %d trunk - %d free pages left\n", *pPgno, n-1)); -#endif - }else if( k>0 ){ - /* Extract a leaf from the trunk */ - u32 closest; - Pgno iPage; - unsigned char *aData = pTrunk->aData; - if( nearby>0 ){ - u32 i; - closest = 0; - if( eMode==BTALLOC_LE ){ - for(i=0; imxPage ){ - rc = SQLITE_CORRUPT_BKPT; - goto end_allocate_page; - } - testcase( iPage==mxPage ); - if( !searchList - || (iPage==nearby || (iPagepgno, n-1)); - rc = sqlite3PagerWrite(pTrunk->pDbPage); - if( rc ) goto end_allocate_page; - if( closestpDbPage); - if( rc!=SQLITE_OK ){ - releasePage(*ppPage); - } - } - searchList = 0; - } - } - releasePage(pPrevTrunk); - pPrevTrunk = 0; - }while( searchList ); - }else{ - /* There are no pages on the freelist, so append a new page to the - ** database image. - ** - ** Normally, new pages allocated by this block can be requested from the - ** pager layer with the 'no-content' flag set. This prevents the pager - ** from trying to read the pages content from disk. However, if the - ** current transaction has already run one or more incremental-vacuum - ** steps, then the page we are about to allocate may contain content - ** that is required in the event of a rollback. In this case, do - ** not set the no-content flag. This causes the pager to load and journal - ** the current page content before overwriting it. - ** - ** Note that the pager will not actually attempt to load or journal - ** content for any page that really does lie past the end of the database - ** file on disk. So the effects of disabling the no-content optimization - ** here are confined to those pages that lie between the end of the - ** database image and the end of the database file. - */ - int bNoContent = (0==IfNotOmitAV(pBt->bDoTruncate)) ? 
PAGER_GET_NOCONTENT : 0; - - rc = sqlite3PagerWrite(pBt->pPage1->pDbPage); - if( rc ) return rc; - pBt->nPage++; - if( pBt->nPage==PENDING_BYTE_PAGE(pBt) ) pBt->nPage++; - -#ifndef SQLITE_OMIT_AUTOVACUUM - if( pBt->autoVacuum && PTRMAP_ISPAGE(pBt, pBt->nPage) ){ - /* If *pPgno refers to a pointer-map page, allocate two new pages - ** at the end of the file instead of one. The first allocated page - ** becomes a new pointer-map page, the second is used by the caller. - */ - MemPage *pPg = 0; - TRACE(("ALLOCATE: %d from end of file (pointer-map page)\n", pBt->nPage)); - assert( pBt->nPage!=PENDING_BYTE_PAGE(pBt) ); - rc = btreeGetPage(pBt, pBt->nPage, &pPg, bNoContent); - if( rc==SQLITE_OK ){ - rc = sqlite3PagerWrite(pPg->pDbPage); - releasePage(pPg); - } - if( rc ) return rc; - pBt->nPage++; - if( pBt->nPage==PENDING_BYTE_PAGE(pBt) ){ pBt->nPage++; } - } -#endif - put4byte(28 + (u8*)pBt->pPage1->aData, pBt->nPage); - *pPgno = pBt->nPage; - - assert( *pPgno!=PENDING_BYTE_PAGE(pBt) ); - rc = btreeGetPage(pBt, *pPgno, ppPage, bNoContent); - if( rc ) return rc; - rc = sqlite3PagerWrite((*ppPage)->pDbPage); - if( rc!=SQLITE_OK ){ - releasePage(*ppPage); - } - TRACE(("ALLOCATE: %d from end of file\n", *pPgno)); - } - - assert( *pPgno!=PENDING_BYTE_PAGE(pBt) ); - -end_allocate_page: - releasePage(pTrunk); - releasePage(pPrevTrunk); - if( rc==SQLITE_OK ){ - if( sqlite3PagerPageRefcount((*ppPage)->pDbPage)>1 ){ - releasePage(*ppPage); - *ppPage = 0; - return SQLITE_CORRUPT_BKPT; - } - (*ppPage)->isInit = 0; - }else{ - *ppPage = 0; - } - assert( rc!=SQLITE_OK || sqlite3PagerIswriteable((*ppPage)->pDbPage) ); - return rc; -} - -/* -** This function is used to add page iPage to the database file free-list. -** It is assumed that the page is not already a part of the free-list. -** -** The value passed as the second argument to this function is optional. -** If the caller happens to have a pointer to the MemPage object -** corresponding to page iPage handy, it may pass it as the second value. -** Otherwise, it may pass NULL. -** -** If a pointer to a MemPage object is passed as the second argument, -** its reference count is not altered by this function. -*/ -static int freePage2(BtShared *pBt, MemPage *pMemPage, Pgno iPage){ - MemPage *pTrunk = 0; /* Free-list trunk page */ - Pgno iTrunk = 0; /* Page number of free-list trunk page */ - MemPage *pPage1 = pBt->pPage1; /* Local reference to page 1 */ - MemPage *pPage; /* Page being freed. May be NULL. */ - int rc; /* Return Code */ - int nFree; /* Initial number of pages on free-list */ - - assert( sqlite3_mutex_held(pBt->mutex) ); - assert( iPage>1 ); - assert( !pMemPage || pMemPage->pgno==iPage ); - - if( pMemPage ){ - pPage = pMemPage; - sqlite3PagerRef(pPage->pDbPage); - }else{ - pPage = btreePageLookup(pBt, iPage); - } - - /* Increment the free page count on pPage1 */ - rc = sqlite3PagerWrite(pPage1->pDbPage); - if( rc ) goto freepage_out; - nFree = get4byte(&pPage1->aData[36]); - put4byte(&pPage1->aData[36], nFree+1); - - if( pBt->btsFlags & BTS_SECURE_DELETE ){ - /* If the secure_delete option is enabled, then - ** always fully overwrite deleted information with zeros. - */ - if( (!pPage && ((rc = btreeGetPage(pBt, iPage, &pPage, 0))!=0) ) - || ((rc = sqlite3PagerWrite(pPage->pDbPage))!=0) - ){ - goto freepage_out; - } - memset(pPage->aData, 0, pPage->pBt->pageSize); - } - - /* If the database supports auto-vacuum, write an entry in the pointer-map - ** to indicate that the page is free. 
- */ - if( ISAUTOVACUUM ){ - ptrmapPut(pBt, iPage, PTRMAP_FREEPAGE, 0, &rc); - if( rc ) goto freepage_out; - } - - /* Now manipulate the actual database free-list structure. There are two - ** possibilities. If the free-list is currently empty, or if the first - ** trunk page in the free-list is full, then this page will become a - ** new free-list trunk page. Otherwise, it will become a leaf of the - ** first trunk page in the current free-list. This block tests if it - ** is possible to add the page as a new free-list leaf. - */ - if( nFree!=0 ){ - u32 nLeaf; /* Initial number of leaf cells on trunk page */ - - iTrunk = get4byte(&pPage1->aData[32]); - rc = btreeGetPage(pBt, iTrunk, &pTrunk, 0); - if( rc!=SQLITE_OK ){ - goto freepage_out; - } - - nLeaf = get4byte(&pTrunk->aData[4]); - assert( pBt->usableSize>32 ); - if( nLeaf > (u32)pBt->usableSize/4 - 2 ){ - rc = SQLITE_CORRUPT_BKPT; - goto freepage_out; - } - if( nLeaf < (u32)pBt->usableSize/4 - 8 ){ - /* In this case there is room on the trunk page to insert the page - ** being freed as a new leaf. - ** - ** Note that the trunk page is not really full until it contains - ** usableSize/4 - 2 entries, not usableSize/4 - 8 entries as we have - ** coded. But due to a coding error in versions of SQLite prior to - ** 3.6.0, databases with freelist trunk pages holding more than - ** usableSize/4 - 8 entries will be reported as corrupt. In order - ** to maintain backwards compatibility with older versions of SQLite, - ** we will continue to restrict the number of entries to usableSize/4 - 8 - ** for now. At some point in the future (once everyone has upgraded - ** to 3.6.0 or later) we should consider fixing the conditional above - ** to read "usableSize/4-2" instead of "usableSize/4-8". - */ - rc = sqlite3PagerWrite(pTrunk->pDbPage); - if( rc==SQLITE_OK ){ - put4byte(&pTrunk->aData[4], nLeaf+1); - put4byte(&pTrunk->aData[8+nLeaf*4], iPage); - if( pPage && (pBt->btsFlags & BTS_SECURE_DELETE)==0 ){ - sqlite3PagerDontWrite(pPage->pDbPage); - } - rc = btreeSetHasContent(pBt, iPage); - } - TRACE(("FREE-PAGE: %d leaf on trunk page %d\n",pPage->pgno,pTrunk->pgno)); - goto freepage_out; - } - } - - /* If control flows to this point, then it was not possible to add the - ** the page being freed as a leaf page of the first trunk in the free-list. - ** Possibly because the free-list is empty, or possibly because the - ** first trunk in the free-list is full. Either way, the page being freed - ** will become the new first trunk page in the free-list. - */ - if( pPage==0 && SQLITE_OK!=(rc = btreeGetPage(pBt, iPage, &pPage, 0)) ){ - goto freepage_out; - } - rc = sqlite3PagerWrite(pPage->pDbPage); - if( rc!=SQLITE_OK ){ - goto freepage_out; - } - put4byte(pPage->aData, iTrunk); - put4byte(&pPage->aData[4], 0); - put4byte(&pPage1->aData[32], iPage); - TRACE(("FREE-PAGE: %d new trunk page replacing %d\n", pPage->pgno, iTrunk)); - -freepage_out: - if( pPage ){ - pPage->isInit = 0; - } - releasePage(pPage); - releasePage(pTrunk); - return rc; -} -static void freePage(MemPage *pPage, int *pRC){ - if( (*pRC)==SQLITE_OK ){ - *pRC = freePage2(pPage->pBt, pPage, pPage->pgno); - } -} - -/* -** Free any overflow pages associated with the given Cell. 
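/*
** Illustrative sketch -- not from sqlite3.c, added as an aside to the removed
** hunk above.  freePage2() either appends the freed page as a leaf of the
** first trunk page (when that trunk still has room, i.e. fewer than
** usableSize/4 - 8 leaves, the deliberately conservative limit explained in
** the comment above) or makes the freed page itself the new first trunk,
** pointing at the old one with a leaf count of zero.  The decision reduces to
** the small predicate below; usableSize, nLeaf and the demo* names are
** assumed inputs for the sketch.
*/
#include <stdbool.h>
#include <stdint.h>

/* Can one more leaf page number be added to a trunk page that already holds
** nLeaf entries?  The limit is usableSize/4 - 8 rather than the theoretical
** usableSize/4 - 2, for backwards compatibility as noted above. */
static bool demoTrunkHasRoom(uint32_t usableSize, uint32_t nLeaf){
  return nLeaf < usableSize/4 - 8;
}

/* Sketch of the two outcomes when freeing a page:
** return 0 -> append the page as a leaf of the current first trunk;
** return 1 -> the page becomes the new first trunk (bytes 0..3 point at the
**             old first trunk, bytes 4..7 hold a leaf count of 0). */
static int demoFreePagePlan(uint32_t nFree, uint32_t usableSize,
                            uint32_t nLeafOnFirstTrunk){
  if( nFree>0 && demoTrunkHasRoom(usableSize, nLeafOnFirstTrunk) ){
    return 0;
  }
  return 1;
}

int main(void){
  /* freelist empty -> the freed page must become the new first trunk */
  return demoFreePagePlan(0, 1024, 0)==1 ? 0 : 1;
}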
-*/ -static int clearCell(MemPage *pPage, unsigned char *pCell){ - BtShared *pBt = pPage->pBt; - CellInfo info; - Pgno ovflPgno; - int rc; - int nOvfl; - u32 ovflPageSize; - - assert( sqlite3_mutex_held(pPage->pBt->mutex) ); - btreeParseCellPtr(pPage, pCell, &info); - if( info.iOverflow==0 ){ - return SQLITE_OK; /* No overflow pages. Return without doing anything */ - } - if( pCell+info.iOverflow+3 > pPage->aData+pPage->maskPage ){ - return SQLITE_CORRUPT_BKPT; /* Cell extends past end of page */ - } - ovflPgno = get4byte(&pCell[info.iOverflow]); - assert( pBt->usableSize > 4 ); - ovflPageSize = pBt->usableSize - 4; - nOvfl = (info.nPayload - info.nLocal + ovflPageSize - 1)/ovflPageSize; - assert( ovflPgno==0 || nOvfl>0 ); - while( nOvfl-- ){ - Pgno iNext = 0; - MemPage *pOvfl = 0; - if( ovflPgno<2 || ovflPgno>btreePagecount(pBt) ){ - /* 0 is not a legal page number and page 1 cannot be an - ** overflow page. Therefore if ovflPgno<2 or past the end of the - ** file the database must be corrupt. */ - return SQLITE_CORRUPT_BKPT; - } - if( nOvfl ){ - rc = getOverflowPage(pBt, ovflPgno, &pOvfl, &iNext); - if( rc ) return rc; - } - - if( ( pOvfl || ((pOvfl = btreePageLookup(pBt, ovflPgno))!=0) ) - && sqlite3PagerPageRefcount(pOvfl->pDbPage)!=1 - ){ - /* There is no reason any cursor should have an outstanding reference - ** to an overflow page belonging to a cell that is being deleted/updated. - ** So if there exists more than one reference to this page, then it - ** must not really be an overflow page and the database must be corrupt. - ** It is helpful to detect this before calling freePage2(), as - ** freePage2() may zero the page contents if secure-delete mode is - ** enabled. If this 'overflow' page happens to be a page that the - ** caller is iterating through or using in some other way, this - ** can be problematic. - */ - rc = SQLITE_CORRUPT_BKPT; - }else{ - rc = freePage2(pBt, pOvfl, ovflPgno); - } - - if( pOvfl ){ - sqlite3PagerUnref(pOvfl->pDbPage); - } - if( rc ) return rc; - ovflPgno = iNext; - } - return SQLITE_OK; -} - -/* -** Create the byte sequence used to represent a cell on page pPage -** and write that byte sequence into pCell[]. Overflow pages are -** allocated and filled in as necessary. The calling procedure -** is responsible for making sure sufficient space has been allocated -** for pCell[]. -** -** Note that pCell does not necessary need to point to the pPage->aData -** area. pCell might point to some temporary storage. The cell will -** be constructed in this temporary area then copied into pPage->aData -** later. -*/ -static int fillInCell( - MemPage *pPage, /* The page that contains the cell */ - unsigned char *pCell, /* Complete text of the cell */ - const void *pKey, i64 nKey, /* The key */ - const void *pData,int nData, /* The data */ - int nZero, /* Extra zero bytes to append to pData */ - int *pnSize /* Write cell size here */ -){ - int nPayload; - const u8 *pSrc; - int nSrc, n, rc; - int spaceLeft; - MemPage *pOvfl = 0; - MemPage *pToRelease = 0; - unsigned char *pPrior; - unsigned char *pPayload; - BtShared *pBt = pPage->pBt; - Pgno pgnoOvfl = 0; - int nHeader; - CellInfo info; - - assert( sqlite3_mutex_held(pPage->pBt->mutex) ); - - /* pPage is not necessarily writeable since pCell might be auxiliary - ** buffer space that is separate from the pPage buffer area */ - assert( pCellaData || pCell>=&pPage->aData[pBt->pageSize] - || sqlite3PagerIswriteable(pPage->pDbPage) ); - - /* Fill in the header. 
*/ - nHeader = 0; - if( !pPage->leaf ){ - nHeader += 4; - } - if( pPage->hasData ){ - nHeader += putVarint32(&pCell[nHeader], nData+nZero); - }else{ - nData = nZero = 0; - } - nHeader += putVarint(&pCell[nHeader], *(u64*)&nKey); - btreeParseCellPtr(pPage, pCell, &info); - assert( info.nHeader==nHeader ); - assert( info.nKey==nKey ); - assert( info.nData==(u32)(nData+nZero) ); - - /* Fill in the payload */ - nPayload = nData + nZero; - if( pPage->intKey ){ - pSrc = pData; - nSrc = nData; - nData = 0; - }else{ - if( NEVER(nKey>0x7fffffff || pKey==0) ){ - return SQLITE_CORRUPT_BKPT; - } - nPayload += (int)nKey; - pSrc = pKey; - nSrc = (int)nKey; - } - *pnSize = info.nSize; - spaceLeft = info.nLocal; - pPayload = &pCell[nHeader]; - pPrior = &pCell[info.iOverflow]; - - while( nPayload>0 ){ - if( spaceLeft==0 ){ -#ifndef SQLITE_OMIT_AUTOVACUUM - Pgno pgnoPtrmap = pgnoOvfl; /* Overflow page pointer-map entry page */ - if( pBt->autoVacuum ){ - do{ - pgnoOvfl++; - } while( - PTRMAP_ISPAGE(pBt, pgnoOvfl) || pgnoOvfl==PENDING_BYTE_PAGE(pBt) - ); - } -#endif - rc = allocateBtreePage(pBt, &pOvfl, &pgnoOvfl, pgnoOvfl, 0); -#ifndef SQLITE_OMIT_AUTOVACUUM - /* If the database supports auto-vacuum, and the second or subsequent - ** overflow page is being allocated, add an entry to the pointer-map - ** for that page now. - ** - ** If this is the first overflow page, then write a partial entry - ** to the pointer-map. If we write nothing to this pointer-map slot, - ** then the optimistic overflow chain processing in clearCell() - ** may misinterpret the uninitialized values and delete the - ** wrong pages from the database. - */ - if( pBt->autoVacuum && rc==SQLITE_OK ){ - u8 eType = (pgnoPtrmap?PTRMAP_OVERFLOW2:PTRMAP_OVERFLOW1); - ptrmapPut(pBt, pgnoOvfl, eType, pgnoPtrmap, &rc); - if( rc ){ - releasePage(pOvfl); - } - } -#endif - if( rc ){ - releasePage(pToRelease); - return rc; - } - - /* If pToRelease is not zero than pPrior points into the data area - ** of pToRelease. Make sure pToRelease is still writeable. */ - assert( pToRelease==0 || sqlite3PagerIswriteable(pToRelease->pDbPage) ); - - /* If pPrior is part of the data area of pPage, then make sure pPage - ** is still writeable */ - assert( pPrioraData || pPrior>=&pPage->aData[pBt->pageSize] - || sqlite3PagerIswriteable(pPage->pDbPage) ); - - put4byte(pPrior, pgnoOvfl); - releasePage(pToRelease); - pToRelease = pOvfl; - pPrior = pOvfl->aData; - put4byte(pPrior, 0); - pPayload = &pOvfl->aData[4]; - spaceLeft = pBt->usableSize - 4; - } - n = nPayload; - if( n>spaceLeft ) n = spaceLeft; - - /* If pToRelease is not zero than pPayload points into the data area - ** of pToRelease. Make sure pToRelease is still writeable. */ - assert( pToRelease==0 || sqlite3PagerIswriteable(pToRelease->pDbPage) ); - - /* If pPayload is part of the data area of pPage, then make sure pPage - ** is still writeable */ - assert( pPayloadaData || pPayload>=&pPage->aData[pBt->pageSize] - || sqlite3PagerIswriteable(pPage->pDbPage) ); - - if( nSrc>0 ){ - if( n>nSrc ) n = nSrc; - assert( pSrc ); - memcpy(pPayload, pSrc, n); - }else{ - memset(pPayload, 0, n); - } - nPayload -= n; - pPayload += n; - pSrc += n; - nSrc -= n; - spaceLeft -= n; - if( nSrc==0 ){ - nSrc = nData; - pSrc = pData; - } - } - releasePage(pToRelease); - return SQLITE_OK; -} - -/* -** Remove the i-th cell from pPage. This routine effects pPage only. -** The cell content is not freed or deallocated. It is assumed that -** the cell content has been copied someplace else. 
This routine just -** removes the reference to the cell from pPage. -** -** "sz" must be the number of bytes in the cell. -*/ -static void dropCell(MemPage *pPage, int idx, int sz, int *pRC){ - u32 pc; /* Offset to cell content of cell being deleted */ - u8 *data; /* pPage->aData */ - u8 *ptr; /* Used to move bytes around within data[] */ - int rc; /* The return code */ - int hdr; /* Beginning of the header. 0 most pages. 100 page 1 */ - - if( *pRC ) return; - - assert( idx>=0 && idxnCell ); - assert( sz==cellSize(pPage, idx) ); - assert( sqlite3PagerIswriteable(pPage->pDbPage) ); - assert( sqlite3_mutex_held(pPage->pBt->mutex) ); - data = pPage->aData; - ptr = &pPage->aCellIdx[2*idx]; - pc = get2byte(ptr); - hdr = pPage->hdrOffset; - testcase( pc==get2byte(&data[hdr+5]) ); - testcase( pc+sz==pPage->pBt->usableSize ); - if( pc < (u32)get2byte(&data[hdr+5]) || pc+sz > pPage->pBt->usableSize ){ - *pRC = SQLITE_CORRUPT_BKPT; - return; - } - rc = freeSpace(pPage, pc, sz); - if( rc ){ - *pRC = rc; - return; - } - pPage->nCell--; - memmove(ptr, ptr+2, 2*(pPage->nCell - idx)); - put2byte(&data[hdr+3], pPage->nCell); - pPage->nFree += 2; -} - -/* -** Insert a new cell on pPage at cell index "i". pCell points to the -** content of the cell. -** -** If the cell content will fit on the page, then put it there. If it -** will not fit, then make a copy of the cell content into pTemp if -** pTemp is not null. Regardless of pTemp, allocate a new entry -** in pPage->apOvfl[] and make it point to the cell content (either -** in pTemp or the original pCell) and also record its index. -** Allocating a new entry in pPage->aCell[] implies that -** pPage->nOverflow is incremented. -** -** If nSkip is non-zero, then do not copy the first nSkip bytes of the -** cell. The caller will overwrite them after this function returns. If -** nSkip is non-zero, then pCell may not point to an invalid memory location -** (but pCell+nSkip is always valid). -*/ -static void insertCell( - MemPage *pPage, /* Page into which we are copying */ - int i, /* New cell becomes the i-th cell of the page */ - u8 *pCell, /* Content of the new cell */ - int sz, /* Bytes of content in pCell */ - u8 *pTemp, /* Temp storage space for pCell, if needed */ - Pgno iChild, /* If non-zero, replace first 4 bytes with this value */ - int *pRC /* Read and write return code from here */ -){ - int idx = 0; /* Where to write new cell content in data[] */ - int j; /* Loop counter */ - int end; /* First byte past the last cell pointer in data[] */ - int ins; /* Index in data[] where new cell pointer is inserted */ - int cellOffset; /* Address of first cell pointer in data[] */ - u8 *data; /* The content of the whole page */ - int nSkip = (iChild ? 4 : 0); - - if( *pRC ) return; - - assert( i>=0 && i<=pPage->nCell+pPage->nOverflow ); - assert( pPage->nCell<=MX_CELL(pPage->pBt) && MX_CELL(pPage->pBt)<=10921 ); - assert( pPage->nOverflow<=ArraySize(pPage->apOvfl) ); - assert( ArraySize(pPage->apOvfl)==ArraySize(pPage->aiOvfl) ); - assert( sqlite3_mutex_held(pPage->pBt->mutex) ); - /* The cell should normally be sized correctly. However, when moving a - ** malformed cell from a leaf page to an interior page, if the cell size - ** wanted to be less than 4 but got rounded up to 4 on the leaf, then size - ** might be less than 8 (leaf-size + pointer) on the interior node. Hence - ** the term after the || in the following assert(). 
*/ - assert( sz==cellSizePtr(pPage, pCell) || (sz==8 && iChild>0) ); - if( pPage->nOverflow || sz+2>pPage->nFree ){ - if( pTemp ){ - memcpy(pTemp+nSkip, pCell+nSkip, sz-nSkip); - pCell = pTemp; - } - if( iChild ){ - put4byte(pCell, iChild); - } - j = pPage->nOverflow++; - assert( j<(int)(sizeof(pPage->apOvfl)/sizeof(pPage->apOvfl[0])) ); - pPage->apOvfl[j] = pCell; - pPage->aiOvfl[j] = (u16)i; - }else{ - int rc = sqlite3PagerWrite(pPage->pDbPage); - if( rc!=SQLITE_OK ){ - *pRC = rc; - return; - } - assert( sqlite3PagerIswriteable(pPage->pDbPage) ); - data = pPage->aData; - cellOffset = pPage->cellOffset; - end = cellOffset + 2*pPage->nCell; - ins = cellOffset + 2*i; - rc = allocateSpace(pPage, sz, &idx); - if( rc ){ *pRC = rc; return; } - /* The allocateSpace() routine guarantees the following two properties - ** if it returns success */ - assert( idx >= end+2 ); - assert( idx+sz <= (int)pPage->pBt->usableSize ); - pPage->nCell++; - pPage->nFree -= (u16)(2 + sz); - memcpy(&data[idx+nSkip], pCell+nSkip, sz-nSkip); - if( iChild ){ - put4byte(&data[idx], iChild); - } - memmove(&data[ins+2], &data[ins], end-ins); - put2byte(&data[ins], idx); - put2byte(&data[pPage->hdrOffset+3], pPage->nCell); -#ifndef SQLITE_OMIT_AUTOVACUUM - if( pPage->pBt->autoVacuum ){ - /* The cell may contain a pointer to an overflow page. If so, write - ** the entry for the overflow page into the pointer map. - */ - ptrmapPutOvflPtr(pPage, pCell, pRC); - } -#endif - } -} - -/* -** Add a list of cells to a page. The page should be initially empty. -** The cells are guaranteed to fit on the page. -*/ -static void assemblePage( - MemPage *pPage, /* The page to be assemblied */ - int nCell, /* The number of cells to add to this page */ - u8 **apCell, /* Pointers to cell bodies */ - u16 *aSize /* Sizes of the cells */ -){ - int i; /* Loop counter */ - u8 *pCellptr; /* Address of next cell pointer */ - int cellbody; /* Address of next cell body */ - u8 * const data = pPage->aData; /* Pointer to data for pPage */ - const int hdr = pPage->hdrOffset; /* Offset of header on pPage */ - const int nUsable = pPage->pBt->usableSize; /* Usable size of page */ - - assert( pPage->nOverflow==0 ); - assert( sqlite3_mutex_held(pPage->pBt->mutex) ); - assert( nCell>=0 && nCell<=(int)MX_CELL(pPage->pBt) - && (int)MX_CELL(pPage->pBt)<=10921); - assert( sqlite3PagerIswriteable(pPage->pDbPage) ); - - /* Check that the page has just been zeroed by zeroPage() */ - assert( pPage->nCell==0 ); - assert( get2byteNotZero(&data[hdr+5])==nUsable ); - - pCellptr = &pPage->aCellIdx[nCell*2]; - cellbody = nUsable; - for(i=nCell-1; i>=0; i--){ - u16 sz = aSize[i]; - pCellptr -= 2; - cellbody -= sz; - put2byte(pCellptr, cellbody); - memcpy(&data[cellbody], apCell[i], sz); - } - put2byte(&data[hdr+3], nCell); - put2byte(&data[hdr+5], cellbody); - pPage->nFree -= (nCell*2 + nUsable - cellbody); - pPage->nCell = (u16)nCell; -} - -/* -** The following parameters determine how many adjacent pages get involved -** in a balancing operation. NN is the number of neighbors on either side -** of the page that participate in the balancing operation. NB is the -** total number of pages that participate, including the target page and -** NN neighbors on either side. -** -** The minimum value of NN is 1 (of course). Increasing NN above 1 -** (to 2 or 3) gives a modest improvement in SELECT and DELETE performance -** in exchange for a larger degradation in INSERT and UPDATE performance. -** The value of NN appears to give the best results overall. 
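The dropCell() and insertCell() routines above both boil down to editing the array of 2-byte cell pointers at the start of the page; the cell bodies are reclaimed or allocated separately (freeSpace()/allocateSpace()). A standalone sketch of the two array operations, with the page header details omitted:

#include <string.h>

/* Remove the idx-th 2-byte cell pointer from an array of nCell pointers,
** closing the gap. Returns the new cell count. The cell body itself is
** assumed to be freed elsewhere, as dropCell() does via freeSpace(). */
static int removeCellPointer(unsigned char *aCellIdx, int nCell, int idx){
  memmove(&aCellIdx[2*idx], &aCellIdx[2*idx+2], 2*(nCell - idx - 1));
  return nCell - 1;
}

/* Insert a 2-byte cell pointer with value "offset" at position idx,
** shifting later pointers up. Returns the new cell count. Space for the
** cell body is assumed to have been allocated already. */
static int insertCellPointer(unsigned char *aCellIdx, int nCell, int idx,
                             unsigned int offset){
  memmove(&aCellIdx[2*idx+2], &aCellIdx[2*idx], 2*(nCell - idx));
  aCellIdx[2*idx]   = (unsigned char)(offset >> 8);   /* big-endian, like put2byte() */
  aCellIdx[2*idx+1] = (unsigned char)(offset & 0xff);
  return nCell + 1;
}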
-*/ -#define NN 1 /* Number of neighbors on either side of pPage */ -#define NB (NN*2+1) /* Total pages involved in the balance */ - - -#ifndef SQLITE_OMIT_QUICKBALANCE -/* -** This version of balance() handles the common special case where -** a new entry is being inserted on the extreme right-end of the -** tree, in other words, when the new entry will become the largest -** entry in the tree. -** -** Instead of trying to balance the 3 right-most leaf pages, just add -** a new page to the right-hand side and put the one new entry in -** that page. This leaves the right side of the tree somewhat -** unbalanced. But odds are that we will be inserting new entries -** at the end soon afterwards so the nearly empty page will quickly -** fill up. On average. -** -** pPage is the leaf page which is the right-most page in the tree. -** pParent is its parent. pPage must have a single overflow entry -** which is also the right-most entry on the page. -** -** The pSpace buffer is used to store a temporary copy of the divider -** cell that will be inserted into pParent. Such a cell consists of a 4 -** byte page number followed by a variable length integer. In other -** words, at most 13 bytes. Hence the pSpace buffer must be at -** least 13 bytes in size. -*/ -static int balance_quick(MemPage *pParent, MemPage *pPage, u8 *pSpace){ - BtShared *const pBt = pPage->pBt; /* B-Tree Database */ - MemPage *pNew; /* Newly allocated page */ - int rc; /* Return Code */ - Pgno pgnoNew; /* Page number of pNew */ - - assert( sqlite3_mutex_held(pPage->pBt->mutex) ); - assert( sqlite3PagerIswriteable(pParent->pDbPage) ); - assert( pPage->nOverflow==1 ); - - /* This error condition is now caught prior to reaching this function */ - if( pPage->nCell==0 ) return SQLITE_CORRUPT_BKPT; - - /* Allocate a new page. This page will become the right-sibling of - ** pPage. Make the parent page writable, so that the new divider cell - ** may be inserted. If both these operations are successful, proceed. - */ - rc = allocateBtreePage(pBt, &pNew, &pgnoNew, 0, 0); - - if( rc==SQLITE_OK ){ - - u8 *pOut = &pSpace[4]; - u8 *pCell = pPage->apOvfl[0]; - u16 szCell = cellSizePtr(pPage, pCell); - u8 *pStop; - - assert( sqlite3PagerIswriteable(pNew->pDbPage) ); - assert( pPage->aData[0]==(PTF_INTKEY|PTF_LEAFDATA|PTF_LEAF) ); - zeroPage(pNew, PTF_INTKEY|PTF_LEAFDATA|PTF_LEAF); - assemblePage(pNew, 1, &pCell, &szCell); - - /* If this is an auto-vacuum database, update the pointer map - ** with entries for the new page, and any pointer from the - ** cell on the page to an overflow page. If either of these - ** operations fails, the return code is set, but the contents - ** of the parent page are still manipulated by thh code below. - ** That is Ok, at this point the parent page is guaranteed to - ** be marked as dirty. Returning an error code will cause a - ** rollback, undoing any changes made to the parent page. - */ - if( ISAUTOVACUUM ){ - ptrmapPut(pBt, pgnoNew, PTRMAP_BTREE, pParent->pgno, &rc); - if( szCell>pNew->minLocal ){ - ptrmapPutOvflPtr(pNew, pCell, &rc); - } - } - - /* Create a divider cell to insert into pParent. The divider cell - ** consists of a 4-byte page number (the page number of pPage) and - ** a variable length key value (which must be the same value as the - ** largest key on pPage). - ** - ** To find the largest key value on pPage, first find the right-most - ** cell on pPage. 
The first two fields of this cell are the - ** record-length (a variable length integer at most 32-bits in size) - ** and the key value (a variable length integer, may have any value). - ** The first of the while(...) loops below skips over the record-length - ** field. The second while(...) loop copies the key value from the - ** cell on pPage into the pSpace buffer. - */ - pCell = findCell(pPage, pPage->nCell-1); - pStop = &pCell[9]; - while( (*(pCell++)&0x80) && pCellnCell, pSpace, (int)(pOut-pSpace), - 0, pPage->pgno, &rc); - - /* Set the right-child pointer of pParent to point to the new page. */ - put4byte(&pParent->aData[pParent->hdrOffset+8], pgnoNew); - - /* Release the reference to the new page. */ - releasePage(pNew); - } - - return rc; -} -#endif /* SQLITE_OMIT_QUICKBALANCE */ - -#if 0 -/* -** This function does not contribute anything to the operation of SQLite. -** it is sometimes activated temporarily while debugging code responsible -** for setting pointer-map entries. -*/ -static int ptrmapCheckPages(MemPage **apPage, int nPage){ - int i, j; - for(i=0; ipBt; - assert( pPage->isInit ); - - for(j=0; jnCell; j++){ - CellInfo info; - u8 *z; - - z = findCell(pPage, j); - btreeParseCellPtr(pPage, z, &info); - if( info.iOverflow ){ - Pgno ovfl = get4byte(&z[info.iOverflow]); - ptrmapGet(pBt, ovfl, &e, &n); - assert( n==pPage->pgno && e==PTRMAP_OVERFLOW1 ); - } - if( !pPage->leaf ){ - Pgno child = get4byte(z); - ptrmapGet(pBt, child, &e, &n); - assert( n==pPage->pgno && e==PTRMAP_BTREE ); - } - } - if( !pPage->leaf ){ - Pgno child = get4byte(&pPage->aData[pPage->hdrOffset+8]); - ptrmapGet(pBt, child, &e, &n); - assert( n==pPage->pgno && e==PTRMAP_BTREE ); - } - } - return 1; -} -#endif - -/* -** This function is used to copy the contents of the b-tree node stored -** on page pFrom to page pTo. If page pFrom was not a leaf page, then -** the pointer-map entries for each child page are updated so that the -** parent page stored in the pointer map is page pTo. If pFrom contained -** any cells with overflow page pointers, then the corresponding pointer -** map entries are also updated so that the parent page is page pTo. -** -** If pFrom is currently carrying any overflow cells (entries in the -** MemPage.apOvfl[] array), they are not copied to pTo. -** -** Before returning, page pTo is reinitialized using btreeInitPage(). -** -** The performance of this function is not critical. It is only used by -** the balance_shallower() and balance_deeper() procedures, neither of -** which are called often under normal circumstances. -*/ -static void copyNodeContent(MemPage *pFrom, MemPage *pTo, int *pRC){ - if( (*pRC)==SQLITE_OK ){ - BtShared * const pBt = pFrom->pBt; - u8 * const aFrom = pFrom->aData; - u8 * const aTo = pTo->aData; - int const iFromHdr = pFrom->hdrOffset; - int const iToHdr = ((pTo->pgno==1) ? 100 : 0); - int rc; - int iData; - - - assert( pFrom->isInit ); - assert( pFrom->nFree>=iToHdr ); - assert( get2byte(&aFrom[iFromHdr+5]) <= (int)pBt->usableSize ); - - /* Copy the b-tree node content from page pFrom to page pTo. */ - iData = get2byte(&aFrom[iFromHdr+5]); - memcpy(&aTo[iData], &aFrom[iData], pBt->usableSize-iData); - memcpy(&aTo[iToHdr], &aFrom[iFromHdr], pFrom->cellOffset + 2*pFrom->nCell); - - /* Reinitialize page pTo so that the contents of the MemPage structure - ** match the new data. The initialization of pTo can actually fail under - ** fairly obscure circumstances, even though it is a copy of initialized - ** page pFrom. 
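The divider-cell construction in balance_quick() above first skips the record-length varint and then copies the key varint byte by byte. A varint is at most 9 bytes, and the high bit of each of the first 8 bytes acts as a continuation flag; a minimal sketch of the skip step (illustrative, mirroring the loop bound used above):

/* Return a pointer to the first byte past a varint starting at p. At most
** 9 bytes are consumed; the 9th byte, if reached, always ends the varint
** regardless of its high bit. */
static const unsigned char *skipVarint(const unsigned char *p){
  const unsigned char *pStop = p + 9;
  while( (*(p++) & 0x80) && p < pStop ){}
  return p;
}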
- */ - pTo->isInit = 0; - rc = btreeInitPage(pTo); - if( rc!=SQLITE_OK ){ - *pRC = rc; - return; - } - - /* If this is an auto-vacuum database, update the pointer-map entries - ** for any b-tree or overflow pages that pTo now contains the pointers to. - */ - if( ISAUTOVACUUM ){ - *pRC = setChildPtrmaps(pTo); - } - } -} - -/* -** This routine redistributes cells on the iParentIdx'th child of pParent -** (hereafter "the page") and up to 2 siblings so that all pages have about the -** same amount of free space. Usually a single sibling on either side of the -** page are used in the balancing, though both siblings might come from one -** side if the page is the first or last child of its parent. If the page -** has fewer than 2 siblings (something which can only happen if the page -** is a root page or a child of a root page) then all available siblings -** participate in the balancing. -** -** The number of siblings of the page might be increased or decreased by -** one or two in an effort to keep pages nearly full but not over full. -** -** Note that when this routine is called, some of the cells on the page -** might not actually be stored in MemPage.aData[]. This can happen -** if the page is overfull. This routine ensures that all cells allocated -** to the page and its siblings fit into MemPage.aData[] before returning. -** -** In the course of balancing the page and its siblings, cells may be -** inserted into or removed from the parent page (pParent). Doing so -** may cause the parent page to become overfull or underfull. If this -** happens, it is the responsibility of the caller to invoke the correct -** balancing routine to fix this problem (see the balance() routine). -** -** If this routine fails for any reason, it might leave the database -** in a corrupted state. So if this routine fails, the database should -** be rolled back. -** -** The third argument to this function, aOvflSpace, is a pointer to a -** buffer big enough to hold one page. If while inserting cells into the parent -** page (pParent) the parent page becomes overfull, this buffer is -** used to store the parent's overflow cells. Because this function inserts -** a maximum of four divider cells into the parent page, and the maximum -** size of a cell stored within an internal node is always less than 1/4 -** of the page-size, the aOvflSpace[] buffer is guaranteed to be large -** enough for all overflow cells. -** -** If aOvflSpace is set to a null pointer, this function returns -** SQLITE_NOMEM. -*/ -#if defined(_MSC_VER) && _MSC_VER >= 1700 && defined(_M_ARM) -#pragma optimize("", off) -#endif -static int balance_nonroot( - MemPage *pParent, /* Parent page of siblings being balanced */ - int iParentIdx, /* Index of "the page" in pParent */ - u8 *aOvflSpace, /* page-size bytes of space for parent ovfl */ - int isRoot, /* True if pParent is a root-page */ - int bBulk /* True if this call is part of a bulk load */ -){ - BtShared *pBt; /* The whole database */ - int nCell = 0; /* Number of cells in apCell[] */ - int nMaxCells = 0; /* Allocated size of apCell, szCell, aFrom. */ - int nNew = 0; /* Number of pages in apNew[] */ - int nOld; /* Number of pages in apOld[] */ - int i, j, k; /* Loop counters */ - int nxDiv; /* Next divider slot in pParent->aCell[] */ - int rc = SQLITE_OK; /* The return code */ - u16 leafCorrection; /* 4 if pPage is a leaf. 
0 if not */ - int leafData; /* True if pPage is a leaf of a LEAFDATA tree */ - int usableSpace; /* Bytes in pPage beyond the header */ - int pageFlags; /* Value of pPage->aData[0] */ - int subtotal; /* Subtotal of bytes in cells on one page */ - int iSpace1 = 0; /* First unused byte of aSpace1[] */ - int iOvflSpace = 0; /* First unused byte of aOvflSpace[] */ - int szScratch; /* Size of scratch memory requested */ - MemPage *apOld[NB]; /* pPage and up to two siblings */ - MemPage *apCopy[NB]; /* Private copies of apOld[] pages */ - MemPage *apNew[NB+2]; /* pPage and up to NB siblings after balancing */ - u8 *pRight; /* Location in parent of right-sibling pointer */ - u8 *apDiv[NB-1]; /* Divider cells in pParent */ - int cntNew[NB+2]; /* Index in aCell[] of cell after i-th page */ - int szNew[NB+2]; /* Combined size of cells place on i-th page */ - u8 **apCell = 0; /* All cells begin balanced */ - u16 *szCell; /* Local size of all cells in apCell[] */ - u8 *aSpace1; /* Space for copies of dividers cells */ - Pgno pgno; /* Temp var to store a page number in */ - - pBt = pParent->pBt; - assert( sqlite3_mutex_held(pBt->mutex) ); - assert( sqlite3PagerIswriteable(pParent->pDbPage) ); - -#if 0 - TRACE(("BALANCE: begin page %d child of %d\n", pPage->pgno, pParent->pgno)); -#endif - - /* At this point pParent may have at most one overflow cell. And if - ** this overflow cell is present, it must be the cell with - ** index iParentIdx. This scenario comes about when this function - ** is called (indirectly) from sqlite3BtreeDelete(). - */ - assert( pParent->nOverflow==0 || pParent->nOverflow==1 ); - assert( pParent->nOverflow==0 || pParent->aiOvfl[0]==iParentIdx ); - - if( !aOvflSpace ){ - return SQLITE_NOMEM; - } - - /* Find the sibling pages to balance. Also locate the cells in pParent - ** that divide the siblings. An attempt is made to find NN siblings on - ** either side of pPage. More siblings are taken from one side, however, - ** if there are fewer than NN siblings on the other side. If pParent - ** has NB or fewer children then all children of pParent are taken. - ** - ** This loop also drops the divider cells from the parent page. This - ** way, the remainder of the function does not have to deal with any - ** overflow cells in the parent page, since if any existed they will - ** have already been removed. - */ - i = pParent->nOverflow + pParent->nCell; - if( i<2 ){ - nxDiv = 0; - }else{ - assert( bBulk==0 || bBulk==1 ); - if( iParentIdx==0 ){ - nxDiv = 0; - }else if( iParentIdx==i ){ - nxDiv = i-2+bBulk; - }else{ - assert( bBulk==0 ); - nxDiv = iParentIdx-1; - } - i = 2-bBulk; - } - nOld = i+1; - if( (i+nxDiv-pParent->nOverflow)==pParent->nCell ){ - pRight = &pParent->aData[pParent->hdrOffset+8]; - }else{ - pRight = findCell(pParent, i+nxDiv-pParent->nOverflow); - } - pgno = get4byte(pRight); - while( 1 ){ - rc = getAndInitPage(pBt, pgno, &apOld[i], 0); - if( rc ){ - memset(apOld, 0, (i+1)*sizeof(MemPage*)); - goto balance_cleanup; - } - nMaxCells += 1+apOld[i]->nCell+apOld[i]->nOverflow; - if( (i--)==0 ) break; - - if( i+nxDiv==pParent->aiOvfl[0] && pParent->nOverflow ){ - apDiv[i] = pParent->apOvfl[0]; - pgno = get4byte(apDiv[i]); - szNew[i] = cellSizePtr(pParent, apDiv[i]); - pParent->nOverflow = 0; - }else{ - apDiv[i] = findCell(pParent, i+nxDiv-pParent->nOverflow); - pgno = get4byte(apDiv[i]); - szNew[i] = cellSizePtr(pParent, apDiv[i]); - - /* Drop the cell from the parent page. apDiv[i] still points to - ** the cell within the parent, even though it has been dropped. 
- ** This is safe because dropping a cell only overwrites the first - ** four bytes of it, and this function does not need the first - ** four bytes of the divider cell. So the pointer is safe to use - ** later on. - ** - ** But not if we are in secure-delete mode. In secure-delete mode, - ** the dropCell() routine will overwrite the entire cell with zeroes. - ** In this case, temporarily copy the cell into the aOvflSpace[] - ** buffer. It will be copied out again as soon as the aSpace[] buffer - ** is allocated. */ - if( pBt->btsFlags & BTS_SECURE_DELETE ){ - int iOff; - - iOff = SQLITE_PTR_TO_INT(apDiv[i]) - SQLITE_PTR_TO_INT(pParent->aData); - if( (iOff+szNew[i])>(int)pBt->usableSize ){ - rc = SQLITE_CORRUPT_BKPT; - memset(apOld, 0, (i+1)*sizeof(MemPage*)); - goto balance_cleanup; - }else{ - memcpy(&aOvflSpace[iOff], apDiv[i], szNew[i]); - apDiv[i] = &aOvflSpace[apDiv[i]-pParent->aData]; - } - } - dropCell(pParent, i+nxDiv-pParent->nOverflow, szNew[i], &rc); - } - } - - /* Make nMaxCells a multiple of 4 in order to preserve 8-byte - ** alignment */ - nMaxCells = (nMaxCells + 3)&~3; - - /* - ** Allocate space for memory structures - */ - k = pBt->pageSize + ROUND8(sizeof(MemPage)); - szScratch = - nMaxCells*sizeof(u8*) /* apCell */ - + nMaxCells*sizeof(u16) /* szCell */ - + pBt->pageSize /* aSpace1 */ - + k*nOld; /* Page copies (apCopy) */ - apCell = sqlite3ScratchMalloc( szScratch ); - if( apCell==0 ){ - rc = SQLITE_NOMEM; - goto balance_cleanup; - } - szCell = (u16*)&apCell[nMaxCells]; - aSpace1 = (u8*)&szCell[nMaxCells]; - assert( EIGHT_BYTE_ALIGNMENT(aSpace1) ); - - /* - ** Load pointers to all cells on sibling pages and the divider cells - ** into the local apCell[] array. Make copies of the divider cells - ** into space obtained from aSpace1[] and remove the divider cells - ** from pParent. - ** - ** If the siblings are on leaf pages, then the child pointers of the - ** divider cells are stripped from the cells before they are copied - ** into aSpace1[]. In this way, all cells in apCell[] are without - ** child pointers. If siblings are not leaves, then all cell in - ** apCell[] include child pointers. Either way, all cells in apCell[] - ** are alike. - ** - ** leafCorrection: 4 if pPage is a leaf. 0 if pPage is not a leaf. - ** leafData: 1 if pPage holds key+data and pParent holds only keys. - */ - leafCorrection = apOld[0]->leaf*4; - leafData = apOld[0]->hasData; - for(i=0; ipageSize + k*i]; - memcpy(pOld, apOld[i], sizeof(MemPage)); - pOld->aData = (void*)&pOld[1]; - memcpy(pOld->aData, apOld[i]->aData, pBt->pageSize); - - limit = pOld->nCell+pOld->nOverflow; - if( pOld->nOverflow>0 ){ - for(j=0; jaData; - u16 maskPage = pOld->maskPage; - u16 cellOffset = pOld->cellOffset; - for(j=0; jmaxLocal+23 ); - assert( iSpace1 <= (int)pBt->pageSize ); - memcpy(pTemp, apDiv[i], sz); - apCell[nCell] = pTemp+leafCorrection; - assert( leafCorrection==0 || leafCorrection==4 ); - szCell[nCell] = szCell[nCell] - leafCorrection; - if( !pOld->leaf ){ - assert( leafCorrection==0 ); - assert( pOld->hdrOffset==0 ); - /* The right pointer of the child page pOld becomes the left - ** pointer of the divider cell */ - memcpy(apCell[nCell], &pOld->aData[8], 4); - }else{ - assert( leafCorrection==4 ); - if( szCell[nCell]<4 ){ - /* Do not allow any cells smaller than 4 bytes. */ - szCell[nCell] = 4; - } - } - nCell++; - } - } - - /* - ** Figure out the number of pages needed to hold all nCell cells. - ** Store this number in "k". 
Also compute szNew[] which is the total - ** size of all cells on the i-th page and cntNew[] which is the index - ** in apCell[] of the cell that divides page i from page i+1. - ** cntNew[k] should equal nCell. - ** - ** Values computed by this block: - ** - ** k: The total number of sibling pages - ** szNew[i]: Spaced used on the i-th sibling page. - ** cntNew[i]: Index in apCell[] and szCell[] for the first cell to - ** the right of the i-th sibling page. - ** usableSpace: Number of bytes of space available on each sibling. - ** - */ - usableSpace = pBt->usableSize - 12 + leafCorrection; - for(subtotal=k=i=0; i usableSpace ){ - szNew[k] = subtotal - szCell[i]; - cntNew[k] = i; - if( leafData ){ i--; } - subtotal = 0; - k++; - if( k>NB+1 ){ rc = SQLITE_CORRUPT_BKPT; goto balance_cleanup; } - } - } - szNew[k] = subtotal; - cntNew[k] = nCell; - k++; - - /* - ** The packing computed by the previous block is biased toward the siblings - ** on the left side. The left siblings are always nearly full, while the - ** right-most sibling might be nearly empty. This block of code attempts - ** to adjust the packing of siblings to get a better balance. - ** - ** This adjustment is more than an optimization. The packing above might - ** be so out of balance as to be illegal. For example, the right-most - ** sibling might be completely empty. This adjustment is not optional. - */ - for(i=k-1; i>0; i--){ - int szRight = szNew[i]; /* Size of sibling on the right */ - int szLeft = szNew[i-1]; /* Size of sibling on the left */ - int r; /* Index of right-most cell in left sibling */ - int d; /* Index of first cell to the left of right sibling */ - - r = cntNew[i-1] - 1; - d = r + 1 - leafData; - assert( d0) or pPage is - ** a virtual root page. A virtual root page is when the real root - ** page is page 1 and we are the only child of that page. - ** - ** UPDATE: The assert() below is not necessarily true if the database - ** file is corrupt. The corruption will be detected and reported later - ** in this procedure so there is no need to act upon it now. - */ -#if 0 - assert( cntNew[0]>0 || (pParent->pgno==1 && pParent->nCell==0) ); -#endif - - TRACE(("BALANCE: old: %d %d %d ", - apOld[0]->pgno, - nOld>=2 ? apOld[1]->pgno : 0, - nOld>=3 ? apOld[2]->pgno : 0 - )); - - /* - ** Allocate k new pages. Reuse old pages where possible. - */ - if( apOld[0]->pgno<=1 ){ - rc = SQLITE_CORRUPT_BKPT; - goto balance_cleanup; - } - pageFlags = apOld[0]->aData[0]; - for(i=0; ipDbPage); - nNew++; - if( rc ) goto balance_cleanup; - }else{ - assert( i>0 ); - rc = allocateBtreePage(pBt, &pNew, &pgno, (bBulk ? 1 : pgno), 0); - if( rc ) goto balance_cleanup; - apNew[i] = pNew; - nNew++; - - /* Set the pointer-map entry for the new sibling page. */ - if( ISAUTOVACUUM ){ - ptrmapPut(pBt, pNew->pgno, PTRMAP_BTREE, pParent->pgno, &rc); - if( rc!=SQLITE_OK ){ - goto balance_cleanup; - } - } - } - } - - /* Free any old pages that were not reused as new pages. - */ - while( ipgno; - int minI = i; - for(j=i+1; jpgno<(unsigned)minV ){ - minI = j; - minV = apNew[j]->pgno; - } - } - if( minI>i ){ - MemPage *pT; - pT = apNew[i]; - apNew[i] = apNew[minI]; - apNew[minI] = pT; - } - } - TRACE(("new: %d(%d) %d(%d) %d(%d) %d(%d) %d(%d)\n", - apNew[0]->pgno, szNew[0], - nNew>=2 ? apNew[1]->pgno : 0, nNew>=2 ? szNew[1] : 0, - nNew>=3 ? apNew[2]->pgno : 0, nNew>=3 ? szNew[2] : 0, - nNew>=4 ? apNew[3]->pgno : 0, nNew>=4 ? szNew[3] : 0, - nNew>=5 ? apNew[4]->pgno : 0, nNew>=5 ? 
szNew[4] : 0)); - - assert( sqlite3PagerIswriteable(pParent->pDbPage) ); - put4byte(pRight, apNew[nNew-1]->pgno); - - /* - ** Evenly distribute the data in apCell[] across the new pages. - ** Insert divider cells into pParent as necessary. - */ - j = 0; - for(i=0; inCell>0 || (nNew==1 && cntNew[0]==0) ); - assert( pNew->nOverflow==0 ); - - j = cntNew[i]; - - /* If the sibling page assembled above was not the right-most sibling, - ** insert a divider cell into the parent page. - */ - assert( ileaf ){ - memcpy(&pNew->aData[8], pCell, 4); - }else if( leafData ){ - /* If the tree is a leaf-data tree, and the siblings are leaves, - ** then there is no divider cell in apCell[]. Instead, the divider - ** cell consists of the integer key for the right-most cell of - ** the sibling-page assembled above only. - */ - CellInfo info; - j--; - btreeParseCellPtr(pNew, apCell[j], &info); - pCell = pTemp; - sz = 4 + putVarint(&pCell[4], info.nKey); - pTemp = 0; - }else{ - pCell -= 4; - /* Obscure case for non-leaf-data trees: If the cell at pCell was - ** previously stored on a leaf node, and its reported size was 4 - ** bytes, then it may actually be smaller than this - ** (see btreeParseCellPtr(), 4 bytes is the minimum size of - ** any cell). But it is important to pass the correct size to - ** insertCell(), so reparse the cell now. - ** - ** Note that this can never happen in an SQLite data file, as all - ** cells are at least 4 bytes. It only happens in b-trees used - ** to evaluate "IN (SELECT ...)" and similar clauses. - */ - if( szCell[j]==4 ){ - assert(leafCorrection==4); - sz = cellSizePtr(pParent, pCell); - } - } - iOvflSpace += sz; - assert( sz<=pBt->maxLocal+23 ); - assert( iOvflSpace <= (int)pBt->pageSize ); - insertCell(pParent, nxDiv, pCell, sz, pTemp, pNew->pgno, &rc); - if( rc!=SQLITE_OK ) goto balance_cleanup; - assert( sqlite3PagerIswriteable(pParent->pDbPage) ); - - j++; - nxDiv++; - } - } - assert( j==nCell ); - assert( nOld>0 ); - assert( nNew>0 ); - if( (pageFlags & PTF_LEAF)==0 ){ - u8 *zChild = &apCopy[nOld-1]->aData[8]; - memcpy(&apNew[nNew-1]->aData[8], zChild, 4); - } - - if( isRoot && pParent->nCell==0 && pParent->hdrOffset<=apNew[0]->nFree ){ - /* The root page of the b-tree now contains no cells. The only sibling - ** page is the right-child of the parent. Copy the contents of the - ** child page into the parent, decreasing the overall height of the - ** b-tree structure by one. This is described as the "balance-shallower" - ** sub-algorithm in some documentation. - ** - ** If this is an auto-vacuum database, the call to copyNodeContent() - ** sets all pointer-map entries corresponding to database image pages - ** for which the pointer is stored within the content being copied. - ** - ** The second assert below verifies that the child page is defragmented - ** (it must be, as it was just reconstructed using assemblePage()). This - ** is important if the parent page happens to be page 1 of the database - ** image. */ - assert( nNew==1 ); - assert( apNew[0]->nFree == - (get2byte(&apNew[0]->aData[5])-apNew[0]->cellOffset-apNew[0]->nCell*2) - ); - copyNodeContent(apNew[0], pParent, &rc); - freePage(apNew[0], &rc); - }else if( ISAUTOVACUUM ){ - /* Fix the pointer-map entries for all the cells that were shifted around. - ** There are several different types of pointer-map entries that need to - ** be dealt with by this routine. Some of these have been set already, but - ** many have not. 
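The sibling-packing pass above is a greedy left-to-right fill: each cell (plus 2 bytes for its pointer slot) is assigned to the current sibling until that page would overflow, then a new sibling starts; the later adjustment loop shifts cells rightward so the last sibling is not left nearly empty. A simplified standalone sketch of the first pass (it ignores the leaf-data divider adjustment and assumes every cell fits on a page):

/* Greedily pack nCell cells into pages of "usable" bytes each, filling from
** the left. cntNew[k] receives the index of the first cell that does NOT go
** on page k. Returns the number of sibling pages used. */
static int packSiblingsLeftBiased(const int *szCell, int nCell, int usable,
                                  int *cntNew){
  int i, k = 0, subtotal = 0;
  for(i=0; i<nCell; i++){
    int need = szCell[i] + 2;          /* cell body + 2-byte pointer slot */
    if( subtotal + need > usable ){
      cntNew[k++] = i;                 /* cells before index i stay on page k */
      subtotal = 0;
    }
    subtotal += need;
  }
  cntNew[k++] = nCell;                 /* the last page takes the remainder */
  return k;
}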
The following is a summary: - ** - ** 1) The entries associated with new sibling pages that were not - ** siblings when this function was called. These have already - ** been set. We don't need to worry about old siblings that were - ** moved to the free-list - the freePage() code has taken care - ** of those. - ** - ** 2) The pointer-map entries associated with the first overflow - ** page in any overflow chains used by new divider cells. These - ** have also already been taken care of by the insertCell() code. - ** - ** 3) If the sibling pages are not leaves, then the child pages of - ** cells stored on the sibling pages may need to be updated. - ** - ** 4) If the sibling pages are not internal intkey nodes, then any - ** overflow pages used by these cells may need to be updated - ** (internal intkey nodes never contain pointers to overflow pages). - ** - ** 5) If the sibling pages are not leaves, then the pointer-map - ** entries for the right-child pages of each sibling may need - ** to be updated. - ** - ** Cases 1 and 2 are dealt with above by other code. The next - ** block deals with cases 3 and 4 and the one after that, case 5. Since - ** setting a pointer map entry is a relatively expensive operation, this - ** code only sets pointer map entries for child or overflow pages that have - ** actually moved between pages. */ - MemPage *pNew = apNew[0]; - MemPage *pOld = apCopy[0]; - int nOverflow = pOld->nOverflow; - int iNextOld = pOld->nCell + nOverflow; - int iOverflow = (nOverflow ? pOld->aiOvfl[0] : -1); - j = 0; /* Current 'old' sibling page */ - k = 0; /* Current 'new' sibling page */ - for(i=0; inCell + pOld->nOverflow; - if( pOld->nOverflow ){ - nOverflow = pOld->nOverflow; - iOverflow = i + !leafData + pOld->aiOvfl[0]; - } - isDivider = !leafData; - } - - assert(nOverflow>0 || iOverflowaiOvfl[0]==pOld->aiOvfl[1]-1); - assert(nOverflow<3 || pOld->aiOvfl[1]==pOld->aiOvfl[2]-1); - if( i==iOverflow ){ - isDivider = 1; - if( (--nOverflow)>0 ){ - iOverflow++; - } - } - - if( i==cntNew[k] ){ - /* Cell i is the cell immediately following the last cell on new - ** sibling page k. If the siblings are not leaf pages of an - ** intkey b-tree, then cell i is a divider cell. */ - pNew = apNew[++k]; - if( !leafData ) continue; - } - assert( jpgno!=pNew->pgno ){ - if( !leafCorrection ){ - ptrmapPut(pBt, get4byte(apCell[i]), PTRMAP_BTREE, pNew->pgno, &rc); - } - if( szCell[i]>pNew->minLocal ){ - ptrmapPutOvflPtr(pNew, apCell[i], &rc); - } - } - } - - if( !leafCorrection ){ - for(i=0; iaData[8]); - ptrmapPut(pBt, key, PTRMAP_BTREE, apNew[i]->pgno, &rc); - } - } - -#if 0 - /* The ptrmapCheckPages() contains assert() statements that verify that - ** all pointer map pages are set correctly. This is helpful while - ** debugging. This is usually disabled because a corrupt database may - ** cause an assert() statement to fail. */ - ptrmapCheckPages(apNew, nNew); - ptrmapCheckPages(&pParent, 1); -#endif - } - - assert( pParent->isInit ); - TRACE(("BALANCE: finished: old=%d new=%d cells=%d\n", - nOld, nNew, nCell)); - - /* - ** Cleanup before returning. - */ -balance_cleanup: - sqlite3ScratchFree(apCell); - for(i=0; i= 1700 && defined(_M_ARM) -#pragma optimize("", on) -#endif - - -/* -** This function is called when the root page of a b-tree structure is -** overfull (has one or more overflow pages). -** -** A new child page is allocated and the contents of the current root -** page, including overflow cells, are copied into the child. 
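Each pointer-map entry referenced in the case list above occupies five bytes on its pointer-map page: one type byte followed by the big-endian parent page number. A sketch of writing one entry; the constant names and values shown here are illustrative stand-ins rather than the deleted file's definitions:

#include <stdint.h>

#define EX_PTRMAP_BTREE     5   /* a b-tree page; parent is its parent b-tree page */
#define EX_PTRMAP_OVERFLOW1 3   /* first overflow page; parent is the page holding the cell */

/* Write entry "slot" on a pointer-map page: one type byte plus a 4-byte
** big-endian parent page number, 5 bytes per entry. */
static void writePtrmapEntry(unsigned char *aPtrmapPage, int slot,
                             unsigned char eType, uint32_t parentPgno){
  unsigned char *p = &aPtrmapPage[5*slot];
  p[0] = eType;
  p[1] = (unsigned char)(parentPgno >> 24);
  p[2] = (unsigned char)(parentPgno >> 16);
  p[3] = (unsigned char)(parentPgno >> 8);
  p[4] = (unsigned char)(parentPgno);
}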
The root -** page is then overwritten to make it an empty page with the right-child -** pointer pointing to the new page. -** -** Before returning, all pointer-map entries corresponding to pages -** that the new child-page now contains pointers to are updated. The -** entry corresponding to the new right-child pointer of the root -** page is also updated. -** -** If successful, *ppChild is set to contain a reference to the child -** page and SQLITE_OK is returned. In this case the caller is required -** to call releasePage() on *ppChild exactly once. If an error occurs, -** an error code is returned and *ppChild is set to 0. -*/ -static int balance_deeper(MemPage *pRoot, MemPage **ppChild){ - int rc; /* Return value from subprocedures */ - MemPage *pChild = 0; /* Pointer to a new child page */ - Pgno pgnoChild = 0; /* Page number of the new child page */ - BtShared *pBt = pRoot->pBt; /* The BTree */ - - assert( pRoot->nOverflow>0 ); - assert( sqlite3_mutex_held(pBt->mutex) ); - - /* Make pRoot, the root page of the b-tree, writable. Allocate a new - ** page that will become the new right-child of pPage. Copy the contents - ** of the node stored on pRoot into the new child page. - */ - rc = sqlite3PagerWrite(pRoot->pDbPage); - if( rc==SQLITE_OK ){ - rc = allocateBtreePage(pBt,&pChild,&pgnoChild,pRoot->pgno,0); - copyNodeContent(pRoot, pChild, &rc); - if( ISAUTOVACUUM ){ - ptrmapPut(pBt, pgnoChild, PTRMAP_BTREE, pRoot->pgno, &rc); - } - } - if( rc ){ - *ppChild = 0; - releasePage(pChild); - return rc; - } - assert( sqlite3PagerIswriteable(pChild->pDbPage) ); - assert( sqlite3PagerIswriteable(pRoot->pDbPage) ); - assert( pChild->nCell==pRoot->nCell ); - - TRACE(("BALANCE: copy root %d into %d\n", pRoot->pgno, pChild->pgno)); - - /* Copy the overflow cells from pRoot to pChild */ - memcpy(pChild->aiOvfl, pRoot->aiOvfl, - pRoot->nOverflow*sizeof(pRoot->aiOvfl[0])); - memcpy(pChild->apOvfl, pRoot->apOvfl, - pRoot->nOverflow*sizeof(pRoot->apOvfl[0])); - pChild->nOverflow = pRoot->nOverflow; - - /* Zero the contents of pRoot. Then install pChild as the right-child. */ - zeroPage(pRoot, pChild->aData[0] & ~PTF_LEAF); - put4byte(&pRoot->aData[pRoot->hdrOffset+8], pgnoChild); - - *ppChild = pChild; - return SQLITE_OK; -} - -/* -** The page that pCur currently points to has just been modified in -** some way. This function figures out if this modification means the -** tree needs to be balanced, and if so calls the appropriate balancing -** routine. Balancing routines are: -** -** balance_quick() -** balance_deeper() -** balance_nonroot() -*/ -static int balance(BtCursor *pCur){ - int rc = SQLITE_OK; - const int nMin = pCur->pBt->usableSize * 2 / 3; - u8 aBalanceQuickSpace[13]; - u8 *pFree = 0; - - TESTONLY( int balance_quick_called = 0 ); - TESTONLY( int balance_deeper_called = 0 ); - - do { - int iPage = pCur->iPage; - MemPage *pPage = pCur->apPage[iPage]; - - if( iPage==0 ){ - if( pPage->nOverflow ){ - /* The root page of the b-tree is overfull. In this case call the - ** balance_deeper() function to create a new child for the root-page - ** and copy the current contents of the root-page to it. The - ** next iteration of the do-loop will balance the child page. 
- */ - assert( (balance_deeper_called++)==0 ); - rc = balance_deeper(pPage, &pCur->apPage[1]); - if( rc==SQLITE_OK ){ - pCur->iPage = 1; - pCur->aiIdx[0] = 0; - pCur->aiIdx[1] = 0; - assert( pCur->apPage[1]->nOverflow ); - } - }else{ - break; - } - }else if( pPage->nOverflow==0 && pPage->nFree<=nMin ){ - break; - }else{ - MemPage * const pParent = pCur->apPage[iPage-1]; - int const iIdx = pCur->aiIdx[iPage-1]; - - rc = sqlite3PagerWrite(pParent->pDbPage); - if( rc==SQLITE_OK ){ -#ifndef SQLITE_OMIT_QUICKBALANCE - if( pPage->hasData - && pPage->nOverflow==1 - && pPage->aiOvfl[0]==pPage->nCell - && pParent->pgno!=1 - && pParent->nCell==iIdx - ){ - /* Call balance_quick() to create a new sibling of pPage on which - ** to store the overflow cell. balance_quick() inserts a new cell - ** into pParent, which may cause pParent overflow. If this - ** happens, the next interation of the do-loop will balance pParent - ** use either balance_nonroot() or balance_deeper(). Until this - ** happens, the overflow cell is stored in the aBalanceQuickSpace[] - ** buffer. - ** - ** The purpose of the following assert() is to check that only a - ** single call to balance_quick() is made for each call to this - ** function. If this were not verified, a subtle bug involving reuse - ** of the aBalanceQuickSpace[] might sneak in. - */ - assert( (balance_quick_called++)==0 ); - rc = balance_quick(pParent, pPage, aBalanceQuickSpace); - }else -#endif - { - /* In this case, call balance_nonroot() to redistribute cells - ** between pPage and up to 2 of its sibling pages. This involves - ** modifying the contents of pParent, which may cause pParent to - ** become overfull or underfull. The next iteration of the do-loop - ** will balance the parent page to correct this. - ** - ** If the parent page becomes overfull, the overflow cell or cells - ** are stored in the pSpace buffer allocated immediately below. - ** A subsequent iteration of the do-loop will deal with this by - ** calling balance_nonroot() (balance_deeper() may be called first, - ** but it doesn't deal with overflow cells - just moves them to a - ** different page). Once this subsequent call to balance_nonroot() - ** has completed, it is safe to release the pSpace buffer used by - ** the previous call, as the overflow cell data will have been - ** copied either into the body of a database page or into the new - ** pSpace buffer passed to the latter call to balance_nonroot(). - */ - u8 *pSpace = sqlite3PageMalloc(pCur->pBt->pageSize); - rc = balance_nonroot(pParent, iIdx, pSpace, iPage==1, pCur->hints); - if( pFree ){ - /* If pFree is not NULL, it points to the pSpace buffer used - ** by a previous call to balance_nonroot(). Its contents are - ** now stored either on real database pages or within the - ** new pSpace buffer, so it may be safely freed here. */ - sqlite3PageFree(pFree); - } - - /* The pSpace buffer will be freed after the next call to - ** balance_nonroot(), or just before this function returns, whichever - ** comes first. */ - pFree = pSpace; - } - } - - pPage->nOverflow = 0; - - /* The next iteration of the do-loop balances the parent page. */ - releasePage(pPage); - pCur->iPage--; - } - }while( rc==SQLITE_OK ); - - if( pFree ){ - sqlite3PageFree(pFree); - } - return rc; -} - - -/* -** Insert a new record into the BTree. The key is given by (pKey,nKey) -** and the data is given by (pData,nData). The cursor is used only to -** define what table the record should be inserted into. The cursor -** is left pointing at a random location. 
-** -** For an INTKEY table, only the nKey value of the key is used. pKey is -** ignored. For a ZERODATA table, the pData and nData are both ignored. -** -** If the seekResult parameter is non-zero, then a successful call to -** MovetoUnpacked() to seek cursor pCur to (pKey, nKey) has already -** been performed. seekResult is the search result returned (a negative -** number if pCur points at an entry that is smaller than (pKey, nKey), or -** a positive value if pCur points at an etry that is larger than -** (pKey, nKey)). -** -** If the seekResult parameter is non-zero, then the caller guarantees that -** cursor pCur is pointing at the existing copy of a row that is to be -** overwritten. If the seekResult parameter is 0, then cursor pCur may -** point to any entry or to no entry at all and so this function has to seek -** the cursor before the new key can be inserted. -*/ -SQLITE_PRIVATE int sqlite3BtreeInsert( - BtCursor *pCur, /* Insert data into the table of this cursor */ - const void *pKey, i64 nKey, /* The key of the new record */ - const void *pData, int nData, /* The data of the new record */ - int nZero, /* Number of extra 0 bytes to append to data */ - int appendBias, /* True if this is likely an append */ - int seekResult /* Result of prior MovetoUnpacked() call */ -){ - int rc; - int loc = seekResult; /* -1: before desired location +1: after */ - int szNew = 0; - int idx; - MemPage *pPage; - Btree *p = pCur->pBtree; - BtShared *pBt = p->pBt; - unsigned char *oldCell; - unsigned char *newCell = 0; - - if( pCur->eState==CURSOR_FAULT ){ - assert( pCur->skipNext!=SQLITE_OK ); - return pCur->skipNext; - } - - assert( cursorHoldsMutex(pCur) ); - assert( (pCur->curFlags & BTCF_WriteFlag)!=0 && pBt->inTransaction==TRANS_WRITE - && (pBt->btsFlags & BTS_READ_ONLY)==0 ); - assert( hasSharedCacheTableLock(p, pCur->pgnoRoot, pCur->pKeyInfo!=0, 2) ); - - /* Assert that the caller has been consistent. If this cursor was opened - ** expecting an index b-tree, then the caller should be inserting blob - ** keys with no associated data. If the cursor was opened expecting an - ** intkey table, the caller should be inserting integer keys with a - ** blob of associated data. */ - assert( (pKey==0)==(pCur->pKeyInfo==0) ); - - /* Save the positions of any other cursors open on this table. - ** - ** In some cases, the call to btreeMoveto() below is a no-op. For - ** example, when inserting data into a table with auto-generated integer - ** keys, the VDBE layer invokes sqlite3BtreeLast() to figure out the - ** integer key to use. It then calls this function to actually insert the - ** data into the intkey B-Tree. In this case btreeMoveto() recognizes - ** that the cursor is already where it needs to be and returns without - ** doing any work. To avoid thwarting these optimizations, it is important - ** not to clear the cursor here. 
- */ - rc = saveAllCursors(pBt, pCur->pgnoRoot, pCur); - if( rc ) return rc; - - if( pCur->pKeyInfo==0 ){ - /* If this is an insert into a table b-tree, invalidate any incrblob - ** cursors open on the row being replaced */ - invalidateIncrblobCursors(p, nKey, 0); - - /* If the cursor is currently on the last row and we are appending a - ** new row onto the end, set the "loc" to avoid an unnecessary btreeMoveto() - ** call */ - if( (pCur->curFlags&BTCF_ValidNKey)!=0 && nKey>0 && pCur->info.nKey==nKey-1 ){ - loc = -1; - } - } - - if( !loc ){ - rc = btreeMoveto(pCur, pKey, nKey, appendBias, &loc); - if( rc ) return rc; - } - assert( pCur->eState==CURSOR_VALID || (pCur->eState==CURSOR_INVALID && loc) ); - - pPage = pCur->apPage[pCur->iPage]; - assert( pPage->intKey || nKey>=0 ); - assert( pPage->leaf || !pPage->intKey ); - - TRACE(("INSERT: table=%d nkey=%lld ndata=%d page=%d %s\n", - pCur->pgnoRoot, nKey, nData, pPage->pgno, - loc==0 ? "overwrite" : "new entry")); - assert( pPage->isInit ); - allocateTempSpace(pBt); - newCell = pBt->pTmpSpace; - if( newCell==0 ) return SQLITE_NOMEM; - rc = fillInCell(pPage, newCell, pKey, nKey, pData, nData, nZero, &szNew); - if( rc ) goto end_insert; - assert( szNew==cellSizePtr(pPage, newCell) ); - assert( szNew <= MX_CELL_SIZE(pBt) ); - idx = pCur->aiIdx[pCur->iPage]; - if( loc==0 ){ - u16 szOld; - assert( idxnCell ); - rc = sqlite3PagerWrite(pPage->pDbPage); - if( rc ){ - goto end_insert; - } - oldCell = findCell(pPage, idx); - if( !pPage->leaf ){ - memcpy(newCell, oldCell, 4); - } - szOld = cellSizePtr(pPage, oldCell); - rc = clearCell(pPage, oldCell); - dropCell(pPage, idx, szOld, &rc); - if( rc ) goto end_insert; - }else if( loc<0 && pPage->nCell>0 ){ - assert( pPage->leaf ); - idx = ++pCur->aiIdx[pCur->iPage]; - }else{ - assert( pPage->leaf ); - } - insertCell(pPage, idx, newCell, szNew, 0, 0, &rc); - assert( rc!=SQLITE_OK || pPage->nCell>0 || pPage->nOverflow>0 ); - - /* If no error has occurred and pPage has an overflow cell, call balance() - ** to redistribute the cells within the tree. Since balance() may move - ** the cursor, zero the BtCursor.info.nSize and BTCF_ValidNKey - ** variables. - ** - ** Previous versions of SQLite called moveToRoot() to move the cursor - ** back to the root page as balance() used to invalidate the contents - ** of BtCursor.apPage[] and BtCursor.aiIdx[]. Instead of doing that, - ** set the cursor state to "invalid". This makes common insert operations - ** slightly faster. - ** - ** There is a subtle but important optimization here too. When inserting - ** multiple records into an intkey b-tree using a single cursor (as can - ** happen while processing an "INSERT INTO ... SELECT" statement), it - ** is advantageous to leave the cursor pointing to the last entry in - ** the b-tree if possible. If the cursor is left pointing to the last - ** entry in the table, and the next row inserted has an integer key - ** larger than the largest existing key, it is possible to insert the - ** row without seeking the cursor. This can be a big performance boost. - */ - pCur->info.nSize = 0; - if( rc==SQLITE_OK && pPage->nOverflow ){ - pCur->curFlags &= ~(BTCF_ValidNKey); - rc = balance(pCur); - - /* Must make sure nOverflow is reset to zero even if the balance() - ** fails. Internal data structure corruption will result otherwise. - ** Also, set the cursor state to invalid. This stops saveCursorPosition() - ** from trying to save the current position of the cursor. 
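The handling of "loc" above maps the prior seek result onto one of three actions at the cursor position. Stated on its own, with invented enumerator names:

/* Rough mapping of the seek result to the action taken at the cursor. */
enum InsertAction { OVERWRITE_CURRENT, INSERT_AFTER_CURRENT, INSERT_AT_CURRENT };

static enum InsertAction actionForLoc(int loc, int nCellOnPage){
  if( loc == 0 ) return OVERWRITE_CURRENT;        /* exact match: replace the existing row */
  if( loc < 0 && nCellOnPage > 0 ){
    return INSERT_AFTER_CURRENT;                  /* cursor sits on a smaller key */
  }
  return INSERT_AT_CURRENT;                       /* cursor sits on a larger key, or the page is empty */
}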
*/ - pCur->apPage[pCur->iPage]->nOverflow = 0; - pCur->eState = CURSOR_INVALID; - } - assert( pCur->apPage[pCur->iPage]->nOverflow==0 ); - -end_insert: - return rc; -} - -/* -** Delete the entry that the cursor is pointing to. The cursor -** is left pointing at a arbitrary location. -*/ -SQLITE_PRIVATE int sqlite3BtreeDelete(BtCursor *pCur){ - Btree *p = pCur->pBtree; - BtShared *pBt = p->pBt; - int rc; /* Return code */ - MemPage *pPage; /* Page to delete cell from */ - unsigned char *pCell; /* Pointer to cell to delete */ - int iCellIdx; /* Index of cell to delete */ - int iCellDepth; /* Depth of node containing pCell */ - - assert( cursorHoldsMutex(pCur) ); - assert( pBt->inTransaction==TRANS_WRITE ); - assert( (pBt->btsFlags & BTS_READ_ONLY)==0 ); - assert( pCur->curFlags & BTCF_WriteFlag ); - assert( hasSharedCacheTableLock(p, pCur->pgnoRoot, pCur->pKeyInfo!=0, 2) ); - assert( !hasReadConflicts(p, pCur->pgnoRoot) ); - - if( NEVER(pCur->aiIdx[pCur->iPage]>=pCur->apPage[pCur->iPage]->nCell) - || NEVER(pCur->eState!=CURSOR_VALID) - ){ - return SQLITE_ERROR; /* Something has gone awry. */ - } - - iCellDepth = pCur->iPage; - iCellIdx = pCur->aiIdx[iCellDepth]; - pPage = pCur->apPage[iCellDepth]; - pCell = findCell(pPage, iCellIdx); - - /* If the page containing the entry to delete is not a leaf page, move - ** the cursor to the largest entry in the tree that is smaller than - ** the entry being deleted. This cell will replace the cell being deleted - ** from the internal node. The 'previous' entry is used for this instead - ** of the 'next' entry, as the previous entry is always a part of the - ** sub-tree headed by the child page of the cell being deleted. This makes - ** balancing the tree following the delete operation easier. */ - if( !pPage->leaf ){ - int notUsed = 0; - rc = sqlite3BtreePrevious(pCur, ¬Used); - if( rc ) return rc; - } - - /* Save the positions of any other cursors open on this table before - ** making any modifications. Make the page containing the entry to be - ** deleted writable. Then free any overflow pages associated with the - ** entry and finally remove the cell itself from within the page. - */ - rc = saveAllCursors(pBt, pCur->pgnoRoot, pCur); - if( rc ) return rc; - - /* If this is a delete operation to remove a row from a table b-tree, - ** invalidate any incrblob cursors open on the row being deleted. */ - if( pCur->pKeyInfo==0 ){ - invalidateIncrblobCursors(p, pCur->info.nKey, 0); - } - - rc = sqlite3PagerWrite(pPage->pDbPage); - if( rc ) return rc; - rc = clearCell(pPage, pCell); - dropCell(pPage, iCellIdx, cellSizePtr(pPage, pCell), &rc); - if( rc ) return rc; - - /* If the cell deleted was not located on a leaf page, then the cursor - ** is currently pointing to the largest entry in the sub-tree headed - ** by the child-page of the cell that was just deleted from an internal - ** node. The cell from the leaf node needs to be moved to the internal - ** node to replace the deleted cell. */ - if( !pPage->leaf ){ - MemPage *pLeaf = pCur->apPage[pCur->iPage]; - int nCell; - Pgno n = pCur->apPage[iCellDepth+1]->pgno; - unsigned char *pTmp; - - pCell = findCell(pLeaf, pLeaf->nCell-1); - nCell = cellSizePtr(pLeaf, pCell); - assert( MX_CELL_SIZE(pBt) >= nCell ); - - allocateTempSpace(pBt); - pTmp = pBt->pTmpSpace; - - rc = sqlite3PagerWrite(pLeaf->pDbPage); - insertCell(pPage, iCellIdx, pCell-4, nCell+4, pTmp, n, &rc); - dropCell(pLeaf, pLeaf->nCell-1, nCell, &rc); - if( rc ) return rc; - } - - /* Balance the tree. 
If the entry deleted was located on a leaf page, - ** then the cursor still points to that page. In this case the first - ** call to balance() repairs the tree, and the if(...) condition is - ** never true. - ** - ** Otherwise, if the entry deleted was on an internal node page, then - ** pCur is pointing to the leaf page from which a cell was removed to - ** replace the cell deleted from the internal node. This is slightly - ** tricky as the leaf node may be underfull, and the internal node may - ** be either under or overfull. In this case run the balancing algorithm - ** on the leaf node first. If the balance proceeds far enough up the - ** tree that we can be sure that any problem in the internal node has - ** been corrected, so be it. Otherwise, after balancing the leaf node, - ** walk the cursor up the tree to the internal node and balance it as - ** well. */ - rc = balance(pCur); - if( rc==SQLITE_OK && pCur->iPage>iCellDepth ){ - while( pCur->iPage>iCellDepth ){ - releasePage(pCur->apPage[pCur->iPage--]); - } - rc = balance(pCur); - } - - if( rc==SQLITE_OK ){ - moveToRoot(pCur); - } - return rc; -} - -/* -** Create a new BTree table. Write into *piTable the page -** number for the root page of the new table. -** -** The type of type is determined by the flags parameter. Only the -** following values of flags are currently in use. Other values for -** flags might not work: -** -** BTREE_INTKEY|BTREE_LEAFDATA Used for SQL tables with rowid keys -** BTREE_ZERODATA Used for SQL indices -*/ -static int btreeCreateTable(Btree *p, int *piTable, int createTabFlags){ - BtShared *pBt = p->pBt; - MemPage *pRoot; - Pgno pgnoRoot; - int rc; - int ptfFlags; /* Page-type flage for the root page of new table */ - - assert( sqlite3BtreeHoldsMutex(p) ); - assert( pBt->inTransaction==TRANS_WRITE ); - assert( (pBt->btsFlags & BTS_READ_ONLY)==0 ); - -#ifdef SQLITE_OMIT_AUTOVACUUM - rc = allocateBtreePage(pBt, &pRoot, &pgnoRoot, 1, 0); - if( rc ){ - return rc; - } -#else - if( pBt->autoVacuum ){ - Pgno pgnoMove; /* Move a page here to make room for the root-page */ - MemPage *pPageMove; /* The page to move to. */ - - /* Creating a new table may probably require moving an existing database - ** to make room for the new tables root page. In case this page turns - ** out to be an overflow page, delete all overflow page-map caches - ** held by open cursors. - */ - invalidateAllOverflowCache(pBt); - - /* Read the value of meta[3] from the database to determine where the - ** root page of the new table should go. meta[3] is the largest root-page - ** created so far, so the new root-page is (meta[3]+1). - */ - sqlite3BtreeGetMeta(p, BTREE_LARGEST_ROOT_PAGE, &pgnoRoot); - pgnoRoot++; - - /* The new root-page may not be allocated on a pointer-map page, or the - ** PENDING_BYTE page. - */ - while( pgnoRoot==PTRMAP_PAGENO(pBt, pgnoRoot) || - pgnoRoot==PENDING_BYTE_PAGE(pBt) ){ - pgnoRoot++; - } - assert( pgnoRoot>=3 ); - - /* Allocate a page. The page that currently resides at pgnoRoot will - ** be moved to the allocated page (unless the allocated page happens - ** to reside at pgnoRoot). - */ - rc = allocateBtreePage(pBt, &pPageMove, &pgnoMove, pgnoRoot, BTALLOC_EXACT); - if( rc!=SQLITE_OK ){ - return rc; - } - - if( pgnoMove!=pgnoRoot ){ - /* pgnoRoot is the page that will be used for the root-page of - ** the new table (assuming an error did not occur). But we were - ** allocated pgnoMove. If required (i.e. 
if it was not allocated - ** by extending the file), the current page at position pgnoMove - ** is already journaled. - */ - u8 eType = 0; - Pgno iPtrPage = 0; - - /* Save the positions of any open cursors. This is required in - ** case they are holding a reference to an xFetch reference - ** corresponding to page pgnoRoot. */ - rc = saveAllCursors(pBt, 0, 0); - releasePage(pPageMove); - if( rc!=SQLITE_OK ){ - return rc; - } - - /* Move the page currently at pgnoRoot to pgnoMove. */ - rc = btreeGetPage(pBt, pgnoRoot, &pRoot, 0); - if( rc!=SQLITE_OK ){ - return rc; - } - rc = ptrmapGet(pBt, pgnoRoot, &eType, &iPtrPage); - if( eType==PTRMAP_ROOTPAGE || eType==PTRMAP_FREEPAGE ){ - rc = SQLITE_CORRUPT_BKPT; - } - if( rc!=SQLITE_OK ){ - releasePage(pRoot); - return rc; - } - assert( eType!=PTRMAP_ROOTPAGE ); - assert( eType!=PTRMAP_FREEPAGE ); - rc = relocatePage(pBt, pRoot, eType, iPtrPage, pgnoMove, 0); - releasePage(pRoot); - - /* Obtain the page at pgnoRoot */ - if( rc!=SQLITE_OK ){ - return rc; - } - rc = btreeGetPage(pBt, pgnoRoot, &pRoot, 0); - if( rc!=SQLITE_OK ){ - return rc; - } - rc = sqlite3PagerWrite(pRoot->pDbPage); - if( rc!=SQLITE_OK ){ - releasePage(pRoot); - return rc; - } - }else{ - pRoot = pPageMove; - } - - /* Update the pointer-map and meta-data with the new root-page number. */ - ptrmapPut(pBt, pgnoRoot, PTRMAP_ROOTPAGE, 0, &rc); - if( rc ){ - releasePage(pRoot); - return rc; - } - - /* When the new root page was allocated, page 1 was made writable in - ** order either to increase the database filesize, or to decrement the - ** freelist count. Hence, the sqlite3BtreeUpdateMeta() call cannot fail. - */ - assert( sqlite3PagerIswriteable(pBt->pPage1->pDbPage) ); - rc = sqlite3BtreeUpdateMeta(p, 4, pgnoRoot); - if( NEVER(rc) ){ - releasePage(pRoot); - return rc; - } - - }else{ - rc = allocateBtreePage(pBt, &pRoot, &pgnoRoot, 1, 0); - if( rc ) return rc; - } -#endif - assert( sqlite3PagerIswriteable(pRoot->pDbPage) ); - if( createTabFlags & BTREE_INTKEY ){ - ptfFlags = PTF_INTKEY | PTF_LEAFDATA | PTF_LEAF; - }else{ - ptfFlags = PTF_ZERODATA | PTF_LEAF; - } - zeroPage(pRoot, ptfFlags); - sqlite3PagerUnref(pRoot->pDbPage); - assert( (pBt->openFlags & BTREE_SINGLE)==0 || pgnoRoot==2 ); - *piTable = (int)pgnoRoot; - return SQLITE_OK; -} -SQLITE_PRIVATE int sqlite3BtreeCreateTable(Btree *p, int *piTable, int flags){ - int rc; - sqlite3BtreeEnter(p); - rc = btreeCreateTable(p, piTable, flags); - sqlite3BtreeLeave(p); - return rc; -} - -/* -** Erase the given database page and all its children. Return -** the page to the freelist. 
-*/ -static int clearDatabasePage( - BtShared *pBt, /* The BTree that contains the table */ - Pgno pgno, /* Page number to clear */ - int freePageFlag, /* Deallocate page if true */ - int *pnChange /* Add number of Cells freed to this counter */ -){ - MemPage *pPage; - int rc; - unsigned char *pCell; - int i; - int hdr; - - assert( sqlite3_mutex_held(pBt->mutex) ); - if( pgno>btreePagecount(pBt) ){ - return SQLITE_CORRUPT_BKPT; - } - - rc = getAndInitPage(pBt, pgno, &pPage, 0); - if( rc ) return rc; - hdr = pPage->hdrOffset; - for(i=0; inCell; i++){ - pCell = findCell(pPage, i); - if( !pPage->leaf ){ - rc = clearDatabasePage(pBt, get4byte(pCell), 1, pnChange); - if( rc ) goto cleardatabasepage_out; - } - rc = clearCell(pPage, pCell); - if( rc ) goto cleardatabasepage_out; - } - if( !pPage->leaf ){ - rc = clearDatabasePage(pBt, get4byte(&pPage->aData[hdr+8]), 1, pnChange); - if( rc ) goto cleardatabasepage_out; - }else if( pnChange ){ - assert( pPage->intKey ); - *pnChange += pPage->nCell; - } - if( freePageFlag ){ - freePage(pPage, &rc); - }else if( (rc = sqlite3PagerWrite(pPage->pDbPage))==0 ){ - zeroPage(pPage, pPage->aData[hdr] | PTF_LEAF); - } - -cleardatabasepage_out: - releasePage(pPage); - return rc; -} - -/* -** Delete all information from a single table in the database. iTable is -** the page number of the root of the table. After this routine returns, -** the root page is empty, but still exists. -** -** This routine will fail with SQLITE_LOCKED if there are any open -** read cursors on the table. Open write cursors are moved to the -** root of the table. -** -** If pnChange is not NULL, then table iTable must be an intkey table. The -** integer value pointed to by pnChange is incremented by the number of -** entries in the table. -*/ -SQLITE_PRIVATE int sqlite3BtreeClearTable(Btree *p, int iTable, int *pnChange){ - int rc; - BtShared *pBt = p->pBt; - sqlite3BtreeEnter(p); - assert( p->inTrans==TRANS_WRITE ); - - rc = saveAllCursors(pBt, (Pgno)iTable, 0); - - if( SQLITE_OK==rc ){ - /* Invalidate all incrblob cursors open on table iTable (assuming iTable - ** is the root of a table b-tree - if it is not, the following call is - ** a no-op). */ - invalidateIncrblobCursors(p, 0, 1); - rc = clearDatabasePage(pBt, (Pgno)iTable, 0, pnChange); - } - sqlite3BtreeLeave(p); - return rc; -} - -/* -** Delete all information from the single table that pCur is open on. -** -** This routine only work for pCur on an ephemeral table. -*/ -SQLITE_PRIVATE int sqlite3BtreeClearTableOfCursor(BtCursor *pCur){ - return sqlite3BtreeClearTable(pCur->pBtree, pCur->pgnoRoot, 0); -} - -/* -** Erase all information in a table and add the root of the table to -** the freelist. Except, the root of the principle table (the one on -** page 1) is never added to the freelist. -** -** This routine will fail with SQLITE_LOCKED if there are any open -** cursors on the table. -** -** If AUTOVACUUM is enabled and the page at iTable is not the last -** root page in the database file, then the last root page -** in the database file is moved into the slot formerly occupied by -** iTable and that last slot formerly occupied by the last root page -** is added to the freelist instead of iTable. In this say, all -** root pages are kept at the beginning of the database file, which -** is necessary for AUTOVACUUM to work right. *piMoved is set to the -** page number that used to be the last root page in the file before -** the move. If no page gets moved, *piMoved is set to 0. 
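/* A minimal sketch of how the two routines above are normally reached from
** the public API: an unqualified DELETE empties a table b-tree but keeps its
** root page (the clear-table path), while DROP TABLE also releases the root
** page (the drop-table path). The file and table names below are
** illustrative assumptions, not taken from the code above. */
#include "sqlite3.h"

static int demo_clear_vs_drop(void){
  sqlite3 *db = 0;
  int rc = sqlite3_open("demo.db", &db);          /* illustrative file name */
  if( rc!=SQLITE_OK ){ sqlite3_close(db); return rc; }
  sqlite3_exec(db, "CREATE TABLE IF NOT EXISTS t(x)", 0, 0, 0);
  sqlite3_exec(db, "DELETE FROM t", 0, 0, 0);     /* empties t, root page kept */
  sqlite3_exec(db, "DROP TABLE t", 0, 0, 0);      /* releases the root page too */
  return sqlite3_close(db);
}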
-** The last root page is recorded in meta[3] and the value of -** meta[3] is updated by this procedure. -*/ -static int btreeDropTable(Btree *p, Pgno iTable, int *piMoved){ - int rc; - MemPage *pPage = 0; - BtShared *pBt = p->pBt; - - assert( sqlite3BtreeHoldsMutex(p) ); - assert( p->inTrans==TRANS_WRITE ); - - /* It is illegal to drop a table if any cursors are open on the - ** database. This is because in auto-vacuum mode the backend may - ** need to move another root-page to fill a gap left by the deleted - ** root page. If an open cursor was using this page a problem would - ** occur. - ** - ** This error is caught long before control reaches this point. - */ - if( NEVER(pBt->pCursor) ){ - sqlite3ConnectionBlocked(p->db, pBt->pCursor->pBtree->db); - return SQLITE_LOCKED_SHAREDCACHE; - } - - rc = btreeGetPage(pBt, (Pgno)iTable, &pPage, 0); - if( rc ) return rc; - rc = sqlite3BtreeClearTable(p, iTable, 0); - if( rc ){ - releasePage(pPage); - return rc; - } - - *piMoved = 0; - - if( iTable>1 ){ -#ifdef SQLITE_OMIT_AUTOVACUUM - freePage(pPage, &rc); - releasePage(pPage); -#else - if( pBt->autoVacuum ){ - Pgno maxRootPgno; - sqlite3BtreeGetMeta(p, BTREE_LARGEST_ROOT_PAGE, &maxRootPgno); - - if( iTable==maxRootPgno ){ - /* If the table being dropped is the table with the largest root-page - ** number in the database, put the root page on the free list. - */ - freePage(pPage, &rc); - releasePage(pPage); - if( rc!=SQLITE_OK ){ - return rc; - } - }else{ - /* The table being dropped does not have the largest root-page - ** number in the database. So move the page that does into the - ** gap left by the deleted root-page. - */ - MemPage *pMove; - releasePage(pPage); - rc = btreeGetPage(pBt, maxRootPgno, &pMove, 0); - if( rc!=SQLITE_OK ){ - return rc; - } - rc = relocatePage(pBt, pMove, PTRMAP_ROOTPAGE, 0, iTable, 0); - releasePage(pMove); - if( rc!=SQLITE_OK ){ - return rc; - } - pMove = 0; - rc = btreeGetPage(pBt, maxRootPgno, &pMove, 0); - freePage(pMove, &rc); - releasePage(pMove); - if( rc!=SQLITE_OK ){ - return rc; - } - *piMoved = maxRootPgno; - } - - /* Set the new 'max-root-page' value in the database header. This - ** is the old value less one, less one more if that happens to - ** be a root-page number, less one again if that is the - ** PENDING_BYTE_PAGE. - */ - maxRootPgno--; - while( maxRootPgno==PENDING_BYTE_PAGE(pBt) - || PTRMAP_ISPAGE(pBt, maxRootPgno) ){ - maxRootPgno--; - } - assert( maxRootPgno!=PENDING_BYTE_PAGE(pBt) ); - - rc = sqlite3BtreeUpdateMeta(p, 4, maxRootPgno); - }else{ - freePage(pPage, &rc); - releasePage(pPage); - } -#endif - }else{ - /* If sqlite3BtreeDropTable was called on page 1. - ** This really never should happen except in a corrupt - ** database. - */ - zeroPage(pPage, PTF_INTKEY|PTF_LEAF ); - releasePage(pPage); - } - return rc; -} -SQLITE_PRIVATE int sqlite3BtreeDropTable(Btree *p, int iTable, int *piMoved){ - int rc; - sqlite3BtreeEnter(p); - rc = btreeDropTable(p, iTable, piMoved); - sqlite3BtreeLeave(p); - return rc; -} - - -/* -** This function may only be called if the b-tree connection already -** has a read or write transaction open on the database. -** -** Read the meta-information out of a database file. Meta[0] -** is the number of free pages currently in the database. Meta[1] -** through meta[15] are available for use by higher layers. Meta[0] -** is read-only, the others are read/write. -** -** The schema layer numbers meta values differently. 
At the schema -** layer (and the SetCookie and ReadCookie opcodes) the number of -** free pages is not visible. So Cookie[0] is the same as Meta[1]. -*/ -SQLITE_PRIVATE void sqlite3BtreeGetMeta(Btree *p, int idx, u32 *pMeta){ - BtShared *pBt = p->pBt; - - sqlite3BtreeEnter(p); - assert( p->inTrans>TRANS_NONE ); - assert( SQLITE_OK==querySharedCacheTableLock(p, MASTER_ROOT, READ_LOCK) ); - assert( pBt->pPage1 ); - assert( idx>=0 && idx<=15 ); - - *pMeta = get4byte(&pBt->pPage1->aData[36 + idx*4]); - - /* If auto-vacuum is disabled in this build and this is an auto-vacuum - ** database, mark the database as read-only. */ -#ifdef SQLITE_OMIT_AUTOVACUUM - if( idx==BTREE_LARGEST_ROOT_PAGE && *pMeta>0 ){ - pBt->btsFlags |= BTS_READ_ONLY; - } -#endif - - sqlite3BtreeLeave(p); -} - -/* -** Write meta-information back into the database. Meta[0] is -** read-only and may not be written. -*/ -SQLITE_PRIVATE int sqlite3BtreeUpdateMeta(Btree *p, int idx, u32 iMeta){ - BtShared *pBt = p->pBt; - unsigned char *pP1; - int rc; - assert( idx>=1 && idx<=15 ); - sqlite3BtreeEnter(p); - assert( p->inTrans==TRANS_WRITE ); - assert( pBt->pPage1!=0 ); - pP1 = pBt->pPage1->aData; - rc = sqlite3PagerWrite(pBt->pPage1->pDbPage); - if( rc==SQLITE_OK ){ - put4byte(&pP1[36 + idx*4], iMeta); -#ifndef SQLITE_OMIT_AUTOVACUUM - if( idx==BTREE_INCR_VACUUM ){ - assert( pBt->autoVacuum || iMeta==0 ); - assert( iMeta==0 || iMeta==1 ); - pBt->incrVacuum = (u8)iMeta; - } -#endif - } - sqlite3BtreeLeave(p); - return rc; -} - -#ifndef SQLITE_OMIT_BTREECOUNT -/* -** The first argument, pCur, is a cursor opened on some b-tree. Count the -** number of entries in the b-tree and write the result to *pnEntry. -** -** SQLITE_OK is returned if the operation is successfully executed. -** Otherwise, if an error is encountered (i.e. an IO error or database -** corruption) an SQLite error code is returned. -*/ -SQLITE_PRIVATE int sqlite3BtreeCount(BtCursor *pCur, i64 *pnEntry){ - i64 nEntry = 0; /* Value to return in *pnEntry */ - int rc; /* Return code */ - - if( pCur->pgnoRoot==0 ){ - *pnEntry = 0; - return SQLITE_OK; - } - rc = moveToRoot(pCur); - - /* Unless an error occurs, the following loop runs one iteration for each - ** page in the B-Tree structure (not including overflow pages). - */ - while( rc==SQLITE_OK ){ - int iIdx; /* Index of child node in parent */ - MemPage *pPage; /* Current page of the b-tree */ - - /* If this is a leaf page or the tree is not an int-key tree, then - ** this page contains countable entries. Increment the entry counter - ** accordingly. - */ - pPage = pCur->apPage[pCur->iPage]; - if( pPage->leaf || !pPage->intKey ){ - nEntry += pPage->nCell; - } - - /* pPage is a leaf node. This loop navigates the cursor so that it - ** points to the first interior cell that it points to the parent of - ** the next page in the tree that has not yet been visited. The - ** pCur->aiIdx[pCur->iPage] value is set to the index of the parent cell - ** of the page, or to the number of cells in the page if the next page - ** to visit is the right-child of its parent. - ** - ** If all pages in the tree have been visited, return SQLITE_OK to the - ** caller. - */ - if( pPage->leaf ){ - do { - if( pCur->iPage==0 ){ - /* All pages of the b-tree have been visited. Return successfully. 
*/ - *pnEntry = nEntry; - return SQLITE_OK; - } - moveToParent(pCur); - }while ( pCur->aiIdx[pCur->iPage]>=pCur->apPage[pCur->iPage]->nCell ); - - pCur->aiIdx[pCur->iPage]++; - pPage = pCur->apPage[pCur->iPage]; - } - - /* Descend to the child node of the cell that the cursor currently - ** points at. This is the right-child if (iIdx==pPage->nCell). - */ - iIdx = pCur->aiIdx[pCur->iPage]; - if( iIdx==pPage->nCell ){ - rc = moveToChild(pCur, get4byte(&pPage->aData[pPage->hdrOffset+8])); - }else{ - rc = moveToChild(pCur, get4byte(findCell(pPage, iIdx))); - } - } - - /* An error has occurred. Return an error code. */ - return rc; -} -#endif - -/* -** Return the pager associated with a BTree. This routine is used for -** testing and debugging only. -*/ -SQLITE_PRIVATE Pager *sqlite3BtreePager(Btree *p){ - return p->pBt->pPager; -} - -#ifndef SQLITE_OMIT_INTEGRITY_CHECK -/* -** Append a message to the error message string. -*/ -static void checkAppendMsg( - IntegrityCk *pCheck, - char *zMsg1, - const char *zFormat, - ... -){ - va_list ap; - if( !pCheck->mxErr ) return; - pCheck->mxErr--; - pCheck->nErr++; - va_start(ap, zFormat); - if( pCheck->errMsg.nChar ){ - sqlite3StrAccumAppend(&pCheck->errMsg, "\n", 1); - } - if( zMsg1 ){ - sqlite3StrAccumAppendAll(&pCheck->errMsg, zMsg1); - } - sqlite3VXPrintf(&pCheck->errMsg, 1, zFormat, ap); - va_end(ap); - if( pCheck->errMsg.accError==STRACCUM_NOMEM ){ - pCheck->mallocFailed = 1; - } -} -#endif /* SQLITE_OMIT_INTEGRITY_CHECK */ - -#ifndef SQLITE_OMIT_INTEGRITY_CHECK - -/* -** Return non-zero if the bit in the IntegrityCk.aPgRef[] array that -** corresponds to page iPg is already set. -*/ -static int getPageReferenced(IntegrityCk *pCheck, Pgno iPg){ - assert( iPg<=pCheck->nPage && sizeof(pCheck->aPgRef[0])==1 ); - return (pCheck->aPgRef[iPg/8] & (1 << (iPg & 0x07))); -} - -/* -** Set the bit in the IntegrityCk.aPgRef[] array that corresponds to page iPg. -*/ -static void setPageReferenced(IntegrityCk *pCheck, Pgno iPg){ - assert( iPg<=pCheck->nPage && sizeof(pCheck->aPgRef[0])==1 ); - pCheck->aPgRef[iPg/8] |= (1 << (iPg & 0x07)); -} - - -/* -** Add 1 to the reference count for page iPage. If this is the second -** reference to the page, add an error message to pCheck->zErrMsg. -** Return 1 if there are 2 ore more references to the page and 0 if -** if this is the first reference to the page. -** -** Also check that the page number is in bounds. -*/ -static int checkRef(IntegrityCk *pCheck, Pgno iPage, char *zContext){ - if( iPage==0 ) return 1; - if( iPage>pCheck->nPage ){ - checkAppendMsg(pCheck, zContext, "invalid page number %d", iPage); - return 1; - } - if( getPageReferenced(pCheck, iPage) ){ - checkAppendMsg(pCheck, zContext, "2nd reference to page %d", iPage); - return 1; - } - setPageReferenced(pCheck, iPage); - return 0; -} - -#ifndef SQLITE_OMIT_AUTOVACUUM -/* -** Check that the entry in the pointer-map for page iChild maps to -** page iParent, pointer type ptrType. If not, append an error message -** to pCheck. 
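/* The page-reference bookkeeping above stores one bit per page: page iPg is
** tracked in bit (iPg & 7) of byte iPg/8 of the aPgRef[] array. A small
** standalone sketch of the same idiom, with illustrative names: */
#include <stdlib.h>

typedef struct PageRefMap PageRefMap;
struct PageRefMap {
  unsigned char *aBits;       /* one bit per page; page numbers are 1-based */
  unsigned int nPage;         /* largest valid page number */
};

static int refmap_init(PageRefMap *p, unsigned int nPage){
  p->nPage = nPage;
  p->aBits = (unsigned char*)calloc(nPage/8 + 1, 1);
  return p->aBits==0;         /* non-zero on allocation failure */
}
/* Mirrors checkRef(): mark iPg as referenced and return 1 if it was out of
** range or had already been seen, 0 on the first valid reference. */
static int refmap_mark(PageRefMap *p, unsigned int iPg){
  if( iPg==0 || iPg>p->nPage ) return 1;
  if( p->aBits[iPg/8] & (1 << (iPg & 7)) ) return 1;
  p->aBits[iPg/8] |= (unsigned char)(1 << (iPg & 7));
  return 0;
}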
-*/ -static void checkPtrmap( - IntegrityCk *pCheck, /* Integrity check context */ - Pgno iChild, /* Child page number */ - u8 eType, /* Expected pointer map type */ - Pgno iParent, /* Expected pointer map parent page number */ - char *zContext /* Context description (used for error msg) */ -){ - int rc; - u8 ePtrmapType; - Pgno iPtrmapParent; - - rc = ptrmapGet(pCheck->pBt, iChild, &ePtrmapType, &iPtrmapParent); - if( rc!=SQLITE_OK ){ - if( rc==SQLITE_NOMEM || rc==SQLITE_IOERR_NOMEM ) pCheck->mallocFailed = 1; - checkAppendMsg(pCheck, zContext, "Failed to read ptrmap key=%d", iChild); - return; - } - - if( ePtrmapType!=eType || iPtrmapParent!=iParent ){ - checkAppendMsg(pCheck, zContext, - "Bad ptr map entry key=%d expected=(%d,%d) got=(%d,%d)", - iChild, eType, iParent, ePtrmapType, iPtrmapParent); - } -} -#endif - -/* -** Check the integrity of the freelist or of an overflow page list. -** Verify that the number of pages on the list is N. -*/ -static void checkList( - IntegrityCk *pCheck, /* Integrity checking context */ - int isFreeList, /* True for a freelist. False for overflow page list */ - int iPage, /* Page number for first page in the list */ - int N, /* Expected number of pages in the list */ - char *zContext /* Context for error messages */ -){ - int i; - int expected = N; - int iFirst = iPage; - while( N-- > 0 && pCheck->mxErr ){ - DbPage *pOvflPage; - unsigned char *pOvflData; - if( iPage<1 ){ - checkAppendMsg(pCheck, zContext, - "%d of %d pages missing from overflow list starting at %d", - N+1, expected, iFirst); - break; - } - if( checkRef(pCheck, iPage, zContext) ) break; - if( sqlite3PagerGet(pCheck->pPager, (Pgno)iPage, &pOvflPage) ){ - checkAppendMsg(pCheck, zContext, "failed to get page %d", iPage); - break; - } - pOvflData = (unsigned char *)sqlite3PagerGetData(pOvflPage); - if( isFreeList ){ - int n = get4byte(&pOvflData[4]); -#ifndef SQLITE_OMIT_AUTOVACUUM - if( pCheck->pBt->autoVacuum ){ - checkPtrmap(pCheck, iPage, PTRMAP_FREEPAGE, 0, zContext); - } -#endif - if( n>(int)pCheck->pBt->usableSize/4-2 ){ - checkAppendMsg(pCheck, zContext, - "freelist leaf count too big on page %d", iPage); - N--; - }else{ - for(i=0; ipBt->autoVacuum ){ - checkPtrmap(pCheck, iFreePage, PTRMAP_FREEPAGE, 0, zContext); - } -#endif - checkRef(pCheck, iFreePage, zContext); - } - N -= n; - } - } -#ifndef SQLITE_OMIT_AUTOVACUUM - else{ - /* If this database supports auto-vacuum and iPage is not the last - ** page in this overflow list, check that the pointer-map entry for - ** the following page matches iPage. - */ - if( pCheck->pBt->autoVacuum && N>0 ){ - i = get4byte(pOvflData); - checkPtrmap(pCheck, i, PTRMAP_OVERFLOW2, iPage, zContext); - } - } -#endif - iPage = get4byte(pOvflData); - sqlite3PagerUnref(pOvflPage); - } -} -#endif /* SQLITE_OMIT_INTEGRITY_CHECK */ - -#ifndef SQLITE_OMIT_INTEGRITY_CHECK -/* -** Do various sanity checks on a single page of a tree. Return -** the tree depth. Root pages return 0. Parents of root pages -** return 1, and so forth. -** -** These checks are done: -** -** 1. Make sure that cells and freeblocks do not overlap -** but combine to completely cover the page. -** NO 2. Make sure cell keys are in order. -** NO 3. Make sure no key is less than or equal to zLowerBound. -** NO 4. Make sure no key is greater than or equal to zUpperBound. -** 5. Check the integrity of overflow pages. -** 6. Recursively call checkTreePage on all children. -** 7. Verify that the depth of all children is the same. -** 8. 
Make sure this page is at least 33% full or else it is -** the root of the tree. -*/ -static int checkTreePage( - IntegrityCk *pCheck, /* Context for the sanity check */ - int iPage, /* Page number of the page to check */ - char *zParentContext, /* Parent context */ - i64 *pnParentMinKey, - i64 *pnParentMaxKey -){ - MemPage *pPage; - int i, rc, depth, d2, pgno, cnt; - int hdr, cellStart; - int nCell; - u8 *data; - BtShared *pBt; - int usableSize; - char zContext[100]; - char *hit = 0; - i64 nMinKey = 0; - i64 nMaxKey = 0; - - sqlite3_snprintf(sizeof(zContext), zContext, "Page %d: ", iPage); - - /* Check that the page exists - */ - pBt = pCheck->pBt; - usableSize = pBt->usableSize; - if( iPage==0 ) return 0; - if( checkRef(pCheck, iPage, zParentContext) ) return 0; - if( (rc = btreeGetPage(pBt, (Pgno)iPage, &pPage, 0))!=0 ){ - checkAppendMsg(pCheck, zContext, - "unable to get the page. error code=%d", rc); - return 0; - } - - /* Clear MemPage.isInit to make sure the corruption detection code in - ** btreeInitPage() is executed. */ - pPage->isInit = 0; - if( (rc = btreeInitPage(pPage))!=0 ){ - assert( rc==SQLITE_CORRUPT ); /* The only possible error from InitPage */ - checkAppendMsg(pCheck, zContext, - "btreeInitPage() returns error code %d", rc); - releasePage(pPage); - return 0; - } - - /* Check out all the cells. - */ - depth = 0; - for(i=0; inCell && pCheck->mxErr; i++){ - u8 *pCell; - u32 sz; - CellInfo info; - - /* Check payload overflow pages - */ - sqlite3_snprintf(sizeof(zContext), zContext, - "On tree page %d cell %d: ", iPage, i); - pCell = findCell(pPage,i); - btreeParseCellPtr(pPage, pCell, &info); - sz = info.nData; - if( !pPage->intKey ) sz += (int)info.nKey; - /* For intKey pages, check that the keys are in order. - */ - else if( i==0 ) nMinKey = nMaxKey = info.nKey; - else{ - if( info.nKey <= nMaxKey ){ - checkAppendMsg(pCheck, zContext, - "Rowid %lld out of order (previous was %lld)", info.nKey, nMaxKey); - } - nMaxKey = info.nKey; - } - assert( sz==info.nPayload ); - if( (sz>info.nLocal) - && (&pCell[info.iOverflow]<=&pPage->aData[pBt->usableSize]) - ){ - int nPage = (sz - info.nLocal + usableSize - 5)/(usableSize - 4); - Pgno pgnoOvfl = get4byte(&pCell[info.iOverflow]); -#ifndef SQLITE_OMIT_AUTOVACUUM - if( pBt->autoVacuum ){ - checkPtrmap(pCheck, pgnoOvfl, PTRMAP_OVERFLOW1, iPage, zContext); - } -#endif - checkList(pCheck, 0, pgnoOvfl, nPage, zContext); - } - - /* Check sanity of left child page. - */ - if( !pPage->leaf ){ - pgno = get4byte(pCell); -#ifndef SQLITE_OMIT_AUTOVACUUM - if( pBt->autoVacuum ){ - checkPtrmap(pCheck, pgno, PTRMAP_BTREE, iPage, zContext); - } -#endif - d2 = checkTreePage(pCheck, pgno, zContext, &nMinKey, i==0 ? NULL : &nMaxKey); - if( i>0 && d2!=depth ){ - checkAppendMsg(pCheck, zContext, "Child page depth differs"); - } - depth = d2; - } - } - - if( !pPage->leaf ){ - pgno = get4byte(&pPage->aData[pPage->hdrOffset+8]); - sqlite3_snprintf(sizeof(zContext), zContext, - "On page %d at right child: ", iPage); -#ifndef SQLITE_OMIT_AUTOVACUUM - if( pBt->autoVacuum ){ - checkPtrmap(pCheck, pgno, PTRMAP_BTREE, iPage, zContext); - } -#endif - checkTreePage(pCheck, pgno, zContext, NULL, !pPage->nCell ? NULL : &nMaxKey); - } - - /* For intKey leaf pages, check that the min/max keys are in order - ** with any left/parent/right pages. 
- */ - if( pPage->leaf && pPage->intKey ){ - /* if we are a left child page */ - if( pnParentMinKey ){ - /* if we are the left most child page */ - if( !pnParentMaxKey ){ - if( nMaxKey > *pnParentMinKey ){ - checkAppendMsg(pCheck, zContext, - "Rowid %lld out of order (max larger than parent min of %lld)", - nMaxKey, *pnParentMinKey); - } - }else{ - if( nMinKey <= *pnParentMinKey ){ - checkAppendMsg(pCheck, zContext, - "Rowid %lld out of order (min less than parent min of %lld)", - nMinKey, *pnParentMinKey); - } - if( nMaxKey > *pnParentMaxKey ){ - checkAppendMsg(pCheck, zContext, - "Rowid %lld out of order (max larger than parent max of %lld)", - nMaxKey, *pnParentMaxKey); - } - *pnParentMinKey = nMaxKey; - } - /* else if we're a right child page */ - } else if( pnParentMaxKey ){ - if( nMinKey <= *pnParentMaxKey ){ - checkAppendMsg(pCheck, zContext, - "Rowid %lld out of order (min less than parent max of %lld)", - nMinKey, *pnParentMaxKey); - } - } - } - - /* Check for complete coverage of the page - */ - data = pPage->aData; - hdr = pPage->hdrOffset; - hit = sqlite3PageMalloc( pBt->pageSize ); - if( hit==0 ){ - pCheck->mallocFailed = 1; - }else{ - int contentOffset = get2byteNotZero(&data[hdr+5]); - assert( contentOffset<=usableSize ); /* Enforced by btreeInitPage() */ - memset(hit+contentOffset, 0, usableSize-contentOffset); - memset(hit, 1, contentOffset); - nCell = get2byte(&data[hdr+3]); - cellStart = hdr + 12 - 4*pPage->leaf; - for(i=0; i=usableSize ){ - checkAppendMsg(pCheck, 0, - "Corruption detected in cell %d on page %d",i,iPage); - }else{ - for(j=pc+size-1; j>=pc; j--) hit[j]++; - } - } - i = get2byte(&data[hdr+1]); - while( i>0 ){ - int size, j; - assert( i<=usableSize-4 ); /* Enforced by btreeInitPage() */ - size = get2byte(&data[i+2]); - assert( i+size<=usableSize ); /* Enforced by btreeInitPage() */ - for(j=i+size-1; j>=i; j--) hit[j]++; - j = get2byte(&data[i]); - assert( j==0 || j>i+size ); /* Enforced by btreeInitPage() */ - assert( j<=usableSize-4 ); /* Enforced by btreeInitPage() */ - i = j; - } - for(i=cnt=0; i1 ){ - checkAppendMsg(pCheck, 0, - "Multiple uses for byte %d of page %d", i, iPage); - break; - } - } - if( cnt!=data[hdr+7] ){ - checkAppendMsg(pCheck, 0, - "Fragmentation of %d bytes reported as %d on page %d", - cnt, data[hdr+7], iPage); - } - } - sqlite3PageFree(hit); - releasePage(pPage); - return depth+1; -} -#endif /* SQLITE_OMIT_INTEGRITY_CHECK */ - -#ifndef SQLITE_OMIT_INTEGRITY_CHECK -/* -** This routine does a complete check of the given BTree file. aRoot[] is -** an array of pages numbers were each page number is the root page of -** a table. nRoot is the number of entries in aRoot. -** -** A read-only or read-write transaction must be opened before calling -** this function. -** -** Write the number of error seen in *pnErr. Except for some memory -** allocation errors, an error message held in memory obtained from -** malloc is returned if *pnErr is non-zero. If *pnErr==0 then NULL is -** returned. If a memory allocation error occurs, NULL is returned. 
-*/ -SQLITE_PRIVATE char *sqlite3BtreeIntegrityCheck( - Btree *p, /* The btree to be checked */ - int *aRoot, /* An array of root pages numbers for individual trees */ - int nRoot, /* Number of entries in aRoot[] */ - int mxErr, /* Stop reporting errors after this many */ - int *pnErr /* Write number of errors seen to this variable */ -){ - Pgno i; - int nRef; - IntegrityCk sCheck; - BtShared *pBt = p->pBt; - char zErr[100]; - - sqlite3BtreeEnter(p); - assert( p->inTrans>TRANS_NONE && pBt->inTransaction>TRANS_NONE ); - nRef = sqlite3PagerRefcount(pBt->pPager); - sCheck.pBt = pBt; - sCheck.pPager = pBt->pPager; - sCheck.nPage = btreePagecount(sCheck.pBt); - sCheck.mxErr = mxErr; - sCheck.nErr = 0; - sCheck.mallocFailed = 0; - *pnErr = 0; - if( sCheck.nPage==0 ){ - sqlite3BtreeLeave(p); - return 0; - } - - sCheck.aPgRef = sqlite3MallocZero((sCheck.nPage / 8)+ 1); - if( !sCheck.aPgRef ){ - *pnErr = 1; - sqlite3BtreeLeave(p); - return 0; - } - i = PENDING_BYTE_PAGE(pBt); - if( i<=sCheck.nPage ) setPageReferenced(&sCheck, i); - sqlite3StrAccumInit(&sCheck.errMsg, zErr, sizeof(zErr), SQLITE_MAX_LENGTH); - sCheck.errMsg.useMalloc = 2; - - /* Check the integrity of the freelist - */ - checkList(&sCheck, 1, get4byte(&pBt->pPage1->aData[32]), - get4byte(&pBt->pPage1->aData[36]), "Main freelist: "); - - /* Check all the tables. - */ - for(i=0; (int)iautoVacuum && aRoot[i]>1 ){ - checkPtrmap(&sCheck, aRoot[i], PTRMAP_ROOTPAGE, 0, 0); - } -#endif - checkTreePage(&sCheck, aRoot[i], "List of tree roots: ", NULL, NULL); - } - - /* Make sure every page in the file is referenced - */ - for(i=1; i<=sCheck.nPage && sCheck.mxErr; i++){ -#ifdef SQLITE_OMIT_AUTOVACUUM - if( getPageReferenced(&sCheck, i)==0 ){ - checkAppendMsg(&sCheck, 0, "Page %d is never used", i); - } -#else - /* If the database supports auto-vacuum, make sure no tables contain - ** references to pointer-map pages. - */ - if( getPageReferenced(&sCheck, i)==0 && - (PTRMAP_PAGENO(pBt, i)!=i || !pBt->autoVacuum) ){ - checkAppendMsg(&sCheck, 0, "Page %d is never used", i); - } - if( getPageReferenced(&sCheck, i)!=0 && - (PTRMAP_PAGENO(pBt, i)==i && pBt->autoVacuum) ){ - checkAppendMsg(&sCheck, 0, "Pointer map page %d is referenced", i); - } -#endif - } - - /* Make sure this analysis did not leave any unref() pages. - ** This is an internal consistency check; an integrity check - ** of the integrity check. - */ - if( NEVER(nRef != sqlite3PagerRefcount(pBt->pPager)) ){ - checkAppendMsg(&sCheck, 0, - "Outstanding page count goes from %d to %d during this analysis", - nRef, sqlite3PagerRefcount(pBt->pPager) - ); - } - - /* Clean up and report errors. - */ - sqlite3BtreeLeave(p); - sqlite3_free(sCheck.aPgRef); - if( sCheck.mallocFailed ){ - sqlite3StrAccumReset(&sCheck.errMsg); - *pnErr = sCheck.nErr+1; - return 0; - } - *pnErr = sCheck.nErr; - if( sCheck.nErr==0 ) sqlite3StrAccumReset(&sCheck.errMsg); - return sqlite3StrAccumFinish(&sCheck.errMsg); -} -#endif /* SQLITE_OMIT_INTEGRITY_CHECK */ - -/* -** Return the full pathname of the underlying database file. Return -** an empty string if the database is in-memory or a TEMP database. -** -** The pager filename is invariant as long as the pager is -** open so it is safe to access without the BtShared mutex. -*/ -SQLITE_PRIVATE const char *sqlite3BtreeGetFilename(Btree *p){ - assert( p->pBt->pPager!=0 ); - return sqlite3PagerFilename(p->pBt->pPager, 1); -} - -/* -** Return the pathname of the journal file for this database. 
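/* In a typical build the checker above is reached through the integrity
** pragmas rather than called directly. A minimal sketch using the public
** API; the callback simply prints each result row ("ok" on success, one
** error message per row otherwise): */
#include <stdio.h>
#include "sqlite3.h"

static int print_check_row(void *pUnused, int nCol, char **azVal, char **azCol){
  (void)pUnused; (void)nCol; (void)azCol;
  printf("%s\n", azVal[0] ? azVal[0] : "NULL");
  return 0;
}
static int demo_integrity_check(sqlite3 *db){
  return sqlite3_exec(db, "PRAGMA integrity_check;", print_check_row, 0, 0);
}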
The return -** value of this routine is the same regardless of whether the journal file -** has been created or not. -** -** The pager journal filename is invariant as long as the pager is -** open so it is safe to access without the BtShared mutex. -*/ -SQLITE_PRIVATE const char *sqlite3BtreeGetJournalname(Btree *p){ - assert( p->pBt->pPager!=0 ); - return sqlite3PagerJournalname(p->pBt->pPager); -} - -/* -** Return non-zero if a transaction is active. -*/ -SQLITE_PRIVATE int sqlite3BtreeIsInTrans(Btree *p){ - assert( p==0 || sqlite3_mutex_held(p->db->mutex) ); - return (p && (p->inTrans==TRANS_WRITE)); -} - -#ifndef SQLITE_OMIT_WAL -/* -** Run a checkpoint on the Btree passed as the first argument. -** -** Return SQLITE_LOCKED if this or any other connection has an open -** transaction on the shared-cache the argument Btree is connected to. -** -** Parameter eMode is one of SQLITE_CHECKPOINT_PASSIVE, FULL or RESTART. -*/ -SQLITE_PRIVATE int sqlite3BtreeCheckpoint(Btree *p, int eMode, int *pnLog, int *pnCkpt){ - int rc = SQLITE_OK; - if( p ){ - BtShared *pBt = p->pBt; - sqlite3BtreeEnter(p); - if( pBt->inTransaction!=TRANS_NONE ){ - rc = SQLITE_LOCKED; - }else{ - rc = sqlite3PagerCheckpoint(pBt->pPager, eMode, pnLog, pnCkpt); - } - sqlite3BtreeLeave(p); - } - return rc; -} -#endif - -/* -** Return non-zero if a read (or write) transaction is active. -*/ -SQLITE_PRIVATE int sqlite3BtreeIsInReadTrans(Btree *p){ - assert( p ); - assert( sqlite3_mutex_held(p->db->mutex) ); - return p->inTrans!=TRANS_NONE; -} - -SQLITE_PRIVATE int sqlite3BtreeIsInBackup(Btree *p){ - assert( p ); - assert( sqlite3_mutex_held(p->db->mutex) ); - return p->nBackup!=0; -} - -/* -** This function returns a pointer to a blob of memory associated with -** a single shared-btree. The memory is used by client code for its own -** purposes (for example, to store a high-level schema associated with -** the shared-btree). The btree layer manages reference counting issues. -** -** The first time this is called on a shared-btree, nBytes bytes of memory -** are allocated, zeroed, and returned to the caller. For each subsequent -** call the nBytes parameter is ignored and a pointer to the same blob -** of memory returned. -** -** If the nBytes parameter is 0 and the blob of memory has not yet been -** allocated, a null pointer is returned. If the blob has already been -** allocated, it is returned as normal. -** -** Just before the shared-btree is closed, the function passed as the -** xFree argument when the memory allocation was made is invoked on the -** blob of allocated memory. The xFree function should not call sqlite3_free() -** on the memory, the btree layer does that. -*/ -SQLITE_PRIVATE void *sqlite3BtreeSchema(Btree *p, int nBytes, void(*xFree)(void *)){ - BtShared *pBt = p->pBt; - sqlite3BtreeEnter(p); - if( !pBt->pSchema && nBytes ){ - pBt->pSchema = sqlite3DbMallocZero(0, nBytes); - pBt->xFreeSchema = xFree; - } - sqlite3BtreeLeave(p); - return pBt->pSchema; -} - -/* -** Return SQLITE_LOCKED_SHAREDCACHE if another user of the same shared -** btree as the argument handle holds an exclusive lock on the -** sqlite_master table. Otherwise SQLITE_OK. 
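/* The checkpoint entry point above is normally driven through the public
** WAL checkpoint API. A minimal sketch, assuming an open connection whose
** "main" database is in WAL mode; *pnLog and *pnCkpt receive the log size
** and the number of frames checkpointed: */
#include "sqlite3.h"

static int demo_checkpoint(sqlite3 *db, int *pnLog, int *pnCkpt){
  /* May return SQLITE_BUSY or SQLITE_LOCKED if other work is in progress. */
  return sqlite3_wal_checkpoint_v2(db, "main", SQLITE_CHECKPOINT_FULL,
                                   pnLog, pnCkpt);
}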
-*/ -SQLITE_PRIVATE int sqlite3BtreeSchemaLocked(Btree *p){ - int rc; - assert( sqlite3_mutex_held(p->db->mutex) ); - sqlite3BtreeEnter(p); - rc = querySharedCacheTableLock(p, MASTER_ROOT, READ_LOCK); - assert( rc==SQLITE_OK || rc==SQLITE_LOCKED_SHAREDCACHE ); - sqlite3BtreeLeave(p); - return rc; -} - - -#ifndef SQLITE_OMIT_SHARED_CACHE -/* -** Obtain a lock on the table whose root page is iTab. The -** lock is a write lock if isWritelock is true or a read lock -** if it is false. -*/ -SQLITE_PRIVATE int sqlite3BtreeLockTable(Btree *p, int iTab, u8 isWriteLock){ - int rc = SQLITE_OK; - assert( p->inTrans!=TRANS_NONE ); - if( p->sharable ){ - u8 lockType = READ_LOCK + isWriteLock; - assert( READ_LOCK+1==WRITE_LOCK ); - assert( isWriteLock==0 || isWriteLock==1 ); - - sqlite3BtreeEnter(p); - rc = querySharedCacheTableLock(p, iTab, lockType); - if( rc==SQLITE_OK ){ - rc = setSharedCacheTableLock(p, iTab, lockType); - } - sqlite3BtreeLeave(p); - } - return rc; -} -#endif - -#ifndef SQLITE_OMIT_INCRBLOB -/* -** Argument pCsr must be a cursor opened for writing on an -** INTKEY table currently pointing at a valid table entry. -** This function modifies the data stored as part of that entry. -** -** Only the data content may only be modified, it is not possible to -** change the length of the data stored. If this function is called with -** parameters that attempt to write past the end of the existing data, -** no modifications are made and SQLITE_CORRUPT is returned. -*/ -SQLITE_PRIVATE int sqlite3BtreePutData(BtCursor *pCsr, u32 offset, u32 amt, void *z){ - int rc; - assert( cursorHoldsMutex(pCsr) ); - assert( sqlite3_mutex_held(pCsr->pBtree->db->mutex) ); - assert( pCsr->curFlags & BTCF_Incrblob ); - - rc = restoreCursorPosition(pCsr); - if( rc!=SQLITE_OK ){ - return rc; - } - assert( pCsr->eState!=CURSOR_REQUIRESEEK ); - if( pCsr->eState!=CURSOR_VALID ){ - return SQLITE_ABORT; - } - - /* Save the positions of all other cursors open on this table. This is - ** required in case any of them are holding references to an xFetch - ** version of the b-tree page modified by the accessPayload call below. - ** - ** Note that pCsr must be open on a BTREE_INTKEY table and saveCursorPosition() - ** and hence saveAllCursors() cannot fail on a BTREE_INTKEY table, hence - ** saveAllCursors can only return SQLITE_OK. - */ - VVA_ONLY(rc =) saveAllCursors(pCsr->pBt, pCsr->pgnoRoot, pCsr); - assert( rc==SQLITE_OK ); - - /* Check some assumptions: - ** (a) the cursor is open for writing, - ** (b) there is a read/write transaction open, - ** (c) the connection holds a write-lock on the table (if required), - ** (d) there are no conflicting read-locks, and - ** (e) the cursor points at a valid row of an intKey table. - */ - if( (pCsr->curFlags & BTCF_WriteFlag)==0 ){ - return SQLITE_READONLY; - } - assert( (pCsr->pBt->btsFlags & BTS_READ_ONLY)==0 - && pCsr->pBt->inTransaction==TRANS_WRITE ); - assert( hasSharedCacheTableLock(pCsr->pBtree, pCsr->pgnoRoot, 0, 2) ); - assert( !hasReadConflicts(pCsr->pBtree, pCsr->pgnoRoot) ); - assert( pCsr->apPage[pCsr->iPage]->intKey ); - - return accessPayload(pCsr, offset, amt, (unsigned char *)z, 1); -} - -/* -** Mark this cursor as an incremental blob cursor. -*/ -SQLITE_PRIVATE void sqlite3BtreeIncrblobCursor(BtCursor *pCur){ - pCur->curFlags |= BTCF_Incrblob; -} -#endif - -/* -** Set both the "read version" (single byte at byte offset 18) and -** "write version" (single byte at byte offset 19) fields in the database -** header to iVersion. 
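/* sqlite3BtreePutData() above underlies the public incremental blob API.
** A minimal sketch that overwrites part of an existing blob in place;
** writing past the current end of the value is rejected, matching the
** constraint described above. The table and column names ("t", "data")
** are illustrative assumptions. */
#include "sqlite3.h"

static int demo_blob_overwrite(sqlite3 *db, sqlite3_int64 rowid,
                               const void *z, int n, int offset){
  sqlite3_blob *pBlob = 0;
  int rc = sqlite3_blob_open(db, "main", "t", "data", rowid,
                             1 /* open read-write */, &pBlob);
  if( rc==SQLITE_OK ){
    rc = sqlite3_blob_write(pBlob, z, n, offset);
  }
  sqlite3_blob_close(pBlob);   /* harmless no-op if pBlob is NULL */
  return rc;
}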
-*/ -SQLITE_PRIVATE int sqlite3BtreeSetVersion(Btree *pBtree, int iVersion){ - BtShared *pBt = pBtree->pBt; - int rc; /* Return code */ - - assert( iVersion==1 || iVersion==2 ); - - /* If setting the version fields to 1, do not automatically open the - ** WAL connection, even if the version fields are currently set to 2. - */ - pBt->btsFlags &= ~BTS_NO_WAL; - if( iVersion==1 ) pBt->btsFlags |= BTS_NO_WAL; - - rc = sqlite3BtreeBeginTrans(pBtree, 0); - if( rc==SQLITE_OK ){ - u8 *aData = pBt->pPage1->aData; - if( aData[18]!=(u8)iVersion || aData[19]!=(u8)iVersion ){ - rc = sqlite3BtreeBeginTrans(pBtree, 2); - if( rc==SQLITE_OK ){ - rc = sqlite3PagerWrite(pBt->pPage1->pDbPage); - if( rc==SQLITE_OK ){ - aData[18] = (u8)iVersion; - aData[19] = (u8)iVersion; - } - } - } - } - - pBt->btsFlags &= ~BTS_NO_WAL; - return rc; -} - -/* -** set the mask of hint flags for cursor pCsr. Currently the only valid -** values are 0 and BTREE_BULKLOAD. -*/ -SQLITE_PRIVATE void sqlite3BtreeCursorHints(BtCursor *pCsr, unsigned int mask){ - assert( mask==BTREE_BULKLOAD || mask==0 ); - pCsr->hints = mask; -} - -/* -** Return true if the given Btree is read-only. -*/ -SQLITE_PRIVATE int sqlite3BtreeIsReadonly(Btree *p){ - return (p->pBt->btsFlags & BTS_READ_ONLY)!=0; -} - -/************** End of btree.c ***********************************************/ -/************** Begin file backup.c ******************************************/ -/* -** 2009 January 28 -** -** The author disclaims copyright to this source code. In place of -** a legal notice, here is a blessing: -** -** May you do good and not evil. -** May you find forgiveness for yourself and forgive others. -** May you share freely, never taking more than you give. -** -************************************************************************* -** This file contains the implementation of the sqlite3_backup_XXX() -** API functions and the related features. -*/ - -/* -** Structure allocated for each backup operation. -*/ -struct sqlite3_backup { - sqlite3* pDestDb; /* Destination database handle */ - Btree *pDest; /* Destination b-tree file */ - u32 iDestSchema; /* Original schema cookie in destination */ - int bDestLocked; /* True once a write-transaction is open on pDest */ - - Pgno iNext; /* Page number of the next source page to copy */ - sqlite3* pSrcDb; /* Source database handle */ - Btree *pSrc; /* Source b-tree file */ - - int rc; /* Backup process error code */ - - /* These two variables are set by every call to backup_step(). They are - ** read by calls to backup_remaining() and backup_pagecount(). - */ - Pgno nRemaining; /* Number of pages left to copy */ - Pgno nPagecount; /* Total number of pages to copy */ - - int isAttached; /* True once backup has been registered with pager */ - sqlite3_backup *pNext; /* Next backup associated with source pager */ -}; - -/* -** THREAD SAFETY NOTES: -** -** Once it has been created using backup_init(), a single sqlite3_backup -** structure may be accessed via two groups of thread-safe entry points: -** -** * Via the sqlite3_backup_XXX() API function backup_step() and -** backup_finish(). Both these functions obtain the source database -** handle mutex and the mutex associated with the source BtShared -** structure, in that order. -** -** * Via the BackupUpdate() and BackupRestart() functions, which are -** invoked by the pager layer to report various state changes in -** the page cache associated with the source database. 
The mutex -** associated with the source database BtShared structure will always -** be held when either of these functions are invoked. -** -** The other sqlite3_backup_XXX() API functions, backup_remaining() and -** backup_pagecount() are not thread-safe functions. If they are called -** while some other thread is calling backup_step() or backup_finish(), -** the values returned may be invalid. There is no way for a call to -** BackupUpdate() or BackupRestart() to interfere with backup_remaining() -** or backup_pagecount(). -** -** Depending on the SQLite configuration, the database handles and/or -** the Btree objects may have their own mutexes that require locking. -** Non-sharable Btrees (in-memory databases for example), do not have -** associated mutexes. -*/ - -/* -** Return a pointer corresponding to database zDb (i.e. "main", "temp") -** in connection handle pDb. If such a database cannot be found, return -** a NULL pointer and write an error message to pErrorDb. -** -** If the "temp" database is requested, it may need to be opened by this -** function. If an error occurs while doing so, return 0 and write an -** error message to pErrorDb. -*/ -static Btree *findBtree(sqlite3 *pErrorDb, sqlite3 *pDb, const char *zDb){ - int i = sqlite3FindDbName(pDb, zDb); - - if( i==1 ){ - Parse *pParse; - int rc = 0; - pParse = sqlite3StackAllocZero(pErrorDb, sizeof(*pParse)); - if( pParse==0 ){ - sqlite3Error(pErrorDb, SQLITE_NOMEM, "out of memory"); - rc = SQLITE_NOMEM; - }else{ - pParse->db = pDb; - if( sqlite3OpenTempDatabase(pParse) ){ - sqlite3Error(pErrorDb, pParse->rc, "%s", pParse->zErrMsg); - rc = SQLITE_ERROR; - } - sqlite3DbFree(pErrorDb, pParse->zErrMsg); - sqlite3ParserReset(pParse); - sqlite3StackFree(pErrorDb, pParse); - } - if( rc ){ - return 0; - } - } - - if( i<0 ){ - sqlite3Error(pErrorDb, SQLITE_ERROR, "unknown database %s", zDb); - return 0; - } - - return pDb->aDb[i].pBt; -} - -/* -** Attempt to set the page size of the destination to match the page size -** of the source. -*/ -static int setDestPgsz(sqlite3_backup *p){ - int rc; - rc = sqlite3BtreeSetPageSize(p->pDest,sqlite3BtreeGetPageSize(p->pSrc),-1,0); - return rc; -} - -/* -** Create an sqlite3_backup process to copy the contents of zSrcDb from -** connection handle pSrcDb to zDestDb in pDestDb. If successful, return -** a pointer to the new sqlite3_backup object. -** -** If an error occurs, NULL is returned and an error code and error message -** stored in database handle pDestDb. -*/ -SQLITE_API sqlite3_backup *sqlite3_backup_init( - sqlite3* pDestDb, /* Database to write to */ - const char *zDestDb, /* Name of database within pDestDb */ - sqlite3* pSrcDb, /* Database connection to read from */ - const char *zSrcDb /* Name of database within pSrcDb */ -){ - sqlite3_backup *p; /* Value to return */ - - /* Lock the source database handle. The destination database - ** handle is not locked in this routine, but it is locked in - ** sqlite3_backup_step(). The user is required to ensure that no - ** other thread accesses the destination handle for the duration - ** of the backup operation. Any attempt to use the destination - ** database connection while a backup is in progress may cause - ** a malfunction or a deadlock. - */ - sqlite3_mutex_enter(pSrcDb->mutex); - sqlite3_mutex_enter(pDestDb->mutex); - - if( pSrcDb==pDestDb ){ - sqlite3Error( - pDestDb, SQLITE_ERROR, "source and destination must be distinct" - ); - p = 0; - }else { - /* Allocate space for a new sqlite3_backup object... 
- ** EVIDENCE-OF: R-64852-21591 The sqlite3_backup object is created by a - ** call to sqlite3_backup_init() and is destroyed by a call to - ** sqlite3_backup_finish(). */ - p = (sqlite3_backup *)sqlite3MallocZero(sizeof(sqlite3_backup)); - if( !p ){ - sqlite3Error(pDestDb, SQLITE_NOMEM, 0); - } - } - - /* If the allocation succeeded, populate the new object. */ - if( p ){ - p->pSrc = findBtree(pDestDb, pSrcDb, zSrcDb); - p->pDest = findBtree(pDestDb, pDestDb, zDestDb); - p->pDestDb = pDestDb; - p->pSrcDb = pSrcDb; - p->iNext = 1; - p->isAttached = 0; - - if( 0==p->pSrc || 0==p->pDest || setDestPgsz(p)==SQLITE_NOMEM ){ - /* One (or both) of the named databases did not exist or an OOM - ** error was hit. The error has already been written into the - ** pDestDb handle. All that is left to do here is free the - ** sqlite3_backup structure. - */ - sqlite3_free(p); - p = 0; - } - } - if( p ){ - p->pSrc->nBackup++; - } - - sqlite3_mutex_leave(pDestDb->mutex); - sqlite3_mutex_leave(pSrcDb->mutex); - return p; -} - -/* -** Argument rc is an SQLite error code. Return true if this error is -** considered fatal if encountered during a backup operation. All errors -** are considered fatal except for SQLITE_BUSY and SQLITE_LOCKED. -*/ -static int isFatalError(int rc){ - return (rc!=SQLITE_OK && rc!=SQLITE_BUSY && ALWAYS(rc!=SQLITE_LOCKED)); -} - -/* -** Parameter zSrcData points to a buffer containing the data for -** page iSrcPg from the source database. Copy this data into the -** destination database. -*/ -static int backupOnePage( - sqlite3_backup *p, /* Backup handle */ - Pgno iSrcPg, /* Source database page to backup */ - const u8 *zSrcData, /* Source database page data */ - int bUpdate /* True for an update, false otherwise */ -){ - Pager * const pDestPager = sqlite3BtreePager(p->pDest); - const int nSrcPgsz = sqlite3BtreeGetPageSize(p->pSrc); - int nDestPgsz = sqlite3BtreeGetPageSize(p->pDest); - const int nCopy = MIN(nSrcPgsz, nDestPgsz); - const i64 iEnd = (i64)iSrcPg*(i64)nSrcPgsz; -#ifdef SQLITE_HAS_CODEC - /* Use BtreeGetReserveNoMutex() for the source b-tree, as although it is - ** guaranteed that the shared-mutex is held by this thread, handle - ** p->pSrc may not actually be the owner. */ - int nSrcReserve = sqlite3BtreeGetReserveNoMutex(p->pSrc); - int nDestReserve = sqlite3BtreeGetReserve(p->pDest); -#endif - int rc = SQLITE_OK; - i64 iOff; - - assert( sqlite3BtreeGetReserveNoMutex(p->pSrc)>=0 ); - assert( p->bDestLocked ); - assert( !isFatalError(p->rc) ); - assert( iSrcPg!=PENDING_BYTE_PAGE(p->pSrc->pBt) ); - assert( zSrcData ); - - /* Catch the case where the destination is an in-memory database and the - ** page sizes of the source and destination differ. - */ - if( nSrcPgsz!=nDestPgsz && sqlite3PagerIsMemdb(pDestPager) ){ - rc = SQLITE_READONLY; - } - -#ifdef SQLITE_HAS_CODEC - /* Backup is not possible if the page size of the destination is changing - ** and a codec is in use. - */ - if( nSrcPgsz!=nDestPgsz && sqlite3PagerGetCodec(pDestPager)!=0 ){ - rc = SQLITE_READONLY; - } - - /* Backup is not possible if the number of bytes of reserve space differ - ** between source and destination. If there is a difference, try to - ** fix the destination to agree with the source. If that is not possible, - ** then the backup cannot proceed. 
- */ - if( nSrcReserve!=nDestReserve ){ - u32 newPgsz = nSrcPgsz; - rc = sqlite3PagerSetPagesize(pDestPager, &newPgsz, nSrcReserve); - if( rc==SQLITE_OK && newPgsz!=nSrcPgsz ) rc = SQLITE_READONLY; - } -#endif - - /* This loop runs once for each destination page spanned by the source - ** page. For each iteration, variable iOff is set to the byte offset - ** of the destination page. - */ - for(iOff=iEnd-(i64)nSrcPgsz; rc==SQLITE_OK && iOffpDest->pBt) ) continue; - if( SQLITE_OK==(rc = sqlite3PagerGet(pDestPager, iDest, &pDestPg)) - && SQLITE_OK==(rc = sqlite3PagerWrite(pDestPg)) - ){ - const u8 *zIn = &zSrcData[iOff%nSrcPgsz]; - u8 *zDestData = sqlite3PagerGetData(pDestPg); - u8 *zOut = &zDestData[iOff%nDestPgsz]; - - /* Copy the data from the source page into the destination page. - ** Then clear the Btree layer MemPage.isInit flag. Both this module - ** and the pager code use this trick (clearing the first byte - ** of the page 'extra' space to invalidate the Btree layers - ** cached parse of the page). MemPage.isInit is marked - ** "MUST BE FIRST" for this purpose. - */ - memcpy(zOut, zIn, nCopy); - ((u8 *)sqlite3PagerGetExtra(pDestPg))[0] = 0; - if( iOff==0 && bUpdate==0 ){ - sqlite3Put4byte(&zOut[28], sqlite3BtreeLastPage(p->pSrc)); - } - } - sqlite3PagerUnref(pDestPg); - } - - return rc; -} - -/* -** If pFile is currently larger than iSize bytes, then truncate it to -** exactly iSize bytes. If pFile is not larger than iSize bytes, then -** this function is a no-op. -** -** Return SQLITE_OK if everything is successful, or an SQLite error -** code if an error occurs. -*/ -static int backupTruncateFile(sqlite3_file *pFile, i64 iSize){ - i64 iCurrent; - int rc = sqlite3OsFileSize(pFile, &iCurrent); - if( rc==SQLITE_OK && iCurrent>iSize ){ - rc = sqlite3OsTruncate(pFile, iSize); - } - return rc; -} - -/* -** Register this backup object with the associated source pager for -** callbacks when pages are changed or the cache invalidated. -*/ -static void attachBackupObject(sqlite3_backup *p){ - sqlite3_backup **pp; - assert( sqlite3BtreeHoldsMutex(p->pSrc) ); - pp = sqlite3PagerBackupPtr(sqlite3BtreePager(p->pSrc)); - p->pNext = *pp; - *pp = p; - p->isAttached = 1; -} - -/* -** Copy nPage pages from the source b-tree to the destination. -*/ -SQLITE_API int sqlite3_backup_step(sqlite3_backup *p, int nPage){ - int rc; - int destMode; /* Destination journal mode */ - int pgszSrc = 0; /* Source page size */ - int pgszDest = 0; /* Destination page size */ - - sqlite3_mutex_enter(p->pSrcDb->mutex); - sqlite3BtreeEnter(p->pSrc); - if( p->pDestDb ){ - sqlite3_mutex_enter(p->pDestDb->mutex); - } - - rc = p->rc; - if( !isFatalError(rc) ){ - Pager * const pSrcPager = sqlite3BtreePager(p->pSrc); /* Source pager */ - Pager * const pDestPager = sqlite3BtreePager(p->pDest); /* Dest pager */ - int ii; /* Iterator variable */ - int nSrcPage = -1; /* Size of source db in pages */ - int bCloseTrans = 0; /* True if src db requires unlocking */ - - /* If the source pager is currently in a write-transaction, return - ** SQLITE_BUSY immediately. - */ - if( p->pDestDb && p->pSrc->pBt->inTransaction==TRANS_WRITE ){ - rc = SQLITE_BUSY; - }else{ - rc = SQLITE_OK; - } - - /* Lock the destination database, if it is not locked already. 
*/ - if( SQLITE_OK==rc && p->bDestLocked==0 - && SQLITE_OK==(rc = sqlite3BtreeBeginTrans(p->pDest, 2)) - ){ - p->bDestLocked = 1; - sqlite3BtreeGetMeta(p->pDest, BTREE_SCHEMA_VERSION, &p->iDestSchema); - } - - /* If there is no open read-transaction on the source database, open - ** one now. If a transaction is opened here, then it will be closed - ** before this function exits. - */ - if( rc==SQLITE_OK && 0==sqlite3BtreeIsInReadTrans(p->pSrc) ){ - rc = sqlite3BtreeBeginTrans(p->pSrc, 0); - bCloseTrans = 1; - } - - /* Do not allow backup if the destination database is in WAL mode - ** and the page sizes are different between source and destination */ - pgszSrc = sqlite3BtreeGetPageSize(p->pSrc); - pgszDest = sqlite3BtreeGetPageSize(p->pDest); - destMode = sqlite3PagerGetJournalMode(sqlite3BtreePager(p->pDest)); - if( SQLITE_OK==rc && destMode==PAGER_JOURNALMODE_WAL && pgszSrc!=pgszDest ){ - rc = SQLITE_READONLY; - } - - /* Now that there is a read-lock on the source database, query the - ** source pager for the number of pages in the database. - */ - nSrcPage = (int)sqlite3BtreeLastPage(p->pSrc); - assert( nSrcPage>=0 ); - for(ii=0; (nPage<0 || iiiNext<=(Pgno)nSrcPage && !rc; ii++){ - const Pgno iSrcPg = p->iNext; /* Source page number */ - if( iSrcPg!=PENDING_BYTE_PAGE(p->pSrc->pBt) ){ - DbPage *pSrcPg; /* Source page object */ - rc = sqlite3PagerAcquire(pSrcPager, iSrcPg, &pSrcPg, - PAGER_GET_READONLY); - if( rc==SQLITE_OK ){ - rc = backupOnePage(p, iSrcPg, sqlite3PagerGetData(pSrcPg), 0); - sqlite3PagerUnref(pSrcPg); - } - } - p->iNext++; - } - if( rc==SQLITE_OK ){ - p->nPagecount = nSrcPage; - p->nRemaining = nSrcPage+1-p->iNext; - if( p->iNext>(Pgno)nSrcPage ){ - rc = SQLITE_DONE; - }else if( !p->isAttached ){ - attachBackupObject(p); - } - } - - /* Update the schema version field in the destination database. This - ** is to make sure that the schema-version really does change in - ** the case where the source and destination databases have the - ** same schema version. - */ - if( rc==SQLITE_DONE ){ - if( nSrcPage==0 ){ - rc = sqlite3BtreeNewDb(p->pDest); - nSrcPage = 1; - } - if( rc==SQLITE_OK || rc==SQLITE_DONE ){ - rc = sqlite3BtreeUpdateMeta(p->pDest,1,p->iDestSchema+1); - } - if( rc==SQLITE_OK ){ - if( p->pDestDb ){ - sqlite3ResetAllSchemasOfConnection(p->pDestDb); - } - if( destMode==PAGER_JOURNALMODE_WAL ){ - rc = sqlite3BtreeSetVersion(p->pDest, 2); - } - } - if( rc==SQLITE_OK ){ - int nDestTruncate; - /* Set nDestTruncate to the final number of pages in the destination - ** database. The complication here is that the destination page - ** size may be different to the source page size. - ** - ** If the source page size is smaller than the destination page size, - ** round up. In this case the call to sqlite3OsTruncate() below will - ** fix the size of the file. However it is important to call - ** sqlite3PagerTruncateImage() here so that any pages in the - ** destination file that lie beyond the nDestTruncate page mark are - ** journalled by PagerCommitPhaseOne() before they are destroyed - ** by the file truncation. 
- */ - assert( pgszSrc==sqlite3BtreeGetPageSize(p->pSrc) ); - assert( pgszDest==sqlite3BtreeGetPageSize(p->pDest) ); - if( pgszSrcpDest->pBt) ){ - nDestTruncate--; - } - }else{ - nDestTruncate = nSrcPage * (pgszSrc/pgszDest); - } - assert( nDestTruncate>0 ); - - if( pgszSrc= iSize || ( - nDestTruncate==(int)(PENDING_BYTE_PAGE(p->pDest->pBt)-1) - && iSize>=PENDING_BYTE && iSize<=PENDING_BYTE+pgszDest - )); - - /* This block ensures that all data required to recreate the original - ** database has been stored in the journal for pDestPager and the - ** journal synced to disk. So at this point we may safely modify - ** the database file in any way, knowing that if a power failure - ** occurs, the original database will be reconstructed from the - ** journal file. */ - sqlite3PagerPagecount(pDestPager, &nDstPage); - for(iPg=nDestTruncate; rc==SQLITE_OK && iPg<=(Pgno)nDstPage; iPg++){ - if( iPg!=PENDING_BYTE_PAGE(p->pDest->pBt) ){ - DbPage *pPg; - rc = sqlite3PagerGet(pDestPager, iPg, &pPg); - if( rc==SQLITE_OK ){ - rc = sqlite3PagerWrite(pPg); - sqlite3PagerUnref(pPg); - } - } - } - if( rc==SQLITE_OK ){ - rc = sqlite3PagerCommitPhaseOne(pDestPager, 0, 1); - } - - /* Write the extra pages and truncate the database file as required */ - iEnd = MIN(PENDING_BYTE + pgszDest, iSize); - for( - iOff=PENDING_BYTE+pgszSrc; - rc==SQLITE_OK && iOffpDest, 0)) - ){ - rc = SQLITE_DONE; - } - } - } - - /* If bCloseTrans is true, then this function opened a read transaction - ** on the source database. Close the read transaction here. There is - ** no need to check the return values of the btree methods here, as - ** "committing" a read-only transaction cannot fail. - */ - if( bCloseTrans ){ - TESTONLY( int rc2 ); - TESTONLY( rc2 = ) sqlite3BtreeCommitPhaseOne(p->pSrc, 0); - TESTONLY( rc2 |= ) sqlite3BtreeCommitPhaseTwo(p->pSrc, 0); - assert( rc2==SQLITE_OK ); - } - - if( rc==SQLITE_IOERR_NOMEM ){ - rc = SQLITE_NOMEM; - } - p->rc = rc; - } - if( p->pDestDb ){ - sqlite3_mutex_leave(p->pDestDb->mutex); - } - sqlite3BtreeLeave(p->pSrc); - sqlite3_mutex_leave(p->pSrcDb->mutex); - return rc; -} - -/* -** Release all resources associated with an sqlite3_backup* handle. -*/ -SQLITE_API int sqlite3_backup_finish(sqlite3_backup *p){ - sqlite3_backup **pp; /* Ptr to head of pagers backup list */ - sqlite3 *pSrcDb; /* Source database connection */ - int rc; /* Value to return */ - - /* Enter the mutexes */ - if( p==0 ) return SQLITE_OK; - pSrcDb = p->pSrcDb; - sqlite3_mutex_enter(pSrcDb->mutex); - sqlite3BtreeEnter(p->pSrc); - if( p->pDestDb ){ - sqlite3_mutex_enter(p->pDestDb->mutex); - } - - /* Detach this backup from the source pager. */ - if( p->pDestDb ){ - p->pSrc->nBackup--; - } - if( p->isAttached ){ - pp = sqlite3PagerBackupPtr(sqlite3BtreePager(p->pSrc)); - while( *pp!=p ){ - pp = &(*pp)->pNext; - } - *pp = p->pNext; - } - - /* If a transaction is still open on the Btree, roll it back. */ - sqlite3BtreeRollback(p->pDest, SQLITE_OK); - - /* Set the error code of the destination database handle. */ - rc = (p->rc==SQLITE_DONE) ? SQLITE_OK : p->rc; - if( p->pDestDb ){ - sqlite3Error(p->pDestDb, rc, 0); - - /* Exit the mutexes and free the backup context structure. */ - sqlite3LeaveMutexAndCloseZombie(p->pDestDb); - } - sqlite3BtreeLeave(p->pSrc); - if( p->pDestDb ){ - /* EVIDENCE-OF: R-64852-21591 The sqlite3_backup object is created by a - ** call to sqlite3_backup_init() and is destroyed by a call to - ** sqlite3_backup_finish(). 
*/ - sqlite3_free(p); - } - sqlite3LeaveMutexAndCloseZombie(pSrcDb); - return rc; -} - -/* -** Return the number of pages still to be backed up as of the most recent -** call to sqlite3_backup_step(). -*/ -SQLITE_API int sqlite3_backup_remaining(sqlite3_backup *p){ - return p->nRemaining; -} - -/* -** Return the total number of pages in the source database as of the most -** recent call to sqlite3_backup_step(). -*/ -SQLITE_API int sqlite3_backup_pagecount(sqlite3_backup *p){ - return p->nPagecount; -} - -/* -** This function is called after the contents of page iPage of the -** source database have been modified. If page iPage has already been -** copied into the destination database, then the data written to the -** destination is now invalidated. The destination copy of iPage needs -** to be updated with the new data before the backup operation is -** complete. -** -** It is assumed that the mutex associated with the BtShared object -** corresponding to the source database is held when this function is -** called. -*/ -SQLITE_PRIVATE void sqlite3BackupUpdate(sqlite3_backup *pBackup, Pgno iPage, const u8 *aData){ - sqlite3_backup *p; /* Iterator variable */ - for(p=pBackup; p; p=p->pNext){ - assert( sqlite3_mutex_held(p->pSrc->pBt->mutex) ); - if( !isFatalError(p->rc) && iPageiNext ){ - /* The backup process p has already copied page iPage. But now it - ** has been modified by a transaction on the source pager. Copy - ** the new data into the backup. - */ - int rc; - assert( p->pDestDb ); - sqlite3_mutex_enter(p->pDestDb->mutex); - rc = backupOnePage(p, iPage, aData, 1); - sqlite3_mutex_leave(p->pDestDb->mutex); - assert( rc!=SQLITE_BUSY && rc!=SQLITE_LOCKED ); - if( rc!=SQLITE_OK ){ - p->rc = rc; - } - } - } -} - -/* -** Restart the backup process. This is called when the pager layer -** detects that the database has been modified by an external database -** connection. In this case there is no way of knowing which of the -** pages that have been copied into the destination database are still -** valid and which are not, so the entire process needs to be restarted. -** -** It is assumed that the mutex associated with the BtShared object -** corresponding to the source database is held when this function is -** called. -*/ -SQLITE_PRIVATE void sqlite3BackupRestart(sqlite3_backup *pBackup){ - sqlite3_backup *p; /* Iterator variable */ - for(p=pBackup; p; p=p->pNext){ - assert( sqlite3_mutex_held(p->pSrc->pBt->mutex) ); - p->iNext = 1; - } -} - -#ifndef SQLITE_OMIT_VACUUM -/* -** Copy the complete content of pBtFrom into pBtTo. A transaction -** must be active for both files. -** -** The size of file pTo may be reduced by this operation. If anything -** goes wrong, the transaction on pTo is rolled back. If successful, the -** transaction is committed before returning. -*/ -SQLITE_PRIVATE int sqlite3BtreeCopyFile(Btree *pTo, Btree *pFrom){ - int rc; - sqlite3_file *pFd; /* File descriptor for database pTo */ - sqlite3_backup b; - sqlite3BtreeEnter(pTo); - sqlite3BtreeEnter(pFrom); - - assert( sqlite3BtreeIsInTrans(pTo) ); - pFd = sqlite3PagerFile(sqlite3BtreePager(pTo)); - if( pFd->pMethods ){ - i64 nByte = sqlite3BtreeGetPageSize(pFrom)*(i64)sqlite3BtreeLastPage(pFrom); - rc = sqlite3OsFileControl(pFd, SQLITE_FCNTL_OVERWRITE, &nByte); - if( rc==SQLITE_NOTFOUND ) rc = SQLITE_OK; - if( rc ) goto copy_finished; - } - - /* Set up an sqlite3_backup object. sqlite3_backup.pDestDb must be set - ** to 0. 
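/* A minimal sketch of driving the backup implementation above through its
** public entry points: copy the "main" database of pFrom into the "main"
** database of pTo a batch of pages at a time, backing off while the source
** is busy. Both connections are assumed to be open already. */
#include "sqlite3.h"

static int demo_backup(sqlite3 *pTo, sqlite3 *pFrom){
  int rc;
  sqlite3_backup *p = sqlite3_backup_init(pTo, "main", pFrom, "main");
  if( p==0 ) return sqlite3_errcode(pTo);
  do{
    rc = sqlite3_backup_step(p, 64);        /* copy up to 64 pages per call */
    /* sqlite3_backup_remaining(p) and sqlite3_backup_pagecount(p) may be
    ** polled here to report progress. */
    if( rc==SQLITE_BUSY || rc==SQLITE_LOCKED ) sqlite3_sleep(250);
  }while( rc==SQLITE_OK || rc==SQLITE_BUSY || rc==SQLITE_LOCKED );
  return sqlite3_backup_finish(p);          /* SQLITE_OK if the copy completed */
}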
This is used by the implementations of sqlite3_backup_step() - ** and sqlite3_backup_finish() to detect that they are being called - ** from this function, not directly by the user. - */ - memset(&b, 0, sizeof(b)); - b.pSrcDb = pFrom->db; - b.pSrc = pFrom; - b.pDest = pTo; - b.iNext = 1; - - /* 0x7FFFFFFF is the hard limit for the number of pages in a database - ** file. By passing this as the number of pages to copy to - ** sqlite3_backup_step(), we can guarantee that the copy finishes - ** within a single call (unless an error occurs). The assert() statement - ** checks this assumption - (p->rc) should be set to either SQLITE_DONE - ** or an error code. - */ - sqlite3_backup_step(&b, 0x7FFFFFFF); - assert( b.rc!=SQLITE_OK ); - rc = sqlite3_backup_finish(&b); - if( rc==SQLITE_OK ){ - pTo->pBt->btsFlags &= ~BTS_PAGESIZE_FIXED; - }else{ - sqlite3PagerClearCache(sqlite3BtreePager(b.pDest)); - } - - assert( sqlite3BtreeIsInTrans(pTo)==0 ); -copy_finished: - sqlite3BtreeLeave(pFrom); - sqlite3BtreeLeave(pTo); - return rc; -} -#endif /* SQLITE_OMIT_VACUUM */ - -/************** End of backup.c **********************************************/ -/************** Begin file vdbemem.c *****************************************/ -/* -** 2004 May 26 -** -** The author disclaims copyright to this source code. In place of -** a legal notice, here is a blessing: -** -** May you do good and not evil. -** May you find forgiveness for yourself and forgive others. -** May you share freely, never taking more than you give. -** -************************************************************************* -** -** This file contains code use to manipulate "Mem" structure. A "Mem" -** stores a single value in the VDBE. Mem is an opaque structure visible -** only within the VDBE. Interface routines refer to a Mem using the -** name sqlite_value -*/ - -#ifdef SQLITE_DEBUG -/* -** Check invariants on a Mem object. -** -** This routine is intended for use inside of assert() statements, like -** this: assert( sqlite3VdbeCheckMemInvariants(pMem) ); -*/ -SQLITE_PRIVATE int sqlite3VdbeCheckMemInvariants(Mem *p){ - /* The MEM_Dyn bit is set if and only if Mem.xDel is a non-NULL destructor - ** function for Mem.z - */ - assert( (p->flags & MEM_Dyn)==0 || p->xDel!=0 ); - assert( (p->flags & MEM_Dyn)!=0 || p->xDel==0 ); - - /* If p holds a string or blob, the Mem.z must point to exactly - ** one of the following: - ** - ** (1) Memory in Mem.zMalloc and managed by the Mem object - ** (2) Memory to be freed using Mem.xDel - ** (3) An ephermal string or blob - ** (4) A static string or blob - */ - if( (p->flags & (MEM_Str|MEM_Blob)) && p->z!=0 ){ - assert( - ((p->z==p->zMalloc)? 1 : 0) + - ((p->flags&MEM_Dyn)!=0 ? 1 : 0) + - ((p->flags&MEM_Ephem)!=0 ? 1 : 0) + - ((p->flags&MEM_Static)!=0 ? 1 : 0) == 1 - ); - } - - return 1; -} -#endif - - -/* -** If pMem is an object with a valid string representation, this routine -** ensures the internal encoding for the string representation is -** 'desiredEnc', one of SQLITE_UTF8, SQLITE_UTF16LE or SQLITE_UTF16BE. -** -** If pMem is not a string object, or the encoding of the string -** representation is already stored using the requested encoding, then this -** routine is a no-op. -** -** SQLITE_OK is returned if the conversion is successful (or not required). -** SQLITE_NOMEM may be returned if a malloc() fails during conversion -** between formats. 
-*/ -SQLITE_PRIVATE int sqlite3VdbeChangeEncoding(Mem *pMem, int desiredEnc){ -#ifndef SQLITE_OMIT_UTF16 - int rc; -#endif - assert( (pMem->flags&MEM_RowSet)==0 ); - assert( desiredEnc==SQLITE_UTF8 || desiredEnc==SQLITE_UTF16LE - || desiredEnc==SQLITE_UTF16BE ); - if( !(pMem->flags&MEM_Str) || pMem->enc==desiredEnc ){ - return SQLITE_OK; - } - assert( pMem->db==0 || sqlite3_mutex_held(pMem->db->mutex) ); -#ifdef SQLITE_OMIT_UTF16 - return SQLITE_ERROR; -#else - - /* MemTranslate() may return SQLITE_OK or SQLITE_NOMEM. If NOMEM is returned, - ** then the encoding of the value may not have changed. - */ - rc = sqlite3VdbeMemTranslate(pMem, (u8)desiredEnc); - assert(rc==SQLITE_OK || rc==SQLITE_NOMEM); - assert(rc==SQLITE_OK || pMem->enc!=desiredEnc); - assert(rc==SQLITE_NOMEM || pMem->enc==desiredEnc); - return rc; -#endif -} - -/* -** Make sure pMem->z points to a writable allocation of at least -** min(n,32) bytes. -** -** If the bPreserve argument is true, then copy of the content of -** pMem->z into the new allocation. pMem must be either a string or -** blob if bPreserve is true. If bPreserve is false, any prior content -** in pMem->z is discarded. -*/ -SQLITE_PRIVATE int sqlite3VdbeMemGrow(Mem *pMem, int n, int bPreserve){ - assert( sqlite3VdbeCheckMemInvariants(pMem) ); - assert( (pMem->flags&MEM_RowSet)==0 ); - - /* If the bPreserve flag is set to true, then the memory cell must already - ** contain a valid string or blob value. */ - assert( bPreserve==0 || pMem->flags&(MEM_Blob|MEM_Str) ); - testcase( bPreserve && pMem->z==0 ); - - if( pMem->zMalloc==0 || sqlite3DbMallocSize(pMem->db, pMem->zMalloc)z==pMem->zMalloc ){ - pMem->z = pMem->zMalloc = sqlite3DbReallocOrFree(pMem->db, pMem->z, n); - bPreserve = 0; - }else{ - sqlite3DbFree(pMem->db, pMem->zMalloc); - pMem->zMalloc = sqlite3DbMallocRaw(pMem->db, n); - } - if( pMem->zMalloc==0 ){ - VdbeMemRelease(pMem); - pMem->z = 0; - pMem->flags = MEM_Null; - return SQLITE_NOMEM; - } - } - - if( pMem->z && bPreserve && pMem->z!=pMem->zMalloc ){ - memcpy(pMem->zMalloc, pMem->z, pMem->n); - } - if( (pMem->flags&MEM_Dyn)!=0 ){ - assert( pMem->xDel!=0 && pMem->xDel!=SQLITE_DYNAMIC ); - pMem->xDel((void *)(pMem->z)); - } - - pMem->z = pMem->zMalloc; - pMem->flags &= ~(MEM_Dyn|MEM_Ephem|MEM_Static); - pMem->xDel = 0; - return SQLITE_OK; -} - -/* -** Make the given Mem object MEM_Dyn. In other words, make it so -** that any TEXT or BLOB content is stored in memory obtained from -** malloc(). In this way, we know that the memory is safe to be -** overwritten or altered. -** -** Return SQLITE_OK on success or SQLITE_NOMEM if malloc fails. -*/ -SQLITE_PRIVATE int sqlite3VdbeMemMakeWriteable(Mem *pMem){ - int f; - assert( pMem->db==0 || sqlite3_mutex_held(pMem->db->mutex) ); - assert( (pMem->flags&MEM_RowSet)==0 ); - ExpandBlob(pMem); - f = pMem->flags; - if( (f&(MEM_Str|MEM_Blob)) && pMem->z!=pMem->zMalloc ){ - if( sqlite3VdbeMemGrow(pMem, pMem->n + 2, 1) ){ - return SQLITE_NOMEM; - } - pMem->z[pMem->n] = 0; - pMem->z[pMem->n+1] = 0; - pMem->flags |= MEM_Term; -#ifdef SQLITE_DEBUG - pMem->pScopyFrom = 0; -#endif - } - - return SQLITE_OK; -} - -/* -** If the given Mem* has a zero-filled tail, turn it into an ordinary -** blob stored in dynamically allocated space. 
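The encoding routines removed above (sqlite3VdbeChangeEncoding and friends) back the public text accessors: requesting a value in a different encoding translates the underlying Mem object in place. A rough sketch of how that surfaces through the column API; the table and column names are made up for illustration.

#include <sqlite3.h>

/* Illustrative only: reading the same column as UTF-8 and then UTF-16
** exercises the encoding-translation path. The second call may re-encode
** the value and can invalidate the pointer returned by the first. */
void read_both_encodings(sqlite3 *db){
  sqlite3_stmt *pStmt = 0;
  /* hypothetical table "t" with a text column "x" */
  if( sqlite3_prepare_v2(db, "SELECT x FROM t", -1, &pStmt, 0)==SQLITE_OK ){
    while( sqlite3_step(pStmt)==SQLITE_ROW ){
      const unsigned char *z8  = sqlite3_column_text(pStmt, 0);   /* UTF-8 view */
      int n8   = sqlite3_column_bytes(pStmt, 0);
      const void *z16 = sqlite3_column_text16(pStmt, 0);          /* UTF-16 view */
      int n16  = sqlite3_column_bytes16(pStmt, 0);
      (void)z8; (void)n8; (void)z16; (void)n16;
    }
  }
  sqlite3_finalize(pStmt);
}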
-*/ -#ifndef SQLITE_OMIT_INCRBLOB -SQLITE_PRIVATE int sqlite3VdbeMemExpandBlob(Mem *pMem){ - if( pMem->flags & MEM_Zero ){ - int nByte; - assert( pMem->flags&MEM_Blob ); - assert( (pMem->flags&MEM_RowSet)==0 ); - assert( pMem->db==0 || sqlite3_mutex_held(pMem->db->mutex) ); - - /* Set nByte to the number of bytes required to store the expanded blob. */ - nByte = pMem->n + pMem->u.nZero; - if( nByte<=0 ){ - nByte = 1; - } - if( sqlite3VdbeMemGrow(pMem, nByte, 1) ){ - return SQLITE_NOMEM; - } - - memset(&pMem->z[pMem->n], 0, pMem->u.nZero); - pMem->n += pMem->u.nZero; - pMem->flags &= ~(MEM_Zero|MEM_Term); - } - return SQLITE_OK; -} -#endif - - -/* -** Make sure the given Mem is \u0000 terminated. -*/ -SQLITE_PRIVATE int sqlite3VdbeMemNulTerminate(Mem *pMem){ - assert( pMem->db==0 || sqlite3_mutex_held(pMem->db->mutex) ); - if( (pMem->flags & MEM_Term)!=0 || (pMem->flags & MEM_Str)==0 ){ - return SQLITE_OK; /* Nothing to do */ - } - if( sqlite3VdbeMemGrow(pMem, pMem->n+2, 1) ){ - return SQLITE_NOMEM; - } - pMem->z[pMem->n] = 0; - pMem->z[pMem->n+1] = 0; - pMem->flags |= MEM_Term; - return SQLITE_OK; -} - -/* -** Add MEM_Str to the set of representations for the given Mem. Numbers -** are converted using sqlite3_snprintf(). Converting a BLOB to a string -** is a no-op. -** -** Existing representations MEM_Int and MEM_Real are *not* invalidated. -** -** A MEM_Null value will never be passed to this function. This function is -** used for converting values to text for returning to the user (i.e. via -** sqlite3_value_text()), or for ensuring that values to be used as btree -** keys are strings. In the former case a NULL pointer is returned the -** user and the later is an internal programming error. -*/ -SQLITE_PRIVATE int sqlite3VdbeMemStringify(Mem *pMem, int enc){ - int rc = SQLITE_OK; - int fg = pMem->flags; - const int nByte = 32; - - assert( pMem->db==0 || sqlite3_mutex_held(pMem->db->mutex) ); - assert( !(fg&MEM_Zero) ); - assert( !(fg&(MEM_Str|MEM_Blob)) ); - assert( fg&(MEM_Int|MEM_Real) ); - assert( (pMem->flags&MEM_RowSet)==0 ); - assert( EIGHT_BYTE_ALIGNMENT(pMem) ); - - - if( sqlite3VdbeMemGrow(pMem, nByte, 0) ){ - return SQLITE_NOMEM; - } - - /* For a Real or Integer, use sqlite3_mprintf() to produce the UTF-8 - ** string representation of the value. Then, if the required encoding - ** is UTF-16le or UTF-16be do a translation. - ** - ** FIX ME: It would be better if sqlite3_snprintf() could do UTF-16. - */ - if( fg & MEM_Int ){ - sqlite3_snprintf(nByte, pMem->z, "%lld", pMem->u.i); - }else{ - assert( fg & MEM_Real ); - sqlite3_snprintf(nByte, pMem->z, "%!.15g", pMem->r); - } - pMem->n = sqlite3Strlen30(pMem->z); - pMem->enc = SQLITE_UTF8; - pMem->flags |= MEM_Str|MEM_Term; - sqlite3VdbeChangeEncoding(pMem, enc); - return rc; -} - -/* -** Memory cell pMem contains the context of an aggregate function. -** This routine calls the finalize method for that function. The -** result of the aggregate is stored back into pMem. -** -** Return SQLITE_ERROR if the finalizer reports an error. SQLITE_OK -** otherwise. 
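The finalize path described above is what ultimately invokes the xFinal callback of a user-defined aggregate. As a hedged illustration, here is a tiny aggregate whose xFinal would be reached through that machinery; the function name sum64 and its registration are assumptions, not anything defined in this repository.

#include <sqlite3.h>

/* Step callback: accumulate into per-aggregate storage. */
static void sum64Step(sqlite3_context *ctx, int argc, sqlite3_value **argv){
  sqlite3_int64 *pSum = sqlite3_aggregate_context(ctx, sizeof(*pSum));
  if( pSum && argc==1 ) *pSum += sqlite3_value_int64(argv[0]);
}
/* Final callback: publish the accumulated result. */
static void sum64Final(sqlite3_context *ctx){
  sqlite3_int64 *pSum = sqlite3_aggregate_context(ctx, 0);
  sqlite3_result_int64(ctx, pSum ? *pSum : 0);
}
/* Registration (illustrative):
**   sqlite3_create_function(db, "sum64", 1, SQLITE_UTF8, 0,
**                           0, sum64Step, sum64Final);            */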
-*/ -SQLITE_PRIVATE int sqlite3VdbeMemFinalize(Mem *pMem, FuncDef *pFunc){ - int rc = SQLITE_OK; - if( ALWAYS(pFunc && pFunc->xFinalize) ){ - sqlite3_context ctx; - assert( (pMem->flags & MEM_Null)!=0 || pFunc==pMem->u.pDef ); - assert( pMem->db==0 || sqlite3_mutex_held(pMem->db->mutex) ); - memset(&ctx, 0, sizeof(ctx)); - ctx.s.flags = MEM_Null; - ctx.s.db = pMem->db; - ctx.pMem = pMem; - ctx.pFunc = pFunc; - pFunc->xFinalize(&ctx); /* IMP: R-24505-23230 */ - assert( 0==(pMem->flags&MEM_Dyn) && !pMem->xDel ); - sqlite3DbFree(pMem->db, pMem->zMalloc); - memcpy(pMem, &ctx.s, sizeof(ctx.s)); - rc = ctx.isError; - } - return rc; -} - -/* -** If the memory cell contains a string value that must be freed by -** invoking an external callback, free it now. Calling this function -** does not free any Mem.zMalloc buffer. -*/ -SQLITE_PRIVATE void sqlite3VdbeMemReleaseExternal(Mem *p){ - assert( p->db==0 || sqlite3_mutex_held(p->db->mutex) ); - if( p->flags&MEM_Agg ){ - sqlite3VdbeMemFinalize(p, p->u.pDef); - assert( (p->flags & MEM_Agg)==0 ); - sqlite3VdbeMemRelease(p); - }else if( p->flags&MEM_Dyn ){ - assert( (p->flags&MEM_RowSet)==0 ); - assert( p->xDel!=SQLITE_DYNAMIC && p->xDel!=0 ); - p->xDel((void *)p->z); - p->xDel = 0; - }else if( p->flags&MEM_RowSet ){ - sqlite3RowSetClear(p->u.pRowSet); - }else if( p->flags&MEM_Frame ){ - sqlite3VdbeMemSetNull(p); - } -} - -/* -** Release any memory held by the Mem. This may leave the Mem in an -** inconsistent state, for example with (Mem.z==0) and -** (Mem.flags==MEM_Str). -*/ -SQLITE_PRIVATE void sqlite3VdbeMemRelease(Mem *p){ - assert( sqlite3VdbeCheckMemInvariants(p) ); - VdbeMemRelease(p); - if( p->zMalloc ){ - sqlite3DbFree(p->db, p->zMalloc); - p->zMalloc = 0; - } - p->z = 0; - assert( p->xDel==0 ); /* Zeroed by VdbeMemRelease() above */ -} - -/* -** Convert a 64-bit IEEE double into a 64-bit signed integer. -** If the double is out of range of a 64-bit signed integer then -** return the closest available 64-bit signed integer. -*/ -static i64 doubleToInt64(double r){ -#ifdef SQLITE_OMIT_FLOATING_POINT - /* When floating-point is omitted, double and int64 are the same thing */ - return r; -#else - /* - ** Many compilers we encounter do not define constants for the - ** minimum and maximum 64-bit integers, or they define them - ** inconsistently. And many do not understand the "LL" notation. - ** So we define our own static constants here using nothing - ** larger than a 32-bit integer constant. - */ - static const i64 maxInt = LARGEST_INT64; - static const i64 minInt = SMALLEST_INT64; - - if( r<=(double)minInt ){ - return minInt; - }else if( r>=(double)maxInt ){ - return maxInt; - }else{ - return (i64)r; - } -#endif -} - -/* -** Return some kind of integer value which is the best we can do -** at representing the value that *pMem describes as an integer. -** If pMem is an integer, then the value is exact. If pMem is -** a floating-point then the value returned is the integer part. -** If pMem is a string or blob, then we make an attempt to convert -** it into a integer and return that. If pMem represents an -** an SQL-NULL value, return 0. -** -** If pMem represents a string value, its encoding might be changed. 
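doubleToInt64() above clamps out-of-range doubles to the nearest representable 64-bit integer instead of relying on the undefined behaviour of a plain cast. A standalone sketch of the same clamping idea, using the stdint.h limits rather than SQLite's hand-built constants:

#include <stdint.h>

/* Sketch of the clamping idea (not the original code): out-of-range
** doubles map to INT64_MIN/INT64_MAX rather than invoking undefined
** conversion behaviour. */
static int64_t clamp_to_i64(double r){
  if( r <= (double)INT64_MIN ) return INT64_MIN;
  if( r >= (double)INT64_MAX ) return INT64_MAX;  /* 2^63-1 rounds up to 2^63 as a double */
  return (int64_t)r;
}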
-*/ -SQLITE_PRIVATE i64 sqlite3VdbeIntValue(Mem *pMem){ - int flags; - assert( pMem->db==0 || sqlite3_mutex_held(pMem->db->mutex) ); - assert( EIGHT_BYTE_ALIGNMENT(pMem) ); - flags = pMem->flags; - if( flags & MEM_Int ){ - return pMem->u.i; - }else if( flags & MEM_Real ){ - return doubleToInt64(pMem->r); - }else if( flags & (MEM_Str|MEM_Blob) ){ - i64 value = 0; - assert( pMem->z || pMem->n==0 ); - testcase( pMem->z==0 ); - sqlite3Atoi64(pMem->z, &value, pMem->n, pMem->enc); - return value; - }else{ - return 0; - } -} - -/* -** Return the best representation of pMem that we can get into a -** double. If pMem is already a double or an integer, return its -** value. If it is a string or blob, try to convert it to a double. -** If it is a NULL, return 0.0. -*/ -SQLITE_PRIVATE double sqlite3VdbeRealValue(Mem *pMem){ - assert( pMem->db==0 || sqlite3_mutex_held(pMem->db->mutex) ); - assert( EIGHT_BYTE_ALIGNMENT(pMem) ); - if( pMem->flags & MEM_Real ){ - return pMem->r; - }else if( pMem->flags & MEM_Int ){ - return (double)pMem->u.i; - }else if( pMem->flags & (MEM_Str|MEM_Blob) ){ - /* (double)0 In case of SQLITE_OMIT_FLOATING_POINT... */ - double val = (double)0; - sqlite3AtoF(pMem->z, &val, pMem->n, pMem->enc); - return val; - }else{ - /* (double)0 In case of SQLITE_OMIT_FLOATING_POINT... */ - return (double)0; - } -} - -/* -** The MEM structure is already a MEM_Real. Try to also make it a -** MEM_Int if we can. -*/ -SQLITE_PRIVATE void sqlite3VdbeIntegerAffinity(Mem *pMem){ - assert( pMem->flags & MEM_Real ); - assert( (pMem->flags & MEM_RowSet)==0 ); - assert( pMem->db==0 || sqlite3_mutex_held(pMem->db->mutex) ); - assert( EIGHT_BYTE_ALIGNMENT(pMem) ); - - pMem->u.i = doubleToInt64(pMem->r); - - /* Only mark the value as an integer if - ** - ** (1) the round-trip conversion real->int->real is a no-op, and - ** (2) The integer is neither the largest nor the smallest - ** possible integer (ticket #3922) - ** - ** The second and third terms in the following conditional enforces - ** the second condition under the assumption that addition overflow causes - ** values to wrap around. - */ - if( pMem->r==(double)pMem->u.i - && pMem->u.i>SMALLEST_INT64 - && pMem->u.iflags |= MEM_Int; - } -} - -/* -** Convert pMem to type integer. Invalidate any prior representations. -*/ -SQLITE_PRIVATE int sqlite3VdbeMemIntegerify(Mem *pMem){ - assert( pMem->db==0 || sqlite3_mutex_held(pMem->db->mutex) ); - assert( (pMem->flags & MEM_RowSet)==0 ); - assert( EIGHT_BYTE_ALIGNMENT(pMem) ); - - pMem->u.i = sqlite3VdbeIntValue(pMem); - MemSetTypeFlag(pMem, MEM_Int); - return SQLITE_OK; -} - -/* -** Convert pMem so that it is of type MEM_Real. -** Invalidate any prior representations. -*/ -SQLITE_PRIVATE int sqlite3VdbeMemRealify(Mem *pMem){ - assert( pMem->db==0 || sqlite3_mutex_held(pMem->db->mutex) ); - assert( EIGHT_BYTE_ALIGNMENT(pMem) ); - - pMem->r = sqlite3VdbeRealValue(pMem); - MemSetTypeFlag(pMem, MEM_Real); - return SQLITE_OK; -} - -/* -** Convert pMem so that it has types MEM_Real or MEM_Int or both. -** Invalidate any prior representations. -** -** Every effort is made to force the conversion, even if the input -** is a string that does not look completely like a number. Convert -** as much of the string as we can and ignore the rest. 
-*/ -SQLITE_PRIVATE int sqlite3VdbeMemNumerify(Mem *pMem){ - if( (pMem->flags & (MEM_Int|MEM_Real|MEM_Null))==0 ){ - assert( (pMem->flags & (MEM_Blob|MEM_Str))!=0 ); - assert( pMem->db==0 || sqlite3_mutex_held(pMem->db->mutex) ); - if( 0==sqlite3Atoi64(pMem->z, &pMem->u.i, pMem->n, pMem->enc) ){ - MemSetTypeFlag(pMem, MEM_Int); - }else{ - pMem->r = sqlite3VdbeRealValue(pMem); - MemSetTypeFlag(pMem, MEM_Real); - sqlite3VdbeIntegerAffinity(pMem); - } - } - assert( (pMem->flags & (MEM_Int|MEM_Real|MEM_Null))!=0 ); - pMem->flags &= ~(MEM_Str|MEM_Blob); - return SQLITE_OK; -} - -/* -** Delete any previous value and set the value stored in *pMem to NULL. -*/ -SQLITE_PRIVATE void sqlite3VdbeMemSetNull(Mem *pMem){ - if( pMem->flags & MEM_Frame ){ - VdbeFrame *pFrame = pMem->u.pFrame; - pFrame->pParent = pFrame->v->pDelFrame; - pFrame->v->pDelFrame = pFrame; - } - if( pMem->flags & MEM_RowSet ){ - sqlite3RowSetClear(pMem->u.pRowSet); - } - MemSetTypeFlag(pMem, MEM_Null); -} -SQLITE_PRIVATE void sqlite3ValueSetNull(sqlite3_value *p){ - sqlite3VdbeMemSetNull((Mem*)p); -} - -/* -** Delete any previous value and set the value to be a BLOB of length -** n containing all zeros. -*/ -SQLITE_PRIVATE void sqlite3VdbeMemSetZeroBlob(Mem *pMem, int n){ - sqlite3VdbeMemRelease(pMem); - pMem->flags = MEM_Blob|MEM_Zero; - pMem->n = 0; - if( n<0 ) n = 0; - pMem->u.nZero = n; - pMem->enc = SQLITE_UTF8; - -#ifdef SQLITE_OMIT_INCRBLOB - sqlite3VdbeMemGrow(pMem, n, 0); - if( pMem->z ){ - pMem->n = n; - memset(pMem->z, 0, n); - } -#endif -} - -/* -** Delete any previous value and set the value stored in *pMem to val, -** manifest type INTEGER. -*/ -SQLITE_PRIVATE void sqlite3VdbeMemSetInt64(Mem *pMem, i64 val){ - sqlite3VdbeMemRelease(pMem); - pMem->u.i = val; - pMem->flags = MEM_Int; -} - -#ifndef SQLITE_OMIT_FLOATING_POINT -/* -** Delete any previous value and set the value stored in *pMem to val, -** manifest type REAL. -*/ -SQLITE_PRIVATE void sqlite3VdbeMemSetDouble(Mem *pMem, double val){ - if( sqlite3IsNaN(val) ){ - sqlite3VdbeMemSetNull(pMem); - }else{ - sqlite3VdbeMemRelease(pMem); - pMem->r = val; - pMem->flags = MEM_Real; - } -} -#endif - -/* -** Delete any previous value and set the value of pMem to be an -** empty boolean index. -*/ -SQLITE_PRIVATE void sqlite3VdbeMemSetRowSet(Mem *pMem){ - sqlite3 *db = pMem->db; - assert( db!=0 ); - assert( (pMem->flags & MEM_RowSet)==0 ); - sqlite3VdbeMemRelease(pMem); - pMem->zMalloc = sqlite3DbMallocRaw(db, 64); - if( db->mallocFailed ){ - pMem->flags = MEM_Null; - }else{ - assert( pMem->zMalloc ); - pMem->u.pRowSet = sqlite3RowSetInit(db, pMem->zMalloc, - sqlite3DbMallocSize(db, pMem->zMalloc)); - assert( pMem->u.pRowSet!=0 ); - pMem->flags = MEM_RowSet; - } -} - -/* -** Return true if the Mem object contains a TEXT or BLOB that is -** too large - whose size exceeds SQLITE_MAX_LENGTH. -*/ -SQLITE_PRIVATE int sqlite3VdbeMemTooBig(Mem *p){ - assert( p->db!=0 ); - if( p->flags & (MEM_Str|MEM_Blob) ){ - int n = p->n; - if( p->flags & MEM_Zero ){ - n += p->u.nZero; - } - return n>p->db->aLimit[SQLITE_LIMIT_LENGTH]; - } - return 0; -} - -#ifdef SQLITE_DEBUG -/* -** This routine prepares a memory cell for modication by breaking -** its link to a shallow copy and by marking any current shallow -** copies of this cell as invalid. -** -** This is used for testing and debugging only - to make sure shallow -** copies are not misused. 
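sqlite3VdbeMemSetZeroBlob() above is the internal half of SQLite's zero-filled blob optimisation; from application code the same effect is reached with sqlite3_bind_zeroblob() (or the zeroblob(N) SQL function). A minimal sketch, with a hypothetical files(data) table:

#include <sqlite3.h>

/* Reserve a 1 MiB zero-filled blob without materialising 1 MiB in memory.
** Table "files" and column "data" are illustrative assumptions. */
int insert_placeholder_blob(sqlite3 *db){
  sqlite3_stmt *pStmt = 0;
  int rc = sqlite3_prepare_v2(db,
      "INSERT INTO files(data) VALUES(?1)", -1, &pStmt, 0);
  if( rc==SQLITE_OK ){
    rc = sqlite3_bind_zeroblob(pStmt, 1, 1024*1024);
    if( rc==SQLITE_OK ){
      rc = (sqlite3_step(pStmt)==SQLITE_DONE) ? SQLITE_OK : SQLITE_ERROR;
    }
  }
  sqlite3_finalize(pStmt);
  return rc;
}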
-*/ -SQLITE_PRIVATE void sqlite3VdbeMemAboutToChange(Vdbe *pVdbe, Mem *pMem){ - int i; - Mem *pX; - for(i=1, pX=&pVdbe->aMem[1]; i<=pVdbe->nMem; i++, pX++){ - if( pX->pScopyFrom==pMem ){ - pX->flags |= MEM_Undefined; - pX->pScopyFrom = 0; - } - } - pMem->pScopyFrom = 0; -} -#endif /* SQLITE_DEBUG */ - -/* -** Size of struct Mem not including the Mem.zMalloc member. -*/ -#define MEMCELLSIZE offsetof(Mem,zMalloc) - -/* -** Make an shallow copy of pFrom into pTo. Prior contents of -** pTo are freed. The pFrom->z field is not duplicated. If -** pFrom->z is used, then pTo->z points to the same thing as pFrom->z -** and flags gets srcType (either MEM_Ephem or MEM_Static). -*/ -SQLITE_PRIVATE void sqlite3VdbeMemShallowCopy(Mem *pTo, const Mem *pFrom, int srcType){ - assert( (pFrom->flags & MEM_RowSet)==0 ); - VdbeMemRelease(pTo); - memcpy(pTo, pFrom, MEMCELLSIZE); - pTo->xDel = 0; - if( (pFrom->flags&MEM_Static)==0 ){ - pTo->flags &= ~(MEM_Dyn|MEM_Static|MEM_Ephem); - assert( srcType==MEM_Ephem || srcType==MEM_Static ); - pTo->flags |= srcType; - } -} - -/* -** Make a full copy of pFrom into pTo. Prior contents of pTo are -** freed before the copy is made. -*/ -SQLITE_PRIVATE int sqlite3VdbeMemCopy(Mem *pTo, const Mem *pFrom){ - int rc = SQLITE_OK; - - assert( (pFrom->flags & MEM_RowSet)==0 ); - VdbeMemRelease(pTo); - memcpy(pTo, pFrom, MEMCELLSIZE); - pTo->flags &= ~MEM_Dyn; - pTo->xDel = 0; - - if( pTo->flags&(MEM_Str|MEM_Blob) ){ - if( 0==(pFrom->flags&MEM_Static) ){ - pTo->flags |= MEM_Ephem; - rc = sqlite3VdbeMemMakeWriteable(pTo); - } - } - - return rc; -} - -/* -** Transfer the contents of pFrom to pTo. Any existing value in pTo is -** freed. If pFrom contains ephemeral data, a copy is made. -** -** pFrom contains an SQL NULL when this routine returns. -*/ -SQLITE_PRIVATE void sqlite3VdbeMemMove(Mem *pTo, Mem *pFrom){ - assert( pFrom->db==0 || sqlite3_mutex_held(pFrom->db->mutex) ); - assert( pTo->db==0 || sqlite3_mutex_held(pTo->db->mutex) ); - assert( pFrom->db==0 || pTo->db==0 || pFrom->db==pTo->db ); - - sqlite3VdbeMemRelease(pTo); - memcpy(pTo, pFrom, sizeof(Mem)); - pFrom->flags = MEM_Null; - pFrom->xDel = 0; - pFrom->zMalloc = 0; -} - -/* -** Change the value of a Mem to be a string or a BLOB. -** -** The memory management strategy depends on the value of the xDel -** parameter. If the value passed is SQLITE_TRANSIENT, then the -** string is copied into a (possibly existing) buffer managed by the -** Mem structure. Otherwise, any existing buffer is freed and the -** pointer copied. -** -** If the string is too large (if it exceeds the SQLITE_LIMIT_LENGTH -** size limit) then no memory allocation occurs. If the string can be -** stored without allocating memory, then it is. If a memory allocation -** is required to store the string, then value of pMem is unchanged. In -** either case, SQLITE_TOOBIG is returned. -*/ -SQLITE_PRIVATE int sqlite3VdbeMemSetStr( - Mem *pMem, /* Memory cell to set to string value */ - const char *z, /* String pointer */ - int n, /* Bytes in string, or negative */ - u8 enc, /* Encoding of z. 0 for BLOBs */ - void (*xDel)(void*) /* Destructor function */ -){ - int nByte = n; /* New value for pMem->n */ - int iLimit; /* Maximum allowed string or blob size */ - u16 flags = 0; /* New value for pMem->flags */ - - assert( pMem->db==0 || sqlite3_mutex_held(pMem->db->mutex) ); - assert( (pMem->flags & MEM_RowSet)==0 ); - - /* If z is a NULL pointer, set pMem to contain an SQL NULL. 
*/ - if( !z ){ - sqlite3VdbeMemSetNull(pMem); - return SQLITE_OK; - } - - if( pMem->db ){ - iLimit = pMem->db->aLimit[SQLITE_LIMIT_LENGTH]; - }else{ - iLimit = SQLITE_MAX_LENGTH; - } - flags = (enc==0?MEM_Blob:MEM_Str); - if( nByte<0 ){ - assert( enc!=0 ); - if( enc==SQLITE_UTF8 ){ - for(nByte=0; nByte<=iLimit && z[nByte]; nByte++){} - }else{ - for(nByte=0; nByte<=iLimit && (z[nByte] | z[nByte+1]); nByte+=2){} - } - flags |= MEM_Term; - } - - /* The following block sets the new values of Mem.z and Mem.xDel. It - ** also sets a flag in local variable "flags" to indicate the memory - ** management (one of MEM_Dyn or MEM_Static). - */ - if( xDel==SQLITE_TRANSIENT ){ - int nAlloc = nByte; - if( flags&MEM_Term ){ - nAlloc += (enc==SQLITE_UTF8?1:2); - } - if( nByte>iLimit ){ - return SQLITE_TOOBIG; - } - if( sqlite3VdbeMemGrow(pMem, nAlloc, 0) ){ - return SQLITE_NOMEM; - } - memcpy(pMem->z, z, nAlloc); - }else if( xDel==SQLITE_DYNAMIC ){ - sqlite3VdbeMemRelease(pMem); - pMem->zMalloc = pMem->z = (char *)z; - pMem->xDel = 0; - }else{ - sqlite3VdbeMemRelease(pMem); - pMem->z = (char *)z; - pMem->xDel = xDel; - flags |= ((xDel==SQLITE_STATIC)?MEM_Static:MEM_Dyn); - } - - pMem->n = nByte; - pMem->flags = flags; - pMem->enc = (enc==0 ? SQLITE_UTF8 : enc); - -#ifndef SQLITE_OMIT_UTF16 - if( pMem->enc!=SQLITE_UTF8 && sqlite3VdbeMemHandleBom(pMem) ){ - return SQLITE_NOMEM; - } -#endif - - if( nByte>iLimit ){ - return SQLITE_TOOBIG; - } - - return SQLITE_OK; -} - -/* -** Move data out of a btree key or data field and into a Mem structure. -** The data or key is taken from the entry that pCur is currently pointing -** to. offset and amt determine what portion of the data or key to retrieve. -** key is true to get the key or false to get data. The result is written -** into the pMem element. -** -** The pMem structure is assumed to be uninitialized. Any prior content -** is overwritten without being freed. -** -** If this routine fails for any reason (malloc returns NULL or unable -** to read from the disk) then the pMem is left in an inconsistent state. -*/ -SQLITE_PRIVATE int sqlite3VdbeMemFromBtree( - BtCursor *pCur, /* Cursor pointing at record to retrieve. */ - u32 offset, /* Offset from the start of data to return bytes from. */ - u32 amt, /* Number of bytes to return. */ - int key, /* If true, retrieve from the btree key, not data. */ - Mem *pMem /* OUT: Return data in this Mem structure. */ -){ - char *zData; /* Data from the btree layer */ - u32 available = 0; /* Number of bytes available on the local btree page */ - int rc = SQLITE_OK; /* Return code */ - - assert( sqlite3BtreeCursorIsValid(pCur) ); - - /* Note: the calls to BtreeKeyFetch() and DataFetch() below assert() - ** that both the BtShared and database handle mutexes are held. 
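The xDel handling in sqlite3VdbeMemSetStr() above is what gives SQLITE_STATIC, SQLITE_TRANSIENT, and explicit destructors their different ownership semantics in the binding API. A short sketch of the three cases; the parameter index and buffer contents are illustrative only.

#include <sqlite3.h>
#include <stdlib.h>
#include <string.h>

/* Illustrative binding of the same parameter three ways; which one is
** appropriate depends on who owns the buffer and for how long. */
void bind_examples(sqlite3_stmt *pStmt){
  static const char zStatic[] = "lives for the whole program";
  char zStack[32];
  char *zHeap = malloc(6);

  /* 1. SQLITE_STATIC: SQLite keeps the pointer; caller guarantees lifetime. */
  sqlite3_bind_text(pStmt, 1, zStatic, -1, SQLITE_STATIC);

  /* 2. SQLITE_TRANSIENT: SQLite makes its own private copy immediately. */
  memcpy(zStack, "short", 6);
  sqlite3_bind_text(pStmt, 1, zStack, -1, SQLITE_TRANSIENT);

  /* 3. Explicit destructor: SQLite takes ownership and calls free() later. */
  if( zHeap ){
    memcpy(zHeap, "owned", 6);
    sqlite3_bind_text(pStmt, 1, zHeap, -1, free);
  }
}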
*/ - assert( (pMem->flags & MEM_RowSet)==0 ); - if( key ){ - zData = (char *)sqlite3BtreeKeyFetch(pCur, &available); - }else{ - zData = (char *)sqlite3BtreeDataFetch(pCur, &available); - } - assert( zData!=0 ); - - if( offset+amt<=available ){ - sqlite3VdbeMemRelease(pMem); - pMem->z = &zData[offset]; - pMem->flags = MEM_Blob|MEM_Ephem; - pMem->n = (int)amt; - }else if( SQLITE_OK==(rc = sqlite3VdbeMemGrow(pMem, amt+2, 0)) ){ - if( key ){ - rc = sqlite3BtreeKey(pCur, offset, amt, pMem->z); - }else{ - rc = sqlite3BtreeData(pCur, offset, amt, pMem->z); - } - if( rc==SQLITE_OK ){ - pMem->z[amt] = 0; - pMem->z[amt+1] = 0; - pMem->flags = MEM_Blob|MEM_Term; - pMem->n = (int)amt; - }else{ - sqlite3VdbeMemRelease(pMem); - } - } - - return rc; -} - -/* This function is only available internally, it is not part of the -** external API. It works in a similar way to sqlite3_value_text(), -** except the data returned is in the encoding specified by the second -** parameter, which must be one of SQLITE_UTF16BE, SQLITE_UTF16LE or -** SQLITE_UTF8. -** -** (2006-02-16:) The enc value can be or-ed with SQLITE_UTF16_ALIGNED. -** If that is the case, then the result must be aligned on an even byte -** boundary. -*/ -SQLITE_PRIVATE const void *sqlite3ValueText(sqlite3_value* pVal, u8 enc){ - if( !pVal ) return 0; - - assert( pVal->db==0 || sqlite3_mutex_held(pVal->db->mutex) ); - assert( (enc&3)==(enc&~SQLITE_UTF16_ALIGNED) ); - assert( (pVal->flags & MEM_RowSet)==0 ); - - if( pVal->flags&MEM_Null ){ - return 0; - } - assert( (MEM_Blob>>3) == MEM_Str ); - pVal->flags |= (pVal->flags & MEM_Blob)>>3; - ExpandBlob(pVal); - if( pVal->flags&MEM_Str ){ - sqlite3VdbeChangeEncoding(pVal, enc & ~SQLITE_UTF16_ALIGNED); - if( (enc & SQLITE_UTF16_ALIGNED)!=0 && 1==(1&SQLITE_PTR_TO_INT(pVal->z)) ){ - assert( (pVal->flags & (MEM_Ephem|MEM_Static))!=0 ); - if( sqlite3VdbeMemMakeWriteable(pVal)!=SQLITE_OK ){ - return 0; - } - } - sqlite3VdbeMemNulTerminate(pVal); /* IMP: R-31275-44060 */ - }else{ - assert( (pVal->flags&MEM_Blob)==0 ); - sqlite3VdbeMemStringify(pVal, enc); - assert( 0==(1&SQLITE_PTR_TO_INT(pVal->z)) ); - } - assert(pVal->enc==(enc & ~SQLITE_UTF16_ALIGNED) || pVal->db==0 - || pVal->db->mallocFailed ); - if( pVal->enc==(enc & ~SQLITE_UTF16_ALIGNED) ){ - return pVal->z; - }else{ - return 0; - } -} - -/* -** Create a new sqlite3_value object. -*/ -SQLITE_PRIVATE sqlite3_value *sqlite3ValueNew(sqlite3 *db){ - Mem *p = sqlite3DbMallocZero(db, sizeof(*p)); - if( p ){ - p->flags = MEM_Null; - p->db = db; - } - return p; -} - -/* -** Context object passed by sqlite3Stat4ProbeSetValue() through to -** valueNew(). See comments above valueNew() for details. -*/ -struct ValueNewStat4Ctx { - Parse *pParse; - Index *pIdx; - UnpackedRecord **ppRec; - int iVal; -}; - -/* -** Allocate and return a pointer to a new sqlite3_value object. If -** the second argument to this function is NULL, the object is allocated -** by calling sqlite3ValueNew(). -** -** Otherwise, if the second argument is non-zero, then this function is -** being called indirectly by sqlite3Stat4ProbeSetValue(). If it has not -** already been allocated, allocate the UnpackedRecord structure that -** that function will return to its caller here. Then return a pointer -** an sqlite3_value within the UnpackedRecord.a[] array. 
-*/ -static sqlite3_value *valueNew(sqlite3 *db, struct ValueNewStat4Ctx *p){ -#ifdef SQLITE_ENABLE_STAT3_OR_STAT4 - if( p ){ - UnpackedRecord *pRec = p->ppRec[0]; - - if( pRec==0 ){ - Index *pIdx = p->pIdx; /* Index being probed */ - int nByte; /* Bytes of space to allocate */ - int i; /* Counter variable */ - int nCol = pIdx->nColumn; /* Number of index columns including rowid */ - - nByte = sizeof(Mem) * nCol + ROUND8(sizeof(UnpackedRecord)); - pRec = (UnpackedRecord*)sqlite3DbMallocZero(db, nByte); - if( pRec ){ - pRec->pKeyInfo = sqlite3KeyInfoOfIndex(p->pParse, pIdx); - if( pRec->pKeyInfo ){ - assert( pRec->pKeyInfo->nField+pRec->pKeyInfo->nXField==nCol ); - assert( pRec->pKeyInfo->enc==ENC(db) ); - pRec->aMem = (Mem *)((u8*)pRec + ROUND8(sizeof(UnpackedRecord))); - for(i=0; iaMem[i].flags = MEM_Null; - pRec->aMem[i].db = db; - } - }else{ - sqlite3DbFree(db, pRec); - pRec = 0; - } - } - if( pRec==0 ) return 0; - p->ppRec[0] = pRec; - } - - pRec->nField = p->iVal+1; - return &pRec->aMem[p->iVal]; - } -#else - UNUSED_PARAMETER(p); -#endif /* defined(SQLITE_ENABLE_STAT3_OR_STAT4) */ - return sqlite3ValueNew(db); -} - -/* -** Extract a value from the supplied expression in the manner described -** above sqlite3ValueFromExpr(). Allocate the sqlite3_value object -** using valueNew(). -** -** If pCtx is NULL and an error occurs after the sqlite3_value object -** has been allocated, it is freed before returning. Or, if pCtx is not -** NULL, it is assumed that the caller will free any allocated object -** in all cases. -*/ -static int valueFromExpr( - sqlite3 *db, /* The database connection */ - Expr *pExpr, /* The expression to evaluate */ - u8 enc, /* Encoding to use */ - u8 affinity, /* Affinity to use */ - sqlite3_value **ppVal, /* Write the new value here */ - struct ValueNewStat4Ctx *pCtx /* Second argument for valueNew() */ -){ - int op; - char *zVal = 0; - sqlite3_value *pVal = 0; - int negInt = 1; - const char *zNeg = ""; - int rc = SQLITE_OK; - - if( !pExpr ){ - *ppVal = 0; - return SQLITE_OK; - } - op = pExpr->op; - if( NEVER(op==TK_REGISTER) ) op = pExpr->op2; - - /* Handle negative integers in a single step. This is needed in the - ** case when the value is -9223372036854775808. - */ - if( op==TK_UMINUS - && (pExpr->pLeft->op==TK_INTEGER || pExpr->pLeft->op==TK_FLOAT) ){ - pExpr = pExpr->pLeft; - op = pExpr->op; - negInt = -1; - zNeg = "-"; - } - - if( op==TK_STRING || op==TK_FLOAT || op==TK_INTEGER ){ - pVal = valueNew(db, pCtx); - if( pVal==0 ) goto no_mem; - if( ExprHasProperty(pExpr, EP_IntValue) ){ - sqlite3VdbeMemSetInt64(pVal, (i64)pExpr->u.iValue*negInt); - }else{ - zVal = sqlite3MPrintf(db, "%s%s", zNeg, pExpr->u.zToken); - if( zVal==0 ) goto no_mem; - sqlite3ValueSetStr(pVal, -1, zVal, SQLITE_UTF8, SQLITE_DYNAMIC); - } - if( (op==TK_INTEGER || op==TK_FLOAT ) && affinity==SQLITE_AFF_NONE ){ - sqlite3ValueApplyAffinity(pVal, SQLITE_AFF_NUMERIC, SQLITE_UTF8); - }else{ - sqlite3ValueApplyAffinity(pVal, affinity, SQLITE_UTF8); - } - if( pVal->flags & (MEM_Int|MEM_Real) ) pVal->flags &= ~MEM_Str; - if( enc!=SQLITE_UTF8 ){ - rc = sqlite3VdbeChangeEncoding(pVal, enc); - } - }else if( op==TK_UMINUS ) { - /* This branch happens for multiple negative signs. 
Ex: -(-5) */ - if( SQLITE_OK==sqlite3ValueFromExpr(db,pExpr->pLeft,enc,affinity,&pVal) - && pVal!=0 - ){ - sqlite3VdbeMemNumerify(pVal); - if( pVal->u.i==SMALLEST_INT64 ){ - pVal->flags &= ~MEM_Int; - pVal->flags |= MEM_Real; - pVal->r = (double)SMALLEST_INT64; - }else{ - pVal->u.i = -pVal->u.i; - } - pVal->r = -pVal->r; - sqlite3ValueApplyAffinity(pVal, affinity, enc); - } - }else if( op==TK_NULL ){ - pVal = valueNew(db, pCtx); - if( pVal==0 ) goto no_mem; - } -#ifndef SQLITE_OMIT_BLOB_LITERAL - else if( op==TK_BLOB ){ - int nVal; - assert( pExpr->u.zToken[0]=='x' || pExpr->u.zToken[0]=='X' ); - assert( pExpr->u.zToken[1]=='\'' ); - pVal = valueNew(db, pCtx); - if( !pVal ) goto no_mem; - zVal = &pExpr->u.zToken[2]; - nVal = sqlite3Strlen30(zVal)-1; - assert( zVal[nVal]=='\'' ); - sqlite3VdbeMemSetStr(pVal, sqlite3HexToBlob(db, zVal, nVal), nVal/2, - 0, SQLITE_DYNAMIC); - } -#endif - - *ppVal = pVal; - return rc; - -no_mem: - db->mallocFailed = 1; - sqlite3DbFree(db, zVal); - assert( *ppVal==0 ); -#ifdef SQLITE_ENABLE_STAT3_OR_STAT4 - if( pCtx==0 ) sqlite3ValueFree(pVal); -#else - assert( pCtx==0 ); sqlite3ValueFree(pVal); -#endif - return SQLITE_NOMEM; -} - -/* -** Create a new sqlite3_value object, containing the value of pExpr. -** -** This only works for very simple expressions that consist of one constant -** token (i.e. "5", "5.1", "'a string'"). If the expression can -** be converted directly into a value, then the value is allocated and -** a pointer written to *ppVal. The caller is responsible for deallocating -** the value by passing it to sqlite3ValueFree() later on. If the expression -** cannot be converted to a value, then *ppVal is set to NULL. -*/ -SQLITE_PRIVATE int sqlite3ValueFromExpr( - sqlite3 *db, /* The database connection */ - Expr *pExpr, /* The expression to evaluate */ - u8 enc, /* Encoding to use */ - u8 affinity, /* Affinity to use */ - sqlite3_value **ppVal /* Write the new value here */ -){ - return valueFromExpr(db, pExpr, enc, affinity, ppVal, 0); -} - -#ifdef SQLITE_ENABLE_STAT3_OR_STAT4 -/* -** The implementation of the sqlite_record() function. This function accepts -** a single argument of any type. The return value is a formatted database -** record (a blob) containing the argument value. -** -** This is used to convert the value stored in the 'sample' column of the -** sqlite_stat3 table to the record format SQLite uses internally. -*/ -static void recordFunc( - sqlite3_context *context, - int argc, - sqlite3_value **argv -){ - const int file_format = 1; - int iSerial; /* Serial type */ - int nSerial; /* Bytes of space for iSerial as varint */ - int nVal; /* Bytes of space required for argv[0] */ - int nRet; - sqlite3 *db; - u8 *aRet; - - UNUSED_PARAMETER( argc ); - iSerial = sqlite3VdbeSerialType(argv[0], file_format); - nSerial = sqlite3VarintLen(iSerial); - nVal = sqlite3VdbeSerialTypeLen(iSerial); - db = sqlite3_context_db_handle(context); - - nRet = 1 + nSerial + nVal; - aRet = sqlite3DbMallocRaw(db, nRet); - if( aRet==0 ){ - sqlite3_result_error_nomem(context); - }else{ - aRet[0] = nSerial+1; - sqlite3PutVarint(&aRet[1], iSerial); - sqlite3VdbeSerialPut(&aRet[1+nSerial], argv[0], iSerial); - sqlite3_result_blob(context, aRet, nRet, SQLITE_TRANSIENT); - sqlite3DbFree(db, aRet); - } -} - -/* -** Register built-in functions used to help read ANALYZE data. 
-*/ -SQLITE_PRIVATE void sqlite3AnalyzeFunctions(void){ - static SQLITE_WSD FuncDef aAnalyzeTableFuncs[] = { - FUNCTION(sqlite_record, 1, 0, 0, recordFunc), - }; - int i; - FuncDefHash *pHash = &GLOBAL(FuncDefHash, sqlite3GlobalFunctions); - FuncDef *aFunc = (FuncDef*)&GLOBAL(FuncDef, aAnalyzeTableFuncs); - for(i=0; idb; - - - struct ValueNewStat4Ctx alloc; - alloc.pParse = pParse; - alloc.pIdx = pIdx; - alloc.ppRec = ppRec; - alloc.iVal = iVal; - - /* Skip over any TK_COLLATE nodes */ - pExpr = sqlite3ExprSkipCollate(pExpr); - - if( !pExpr ){ - pVal = valueNew(db, &alloc); - if( pVal ){ - sqlite3VdbeMemSetNull((Mem*)pVal); - } - }else if( pExpr->op==TK_VARIABLE - || NEVER(pExpr->op==TK_REGISTER && pExpr->op2==TK_VARIABLE) - ){ - Vdbe *v; - int iBindVar = pExpr->iColumn; - sqlite3VdbeSetVarmask(pParse->pVdbe, iBindVar); - if( (v = pParse->pReprepare)!=0 ){ - pVal = valueNew(db, &alloc); - if( pVal ){ - rc = sqlite3VdbeMemCopy((Mem*)pVal, &v->aVar[iBindVar-1]); - if( rc==SQLITE_OK ){ - sqlite3ValueApplyAffinity(pVal, affinity, ENC(db)); - } - pVal->db = pParse->db; - } - } - }else{ - rc = valueFromExpr(db, pExpr, ENC(db), affinity, &pVal, &alloc); - } - *pbOk = (pVal!=0); - - assert( pVal==0 || pVal->db==db ); - return rc; -} - -/* -** Unless it is NULL, the argument must be an UnpackedRecord object returned -** by an earlier call to sqlite3Stat4ProbeSetValue(). This call deletes -** the object. -*/ -SQLITE_PRIVATE void sqlite3Stat4ProbeFree(UnpackedRecord *pRec){ - if( pRec ){ - int i; - int nCol = pRec->pKeyInfo->nField+pRec->pKeyInfo->nXField; - Mem *aMem = pRec->aMem; - sqlite3 *db = aMem[0].db; - for(i=0; ipKeyInfo); - sqlite3DbFree(db, pRec); - } -} -#endif /* ifdef SQLITE_ENABLE_STAT4 */ - -/* -** Change the string value of an sqlite3_value object -*/ -SQLITE_PRIVATE void sqlite3ValueSetStr( - sqlite3_value *v, /* Value to be set */ - int n, /* Length of string z */ - const void *z, /* Text of the new string */ - u8 enc, /* Encoding to use */ - void (*xDel)(void*) /* Destructor for the string */ -){ - if( v ) sqlite3VdbeMemSetStr((Mem *)v, z, n, enc, xDel); -} - -/* -** Free an sqlite3_value object -*/ -SQLITE_PRIVATE void sqlite3ValueFree(sqlite3_value *v){ - if( !v ) return; - sqlite3VdbeMemRelease((Mem *)v); - sqlite3DbFree(((Mem*)v)->db, v); -} - -/* -** Return the number of bytes in the sqlite3_value object assuming -** that it uses the encoding "enc" -*/ -SQLITE_PRIVATE int sqlite3ValueBytes(sqlite3_value *pVal, u8 enc){ - Mem *p = (Mem*)pVal; - if( (p->flags & MEM_Blob)!=0 || sqlite3ValueText(pVal, enc) ){ - if( p->flags & MEM_Zero ){ - return p->n + p->u.nZero; - }else{ - return p->n; - } - } - return 0; -} - -/************** End of vdbemem.c *********************************************/ -/************** Begin file vdbeaux.c *****************************************/ -/* -** 2003 September 6 -** -** The author disclaims copyright to this source code. In place of -** a legal notice, here is a blessing: -** -** May you do good and not evil. -** May you find forgiveness for yourself and forgive others. -** May you share freely, never taking more than you give. -** -************************************************************************* -** This file contains code used for creating, destroying, and populating -** a VDBE (or an "sqlite3_stmt" as it is known to the outside world.) Prior -** to version 2.8.7, all this code was combined into the vdbe.c source file. -** But that file was getting too big so this subroutines were split out. 
-*/ - -/* -** Create a new virtual database engine. -*/ -SQLITE_PRIVATE Vdbe *sqlite3VdbeCreate(Parse *pParse){ - sqlite3 *db = pParse->db; - Vdbe *p; - p = sqlite3DbMallocZero(db, sizeof(Vdbe) ); - if( p==0 ) return 0; - p->db = db; - if( db->pVdbe ){ - db->pVdbe->pPrev = p; - } - p->pNext = db->pVdbe; - p->pPrev = 0; - db->pVdbe = p; - p->magic = VDBE_MAGIC_INIT; - p->pParse = pParse; - assert( pParse->aLabel==0 ); - assert( pParse->nLabel==0 ); - assert( pParse->nOpAlloc==0 ); - return p; -} - -/* -** Remember the SQL string for a prepared statement. -*/ -SQLITE_PRIVATE void sqlite3VdbeSetSql(Vdbe *p, const char *z, int n, int isPrepareV2){ - assert( isPrepareV2==1 || isPrepareV2==0 ); - if( p==0 ) return; -#if defined(SQLITE_OMIT_TRACE) && !defined(SQLITE_ENABLE_SQLLOG) - if( !isPrepareV2 ) return; -#endif - assert( p->zSql==0 ); - p->zSql = sqlite3DbStrNDup(p->db, z, n); - p->isPrepareV2 = (u8)isPrepareV2; -} - -/* -** Return the SQL associated with a prepared statement -*/ -SQLITE_API const char *sqlite3_sql(sqlite3_stmt *pStmt){ - Vdbe *p = (Vdbe *)pStmt; - return (p && p->isPrepareV2) ? p->zSql : 0; -} - -/* -** Swap all content between two VDBE structures. -*/ -SQLITE_PRIVATE void sqlite3VdbeSwap(Vdbe *pA, Vdbe *pB){ - Vdbe tmp, *pTmp; - char *zTmp; - tmp = *pA; - *pA = *pB; - *pB = tmp; - pTmp = pA->pNext; - pA->pNext = pB->pNext; - pB->pNext = pTmp; - pTmp = pA->pPrev; - pA->pPrev = pB->pPrev; - pB->pPrev = pTmp; - zTmp = pA->zSql; - pA->zSql = pB->zSql; - pB->zSql = zTmp; - pB->isPrepareV2 = pA->isPrepareV2; -} - -/* -** Resize the Vdbe.aOp array so that it is at least one op larger than -** it was. -** -** If an out-of-memory error occurs while resizing the array, return -** SQLITE_NOMEM. In this case Vdbe.aOp and Vdbe.nOpAlloc remain -** unchanged (this is so that any opcodes already allocated can be -** correctly deallocated along with the rest of the Vdbe). -*/ -static int growOpArray(Vdbe *v){ - VdbeOp *pNew; - Parse *p = v->pParse; - int nNew = (p->nOpAlloc ? p->nOpAlloc*2 : (int)(1024/sizeof(Op))); - pNew = sqlite3DbRealloc(p->db, v->aOp, nNew*sizeof(Op)); - if( pNew ){ - p->nOpAlloc = sqlite3DbMallocSize(p->db, pNew)/sizeof(Op); - v->aOp = pNew; - } - return (pNew ? SQLITE_OK : SQLITE_NOMEM); -} - -#ifdef SQLITE_DEBUG -/* This routine is just a convenient place to set a breakpoint that will -** fire after each opcode is inserted and displayed using -** "PRAGMA vdbe_addoptrace=on". -*/ -static void test_addop_breakpoint(void){ - static int n = 0; - n++; -} -#endif - -/* -** Add a new instruction to the list of instructions current in the -** VDBE. Return the address of the new instruction. -** -** Parameters: -** -** p Pointer to the VDBE -** -** op The opcode for this instruction -** -** p1, p2, p3 Operands -** -** Use the sqlite3VdbeResolveLabel() function to fix an address and -** the sqlite3VdbeChangeP4() function to change the value of the P4 -** operand. 
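sqlite3VdbeSetSql() above records the statement text only for the _v2 prepare interface, which is why sqlite3_sql() returns NULL for statements prepared with the legacy sqlite3_prepare(). A small sketch of that difference:

#include <sqlite3.h>
#include <stdio.h>

/* sqlite3_sql() returns the original text for sqlite3_prepare_v2()
** statements; for legacy sqlite3_prepare() statements it returns NULL. */
void show_sql(sqlite3 *db){
  sqlite3_stmt *pNew = 0, *pOld = 0;
  sqlite3_prepare_v2(db, "SELECT 1", -1, &pNew, 0);
  sqlite3_prepare(db, "SELECT 2", -1, &pOld, 0);
  printf("v2:     %s\n", sqlite3_sql(pNew));                           /* "SELECT 1" */
  printf("legacy: %s\n", sqlite3_sql(pOld) ? sqlite3_sql(pOld) : "(null)");
  sqlite3_finalize(pNew);
  sqlite3_finalize(pOld);
}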
-*/ -SQLITE_PRIVATE int sqlite3VdbeAddOp3(Vdbe *p, int op, int p1, int p2, int p3){ - int i; - VdbeOp *pOp; - - i = p->nOp; - assert( p->magic==VDBE_MAGIC_INIT ); - assert( op>0 && op<0xff ); - if( p->pParse->nOpAlloc<=i ){ - if( growOpArray(p) ){ - return 1; - } - } - p->nOp++; - pOp = &p->aOp[i]; - pOp->opcode = (u8)op; - pOp->p5 = 0; - pOp->p1 = p1; - pOp->p2 = p2; - pOp->p3 = p3; - pOp->p4.p = 0; - pOp->p4type = P4_NOTUSED; -#ifdef SQLITE_ENABLE_EXPLAIN_COMMENTS - pOp->zComment = 0; -#endif -#ifdef SQLITE_DEBUG - if( p->db->flags & SQLITE_VdbeAddopTrace ){ - int jj, kk; - Parse *pParse = p->pParse; - for(jj=kk=0; jjaColCache + jj; - if( x->iLevel>pParse->iCacheLevel || x->iReg==0 ) continue; - printf(" r[%d]={%d:%d}", x->iReg, x->iTable, x->iColumn); - kk++; - } - if( kk ) printf("\n"); - sqlite3VdbePrintOp(0, i, &p->aOp[i]); - test_addop_breakpoint(); - } -#endif -#ifdef VDBE_PROFILE - pOp->cycles = 0; - pOp->cnt = 0; -#endif -#ifdef SQLITE_VDBE_COVERAGE - pOp->iSrcLine = 0; -#endif - return i; -} -SQLITE_PRIVATE int sqlite3VdbeAddOp0(Vdbe *p, int op){ - return sqlite3VdbeAddOp3(p, op, 0, 0, 0); -} -SQLITE_PRIVATE int sqlite3VdbeAddOp1(Vdbe *p, int op, int p1){ - return sqlite3VdbeAddOp3(p, op, p1, 0, 0); -} -SQLITE_PRIVATE int sqlite3VdbeAddOp2(Vdbe *p, int op, int p1, int p2){ - return sqlite3VdbeAddOp3(p, op, p1, p2, 0); -} - - -/* -** Add an opcode that includes the p4 value as a pointer. -*/ -SQLITE_PRIVATE int sqlite3VdbeAddOp4( - Vdbe *p, /* Add the opcode to this VM */ - int op, /* The new opcode */ - int p1, /* The P1 operand */ - int p2, /* The P2 operand */ - int p3, /* The P3 operand */ - const char *zP4, /* The P4 operand */ - int p4type /* P4 operand type */ -){ - int addr = sqlite3VdbeAddOp3(p, op, p1, p2, p3); - sqlite3VdbeChangeP4(p, addr, zP4, p4type); - return addr; -} - -/* -** Add an OP_ParseSchema opcode. This routine is broken out from -** sqlite3VdbeAddOp4() since it needs to also needs to mark all btrees -** as having been used. -** -** The zWhere string must have been obtained from sqlite3_malloc(). -** This routine will take ownership of the allocated memory. -*/ -SQLITE_PRIVATE void sqlite3VdbeAddParseSchemaOp(Vdbe *p, int iDb, char *zWhere){ - int j; - int addr = sqlite3VdbeAddOp3(p, OP_ParseSchema, iDb, 0, 0); - sqlite3VdbeChangeP4(p, addr, zWhere, P4_DYNAMIC); - for(j=0; jdb->nDb; j++) sqlite3VdbeUsesBtree(p, j); -} - -/* -** Add an opcode that includes the p4 value as an integer. -*/ -SQLITE_PRIVATE int sqlite3VdbeAddOp4Int( - Vdbe *p, /* Add the opcode to this VM */ - int op, /* The new opcode */ - int p1, /* The P1 operand */ - int p2, /* The P2 operand */ - int p3, /* The P3 operand */ - int p4 /* The P4 operand as an integer */ -){ - int addr = sqlite3VdbeAddOp3(p, op, p1, p2, p3); - sqlite3VdbeChangeP4(p, addr, SQLITE_INT_TO_PTR(p4), P4_INT32); - return addr; -} - -/* -** Create a new symbolic label for an instruction that has yet to be -** coded. The symbolic label is really just a negative number. The -** label can be used as the P2 value of an operation. Later, when -** the label is resolved to a specific address, the VDBE will scan -** through its operation list and change all values of P2 which match -** the label into the resolved address. -** -** The VDBE knows that a P2 value is a label because labels are -** always negative and P2 values are suppose to be non-negative. -** Hence, a negative P2 value is a label that has yet to be resolved. -** -** Zero is returned if a malloc() fails. 
-*/ -SQLITE_PRIVATE int sqlite3VdbeMakeLabel(Vdbe *v){ - Parse *p = v->pParse; - int i = p->nLabel++; - assert( v->magic==VDBE_MAGIC_INIT ); - if( (i & (i-1))==0 ){ - p->aLabel = sqlite3DbReallocOrFree(p->db, p->aLabel, - (i*2+1)*sizeof(p->aLabel[0])); - } - if( p->aLabel ){ - p->aLabel[i] = -1; - } - return -1-i; -} - -/* -** Resolve label "x" to be the address of the next instruction to -** be inserted. The parameter "x" must have been obtained from -** a prior call to sqlite3VdbeMakeLabel(). -*/ -SQLITE_PRIVATE void sqlite3VdbeResolveLabel(Vdbe *v, int x){ - Parse *p = v->pParse; - int j = -1-x; - assert( v->magic==VDBE_MAGIC_INIT ); - assert( jnLabel ); - if( ALWAYS(j>=0) && p->aLabel ){ - p->aLabel[j] = v->nOp; - } - p->iFixedOp = v->nOp - 1; -} - -/* -** Mark the VDBE as one that can only be run one time. -*/ -SQLITE_PRIVATE void sqlite3VdbeRunOnlyOnce(Vdbe *p){ - p->runOnlyOnce = 1; -} - -#ifdef SQLITE_DEBUG /* sqlite3AssertMayAbort() logic */ - -/* -** The following type and function are used to iterate through all opcodes -** in a Vdbe main program and each of the sub-programs (triggers) it may -** invoke directly or indirectly. It should be used as follows: -** -** Op *pOp; -** VdbeOpIter sIter; -** -** memset(&sIter, 0, sizeof(sIter)); -** sIter.v = v; // v is of type Vdbe* -** while( (pOp = opIterNext(&sIter)) ){ -** // Do something with pOp -** } -** sqlite3DbFree(v->db, sIter.apSub); -** -*/ -typedef struct VdbeOpIter VdbeOpIter; -struct VdbeOpIter { - Vdbe *v; /* Vdbe to iterate through the opcodes of */ - SubProgram **apSub; /* Array of subprograms */ - int nSub; /* Number of entries in apSub */ - int iAddr; /* Address of next instruction to return */ - int iSub; /* 0 = main program, 1 = first sub-program etc. */ -}; -static Op *opIterNext(VdbeOpIter *p){ - Vdbe *v = p->v; - Op *pRet = 0; - Op *aOp; - int nOp; - - if( p->iSub<=p->nSub ){ - - if( p->iSub==0 ){ - aOp = v->aOp; - nOp = v->nOp; - }else{ - aOp = p->apSub[p->iSub-1]->aOp; - nOp = p->apSub[p->iSub-1]->nOp; - } - assert( p->iAddriAddr]; - p->iAddr++; - if( p->iAddr==nOp ){ - p->iSub++; - p->iAddr = 0; - } - - if( pRet->p4type==P4_SUBPROGRAM ){ - int nByte = (p->nSub+1)*sizeof(SubProgram*); - int j; - for(j=0; jnSub; j++){ - if( p->apSub[j]==pRet->p4.pProgram ) break; - } - if( j==p->nSub ){ - p->apSub = sqlite3DbReallocOrFree(v->db, p->apSub, nByte); - if( !p->apSub ){ - pRet = 0; - }else{ - p->apSub[p->nSub++] = pRet->p4.pProgram; - } - } - } - } - - return pRet; -} - -/* -** Check if the program stored in the VM associated with pParse may -** throw an ABORT exception (causing the statement, but not entire transaction -** to be rolled back). This condition is true if the main program or any -** sub-programs contains any of the following: -** -** * OP_Halt with P1=SQLITE_CONSTRAINT and P2=OE_Abort. -** * OP_HaltIfNull with P1=SQLITE_CONSTRAINT and P2=OE_Abort. -** * OP_Destroy -** * OP_VUpdate -** * OP_VRename -** * OP_FkCounter with P2==0 (immediate foreign key constraint) -** -** Then check that the value of Parse.mayAbort is true if an -** ABORT may be thrown, or false otherwise. Return true if it does -** match, or false otherwise. This function is intended to be used as -** part of an assert statement in the compiler. 
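sqlite3VdbeMakeLabel() and sqlite3VdbeResolveLabel() above implement one-pass code generation with backpatching: forward jump targets are issued as negative placeholder ids and rewritten to real addresses once the target instruction exists. A generic, self-contained sketch of that pattern (toy data structures of my own, not SQLite's):

#include <stdio.h>

/* Generic backpatching sketch: labels are negative ids; after emission,
** any jump operand that is still negative is replaced by the address
** recorded for that label. */
#define MAX_OPS    64
#define MAX_LABELS 16

static int aOp[MAX_OPS];      /* each "op" is just its jump operand here */
static int nOp = 0;
static int aLabel[MAX_LABELS];
static int nLabel = 0;

static int makeLabel(void){ aLabel[nLabel] = -1; return -1 - nLabel++; }
static void resolveLabel(int x){ aLabel[-1 - x] = nOp; }
static void emit(int p2){ aOp[nOp++] = p2; }

static void fixup(void){
  for(int i=0; i<nOp; i++){
    if( aOp[i]<0 ) aOp[i] = aLabel[-1 - aOp[i]];
  }
}

int main(void){
  int lblEnd = makeLabel();
  emit(lblEnd);          /* forward jump to a not-yet-known address */
  emit(0);               /* some other instruction */
  resolveLabel(lblEnd);  /* the label now means "here", i.e. address 2 */
  emit(0);
  fixup();
  printf("patched jump target: %d\n", aOp[0]);  /* prints 2 */
  return 0;
}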
Similar to: -** -** assert( sqlite3VdbeAssertMayAbort(pParse->pVdbe, pParse->mayAbort) ); -*/ -SQLITE_PRIVATE int sqlite3VdbeAssertMayAbort(Vdbe *v, int mayAbort){ - int hasAbort = 0; - Op *pOp; - VdbeOpIter sIter; - memset(&sIter, 0, sizeof(sIter)); - sIter.v = v; - - while( (pOp = opIterNext(&sIter))!=0 ){ - int opcode = pOp->opcode; - if( opcode==OP_Destroy || opcode==OP_VUpdate || opcode==OP_VRename -#ifndef SQLITE_OMIT_FOREIGN_KEY - || (opcode==OP_FkCounter && pOp->p1==0 && pOp->p2==1) -#endif - || ((opcode==OP_Halt || opcode==OP_HaltIfNull) - && ((pOp->p1&0xff)==SQLITE_CONSTRAINT && pOp->p2==OE_Abort)) - ){ - hasAbort = 1; - break; - } - } - sqlite3DbFree(v->db, sIter.apSub); - - /* Return true if hasAbort==mayAbort. Or if a malloc failure occurred. - ** If malloc failed, then the while() loop above may not have iterated - ** through all opcodes and hasAbort may be set incorrectly. Return - ** true for this case to prevent the assert() in the callers frame - ** from failing. */ - return ( v->db->mallocFailed || hasAbort==mayAbort ); -} -#endif /* SQLITE_DEBUG - the sqlite3AssertMayAbort() function */ - -/* -** Loop through the program looking for P2 values that are negative -** on jump instructions. Each such value is a label. Resolve the -** label by setting the P2 value to its correct non-zero value. -** -** This routine is called once after all opcodes have been inserted. -** -** Variable *pMaxFuncArgs is set to the maximum value of any P2 argument -** to an OP_Function, OP_AggStep or OP_VFilter opcode. This is used by -** sqlite3VdbeMakeReady() to size the Vdbe.apArg[] array. -** -** The Op.opflags field is set on all opcodes. -*/ -static void resolveP2Values(Vdbe *p, int *pMaxFuncArgs){ - int i; - int nMaxArgs = *pMaxFuncArgs; - Op *pOp; - Parse *pParse = p->pParse; - int *aLabel = pParse->aLabel; - p->readOnly = 1; - p->bIsReader = 0; - for(pOp=p->aOp, i=p->nOp-1; i>=0; i--, pOp++){ - u8 opcode = pOp->opcode; - - /* NOTE: Be sure to update mkopcodeh.awk when adding or removing - ** cases from this switch! */ - switch( opcode ){ - case OP_Function: - case OP_AggStep: { - if( pOp->p5>nMaxArgs ) nMaxArgs = pOp->p5; - break; - } - case OP_Transaction: { - if( pOp->p2!=0 ) p->readOnly = 0; - /* fall thru */ - } - case OP_AutoCommit: - case OP_Savepoint: { - p->bIsReader = 1; - break; - } -#ifndef SQLITE_OMIT_WAL - case OP_Checkpoint: -#endif - case OP_Vacuum: - case OP_JournalMode: { - p->readOnly = 0; - p->bIsReader = 1; - break; - } -#ifndef SQLITE_OMIT_VIRTUALTABLE - case OP_VUpdate: { - if( pOp->p2>nMaxArgs ) nMaxArgs = pOp->p2; - break; - } - case OP_VFilter: { - int n; - assert( p->nOp - i >= 3 ); - assert( pOp[-1].opcode==OP_Integer ); - n = pOp[-1].p1; - if( n>nMaxArgs ) nMaxArgs = n; - break; - } -#endif - case OP_Next: - case OP_NextIfOpen: - case OP_SorterNext: { - pOp->p4.xAdvance = sqlite3BtreeNext; - pOp->p4type = P4_ADVANCE; - break; - } - case OP_Prev: - case OP_PrevIfOpen: { - pOp->p4.xAdvance = sqlite3BtreePrevious; - pOp->p4type = P4_ADVANCE; - break; - } - } - - pOp->opflags = sqlite3OpcodeProperty[opcode]; - if( (pOp->opflags & OPFLG_JUMP)!=0 && pOp->p2<0 ){ - assert( -1-pOp->p2nLabel ); - pOp->p2 = aLabel[-1-pOp->p2]; - } - } - sqlite3DbFree(p->db, pParse->aLabel); - pParse->aLabel = 0; - pParse->nLabel = 0; - *pMaxFuncArgs = nMaxArgs; - assert( p->bIsReader!=0 || p->btreeMask==0 ); -} - -/* -** Return the address of the next instruction to be inserted. 
-*/ -SQLITE_PRIVATE int sqlite3VdbeCurrentAddr(Vdbe *p){ - assert( p->magic==VDBE_MAGIC_INIT ); - return p->nOp; -} - -/* -** This function returns a pointer to the array of opcodes associated with -** the Vdbe passed as the first argument. It is the callers responsibility -** to arrange for the returned array to be eventually freed using the -** vdbeFreeOpArray() function. -** -** Before returning, *pnOp is set to the number of entries in the returned -** array. Also, *pnMaxArg is set to the larger of its current value and -** the number of entries in the Vdbe.apArg[] array required to execute the -** returned program. -*/ -SQLITE_PRIVATE VdbeOp *sqlite3VdbeTakeOpArray(Vdbe *p, int *pnOp, int *pnMaxArg){ - VdbeOp *aOp = p->aOp; - assert( aOp && !p->db->mallocFailed ); - - /* Check that sqlite3VdbeUsesBtree() was not called on this VM */ - assert( p->btreeMask==0 ); - - resolveP2Values(p, pnMaxArg); - *pnOp = p->nOp; - p->aOp = 0; - return aOp; -} - -/* -** Add a whole list of operations to the operation stack. Return the -** address of the first operation added. -*/ -SQLITE_PRIVATE int sqlite3VdbeAddOpList(Vdbe *p, int nOp, VdbeOpList const *aOp, int iLineno){ - int addr; - assert( p->magic==VDBE_MAGIC_INIT ); - if( p->nOp + nOp > p->pParse->nOpAlloc && growOpArray(p) ){ - return 0; - } - addr = p->nOp; - if( ALWAYS(nOp>0) ){ - int i; - VdbeOpList const *pIn = aOp; - for(i=0; ip2; - VdbeOp *pOut = &p->aOp[i+addr]; - pOut->opcode = pIn->opcode; - pOut->p1 = pIn->p1; - if( p2<0 ){ - assert( sqlite3OpcodeProperty[pOut->opcode] & OPFLG_JUMP ); - pOut->p2 = addr + ADDR(p2); - }else{ - pOut->p2 = p2; - } - pOut->p3 = pIn->p3; - pOut->p4type = P4_NOTUSED; - pOut->p4.p = 0; - pOut->p5 = 0; -#ifdef SQLITE_ENABLE_EXPLAIN_COMMENTS - pOut->zComment = 0; -#endif -#ifdef SQLITE_VDBE_COVERAGE - pOut->iSrcLine = iLineno+i; -#else - (void)iLineno; -#endif -#ifdef SQLITE_DEBUG - if( p->db->flags & SQLITE_VdbeAddopTrace ){ - sqlite3VdbePrintOp(0, i+addr, &p->aOp[i+addr]); - } -#endif - } - p->nOp += nOp; - } - return addr; -} - -/* -** Change the value of the P1 operand for a specific instruction. -** This routine is useful when a large program is loaded from a -** static array using sqlite3VdbeAddOpList but we want to make a -** few minor changes to the program. -*/ -SQLITE_PRIVATE void sqlite3VdbeChangeP1(Vdbe *p, u32 addr, int val){ - assert( p!=0 ); - if( ((u32)p->nOp)>addr ){ - p->aOp[addr].p1 = val; - } -} - -/* -** Change the value of the P2 operand for a specific instruction. -** This routine is useful for setting a jump destination. -*/ -SQLITE_PRIVATE void sqlite3VdbeChangeP2(Vdbe *p, u32 addr, int val){ - assert( p!=0 ); - if( ((u32)p->nOp)>addr ){ - p->aOp[addr].p2 = val; - } -} - -/* -** Change the value of the P3 operand for a specific instruction. -*/ -SQLITE_PRIVATE void sqlite3VdbeChangeP3(Vdbe *p, u32 addr, int val){ - assert( p!=0 ); - if( ((u32)p->nOp)>addr ){ - p->aOp[addr].p3 = val; - } -} - -/* -** Change the value of the P5 operand for the most recently -** added operation. -*/ -SQLITE_PRIVATE void sqlite3VdbeChangeP5(Vdbe *p, u8 val){ - assert( p!=0 ); - if( p->aOp ){ - assert( p->nOp>0 ); - p->aOp[p->nOp-1].p5 = val; - } -} - -/* -** Change the P2 operand of instruction addr so that it points to -** the address of the next instruction to be coded. -*/ -SQLITE_PRIVATE void sqlite3VdbeJumpHere(Vdbe *p, int addr){ - sqlite3VdbeChangeP2(p, addr, p->nOp); - p->pParse->iFixedOp = p->nOp - 1; -} - - -/* -** If the input FuncDef structure is ephemeral, then free it. 
If -** the FuncDef is not ephermal, then do nothing. -*/ -static void freeEphemeralFunction(sqlite3 *db, FuncDef *pDef){ - if( ALWAYS(pDef) && (pDef->funcFlags & SQLITE_FUNC_EPHEM)!=0 ){ - sqlite3DbFree(db, pDef); - } -} - -static void vdbeFreeOpArray(sqlite3 *, Op *, int); - -/* -** Delete a P4 value if necessary. -*/ -static void freeP4(sqlite3 *db, int p4type, void *p4){ - if( p4 ){ - assert( db ); - switch( p4type ){ - case P4_REAL: - case P4_INT64: - case P4_DYNAMIC: - case P4_INTARRAY: { - sqlite3DbFree(db, p4); - break; - } - case P4_KEYINFO: { - if( db->pnBytesFreed==0 ) sqlite3KeyInfoUnref((KeyInfo*)p4); - break; - } - case P4_MPRINTF: { - if( db->pnBytesFreed==0 ) sqlite3_free(p4); - break; - } - case P4_FUNCDEF: { - freeEphemeralFunction(db, (FuncDef*)p4); - break; - } - case P4_MEM: { - if( db->pnBytesFreed==0 ){ - sqlite3ValueFree((sqlite3_value*)p4); - }else{ - Mem *p = (Mem*)p4; - sqlite3DbFree(db, p->zMalloc); - sqlite3DbFree(db, p); - } - break; - } - case P4_VTAB : { - if( db->pnBytesFreed==0 ) sqlite3VtabUnlock((VTable *)p4); - break; - } - } - } -} - -/* -** Free the space allocated for aOp and any p4 values allocated for the -** opcodes contained within. If aOp is not NULL it is assumed to contain -** nOp entries. -*/ -static void vdbeFreeOpArray(sqlite3 *db, Op *aOp, int nOp){ - if( aOp ){ - Op *pOp; - for(pOp=aOp; pOp<&aOp[nOp]; pOp++){ - freeP4(db, pOp->p4type, pOp->p4.p); -#ifdef SQLITE_ENABLE_EXPLAIN_COMMENTS - sqlite3DbFree(db, pOp->zComment); -#endif - } - } - sqlite3DbFree(db, aOp); -} - -/* -** Link the SubProgram object passed as the second argument into the linked -** list at Vdbe.pSubProgram. This list is used to delete all sub-program -** objects when the VM is no longer required. -*/ -SQLITE_PRIVATE void sqlite3VdbeLinkSubProgram(Vdbe *pVdbe, SubProgram *p){ - p->pNext = pVdbe->pProgram; - pVdbe->pProgram = p; -} - -/* -** Change the opcode at addr into OP_Noop -*/ -SQLITE_PRIVATE void sqlite3VdbeChangeToNoop(Vdbe *p, int addr){ - if( p->aOp ){ - VdbeOp *pOp = &p->aOp[addr]; - sqlite3 *db = p->db; - freeP4(db, pOp->p4type, pOp->p4.p); - memset(pOp, 0, sizeof(pOp[0])); - pOp->opcode = OP_Noop; - if( addr==p->nOp-1 ) p->nOp--; - } -} - -/* -** Remove the last opcode inserted -*/ -SQLITE_PRIVATE int sqlite3VdbeDeletePriorOpcode(Vdbe *p, u8 op){ - if( (p->nOp-1)>(p->pParse->iFixedOp) && p->aOp[p->nOp-1].opcode==op ){ - sqlite3VdbeChangeToNoop(p, p->nOp-1); - return 1; - }else{ - return 0; - } -} - -/* -** Change the value of the P4 operand for a specific instruction. -** This routine is useful when a large program is loaded from a -** static array using sqlite3VdbeAddOpList but we want to make a -** few minor changes to the program. -** -** If n>=0 then the P4 operand is dynamic, meaning that a copy of -** the string is made into memory obtained from sqlite3_malloc(). -** A value of n==0 means copy bytes of zP4 up to and including the -** first null byte. If n>0 then copy n+1 bytes of zP4. -** -** Other values of n (P4_STATIC, P4_COLLSEQ etc.) indicate that zP4 points -** to a string or structure that is guaranteed to exist for the lifetime of -** the Vdbe. In these cases we can just copy the pointer. -** -** If addr<0 then change P4 on the most recently inserted instruction. 
-*/ -SQLITE_PRIVATE void sqlite3VdbeChangeP4(Vdbe *p, int addr, const char *zP4, int n){ - Op *pOp; - sqlite3 *db; - assert( p!=0 ); - db = p->db; - assert( p->magic==VDBE_MAGIC_INIT ); - if( p->aOp==0 || db->mallocFailed ){ - if( n!=P4_VTAB ){ - freeP4(db, n, (void*)*(char**)&zP4); - } - return; - } - assert( p->nOp>0 ); - assert( addrnOp ); - if( addr<0 ){ - addr = p->nOp - 1; - } - pOp = &p->aOp[addr]; - assert( pOp->p4type==P4_NOTUSED - || pOp->p4type==P4_INT32 - || pOp->p4type==P4_KEYINFO ); - freeP4(db, pOp->p4type, pOp->p4.p); - pOp->p4.p = 0; - if( n==P4_INT32 ){ - /* Note: this cast is safe, because the origin data point was an int - ** that was cast to a (const char *). */ - pOp->p4.i = SQLITE_PTR_TO_INT(zP4); - pOp->p4type = P4_INT32; - }else if( zP4==0 ){ - pOp->p4.p = 0; - pOp->p4type = P4_NOTUSED; - }else if( n==P4_KEYINFO ){ - pOp->p4.p = (void*)zP4; - pOp->p4type = P4_KEYINFO; - }else if( n==P4_VTAB ){ - pOp->p4.p = (void*)zP4; - pOp->p4type = P4_VTAB; - sqlite3VtabLock((VTable *)zP4); - assert( ((VTable *)zP4)->db==p->db ); - }else if( n<0 ){ - pOp->p4.p = (void*)zP4; - pOp->p4type = (signed char)n; - }else{ - if( n==0 ) n = sqlite3Strlen30(zP4); - pOp->p4.z = sqlite3DbStrNDup(p->db, zP4, n); - pOp->p4type = P4_DYNAMIC; - } -} - -/* -** Set the P4 on the most recently added opcode to the KeyInfo for the -** index given. -*/ -SQLITE_PRIVATE void sqlite3VdbeSetP4KeyInfo(Parse *pParse, Index *pIdx){ - Vdbe *v = pParse->pVdbe; - assert( v!=0 ); - assert( pIdx!=0 ); - sqlite3VdbeChangeP4(v, -1, (char*)sqlite3KeyInfoOfIndex(pParse, pIdx), - P4_KEYINFO); -} - -#ifdef SQLITE_ENABLE_EXPLAIN_COMMENTS -/* -** Change the comment on the most recently coded instruction. Or -** insert a No-op and add the comment to that new instruction. This -** makes the code easier to read during debugging. None of this happens -** in a production build. -*/ -static void vdbeVComment(Vdbe *p, const char *zFormat, va_list ap){ - assert( p->nOp>0 || p->aOp==0 ); - assert( p->aOp==0 || p->aOp[p->nOp-1].zComment==0 || p->db->mallocFailed ); - if( p->nOp ){ - assert( p->aOp ); - sqlite3DbFree(p->db, p->aOp[p->nOp-1].zComment); - p->aOp[p->nOp-1].zComment = sqlite3VMPrintf(p->db, zFormat, ap); - } -} -SQLITE_PRIVATE void sqlite3VdbeComment(Vdbe *p, const char *zFormat, ...){ - va_list ap; - if( p ){ - va_start(ap, zFormat); - vdbeVComment(p, zFormat, ap); - va_end(ap); - } -} -SQLITE_PRIVATE void sqlite3VdbeNoopComment(Vdbe *p, const char *zFormat, ...){ - va_list ap; - if( p ){ - sqlite3VdbeAddOp0(p, OP_Noop); - va_start(ap, zFormat); - vdbeVComment(p, zFormat, ap); - va_end(ap); - } -} -#endif /* NDEBUG */ - -#ifdef SQLITE_VDBE_COVERAGE -/* -** Set the value if the iSrcLine field for the previously coded instruction. -*/ -SQLITE_PRIVATE void sqlite3VdbeSetLineNumber(Vdbe *v, int iLine){ - sqlite3VdbeGetOp(v,-1)->iSrcLine = iLine; -} -#endif /* SQLITE_VDBE_COVERAGE */ - -/* -** Return the opcode for a given address. If the address is -1, then -** return the most recently inserted opcode. -** -** If a memory allocation error has occurred prior to the calling of this -** routine, then a pointer to a dummy VdbeOp will be returned. That opcode -** is readable but not writable, though it is cast to a writable value. -** The return of a dummy opcode allows the call to continue functioning -** after a OOM fault without having to check to see if the return from -** this routine is a valid pointer. But because the dummy.opcode is 0, -** dummy will never be written to. 
This is verified by code inspection and -** by running with Valgrind. -*/ -SQLITE_PRIVATE VdbeOp *sqlite3VdbeGetOp(Vdbe *p, int addr){ - /* C89 specifies that the constant "dummy" will be initialized to all - ** zeros, which is correct. MSVC generates a warning, nevertheless. */ - static VdbeOp dummy; /* Ignore the MSVC warning about no initializer */ - assert( p->magic==VDBE_MAGIC_INIT ); - if( addr<0 ){ - addr = p->nOp - 1; - } - assert( (addr>=0 && addrnOp) || p->db->mallocFailed ); - if( p->db->mallocFailed ){ - return (VdbeOp*)&dummy; - }else{ - return &p->aOp[addr]; - } -} - -#if defined(SQLITE_ENABLE_EXPLAIN_COMMENTS) -/* -** Return an integer value for one of the parameters to the opcode pOp -** determined by character c. -*/ -static int translateP(char c, const Op *pOp){ - if( c=='1' ) return pOp->p1; - if( c=='2' ) return pOp->p2; - if( c=='3' ) return pOp->p3; - if( c=='4' ) return pOp->p4.i; - return pOp->p5; -} - -/* -** Compute a string for the "comment" field of a VDBE opcode listing. -** -** The Synopsis: field in comments in the vdbe.c source file gets converted -** to an extra string that is appended to the sqlite3OpcodeName(). In the -** absence of other comments, this synopsis becomes the comment on the opcode. -** Some translation occurs: -** -** "PX" -> "r[X]" -** "PX@PY" -> "r[X..X+Y-1]" or "r[x]" if y is 0 or 1 -** "PX@PY+1" -> "r[X..X+Y]" or "r[x]" if y is 0 -** "PY..PY" -> "r[X..Y]" or "r[x]" if y<=x -*/ -static int displayComment( - const Op *pOp, /* The opcode to be commented */ - const char *zP4, /* Previously obtained value for P4 */ - char *zTemp, /* Write result here */ - int nTemp /* Space available in zTemp[] */ -){ - const char *zOpName; - const char *zSynopsis; - int nOpName; - int ii, jj; - zOpName = sqlite3OpcodeName(pOp->opcode); - nOpName = sqlite3Strlen30(zOpName); - if( zOpName[nOpName+1] ){ - int seenCom = 0; - char c; - zSynopsis = zOpName += nOpName + 1; - for(ii=jj=0; jjzComment); - seenCom = 1; - }else{ - int v1 = translateP(c, pOp); - int v2; - sqlite3_snprintf(nTemp-jj, zTemp+jj, "%d", v1); - if( strncmp(zSynopsis+ii+1, "@P", 2)==0 ){ - ii += 3; - jj += sqlite3Strlen30(zTemp+jj); - v2 = translateP(zSynopsis[ii], pOp); - if( strncmp(zSynopsis+ii+1,"+1",2)==0 ){ - ii += 2; - v2++; - } - if( v2>1 ){ - sqlite3_snprintf(nTemp-jj, zTemp+jj, "..%d", v1+v2-1); - } - }else if( strncmp(zSynopsis+ii+1, "..P3", 4)==0 && pOp->p3==0 ){ - ii += 4; - } - } - jj += sqlite3Strlen30(zTemp+jj); - }else{ - zTemp[jj++] = c; - } - } - if( !seenCom && jjzComment ){ - sqlite3_snprintf(nTemp-jj, zTemp+jj, "; %s", pOp->zComment); - jj += sqlite3Strlen30(zTemp+jj); - } - if( jjzComment ){ - sqlite3_snprintf(nTemp, zTemp, "%s", pOp->zComment); - jj = sqlite3Strlen30(zTemp); - }else{ - zTemp[0] = 0; - jj = 0; - } - return jj; -} -#endif /* SQLITE_DEBUG */ - - -#if !defined(SQLITE_OMIT_EXPLAIN) || !defined(NDEBUG) \ - || defined(VDBE_PROFILE) || defined(SQLITE_DEBUG) -/* -** Compute a string that describes the P4 parameter for an opcode. -** Use zTemp for any required temporary buffer space. -*/ -static char *displayP4(Op *pOp, char *zTemp, int nTemp){ - char *zP4 = zTemp; - assert( nTemp>=20 ); - switch( pOp->p4type ){ - case P4_KEYINFO: { - int i, j; - KeyInfo *pKeyInfo = pOp->p4.pKeyInfo; - assert( pKeyInfo->aSortOrder!=0 ); - sqlite3_snprintf(nTemp, zTemp, "k(%d", pKeyInfo->nField); - i = sqlite3Strlen30(zTemp); - for(j=0; jnField; j++){ - CollSeq *pColl = pKeyInfo->aColl[j]; - const char *zColl = pColl ? 
pColl->zName : "nil"; - int n = sqlite3Strlen30(zColl); - if( n==6 && memcmp(zColl,"BINARY",6)==0 ){ - zColl = "B"; - n = 1; - } - if( i+n>nTemp-6 ){ - memcpy(&zTemp[i],",...",4); - break; - } - zTemp[i++] = ','; - if( pKeyInfo->aSortOrder[j] ){ - zTemp[i++] = '-'; - } - memcpy(&zTemp[i], zColl, n+1); - i += n; - } - zTemp[i++] = ')'; - zTemp[i] = 0; - assert( i<nTemp ); - break; - } - case P4_COLLSEQ: { - CollSeq *pColl = pOp->p4.pColl; - sqlite3_snprintf(nTemp, zTemp, "(%.20s)", pColl->zName); - break; - } - case P4_FUNCDEF: { - FuncDef *pDef = pOp->p4.pFunc; - sqlite3_snprintf(nTemp, zTemp, "%s(%d)", pDef->zName, pDef->nArg); - break; - } - case P4_INT64: { - sqlite3_snprintf(nTemp, zTemp, "%lld", *pOp->p4.pI64); - break; - } - case P4_INT32: { - sqlite3_snprintf(nTemp, zTemp, "%d", pOp->p4.i); - break; - } - case P4_REAL: { - sqlite3_snprintf(nTemp, zTemp, "%.16g", *pOp->p4.pReal); - break; - } - case P4_MEM: { - Mem *pMem = pOp->p4.pMem; - if( pMem->flags & MEM_Str ){ - zP4 = pMem->z; - }else if( pMem->flags & MEM_Int ){ - sqlite3_snprintf(nTemp, zTemp, "%lld", pMem->u.i); - }else if( pMem->flags & MEM_Real ){ - sqlite3_snprintf(nTemp, zTemp, "%.16g", pMem->r); - }else if( pMem->flags & MEM_Null ){ - sqlite3_snprintf(nTemp, zTemp, "NULL"); - }else{ - assert( pMem->flags & MEM_Blob ); - zP4 = "(blob)"; - } - break; - } -#ifndef SQLITE_OMIT_VIRTUALTABLE - case P4_VTAB: { - sqlite3_vtab *pVtab = pOp->p4.pVtab->pVtab; - sqlite3_snprintf(nTemp, zTemp, "vtab:%p:%p", pVtab, pVtab->pModule); - break; - } -#endif - case P4_INTARRAY: { - sqlite3_snprintf(nTemp, zTemp, "intarray"); - break; - } - case P4_SUBPROGRAM: { - sqlite3_snprintf(nTemp, zTemp, "program"); - break; - } - case P4_ADVANCE: { - zTemp[0] = 0; - break; - } - default: { - zP4 = pOp->p4.z; - if( zP4==0 ){ - zP4 = zTemp; - zTemp[0] = 0; - } - } - } - assert( zP4!=0 ); - return zP4; -} -#endif - -/* -** Declare to the Vdbe that the BTree object at db->aDb[i] is used. -** -** The prepared statements need to know in advance the complete set of -** attached databases that will be use. A mask of these databases -** is maintained in p->btreeMask. The p->lockMask value is the subset of -** p->btreeMask of databases that will require a lock. -*/ -SQLITE_PRIVATE void sqlite3VdbeUsesBtree(Vdbe *p, int i){ - assert( i>=0 && i<p->db->nDb && i<(int)sizeof(yDbMask)*8 ); - assert( i<(int)sizeof(p->btreeMask)*8 ); - p->btreeMask |= ((yDbMask)1)<<i; - if( i!=1 && sqlite3BtreeSharable(p->db->aDb[i].pBt) ){ - p->lockMask |= ((yDbMask)1)<<i; - } -} - -#if !defined(SQLITE_OMIT_SHARED_CACHE) && SQLITE_THREADSAFE>0 -/* -** If SQLite is compiled to support shared-cache mode and to be threadsafe, -** this routine obtains the mutex associated with each BtShared structure -** that may be accessed by the VM passed as an argument. In doing so it also -** sets the BtShared.db member of each of the BtShared structures, ensuring -** that the correct busy-handler callback is invoked if required. -** -** If SQLite is not threadsafe but does support shared-cache mode, then -** sqlite3BtreeEnter() is invoked to set the BtShared.db variables -** of all of BtShared structures accessible via the database handle -** associated with the VM. -** -** If SQLite is not threadsafe and does not support shared-cache mode, this -** function is a no-op. -** -** The p->btreeMask field is a bitmask of all btrees that the prepared -** statement p will ever use. Let N be the number of bits in p->btreeMask -** corresponding to btrees that use shared cache. Then the runtime of -** this routine is N*N. But as N is rarely more than 1, this should not -** be a problem.
-*/ -SQLITE_PRIVATE void sqlite3VdbeEnter(Vdbe *p){ - int i; - yDbMask mask; - sqlite3 *db; - Db *aDb; - int nDb; - if( p->lockMask==0 ) return; /* The common case */ - db = p->db; - aDb = db->aDb; - nDb = db->nDb; - for(i=0, mask=1; ilockMask)!=0 && ALWAYS(aDb[i].pBt!=0) ){ - sqlite3BtreeEnter(aDb[i].pBt); - } - } -} -#endif - -#if !defined(SQLITE_OMIT_SHARED_CACHE) && SQLITE_THREADSAFE>0 -/* -** Unlock all of the btrees previously locked by a call to sqlite3VdbeEnter(). -*/ -SQLITE_PRIVATE void sqlite3VdbeLeave(Vdbe *p){ - int i; - yDbMask mask; - sqlite3 *db; - Db *aDb; - int nDb; - if( p->lockMask==0 ) return; /* The common case */ - db = p->db; - aDb = db->aDb; - nDb = db->nDb; - for(i=0, mask=1; ilockMask)!=0 && ALWAYS(aDb[i].pBt!=0) ){ - sqlite3BtreeLeave(aDb[i].pBt); - } - } -} -#endif - -#if defined(VDBE_PROFILE) || defined(SQLITE_DEBUG) -/* -** Print a single opcode. This routine is used for debugging only. -*/ -SQLITE_PRIVATE void sqlite3VdbePrintOp(FILE *pOut, int pc, Op *pOp){ - char *zP4; - char zPtr[50]; - char zCom[100]; - static const char *zFormat1 = "%4d %-13s %4d %4d %4d %-13s %.2X %s\n"; - if( pOut==0 ) pOut = stdout; - zP4 = displayP4(pOp, zPtr, sizeof(zPtr)); -#ifdef SQLITE_ENABLE_EXPLAIN_COMMENTS - displayComment(pOp, zP4, zCom, sizeof(zCom)); -#else - zCom[0] = 0; -#endif - /* NB: The sqlite3OpcodeName() function is implemented by code created - ** by the mkopcodeh.awk and mkopcodec.awk scripts which extract the - ** information from the vdbe.c source text */ - fprintf(pOut, zFormat1, pc, - sqlite3OpcodeName(pOp->opcode), pOp->p1, pOp->p2, pOp->p3, zP4, pOp->p5, - zCom - ); - fflush(pOut); -} -#endif - -/* -** Release an array of N Mem elements -*/ -static void releaseMemArray(Mem *p, int N){ - if( p && N ){ - Mem *pEnd; - sqlite3 *db = p->db; - u8 malloc_failed = db->mallocFailed; - if( db->pnBytesFreed ){ - for(pEnd=&p[N]; pzMalloc); - } - return; - } - for(pEnd=&p[N]; pflags & MEM_Agg ); - testcase( p->flags & MEM_Dyn ); - testcase( p->flags & MEM_Frame ); - testcase( p->flags & MEM_RowSet ); - if( p->flags&(MEM_Agg|MEM_Dyn|MEM_Frame|MEM_RowSet) ){ - sqlite3VdbeMemRelease(p); - }else if( p->zMalloc ){ - sqlite3DbFree(db, p->zMalloc); - p->zMalloc = 0; - } - - p->flags = MEM_Undefined; - } - db->mallocFailed = malloc_failed; - } -} - -/* -** Delete a VdbeFrame object and its contents. VdbeFrame objects are -** allocated by the OP_Program opcode in sqlite3VdbeExec(). -*/ -SQLITE_PRIVATE void sqlite3VdbeFrameDelete(VdbeFrame *p){ - int i; - Mem *aMem = VdbeFrameMem(p); - VdbeCursor **apCsr = (VdbeCursor **)&aMem[p->nChildMem]; - for(i=0; inChildCsr; i++){ - sqlite3VdbeFreeCursor(p->v, apCsr[i]); - } - releaseMemArray(aMem, p->nChildMem); - sqlite3DbFree(p->v->db, p); -} - -#ifndef SQLITE_OMIT_EXPLAIN -/* -** Give a listing of the program in the virtual machine. -** -** The interface is the same as sqlite3VdbeExec(). But instead of -** running the code, it invokes the callback once for each instruction. -** This feature is used to implement "EXPLAIN". -** -** When p->explain==1, each instruction is listed. When -** p->explain==2, only OP_Explain instructions are listed and these -** are shown in a different format. p->explain==2 is used to implement -** EXPLAIN QUERY PLAN. -** -** When p->explain==1, first the main program is listed, then each of -** the trigger subprograms are listed one by one. 
-*/ -SQLITE_PRIVATE int sqlite3VdbeList( - Vdbe *p /* The VDBE */ -){ - int nRow; /* Stop when row count reaches this */ - int nSub = 0; /* Number of sub-vdbes seen so far */ - SubProgram **apSub = 0; /* Array of sub-vdbes */ - Mem *pSub = 0; /* Memory cell hold array of subprogs */ - sqlite3 *db = p->db; /* The database connection */ - int i; /* Loop counter */ - int rc = SQLITE_OK; /* Return code */ - Mem *pMem = &p->aMem[1]; /* First Mem of result set */ - - assert( p->explain ); - assert( p->magic==VDBE_MAGIC_RUN ); - assert( p->rc==SQLITE_OK || p->rc==SQLITE_BUSY || p->rc==SQLITE_NOMEM ); - - /* Even though this opcode does not use dynamic strings for - ** the result, result columns may become dynamic if the user calls - ** sqlite3_column_text16(), causing a translation to UTF-16 encoding. - */ - releaseMemArray(pMem, 8); - p->pResultSet = 0; - - if( p->rc==SQLITE_NOMEM ){ - /* This happens if a malloc() inside a call to sqlite3_column_text() or - ** sqlite3_column_text16() failed. */ - db->mallocFailed = 1; - return SQLITE_ERROR; - } - - /* When the number of output rows reaches nRow, that means the - ** listing has finished and sqlite3_step() should return SQLITE_DONE. - ** nRow is the sum of the number of rows in the main program, plus - ** the sum of the number of rows in all trigger subprograms encountered - ** so far. The nRow value will increase as new trigger subprograms are - ** encountered, but p->pc will eventually catch up to nRow. - */ - nRow = p->nOp; - if( p->explain==1 ){ - /* The first 8 memory cells are used for the result set. So we will - ** commandeer the 9th cell to use as storage for an array of pointers - ** to trigger subprograms. The VDBE is guaranteed to have at least 9 - ** cells. */ - assert( p->nMem>9 ); - pSub = &p->aMem[9]; - if( pSub->flags&MEM_Blob ){ - /* On the first call to sqlite3_step(), pSub will hold a NULL. It is - ** initialized to a BLOB by the P4_SUBPROGRAM processing logic below */ - nSub = pSub->n/sizeof(Vdbe*); - apSub = (SubProgram **)pSub->z; - } - for(i=0; inOp; - } - } - - do{ - i = p->pc++; - }while( iexplain==2 && p->aOp[i].opcode!=OP_Explain ); - if( i>=nRow ){ - p->rc = SQLITE_OK; - rc = SQLITE_DONE; - }else if( db->u1.isInterrupted ){ - p->rc = SQLITE_INTERRUPT; - rc = SQLITE_ERROR; - sqlite3SetString(&p->zErrMsg, db, "%s", sqlite3ErrStr(p->rc)); - }else{ - char *zP4; - Op *pOp; - if( inOp ){ - /* The output line number is small enough that we are still in the - ** main program. */ - pOp = &p->aOp[i]; - }else{ - /* We are currently listing subprograms. Figure out which one and - ** pick up the appropriate opcode. */ - int j; - i -= p->nOp; - for(j=0; i>=apSub[j]->nOp; j++){ - i -= apSub[j]->nOp; - } - pOp = &apSub[j]->aOp[i]; - } - if( p->explain==1 ){ - pMem->flags = MEM_Int; - pMem->u.i = i; /* Program counter */ - pMem++; - - pMem->flags = MEM_Static|MEM_Str|MEM_Term; - pMem->z = (char*)sqlite3OpcodeName(pOp->opcode); /* Opcode */ - assert( pMem->z!=0 ); - pMem->n = sqlite3Strlen30(pMem->z); - pMem->enc = SQLITE_UTF8; - pMem++; - - /* When an OP_Program opcode is encounter (the only opcode that has - ** a P4_SUBPROGRAM argument), expand the size of the array of subprograms - ** kept in p->aMem[9].z to hold the new program - assuming this subprogram - ** has not already been seen. 
- */ - if( pOp->p4type==P4_SUBPROGRAM ){ - int nByte = (nSub+1)*sizeof(SubProgram*); - int j; - for(j=0; jp4.pProgram ) break; - } - if( j==nSub && SQLITE_OK==sqlite3VdbeMemGrow(pSub, nByte, nSub!=0) ){ - apSub = (SubProgram **)pSub->z; - apSub[nSub++] = pOp->p4.pProgram; - pSub->flags |= MEM_Blob; - pSub->n = nSub*sizeof(SubProgram*); - } - } - } - - pMem->flags = MEM_Int; - pMem->u.i = pOp->p1; /* P1 */ - pMem++; - - pMem->flags = MEM_Int; - pMem->u.i = pOp->p2; /* P2 */ - pMem++; - - pMem->flags = MEM_Int; - pMem->u.i = pOp->p3; /* P3 */ - pMem++; - - if( sqlite3VdbeMemGrow(pMem, 32, 0) ){ /* P4 */ - assert( p->db->mallocFailed ); - return SQLITE_ERROR; - } - pMem->flags = MEM_Str|MEM_Term; - zP4 = displayP4(pOp, pMem->z, 32); - if( zP4!=pMem->z ){ - sqlite3VdbeMemSetStr(pMem, zP4, -1, SQLITE_UTF8, 0); - }else{ - assert( pMem->z!=0 ); - pMem->n = sqlite3Strlen30(pMem->z); - pMem->enc = SQLITE_UTF8; - } - pMem++; - - if( p->explain==1 ){ - if( sqlite3VdbeMemGrow(pMem, 4, 0) ){ - assert( p->db->mallocFailed ); - return SQLITE_ERROR; - } - pMem->flags = MEM_Str|MEM_Term; - pMem->n = 2; - sqlite3_snprintf(3, pMem->z, "%.2x", pOp->p5); /* P5 */ - pMem->enc = SQLITE_UTF8; - pMem++; - -#ifdef SQLITE_ENABLE_EXPLAIN_COMMENTS - if( sqlite3VdbeMemGrow(pMem, 500, 0) ){ - assert( p->db->mallocFailed ); - return SQLITE_ERROR; - } - pMem->flags = MEM_Str|MEM_Term; - pMem->n = displayComment(pOp, zP4, pMem->z, 500); - pMem->enc = SQLITE_UTF8; -#else - pMem->flags = MEM_Null; /* Comment */ -#endif - } - - p->nResColumn = 8 - 4*(p->explain-1); - p->pResultSet = &p->aMem[1]; - p->rc = SQLITE_OK; - rc = SQLITE_ROW; - } - return rc; -} -#endif /* SQLITE_OMIT_EXPLAIN */ - -#ifdef SQLITE_DEBUG -/* -** Print the SQL that was used to generate a VDBE program. -*/ -SQLITE_PRIVATE void sqlite3VdbePrintSql(Vdbe *p){ - const char *z = 0; - if( p->zSql ){ - z = p->zSql; - }else if( p->nOp>=1 ){ - const VdbeOp *pOp = &p->aOp[0]; - if( pOp->opcode==OP_Init && pOp->p4.z!=0 ){ - z = pOp->p4.z; - while( sqlite3Isspace(*z) ) z++; - } - } - if( z ) printf("SQL: [%s]\n", z); -} -#endif - -#if !defined(SQLITE_OMIT_TRACE) && defined(SQLITE_ENABLE_IOTRACE) -/* -** Print an IOTRACE message showing SQL content. -*/ -SQLITE_PRIVATE void sqlite3VdbeIOTraceSql(Vdbe *p){ - int nOp = p->nOp; - VdbeOp *pOp; - if( sqlite3IoTrace==0 ) return; - if( nOp<1 ) return; - pOp = &p->aOp[0]; - if( pOp->opcode==OP_Init && pOp->p4.z!=0 ){ - int i, j; - char z[1000]; - sqlite3_snprintf(sizeof(z), z, "%s", pOp->p4.z); - for(i=0; sqlite3Isspace(z[i]); i++){} - for(j=0; z[i]; i++){ - if( sqlite3Isspace(z[i]) ){ - if( z[i-1]!=' ' ){ - z[j++] = ' '; - } - }else{ - z[j++] = z[i]; - } - } - z[j] = 0; - sqlite3IoTrace("SQL %s\n", z); - } -} -#endif /* !SQLITE_OMIT_TRACE && SQLITE_ENABLE_IOTRACE */ - -/* -** Allocate space from a fixed size buffer and return a pointer to -** that space. If insufficient space is available, return NULL. -** -** The pBuf parameter is the initial value of a pointer which will -** receive the new memory. pBuf is normally NULL. If pBuf is not -** NULL, it means that memory space has already been allocated and that -** this routine should not allocate any new memory. When pBuf is not -** NULL simply return pBuf. Only allocate new memory space when pBuf -** is NULL. -** -** nByte is the number of bytes of space needed. -** -** *ppFrom points to available space and pEnd points to the end of the -** available space. When space is allocated, *ppFrom is advanced past -** the end of the allocated space. 
-** -** *pnByte is a counter of the number of bytes of space that have failed -** to allocate. If there is insufficient space in *ppFrom to satisfy the -** request, then increment *pnByte by the amount of the request. -*/ -static void *allocSpace( - void *pBuf, /* Where return pointer will be stored */ - int nByte, /* Number of bytes to allocate */ - u8 **ppFrom, /* IN/OUT: Allocate from *ppFrom */ - u8 *pEnd, /* Pointer to 1 byte past the end of *ppFrom buffer */ - int *pnByte /* If allocation cannot be made, increment *pnByte */ -){ - assert( EIGHT_BYTE_ALIGNMENT(*ppFrom) ); - if( pBuf ) return pBuf; - nByte = ROUND8(nByte); - if( &(*ppFrom)[nByte] <= pEnd ){ - pBuf = (void*)*ppFrom; - *ppFrom += nByte; - }else{ - *pnByte += nByte; - } - return pBuf; -} - -/* -** Rewind the VDBE back to the beginning in preparation for -** running it. -*/ -SQLITE_PRIVATE void sqlite3VdbeRewind(Vdbe *p){ -#if defined(SQLITE_DEBUG) || defined(VDBE_PROFILE) - int i; -#endif - assert( p!=0 ); - assert( p->magic==VDBE_MAGIC_INIT ); - - /* There should be at least one opcode. - */ - assert( p->nOp>0 ); - - /* Set the magic to VDBE_MAGIC_RUN sooner rather than later. */ - p->magic = VDBE_MAGIC_RUN; - -#ifdef SQLITE_DEBUG - for(i=1; inMem; i++){ - assert( p->aMem[i].db==p->db ); - } -#endif - p->pc = -1; - p->rc = SQLITE_OK; - p->errorAction = OE_Abort; - p->magic = VDBE_MAGIC_RUN; - p->nChange = 0; - p->cacheCtr = 1; - p->minWriteFileFormat = 255; - p->iStatement = 0; - p->nFkConstraint = 0; -#ifdef VDBE_PROFILE - for(i=0; inOp; i++){ - p->aOp[i].cnt = 0; - p->aOp[i].cycles = 0; - } -#endif -} - -/* -** Prepare a virtual machine for execution for the first time after -** creating the virtual machine. This involves things such -** as allocating stack space and initializing the program counter. -** After the VDBE has be prepped, it can be executed by one or more -** calls to sqlite3VdbeExec(). -** -** This function may be called exact once on a each virtual machine. -** After this routine is called the VM has been "packaged" and is ready -** to run. After this routine is called, futher calls to -** sqlite3VdbeAddOp() functions are prohibited. This routine disconnects -** the Vdbe from the Parse object that helped generate it so that the -** the Vdbe becomes an independent entity and the Parse object can be -** destroyed. -** -** Use the sqlite3VdbeRewind() procedure to restore a virtual machine back -** to its initial state after it has been run. -*/ -SQLITE_PRIVATE void sqlite3VdbeMakeReady( - Vdbe *p, /* The VDBE */ - Parse *pParse /* Parsing context */ -){ - sqlite3 *db; /* The database connection */ - int nVar; /* Number of parameters */ - int nMem; /* Number of VM memory registers */ - int nCursor; /* Number of cursors required */ - int nArg; /* Number of arguments in subprograms */ - int nOnce; /* Number of OP_Once instructions */ - int n; /* Loop counter */ - u8 *zCsr; /* Memory available for allocation */ - u8 *zEnd; /* First byte past allocated memory */ - int nByte; /* How much extra memory is needed */ - - assert( p!=0 ); - assert( p->nOp>0 ); - assert( pParse!=0 ); - assert( p->magic==VDBE_MAGIC_INIT ); - assert( pParse==p->pParse ); - db = p->db; - assert( db->mallocFailed==0 ); - nVar = pParse->nVar; - nMem = pParse->nMem; - nCursor = pParse->nTab; - nArg = pParse->nMaxArg; - nOnce = pParse->nOnce; - if( nOnce==0 ) nOnce = 1; /* Ensure at least one byte in p->aOnceFlag[] */ - - /* For each cursor required, also allocate a memory cell. 
Memory - ** cells (nMem+1-nCursor)..nMem, inclusive, will never be used by - ** the vdbe program. Instead they are used to allocate space for - ** VdbeCursor/BtCursor structures. The blob of memory associated with - ** cursor 0 is stored in memory cell nMem. Memory cell (nMem-1) - ** stores the blob of memory associated with cursor 1, etc. - ** - ** See also: allocateCursor(). - */ - nMem += nCursor; - - /* Allocate space for memory registers, SQL variables, VDBE cursors and - ** an array to marshal SQL function arguments in. - */ - zCsr = (u8*)&p->aOp[p->nOp]; /* Memory avaliable for allocation */ - zEnd = (u8*)&p->aOp[pParse->nOpAlloc]; /* First byte past end of zCsr[] */ - - resolveP2Values(p, &nArg); - p->usesStmtJournal = (u8)(pParse->isMultiWrite && pParse->mayAbort); - if( pParse->explain && nMem<10 ){ - nMem = 10; - } - memset(zCsr, 0, zEnd-zCsr); - zCsr += (zCsr - (u8*)0)&7; - assert( EIGHT_BYTE_ALIGNMENT(zCsr) ); - p->expired = 0; - - /* Memory for registers, parameters, cursor, etc, is allocated in two - ** passes. On the first pass, we try to reuse unused space at the - ** end of the opcode array. If we are unable to satisfy all memory - ** requirements by reusing the opcode array tail, then the second - ** pass will fill in the rest using a fresh allocation. - ** - ** This two-pass approach that reuses as much memory as possible from - ** the leftover space at the end of the opcode array can significantly - ** reduce the amount of memory held by a prepared statement. - */ - do { - nByte = 0; - p->aMem = allocSpace(p->aMem, nMem*sizeof(Mem), &zCsr, zEnd, &nByte); - p->aVar = allocSpace(p->aVar, nVar*sizeof(Mem), &zCsr, zEnd, &nByte); - p->apArg = allocSpace(p->apArg, nArg*sizeof(Mem*), &zCsr, zEnd, &nByte); - p->azVar = allocSpace(p->azVar, nVar*sizeof(char*), &zCsr, zEnd, &nByte); - p->apCsr = allocSpace(p->apCsr, nCursor*sizeof(VdbeCursor*), - &zCsr, zEnd, &nByte); - p->aOnceFlag = allocSpace(p->aOnceFlag, nOnce, &zCsr, zEnd, &nByte); - if( nByte ){ - p->pFree = sqlite3DbMallocZero(db, nByte); - } - zCsr = p->pFree; - zEnd = &zCsr[nByte]; - }while( nByte && !db->mallocFailed ); - - p->nCursor = nCursor; - p->nOnceFlag = nOnce; - if( p->aVar ){ - p->nVar = (ynVar)nVar; - for(n=0; naVar[n].flags = MEM_Null; - p->aVar[n].db = db; - } - } - if( p->azVar ){ - p->nzVar = pParse->nzVar; - memcpy(p->azVar, pParse->azVar, p->nzVar*sizeof(p->azVar[0])); - memset(pParse->azVar, 0, pParse->nzVar*sizeof(pParse->azVar[0])); - } - if( p->aMem ){ - p->aMem--; /* aMem[] goes from 1..nMem */ - p->nMem = nMem; /* not from 0..nMem-1 */ - for(n=1; n<=nMem; n++){ - p->aMem[n].flags = MEM_Undefined; - p->aMem[n].db = db; - } - } - p->explain = pParse->explain; - sqlite3VdbeRewind(p); -} - -/* -** Close a VDBE cursor and release all the resources that cursor -** happens to hold. -*/ -SQLITE_PRIVATE void sqlite3VdbeFreeCursor(Vdbe *p, VdbeCursor *pCx){ - if( pCx==0 ){ - return; - } - sqlite3VdbeSorterClose(p->db, pCx); - if( pCx->pBt ){ - sqlite3BtreeClose(pCx->pBt); - /* The pCx->pCursor will be close automatically, if it exists, by - ** the call above. */ - }else if( pCx->pCursor ){ - sqlite3BtreeCloseCursor(pCx->pCursor); - } -#ifndef SQLITE_OMIT_VIRTUALTABLE - if( pCx->pVtabCursor ){ - sqlite3_vtab_cursor *pVtabCursor = pCx->pVtabCursor; - const sqlite3_module *pModule = pVtabCursor->pVtab->pModule; - p->inVtabMethod = 1; - pModule->xClose(pVtabCursor); - p->inVtabMethod = 0; - } -#endif -} - -/* -** Copy the values stored in the VdbeFrame structure to its Vdbe. 
This -** is used, for example, when a trigger sub-program is halted to restore -** control to the main program. -*/ -SQLITE_PRIVATE int sqlite3VdbeFrameRestore(VdbeFrame *pFrame){ - Vdbe *v = pFrame->v; - v->aOnceFlag = pFrame->aOnceFlag; - v->nOnceFlag = pFrame->nOnceFlag; - v->aOp = pFrame->aOp; - v->nOp = pFrame->nOp; - v->aMem = pFrame->aMem; - v->nMem = pFrame->nMem; - v->apCsr = pFrame->apCsr; - v->nCursor = pFrame->nCursor; - v->db->lastRowid = pFrame->lastRowid; - v->nChange = pFrame->nChange; - return pFrame->pc; -} - -/* -** Close all cursors. -** -** Also release any dynamic memory held by the VM in the Vdbe.aMem memory -** cell array. This is necessary as the memory cell array may contain -** pointers to VdbeFrame objects, which may in turn contain pointers to -** open cursors. -*/ -static void closeAllCursors(Vdbe *p){ - if( p->pFrame ){ - VdbeFrame *pFrame; - for(pFrame=p->pFrame; pFrame->pParent; pFrame=pFrame->pParent); - sqlite3VdbeFrameRestore(pFrame); - } - p->pFrame = 0; - p->nFrame = 0; - - if( p->apCsr ){ - int i; - for(i=0; i<p->nCursor; i++){ - VdbeCursor *pC = p->apCsr[i]; - if( pC ){ - sqlite3VdbeFreeCursor(p, pC); - p->apCsr[i] = 0; - } - } - } - if( p->aMem ){ - releaseMemArray(&p->aMem[1], p->nMem); - } - while( p->pDelFrame ){ - VdbeFrame *pDel = p->pDelFrame; - p->pDelFrame = pDel->pParent; - sqlite3VdbeFrameDelete(pDel); - } - - /* Delete any auxdata allocations made by the VM */ - sqlite3VdbeDeleteAuxData(p, -1, 0); - assert( p->pAuxData==0 ); -} - -/* -** Clean up the VM after execution. -** -** This routine will automatically close any cursors, lists, and/or -** sorters that were left open. It also deletes the values of -** variables in the aVar[] array. -*/ -static void Cleanup(Vdbe *p){ - sqlite3 *db = p->db; - -#ifdef SQLITE_DEBUG - /* Execute assert() statements to ensure that the Vdbe.apCsr[] and - ** Vdbe.aMem[] arrays have already been cleaned up. */ - int i; - if( p->apCsr ) for(i=0; i<p->nCursor; i++) assert( p->apCsr[i]==0 ); - if( p->aMem ){ - for(i=1; i<=p->nMem; i++) assert( p->aMem[i].flags==MEM_Undefined ); - } -#endif - - sqlite3DbFree(db, p->zErrMsg); - p->zErrMsg = 0; - p->pResultSet = 0; -} - -/* -** Set the number of result columns that will be returned by this SQL -** statement. This is now set at compile time, rather than during -** execution of the vdbe program so that sqlite3_column_count() can -** be called on an SQL statement before sqlite3_step(). -*/ -SQLITE_PRIVATE void sqlite3VdbeSetNumCols(Vdbe *p, int nResColumn){ - Mem *pColName; - int n; - sqlite3 *db = p->db; - - releaseMemArray(p->aColName, p->nResColumn*COLNAME_N); - sqlite3DbFree(db, p->aColName); - n = nResColumn*COLNAME_N; - p->nResColumn = (u16)nResColumn; - p->aColName = pColName = (Mem*)sqlite3DbMallocZero(db, sizeof(Mem)*n ); - if( p->aColName==0 ) return; - while( n-- > 0 ){ - pColName->flags = MEM_Null; - pColName->db = p->db; - pColName++; - } -} - -/* -** Set the name of the idx'th column to be returned by the SQL statement. -** zName must be a pointer to a nul terminated string. -** -** This call must be made after a call to sqlite3VdbeSetNumCols(). -** -** The final parameter, xDel, must be one of SQLITE_DYNAMIC, SQLITE_STATIC -** or SQLITE_TRANSIENT. If it is SQLITE_DYNAMIC, then the buffer pointed -** to by zName will be freed by sqlite3DbFree() when the vdbe is destroyed.
-*/ -SQLITE_PRIVATE int sqlite3VdbeSetColName( - Vdbe *p, /* Vdbe being configured */ - int idx, /* Index of column zName applies to */ - int var, /* One of the COLNAME_* constants */ - const char *zName, /* Pointer to buffer containing name */ - void (*xDel)(void*) /* Memory management strategy for zName */ -){ - int rc; - Mem *pColName; - assert( idxnResColumn ); - assert( vardb->mallocFailed ){ - assert( !zName || xDel!=SQLITE_DYNAMIC ); - return SQLITE_NOMEM; - } - assert( p->aColName!=0 ); - pColName = &(p->aColName[idx+var*p->nResColumn]); - rc = sqlite3VdbeMemSetStr(pColName, zName, -1, SQLITE_UTF8, xDel); - assert( rc!=0 || !zName || (pColName->flags&MEM_Term)!=0 ); - return rc; -} - -/* -** A read or write transaction may or may not be active on database handle -** db. If a transaction is active, commit it. If there is a -** write-transaction spanning more than one database file, this routine -** takes care of the master journal trickery. -*/ -static int vdbeCommit(sqlite3 *db, Vdbe *p){ - int i; - int nTrans = 0; /* Number of databases with an active write-transaction */ - int rc = SQLITE_OK; - int needXcommit = 0; - -#ifdef SQLITE_OMIT_VIRTUALTABLE - /* With this option, sqlite3VtabSync() is defined to be simply - ** SQLITE_OK so p is not used. - */ - UNUSED_PARAMETER(p); -#endif - - /* Before doing anything else, call the xSync() callback for any - ** virtual module tables written in this transaction. This has to - ** be done before determining whether a master journal file is - ** required, as an xSync() callback may add an attached database - ** to the transaction. - */ - rc = sqlite3VtabSync(db, p); - - /* This loop determines (a) if the commit hook should be invoked and - ** (b) how many database files have open write transactions, not - ** including the temp database. (b) is important because if more than - ** one database file has an open write transaction, a master journal - ** file is required for an atomic commit. - */ - for(i=0; rc==SQLITE_OK && inDb; i++){ - Btree *pBt = db->aDb[i].pBt; - if( sqlite3BtreeIsInTrans(pBt) ){ - needXcommit = 1; - if( i!=1 ) nTrans++; - sqlite3BtreeEnter(pBt); - rc = sqlite3PagerExclusiveLock(sqlite3BtreePager(pBt)); - sqlite3BtreeLeave(pBt); - } - } - if( rc!=SQLITE_OK ){ - return rc; - } - - /* If there are any write-transactions at all, invoke the commit hook */ - if( needXcommit && db->xCommitCallback ){ - rc = db->xCommitCallback(db->pCommitArg); - if( rc ){ - return SQLITE_CONSTRAINT_COMMITHOOK; - } - } - - /* The simple case - no more than one database file (not counting the - ** TEMP database) has a transaction active. There is no need for the - ** master-journal. - ** - ** If the return value of sqlite3BtreeGetFilename() is a zero length - ** string, it means the main database is :memory: or a temp file. In - ** that case we do not support atomic multi-file commits, so use the - ** simple case then too. - */ - if( 0==sqlite3Strlen30(sqlite3BtreeGetFilename(db->aDb[0].pBt)) - || nTrans<=1 - ){ - for(i=0; rc==SQLITE_OK && inDb; i++){ - Btree *pBt = db->aDb[i].pBt; - if( pBt ){ - rc = sqlite3BtreeCommitPhaseOne(pBt, 0); - } - } - - /* Do the commit only if all databases successfully complete phase 1. - ** If one of the BtreeCommitPhaseOne() calls fails, this indicates an - ** IO error while deleting or truncating a journal file. It is unlikely, - ** but could happen. In this case abandon processing and return the error. 
- */ - for(i=0; rc==SQLITE_OK && inDb; i++){ - Btree *pBt = db->aDb[i].pBt; - if( pBt ){ - rc = sqlite3BtreeCommitPhaseTwo(pBt, 0); - } - } - if( rc==SQLITE_OK ){ - sqlite3VtabCommit(db); - } - } - - /* The complex case - There is a multi-file write-transaction active. - ** This requires a master journal file to ensure the transaction is - ** committed atomicly. - */ -#ifndef SQLITE_OMIT_DISKIO - else{ - sqlite3_vfs *pVfs = db->pVfs; - int needSync = 0; - char *zMaster = 0; /* File-name for the master journal */ - char const *zMainFile = sqlite3BtreeGetFilename(db->aDb[0].pBt); - sqlite3_file *pMaster = 0; - i64 offset = 0; - int res; - int retryCount = 0; - int nMainFile; - - /* Select a master journal file name */ - nMainFile = sqlite3Strlen30(zMainFile); - zMaster = sqlite3MPrintf(db, "%s-mjXXXXXX9XXz", zMainFile); - if( zMaster==0 ) return SQLITE_NOMEM; - do { - u32 iRandom; - if( retryCount ){ - if( retryCount>100 ){ - sqlite3_log(SQLITE_FULL, "MJ delete: %s", zMaster); - sqlite3OsDelete(pVfs, zMaster, 0); - break; - }else if( retryCount==1 ){ - sqlite3_log(SQLITE_FULL, "MJ collide: %s", zMaster); - } - } - retryCount++; - sqlite3_randomness(sizeof(iRandom), &iRandom); - sqlite3_snprintf(13, &zMaster[nMainFile], "-mj%06X9%02X", - (iRandom>>8)&0xffffff, iRandom&0xff); - /* The antipenultimate character of the master journal name must - ** be "9" to avoid name collisions when using 8+3 filenames. */ - assert( zMaster[sqlite3Strlen30(zMaster)-3]=='9' ); - sqlite3FileSuffix3(zMainFile, zMaster); - rc = sqlite3OsAccess(pVfs, zMaster, SQLITE_ACCESS_EXISTS, &res); - }while( rc==SQLITE_OK && res ); - if( rc==SQLITE_OK ){ - /* Open the master journal. */ - rc = sqlite3OsOpenMalloc(pVfs, zMaster, &pMaster, - SQLITE_OPEN_READWRITE|SQLITE_OPEN_CREATE| - SQLITE_OPEN_EXCLUSIVE|SQLITE_OPEN_MASTER_JOURNAL, 0 - ); - } - if( rc!=SQLITE_OK ){ - sqlite3DbFree(db, zMaster); - return rc; - } - - /* Write the name of each database file in the transaction into the new - ** master journal file. If an error occurs at this point close - ** and delete the master journal file. All the individual journal files - ** still have 'null' as the master journal pointer, so they will roll - ** back independently if a failure occurs. - */ - for(i=0; inDb; i++){ - Btree *pBt = db->aDb[i].pBt; - if( sqlite3BtreeIsInTrans(pBt) ){ - char const *zFile = sqlite3BtreeGetJournalname(pBt); - if( zFile==0 ){ - continue; /* Ignore TEMP and :memory: databases */ - } - assert( zFile[0]!=0 ); - if( !needSync && !sqlite3BtreeSyncDisabled(pBt) ){ - needSync = 1; - } - rc = sqlite3OsWrite(pMaster, zFile, sqlite3Strlen30(zFile)+1, offset); - offset += sqlite3Strlen30(zFile)+1; - if( rc!=SQLITE_OK ){ - sqlite3OsCloseFree(pMaster); - sqlite3OsDelete(pVfs, zMaster, 0); - sqlite3DbFree(db, zMaster); - return rc; - } - } - } - - /* Sync the master journal file. If the IOCAP_SEQUENTIAL device - ** flag is set this is not required. - */ - if( needSync - && 0==(sqlite3OsDeviceCharacteristics(pMaster)&SQLITE_IOCAP_SEQUENTIAL) - && SQLITE_OK!=(rc = sqlite3OsSync(pMaster, SQLITE_SYNC_NORMAL)) - ){ - sqlite3OsCloseFree(pMaster); - sqlite3OsDelete(pVfs, zMaster, 0); - sqlite3DbFree(db, zMaster); - return rc; - } - - /* Sync all the db files involved in the transaction. The same call - ** sets the master journal pointer in each individual journal. If - ** an error occurs here, do not delete the master journal file. 
- ** - ** If the error occurs during the first call to - ** sqlite3BtreeCommitPhaseOne(), then there is a chance that the - ** master journal file will be orphaned. But we cannot delete it, - ** in case the master journal file name was written into the journal - ** file before the failure occurred. - */ - for(i=0; rc==SQLITE_OK && inDb; i++){ - Btree *pBt = db->aDb[i].pBt; - if( pBt ){ - rc = sqlite3BtreeCommitPhaseOne(pBt, zMaster); - } - } - sqlite3OsCloseFree(pMaster); - assert( rc!=SQLITE_BUSY ); - if( rc!=SQLITE_OK ){ - sqlite3DbFree(db, zMaster); - return rc; - } - - /* Delete the master journal file. This commits the transaction. After - ** doing this the directory is synced again before any individual - ** transaction files are deleted. - */ - rc = sqlite3OsDelete(pVfs, zMaster, 1); - sqlite3DbFree(db, zMaster); - zMaster = 0; - if( rc ){ - return rc; - } - - /* All files and directories have already been synced, so the following - ** calls to sqlite3BtreeCommitPhaseTwo() are only closing files and - ** deleting or truncating journals. If something goes wrong while - ** this is happening we don't really care. The integrity of the - ** transaction is already guaranteed, but some stray 'cold' journals - ** may be lying around. Returning an error code won't help matters. - */ - disable_simulated_io_errors(); - sqlite3BeginBenignMalloc(); - for(i=0; inDb; i++){ - Btree *pBt = db->aDb[i].pBt; - if( pBt ){ - sqlite3BtreeCommitPhaseTwo(pBt, 1); - } - } - sqlite3EndBenignMalloc(); - enable_simulated_io_errors(); - - sqlite3VtabCommit(db); - } -#endif - - return rc; -} - -/* -** This routine checks that the sqlite3.nVdbeActive count variable -** matches the number of vdbe's in the list sqlite3.pVdbe that are -** currently active. An assertion fails if the two counts do not match. -** This is an internal self-check only - it is not an essential processing -** step. -** -** This is a no-op if NDEBUG is defined. -*/ -#ifndef NDEBUG -static void checkActiveVdbeCnt(sqlite3 *db){ - Vdbe *p; - int cnt = 0; - int nWrite = 0; - int nRead = 0; - p = db->pVdbe; - while( p ){ - if( p->magic==VDBE_MAGIC_RUN && p->pc>=0 ){ - cnt++; - if( p->readOnly==0 ) nWrite++; - if( p->bIsReader ) nRead++; - } - p = p->pNext; - } - assert( cnt==db->nVdbeActive ); - assert( nWrite==db->nVdbeWrite ); - assert( nRead==db->nVdbeRead ); -} -#else -#define checkActiveVdbeCnt(x) -#endif - -/* -** If the Vdbe passed as the first argument opened a statement-transaction, -** close it now. Argument eOp must be either SAVEPOINT_ROLLBACK or -** SAVEPOINT_RELEASE. If it is SAVEPOINT_ROLLBACK, then the statement -** transaction is rolled back. If eOp is SAVEPOINT_RELEASE, then the -** statement transaction is committed. -** -** If an IO error occurs, an SQLITE_IOERR_XXX error code is returned. -** Otherwise SQLITE_OK. -*/ -SQLITE_PRIVATE int sqlite3VdbeCloseStatement(Vdbe *p, int eOp){ - sqlite3 *const db = p->db; - int rc = SQLITE_OK; - - /* If p->iStatement is greater than zero, then this Vdbe opened a - ** statement transaction that should be closed here. The only exception - ** is that an IO error may have occurred, causing an emergency rollback. - ** In this case (db->nStatement==0), and there is nothing to do. 
- */ - if( db->nStatement && p->iStatement ){ - int i; - const int iSavepoint = p->iStatement-1; - - assert( eOp==SAVEPOINT_ROLLBACK || eOp==SAVEPOINT_RELEASE); - assert( db->nStatement>0 ); - assert( p->iStatement==(db->nStatement+db->nSavepoint) ); - - for(i=0; inDb; i++){ - int rc2 = SQLITE_OK; - Btree *pBt = db->aDb[i].pBt; - if( pBt ){ - if( eOp==SAVEPOINT_ROLLBACK ){ - rc2 = sqlite3BtreeSavepoint(pBt, SAVEPOINT_ROLLBACK, iSavepoint); - } - if( rc2==SQLITE_OK ){ - rc2 = sqlite3BtreeSavepoint(pBt, SAVEPOINT_RELEASE, iSavepoint); - } - if( rc==SQLITE_OK ){ - rc = rc2; - } - } - } - db->nStatement--; - p->iStatement = 0; - - if( rc==SQLITE_OK ){ - if( eOp==SAVEPOINT_ROLLBACK ){ - rc = sqlite3VtabSavepoint(db, SAVEPOINT_ROLLBACK, iSavepoint); - } - if( rc==SQLITE_OK ){ - rc = sqlite3VtabSavepoint(db, SAVEPOINT_RELEASE, iSavepoint); - } - } - - /* If the statement transaction is being rolled back, also restore the - ** database handles deferred constraint counter to the value it had when - ** the statement transaction was opened. */ - if( eOp==SAVEPOINT_ROLLBACK ){ - db->nDeferredCons = p->nStmtDefCons; - db->nDeferredImmCons = p->nStmtDefImmCons; - } - } - return rc; -} - -/* -** This function is called when a transaction opened by the database -** handle associated with the VM passed as an argument is about to be -** committed. If there are outstanding deferred foreign key constraint -** violations, return SQLITE_ERROR. Otherwise, SQLITE_OK. -** -** If there are outstanding FK violations and this function returns -** SQLITE_ERROR, set the result of the VM to SQLITE_CONSTRAINT_FOREIGNKEY -** and write an error message to it. Then return SQLITE_ERROR. -*/ -#ifndef SQLITE_OMIT_FOREIGN_KEY -SQLITE_PRIVATE int sqlite3VdbeCheckFk(Vdbe *p, int deferred){ - sqlite3 *db = p->db; - if( (deferred && (db->nDeferredCons+db->nDeferredImmCons)>0) - || (!deferred && p->nFkConstraint>0) - ){ - p->rc = SQLITE_CONSTRAINT_FOREIGNKEY; - p->errorAction = OE_Abort; - sqlite3SetString(&p->zErrMsg, db, "FOREIGN KEY constraint failed"); - return SQLITE_ERROR; - } - return SQLITE_OK; -} -#endif - -/* -** This routine is called the when a VDBE tries to halt. If the VDBE -** has made changes and is in autocommit mode, then commit those -** changes. If a rollback is needed, then do the rollback. -** -** This routine is the only way to move the state of a VM from -** SQLITE_MAGIC_RUN to SQLITE_MAGIC_HALT. It is harmless to -** call this on a VM that is in the SQLITE_MAGIC_HALT state. -** -** Return an error code. If the commit could not complete because of -** lock contention, return SQLITE_BUSY. If SQLITE_BUSY is returned, it -** means the close did not happen and needs to be repeated. -*/ -SQLITE_PRIVATE int sqlite3VdbeHalt(Vdbe *p){ - int rc; /* Used to store transient return codes */ - sqlite3 *db = p->db; - - /* This function contains the logic that determines if a statement or - ** transaction will be committed or rolled back as a result of the - ** execution of this virtual machine. - ** - ** If any of the following errors occur: - ** - ** SQLITE_NOMEM - ** SQLITE_IOERR - ** SQLITE_FULL - ** SQLITE_INTERRUPT - ** - ** Then the internal cache might have been left in an inconsistent - ** state. We need to rollback the statement transaction, if there is - ** one, or the complete transaction if there is no statement transaction. 
- */ - - if( p->db->mallocFailed ){ - p->rc = SQLITE_NOMEM; - } - if( p->aOnceFlag ) memset(p->aOnceFlag, 0, p->nOnceFlag); - closeAllCursors(p); - if( p->magic!=VDBE_MAGIC_RUN ){ - return SQLITE_OK; - } - checkActiveVdbeCnt(db); - - /* No commit or rollback needed if the program never started or if the - ** SQL statement does not read or write a database file. */ - if( p->pc>=0 && p->bIsReader ){ - int mrc; /* Primary error code from p->rc */ - int eStatementOp = 0; - int isSpecialError; /* Set to true if a 'special' error */ - - /* Lock all btrees used by the statement */ - sqlite3VdbeEnter(p); - - /* Check for one of the special errors */ - mrc = p->rc & 0xff; - assert( p->rc!=SQLITE_IOERR_BLOCKED ); /* This error no longer exists */ - isSpecialError = mrc==SQLITE_NOMEM || mrc==SQLITE_IOERR - || mrc==SQLITE_INTERRUPT || mrc==SQLITE_FULL; - if( isSpecialError ){ - /* If the query was read-only and the error code is SQLITE_INTERRUPT, - ** no rollback is necessary. Otherwise, at least a savepoint - ** transaction must be rolled back to restore the database to a - ** consistent state. - ** - ** Even if the statement is read-only, it is important to perform - ** a statement or transaction rollback operation. If the error - ** occurred while writing to the journal, sub-journal or database - ** file as part of an effort to free up cache space (see function - ** pagerStress() in pager.c), the rollback is required to restore - ** the pager to a consistent state. - */ - if( !p->readOnly || mrc!=SQLITE_INTERRUPT ){ - if( (mrc==SQLITE_NOMEM || mrc==SQLITE_FULL) && p->usesStmtJournal ){ - eStatementOp = SAVEPOINT_ROLLBACK; - }else{ - /* We are forced to roll back the active transaction. Before doing - ** so, abort any other statements this handle currently has active. - */ - sqlite3RollbackAll(db, SQLITE_ABORT_ROLLBACK); - sqlite3CloseSavepoints(db); - db->autoCommit = 1; - } - } - } - - /* Check for immediate foreign key violations. */ - if( p->rc==SQLITE_OK ){ - sqlite3VdbeCheckFk(p, 0); - } - - /* If the auto-commit flag is set and this is the only active writer - ** VM, then we do either a commit or rollback of the current transaction. - ** - ** Note: This block also runs if one of the special errors handled - ** above has occurred. - */ - if( !sqlite3VtabInSync(db) - && db->autoCommit - && db->nVdbeWrite==(p->readOnly==0) - ){ - if( p->rc==SQLITE_OK || (p->errorAction==OE_Fail && !isSpecialError) ){ - rc = sqlite3VdbeCheckFk(p, 1); - if( rc!=SQLITE_OK ){ - if( NEVER(p->readOnly) ){ - sqlite3VdbeLeave(p); - return SQLITE_ERROR; - } - rc = SQLITE_CONSTRAINT_FOREIGNKEY; - }else{ - /* The auto-commit flag is true, the vdbe program was successful - ** or hit an 'OR FAIL' constraint and there are no deferred foreign - ** key constraints to hold up the transaction. This means a commit - ** is required. 
*/ - rc = vdbeCommit(db, p); - } - if( rc==SQLITE_BUSY && p->readOnly ){ - sqlite3VdbeLeave(p); - return SQLITE_BUSY; - }else if( rc!=SQLITE_OK ){ - p->rc = rc; - sqlite3RollbackAll(db, SQLITE_OK); - }else{ - db->nDeferredCons = 0; - db->nDeferredImmCons = 0; - db->flags &= ~SQLITE_DeferFKs; - sqlite3CommitInternalChanges(db); - } - }else{ - sqlite3RollbackAll(db, SQLITE_OK); - } - db->nStatement = 0; - }else if( eStatementOp==0 ){ - if( p->rc==SQLITE_OK || p->errorAction==OE_Fail ){ - eStatementOp = SAVEPOINT_RELEASE; - }else if( p->errorAction==OE_Abort ){ - eStatementOp = SAVEPOINT_ROLLBACK; - }else{ - sqlite3RollbackAll(db, SQLITE_ABORT_ROLLBACK); - sqlite3CloseSavepoints(db); - db->autoCommit = 1; - } - } - - /* If eStatementOp is non-zero, then a statement transaction needs to - ** be committed or rolled back. Call sqlite3VdbeCloseStatement() to - ** do so. If this operation returns an error, and the current statement - ** error code is SQLITE_OK or SQLITE_CONSTRAINT, then promote the - ** current statement error code. - */ - if( eStatementOp ){ - rc = sqlite3VdbeCloseStatement(p, eStatementOp); - if( rc ){ - if( p->rc==SQLITE_OK || (p->rc&0xff)==SQLITE_CONSTRAINT ){ - p->rc = rc; - sqlite3DbFree(db, p->zErrMsg); - p->zErrMsg = 0; - } - sqlite3RollbackAll(db, SQLITE_ABORT_ROLLBACK); - sqlite3CloseSavepoints(db); - db->autoCommit = 1; - } - } - - /* If this was an INSERT, UPDATE or DELETE and no statement transaction - ** has been rolled back, update the database connection change-counter. - */ - if( p->changeCntOn ){ - if( eStatementOp!=SAVEPOINT_ROLLBACK ){ - sqlite3VdbeSetChanges(db, p->nChange); - }else{ - sqlite3VdbeSetChanges(db, 0); - } - p->nChange = 0; - } - - /* Release the locks */ - sqlite3VdbeLeave(p); - } - - /* We have successfully halted and closed the VM. Record this fact. */ - if( p->pc>=0 ){ - db->nVdbeActive--; - if( !p->readOnly ) db->nVdbeWrite--; - if( p->bIsReader ) db->nVdbeRead--; - assert( db->nVdbeActive>=db->nVdbeRead ); - assert( db->nVdbeRead>=db->nVdbeWrite ); - assert( db->nVdbeWrite>=0 ); - } - p->magic = VDBE_MAGIC_HALT; - checkActiveVdbeCnt(db); - if( p->db->mallocFailed ){ - p->rc = SQLITE_NOMEM; - } - - /* If the auto-commit flag is set to true, then any locks that were held - ** by connection db have now been released. Call sqlite3ConnectionUnlocked() - ** to invoke any required unlock-notify callbacks. - */ - if( db->autoCommit ){ - sqlite3ConnectionUnlocked(db); - } - - assert( db->nVdbeActive>0 || db->autoCommit==0 || db->nStatement==0 ); - return (p->rc==SQLITE_BUSY ? SQLITE_BUSY : SQLITE_OK); -} - - -/* -** Each VDBE holds the result of the most recent sqlite3_step() call -** in p->rc. This routine sets that result back to SQLITE_OK. -*/ -SQLITE_PRIVATE void sqlite3VdbeResetStepResult(Vdbe *p){ - p->rc = SQLITE_OK; -} - -/* -** Copy the error code and error message belonging to the VDBE passed -** as the first argument to its database handle (so that they will be -** returned by calls to sqlite3_errcode() and sqlite3_errmsg()). -** -** This function does not clear the VDBE error code or message, just -** copies them to the database handle. 
-*/ -SQLITE_PRIVATE int sqlite3VdbeTransferError(Vdbe *p){ - sqlite3 *db = p->db; - int rc = p->rc; - if( p->zErrMsg ){ - u8 mallocFailed = db->mallocFailed; - sqlite3BeginBenignMalloc(); - if( db->pErr==0 ) db->pErr = sqlite3ValueNew(db); - sqlite3ValueSetStr(db->pErr, -1, p->zErrMsg, SQLITE_UTF8, SQLITE_TRANSIENT); - sqlite3EndBenignMalloc(); - db->mallocFailed = mallocFailed; - db->errCode = rc; - }else{ - sqlite3Error(db, rc, 0); - } - return rc; -} - -#ifdef SQLITE_ENABLE_SQLLOG -/* -** If an SQLITE_CONFIG_SQLLOG hook is registered and the VM has been run, -** invoke it. -*/ -static void vdbeInvokeSqllog(Vdbe *v){ - if( sqlite3GlobalConfig.xSqllog && v->rc==SQLITE_OK && v->zSql && v->pc>=0 ){ - char *zExpanded = sqlite3VdbeExpandSql(v, v->zSql); - assert( v->db->init.busy==0 ); - if( zExpanded ){ - sqlite3GlobalConfig.xSqllog( - sqlite3GlobalConfig.pSqllogArg, v->db, zExpanded, 1 - ); - sqlite3DbFree(v->db, zExpanded); - } - } -} -#else -# define vdbeInvokeSqllog(x) -#endif - -/* -** Clean up a VDBE after execution but do not delete the VDBE just yet. -** Write any error messages into *pzErrMsg. Return the result code. -** -** After this routine is run, the VDBE should be ready to be executed -** again. -** -** To look at it another way, this routine resets the state of the -** virtual machine from VDBE_MAGIC_RUN or VDBE_MAGIC_HALT back to -** VDBE_MAGIC_INIT. -*/ -SQLITE_PRIVATE int sqlite3VdbeReset(Vdbe *p){ - sqlite3 *db; - db = p->db; - - /* If the VM did not run to completion or if it encountered an - ** error, then it might not have been halted properly. So halt - ** it now. - */ - sqlite3VdbeHalt(p); - - /* If the VDBE has be run even partially, then transfer the error code - ** and error message from the VDBE into the main database structure. But - ** if the VDBE has just been set to run but has not actually executed any - ** instructions yet, leave the main database error information unchanged. - */ - if( p->pc>=0 ){ - vdbeInvokeSqllog(p); - sqlite3VdbeTransferError(p); - sqlite3DbFree(db, p->zErrMsg); - p->zErrMsg = 0; - if( p->runOnlyOnce ) p->expired = 1; - }else if( p->rc && p->expired ){ - /* The expired flag was set on the VDBE before the first call - ** to sqlite3_step(). For consistency (since sqlite3_step() was - ** called), set the database error in this case as well. - */ - sqlite3Error(db, p->rc, p->zErrMsg ? "%s" : 0, p->zErrMsg); - sqlite3DbFree(db, p->zErrMsg); - p->zErrMsg = 0; - } - - /* Reclaim all memory used by the VDBE - */ - Cleanup(p); - - /* Save profiling information from this VDBE run. - */ -#ifdef VDBE_PROFILE - { - FILE *out = fopen("vdbe_profile.out", "a"); - if( out ){ - int i; - fprintf(out, "---- "); - for(i=0; inOp; i++){ - fprintf(out, "%02x", p->aOp[i].opcode); - } - fprintf(out, "\n"); - if( p->zSql ){ - char c, pc = 0; - fprintf(out, "-- "); - for(i=0; (c = p->zSql[i])!=0; i++){ - if( pc=='\n' ) fprintf(out, "-- "); - putc(c, out); - pc = c; - } - if( pc!='\n' ) fprintf(out, "\n"); - } - for(i=0; inOp; i++){ - char zHdr[100]; - sqlite3_snprintf(sizeof(zHdr), zHdr, "%6u %12llu %8llu ", - p->aOp[i].cnt, - p->aOp[i].cycles, - p->aOp[i].cnt>0 ? p->aOp[i].cycles/p->aOp[i].cnt : 0 - ); - fprintf(out, "%s", zHdr); - sqlite3VdbePrintOp(out, i, &p->aOp[i]); - } - fclose(out); - } - } -#endif - p->iCurrentTime = 0; - p->magic = VDBE_MAGIC_INIT; - return p->rc & db->errMask; -} - -/* -** Clean up and delete a VDBE after execution. Return an integer which is -** the result code. Write any error message text into *pzErrMsg. 
-*/ -SQLITE_PRIVATE int sqlite3VdbeFinalize(Vdbe *p){ - int rc = SQLITE_OK; - if( p->magic==VDBE_MAGIC_RUN || p->magic==VDBE_MAGIC_HALT ){ - rc = sqlite3VdbeReset(p); - assert( (rc & p->db->errMask)==rc ); - } - sqlite3VdbeDelete(p); - return rc; -} - -/* -** If parameter iOp is less than zero, then invoke the destructor for -** all auxiliary data pointers currently cached by the VM passed as -** the first argument. -** -** Or, if iOp is greater than or equal to zero, then the destructor is -** only invoked for those auxiliary data pointers created by the user -** function invoked by the OP_Function opcode at instruction iOp of -** VM pVdbe, and only then if: -** -** * the associated function parameter is the 32nd or later (counting -** from left to right), or -** -** * the corresponding bit in argument mask is clear (where the first -** function parameter corrsponds to bit 0 etc.). -*/ -SQLITE_PRIVATE void sqlite3VdbeDeleteAuxData(Vdbe *pVdbe, int iOp, int mask){ - AuxData **pp = &pVdbe->pAuxData; - while( *pp ){ - AuxData *pAux = *pp; - if( (iOp<0) - || (pAux->iOp==iOp && (pAux->iArg>31 || !(mask & MASKBIT32(pAux->iArg)))) - ){ - testcase( pAux->iArg==31 ); - if( pAux->xDelete ){ - pAux->xDelete(pAux->pAux); - } - *pp = pAux->pNext; - sqlite3DbFree(pVdbe->db, pAux); - }else{ - pp= &pAux->pNext; - } - } -} - -/* -** Free all memory associated with the Vdbe passed as the second argument, -** except for object itself, which is preserved. -** -** The difference between this function and sqlite3VdbeDelete() is that -** VdbeDelete() also unlinks the Vdbe from the list of VMs associated with -** the database connection and frees the object itself. -*/ -SQLITE_PRIVATE void sqlite3VdbeClearObject(sqlite3 *db, Vdbe *p){ - SubProgram *pSub, *pNext; - int i; - assert( p->db==0 || p->db==db ); - releaseMemArray(p->aVar, p->nVar); - releaseMemArray(p->aColName, p->nResColumn*COLNAME_N); - for(pSub=p->pProgram; pSub; pSub=pNext){ - pNext = pSub->pNext; - vdbeFreeOpArray(db, pSub->aOp, pSub->nOp); - sqlite3DbFree(db, pSub); - } - for(i=p->nzVar-1; i>=0; i--) sqlite3DbFree(db, p->azVar[i]); - vdbeFreeOpArray(db, p->aOp, p->nOp); - sqlite3DbFree(db, p->aColName); - sqlite3DbFree(db, p->zSql); - sqlite3DbFree(db, p->pFree); -#if defined(SQLITE_ENABLE_TREE_EXPLAIN) - sqlite3DbFree(db, p->zExplain); - sqlite3DbFree(db, p->pExplain); -#endif -} - -/* -** Delete an entire VDBE. -*/ -SQLITE_PRIVATE void sqlite3VdbeDelete(Vdbe *p){ - sqlite3 *db; - - if( NEVER(p==0) ) return; - db = p->db; - assert( sqlite3_mutex_held(db->mutex) ); - sqlite3VdbeClearObject(db, p); - if( p->pPrev ){ - p->pPrev->pNext = p->pNext; - }else{ - assert( db->pVdbe==p ); - db->pVdbe = p->pNext; - } - if( p->pNext ){ - p->pNext->pPrev = p->pPrev; - } - p->magic = VDBE_MAGIC_DEAD; - p->db = 0; - sqlite3DbFree(db, p); -} - -/* -** Make sure the cursor p is ready to read or write the row to which it -** was last positioned. Return an error code if an OOM fault or I/O error -** prevents us from positioning the cursor to its correct position. -** -** If a MoveTo operation is pending on the given cursor, then do that -** MoveTo now. If no move is pending, check to see if the row has been -** deleted out from under the cursor and if it has, mark the row as -** a NULL row. -** -** If the cursor is already pointing to the correct row and that row has -** not been deleted out from under the cursor, then this routine is a no-op. 
-*/ -SQLITE_PRIVATE int sqlite3VdbeCursorMoveto(VdbeCursor *p){ - if( p->deferredMoveto ){ - int res, rc; -#ifdef SQLITE_TEST - extern int sqlite3_search_count; -#endif - assert( p->isTable ); - rc = sqlite3BtreeMovetoUnpacked(p->pCursor, 0, p->movetoTarget, 0, &res); - if( rc ) return rc; - p->lastRowid = p->movetoTarget; - if( res!=0 ) return SQLITE_CORRUPT_BKPT; - p->rowidIsValid = 1; -#ifdef SQLITE_TEST - sqlite3_search_count++; -#endif - p->deferredMoveto = 0; - p->cacheStatus = CACHE_STALE; - }else if( p->pCursor ){ - int hasMoved; - int rc = sqlite3BtreeCursorHasMoved(p->pCursor, &hasMoved); - if( rc ) return rc; - if( hasMoved ){ - p->cacheStatus = CACHE_STALE; - if( hasMoved==2 ) p->nullRow = 1; - } - } - return SQLITE_OK; -} - -/* -** The following functions: -** -** sqlite3VdbeSerialType() -** sqlite3VdbeSerialTypeLen() -** sqlite3VdbeSerialLen() -** sqlite3VdbeSerialPut() -** sqlite3VdbeSerialGet() -** -** encapsulate the code that serializes values for storage in SQLite -** data and index records. Each serialized value consists of a -** 'serial-type' and a blob of data. The serial type is an 8-byte unsigned -** integer, stored as a varint. -** -** In an SQLite index record, the serial type is stored directly before -** the blob of data that it corresponds to. In a table record, all serial -** types are stored at the start of the record, and the blobs of data at -** the end. Hence these functions allow the caller to handle the -** serial-type and data blob separately. -** -** The following table describes the various storage classes for data: -** -** serial type bytes of data type -** -------------- --------------- --------------- -** 0 0 NULL -** 1 1 signed integer -** 2 2 signed integer -** 3 3 signed integer -** 4 4 signed integer -** 5 6 signed integer -** 6 8 signed integer -** 7 8 IEEE float -** 8 0 Integer constant 0 -** 9 0 Integer constant 1 -** 10,11 reserved for expansion -** N>=12 and even (N-12)/2 BLOB -** N>=13 and odd (N-13)/2 text -** -** The 8 and 9 types were added in 3.3.0, file format 4. Prior versions -** of SQLite will not understand those serial types. -*/ - -/* -** Return the serial-type for the value stored in pMem. -*/ -SQLITE_PRIVATE u32 sqlite3VdbeSerialType(Mem *pMem, int file_format){ - int flags = pMem->flags; - int n; - - if( flags&MEM_Null ){ - return 0; - } - if( flags&MEM_Int ){ - /* Figure out whether to use 1, 2, 4, 6 or 8 bytes. */ -# define MAX_6BYTE ((((i64)0x00008000)<<32)-1) - i64 i = pMem->u.i; - u64 u; - if( i<0 ){ - if( i<(-MAX_6BYTE) ) return 6; - /* Previous test prevents: u = -(-9223372036854775808) */ - u = -i; - }else{ - u = i; - } - if( u<=127 ){ - return ((i&1)==i && file_format>=4) ? 8+(u32)u : 1; - } - if( u<=32767 ) return 2; - if( u<=8388607 ) return 3; - if( u<=2147483647 ) return 4; - if( u<=MAX_6BYTE ) return 5; - return 6; - } - if( flags&MEM_Real ){ - return 7; - } - assert( pMem->db->mallocFailed || flags&(MEM_Str|MEM_Blob) ); - n = pMem->n; - if( flags & MEM_Zero ){ - n += pMem->u.nZero; - } - assert( n>=0 ); - return ((n*2) + 12 + ((flags&MEM_Str)!=0)); -} - -/* -** Return the length of the data corresponding to the supplied serial-type. -*/ -SQLITE_PRIVATE u32 sqlite3VdbeSerialTypeLen(u32 serial_type){ - if( serial_type>=12 ){ - return (serial_type-12)/2; - }else{ - static const u8 aSize[] = { 0, 1, 2, 3, 4, 6, 8, 8, 0, 0, 0, 0 }; - return aSize[serial_type]; - } -} - -/* -** If we are on an architecture with mixed-endian floating -** points (ex: ARM7) then swap the lower 4 bytes with the -** upper 4 bytes. 
Return the result. -** -** For most architectures, this is a no-op. -** -** (later): It is reported to me that the mixed-endian problem -** on ARM7 is an issue with GCC, not with the ARM7 chip. It seems -** that early versions of GCC stored the two words of a 64-bit -** float in the wrong order. And that error has been propagated -** ever since. The blame is not necessarily with GCC, though. -** GCC might have just copying the problem from a prior compiler. -** I am also told that newer versions of GCC that follow a different -** ABI get the byte order right. -** -** Developers using SQLite on an ARM7 should compile and run their -** application using -DSQLITE_DEBUG=1 at least once. With DEBUG -** enabled, some asserts below will ensure that the byte order of -** floating point values is correct. -** -** (2007-08-30) Frank van Vugt has studied this problem closely -** and has send his findings to the SQLite developers. Frank -** writes that some Linux kernels offer floating point hardware -** emulation that uses only 32-bit mantissas instead of a full -** 48-bits as required by the IEEE standard. (This is the -** CONFIG_FPE_FASTFPE option.) On such systems, floating point -** byte swapping becomes very complicated. To avoid problems, -** the necessary byte swapping is carried out using a 64-bit integer -** rather than a 64-bit float. Frank assures us that the code here -** works for him. We, the developers, have no way to independently -** verify this, but Frank seems to know what he is talking about -** so we trust him. -*/ -#ifdef SQLITE_MIXED_ENDIAN_64BIT_FLOAT -static u64 floatSwap(u64 in){ - union { - u64 r; - u32 i[2]; - } u; - u32 t; - - u.r = in; - t = u.i[0]; - u.i[0] = u.i[1]; - u.i[1] = t; - return u.r; -} -# define swapMixedEndianFloat(X) X = floatSwap(X) -#else -# define swapMixedEndianFloat(X) -#endif - -/* -** Write the serialized data blob for the value stored in pMem into -** buf. It is assumed that the caller has allocated sufficient space. -** Return the number of bytes written. -** -** nBuf is the amount of space left in buf[]. The caller is responsible -** for allocating enough space to buf[] to hold the entire field, exclusive -** of the pMem->u.nZero bytes for a MEM_Zero value. -** -** Return the number of bytes actually written into buf[]. The number -** of bytes in the zero-filled tail is included in the return value only -** if those bytes were zeroed in buf[]. -*/ -SQLITE_PRIVATE u32 sqlite3VdbeSerialPut(u8 *buf, Mem *pMem, u32 serial_type){ - u32 len; - - /* Integer and Real */ - if( serial_type<=7 && serial_type>0 ){ - u64 v; - u32 i; - if( serial_type==7 ){ - assert( sizeof(v)==sizeof(pMem->r) ); - memcpy(&v, &pMem->r, sizeof(v)); - swapMixedEndianFloat(v); - }else{ - v = pMem->u.i; - } - len = i = sqlite3VdbeSerialTypeLen(serial_type); - while( i-- ){ - buf[i] = (u8)(v&0xFF); - v >>= 8; - } - return len; - } - - /* String or blob */ - if( serial_type>=12 ){ - assert( pMem->n + ((pMem->flags & MEM_Zero)?pMem->u.nZero:0) - == (int)sqlite3VdbeSerialTypeLen(serial_type) ); - len = pMem->n; - memcpy(buf, pMem->z, len); - return len; - } - - /* NULL or constants 0 or 1 */ - return 0; -} - -/* Input "x" is a sequence of unsigned characters that represent a -** big-endian integer. 
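
The serial-type table above determines how many bytes each value occupies on disk. The following is a small standalone sketch of the integer mapping, mirroring the thresholds in sqlite3VdbeSerialType() and sqlite3VdbeSerialTypeLen() but independent of the internal Mem and file-format plumbing:

/* Standalone sketch: map a 64-bit integer to the serial type and on-disk
** length described in the table above. Mirrors the thresholds used by
** sqlite3VdbeSerialType()/sqlite3VdbeSerialTypeLen(); not part of SQLite. */
#include <stdint.h>
#include <stdio.h>

static uint32_t intSerialType(int64_t i, int file_format){
  uint64_t u = i<0 ? -(uint64_t)i : (uint64_t)i;   /* magnitude */
  if( (i==0 || i==1) && file_format>=4 ) return 8+(uint32_t)u;  /* constants 0 and 1 */
  if( u<=127 )                return 1;  /* 1-byte signed integer */
  if( u<=32767 )              return 2;  /* 2 bytes */
  if( u<=8388607 )            return 3;  /* 3 bytes */
  if( u<=2147483647 )         return 4;  /* 4 bytes */
  if( u<=140737488355327ULL ) return 5;  /* 6 bytes (2^47 - 1) */
  return 6;                              /* 8 bytes */
}

static uint32_t serialTypeLen(uint32_t t){
  static const uint8_t aSize[] = {0,1,2,3,4,6,8,8,0,0,0,0};
  return t>=12 ? (t-12)/2 : aSize[t];
}

int main(void){
  int64_t samples[] = { 0, 1, 7, 200, 40000, -3000000, 1099511627776LL };
  for( size_t i=0; i<sizeof(samples)/sizeof(samples[0]); i++ ){
    uint32_t t = intSerialType(samples[i], 4);
    printf("%lld -> serial type %u, %u data byte(s)\n",
           (long long)samples[i], t, serialTypeLen(t));
  }
  return 0;
}
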
Return the equivalent native integer -*/ -#define ONE_BYTE_INT(x) ((i8)(x)[0]) -#define TWO_BYTE_INT(x) (256*(i8)((x)[0])|(x)[1]) -#define THREE_BYTE_INT(x) (65536*(i8)((x)[0])|((x)[1]<<8)|(x)[2]) -#define FOUR_BYTE_UINT(x) (((u32)(x)[0]<<24)|((x)[1]<<16)|((x)[2]<<8)|(x)[3]) - -/* -** Deserialize the data blob pointed to by buf as serial type serial_type -** and store the result in pMem. Return the number of bytes read. -*/ -SQLITE_PRIVATE u32 sqlite3VdbeSerialGet( - const unsigned char *buf, /* Buffer to deserialize from */ - u32 serial_type, /* Serial type to deserialize */ - Mem *pMem /* Memory cell to write value into */ -){ - u64 x; - u32 y; - switch( serial_type ){ - case 10: /* Reserved for future use */ - case 11: /* Reserved for future use */ - case 0: { /* NULL */ - pMem->flags = MEM_Null; - break; - } - case 1: { /* 1-byte signed integer */ - pMem->u.i = ONE_BYTE_INT(buf); - pMem->flags = MEM_Int; - testcase( pMem->u.i<0 ); - return 1; - } - case 2: { /* 2-byte signed integer */ - pMem->u.i = TWO_BYTE_INT(buf); - pMem->flags = MEM_Int; - testcase( pMem->u.i<0 ); - return 2; - } - case 3: { /* 3-byte signed integer */ - pMem->u.i = THREE_BYTE_INT(buf); - pMem->flags = MEM_Int; - testcase( pMem->u.i<0 ); - return 3; - } - case 4: { /* 4-byte signed integer */ - y = FOUR_BYTE_UINT(buf); - pMem->u.i = (i64)*(int*)&y; - pMem->flags = MEM_Int; - testcase( pMem->u.i<0 ); - return 4; - } - case 5: { /* 6-byte signed integer */ - pMem->u.i = FOUR_BYTE_UINT(buf+2) + (((i64)1)<<32)*TWO_BYTE_INT(buf); - pMem->flags = MEM_Int; - testcase( pMem->u.i<0 ); - return 6; - } - case 6: /* 8-byte signed integer */ - case 7: { /* IEEE floating point */ -#if !defined(NDEBUG) && !defined(SQLITE_OMIT_FLOATING_POINT) - /* Verify that integers and floating point values use the same - ** byte order. Or, that if SQLITE_MIXED_ENDIAN_64BIT_FLOAT is - ** defined that 64-bit floating point values really are mixed - ** endian. - */ - static const u64 t1 = ((u64)0x3ff00000)<<32; - static const double r1 = 1.0; - u64 t2 = t1; - swapMixedEndianFloat(t2); - assert( sizeof(r1)==sizeof(t2) && memcmp(&r1, &t2, sizeof(r1))==0 ); -#endif - x = FOUR_BYTE_UINT(buf); - y = FOUR_BYTE_UINT(buf+4); - x = (x<<32) | y; - if( serial_type==6 ){ - pMem->u.i = *(i64*)&x; - pMem->flags = MEM_Int; - testcase( pMem->u.i<0 ); - }else{ - assert( sizeof(x)==8 && sizeof(pMem->r)==8 ); - swapMixedEndianFloat(x); - memcpy(&pMem->r, &x, sizeof(x)); - pMem->flags = sqlite3IsNaN(pMem->r) ? MEM_Null : MEM_Real; - } - return 8; - } - case 8: /* Integer 0 */ - case 9: { /* Integer 1 */ - pMem->u.i = serial_type-8; - pMem->flags = MEM_Int; - return 0; - } - default: { - static const u16 aFlag[] = { MEM_Blob|MEM_Ephem, MEM_Str|MEM_Ephem }; - u32 len = (serial_type-12)/2; - pMem->z = (char *)buf; - pMem->n = len; - pMem->xDel = 0; - pMem->flags = aFlag[serial_type&1]; - return len; - } - } - return 0; -} - -/* -** This routine is used to allocate sufficient space for an UnpackedRecord -** structure large enough to be used with sqlite3VdbeRecordUnpack() if -** the first argument is a pointer to KeyInfo structure pKeyInfo. -** -** The space is either allocated using sqlite3DbMallocRaw() or from within -** the unaligned buffer passed via the second and third arguments (presumably -** stack space). If the former, then *ppFree is set to a pointer that should -** be eventually freed by the caller using sqlite3DbFree(). Or, if the -** allocation comes from the pSpace/szSpace buffer, *ppFree is set to NULL -** before returning. 
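
The ONE_BYTE_INT()/TWO_BYTE_INT() style macros above rely on casting the most significant byte to a signed 8-bit value so that the sign bit propagates through the arithmetic. A minimal standalone check of that behaviour (plain C, not part of SQLite):

/* Standalone check of big-endian signed decoding as done by the macros
** above: the high byte is cast to a signed 8-bit value so the sign
** extends through the multiply. Not part of SQLite. */
#include <assert.h>
#include <stdint.h>

static int32_t twoByteInt(const uint8_t *x){
  return 256*(int8_t)x[0] | x[1];
}

int main(void){
  const uint8_t pos[2] = { 0x01, 0x02 };  /* 0x0102 = 258 */
  const uint8_t neg[2] = { 0xFF, 0xFE };  /* -2 in 16-bit two's complement */
  assert( twoByteInt(pos)==258 );
  assert( twoByteInt(neg)==-2 );
  return 0;
}
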
-** -** If an OOM error occurs, NULL is returned. -*/ -SQLITE_PRIVATE UnpackedRecord *sqlite3VdbeAllocUnpackedRecord( - KeyInfo *pKeyInfo, /* Description of the record */ - char *pSpace, /* Unaligned space available */ - int szSpace, /* Size of pSpace[] in bytes */ - char **ppFree /* OUT: Caller should free this pointer */ -){ - UnpackedRecord *p; /* Unpacked record to return */ - int nOff; /* Increment pSpace by nOff to align it */ - int nByte; /* Number of bytes required for *p */ - - /* We want to shift the pointer pSpace up such that it is 8-byte aligned. - ** Thus, we need to calculate a value, nOff, between 0 and 7, to shift - ** it by. If pSpace is already 8-byte aligned, nOff should be zero. - */ - nOff = (8 - (SQLITE_PTR_TO_INT(pSpace) & 7)) & 7; - nByte = ROUND8(sizeof(UnpackedRecord)) + sizeof(Mem)*(pKeyInfo->nField+1); - if( nByte>szSpace+nOff ){ - p = (UnpackedRecord *)sqlite3DbMallocRaw(pKeyInfo->db, nByte); - *ppFree = (char *)p; - if( !p ) return 0; - }else{ - p = (UnpackedRecord*)&pSpace[nOff]; - *ppFree = 0; - } - - p->aMem = (Mem*)&((char*)p)[ROUND8(sizeof(UnpackedRecord))]; - assert( pKeyInfo->aSortOrder!=0 ); - p->pKeyInfo = pKeyInfo; - p->nField = pKeyInfo->nField + 1; - return p; -} - -/* -** Given the nKey-byte encoding of a record in pKey[], populate the -** UnpackedRecord structure indicated by the fourth argument with the -** contents of the decoded record. -*/ -SQLITE_PRIVATE void sqlite3VdbeRecordUnpack( - KeyInfo *pKeyInfo, /* Information about the record format */ - int nKey, /* Size of the binary record */ - const void *pKey, /* The binary record */ - UnpackedRecord *p /* Populate this structure before returning. */ -){ - const unsigned char *aKey = (const unsigned char *)pKey; - int d; - u32 idx; /* Offset in aKey[] to read from */ - u16 u; /* Unsigned loop counter */ - u32 szHdr; - Mem *pMem = p->aMem; - - p->default_rc = 0; - assert( EIGHT_BYTE_ALIGNMENT(pMem) ); - idx = getVarint32(aKey, szHdr); - d = szHdr; - u = 0; - while( idxnField && d<=nKey ){ - u32 serial_type; - - idx += getVarint32(&aKey[idx], serial_type); - pMem->enc = pKeyInfo->enc; - pMem->db = pKeyInfo->db; - /* pMem->flags = 0; // sqlite3VdbeSerialGet() will set this for us */ - pMem->zMalloc = 0; - d += sqlite3VdbeSerialGet(&aKey[d], serial_type, pMem); - pMem++; - u++; - } - assert( u<=pKeyInfo->nField + 1 ); - p->nField = u; -} - -#if SQLITE_DEBUG -/* -** This function compares two index or table record keys in the same way -** as the sqlite3VdbeRecordCompare() routine. Unlike VdbeRecordCompare(), -** this function deserializes and compares values using the -** sqlite3VdbeSerialGet() and sqlite3MemCompare() functions. It is used -** in assert() statements to ensure that the optimized code in -** sqlite3VdbeRecordCompare() returns results with these two primitives. -*/ -static int vdbeRecordCompareDebug( - int nKey1, const void *pKey1, /* Left key */ - const UnpackedRecord *pPKey2 /* Right key */ -){ - u32 d1; /* Offset into aKey[] of next data element */ - u32 idx1; /* Offset into aKey[] of next header element */ - u32 szHdr1; /* Number of bytes in header */ - int i = 0; - int rc = 0; - const unsigned char *aKey1 = (const unsigned char *)pKey1; - KeyInfo *pKeyInfo; - Mem mem1; - - pKeyInfo = pPKey2->pKeyInfo; - mem1.enc = pKeyInfo->enc; - mem1.db = pKeyInfo->db; - /* mem1.flags = 0; // Will be initialized by sqlite3VdbeSerialGet() */ - VVA_ONLY( mem1.zMalloc = 0; ) /* Only needed by assert() statements */ - - /* Compilers may complain that mem1.u.i is potentially uninitialized. 
- ** We could initialize it, as shown here, to silence those complaints. - ** But in fact, mem1.u.i will never actually be used uninitialized, and doing - ** the unnecessary initialization has a measurable negative performance - ** impact, since this routine is a very high runner. And so, we choose - ** to ignore the compiler warnings and leave this variable uninitialized. - */ - /* mem1.u.i = 0; // not needed, here to silence compiler warning */ - - idx1 = getVarint32(aKey1, szHdr1); - d1 = szHdr1; - assert( pKeyInfo->nField+pKeyInfo->nXField>=pPKey2->nField || CORRUPT_DB ); - assert( pKeyInfo->aSortOrder!=0 ); - assert( pKeyInfo->nField>0 ); - assert( idx1<=szHdr1 || CORRUPT_DB ); - do{ - u32 serial_type1; - - /* Read the serial types for the next element in each key. */ - idx1 += getVarint32( aKey1+idx1, serial_type1 ); - - /* Verify that there is enough key space remaining to avoid - ** a buffer overread. The "d1+serial_type1+2" subexpression will - ** always be greater than or equal to the amount of required key space. - ** Use that approximation to avoid the more expensive call to - ** sqlite3VdbeSerialTypeLen() in the common case. - */ - if( d1+serial_type1+2>(u32)nKey1 - && d1+sqlite3VdbeSerialTypeLen(serial_type1)>(u32)nKey1 - ){ - break; - } - - /* Extract the values to be compared. - */ - d1 += sqlite3VdbeSerialGet(&aKey1[d1], serial_type1, &mem1); - - /* Do the comparison - */ - rc = sqlite3MemCompare(&mem1, &pPKey2->aMem[i], pKeyInfo->aColl[i]); - if( rc!=0 ){ - assert( mem1.zMalloc==0 ); /* See comment below */ - if( pKeyInfo->aSortOrder[i] ){ - rc = -rc; /* Invert the result for DESC sort order. */ - } - return rc; - } - i++; - }while( idx1nField ); - - /* No memory allocation is ever used on mem1. Prove this using - ** the following assert(). If the assert() fails, it indicates a - ** memory leak and a need to call sqlite3VdbeMemRelease(&mem1). - */ - assert( mem1.zMalloc==0 ); - - /* rc==0 here means that one of the keys ran out of fields and - ** all the fields up to that point were equal. Return the the default_rc - ** value. */ - return pPKey2->default_rc; -} -#endif - -/* -** Both *pMem1 and *pMem2 contain string values. Compare the two values -** using the collation sequence pColl. As usual, return a negative , zero -** or positive value if *pMem1 is less than, equal to or greater than -** *pMem2, respectively. Similar in spirit to "rc = (*pMem1) - (*pMem2);". -*/ -static int vdbeCompareMemString( - const Mem *pMem1, - const Mem *pMem2, - const CollSeq *pColl -){ - if( pMem1->enc==pColl->enc ){ - /* The strings are already in the correct encoding. Call the - ** comparison function directly */ - return pColl->xCmp(pColl->pUser,pMem1->n,pMem1->z,pMem2->n,pMem2->z); - }else{ - int rc; - const void *v1, *v2; - int n1, n2; - Mem c1; - Mem c2; - memset(&c1, 0, sizeof(c1)); - memset(&c2, 0, sizeof(c2)); - sqlite3VdbeMemShallowCopy(&c1, pMem1, MEM_Ephem); - sqlite3VdbeMemShallowCopy(&c2, pMem2, MEM_Ephem); - v1 = sqlite3ValueText((sqlite3_value*)&c1, pColl->enc); - n1 = v1==0 ? 0 : c1.n; - v2 = sqlite3ValueText((sqlite3_value*)&c2, pColl->enc); - n2 = v2==0 ? 0 : c2.n; - rc = pColl->xCmp(pColl->pUser, n1, v1, n2, v2); - sqlite3VdbeMemRelease(&c1); - sqlite3VdbeMemRelease(&c2); - return rc; - } -} - -/* -** Compare the values contained by the two memory cells, returning -** negative, zero or positive if pMem1 is less than, equal to, or greater -** than pMem2. 
Sorting order is NULLs first, followed by numbers (integers
-** and reals) sorted numerically, followed by text ordered by the collating
-** sequence pColl and finally blobs ordered by memcmp().
-**
-** Two NULL values are considered equal by this function.
-*/
-SQLITE_PRIVATE int sqlite3MemCompare(const Mem *pMem1, const Mem *pMem2, const CollSeq *pColl){
-  int rc;
-  int f1, f2;
-  int combined_flags;
-
-  f1 = pMem1->flags;
-  f2 = pMem2->flags;
-  combined_flags = f1|f2;
-  assert( (combined_flags & MEM_RowSet)==0 );
-
-  /* If one value is NULL, it is less than the other. If both values
-  ** are NULL, return 0.
-  */
-  if( combined_flags&MEM_Null ){
-    return (f2&MEM_Null) - (f1&MEM_Null);
-  }
-
-  /* If one value is a number and the other is not, the number is less.
-  ** If both are numbers, compare as reals if one is a real, or as integers
-  ** if both values are integers.
-  */
-  if( combined_flags&(MEM_Int|MEM_Real) ){
-    double r1, r2;
-    if( (f1 & f2 & MEM_Int)!=0 ){
-      if( pMem1->u.i < pMem2->u.i ) return -1;
-      if( pMem1->u.i > pMem2->u.i ) return 1;
-      return 0;
-    }
-    if( (f1&MEM_Real)!=0 ){
-      r1 = pMem1->r;
-    }else if( (f1&MEM_Int)!=0 ){
-      r1 = (double)pMem1->u.i;
-    }else{
-      return 1;
-    }
-    if( (f2&MEM_Real)!=0 ){
-      r2 = pMem2->r;
-    }else if( (f2&MEM_Int)!=0 ){
-      r2 = (double)pMem2->u.i;
-    }else{
-      return -1;
-    }
-    if( r1<r2 ) return -1;
-    if( r1>r2 ) return 1;
-    return 0;
-  }
-
-  /* If one value is a string and the other is a blob, the string is less.
-  ** If both are strings, compare using the collating functions.
-  */
-  if( combined_flags&MEM_Str ){
-    if( (f1 & MEM_Str)==0 ){
-      return 1;
-    }
-    if( (f2 & MEM_Str)==0 ){
-      return -1;
-    }
-
-    assert( pMem1->enc==pMem2->enc );
-    assert( pMem1->enc==SQLITE_UTF8 ||
-            pMem1->enc==SQLITE_UTF16LE || pMem1->enc==SQLITE_UTF16BE );
-
-    /* The collation sequence must be defined at this point, even if
-    ** the user deletes the collation sequence after the vdbe program is
-    ** compiled (this was not always the case).
-    */
-    assert( !pColl || pColl->xCmp );
-
-    if( pColl ){
-      return vdbeCompareMemString(pMem1, pMem2, pColl);
-    }
-    /* If a NULL pointer was passed as the collate function, fall through
-    ** to the blob case and use memcmp(). */
-  }
-
-  /* Both values must be blobs. Compare using memcmp(). */
-  rc = memcmp(pMem1->z, pMem2->z, (pMem1->n>pMem2->n)?pMem2->n:pMem1->n);
-  if( rc==0 ){
-    rc = pMem1->n - pMem2->n;
-  }
-  return rc;
-}
-
-
-/*
-** The first argument passed to this function is a serial-type that
-** corresponds to an integer - all values between 1 and 9 inclusive
-** except 7. The second points to a buffer containing an integer value
-** serialized according to serial_type. This function deserializes
-** and returns the value.
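
The cross-type ordering implemented by sqlite3MemCompare() above (NULLs first, then numerics compared numerically, then text under a collation, then blobs by memcmp()) can be sketched over a simplified tagged value. The Val struct and the collation callback signature below are illustrative stand-ins, not SQLite's Mem or CollSeq types:

/* Illustrative stand-in for the ordering above: NULL < numeric < text < blob,
** numerics compared numerically, text via a caller-supplied collation,
** blobs (or text without a collation) via memcmp(). Not part of SQLite. */
#include <assert.h>
#include <string.h>

typedef enum { V_NULL, V_INT, V_REAL, V_TEXT, V_BLOB } ValType;
typedef struct {
  ValType t;
  long long i;        /* V_INT */
  double r;           /* V_REAL */
  const char *z;      /* V_TEXT / V_BLOB payload */
  int n;              /* payload length in bytes */
} Val;

static int classRank(ValType t){
  switch( t ){
    case V_NULL: return 0;
    case V_INT: case V_REAL: return 1;
    case V_TEXT: return 2;
    default: return 3;  /* V_BLOB */
  }
}

static int valCompare(const Val *a, const Val *b,
                      int (*xColl)(const char*, int, const char*, int)){
  int ra = classRank(a->t), rb = classRank(b->t);
  if( ra!=rb ) return ra<rb ? -1 : 1;      /* different storage classes */
  if( ra==0 ) return 0;                    /* two NULLs compare equal */
  if( ra==1 ){                             /* numeric: compare as reals */
    double x = a->t==V_INT ? (double)a->i : a->r;
    double y = b->t==V_INT ? (double)b->i : b->r;
    return x<y ? -1 : (x>y ? 1 : 0);
  }
  if( ra==2 && xColl ) return xColl(a->z, a->n, b->z, b->n);
  {                                        /* text without collation, or blobs */
    int m = a->n<b->n ? a->n : b->n;
    int rc = memcmp(a->z, b->z, m);
    return rc ? rc : a->n - b->n;
  }
}

int main(void){
  Val vnull = { V_NULL, 0, 0,    0,     0 };
  Val three = { V_INT,  3, 0,    0,     0 };
  Val pi    = { V_REAL, 0, 3.14, 0,     0 };
  Val txt   = { V_TEXT, 0, 0,    "abc", 3 };
  assert( valCompare(&vnull, &three, 0) < 0 );  /* NULL sorts first */
  assert( valCompare(&three, &pi, 0) < 0 );     /* 3 < 3.14 numerically */
  assert( valCompare(&pi, &txt, 0) < 0 );       /* numbers before text */
  return 0;
}
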
-*/ -static i64 vdbeRecordDecodeInt(u32 serial_type, const u8 *aKey){ - u32 y; - assert( CORRUPT_DB || (serial_type>=1 && serial_type<=9 && serial_type!=7) ); - switch( serial_type ){ - case 0: - case 1: - testcase( aKey[0]&0x80 ); - return ONE_BYTE_INT(aKey); - case 2: - testcase( aKey[0]&0x80 ); - return TWO_BYTE_INT(aKey); - case 3: - testcase( aKey[0]&0x80 ); - return THREE_BYTE_INT(aKey); - case 4: { - testcase( aKey[0]&0x80 ); - y = FOUR_BYTE_UINT(aKey); - return (i64)*(int*)&y; - } - case 5: { - testcase( aKey[0]&0x80 ); - return FOUR_BYTE_UINT(aKey+2) + (((i64)1)<<32)*TWO_BYTE_INT(aKey); - } - case 6: { - u64 x = FOUR_BYTE_UINT(aKey); - testcase( aKey[0]&0x80 ); - x = (x<<32) | FOUR_BYTE_UINT(aKey+4); - return (i64)*(i64*)&x; - } - } - - return (serial_type - 8); -} - -/* -** This function compares the two table rows or index records -** specified by {nKey1, pKey1} and pPKey2. It returns a negative, zero -** or positive integer if key1 is less than, equal to or -** greater than key2. The {nKey1, pKey1} key must be a blob -** created by th OP_MakeRecord opcode of the VDBE. The pPKey2 -** key must be a parsed key such as obtained from -** sqlite3VdbeParseRecord. -** -** If argument bSkip is non-zero, it is assumed that the caller has already -** determined that the first fields of the keys are equal. -** -** Key1 and Key2 do not have to contain the same number of fields. If all -** fields that appear in both keys are equal, then pPKey2->default_rc is -** returned. -** -** If database corruption is discovered, set pPKey2->isCorrupt to non-zero -** and return 0. -*/ -SQLITE_PRIVATE int sqlite3VdbeRecordCompare( - int nKey1, const void *pKey1, /* Left key */ - UnpackedRecord *pPKey2, /* Right key */ - int bSkip /* If true, skip the first field */ -){ - u32 d1; /* Offset into aKey[] of next data element */ - int i; /* Index of next field to compare */ - u32 szHdr1; /* Size of record header in bytes */ - u32 idx1; /* Offset of first type in header */ - int rc = 0; /* Return value */ - Mem *pRhs = pPKey2->aMem; /* Next field of pPKey2 to compare */ - KeyInfo *pKeyInfo = pPKey2->pKeyInfo; - const unsigned char *aKey1 = (const unsigned char *)pKey1; - Mem mem1; - - /* If bSkip is true, then the caller has already determined that the first - ** two elements in the keys are equal. Fix the various stack variables so - ** that this routine begins comparing at the second field. 
*/ - if( bSkip ){ - u32 s1; - idx1 = 1 + getVarint32(&aKey1[1], s1); - szHdr1 = aKey1[0]; - d1 = szHdr1 + sqlite3VdbeSerialTypeLen(s1); - i = 1; - pRhs++; - }else{ - idx1 = getVarint32(aKey1, szHdr1); - d1 = szHdr1; - if( d1>(unsigned)nKey1 ){ - pPKey2->isCorrupt = (u8)SQLITE_CORRUPT_BKPT; - return 0; /* Corruption */ - } - i = 0; - } - - VVA_ONLY( mem1.zMalloc = 0; ) /* Only needed by assert() statements */ - assert( pPKey2->pKeyInfo->nField+pPKey2->pKeyInfo->nXField>=pPKey2->nField - || CORRUPT_DB ); - assert( pPKey2->pKeyInfo->aSortOrder!=0 ); - assert( pPKey2->pKeyInfo->nField>0 ); - assert( idx1<=szHdr1 || CORRUPT_DB ); - do{ - u32 serial_type; - - /* RHS is an integer */ - if( pRhs->flags & MEM_Int ){ - serial_type = aKey1[idx1]; - testcase( serial_type==12 ); - if( serial_type>=12 ){ - rc = +1; - }else if( serial_type==0 ){ - rc = -1; - }else if( serial_type==7 ){ - double rhs = (double)pRhs->u.i; - sqlite3VdbeSerialGet(&aKey1[d1], serial_type, &mem1); - if( mem1.rrhs ){ - rc = +1; - } - }else{ - i64 lhs = vdbeRecordDecodeInt(serial_type, &aKey1[d1]); - i64 rhs = pRhs->u.i; - if( lhsrhs ){ - rc = +1; - } - } - } - - /* RHS is real */ - else if( pRhs->flags & MEM_Real ){ - serial_type = aKey1[idx1]; - if( serial_type>=12 ){ - rc = +1; - }else if( serial_type==0 ){ - rc = -1; - }else{ - double rhs = pRhs->r; - double lhs; - sqlite3VdbeSerialGet(&aKey1[d1], serial_type, &mem1); - if( serial_type==7 ){ - lhs = mem1.r; - }else{ - lhs = (double)mem1.u.i; - } - if( lhsrhs ){ - rc = +1; - } - } - } - - /* RHS is a string */ - else if( pRhs->flags & MEM_Str ){ - getVarint32(&aKey1[idx1], serial_type); - testcase( serial_type==12 ); - if( serial_type<12 ){ - rc = -1; - }else if( !(serial_type & 0x01) ){ - rc = +1; - }else{ - mem1.n = (serial_type - 12) / 2; - testcase( (d1+mem1.n)==(unsigned)nKey1 ); - testcase( (d1+mem1.n+1)==(unsigned)nKey1 ); - if( (d1+mem1.n) > (unsigned)nKey1 ){ - pPKey2->isCorrupt = (u8)SQLITE_CORRUPT_BKPT; - return 0; /* Corruption */ - }else if( pKeyInfo->aColl[i] ){ - mem1.enc = pKeyInfo->enc; - mem1.db = pKeyInfo->db; - mem1.flags = MEM_Str; - mem1.z = (char*)&aKey1[d1]; - rc = vdbeCompareMemString(&mem1, pRhs, pKeyInfo->aColl[i]); - }else{ - int nCmp = MIN(mem1.n, pRhs->n); - rc = memcmp(&aKey1[d1], pRhs->z, nCmp); - if( rc==0 ) rc = mem1.n - pRhs->n; - } - } - } - - /* RHS is a blob */ - else if( pRhs->flags & MEM_Blob ){ - getVarint32(&aKey1[idx1], serial_type); - testcase( serial_type==12 ); - if( serial_type<12 || (serial_type & 0x01) ){ - rc = -1; - }else{ - int nStr = (serial_type - 12) / 2; - testcase( (d1+nStr)==(unsigned)nKey1 ); - testcase( (d1+nStr+1)==(unsigned)nKey1 ); - if( (d1+nStr) > (unsigned)nKey1 ){ - pPKey2->isCorrupt = (u8)SQLITE_CORRUPT_BKPT; - return 0; /* Corruption */ - }else{ - int nCmp = MIN(nStr, pRhs->n); - rc = memcmp(&aKey1[d1], pRhs->z, nCmp); - if( rc==0 ) rc = nStr - pRhs->n; - } - } - } - - /* RHS is null */ - else{ - serial_type = aKey1[idx1]; - rc = (serial_type!=0); - } - - if( rc!=0 ){ - if( pKeyInfo->aSortOrder[i] ){ - rc = -rc; - } - assert( CORRUPT_DB - || (rc<0 && vdbeRecordCompareDebug(nKey1, pKey1, pPKey2)<0) - || (rc>0 && vdbeRecordCompareDebug(nKey1, pKey1, pPKey2)>0) - || pKeyInfo->db->mallocFailed - ); - assert( mem1.zMalloc==0 ); /* See comment below */ - return rc; - } - - i++; - pRhs++; - d1 += sqlite3VdbeSerialTypeLen(serial_type); - idx1 += sqlite3VarintLen(serial_type); - }while( idx1<(unsigned)szHdr1 && inField && d1<=(unsigned)nKey1 ); - - /* No memory allocation is ever used on mem1. 
Prove this using - ** the following assert(). If the assert() fails, it indicates a - ** memory leak and a need to call sqlite3VdbeMemRelease(&mem1). */ - assert( mem1.zMalloc==0 ); - - /* rc==0 here means that one or both of the keys ran out of fields and - ** all the fields up to that point were equal. Return the the default_rc - ** value. */ - assert( CORRUPT_DB - || pPKey2->default_rc==vdbeRecordCompareDebug(nKey1, pKey1, pPKey2) - ); - return pPKey2->default_rc; -} - -/* -** This function is an optimized version of sqlite3VdbeRecordCompare() -** that (a) the first field of pPKey2 is an integer, and (b) the -** size-of-header varint at the start of (pKey1/nKey1) fits in a single -** byte (i.e. is less than 128). -** -** To avoid concerns about buffer overreads, this routine is only used -** on schemas where the maximum valid header size is 63 bytes or less. -*/ -static int vdbeRecordCompareInt( - int nKey1, const void *pKey1, /* Left key */ - UnpackedRecord *pPKey2, /* Right key */ - int bSkip /* Ignored */ -){ - const u8 *aKey = &((const u8*)pKey1)[*(const u8*)pKey1 & 0x3F]; - int serial_type = ((const u8*)pKey1)[1]; - int res; - u32 y; - u64 x; - i64 v = pPKey2->aMem[0].u.i; - i64 lhs; - UNUSED_PARAMETER(bSkip); - - assert( bSkip==0 ); - assert( (*(u8*)pKey1)<=0x3F || CORRUPT_DB ); - switch( serial_type ){ - case 1: { /* 1-byte signed integer */ - lhs = ONE_BYTE_INT(aKey); - testcase( lhs<0 ); - break; - } - case 2: { /* 2-byte signed integer */ - lhs = TWO_BYTE_INT(aKey); - testcase( lhs<0 ); - break; - } - case 3: { /* 3-byte signed integer */ - lhs = THREE_BYTE_INT(aKey); - testcase( lhs<0 ); - break; - } - case 4: { /* 4-byte signed integer */ - y = FOUR_BYTE_UINT(aKey); - lhs = (i64)*(int*)&y; - testcase( lhs<0 ); - break; - } - case 5: { /* 6-byte signed integer */ - lhs = FOUR_BYTE_UINT(aKey+2) + (((i64)1)<<32)*TWO_BYTE_INT(aKey); - testcase( lhs<0 ); - break; - } - case 6: { /* 8-byte signed integer */ - x = FOUR_BYTE_UINT(aKey); - x = (x<<32) | FOUR_BYTE_UINT(aKey+4); - lhs = *(i64*)&x; - testcase( lhs<0 ); - break; - } - case 8: - lhs = 0; - break; - case 9: - lhs = 1; - break; - - /* This case could be removed without changing the results of running - ** this code. Including it causes gcc to generate a faster switch - ** statement (since the range of switch targets now starts at zero and - ** is contiguous) but does not cause any duplicate code to be generated - ** (as gcc is clever enough to combine the two like cases). Other - ** compilers might be similar. */ - case 0: case 7: - return sqlite3VdbeRecordCompare(nKey1, pKey1, pPKey2, 0); - - default: - return sqlite3VdbeRecordCompare(nKey1, pKey1, pPKey2, 0); - } - - if( v>lhs ){ - res = pPKey2->r1; - }else if( vr2; - }else if( pPKey2->nField>1 ){ - /* The first fields of the two keys are equal. Compare the trailing - ** fields. */ - res = sqlite3VdbeRecordCompare(nKey1, pKey1, pPKey2, 1); - }else{ - /* The first fields of the two keys are equal and there are no trailing - ** fields. Return pPKey2->default_rc in this case. 
*/ - res = pPKey2->default_rc; - } - - assert( (res==0 && vdbeRecordCompareDebug(nKey1, pKey1, pPKey2)==0) - || (res<0 && vdbeRecordCompareDebug(nKey1, pKey1, pPKey2)<0) - || (res>0 && vdbeRecordCompareDebug(nKey1, pKey1, pPKey2)>0) - || CORRUPT_DB - ); - return res; -} - -/* -** This function is an optimized version of sqlite3VdbeRecordCompare() -** that (a) the first field of pPKey2 is a string, that (b) the first field -** uses the collation sequence BINARY and (c) that the size-of-header varint -** at the start of (pKey1/nKey1) fits in a single byte. -*/ -static int vdbeRecordCompareString( - int nKey1, const void *pKey1, /* Left key */ - UnpackedRecord *pPKey2, /* Right key */ - int bSkip -){ - const u8 *aKey1 = (const u8*)pKey1; - int serial_type; - int res; - UNUSED_PARAMETER(bSkip); - - assert( bSkip==0 ); - getVarint32(&aKey1[1], serial_type); - - if( serial_type<12 ){ - res = pPKey2->r1; /* (pKey1/nKey1) is a number or a null */ - }else if( !(serial_type & 0x01) ){ - res = pPKey2->r2; /* (pKey1/nKey1) is a blob */ - }else{ - int nCmp; - int nStr; - int szHdr = aKey1[0]; - - nStr = (serial_type-12) / 2; - if( (szHdr + nStr) > nKey1 ){ - pPKey2->isCorrupt = (u8)SQLITE_CORRUPT_BKPT; - return 0; /* Corruption */ - } - nCmp = MIN( pPKey2->aMem[0].n, nStr ); - res = memcmp(&aKey1[szHdr], pPKey2->aMem[0].z, nCmp); - - if( res==0 ){ - res = nStr - pPKey2->aMem[0].n; - if( res==0 ){ - if( pPKey2->nField>1 ){ - res = sqlite3VdbeRecordCompare(nKey1, pKey1, pPKey2, 1); - }else{ - res = pPKey2->default_rc; - } - }else if( res>0 ){ - res = pPKey2->r2; - }else{ - res = pPKey2->r1; - } - }else if( res>0 ){ - res = pPKey2->r2; - }else{ - res = pPKey2->r1; - } - } - - assert( (res==0 && vdbeRecordCompareDebug(nKey1, pKey1, pPKey2)==0) - || (res<0 && vdbeRecordCompareDebug(nKey1, pKey1, pPKey2)<0) - || (res>0 && vdbeRecordCompareDebug(nKey1, pKey1, pPKey2)>0) - || CORRUPT_DB - ); - return res; -} - -/* -** Return a pointer to an sqlite3VdbeRecordCompare() compatible function -** suitable for comparing serialized records to the unpacked record passed -** as the only argument. -*/ -SQLITE_PRIVATE RecordCompare sqlite3VdbeFindCompare(UnpackedRecord *p){ - /* varintRecordCompareInt() and varintRecordCompareString() both assume - ** that the size-of-header varint that occurs at the start of each record - ** fits in a single byte (i.e. is 127 or less). varintRecordCompareInt() - ** also assumes that it is safe to overread a buffer by at least the - ** maximum possible legal header size plus 8 bytes. Because there is - ** guaranteed to be at least 74 (but not 136) bytes of padding following each - ** buffer passed to varintRecordCompareInt() this makes it convenient to - ** limit the size of the header to 64 bytes in cases where the first field - ** is an integer. - ** - ** The easiest way to enforce this limit is to consider only records with - ** 13 fields or less. If the first field is an integer, the maximum legal - ** header size is (12*5 + 1 + 1) bytes. 
*/ - if( (p->pKeyInfo->nField + p->pKeyInfo->nXField)<=13 ){ - int flags = p->aMem[0].flags; - if( p->pKeyInfo->aSortOrder[0] ){ - p->r1 = 1; - p->r2 = -1; - }else{ - p->r1 = -1; - p->r2 = 1; - } - if( (flags & MEM_Int) ){ - return vdbeRecordCompareInt; - } - testcase( flags & MEM_Real ); - testcase( flags & MEM_Null ); - testcase( flags & MEM_Blob ); - if( (flags & (MEM_Real|MEM_Null|MEM_Blob))==0 && p->pKeyInfo->aColl[0]==0 ){ - assert( flags & MEM_Str ); - return vdbeRecordCompareString; - } - } - - return sqlite3VdbeRecordCompare; -} - -/* -** pCur points at an index entry created using the OP_MakeRecord opcode. -** Read the rowid (the last field in the record) and store it in *rowid. -** Return SQLITE_OK if everything works, or an error code otherwise. -** -** pCur might be pointing to text obtained from a corrupt database file. -** So the content cannot be trusted. Do appropriate checks on the content. -*/ -SQLITE_PRIVATE int sqlite3VdbeIdxRowid(sqlite3 *db, BtCursor *pCur, i64 *rowid){ - i64 nCellKey = 0; - int rc; - u32 szHdr; /* Size of the header */ - u32 typeRowid; /* Serial type of the rowid */ - u32 lenRowid; /* Size of the rowid */ - Mem m, v; - - UNUSED_PARAMETER(db); - - /* Get the size of the index entry. Only indices entries of less - ** than 2GiB are support - anything large must be database corruption. - ** Any corruption is detected in sqlite3BtreeParseCellPtr(), though, so - ** this code can safely assume that nCellKey is 32-bits - */ - assert( sqlite3BtreeCursorIsValid(pCur) ); - VVA_ONLY(rc =) sqlite3BtreeKeySize(pCur, &nCellKey); - assert( rc==SQLITE_OK ); /* pCur is always valid so KeySize cannot fail */ - assert( (nCellKey & SQLITE_MAX_U32)==(u64)nCellKey ); - - /* Read in the complete content of the index entry */ - memset(&m, 0, sizeof(m)); - rc = sqlite3VdbeMemFromBtree(pCur, 0, (u32)nCellKey, 1, &m); - if( rc ){ - return rc; - } - - /* The index entry must begin with a header size */ - (void)getVarint32((u8*)m.z, szHdr); - testcase( szHdr==3 ); - testcase( szHdr==m.n ); - if( unlikely(szHdr<3 || (int)szHdr>m.n) ){ - goto idx_rowid_corruption; - } - - /* The last field of the index should be an integer - the ROWID. - ** Verify that the last entry really is an integer. */ - (void)getVarint32((u8*)&m.z[szHdr-1], typeRowid); - testcase( typeRowid==1 ); - testcase( typeRowid==2 ); - testcase( typeRowid==3 ); - testcase( typeRowid==4 ); - testcase( typeRowid==5 ); - testcase( typeRowid==6 ); - testcase( typeRowid==8 ); - testcase( typeRowid==9 ); - if( unlikely(typeRowid<1 || typeRowid>9 || typeRowid==7) ){ - goto idx_rowid_corruption; - } - lenRowid = sqlite3VdbeSerialTypeLen(typeRowid); - testcase( (u32)m.n==szHdr+lenRowid ); - if( unlikely((u32)m.npCursor; - Mem m; - - assert( sqlite3BtreeCursorIsValid(pCur) ); - VVA_ONLY(rc =) sqlite3BtreeKeySize(pCur, &nCellKey); - assert( rc==SQLITE_OK ); /* pCur is always valid so KeySize cannot fail */ - /* nCellKey will always be between 0 and 0xffffffff because of the way - ** that btreeParseCellPtr() and sqlite3GetVarint32() are implemented */ - if( nCellKey<=0 || nCellKey>0x7fffffff ){ - *res = 0; - return SQLITE_CORRUPT_BKPT; - } - memset(&m, 0, sizeof(m)); - rc = sqlite3VdbeMemFromBtree(pC->pCursor, 0, (u32)nCellKey, 1, &m); - if( rc ){ - return rc; - } - *res = sqlite3VdbeRecordCompare(m.n, m.z, pUnpacked, 0); - sqlite3VdbeMemRelease(&m); - return SQLITE_OK; -} - -/* -** This routine sets the value to be returned by subsequent calls to -** sqlite3_changes() on the database handle 'db'. 
-*/ -SQLITE_PRIVATE void sqlite3VdbeSetChanges(sqlite3 *db, int nChange){ - assert( sqlite3_mutex_held(db->mutex) ); - db->nChange = nChange; - db->nTotalChange += nChange; -} - -/* -** Set a flag in the vdbe to update the change counter when it is finalised -** or reset. -*/ -SQLITE_PRIVATE void sqlite3VdbeCountChanges(Vdbe *v){ - v->changeCntOn = 1; -} - -/* -** Mark every prepared statement associated with a database connection -** as expired. -** -** An expired statement means that recompilation of the statement is -** recommend. Statements expire when things happen that make their -** programs obsolete. Removing user-defined functions or collating -** sequences, or changing an authorization function are the types of -** things that make prepared statements obsolete. -*/ -SQLITE_PRIVATE void sqlite3ExpirePreparedStatements(sqlite3 *db){ - Vdbe *p; - for(p = db->pVdbe; p; p=p->pNext){ - p->expired = 1; - } -} - -/* -** Return the database associated with the Vdbe. -*/ -SQLITE_PRIVATE sqlite3 *sqlite3VdbeDb(Vdbe *v){ - return v->db; -} - -/* -** Return a pointer to an sqlite3_value structure containing the value bound -** parameter iVar of VM v. Except, if the value is an SQL NULL, return -** 0 instead. Unless it is NULL, apply affinity aff (one of the SQLITE_AFF_* -** constants) to the value before returning it. -** -** The returned value must be freed by the caller using sqlite3ValueFree(). -*/ -SQLITE_PRIVATE sqlite3_value *sqlite3VdbeGetBoundValue(Vdbe *v, int iVar, u8 aff){ - assert( iVar>0 ); - if( v ){ - Mem *pMem = &v->aVar[iVar-1]; - if( 0==(pMem->flags & MEM_Null) ){ - sqlite3_value *pRet = sqlite3ValueNew(v->db); - if( pRet ){ - sqlite3VdbeMemCopy((Mem *)pRet, pMem); - sqlite3ValueApplyAffinity(pRet, aff, SQLITE_UTF8); - } - return pRet; - } - } - return 0; -} - -/* -** Configure SQL variable iVar so that binding a new value to it signals -** to sqlite3_reoptimize() that re-preparing the statement may result -** in a better query plan. -*/ -SQLITE_PRIVATE void sqlite3VdbeSetVarmask(Vdbe *v, int iVar){ - assert( iVar>0 ); - if( iVar>32 ){ - v->expmask = 0xffffffff; - }else{ - v->expmask |= ((u32)1 << (iVar-1)); - } -} - -#ifndef SQLITE_OMIT_VIRTUALTABLE -/* -** Transfer error message text from an sqlite3_vtab.zErrMsg (text stored -** in memory obtained from sqlite3_malloc) into a Vdbe.zErrMsg (text stored -** in memory obtained from sqlite3DbMalloc). -*/ -SQLITE_PRIVATE void sqlite3VtabImportErrmsg(Vdbe *p, sqlite3_vtab *pVtab){ - sqlite3 *db = p->db; - sqlite3DbFree(db, p->zErrMsg); - p->zErrMsg = sqlite3DbStrDup(db, pVtab->zErrMsg); - sqlite3_free(pVtab->zErrMsg); - pVtab->zErrMsg = 0; -} -#endif /* SQLITE_OMIT_VIRTUALTABLE */ - -/************** End of vdbeaux.c *********************************************/ -/************** Begin file vdbeapi.c *****************************************/ -/* -** 2004 May 26 -** -** The author disclaims copyright to this source code. In place of -** a legal notice, here is a blessing: -** -** May you do good and not evil. -** May you find forgiveness for yourself and forgive others. -** May you share freely, never taking more than you give. -** -************************************************************************* -** -** This file contains code use to implement APIs that are part of the -** VDBE. -*/ - -#ifndef SQLITE_OMIT_DEPRECATED -/* -** Return TRUE (non-zero) of the statement supplied as an argument needs -** to be recompiled. 
A statement needs to be recompiled whenever the -** execution environment changes in a way that would alter the program -** that sqlite3_prepare() generates. For example, if new functions or -** collating sequences are registered or if an authorizer function is -** added or changed. -*/ -SQLITE_API int sqlite3_expired(sqlite3_stmt *pStmt){ - Vdbe *p = (Vdbe*)pStmt; - return p==0 || p->expired; -} -#endif - -/* -** Check on a Vdbe to make sure it has not been finalized. Log -** an error and return true if it has been finalized (or is otherwise -** invalid). Return false if it is ok. -*/ -static int vdbeSafety(Vdbe *p){ - if( p->db==0 ){ - sqlite3_log(SQLITE_MISUSE, "API called with finalized prepared statement"); - return 1; - }else{ - return 0; - } -} -static int vdbeSafetyNotNull(Vdbe *p){ - if( p==0 ){ - sqlite3_log(SQLITE_MISUSE, "API called with NULL prepared statement"); - return 1; - }else{ - return vdbeSafety(p); - } -} - -/* -** The following routine destroys a virtual machine that is created by -** the sqlite3_compile() routine. The integer returned is an SQLITE_ -** success/failure code that describes the result of executing the virtual -** machine. -** -** This routine sets the error code and string returned by -** sqlite3_errcode(), sqlite3_errmsg() and sqlite3_errmsg16(). -*/ -SQLITE_API int sqlite3_finalize(sqlite3_stmt *pStmt){ - int rc; - if( pStmt==0 ){ - /* IMPLEMENTATION-OF: R-57228-12904 Invoking sqlite3_finalize() on a NULL - ** pointer is a harmless no-op. */ - rc = SQLITE_OK; - }else{ - Vdbe *v = (Vdbe*)pStmt; - sqlite3 *db = v->db; - if( vdbeSafety(v) ) return SQLITE_MISUSE_BKPT; - sqlite3_mutex_enter(db->mutex); - rc = sqlite3VdbeFinalize(v); - rc = sqlite3ApiExit(db, rc); - sqlite3LeaveMutexAndCloseZombie(db); - } - return rc; -} - -/* -** Terminate the current execution of an SQL statement and reset it -** back to its starting state so that it can be reused. A success code from -** the prior execution is returned. -** -** This routine sets the error code and string returned by -** sqlite3_errcode(), sqlite3_errmsg() and sqlite3_errmsg16(). -*/ -SQLITE_API int sqlite3_reset(sqlite3_stmt *pStmt){ - int rc; - if( pStmt==0 ){ - rc = SQLITE_OK; - }else{ - Vdbe *v = (Vdbe*)pStmt; - sqlite3_mutex_enter(v->db->mutex); - rc = sqlite3VdbeReset(v); - sqlite3VdbeRewind(v); - assert( (rc & (v->db->errMask))==rc ); - rc = sqlite3ApiExit(v->db, rc); - sqlite3_mutex_leave(v->db->mutex); - } - return rc; -} - -/* -** Set all the parameters in the compiled SQL statement to NULL. -*/ -SQLITE_API int sqlite3_clear_bindings(sqlite3_stmt *pStmt){ - int i; - int rc = SQLITE_OK; - Vdbe *p = (Vdbe*)pStmt; -#if SQLITE_THREADSAFE - sqlite3_mutex *mutex = ((Vdbe*)pStmt)->db->mutex; -#endif - sqlite3_mutex_enter(mutex); - for(i=0; inVar; i++){ - sqlite3VdbeMemRelease(&p->aVar[i]); - p->aVar[i].flags = MEM_Null; - } - if( p->isPrepareV2 && p->expmask ){ - p->expired = 1; - } - sqlite3_mutex_leave(mutex); - return rc; -} - - -/**************************** sqlite3_value_ ******************************* -** The following routines extract information from a Mem or sqlite3_value -** structure. -*/ -SQLITE_API const void *sqlite3_value_blob(sqlite3_value *pVal){ - Mem *p = (Mem*)pVal; - if( p->flags & (MEM_Blob|MEM_Str) ){ - sqlite3VdbeMemExpandBlob(p); - p->flags |= MEM_Blob; - return p->n ? 
p->z : 0; - }else{ - return sqlite3_value_text(pVal); - } -} -SQLITE_API int sqlite3_value_bytes(sqlite3_value *pVal){ - return sqlite3ValueBytes(pVal, SQLITE_UTF8); -} -SQLITE_API int sqlite3_value_bytes16(sqlite3_value *pVal){ - return sqlite3ValueBytes(pVal, SQLITE_UTF16NATIVE); -} -SQLITE_API double sqlite3_value_double(sqlite3_value *pVal){ - return sqlite3VdbeRealValue((Mem*)pVal); -} -SQLITE_API int sqlite3_value_int(sqlite3_value *pVal){ - return (int)sqlite3VdbeIntValue((Mem*)pVal); -} -SQLITE_API sqlite_int64 sqlite3_value_int64(sqlite3_value *pVal){ - return sqlite3VdbeIntValue((Mem*)pVal); -} -SQLITE_API const unsigned char *sqlite3_value_text(sqlite3_value *pVal){ - return (const unsigned char *)sqlite3ValueText(pVal, SQLITE_UTF8); -} -#ifndef SQLITE_OMIT_UTF16 -SQLITE_API const void *sqlite3_value_text16(sqlite3_value* pVal){ - return sqlite3ValueText(pVal, SQLITE_UTF16NATIVE); -} -SQLITE_API const void *sqlite3_value_text16be(sqlite3_value *pVal){ - return sqlite3ValueText(pVal, SQLITE_UTF16BE); -} -SQLITE_API const void *sqlite3_value_text16le(sqlite3_value *pVal){ - return sqlite3ValueText(pVal, SQLITE_UTF16LE); -} -#endif /* SQLITE_OMIT_UTF16 */ -SQLITE_API int sqlite3_value_type(sqlite3_value* pVal){ - static const u8 aType[] = { - SQLITE_BLOB, /* 0x00 */ - SQLITE_NULL, /* 0x01 */ - SQLITE_TEXT, /* 0x02 */ - SQLITE_NULL, /* 0x03 */ - SQLITE_INTEGER, /* 0x04 */ - SQLITE_NULL, /* 0x05 */ - SQLITE_INTEGER, /* 0x06 */ - SQLITE_NULL, /* 0x07 */ - SQLITE_FLOAT, /* 0x08 */ - SQLITE_NULL, /* 0x09 */ - SQLITE_FLOAT, /* 0x0a */ - SQLITE_NULL, /* 0x0b */ - SQLITE_INTEGER, /* 0x0c */ - SQLITE_NULL, /* 0x0d */ - SQLITE_INTEGER, /* 0x0e */ - SQLITE_NULL, /* 0x0f */ - SQLITE_BLOB, /* 0x10 */ - SQLITE_NULL, /* 0x11 */ - SQLITE_TEXT, /* 0x12 */ - SQLITE_NULL, /* 0x13 */ - SQLITE_INTEGER, /* 0x14 */ - SQLITE_NULL, /* 0x15 */ - SQLITE_INTEGER, /* 0x16 */ - SQLITE_NULL, /* 0x17 */ - SQLITE_FLOAT, /* 0x18 */ - SQLITE_NULL, /* 0x19 */ - SQLITE_FLOAT, /* 0x1a */ - SQLITE_NULL, /* 0x1b */ - SQLITE_INTEGER, /* 0x1c */ - SQLITE_NULL, /* 0x1d */ - SQLITE_INTEGER, /* 0x1e */ - SQLITE_NULL, /* 0x1f */ - }; - return aType[pVal->flags&MEM_AffMask]; -} - -/**************************** sqlite3_result_ ******************************* -** The following routines are used by user-defined functions to specify -** the function result. -** -** The setStrOrError() funtion calls sqlite3VdbeMemSetStr() to store the -** result as a string or blob but if the string or blob is too large, it -** then sets the error code to SQLITE_TOOBIG -*/ -static void setResultStrOrError( - sqlite3_context *pCtx, /* Function context */ - const char *z, /* String pointer */ - int n, /* Bytes in string, or negative */ - u8 enc, /* Encoding of z. 
0 for BLOBs */ - void (*xDel)(void*) /* Destructor function */ -){ - if( sqlite3VdbeMemSetStr(&pCtx->s, z, n, enc, xDel)==SQLITE_TOOBIG ){ - sqlite3_result_error_toobig(pCtx); - } -} -SQLITE_API void sqlite3_result_blob( - sqlite3_context *pCtx, - const void *z, - int n, - void (*xDel)(void *) -){ - assert( n>=0 ); - assert( sqlite3_mutex_held(pCtx->s.db->mutex) ); - setResultStrOrError(pCtx, z, n, 0, xDel); -} -SQLITE_API void sqlite3_result_double(sqlite3_context *pCtx, double rVal){ - assert( sqlite3_mutex_held(pCtx->s.db->mutex) ); - sqlite3VdbeMemSetDouble(&pCtx->s, rVal); -} -SQLITE_API void sqlite3_result_error(sqlite3_context *pCtx, const char *z, int n){ - assert( sqlite3_mutex_held(pCtx->s.db->mutex) ); - pCtx->isError = SQLITE_ERROR; - pCtx->fErrorOrAux = 1; - sqlite3VdbeMemSetStr(&pCtx->s, z, n, SQLITE_UTF8, SQLITE_TRANSIENT); -} -#ifndef SQLITE_OMIT_UTF16 -SQLITE_API void sqlite3_result_error16(sqlite3_context *pCtx, const void *z, int n){ - assert( sqlite3_mutex_held(pCtx->s.db->mutex) ); - pCtx->isError = SQLITE_ERROR; - pCtx->fErrorOrAux = 1; - sqlite3VdbeMemSetStr(&pCtx->s, z, n, SQLITE_UTF16NATIVE, SQLITE_TRANSIENT); -} -#endif -SQLITE_API void sqlite3_result_int(sqlite3_context *pCtx, int iVal){ - assert( sqlite3_mutex_held(pCtx->s.db->mutex) ); - sqlite3VdbeMemSetInt64(&pCtx->s, (i64)iVal); -} -SQLITE_API void sqlite3_result_int64(sqlite3_context *pCtx, i64 iVal){ - assert( sqlite3_mutex_held(pCtx->s.db->mutex) ); - sqlite3VdbeMemSetInt64(&pCtx->s, iVal); -} -SQLITE_API void sqlite3_result_null(sqlite3_context *pCtx){ - assert( sqlite3_mutex_held(pCtx->s.db->mutex) ); - sqlite3VdbeMemSetNull(&pCtx->s); -} -SQLITE_API void sqlite3_result_text( - sqlite3_context *pCtx, - const char *z, - int n, - void (*xDel)(void *) -){ - assert( sqlite3_mutex_held(pCtx->s.db->mutex) ); - setResultStrOrError(pCtx, z, n, SQLITE_UTF8, xDel); -} -#ifndef SQLITE_OMIT_UTF16 -SQLITE_API void sqlite3_result_text16( - sqlite3_context *pCtx, - const void *z, - int n, - void (*xDel)(void *) -){ - assert( sqlite3_mutex_held(pCtx->s.db->mutex) ); - setResultStrOrError(pCtx, z, n, SQLITE_UTF16NATIVE, xDel); -} -SQLITE_API void sqlite3_result_text16be( - sqlite3_context *pCtx, - const void *z, - int n, - void (*xDel)(void *) -){ - assert( sqlite3_mutex_held(pCtx->s.db->mutex) ); - setResultStrOrError(pCtx, z, n, SQLITE_UTF16BE, xDel); -} -SQLITE_API void sqlite3_result_text16le( - sqlite3_context *pCtx, - const void *z, - int n, - void (*xDel)(void *) -){ - assert( sqlite3_mutex_held(pCtx->s.db->mutex) ); - setResultStrOrError(pCtx, z, n, SQLITE_UTF16LE, xDel); -} -#endif /* SQLITE_OMIT_UTF16 */ -SQLITE_API void sqlite3_result_value(sqlite3_context *pCtx, sqlite3_value *pValue){ - assert( sqlite3_mutex_held(pCtx->s.db->mutex) ); - sqlite3VdbeMemCopy(&pCtx->s, pValue); -} -SQLITE_API void sqlite3_result_zeroblob(sqlite3_context *pCtx, int n){ - assert( sqlite3_mutex_held(pCtx->s.db->mutex) ); - sqlite3VdbeMemSetZeroBlob(&pCtx->s, n); -} -SQLITE_API void sqlite3_result_error_code(sqlite3_context *pCtx, int errCode){ - pCtx->isError = errCode; - pCtx->fErrorOrAux = 1; - if( pCtx->s.flags & MEM_Null ){ - sqlite3VdbeMemSetStr(&pCtx->s, sqlite3ErrStr(errCode), -1, - SQLITE_UTF8, SQLITE_STATIC); - } -} - -/* Force an SQLITE_TOOBIG error. 
*/ -SQLITE_API void sqlite3_result_error_toobig(sqlite3_context *pCtx){ - assert( sqlite3_mutex_held(pCtx->s.db->mutex) ); - pCtx->isError = SQLITE_TOOBIG; - pCtx->fErrorOrAux = 1; - sqlite3VdbeMemSetStr(&pCtx->s, "string or blob too big", -1, - SQLITE_UTF8, SQLITE_STATIC); -} - -/* An SQLITE_NOMEM error. */ -SQLITE_API void sqlite3_result_error_nomem(sqlite3_context *pCtx){ - assert( sqlite3_mutex_held(pCtx->s.db->mutex) ); - sqlite3VdbeMemSetNull(&pCtx->s); - pCtx->isError = SQLITE_NOMEM; - pCtx->fErrorOrAux = 1; - pCtx->s.db->mallocFailed = 1; -} - -/* -** This function is called after a transaction has been committed. It -** invokes callbacks registered with sqlite3_wal_hook() as required. -*/ -static int doWalCallbacks(sqlite3 *db){ - int rc = SQLITE_OK; -#ifndef SQLITE_OMIT_WAL - int i; - for(i=0; inDb; i++){ - Btree *pBt = db->aDb[i].pBt; - if( pBt ){ - int nEntry = sqlite3PagerWalCallback(sqlite3BtreePager(pBt)); - if( db->xWalCallback && nEntry>0 && rc==SQLITE_OK ){ - rc = db->xWalCallback(db->pWalArg, db, db->aDb[i].zName, nEntry); - } - } - } -#endif - return rc; -} - -/* -** Execute the statement pStmt, either until a row of data is ready, the -** statement is completely executed or an error occurs. -** -** This routine implements the bulk of the logic behind the sqlite_step() -** API. The only thing omitted is the automatic recompile if a -** schema change has occurred. That detail is handled by the -** outer sqlite3_step() wrapper procedure. -*/ -static int sqlite3Step(Vdbe *p){ - sqlite3 *db; - int rc; - - assert(p); - if( p->magic!=VDBE_MAGIC_RUN ){ - /* We used to require that sqlite3_reset() be called before retrying - ** sqlite3_step() after any error or after SQLITE_DONE. But beginning - ** with version 3.7.0, we changed this so that sqlite3_reset() would - ** be called automatically instead of throwing the SQLITE_MISUSE error. - ** This "automatic-reset" change is not technically an incompatibility, - ** since any application that receives an SQLITE_MISUSE is broken by - ** definition. - ** - ** Nevertheless, some published applications that were originally written - ** for version 3.6.23 or earlier do in fact depend on SQLITE_MISUSE - ** returns, and those were broken by the automatic-reset change. As a - ** a work-around, the SQLITE_OMIT_AUTORESET compile-time restores the - ** legacy behavior of returning SQLITE_MISUSE for cases where the - ** previous sqlite3_step() returned something other than a SQLITE_LOCKED - ** or SQLITE_BUSY error. - */ -#ifdef SQLITE_OMIT_AUTORESET - if( p->rc==SQLITE_BUSY || p->rc==SQLITE_LOCKED ){ - sqlite3_reset((sqlite3_stmt*)p); - }else{ - return SQLITE_MISUSE_BKPT; - } -#else - sqlite3_reset((sqlite3_stmt*)p); -#endif - } - - /* Check that malloc() has not failed. If it has, return early. */ - db = p->db; - if( db->mallocFailed ){ - p->rc = SQLITE_NOMEM; - return SQLITE_NOMEM; - } - - if( p->pc<=0 && p->expired ){ - p->rc = SQLITE_SCHEMA; - rc = SQLITE_ERROR; - goto end_of_step; - } - if( p->pc<0 ){ - /* If there are no other statements currently running, then - ** reset the interrupt flag. This prevents a call to sqlite3_interrupt - ** from interrupting a statement that has not yet started. 
- */ - if( db->nVdbeActive==0 ){ - db->u1.isInterrupted = 0; - } - - assert( db->nVdbeWrite>0 || db->autoCommit==0 - || (db->nDeferredCons==0 && db->nDeferredImmCons==0) - ); - -#ifndef SQLITE_OMIT_TRACE - if( db->xProfile && !db->init.busy ){ - sqlite3OsCurrentTimeInt64(db->pVfs, &p->startTime); - } -#endif - - db->nVdbeActive++; - if( p->readOnly==0 ) db->nVdbeWrite++; - if( p->bIsReader ) db->nVdbeRead++; - p->pc = 0; - } -#ifndef SQLITE_OMIT_EXPLAIN - if( p->explain ){ - rc = sqlite3VdbeList(p); - }else -#endif /* SQLITE_OMIT_EXPLAIN */ - { - db->nVdbeExec++; - rc = sqlite3VdbeExec(p); - db->nVdbeExec--; - } - -#ifndef SQLITE_OMIT_TRACE - /* Invoke the profile callback if there is one - */ - if( rc!=SQLITE_ROW && db->xProfile && !db->init.busy && p->zSql ){ - sqlite3_int64 iNow; - sqlite3OsCurrentTimeInt64(db->pVfs, &iNow); - db->xProfile(db->pProfileArg, p->zSql, (iNow - p->startTime)*1000000); - } -#endif - - if( rc==SQLITE_DONE ){ - assert( p->rc==SQLITE_OK ); - p->rc = doWalCallbacks(db); - if( p->rc!=SQLITE_OK ){ - rc = SQLITE_ERROR; - } - } - - db->errCode = rc; - if( SQLITE_NOMEM==sqlite3ApiExit(p->db, p->rc) ){ - p->rc = SQLITE_NOMEM; - } -end_of_step: - /* At this point local variable rc holds the value that should be - ** returned if this statement was compiled using the legacy - ** sqlite3_prepare() interface. According to the docs, this can only - ** be one of the values in the first assert() below. Variable p->rc - ** contains the value that would be returned if sqlite3_finalize() - ** were called on statement p. - */ - assert( rc==SQLITE_ROW || rc==SQLITE_DONE || rc==SQLITE_ERROR - || rc==SQLITE_BUSY || rc==SQLITE_MISUSE - ); - assert( p->rc!=SQLITE_ROW && p->rc!=SQLITE_DONE ); - if( p->isPrepareV2 && rc!=SQLITE_ROW && rc!=SQLITE_DONE ){ - /* If this statement was prepared using sqlite3_prepare_v2(), and an - ** error has occurred, then return the error code in p->rc to the - ** caller. Set the error code in the database handle to the same value. - */ - rc = sqlite3VdbeTransferError(p); - } - return (rc&db->errMask); -} - -/* -** This is the top-level implementation of sqlite3_step(). Call -** sqlite3Step() to do most of the work. If a schema error occurs, -** call sqlite3Reprepare() and try again. -*/ -SQLITE_API int sqlite3_step(sqlite3_stmt *pStmt){ - int rc = SQLITE_OK; /* Result from sqlite3Step() */ - int rc2 = SQLITE_OK; /* Result from sqlite3Reprepare() */ - Vdbe *v = (Vdbe*)pStmt; /* the prepared statement */ - int cnt = 0; /* Counter to prevent infinite loop of reprepares */ - sqlite3 *db; /* The database connection */ - - if( vdbeSafetyNotNull(v) ){ - return SQLITE_MISUSE_BKPT; - } - db = v->db; - sqlite3_mutex_enter(db->mutex); - v->doingRerun = 0; - while( (rc = sqlite3Step(v))==SQLITE_SCHEMA - && cnt++ < SQLITE_MAX_SCHEMA_RETRY - && (rc2 = rc = sqlite3Reprepare(v))==SQLITE_OK ){ - sqlite3_reset(pStmt); - v->doingRerun = 1; - assert( v->expired==0 ); - } - if( rc2!=SQLITE_OK ){ - /* This case occurs after failing to recompile an sql statement. - ** The error message from the SQL compiler has already been loaded - ** into the database handle. This block copies the error message - ** from the database handle into the statement and sets the statement - ** program counter to 0 to ensure that when the statement is - ** finalized or reset the parser error message is available via - ** sqlite3_errmsg() and sqlite3_errcode(). 
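
Seen from the public API, the sqlite3Step()/sqlite3_step() machinery above drives the usual prepare, step, finalize cycle. A minimal sketch using only documented entry points (error handling abbreviated):

/* Minimal prepare/step/finalize cycle over the public API whose internals
** appear above. Error handling is abbreviated for brevity. */
#include <stdio.h>
#include <sqlite3.h>

int main(void){
  sqlite3 *db;
  sqlite3_stmt *stmt;
  if( sqlite3_open(":memory:", &db)!=SQLITE_OK ) return 1;
  sqlite3_exec(db, "CREATE TABLE t(a INTEGER, b TEXT);"
                   "INSERT INTO t VALUES (1,'one'),(2,'two');", 0, 0, 0);
  if( sqlite3_prepare_v2(db, "SELECT a, b FROM t WHERE a >= ?", -1,
                         &stmt, 0)==SQLITE_OK ){
    sqlite3_bind_int(stmt, 1, 1);
    while( sqlite3_step(stmt)==SQLITE_ROW ){          /* one row per call */
      printf("%d -> %s\n", sqlite3_column_int(stmt, 0),
             (const char*)sqlite3_column_text(stmt, 1));
    }
    sqlite3_finalize(stmt);   /* resets and deletes the VDBE, as above */
  }
  sqlite3_close(db);
  return 0;
}

Each sqlite3_step() call returns SQLITE_ROW per result row and SQLITE_DONE when the statement has finished; sqlite3_finalize() then releases the statement through the reset and delete paths shown earlier.
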
- */ - const char *zErr = (const char *)sqlite3_value_text(db->pErr); - assert( zErr!=0 || db->mallocFailed ); - sqlite3DbFree(db, v->zErrMsg); - if( !db->mallocFailed ){ - v->zErrMsg = sqlite3DbStrDup(db, zErr); - v->rc = rc2; - } else { - v->zErrMsg = 0; - v->rc = rc = SQLITE_NOMEM; - } - } - rc = sqlite3ApiExit(db, rc); - sqlite3_mutex_leave(db->mutex); - return rc; -} - - -/* -** Extract the user data from a sqlite3_context structure and return a -** pointer to it. -*/ -SQLITE_API void *sqlite3_user_data(sqlite3_context *p){ - assert( p && p->pFunc ); - return p->pFunc->pUserData; -} - -/* -** Extract the user data from a sqlite3_context structure and return a -** pointer to it. -** -** IMPLEMENTATION-OF: R-46798-50301 The sqlite3_context_db_handle() interface -** returns a copy of the pointer to the database connection (the 1st -** parameter) of the sqlite3_create_function() and -** sqlite3_create_function16() routines that originally registered the -** application defined function. -*/ -SQLITE_API sqlite3 *sqlite3_context_db_handle(sqlite3_context *p){ - assert( p && p->pFunc ); - return p->s.db; -} - -/* -** Return the current time for a statement -*/ -SQLITE_PRIVATE sqlite3_int64 sqlite3StmtCurrentTime(sqlite3_context *p){ - Vdbe *v = p->pVdbe; - int rc; - if( v->iCurrentTime==0 ){ - rc = sqlite3OsCurrentTimeInt64(p->s.db->pVfs, &v->iCurrentTime); - if( rc ) v->iCurrentTime = 0; - } - return v->iCurrentTime; -} - -/* -** The following is the implementation of an SQL function that always -** fails with an error message stating that the function is used in the -** wrong context. The sqlite3_overload_function() API might construct -** SQL function that use this routine so that the functions will exist -** for name resolution but are actually overloaded by the xFindFunction -** method of virtual tables. -*/ -SQLITE_PRIVATE void sqlite3InvalidFunction( - sqlite3_context *context, /* The function calling context */ - int NotUsed, /* Number of arguments to the function */ - sqlite3_value **NotUsed2 /* Value of each argument */ -){ - const char *zName = context->pFunc->zName; - char *zErr; - UNUSED_PARAMETER2(NotUsed, NotUsed2); - zErr = sqlite3_mprintf( - "unable to use function %s in the requested context", zName); - sqlite3_result_error(context, zErr, -1); - sqlite3_free(zErr); -} - -/* -** Allocate or return the aggregate context for a user function. A new -** context is allocated on the first call. Subsequent calls return the -** same context that was returned on prior calls. -*/ -SQLITE_API void *sqlite3_aggregate_context(sqlite3_context *p, int nByte){ - Mem *pMem; - assert( p && p->pFunc && p->pFunc->xStep ); - assert( sqlite3_mutex_held(p->s.db->mutex) ); - pMem = p->pMem; - testcase( nByte<0 ); - if( (pMem->flags & MEM_Agg)==0 ){ - if( nByte<=0 ){ - sqlite3VdbeMemReleaseExternal(pMem); - pMem->flags = MEM_Null; - pMem->z = 0; - }else{ - sqlite3VdbeMemGrow(pMem, nByte, 0); - pMem->flags = MEM_Agg; - pMem->u.pDef = p->pFunc; - if( pMem->z ){ - memset(pMem->z, 0, nByte); - } - } - } - return (void*)pMem->z; -} - -/* -** Return the auxilary data pointer, if any, for the iArg'th argument to -** the user-function defined by pCtx. -*/ -SQLITE_API void *sqlite3_get_auxdata(sqlite3_context *pCtx, int iArg){ - AuxData *pAuxData; - - assert( sqlite3_mutex_held(pCtx->s.db->mutex) ); - for(pAuxData=pCtx->pVdbe->pAuxData; pAuxData; pAuxData=pAuxData->pNext){ - if( pAuxData->iOp==pCtx->iOp && pAuxData->iArg==iArg ) break; - } - - return (pAuxData ? 
pAuxData->pAux : 0); -} - -/* -** Set the auxilary data pointer and delete function, for the iArg'th -** argument to the user-function defined by pCtx. Any previous value is -** deleted by calling the delete function specified when it was set. -*/ -SQLITE_API void sqlite3_set_auxdata( - sqlite3_context *pCtx, - int iArg, - void *pAux, - void (*xDelete)(void*) -){ - AuxData *pAuxData; - Vdbe *pVdbe = pCtx->pVdbe; - - assert( sqlite3_mutex_held(pCtx->s.db->mutex) ); - if( iArg<0 ) goto failed; - - for(pAuxData=pVdbe->pAuxData; pAuxData; pAuxData=pAuxData->pNext){ - if( pAuxData->iOp==pCtx->iOp && pAuxData->iArg==iArg ) break; - } - if( pAuxData==0 ){ - pAuxData = sqlite3DbMallocZero(pVdbe->db, sizeof(AuxData)); - if( !pAuxData ) goto failed; - pAuxData->iOp = pCtx->iOp; - pAuxData->iArg = iArg; - pAuxData->pNext = pVdbe->pAuxData; - pVdbe->pAuxData = pAuxData; - if( pCtx->fErrorOrAux==0 ){ - pCtx->isError = 0; - pCtx->fErrorOrAux = 1; - } - }else if( pAuxData->xDelete ){ - pAuxData->xDelete(pAuxData->pAux); - } - - pAuxData->pAux = pAux; - pAuxData->xDelete = xDelete; - return; - -failed: - if( xDelete ){ - xDelete(pAux); - } -} - -#ifndef SQLITE_OMIT_DEPRECATED -/* -** Return the number of times the Step function of a aggregate has been -** called. -** -** This function is deprecated. Do not use it for new code. It is -** provide only to avoid breaking legacy code. New aggregate function -** implementations should keep their own counts within their aggregate -** context. -*/ -SQLITE_API int sqlite3_aggregate_count(sqlite3_context *p){ - assert( p && p->pMem && p->pFunc && p->pFunc->xStep ); - return p->pMem->n; -} -#endif - -/* -** Return the number of columns in the result set for the statement pStmt. -*/ -SQLITE_API int sqlite3_column_count(sqlite3_stmt *pStmt){ - Vdbe *pVm = (Vdbe *)pStmt; - return pVm ? pVm->nResColumn : 0; -} - -/* -** Return the number of values available from the current row of the -** currently executing statement pStmt. -*/ -SQLITE_API int sqlite3_data_count(sqlite3_stmt *pStmt){ - Vdbe *pVm = (Vdbe *)pStmt; - if( pVm==0 || pVm->pResultSet==0 ) return 0; - return pVm->nResColumn; -} - -/* -** Return a pointer to static memory containing an SQL NULL value. -*/ -static const Mem *columnNullValue(void){ - /* Even though the Mem structure contains an element - ** of type i64, on certain architectures (x86) with certain compiler - ** switches (-Os), gcc may align this Mem object on a 4-byte boundary - ** instead of an 8-byte one. This all works fine, except that when - ** running with SQLITE_DEBUG defined the SQLite code sometimes assert()s - ** that a Mem structure is located on an 8-byte boundary. To prevent - ** these assert()s from failing, when building with SQLITE_DEBUG defined - ** using gcc, we force nullMem to be 8-byte aligned using the magical - ** __attribute__((aligned(8))) macro. */ - static const Mem nullMem -#if defined(SQLITE_DEBUG) && defined(__GNUC__) - __attribute__((aligned(8))) -#endif - = {0, "", (double)0, {0}, 0, MEM_Null, 0, -#ifdef SQLITE_DEBUG - 0, 0, /* pScopyFrom, pFiller */ -#endif - 0, 0 }; - return &nullMem; -} - -/* -** Check to see if column iCol of the given statement is valid. If -** it is, return a pointer to the Mem for the value of that column. -** If iCol is not valid, return a pointer to a Mem which has a value -** of NULL. 
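The chunk above documents sqlite3_aggregate_context(): a sized first call allocates zeroed per-aggregate state, and a zero-byte call returns the existing state without allocating. A sketch of how an application-defined aggregate typically consumes it follows; the function name "rowcount" and its registration are assumptions for illustration only.

#include <sqlite3.h>

typedef struct { sqlite3_int64 n; } CountCtx;

/* xStep: first call allocates zeroed state; later calls reuse it. */
static void rowcountStep(sqlite3_context *ctx, int argc, sqlite3_value **argv){
  CountCtx *p = (CountCtx*)sqlite3_aggregate_context(ctx, sizeof(CountCtx));
  if( p ) p->n++;               /* p may be NULL after a malloc failure */
  (void)argc; (void)argv;
}

/* xFinal: a zero-byte request returns existing state without allocating. */
static void rowcountFinal(sqlite3_context *ctx){
  CountCtx *p = (CountCtx*)sqlite3_aggregate_context(ctx, 0);
  sqlite3_result_int64(ctx, p ? p->n : 0);
}

/* Registration (illustrative):
**   sqlite3_create_function(db, "rowcount", 0, SQLITE_UTF8, 0,
**                           0, rowcountStep, rowcountFinal);            */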
-*/ -static Mem *columnMem(sqlite3_stmt *pStmt, int i){ - Vdbe *pVm; - Mem *pOut; - - pVm = (Vdbe *)pStmt; - if( pVm && pVm->pResultSet!=0 && inResColumn && i>=0 ){ - sqlite3_mutex_enter(pVm->db->mutex); - pOut = &pVm->pResultSet[i]; - }else{ - if( pVm && ALWAYS(pVm->db) ){ - sqlite3_mutex_enter(pVm->db->mutex); - sqlite3Error(pVm->db, SQLITE_RANGE, 0); - } - pOut = (Mem*)columnNullValue(); - } - return pOut; -} - -/* -** This function is called after invoking an sqlite3_value_XXX function on a -** column value (i.e. a value returned by evaluating an SQL expression in the -** select list of a SELECT statement) that may cause a malloc() failure. If -** malloc() has failed, the threads mallocFailed flag is cleared and the result -** code of statement pStmt set to SQLITE_NOMEM. -** -** Specifically, this is called from within: -** -** sqlite3_column_int() -** sqlite3_column_int64() -** sqlite3_column_text() -** sqlite3_column_text16() -** sqlite3_column_real() -** sqlite3_column_bytes() -** sqlite3_column_bytes16() -** sqiite3_column_blob() -*/ -static void columnMallocFailure(sqlite3_stmt *pStmt) -{ - /* If malloc() failed during an encoding conversion within an - ** sqlite3_column_XXX API, then set the return code of the statement to - ** SQLITE_NOMEM. The next call to _step() (if any) will return SQLITE_ERROR - ** and _finalize() will return NOMEM. - */ - Vdbe *p = (Vdbe *)pStmt; - if( p ){ - p->rc = sqlite3ApiExit(p->db, p->rc); - sqlite3_mutex_leave(p->db->mutex); - } -} - -/**************************** sqlite3_column_ ******************************* -** The following routines are used to access elements of the current row -** in the result set. -*/ -SQLITE_API const void *sqlite3_column_blob(sqlite3_stmt *pStmt, int i){ - const void *val; - val = sqlite3_value_blob( columnMem(pStmt,i) ); - /* Even though there is no encoding conversion, value_blob() might - ** need to call malloc() to expand the result of a zeroblob() - ** expression. 
- */ - columnMallocFailure(pStmt); - return val; -} -SQLITE_API int sqlite3_column_bytes(sqlite3_stmt *pStmt, int i){ - int val = sqlite3_value_bytes( columnMem(pStmt,i) ); - columnMallocFailure(pStmt); - return val; -} -SQLITE_API int sqlite3_column_bytes16(sqlite3_stmt *pStmt, int i){ - int val = sqlite3_value_bytes16( columnMem(pStmt,i) ); - columnMallocFailure(pStmt); - return val; -} -SQLITE_API double sqlite3_column_double(sqlite3_stmt *pStmt, int i){ - double val = sqlite3_value_double( columnMem(pStmt,i) ); - columnMallocFailure(pStmt); - return val; -} -SQLITE_API int sqlite3_column_int(sqlite3_stmt *pStmt, int i){ - int val = sqlite3_value_int( columnMem(pStmt,i) ); - columnMallocFailure(pStmt); - return val; -} -SQLITE_API sqlite_int64 sqlite3_column_int64(sqlite3_stmt *pStmt, int i){ - sqlite_int64 val = sqlite3_value_int64( columnMem(pStmt,i) ); - columnMallocFailure(pStmt); - return val; -} -SQLITE_API const unsigned char *sqlite3_column_text(sqlite3_stmt *pStmt, int i){ - const unsigned char *val = sqlite3_value_text( columnMem(pStmt,i) ); - columnMallocFailure(pStmt); - return val; -} -SQLITE_API sqlite3_value *sqlite3_column_value(sqlite3_stmt *pStmt, int i){ - Mem *pOut = columnMem(pStmt, i); - if( pOut->flags&MEM_Static ){ - pOut->flags &= ~MEM_Static; - pOut->flags |= MEM_Ephem; - } - columnMallocFailure(pStmt); - return (sqlite3_value *)pOut; -} -#ifndef SQLITE_OMIT_UTF16 -SQLITE_API const void *sqlite3_column_text16(sqlite3_stmt *pStmt, int i){ - const void *val = sqlite3_value_text16( columnMem(pStmt,i) ); - columnMallocFailure(pStmt); - return val; -} -#endif /* SQLITE_OMIT_UTF16 */ -SQLITE_API int sqlite3_column_type(sqlite3_stmt *pStmt, int i){ - int iType = sqlite3_value_type( columnMem(pStmt,i) ); - columnMallocFailure(pStmt); - return iType; -} - -/* -** Convert the N-th element of pStmt->pColName[] into a string using -** xFunc() then return that string. If N is out of range, return 0. -** -** There are up to 5 names for each column. useType determines which -** name is returned. Here are the names: -** -** 0 The column name as it should be displayed for output -** 1 The datatype name for the column -** 2 The name of the database that the column derives from -** 3 The name of the table that the column derives from -** 4 The name of the table column that the result column derives from -** -** If the result is not a simple column reference (if it is an expression -** or a constant) then useTypes 2, 3, and 4 return NULL. -*/ -static const void *columnName( - sqlite3_stmt *pStmt, - int N, - const void *(*xFunc)(Mem*), - int useType -){ - const void *ret = 0; - Vdbe *p = (Vdbe *)pStmt; - int n; - sqlite3 *db = p->db; - - assert( db!=0 ); - n = sqlite3_column_count(pStmt); - if( N=0 ){ - N += useType*n; - sqlite3_mutex_enter(db->mutex); - assert( db->mallocFailed==0 ); - ret = xFunc(&p->aColName[N]); - /* A malloc may have failed inside of the xFunc() call. If this - ** is the case, clear the mallocFailed flag and return NULL. - */ - if( db->mallocFailed ){ - db->mallocFailed = 0; - ret = 0; - } - sqlite3_mutex_leave(db->mutex); - } - return ret; -} - -/* -** Return the name of the Nth column of the result set returned by SQL -** statement pStmt. 
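As a usage sketch of the sqlite3_column_* accessor family documented above (not part of the deleted file), the helper below prints every column of each result row; NULL text values are guarded because sqlite3_column_text() can return a null pointer.

#include <stdio.h>
#include <sqlite3.h>

/* Print name, text value and type code for every column of every row. */
static void dump_rows(sqlite3_stmt *pStmt){
  int nCol = sqlite3_column_count(pStmt);   /* columns in the result set */
  while( sqlite3_step(pStmt)==SQLITE_ROW ){
    int i;
    for(i=0; i<nCol; i++){
      const unsigned char *z = sqlite3_column_text(pStmt, i);
      printf("%s=%s (type %d)\n",
             sqlite3_column_name(pStmt, i),
             z ? (const char*)z : "NULL",
             sqlite3_column_type(pStmt, i));
    }
  }
}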
-*/ -SQLITE_API const char *sqlite3_column_name(sqlite3_stmt *pStmt, int N){ - return columnName( - pStmt, N, (const void*(*)(Mem*))sqlite3_value_text, COLNAME_NAME); -} -#ifndef SQLITE_OMIT_UTF16 -SQLITE_API const void *sqlite3_column_name16(sqlite3_stmt *pStmt, int N){ - return columnName( - pStmt, N, (const void*(*)(Mem*))sqlite3_value_text16, COLNAME_NAME); -} -#endif - -/* -** Constraint: If you have ENABLE_COLUMN_METADATA then you must -** not define OMIT_DECLTYPE. -*/ -#if defined(SQLITE_OMIT_DECLTYPE) && defined(SQLITE_ENABLE_COLUMN_METADATA) -# error "Must not define both SQLITE_OMIT_DECLTYPE \ - and SQLITE_ENABLE_COLUMN_METADATA" -#endif - -#ifndef SQLITE_OMIT_DECLTYPE -/* -** Return the column declaration type (if applicable) of the 'i'th column -** of the result set of SQL statement pStmt. -*/ -SQLITE_API const char *sqlite3_column_decltype(sqlite3_stmt *pStmt, int N){ - return columnName( - pStmt, N, (const void*(*)(Mem*))sqlite3_value_text, COLNAME_DECLTYPE); -} -#ifndef SQLITE_OMIT_UTF16 -SQLITE_API const void *sqlite3_column_decltype16(sqlite3_stmt *pStmt, int N){ - return columnName( - pStmt, N, (const void*(*)(Mem*))sqlite3_value_text16, COLNAME_DECLTYPE); -} -#endif /* SQLITE_OMIT_UTF16 */ -#endif /* SQLITE_OMIT_DECLTYPE */ - -#ifdef SQLITE_ENABLE_COLUMN_METADATA -/* -** Return the name of the database from which a result column derives. -** NULL is returned if the result column is an expression or constant or -** anything else which is not an unabiguous reference to a database column. -*/ -SQLITE_API const char *sqlite3_column_database_name(sqlite3_stmt *pStmt, int N){ - return columnName( - pStmt, N, (const void*(*)(Mem*))sqlite3_value_text, COLNAME_DATABASE); -} -#ifndef SQLITE_OMIT_UTF16 -SQLITE_API const void *sqlite3_column_database_name16(sqlite3_stmt *pStmt, int N){ - return columnName( - pStmt, N, (const void*(*)(Mem*))sqlite3_value_text16, COLNAME_DATABASE); -} -#endif /* SQLITE_OMIT_UTF16 */ - -/* -** Return the name of the table from which a result column derives. -** NULL is returned if the result column is an expression or constant or -** anything else which is not an unabiguous reference to a database column. -*/ -SQLITE_API const char *sqlite3_column_table_name(sqlite3_stmt *pStmt, int N){ - return columnName( - pStmt, N, (const void*(*)(Mem*))sqlite3_value_text, COLNAME_TABLE); -} -#ifndef SQLITE_OMIT_UTF16 -SQLITE_API const void *sqlite3_column_table_name16(sqlite3_stmt *pStmt, int N){ - return columnName( - pStmt, N, (const void*(*)(Mem*))sqlite3_value_text16, COLNAME_TABLE); -} -#endif /* SQLITE_OMIT_UTF16 */ - -/* -** Return the name of the table column from which a result column derives. -** NULL is returned if the result column is an expression or constant or -** anything else which is not an unabiguous reference to a database column. -*/ -SQLITE_API const char *sqlite3_column_origin_name(sqlite3_stmt *pStmt, int N){ - return columnName( - pStmt, N, (const void*(*)(Mem*))sqlite3_value_text, COLNAME_COLUMN); -} -#ifndef SQLITE_OMIT_UTF16 -SQLITE_API const void *sqlite3_column_origin_name16(sqlite3_stmt *pStmt, int N){ - return columnName( - pStmt, N, (const void*(*)(Mem*))sqlite3_value_text16, COLNAME_COLUMN); -} -#endif /* SQLITE_OMIT_UTF16 */ -#endif /* SQLITE_ENABLE_COLUMN_METADATA */ - - -/******************************* sqlite3_bind_ *************************** -** -** Routines used to attach values to wildcards in a compiled SQL statement. -*/ -/* -** Unbind the value bound to variable i in virtual machine p. 
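The metadata accessors above return NULL for anything that is not a plain column reference, and the database/table/origin variants exist only in builds with SQLITE_ENABLE_COLUMN_METADATA. A hedged sketch of inspecting a result column under those assumptions:

#include <stdio.h>
#include <sqlite3.h>

/* Describe result column i; assumes a column-metadata-enabled build for
** the origin lookups, and treats NULL results as expression columns. */
static void describe_column(sqlite3_stmt *pStmt, int i){
  const char *zDecl = sqlite3_column_decltype(pStmt, i);
  printf("name=%s decltype=%s\n",
         sqlite3_column_name(pStmt, i),
         zDecl ? zDecl : "(expression)");
#ifdef SQLITE_ENABLE_COLUMN_METADATA
  {
    const char *zTab = sqlite3_column_table_name(pStmt, i);
    const char *zCol = sqlite3_column_origin_name(pStmt, i);
    printf("origin=%s.%s\n", zTab ? zTab : "?", zCol ? zCol : "?");
  }
#endif
}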
This is the -** the same as binding a NULL value to the column. If the "i" parameter is -** out of range, then SQLITE_RANGE is returned. Othewise SQLITE_OK. -** -** A successful evaluation of this routine acquires the mutex on p. -** the mutex is released if any kind of error occurs. -** -** The error code stored in database p->db is overwritten with the return -** value in any case. -*/ -static int vdbeUnbind(Vdbe *p, int i){ - Mem *pVar; - if( vdbeSafetyNotNull(p) ){ - return SQLITE_MISUSE_BKPT; - } - sqlite3_mutex_enter(p->db->mutex); - if( p->magic!=VDBE_MAGIC_RUN || p->pc>=0 ){ - sqlite3Error(p->db, SQLITE_MISUSE, 0); - sqlite3_mutex_leave(p->db->mutex); - sqlite3_log(SQLITE_MISUSE, - "bind on a busy prepared statement: [%s]", p->zSql); - return SQLITE_MISUSE_BKPT; - } - if( i<1 || i>p->nVar ){ - sqlite3Error(p->db, SQLITE_RANGE, 0); - sqlite3_mutex_leave(p->db->mutex); - return SQLITE_RANGE; - } - i--; - pVar = &p->aVar[i]; - sqlite3VdbeMemRelease(pVar); - pVar->flags = MEM_Null; - sqlite3Error(p->db, SQLITE_OK, 0); - - /* If the bit corresponding to this variable in Vdbe.expmask is set, then - ** binding a new value to this variable invalidates the current query plan. - ** - ** IMPLEMENTATION-OF: R-48440-37595 If the specific value bound to host - ** parameter in the WHERE clause might influence the choice of query plan - ** for a statement, then the statement will be automatically recompiled, - ** as if there had been a schema change, on the first sqlite3_step() call - ** following any change to the bindings of that parameter. - */ - if( p->isPrepareV2 && - ((i<32 && p->expmask & ((u32)1 << i)) || p->expmask==0xffffffff) - ){ - p->expired = 1; - } - return SQLITE_OK; -} - -/* -** Bind a text or BLOB value. -*/ -static int bindText( - sqlite3_stmt *pStmt, /* The statement to bind against */ - int i, /* Index of the parameter to bind */ - const void *zData, /* Pointer to the data to be bound */ - int nData, /* Number of bytes of data to be bound */ - void (*xDel)(void*), /* Destructor for the data */ - u8 encoding /* Encoding for the data */ -){ - Vdbe *p = (Vdbe *)pStmt; - Mem *pVar; - int rc; - - rc = vdbeUnbind(p, i); - if( rc==SQLITE_OK ){ - if( zData!=0 ){ - pVar = &p->aVar[i-1]; - rc = sqlite3VdbeMemSetStr(pVar, zData, nData, encoding, xDel); - if( rc==SQLITE_OK && encoding!=0 ){ - rc = sqlite3VdbeChangeEncoding(pVar, ENC(p->db)); - } - sqlite3Error(p->db, rc, 0); - rc = sqlite3ApiExit(p->db, rc); - } - sqlite3_mutex_leave(p->db->mutex); - }else if( xDel!=SQLITE_STATIC && xDel!=SQLITE_TRANSIENT ){ - xDel((void*)zData); - } - return rc; -} - - -/* -** Bind a blob value to an SQL statement variable. 
-*/ -SQLITE_API int sqlite3_bind_blob( - sqlite3_stmt *pStmt, - int i, - const void *zData, - int nData, - void (*xDel)(void*) -){ - return bindText(pStmt, i, zData, nData, xDel, 0); -} -SQLITE_API int sqlite3_bind_double(sqlite3_stmt *pStmt, int i, double rValue){ - int rc; - Vdbe *p = (Vdbe *)pStmt; - rc = vdbeUnbind(p, i); - if( rc==SQLITE_OK ){ - sqlite3VdbeMemSetDouble(&p->aVar[i-1], rValue); - sqlite3_mutex_leave(p->db->mutex); - } - return rc; -} -SQLITE_API int sqlite3_bind_int(sqlite3_stmt *p, int i, int iValue){ - return sqlite3_bind_int64(p, i, (i64)iValue); -} -SQLITE_API int sqlite3_bind_int64(sqlite3_stmt *pStmt, int i, sqlite_int64 iValue){ - int rc; - Vdbe *p = (Vdbe *)pStmt; - rc = vdbeUnbind(p, i); - if( rc==SQLITE_OK ){ - sqlite3VdbeMemSetInt64(&p->aVar[i-1], iValue); - sqlite3_mutex_leave(p->db->mutex); - } - return rc; -} -SQLITE_API int sqlite3_bind_null(sqlite3_stmt *pStmt, int i){ - int rc; - Vdbe *p = (Vdbe*)pStmt; - rc = vdbeUnbind(p, i); - if( rc==SQLITE_OK ){ - sqlite3_mutex_leave(p->db->mutex); - } - return rc; -} -SQLITE_API int sqlite3_bind_text( - sqlite3_stmt *pStmt, - int i, - const char *zData, - int nData, - void (*xDel)(void*) -){ - return bindText(pStmt, i, zData, nData, xDel, SQLITE_UTF8); -} -#ifndef SQLITE_OMIT_UTF16 -SQLITE_API int sqlite3_bind_text16( - sqlite3_stmt *pStmt, - int i, - const void *zData, - int nData, - void (*xDel)(void*) -){ - return bindText(pStmt, i, zData, nData, xDel, SQLITE_UTF16NATIVE); -} -#endif /* SQLITE_OMIT_UTF16 */ -SQLITE_API int sqlite3_bind_value(sqlite3_stmt *pStmt, int i, const sqlite3_value *pValue){ - int rc; - switch( sqlite3_value_type((sqlite3_value*)pValue) ){ - case SQLITE_INTEGER: { - rc = sqlite3_bind_int64(pStmt, i, pValue->u.i); - break; - } - case SQLITE_FLOAT: { - rc = sqlite3_bind_double(pStmt, i, pValue->r); - break; - } - case SQLITE_BLOB: { - if( pValue->flags & MEM_Zero ){ - rc = sqlite3_bind_zeroblob(pStmt, i, pValue->u.nZero); - }else{ - rc = sqlite3_bind_blob(pStmt, i, pValue->z, pValue->n,SQLITE_TRANSIENT); - } - break; - } - case SQLITE_TEXT: { - rc = bindText(pStmt,i, pValue->z, pValue->n, SQLITE_TRANSIENT, - pValue->enc); - break; - } - default: { - rc = sqlite3_bind_null(pStmt, i); - break; - } - } - return rc; -} -SQLITE_API int sqlite3_bind_zeroblob(sqlite3_stmt *pStmt, int i, int n){ - int rc; - Vdbe *p = (Vdbe *)pStmt; - rc = vdbeUnbind(p, i); - if( rc==SQLITE_OK ){ - sqlite3VdbeMemSetZeroBlob(&p->aVar[i-1], n); - sqlite3_mutex_leave(p->db->mutex); - } - return rc; -} - -/* -** Return the number of wildcards that can be potentially bound to. -** This routine is added to support DBD::SQLite. -*/ -SQLITE_API int sqlite3_bind_parameter_count(sqlite3_stmt *pStmt){ - Vdbe *p = (Vdbe*)pStmt; - return p ? p->nVar : 0; -} - -/* -** Return the name of a wildcard parameter. Return NULL if the index -** is out of range or if the wildcard is unnamed. -** -** The result is always UTF-8. -*/ -SQLITE_API const char *sqlite3_bind_parameter_name(sqlite3_stmt *pStmt, int i){ - Vdbe *p = (Vdbe*)pStmt; - if( p==0 || i<1 || i>p->nzVar ){ - return 0; - } - return p->azVar[i-1]; -} - -/* -** Given a wildcard parameter name, return the index of the variable -** with that name. If there is no variable with the given name, -** return 0. 
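The bind family above supports both positional and named parameters, with sqlite3_bind_parameter_index() mapping a name (including its leading ':', '$' or '@') to an index. A minimal sketch combining the two; the "users" table and column names are placeholders, not part of the diff.

#include <sqlite3.h>

/* Bind one positional and one named parameter, then execute the INSERT. */
static int insert_user(sqlite3 *db, int id, const char *zName){
  sqlite3_stmt *pStmt = 0;
  int rc = sqlite3_prepare_v2(db,
      "INSERT INTO users(id, name) VALUES(?1, :name)", -1, &pStmt, 0);
  if( rc!=SQLITE_OK ) return rc;
  sqlite3_bind_int(pStmt, 1, id);
  sqlite3_bind_text(pStmt,
      sqlite3_bind_parameter_index(pStmt, ":name"),  /* index lookup by name */
      zName, -1, SQLITE_TRANSIENT);
  rc = sqlite3_step(pStmt);
  sqlite3_finalize(pStmt);
  return rc==SQLITE_DONE ? SQLITE_OK : rc;
}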
-*/ -SQLITE_PRIVATE int sqlite3VdbeParameterIndex(Vdbe *p, const char *zName, int nName){ - int i; - if( p==0 ){ - return 0; - } - if( zName ){ - for(i=0; inzVar; i++){ - const char *z = p->azVar[i]; - if( z && strncmp(z,zName,nName)==0 && z[nName]==0 ){ - return i+1; - } - } - } - return 0; -} -SQLITE_API int sqlite3_bind_parameter_index(sqlite3_stmt *pStmt, const char *zName){ - return sqlite3VdbeParameterIndex((Vdbe*)pStmt, zName, sqlite3Strlen30(zName)); -} - -/* -** Transfer all bindings from the first statement over to the second. -*/ -SQLITE_PRIVATE int sqlite3TransferBindings(sqlite3_stmt *pFromStmt, sqlite3_stmt *pToStmt){ - Vdbe *pFrom = (Vdbe*)pFromStmt; - Vdbe *pTo = (Vdbe*)pToStmt; - int i; - assert( pTo->db==pFrom->db ); - assert( pTo->nVar==pFrom->nVar ); - sqlite3_mutex_enter(pTo->db->mutex); - for(i=0; inVar; i++){ - sqlite3VdbeMemMove(&pTo->aVar[i], &pFrom->aVar[i]); - } - sqlite3_mutex_leave(pTo->db->mutex); - return SQLITE_OK; -} - -#ifndef SQLITE_OMIT_DEPRECATED -/* -** Deprecated external interface. Internal/core SQLite code -** should call sqlite3TransferBindings. -** -** Is is misuse to call this routine with statements from different -** database connections. But as this is a deprecated interface, we -** will not bother to check for that condition. -** -** If the two statements contain a different number of bindings, then -** an SQLITE_ERROR is returned. Nothing else can go wrong, so otherwise -** SQLITE_OK is returned. -*/ -SQLITE_API int sqlite3_transfer_bindings(sqlite3_stmt *pFromStmt, sqlite3_stmt *pToStmt){ - Vdbe *pFrom = (Vdbe*)pFromStmt; - Vdbe *pTo = (Vdbe*)pToStmt; - if( pFrom->nVar!=pTo->nVar ){ - return SQLITE_ERROR; - } - if( pTo->isPrepareV2 && pTo->expmask ){ - pTo->expired = 1; - } - if( pFrom->isPrepareV2 && pFrom->expmask ){ - pFrom->expired = 1; - } - return sqlite3TransferBindings(pFromStmt, pToStmt); -} -#endif - -/* -** Return the sqlite3* database handle to which the prepared statement given -** in the argument belongs. This is the same database handle that was -** the first argument to the sqlite3_prepare() that was used to create -** the statement in the first place. -*/ -SQLITE_API sqlite3 *sqlite3_db_handle(sqlite3_stmt *pStmt){ - return pStmt ? ((Vdbe*)pStmt)->db : 0; -} - -/* -** Return true if the prepared statement is guaranteed to not modify the -** database. -*/ -SQLITE_API int sqlite3_stmt_readonly(sqlite3_stmt *pStmt){ - return pStmt ? ((Vdbe*)pStmt)->readOnly : 1; -} - -/* -** Return true if the prepared statement is in need of being reset. -*/ -SQLITE_API int sqlite3_stmt_busy(sqlite3_stmt *pStmt){ - Vdbe *v = (Vdbe*)pStmt; - return v!=0 && v->pc>0 && v->magic==VDBE_MAGIC_RUN; -} - -/* -** Return a pointer to the next prepared statement after pStmt associated -** with database connection pDb. If pStmt is NULL, return the first -** prepared statement for the database connection. Return NULL if there -** are no more. 
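A common use of the statement-iteration interface described above is finding statements that are still mid-execution and resetting them; a minimal sketch under that assumption:

#include <sqlite3.h>

/* Reset every prepared statement on the connection that needs a reset. */
static void reset_busy_statements(sqlite3 *db){
  sqlite3_stmt *pStmt;
  for(pStmt = sqlite3_next_stmt(db, 0); pStmt;
      pStmt = sqlite3_next_stmt(db, pStmt)){
    if( sqlite3_stmt_busy(pStmt) ){
      sqlite3_reset(pStmt);
    }
  }
}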
-*/ -SQLITE_API sqlite3_stmt *sqlite3_next_stmt(sqlite3 *pDb, sqlite3_stmt *pStmt){ - sqlite3_stmt *pNext; - sqlite3_mutex_enter(pDb->mutex); - if( pStmt==0 ){ - pNext = (sqlite3_stmt*)pDb->pVdbe; - }else{ - pNext = (sqlite3_stmt*)((Vdbe*)pStmt)->pNext; - } - sqlite3_mutex_leave(pDb->mutex); - return pNext; -} - -/* -** Return the value of a status counter for a prepared statement -*/ -SQLITE_API int sqlite3_stmt_status(sqlite3_stmt *pStmt, int op, int resetFlag){ - Vdbe *pVdbe = (Vdbe*)pStmt; - u32 v = pVdbe->aCounter[op]; - if( resetFlag ) pVdbe->aCounter[op] = 0; - return (int)v; -} - -/************** End of vdbeapi.c *********************************************/ -/************** Begin file vdbetrace.c ***************************************/ -/* -** 2009 November 25 -** -** The author disclaims copyright to this source code. In place of -** a legal notice, here is a blessing: -** -** May you do good and not evil. -** May you find forgiveness for yourself and forgive others. -** May you share freely, never taking more than you give. -** -************************************************************************* -** -** This file contains code used to insert the values of host parameters -** (aka "wildcards") into the SQL text output by sqlite3_trace(). -** -** The Vdbe parse-tree explainer is also found here. -*/ - -#ifndef SQLITE_OMIT_TRACE - -/* -** zSql is a zero-terminated string of UTF-8 SQL text. Return the number of -** bytes in this text up to but excluding the first character in -** a host parameter. If the text contains no host parameters, return -** the total number of bytes in the text. -*/ -static int findNextHostParameter(const char *zSql, int *pnToken){ - int tokenType; - int nTotal = 0; - int n; - - *pnToken = 0; - while( zSql[0] ){ - n = sqlite3GetToken((u8*)zSql, &tokenType); - assert( n>0 && tokenType!=TK_ILLEGAL ); - if( tokenType==TK_VARIABLE ){ - *pnToken = n; - break; - } - nTotal += n; - zSql += n; - } - return nTotal; -} - -/* -** This function returns a pointer to a nul-terminated string in memory -** obtained from sqlite3DbMalloc(). If sqlite3.nVdbeExec is 1, then the -** string contains a copy of zRawSql but with host parameters expanded to -** their current bindings. Or, if sqlite3.nVdbeExec is greater than 1, -** then the returned string holds a copy of zRawSql with "-- " prepended -** to each line of text. -** -** If the SQLITE_TRACE_SIZE_LIMIT macro is defined to an integer, then -** then long strings and blobs are truncated to that many bytes. This -** can be used to prevent unreasonably large trace strings when dealing -** with large (multi-megabyte) strings and blobs. -** -** The calling function is responsible for making sure the memory returned -** is eventually freed. -** -** ALGORITHM: Scan the input string looking for host parameters in any of -** these forms: ?, ?N, $A, @A, :A. Take care to avoid text within -** string literals, quoted identifier names, and comments. For text forms, -** the host parameter index is found by scanning the perpared -** statement for the corresponding OP_Variable opcode. Once the host -** parameter index is known, locate the value in p->aVar[]. Then render -** the value as a literal in place of the host parameter name. -*/ -SQLITE_PRIVATE char *sqlite3VdbeExpandSql( - Vdbe *p, /* The prepared statement being evaluated */ - const char *zRawSql /* Raw text of the SQL statement */ -){ - sqlite3 *db; /* The database connection */ - int idx = 0; /* Index of a host parameter */ - int nextIndex = 1; /* Index of next ? 
host parameter */ - int n; /* Length of a token prefix */ - int nToken; /* Length of the parameter token */ - int i; /* Loop counter */ - Mem *pVar; /* Value of a host parameter */ - StrAccum out; /* Accumulate the output here */ - char zBase[100]; /* Initial working space */ - - db = p->db; - sqlite3StrAccumInit(&out, zBase, sizeof(zBase), - db->aLimit[SQLITE_LIMIT_LENGTH]); - out.db = db; - if( db->nVdbeExec>1 ){ - while( *zRawSql ){ - const char *zStart = zRawSql; - while( *(zRawSql++)!='\n' && *zRawSql ); - sqlite3StrAccumAppend(&out, "-- ", 3); - assert( (zRawSql - zStart) > 0 ); - sqlite3StrAccumAppend(&out, zStart, (int)(zRawSql-zStart)); - } - }else{ - while( zRawSql[0] ){ - n = findNextHostParameter(zRawSql, &nToken); - assert( n>0 ); - sqlite3StrAccumAppend(&out, zRawSql, n); - zRawSql += n; - assert( zRawSql[0] || nToken==0 ); - if( nToken==0 ) break; - if( zRawSql[0]=='?' ){ - if( nToken>1 ){ - assert( sqlite3Isdigit(zRawSql[1]) ); - sqlite3GetInt32(&zRawSql[1], &idx); - }else{ - idx = nextIndex; - } - }else{ - assert( zRawSql[0]==':' || zRawSql[0]=='$' || zRawSql[0]=='@' ); - testcase( zRawSql[0]==':' ); - testcase( zRawSql[0]=='$' ); - testcase( zRawSql[0]=='@' ); - idx = sqlite3VdbeParameterIndex(p, zRawSql, nToken); - assert( idx>0 ); - } - zRawSql += nToken; - nextIndex = idx + 1; - assert( idx>0 && idx<=p->nVar ); - pVar = &p->aVar[idx-1]; - if( pVar->flags & MEM_Null ){ - sqlite3StrAccumAppend(&out, "NULL", 4); - }else if( pVar->flags & MEM_Int ){ - sqlite3XPrintf(&out, 0, "%lld", pVar->u.i); - }else if( pVar->flags & MEM_Real ){ - sqlite3XPrintf(&out, 0, "%!.15g", pVar->r); - }else if( pVar->flags & MEM_Str ){ - int nOut; /* Number of bytes of the string text to include in output */ -#ifndef SQLITE_OMIT_UTF16 - u8 enc = ENC(db); - Mem utf8; - if( enc!=SQLITE_UTF8 ){ - memset(&utf8, 0, sizeof(utf8)); - utf8.db = db; - sqlite3VdbeMemSetStr(&utf8, pVar->z, pVar->n, enc, SQLITE_STATIC); - sqlite3VdbeChangeEncoding(&utf8, SQLITE_UTF8); - pVar = &utf8; - } -#endif - nOut = pVar->n; -#ifdef SQLITE_TRACE_SIZE_LIMIT - if( nOut>SQLITE_TRACE_SIZE_LIMIT ){ - nOut = SQLITE_TRACE_SIZE_LIMIT; - while( nOutn && (pVar->z[nOut]&0xc0)==0x80 ){ nOut++; } - } -#endif - sqlite3XPrintf(&out, 0, "'%.*q'", nOut, pVar->z); -#ifdef SQLITE_TRACE_SIZE_LIMIT - if( nOutn ){ - sqlite3XPrintf(&out, 0, "/*+%d bytes*/", pVar->n-nOut); - } -#endif -#ifndef SQLITE_OMIT_UTF16 - if( enc!=SQLITE_UTF8 ) sqlite3VdbeMemRelease(&utf8); -#endif - }else if( pVar->flags & MEM_Zero ){ - sqlite3XPrintf(&out, 0, "zeroblob(%d)", pVar->u.nZero); - }else{ - int nOut; /* Number of bytes of the blob to include in output */ - assert( pVar->flags & MEM_Blob ); - sqlite3StrAccumAppend(&out, "x'", 2); - nOut = pVar->n; -#ifdef SQLITE_TRACE_SIZE_LIMIT - if( nOut>SQLITE_TRACE_SIZE_LIMIT ) nOut = SQLITE_TRACE_SIZE_LIMIT; -#endif - for(i=0; iz[i]&0xff); - } - sqlite3StrAccumAppend(&out, "'", 1); -#ifdef SQLITE_TRACE_SIZE_LIMIT - if( nOutn ){ - sqlite3XPrintf(&out, 0, "/*+%d bytes*/", pVar->n-nOut); - } -#endif - } - } - } - return sqlite3StrAccumFinish(&out); -} - -#endif /* #ifndef SQLITE_OMIT_TRACE */ - -/***************************************************************************** -** The following code implements the data-structure explaining logic -** for the Vdbe. 
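sqlite3VdbeExpandSql() above is what supplies the legacy sqlite3_trace() callback with SQL text whose host parameters have been expanded to their current bindings. A sketch of registering such a callback follows; newer builds also offer sqlite3_trace_v2(), which is not shown here.

#include <stdio.h>
#include <sqlite3.h>

/* Trace callback: receives SQL text with host parameters expanded. */
static void trace_cb(void *pArg, const char *zSql){
  (void)pArg;
  fprintf(stderr, "SQL: %s\n", zSql);
}

/* Registration (illustrative): sqlite3_trace(db, trace_cb, 0); */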
-*/ - -#if defined(SQLITE_ENABLE_TREE_EXPLAIN) - -/* -** Allocate a new Explain object -*/ -SQLITE_PRIVATE void sqlite3ExplainBegin(Vdbe *pVdbe){ - if( pVdbe ){ - Explain *p; - sqlite3BeginBenignMalloc(); - p = (Explain *)sqlite3MallocZero( sizeof(Explain) ); - if( p ){ - p->pVdbe = pVdbe; - sqlite3_free(pVdbe->pExplain); - pVdbe->pExplain = p; - sqlite3StrAccumInit(&p->str, p->zBase, sizeof(p->zBase), - SQLITE_MAX_LENGTH); - p->str.useMalloc = 2; - }else{ - sqlite3EndBenignMalloc(); - } - } -} - -/* -** Return true if the Explain ends with a new-line. -*/ -static int endsWithNL(Explain *p){ - return p && p->str.zText && p->str.nChar - && p->str.zText[p->str.nChar-1]=='\n'; -} - -/* -** Append text to the indentation -*/ -SQLITE_PRIVATE void sqlite3ExplainPrintf(Vdbe *pVdbe, const char *zFormat, ...){ - Explain *p; - if( pVdbe && (p = pVdbe->pExplain)!=0 ){ - va_list ap; - if( p->nIndent && endsWithNL(p) ){ - int n = p->nIndent; - if( n>ArraySize(p->aIndent) ) n = ArraySize(p->aIndent); - sqlite3AppendSpace(&p->str, p->aIndent[n-1]); - } - va_start(ap, zFormat); - sqlite3VXPrintf(&p->str, SQLITE_PRINTF_INTERNAL, zFormat, ap); - va_end(ap); - } -} - -/* -** Append a '\n' if there is not already one. -*/ -SQLITE_PRIVATE void sqlite3ExplainNL(Vdbe *pVdbe){ - Explain *p; - if( pVdbe && (p = pVdbe->pExplain)!=0 && !endsWithNL(p) ){ - sqlite3StrAccumAppend(&p->str, "\n", 1); - } -} - -/* -** Push a new indentation level. Subsequent lines will be indented -** so that they begin at the current cursor position. -*/ -SQLITE_PRIVATE void sqlite3ExplainPush(Vdbe *pVdbe){ - Explain *p; - if( pVdbe && (p = pVdbe->pExplain)!=0 ){ - if( p->str.zText && p->nIndentaIndent) ){ - const char *z = p->str.zText; - int i = p->str.nChar-1; - int x; - while( i>=0 && z[i]!='\n' ){ i--; } - x = (p->str.nChar - 1) - i; - if( p->nIndent && xaIndent[p->nIndent-1] ){ - x = p->aIndent[p->nIndent-1]; - } - p->aIndent[p->nIndent] = x; - } - p->nIndent++; - } -} - -/* -** Pop the indentation stack by one level. -*/ -SQLITE_PRIVATE void sqlite3ExplainPop(Vdbe *p){ - if( p && p->pExplain ) p->pExplain->nIndent--; -} - -/* -** Free the indentation structure -*/ -SQLITE_PRIVATE void sqlite3ExplainFinish(Vdbe *pVdbe){ - if( pVdbe && pVdbe->pExplain ){ - sqlite3_free(pVdbe->zExplain); - sqlite3ExplainNL(pVdbe); - pVdbe->zExplain = sqlite3StrAccumFinish(&pVdbe->pExplain->str); - sqlite3_free(pVdbe->pExplain); - pVdbe->pExplain = 0; - sqlite3EndBenignMalloc(); - } -} - -/* -** Return the explanation of a virtual machine. -*/ -SQLITE_PRIVATE const char *sqlite3VdbeExplanation(Vdbe *pVdbe){ - return (pVdbe && pVdbe->zExplain) ? pVdbe->zExplain : 0; -} -#endif /* defined(SQLITE_DEBUG) */ - -/************** End of vdbetrace.c *******************************************/ -/************** Begin file vdbe.c ********************************************/ -/* -** 2001 September 15 -** -** The author disclaims copyright to this source code. In place of -** a legal notice, here is a blessing: -** -** May you do good and not evil. -** May you find forgiveness for yourself and forgive others. -** May you share freely, never taking more than you give. -** -************************************************************************* -** The code in this file implements the function that runs the -** bytecode of a prepared statement. -** -** Various scripts scan this source file in order to generate HTML -** documentation, headers files, or other derived files. The formatting -** of the code in this file is, therefore, important. 
See other comments -** in this file for details. If in doubt, do not deviate from existing -** commenting and indentation practices when changing or adding code. -*/ - -/* -** Invoke this macro on memory cells just prior to changing the -** value of the cell. This macro verifies that shallow copies are -** not misused. A shallow copy of a string or blob just copies a -** pointer to the string or blob, not the content. If the original -** is changed while the copy is still in use, the string or blob might -** be changed out from under the copy. This macro verifies that nothing -** like that ever happens. -*/ -#ifdef SQLITE_DEBUG -# define memAboutToChange(P,M) sqlite3VdbeMemAboutToChange(P,M) -#else -# define memAboutToChange(P,M) -#endif - -/* -** The following global variable is incremented every time a cursor -** moves, either by the OP_SeekXX, OP_Next, or OP_Prev opcodes. The test -** procedures use this information to make sure that indices are -** working correctly. This variable has no function other than to -** help verify the correct operation of the library. -*/ -#ifdef SQLITE_TEST -SQLITE_API int sqlite3_search_count = 0; -#endif - -/* -** When this global variable is positive, it gets decremented once before -** each instruction in the VDBE. When it reaches zero, the u1.isInterrupted -** field of the sqlite3 structure is set in order to simulate an interrupt. -** -** This facility is used for testing purposes only. It does not function -** in an ordinary build. -*/ -#ifdef SQLITE_TEST -SQLITE_API int sqlite3_interrupt_count = 0; -#endif - -/* -** The next global variable is incremented each type the OP_Sort opcode -** is executed. The test procedures use this information to make sure that -** sorting is occurring or not occurring at appropriate times. This variable -** has no function other than to help verify the correct operation of the -** library. -*/ -#ifdef SQLITE_TEST -SQLITE_API int sqlite3_sort_count = 0; -#endif - -/* -** The next global variable records the size of the largest MEM_Blob -** or MEM_Str that has been used by a VDBE opcode. The test procedures -** use this information to make sure that the zero-blob functionality -** is working correctly. This variable has no function other than to -** help verify the correct operation of the library. -*/ -#ifdef SQLITE_TEST -SQLITE_API int sqlite3_max_blobsize = 0; -static void updateMaxBlobsize(Mem *p){ - if( (p->flags & (MEM_Str|MEM_Blob))!=0 && p->n>sqlite3_max_blobsize ){ - sqlite3_max_blobsize = p->n; - } -} -#endif - -/* -** The next global variable is incremented each time the OP_Found opcode -** is executed. This is used to test whether or not the foreign key -** operation implemented using OP_FkIsZero is working. This variable -** has no function other than to help verify the correct operation of the -** library. -*/ -#ifdef SQLITE_TEST -SQLITE_API int sqlite3_found_count = 0; -#endif - -/* -** Test a register to see if it exceeds the current maximum blob size. -** If it does, record the new maximum blob size. -*/ -#if defined(SQLITE_TEST) && !defined(SQLITE_OMIT_BUILTIN_TEST) -# define UPDATE_MAX_BLOBSIZE(P) updateMaxBlobsize(P) -#else -# define UPDATE_MAX_BLOBSIZE(P) -#endif - -/* -** Invoke the VDBE coverage callback, if that callback is defined. This -** feature is used for test suite validation only and does not appear an -** production builds. -** -** M is an integer, 2 or 3, that indices how many different ways the -** branch can go. It is usually 2. "I" is the direction the branch -** goes. 
0 means falls through. 1 means branch is taken. 2 means the -** second alternative branch is taken. -*/ -#if !defined(SQLITE_VDBE_COVERAGE) -# define VdbeBranchTaken(I,M) -#else -# define VdbeBranchTaken(I,M) vdbeTakeBranch(pOp->iSrcLine,I,M) - static void vdbeTakeBranch(int iSrcLine, u8 I, u8 M){ - if( iSrcLine<=2 && ALWAYS(iSrcLine>0) ){ - M = iSrcLine; - /* Assert the truth of VdbeCoverageAlwaysTaken() and - ** VdbeCoverageNeverTaken() */ - assert( (M & I)==I ); - }else{ - if( sqlite3GlobalConfig.xVdbeBranch==0 ) return; /*NO_TEST*/ - sqlite3GlobalConfig.xVdbeBranch(sqlite3GlobalConfig.pVdbeBranchArg, - iSrcLine,I,M); - } - } -#endif - -/* -** Convert the given register into a string if it isn't one -** already. Return non-zero if a malloc() fails. -*/ -#define Stringify(P, enc) \ - if(((P)->flags&(MEM_Str|MEM_Blob))==0 && sqlite3VdbeMemStringify(P,enc)) \ - { goto no_mem; } - -/* -** An ephemeral string value (signified by the MEM_Ephem flag) contains -** a pointer to a dynamically allocated string where some other entity -** is responsible for deallocating that string. Because the register -** does not control the string, it might be deleted without the register -** knowing it. -** -** This routine converts an ephemeral string into a dynamically allocated -** string that the register itself controls. In other words, it -** converts an MEM_Ephem string into a string with P.z==P.zMalloc. -*/ -#define Deephemeralize(P) \ - if( ((P)->flags&MEM_Ephem)!=0 \ - && sqlite3VdbeMemMakeWriteable(P) ){ goto no_mem;} - -/* Return true if the cursor was opened using the OP_OpenSorter opcode. */ -#define isSorter(x) ((x)->pSorter!=0) - -/* -** Allocate VdbeCursor number iCur. Return a pointer to it. Return NULL -** if we run out of memory. -*/ -static VdbeCursor *allocateCursor( - Vdbe *p, /* The virtual machine */ - int iCur, /* Index of the new VdbeCursor */ - int nField, /* Number of fields in the table or index */ - int iDb, /* Database the cursor belongs to, or -1 */ - int isBtreeCursor /* True for B-Tree. False for pseudo-table or vtab */ -){ - /* Find the memory cell that will be used to store the blob of memory - ** required for this VdbeCursor structure. It is convenient to use a - ** vdbe memory cell to manage the memory allocation required for a - ** VdbeCursor structure for the following reasons: - ** - ** * Sometimes cursor numbers are used for a couple of different - ** purposes in a vdbe program. The different uses might require - ** different sized allocations. Memory cells provide growable - ** allocations. - ** - ** * When using ENABLE_MEMORY_MANAGEMENT, memory cell buffers can - ** be freed lazily via the sqlite3_release_memory() API. This - ** minimizes the number of malloc calls made by the system. - ** - ** Memory cells for cursors are allocated at the top of the address - ** space. Memory cell (p->nMem) corresponds to cursor 0. Space for - ** cursor 1 is managed by memory cell (p->nMem-1), etc. 
- */ - Mem *pMem = &p->aMem[p->nMem-iCur]; - - int nByte; - VdbeCursor *pCx = 0; - nByte = - ROUND8(sizeof(VdbeCursor)) + 2*sizeof(u32)*nField + - (isBtreeCursor?sqlite3BtreeCursorSize():0); - - assert( iCurnCursor ); - if( p->apCsr[iCur] ){ - sqlite3VdbeFreeCursor(p, p->apCsr[iCur]); - p->apCsr[iCur] = 0; - } - if( SQLITE_OK==sqlite3VdbeMemGrow(pMem, nByte, 0) ){ - p->apCsr[iCur] = pCx = (VdbeCursor*)pMem->z; - memset(pCx, 0, sizeof(VdbeCursor)); - pCx->iDb = iDb; - pCx->nField = nField; - if( isBtreeCursor ){ - pCx->pCursor = (BtCursor*) - &pMem->z[ROUND8(sizeof(VdbeCursor))+2*sizeof(u32)*nField]; - sqlite3BtreeCursorZero(pCx->pCursor); - } - } - return pCx; -} - -/* -** Try to convert a value into a numeric representation if we can -** do so without loss of information. In other words, if the string -** looks like a number, convert it into a number. If it does not -** look like a number, leave it alone. -*/ -static void applyNumericAffinity(Mem *pRec){ - if( (pRec->flags & (MEM_Real|MEM_Int))==0 ){ - double rValue; - i64 iValue; - u8 enc = pRec->enc; - if( (pRec->flags&MEM_Str)==0 ) return; - if( sqlite3AtoF(pRec->z, &rValue, pRec->n, enc)==0 ) return; - if( 0==sqlite3Atoi64(pRec->z, &iValue, pRec->n, enc) ){ - pRec->u.i = iValue; - pRec->flags |= MEM_Int; - }else{ - pRec->r = rValue; - pRec->flags |= MEM_Real; - } - } -} - -/* -** Processing is determine by the affinity parameter: -** -** SQLITE_AFF_INTEGER: -** SQLITE_AFF_REAL: -** SQLITE_AFF_NUMERIC: -** Try to convert pRec to an integer representation or a -** floating-point representation if an integer representation -** is not possible. Note that the integer representation is -** always preferred, even if the affinity is REAL, because -** an integer representation is more space efficient on disk. -** -** SQLITE_AFF_TEXT: -** Convert pRec to a text representation. -** -** SQLITE_AFF_NONE: -** No-op. pRec is unchanged. -*/ -static void applyAffinity( - Mem *pRec, /* The value to apply affinity to */ - char affinity, /* The affinity to be applied */ - u8 enc /* Use this text encoding */ -){ - if( affinity==SQLITE_AFF_TEXT ){ - /* Only attempt the conversion to TEXT if there is an integer or real - ** representation (blob and NULL do not get converted) but no string - ** representation. - */ - if( 0==(pRec->flags&MEM_Str) && (pRec->flags&(MEM_Real|MEM_Int)) ){ - sqlite3VdbeMemStringify(pRec, enc); - } - pRec->flags &= ~(MEM_Real|MEM_Int); - }else if( affinity!=SQLITE_AFF_NONE ){ - assert( affinity==SQLITE_AFF_INTEGER || affinity==SQLITE_AFF_REAL - || affinity==SQLITE_AFF_NUMERIC ); - applyNumericAffinity(pRec); - if( pRec->flags & MEM_Real ){ - sqlite3VdbeIntegerAffinity(pRec); - } - } -} - -/* -** Try to convert the type of a function argument or a result column -** into a numeric representation. Use either INTEGER or REAL whichever -** is appropriate. But only do the conversion if it is possible without -** loss of information and return the revised type of the argument. -*/ -SQLITE_API int sqlite3_value_numeric_type(sqlite3_value *pVal){ - int eType = sqlite3_value_type(pVal); - if( eType==SQLITE_TEXT ){ - Mem *pMem = (Mem*)pVal; - applyNumericAffinity(pMem); - eType = sqlite3_value_type(pVal); - } - return eType; -} - -/* -** Exported version of applyAffinity(). This one works on sqlite3_value*, -** not the internal Mem* type. 
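sqlite3_value_numeric_type() is the exported entry point onto the numeric-affinity logic above. As a sketch, a scalar function can use it to coerce numeric-looking TEXT arguments before acting on them; the function name "as_number" and its behavior are illustrative assumptions, not part of the deleted file.

#include <sqlite3.h>

/* Return the argument as INTEGER or REAL when a lossless conversion exists. */
static void asNumberFunc(sqlite3_context *ctx, int argc, sqlite3_value **argv){
  (void)argc;
  switch( sqlite3_value_numeric_type(argv[0]) ){
    case SQLITE_INTEGER:
      sqlite3_result_int64(ctx, sqlite3_value_int64(argv[0]));
      break;
    case SQLITE_FLOAT:
      sqlite3_result_double(ctx, sqlite3_value_double(argv[0]));
      break;
    default:
      sqlite3_result_value(ctx, argv[0]);  /* leave non-numeric values alone */
      break;
  }
}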
-*/ -SQLITE_PRIVATE void sqlite3ValueApplyAffinity( - sqlite3_value *pVal, - u8 affinity, - u8 enc -){ - applyAffinity((Mem *)pVal, affinity, enc); -} - -/* -** Return the numeric type for pMem, either MEM_Int or MEM_Real or both or -** none. -** -** Unlike applyNumericAffinity(), this routine does not modify pMem->flags. -** But it does set pMem->r and pMem->u.i appropriately. -*/ -static u16 numericType(Mem *pMem){ - if( pMem->flags & (MEM_Int|MEM_Real) ){ - return pMem->flags & (MEM_Int|MEM_Real); - } - if( pMem->flags & (MEM_Str|MEM_Blob) ){ - if( sqlite3AtoF(pMem->z, &pMem->r, pMem->n, pMem->enc)==0 ){ - return 0; - } - if( sqlite3Atoi64(pMem->z, &pMem->u.i, pMem->n, pMem->enc)==SQLITE_OK ){ - return MEM_Int; - } - return MEM_Real; - } - return 0; -} - -#ifdef SQLITE_DEBUG -/* -** Write a nice string representation of the contents of cell pMem -** into buffer zBuf, length nBuf. -*/ -SQLITE_PRIVATE void sqlite3VdbeMemPrettyPrint(Mem *pMem, char *zBuf){ - char *zCsr = zBuf; - int f = pMem->flags; - - static const char *const encnames[] = {"(X)", "(8)", "(16LE)", "(16BE)"}; - - if( f&MEM_Blob ){ - int i; - char c; - if( f & MEM_Dyn ){ - c = 'z'; - assert( (f & (MEM_Static|MEM_Ephem))==0 ); - }else if( f & MEM_Static ){ - c = 't'; - assert( (f & (MEM_Dyn|MEM_Ephem))==0 ); - }else if( f & MEM_Ephem ){ - c = 'e'; - assert( (f & (MEM_Static|MEM_Dyn))==0 ); - }else{ - c = 's'; - } - - sqlite3_snprintf(100, zCsr, "%c", c); - zCsr += sqlite3Strlen30(zCsr); - sqlite3_snprintf(100, zCsr, "%d[", pMem->n); - zCsr += sqlite3Strlen30(zCsr); - for(i=0; i<16 && in; i++){ - sqlite3_snprintf(100, zCsr, "%02X", ((int)pMem->z[i] & 0xFF)); - zCsr += sqlite3Strlen30(zCsr); - } - for(i=0; i<16 && in; i++){ - char z = pMem->z[i]; - if( z<32 || z>126 ) *zCsr++ = '.'; - else *zCsr++ = z; - } - - sqlite3_snprintf(100, zCsr, "]%s", encnames[pMem->enc]); - zCsr += sqlite3Strlen30(zCsr); - if( f & MEM_Zero ){ - sqlite3_snprintf(100, zCsr,"+%dz",pMem->u.nZero); - zCsr += sqlite3Strlen30(zCsr); - } - *zCsr = '\0'; - }else if( f & MEM_Str ){ - int j, k; - zBuf[0] = ' '; - if( f & MEM_Dyn ){ - zBuf[1] = 'z'; - assert( (f & (MEM_Static|MEM_Ephem))==0 ); - }else if( f & MEM_Static ){ - zBuf[1] = 't'; - assert( (f & (MEM_Dyn|MEM_Ephem))==0 ); - }else if( f & MEM_Ephem ){ - zBuf[1] = 'e'; - assert( (f & (MEM_Static|MEM_Dyn))==0 ); - }else{ - zBuf[1] = 's'; - } - k = 2; - sqlite3_snprintf(100, &zBuf[k], "%d", pMem->n); - k += sqlite3Strlen30(&zBuf[k]); - zBuf[k++] = '['; - for(j=0; j<15 && jn; j++){ - u8 c = pMem->z[j]; - if( c>=0x20 && c<0x7f ){ - zBuf[k++] = c; - }else{ - zBuf[k++] = '.'; - } - } - zBuf[k++] = ']'; - sqlite3_snprintf(100,&zBuf[k], encnames[pMem->enc]); - k += sqlite3Strlen30(&zBuf[k]); - zBuf[k++] = 0; - } -} -#endif - -#ifdef SQLITE_DEBUG -/* -** Print the value of a register for tracing purposes: -*/ -static void memTracePrint(Mem *p){ - if( p->flags & MEM_Undefined ){ - printf(" undefined"); - }else if( p->flags & MEM_Null ){ - printf(" NULL"); - }else if( (p->flags & (MEM_Int|MEM_Str))==(MEM_Int|MEM_Str) ){ - printf(" si:%lld", p->u.i); - }else if( p->flags & MEM_Int ){ - printf(" i:%lld", p->u.i); -#ifndef SQLITE_OMIT_FLOATING_POINT - }else if( p->flags & MEM_Real ){ - printf(" r:%g", p->r); -#endif - }else if( p->flags & MEM_RowSet ){ - printf(" (rowset)"); - }else{ - char zBuf[200]; - sqlite3VdbeMemPrettyPrint(p, zBuf); - printf(" %s", zBuf); - } -} -static void registerTrace(int iReg, Mem *p){ - printf("REG[%d] = ", iReg); - memTracePrint(p); - printf("\n"); -} -#endif - -#ifdef SQLITE_DEBUG -# 
define REGISTER_TRACE(R,M) if(db->flags&SQLITE_VdbeTrace)registerTrace(R,M) -#else -# define REGISTER_TRACE(R,M) -#endif - - -#ifdef VDBE_PROFILE - -/* -** hwtime.h contains inline assembler code for implementing -** high-performance timing routines. -*/ -/************** Include hwtime.h in the middle of vdbe.c *********************/ -/************** Begin file hwtime.h ******************************************/ -/* -** 2008 May 27 -** -** The author disclaims copyright to this source code. In place of -** a legal notice, here is a blessing: -** -** May you do good and not evil. -** May you find forgiveness for yourself and forgive others. -** May you share freely, never taking more than you give. -** -****************************************************************************** -** -** This file contains inline asm code for retrieving "high-performance" -** counters for x86 class CPUs. -*/ -#ifndef _HWTIME_H_ -#define _HWTIME_H_ - -/* -** The following routine only works on pentium-class (or newer) processors. -** It uses the RDTSC opcode to read the cycle count value out of the -** processor and returns that value. This can be used for high-res -** profiling. -*/ -#if (defined(__GNUC__) || defined(_MSC_VER)) && \ - (defined(i386) || defined(__i386__) || defined(_M_IX86)) - - #if defined(__GNUC__) - - __inline__ sqlite_uint64 sqlite3Hwtime(void){ - unsigned int lo, hi; - __asm__ __volatile__ ("rdtsc" : "=a" (lo), "=d" (hi)); - return (sqlite_uint64)hi << 32 | lo; - } - - #elif defined(_MSC_VER) - - __declspec(naked) __inline sqlite_uint64 __cdecl sqlite3Hwtime(void){ - __asm { - rdtsc - ret ; return value at EDX:EAX - } - } - - #endif - -#elif (defined(__GNUC__) && defined(__x86_64__)) - - __inline__ sqlite_uint64 sqlite3Hwtime(void){ - unsigned long val; - __asm__ __volatile__ ("rdtsc" : "=A" (val)); - return val; - } - -#elif (defined(__GNUC__) && defined(__ppc__)) - - __inline__ sqlite_uint64 sqlite3Hwtime(void){ - unsigned long long retval; - unsigned long junk; - __asm__ __volatile__ ("\n\ - 1: mftbu %1\n\ - mftb %L0\n\ - mftbu %0\n\ - cmpw %0,%1\n\ - bne 1b" - : "=r" (retval), "=r" (junk)); - return retval; - } - -#else - - #error Need implementation of sqlite3Hwtime() for your platform. - - /* - ** To compile without implementing sqlite3Hwtime() for your platform, - ** you can remove the above #error and use the following - ** stub function. You will lose timing support for many - ** of the debugging and testing utilities, but it should at - ** least compile and run. - */ -SQLITE_PRIVATE sqlite_uint64 sqlite3Hwtime(void){ return ((sqlite_uint64)0); } - -#endif - -#endif /* !defined(_HWTIME_H_) */ - -/************** End of hwtime.h **********************************************/ -/************** Continuing where we left off in vdbe.c ***********************/ - -#endif - -#ifndef NDEBUG -/* -** This function is only called from within an assert() expression. It -** checks that the sqlite3.nTransaction variable is correctly set to -** the number of non-transaction savepoints currently in the -** linked list starting at sqlite3.pSavepoint. -** -** Usage: -** -** assert( checkSavepointCount(db) ); -*/ -static int checkSavepointCount(sqlite3 *db){ - int n = 0; - Savepoint *p; - for(p=db->pSavepoint; p; p=p->pNext) n++; - assert( n==(db->nSavepoint + db->isTransactionSavepoint) ); - return 1; -} -#endif - - -/* -** Execute as much of a VDBE program as we can. -** This is the core of sqlite3_step(). 
-*/ -SQLITE_PRIVATE int sqlite3VdbeExec( - Vdbe *p /* The VDBE */ -){ - int pc=0; /* The program counter */ - Op *aOp = p->aOp; /* Copy of p->aOp */ - Op *pOp; /* Current operation */ - int rc = SQLITE_OK; /* Value to return */ - sqlite3 *db = p->db; /* The database */ - u8 resetSchemaOnFault = 0; /* Reset schema after an error if positive */ - u8 encoding = ENC(db); /* The database encoding */ - int iCompare = 0; /* Result of last OP_Compare operation */ - unsigned nVmStep = 0; /* Number of virtual machine steps */ -#ifndef SQLITE_OMIT_PROGRESS_CALLBACK - unsigned nProgressLimit = 0;/* Invoke xProgress() when nVmStep reaches this */ -#endif - Mem *aMem = p->aMem; /* Copy of p->aMem */ - Mem *pIn1 = 0; /* 1st input operand */ - Mem *pIn2 = 0; /* 2nd input operand */ - Mem *pIn3 = 0; /* 3rd input operand */ - Mem *pOut = 0; /* Output operand */ - int *aPermute = 0; /* Permutation of columns for OP_Compare */ - i64 lastRowid = db->lastRowid; /* Saved value of the last insert ROWID */ -#ifdef VDBE_PROFILE - u64 start; /* CPU clock count at start of opcode */ -#endif - /*** INSERT STACK UNION HERE ***/ - - assert( p->magic==VDBE_MAGIC_RUN ); /* sqlite3_step() verifies this */ - sqlite3VdbeEnter(p); - if( p->rc==SQLITE_NOMEM ){ - /* This happens if a malloc() inside a call to sqlite3_column_text() or - ** sqlite3_column_text16() failed. */ - goto no_mem; - } - assert( p->rc==SQLITE_OK || p->rc==SQLITE_BUSY ); - assert( p->bIsReader || p->readOnly!=0 ); - p->rc = SQLITE_OK; - p->iCurrentTime = 0; - assert( p->explain==0 ); - p->pResultSet = 0; - db->busyHandler.nBusy = 0; - if( db->u1.isInterrupted ) goto abort_due_to_interrupt; - sqlite3VdbeIOTraceSql(p); -#ifndef SQLITE_OMIT_PROGRESS_CALLBACK - if( db->xProgress ){ - assert( 0 < db->nProgressOps ); - nProgressLimit = (unsigned)p->aCounter[SQLITE_STMTSTATUS_VM_STEP]; - if( nProgressLimit==0 ){ - nProgressLimit = db->nProgressOps; - }else{ - nProgressLimit %= (unsigned)db->nProgressOps; - } - } -#endif -#ifdef SQLITE_DEBUG - sqlite3BeginBenignMalloc(); - if( p->pc==0 - && (p->db->flags & (SQLITE_VdbeListing|SQLITE_VdbeEQP|SQLITE_VdbeTrace))!=0 - ){ - int i; - int once = 1; - sqlite3VdbePrintSql(p); - if( p->db->flags & SQLITE_VdbeListing ){ - printf("VDBE Program Listing:\n"); - for(i=0; inOp; i++){ - sqlite3VdbePrintOp(stdout, i, &aOp[i]); - } - } - if( p->db->flags & SQLITE_VdbeEQP ){ - for(i=0; inOp; i++){ - if( aOp[i].opcode==OP_Explain ){ - if( once ) printf("VDBE Query Plan:\n"); - printf("%s\n", aOp[i].p4.z); - once = 0; - } - } - } - if( p->db->flags & SQLITE_VdbeTrace ) printf("VDBE Trace:\n"); - } - sqlite3EndBenignMalloc(); -#endif - for(pc=p->pc; rc==SQLITE_OK; pc++){ - assert( pc>=0 && pcnOp ); - if( db->mallocFailed ) goto no_mem; -#ifdef VDBE_PROFILE - start = sqlite3Hwtime(); -#endif - nVmStep++; - pOp = &aOp[pc]; - - /* Only allow tracing if SQLITE_DEBUG is defined. - */ -#ifdef SQLITE_DEBUG - if( db->flags & SQLITE_VdbeTrace ){ - sqlite3VdbePrintOp(stdout, pc, pOp); - } -#endif - - - /* Check to see if we need to simulate an interrupt. This only happens - ** if we have a special test build. - */ -#ifdef SQLITE_TEST - if( sqlite3_interrupt_count>0 ){ - sqlite3_interrupt_count--; - if( sqlite3_interrupt_count==0 ){ - sqlite3_interrupt(db); - } - } -#endif - - /* On any opcode with the "out2-prerelease" tag, free any - ** external allocations out of mem[p2] and set mem[p2] to be - ** an undefined integer. Opcodes will either fill in the integer - ** value or convert mem[p2] to a different type. 
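The interpreter setup above polls the interrupt flag and a progress-callback counter while executing opcodes. From the application side those correspond to sqlite3_interrupt() and sqlite3_progress_handler(); a hedged sketch of the latter, with the callback interval and flag mechanism chosen only for illustration:

#include <sqlite3.h>

/* Progress callback: returning non-zero makes the running statement
** stop with SQLITE_INTERRUPT. Here a caller-owned flag decides. */
static int progress_cb(void *pArg){
  int *pCancel = (int*)pArg;
  return *pCancel;
}

/* Invoke the callback roughly every 100000 VDBE instructions. */
static void install_watchdog(sqlite3 *db, int *pCancelFlag){
  sqlite3_progress_handler(db, 100000, progress_cb, pCancelFlag);
}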
- */ - assert( pOp->opflags==sqlite3OpcodeProperty[pOp->opcode] ); - if( pOp->opflags & OPFLG_OUT2_PRERELEASE ){ - assert( pOp->p2>0 ); - assert( pOp->p2<=(p->nMem-p->nCursor) ); - pOut = &aMem[pOp->p2]; - memAboutToChange(p, pOut); - VdbeMemRelease(pOut); - pOut->flags = MEM_Int; - } - - /* Sanity checking on other operands */ -#ifdef SQLITE_DEBUG - if( (pOp->opflags & OPFLG_IN1)!=0 ){ - assert( pOp->p1>0 ); - assert( pOp->p1<=(p->nMem-p->nCursor) ); - assert( memIsValid(&aMem[pOp->p1]) ); - assert( sqlite3VdbeCheckMemInvariants(&aMem[pOp->p1]) ); - REGISTER_TRACE(pOp->p1, &aMem[pOp->p1]); - } - if( (pOp->opflags & OPFLG_IN2)!=0 ){ - assert( pOp->p2>0 ); - assert( pOp->p2<=(p->nMem-p->nCursor) ); - assert( memIsValid(&aMem[pOp->p2]) ); - assert( sqlite3VdbeCheckMemInvariants(&aMem[pOp->p2]) ); - REGISTER_TRACE(pOp->p2, &aMem[pOp->p2]); - } - if( (pOp->opflags & OPFLG_IN3)!=0 ){ - assert( pOp->p3>0 ); - assert( pOp->p3<=(p->nMem-p->nCursor) ); - assert( memIsValid(&aMem[pOp->p3]) ); - assert( sqlite3VdbeCheckMemInvariants(&aMem[pOp->p3]) ); - REGISTER_TRACE(pOp->p3, &aMem[pOp->p3]); - } - if( (pOp->opflags & OPFLG_OUT2)!=0 ){ - assert( pOp->p2>0 ); - assert( pOp->p2<=(p->nMem-p->nCursor) ); - memAboutToChange(p, &aMem[pOp->p2]); - } - if( (pOp->opflags & OPFLG_OUT3)!=0 ){ - assert( pOp->p3>0 ); - assert( pOp->p3<=(p->nMem-p->nCursor) ); - memAboutToChange(p, &aMem[pOp->p3]); - } -#endif - - switch( pOp->opcode ){ - -/***************************************************************************** -** What follows is a massive switch statement where each case implements a -** separate instruction in the virtual machine. If we follow the usual -** indentation conventions, each case should be indented by 6 spaces. But -** that is a lot of wasted space on the left margin. So the code within -** the switch statement will break with convention and be flush-left. Another -** big comment (similar to this one) will mark the point in the code where -** we transition back to normal indentation. -** -** The formatting of each case is important. The makefile for SQLite -** generates two C files "opcodes.h" and "opcodes.c" by scanning this -** file looking for lines that begin with "case OP_". The opcodes.h files -** will be filled with #defines that give unique integer values to each -** opcode and the opcodes.c file is filled with an array of strings where -** each string is the symbolic name for the corresponding opcode. If the -** case statement is followed by a comment of the form "/# same as ... #/" -** that comment is used to determine the particular value of the opcode. -** -** Other keywords in the comment that follows each case are used to -** construct the OPFLG_INITIALIZER value that initializes opcodeProperty[]. -** Keywords include: in1, in2, in3, out2_prerelease, out2, out3. See -** the mkopcodeh.awk script for additional information. -** -** Documentation about VDBE opcodes is generated by scanning this file -** for lines of that contain "Opcode:". That line and all subsequent -** comment lines are used in the generation of the opcode.html documentation -** file. -** -** SUMMARY: -** -** Formatting is important to scripts that scan this file. -** Do not deviate from the formatting style currently in use. -** -*****************************************************************************/ - -/* Opcode: Goto * P2 * * * -** -** An unconditional jump to address P2. -** The next instruction executed will be -** the one at index P2 from the beginning of -** the program. 
-** -** The P1 parameter is not actually used by this opcode. However, it -** is sometimes set to 1 instead of 0 as a hint to the command-line shell -** that this Goto is the bottom of a loop and that the lines from P2 down -** to the current line should be indented for EXPLAIN output. -*/ -case OP_Goto: { /* jump */ - pc = pOp->p2 - 1; - - /* Opcodes that are used as the bottom of a loop (OP_Next, OP_Prev, - ** OP_VNext, OP_RowSetNext, or OP_SorterNext) all jump here upon - ** completion. Check to see if sqlite3_interrupt() has been called - ** or if the progress callback needs to be invoked. - ** - ** This code uses unstructured "goto" statements and does not look clean. - ** But that is not due to sloppy coding habits. The code is written this - ** way for performance, to avoid having to run the interrupt and progress - ** checks on every opcode. This helps sqlite3_step() to run about 1.5% - ** faster according to "valgrind --tool=cachegrind" */ -check_for_interrupt: - if( db->u1.isInterrupted ) goto abort_due_to_interrupt; -#ifndef SQLITE_OMIT_PROGRESS_CALLBACK - /* Call the progress callback if it is configured and the required number - ** of VDBE ops have been executed (either since this invocation of - ** sqlite3VdbeExec() or since last time the progress callback was called). - ** If the progress callback returns non-zero, exit the virtual machine with - ** a return code SQLITE_ABORT. - */ - if( db->xProgress!=0 && nVmStep>=nProgressLimit ){ - assert( db->nProgressOps!=0 ); - nProgressLimit = nVmStep + db->nProgressOps - (nVmStep%db->nProgressOps); - if( db->xProgress(db->pProgressArg) ){ - rc = SQLITE_INTERRUPT; - goto vdbe_error_halt; - } - } -#endif - - break; -} - -/* Opcode: Gosub P1 P2 * * * -** -** Write the current address onto register P1 -** and then jump to address P2. -*/ -case OP_Gosub: { /* jump */ - assert( pOp->p1>0 && pOp->p1<=(p->nMem-p->nCursor) ); - pIn1 = &aMem[pOp->p1]; - assert( VdbeMemDynamic(pIn1)==0 ); - memAboutToChange(p, pIn1); - pIn1->flags = MEM_Int; - pIn1->u.i = pc; - REGISTER_TRACE(pOp->p1, pIn1); - pc = pOp->p2 - 1; - break; -} - -/* Opcode: Return P1 * * * * -** -** Jump to the next instruction after the address in register P1. After -** the jump, register P1 becomes undefined. -*/ -case OP_Return: { /* in1 */ - pIn1 = &aMem[pOp->p1]; - assert( pIn1->flags==MEM_Int ); - pc = (int)pIn1->u.i; - pIn1->flags = MEM_Undefined; - break; -} - -/* Opcode: InitCoroutine P1 P2 P3 * * -** -** Set up register P1 so that it will OP_Yield to the co-routine -** located at address P3. -** -** If P2!=0 then the co-routine implementation immediately follows -** this opcode. So jump over the co-routine implementation to -** address P2. -*/ -case OP_InitCoroutine: { /* jump */ - assert( pOp->p1>0 && pOp->p1<=(p->nMem-p->nCursor) ); - assert( pOp->p2>=0 && pOp->p2nOp ); - assert( pOp->p3>=0 && pOp->p3nOp ); - pOut = &aMem[pOp->p1]; - assert( !VdbeMemDynamic(pOut) ); - pOut->u.i = pOp->p3 - 1; - pOut->flags = MEM_Int; - if( pOp->p2 ) pc = pOp->p2 - 1; - break; -} - -/* Opcode: EndCoroutine P1 * * * * -** -** The instruction at the address in register P1 is an OP_Yield. -** Jump to the P2 parameter of that OP_Yield. -** After the jump, register P1 becomes undefined. 
-*/ -case OP_EndCoroutine: { /* in1 */ - VdbeOp *pCaller; - pIn1 = &aMem[pOp->p1]; - assert( pIn1->flags==MEM_Int ); - assert( pIn1->u.i>=0 && pIn1->u.inOp ); - pCaller = &aOp[pIn1->u.i]; - assert( pCaller->opcode==OP_Yield ); - assert( pCaller->p2>=0 && pCaller->p2nOp ); - pc = pCaller->p2 - 1; - pIn1->flags = MEM_Undefined; - break; -} - -/* Opcode: Yield P1 P2 * * * -** -** Swap the program counter with the value in register P1. -** -** If the co-routine ends with OP_Yield or OP_Return then continue -** to the next instruction. But if the co-routine ends with -** OP_EndCoroutine, jump immediately to P2. -*/ -case OP_Yield: { /* in1, jump */ - int pcDest; - pIn1 = &aMem[pOp->p1]; - assert( VdbeMemDynamic(pIn1)==0 ); - pIn1->flags = MEM_Int; - pcDest = (int)pIn1->u.i; - pIn1->u.i = pc; - REGISTER_TRACE(pOp->p1, pIn1); - pc = pcDest; - break; -} - -/* Opcode: HaltIfNull P1 P2 P3 P4 P5 -** Synopsis: if r[P3]=null halt -** -** Check the value in register P3. If it is NULL then Halt using -** parameter P1, P2, and P4 as if this were a Halt instruction. If the -** value in register P3 is not NULL, then this routine is a no-op. -** The P5 parameter should be 1. -*/ -case OP_HaltIfNull: { /* in3 */ - pIn3 = &aMem[pOp->p3]; - if( (pIn3->flags & MEM_Null)==0 ) break; - /* Fall through into OP_Halt */ -} - -/* Opcode: Halt P1 P2 * P4 P5 -** -** Exit immediately. All open cursors, etc are closed -** automatically. -** -** P1 is the result code returned by sqlite3_exec(), sqlite3_reset(), -** or sqlite3_finalize(). For a normal halt, this should be SQLITE_OK (0). -** For errors, it can be some other value. If P1!=0 then P2 will determine -** whether or not to rollback the current transaction. Do not rollback -** if P2==OE_Fail. Do the rollback if P2==OE_Rollback. If P2==OE_Abort, -** then back out all changes that have occurred during this execution of the -** VDBE, but do not rollback the transaction. -** -** If P4 is not null then it is an error message string. -** -** P5 is a value between 0 and 4, inclusive, that modifies the P4 string. -** -** 0: (no change) -** 1: NOT NULL contraint failed: P4 -** 2: UNIQUE constraint failed: P4 -** 3: CHECK constraint failed: P4 -** 4: FOREIGN KEY constraint failed: P4 -** -** If P5 is not zero and P4 is NULL, then everything after the ":" is -** omitted. -** -** There is an implied "Halt 0 0 0" instruction inserted at the very end of -** every program. So a jump past the last instruction of the program -** is the same as executing Halt. -*/ -case OP_Halt: { - const char *zType; - const char *zLogFmt; - - if( pOp->p1==SQLITE_OK && p->pFrame ){ - /* Halt the sub-program. Return control to the parent frame. */ - VdbeFrame *pFrame = p->pFrame; - p->pFrame = pFrame->pParent; - p->nFrame--; - sqlite3VdbeSetChanges(db, p->nChange); - pc = sqlite3VdbeFrameRestore(pFrame); - lastRowid = db->lastRowid; - if( pOp->p2==OE_Ignore ){ - /* Instruction pc is the OP_Program that invoked the sub-program - ** currently being halted. If the p2 instruction of this OP_Halt - ** instruction is set to OE_Ignore, then the sub-program is throwing - ** an IGNORE exception. In this case jump to the address specified - ** as the p2 of the calling OP_Program. 
*/ - pc = p->aOp[pc].p2-1; - } - aOp = p->aOp; - aMem = p->aMem; - break; - } - p->rc = pOp->p1; - p->errorAction = (u8)pOp->p2; - p->pc = pc; - if( p->rc ){ - if( pOp->p5 ){ - static const char * const azType[] = { "NOT NULL", "UNIQUE", "CHECK", - "FOREIGN KEY" }; - assert( pOp->p5>=1 && pOp->p5<=4 ); - testcase( pOp->p5==1 ); - testcase( pOp->p5==2 ); - testcase( pOp->p5==3 ); - testcase( pOp->p5==4 ); - zType = azType[pOp->p5-1]; - }else{ - zType = 0; - } - assert( zType!=0 || pOp->p4.z!=0 ); - zLogFmt = "abort at %d in [%s]: %s"; - if( zType && pOp->p4.z ){ - sqlite3SetString(&p->zErrMsg, db, "%s constraint failed: %s", - zType, pOp->p4.z); - }else if( pOp->p4.z ){ - sqlite3SetString(&p->zErrMsg, db, "%s", pOp->p4.z); - }else{ - sqlite3SetString(&p->zErrMsg, db, "%s constraint failed", zType); - } - sqlite3_log(pOp->p1, zLogFmt, pc, p->zSql, p->zErrMsg); - } - rc = sqlite3VdbeHalt(p); - assert( rc==SQLITE_BUSY || rc==SQLITE_OK || rc==SQLITE_ERROR ); - if( rc==SQLITE_BUSY ){ - p->rc = rc = SQLITE_BUSY; - }else{ - assert( rc==SQLITE_OK || (p->rc&0xff)==SQLITE_CONSTRAINT ); - assert( rc==SQLITE_OK || db->nDeferredCons>0 || db->nDeferredImmCons>0 ); - rc = p->rc ? SQLITE_ERROR : SQLITE_DONE; - } - goto vdbe_return; -} - -/* Opcode: Integer P1 P2 * * * -** Synopsis: r[P2]=P1 -** -** The 32-bit integer value P1 is written into register P2. -*/ -case OP_Integer: { /* out2-prerelease */ - pOut->u.i = pOp->p1; - break; -} - -/* Opcode: Int64 * P2 * P4 * -** Synopsis: r[P2]=P4 -** -** P4 is a pointer to a 64-bit integer value. -** Write that value into register P2. -*/ -case OP_Int64: { /* out2-prerelease */ - assert( pOp->p4.pI64!=0 ); - pOut->u.i = *pOp->p4.pI64; - break; -} - -#ifndef SQLITE_OMIT_FLOATING_POINT -/* Opcode: Real * P2 * P4 * -** Synopsis: r[P2]=P4 -** -** P4 is a pointer to a 64-bit floating point value. -** Write that value into register P2. -*/ -case OP_Real: { /* same as TK_FLOAT, out2-prerelease */ - pOut->flags = MEM_Real; - assert( !sqlite3IsNaN(*pOp->p4.pReal) ); - pOut->r = *pOp->p4.pReal; - break; -} -#endif - -/* Opcode: String8 * P2 * P4 * -** Synopsis: r[P2]='P4' -** -** P4 points to a nul terminated UTF-8 string. This opcode is transformed -** into an OP_String before it is executed for the first time. During -** this transformation, the length of string P4 is computed and stored -** as the P1 parameter. -*/ -case OP_String8: { /* same as TK_STRING, out2-prerelease */ - assert( pOp->p4.z!=0 ); - pOp->opcode = OP_String; - pOp->p1 = sqlite3Strlen30(pOp->p4.z); - -#ifndef SQLITE_OMIT_UTF16 - if( encoding!=SQLITE_UTF8 ){ - rc = sqlite3VdbeMemSetStr(pOut, pOp->p4.z, -1, SQLITE_UTF8, SQLITE_STATIC); - if( rc==SQLITE_TOOBIG ) goto too_big; - if( SQLITE_OK!=sqlite3VdbeChangeEncoding(pOut, encoding) ) goto no_mem; - assert( pOut->zMalloc==pOut->z ); - assert( VdbeMemDynamic(pOut)==0 ); - pOut->zMalloc = 0; - pOut->flags |= MEM_Static; - if( pOp->p4type==P4_DYNAMIC ){ - sqlite3DbFree(db, pOp->p4.z); - } - pOp->p4type = P4_DYNAMIC; - pOp->p4.z = pOut->z; - pOp->p1 = pOut->n; - } -#endif - if( pOp->p1>db->aLimit[SQLITE_LIMIT_LENGTH] ){ - goto too_big; - } - /* Fall through to the next case, OP_String */ -} - -/* Opcode: String P1 P2 * P4 * -** Synopsis: r[P2]='P4' (len=P1) -** -** The string value P4 of length P1 (bytes) is stored in register P2. 
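OP_String8 is documented above as rewriting itself into OP_String on first execution, caching strlen(P4) in P1. A small self-contained sketch of that self-patching pattern (illustrative only, not vdbe.c code):

#include <stdio.h>
#include <string.h>

enum { OP_STRING8_DEMO, OP_STRING_DEMO };
typedef struct { int opcode; int p1; const char *p4; } DemoOp;

static void exec_demo(DemoOp *op){
  if( op->opcode==OP_STRING8_DEMO ){
    op->p1 = (int)strlen(op->p4);      /* computed exactly once */
    op->opcode = OP_STRING_DEMO;       /* instruction rewrites itself */
  }
  printf("string of length %d: %s\n", op->p1, op->p4);
}

int main(void){
  DemoOp op = { OP_STRING8_DEMO, 0, "hello" };
  exec_demo(&op);  /* computes and caches the length */
  exec_demo(&op);  /* reuses the cached length */
  return 0;
}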
-*/ -case OP_String: { /* out2-prerelease */ - assert( pOp->p4.z!=0 ); - pOut->flags = MEM_Str|MEM_Static|MEM_Term; - pOut->z = pOp->p4.z; - pOut->n = pOp->p1; - pOut->enc = encoding; - UPDATE_MAX_BLOBSIZE(pOut); - break; -} - -/* Opcode: Null P1 P2 P3 * * -** Synopsis: r[P2..P3]=NULL -** -** Write a NULL into registers P2. If P3 greater than P2, then also write -** NULL into register P3 and every register in between P2 and P3. If P3 -** is less than P2 (typically P3 is zero) then only register P2 is -** set to NULL. -** -** If the P1 value is non-zero, then also set the MEM_Cleared flag so that -** NULL values will not compare equal even if SQLITE_NULLEQ is set on -** OP_Ne or OP_Eq. -*/ -case OP_Null: { /* out2-prerelease */ - int cnt; - u16 nullFlag; - cnt = pOp->p3-pOp->p2; - assert( pOp->p3<=(p->nMem-p->nCursor) ); - pOut->flags = nullFlag = pOp->p1 ? (MEM_Null|MEM_Cleared) : MEM_Null; - while( cnt>0 ){ - pOut++; - memAboutToChange(p, pOut); - VdbeMemRelease(pOut); - pOut->flags = nullFlag; - cnt--; - } - break; -} - -/* Opcode: SoftNull P1 * * * * -** Synopsis: r[P1]=NULL -** -** Set register P1 to have the value NULL as seen by the OP_MakeRecord -** instruction, but do not free any string or blob memory associated with -** the register, so that if the value was a string or blob that was -** previously copied using OP_SCopy, the copies will continue to be valid. -*/ -case OP_SoftNull: { - assert( pOp->p1>0 && pOp->p1<=(p->nMem-p->nCursor) ); - pOut = &aMem[pOp->p1]; - pOut->flags = (pOut->flags|MEM_Null)&~MEM_Undefined; - break; -} - -/* Opcode: Blob P1 P2 * P4 * -** Synopsis: r[P2]=P4 (len=P1) -** -** P4 points to a blob of data P1 bytes long. Store this -** blob in register P2. -*/ -case OP_Blob: { /* out2-prerelease */ - assert( pOp->p1 <= SQLITE_MAX_LENGTH ); - sqlite3VdbeMemSetStr(pOut, pOp->p4.z, pOp->p1, 0, 0); - pOut->enc = encoding; - UPDATE_MAX_BLOBSIZE(pOut); - break; -} - -/* Opcode: Variable P1 P2 * P4 * -** Synopsis: r[P2]=parameter(P1,P4) -** -** Transfer the values of bound parameter P1 into register P2 -** -** If the parameter is named, then its name appears in P4. -** The P4 value is used by sqlite3_bind_parameter_name(). -*/ -case OP_Variable: { /* out2-prerelease */ - Mem *pVar; /* Value being transferred */ - - assert( pOp->p1>0 && pOp->p1<=p->nVar ); - assert( pOp->p4.z==0 || pOp->p4.z==p->azVar[pOp->p1-1] ); - pVar = &p->aVar[pOp->p1 - 1]; - if( sqlite3VdbeMemTooBig(pVar) ){ - goto too_big; - } - sqlite3VdbeMemShallowCopy(pOut, pVar, MEM_Static); - UPDATE_MAX_BLOBSIZE(pOut); - break; -} - -/* Opcode: Move P1 P2 P3 * * -** Synopsis: r[P2@P3]=r[P1@P3] -** -** Move the P3 values in register P1..P1+P3-1 over into -** registers P2..P2+P3-1. Registers P1..P1+P3-1 are -** left holding a NULL. It is an error for register ranges -** P1..P1+P3-1 and P2..P2+P3-1 to overlap. It is an error -** for P3 to be less than 1. 
-*/ -case OP_Move: { - char *zMalloc; /* Holding variable for allocated memory */ - int n; /* Number of registers left to copy */ - int p1; /* Register to copy from */ - int p2; /* Register to copy to */ - - n = pOp->p3; - p1 = pOp->p1; - p2 = pOp->p2; - assert( n>0 && p1>0 && p2>0 ); - assert( p1+n<=p2 || p2+n<=p1 ); - - pIn1 = &aMem[p1]; - pOut = &aMem[p2]; - do{ - assert( pOut<=&aMem[(p->nMem-p->nCursor)] ); - assert( pIn1<=&aMem[(p->nMem-p->nCursor)] ); - assert( memIsValid(pIn1) ); - memAboutToChange(p, pOut); - VdbeMemRelease(pOut); - zMalloc = pOut->zMalloc; - memcpy(pOut, pIn1, sizeof(Mem)); -#ifdef SQLITE_DEBUG - if( pOut->pScopyFrom>=&aMem[p1] && pOut->pScopyFrom<&aMem[p1+pOp->p3] ){ - pOut->pScopyFrom += p1 - pOp->p2; - } -#endif - pIn1->flags = MEM_Undefined; - pIn1->xDel = 0; - pIn1->zMalloc = zMalloc; - REGISTER_TRACE(p2++, pOut); - pIn1++; - pOut++; - }while( --n ); - break; -} - -/* Opcode: Copy P1 P2 P3 * * -** Synopsis: r[P2@P3+1]=r[P1@P3+1] -** -** Make a copy of registers P1..P1+P3 into registers P2..P2+P3. -** -** This instruction makes a deep copy of the value. A duplicate -** is made of any string or blob constant. See also OP_SCopy. -*/ -case OP_Copy: { - int n; - - n = pOp->p3; - pIn1 = &aMem[pOp->p1]; - pOut = &aMem[pOp->p2]; - assert( pOut!=pIn1 ); - while( 1 ){ - sqlite3VdbeMemShallowCopy(pOut, pIn1, MEM_Ephem); - Deephemeralize(pOut); -#ifdef SQLITE_DEBUG - pOut->pScopyFrom = 0; -#endif - REGISTER_TRACE(pOp->p2+pOp->p3-n, pOut); - if( (n--)==0 ) break; - pOut++; - pIn1++; - } - break; -} - -/* Opcode: SCopy P1 P2 * * * -** Synopsis: r[P2]=r[P1] -** -** Make a shallow copy of register P1 into register P2. -** -** This instruction makes a shallow copy of the value. If the value -** is a string or blob, then the copy is only a pointer to the -** original and hence if the original changes so will the copy. -** Worse, if the original is deallocated, the copy becomes invalid. -** Thus the program must guarantee that the original will not change -** during the lifetime of the copy. Use OP_Copy to make a complete -** copy. -*/ -case OP_SCopy: { /* out2 */ - pIn1 = &aMem[pOp->p1]; - pOut = &aMem[pOp->p2]; - assert( pOut!=pIn1 ); - sqlite3VdbeMemShallowCopy(pOut, pIn1, MEM_Ephem); -#ifdef SQLITE_DEBUG - if( pOut->pScopyFrom==0 ) pOut->pScopyFrom = pIn1; -#endif - break; -} - -/* Opcode: ResultRow P1 P2 * * * -** Synopsis: output=r[P1@P2] -** -** The registers P1 through P1+P2-1 contain a single row of -** results. This opcode causes the sqlite3_step() call to terminate -** with an SQLITE_ROW return code and it sets up the sqlite3_stmt -** structure to provide access to the r(P1)..r(P1+P2-1) values as -** the result row. -*/ -case OP_ResultRow: { - Mem *pMem; - int i; - assert( p->nResColumn==pOp->p2 ); - assert( pOp->p1>0 ); - assert( pOp->p1+pOp->p2<=(p->nMem-p->nCursor)+1 ); - -#ifndef SQLITE_OMIT_PROGRESS_CALLBACK - /* Run the progress counter just before returning. - */ - if( db->xProgress!=0 - && nVmStep>=nProgressLimit - && db->xProgress(db->pProgressArg)!=0 - ){ - rc = SQLITE_INTERRUPT; - goto vdbe_error_halt; - } -#endif - - /* If this statement has violated immediate foreign key constraints, do - ** not return the number of rows modified. And do not RELEASE the statement - ** transaction. It needs to be rolled back. 
*/ - if( SQLITE_OK!=(rc = sqlite3VdbeCheckFk(p, 0)) ){ - assert( db->flags&SQLITE_CountRows ); - assert( p->usesStmtJournal ); - break; - } - - /* If the SQLITE_CountRows flag is set in sqlite3.flags mask, then - ** DML statements invoke this opcode to return the number of rows - ** modified to the user. This is the only way that a VM that - ** opens a statement transaction may invoke this opcode. - ** - ** In case this is such a statement, close any statement transaction - ** opened by this VM before returning control to the user. This is to - ** ensure that statement-transactions are always nested, not overlapping. - ** If the open statement-transaction is not closed here, then the user - ** may step another VM that opens its own statement transaction. This - ** may lead to overlapping statement transactions. - ** - ** The statement transaction is never a top-level transaction. Hence - ** the RELEASE call below can never fail. - */ - assert( p->iStatement==0 || db->flags&SQLITE_CountRows ); - rc = sqlite3VdbeCloseStatement(p, SAVEPOINT_RELEASE); - if( NEVER(rc!=SQLITE_OK) ){ - break; - } - - /* Invalidate all ephemeral cursor row caches */ - p->cacheCtr = (p->cacheCtr + 2)|1; - - /* Make sure the results of the current row are \000 terminated - ** and have an assigned type. The results are de-ephemeralized as - ** a side effect. - */ - pMem = p->pResultSet = &aMem[pOp->p1]; - for(i=0; ip2; i++){ - assert( memIsValid(&pMem[i]) ); - Deephemeralize(&pMem[i]); - assert( (pMem[i].flags & MEM_Ephem)==0 - || (pMem[i].flags & (MEM_Str|MEM_Blob))==0 ); - sqlite3VdbeMemNulTerminate(&pMem[i]); - REGISTER_TRACE(pOp->p1+i, &pMem[i]); - } - if( db->mallocFailed ) goto no_mem; - - /* Return SQLITE_ROW - */ - p->pc = pc + 1; - rc = SQLITE_ROW; - goto vdbe_return; -} - -/* Opcode: Concat P1 P2 P3 * * -** Synopsis: r[P3]=r[P2]+r[P1] -** -** Add the text in register P1 onto the end of the text in -** register P2 and store the result in register P3. -** If either the P1 or P2 text are NULL then store NULL in P3. -** -** P3 = P2 || P1 -** -** It is illegal for P1 and P3 to be the same register. Sometimes, -** if P3 is the same register as P2, the implementation is able -** to avoid a memcpy(). -*/ -case OP_Concat: { /* same as TK_CONCAT, in1, in2, out3 */ - i64 nByte; - - pIn1 = &aMem[pOp->p1]; - pIn2 = &aMem[pOp->p2]; - pOut = &aMem[pOp->p3]; - assert( pIn1!=pOut ); - if( (pIn1->flags | pIn2->flags) & MEM_Null ){ - sqlite3VdbeMemSetNull(pOut); - break; - } - if( ExpandBlob(pIn1) || ExpandBlob(pIn2) ) goto no_mem; - Stringify(pIn1, encoding); - Stringify(pIn2, encoding); - nByte = pIn1->n + pIn2->n; - if( nByte>db->aLimit[SQLITE_LIMIT_LENGTH] ){ - goto too_big; - } - if( sqlite3VdbeMemGrow(pOut, (int)nByte+2, pOut==pIn2) ){ - goto no_mem; - } - MemSetTypeFlag(pOut, MEM_Str); - if( pOut!=pIn2 ){ - memcpy(pOut->z, pIn2->z, pIn2->n); - } - memcpy(&pOut->z[pIn2->n], pIn1->z, pIn1->n); - pOut->z[nByte]=0; - pOut->z[nByte+1] = 0; - pOut->flags |= MEM_Term; - pOut->n = (int)nByte; - pOut->enc = encoding; - UPDATE_MAX_BLOBSIZE(pOut); - break; -} - -/* Opcode: Add P1 P2 P3 * * -** Synopsis: r[P3]=r[P1]+r[P2] -** -** Add the value in register P1 to the value in register P2 -** and store the result in register P3. -** If either input is NULL, the result is NULL. -*/ -/* Opcode: Multiply P1 P2 P3 * * -** Synopsis: r[P3]=r[P1]*r[P2] -** -** -** Multiply the value in register P1 by the value in register P2 -** and store the result in register P3. -** If either input is NULL, the result is NULL. 
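A tiny standalone sketch of the NULL-propagation rule stated for Add and Multiply (a hypothetical Reg type, not the Mem structure used by vdbe.c):

#include <stdio.h>
#include <stdint.h>

typedef struct { int is_null; int64_t i; } Reg;

/* If either operand is NULL, the result is NULL; otherwise add the values. */
static Reg add_regs(Reg a, Reg b){
  Reg out = {0, 0};
  if( a.is_null || b.is_null ){ out.is_null = 1; return out; }
  out.i = a.i + b.i;
  return out;
}

int main(void){
  Reg x = {0, 40}, y = {0, 2}, nil = {1, 0};
  Reg r1 = add_regs(x, y), r2 = add_regs(x, nil);
  printf("40+2    -> %lld\n", (long long)r1.i);
  printf("40+NULL -> %s\n", r2.is_null ? "NULL" : "value");
  return 0;
}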
-*/ -/* Opcode: Subtract P1 P2 P3 * * -** Synopsis: r[P3]=r[P2]-r[P1] -** -** Subtract the value in register P1 from the value in register P2 -** and store the result in register P3. -** If either input is NULL, the result is NULL. -*/ -/* Opcode: Divide P1 P2 P3 * * -** Synopsis: r[P3]=r[P2]/r[P1] -** -** Divide the value in register P1 by the value in register P2 -** and store the result in register P3 (P3=P2/P1). If the value in -** register P1 is zero, then the result is NULL. If either input is -** NULL, the result is NULL. -*/ -/* Opcode: Remainder P1 P2 P3 * * -** Synopsis: r[P3]=r[P2]%r[P1] -** -** Compute the remainder after integer register P2 is divided by -** register P1 and store the result in register P3. -** If the value in register P1 is zero the result is NULL. -** If either operand is NULL, the result is NULL. -*/ -case OP_Add: /* same as TK_PLUS, in1, in2, out3 */ -case OP_Subtract: /* same as TK_MINUS, in1, in2, out3 */ -case OP_Multiply: /* same as TK_STAR, in1, in2, out3 */ -case OP_Divide: /* same as TK_SLASH, in1, in2, out3 */ -case OP_Remainder: { /* same as TK_REM, in1, in2, out3 */ - char bIntint; /* Started out as two integer operands */ - u16 flags; /* Combined MEM_* flags from both inputs */ - u16 type1; /* Numeric type of left operand */ - u16 type2; /* Numeric type of right operand */ - i64 iA; /* Integer value of left operand */ - i64 iB; /* Integer value of right operand */ - double rA; /* Real value of left operand */ - double rB; /* Real value of right operand */ - - pIn1 = &aMem[pOp->p1]; - type1 = numericType(pIn1); - pIn2 = &aMem[pOp->p2]; - type2 = numericType(pIn2); - pOut = &aMem[pOp->p3]; - flags = pIn1->flags | pIn2->flags; - if( (flags & MEM_Null)!=0 ) goto arithmetic_result_is_null; - if( (type1 & type2 & MEM_Int)!=0 ){ - iA = pIn1->u.i; - iB = pIn2->u.i; - bIntint = 1; - switch( pOp->opcode ){ - case OP_Add: if( sqlite3AddInt64(&iB,iA) ) goto fp_math; break; - case OP_Subtract: if( sqlite3SubInt64(&iB,iA) ) goto fp_math; break; - case OP_Multiply: if( sqlite3MulInt64(&iB,iA) ) goto fp_math; break; - case OP_Divide: { - if( iA==0 ) goto arithmetic_result_is_null; - if( iA==-1 && iB==SMALLEST_INT64 ) goto fp_math; - iB /= iA; - break; - } - default: { - if( iA==0 ) goto arithmetic_result_is_null; - if( iA==-1 ) iA = 1; - iB %= iA; - break; - } - } - pOut->u.i = iB; - MemSetTypeFlag(pOut, MEM_Int); - }else{ - bIntint = 0; -fp_math: - rA = sqlite3VdbeRealValue(pIn1); - rB = sqlite3VdbeRealValue(pIn2); - switch( pOp->opcode ){ - case OP_Add: rB += rA; break; - case OP_Subtract: rB -= rA; break; - case OP_Multiply: rB *= rA; break; - case OP_Divide: { - /* (double)0 In case of SQLITE_OMIT_FLOATING_POINT... */ - if( rA==(double)0 ) goto arithmetic_result_is_null; - rB /= rA; - break; - } - default: { - iA = (i64)rA; - iB = (i64)rB; - if( iA==0 ) goto arithmetic_result_is_null; - if( iA==-1 ) iA = 1; - rB = (double)(iB % iA); - break; - } - } -#ifdef SQLITE_OMIT_FLOATING_POINT - pOut->u.i = rB; - MemSetTypeFlag(pOut, MEM_Int); -#else - if( sqlite3IsNaN(rB) ){ - goto arithmetic_result_is_null; - } - pOut->r = rB; - MemSetTypeFlag(pOut, MEM_Real); - if( ((type1|type2)&MEM_Real)==0 && !bIntint ){ - sqlite3VdbeIntegerAffinity(pOut); - } -#endif - } - break; - -arithmetic_result_is_null: - sqlite3VdbeMemSetNull(pOut); - break; -} - -/* Opcode: CollSeq P1 * * P4 -** -** P4 is a pointer to a CollSeq struct. If the next call to a user function -** or aggregate calls sqlite3GetFuncCollSeq(), this collation sequence will -** be returned. 
This is used by the built-in min(), max() and nullif() -** functions. -** -** If P1 is not zero, then it is a register that a subsequent min() or -** max() aggregate will set to 1 if the current row is not the minimum or -** maximum. The P1 register is initialized to 0 by this instruction. -** -** The interface used by the implementation of the aforementioned functions -** to retrieve the collation sequence set by this opcode is not available -** publicly, only to user functions defined in func.c. -*/ -case OP_CollSeq: { - assert( pOp->p4type==P4_COLLSEQ ); - if( pOp->p1 ){ - sqlite3VdbeMemSetInt64(&aMem[pOp->p1], 0); - } - break; -} - -/* Opcode: Function P1 P2 P3 P4 P5 -** Synopsis: r[P3]=func(r[P2@P5]) -** -** Invoke a user function (P4 is a pointer to a Function structure that -** defines the function) with P5 arguments taken from register P2 and -** successors. The result of the function is stored in register P3. -** Register P3 must not be one of the function inputs. -** -** P1 is a 32-bit bitmask indicating whether or not each argument to the -** function was determined to be constant at compile time. If the first -** argument was constant then bit 0 of P1 is set. This is used to determine -** whether meta data associated with a user function argument using the -** sqlite3_set_auxdata() API may be safely retained until the next -** invocation of this opcode. -** -** See also: AggStep and AggFinal -*/ -case OP_Function: { - int i; - Mem *pArg; - sqlite3_context ctx; - sqlite3_value **apVal; - int n; - - n = pOp->p5; - apVal = p->apArg; - assert( apVal || n==0 ); - assert( pOp->p3>0 && pOp->p3<=(p->nMem-p->nCursor) ); - pOut = &aMem[pOp->p3]; - memAboutToChange(p, pOut); - - assert( n==0 || (pOp->p2>0 && pOp->p2+n<=(p->nMem-p->nCursor)+1) ); - assert( pOp->p3p2 || pOp->p3>=pOp->p2+n ); - pArg = &aMem[pOp->p2]; - for(i=0; ip2+i, pArg); - } - - assert( pOp->p4type==P4_FUNCDEF ); - ctx.pFunc = pOp->p4.pFunc; - ctx.iOp = pc; - ctx.pVdbe = p; - - /* The output cell may already have a buffer allocated. Move - ** the pointer to ctx.s so in case the user-function can use - ** the already allocated buffer instead of allocating a new one. - */ - memcpy(&ctx.s, pOut, sizeof(Mem)); - pOut->flags = MEM_Null; - pOut->xDel = 0; - pOut->zMalloc = 0; - MemSetTypeFlag(&ctx.s, MEM_Null); - - ctx.fErrorOrAux = 0; - if( ctx.pFunc->funcFlags & SQLITE_FUNC_NEEDCOLL ){ - assert( pOp>aOp ); - assert( pOp[-1].p4type==P4_COLLSEQ ); - assert( pOp[-1].opcode==OP_CollSeq ); - ctx.pColl = pOp[-1].p4.pColl; - } - db->lastRowid = lastRowid; - (*ctx.pFunc->xFunc)(&ctx, n, apVal); /* IMP: R-24505-23230 */ - lastRowid = db->lastRowid; - - if( db->mallocFailed ){ - /* Even though a malloc() has failed, the implementation of the - ** user function may have called an sqlite3_result_XXX() function - ** to return a value. The following call releases any resources - ** associated with such a value. 
-    */
-    sqlite3VdbeMemRelease(&ctx.s);
-    goto no_mem;
-  }
-
-  /* If the function returned an error, throw an exception */
-  if( ctx.fErrorOrAux ){
-    if( ctx.isError ){
-      sqlite3SetString(&p->zErrMsg, db, "%s", sqlite3_value_text(&ctx.s));
-      rc = ctx.isError;
-    }
-    sqlite3VdbeDeleteAuxData(p, pc, pOp->p1);
-  }
-
-  /* Copy the result of the function into register P3 */
-  sqlite3VdbeChangeEncoding(&ctx.s, encoding);
-  assert( pOut->flags==MEM_Null );
-  memcpy(pOut, &ctx.s, sizeof(Mem));
-  if( sqlite3VdbeMemTooBig(pOut) ){
-    goto too_big;
-  }
-
-#if 0
-  /* The app-defined function has done something that has caused this
-  ** statement to expire.  (Perhaps the function called sqlite3_exec()
-  ** with a CREATE TABLE statement.)
-  */
-  if( p->expired ) rc = SQLITE_ABORT;
-#endif
-
-  REGISTER_TRACE(pOp->p3, pOut);
-  UPDATE_MAX_BLOBSIZE(pOut);
-  break;
-}
-
-/* Opcode: BitAnd P1 P2 P3 * *
-** Synopsis: r[P3]=r[P1]&r[P2]
-**
-** Take the bit-wise AND of the values in register P1 and P2 and
-** store the result in register P3.
-** If either input is NULL, the result is NULL.
-*/
-/* Opcode: BitOr P1 P2 P3 * *
-** Synopsis: r[P3]=r[P1]|r[P2]
-**
-** Take the bit-wise OR of the values in register P1 and P2 and
-** store the result in register P3.
-** If either input is NULL, the result is NULL.
-*/
-/* Opcode: ShiftLeft P1 P2 P3 * *
-** Synopsis: r[P3]=r[P2]<<r[P1]
-**
-** Shift the integer value in register P2 to the left by the
-** number of bits specified by the integer in register P1.
-** Store the result in register P3.
-** If either input is NULL, the result is NULL.
-*/
-/* Opcode: ShiftRight P1 P2 P3 * *
-** Synopsis: r[P3]=r[P2]>>r[P1]
-**
-** Shift the integer value in register P2 to the right by the
-** number of bits specified by the integer in register P1.
-** Store the result in register P3.
-** If either input is NULL, the result is NULL.
-*/
-case OP_BitAnd: /* same as TK_BITAND, in1, in2, out3 */
-case OP_BitOr: /* same as TK_BITOR, in1, in2, out3 */
-case OP_ShiftLeft: /* same as TK_LSHIFT, in1, in2, out3 */
-case OP_ShiftRight: { /* same as TK_RSHIFT, in1, in2, out3 */
-  i64 iA;
-  u64 uA;
-  i64 iB;
-  u8 op;
-
-  pIn1 = &aMem[pOp->p1];
-  pIn2 = &aMem[pOp->p2];
-  pOut = &aMem[pOp->p3];
-  if( (pIn1->flags | pIn2->flags) & MEM_Null ){
-    sqlite3VdbeMemSetNull(pOut);
-    break;
-  }
-  iA = sqlite3VdbeIntValue(pIn2);
-  iB = sqlite3VdbeIntValue(pIn1);
-  op = pOp->opcode;
-  if( op==OP_BitAnd ){
-    iA &= iB;
-  }else if( op==OP_BitOr ){
-    iA |= iB;
-  }else if( iB!=0 ){
-    assert( op==OP_ShiftRight || op==OP_ShiftLeft );
-
-    /* If shifting by a negative amount, shift in the other direction */
-    if( iB<0 ){
-      assert( OP_ShiftRight==OP_ShiftLeft+1 );
-      op = 2*OP_ShiftLeft + 1 - op;
-      iB = iB>(-64) ? -iB : 64;
-    }
-
-    if( iB>=64 ){
-      iA = (iA>=0 || op==OP_ShiftLeft) ? 0 : -1;
-    }else{
-      memcpy(&uA, &iA, sizeof(uA));
-      if( op==OP_ShiftLeft ){
-        uA <<= iB;
-      }else{
-        uA >>= iB;
-        /* Sign-extend on a right shift of a negative number */
-        if( iA<0 ) uA |= ((((u64)0xffffffff)<<32)|0xffffffff) << (64-iB);
-      }
-      memcpy(&iA, &uA, sizeof(iA));
-    }
-  }
-  pOut->u.i = iA;
-  MemSetTypeFlag(pOut, MEM_Int);
-  break;
-}
-
-/* Opcode: AddImm P1 P2 * * *
-** Synopsis: r[P1]=r[P1]+P2
-**
-** Add the constant P2 to the value in register P1.
-** The result is always an integer.
-**
-** To force any register to be an integer, just add 0.
-*/
-case OP_AddImm: { /* in1 */
-  pIn1 = &aMem[pOp->p1];
-  memAboutToChange(p, pIn1);
-  sqlite3VdbeMemIntegerify(pIn1);
-  pIn1->u.i += pOp->p2;
-  break;
-}
-
-/* Opcode: MustBeInt P1 P2 * * *
-**
-** Force the value in register P1 to be an integer. If the value
-** in P1 is not an integer and cannot be converted into an integer
-** without data loss, then jump immediately to P2, or if P2==0
-** raise an SQLITE_MISMATCH exception.
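The ShiftLeft/ShiftRight case above normalizes negative shift counts and counts of 64 or more before shifting. A standalone sketch of those rules (a hypothetical helper, not vdbe.c code):

#include <stdio.h>
#include <stdint.h>
#include <string.h>

/* Left shift with vdbe-style semantics: a negative count shifts right
** instead, and a count of 64 or more yields 0 (or -1 for a right shift
** of a negative value). */
static int64_t vdbe_style_shl(int64_t value, int64_t count){
  uint64_t u;
  int right = 0;
  if( count<0 ){ right = 1; count = count>-64 ? -count : 64; }
  if( count>=64 ) return (value>=0 || !right) ? 0 : -1;
  memcpy(&u, &value, sizeof(u));
  if( right ){
    u >>= count;
    if( value<0 ) u |= ~(uint64_t)0 << (64-count);  /* sign-extend */
  }else{
    u <<= count;
  }
  memcpy(&value, &u, sizeof(value));
  return value;
}

int main(void){
  printf("%lld\n", (long long)vdbe_style_shl(1, 3));     /* 8 */
  printf("%lld\n", (long long)vdbe_style_shl(-16, -2));  /* -4: negative count shifts right */
  printf("%lld\n", (long long)vdbe_style_shl(5, 100));   /* 0: count >= 64 */
  return 0;
}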
-*/ -case OP_MustBeInt: { /* jump, in1 */ - pIn1 = &aMem[pOp->p1]; - if( (pIn1->flags & MEM_Int)==0 ){ - applyAffinity(pIn1, SQLITE_AFF_NUMERIC, encoding); - VdbeBranchTaken((pIn1->flags&MEM_Int)==0, 2); - if( (pIn1->flags & MEM_Int)==0 ){ - if( pOp->p2==0 ){ - rc = SQLITE_MISMATCH; - goto abort_due_to_error; - }else{ - pc = pOp->p2 - 1; - break; - } - } - } - MemSetTypeFlag(pIn1, MEM_Int); - break; -} - -#ifndef SQLITE_OMIT_FLOATING_POINT -/* Opcode: RealAffinity P1 * * * * -** -** If register P1 holds an integer convert it to a real value. -** -** This opcode is used when extracting information from a column that -** has REAL affinity. Such column values may still be stored as -** integers, for space efficiency, but after extraction we want them -** to have only a real value. -*/ -case OP_RealAffinity: { /* in1 */ - pIn1 = &aMem[pOp->p1]; - if( pIn1->flags & MEM_Int ){ - sqlite3VdbeMemRealify(pIn1); - } - break; -} -#endif - -#ifndef SQLITE_OMIT_CAST -/* Opcode: ToText P1 * * * * -** -** Force the value in register P1 to be text. -** If the value is numeric, convert it to a string using the -** equivalent of sprintf(). Blob values are unchanged and -** are afterwards simply interpreted as text. -** -** A NULL value is not changed by this routine. It remains NULL. -*/ -case OP_ToText: { /* same as TK_TO_TEXT, in1 */ - pIn1 = &aMem[pOp->p1]; - memAboutToChange(p, pIn1); - if( pIn1->flags & MEM_Null ) break; - assert( MEM_Str==(MEM_Blob>>3) ); - pIn1->flags |= (pIn1->flags&MEM_Blob)>>3; - applyAffinity(pIn1, SQLITE_AFF_TEXT, encoding); - rc = ExpandBlob(pIn1); - assert( pIn1->flags & MEM_Str || db->mallocFailed ); - pIn1->flags &= ~(MEM_Int|MEM_Real|MEM_Blob|MEM_Zero); - UPDATE_MAX_BLOBSIZE(pIn1); - break; -} - -/* Opcode: ToBlob P1 * * * * -** -** Force the value in register P1 to be a BLOB. -** If the value is numeric, convert it to a string first. -** Strings are simply reinterpreted as blobs with no change -** to the underlying data. -** -** A NULL value is not changed by this routine. It remains NULL. -*/ -case OP_ToBlob: { /* same as TK_TO_BLOB, in1 */ - pIn1 = &aMem[pOp->p1]; - if( pIn1->flags & MEM_Null ) break; - if( (pIn1->flags & MEM_Blob)==0 ){ - applyAffinity(pIn1, SQLITE_AFF_TEXT, encoding); - assert( pIn1->flags & MEM_Str || db->mallocFailed ); - MemSetTypeFlag(pIn1, MEM_Blob); - }else{ - pIn1->flags &= ~(MEM_TypeMask&~MEM_Blob); - } - UPDATE_MAX_BLOBSIZE(pIn1); - break; -} - -/* Opcode: ToNumeric P1 * * * * -** -** Force the value in register P1 to be numeric (either an -** integer or a floating-point number.) -** If the value is text or blob, try to convert it to an using the -** equivalent of atoi() or atof() and store 0 if no such conversion -** is possible. -** -** A NULL value is not changed by this routine. It remains NULL. -*/ -case OP_ToNumeric: { /* same as TK_TO_NUMERIC, in1 */ - pIn1 = &aMem[pOp->p1]; - sqlite3VdbeMemNumerify(pIn1); - break; -} -#endif /* SQLITE_OMIT_CAST */ - -/* Opcode: ToInt P1 * * * * -** -** Force the value in register P1 to be an integer. If -** The value is currently a real number, drop its fractional part. -** If the value is text or blob, try to convert it to an integer using the -** equivalent of atoi() and store 0 if no such conversion is possible. -** -** A NULL value is not changed by this routine. It remains NULL. 
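The cast opcodes above describe text-to-integer conversion as the equivalent of atoi(), storing 0 when no conversion is possible. A hedged sketch of that rule (prefix parsing via strtoll; the real conversion also handles text encodings and real numbers):

#include <stdio.h>
#include <stdlib.h>

static long long cast_text_to_int(const char *z){
  char *end;
  long long v = strtoll(z, &end, 10);
  if( end==z ) return 0;   /* no digits consumed: "abc" -> 0 */
  return v;                /* leading digits parsed: "123abc" -> 123 */
}

int main(void){
  printf("%lld\n", cast_text_to_int("123abc")); /* 123 */
  printf("%lld\n", cast_text_to_int("abc"));    /* 0 */
  return 0;
}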
-*/ -case OP_ToInt: { /* same as TK_TO_INT, in1 */ - pIn1 = &aMem[pOp->p1]; - if( (pIn1->flags & MEM_Null)==0 ){ - sqlite3VdbeMemIntegerify(pIn1); - } - break; -} - -#if !defined(SQLITE_OMIT_CAST) && !defined(SQLITE_OMIT_FLOATING_POINT) -/* Opcode: ToReal P1 * * * * -** -** Force the value in register P1 to be a floating point number. -** If The value is currently an integer, convert it. -** If the value is text or blob, try to convert it to an integer using the -** equivalent of atoi() and store 0.0 if no such conversion is possible. -** -** A NULL value is not changed by this routine. It remains NULL. -*/ -case OP_ToReal: { /* same as TK_TO_REAL, in1 */ - pIn1 = &aMem[pOp->p1]; - memAboutToChange(p, pIn1); - if( (pIn1->flags & MEM_Null)==0 ){ - sqlite3VdbeMemRealify(pIn1); - } - break; -} -#endif /* !defined(SQLITE_OMIT_CAST) && !defined(SQLITE_OMIT_FLOATING_POINT) */ - -/* Opcode: Lt P1 P2 P3 P4 P5 -** Synopsis: if r[P1]r[P3] goto P2 -** -** This works just like the Lt opcode except that the jump is taken if -** the content of register P3 is greater than the content of -** register P1. See the Lt opcode for additional information. -*/ -/* Opcode: Ge P1 P2 P3 P4 P5 -** Synopsis: if r[P1]>=r[P3] goto P2 -** -** This works just like the Lt opcode except that the jump is taken if -** the content of register P3 is greater than or equal to the content of -** register P1. See the Lt opcode for additional information. -*/ -case OP_Eq: /* same as TK_EQ, jump, in1, in3 */ -case OP_Ne: /* same as TK_NE, jump, in1, in3 */ -case OP_Lt: /* same as TK_LT, jump, in1, in3 */ -case OP_Le: /* same as TK_LE, jump, in1, in3 */ -case OP_Gt: /* same as TK_GT, jump, in1, in3 */ -case OP_Ge: { /* same as TK_GE, jump, in1, in3 */ - int res; /* Result of the comparison of pIn1 against pIn3 */ - char affinity; /* Affinity to use for comparison */ - u16 flags1; /* Copy of initial value of pIn1->flags */ - u16 flags3; /* Copy of initial value of pIn3->flags */ - - pIn1 = &aMem[pOp->p1]; - pIn3 = &aMem[pOp->p3]; - flags1 = pIn1->flags; - flags3 = pIn3->flags; - if( (flags1 | flags3)&MEM_Null ){ - /* One or both operands are NULL */ - if( pOp->p5 & SQLITE_NULLEQ ){ - /* If SQLITE_NULLEQ is set (which will only happen if the operator is - ** OP_Eq or OP_Ne) then take the jump or not depending on whether - ** or not both operands are null. - */ - assert( pOp->opcode==OP_Eq || pOp->opcode==OP_Ne ); - assert( (flags1 & MEM_Cleared)==0 ); - assert( (pOp->p5 & SQLITE_JUMPIFNULL)==0 ); - if( (flags1&MEM_Null)!=0 - && (flags3&MEM_Null)!=0 - && (flags3&MEM_Cleared)==0 - ){ - res = 0; /* Results are equal */ - }else{ - res = 1; /* Results are not equal */ - } - }else{ - /* SQLITE_NULLEQ is clear and at least one operand is NULL, - ** then the result is always NULL. - ** The jump is taken if the SQLITE_JUMPIFNULL bit is set. - */ - if( pOp->p5 & SQLITE_STOREP2 ){ - pOut = &aMem[pOp->p2]; - MemSetTypeFlag(pOut, MEM_Null); - REGISTER_TRACE(pOp->p2, pOut); - }else{ - VdbeBranchTaken(2,3); - if( pOp->p5 & SQLITE_JUMPIFNULL ){ - pc = pOp->p2-1; - } - } - break; - } - }else{ - /* Neither operand is NULL. Do a comparison. 
*/ - affinity = pOp->p5 & SQLITE_AFF_MASK; - if( affinity ){ - applyAffinity(pIn1, affinity, encoding); - applyAffinity(pIn3, affinity, encoding); - if( db->mallocFailed ) goto no_mem; - } - - assert( pOp->p4type==P4_COLLSEQ || pOp->p4.pColl==0 ); - ExpandBlob(pIn1); - ExpandBlob(pIn3); - res = sqlite3MemCompare(pIn3, pIn1, pOp->p4.pColl); - } - switch( pOp->opcode ){ - case OP_Eq: res = res==0; break; - case OP_Ne: res = res!=0; break; - case OP_Lt: res = res<0; break; - case OP_Le: res = res<=0; break; - case OP_Gt: res = res>0; break; - default: res = res>=0; break; - } - - if( pOp->p5 & SQLITE_STOREP2 ){ - pOut = &aMem[pOp->p2]; - memAboutToChange(p, pOut); - MemSetTypeFlag(pOut, MEM_Int); - pOut->u.i = res; - REGISTER_TRACE(pOp->p2, pOut); - }else{ - VdbeBranchTaken(res!=0, (pOp->p5 & SQLITE_NULLEQ)?2:3); - if( res ){ - pc = pOp->p2-1; - } - } - /* Undo any changes made by applyAffinity() to the input registers. */ - pIn1->flags = (pIn1->flags&~MEM_TypeMask) | (flags1&MEM_TypeMask); - pIn3->flags = (pIn3->flags&~MEM_TypeMask) | (flags3&MEM_TypeMask); - break; -} - -/* Opcode: Permutation * * * P4 * -** -** Set the permutation used by the OP_Compare operator to be the array -** of integers in P4. -** -** The permutation is only valid until the next OP_Compare that has -** the OPFLAG_PERMUTE bit set in P5. Typically the OP_Permutation should -** occur immediately prior to the OP_Compare. -*/ -case OP_Permutation: { - assert( pOp->p4type==P4_INTARRAY ); - assert( pOp->p4.ai ); - aPermute = pOp->p4.ai; - break; -} - -/* Opcode: Compare P1 P2 P3 P4 P5 -** Synopsis: r[P1@P3] <-> r[P2@P3] -** -** Compare two vectors of registers in reg(P1)..reg(P1+P3-1) (call this -** vector "A") and in reg(P2)..reg(P2+P3-1) ("B"). Save the result of -** the comparison for use by the next OP_Jump instruct. -** -** If P5 has the OPFLAG_PERMUTE bit set, then the order of comparison is -** determined by the most recent OP_Permutation operator. If the -** OPFLAG_PERMUTE bit is clear, then register are compared in sequential -** order. -** -** P4 is a KeyInfo structure that defines collating sequences and sort -** orders for the comparison. The permutation applies to registers -** only. The KeyInfo elements are used sequentially. -** -** The comparison is a sort comparison, so NULLs compare equal, -** NULLs are less than numbers, numbers are less than strings, -** and strings are less than blobs. 
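OP_Compare, as documented above, walks two register vectors and stops at the first differing column, honoring a per-column sort order. A standalone sketch with plain int columns (a hypothetical helper, not vdbe.c code):

#include <stdio.h>

/* Compare vectors a and b element by element; desc[i] flips the sign for
** a descending column; the first non-zero comparison decides the result. */
static int compare_vectors(const int *a, const int *b, const int *desc, int n){
  for(int i=0; i<n; i++){
    int cmp = (a[i]>b[i]) - (a[i]<b[i]);
    if( cmp ){
      return desc[i] ? -cmp : cmp;
    }
  }
  return 0;  /* all columns equal */
}

int main(void){
  int a[] = {1, 7, 3}, b[] = {1, 5, 9};
  int order[] = {0, 1, 0};                          /* second column sorts DESC */
  printf("%d\n", compare_vectors(a, b, order, 3));  /* -1: 7>5, but DESC flips it */
  return 0;
}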
-*/ -case OP_Compare: { - int n; - int i; - int p1; - int p2; - const KeyInfo *pKeyInfo; - int idx; - CollSeq *pColl; /* Collating sequence to use on this term */ - int bRev; /* True for DESCENDING sort order */ - - if( (pOp->p5 & OPFLAG_PERMUTE)==0 ) aPermute = 0; - n = pOp->p3; - pKeyInfo = pOp->p4.pKeyInfo; - assert( n>0 ); - assert( pKeyInfo!=0 ); - p1 = pOp->p1; - p2 = pOp->p2; -#if SQLITE_DEBUG - if( aPermute ){ - int k, mx = 0; - for(k=0; kmx ) mx = aPermute[k]; - assert( p1>0 && p1+mx<=(p->nMem-p->nCursor)+1 ); - assert( p2>0 && p2+mx<=(p->nMem-p->nCursor)+1 ); - }else{ - assert( p1>0 && p1+n<=(p->nMem-p->nCursor)+1 ); - assert( p2>0 && p2+n<=(p->nMem-p->nCursor)+1 ); - } -#endif /* SQLITE_DEBUG */ - for(i=0; inField ); - pColl = pKeyInfo->aColl[i]; - bRev = pKeyInfo->aSortOrder[i]; - iCompare = sqlite3MemCompare(&aMem[p1+idx], &aMem[p2+idx], pColl); - if( iCompare ){ - if( bRev ) iCompare = -iCompare; - break; - } - } - aPermute = 0; - break; -} - -/* Opcode: Jump P1 P2 P3 * * -** -** Jump to the instruction at address P1, P2, or P3 depending on whether -** in the most recent OP_Compare instruction the P1 vector was less than -** equal to, or greater than the P2 vector, respectively. -*/ -case OP_Jump: { /* jump */ - if( iCompare<0 ){ - pc = pOp->p1 - 1; VdbeBranchTaken(0,3); - }else if( iCompare==0 ){ - pc = pOp->p2 - 1; VdbeBranchTaken(1,3); - }else{ - pc = pOp->p3 - 1; VdbeBranchTaken(2,3); - } - break; -} - -/* Opcode: And P1 P2 P3 * * -** Synopsis: r[P3]=(r[P1] && r[P2]) -** -** Take the logical AND of the values in registers P1 and P2 and -** write the result into register P3. -** -** If either P1 or P2 is 0 (false) then the result is 0 even if -** the other input is NULL. A NULL and true or two NULLs give -** a NULL output. -*/ -/* Opcode: Or P1 P2 P3 * * -** Synopsis: r[P3]=(r[P1] || r[P2]) -** -** Take the logical OR of the values in register P1 and P2 and -** store the answer in register P3. -** -** If either P1 or P2 is nonzero (true) then the result is 1 (true) -** even if the other input is NULL. A NULL and false or two NULLs -** give a NULL output. -*/ -case OP_And: /* same as TK_AND, in1, in2, out3 */ -case OP_Or: { /* same as TK_OR, in1, in2, out3 */ - int v1; /* Left operand: 0==FALSE, 1==TRUE, 2==UNKNOWN or NULL */ - int v2; /* Right operand: 0==FALSE, 1==TRUE, 2==UNKNOWN or NULL */ - - pIn1 = &aMem[pOp->p1]; - if( pIn1->flags & MEM_Null ){ - v1 = 2; - }else{ - v1 = sqlite3VdbeIntValue(pIn1)!=0; - } - pIn2 = &aMem[pOp->p2]; - if( pIn2->flags & MEM_Null ){ - v2 = 2; - }else{ - v2 = sqlite3VdbeIntValue(pIn2)!=0; - } - if( pOp->opcode==OP_And ){ - static const unsigned char and_logic[] = { 0, 0, 0, 0, 1, 2, 0, 2, 2 }; - v1 = and_logic[v1*3+v2]; - }else{ - static const unsigned char or_logic[] = { 0, 1, 2, 1, 1, 1, 2, 1, 2 }; - v1 = or_logic[v1*3+v2]; - } - pOut = &aMem[pOp->p3]; - if( v1==2 ){ - MemSetTypeFlag(pOut, MEM_Null); - }else{ - pOut->u.i = v1; - MemSetTypeFlag(pOut, MEM_Int); - } - break; -} - -/* Opcode: Not P1 P2 * * * -** Synopsis: r[P2]= !r[P1] -** -** Interpret the value in register P1 as a boolean value. Store the -** boolean complement in register P2. If the value in register P1 is -** NULL, then a NULL is stored in P2. 
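The And/Or opcodes above use small lookup tables for three-valued logic, with 0 for false, 1 for true, and 2 for NULL. A standalone sketch reusing those same tables:

#include <stdio.h>

static const unsigned char and3[] = { 0, 0, 0,   0, 1, 2,   0, 2, 2 };
static const unsigned char or3[]  = { 0, 1, 2,   1, 1, 1,   2, 1, 2 };

static const char *name(int v){ return v==0 ? "false" : v==1 ? "true" : "NULL"; }

int main(void){
  int T = 1, F = 0, N = 2;
  printf("false AND NULL = %s\n", name(and3[F*3+N]));  /* false */
  printf("true  AND NULL = %s\n", name(and3[T*3+N]));  /* NULL  */
  printf("true  OR  NULL = %s\n", name(or3[T*3+N]));   /* true  */
  printf("false OR  NULL = %s\n", name(or3[F*3+N]));   /* NULL  */
  return 0;
}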
-*/ -case OP_Not: { /* same as TK_NOT, in1, out2 */ - pIn1 = &aMem[pOp->p1]; - pOut = &aMem[pOp->p2]; - if( pIn1->flags & MEM_Null ){ - sqlite3VdbeMemSetNull(pOut); - }else{ - sqlite3VdbeMemSetInt64(pOut, !sqlite3VdbeIntValue(pIn1)); - } - break; -} - -/* Opcode: BitNot P1 P2 * * * -** Synopsis: r[P1]= ~r[P1] -** -** Interpret the content of register P1 as an integer. Store the -** ones-complement of the P1 value into register P2. If P1 holds -** a NULL then store a NULL in P2. -*/ -case OP_BitNot: { /* same as TK_BITNOT, in1, out2 */ - pIn1 = &aMem[pOp->p1]; - pOut = &aMem[pOp->p2]; - if( pIn1->flags & MEM_Null ){ - sqlite3VdbeMemSetNull(pOut); - }else{ - sqlite3VdbeMemSetInt64(pOut, ~sqlite3VdbeIntValue(pIn1)); - } - break; -} - -/* Opcode: Once P1 P2 * * * -** -** Check if OP_Once flag P1 is set. If so, jump to instruction P2. Otherwise, -** set the flag and fall through to the next instruction. In other words, -** this opcode causes all following opcodes up through P2 (but not including -** P2) to run just once and to be skipped on subsequent times through the loop. -*/ -case OP_Once: { /* jump */ - assert( pOp->p1nOnceFlag ); - VdbeBranchTaken(p->aOnceFlag[pOp->p1]!=0, 2); - if( p->aOnceFlag[pOp->p1] ){ - pc = pOp->p2-1; - }else{ - p->aOnceFlag[pOp->p1] = 1; - } - break; -} - -/* Opcode: If P1 P2 P3 * * -** -** Jump to P2 if the value in register P1 is true. The value -** is considered true if it is numeric and non-zero. If the value -** in P1 is NULL then take the jump if P3 is non-zero. -*/ -/* Opcode: IfNot P1 P2 P3 * * -** -** Jump to P2 if the value in register P1 is False. The value -** is considered false if it has a numeric value of zero. If the value -** in P1 is NULL then take the jump if P3 is zero. -*/ -case OP_If: /* jump, in1 */ -case OP_IfNot: { /* jump, in1 */ - int c; - pIn1 = &aMem[pOp->p1]; - if( pIn1->flags & MEM_Null ){ - c = pOp->p3; - }else{ -#ifdef SQLITE_OMIT_FLOATING_POINT - c = sqlite3VdbeIntValue(pIn1)!=0; -#else - c = sqlite3VdbeRealValue(pIn1)!=0.0; -#endif - if( pOp->opcode==OP_IfNot ) c = !c; - } - VdbeBranchTaken(c!=0, 2); - if( c ){ - pc = pOp->p2-1; - } - break; -} - -/* Opcode: IsNull P1 P2 * * * -** Synopsis: if r[P1]==NULL goto P2 -** -** Jump to P2 if the value in register P1 is NULL. -*/ -case OP_IsNull: { /* same as TK_ISNULL, jump, in1 */ - pIn1 = &aMem[pOp->p1]; - VdbeBranchTaken( (pIn1->flags & MEM_Null)!=0, 2); - if( (pIn1->flags & MEM_Null)!=0 ){ - pc = pOp->p2 - 1; - } - break; -} - -/* Opcode: NotNull P1 P2 * * * -** Synopsis: if r[P1]!=NULL goto P2 -** -** Jump to P2 if the value in register P1 is not NULL. -*/ -case OP_NotNull: { /* same as TK_NOTNULL, jump, in1 */ - pIn1 = &aMem[pOp->p1]; - VdbeBranchTaken( (pIn1->flags & MEM_Null)==0, 2); - if( (pIn1->flags & MEM_Null)==0 ){ - pc = pOp->p2 - 1; - } - break; -} - -/* Opcode: Column P1 P2 P3 P4 P5 -** Synopsis: r[P3]=PX -** -** Interpret the data that cursor P1 points to as a structure built using -** the MakeRecord instruction. (See the MakeRecord opcode for additional -** information about the format of the data.) Extract the P2-th column -** from this record. If there are less that (P2+1) -** values in the record, extract a NULL. -** -** The value extracted is stored in register P3. -** -** If the column contains fewer than P2 fields, then extract a NULL. Or, -** if the P4 argument is a P4_MEM use the value of the P4 argument as -** the result. 
-** -** If the OPFLAG_CLEARCACHE bit is set on P5 and P1 is a pseudo-table cursor, -** then the cache of the cursor is reset prior to extracting the column. -** The first OP_Column against a pseudo-table after the value of the content -** register has changed should have this bit set. -** -** If the OPFLAG_LENGTHARG and OPFLAG_TYPEOFARG bits are set on P5 when -** the result is guaranteed to only be used as the argument of a length() -** or typeof() function, respectively. The loading of large blobs can be -** skipped for length() and all content loading can be skipped for typeof(). -*/ -case OP_Column: { - i64 payloadSize64; /* Number of bytes in the record */ - int p2; /* column number to retrieve */ - VdbeCursor *pC; /* The VDBE cursor */ - BtCursor *pCrsr; /* The BTree cursor */ - u32 *aType; /* aType[i] holds the numeric type of the i-th column */ - u32 *aOffset; /* aOffset[i] is offset to start of data for i-th column */ - int len; /* The length of the serialized data for the column */ - int i; /* Loop counter */ - Mem *pDest; /* Where to write the extracted value */ - Mem sMem; /* For storing the record being decoded */ - const u8 *zData; /* Part of the record being decoded */ - const u8 *zHdr; /* Next unparsed byte of the header */ - const u8 *zEndHdr; /* Pointer to first byte after the header */ - u32 offset; /* Offset into the data */ - u32 szField; /* Number of bytes in the content of a field */ - u32 avail; /* Number of bytes of available data */ - u32 t; /* A type code from the record header */ - Mem *pReg; /* PseudoTable input register */ - - p2 = pOp->p2; - assert( pOp->p3>0 && pOp->p3<=(p->nMem-p->nCursor) ); - pDest = &aMem[pOp->p3]; - memAboutToChange(p, pDest); - assert( pOp->p1>=0 && pOp->p1nCursor ); - pC = p->apCsr[pOp->p1]; - assert( pC!=0 ); - assert( p2nField ); - aType = pC->aType; - aOffset = aType + pC->nField; -#ifndef SQLITE_OMIT_VIRTUALTABLE - assert( pC->pVtabCursor==0 ); /* OP_Column never called on virtual table */ -#endif - pCrsr = pC->pCursor; - assert( pCrsr!=0 || pC->pseudoTableReg>0 ); /* pCrsr NULL on PseudoTables */ - assert( pCrsr!=0 || pC->nullRow ); /* pC->nullRow on PseudoTables */ - - /* If the cursor cache is stale, bring it up-to-date */ - rc = sqlite3VdbeCursorMoveto(pC); - if( rc ) goto abort_due_to_error; - if( pC->cacheStatus!=p->cacheCtr || (pOp->p5&OPFLAG_CLEARCACHE)!=0 ){ - if( pC->nullRow ){ - if( pCrsr==0 ){ - assert( pC->pseudoTableReg>0 ); - pReg = &aMem[pC->pseudoTableReg]; - assert( pReg->flags & MEM_Blob ); - assert( memIsValid(pReg) ); - pC->payloadSize = pC->szRow = avail = pReg->n; - pC->aRow = (u8*)pReg->z; - }else{ - MemSetTypeFlag(pDest, MEM_Null); - goto op_column_out; - } - }else{ - assert( pCrsr ); - if( pC->isTable==0 ){ - assert( sqlite3BtreeCursorIsValid(pCrsr) ); - VVA_ONLY(rc =) sqlite3BtreeKeySize(pCrsr, &payloadSize64); - assert( rc==SQLITE_OK ); /* True because of CursorMoveto() call above */ - /* sqlite3BtreeParseCellPtr() uses getVarint32() to extract the - ** payload size, so it is impossible for payloadSize64 to be - ** larger than 32 bits. 
*/ - assert( (payloadSize64 & SQLITE_MAX_U32)==(u64)payloadSize64 ); - pC->aRow = sqlite3BtreeKeyFetch(pCrsr, &avail); - pC->payloadSize = (u32)payloadSize64; - }else{ - assert( sqlite3BtreeCursorIsValid(pCrsr) ); - VVA_ONLY(rc =) sqlite3BtreeDataSize(pCrsr, &pC->payloadSize); - assert( rc==SQLITE_OK ); /* DataSize() cannot fail */ - pC->aRow = sqlite3BtreeDataFetch(pCrsr, &avail); - } - assert( avail<=65536 ); /* Maximum page size is 64KiB */ - if( pC->payloadSize <= (u32)avail ){ - pC->szRow = pC->payloadSize; - }else{ - pC->szRow = avail; - } - if( pC->payloadSize > (u32)db->aLimit[SQLITE_LIMIT_LENGTH] ){ - goto too_big; - } - } - pC->cacheStatus = p->cacheCtr; - pC->iHdrOffset = getVarint32(pC->aRow, offset); - pC->nHdrParsed = 0; - aOffset[0] = offset; - if( availaRow does not have to hold the entire row, but it does at least - ** need to cover the header of the record. If pC->aRow does not contain - ** the complete header, then set it to zero, forcing the header to be - ** dynamically allocated. */ - pC->aRow = 0; - pC->szRow = 0; - } - - /* Make sure a corrupt database has not given us an oversize header. - ** Do this now to avoid an oversize memory allocation. - ** - ** Type entries can be between 1 and 5 bytes each. But 4 and 5 byte - ** types use so much data space that there can only be 4096 and 32 of - ** them, respectively. So the maximum header length results from a - ** 3-byte type for each of the maximum of 32768 columns plus three - ** extra bytes for the header length itself. 32768*3 + 3 = 98307. - */ - if( offset > 98307 || offset > pC->payloadSize ){ - rc = SQLITE_CORRUPT_BKPT; - goto op_column_error; - } - } - - /* Make sure at least the first p2+1 entries of the header have been - ** parsed and valid information is in aOffset[] and aType[]. - */ - if( pC->nHdrParsed<=p2 ){ - /* If there is more header available for parsing in the record, try - ** to extract additional fields up through the p2+1-th field - */ - if( pC->iHdrOffsetaRow==0 ){ - memset(&sMem, 0, sizeof(sMem)); - rc = sqlite3VdbeMemFromBtree(pCrsr, 0, aOffset[0], - !pC->isTable, &sMem); - if( rc!=SQLITE_OK ){ - goto op_column_error; - } - zData = (u8*)sMem.z; - }else{ - zData = pC->aRow; - } - - /* Fill in aType[i] and aOffset[i] values through the p2-th field. */ - i = pC->nHdrParsed; - offset = aOffset[i]; - zHdr = zData + pC->iHdrOffset; - zEndHdr = zData + aOffset[0]; - assert( i<=p2 && zHdrnHdrParsed = i; - pC->iHdrOffset = (u32)(zHdr - zData); - if( pC->aRow==0 ){ - sqlite3VdbeMemRelease(&sMem); - sMem.flags = MEM_Null; - } - - /* If we have read more header data than was contained in the header, - ** or if the end of the last field appears to be past the end of the - ** record, or if the end of the last field appears to be before the end - ** of the record (when all fields present), then we must be dealing - ** with a corrupt database. - */ - if( (zHdr > zEndHdr) - || (offset > pC->payloadSize) - || (zHdr==zEndHdr && offset!=pC->payloadSize) - ){ - rc = SQLITE_CORRUPT_BKPT; - goto op_column_error; - } - } - - /* If after trying to extra new entries from the header, nHdrParsed is - ** still not up to p2, that means that the record has fewer than p2 - ** columns. So the result will be either the default value or a NULL. - */ - if( pC->nHdrParsed<=p2 ){ - if( pOp->p4type==P4_MEM ){ - sqlite3VdbeMemShallowCopy(pDest, pOp->p4.pMem, MEM_Static); - }else{ - MemSetTypeFlag(pDest, MEM_Null); - } - goto op_column_out; - } - } - - /* Extract the content for the p2+1-th column. 
Control can only - ** reach this point if aOffset[p2], aOffset[p2+1], and aType[p2] are - ** all valid. - */ - assert( p2nHdrParsed ); - assert( rc==SQLITE_OK ); - assert( sqlite3VdbeCheckMemInvariants(pDest) ); - if( pC->szRow>=aOffset[p2+1] ){ - /* This is the common case where the desired content fits on the original - ** page - where the content is not on an overflow page */ - VdbeMemRelease(pDest); - sqlite3VdbeSerialGet(pC->aRow+aOffset[p2], aType[p2], pDest); - }else{ - /* This branch happens only when content is on overflow pages */ - t = aType[p2]; - if( ((pOp->p5 & (OPFLAG_LENGTHARG|OPFLAG_TYPEOFARG))!=0 - && ((t>=12 && (t&1)==0) || (pOp->p5 & OPFLAG_TYPEOFARG)!=0)) - || (len = sqlite3VdbeSerialTypeLen(t))==0 - ){ - /* Content is irrelevant for the typeof() function and for - ** the length(X) function if X is a blob. So we might as well use - ** bogus content rather than reading content from disk. NULL works - ** for text and blob and whatever is in the payloadSize64 variable - ** will work for everything else. Content is also irrelevant if - ** the content length is 0. */ - zData = t<=13 ? (u8*)&payloadSize64 : 0; - sMem.zMalloc = 0; - }else{ - memset(&sMem, 0, sizeof(sMem)); - sqlite3VdbeMemMove(&sMem, pDest); - rc = sqlite3VdbeMemFromBtree(pCrsr, aOffset[p2], len, !pC->isTable, - &sMem); - if( rc!=SQLITE_OK ){ - goto op_column_error; - } - zData = (u8*)sMem.z; - } - sqlite3VdbeSerialGet(zData, t, pDest); - /* If we dynamically allocated space to hold the data (in the - ** sqlite3VdbeMemFromBtree() call above) then transfer control of that - ** dynamically allocated space over to the pDest structure. - ** This prevents a memory copy. */ - if( sMem.zMalloc ){ - assert( sMem.z==sMem.zMalloc ); - assert( VdbeMemDynamic(pDest)==0 ); - assert( (pDest->flags & (MEM_Blob|MEM_Str))==0 || pDest->z==sMem.z ); - pDest->flags &= ~(MEM_Ephem|MEM_Static); - pDest->flags |= MEM_Term; - pDest->z = sMem.z; - pDest->zMalloc = sMem.zMalloc; - } - } - pDest->enc = encoding; - -op_column_out: - Deephemeralize(pDest); -op_column_error: - UPDATE_MAX_BLOBSIZE(pDest); - REGISTER_TRACE(pOp->p3, pDest); - break; -} - -/* Opcode: Affinity P1 P2 * P4 * -** Synopsis: affinity(r[P1@P2]) -** -** Apply affinities to a range of P2 registers starting with P1. -** -** P4 is a string that is P2 characters long. The nth character of the -** string indicates the column affinity that should be used for the nth -** memory cell in the range. -*/ -case OP_Affinity: { - const char *zAffinity; /* The affinity to be applied */ - char cAff; /* A single character of affinity */ - - zAffinity = pOp->p4.z; - assert( zAffinity!=0 ); - assert( zAffinity[pOp->p2]==0 ); - pIn1 = &aMem[pOp->p1]; - while( (cAff = *(zAffinity++))!=0 ){ - assert( pIn1 <= &p->aMem[(p->nMem-p->nCursor)] ); - assert( memIsValid(pIn1) ); - applyAffinity(pIn1, cAff, encoding); - pIn1++; - } - break; -} - -/* Opcode: MakeRecord P1 P2 P3 P4 * -** Synopsis: r[P3]=mkrec(r[P1@P2]) -** -** Convert P2 registers beginning with P1 into the [record format] -** use as a data record in a database table or as a key -** in an index. The OP_Column opcode can decode the record later. -** -** P4 may be a string that is P2 characters long. The nth character of the -** string indicates the column affinity that should be used for the nth -** field of the index key. -** -** The mapping from character to affinity is given by the SQLITE_AFF_ -** macros defined in sqliteInt.h. -** -** If P4 is NULL then all index fields have the affinity NONE. 
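The record format sketched in the MakeRecord comment is a header-size varint, one serial-type varint per field, and then the field data. A small example that lays out a two-field record of one-byte integers by hand (serial type 1 is an 8-bit integer in the SQLite file format; illustrative only, not vdbe.c code):

#include <stdio.h>

int main(void){
  unsigned char rec[8];
  int n = 0;
  signed char v1 = 42, v2 = -7;

  rec[n++] = 3;                   /* hdr-size: 1 size byte + 2 one-byte types */
  rec[n++] = 1;                   /* serial type of field 0: 8-bit integer */
  rec[n++] = 1;                   /* serial type of field 1: 8-bit integer */
  rec[n++] = (unsigned char)v1;   /* data 0 */
  rec[n++] = (unsigned char)v2;   /* data 1 */

  printf("record is %d bytes:", n);
  for(int i=0; i<n; i++) printf(" %02x", rec[i]);
  printf("\n");                   /* 03 01 01 2a f9 */
  return 0;
}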
-*/ -case OP_MakeRecord: { - u8 *zNewRecord; /* A buffer to hold the data for the new record */ - Mem *pRec; /* The new record */ - u64 nData; /* Number of bytes of data space */ - int nHdr; /* Number of bytes of header space */ - i64 nByte; /* Data space required for this record */ - int nZero; /* Number of zero bytes at the end of the record */ - int nVarint; /* Number of bytes in a varint */ - u32 serial_type; /* Type field */ - Mem *pData0; /* First field to be combined into the record */ - Mem *pLast; /* Last field of the record */ - int nField; /* Number of fields in the record */ - char *zAffinity; /* The affinity string for the record */ - int file_format; /* File format to use for encoding */ - int i; /* Space used in zNewRecord[] header */ - int j; /* Space used in zNewRecord[] content */ - int len; /* Length of a field */ - - /* Assuming the record contains N fields, the record format looks - ** like this: - ** - ** ------------------------------------------------------------------------ - ** | hdr-size | type 0 | type 1 | ... | type N-1 | data0 | ... | data N-1 | - ** ------------------------------------------------------------------------ - ** - ** Data(0) is taken from register P1. Data(1) comes from register P1+1 - ** and so froth. - ** - ** Each type field is a varint representing the serial type of the - ** corresponding data element (see sqlite3VdbeSerialType()). The - ** hdr-size field is also a varint which is the offset from the beginning - ** of the record to data0. - */ - nData = 0; /* Number of bytes of data space */ - nHdr = 0; /* Number of bytes of header space */ - nZero = 0; /* Number of zero bytes at the end of the record */ - nField = pOp->p1; - zAffinity = pOp->p4.z; - assert( nField>0 && pOp->p2>0 && pOp->p2+nField<=(p->nMem-p->nCursor)+1 ); - pData0 = &aMem[nField]; - nField = pOp->p2; - pLast = &pData0[nField-1]; - file_format = p->minWriteFileFormat; - - /* Identify the output register */ - assert( pOp->p3p1 || pOp->p3>=pOp->p1+pOp->p2 ); - pOut = &aMem[pOp->p3]; - memAboutToChange(p, pOut); - - /* Apply the requested affinity to all inputs - */ - assert( pData0<=pLast ); - if( zAffinity ){ - pRec = pData0; - do{ - applyAffinity(pRec++, *(zAffinity++), encoding); - assert( zAffinity[0]==0 || pRec<=pLast ); - }while( zAffinity[0] ); - } - - /* Loop through the elements that will make up the record to figure - ** out how much space is required for the new record. - */ - pRec = pLast; - do{ - assert( memIsValid(pRec) ); - serial_type = sqlite3VdbeSerialType(pRec, file_format); - len = sqlite3VdbeSerialTypeLen(serial_type); - if( pRec->flags & MEM_Zero ){ - if( nData ){ - sqlite3VdbeMemExpandBlob(pRec); - }else{ - nZero += pRec->u.nZero; - len -= pRec->u.nZero; - } - } - nData += len; - testcase( serial_type==127 ); - testcase( serial_type==128 ); - nHdr += serial_type<=127 ? 1 : sqlite3VarintLen(serial_type); - }while( (--pRec)>=pData0 ); - - /* Add the initial header varint and total the size */ - testcase( nHdr==126 ); - testcase( nHdr==127 ); - if( nHdr<=126 ){ - /* The common case */ - nHdr += 1; - }else{ - /* Rare case of a really large header */ - nVarint = sqlite3VarintLen(nHdr); - nHdr += nVarint; - if( nVarintdb->aLimit[SQLITE_LIMIT_LENGTH] ){ - goto too_big; - } - - /* Make sure the output register has a buffer large enough to store - ** the new record. The output register (pOp->p3) is not allowed to - ** be one of the input registers (because the following call to - ** sqlite3VdbeMemGrow() could clobber the value before it is used). 
- */ - if( sqlite3VdbeMemGrow(pOut, (int)nByte, 0) ){ - goto no_mem; - } - zNewRecord = (u8 *)pOut->z; - - /* Write the record */ - i = putVarint32(zNewRecord, nHdr); - j = nHdr; - assert( pData0<=pLast ); - pRec = pData0; - do{ - serial_type = sqlite3VdbeSerialType(pRec, file_format); - i += putVarint32(&zNewRecord[i], serial_type); /* serial type */ - j += sqlite3VdbeSerialPut(&zNewRecord[j], pRec, serial_type); /* content */ - }while( (++pRec)<=pLast ); - assert( i==nHdr ); - assert( j==nByte ); - - assert( pOp->p3>0 && pOp->p3<=(p->nMem-p->nCursor) ); - pOut->n = (int)nByte; - pOut->flags = MEM_Blob; - pOut->xDel = 0; - if( nZero ){ - pOut->u.nZero = nZero; - pOut->flags |= MEM_Zero; - } - pOut->enc = SQLITE_UTF8; /* In case the blob is ever converted to text */ - REGISTER_TRACE(pOp->p3, pOut); - UPDATE_MAX_BLOBSIZE(pOut); - break; -} - -/* Opcode: Count P1 P2 * * * -** Synopsis: r[P2]=count() -** -** Store the number of entries (an integer value) in the table or index -** opened by cursor P1 in register P2 -*/ -#ifndef SQLITE_OMIT_BTREECOUNT -case OP_Count: { /* out2-prerelease */ - i64 nEntry; - BtCursor *pCrsr; - - pCrsr = p->apCsr[pOp->p1]->pCursor; - assert( pCrsr ); - nEntry = 0; /* Not needed. Only used to silence a warning. */ - rc = sqlite3BtreeCount(pCrsr, &nEntry); - pOut->u.i = nEntry; - break; -} -#endif - -/* Opcode: Savepoint P1 * * P4 * -** -** Open, release or rollback the savepoint named by parameter P4, depending -** on the value of P1. To open a new savepoint, P1==0. To release (commit) an -** existing savepoint, P1==1, or to rollback an existing savepoint P1==2. -*/ -case OP_Savepoint: { - int p1; /* Value of P1 operand */ - char *zName; /* Name of savepoint */ - int nName; - Savepoint *pNew; - Savepoint *pSavepoint; - Savepoint *pTmp; - int iSavepoint; - int ii; - - p1 = pOp->p1; - zName = pOp->p4.z; - - /* Assert that the p1 parameter is valid. Also that if there is no open - ** transaction, then there cannot be any savepoints. - */ - assert( db->pSavepoint==0 || db->autoCommit==0 ); - assert( p1==SAVEPOINT_BEGIN||p1==SAVEPOINT_RELEASE||p1==SAVEPOINT_ROLLBACK ); - assert( db->pSavepoint || db->isTransactionSavepoint==0 ); - assert( checkSavepointCount(db) ); - assert( p->bIsReader ); - - if( p1==SAVEPOINT_BEGIN ){ - if( db->nVdbeWrite>0 ){ - /* A new savepoint cannot be created if there are active write - ** statements (i.e. open read/write incremental blob handles). - */ - sqlite3SetString(&p->zErrMsg, db, "cannot open savepoint - " - "SQL statements in progress"); - rc = SQLITE_BUSY; - }else{ - nName = sqlite3Strlen30(zName); - -#ifndef SQLITE_OMIT_VIRTUALTABLE - /* This call is Ok even if this savepoint is actually a transaction - ** savepoint (and therefore should not prompt xSavepoint()) callbacks. - ** If this is a transaction savepoint being opened, it is guaranteed - ** that the db->aVTrans[] array is empty. */ - assert( db->autoCommit==0 || db->nVTrans==0 ); - rc = sqlite3VtabSavepoint(db, SAVEPOINT_BEGIN, - db->nStatement+db->nSavepoint); - if( rc!=SQLITE_OK ) goto abort_due_to_error; -#endif - - /* Create a new savepoint structure. */ - pNew = sqlite3DbMallocRaw(db, sizeof(Savepoint)+nName+1); - if( pNew ){ - pNew->zName = (char *)&pNew[1]; - memcpy(pNew->zName, zName, nName+1); - - /* If there is no open transaction, then mark this as a special - ** "transaction savepoint". */ - if( db->autoCommit ){ - db->autoCommit = 0; - db->isTransactionSavepoint = 1; - }else{ - db->nSavepoint++; - } - - /* Link the new savepoint into the database handle's list. 
*/ - pNew->pNext = db->pSavepoint; - db->pSavepoint = pNew; - pNew->nDeferredCons = db->nDeferredCons; - pNew->nDeferredImmCons = db->nDeferredImmCons; - } - } - }else{ - iSavepoint = 0; - - /* Find the named savepoint. If there is no such savepoint, then an - ** an error is returned to the user. */ - for( - pSavepoint = db->pSavepoint; - pSavepoint && sqlite3StrICmp(pSavepoint->zName, zName); - pSavepoint = pSavepoint->pNext - ){ - iSavepoint++; - } - if( !pSavepoint ){ - sqlite3SetString(&p->zErrMsg, db, "no such savepoint: %s", zName); - rc = SQLITE_ERROR; - }else if( db->nVdbeWrite>0 && p1==SAVEPOINT_RELEASE ){ - /* It is not possible to release (commit) a savepoint if there are - ** active write statements. - */ - sqlite3SetString(&p->zErrMsg, db, - "cannot release savepoint - SQL statements in progress" - ); - rc = SQLITE_BUSY; - }else{ - - /* Determine whether or not this is a transaction savepoint. If so, - ** and this is a RELEASE command, then the current transaction - ** is committed. - */ - int isTransaction = pSavepoint->pNext==0 && db->isTransactionSavepoint; - if( isTransaction && p1==SAVEPOINT_RELEASE ){ - if( (rc = sqlite3VdbeCheckFk(p, 1))!=SQLITE_OK ){ - goto vdbe_return; - } - db->autoCommit = 1; - if( sqlite3VdbeHalt(p)==SQLITE_BUSY ){ - p->pc = pc; - db->autoCommit = 0; - p->rc = rc = SQLITE_BUSY; - goto vdbe_return; - } - db->isTransactionSavepoint = 0; - rc = p->rc; - }else{ - iSavepoint = db->nSavepoint - iSavepoint - 1; - if( p1==SAVEPOINT_ROLLBACK ){ - for(ii=0; iinDb; ii++){ - sqlite3BtreeTripAllCursors(db->aDb[ii].pBt, SQLITE_ABORT); - } - } - for(ii=0; iinDb; ii++){ - rc = sqlite3BtreeSavepoint(db->aDb[ii].pBt, p1, iSavepoint); - if( rc!=SQLITE_OK ){ - goto abort_due_to_error; - } - } - if( p1==SAVEPOINT_ROLLBACK && (db->flags&SQLITE_InternChanges)!=0 ){ - sqlite3ExpirePreparedStatements(db); - sqlite3ResetAllSchemasOfConnection(db); - db->flags = (db->flags | SQLITE_InternChanges); - } - } - - /* Regardless of whether this is a RELEASE or ROLLBACK, destroy all - ** savepoints nested inside of the savepoint being operated on. */ - while( db->pSavepoint!=pSavepoint ){ - pTmp = db->pSavepoint; - db->pSavepoint = pTmp->pNext; - sqlite3DbFree(db, pTmp); - db->nSavepoint--; - } - - /* If it is a RELEASE, then destroy the savepoint being operated on - ** too. If it is a ROLLBACK TO, then set the number of deferred - ** constraint violations present in the database to the value stored - ** when the savepoint was created. */ - if( p1==SAVEPOINT_RELEASE ){ - assert( pSavepoint==db->pSavepoint ); - db->pSavepoint = pSavepoint->pNext; - sqlite3DbFree(db, pSavepoint); - if( !isTransaction ){ - db->nSavepoint--; - } - }else{ - db->nDeferredCons = pSavepoint->nDeferredCons; - db->nDeferredImmCons = pSavepoint->nDeferredImmCons; - } - - if( !isTransaction ){ - rc = sqlite3VtabSavepoint(db, p1, iSavepoint); - if( rc!=SQLITE_OK ) goto abort_due_to_error; - } - } - } - - break; -} - -/* Opcode: AutoCommit P1 P2 * * * -** -** Set the database auto-commit flag to P1 (1 or 0). If P2 is true, roll -** back any currently active btree transactions. If there are any active -** VMs (apart from this one), then a ROLLBACK fails. A COMMIT fails if -** there are active writing VMs or active VMs that use shared cache. -** -** This instruction causes the VM to halt. 
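For reference, the three P1 modes of OP_Savepoint above correspond to the SAVEPOINT, RELEASE and ROLLBACK TO statements. A minimal sketch using the public C API (the table and savepoint names are made up for illustration):

    #include <stdio.h>
    #include <sqlite3.h>

    int main(void){
      sqlite3 *db;
      if( sqlite3_open(":memory:", &db)!=SQLITE_OK ) return 1;
      sqlite3_exec(db, "CREATE TABLE t(x)", 0, 0, 0);
      sqlite3_exec(db, "SAVEPOINT sp1", 0, 0, 0);       /* OP_Savepoint P1==0: open          */
      sqlite3_exec(db, "INSERT INTO t VALUES(1)", 0, 0, 0);
      sqlite3_exec(db, "ROLLBACK TO sp1", 0, 0, 0);     /* OP_Savepoint P1==2: undo to sp1   */
      sqlite3_exec(db, "RELEASE sp1", 0, 0, 0);         /* OP_Savepoint P1==1: commit/forget */
      /* With no enclosing BEGIN, sp1 was a "transaction savepoint", so the
      ** RELEASE also returns the connection to autocommit mode. */
      printf("autocommit=%d\n", sqlite3_get_autocommit(db));
      sqlite3_close(db);
      return 0;
    }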
-*/ -case OP_AutoCommit: { - int desiredAutoCommit; - int iRollback; - int turnOnAC; - - desiredAutoCommit = pOp->p1; - iRollback = pOp->p2; - turnOnAC = desiredAutoCommit && !db->autoCommit; - assert( desiredAutoCommit==1 || desiredAutoCommit==0 ); - assert( desiredAutoCommit==1 || iRollback==0 ); - assert( db->nVdbeActive>0 ); /* At least this one VM is active */ - assert( p->bIsReader ); - -#if 0 - if( turnOnAC && iRollback && db->nVdbeActive>1 ){ - /* If this instruction implements a ROLLBACK and other VMs are - ** still running, and a transaction is active, return an error indicating - ** that the other VMs must complete first. - */ - sqlite3SetString(&p->zErrMsg, db, "cannot rollback transaction - " - "SQL statements in progress"); - rc = SQLITE_BUSY; - }else -#endif - if( turnOnAC && !iRollback && db->nVdbeWrite>0 ){ - /* If this instruction implements a COMMIT and other VMs are writing - ** return an error indicating that the other VMs must complete first. - */ - sqlite3SetString(&p->zErrMsg, db, "cannot commit transaction - " - "SQL statements in progress"); - rc = SQLITE_BUSY; - }else if( desiredAutoCommit!=db->autoCommit ){ - if( iRollback ){ - assert( desiredAutoCommit==1 ); - sqlite3RollbackAll(db, SQLITE_ABORT_ROLLBACK); - db->autoCommit = 1; - }else if( (rc = sqlite3VdbeCheckFk(p, 1))!=SQLITE_OK ){ - goto vdbe_return; - }else{ - db->autoCommit = (u8)desiredAutoCommit; - if( sqlite3VdbeHalt(p)==SQLITE_BUSY ){ - p->pc = pc; - db->autoCommit = (u8)(1-desiredAutoCommit); - p->rc = rc = SQLITE_BUSY; - goto vdbe_return; - } - } - assert( db->nStatement==0 ); - sqlite3CloseSavepoints(db); - if( p->rc==SQLITE_OK ){ - rc = SQLITE_DONE; - }else{ - rc = SQLITE_ERROR; - } - goto vdbe_return; - }else{ - sqlite3SetString(&p->zErrMsg, db, - (!desiredAutoCommit)?"cannot start a transaction within a transaction":( - (iRollback)?"cannot rollback - no transaction is active": - "cannot commit - no transaction is active")); - - rc = SQLITE_ERROR; - } - break; -} - -/* Opcode: Transaction P1 P2 P3 P4 P5 -** -** Begin a transaction on database P1 if a transaction is not already -** active. -** If P2 is non-zero, then a write-transaction is started, or if a -** read-transaction is already active, it is upgraded to a write-transaction. -** If P2 is zero, then a read-transaction is started. -** -** P1 is the index of the database file on which the transaction is -** started. Index 0 is the main database file and index 1 is the -** file used for temporary tables. Indices of 2 or more are used for -** attached databases. -** -** If a write-transaction is started and the Vdbe.usesStmtJournal flag is -** true (this flag is set if the Vdbe may modify more than one row and may -** throw an ABORT exception), a statement transaction may also be opened. -** More specifically, a statement transaction is opened iff the database -** connection is currently not in autocommit mode, or if there are other -** active statements. A statement transaction allows the changes made by this -** VDBE to be rolled back after an error without having to roll back the -** entire transaction. If no error is encountered, the statement transaction -** will automatically commit when the VDBE halts. -** -** If P5!=0 then this opcode also checks the schema cookie against P3 -** and the schema generation counter against P4. -** The cookie changes its value whenever the database schema changes. -** This operation is used to detect when that the cookie has changed -** and that the current process needs to reread the schema. 
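The error branches of OP_AutoCommit above surface as ordinary error messages through the public API. A small sketch of the two mismatched cases (error strings are the ones set by the opcode above):

    #include <stdio.h>
    #include <sqlite3.h>

    int main(void){
      sqlite3 *db;
      char *err = 0;
      if( sqlite3_open(":memory:", &db)!=SQLITE_OK ) return 1;

      /* COMMIT with no open transaction: desiredAutoCommit==db->autoCommit,
      ** so OP_AutoCommit reports "cannot commit - no transaction is active". */
      if( sqlite3_exec(db, "COMMIT", 0, 0, &err)!=SQLITE_OK ){
        printf("%s\n", err);
        sqlite3_free(err);
        err = 0;
      }

      /* BEGIN inside an open transaction:
      ** "cannot start a transaction within a transaction". */
      sqlite3_exec(db, "BEGIN", 0, 0, 0);
      if( sqlite3_exec(db, "BEGIN", 0, 0, &err)!=SQLITE_OK ){
        printf("%s\n", err);
        sqlite3_free(err);
      }
      sqlite3_exec(db, "ROLLBACK", 0, 0, 0);
      sqlite3_close(db);
      return 0;
    }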
If the schema -** cookie in P3 differs from the schema cookie in the database header or -** if the schema generation counter in P4 differs from the current -** generation counter, then an SQLITE_SCHEMA error is raised and execution -** halts. The sqlite3_step() wrapper function might then reprepare the -** statement and rerun it from the beginning. -*/ -case OP_Transaction: { - Btree *pBt; - int iMeta; - int iGen; - - assert( p->bIsReader ); - assert( p->readOnly==0 || pOp->p2==0 ); - assert( pOp->p1>=0 && pOp->p1nDb ); - assert( (p->btreeMask & (((yDbMask)1)<p1))!=0 ); - if( pOp->p2 && (db->flags & SQLITE_QueryOnly)!=0 ){ - rc = SQLITE_READONLY; - goto abort_due_to_error; - } - pBt = db->aDb[pOp->p1].pBt; - - if( pBt ){ - rc = sqlite3BtreeBeginTrans(pBt, pOp->p2); - if( rc==SQLITE_BUSY ){ - p->pc = pc; - p->rc = rc = SQLITE_BUSY; - goto vdbe_return; - } - if( rc!=SQLITE_OK ){ - goto abort_due_to_error; - } - - if( pOp->p2 && p->usesStmtJournal - && (db->autoCommit==0 || db->nVdbeRead>1) - ){ - assert( sqlite3BtreeIsInTrans(pBt) ); - if( p->iStatement==0 ){ - assert( db->nStatement>=0 && db->nSavepoint>=0 ); - db->nStatement++; - p->iStatement = db->nSavepoint + db->nStatement; - } - - rc = sqlite3VtabSavepoint(db, SAVEPOINT_BEGIN, p->iStatement-1); - if( rc==SQLITE_OK ){ - rc = sqlite3BtreeBeginStmt(pBt, p->iStatement); - } - - /* Store the current value of the database handles deferred constraint - ** counter. If the statement transaction needs to be rolled back, - ** the value of this counter needs to be restored too. */ - p->nStmtDefCons = db->nDeferredCons; - p->nStmtDefImmCons = db->nDeferredImmCons; - } - - /* Gather the schema version number for checking */ - sqlite3BtreeGetMeta(pBt, BTREE_SCHEMA_VERSION, (u32 *)&iMeta); - iGen = db->aDb[pOp->p1].pSchema->iGeneration; - }else{ - iGen = iMeta = 0; - } - assert( pOp->p5==0 || pOp->p4type==P4_INT32 ); - if( pOp->p5 && (iMeta!=pOp->p3 || iGen!=pOp->p4.i) ){ - sqlite3DbFree(db, p->zErrMsg); - p->zErrMsg = sqlite3DbStrDup(db, "database schema has changed"); - /* If the schema-cookie from the database file matches the cookie - ** stored with the in-memory representation of the schema, do - ** not reload the schema from the database file. - ** - ** If virtual-tables are in use, this is not just an optimization. - ** Often, v-tables store their data in other SQLite tables, which - ** are queried from within xNext() and other v-table methods using - ** prepared queries. If such a query is out-of-date, we do not want to - ** discard the database schema, as the user code implementing the - ** v-table would have to be ready for the sqlite3_vtab structure itself - ** to be invalidated whenever sqlite3_step() is called from within - ** a v-table method. - */ - if( db->aDb[pOp->p1].pSchema->schema_cookie!=iMeta ){ - sqlite3ResetOneSchema(db, pOp->p1); - } - p->expired = 1; - rc = SQLITE_SCHEMA; - } - break; -} - -/* Opcode: ReadCookie P1 P2 P3 * * -** -** Read cookie number P3 from database P1 and write it into register P2. -** P3==1 is the schema version. P3==2 is the database format. -** P3==3 is the recommended pager cache size, and so forth. P1==0 is -** the main database file and P1==1 is the database file used to store -** temporary tables. -** -** There must be a read-lock on the database (either a transaction -** must be started or there must be an open cursor) before -** executing this instruction. 
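The schema-cookie check described above is what makes stale prepared statements fail with SQLITE_SCHEMA; statements prepared with sqlite3_prepare_v2() re-prepare themselves behind sqlite3_step(), while the legacy sqlite3_prepare() surfaces the error to the caller. The current cookie is readable as PRAGMA schema_version and is bumped by any DDL. A sketch, assuming an in-memory database and a made-up table name:

    #include <stdio.h>
    #include <sqlite3.h>

    static int schemaVersion(sqlite3 *db){
      sqlite3_stmt *st;
      int v = -1;
      if( sqlite3_prepare_v2(db, "PRAGMA schema_version", -1, &st, 0)==SQLITE_OK ){
        if( sqlite3_step(st)==SQLITE_ROW ) v = sqlite3_column_int(st, 0);
        sqlite3_finalize(st);
      }
      return v;
    }

    int main(void){
      sqlite3 *db;
      if( sqlite3_open(":memory:", &db)!=SQLITE_OK ) return 1;
      printf("cookie before DDL: %d\n", schemaVersion(db));
      sqlite3_exec(db, "CREATE TABLE t(x)", 0, 0, 0);
      /* The DDL bumped the cookie; OP_Transaction compares the cookie baked
      ** into a prepared statement (P3) against the on-disk value and raises
      ** SQLITE_SCHEMA on mismatch. */
      printf("cookie after DDL:  %d\n", schemaVersion(db));
      sqlite3_close(db);
      return 0;
    }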
-*/ -case OP_ReadCookie: { /* out2-prerelease */ - int iMeta; - int iDb; - int iCookie; - - assert( p->bIsReader ); - iDb = pOp->p1; - iCookie = pOp->p3; - assert( pOp->p3=0 && iDbnDb ); - assert( db->aDb[iDb].pBt!=0 ); - assert( (p->btreeMask & (((yDbMask)1)<aDb[iDb].pBt, iCookie, (u32 *)&iMeta); - pOut->u.i = iMeta; - break; -} - -/* Opcode: SetCookie P1 P2 P3 * * -** -** Write the content of register P3 (interpreted as an integer) -** into cookie number P2 of database P1. P2==1 is the schema version. -** P2==2 is the database format. P2==3 is the recommended pager cache -** size, and so forth. P1==0 is the main database file and P1==1 is the -** database file used to store temporary tables. -** -** A transaction must be started before executing this opcode. -*/ -case OP_SetCookie: { /* in3 */ - Db *pDb; - assert( pOp->p2p1>=0 && pOp->p1nDb ); - assert( (p->btreeMask & (((yDbMask)1)<p1))!=0 ); - assert( p->readOnly==0 ); - pDb = &db->aDb[pOp->p1]; - assert( pDb->pBt!=0 ); - assert( sqlite3SchemaMutexHeld(db, pOp->p1, 0) ); - pIn3 = &aMem[pOp->p3]; - sqlite3VdbeMemIntegerify(pIn3); - /* See note about index shifting on OP_ReadCookie */ - rc = sqlite3BtreeUpdateMeta(pDb->pBt, pOp->p2, (int)pIn3->u.i); - if( pOp->p2==BTREE_SCHEMA_VERSION ){ - /* When the schema cookie changes, record the new cookie internally */ - pDb->pSchema->schema_cookie = (int)pIn3->u.i; - db->flags |= SQLITE_InternChanges; - }else if( pOp->p2==BTREE_FILE_FORMAT ){ - /* Record changes in the file format */ - pDb->pSchema->file_format = (u8)pIn3->u.i; - } - if( pOp->p1==1 ){ - /* Invalidate all prepared statements whenever the TEMP database - ** schema is changed. Ticket #1644 */ - sqlite3ExpirePreparedStatements(db); - p->expired = 0; - } - break; -} - -/* Opcode: OpenRead P1 P2 P3 P4 P5 -** Synopsis: root=P2 iDb=P3 -** -** Open a read-only cursor for the database table whose root page is -** P2 in a database file. The database file is determined by P3. -** P3==0 means the main database, P3==1 means the database used for -** temporary tables, and P3>1 means used the corresponding attached -** database. Give the new cursor an identifier of P1. The P1 -** values need not be contiguous but all P1 values should be small integers. -** It is an error for P1 to be negative. -** -** If P5!=0 then use the content of register P2 as the root page, not -** the value of P2 itself. -** -** There will be a read lock on the database whenever there is an -** open cursor. If the database was unlocked prior to this instruction -** then a read lock is acquired as part of this instruction. A read -** lock allows other processes to read the database but prohibits -** any other process from modifying the database. The read lock is -** released when all cursors are closed. If this instruction attempts -** to get a read lock but fails, the script terminates with an -** SQLITE_BUSY error code. -** -** The P4 value may be either an integer (P4_INT32) or a pointer to -** a KeyInfo structure (P4_KEYINFO). If it is a pointer to a KeyInfo -** structure, then said structure defines the content and collating -** sequence of the index being opened. Otherwise, if P4 is an integer -** value, it is set to the number of columns in the table. -** -** See also OpenWrite. -*/ -/* Opcode: OpenWrite P1 P2 P3 P4 P5 -** Synopsis: root=P2 iDb=P3 -** -** Open a read/write cursor named P1 on the table or index whose root -** page is P2. Or if P5!=0 use the content of register P2 to find the -** root page. 
-** -** The P4 value may be either an integer (P4_INT32) or a pointer to -** a KeyInfo structure (P4_KEYINFO). If it is a pointer to a KeyInfo -** structure, then said structure defines the content and collating -** sequence of the index being opened. Otherwise, if P4 is an integer -** value, it is set to the number of columns in the table, or to the -** largest index of any column of the table that is actually used. -** -** This instruction works just like OpenRead except that it opens the cursor -** in read/write mode. For a given table, there can be one or more read-only -** cursors or a single read/write cursor but not both. -** -** See also OpenRead. -*/ -case OP_OpenRead: -case OP_OpenWrite: { - int nField; - KeyInfo *pKeyInfo; - int p2; - int iDb; - int wrFlag; - Btree *pX; - VdbeCursor *pCur; - Db *pDb; - - assert( (pOp->p5&(OPFLAG_P2ISREG|OPFLAG_BULKCSR))==pOp->p5 ); - assert( pOp->opcode==OP_OpenWrite || pOp->p5==0 ); - assert( p->bIsReader ); - assert( pOp->opcode==OP_OpenRead || p->readOnly==0 ); - - if( p->expired ){ - rc = SQLITE_ABORT; - break; - } - - nField = 0; - pKeyInfo = 0; - p2 = pOp->p2; - iDb = pOp->p3; - assert( iDb>=0 && iDbnDb ); - assert( (p->btreeMask & (((yDbMask)1)<aDb[iDb]; - pX = pDb->pBt; - assert( pX!=0 ); - if( pOp->opcode==OP_OpenWrite ){ - wrFlag = 1; - assert( sqlite3SchemaMutexHeld(db, iDb, 0) ); - if( pDb->pSchema->file_format < p->minWriteFileFormat ){ - p->minWriteFileFormat = pDb->pSchema->file_format; - } - }else{ - wrFlag = 0; - } - if( pOp->p5 & OPFLAG_P2ISREG ){ - assert( p2>0 ); - assert( p2<=(p->nMem-p->nCursor) ); - pIn2 = &aMem[p2]; - assert( memIsValid(pIn2) ); - assert( (pIn2->flags & MEM_Int)!=0 ); - sqlite3VdbeMemIntegerify(pIn2); - p2 = (int)pIn2->u.i; - /* The p2 value always comes from a prior OP_CreateTable opcode and - ** that opcode will always set the p2 value to 2 or more or else fail. - ** If there were a failure, the prepared statement would have halted - ** before reaching this instruction. */ - if( NEVER(p2<2) ) { - rc = SQLITE_CORRUPT_BKPT; - goto abort_due_to_error; - } - } - if( pOp->p4type==P4_KEYINFO ){ - pKeyInfo = pOp->p4.pKeyInfo; - assert( pKeyInfo->enc==ENC(db) ); - assert( pKeyInfo->db==db ); - nField = pKeyInfo->nField+pKeyInfo->nXField; - }else if( pOp->p4type==P4_INT32 ){ - nField = pOp->p4.i; - } - assert( pOp->p1>=0 ); - assert( nField>=0 ); - testcase( nField==0 ); /* Table with INTEGER PRIMARY KEY and nothing else */ - pCur = allocateCursor(p, pOp->p1, nField, iDb, 1); - if( pCur==0 ) goto no_mem; - pCur->nullRow = 1; - pCur->isOrdered = 1; - rc = sqlite3BtreeCursor(pX, p2, wrFlag, pKeyInfo, pCur->pCursor); - pCur->pKeyInfo = pKeyInfo; - assert( OPFLAG_BULKCSR==BTREE_BULKLOAD ); - sqlite3BtreeCursorHints(pCur->pCursor, (pOp->p5 & OPFLAG_BULKCSR)); - - /* Since it performs no memory allocation or IO, the only value that - ** sqlite3BtreeCursor() may return is SQLITE_OK. */ - assert( rc==SQLITE_OK ); - - /* Set the VdbeCursor.isTable variable. Previous versions of - ** SQLite used to check if the root-page flags were sane at this point - ** and report database corruption if they were not, but this check has - ** since moved into the btree layer. */ - pCur->isTable = pOp->p4type!=P4_KEYINFO; - break; -} - -/* Opcode: OpenEphemeral P1 P2 * P4 P5 -** Synopsis: nColumn=P2 -** -** Open a new cursor P1 to a transient table. -** The cursor is always opened read/write even if -** the main database is read-only. The ephemeral -** table is deleted automatically when the cursor is closed. 
-** -** P2 is the number of columns in the ephemeral table. -** The cursor points to a BTree table if P4==0 and to a BTree index -** if P4 is not 0. If P4 is not NULL, it points to a KeyInfo structure -** that defines the format of keys in the index. -** -** The P5 parameter can be a mask of the BTREE_* flags defined -** in btree.h. These flags control aspects of the operation of -** the btree. The BTREE_OMIT_JOURNAL and BTREE_SINGLE flags are -** added automatically. -*/ -/* Opcode: OpenAutoindex P1 P2 * P4 * -** Synopsis: nColumn=P2 -** -** This opcode works the same as OP_OpenEphemeral. It has a -** different name to distinguish its use. Tables created using -** by this opcode will be used for automatically created transient -** indices in joins. -*/ -case OP_OpenAutoindex: -case OP_OpenEphemeral: { - VdbeCursor *pCx; - KeyInfo *pKeyInfo; - - static const int vfsFlags = - SQLITE_OPEN_READWRITE | - SQLITE_OPEN_CREATE | - SQLITE_OPEN_EXCLUSIVE | - SQLITE_OPEN_DELETEONCLOSE | - SQLITE_OPEN_TRANSIENT_DB; - assert( pOp->p1>=0 ); - assert( pOp->p2>=0 ); - pCx = allocateCursor(p, pOp->p1, pOp->p2, -1, 1); - if( pCx==0 ) goto no_mem; - pCx->nullRow = 1; - pCx->isEphemeral = 1; - rc = sqlite3BtreeOpen(db->pVfs, 0, db, &pCx->pBt, - BTREE_OMIT_JOURNAL | BTREE_SINGLE | pOp->p5, vfsFlags); - if( rc==SQLITE_OK ){ - rc = sqlite3BtreeBeginTrans(pCx->pBt, 1); - } - if( rc==SQLITE_OK ){ - /* If a transient index is required, create it by calling - ** sqlite3BtreeCreateTable() with the BTREE_BLOBKEY flag before - ** opening it. If a transient table is required, just use the - ** automatically created table with root-page 1 (an BLOB_INTKEY table). - */ - if( (pKeyInfo = pOp->p4.pKeyInfo)!=0 ){ - int pgno; - assert( pOp->p4type==P4_KEYINFO ); - rc = sqlite3BtreeCreateTable(pCx->pBt, &pgno, BTREE_BLOBKEY | pOp->p5); - if( rc==SQLITE_OK ){ - assert( pgno==MASTER_ROOT+1 ); - assert( pKeyInfo->db==db ); - assert( pKeyInfo->enc==ENC(db) ); - pCx->pKeyInfo = pKeyInfo; - rc = sqlite3BtreeCursor(pCx->pBt, pgno, 1, pKeyInfo, pCx->pCursor); - } - pCx->isTable = 0; - }else{ - rc = sqlite3BtreeCursor(pCx->pBt, MASTER_ROOT, 1, 0, pCx->pCursor); - pCx->isTable = 1; - } - } - pCx->isOrdered = (pOp->p5!=BTREE_UNORDERED); - break; -} - -/* Opcode: SorterOpen P1 P2 * P4 * -** -** This opcode works like OP_OpenEphemeral except that it opens -** a transient index that is specifically designed to sort large -** tables using an external merge-sort algorithm. -*/ -case OP_SorterOpen: { - VdbeCursor *pCx; - - assert( pOp->p1>=0 ); - assert( pOp->p2>=0 ); - pCx = allocateCursor(p, pOp->p1, pOp->p2, -1, 1); - if( pCx==0 ) goto no_mem; - pCx->pKeyInfo = pOp->p4.pKeyInfo; - assert( pCx->pKeyInfo->db==db ); - assert( pCx->pKeyInfo->enc==ENC(db) ); - rc = sqlite3VdbeSorterInit(db, pCx); - break; -} - -/* Opcode: OpenPseudo P1 P2 P3 * * -** Synopsis: P3 columns in r[P2] -** -** Open a new cursor that points to a fake table that contains a single -** row of data. The content of that one row is the content of memory -** register P2. In other words, cursor P1 becomes an alias for the -** MEM_Blob content contained in register P2. -** -** A pseudo-table created by this opcode is used to hold a single -** row output from the sorter so that the row can be decomposed into -** individual columns using the OP_Column opcode. The OP_Column opcode -** is the only cursor opcode that works with a pseudo-table. -** -** P3 is the number of fields in the records that will be stored by -** the pseudo-table. 
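The transient b-trees opened by OP_OpenEphemeral/OP_OpenAutoindex above are easiest to observe with EXPLAIN, which lists the VDBE program for a statement. The sketch below prints the opcode column for a DISTINCT query, which commonly uses a transient index for de-duplication; the exact program is version- and planner-dependent, so treat the output as illustrative:

    #include <stdio.h>
    #include <sqlite3.h>

    int main(void){
      sqlite3 *db;
      sqlite3_stmt *st;
      if( sqlite3_open(":memory:", &db)!=SQLITE_OK ) return 1;
      sqlite3_exec(db, "CREATE TABLE t(a,b)", 0, 0, 0);
      /* Column 1 of EXPLAIN output is the opcode name; look for OpenEphemeral. */
      if( sqlite3_prepare_v2(db, "EXPLAIN SELECT DISTINCT b FROM t", -1, &st, 0)==SQLITE_OK ){
        while( sqlite3_step(st)==SQLITE_ROW ){
          printf("%s\n", (const char*)sqlite3_column_text(st, 1));
        }
        sqlite3_finalize(st);
      }
      sqlite3_close(db);
      return 0;
    }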
-*/ -case OP_OpenPseudo: { - VdbeCursor *pCx; - - assert( pOp->p1>=0 ); - assert( pOp->p3>=0 ); - pCx = allocateCursor(p, pOp->p1, pOp->p3, -1, 0); - if( pCx==0 ) goto no_mem; - pCx->nullRow = 1; - pCx->pseudoTableReg = pOp->p2; - pCx->isTable = 1; - assert( pOp->p5==0 ); - break; -} - -/* Opcode: Close P1 * * * * -** -** Close a cursor previously opened as P1. If P1 is not -** currently open, this instruction is a no-op. -*/ -case OP_Close: { - assert( pOp->p1>=0 && pOp->p1nCursor ); - sqlite3VdbeFreeCursor(p, p->apCsr[pOp->p1]); - p->apCsr[pOp->p1] = 0; - break; -} - -/* Opcode: SeekGe P1 P2 P3 P4 * -** Synopsis: key=r[P3@P4] -** -** If cursor P1 refers to an SQL table (B-Tree that uses integer keys), -** use the value in register P3 as the key. If cursor P1 refers -** to an SQL index, then P3 is the first in an array of P4 registers -** that are used as an unpacked index key. -** -** Reposition cursor P1 so that it points to the smallest entry that -** is greater than or equal to the key value. If there are no records -** greater than or equal to the key and P2 is not zero, then jump to P2. -** -** See also: Found, NotFound, SeekLt, SeekGt, SeekLe -*/ -/* Opcode: SeekGt P1 P2 P3 P4 * -** Synopsis: key=r[P3@P4] -** -** If cursor P1 refers to an SQL table (B-Tree that uses integer keys), -** use the value in register P3 as a key. If cursor P1 refers -** to an SQL index, then P3 is the first in an array of P4 registers -** that are used as an unpacked index key. -** -** Reposition cursor P1 so that it points to the smallest entry that -** is greater than the key value. If there are no records greater than -** the key and P2 is not zero, then jump to P2. -** -** See also: Found, NotFound, SeekLt, SeekGe, SeekLe -*/ -/* Opcode: SeekLt P1 P2 P3 P4 * -** Synopsis: key=r[P3@P4] -** -** If cursor P1 refers to an SQL table (B-Tree that uses integer keys), -** use the value in register P3 as a key. If cursor P1 refers -** to an SQL index, then P3 is the first in an array of P4 registers -** that are used as an unpacked index key. -** -** Reposition cursor P1 so that it points to the largest entry that -** is less than the key value. If there are no records less than -** the key and P2 is not zero, then jump to P2. -** -** See also: Found, NotFound, SeekGt, SeekGe, SeekLe -*/ -/* Opcode: SeekLe P1 P2 P3 P4 * -** Synopsis: key=r[P3@P4] -** -** If cursor P1 refers to an SQL table (B-Tree that uses integer keys), -** use the value in register P3 as a key. If cursor P1 refers -** to an SQL index, then P3 is the first in an array of P4 registers -** that are used as an unpacked index key. -** -** Reposition cursor P1 so that it points to the largest entry that -** is less than or equal to the key value. If there are no records -** less than or equal to the key and P2 is not zero, then jump to P2. 
-** -** See also: Found, NotFound, SeekGt, SeekGe, SeekLt -*/ -case OP_SeekLT: /* jump, in3 */ -case OP_SeekLE: /* jump, in3 */ -case OP_SeekGE: /* jump, in3 */ -case OP_SeekGT: { /* jump, in3 */ - int res; - int oc; - VdbeCursor *pC; - UnpackedRecord r; - int nField; - i64 iKey; /* The rowid we are to seek to */ - - assert( pOp->p1>=0 && pOp->p1nCursor ); - assert( pOp->p2!=0 ); - pC = p->apCsr[pOp->p1]; - assert( pC!=0 ); - assert( pC->pseudoTableReg==0 ); - assert( OP_SeekLE == OP_SeekLT+1 ); - assert( OP_SeekGE == OP_SeekLT+2 ); - assert( OP_SeekGT == OP_SeekLT+3 ); - assert( pC->isOrdered ); - assert( pC->pCursor!=0 ); - oc = pOp->opcode; - pC->nullRow = 0; - if( pC->isTable ){ - /* The input value in P3 might be of any type: integer, real, string, - ** blob, or NULL. But it needs to be an integer before we can do - ** the seek, so covert it. */ - pIn3 = &aMem[pOp->p3]; - applyNumericAffinity(pIn3); - iKey = sqlite3VdbeIntValue(pIn3); - pC->rowidIsValid = 0; - - /* If the P3 value could not be converted into an integer without - ** loss of information, then special processing is required... */ - if( (pIn3->flags & MEM_Int)==0 ){ - if( (pIn3->flags & MEM_Real)==0 ){ - /* If the P3 value cannot be converted into any kind of a number, - ** then the seek is not possible, so jump to P2 */ - pc = pOp->p2 - 1; VdbeBranchTaken(1,2); - break; - } - - /* If the approximation iKey is larger than the actual real search - ** term, substitute >= for > and < for <=. e.g. if the search term - ** is 4.9 and the integer approximation 5: - ** - ** (x > 4.9) -> (x >= 5) - ** (x <= 4.9) -> (x < 5) - */ - if( pIn3->r<(double)iKey ){ - assert( OP_SeekGE==(OP_SeekGT-1) ); - assert( OP_SeekLT==(OP_SeekLE-1) ); - assert( (OP_SeekLE & 0x0001)==(OP_SeekGT & 0x0001) ); - if( (oc & 0x0001)==(OP_SeekGT & 0x0001) ) oc--; - } - - /* If the approximation iKey is smaller than the actual real search - ** term, substitute <= for < and > for >=. */ - else if( pIn3->r>(double)iKey ){ - assert( OP_SeekLE==(OP_SeekLT+1) ); - assert( OP_SeekGT==(OP_SeekGE+1) ); - assert( (OP_SeekLT & 0x0001)==(OP_SeekGE & 0x0001) ); - if( (oc & 0x0001)==(OP_SeekLT & 0x0001) ) oc++; - } - } - rc = sqlite3BtreeMovetoUnpacked(pC->pCursor, 0, (u64)iKey, 0, &res); - if( rc!=SQLITE_OK ){ - goto abort_due_to_error; - } - if( res==0 ){ - pC->rowidIsValid = 1; - pC->lastRowid = iKey; - } - }else{ - nField = pOp->p4.i; - assert( pOp->p4type==P4_INT32 ); - assert( nField>0 ); - r.pKeyInfo = pC->pKeyInfo; - r.nField = (u16)nField; - - /* The next line of code computes as follows, only faster: - ** if( oc==OP_SeekGT || oc==OP_SeekLE ){ - ** r.default_rc = -1; - ** }else{ - ** r.default_rc = +1; - ** } - */ - r.default_rc = ((1 & (oc - OP_SeekLT)) ? 
-1 : +1); - assert( oc!=OP_SeekGT || r.default_rc==-1 ); - assert( oc!=OP_SeekLE || r.default_rc==-1 ); - assert( oc!=OP_SeekGE || r.default_rc==+1 ); - assert( oc!=OP_SeekLT || r.default_rc==+1 ); - - r.aMem = &aMem[pOp->p3]; -#ifdef SQLITE_DEBUG - { int i; for(i=0; ipCursor, &r, 0, 0, &res); - if( rc!=SQLITE_OK ){ - goto abort_due_to_error; - } - pC->rowidIsValid = 0; - } - pC->deferredMoveto = 0; - pC->cacheStatus = CACHE_STALE; -#ifdef SQLITE_TEST - sqlite3_search_count++; -#endif - if( oc>=OP_SeekGE ){ assert( oc==OP_SeekGE || oc==OP_SeekGT ); - if( res<0 || (res==0 && oc==OP_SeekGT) ){ - res = 0; - rc = sqlite3BtreeNext(pC->pCursor, &res); - if( rc!=SQLITE_OK ) goto abort_due_to_error; - pC->rowidIsValid = 0; - }else{ - res = 0; - } - }else{ - assert( oc==OP_SeekLT || oc==OP_SeekLE ); - if( res>0 || (res==0 && oc==OP_SeekLT) ){ - res = 0; - rc = sqlite3BtreePrevious(pC->pCursor, &res); - if( rc!=SQLITE_OK ) goto abort_due_to_error; - pC->rowidIsValid = 0; - }else{ - /* res might be negative because the table is empty. Check to - ** see if this is the case. - */ - res = sqlite3BtreeEof(pC->pCursor); - } - } - assert( pOp->p2>0 ); - VdbeBranchTaken(res!=0,2); - if( res ){ - pc = pOp->p2 - 1; - } - break; -} - -/* Opcode: Seek P1 P2 * * * -** Synopsis: intkey=r[P2] -** -** P1 is an open table cursor and P2 is a rowid integer. Arrange -** for P1 to move so that it points to the rowid given by P2. -** -** This is actually a deferred seek. Nothing actually happens until -** the cursor is used to read a record. That way, if no reads -** occur, no unnecessary I/O happens. -*/ -case OP_Seek: { /* in2 */ - VdbeCursor *pC; - - assert( pOp->p1>=0 && pOp->p1nCursor ); - pC = p->apCsr[pOp->p1]; - assert( pC!=0 ); - assert( pC->pCursor!=0 ); - assert( pC->isTable ); - pC->nullRow = 0; - pIn2 = &aMem[pOp->p2]; - pC->movetoTarget = sqlite3VdbeIntValue(pIn2); - pC->rowidIsValid = 0; - pC->deferredMoveto = 1; - break; -} - - -/* Opcode: Found P1 P2 P3 P4 * -** Synopsis: key=r[P3@P4] -** -** If P4==0 then register P3 holds a blob constructed by MakeRecord. If -** P4>0 then register P3 is the first of P4 registers that form an unpacked -** record. -** -** Cursor P1 is on an index btree. If the record identified by P3 and P4 -** is a prefix of any entry in P1 then a jump is made to P2 and -** P1 is left pointing at the matching entry. -** -** See also: NotFound, NoConflict, NotExists. SeekGe -*/ -/* Opcode: NotFound P1 P2 P3 P4 * -** Synopsis: key=r[P3@P4] -** -** If P4==0 then register P3 holds a blob constructed by MakeRecord. If -** P4>0 then register P3 is the first of P4 registers that form an unpacked -** record. -** -** Cursor P1 is on an index btree. If the record identified by P3 and P4 -** is not the prefix of any entry in P1 then a jump is made to P2. If P1 -** does contain an entry whose prefix matches the P3/P4 record then control -** falls through to the next instruction and P1 is left pointing at the -** matching entry. -** -** See also: Found, NotExists, NoConflict -*/ -/* Opcode: NoConflict P1 P2 P3 P4 * -** Synopsis: key=r[P3@P4] -** -** If P4==0 then register P3 holds a blob constructed by MakeRecord. If -** P4>0 then register P3 is the first of P4 registers that form an unpacked -** record. -** -** Cursor P1 is on an index btree. If the record identified by P3 and P4 -** contains any NULL value, jump immediately to P2. If all terms of the -** record are not-NULL then a check is done to determine if any row in the -** P1 index btree has a matching key prefix. 
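The operator adjustment for real-valued search keys, described a few lines above in the OP_Seek* comments, can be stated as a tiny standalone function. This is an illustration only; adjustSeek and its single-character operator encoding ('>' for GT, 'G' for GE, '<' for LT, 'L' for LE) are made up, not the library's code:

    #include <stdio.h>

    /* When the integer approximation ikey differs from the real search term,
    ** tweak the comparison so an integer seek finds the same rows. */
    static void adjustSeek(double key, long long ikey, char *op){
      if( (double)ikey>key ){
        /* approximation is high, e.g. 4.9 approximated by 5:
        ** (x > 4.9) -> (x >= 5),  (x <= 4.9) -> (x < 5) */
        if( *op=='>' ) *op = 'G';
        else if( *op=='L' ) *op = '<';
      }else if( (double)ikey<key ){
        /* approximation is low, e.g. 5.1 approximated by 5:
        ** (x < 5.1) -> (x <= 5),  (x >= 5.1) -> (x > 5) */
        if( *op=='<' ) *op = 'L';
        else if( *op=='G' ) *op = '>';
      }
    }

    int main(void){
      char op = '>';            /* x > 4.9 */
      adjustSeek(4.9, 5, &op);  /* 5 is the approximation used in the comment's example */
      printf("equivalent integer seek: x %s 5\n",
             op=='G' ? ">=" : op=='L' ? "<=" : op=='<' ? "<" : ">");
      return 0;
    }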
If there are no matches, jump -** immediately to P2. If there is a match, fall through and leave the P1 -** cursor pointing to the matching row. -** -** This opcode is similar to OP_NotFound with the exceptions that the -** branch is always taken if any part of the search key input is NULL. -** -** See also: NotFound, Found, NotExists -*/ -case OP_NoConflict: /* jump, in3 */ -case OP_NotFound: /* jump, in3 */ -case OP_Found: { /* jump, in3 */ - int alreadyExists; - int ii; - VdbeCursor *pC; - int res; - char *pFree; - UnpackedRecord *pIdxKey; - UnpackedRecord r; - char aTempRec[ROUND8(sizeof(UnpackedRecord)) + sizeof(Mem)*4 + 7]; - -#ifdef SQLITE_TEST - if( pOp->opcode!=OP_NoConflict ) sqlite3_found_count++; -#endif - - assert( pOp->p1>=0 && pOp->p1nCursor ); - assert( pOp->p4type==P4_INT32 ); - pC = p->apCsr[pOp->p1]; - assert( pC!=0 ); - pIn3 = &aMem[pOp->p3]; - assert( pC->pCursor!=0 ); - assert( pC->isTable==0 ); - pFree = 0; /* Not needed. Only used to suppress a compiler warning. */ - if( pOp->p4.i>0 ){ - r.pKeyInfo = pC->pKeyInfo; - r.nField = (u16)pOp->p4.i; - r.aMem = pIn3; - for(ii=0; iip3+ii, &r.aMem[ii]); -#endif - } - pIdxKey = &r; - }else{ - pIdxKey = sqlite3VdbeAllocUnpackedRecord( - pC->pKeyInfo, aTempRec, sizeof(aTempRec), &pFree - ); - if( pIdxKey==0 ) goto no_mem; - assert( pIn3->flags & MEM_Blob ); - assert( (pIn3->flags & MEM_Zero)==0 ); /* zeroblobs already expanded */ - sqlite3VdbeRecordUnpack(pC->pKeyInfo, pIn3->n, pIn3->z, pIdxKey); - } - pIdxKey->default_rc = 0; - if( pOp->opcode==OP_NoConflict ){ - /* For the OP_NoConflict opcode, take the jump if any of the - ** input fields are NULL, since any key with a NULL will not - ** conflict */ - for(ii=0; iip2 - 1; VdbeBranchTaken(1,2); - break; - } - } - } - rc = sqlite3BtreeMovetoUnpacked(pC->pCursor, pIdxKey, 0, 0, &res); - if( pOp->p4.i==0 ){ - sqlite3DbFree(db, pFree); - } - if( rc!=SQLITE_OK ){ - break; - } - pC->seekResult = res; - alreadyExists = (res==0); - pC->nullRow = 1-alreadyExists; - pC->deferredMoveto = 0; - pC->cacheStatus = CACHE_STALE; - if( pOp->opcode==OP_Found ){ - VdbeBranchTaken(alreadyExists!=0,2); - if( alreadyExists ) pc = pOp->p2 - 1; - }else{ - VdbeBranchTaken(alreadyExists==0,2); - if( !alreadyExists ) pc = pOp->p2 - 1; - } - break; -} - -/* Opcode: NotExists P1 P2 P3 * * -** Synopsis: intkey=r[P3] -** -** P1 is the index of a cursor open on an SQL table btree (with integer -** keys). P3 is an integer rowid. If P1 does not contain a record with -** rowid P3 then jump immediately to P2. If P1 does contain a record -** with rowid P3 then leave the cursor pointing at that record and fall -** through to the next instruction. -** -** The OP_NotFound opcode performs the same operation on index btrees -** (with arbitrary multi-value keys). 
-** -** See also: Found, NotFound, NoConflict -*/ -case OP_NotExists: { /* jump, in3 */ - VdbeCursor *pC; - BtCursor *pCrsr; - int res; - u64 iKey; - - pIn3 = &aMem[pOp->p3]; - assert( pIn3->flags & MEM_Int ); - assert( pOp->p1>=0 && pOp->p1nCursor ); - pC = p->apCsr[pOp->p1]; - assert( pC!=0 ); - assert( pC->isTable ); - assert( pC->pseudoTableReg==0 ); - pCrsr = pC->pCursor; - assert( pCrsr!=0 ); - res = 0; - iKey = pIn3->u.i; - rc = sqlite3BtreeMovetoUnpacked(pCrsr, 0, iKey, 0, &res); - pC->lastRowid = pIn3->u.i; - pC->rowidIsValid = res==0 ?1:0; - pC->nullRow = 0; - pC->cacheStatus = CACHE_STALE; - pC->deferredMoveto = 0; - VdbeBranchTaken(res!=0,2); - if( res!=0 ){ - pc = pOp->p2 - 1; - assert( pC->rowidIsValid==0 ); - } - pC->seekResult = res; - break; -} - -/* Opcode: Sequence P1 P2 * * * -** Synopsis: r[P2]=cursor[P1].ctr++ -** -** Find the next available sequence number for cursor P1. -** Write the sequence number into register P2. -** The sequence number on the cursor is incremented after this -** instruction. -*/ -case OP_Sequence: { /* out2-prerelease */ - assert( pOp->p1>=0 && pOp->p1nCursor ); - assert( p->apCsr[pOp->p1]!=0 ); - pOut->u.i = p->apCsr[pOp->p1]->seqCount++; - break; -} - - -/* Opcode: NewRowid P1 P2 P3 * * -** Synopsis: r[P2]=rowid -** -** Get a new integer record number (a.k.a "rowid") used as the key to a table. -** The record number is not previously used as a key in the database -** table that cursor P1 points to. The new record number is written -** written to register P2. -** -** If P3>0 then P3 is a register in the root frame of this VDBE that holds -** the largest previously generated record number. No new record numbers are -** allowed to be less than this value. When this value reaches its maximum, -** an SQLITE_FULL error is generated. The P3 register is updated with the ' -** generated record number. This P3 mechanism is used to help implement the -** AUTOINCREMENT feature. -*/ -case OP_NewRowid: { /* out2-prerelease */ - i64 v; /* The new rowid */ - VdbeCursor *pC; /* Cursor of table to get the new rowid */ - int res; /* Result of an sqlite3BtreeLast() */ - int cnt; /* Counter to limit the number of searches */ - Mem *pMem; /* Register holding largest rowid for AUTOINCREMENT */ - VdbeFrame *pFrame; /* Root frame of VDBE */ - - v = 0; - res = 0; - assert( pOp->p1>=0 && pOp->p1nCursor ); - pC = p->apCsr[pOp->p1]; - assert( pC!=0 ); - if( NEVER(pC->pCursor==0) ){ - /* The zero initialization above is all that is needed */ - }else{ - /* The next rowid or record number (different terms for the same - ** thing) is obtained in a two-step algorithm. - ** - ** First we attempt to find the largest existing rowid and add one - ** to that. But if the largest existing rowid is already the maximum - ** positive integer, we have to fall through to the second - ** probabilistic algorithm - ** - ** The second algorithm is to select a rowid at random and see if - ** it already exists in the table. If it does not exist, we have - ** succeeded. If the random rowid does exist, we select a new one - ** and try again, up to 100 times. - */ - assert( pC->isTable ); - -#ifdef SQLITE_32BIT_ROWID -# define MAX_ROWID 0x7fffffff -#else - /* Some compilers complain about constants of the form 0x7fffffffffffffff. - ** Others complain about 0x7ffffffffffffffffLL. The following macro seems - ** to provide the constant while making all compilers happy. 
- */ -# define MAX_ROWID (i64)( (((u64)0x7fffffff)<<32) | (u64)0xffffffff ) -#endif - - if( !pC->useRandomRowid ){ - rc = sqlite3BtreeLast(pC->pCursor, &res); - if( rc!=SQLITE_OK ){ - goto abort_due_to_error; - } - if( res ){ - v = 1; /* IMP: R-61914-48074 */ - }else{ - assert( sqlite3BtreeCursorIsValid(pC->pCursor) ); - rc = sqlite3BtreeKeySize(pC->pCursor, &v); - assert( rc==SQLITE_OK ); /* Cannot fail following BtreeLast() */ - if( v>=MAX_ROWID ){ - pC->useRandomRowid = 1; - }else{ - v++; /* IMP: R-29538-34987 */ - } - } - } - -#ifndef SQLITE_OMIT_AUTOINCREMENT - if( pOp->p3 ){ - /* Assert that P3 is a valid memory cell. */ - assert( pOp->p3>0 ); - if( p->pFrame ){ - for(pFrame=p->pFrame; pFrame->pParent; pFrame=pFrame->pParent); - /* Assert that P3 is a valid memory cell. */ - assert( pOp->p3<=pFrame->nMem ); - pMem = &pFrame->aMem[pOp->p3]; - }else{ - /* Assert that P3 is a valid memory cell. */ - assert( pOp->p3<=(p->nMem-p->nCursor) ); - pMem = &aMem[pOp->p3]; - memAboutToChange(p, pMem); - } - assert( memIsValid(pMem) ); - - REGISTER_TRACE(pOp->p3, pMem); - sqlite3VdbeMemIntegerify(pMem); - assert( (pMem->flags & MEM_Int)!=0 ); /* mem(P3) holds an integer */ - if( pMem->u.i==MAX_ROWID || pC->useRandomRowid ){ - rc = SQLITE_FULL; /* IMP: R-12275-61338 */ - goto abort_due_to_error; - } - if( vu.i+1 ){ - v = pMem->u.i + 1; - } - pMem->u.i = v; - } -#endif - if( pC->useRandomRowid ){ - /* IMPLEMENTATION-OF: R-07677-41881 If the largest ROWID is equal to the - ** largest possible integer (9223372036854775807) then the database - ** engine starts picking positive candidate ROWIDs at random until - ** it finds one that is not previously used. */ - assert( pOp->p3==0 ); /* We cannot be in random rowid mode if this is - ** an AUTOINCREMENT table. */ - /* on the first attempt, simply do one more than previous */ - v = lastRowid; - v &= (MAX_ROWID>>1); /* ensure doesn't go negative */ - v++; /* ensure non-zero */ - cnt = 0; - while( ((rc = sqlite3BtreeMovetoUnpacked(pC->pCursor, 0, (u64)v, - 0, &res))==SQLITE_OK) - && (res==0) - && (++cnt<100)){ - /* collision - try another random rowid */ - sqlite3_randomness(sizeof(v), &v); - if( cnt<5 ){ - /* try "small" random rowids for the initial attempts */ - v &= 0xffffff; - }else{ - v &= (MAX_ROWID>>1); /* ensure doesn't go negative */ - } - v++; /* ensure non-zero */ - } - if( rc==SQLITE_OK && res==0 ){ - rc = SQLITE_FULL; /* IMP: R-38219-53002 */ - goto abort_due_to_error; - } - assert( v>0 ); /* EV: R-40812-03570 */ - } - pC->rowidIsValid = 0; - pC->deferredMoveto = 0; - pC->cacheStatus = CACHE_STALE; - } - pOut->u.i = v; - break; -} - -/* Opcode: Insert P1 P2 P3 P4 P5 -** Synopsis: intkey=r[P3] data=r[P2] -** -** Write an entry into the table of cursor P1. A new entry is -** created if it doesn't already exist or the data for an existing -** entry is overwritten. The data is the value MEM_Blob stored in register -** number P2. The key is stored in register P3. The key must -** be a MEM_Int. -** -** If the OPFLAG_NCHANGE flag of P5 is set, then the row change count is -** incremented (otherwise not). If the OPFLAG_LASTROWID flag of P5 is set, -** then rowid is stored for subsequent return by the -** sqlite3_last_insert_rowid() function (otherwise it is unmodified). 
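The OPFLAG_LASTROWID and OPFLAG_NCHANGE side effects mentioned in the OP_Insert comment are what feed sqlite3_last_insert_rowid() and sqlite3_changes() on the application side. A minimal sketch (table name made up):

    #include <stdio.h>
    #include <sqlite3.h>

    int main(void){
      sqlite3 *db;
      if( sqlite3_open(":memory:", &db)!=SQLITE_OK ) return 1;
      sqlite3_exec(db, "CREATE TABLE t(x)", 0, 0, 0);
      sqlite3_exec(db, "INSERT INTO t VALUES(42)", 0, 0, 0);
      /* OPFLAG_LASTROWID stored the new key; OPFLAG_NCHANGE bumped the counter. */
      printf("rowid=%lld changes=%d\n",
             (long long)sqlite3_last_insert_rowid(db), sqlite3_changes(db));
      sqlite3_close(db);
      return 0;
    }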
-** -** If the OPFLAG_USESEEKRESULT flag of P5 is set and if the result of -** the last seek operation (OP_NotExists) was a success, then this -** operation will not attempt to find the appropriate row before doing -** the insert but will instead overwrite the row that the cursor is -** currently pointing to. Presumably, the prior OP_NotExists opcode -** has already positioned the cursor correctly. This is an optimization -** that boosts performance by avoiding redundant seeks. -** -** If the OPFLAG_ISUPDATE flag is set, then this opcode is part of an -** UPDATE operation. Otherwise (if the flag is clear) then this opcode -** is part of an INSERT operation. The difference is only important to -** the update hook. -** -** Parameter P4 may point to a string containing the table-name, or -** may be NULL. If it is not NULL, then the update-hook -** (sqlite3.xUpdateCallback) is invoked following a successful insert. -** -** (WARNING/TODO: If P1 is a pseudo-cursor and P2 is dynamically -** allocated, then ownership of P2 is transferred to the pseudo-cursor -** and register P2 becomes ephemeral. If the cursor is changed, the -** value of register P2 will then change. Make sure this does not -** cause any problems.) -** -** This instruction only works on tables. The equivalent instruction -** for indices is OP_IdxInsert. -*/ -/* Opcode: InsertInt P1 P2 P3 P4 P5 -** Synopsis: intkey=P3 data=r[P2] -** -** This works exactly like OP_Insert except that the key is the -** integer value P3, not the value of the integer stored in register P3. -*/ -case OP_Insert: -case OP_InsertInt: { - Mem *pData; /* MEM cell holding data for the record to be inserted */ - Mem *pKey; /* MEM cell holding key for the record */ - i64 iKey; /* The integer ROWID or key for the record to be inserted */ - VdbeCursor *pC; /* Cursor to table into which insert is written */ - int nZero; /* Number of zero-bytes to append */ - int seekResult; /* Result of prior seek or 0 if no USESEEKRESULT flag */ - const char *zDb; /* database name - used by the update hook */ - const char *zTbl; /* Table name - used by the opdate hook */ - int op; /* Opcode for update hook: SQLITE_UPDATE or SQLITE_INSERT */ - - pData = &aMem[pOp->p2]; - assert( pOp->p1>=0 && pOp->p1nCursor ); - assert( memIsValid(pData) ); - pC = p->apCsr[pOp->p1]; - assert( pC!=0 ); - assert( pC->pCursor!=0 ); - assert( pC->pseudoTableReg==0 ); - assert( pC->isTable ); - REGISTER_TRACE(pOp->p2, pData); - - if( pOp->opcode==OP_Insert ){ - pKey = &aMem[pOp->p3]; - assert( pKey->flags & MEM_Int ); - assert( memIsValid(pKey) ); - REGISTER_TRACE(pOp->p3, pKey); - iKey = pKey->u.i; - }else{ - assert( pOp->opcode==OP_InsertInt ); - iKey = pOp->p3; - } - - if( pOp->p5 & OPFLAG_NCHANGE ) p->nChange++; - if( pOp->p5 & OPFLAG_LASTROWID ) db->lastRowid = lastRowid = iKey; - if( pData->flags & MEM_Null ){ - pData->z = 0; - pData->n = 0; - }else{ - assert( pData->flags & (MEM_Blob|MEM_Str) ); - } - seekResult = ((pOp->p5 & OPFLAG_USESEEKRESULT) ? pC->seekResult : 0); - if( pData->flags & MEM_Zero ){ - nZero = pData->u.nZero; - }else{ - nZero = 0; - } - rc = sqlite3BtreeInsert(pC->pCursor, 0, iKey, - pData->z, pData->n, nZero, - (pOp->p5 & OPFLAG_APPEND)!=0, seekResult - ); - pC->rowidIsValid = 0; - pC->deferredMoveto = 0; - pC->cacheStatus = CACHE_STALE; - - /* Invoke the update-hook if required. */ - if( rc==SQLITE_OK && db->xUpdateCallback && pOp->p4.z ){ - zDb = db->aDb[pC->iDb].zName; - zTbl = pOp->p4.z; - op = ((pOp->p5 & OPFLAG_ISUPDATE) ? 
SQLITE_UPDATE : SQLITE_INSERT); - assert( pC->isTable ); - db->xUpdateCallback(db->pUpdateArg, op, zDb, zTbl, iKey); - assert( pC->iDb>=0 ); - } - break; -} - -/* Opcode: Delete P1 P2 * P4 * -** -** Delete the record at which the P1 cursor is currently pointing. -** -** The cursor will be left pointing at either the next or the previous -** record in the table. If it is left pointing at the next record, then -** the next Next instruction will be a no-op. Hence it is OK to delete -** a record from within an Next loop. -** -** If the OPFLAG_NCHANGE flag of P2 is set, then the row change count is -** incremented (otherwise not). -** -** P1 must not be pseudo-table. It has to be a real table with -** multiple rows. -** -** If P4 is not NULL, then it is the name of the table that P1 is -** pointing to. The update hook will be invoked, if it exists. -** If P4 is not NULL then the P1 cursor must have been positioned -** using OP_NotFound prior to invoking this opcode. -*/ -case OP_Delete: { - i64 iKey; - VdbeCursor *pC; - - assert( pOp->p1>=0 && pOp->p1nCursor ); - pC = p->apCsr[pOp->p1]; - assert( pC!=0 ); - assert( pC->pCursor!=0 ); /* Only valid for real tables, no pseudotables */ - iKey = pC->lastRowid; /* Only used for the update hook */ - - /* The OP_Delete opcode always follows an OP_NotExists or OP_Last or - ** OP_Column on the same table without any intervening operations that - ** might move or invalidate the cursor. Hence cursor pC is always pointing - ** to the row to be deleted and the sqlite3VdbeCursorMoveto() operation - ** below is always a no-op and cannot fail. We will run it anyhow, though, - ** to guard against future changes to the code generator. - **/ - assert( pC->deferredMoveto==0 ); - rc = sqlite3VdbeCursorMoveto(pC); - if( NEVER(rc!=SQLITE_OK) ) goto abort_due_to_error; - - rc = sqlite3BtreeDelete(pC->pCursor); - pC->cacheStatus = CACHE_STALE; - - /* Invoke the update-hook if required. */ - if( rc==SQLITE_OK && db->xUpdateCallback && pOp->p4.z && pC->isTable ){ - db->xUpdateCallback(db->pUpdateArg, SQLITE_DELETE, - db->aDb[pC->iDb].zName, pOp->p4.z, iKey); - assert( pC->iDb>=0 ); - } - if( pOp->p2 & OPFLAG_NCHANGE ) p->nChange++; - break; -} -/* Opcode: ResetCount * * * * * -** -** The value of the change counter is copied to the database handle -** change counter (returned by subsequent calls to sqlite3_changes()). -** Then the VMs internal change counter resets to 0. -** This is used by trigger programs. -*/ -case OP_ResetCount: { - sqlite3VdbeSetChanges(db, p->nChange); - p->nChange = 0; - break; -} - -/* Opcode: SorterCompare P1 P2 P3 P4 -** Synopsis: if key(P1)!=rtrim(r[P3],P4) goto P2 -** -** P1 is a sorter cursor. This instruction compares a prefix of the -** the record blob in register P3 against a prefix of the entry that -** the sorter cursor currently points to. The final P4 fields of both -** the P3 and sorter record are ignored. -** -** If either P3 or the sorter contains a NULL in one of their significant -** fields (not counting the P4 fields at the end which are ignored) then -** the comparison is assumed to be equal. -** -** Fall through to next instruction if the two records compare equal to -** each other. Jump to P2 if they are different. 
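Both OP_Insert and OP_Delete above finish by invoking the update hook when one is registered and P4 carries the table name; from the application side that is sqlite3_update_hook(). A sketch (the callback and table names are made up):

    #include <stdio.h>
    #include <sqlite3.h>

    /* Called once per row changed by INSERT/UPDATE/DELETE on rowid tables. */
    static void onChange(void *arg, int op, const char *zDb,
                         const char *zTbl, sqlite3_int64 rowid){
      (void)arg;
      printf("%s on %s.%s rowid=%lld\n",
             op==SQLITE_INSERT ? "INSERT" : op==SQLITE_DELETE ? "DELETE" : "UPDATE",
             zDb, zTbl, (long long)rowid);
    }

    int main(void){
      sqlite3 *db;
      if( sqlite3_open(":memory:", &db)!=SQLITE_OK ) return 1;
      sqlite3_update_hook(db, onChange, 0);
      sqlite3_exec(db, "CREATE TABLE t(x)", 0, 0, 0);
      sqlite3_exec(db, "INSERT INTO t VALUES(1)", 0, 0, 0);       /* fires SQLITE_INSERT */
      sqlite3_exec(db, "DELETE FROM t WHERE x=1", 0, 0, 0);       /* fires SQLITE_DELETE */
      sqlite3_close(db);
      return 0;
    }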
-*/ -case OP_SorterCompare: { - VdbeCursor *pC; - int res; - int nIgnore; - - pC = p->apCsr[pOp->p1]; - assert( isSorter(pC) ); - assert( pOp->p4type==P4_INT32 ); - pIn3 = &aMem[pOp->p3]; - nIgnore = pOp->p4.i; - rc = sqlite3VdbeSorterCompare(pC, pIn3, nIgnore, &res); - VdbeBranchTaken(res!=0,2); - if( res ){ - pc = pOp->p2-1; - } - break; -}; - -/* Opcode: SorterData P1 P2 * * * -** Synopsis: r[P2]=data -** -** Write into register P2 the current sorter data for sorter cursor P1. -*/ -case OP_SorterData: { - VdbeCursor *pC; - - pOut = &aMem[pOp->p2]; - pC = p->apCsr[pOp->p1]; - assert( isSorter(pC) ); - rc = sqlite3VdbeSorterRowkey(pC, pOut); - assert( rc!=SQLITE_OK || (pOut->flags & MEM_Blob) ); - break; -} - -/* Opcode: RowData P1 P2 * * * -** Synopsis: r[P2]=data -** -** Write into register P2 the complete row data for cursor P1. -** There is no interpretation of the data. -** It is just copied onto the P2 register exactly as -** it is found in the database file. -** -** If the P1 cursor must be pointing to a valid row (not a NULL row) -** of a real table, not a pseudo-table. -*/ -/* Opcode: RowKey P1 P2 * * * -** Synopsis: r[P2]=key -** -** Write into register P2 the complete row key for cursor P1. -** There is no interpretation of the data. -** The key is copied onto the P2 register exactly as -** it is found in the database file. -** -** If the P1 cursor must be pointing to a valid row (not a NULL row) -** of a real table, not a pseudo-table. -*/ -case OP_RowKey: -case OP_RowData: { - VdbeCursor *pC; - BtCursor *pCrsr; - u32 n; - i64 n64; - - pOut = &aMem[pOp->p2]; - memAboutToChange(p, pOut); - - /* Note that RowKey and RowData are really exactly the same instruction */ - assert( pOp->p1>=0 && pOp->p1nCursor ); - pC = p->apCsr[pOp->p1]; - assert( isSorter(pC)==0 ); - assert( pC->isTable || pOp->opcode!=OP_RowData ); - assert( pC->isTable==0 || pOp->opcode==OP_RowData ); - assert( pC!=0 ); - assert( pC->nullRow==0 ); - assert( pC->pseudoTableReg==0 ); - assert( pC->pCursor!=0 ); - pCrsr = pC->pCursor; - assert( sqlite3BtreeCursorIsValid(pCrsr) ); - - /* The OP_RowKey and OP_RowData opcodes always follow OP_NotExists or - ** OP_Rewind/Op_Next with no intervening instructions that might invalidate - ** the cursor. Hence the following sqlite3VdbeCursorMoveto() call is always - ** a no-op and can never fail. But we leave it in place as a safety. - */ - assert( pC->deferredMoveto==0 ); - rc = sqlite3VdbeCursorMoveto(pC); - if( NEVER(rc!=SQLITE_OK) ) goto abort_due_to_error; - - if( pC->isTable==0 ){ - assert( !pC->isTable ); - VVA_ONLY(rc =) sqlite3BtreeKeySize(pCrsr, &n64); - assert( rc==SQLITE_OK ); /* True because of CursorMoveto() call above */ - if( n64>db->aLimit[SQLITE_LIMIT_LENGTH] ){ - goto too_big; - } - n = (u32)n64; - }else{ - VVA_ONLY(rc =) sqlite3BtreeDataSize(pCrsr, &n); - assert( rc==SQLITE_OK ); /* DataSize() cannot fail */ - if( n>(u32)db->aLimit[SQLITE_LIMIT_LENGTH] ){ - goto too_big; - } - } - if( sqlite3VdbeMemGrow(pOut, n, 0) ){ - goto no_mem; - } - pOut->n = n; - MemSetTypeFlag(pOut, MEM_Blob); - if( pC->isTable==0 ){ - rc = sqlite3BtreeKey(pCrsr, 0, n, pOut->z); - }else{ - rc = sqlite3BtreeData(pCrsr, 0, n, pOut->z); - } - pOut->enc = SQLITE_UTF8; /* In case the blob is ever cast to text */ - UPDATE_MAX_BLOBSIZE(pOut); - REGISTER_TRACE(pOp->p2, pOut); - break; -} - -/* Opcode: Rowid P1 P2 * * * -** Synopsis: r[P2]=rowid -** -** Store in register P2 an integer which is the key of the table entry that -** P1 is currently point to. 
-** -** P1 can be either an ordinary table or a virtual table. There used to -** be a separate OP_VRowid opcode for use with virtual tables, but this -** one opcode now works for both table types. -*/ -case OP_Rowid: { /* out2-prerelease */ - VdbeCursor *pC; - i64 v; - sqlite3_vtab *pVtab; - const sqlite3_module *pModule; - - assert( pOp->p1>=0 && pOp->p1nCursor ); - pC = p->apCsr[pOp->p1]; - assert( pC!=0 ); - assert( pC->pseudoTableReg==0 || pC->nullRow ); - if( pC->nullRow ){ - pOut->flags = MEM_Null; - break; - }else if( pC->deferredMoveto ){ - v = pC->movetoTarget; -#ifndef SQLITE_OMIT_VIRTUALTABLE - }else if( pC->pVtabCursor ){ - pVtab = pC->pVtabCursor->pVtab; - pModule = pVtab->pModule; - assert( pModule->xRowid ); - rc = pModule->xRowid(pC->pVtabCursor, &v); - sqlite3VtabImportErrmsg(p, pVtab); -#endif /* SQLITE_OMIT_VIRTUALTABLE */ - }else{ - assert( pC->pCursor!=0 ); - rc = sqlite3VdbeCursorMoveto(pC); - if( rc ) goto abort_due_to_error; - if( pC->rowidIsValid ){ - v = pC->lastRowid; - }else{ - rc = sqlite3BtreeKeySize(pC->pCursor, &v); - assert( rc==SQLITE_OK ); /* Always so because of CursorMoveto() above */ - } - } - pOut->u.i = v; - break; -} - -/* Opcode: NullRow P1 * * * * -** -** Move the cursor P1 to a null row. Any OP_Column operations -** that occur while the cursor is on the null row will always -** write a NULL. -*/ -case OP_NullRow: { - VdbeCursor *pC; - - assert( pOp->p1>=0 && pOp->p1nCursor ); - pC = p->apCsr[pOp->p1]; - assert( pC!=0 ); - pC->nullRow = 1; - pC->rowidIsValid = 0; - pC->cacheStatus = CACHE_STALE; - if( pC->pCursor ){ - sqlite3BtreeClearCursor(pC->pCursor); - } - break; -} - -/* Opcode: Last P1 P2 * * * -** -** The next use of the Rowid or Column or Next instruction for P1 -** will refer to the last entry in the database table or index. -** If the table or index is empty and P2>0, then jump immediately to P2. -** If P2 is 0 or if the table or index is not empty, fall through -** to the following instruction. -*/ -case OP_Last: { /* jump */ - VdbeCursor *pC; - BtCursor *pCrsr; - int res; - - assert( pOp->p1>=0 && pOp->p1nCursor ); - pC = p->apCsr[pOp->p1]; - assert( pC!=0 ); - pCrsr = pC->pCursor; - res = 0; - assert( pCrsr!=0 ); - rc = sqlite3BtreeLast(pCrsr, &res); - pC->nullRow = (u8)res; - pC->deferredMoveto = 0; - pC->rowidIsValid = 0; - pC->cacheStatus = CACHE_STALE; - if( pOp->p2>0 ){ - VdbeBranchTaken(res!=0,2); - if( res ) pc = pOp->p2 - 1; - } - break; -} - - -/* Opcode: Sort P1 P2 * * * -** -** This opcode does exactly the same thing as OP_Rewind except that -** it increments an undocumented global variable used for testing. -** -** Sorting is accomplished by writing records into a sorting index, -** then rewinding that index and playing it back from beginning to -** end. We use the OP_Sort opcode instead of OP_Rewind to do the -** rewinding so that the global variable will be incremented and -** regression tests can determine whether or not the optimizer is -** correctly optimizing out sorts. -*/ -case OP_SorterSort: /* jump */ -case OP_Sort: { /* jump */ -#ifdef SQLITE_TEST - sqlite3_sort_count++; - sqlite3_search_count--; -#endif - p->aCounter[SQLITE_STMTSTATUS_SORT]++; - /* Fall through into OP_Rewind */ -} -/* Opcode: Rewind P1 P2 * * * -** -** The next use of the Rowid or Column or Next instruction for P1 -** will refer to the first entry in the database table or index. -** If the table or index is empty and P2>0, then jump immediately to P2. 
-** If P2 is 0 or if the table or index is not empty, fall through -** to the following instruction. -*/ -case OP_Rewind: { /* jump */ - VdbeCursor *pC; - BtCursor *pCrsr; - int res; - - assert( pOp->p1>=0 && pOp->p1nCursor ); - pC = p->apCsr[pOp->p1]; - assert( pC!=0 ); - assert( isSorter(pC)==(pOp->opcode==OP_SorterSort) ); - res = 1; - if( isSorter(pC) ){ - rc = sqlite3VdbeSorterRewind(db, pC, &res); - }else{ - pCrsr = pC->pCursor; - assert( pCrsr ); - rc = sqlite3BtreeFirst(pCrsr, &res); - pC->deferredMoveto = 0; - pC->cacheStatus = CACHE_STALE; - pC->rowidIsValid = 0; - } - pC->nullRow = (u8)res; - assert( pOp->p2>0 && pOp->p2nOp ); - VdbeBranchTaken(res!=0,2); - if( res ){ - pc = pOp->p2 - 1; - } - break; -} - -/* Opcode: Next P1 P2 P3 P4 P5 -** -** Advance cursor P1 so that it points to the next key/data pair in its -** table or index. If there are no more key/value pairs then fall through -** to the following instruction. But if the cursor advance was successful, -** jump immediately to P2. -** -** The P1 cursor must be for a real table, not a pseudo-table. P1 must have -** been opened prior to this opcode or the program will segfault. -** -** The P3 value is a hint to the btree implementation. If P3==1, that -** means P1 is an SQL index and that this instruction could have been -** omitted if that index had been unique. P3 is usually 0. P3 is -** always either 0 or 1. -** -** P4 is always of type P4_ADVANCE. The function pointer points to -** sqlite3BtreeNext(). -** -** If P5 is positive and the jump is taken, then event counter -** number P5-1 in the prepared statement is incremented. -** -** See also: Prev, NextIfOpen -*/ -/* Opcode: NextIfOpen P1 P2 P3 P4 P5 -** -** This opcode works just like OP_Next except that if cursor P1 is not -** open it behaves a no-op. -*/ -/* Opcode: Prev P1 P2 P3 P4 P5 -** -** Back up cursor P1 so that it points to the previous key/data pair in its -** table or index. If there is no previous key/value pairs then fall through -** to the following instruction. But if the cursor backup was successful, -** jump immediately to P2. -** -** The P1 cursor must be for a real table, not a pseudo-table. If P1 is -** not open then the behavior is undefined. -** -** The P3 value is a hint to the btree implementation. If P3==1, that -** means P1 is an SQL index and that this instruction could have been -** omitted if that index had been unique. P3 is usually 0. P3 is -** always either 0 or 1. -** -** P4 is always of type P4_ADVANCE. The function pointer points to -** sqlite3BtreePrevious(). -** -** If P5 is positive and the jump is taken, then event counter -** number P5-1 in the prepared statement is incremented. -*/ -/* Opcode: PrevIfOpen P1 P2 P3 P4 P5 -** -** This opcode works just like OP_Prev except that if cursor P1 is not -** open it behaves a no-op. 
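From the API side, a plain table scan is OP_Rewind once followed by OP_Next after each row; every sqlite3_step() that returns SQLITE_ROW corresponds to the VDBE taking that jump, and the scan ends when the cursor falls through at end of table (OP_Prev drives a descending scan the same way). A minimal sketch (names made up):

    #include <stdio.h>
    #include <sqlite3.h>

    int main(void){
      sqlite3 *db;
      sqlite3_stmt *st;
      if( sqlite3_open(":memory:", &db)!=SQLITE_OK ) return 1;
      sqlite3_exec(db, "CREATE TABLE t(x)", 0, 0, 0);
      sqlite3_exec(db, "INSERT INTO t VALUES(1),(2),(3)", 0, 0, 0);
      if( sqlite3_prepare_v2(db, "SELECT rowid, x FROM t", -1, &st, 0)==SQLITE_OK ){
        /* OP_Rewind positions at the first row; each later step is OP_Next. */
        while( sqlite3_step(st)==SQLITE_ROW ){
          printf("rowid=%lld x=%d\n",
                 (long long)sqlite3_column_int64(st, 0), sqlite3_column_int(st, 1));
        }
        sqlite3_finalize(st);
      }
      sqlite3_close(db);
      return 0;
    }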
-*/ -case OP_SorterNext: { /* jump */ - VdbeCursor *pC; - int res; - - pC = p->apCsr[pOp->p1]; - assert( isSorter(pC) ); - res = 0; - rc = sqlite3VdbeSorterNext(db, pC, &res); - goto next_tail; -case OP_PrevIfOpen: /* jump */ -case OP_NextIfOpen: /* jump */ - if( p->apCsr[pOp->p1]==0 ) break; - /* Fall through */ -case OP_Prev: /* jump */ -case OP_Next: /* jump */ - assert( pOp->p1>=0 && pOp->p1nCursor ); - assert( pOp->p5aCounter) ); - pC = p->apCsr[pOp->p1]; - res = pOp->p3; - assert( pC!=0 ); - assert( pC->deferredMoveto==0 ); - assert( pC->pCursor ); - assert( res==0 || (res==1 && pC->isTable==0) ); - testcase( res==1 ); - assert( pOp->opcode!=OP_Next || pOp->p4.xAdvance==sqlite3BtreeNext ); - assert( pOp->opcode!=OP_Prev || pOp->p4.xAdvance==sqlite3BtreePrevious ); - assert( pOp->opcode!=OP_NextIfOpen || pOp->p4.xAdvance==sqlite3BtreeNext ); - assert( pOp->opcode!=OP_PrevIfOpen || pOp->p4.xAdvance==sqlite3BtreePrevious); - rc = pOp->p4.xAdvance(pC->pCursor, &res); -next_tail: - pC->cacheStatus = CACHE_STALE; - VdbeBranchTaken(res==0,2); - if( res==0 ){ - pC->nullRow = 0; - pc = pOp->p2 - 1; - p->aCounter[pOp->p5]++; -#ifdef SQLITE_TEST - sqlite3_search_count++; -#endif - }else{ - pC->nullRow = 1; - } - pC->rowidIsValid = 0; - goto check_for_interrupt; -} - -/* Opcode: IdxInsert P1 P2 P3 * P5 -** Synopsis: key=r[P2] -** -** Register P2 holds an SQL index key made using the -** MakeRecord instructions. This opcode writes that key -** into the index P1. Data for the entry is nil. -** -** P3 is a flag that provides a hint to the b-tree layer that this -** insert is likely to be an append. -** -** If P5 has the OPFLAG_NCHANGE bit set, then the change counter is -** incremented by this instruction. If the OPFLAG_NCHANGE bit is clear, -** then the change counter is unchanged. -** -** If P5 has the OPFLAG_USESEEKRESULT bit set, then the cursor must have -** just done a seek to the spot where the new entry is to be inserted. -** This flag avoids doing an extra seek. -** -** This instruction only works for indices. The equivalent instruction -** for tables is OP_Insert. -*/ -case OP_SorterInsert: /* in2 */ -case OP_IdxInsert: { /* in2 */ - VdbeCursor *pC; - BtCursor *pCrsr; - int nKey; - const char *zKey; - - assert( pOp->p1>=0 && pOp->p1nCursor ); - pC = p->apCsr[pOp->p1]; - assert( pC!=0 ); - assert( isSorter(pC)==(pOp->opcode==OP_SorterInsert) ); - pIn2 = &aMem[pOp->p2]; - assert( pIn2->flags & MEM_Blob ); - pCrsr = pC->pCursor; - if( pOp->p5 & OPFLAG_NCHANGE ) p->nChange++; - assert( pCrsr!=0 ); - assert( pC->isTable==0 ); - rc = ExpandBlob(pIn2); - if( rc==SQLITE_OK ){ - if( isSorter(pC) ){ - rc = sqlite3VdbeSorterWrite(db, pC, pIn2); - }else{ - nKey = pIn2->n; - zKey = pIn2->z; - rc = sqlite3BtreeInsert(pCrsr, zKey, nKey, "", 0, 0, pOp->p3, - ((pOp->p5 & OPFLAG_USESEEKRESULT) ? pC->seekResult : 0) - ); - assert( pC->deferredMoveto==0 ); - pC->cacheStatus = CACHE_STALE; - } - } - break; -} - -/* Opcode: IdxDelete P1 P2 P3 * * -** Synopsis: key=r[P2@P3] -** -** The content of P3 registers starting at register P2 form -** an unpacked index key. This opcode removes that entry from the -** index opened by cursor P1. 
-*/ -case OP_IdxDelete: { - VdbeCursor *pC; - BtCursor *pCrsr; - int res; - UnpackedRecord r; - - assert( pOp->p3>0 ); - assert( pOp->p2>0 && pOp->p2+pOp->p3<=(p->nMem-p->nCursor)+1 ); - assert( pOp->p1>=0 && pOp->p1nCursor ); - pC = p->apCsr[pOp->p1]; - assert( pC!=0 ); - pCrsr = pC->pCursor; - assert( pCrsr!=0 ); - assert( pOp->p5==0 ); - r.pKeyInfo = pC->pKeyInfo; - r.nField = (u16)pOp->p3; - r.default_rc = 0; - r.aMem = &aMem[pOp->p2]; -#ifdef SQLITE_DEBUG - { int i; for(i=0; ideferredMoveto==0 ); - pC->cacheStatus = CACHE_STALE; - break; -} - -/* Opcode: IdxRowid P1 P2 * * * -** Synopsis: r[P2]=rowid -** -** Write into register P2 an integer which is the last entry in the record at -** the end of the index key pointed to by cursor P1. This integer should be -** the rowid of the table entry to which this index entry points. -** -** See also: Rowid, MakeRecord. -*/ -case OP_IdxRowid: { /* out2-prerelease */ - BtCursor *pCrsr; - VdbeCursor *pC; - i64 rowid; - - assert( pOp->p1>=0 && pOp->p1nCursor ); - pC = p->apCsr[pOp->p1]; - assert( pC!=0 ); - pCrsr = pC->pCursor; - assert( pCrsr!=0 ); - pOut->flags = MEM_Null; - rc = sqlite3VdbeCursorMoveto(pC); - if( NEVER(rc) ) goto abort_due_to_error; - assert( pC->deferredMoveto==0 ); - assert( pC->isTable==0 ); - if( !pC->nullRow ){ - rowid = 0; /* Not needed. Only used to silence a warning. */ - rc = sqlite3VdbeIdxRowid(db, pCrsr, &rowid); - if( rc!=SQLITE_OK ){ - goto abort_due_to_error; - } - pOut->u.i = rowid; - pOut->flags = MEM_Int; - } - break; -} - -/* Opcode: IdxGE P1 P2 P3 P4 P5 -** Synopsis: key=r[P3@P4] -** -** The P4 register values beginning with P3 form an unpacked index -** key that omits the PRIMARY KEY. Compare this key value against the index -** that P1 is currently pointing to, ignoring the PRIMARY KEY or ROWID -** fields at the end. -** -** If the P1 index entry is greater than or equal to the key value -** then jump to P2. Otherwise fall through to the next instruction. -*/ -/* Opcode: IdxGT P1 P2 P3 P4 P5 -** Synopsis: key=r[P3@P4] -** -** The P4 register values beginning with P3 form an unpacked index -** key that omits the PRIMARY KEY. Compare this key value against the index -** that P1 is currently pointing to, ignoring the PRIMARY KEY or ROWID -** fields at the end. -** -** If the P1 index entry is greater than the key value -** then jump to P2. Otherwise fall through to the next instruction. -*/ -/* Opcode: IdxLT P1 P2 P3 P4 P5 -** Synopsis: key=r[P3@P4] -** -** The P4 register values beginning with P3 form an unpacked index -** key that omits the PRIMARY KEY or ROWID. Compare this key value against -** the index that P1 is currently pointing to, ignoring the PRIMARY KEY or -** ROWID on the P1 index. -** -** If the P1 index entry is less than the key value then jump to P2. -** Otherwise fall through to the next instruction. -*/ -/* Opcode: IdxLE P1 P2 P3 P4 P5 -** Synopsis: key=r[P3@P4] -** -** The P4 register values beginning with P3 form an unpacked index -** key that omits the PRIMARY KEY or ROWID. Compare this key value against -** the index that P1 is currently pointing to, ignoring the PRIMARY KEY or -** ROWID on the P1 index. -** -** If the P1 index entry is less than or equal to the key value then jump -** to P2. Otherwise fall through to the next instruction. 
-*/ -case OP_IdxLE: /* jump */ -case OP_IdxGT: /* jump */ -case OP_IdxLT: /* jump */ -case OP_IdxGE: { /* jump */ - VdbeCursor *pC; - int res; - UnpackedRecord r; - - assert( pOp->p1>=0 && pOp->p1nCursor ); - pC = p->apCsr[pOp->p1]; - assert( pC!=0 ); - assert( pC->isOrdered ); - assert( pC->pCursor!=0); - assert( pC->deferredMoveto==0 ); - assert( pOp->p5==0 || pOp->p5==1 ); - assert( pOp->p4type==P4_INT32 ); - r.pKeyInfo = pC->pKeyInfo; - r.nField = (u16)pOp->p4.i; - if( pOp->opcodeopcode==OP_IdxLE || pOp->opcode==OP_IdxGT ); - r.default_rc = -1; - }else{ - assert( pOp->opcode==OP_IdxGE || pOp->opcode==OP_IdxLT ); - r.default_rc = 0; - } - r.aMem = &aMem[pOp->p3]; -#ifdef SQLITE_DEBUG - { int i; for(i=0; iopcode&1)==(OP_IdxLT&1) ){ - assert( pOp->opcode==OP_IdxLE || pOp->opcode==OP_IdxLT ); - res = -res; - }else{ - assert( pOp->opcode==OP_IdxGE || pOp->opcode==OP_IdxGT ); - res++; - } - VdbeBranchTaken(res>0,2); - if( res>0 ){ - pc = pOp->p2 - 1 ; - } - break; -} - -/* Opcode: Destroy P1 P2 P3 * * -** -** Delete an entire database table or index whose root page in the database -** file is given by P1. -** -** The table being destroyed is in the main database file if P3==0. If -** P3==1 then the table to be clear is in the auxiliary database file -** that is used to store tables create using CREATE TEMPORARY TABLE. -** -** If AUTOVACUUM is enabled then it is possible that another root page -** might be moved into the newly deleted root page in order to keep all -** root pages contiguous at the beginning of the database. The former -** value of the root page that moved - its value before the move occurred - -** is stored in register P2. If no page -** movement was required (because the table being dropped was already -** the last one in the database) then a zero is stored in register P2. -** If AUTOVACUUM is disabled then a zero is stored in register P2. -** -** See also: Clear -*/ -case OP_Destroy: { /* out2-prerelease */ - int iMoved; - int iCnt; - Vdbe *pVdbe; - int iDb; - - assert( p->readOnly==0 ); -#ifndef SQLITE_OMIT_VIRTUALTABLE - iCnt = 0; - for(pVdbe=db->pVdbe; pVdbe; pVdbe = pVdbe->pNext){ - if( pVdbe->magic==VDBE_MAGIC_RUN && pVdbe->bIsReader - && pVdbe->inVtabMethod<2 && pVdbe->pc>=0 - ){ - iCnt++; - } - } -#else - iCnt = db->nVdbeRead; -#endif - pOut->flags = MEM_Null; - if( iCnt>1 ){ - rc = SQLITE_LOCKED; - p->errorAction = OE_Abort; - }else{ - iDb = pOp->p3; - assert( iCnt==1 ); - assert( (p->btreeMask & (((yDbMask)1)<aDb[iDb].pBt, pOp->p1, &iMoved); - pOut->flags = MEM_Int; - pOut->u.i = iMoved; -#ifndef SQLITE_OMIT_AUTOVACUUM - if( rc==SQLITE_OK && iMoved!=0 ){ - sqlite3RootPageMoved(db, iDb, iMoved, pOp->p1); - /* All OP_Destroy operations occur on the same btree */ - assert( resetSchemaOnFault==0 || resetSchemaOnFault==iDb+1 ); - resetSchemaOnFault = iDb+1; - } -#endif - } - break; -} - -/* Opcode: Clear P1 P2 P3 -** -** Delete all contents of the database table or index whose root page -** in the database file is given by P1. But, unlike Destroy, do not -** remove the table or index from the database file. -** -** The table being clear is in the main database file if P2==0. If -** P2==1 then the table to be clear is in the auxiliary database file -** that is used to store tables create using CREATE TEMPORARY TABLE. -** -** If the P3 value is non-zero, then the table referred to must be an -** intkey table (an SQL table, not an index). In this case the row change -** count is incremented by the number of rows in the table being cleared. 
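For orientation, the cursor opcodes removed above are what run underneath sqlite3_step(): OP_Rewind positions a b-tree cursor on the first row and OP_Next advances it, falling through when the table is exhausted. A minimal sketch of that public-API view follows; the "people" table and its columns are hypothetical names chosen for the example.

#include <stdio.h>
#include "sqlite3.h"

/* Illustrative only: iterate a table the way the deleted OP_Rewind/OP_Next
** machinery does beneath sqlite3_step(). */
static int dump_people(sqlite3 *db){
  sqlite3_stmt *pStmt = 0;
  int rc = sqlite3_prepare_v2(db, "SELECT id, name FROM people", -1, &pStmt, 0);
  if( rc!=SQLITE_OK ) return rc;
  /* Each sqlite3_step() advances the underlying cursor (OP_Next); the first
  ** call positions it on the first row (OP_Rewind). */
  while( (rc = sqlite3_step(pStmt))==SQLITE_ROW ){
    printf("%lld %s\n",
           (long long)sqlite3_column_int64(pStmt, 0),
           (const char*)sqlite3_column_text(pStmt, 1));
  }
  sqlite3_finalize(pStmt);
  return rc==SQLITE_DONE ? SQLITE_OK : rc;
}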
[Deleted hunk, vdbe.c continued: OP_Clear, OP_ResetSorter, OP_CreateTable / OP_CreateIndex, OP_ParseSchema (re-reads the SQLITE_MASTER rows matching the P4 WHERE clause through sqlite3_exec() and sqlite3InitCallback), OP_LoadAnalysis, OP_DropTable / OP_DropIndex / OP_DropTrigger, OP_IntegrityCk, OP_RowSetAdd, OP_RowSetRead, and the first half of the OP_RowSetTest documentation.]
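The OP_IntegrityCk opcode summarized above is what backs the integrity_check pragma. A small sketch of reaching it through the public API; the error budget of 10 is an arbitrary choice for the example.

#include <stdio.h>
#include "sqlite3.h"

static int print_row(void *arg, int nCol, char **azVal, char **azCol){
  (void)arg; (void)azCol;
  for(int i=0; i<nCol; i++) printf("%s\n", azVal[i] ? azVal[i] : "NULL");
  return 0;
}

static int check_db(sqlite3 *db){
  char *zErr = 0;
  /* Report at most 10 problems, mirroring the reg(P3) error budget above. */
  int rc = sqlite3_exec(db, "PRAGMA integrity_check(10);", print_row, 0, &zErr);
  if( zErr ){ fprintf(stderr, "%s\n", zErr); sqlite3_free(zErr); }
  return rc;
}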
[Deleted hunk, vdbe.c continued: the remainder of OP_RowSetTest (the per-set optimisation that skips duplicate probes and inserts), OP_Program (allocation of a VdbeFrame, the recursion check against SQLITE_LIMIT_TRIGGER_DEPTH, and switching the VM into the trigger sub-program), OP_Param, and the start of the OP_FkCounter documentation.]
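The OP_Program code summarized above enforces SQLITE_LIMIT_TRIGGER_DEPTH and only re-enters a trigger's own sub-program when "PRAGMA recursive_triggers" is on. Both knobs are reachable from the public API, roughly as sketched here; the depth of 20 is an arbitrary value for illustration.

#include "sqlite3.h"

static void configure_triggers(sqlite3 *db){
  /* Allow trigger programs to nest at most 20 frames deep. */
  sqlite3_limit(db, SQLITE_LIMIT_TRIGGER_DEPTH, 20);
  /* Let a trigger fire again for writes made by its own body. */
  sqlite3_exec(db, "PRAGMA recursive_triggers = ON;", 0, 0, 0);
}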
[Deleted hunk, vdbe.c continued: OP_FkCounter and OP_FkIfZero (the deferred and immediate foreign-key constraint counters), OP_MemMax, OP_IfPos, OP_IfNeg, and the OP_IfZero documentation.]
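The deferred and immediate counters maintained by OP_FkCounter / OP_FkIfZero are what make deferred foreign-key checking work at the SQL level. A hedged sketch of the usage pattern; the statements between BEGIN and COMMIT are placeholders.

#include "sqlite3.h"

static int transfer(sqlite3 *db){
  sqlite3_exec(db, "PRAGMA foreign_keys = ON;", 0, 0, 0);
  sqlite3_exec(db, "BEGIN;", 0, 0, 0);
  /* Treat FK checks in this transaction as deferred: violations are counted
  ** as statements run and only enforced when the transaction commits. */
  sqlite3_exec(db, "PRAGMA defer_foreign_keys = ON;", 0, 0, 0);
  /* ... statements that are temporarily inconsistent would go here ... */
  return sqlite3_exec(db, "COMMIT;", 0, 0, 0);  /* fails if the counter is non-zero */
}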
[Deleted hunk, vdbe.c continued: the OP_IfZero implementation, OP_AggStep and OP_AggFinal (invocation of an aggregate function's xStep and xFinal callbacks, with the accumulator held in the register named by P3/P1), and the OP_Checkpoint documentation.]
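OP_AggStep / OP_AggFinal call the xStep and xFinal callbacks of a registered aggregate. The sketch below registers a hypothetical sumsq(x) aggregate through sqlite3_create_function() to show which callbacks those opcodes end up invoking.

#include "sqlite3.h"

static void sumsq_step(sqlite3_context *ctx, int argc, sqlite3_value **argv){
  double *acc = (double*)sqlite3_aggregate_context(ctx, sizeof(double));
  if( acc && argc>0 ){
    double v = sqlite3_value_double(argv[0]);
    *acc += v*v;
  }
}
static void sumsq_final(sqlite3_context *ctx){
  double *acc = (double*)sqlite3_aggregate_context(ctx, 0);
  sqlite3_result_double(ctx, acc ? *acc : 0.0);
}
static int register_sumsq(sqlite3 *db){
  return sqlite3_create_function(db, "sumsq", 1, SQLITE_UTF8, 0,
                                 0 /* xFunc */, sumsq_step, sumsq_final);
}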
[Deleted hunk, vdbe.c continued: OP_Checkpoint, OP_JournalMode (including the restrictions on entering or leaving WAL mode from inside a transaction), OP_Vacuum, OP_IncrVacuum, OP_Expire, OP_TableLock, and the OP_VBegin documentation.]
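The journal-mode and checkpoint opcodes above surface through "PRAGMA journal_mode" and sqlite3_wal_checkpoint_v2(). An illustrative sketch; the choice of SQLITE_CHECKPOINT_FULL and the "main" database name are assumptions for the example.

#include "sqlite3.h"

static int switch_to_wal_and_checkpoint(sqlite3 *db){
  /* Must not be inside a transaction, per the OP_JournalMode restriction. */
  int rc = sqlite3_exec(db, "PRAGMA journal_mode = WAL;", 0, 0, 0);
  if( rc!=SQLITE_OK ) return rc;
  int nLog = 0, nCkpt = 0;  /* frames in the WAL / frames checkpointed */
  rc = sqlite3_wal_checkpoint_v2(db, "main", SQLITE_CHECKPOINT_FULL, &nLog, &nCkpt);
  /* SQLITE_BUSY here corresponds to the "write 1 into mem[P3]" case above. */
  return rc;
}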
[Deleted hunk, vdbe.c continued: the virtual-table opcodes OP_VBegin, OP_VCreate, OP_VDestroy, OP_VOpen, OP_VFilter, OP_VColumn, OP_VNext and OP_VRename, plus the OP_VUpdate documentation.]
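The virtual-table scan opcodes above drive a module's cursor methods in a fixed order: xFilter starts the scan, then xColumn and xNext run until xEof reports the end. The sketch below is purely conceptual, not application code; the module, cursor, context argument and the use of column 0 are all stand-ins for what the core would supply.

#include "sqlite3.h"

/* Conceptual sketch of the contract OP_VFilter / OP_VColumn / OP_VNext enforce. */
static int scan_vtab(const sqlite3_module *pMod, sqlite3_vtab_cursor *pCur,
                     int idxNum, const char *idxStr, sqlite3_context *ctx){
  int rc = pMod->xFilter(pCur, idxNum, idxStr, 0, 0);   /* OP_VFilter */
  while( rc==SQLITE_OK && !pMod->xEof(pCur) ){          /* empty => jump to P2 */
    rc = pMod->xColumn(pCur, ctx, 0);                   /* OP_VColumn, column 0 */
    if( rc==SQLITE_OK ) rc = pMod->xNext(pCur);         /* OP_VNext */
  }
  return rc;
}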
[Deleted hunk, vdbe.c continued: OP_VUpdate, OP_Pagecount, OP_MaxPgcnt, OP_Init (the sqlite3_trace() hook), the default OP_Noop / OP_Explain case, the end of the opcode switch, the per-opcode profiling and debug-trace code, and the vdbe_error_halt, vdbe_return, too_big, no_mem and abort_due_to_error exits of the interpreter loop.]
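OP_Init is where the sqlite3_trace() callback referenced above fires, with the statement text taken from P4 or sqlite3_sql(). A minimal sketch of installing such a callback:

#include <stdio.h>
#include "sqlite3.h"

static void trace_cb(void *arg, const char *zSql){
  (void)arg;
  fprintf(stderr, "SQL: %s\n", zSql);
}
static void enable_tracing(sqlite3 *db){
  /* Invoked once per statement as the VM starts executing (OP_Init). */
  sqlite3_trace(db, trace_cb, 0);
}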
- */ -abort_due_to_interrupt: - assert( db->u1.isInterrupted ); - rc = SQLITE_INTERRUPT; - p->rc = rc; - sqlite3SetString(&p->zErrMsg, db, "%s", sqlite3ErrStr(rc)); - goto vdbe_error_halt; -} - - -/************** End of vdbe.c ************************************************/ -/************** Begin file vdbeblob.c ****************************************/ -/* -** 2007 May 1 -** -** The author disclaims copyright to this source code. In place of -** a legal notice, here is a blessing: -** -** May you do good and not evil. -** May you find forgiveness for yourself and forgive others. -** May you share freely, never taking more than you give. -** -************************************************************************* -** -** This file contains code used to implement incremental BLOB I/O. -*/ - - -#ifndef SQLITE_OMIT_INCRBLOB - -/* -** Valid sqlite3_blob* handles point to Incrblob structures. -*/ -typedef struct Incrblob Incrblob; -struct Incrblob { - int flags; /* Copy of "flags" passed to sqlite3_blob_open() */ - int nByte; /* Size of open blob, in bytes */ - int iOffset; /* Byte offset of blob in cursor data */ - int iCol; /* Table column this handle is open on */ - BtCursor *pCsr; /* Cursor pointing at blob row */ - sqlite3_stmt *pStmt; /* Statement holding cursor open */ - sqlite3 *db; /* The associated database */ -}; - - -/* -** This function is used by both blob_open() and blob_reopen(). It seeks -** the b-tree cursor associated with blob handle p to point to row iRow. -** If successful, SQLITE_OK is returned and subsequent calls to -** sqlite3_blob_read() or sqlite3_blob_write() access the specified row. -** -** If an error occurs, or if the specified row does not exist or does not -** contain a value of type TEXT or BLOB in the column nominated when the -** blob handle was opened, then an error code is returned and *pzErr may -** be set to point to a buffer containing an error message. It is the -** responsibility of the caller to free the error message buffer using -** sqlite3DbFree(). -** -** If an error does occur, then the b-tree cursor is closed. All subsequent -** calls to sqlite3_blob_read(), blob_write() or blob_reopen() will -** immediately return SQLITE_ABORT. -*/ -static int blobSeekToRow(Incrblob *p, sqlite3_int64 iRow, char **pzErr){ - int rc; /* Error code */ - char *zErr = 0; /* Error message */ - Vdbe *v = (Vdbe *)p->pStmt; - - /* Set the value of the SQL statements only variable to integer iRow. - ** This is done directly instead of using sqlite3_bind_int64() to avoid - ** triggering asserts related to mutexes. 
- */ - assert( v->aVar[0].flags&MEM_Int ); - v->aVar[0].u.i = iRow; - - rc = sqlite3_step(p->pStmt); - if( rc==SQLITE_ROW ){ - VdbeCursor *pC = v->apCsr[0]; - u32 type = pC->aType[p->iCol]; - if( type<12 ){ - zErr = sqlite3MPrintf(p->db, "cannot open value of type %s", - type==0?"null": type==7?"real": "integer" - ); - rc = SQLITE_ERROR; - sqlite3_finalize(p->pStmt); - p->pStmt = 0; - }else{ - p->iOffset = pC->aType[p->iCol + pC->nField]; - p->nByte = sqlite3VdbeSerialTypeLen(type); - p->pCsr = pC->pCursor; - sqlite3BtreeIncrblobCursor(p->pCsr); - } - } - - if( rc==SQLITE_ROW ){ - rc = SQLITE_OK; - }else if( p->pStmt ){ - rc = sqlite3_finalize(p->pStmt); - p->pStmt = 0; - if( rc==SQLITE_OK ){ - zErr = sqlite3MPrintf(p->db, "no such rowid: %lld", iRow); - rc = SQLITE_ERROR; - }else{ - zErr = sqlite3MPrintf(p->db, "%s", sqlite3_errmsg(p->db)); - } - } - - assert( rc!=SQLITE_OK || zErr==0 ); - assert( rc!=SQLITE_ROW && rc!=SQLITE_DONE ); - - *pzErr = zErr; - return rc; -} - -/* -** Open a blob handle. -*/ -SQLITE_API int sqlite3_blob_open( - sqlite3* db, /* The database connection */ - const char *zDb, /* The attached database containing the blob */ - const char *zTable, /* The table containing the blob */ - const char *zColumn, /* The column containing the blob */ - sqlite_int64 iRow, /* The row containing the glob */ - int flags, /* True -> read/write access, false -> read-only */ - sqlite3_blob **ppBlob /* Handle for accessing the blob returned here */ -){ - int nAttempt = 0; - int iCol; /* Index of zColumn in row-record */ - - /* This VDBE program seeks a btree cursor to the identified - ** db/table/row entry. The reason for using a vdbe program instead - ** of writing code to use the b-tree layer directly is that the - ** vdbe program will take advantage of the various transaction, - ** locking and error handling infrastructure built into the vdbe. - ** - ** After seeking the cursor, the vdbe executes an OP_ResultRow. - ** Code external to the Vdbe then "borrows" the b-tree cursor and - ** uses it to implement the blob_read(), blob_write() and - ** blob_bytes() functions. - ** - ** The sqlite3_blob_close() function finalizes the vdbe program, - ** which closes the b-tree cursor and (possibly) commits the - ** transaction. - */ - static const int iLn = VDBE_OFFSET_LINENO(4); - static const VdbeOpList openBlob[] = { - /* {OP_Transaction, 0, 0, 0}, // 0: Inserted separately */ - {OP_TableLock, 0, 0, 0}, /* 1: Acquire a read or write lock */ - /* One of the following two instructions is replaced by an OP_Noop. */ - {OP_OpenRead, 0, 0, 0}, /* 2: Open cursor 0 for reading */ - {OP_OpenWrite, 0, 0, 0}, /* 3: Open cursor 0 for read/write */ - {OP_Variable, 1, 1, 1}, /* 4: Push the rowid to the stack */ - {OP_NotExists, 0, 10, 1}, /* 5: Seek the cursor */ - {OP_Column, 0, 0, 1}, /* 6 */ - {OP_ResultRow, 1, 0, 0}, /* 7 */ - {OP_Goto, 0, 4, 0}, /* 8 */ - {OP_Close, 0, 0, 0}, /* 9 */ - {OP_Halt, 0, 0, 0}, /* 10 */ - }; - - int rc = SQLITE_OK; - char *zErr = 0; - Table *pTab; - Parse *pParse = 0; - Incrblob *pBlob = 0; - - flags = !!flags; /* flags = (flags ? 
1 : 0); */ - *ppBlob = 0; - - sqlite3_mutex_enter(db->mutex); - - pBlob = (Incrblob *)sqlite3DbMallocZero(db, sizeof(Incrblob)); - if( !pBlob ) goto blob_open_out; - pParse = sqlite3StackAllocRaw(db, sizeof(*pParse)); - if( !pParse ) goto blob_open_out; - - do { - memset(pParse, 0, sizeof(Parse)); - pParse->db = db; - sqlite3DbFree(db, zErr); - zErr = 0; - - sqlite3BtreeEnterAll(db); - pTab = sqlite3LocateTable(pParse, 0, zTable, zDb); - if( pTab && IsVirtual(pTab) ){ - pTab = 0; - sqlite3ErrorMsg(pParse, "cannot open virtual table: %s", zTable); - } - if( pTab && !HasRowid(pTab) ){ - pTab = 0; - sqlite3ErrorMsg(pParse, "cannot open table without rowid: %s", zTable); - } -#ifndef SQLITE_OMIT_VIEW - if( pTab && pTab->pSelect ){ - pTab = 0; - sqlite3ErrorMsg(pParse, "cannot open view: %s", zTable); - } -#endif - if( !pTab ){ - if( pParse->zErrMsg ){ - sqlite3DbFree(db, zErr); - zErr = pParse->zErrMsg; - pParse->zErrMsg = 0; - } - rc = SQLITE_ERROR; - sqlite3BtreeLeaveAll(db); - goto blob_open_out; - } - - /* Now search pTab for the exact column. */ - for(iCol=0; iColnCol; iCol++) { - if( sqlite3StrICmp(pTab->aCol[iCol].zName, zColumn)==0 ){ - break; - } - } - if( iCol==pTab->nCol ){ - sqlite3DbFree(db, zErr); - zErr = sqlite3MPrintf(db, "no such column: \"%s\"", zColumn); - rc = SQLITE_ERROR; - sqlite3BtreeLeaveAll(db); - goto blob_open_out; - } - - /* If the value is being opened for writing, check that the - ** column is not indexed, and that it is not part of a foreign key. - ** It is against the rules to open a column to which either of these - ** descriptions applies for writing. */ - if( flags ){ - const char *zFault = 0; - Index *pIdx; -#ifndef SQLITE_OMIT_FOREIGN_KEY - if( db->flags&SQLITE_ForeignKeys ){ - /* Check that the column is not part of an FK child key definition. It - ** is not necessary to check if it is part of a parent key, as parent - ** key columns must be indexed. The check below will pick up this - ** case. */ - FKey *pFKey; - for(pFKey=pTab->pFKey; pFKey; pFKey=pFKey->pNextFrom){ - int j; - for(j=0; jnCol; j++){ - if( pFKey->aCol[j].iFrom==iCol ){ - zFault = "foreign key"; - } - } - } - } -#endif - for(pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext){ - int j; - for(j=0; jnKeyCol; j++){ - if( pIdx->aiColumn[j]==iCol ){ - zFault = "indexed"; - } - } - } - if( zFault ){ - sqlite3DbFree(db, zErr); - zErr = sqlite3MPrintf(db, "cannot open %s column for writing", zFault); - rc = SQLITE_ERROR; - sqlite3BtreeLeaveAll(db); - goto blob_open_out; - } - } - - pBlob->pStmt = (sqlite3_stmt *)sqlite3VdbeCreate(pParse); - assert( pBlob->pStmt || db->mallocFailed ); - if( pBlob->pStmt ){ - Vdbe *v = (Vdbe *)pBlob->pStmt; - int iDb = sqlite3SchemaToIndex(db, pTab->pSchema); - - - sqlite3VdbeAddOp4Int(v, OP_Transaction, iDb, flags, - pTab->pSchema->schema_cookie, - pTab->pSchema->iGeneration); - sqlite3VdbeChangeP5(v, 1); - sqlite3VdbeAddOpList(v, ArraySize(openBlob), openBlob, iLn); - - /* Make sure a mutex is held on the table to be accessed */ - sqlite3VdbeUsesBtree(v, iDb); - - /* Configure the OP_TableLock instruction */ -#ifdef SQLITE_OMIT_SHARED_CACHE - sqlite3VdbeChangeToNoop(v, 1); -#else - sqlite3VdbeChangeP1(v, 1, iDb); - sqlite3VdbeChangeP2(v, 1, pTab->tnum); - sqlite3VdbeChangeP3(v, 1, flags); - sqlite3VdbeChangeP4(v, 1, pTab->zName, P4_TRANSIENT); -#endif - - /* Remove either the OP_OpenWrite or OpenRead. Set the P2 - ** parameter of the other to pTab->tnum. 
*/ - sqlite3VdbeChangeToNoop(v, 3 - flags); - sqlite3VdbeChangeP2(v, 2 + flags, pTab->tnum); - sqlite3VdbeChangeP3(v, 2 + flags, iDb); - - /* Configure the number of columns. Configure the cursor to - ** think that the table has one more column than it really - ** does. An OP_Column to retrieve this imaginary column will - ** always return an SQL NULL. This is useful because it means - ** we can invoke OP_Column to fill in the vdbe cursors type - ** and offset cache without causing any IO. - */ - sqlite3VdbeChangeP4(v, 2+flags, SQLITE_INT_TO_PTR(pTab->nCol+1),P4_INT32); - sqlite3VdbeChangeP2(v, 6, pTab->nCol); - if( !db->mallocFailed ){ - pParse->nVar = 1; - pParse->nMem = 1; - pParse->nTab = 1; - sqlite3VdbeMakeReady(v, pParse); - } - } - - pBlob->flags = flags; - pBlob->iCol = iCol; - pBlob->db = db; - sqlite3BtreeLeaveAll(db); - if( db->mallocFailed ){ - goto blob_open_out; - } - sqlite3_bind_int64(pBlob->pStmt, 1, iRow); - rc = blobSeekToRow(pBlob, iRow, &zErr); - } while( (++nAttempt)mallocFailed==0 ){ - *ppBlob = (sqlite3_blob *)pBlob; - }else{ - if( pBlob && pBlob->pStmt ) sqlite3VdbeFinalize((Vdbe *)pBlob->pStmt); - sqlite3DbFree(db, pBlob); - } - sqlite3Error(db, rc, (zErr ? "%s" : 0), zErr); - sqlite3DbFree(db, zErr); - sqlite3ParserReset(pParse); - sqlite3StackFree(db, pParse); - rc = sqlite3ApiExit(db, rc); - sqlite3_mutex_leave(db->mutex); - return rc; -} - -/* -** Close a blob handle that was previously created using -** sqlite3_blob_open(). -*/ -SQLITE_API int sqlite3_blob_close(sqlite3_blob *pBlob){ - Incrblob *p = (Incrblob *)pBlob; - int rc; - sqlite3 *db; - - if( p ){ - db = p->db; - sqlite3_mutex_enter(db->mutex); - rc = sqlite3_finalize(p->pStmt); - sqlite3DbFree(db, p); - sqlite3_mutex_leave(db->mutex); - }else{ - rc = SQLITE_OK; - } - return rc; -} - -/* -** Perform a read or write operation on a blob -*/ -static int blobReadWrite( - sqlite3_blob *pBlob, - void *z, - int n, - int iOffset, - int (*xCall)(BtCursor*, u32, u32, void*) -){ - int rc; - Incrblob *p = (Incrblob *)pBlob; - Vdbe *v; - sqlite3 *db; - - if( p==0 ) return SQLITE_MISUSE_BKPT; - db = p->db; - sqlite3_mutex_enter(db->mutex); - v = (Vdbe*)p->pStmt; - - if( n<0 || iOffset<0 || (iOffset+n)>p->nByte ){ - /* Request is out of range. Return a transient error. */ - rc = SQLITE_ERROR; - sqlite3Error(db, SQLITE_ERROR, 0); - }else if( v==0 ){ - /* If there is no statement handle, then the blob-handle has - ** already been invalidated. Return SQLITE_ABORT in this case. - */ - rc = SQLITE_ABORT; - }else{ - /* Call either BtreeData() or BtreePutData(). If SQLITE_ABORT is - ** returned, clean-up the statement handle. - */ - assert( db == v->db ); - sqlite3BtreeEnterCursor(p->pCsr); - rc = xCall(p->pCsr, iOffset+p->iOffset, n, z); - sqlite3BtreeLeaveCursor(p->pCsr); - if( rc==SQLITE_ABORT ){ - sqlite3VdbeFinalize(v); - p->pStmt = 0; - }else{ - db->errCode = rc; - v->rc = rc; - } - } - rc = sqlite3ApiExit(db, rc); - sqlite3_mutex_leave(db->mutex); - return rc; -} - -/* -** Read data from a blob handle. -*/ -SQLITE_API int sqlite3_blob_read(sqlite3_blob *pBlob, void *z, int n, int iOffset){ - return blobReadWrite(pBlob, z, n, iOffset, sqlite3BtreeData); -} - -/* -** Write data to a blob handle. -*/ -SQLITE_API int sqlite3_blob_write(sqlite3_blob *pBlob, const void *z, int n, int iOffset){ - return blobReadWrite(pBlob, (void *)z, n, iOffset, sqlite3BtreePutData); -} - -/* -** Query a blob handle for the size of the data. 
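Because blobReadWrite() rejects any range that falls outside the existing value, sqlite3_blob_write() can overwrite bytes in place but can never grow the BLOB. A hedged sketch of that rule; the table, column and offsets are illustrative.

#include <sqlite3.h>

/* Patch 4 bytes at offset 16 of an existing BLOB. The request fails if
** offset+length exceeds sqlite3_blob_bytes(), because incremental I/O
** cannot change the size of the stored value. */
static int patch_blob(sqlite3 *db, sqlite3_int64 rowid, const void *p4){
  sqlite3_blob *blob = 0;
  int rc = sqlite3_blob_open(db, "main", "docs", "body", rowid, 1 /* rw */, &blob);
  if( rc==SQLITE_OK ){
    if( sqlite3_blob_bytes(blob)>=20 ){
      rc = sqlite3_blob_write(blob, p4, 4, 16);
    }else{
      rc = SQLITE_ERROR;   /* would be out of range for this value */
    }
    sqlite3_blob_close(blob);
  }
  return rc;
}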
-** -** The Incrblob.nByte field is fixed for the lifetime of the Incrblob -** so no mutex is required for access. -*/ -SQLITE_API int sqlite3_blob_bytes(sqlite3_blob *pBlob){ - Incrblob *p = (Incrblob *)pBlob; - return (p && p->pStmt) ? p->nByte : 0; -} - -/* -** Move an existing blob handle to point to a different row of the same -** database table. -** -** If an error occurs, or if the specified row does not exist or does not -** contain a blob or text value, then an error code is returned and the -** database handle error code and message set. If this happens, then all -** subsequent calls to sqlite3_blob_xxx() functions (except blob_close()) -** immediately return SQLITE_ABORT. -*/ -SQLITE_API int sqlite3_blob_reopen(sqlite3_blob *pBlob, sqlite3_int64 iRow){ - int rc; - Incrblob *p = (Incrblob *)pBlob; - sqlite3 *db; - - if( p==0 ) return SQLITE_MISUSE_BKPT; - db = p->db; - sqlite3_mutex_enter(db->mutex); - - if( p->pStmt==0 ){ - /* If there is no statement handle, then the blob-handle has - ** already been invalidated. Return SQLITE_ABORT in this case. - */ - rc = SQLITE_ABORT; - }else{ - char *zErr; - rc = blobSeekToRow(p, iRow, &zErr); - if( rc!=SQLITE_OK ){ - sqlite3Error(db, rc, (zErr ? "%s" : 0), zErr); - sqlite3DbFree(db, zErr); - } - assert( rc!=SQLITE_SCHEMA ); - } - - rc = sqlite3ApiExit(db, rc); - assert( rc==SQLITE_OK || p->pStmt==0 ); - sqlite3_mutex_leave(db->mutex); - return rc; -} - -#endif /* #ifndef SQLITE_OMIT_INCRBLOB */ - -/************** End of vdbeblob.c ********************************************/ -/************** Begin file vdbesort.c ****************************************/ -/* -** 2011 July 9 -** -** The author disclaims copyright to this source code. In place of -** a legal notice, here is a blessing: -** -** May you do good and not evil. -** May you find forgiveness for yourself and forgive others. -** May you share freely, never taking more than you give. -** -************************************************************************* -** This file contains code for the VdbeSorter object, used in concert with -** a VdbeCursor to sort large numbers of keys (as may be required, for -** example, by CREATE INDEX statements on tables too large to fit in main -** memory). -*/ - - - -typedef struct VdbeSorterIter VdbeSorterIter; -typedef struct SorterRecord SorterRecord; -typedef struct FileWriter FileWriter; - -/* -** NOTES ON DATA STRUCTURE USED FOR N-WAY MERGES: -** -** As keys are added to the sorter, they are written to disk in a series -** of sorted packed-memory-arrays (PMAs). The size of each PMA is roughly -** the same as the cache-size allowed for temporary databases. In order -** to allow the caller to extract keys from the sorter in sorted order, -** all PMAs currently stored on disk must be merged together. This comment -** describes the data structure used to do so. The structure supports -** merging any number of arrays in a single pass with no redundant comparison -** operations. -** -** The aIter[] array contains an iterator for each of the PMAs being merged. -** An aIter[] iterator either points to a valid key or else is at EOF. For -** the purposes of the paragraphs below, we assume that the array is actually -** N elements in size, where N is the smallest power of 2 greater to or equal -** to the number of iterators being merged. The extra aIter[] elements are -** treated as if they are empty (always at EOF). -** -** The aTree[] array is also N elements in size. The value of N is stored in -** the VdbeSorter.nTree variable. 
-** -** The final (N/2) elements of aTree[] contain the results of comparing -** pairs of iterator keys together. Element i contains the result of -** comparing aIter[2*i-N] and aIter[2*i-N+1]. Whichever key is smaller, the -** aTree element is set to the index of it. -** -** For the purposes of this comparison, EOF is considered greater than any -** other key value. If the keys are equal (only possible with two EOF -** values), it doesn't matter which index is stored. -** -** The (N/4) elements of aTree[] that precede the final (N/2) described -** above contains the index of the smallest of each block of 4 iterators. -** And so on. So that aTree[1] contains the index of the iterator that -** currently points to the smallest key value. aTree[0] is unused. -** -** Example: -** -** aIter[0] -> Banana -** aIter[1] -> Feijoa -** aIter[2] -> Elderberry -** aIter[3] -> Currant -** aIter[4] -> Grapefruit -** aIter[5] -> Apple -** aIter[6] -> Durian -** aIter[7] -> EOF -** -** aTree[] = { X, 5 0, 5 0, 3, 5, 6 } -** -** The current element is "Apple" (the value of the key indicated by -** iterator 5). When the Next() operation is invoked, iterator 5 will -** be advanced to the next key in its segment. Say the next key is -** "Eggplant": -** -** aIter[5] -> Eggplant -** -** The contents of aTree[] are updated first by comparing the new iterator -** 5 key to the current key of iterator 4 (still "Grapefruit"). The iterator -** 5 value is still smaller, so aTree[6] is set to 5. And so on up the tree. -** The value of iterator 6 - "Durian" - is now smaller than that of iterator -** 5, so aTree[3] is set to 6. Key 0 is smaller than key 6 (BananaaAlloc); - sqlite3DbFree(db, pIter->aBuffer); - memset(pIter, 0, sizeof(VdbeSorterIter)); -} - -/* -** Read nByte bytes of data from the stream of data iterated by object p. -** If successful, set *ppOut to point to a buffer containing the data -** and return SQLITE_OK. Otherwise, if an error occurs, return an SQLite -** error code. -** -** The buffer indicated by *ppOut may only be considered valid until the -** next call to this function. -*/ -static int vdbeSorterIterRead( - sqlite3 *db, /* Database handle (for malloc) */ - VdbeSorterIter *p, /* Iterator */ - int nByte, /* Bytes of data to read */ - u8 **ppOut /* OUT: Pointer to buffer containing data */ -){ - int iBuf; /* Offset within buffer to read from */ - int nAvail; /* Bytes of data available in buffer */ - assert( p->aBuffer ); - - /* If there is no more data to be read from the buffer, read the next - ** p->nBuffer bytes of data from the file into it. Or, if there are less - ** than p->nBuffer bytes remaining in the PMA, read all remaining data. */ - iBuf = p->iReadOff % p->nBuffer; - if( iBuf==0 ){ - int nRead; /* Bytes to read from disk */ - int rc; /* sqlite3OsRead() return code */ - - /* Determine how many bytes of data to read. */ - if( (p->iEof - p->iReadOff) > (i64)p->nBuffer ){ - nRead = p->nBuffer; - }else{ - nRead = (int)(p->iEof - p->iReadOff); - } - assert( nRead>0 ); - - /* Read data from the file. Return early if an error occurs. */ - rc = sqlite3OsRead(p->pFile, p->aBuffer, nRead, p->iReadOff); - assert( rc!=SQLITE_IOERR_SHORT_READ ); - if( rc!=SQLITE_OK ) return rc; - } - nAvail = p->nBuffer - iBuf; - - if( nByte<=nAvail ){ - /* The requested data is available in the in-memory buffer. In this - ** case there is no need to make a copy of the data, just return a - ** pointer into the buffer to the caller. 
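The aIter[]/aTree[] comment above describes a classic tournament tree. The following standalone simplification reproduces the worked fruit example, using NULL for EOF and strcmp() in place of the real record comparator; it is a sketch of the idea, not the SQLite code itself.

#include <stdio.h>
#include <string.h>

#define N 8                      /* number of leaves; a power of two        */

static const char *aIter[N];     /* current key of each iterator; NULL==EOF */
static int aTree[N];             /* aTree[1] indexes the smallest key       */

/* Smaller of two leaves; EOF compares greater than everything. */
static int smaller(int i1, int i2){
  if( aIter[i1]==0 ) return i2;
  if( aIter[i2]==0 ) return i1;
  return strcmp(aIter[i1], aIter[i2])<=0 ? i1 : i2;
}

/* Recompute one internal node, in the spirit of vdbeSorterDoCompare(). */
static void recompute(int iOut){
  int i1, i2;
  if( iOut>=N/2 ){
    i1 = (iOut - N/2)*2;         /* children are leaves                     */
    i2 = i1+1;
  }else{
    i1 = aTree[iOut*2];          /* children are internal nodes             */
    i2 = aTree[iOut*2+1];
  }
  aTree[iOut] = smaller(i1, i2);
}

int main(void){
  const char *init[N] = {"Banana","Feijoa","Elderberry","Currant",
                         "Grapefruit","Apple","Durian",0 /* EOF */};
  memcpy(aIter, init, sizeof(aIter));
  for(int i=N-1; i>0; i--) recompute(i);
  printf("min = %s\n", aIter[aTree[1]]);        /* Apple */

  /* Advance the winning iterator and repair only the path to the root. */
  aIter[aTree[1]] = "Eggplant";
  for(int i=(N+aTree[1])/2; i>0; i/=2) recompute(i);
  printf("min = %s\n", aIter[aTree[1]]);        /* Banana */
  return 0;
}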
*/ - *ppOut = &p->aBuffer[iBuf]; - p->iReadOff += nByte; - }else{ - /* The requested data is not all available in the in-memory buffer. - ** In this case, allocate space at p->aAlloc[] to copy the requested - ** range into. Then return a copy of pointer p->aAlloc to the caller. */ - int nRem; /* Bytes remaining to copy */ - - /* Extend the p->aAlloc[] allocation if required. */ - if( p->nAllocnAlloc*2; - while( nByte>nNew ) nNew = nNew*2; - p->aAlloc = sqlite3DbReallocOrFree(db, p->aAlloc, nNew); - if( !p->aAlloc ) return SQLITE_NOMEM; - p->nAlloc = nNew; - } - - /* Copy as much data as is available in the buffer into the start of - ** p->aAlloc[]. */ - memcpy(p->aAlloc, &p->aBuffer[iBuf], nAvail); - p->iReadOff += nAvail; - nRem = nByte - nAvail; - - /* The following loop copies up to p->nBuffer bytes per iteration into - ** the p->aAlloc[] buffer. */ - while( nRem>0 ){ - int rc; /* vdbeSorterIterRead() return code */ - int nCopy; /* Number of bytes to copy */ - u8 *aNext; /* Pointer to buffer to copy data from */ - - nCopy = nRem; - if( nRem>p->nBuffer ) nCopy = p->nBuffer; - rc = vdbeSorterIterRead(db, p, nCopy, &aNext); - if( rc!=SQLITE_OK ) return rc; - assert( aNext!=p->aAlloc ); - memcpy(&p->aAlloc[nByte - nRem], aNext, nCopy); - nRem -= nCopy; - } - - *ppOut = p->aAlloc; - } - - return SQLITE_OK; -} - -/* -** Read a varint from the stream of data accessed by p. Set *pnOut to -** the value read. -*/ -static int vdbeSorterIterVarint(sqlite3 *db, VdbeSorterIter *p, u64 *pnOut){ - int iBuf; - - iBuf = p->iReadOff % p->nBuffer; - if( iBuf && (p->nBuffer-iBuf)>=9 ){ - p->iReadOff += sqlite3GetVarint(&p->aBuffer[iBuf], pnOut); - }else{ - u8 aVarint[16], *a; - int i = 0, rc; - do{ - rc = vdbeSorterIterRead(db, p, 1, &a); - if( rc ) return rc; - aVarint[(i++)&0xf] = a[0]; - }while( (a[0]&0x80)!=0 ); - sqlite3GetVarint(aVarint, pnOut); - } - - return SQLITE_OK; -} - - -/* -** Advance iterator pIter to the next key in its PMA. Return SQLITE_OK if -** no error occurs, or an SQLite error code if one does. -*/ -static int vdbeSorterIterNext( - sqlite3 *db, /* Database handle (for sqlite3DbMalloc() ) */ - VdbeSorterIter *pIter /* Iterator to advance */ -){ - int rc; /* Return Code */ - u64 nRec = 0; /* Size of record in bytes */ - - if( pIter->iReadOff>=pIter->iEof ){ - /* This is an EOF condition */ - vdbeSorterIterZero(db, pIter); - return SQLITE_OK; - } - - rc = vdbeSorterIterVarint(db, pIter, &nRec); - if( rc==SQLITE_OK ){ - pIter->nKey = (int)nRec; - rc = vdbeSorterIterRead(db, pIter, (int)nRec, &pIter->aKey); - } - - return rc; -} - -/* -** Initialize iterator pIter to scan through the PMA stored in file pFile -** starting at offset iStart and ending at offset iEof-1. This function -** leaves the iterator pointing to the first key in the PMA (or EOF if the -** PMA is empty). 
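vdbeSorterIterVarint() above pulls length prefixes out of the buffered PMA stream. For readers unfamiliar with the encoding, here is a simplified big-endian base-128 decoder in the same spirit; SQLite's real format additionally gives the ninth byte a full eight bits, which this sketch omits.

#include <stdint.h>
#include <stddef.h>

/* Simplified varint decoder: while the high bit of the current byte is
** set, seven more payload bits follow in the next byte. */
static size_t varint_get(const uint8_t *p, uint64_t *pOut){
  uint64_t v = 0;
  size_t i = 0;
  do{
    v = (v<<7) | (p[i] & 0x7f);
  }while( p[i++] & 0x80 );
  *pOut = v;
  return i;                 /* number of bytes consumed */
}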
-*/ -static int vdbeSorterIterInit( - sqlite3 *db, /* Database handle */ - const VdbeSorter *pSorter, /* Sorter object */ - i64 iStart, /* Start offset in pFile */ - VdbeSorterIter *pIter, /* Iterator to populate */ - i64 *pnByte /* IN/OUT: Increment this value by PMA size */ -){ - int rc = SQLITE_OK; - int nBuf; - - nBuf = sqlite3BtreeGetPageSize(db->aDb[0].pBt); - - assert( pSorter->iWriteOff>iStart ); - assert( pIter->aAlloc==0 ); - assert( pIter->aBuffer==0 ); - pIter->pFile = pSorter->pTemp1; - pIter->iReadOff = iStart; - pIter->nAlloc = 128; - pIter->aAlloc = (u8 *)sqlite3DbMallocRaw(db, pIter->nAlloc); - pIter->nBuffer = nBuf; - pIter->aBuffer = (u8 *)sqlite3DbMallocRaw(db, nBuf); - - if( !pIter->aBuffer ){ - rc = SQLITE_NOMEM; - }else{ - int iBuf; - - iBuf = iStart % nBuf; - if( iBuf ){ - int nRead = nBuf - iBuf; - if( (iStart + nRead) > pSorter->iWriteOff ){ - nRead = (int)(pSorter->iWriteOff - iStart); - } - rc = sqlite3OsRead( - pSorter->pTemp1, &pIter->aBuffer[iBuf], nRead, iStart - ); - } - - if( rc==SQLITE_OK ){ - u64 nByte; /* Size of PMA in bytes */ - pIter->iEof = pSorter->iWriteOff; - rc = vdbeSorterIterVarint(db, pIter, &nByte); - pIter->iEof = pIter->iReadOff + nByte; - *pnByte += nByte; - } - } - - if( rc==SQLITE_OK ){ - rc = vdbeSorterIterNext(db, pIter); - } - return rc; -} - - -/* -** Compare key1 (buffer pKey1, size nKey1 bytes) with key2 (buffer pKey2, -** size nKey2 bytes). Argument pKeyInfo supplies the collation functions -** used by the comparison. If an error occurs, return an SQLite error code. -** Otherwise, return SQLITE_OK and set *pRes to a negative, zero or positive -** value, depending on whether key1 is smaller, equal to or larger than key2. -** -** If the bOmitRowid argument is non-zero, assume both keys end in a rowid -** field. For the purposes of the comparison, ignore it. Also, if bOmitRowid -** is true and key1 contains even a single NULL value, it is considered to -** be less than key2. Even if key2 also contains NULL values. -** -** If pKey2 is passed a NULL pointer, then it is assumed that the pCsr->aSpace -** has been allocated and contains an unpacked record that is used as key2. -*/ -static void vdbeSorterCompare( - const VdbeCursor *pCsr, /* Cursor object (for pKeyInfo) */ - int nIgnore, /* Ignore the last nIgnore fields */ - const void *pKey1, int nKey1, /* Left side of comparison */ - const void *pKey2, int nKey2, /* Right side of comparison */ - int *pRes /* OUT: Result of comparison */ -){ - KeyInfo *pKeyInfo = pCsr->pKeyInfo; - VdbeSorter *pSorter = pCsr->pSorter; - UnpackedRecord *r2 = pSorter->pUnpacked; - int i; - - if( pKey2 ){ - sqlite3VdbeRecordUnpack(pKeyInfo, nKey2, pKey2, r2); - } - - if( nIgnore ){ - r2->nField = pKeyInfo->nField - nIgnore; - assert( r2->nField>0 ); - for(i=0; inField; i++){ - if( r2->aMem[i].flags & MEM_Null ){ - *pRes = -1; - return; - } - } - assert( r2->default_rc==0 ); - } - - *pRes = sqlite3VdbeRecordCompare(nKey1, pKey1, r2, 0); -} - -/* -** This function is called to compare two iterator keys when merging -** multiple b-tree segments. Parameter iOut is the index of the aTree[] -** value to recalculate. 
-*/ -static int vdbeSorterDoCompare(const VdbeCursor *pCsr, int iOut){ - VdbeSorter *pSorter = pCsr->pSorter; - int i1; - int i2; - int iRes; - VdbeSorterIter *p1; - VdbeSorterIter *p2; - - assert( iOutnTree && iOut>0 ); - - if( iOut>=(pSorter->nTree/2) ){ - i1 = (iOut - pSorter->nTree/2) * 2; - i2 = i1 + 1; - }else{ - i1 = pSorter->aTree[iOut*2]; - i2 = pSorter->aTree[iOut*2+1]; - } - - p1 = &pSorter->aIter[i1]; - p2 = &pSorter->aIter[i2]; - - if( p1->pFile==0 ){ - iRes = i2; - }else if( p2->pFile==0 ){ - iRes = i1; - }else{ - int res; - assert( pCsr->pSorter->pUnpacked!=0 ); /* allocated in vdbeSorterMerge() */ - vdbeSorterCompare( - pCsr, 0, p1->aKey, p1->nKey, p2->aKey, p2->nKey, &res - ); - if( res<=0 ){ - iRes = i1; - }else{ - iRes = i2; - } - } - - pSorter->aTree[iOut] = iRes; - return SQLITE_OK; -} - -/* -** Initialize the temporary index cursor just opened as a sorter cursor. -*/ -SQLITE_PRIVATE int sqlite3VdbeSorterInit(sqlite3 *db, VdbeCursor *pCsr){ - int pgsz; /* Page size of main database */ - int mxCache; /* Cache size */ - VdbeSorter *pSorter; /* The new sorter */ - char *d; /* Dummy */ - - assert( pCsr->pKeyInfo && pCsr->pBt==0 ); - pCsr->pSorter = pSorter = sqlite3DbMallocZero(db, sizeof(VdbeSorter)); - if( pSorter==0 ){ - return SQLITE_NOMEM; - } - - pSorter->pUnpacked = sqlite3VdbeAllocUnpackedRecord(pCsr->pKeyInfo, 0, 0, &d); - if( pSorter->pUnpacked==0 ) return SQLITE_NOMEM; - assert( pSorter->pUnpacked==(UnpackedRecord *)d ); - - if( !sqlite3TempInMemory(db) ){ - pgsz = sqlite3BtreeGetPageSize(db->aDb[0].pBt); - pSorter->mnPmaSize = SORTER_MIN_WORKING * pgsz; - mxCache = db->aDb[0].pSchema->cache_size; - if( mxCachemxPmaSize = mxCache * pgsz; - } - - return SQLITE_OK; -} - -/* -** Free the list of sorted records starting at pRecord. -*/ -static void vdbeSorterRecordFree(sqlite3 *db, SorterRecord *pRecord){ - SorterRecord *p; - SorterRecord *pNext; - for(p=pRecord; p; p=pNext){ - pNext = p->pNext; - sqlite3DbFree(db, p); - } -} - -/* -** Reset a sorting cursor back to its original empty state. -*/ -SQLITE_PRIVATE void sqlite3VdbeSorterReset(sqlite3 *db, VdbeSorter *pSorter){ - if( pSorter->aIter ){ - int i; - for(i=0; inTree; i++){ - vdbeSorterIterZero(db, &pSorter->aIter[i]); - } - sqlite3DbFree(db, pSorter->aIter); - pSorter->aIter = 0; - } - if( pSorter->pTemp1 ){ - sqlite3OsCloseFree(pSorter->pTemp1); - pSorter->pTemp1 = 0; - } - vdbeSorterRecordFree(db, pSorter->pRecord); - pSorter->pRecord = 0; - pSorter->iWriteOff = 0; - pSorter->iReadOff = 0; - pSorter->nInMemory = 0; - pSorter->nTree = 0; - pSorter->nPMA = 0; - pSorter->aTree = 0; -} - - -/* -** Free any cursor components allocated by sqlite3VdbeSorterXXX routines. -*/ -SQLITE_PRIVATE void sqlite3VdbeSorterClose(sqlite3 *db, VdbeCursor *pCsr){ - VdbeSorter *pSorter = pCsr->pSorter; - if( pSorter ){ - sqlite3VdbeSorterReset(db, pSorter); - sqlite3DbFree(db, pSorter->pUnpacked); - sqlite3DbFree(db, pSorter); - pCsr->pSorter = 0; - } -} - -/* -** Allocate space for a file-handle and open a temporary file. If successful, -** set *ppFile to point to the malloc'd file-handle and return SQLITE_OK. -** Otherwise, set *ppFile to 0 and return an SQLite error code. -*/ -static int vdbeSorterOpenTempFile(sqlite3 *db, sqlite3_file **ppFile){ - int dummy; - return sqlite3OsOpenMalloc(db->pVfs, 0, ppFile, - SQLITE_OPEN_TEMP_JOURNAL | - SQLITE_OPEN_READWRITE | SQLITE_OPEN_CREATE | - SQLITE_OPEN_EXCLUSIVE | SQLITE_OPEN_DELETEONCLOSE, &dummy - ); -} - -/* -** Merge the two sorted lists p1 and p2 into a single list. 
-** Set *ppOut to the head of the new list. -*/ -static void vdbeSorterMerge( - const VdbeCursor *pCsr, /* For pKeyInfo */ - SorterRecord *p1, /* First list to merge */ - SorterRecord *p2, /* Second list to merge */ - SorterRecord **ppOut /* OUT: Head of merged list */ -){ - SorterRecord *pFinal = 0; - SorterRecord **pp = &pFinal; - void *pVal2 = p2 ? p2->pVal : 0; - - while( p1 && p2 ){ - int res; - vdbeSorterCompare(pCsr, 0, p1->pVal, p1->nVal, pVal2, p2->nVal, &res); - if( res<=0 ){ - *pp = p1; - pp = &p1->pNext; - p1 = p1->pNext; - pVal2 = 0; - }else{ - *pp = p2; - pp = &p2->pNext; - p2 = p2->pNext; - if( p2==0 ) break; - pVal2 = p2->pVal; - } - } - *pp = p1 ? p1 : p2; - *ppOut = pFinal; -} - -/* -** Sort the linked list of records headed at pCsr->pRecord. Return SQLITE_OK -** if successful, or an SQLite error code (i.e. SQLITE_NOMEM) if an error -** occurs. -*/ -static int vdbeSorterSort(const VdbeCursor *pCsr){ - int i; - SorterRecord **aSlot; - SorterRecord *p; - VdbeSorter *pSorter = pCsr->pSorter; - - aSlot = (SorterRecord **)sqlite3MallocZero(64 * sizeof(SorterRecord *)); - if( !aSlot ){ - return SQLITE_NOMEM; - } - - p = pSorter->pRecord; - while( p ){ - SorterRecord *pNext = p->pNext; - p->pNext = 0; - for(i=0; aSlot[i]; i++){ - vdbeSorterMerge(pCsr, p, aSlot[i], &p); - aSlot[i] = 0; - } - aSlot[i] = p; - p = pNext; - } - - p = 0; - for(i=0; i<64; i++){ - vdbeSorterMerge(pCsr, p, aSlot[i], &p); - } - pSorter->pRecord = p; - - sqlite3_free(aSlot); - return SQLITE_OK; -} - -/* -** Initialize a file-writer object. -*/ -static void fileWriterInit( - sqlite3 *db, /* Database (for malloc) */ - sqlite3_file *pFile, /* File to write to */ - FileWriter *p, /* Object to populate */ - i64 iStart /* Offset of pFile to begin writing at */ -){ - int nBuf = sqlite3BtreeGetPageSize(db->aDb[0].pBt); - - memset(p, 0, sizeof(FileWriter)); - p->aBuffer = (u8 *)sqlite3DbMallocRaw(db, nBuf); - if( !p->aBuffer ){ - p->eFWErr = SQLITE_NOMEM; - }else{ - p->iBufEnd = p->iBufStart = (iStart % nBuf); - p->iWriteOff = iStart - p->iBufStart; - p->nBuffer = nBuf; - p->pFile = pFile; - } -} - -/* -** Write nData bytes of data to the file-write object. Return SQLITE_OK -** if successful, or an SQLite error code if an error occurs. -*/ -static void fileWriterWrite(FileWriter *p, u8 *pData, int nData){ - int nRem = nData; - while( nRem>0 && p->eFWErr==0 ){ - int nCopy = nRem; - if( nCopy>(p->nBuffer - p->iBufEnd) ){ - nCopy = p->nBuffer - p->iBufEnd; - } - - memcpy(&p->aBuffer[p->iBufEnd], &pData[nData-nRem], nCopy); - p->iBufEnd += nCopy; - if( p->iBufEnd==p->nBuffer ){ - p->eFWErr = sqlite3OsWrite(p->pFile, - &p->aBuffer[p->iBufStart], p->iBufEnd - p->iBufStart, - p->iWriteOff + p->iBufStart - ); - p->iBufStart = p->iBufEnd = 0; - p->iWriteOff += p->nBuffer; - } - assert( p->iBufEndnBuffer ); - - nRem -= nCopy; - } -} - -/* -** Flush any buffered data to disk and clean up the file-writer object. -** The results of using the file-writer after this call are undefined. -** Return SQLITE_OK if flushing the buffered data succeeds or is not -** required. Otherwise, return an SQLite error code. -** -** Before returning, set *piEof to the offset immediately following the -** last byte written to the file. 
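vdbeSorterSort() above uses the classic trick of keeping at most one pending sorted list per power-of-two size in aSlot[]. The standalone sketch below shows the same shape on a toy singly linked list of integers; the Node type is a stand-in for SorterRecord, not the real layout.

#include <stdio.h>

typedef struct Node { int v; struct Node *next; } Node;

/* Merge two already-sorted lists (stable, ascending). */
static Node *merge(Node *a, Node *b){
  Node head = {0,0}, *tail = &head;
  while( a && b ){
    if( a->v <= b->v ){ tail->next = a; a = a->next; }
    else              { tail->next = b; b = b->next; }
    tail = tail->next;
  }
  tail->next = a ? a : b;
  return head.next;
}

/* Peel nodes off the input one at a time and fold them into aSlot[],
** where aSlot[i] holds either NULL or a sorted list of about 2^i nodes. */
static Node *slot_sort(Node *p){
  Node *aSlot[64] = {0};
  while( p ){
    Node *next = p->next;
    p->next = 0;
    int i;
    for(i=0; aSlot[i]; i++){
      p = merge(p, aSlot[i]);
      aSlot[i] = 0;
    }
    aSlot[i] = p;
    p = next;
  }
  Node *out = 0;
  for(int i=0; i<64; i++) out = merge(out, aSlot[i]);
  return out;
}

int main(void){
  int vals[] = {5,1,4,2,3};
  Node n[5];
  for(int i=0; i<5; i++){ n[i].v = vals[i]; n[i].next = i<4 ? &n[i+1] : 0; }
  for(Node *p = slot_sort(&n[0]); p; p = p->next) printf("%d ", p->v);
  printf("\n");
  return 0;
}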
-*/ -static int fileWriterFinish(sqlite3 *db, FileWriter *p, i64 *piEof){ - int rc; - if( p->eFWErr==0 && ALWAYS(p->aBuffer) && p->iBufEnd>p->iBufStart ){ - p->eFWErr = sqlite3OsWrite(p->pFile, - &p->aBuffer[p->iBufStart], p->iBufEnd - p->iBufStart, - p->iWriteOff + p->iBufStart - ); - } - *piEof = (p->iWriteOff + p->iBufEnd); - sqlite3DbFree(db, p->aBuffer); - rc = p->eFWErr; - memset(p, 0, sizeof(FileWriter)); - return rc; -} - -/* -** Write value iVal encoded as a varint to the file-write object. Return -** SQLITE_OK if successful, or an SQLite error code if an error occurs. -*/ -static void fileWriterWriteVarint(FileWriter *p, u64 iVal){ - int nByte; - u8 aByte[10]; - nByte = sqlite3PutVarint(aByte, iVal); - fileWriterWrite(p, aByte, nByte); -} - -/* -** Write the current contents of the in-memory linked-list to a PMA. Return -** SQLITE_OK if successful, or an SQLite error code otherwise. -** -** The format of a PMA is: -** -** * A varint. This varint contains the total number of bytes of content -** in the PMA (not including the varint itself). -** -** * One or more records packed end-to-end in order of ascending keys. -** Each record consists of a varint followed by a blob of data (the -** key). The varint is the number of bytes in the blob of data. -*/ -static int vdbeSorterListToPMA(sqlite3 *db, const VdbeCursor *pCsr){ - int rc = SQLITE_OK; /* Return code */ - VdbeSorter *pSorter = pCsr->pSorter; - FileWriter writer; - - memset(&writer, 0, sizeof(FileWriter)); - - if( pSorter->nInMemory==0 ){ - assert( pSorter->pRecord==0 ); - return rc; - } - - rc = vdbeSorterSort(pCsr); - - /* If the first temporary PMA file has not been opened, open it now. */ - if( rc==SQLITE_OK && pSorter->pTemp1==0 ){ - rc = vdbeSorterOpenTempFile(db, &pSorter->pTemp1); - assert( rc!=SQLITE_OK || pSorter->pTemp1 ); - assert( pSorter->iWriteOff==0 ); - assert( pSorter->nPMA==0 ); - } - - if( rc==SQLITE_OK ){ - SorterRecord *p; - SorterRecord *pNext = 0; - - fileWriterInit(db, pSorter->pTemp1, &writer, pSorter->iWriteOff); - pSorter->nPMA++; - fileWriterWriteVarint(&writer, pSorter->nInMemory); - for(p=pSorter->pRecord; p; p=pNext){ - pNext = p->pNext; - fileWriterWriteVarint(&writer, p->nVal); - fileWriterWrite(&writer, p->pVal, p->nVal); - sqlite3DbFree(db, p); - } - pSorter->pRecord = p; - rc = fileWriterFinish(db, &writer, &pSorter->iWriteOff); - } - - return rc; -} - -/* -** Add a record to the sorter. -*/ -SQLITE_PRIVATE int sqlite3VdbeSorterWrite( - sqlite3 *db, /* Database handle */ - const VdbeCursor *pCsr, /* Sorter cursor */ - Mem *pVal /* Memory cell containing record */ -){ - VdbeSorter *pSorter = pCsr->pSorter; - int rc = SQLITE_OK; /* Return Code */ - SorterRecord *pNew; /* New list element */ - - assert( pSorter ); - pSorter->nInMemory += sqlite3VarintLen(pVal->n) + pVal->n; - - pNew = (SorterRecord *)sqlite3DbMallocRaw(db, pVal->n + sizeof(SorterRecord)); - if( pNew==0 ){ - rc = SQLITE_NOMEM; - }else{ - pNew->pVal = (void *)&pNew[1]; - memcpy(pNew->pVal, pVal->z, pVal->n); - pNew->nVal = pVal->n; - pNew->pNext = pSorter->pRecord; - pSorter->pRecord = pNew; - } - - /* See if the contents of the sorter should now be written out. They - ** are written out when either of the following are true: - ** - ** * The total memory allocated for the in-memory list is greater - ** than (page-size * cache-size), or - ** - ** * The total memory allocated for the in-memory list is greater - ** than (page-size * 10) and sqlite3HeapNearlyFull() returns true. 
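Putting the PMA format comment above into one calculation: each PMA contributes a leading varint holding the total content size, followed by one varint plus payload per key, which is exactly the byte count the SQLITE_DEBUG assertion on iWriteOff double-checks after a flush. A hedged sketch; varint_len() is a simplified stand-in for sqlite3VarintLen().

#include <stdint.h>

static int varint_len(uint64_t v){      /* stand-in for sqlite3VarintLen() */
  int n = 1;
  while( v >>= 7 ) n++;
  return n;
}

/* Bytes a PMA occupies on disk, given the lengths of its keys:
**   varint(content) + content, where
**   content = sum over keys of varint(nKey) + nKey.                     */
static uint64_t pma_size_on_disk(const uint64_t *aKeyLen, int nKey){
  uint64_t content = 0;
  for(int i=0; i<nKey; i++) content += varint_len(aKeyLen[i]) + aKeyLen[i];
  return varint_len(content) + content;
}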
- */ - if( rc==SQLITE_OK && pSorter->mxPmaSize>0 && ( - (pSorter->nInMemory>pSorter->mxPmaSize) - || (pSorter->nInMemory>pSorter->mnPmaSize && sqlite3HeapNearlyFull()) - )){ -#ifdef SQLITE_DEBUG - i64 nExpect = pSorter->iWriteOff - + sqlite3VarintLen(pSorter->nInMemory) - + pSorter->nInMemory; -#endif - rc = vdbeSorterListToPMA(db, pCsr); - pSorter->nInMemory = 0; - assert( rc!=SQLITE_OK || (nExpect==pSorter->iWriteOff) ); - } - - return rc; -} - -/* -** Helper function for sqlite3VdbeSorterRewind(). -*/ -static int vdbeSorterInitMerge( - sqlite3 *db, /* Database handle */ - const VdbeCursor *pCsr, /* Cursor handle for this sorter */ - i64 *pnByte /* Sum of bytes in all opened PMAs */ -){ - VdbeSorter *pSorter = pCsr->pSorter; - int rc = SQLITE_OK; /* Return code */ - int i; /* Used to iterator through aIter[] */ - i64 nByte = 0; /* Total bytes in all opened PMAs */ - - /* Initialize the iterators. */ - for(i=0; iaIter[i]; - rc = vdbeSorterIterInit(db, pSorter, pSorter->iReadOff, pIter, &nByte); - pSorter->iReadOff = pIter->iEof; - assert( rc!=SQLITE_OK || pSorter->iReadOff<=pSorter->iWriteOff ); - if( rc!=SQLITE_OK || pSorter->iReadOff>=pSorter->iWriteOff ) break; - } - - /* Initialize the aTree[] array. */ - for(i=pSorter->nTree-1; rc==SQLITE_OK && i>0; i--){ - rc = vdbeSorterDoCompare(pCsr, i); - } - - *pnByte = nByte; - return rc; -} - -/* -** Once the sorter has been populated, this function is called to prepare -** for iterating through its contents in sorted order. -*/ -SQLITE_PRIVATE int sqlite3VdbeSorterRewind(sqlite3 *db, const VdbeCursor *pCsr, int *pbEof){ - VdbeSorter *pSorter = pCsr->pSorter; - int rc; /* Return code */ - sqlite3_file *pTemp2 = 0; /* Second temp file to use */ - i64 iWrite2 = 0; /* Write offset for pTemp2 */ - int nIter; /* Number of iterators used */ - int nByte; /* Bytes of space required for aIter/aTree */ - int N = 2; /* Power of 2 >= nIter */ - - assert( pSorter ); - - /* If no data has been written to disk, then do not do so now. Instead, - ** sort the VdbeSorter.pRecord list. The vdbe layer will read data directly - ** from the in-memory list. */ - if( pSorter->nPMA==0 ){ - *pbEof = !pSorter->pRecord; - assert( pSorter->aTree==0 ); - return vdbeSorterSort(pCsr); - } - - /* Write the current in-memory list to a PMA. */ - rc = vdbeSorterListToPMA(db, pCsr); - if( rc!=SQLITE_OK ) return rc; - - /* Allocate space for aIter[] and aTree[]. */ - nIter = pSorter->nPMA; - if( nIter>SORTER_MAX_MERGE_COUNT ) nIter = SORTER_MAX_MERGE_COUNT; - assert( nIter>0 ); - while( NaIter = (VdbeSorterIter *)sqlite3DbMallocZero(db, nByte); - if( !pSorter->aIter ) return SQLITE_NOMEM; - pSorter->aTree = (int *)&pSorter->aIter[N]; - pSorter->nTree = N; - - do { - int iNew; /* Index of new, merged, PMA */ - - for(iNew=0; - rc==SQLITE_OK && iNew*SORTER_MAX_MERGE_COUNTnPMA; - iNew++ - ){ - int rc2; /* Return code from fileWriterFinish() */ - FileWriter writer; /* Object used to write to disk */ - i64 nWrite; /* Number of bytes in new PMA */ - - memset(&writer, 0, sizeof(FileWriter)); - - /* If there are SORTER_MAX_MERGE_COUNT or less PMAs in file pTemp1, - ** initialize an iterator for each of them and break out of the loop. - ** These iterators will be incrementally merged as the VDBE layer calls - ** sqlite3VdbeSorterNext(). - ** - ** Otherwise, if pTemp1 contains more than SORTER_MAX_MERGE_COUNT PMAs, - ** initialize interators for SORTER_MAX_MERGE_COUNT of them. These PMAs - ** are merged into a single PMA that is written to file pTemp2. 
- */ - rc = vdbeSorterInitMerge(db, pCsr, &nWrite); - assert( rc!=SQLITE_OK || pSorter->aIter[ pSorter->aTree[1] ].pFile ); - if( rc!=SQLITE_OK || pSorter->nPMA<=SORTER_MAX_MERGE_COUNT ){ - break; - } - - /* Open the second temp file, if it is not already open. */ - if( pTemp2==0 ){ - assert( iWrite2==0 ); - rc = vdbeSorterOpenTempFile(db, &pTemp2); - } - - if( rc==SQLITE_OK ){ - int bEof = 0; - fileWriterInit(db, pTemp2, &writer, iWrite2); - fileWriterWriteVarint(&writer, nWrite); - while( rc==SQLITE_OK && bEof==0 ){ - VdbeSorterIter *pIter = &pSorter->aIter[ pSorter->aTree[1] ]; - assert( pIter->pFile ); - - fileWriterWriteVarint(&writer, pIter->nKey); - fileWriterWrite(&writer, pIter->aKey, pIter->nKey); - rc = sqlite3VdbeSorterNext(db, pCsr, &bEof); - } - rc2 = fileWriterFinish(db, &writer, &iWrite2); - if( rc==SQLITE_OK ) rc = rc2; - } - } - - if( pSorter->nPMA<=SORTER_MAX_MERGE_COUNT ){ - break; - }else{ - sqlite3_file *pTmp = pSorter->pTemp1; - pSorter->nPMA = iNew; - pSorter->pTemp1 = pTemp2; - pTemp2 = pTmp; - pSorter->iWriteOff = iWrite2; - pSorter->iReadOff = 0; - iWrite2 = 0; - } - }while( rc==SQLITE_OK ); - - if( pTemp2 ){ - sqlite3OsCloseFree(pTemp2); - } - *pbEof = (pSorter->aIter[pSorter->aTree[1]].pFile==0); - return rc; -} - -/* -** Advance to the next element in the sorter. -*/ -SQLITE_PRIVATE int sqlite3VdbeSorterNext(sqlite3 *db, const VdbeCursor *pCsr, int *pbEof){ - VdbeSorter *pSorter = pCsr->pSorter; - int rc; /* Return code */ - - if( pSorter->aTree ){ - int iPrev = pSorter->aTree[1];/* Index of iterator to advance */ - rc = vdbeSorterIterNext(db, &pSorter->aIter[iPrev]); - if( rc==SQLITE_OK ){ - int i; /* Index of aTree[] to recalculate */ - VdbeSorterIter *pIter1; /* First iterator to compare */ - VdbeSorterIter *pIter2; /* Second iterator to compare */ - u8 *pKey2; /* To pIter2->aKey, or 0 if record cached */ - - /* Find the first two iterators to compare. The one that was just - ** advanced (iPrev) and the one next to it in the array. */ - pIter1 = &pSorter->aIter[(iPrev & 0xFFFE)]; - pIter2 = &pSorter->aIter[(iPrev | 0x0001)]; - pKey2 = pIter2->aKey; - - for(i=(pSorter->nTree+iPrev)/2; i>0; i=i/2){ - /* Compare pIter1 and pIter2. Store the result in variable iRes. */ - int iRes; - if( pIter1->pFile==0 ){ - iRes = +1; - }else if( pIter2->pFile==0 ){ - iRes = -1; - }else{ - vdbeSorterCompare(pCsr, 0, - pIter1->aKey, pIter1->nKey, pKey2, pIter2->nKey, &iRes - ); - } - - /* If pIter1 contained the smaller value, set aTree[i] to its index. - ** Then set pIter2 to the next iterator to compare to pIter1. In this - ** case there is no cache of pIter2 in pSorter->pUnpacked, so set - ** pKey2 to point to the record belonging to pIter2. - ** - ** Alternatively, if pIter2 contains the smaller of the two values, - ** set aTree[i] to its index and update pIter1. If vdbeSorterCompare() - ** was actually called above, then pSorter->pUnpacked now contains - ** a value equivalent to pIter2. So set pKey2 to NULL to prevent - ** vdbeSorterCompare() from decoding pIter2 again. 
*/ - if( iRes<=0 ){ - pSorter->aTree[i] = (int)(pIter1 - pSorter->aIter); - pIter2 = &pSorter->aIter[ pSorter->aTree[i ^ 0x0001] ]; - pKey2 = pIter2->aKey; - }else{ - if( pIter1->pFile ) pKey2 = 0; - pSorter->aTree[i] = (int)(pIter2 - pSorter->aIter); - pIter1 = &pSorter->aIter[ pSorter->aTree[i ^ 0x0001] ]; - } - - } - *pbEof = (pSorter->aIter[pSorter->aTree[1]].pFile==0); - } - }else{ - SorterRecord *pFree = pSorter->pRecord; - pSorter->pRecord = pFree->pNext; - pFree->pNext = 0; - vdbeSorterRecordFree(db, pFree); - *pbEof = !pSorter->pRecord; - rc = SQLITE_OK; - } - return rc; -} - -/* -** Return a pointer to a buffer owned by the sorter that contains the -** current key. -*/ -static void *vdbeSorterRowkey( - const VdbeSorter *pSorter, /* Sorter object */ - int *pnKey /* OUT: Size of current key in bytes */ -){ - void *pKey; - if( pSorter->aTree ){ - VdbeSorterIter *pIter; - pIter = &pSorter->aIter[ pSorter->aTree[1] ]; - *pnKey = pIter->nKey; - pKey = pIter->aKey; - }else{ - *pnKey = pSorter->pRecord->nVal; - pKey = pSorter->pRecord->pVal; - } - return pKey; -} - -/* -** Copy the current sorter key into the memory cell pOut. -*/ -SQLITE_PRIVATE int sqlite3VdbeSorterRowkey(const VdbeCursor *pCsr, Mem *pOut){ - VdbeSorter *pSorter = pCsr->pSorter; - void *pKey; int nKey; /* Sorter key to copy into pOut */ - - pKey = vdbeSorterRowkey(pSorter, &nKey); - if( sqlite3VdbeMemGrow(pOut, nKey, 0) ){ - return SQLITE_NOMEM; - } - pOut->n = nKey; - MemSetTypeFlag(pOut, MEM_Blob); - memcpy(pOut->z, pKey, nKey); - - return SQLITE_OK; -} - -/* -** Compare the key in memory cell pVal with the key that the sorter cursor -** passed as the first argument currently points to. For the purposes of -** the comparison, ignore the rowid field at the end of each record. -** -** If an error occurs, return an SQLite error code (i.e. SQLITE_NOMEM). -** Otherwise, set *pRes to a negative, zero or positive value if the -** key in pVal is smaller than, equal to or larger than the current sorter -** key. -*/ -SQLITE_PRIVATE int sqlite3VdbeSorterCompare( - const VdbeCursor *pCsr, /* Sorter cursor */ - Mem *pVal, /* Value to compare to current sorter key */ - int nIgnore, /* Ignore this many fields at the end */ - int *pRes /* OUT: Result of comparison */ -){ - VdbeSorter *pSorter = pCsr->pSorter; - void *pKey; int nKey; /* Sorter key to compare pVal with */ - - pKey = vdbeSorterRowkey(pSorter, &nKey); - vdbeSorterCompare(pCsr, nIgnore, pVal->z, pVal->n, pKey, nKey, pRes); - return SQLITE_OK; -} - -/************** End of vdbesort.c ********************************************/ -/************** Begin file journal.c *****************************************/ -/* -** 2007 August 22 -** -** The author disclaims copyright to this source code. In place of -** a legal notice, here is a blessing: -** -** May you do good and not evil. -** May you find forgiveness for yourself and forgive others. -** May you share freely, never taking more than you give. -** -************************************************************************* -** -** This file implements a special kind of sqlite3_file object used -** by SQLite to create journal files if the atomic-write optimization -** is enabled. -** -** The distinctive characteristic of this sqlite3_file is that the -** actual on disk file is created lazily. When the file is created, -** the caller specifies a buffer size for an in-memory buffer to -** be used to service read() and write() requests. 
The actual file -** on disk is not created or populated until either: -** -** 1) The in-memory representation grows too large for the allocated -** buffer, or -** 2) The sqlite3JournalCreate() function is called. -*/ -#ifdef SQLITE_ENABLE_ATOMIC_WRITE - - -/* -** A JournalFile object is a subclass of sqlite3_file used by -** as an open file handle for journal files. -*/ -struct JournalFile { - sqlite3_io_methods *pMethod; /* I/O methods on journal files */ - int nBuf; /* Size of zBuf[] in bytes */ - char *zBuf; /* Space to buffer journal writes */ - int iSize; /* Amount of zBuf[] currently used */ - int flags; /* xOpen flags */ - sqlite3_vfs *pVfs; /* The "real" underlying VFS */ - sqlite3_file *pReal; /* The "real" underlying file descriptor */ - const char *zJournal; /* Name of the journal file */ -}; -typedef struct JournalFile JournalFile; - -/* -** If it does not already exists, create and populate the on-disk file -** for JournalFile p. -*/ -static int createFile(JournalFile *p){ - int rc = SQLITE_OK; - if( !p->pReal ){ - sqlite3_file *pReal = (sqlite3_file *)&p[1]; - rc = sqlite3OsOpen(p->pVfs, p->zJournal, pReal, p->flags, 0); - if( rc==SQLITE_OK ){ - p->pReal = pReal; - if( p->iSize>0 ){ - assert(p->iSize<=p->nBuf); - rc = sqlite3OsWrite(p->pReal, p->zBuf, p->iSize, 0); - } - if( rc!=SQLITE_OK ){ - /* If an error occurred while writing to the file, close it before - ** returning. This way, SQLite uses the in-memory journal data to - ** roll back changes made to the internal page-cache before this - ** function was called. */ - sqlite3OsClose(pReal); - p->pReal = 0; - } - } - } - return rc; -} - -/* -** Close the file. -*/ -static int jrnlClose(sqlite3_file *pJfd){ - JournalFile *p = (JournalFile *)pJfd; - if( p->pReal ){ - sqlite3OsClose(p->pReal); - } - sqlite3_free(p->zBuf); - return SQLITE_OK; -} - -/* -** Read data from the file. -*/ -static int jrnlRead( - sqlite3_file *pJfd, /* The journal file from which to read */ - void *zBuf, /* Put the results here */ - int iAmt, /* Number of bytes to read */ - sqlite_int64 iOfst /* Begin reading at this offset */ -){ - int rc = SQLITE_OK; - JournalFile *p = (JournalFile *)pJfd; - if( p->pReal ){ - rc = sqlite3OsRead(p->pReal, zBuf, iAmt, iOfst); - }else if( (iAmt+iOfst)>p->iSize ){ - rc = SQLITE_IOERR_SHORT_READ; - }else{ - memcpy(zBuf, &p->zBuf[iOfst], iAmt); - } - return rc; -} - -/* -** Write data to the file. -*/ -static int jrnlWrite( - sqlite3_file *pJfd, /* The journal file into which to write */ - const void *zBuf, /* Take data to be written from here */ - int iAmt, /* Number of bytes to write */ - sqlite_int64 iOfst /* Begin writing at this offset into the file */ -){ - int rc = SQLITE_OK; - JournalFile *p = (JournalFile *)pJfd; - if( !p->pReal && (iOfst+iAmt)>p->nBuf ){ - rc = createFile(p); - } - if( rc==SQLITE_OK ){ - if( p->pReal ){ - rc = sqlite3OsWrite(p->pReal, zBuf, iAmt, iOfst); - }else{ - memcpy(&p->zBuf[iOfst], zBuf, iAmt); - if( p->iSize<(iOfst+iAmt) ){ - p->iSize = (iOfst+iAmt); - } - } - } - return rc; -} - -/* -** Truncate the file. -*/ -static int jrnlTruncate(sqlite3_file *pJfd, sqlite_int64 size){ - int rc = SQLITE_OK; - JournalFile *p = (JournalFile *)pJfd; - if( p->pReal ){ - rc = sqlite3OsTruncate(p->pReal, size); - }else if( sizeiSize ){ - p->iSize = size; - } - return rc; -} - -/* -** Sync the file. 
-*/ -static int jrnlSync(sqlite3_file *pJfd, int flags){ - int rc; - JournalFile *p = (JournalFile *)pJfd; - if( p->pReal ){ - rc = sqlite3OsSync(p->pReal, flags); - }else{ - rc = SQLITE_OK; - } - return rc; -} - -/* -** Query the size of the file in bytes. -*/ -static int jrnlFileSize(sqlite3_file *pJfd, sqlite_int64 *pSize){ - int rc = SQLITE_OK; - JournalFile *p = (JournalFile *)pJfd; - if( p->pReal ){ - rc = sqlite3OsFileSize(p->pReal, pSize); - }else{ - *pSize = (sqlite_int64) p->iSize; - } - return rc; -} - -/* -** Table of methods for JournalFile sqlite3_file object. -*/ -static struct sqlite3_io_methods JournalFileMethods = { - 1, /* iVersion */ - jrnlClose, /* xClose */ - jrnlRead, /* xRead */ - jrnlWrite, /* xWrite */ - jrnlTruncate, /* xTruncate */ - jrnlSync, /* xSync */ - jrnlFileSize, /* xFileSize */ - 0, /* xLock */ - 0, /* xUnlock */ - 0, /* xCheckReservedLock */ - 0, /* xFileControl */ - 0, /* xSectorSize */ - 0, /* xDeviceCharacteristics */ - 0, /* xShmMap */ - 0, /* xShmLock */ - 0, /* xShmBarrier */ - 0 /* xShmUnmap */ -}; - -/* -** Open a journal file. -*/ -SQLITE_PRIVATE int sqlite3JournalOpen( - sqlite3_vfs *pVfs, /* The VFS to use for actual file I/O */ - const char *zName, /* Name of the journal file */ - sqlite3_file *pJfd, /* Preallocated, blank file handle */ - int flags, /* Opening flags */ - int nBuf /* Bytes buffered before opening the file */ -){ - JournalFile *p = (JournalFile *)pJfd; - memset(p, 0, sqlite3JournalSize(pVfs)); - if( nBuf>0 ){ - p->zBuf = sqlite3MallocZero(nBuf); - if( !p->zBuf ){ - return SQLITE_NOMEM; - } - }else{ - return sqlite3OsOpen(pVfs, zName, pJfd, flags, 0); - } - p->pMethod = &JournalFileMethods; - p->nBuf = nBuf; - p->flags = flags; - p->zJournal = zName; - p->pVfs = pVfs; - return SQLITE_OK; -} - -/* -** If the argument p points to a JournalFile structure, and the underlying -** file has not yet been created, create it now. -*/ -SQLITE_PRIVATE int sqlite3JournalCreate(sqlite3_file *p){ - if( p->pMethods!=&JournalFileMethods ){ - return SQLITE_OK; - } - return createFile((JournalFile *)p); -} - -/* -** The file-handle passed as the only argument is guaranteed to be an open -** file. It may or may not be of class JournalFile. If the file is a -** JournalFile, and the underlying file on disk has not yet been opened, -** return 0. Otherwise, return 1. -*/ -SQLITE_PRIVATE int sqlite3JournalExists(sqlite3_file *p){ - return (p->pMethods!=&JournalFileMethods || ((JournalFile *)p)->pReal!=0); -} - -/* -** Return the number of bytes required to store a JournalFile that uses vfs -** pVfs to create the underlying on-disk files. -*/ -SQLITE_PRIVATE int sqlite3JournalSize(sqlite3_vfs *pVfs){ - return (pVfs->szOsFile+sizeof(JournalFile)); -} -#endif - -/************** End of journal.c *********************************************/ -/************** Begin file memjournal.c **************************************/ -/* -** 2008 October 7 -** -** The author disclaims copyright to this source code. In place of -** a legal notice, here is a blessing: -** -** May you do good and not evil. -** May you find forgiveness for yourself and forgive others. -** May you share freely, never taking more than you give. -** -************************************************************************* -** -** This file contains code use to implement an in-memory rollback journal. -** The in-memory rollback journal is used to journal transactions for -** ":memory:" databases and when the journal_mode=MEMORY pragma is used. 
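The JournalFile code above keeps journal content in a malloc'd buffer and creates the real file only when a write would overflow that buffer or sqlite3JournalCreate() forces it. Below is a hedged, stdio-based sketch of the same lazy-spill pattern; it is append-only for brevity and does not model the SQLite VFS interface or offset-based writes.

#include <stdio.h>
#include <string.h>

typedef struct LazyFile {
  char  *buf;          /* in-memory contents while small          */
  size_t used, cap;    /* bytes buffered / buffer capacity        */
  FILE  *fp;           /* real file, created only on first spill  */
  const char *path;
} LazyFile;

/* Create the on-disk file and flush the buffered bytes into it. */
static int lazy_spill(LazyFile *p){
  if( p->fp ) return 0;
  p->fp = fopen(p->path, "w+b");
  if( !p->fp ) return -1;
  if( p->used && fwrite(p->buf, 1, p->used, p->fp)!=p->used ) return -1;
  return 0;
}

/* Append nData bytes; stays in memory until the buffer would overflow. */
static int lazy_write(LazyFile *p, const void *pData, size_t nData){
  if( !p->fp && p->used + nData <= p->cap ){
    memcpy(p->buf + p->used, pData, nData);
    p->used += nData;
    return 0;
  }
  if( lazy_spill(p) ) return -1;
  return fwrite(pData, 1, nData, p->fp)==nData ? 0 : -1;
}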
-*/ - -/* Forward references to internal structures */ -typedef struct MemJournal MemJournal; -typedef struct FilePoint FilePoint; -typedef struct FileChunk FileChunk; - -/* Space to hold the rollback journal is allocated in increments of -** this many bytes. -** -** The size chosen is a little less than a power of two. That way, -** the FileChunk object will have a size that almost exactly fills -** a power-of-two allocation. This mimimizes wasted space in power-of-two -** memory allocators. -*/ -#define JOURNAL_CHUNKSIZE ((int)(1024-sizeof(FileChunk*))) - -/* -** The rollback journal is composed of a linked list of these structures. -*/ -struct FileChunk { - FileChunk *pNext; /* Next chunk in the journal */ - u8 zChunk[JOURNAL_CHUNKSIZE]; /* Content of this chunk */ -}; - -/* -** An instance of this object serves as a cursor into the rollback journal. -** The cursor can be either for reading or writing. -*/ -struct FilePoint { - sqlite3_int64 iOffset; /* Offset from the beginning of the file */ - FileChunk *pChunk; /* Specific chunk into which cursor points */ -}; - -/* -** This subclass is a subclass of sqlite3_file. Each open memory-journal -** is an instance of this class. -*/ -struct MemJournal { - sqlite3_io_methods *pMethod; /* Parent class. MUST BE FIRST */ - FileChunk *pFirst; /* Head of in-memory chunk-list */ - FilePoint endpoint; /* Pointer to the end of the file */ - FilePoint readpoint; /* Pointer to the end of the last xRead() */ -}; - -/* -** Read data from the in-memory journal file. This is the implementation -** of the sqlite3_vfs.xRead method. -*/ -static int memjrnlRead( - sqlite3_file *pJfd, /* The journal file from which to read */ - void *zBuf, /* Put the results here */ - int iAmt, /* Number of bytes to read */ - sqlite_int64 iOfst /* Begin reading at this offset */ -){ - MemJournal *p = (MemJournal *)pJfd; - u8 *zOut = zBuf; - int nRead = iAmt; - int iChunkOffset; - FileChunk *pChunk; - - /* SQLite never tries to read past the end of a rollback journal file */ - assert( iOfst+iAmt<=p->endpoint.iOffset ); - - if( p->readpoint.iOffset!=iOfst || iOfst==0 ){ - sqlite3_int64 iOff = 0; - for(pChunk=p->pFirst; - ALWAYS(pChunk) && (iOff+JOURNAL_CHUNKSIZE)<=iOfst; - pChunk=pChunk->pNext - ){ - iOff += JOURNAL_CHUNKSIZE; - } - }else{ - pChunk = p->readpoint.pChunk; - } - - iChunkOffset = (int)(iOfst%JOURNAL_CHUNKSIZE); - do { - int iSpace = JOURNAL_CHUNKSIZE - iChunkOffset; - int nCopy = MIN(nRead, (JOURNAL_CHUNKSIZE - iChunkOffset)); - memcpy(zOut, &pChunk->zChunk[iChunkOffset], nCopy); - zOut += nCopy; - nRead -= iSpace; - iChunkOffset = 0; - } while( nRead>=0 && (pChunk=pChunk->pNext)!=0 && nRead>0 ); - p->readpoint.iOffset = iOfst+iAmt; - p->readpoint.pChunk = pChunk; - - return SQLITE_OK; -} - -/* -** Write data to the file. -*/ -static int memjrnlWrite( - sqlite3_file *pJfd, /* The journal file into which to write */ - const void *zBuf, /* Take data to be written from here */ - int iAmt, /* Number of bytes to write */ - sqlite_int64 iOfst /* Begin writing at this offset into the file */ -){ - MemJournal *p = (MemJournal *)pJfd; - int nWrite = iAmt; - u8 *zWrite = (u8 *)zBuf; - - /* An in-memory journal file should only ever be appended to. Random - ** access writes are not required by sqlite. 
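The JOURNAL_CHUNKSIZE definition above subtracts the size of the next-pointer from 1024 so that each FileChunk node, pointer included, fills one power-of-two allocation with little slack. A small sketch of that arithmetic; the exact total depends on the platform's pointer size and padding, but on common 32- and 64-bit ABIs it comes out to 1024.

#include <stdio.h>

#define CHUNKSIZE ((int)(1024 - sizeof(struct FileChunk*)))

struct FileChunk {
  struct FileChunk *pNext;           /* next chunk in the journal */
  unsigned char zChunk[CHUNKSIZE];   /* payload bytes              */
};

int main(void){
  /* Typically prints 1024: payload plus next pointer fills one
  ** power-of-two allocation almost exactly. */
  printf("sizeof(FileChunk) = %zu\n", sizeof(struct FileChunk));
  return 0;
}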
- */ - assert( iOfst==p->endpoint.iOffset ); - UNUSED_PARAMETER(iOfst); - - while( nWrite>0 ){ - FileChunk *pChunk = p->endpoint.pChunk; - int iChunkOffset = (int)(p->endpoint.iOffset%JOURNAL_CHUNKSIZE); - int iSpace = MIN(nWrite, JOURNAL_CHUNKSIZE - iChunkOffset); - - if( iChunkOffset==0 ){ - /* New chunk is required to extend the file. */ - FileChunk *pNew = sqlite3_malloc(sizeof(FileChunk)); - if( !pNew ){ - return SQLITE_IOERR_NOMEM; - } - pNew->pNext = 0; - if( pChunk ){ - assert( p->pFirst ); - pChunk->pNext = pNew; - }else{ - assert( !p->pFirst ); - p->pFirst = pNew; - } - p->endpoint.pChunk = pNew; - } - - memcpy(&p->endpoint.pChunk->zChunk[iChunkOffset], zWrite, iSpace); - zWrite += iSpace; - nWrite -= iSpace; - p->endpoint.iOffset += iSpace; - } - - return SQLITE_OK; -} - -/* -** Truncate the file. -*/ -static int memjrnlTruncate(sqlite3_file *pJfd, sqlite_int64 size){ - MemJournal *p = (MemJournal *)pJfd; - FileChunk *pChunk; - assert(size==0); - UNUSED_PARAMETER(size); - pChunk = p->pFirst; - while( pChunk ){ - FileChunk *pTmp = pChunk; - pChunk = pChunk->pNext; - sqlite3_free(pTmp); - } - sqlite3MemJournalOpen(pJfd); - return SQLITE_OK; -} - -/* -** Close the file. -*/ -static int memjrnlClose(sqlite3_file *pJfd){ - memjrnlTruncate(pJfd, 0); - return SQLITE_OK; -} - - -/* -** Sync the file. -** -** Syncing an in-memory journal is a no-op. And, in fact, this routine -** is never called in a working implementation. This implementation -** exists purely as a contingency, in case some malfunction in some other -** part of SQLite causes Sync to be called by mistake. -*/ -static int memjrnlSync(sqlite3_file *NotUsed, int NotUsed2){ - UNUSED_PARAMETER2(NotUsed, NotUsed2); - return SQLITE_OK; -} - -/* -** Query the size of the file in bytes. -*/ -static int memjrnlFileSize(sqlite3_file *pJfd, sqlite_int64 *pSize){ - MemJournal *p = (MemJournal *)pJfd; - *pSize = (sqlite_int64) p->endpoint.iOffset; - return SQLITE_OK; -} - -/* -** Table of methods for MemJournal sqlite3_file object. -*/ -static const struct sqlite3_io_methods MemJournalMethods = { - 1, /* iVersion */ - memjrnlClose, /* xClose */ - memjrnlRead, /* xRead */ - memjrnlWrite, /* xWrite */ - memjrnlTruncate, /* xTruncate */ - memjrnlSync, /* xSync */ - memjrnlFileSize, /* xFileSize */ - 0, /* xLock */ - 0, /* xUnlock */ - 0, /* xCheckReservedLock */ - 0, /* xFileControl */ - 0, /* xSectorSize */ - 0, /* xDeviceCharacteristics */ - 0, /* xShmMap */ - 0, /* xShmLock */ - 0, /* xShmBarrier */ - 0, /* xShmUnmap */ - 0, /* xFetch */ - 0 /* xUnfetch */ -}; - -/* -** Open a journal file. -*/ -SQLITE_PRIVATE void sqlite3MemJournalOpen(sqlite3_file *pJfd){ - MemJournal *p = (MemJournal *)pJfd; - assert( EIGHT_BYTE_ALIGNMENT(p) ); - memset(p, 0, sqlite3MemJournalSize()); - p->pMethod = (sqlite3_io_methods*)&MemJournalMethods; -} - -/* -** Return true if the file-handle passed as an argument is -** an in-memory journal -*/ -SQLITE_PRIVATE int sqlite3IsMemJournal(sqlite3_file *pJfd){ - return pJfd->pMethods==&MemJournalMethods; -} - -/* -** Return the number of bytes required to store a MemJournal file descriptor. -*/ -SQLITE_PRIVATE int sqlite3MemJournalSize(void){ - return sizeof(MemJournal); -} - -/************** End of memjournal.c ******************************************/ -/************** Begin file walker.c ******************************************/ -/* -** 2008 August 16 -** -** The author disclaims copyright to this source code. In place of -** a legal notice, here is a blessing: -** -** May you do good and not evil. 
-** May you find forgiveness for yourself and forgive others. -** May you share freely, never taking more than you give. -** -************************************************************************* -** This file contains routines used for walking the parser tree for -** an SQL statement. -*/ -/* #include */ -/* #include */ - - -/* -** Walk an expression tree. Invoke the callback once for each node -** of the expression, while decending. (In other words, the callback -** is invoked before visiting children.) -** -** The return value from the callback should be one of the WRC_* -** constants to specify how to proceed with the walk. -** -** WRC_Continue Continue descending down the tree. -** -** WRC_Prune Do not descend into child nodes. But allow -** the walk to continue with sibling nodes. -** -** WRC_Abort Do no more callbacks. Unwind the stack and -** return the top-level walk call. -** -** The return value from this routine is WRC_Abort to abandon the tree walk -** and WRC_Continue to continue. -*/ -SQLITE_PRIVATE int sqlite3WalkExpr(Walker *pWalker, Expr *pExpr){ - int rc; - if( pExpr==0 ) return WRC_Continue; - testcase( ExprHasProperty(pExpr, EP_TokenOnly) ); - testcase( ExprHasProperty(pExpr, EP_Reduced) ); - rc = pWalker->xExprCallback(pWalker, pExpr); - if( rc==WRC_Continue - && !ExprHasProperty(pExpr,EP_TokenOnly) ){ - if( sqlite3WalkExpr(pWalker, pExpr->pLeft) ) return WRC_Abort; - if( sqlite3WalkExpr(pWalker, pExpr->pRight) ) return WRC_Abort; - if( ExprHasProperty(pExpr, EP_xIsSelect) ){ - if( sqlite3WalkSelect(pWalker, pExpr->x.pSelect) ) return WRC_Abort; - }else{ - if( sqlite3WalkExprList(pWalker, pExpr->x.pList) ) return WRC_Abort; - } - } - return rc & WRC_Abort; -} - -/* -** Call sqlite3WalkExpr() for every expression in list p or until -** an abort request is seen. -*/ -SQLITE_PRIVATE int sqlite3WalkExprList(Walker *pWalker, ExprList *p){ - int i; - struct ExprList_item *pItem; - if( p ){ - for(i=p->nExpr, pItem=p->a; i>0; i--, pItem++){ - if( sqlite3WalkExpr(pWalker, pItem->pExpr) ) return WRC_Abort; - } - } - return WRC_Continue; -} - -/* -** Walk all expressions associated with SELECT statement p. Do -** not invoke the SELECT callback on p, but do (of course) invoke -** any expr callbacks and SELECT callbacks that come from subqueries. -** Return WRC_Abort or WRC_Continue. -*/ -SQLITE_PRIVATE int sqlite3WalkSelectExpr(Walker *pWalker, Select *p){ - if( sqlite3WalkExprList(pWalker, p->pEList) ) return WRC_Abort; - if( sqlite3WalkExpr(pWalker, p->pWhere) ) return WRC_Abort; - if( sqlite3WalkExprList(pWalker, p->pGroupBy) ) return WRC_Abort; - if( sqlite3WalkExpr(pWalker, p->pHaving) ) return WRC_Abort; - if( sqlite3WalkExprList(pWalker, p->pOrderBy) ) return WRC_Abort; - if( sqlite3WalkExpr(pWalker, p->pLimit) ) return WRC_Abort; - if( sqlite3WalkExpr(pWalker, p->pOffset) ) return WRC_Abort; - return WRC_Continue; -} - -/* -** Walk the parse trees associated with all subqueries in the -** FROM clause of SELECT statement p. Do not invoke the select -** callback on p, but do invoke it on each FROM clause subquery -** and on any subqueries further down in the tree. 
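The WRC_Continue / WRC_Prune / WRC_Abort protocol used by sqlite3WalkExpr() above is a conventional pre-order walk with a per-node veto. A hedged standalone sketch on a toy binary expression tree (not SQLite's Expr):

#include <stddef.h>

enum { WRC_CONTINUE, WRC_PRUNE, WRC_ABORT };   /* toy copies of the WRC_* idea */

typedef struct Expr { int op; struct Expr *pLeft, *pRight; } Expr;

/* Pre-order walk: the callback runs before the children are visited and
** decides whether to descend (CONTINUE), skip the subtree (PRUNE) or stop
** the whole walk (ABORT). */
static int walk(Expr *p, int (*xCb)(Expr*, void*), void *pCtx){
  if( p==0 ) return WRC_CONTINUE;
  int rc = xCb(p, pCtx);
  if( rc==WRC_CONTINUE ){
    if( walk(p->pLeft, xCb, pCtx)==WRC_ABORT ) return WRC_ABORT;
    if( walk(p->pRight, xCb, pCtx)==WRC_ABORT ) return WRC_ABORT;
  }
  return rc==WRC_ABORT ? WRC_ABORT : WRC_CONTINUE;
}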
Return -** WRC_Abort or WRC_Continue; -*/ -SQLITE_PRIVATE int sqlite3WalkSelectFrom(Walker *pWalker, Select *p){ - SrcList *pSrc; - int i; - struct SrcList_item *pItem; - - pSrc = p->pSrc; - if( ALWAYS(pSrc) ){ - for(i=pSrc->nSrc, pItem=pSrc->a; i>0; i--, pItem++){ - if( sqlite3WalkSelect(pWalker, pItem->pSelect) ){ - return WRC_Abort; - } - } - } - return WRC_Continue; -} - -/* -** Call sqlite3WalkExpr() for every expression in Select statement p. -** Invoke sqlite3WalkSelect() for subqueries in the FROM clause and -** on the compound select chain, p->pPrior. -** -** If it is not NULL, the xSelectCallback() callback is invoked before -** the walk of the expressions and FROM clause. The xSelectCallback2() -** method, if it is not NULL, is invoked following the walk of the -** expressions and FROM clause. -** -** Return WRC_Continue under normal conditions. Return WRC_Abort if -** there is an abort request. -** -** If the Walker does not have an xSelectCallback() then this routine -** is a no-op returning WRC_Continue. -*/ -SQLITE_PRIVATE int sqlite3WalkSelect(Walker *pWalker, Select *p){ - int rc; - if( p==0 || (pWalker->xSelectCallback==0 && pWalker->xSelectCallback2==0) ){ - return WRC_Continue; - } - rc = WRC_Continue; - pWalker->walkerDepth++; - while( p ){ - if( pWalker->xSelectCallback ){ - rc = pWalker->xSelectCallback(pWalker, p); - if( rc ) break; - } - if( sqlite3WalkSelectExpr(pWalker, p) - || sqlite3WalkSelectFrom(pWalker, p) - ){ - pWalker->walkerDepth--; - return WRC_Abort; - } - if( pWalker->xSelectCallback2 ){ - pWalker->xSelectCallback2(pWalker, p); - } - p = p->pPrior; - } - pWalker->walkerDepth--; - return rc & WRC_Abort; -} - -/************** End of walker.c **********************************************/ -/************** Begin file resolve.c *****************************************/ -/* -** 2008 August 18 -** -** The author disclaims copyright to this source code. In place of -** a legal notice, here is a blessing: -** -** May you do good and not evil. -** May you find forgiveness for yourself and forgive others. -** May you share freely, never taking more than you give. -** -************************************************************************* -** -** This file contains routines used for walking the parser tree and -** resolve all identifiers by associating them with a particular -** table and column. -*/ -/* #include */ -/* #include */ - -/* -** Walk the expression tree pExpr and increase the aggregate function -** depth (the Expr.op2 field) by N on every TK_AGG_FUNCTION node. -** This needs to occur when copying a TK_AGG_FUNCTION node from an -** outer query into an inner subquery. -** -** incrAggFunctionDepth(pExpr,n) is the main routine. incrAggDepth(..) -** is a helper function - a callback for the tree walker. -*/ -static int incrAggDepth(Walker *pWalker, Expr *pExpr){ - if( pExpr->op==TK_AGG_FUNCTION ) pExpr->op2 += pWalker->u.i; - return WRC_Continue; -} -static void incrAggFunctionDepth(Expr *pExpr, int N){ - if( N>0 ){ - Walker w; - memset(&w, 0, sizeof(w)); - w.xExprCallback = incrAggDepth; - w.u.i = N; - sqlite3WalkExpr(&w, pExpr); - } -} - -/* -** Turn the pExpr expression into an alias for the iCol-th column of the -** result set in pEList. -** -** If the result set column is a simple column reference, then this routine -** makes an exact copy. But for any other kind of expression, this -** routine make a copy of the result set column as the argument to the -** TK_AS operator. 
The TK_AS operator causes the expression to be -** evaluated just once and then reused for each alias. -** -** The reason for suppressing the TK_AS term when the expression is a simple -** column reference is so that the column reference will be recognized as -** usable by indices within the WHERE clause processing logic. -** -** The TK_AS operator is inhibited if zType[0]=='G'. This means -** that in a GROUP BY clause, the expression is evaluated twice. Hence: -** -** SELECT random()%5 AS x, count(*) FROM tab GROUP BY x -** -** Is equivalent to: -** -** SELECT random()%5 AS x, count(*) FROM tab GROUP BY random()%5 -** -** The result of random()%5 in the GROUP BY clause is probably different -** from the result in the result-set. On the other hand Standard SQL does -** not allow the GROUP BY clause to contain references to result-set columns. -** So this should never come up in well-formed queries. -** -** If the reference is followed by a COLLATE operator, then make sure -** the COLLATE operator is preserved. For example: -** -** SELECT a+b, c+d FROM t1 ORDER BY 1 COLLATE nocase; -** -** Should be transformed into: -** -** SELECT a+b, c+d FROM t1 ORDER BY (a+b) COLLATE nocase; -** -** The nSubquery parameter specifies how many levels of subquery the -** alias is removed from the original expression. The usually value is -** zero but it might be more if the alias is contained within a subquery -** of the original expression. The Expr.op2 field of TK_AGG_FUNCTION -** structures must be increased by the nSubquery amount. -*/ -static void resolveAlias( - Parse *pParse, /* Parsing context */ - ExprList *pEList, /* A result set */ - int iCol, /* A column in the result set. 0..pEList->nExpr-1 */ - Expr *pExpr, /* Transform this into an alias to the result set */ - const char *zType, /* "GROUP" or "ORDER" or "" */ - int nSubquery /* Number of subqueries that the label is moving */ -){ - Expr *pOrig; /* The iCol-th column of the result set */ - Expr *pDup; /* Copy of pOrig */ - sqlite3 *db; /* The database connection */ - - assert( iCol>=0 && iColnExpr ); - pOrig = pEList->a[iCol].pExpr; - assert( pOrig!=0 ); - assert( pOrig->flags & EP_Resolved ); - db = pParse->db; - pDup = sqlite3ExprDup(db, pOrig, 0); - if( pDup==0 ) return; - if( pOrig->op!=TK_COLUMN && zType[0]!='G' ){ - incrAggFunctionDepth(pDup, nSubquery); - pDup = sqlite3PExpr(pParse, TK_AS, pDup, 0, 0); - if( pDup==0 ) return; - ExprSetProperty(pDup, EP_Skip); - if( pEList->a[iCol].u.x.iAlias==0 ){ - pEList->a[iCol].u.x.iAlias = (u16)(++pParse->nAlias); - } - pDup->iTable = pEList->a[iCol].u.x.iAlias; - } - if( pExpr->op==TK_COLLATE ){ - pDup = sqlite3ExprAddCollateString(pParse, pDup, pExpr->u.zToken); - } - - /* Before calling sqlite3ExprDelete(), set the EP_Static flag. This - ** prevents ExprDelete() from deleting the Expr structure itself, - ** allowing it to be repopulated by the memcpy() on the following line. - ** The pExpr->u.zToken might point into memory that will be freed by the - ** sqlite3DbFree(db, pDup) on the last line of this block, so be sure to - ** make a copy of the token before doing the sqlite3DbFree(). 
- */ - ExprSetProperty(pExpr, EP_Static); - sqlite3ExprDelete(db, pExpr); - memcpy(pExpr, pDup, sizeof(*pExpr)); - if( !ExprHasProperty(pExpr, EP_IntValue) && pExpr->u.zToken!=0 ){ - assert( (pExpr->flags & (EP_Reduced|EP_TokenOnly))==0 ); - pExpr->u.zToken = sqlite3DbStrDup(db, pExpr->u.zToken); - pExpr->flags |= EP_MemToken; - } - sqlite3DbFree(db, pDup); -} - - -/* -** Return TRUE if the name zCol occurs anywhere in the USING clause. -** -** Return FALSE if the USING clause is NULL or if it does not contain -** zCol. -*/ -static int nameInUsingClause(IdList *pUsing, const char *zCol){ - if( pUsing ){ - int k; - for(k=0; knId; k++){ - if( sqlite3StrICmp(pUsing->a[k].zName, zCol)==0 ) return 1; - } - } - return 0; -} - -/* -** Subqueries stores the original database, table and column names for their -** result sets in ExprList.a[].zSpan, in the form "DATABASE.TABLE.COLUMN". -** Check to see if the zSpan given to this routine matches the zDb, zTab, -** and zCol. If any of zDb, zTab, and zCol are NULL then those fields will -** match anything. -*/ -SQLITE_PRIVATE int sqlite3MatchSpanName( - const char *zSpan, - const char *zCol, - const char *zTab, - const char *zDb -){ - int n; - for(n=0; ALWAYS(zSpan[n]) && zSpan[n]!='.'; n++){} - if( zDb && (sqlite3StrNICmp(zSpan, zDb, n)!=0 || zDb[n]!=0) ){ - return 0; - } - zSpan += n+1; - for(n=0; ALWAYS(zSpan[n]) && zSpan[n]!='.'; n++){} - if( zTab && (sqlite3StrNICmp(zSpan, zTab, n)!=0 || zTab[n]!=0) ){ - return 0; - } - zSpan += n+1; - if( zCol && sqlite3StrICmp(zSpan, zCol)!=0 ){ - return 0; - } - return 1; -} - -/* -** Given the name of a column of the form X.Y.Z or Y.Z or just Z, look up -** that name in the set of source tables in pSrcList and make the pExpr -** expression node refer back to that source column. The following changes -** are made to pExpr: -** -** pExpr->iDb Set the index in db->aDb[] of the database X -** (even if X is implied). -** pExpr->iTable Set to the cursor number for the table obtained -** from pSrcList. -** pExpr->pTab Points to the Table structure of X.Y (even if -** X and/or Y are implied.) -** pExpr->iColumn Set to the column number within the table. -** pExpr->op Set to TK_COLUMN. -** pExpr->pLeft Any expression this points to is deleted -** pExpr->pRight Any expression this points to is deleted. -** -** The zDb variable is the name of the database (the "X"). This value may be -** NULL meaning that name is of the form Y.Z or Z. Any available database -** can be used. The zTable variable is the name of the table (the "Y"). This -** value can be NULL if zDb is also NULL. If zTable is NULL it -** means that the form of the name is Z and that columns from any table -** can be used. -** -** If the name cannot be resolved unambiguously, leave an error message -** in pParse and return WRC_Abort. Return WRC_Prune on success. -*/ -static int lookupName( - Parse *pParse, /* The parsing context */ - const char *zDb, /* Name of the database containing table, or NULL */ - const char *zTab, /* Name of table containing column, or NULL */ - const char *zCol, /* Name of the column. 
*/ - NameContext *pNC, /* The name context used to resolve the name */ - Expr *pExpr /* Make this EXPR node point to the selected column */ -){ - int i, j; /* Loop counters */ - int cnt = 0; /* Number of matching column names */ - int cntTab = 0; /* Number of matching table names */ - int nSubquery = 0; /* How many levels of subquery */ - sqlite3 *db = pParse->db; /* The database connection */ - struct SrcList_item *pItem; /* Use for looping over pSrcList items */ - struct SrcList_item *pMatch = 0; /* The matching pSrcList item */ - NameContext *pTopNC = pNC; /* First namecontext in the list */ - Schema *pSchema = 0; /* Schema of the expression */ - int isTrigger = 0; /* True if resolved to a trigger column */ - Table *pTab = 0; /* Table hold the row */ - Column *pCol; /* A column of pTab */ - - assert( pNC ); /* the name context cannot be NULL. */ - assert( zCol ); /* The Z in X.Y.Z cannot be NULL */ - assert( !ExprHasProperty(pExpr, EP_TokenOnly|EP_Reduced) ); - - /* Initialize the node to no-match */ - pExpr->iTable = -1; - pExpr->pTab = 0; - ExprSetVVAProperty(pExpr, EP_NoReduce); - - /* Translate the schema name in zDb into a pointer to the corresponding - ** schema. If not found, pSchema will remain NULL and nothing will match - ** resulting in an appropriate error message toward the end of this routine - */ - if( zDb ){ - testcase( pNC->ncFlags & NC_PartIdx ); - testcase( pNC->ncFlags & NC_IsCheck ); - if( (pNC->ncFlags & (NC_PartIdx|NC_IsCheck))!=0 ){ - /* Silently ignore database qualifiers inside CHECK constraints and partial - ** indices. Do not raise errors because that might break legacy and - ** because it does not hurt anything to just ignore the database name. */ - zDb = 0; - }else{ - for(i=0; inDb; i++){ - assert( db->aDb[i].zName ); - if( sqlite3StrICmp(db->aDb[i].zName,zDb)==0 ){ - pSchema = db->aDb[i].pSchema; - break; - } - } - } - } - - /* Start at the inner-most context and move outward until a match is found */ - while( pNC && cnt==0 ){ - ExprList *pEList; - SrcList *pSrcList = pNC->pSrcList; - - if( pSrcList ){ - for(i=0, pItem=pSrcList->a; inSrc; i++, pItem++){ - pTab = pItem->pTab; - assert( pTab!=0 && pTab->zName!=0 ); - assert( pTab->nCol>0 ); - if( pItem->pSelect && (pItem->pSelect->selFlags & SF_NestedFrom)!=0 ){ - int hit = 0; - pEList = pItem->pSelect->pEList; - for(j=0; jnExpr; j++){ - if( sqlite3MatchSpanName(pEList->a[j].zSpan, zCol, zTab, zDb) ){ - cnt++; - cntTab = 2; - pMatch = pItem; - pExpr->iColumn = j; - hit = 1; - } - } - if( hit || zTab==0 ) continue; - } - if( zDb && pTab->pSchema!=pSchema ){ - continue; - } - if( zTab ){ - const char *zTabName = pItem->zAlias ? pItem->zAlias : pTab->zName; - assert( zTabName!=0 ); - if( sqlite3StrICmp(zTabName, zTab)!=0 ){ - continue; - } - } - if( 0==(cntTab++) ){ - pMatch = pItem; - } - for(j=0, pCol=pTab->aCol; jnCol; j++, pCol++){ - if( sqlite3StrICmp(pCol->zName, zCol)==0 ){ - /* If there has been exactly one prior match and this match - ** is for the right-hand table of a NATURAL JOIN or is in a - ** USING clause, then skip this match. - */ - if( cnt==1 ){ - if( pItem->jointype & JT_NATURAL ) continue; - if( nameInUsingClause(pItem->pUsing, zCol) ) continue; - } - cnt++; - pMatch = pItem; - /* Substitute the rowid (column -1) for the INTEGER PRIMARY KEY */ - pExpr->iColumn = j==pTab->iPKey ? 
-1 : (i16)j; - break; - } - } - } - if( pMatch ){ - pExpr->iTable = pMatch->iCursor; - pExpr->pTab = pMatch->pTab; - pSchema = pExpr->pTab->pSchema; - } - } /* if( pSrcList ) */ - -#ifndef SQLITE_OMIT_TRIGGER - /* If we have not already resolved the name, then maybe - ** it is a new.* or old.* trigger argument reference - */ - if( zDb==0 && zTab!=0 && cntTab==0 && pParse->pTriggerTab!=0 ){ - int op = pParse->eTriggerOp; - assert( op==TK_DELETE || op==TK_UPDATE || op==TK_INSERT ); - if( op!=TK_DELETE && sqlite3StrICmp("new",zTab) == 0 ){ - pExpr->iTable = 1; - pTab = pParse->pTriggerTab; - }else if( op!=TK_INSERT && sqlite3StrICmp("old",zTab)==0 ){ - pExpr->iTable = 0; - pTab = pParse->pTriggerTab; - }else{ - pTab = 0; - } - - if( pTab ){ - int iCol; - pSchema = pTab->pSchema; - cntTab++; - for(iCol=0, pCol=pTab->aCol; iColnCol; iCol++, pCol++){ - if( sqlite3StrICmp(pCol->zName, zCol)==0 ){ - if( iCol==pTab->iPKey ){ - iCol = -1; - } - break; - } - } - if( iCol>=pTab->nCol && sqlite3IsRowid(zCol) && HasRowid(pTab) ){ - /* IMP: R-24309-18625 */ - /* IMP: R-44911-55124 */ - iCol = -1; - } - if( iColnCol ){ - cnt++; - if( iCol<0 ){ - pExpr->affinity = SQLITE_AFF_INTEGER; - }else if( pExpr->iTable==0 ){ - testcase( iCol==31 ); - testcase( iCol==32 ); - pParse->oldmask |= (iCol>=32 ? 0xffffffff : (((u32)1)<newmask |= (iCol>=32 ? 0xffffffff : (((u32)1)<iColumn = (i16)iCol; - pExpr->pTab = pTab; - isTrigger = 1; - } - } - } -#endif /* !defined(SQLITE_OMIT_TRIGGER) */ - - /* - ** Perhaps the name is a reference to the ROWID - */ - if( cnt==0 && cntTab==1 && pMatch && sqlite3IsRowid(zCol) - && HasRowid(pMatch->pTab) ){ - cnt = 1; - pExpr->iColumn = -1; /* IMP: R-44911-55124 */ - pExpr->affinity = SQLITE_AFF_INTEGER; - } - - /* - ** If the input is of the form Z (not Y.Z or X.Y.Z) then the name Z - ** might refer to an result-set alias. This happens, for example, when - ** we are resolving names in the WHERE clause of the following command: - ** - ** SELECT a+b AS x FROM table WHERE x<10; - ** - ** In cases like this, replace pExpr with a copy of the expression that - ** forms the result set entry ("a+b" in the example) and return immediately. - ** Note that the expression in the result set should have already been - ** resolved by the time the WHERE clause is resolved. - ** - ** The ability to use an output result-set column in the WHERE, GROUP BY, - ** or HAVING clauses, or as part of a larger expression in the ORDRE BY - ** clause is not standard SQL. This is a (goofy) SQLite extension, that - ** is supported for backwards compatibility only. TO DO: Issue a warning - ** on sqlite3_log() whenever the capability is used. - */ - if( (pEList = pNC->pEList)!=0 - && zTab==0 - && cnt==0 - ){ - for(j=0; jnExpr; j++){ - char *zAs = pEList->a[j].zName; - if( zAs!=0 && sqlite3StrICmp(zAs, zCol)==0 ){ - Expr *pOrig; - assert( pExpr->pLeft==0 && pExpr->pRight==0 ); - assert( pExpr->x.pList==0 ); - assert( pExpr->x.pSelect==0 ); - pOrig = pEList->a[j].pExpr; - if( (pNC->ncFlags&NC_AllowAgg)==0 && ExprHasProperty(pOrig, EP_Agg) ){ - sqlite3ErrorMsg(pParse, "misuse of aliased aggregate %s", zAs); - return WRC_Abort; - } - resolveAlias(pParse, pEList, j, pExpr, "", nSubquery); - cnt = 1; - pMatch = 0; - assert( zTab==0 && zDb==0 ); - goto lookupname_end; - } - } - } - - /* Advance to the next name context. The loop will exit when either - ** we have a match (cnt>0) or when we run out of name contexts. 
- */ - if( cnt==0 ){ - pNC = pNC->pNext; - nSubquery++; - } - } - - /* - ** If X and Y are NULL (in other words if only the column name Z is - ** supplied) and the value of Z is enclosed in double-quotes, then - ** Z is a string literal if it doesn't match any column names. In that - ** case, we need to return right away and not make any changes to - ** pExpr. - ** - ** Because no reference was made to outer contexts, the pNC->nRef - ** fields are not changed in any context. - */ - if( cnt==0 && zTab==0 && ExprHasProperty(pExpr,EP_DblQuoted) ){ - pExpr->op = TK_STRING; - pExpr->pTab = 0; - return WRC_Prune; - } - - /* - ** cnt==0 means there was not match. cnt>1 means there were two or - ** more matches. Either way, we have an error. - */ - if( cnt!=1 ){ - const char *zErr; - zErr = cnt==0 ? "no such column" : "ambiguous column name"; - if( zDb ){ - sqlite3ErrorMsg(pParse, "%s: %s.%s.%s", zErr, zDb, zTab, zCol); - }else if( zTab ){ - sqlite3ErrorMsg(pParse, "%s: %s.%s", zErr, zTab, zCol); - }else{ - sqlite3ErrorMsg(pParse, "%s: %s", zErr, zCol); - } - pParse->checkSchema = 1; - pTopNC->nErr++; - } - - /* If a column from a table in pSrcList is referenced, then record - ** this fact in the pSrcList.a[].colUsed bitmask. Column 0 causes - ** bit 0 to be set. Column 1 sets bit 1. And so forth. If the - ** column number is greater than the number of bits in the bitmask - ** then set the high-order bit of the bitmask. - */ - if( pExpr->iColumn>=0 && pMatch!=0 ){ - int n = pExpr->iColumn; - testcase( n==BMS-1 ); - if( n>=BMS ){ - n = BMS-1; - } - assert( pMatch->iCursor==pExpr->iTable ); - pMatch->colUsed |= ((Bitmask)1)<pLeft); - pExpr->pLeft = 0; - sqlite3ExprDelete(db, pExpr->pRight); - pExpr->pRight = 0; - pExpr->op = (isTrigger ? TK_TRIGGER : TK_COLUMN); -lookupname_end: - if( cnt==1 ){ - assert( pNC!=0 ); - if( pExpr->op!=TK_AS ){ - sqlite3AuthRead(pParse, pExpr, pSchema, pNC->pSrcList); - } - /* Increment the nRef value on all name contexts from TopNC up to - ** the point where the name matched. */ - for(;;){ - assert( pTopNC!=0 ); - pTopNC->nRef++; - if( pTopNC==pNC ) break; - pTopNC = pTopNC->pNext; - } - return WRC_Prune; - } else { - return WRC_Abort; - } -} - -/* -** Allocate and return a pointer to an expression to load the column iCol -** from datasource iSrc in SrcList pSrc. -*/ -SQLITE_PRIVATE Expr *sqlite3CreateColumnExpr(sqlite3 *db, SrcList *pSrc, int iSrc, int iCol){ - Expr *p = sqlite3ExprAlloc(db, TK_COLUMN, 0, 0); - if( p ){ - struct SrcList_item *pItem = &pSrc->a[iSrc]; - p->pTab = pItem->pTab; - p->iTable = pItem->iCursor; - if( p->pTab->iPKey==iCol ){ - p->iColumn = -1; - }else{ - p->iColumn = (ynVar)iCol; - testcase( iCol==BMS ); - testcase( iCol==BMS-1 ); - pItem->colUsed |= ((Bitmask)1)<<(iCol>=BMS ? BMS-1 : iCol); - } - ExprSetProperty(p, EP_Resolved); - } - return p; -} - -/* -** Report an error that an expression is not valid for a partial index WHERE -** clause. -*/ -static void notValidPartIdxWhere( - Parse *pParse, /* Leave error message here */ - NameContext *pNC, /* The name context */ - const char *zMsg /* Type of error */ -){ - if( (pNC->ncFlags & NC_PartIdx)!=0 ){ - sqlite3ErrorMsg(pParse, "%s prohibited in partial index WHERE clauses", - zMsg); - } -} - -#ifndef SQLITE_OMIT_CHECK -/* -** Report an error that an expression is not valid for a CHECK constraint. 
-*/ -static void notValidCheckConstraint( - Parse *pParse, /* Leave error message here */ - NameContext *pNC, /* The name context */ - const char *zMsg /* Type of error */ -){ - if( (pNC->ncFlags & NC_IsCheck)!=0 ){ - sqlite3ErrorMsg(pParse,"%s prohibited in CHECK constraints", zMsg); - } -} -#else -# define notValidCheckConstraint(P,N,M) -#endif - -/* -** Expression p should encode a floating point value between 1.0 and 0.0. -** Return 1024 times this value. Or return -1 if p is not a floating point -** value between 1.0 and 0.0. -*/ -static int exprProbability(Expr *p){ - double r = -1.0; - if( p->op!=TK_FLOAT ) return -1; - sqlite3AtoF(p->u.zToken, &r, sqlite3Strlen30(p->u.zToken), SQLITE_UTF8); - assert( r>=0.0 ); - if( r>1.0 ) return -1; - return (int)(r*1000.0); -} - -/* -** This routine is callback for sqlite3WalkExpr(). -** -** Resolve symbolic names into TK_COLUMN operators for the current -** node in the expression tree. Return 0 to continue the search down -** the tree or 2 to abort the tree walk. -** -** This routine also does error checking and name resolution for -** function names. The operator for aggregate functions is changed -** to TK_AGG_FUNCTION. -*/ -static int resolveExprStep(Walker *pWalker, Expr *pExpr){ - NameContext *pNC; - Parse *pParse; - - pNC = pWalker->u.pNC; - assert( pNC!=0 ); - pParse = pNC->pParse; - assert( pParse==pWalker->pParse ); - - if( ExprHasProperty(pExpr, EP_Resolved) ) return WRC_Prune; - ExprSetProperty(pExpr, EP_Resolved); -#ifndef NDEBUG - if( pNC->pSrcList && pNC->pSrcList->nAlloc>0 ){ - SrcList *pSrcList = pNC->pSrcList; - int i; - for(i=0; ipSrcList->nSrc; i++){ - assert( pSrcList->a[i].iCursor>=0 && pSrcList->a[i].iCursornTab); - } - } -#endif - switch( pExpr->op ){ - -#if defined(SQLITE_ENABLE_UPDATE_DELETE_LIMIT) && !defined(SQLITE_OMIT_SUBQUERY) - /* The special operator TK_ROW means use the rowid for the first - ** column in the FROM clause. This is used by the LIMIT and ORDER BY - ** clause processing on UPDATE and DELETE statements. - */ - case TK_ROW: { - SrcList *pSrcList = pNC->pSrcList; - struct SrcList_item *pItem; - assert( pSrcList && pSrcList->nSrc==1 ); - pItem = pSrcList->a; - pExpr->op = TK_COLUMN; - pExpr->pTab = pItem->pTab; - pExpr->iTable = pItem->iCursor; - pExpr->iColumn = -1; - pExpr->affinity = SQLITE_AFF_INTEGER; - break; - } -#endif /* defined(SQLITE_ENABLE_UPDATE_DELETE_LIMIT) && !defined(SQLITE_OMIT_SUBQUERY) */ - - /* A lone identifier is the name of a column. - */ - case TK_ID: { - return lookupName(pParse, 0, 0, pExpr->u.zToken, pNC, pExpr); - } - - /* A table name and column name: ID.ID - ** Or a database, table and column: ID.ID.ID - */ - case TK_DOT: { - const char *zColumn; - const char *zTable; - const char *zDb; - Expr *pRight; - - /* if( pSrcList==0 ) break; */ - pRight = pExpr->pRight; - if( pRight->op==TK_ID ){ - zDb = 0; - zTable = pExpr->pLeft->u.zToken; - zColumn = pRight->u.zToken; - }else{ - assert( pRight->op==TK_DOT ); - zDb = pExpr->pLeft->u.zToken; - zTable = pRight->pLeft->u.zToken; - zColumn = pRight->pRight->u.zToken; - } - return lookupName(pParse, zDb, zTable, zColumn, pNC, pExpr); - } - - /* Resolve function names - */ - case TK_FUNCTION: { - ExprList *pList = pExpr->x.pList; /* The argument list */ - int n = pList ? 
pList->nExpr : 0; /* Number of arguments */ - int no_such_func = 0; /* True if no such function exists */ - int wrong_num_args = 0; /* True if wrong number of arguments */ - int is_agg = 0; /* True if is an aggregate function */ - int auth; /* Authorization to use the function */ - int nId; /* Number of characters in function name */ - const char *zId; /* The function name. */ - FuncDef *pDef; /* Information about the function */ - u8 enc = ENC(pParse->db); /* The database encoding */ - - assert( !ExprHasProperty(pExpr, EP_xIsSelect) ); - notValidPartIdxWhere(pParse, pNC, "functions"); - zId = pExpr->u.zToken; - nId = sqlite3Strlen30(zId); - pDef = sqlite3FindFunction(pParse->db, zId, nId, n, enc, 0); - if( pDef==0 ){ - pDef = sqlite3FindFunction(pParse->db, zId, nId, -2, enc, 0); - if( pDef==0 ){ - no_such_func = 1; - }else{ - wrong_num_args = 1; - } - }else{ - is_agg = pDef->xFunc==0; - if( pDef->funcFlags & SQLITE_FUNC_UNLIKELY ){ - ExprSetProperty(pExpr, EP_Unlikely|EP_Skip); - if( n==2 ){ - pExpr->iTable = exprProbability(pList->a[1].pExpr); - if( pExpr->iTable<0 ){ - sqlite3ErrorMsg(pParse, "second argument to likelihood() must be a " - "constant between 0.0 and 1.0"); - pNC->nErr++; - } - }else{ - /* EVIDENCE-OF: R-61304-29449 The unlikely(X) function is equivalent to - ** likelihood(X, 0.0625). - ** EVIDENCE-OF: R-01283-11636 The unlikely(X) function is short-hand for - ** likelihood(X,0.0625). */ - pExpr->iTable = 62; /* TUNING: Default 2nd arg to unlikely() is 0.0625 */ - } - } - } -#ifndef SQLITE_OMIT_AUTHORIZATION - if( pDef ){ - auth = sqlite3AuthCheck(pParse, SQLITE_FUNCTION, 0, pDef->zName, 0); - if( auth!=SQLITE_OK ){ - if( auth==SQLITE_DENY ){ - sqlite3ErrorMsg(pParse, "not authorized to use function: %s", - pDef->zName); - pNC->nErr++; - } - pExpr->op = TK_NULL; - return WRC_Prune; - } - if( pDef->funcFlags & SQLITE_FUNC_CONSTANT ) ExprSetProperty(pExpr,EP_Constant); - } -#endif - if( is_agg && (pNC->ncFlags & NC_AllowAgg)==0 ){ - sqlite3ErrorMsg(pParse, "misuse of aggregate function %.*s()", nId,zId); - pNC->nErr++; - is_agg = 0; - }else if( no_such_func && pParse->db->init.busy==0 ){ - sqlite3ErrorMsg(pParse, "no such function: %.*s", nId, zId); - pNC->nErr++; - }else if( wrong_num_args ){ - sqlite3ErrorMsg(pParse,"wrong number of arguments to function %.*s()", - nId, zId); - pNC->nErr++; - } - if( is_agg ) pNC->ncFlags &= ~NC_AllowAgg; - sqlite3WalkExprList(pWalker, pList); - if( is_agg ){ - NameContext *pNC2 = pNC; - pExpr->op = TK_AGG_FUNCTION; - pExpr->op2 = 0; - while( pNC2 && !sqlite3FunctionUsesThisSrc(pExpr, pNC2->pSrcList) ){ - pExpr->op2++; - pNC2 = pNC2->pNext; - } - if( pNC2 ) pNC2->ncFlags |= NC_HasAgg; - pNC->ncFlags |= NC_AllowAgg; - } - /* FIX ME: Compute pExpr->affinity based on the expected return - ** type of the function - */ - return WRC_Prune; - } -#ifndef SQLITE_OMIT_SUBQUERY - case TK_SELECT: - case TK_EXISTS: testcase( pExpr->op==TK_EXISTS ); -#endif - case TK_IN: { - testcase( pExpr->op==TK_IN ); - if( ExprHasProperty(pExpr, EP_xIsSelect) ){ - int nRef = pNC->nRef; - notValidCheckConstraint(pParse, pNC, "subqueries"); - notValidPartIdxWhere(pParse, pNC, "subqueries"); - sqlite3WalkSelect(pWalker, pExpr->x.pSelect); - assert( pNC->nRef>=nRef ); - if( nRef!=pNC->nRef ){ - ExprSetProperty(pExpr, EP_VarSelect); - } - } - break; - } - case TK_VARIABLE: { - notValidCheckConstraint(pParse, pNC, "parameters"); - notValidPartIdxWhere(pParse, pNC, "parameters"); - break; - } - } - return (pParse->nErr || pParse->db->mallocFailed) ? 
WRC_Abort : WRC_Continue; -} - -/* -** pEList is a list of expressions which are really the result set of the -** a SELECT statement. pE is a term in an ORDER BY or GROUP BY clause. -** This routine checks to see if pE is a simple identifier which corresponds -** to the AS-name of one of the terms of the expression list. If it is, -** this routine return an integer between 1 and N where N is the number of -** elements in pEList, corresponding to the matching entry. If there is -** no match, or if pE is not a simple identifier, then this routine -** return 0. -** -** pEList has been resolved. pE has not. -*/ -static int resolveAsName( - Parse *pParse, /* Parsing context for error messages */ - ExprList *pEList, /* List of expressions to scan */ - Expr *pE /* Expression we are trying to match */ -){ - int i; /* Loop counter */ - - UNUSED_PARAMETER(pParse); - - if( pE->op==TK_ID ){ - char *zCol = pE->u.zToken; - for(i=0; inExpr; i++){ - char *zAs = pEList->a[i].zName; - if( zAs!=0 && sqlite3StrICmp(zAs, zCol)==0 ){ - return i+1; - } - } - } - return 0; -} - -/* -** pE is a pointer to an expression which is a single term in the -** ORDER BY of a compound SELECT. The expression has not been -** name resolved. -** -** At the point this routine is called, we already know that the -** ORDER BY term is not an integer index into the result set. That -** case is handled by the calling routine. -** -** Attempt to match pE against result set columns in the left-most -** SELECT statement. Return the index i of the matching column, -** as an indication to the caller that it should sort by the i-th column. -** The left-most column is 1. In other words, the value returned is the -** same integer value that would be used in the SQL statement to indicate -** the column. -** -** If there is no match, return 0. Return -1 if an error occurs. -*/ -static int resolveOrderByTermToExprList( - Parse *pParse, /* Parsing context for error messages */ - Select *pSelect, /* The SELECT statement with the ORDER BY clause */ - Expr *pE /* The specific ORDER BY term */ -){ - int i; /* Loop counter */ - ExprList *pEList; /* The columns of the result set */ - NameContext nc; /* Name context for resolving pE */ - sqlite3 *db; /* Database connection */ - int rc; /* Return code from subprocedures */ - u8 savedSuppErr; /* Saved value of db->suppressErr */ - - assert( sqlite3ExprIsInteger(pE, &i)==0 ); - pEList = pSelect->pEList; - - /* Resolve all names in the ORDER BY term expression - */ - memset(&nc, 0, sizeof(nc)); - nc.pParse = pParse; - nc.pSrcList = pSelect->pSrc; - nc.pEList = pEList; - nc.ncFlags = NC_AllowAgg; - nc.nErr = 0; - db = pParse->db; - savedSuppErr = db->suppressErr; - db->suppressErr = 1; - rc = sqlite3ResolveExprNames(&nc, pE); - db->suppressErr = savedSuppErr; - if( rc ) return 0; - - /* Try to match the ORDER BY expression against an expression - ** in the result set. Return an 1-based index of the matching - ** result-set entry. - */ - for(i=0; inExpr; i++){ - if( sqlite3ExprCompare(pEList->a[i].pExpr, pE, -1)<2 ){ - return i+1; - } - } - - /* If no match, return 0. */ - return 0; -} - -/* -** Generate an ORDER BY or GROUP BY term out-of-range error. 
-*/ -static void resolveOutOfRangeError( - Parse *pParse, /* The error context into which to write the error */ - const char *zType, /* "ORDER" or "GROUP" */ - int i, /* The index (1-based) of the term out of range */ - int mx /* Largest permissible value of i */ -){ - sqlite3ErrorMsg(pParse, - "%r %s BY term out of range - should be " - "between 1 and %d", i, zType, mx); -} - -/* -** Analyze the ORDER BY clause in a compound SELECT statement. Modify -** each term of the ORDER BY clause is a constant integer between 1 -** and N where N is the number of columns in the compound SELECT. -** -** ORDER BY terms that are already an integer between 1 and N are -** unmodified. ORDER BY terms that are integers outside the range of -** 1 through N generate an error. ORDER BY terms that are expressions -** are matched against result set expressions of compound SELECT -** beginning with the left-most SELECT and working toward the right. -** At the first match, the ORDER BY expression is transformed into -** the integer column number. -** -** Return the number of errors seen. -*/ -static int resolveCompoundOrderBy( - Parse *pParse, /* Parsing context. Leave error messages here */ - Select *pSelect /* The SELECT statement containing the ORDER BY */ -){ - int i; - ExprList *pOrderBy; - ExprList *pEList; - sqlite3 *db; - int moreToDo = 1; - - pOrderBy = pSelect->pOrderBy; - if( pOrderBy==0 ) return 0; - db = pParse->db; -#if SQLITE_MAX_COLUMN - if( pOrderBy->nExpr>db->aLimit[SQLITE_LIMIT_COLUMN] ){ - sqlite3ErrorMsg(pParse, "too many terms in ORDER BY clause"); - return 1; - } -#endif - for(i=0; inExpr; i++){ - pOrderBy->a[i].done = 0; - } - pSelect->pNext = 0; - while( pSelect->pPrior ){ - pSelect->pPrior->pNext = pSelect; - pSelect = pSelect->pPrior; - } - while( pSelect && moreToDo ){ - struct ExprList_item *pItem; - moreToDo = 0; - pEList = pSelect->pEList; - assert( pEList!=0 ); - for(i=0, pItem=pOrderBy->a; inExpr; i++, pItem++){ - int iCol = -1; - Expr *pE, *pDup; - if( pItem->done ) continue; - pE = sqlite3ExprSkipCollate(pItem->pExpr); - if( sqlite3ExprIsInteger(pE, &iCol) ){ - if( iCol<=0 || iCol>pEList->nExpr ){ - resolveOutOfRangeError(pParse, "ORDER", i+1, pEList->nExpr); - return 1; - } - }else{ - iCol = resolveAsName(pParse, pEList, pE); - if( iCol==0 ){ - pDup = sqlite3ExprDup(db, pE, 0); - if( !db->mallocFailed ){ - assert(pDup); - iCol = resolveOrderByTermToExprList(pParse, pSelect, pDup); - } - sqlite3ExprDelete(db, pDup); - } - } - if( iCol>0 ){ - /* Convert the ORDER BY term into an integer column number iCol, - ** taking care to preserve the COLLATE clause if it exists */ - Expr *pNew = sqlite3Expr(db, TK_INTEGER, 0); - if( pNew==0 ) return 1; - pNew->flags |= EP_IntValue; - pNew->u.iValue = iCol; - if( pItem->pExpr==pE ){ - pItem->pExpr = pNew; - }else{ - assert( pItem->pExpr->op==TK_COLLATE ); - assert( pItem->pExpr->pLeft==pE ); - pItem->pExpr->pLeft = pNew; - } - sqlite3ExprDelete(db, pE); - pItem->u.x.iOrderByCol = (u16)iCol; - pItem->done = 1; - }else{ - moreToDo = 1; - } - } - pSelect = pSelect->pNext; - } - for(i=0; inExpr; i++){ - if( pOrderBy->a[i].done==0 ){ - sqlite3ErrorMsg(pParse, "%r ORDER BY term does not match any " - "column in the result set", i+1); - return 1; - } - } - return 0; -} - -/* -** Check every term in the ORDER BY or GROUP BY clause pOrderBy of -** the SELECT statement pSelect. 
If any term is reference to a -** result set expression (as determined by the ExprList.a.u.x.iOrderByCol -** field) then convert that term into a copy of the corresponding result set -** column. -** -** If any errors are detected, add an error message to pParse and -** return non-zero. Return zero if no errors are seen. -*/ -SQLITE_PRIVATE int sqlite3ResolveOrderGroupBy( - Parse *pParse, /* Parsing context. Leave error messages here */ - Select *pSelect, /* The SELECT statement containing the clause */ - ExprList *pOrderBy, /* The ORDER BY or GROUP BY clause to be processed */ - const char *zType /* "ORDER" or "GROUP" */ -){ - int i; - sqlite3 *db = pParse->db; - ExprList *pEList; - struct ExprList_item *pItem; - - if( pOrderBy==0 || pParse->db->mallocFailed ) return 0; -#if SQLITE_MAX_COLUMN - if( pOrderBy->nExpr>db->aLimit[SQLITE_LIMIT_COLUMN] ){ - sqlite3ErrorMsg(pParse, "too many terms in %s BY clause", zType); - return 1; - } -#endif - pEList = pSelect->pEList; - assert( pEList!=0 ); /* sqlite3SelectNew() guarantees this */ - for(i=0, pItem=pOrderBy->a; inExpr; i++, pItem++){ - if( pItem->u.x.iOrderByCol ){ - if( pItem->u.x.iOrderByCol>pEList->nExpr ){ - resolveOutOfRangeError(pParse, zType, i+1, pEList->nExpr); - return 1; - } - resolveAlias(pParse, pEList, pItem->u.x.iOrderByCol-1, pItem->pExpr, zType,0); - } - } - return 0; -} - -/* -** pOrderBy is an ORDER BY or GROUP BY clause in SELECT statement pSelect. -** The Name context of the SELECT statement is pNC. zType is either -** "ORDER" or "GROUP" depending on which type of clause pOrderBy is. -** -** This routine resolves each term of the clause into an expression. -** If the order-by term is an integer I between 1 and N (where N is the -** number of columns in the result set of the SELECT) then the expression -** in the resolution is a copy of the I-th result-set expression. If -** the order-by term is an identifier that corresponds to the AS-name of -** a result-set expression, then the term resolves to a copy of the -** result-set expression. Otherwise, the expression is resolved in -** the usual way - using sqlite3ResolveExprNames(). -** -** This routine returns the number of errors. If errors occur, then -** an appropriate error message might be left in pParse. (OOM errors -** excepted.) -*/ -static int resolveOrderGroupBy( - NameContext *pNC, /* The name context of the SELECT statement */ - Select *pSelect, /* The SELECT statement holding pOrderBy */ - ExprList *pOrderBy, /* An ORDER BY or GROUP BY clause to resolve */ - const char *zType /* Either "ORDER" or "GROUP", as appropriate */ -){ - int i, j; /* Loop counters */ - int iCol; /* Column number */ - struct ExprList_item *pItem; /* A term of the ORDER BY clause */ - Parse *pParse; /* Parsing context */ - int nResult; /* Number of terms in the result set */ - - if( pOrderBy==0 ) return 0; - nResult = pSelect->pEList->nExpr; - pParse = pNC->pParse; - for(i=0, pItem=pOrderBy->a; inExpr; i++, pItem++){ - Expr *pE = pItem->pExpr; - Expr *pE2 = sqlite3ExprSkipCollate(pE); - if( zType[0]!='G' ){ - iCol = resolveAsName(pParse, pSelect->pEList, pE2); - if( iCol>0 ){ - /* If an AS-name match is found, mark this ORDER BY column as being - ** a copy of the iCol-th result-set column. The subsequent call to - ** sqlite3ResolveOrderGroupBy() will convert the expression to a - ** copy of the iCol-th result-set expression. */ - pItem->u.x.iOrderByCol = (u16)iCol; - continue; - } - } - if( sqlite3ExprIsInteger(pE2, &iCol) ){ - /* The ORDER BY term is an integer constant. 
Again, set the column - ** number so that sqlite3ResolveOrderGroupBy() will convert the - ** order-by term to a copy of the result-set expression */ - if( iCol<1 || iCol>0xffff ){ - resolveOutOfRangeError(pParse, zType, i+1, nResult); - return 1; - } - pItem->u.x.iOrderByCol = (u16)iCol; - continue; - } - - /* Otherwise, treat the ORDER BY term as an ordinary expression */ - pItem->u.x.iOrderByCol = 0; - if( sqlite3ResolveExprNames(pNC, pE) ){ - return 1; - } - for(j=0; jpEList->nExpr; j++){ - if( sqlite3ExprCompare(pE, pSelect->pEList->a[j].pExpr, -1)==0 ){ - pItem->u.x.iOrderByCol = j+1; - } - } - } - return sqlite3ResolveOrderGroupBy(pParse, pSelect, pOrderBy, zType); -} - -/* -** Resolve names in the SELECT statement p and all of its descendents. -*/ -static int resolveSelectStep(Walker *pWalker, Select *p){ - NameContext *pOuterNC; /* Context that contains this SELECT */ - NameContext sNC; /* Name context of this SELECT */ - int isCompound; /* True if p is a compound select */ - int nCompound; /* Number of compound terms processed so far */ - Parse *pParse; /* Parsing context */ - ExprList *pEList; /* Result set expression list */ - int i; /* Loop counter */ - ExprList *pGroupBy; /* The GROUP BY clause */ - Select *pLeftmost; /* Left-most of SELECT of a compound */ - sqlite3 *db; /* Database connection */ - - - assert( p!=0 ); - if( p->selFlags & SF_Resolved ){ - return WRC_Prune; - } - pOuterNC = pWalker->u.pNC; - pParse = pWalker->pParse; - db = pParse->db; - - /* Normally sqlite3SelectExpand() will be called first and will have - ** already expanded this SELECT. However, if this is a subquery within - ** an expression, sqlite3ResolveExprNames() will be called without a - ** prior call to sqlite3SelectExpand(). When that happens, let - ** sqlite3SelectPrep() do all of the processing for this SELECT. - ** sqlite3SelectPrep() will invoke both sqlite3SelectExpand() and - ** this routine in the correct order. - */ - if( (p->selFlags & SF_Expanded)==0 ){ - sqlite3SelectPrep(pParse, p, pOuterNC); - return (pParse->nErr || db->mallocFailed) ? WRC_Abort : WRC_Prune; - } - - isCompound = p->pPrior!=0; - nCompound = 0; - pLeftmost = p; - while( p ){ - assert( (p->selFlags & SF_Expanded)!=0 ); - assert( (p->selFlags & SF_Resolved)==0 ); - p->selFlags |= SF_Resolved; - - /* Resolve the expressions in the LIMIT and OFFSET clauses. These - ** are not allowed to refer to any names, so pass an empty NameContext. - */ - memset(&sNC, 0, sizeof(sNC)); - sNC.pParse = pParse; - if( sqlite3ResolveExprNames(&sNC, p->pLimit) || - sqlite3ResolveExprNames(&sNC, p->pOffset) ){ - return WRC_Abort; - } - - /* Recursively resolve names in all subqueries - */ - for(i=0; ipSrc->nSrc; i++){ - struct SrcList_item *pItem = &p->pSrc->a[i]; - if( pItem->pSelect ){ - NameContext *pNC; /* Used to iterate name contexts */ - int nRef = 0; /* Refcount for pOuterNC and outer contexts */ - const char *zSavedContext = pParse->zAuthContext; - - /* Count the total number of references to pOuterNC and all of its - ** parent contexts. After resolving references to expressions in - ** pItem->pSelect, check if this value has changed. If so, then - ** SELECT statement pItem->pSelect must be correlated. Set the - ** pItem->isCorrelated flag if this is the case. 
*/ - for(pNC=pOuterNC; pNC; pNC=pNC->pNext) nRef += pNC->nRef; - - if( pItem->zName ) pParse->zAuthContext = pItem->zName; - sqlite3ResolveSelectNames(pParse, pItem->pSelect, pOuterNC); - pParse->zAuthContext = zSavedContext; - if( pParse->nErr || db->mallocFailed ) return WRC_Abort; - - for(pNC=pOuterNC; pNC; pNC=pNC->pNext) nRef -= pNC->nRef; - assert( pItem->isCorrelated==0 && nRef<=0 ); - pItem->isCorrelated = (nRef!=0); - } - } - - /* Set up the local name-context to pass to sqlite3ResolveExprNames() to - ** resolve the result-set expression list. - */ - sNC.ncFlags = NC_AllowAgg; - sNC.pSrcList = p->pSrc; - sNC.pNext = pOuterNC; - - /* Resolve names in the result set. */ - pEList = p->pEList; - assert( pEList!=0 ); - for(i=0; inExpr; i++){ - Expr *pX = pEList->a[i].pExpr; - if( sqlite3ResolveExprNames(&sNC, pX) ){ - return WRC_Abort; - } - } - - /* If there are no aggregate functions in the result-set, and no GROUP BY - ** expression, do not allow aggregates in any of the other expressions. - */ - assert( (p->selFlags & SF_Aggregate)==0 ); - pGroupBy = p->pGroupBy; - if( pGroupBy || (sNC.ncFlags & NC_HasAgg)!=0 ){ - p->selFlags |= SF_Aggregate; - }else{ - sNC.ncFlags &= ~NC_AllowAgg; - } - - /* If a HAVING clause is present, then there must be a GROUP BY clause. - */ - if( p->pHaving && !pGroupBy ){ - sqlite3ErrorMsg(pParse, "a GROUP BY clause is required before HAVING"); - return WRC_Abort; - } - - /* Add the output column list to the name-context before parsing the - ** other expressions in the SELECT statement. This is so that - ** expressions in the WHERE clause (etc.) can refer to expressions by - ** aliases in the result set. - ** - ** Minor point: If this is the case, then the expression will be - ** re-evaluated for each reference to it. - */ - sNC.pEList = p->pEList; - if( sqlite3ResolveExprNames(&sNC, p->pHaving) ) return WRC_Abort; - if( sqlite3ResolveExprNames(&sNC, p->pWhere) ) return WRC_Abort; - - /* The ORDER BY and GROUP BY clauses may not refer to terms in - ** outer queries - */ - sNC.pNext = 0; - sNC.ncFlags |= NC_AllowAgg; - - /* Process the ORDER BY clause for singleton SELECT statements. - ** The ORDER BY clause for compounds SELECT statements is handled - ** below, after all of the result-sets for all of the elements of - ** the compound have been resolved. - */ - if( !isCompound && resolveOrderGroupBy(&sNC, p, p->pOrderBy, "ORDER") ){ - return WRC_Abort; - } - if( db->mallocFailed ){ - return WRC_Abort; - } - - /* Resolve the GROUP BY clause. At the same time, make sure - ** the GROUP BY clause does not contain aggregate functions. - */ - if( pGroupBy ){ - struct ExprList_item *pItem; - - if( resolveOrderGroupBy(&sNC, p, pGroupBy, "GROUP") || db->mallocFailed ){ - return WRC_Abort; - } - for(i=0, pItem=pGroupBy->a; inExpr; i++, pItem++){ - if( ExprHasProperty(pItem->pExpr, EP_Agg) ){ - sqlite3ErrorMsg(pParse, "aggregate functions are not allowed in " - "the GROUP BY clause"); - return WRC_Abort; - } - } - } - - /* Advance to the next term of the compound - */ - p = p->pPrior; - nCompound++; - } - - /* Resolve the ORDER BY on a compound SELECT after all terms of - ** the compound have been resolved. - */ - if( isCompound && resolveCompoundOrderBy(pParse, pLeftmost) ){ - return WRC_Abort; - } - - return WRC_Prune; -} - -/* -** This routine walks an expression tree and resolves references to -** table columns and result-set columns. At the same time, do error -** checking on function usage and set a flag if any aggregate functions -** are seen. 
-** -** To resolve table columns references we look for nodes (or subtrees) of the -** form X.Y.Z or Y.Z or just Z where -** -** X: The name of a database. Ex: "main" or "temp" or -** the symbolic name assigned to an ATTACH-ed database. -** -** Y: The name of a table in a FROM clause. Or in a trigger -** one of the special names "old" or "new". -** -** Z: The name of a column in table Y. -** -** The node at the root of the subtree is modified as follows: -** -** Expr.op Changed to TK_COLUMN -** Expr.pTab Points to the Table object for X.Y -** Expr.iColumn The column index in X.Y. -1 for the rowid. -** Expr.iTable The VDBE cursor number for X.Y -** -** -** To resolve result-set references, look for expression nodes of the -** form Z (with no X and Y prefix) where the Z matches the right-hand -** size of an AS clause in the result-set of a SELECT. The Z expression -** is replaced by a copy of the left-hand side of the result-set expression. -** Table-name and function resolution occurs on the substituted expression -** tree. For example, in: -** -** SELECT a+b AS x, c+d AS y FROM t1 ORDER BY x; -** -** The "x" term of the order by is replaced by "a+b" to render: -** -** SELECT a+b AS x, c+d AS y FROM t1 ORDER BY a+b; -** -** Function calls are checked to make sure that the function is -** defined and that the correct number of arguments are specified. -** If the function is an aggregate function, then the NC_HasAgg flag is -** set and the opcode is changed from TK_FUNCTION to TK_AGG_FUNCTION. -** If an expression contains aggregate functions then the EP_Agg -** property on the expression is set. -** -** An error message is left in pParse if anything is amiss. The number -** if errors is returned. -*/ -SQLITE_PRIVATE int sqlite3ResolveExprNames( - NameContext *pNC, /* Namespace to resolve expressions in. */ - Expr *pExpr /* The expression to be analyzed. */ -){ - u8 savedHasAgg; - Walker w; - - if( pExpr==0 ) return 0; -#if SQLITE_MAX_EXPR_DEPTH>0 - { - Parse *pParse = pNC->pParse; - if( sqlite3ExprCheckHeight(pParse, pExpr->nHeight+pNC->pParse->nHeight) ){ - return 1; - } - pParse->nHeight += pExpr->nHeight; - } -#endif - savedHasAgg = pNC->ncFlags & NC_HasAgg; - pNC->ncFlags &= ~NC_HasAgg; - memset(&w, 0, sizeof(w)); - w.xExprCallback = resolveExprStep; - w.xSelectCallback = resolveSelectStep; - w.pParse = pNC->pParse; - w.u.pNC = pNC; - sqlite3WalkExpr(&w, pExpr); -#if SQLITE_MAX_EXPR_DEPTH>0 - pNC->pParse->nHeight -= pExpr->nHeight; -#endif - if( pNC->nErr>0 || w.pParse->nErr>0 ){ - ExprSetProperty(pExpr, EP_Error); - } - if( pNC->ncFlags & NC_HasAgg ){ - ExprSetProperty(pExpr, EP_Agg); - }else if( savedHasAgg ){ - pNC->ncFlags |= NC_HasAgg; - } - return ExprHasProperty(pExpr, EP_Error); -} - - -/* -** Resolve all names in all expressions of a SELECT and in all -** decendents of the SELECT, including compounds off of p->pPrior, -** subqueries in expressions, and subqueries used as FROM clause -** terms. -** -** See sqlite3ResolveExprNames() for a description of the kinds of -** transformations that occur. -** -** All SELECT statements should have been expanded using -** sqlite3SelectExpand() prior to invoking this routine. -*/ -SQLITE_PRIVATE void sqlite3ResolveSelectNames( - Parse *pParse, /* The parser context */ - Select *p, /* The SELECT statement being coded. 
*/ - NameContext *pOuterNC /* Name context for parent SELECT statement */ -){ - Walker w; - - assert( p!=0 ); - memset(&w, 0, sizeof(w)); - w.xExprCallback = resolveExprStep; - w.xSelectCallback = resolveSelectStep; - w.pParse = pParse; - w.u.pNC = pOuterNC; - sqlite3WalkSelect(&w, p); -} - -/* -** Resolve names in expressions that can only reference a single table: -** -** * CHECK constraints -** * WHERE clauses on partial indices -** -** The Expr.iTable value for Expr.op==TK_COLUMN nodes of the expression -** is set to -1 and the Expr.iColumn value is set to the column number. -** -** Any errors cause an error message to be set in pParse. -*/ -SQLITE_PRIVATE void sqlite3ResolveSelfReference( - Parse *pParse, /* Parsing context */ - Table *pTab, /* The table being referenced */ - int type, /* NC_IsCheck or NC_PartIdx */ - Expr *pExpr, /* Expression to resolve. May be NULL. */ - ExprList *pList /* Expression list to resolve. May be NUL. */ -){ - SrcList sSrc; /* Fake SrcList for pParse->pNewTable */ - NameContext sNC; /* Name context for pParse->pNewTable */ - int i; /* Loop counter */ - - assert( type==NC_IsCheck || type==NC_PartIdx ); - memset(&sNC, 0, sizeof(sNC)); - memset(&sSrc, 0, sizeof(sSrc)); - sSrc.nSrc = 1; - sSrc.a[0].zName = pTab->zName; - sSrc.a[0].pTab = pTab; - sSrc.a[0].iCursor = -1; - sNC.pParse = pParse; - sNC.pSrcList = &sSrc; - sNC.ncFlags = type; - if( sqlite3ResolveExprNames(&sNC, pExpr) ) return; - if( pList ){ - for(i=0; inExpr; i++){ - if( sqlite3ResolveExprNames(&sNC, pList->a[i].pExpr) ){ - return; - } - } - } -} - -/************** End of resolve.c *********************************************/ -/************** Begin file expr.c ********************************************/ -/* -** 2001 September 15 -** -** The author disclaims copyright to this source code. In place of -** a legal notice, here is a blessing: -** -** May you do good and not evil. -** May you find forgiveness for yourself and forgive others. -** May you share freely, never taking more than you give. -** -************************************************************************* -** This file contains routines used for analyzing expressions and -** for generating VDBE code that evaluates expressions in SQLite. -*/ - -/* -** Return the 'affinity' of the expression pExpr if any. -** -** If pExpr is a column, a reference to a column via an 'AS' alias, -** or a sub-select with a column as the return value, then the -** affinity of that column is returned. Otherwise, 0x00 is returned, -** indicating no affinity for the expression. -** -** i.e. 
the WHERE clause expresssions in the following statements all -** have an affinity: -** -** CREATE TABLE t1(a); -** SELECT * FROM t1 WHERE a; -** SELECT a AS b FROM t1 WHERE b; -** SELECT * FROM t1 WHERE (select a from t1); -*/ -SQLITE_PRIVATE char sqlite3ExprAffinity(Expr *pExpr){ - int op; - pExpr = sqlite3ExprSkipCollate(pExpr); - if( pExpr->flags & EP_Generic ) return SQLITE_AFF_NONE; - op = pExpr->op; - if( op==TK_SELECT ){ - assert( pExpr->flags&EP_xIsSelect ); - return sqlite3ExprAffinity(pExpr->x.pSelect->pEList->a[0].pExpr); - } -#ifndef SQLITE_OMIT_CAST - if( op==TK_CAST ){ - assert( !ExprHasProperty(pExpr, EP_IntValue) ); - return sqlite3AffinityType(pExpr->u.zToken, 0); - } -#endif - if( (op==TK_AGG_COLUMN || op==TK_COLUMN || op==TK_REGISTER) - && pExpr->pTab!=0 - ){ - /* op==TK_REGISTER && pExpr->pTab!=0 happens when pExpr was originally - ** a TK_COLUMN but was previously evaluated and cached in a register */ - int j = pExpr->iColumn; - if( j<0 ) return SQLITE_AFF_INTEGER; - assert( pExpr->pTab && jpTab->nCol ); - return pExpr->pTab->aCol[j].affinity; - } - return pExpr->affinity; -} - -/* -** Set the collating sequence for expression pExpr to be the collating -** sequence named by pToken. Return a pointer to a new Expr node that -** implements the COLLATE operator. -** -** If a memory allocation error occurs, that fact is recorded in pParse->db -** and the pExpr parameter is returned unchanged. -*/ -SQLITE_PRIVATE Expr *sqlite3ExprAddCollateToken( - Parse *pParse, /* Parsing context */ - Expr *pExpr, /* Add the "COLLATE" clause to this expression */ - const Token *pCollName /* Name of collating sequence */ -){ - if( pCollName->n>0 ){ - Expr *pNew = sqlite3ExprAlloc(pParse->db, TK_COLLATE, pCollName, 1); - if( pNew ){ - pNew->pLeft = pExpr; - pNew->flags |= EP_Collate|EP_Skip; - pExpr = pNew; - } - } - return pExpr; -} -SQLITE_PRIVATE Expr *sqlite3ExprAddCollateString(Parse *pParse, Expr *pExpr, const char *zC){ - Token s; - assert( zC!=0 ); - s.z = zC; - s.n = sqlite3Strlen30(s.z); - return sqlite3ExprAddCollateToken(pParse, pExpr, &s); -} - -/* -** Skip over any TK_COLLATE or TK_AS operators and any unlikely() -** or likelihood() function at the root of an expression. -*/ -SQLITE_PRIVATE Expr *sqlite3ExprSkipCollate(Expr *pExpr){ - while( pExpr && ExprHasProperty(pExpr, EP_Skip) ){ - if( ExprHasProperty(pExpr, EP_Unlikely) ){ - assert( !ExprHasProperty(pExpr, EP_xIsSelect) ); - assert( pExpr->x.pList->nExpr>0 ); - assert( pExpr->op==TK_FUNCTION ); - pExpr = pExpr->x.pList->a[0].pExpr; - }else{ - assert( pExpr->op==TK_COLLATE || pExpr->op==TK_AS ); - pExpr = pExpr->pLeft; - } - } - return pExpr; -} - -/* -** Return the collation sequence for the expression pExpr. If -** there is no defined collating sequence, return NULL. -** -** The collating sequence might be determined by a COLLATE operator -** or by the presence of a column with a defined collating sequence. -** COLLATE operators take first precedence. Left operands take -** precedence over right operands. 
-*/ -SQLITE_PRIVATE CollSeq *sqlite3ExprCollSeq(Parse *pParse, Expr *pExpr){ - sqlite3 *db = pParse->db; - CollSeq *pColl = 0; - Expr *p = pExpr; - while( p ){ - int op = p->op; - if( p->flags & EP_Generic ) break; - if( op==TK_CAST || op==TK_UPLUS ){ - p = p->pLeft; - continue; - } - if( op==TK_COLLATE || (op==TK_REGISTER && p->op2==TK_COLLATE) ){ - pColl = sqlite3GetCollSeq(pParse, ENC(db), 0, p->u.zToken); - break; - } - if( p->pTab!=0 - && (op==TK_AGG_COLUMN || op==TK_COLUMN - || op==TK_REGISTER || op==TK_TRIGGER) - ){ - /* op==TK_REGISTER && p->pTab!=0 happens when pExpr was originally - ** a TK_COLUMN but was previously evaluated and cached in a register */ - int j = p->iColumn; - if( j>=0 ){ - const char *zColl = p->pTab->aCol[j].zColl; - pColl = sqlite3FindCollSeq(db, ENC(db), zColl, 0); - } - break; - } - if( p->flags & EP_Collate ){ - if( ALWAYS(p->pLeft) && (p->pLeft->flags & EP_Collate)!=0 ){ - p = p->pLeft; - }else{ - p = p->pRight; - } - }else{ - break; - } - } - if( sqlite3CheckCollSeq(pParse, pColl) ){ - pColl = 0; - } - return pColl; -} - -/* -** pExpr is an operand of a comparison operator. aff2 is the -** type affinity of the other operand. This routine returns the -** type affinity that should be used for the comparison operator. -*/ -SQLITE_PRIVATE char sqlite3CompareAffinity(Expr *pExpr, char aff2){ - char aff1 = sqlite3ExprAffinity(pExpr); - if( aff1 && aff2 ){ - /* Both sides of the comparison are columns. If one has numeric - ** affinity, use that. Otherwise use no affinity. - */ - if( sqlite3IsNumericAffinity(aff1) || sqlite3IsNumericAffinity(aff2) ){ - return SQLITE_AFF_NUMERIC; - }else{ - return SQLITE_AFF_NONE; - } - }else if( !aff1 && !aff2 ){ - /* Neither side of the comparison is a column. Compare the - ** results directly. - */ - return SQLITE_AFF_NONE; - }else{ - /* One side is a column, the other is not. Use the columns affinity. */ - assert( aff1==0 || aff2==0 ); - return (aff1 + aff2); - } -} - -/* -** pExpr is a comparison operator. Return the type affinity that should -** be applied to both operands prior to doing the comparison. -*/ -static char comparisonAffinity(Expr *pExpr){ - char aff; - assert( pExpr->op==TK_EQ || pExpr->op==TK_IN || pExpr->op==TK_LT || - pExpr->op==TK_GT || pExpr->op==TK_GE || pExpr->op==TK_LE || - pExpr->op==TK_NE || pExpr->op==TK_IS || pExpr->op==TK_ISNOT ); - assert( pExpr->pLeft ); - aff = sqlite3ExprAffinity(pExpr->pLeft); - if( pExpr->pRight ){ - aff = sqlite3CompareAffinity(pExpr->pRight, aff); - }else if( ExprHasProperty(pExpr, EP_xIsSelect) ){ - aff = sqlite3CompareAffinity(pExpr->x.pSelect->pEList->a[0].pExpr, aff); - }else if( !aff ){ - aff = SQLITE_AFF_NONE; - } - return aff; -} - -/* -** pExpr is a comparison expression, eg. '=', '<', IN(...) etc. -** idx_affinity is the affinity of an indexed column. Return true -** if the index with affinity idx_affinity may be used to implement -** the comparison in pExpr. -*/ -SQLITE_PRIVATE int sqlite3IndexAffinityOk(Expr *pExpr, char idx_affinity){ - char aff = comparisonAffinity(pExpr); - switch( aff ){ - case SQLITE_AFF_NONE: - return 1; - case SQLITE_AFF_TEXT: - return idx_affinity==SQLITE_AFF_TEXT; - default: - return sqlite3IsNumericAffinity(idx_affinity); - } -} - -/* -** Return the P5 value that should be used for a binary comparison -** opcode (OP_Eq, OP_Ge etc.) used to compare pExpr1 and pExpr2. 
-*/ -static u8 binaryCompareP5(Expr *pExpr1, Expr *pExpr2, int jumpIfNull){ - u8 aff = (char)sqlite3ExprAffinity(pExpr2); - aff = (u8)sqlite3CompareAffinity(pExpr1, aff) | (u8)jumpIfNull; - return aff; -} - -/* -** Return a pointer to the collation sequence that should be used by -** a binary comparison operator comparing pLeft and pRight. -** -** If the left hand expression has a collating sequence type, then it is -** used. Otherwise the collation sequence for the right hand expression -** is used, or the default (BINARY) if neither expression has a collating -** type. -** -** Argument pRight (but not pLeft) may be a null pointer. In this case, -** it is not considered. -*/ -SQLITE_PRIVATE CollSeq *sqlite3BinaryCompareCollSeq( - Parse *pParse, - Expr *pLeft, - Expr *pRight -){ - CollSeq *pColl; - assert( pLeft ); - if( pLeft->flags & EP_Collate ){ - pColl = sqlite3ExprCollSeq(pParse, pLeft); - }else if( pRight && (pRight->flags & EP_Collate)!=0 ){ - pColl = sqlite3ExprCollSeq(pParse, pRight); - }else{ - pColl = sqlite3ExprCollSeq(pParse, pLeft); - if( !pColl ){ - pColl = sqlite3ExprCollSeq(pParse, pRight); - } - } - return pColl; -} - -/* -** Generate code for a comparison operator. -*/ -static int codeCompare( - Parse *pParse, /* The parsing (and code generating) context */ - Expr *pLeft, /* The left operand */ - Expr *pRight, /* The right operand */ - int opcode, /* The comparison opcode */ - int in1, int in2, /* Register holding operands */ - int dest, /* Jump here if true. */ - int jumpIfNull /* If true, jump if either operand is NULL */ -){ - int p5; - int addr; - CollSeq *p4; - - p4 = sqlite3BinaryCompareCollSeq(pParse, pLeft, pRight); - p5 = binaryCompareP5(pLeft, pRight, jumpIfNull); - addr = sqlite3VdbeAddOp4(pParse->pVdbe, opcode, in2, dest, in1, - (void*)p4, P4_COLLSEQ); - sqlite3VdbeChangeP5(pParse->pVdbe, (u8)p5); - return addr; -} - -#if SQLITE_MAX_EXPR_DEPTH>0 -/* -** Check that argument nHeight is less than or equal to the maximum -** expression depth allowed. If it is not, leave an error message in -** pParse. -*/ -SQLITE_PRIVATE int sqlite3ExprCheckHeight(Parse *pParse, int nHeight){ - int rc = SQLITE_OK; - int mxHeight = pParse->db->aLimit[SQLITE_LIMIT_EXPR_DEPTH]; - if( nHeight>mxHeight ){ - sqlite3ErrorMsg(pParse, - "Expression tree is too large (maximum depth %d)", mxHeight - ); - rc = SQLITE_ERROR; - } - return rc; -} - -/* The following three functions, heightOfExpr(), heightOfExprList() -** and heightOfSelect(), are used to determine the maximum height -** of any expression tree referenced by the structure passed as the -** first argument. -** -** If this maximum height is greater than the current value pointed -** to by pnHeight, the second parameter, then set *pnHeight to that -** value. -*/ -static void heightOfExpr(Expr *p, int *pnHeight){ - if( p ){ - if( p->nHeight>*pnHeight ){ - *pnHeight = p->nHeight; - } - } -} -static void heightOfExprList(ExprList *p, int *pnHeight){ - if( p ){ - int i; - for(i=0; inExpr; i++){ - heightOfExpr(p->a[i].pExpr, pnHeight); - } - } -} -static void heightOfSelect(Select *p, int *pnHeight){ - if( p ){ - heightOfExpr(p->pWhere, pnHeight); - heightOfExpr(p->pHaving, pnHeight); - heightOfExpr(p->pLimit, pnHeight); - heightOfExpr(p->pOffset, pnHeight); - heightOfExprList(p->pEList, pnHeight); - heightOfExprList(p->pGroupBy, pnHeight); - heightOfExprList(p->pOrderBy, pnHeight); - heightOfSelect(p->pPrior, pnHeight); - } -} - -/* -** Set the Expr.nHeight variable in the structure passed as an -** argument. 
An expression with no children, Expr.pList or -** Expr.pSelect member has a height of 1. Any other expression -** has a height equal to the maximum height of any other -** referenced Expr plus one. -*/ -static void exprSetHeight(Expr *p){ - int nHeight = 0; - heightOfExpr(p->pLeft, &nHeight); - heightOfExpr(p->pRight, &nHeight); - if( ExprHasProperty(p, EP_xIsSelect) ){ - heightOfSelect(p->x.pSelect, &nHeight); - }else{ - heightOfExprList(p->x.pList, &nHeight); - } - p->nHeight = nHeight + 1; -} - -/* -** Set the Expr.nHeight variable using the exprSetHeight() function. If -** the height is greater than the maximum allowed expression depth, -** leave an error in pParse. -*/ -SQLITE_PRIVATE void sqlite3ExprSetHeight(Parse *pParse, Expr *p){ - exprSetHeight(p); - sqlite3ExprCheckHeight(pParse, p->nHeight); -} - -/* -** Return the maximum height of any expression tree referenced -** by the select statement passed as an argument. -*/ -SQLITE_PRIVATE int sqlite3SelectExprHeight(Select *p){ - int nHeight = 0; - heightOfSelect(p, &nHeight); - return nHeight; -} -#else - #define exprSetHeight(y) -#endif /* SQLITE_MAX_EXPR_DEPTH>0 */ - -/* -** This routine is the core allocator for Expr nodes. -** -** Construct a new expression node and return a pointer to it. Memory -** for this node and for the pToken argument is a single allocation -** obtained from sqlite3DbMalloc(). The calling function -** is responsible for making sure the node eventually gets freed. -** -** If dequote is true, then the token (if it exists) is dequoted. -** If dequote is false, no dequoting is performance. The deQuote -** parameter is ignored if pToken is NULL or if the token does not -** appear to be quoted. If the quotes were of the form "..." (double-quotes) -** then the EP_DblQuoted flag is set on the expression node. -** -** Special case: If op==TK_INTEGER and pToken points to a string that -** can be translated into a 32-bit integer, then the token is not -** stored in u.zToken. Instead, the integer values is written -** into u.iValue and the EP_IntValue flag is set. No extra storage -** is allocated to hold the integer text and the dequote flag is ignored. -*/ -SQLITE_PRIVATE Expr *sqlite3ExprAlloc( - sqlite3 *db, /* Handle for sqlite3DbMallocZero() (may be null) */ - int op, /* Expression opcode */ - const Token *pToken, /* Token argument. Might be NULL */ - int dequote /* True to dequote */ -){ - Expr *pNew; - int nExtra = 0; - int iValue = 0; - - if( pToken ){ - if( op!=TK_INTEGER || pToken->z==0 - || sqlite3GetInt32(pToken->z, &iValue)==0 ){ - nExtra = pToken->n+1; - assert( iValue>=0 ); - } - } - pNew = sqlite3DbMallocZero(db, sizeof(Expr)+nExtra); - if( pNew ){ - pNew->op = (u8)op; - pNew->iAgg = -1; - if( pToken ){ - if( nExtra==0 ){ - pNew->flags |= EP_IntValue; - pNew->u.iValue = iValue; - }else{ - int c; - pNew->u.zToken = (char*)&pNew[1]; - assert( pToken->z!=0 || pToken->n==0 ); - if( pToken->n ) memcpy(pNew->u.zToken, pToken->z, pToken->n); - pNew->u.zToken[pToken->n] = 0; - if( dequote && nExtra>=3 - && ((c = pToken->z[0])=='\'' || c=='"' || c=='[' || c=='`') ){ - sqlite3Dequote(pNew->u.zToken); - if( c=='"' ) pNew->flags |= EP_DblQuoted; - } - } - } -#if SQLITE_MAX_EXPR_DEPTH>0 - pNew->nHeight = 1; -#endif - } - return pNew; -} - -/* -** Allocate a new expression node from a zero-terminated token that has -** already been dequoted. 
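sqlite3ExprAlloc() above skips allocating token text when a TK_INTEGER literal fits in 32 bits, storing the value inline and flagging it with EP_IntValue. A self-contained sketch of that fits-inline test, using strtol() in place of sqlite3GetInt32() and assuming a NUL-terminated string (unlike SQLite's length-delimited Token); the function name and interface are invented for illustration:

#include <errno.h>
#include <limits.h>
#include <stdlib.h>

/* Try to store an integer literal inline.  Returns 1 and writes *out
** when the text fits in a signed 32-bit value; otherwise returns 0 and
** the caller keeps the text form of the token.                        */
static int store_int_inline(const char *z, int *out){
  char *end;
  long v;
  errno = 0;
  v = strtol(z, &end, 10);
  if( errno!=0 || *end!='\0' ) return 0;   /* overflow or trailing junk */
  if( v<INT_MIN || v>INT_MAX ) return 0;   /* does not fit in 32 bits   */
  *out = (int)v;
  return 1;
}

In the deleted code the token text reaching this path is always non-negative; negation is applied separately (see codeInteger() later in this hunk).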
-*/ -SQLITE_PRIVATE Expr *sqlite3Expr( - sqlite3 *db, /* Handle for sqlite3DbMallocZero() (may be null) */ - int op, /* Expression opcode */ - const char *zToken /* Token argument. Might be NULL */ -){ - Token x; - x.z = zToken; - x.n = zToken ? sqlite3Strlen30(zToken) : 0; - return sqlite3ExprAlloc(db, op, &x, 0); -} - -/* -** Attach subtrees pLeft and pRight to the Expr node pRoot. -** -** If pRoot==NULL that means that a memory allocation error has occurred. -** In that case, delete the subtrees pLeft and pRight. -*/ -SQLITE_PRIVATE void sqlite3ExprAttachSubtrees( - sqlite3 *db, - Expr *pRoot, - Expr *pLeft, - Expr *pRight -){ - if( pRoot==0 ){ - assert( db->mallocFailed ); - sqlite3ExprDelete(db, pLeft); - sqlite3ExprDelete(db, pRight); - }else{ - if( pRight ){ - pRoot->pRight = pRight; - pRoot->flags |= EP_Collate & pRight->flags; - } - if( pLeft ){ - pRoot->pLeft = pLeft; - pRoot->flags |= EP_Collate & pLeft->flags; - } - exprSetHeight(pRoot); - } -} - -/* -** Allocate a Expr node which joins as many as two subtrees. -** -** One or both of the subtrees can be NULL. Return a pointer to the new -** Expr node. Or, if an OOM error occurs, set pParse->db->mallocFailed, -** free the subtrees and return NULL. -*/ -SQLITE_PRIVATE Expr *sqlite3PExpr( - Parse *pParse, /* Parsing context */ - int op, /* Expression opcode */ - Expr *pLeft, /* Left operand */ - Expr *pRight, /* Right operand */ - const Token *pToken /* Argument token */ -){ - Expr *p; - if( op==TK_AND && pLeft && pRight ){ - /* Take advantage of short-circuit false optimization for AND */ - p = sqlite3ExprAnd(pParse->db, pLeft, pRight); - }else{ - p = sqlite3ExprAlloc(pParse->db, op, pToken, 1); - sqlite3ExprAttachSubtrees(pParse->db, p, pLeft, pRight); - } - if( p ) { - sqlite3ExprCheckHeight(pParse, p->nHeight); - } - return p; -} - -/* -** If the expression is always either TRUE or FALSE (respectively), -** then return 1. If one cannot determine the truth value of the -** expression at compile-time return 0. -** -** This is an optimization. If is OK to return 0 here even if -** the expression really is always false or false (a false negative). -** But it is a bug to return 1 if the expression might have different -** boolean values in different circumstances (a false positive.) -** -** Note that if the expression is part of conditional for a -** LEFT JOIN, then we cannot determine at compile-time whether or not -** is it true or false, so always return 0. -*/ -static int exprAlwaysTrue(Expr *p){ - int v = 0; - if( ExprHasProperty(p, EP_FromJoin) ) return 0; - if( !sqlite3ExprIsInteger(p, &v) ) return 0; - return v!=0; -} -static int exprAlwaysFalse(Expr *p){ - int v = 0; - if( ExprHasProperty(p, EP_FromJoin) ) return 0; - if( !sqlite3ExprIsInteger(p, &v) ) return 0; - return v==0; -} - -/* -** Join two expressions using an AND operator. If either expression is -** NULL, then just return the other expression. -** -** If one side or the other of the AND is known to be false, then instead -** of returning an AND expression, just return a constant expression with -** a value of false. 
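The sqlite3ExprAnd() folding described above only needs the "either side is always false" case, since that is what allows a WHERE clause to be discarded early. Viewed as three-valued compile-time truth the whole table is small; the helper below is hypothetical and not part of the deleted code:

/* Compile-time truth of an expression: 0 = always false,
** 1 = always true, -1 = unknown until run time.          */
static int and_fold(int a, int b){
  if( a==0 || b==0 ) return 0;  /* anything AND false is false   */
  if( a==1 && b==1 ) return 1;  /* true AND true is true         */
  return -1;                    /* otherwise decide at run time  */
}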
-*/ -SQLITE_PRIVATE Expr *sqlite3ExprAnd(sqlite3 *db, Expr *pLeft, Expr *pRight){ - if( pLeft==0 ){ - return pRight; - }else if( pRight==0 ){ - return pLeft; - }else if( exprAlwaysFalse(pLeft) || exprAlwaysFalse(pRight) ){ - sqlite3ExprDelete(db, pLeft); - sqlite3ExprDelete(db, pRight); - return sqlite3ExprAlloc(db, TK_INTEGER, &sqlite3IntTokens[0], 0); - }else{ - Expr *pNew = sqlite3ExprAlloc(db, TK_AND, 0, 0); - sqlite3ExprAttachSubtrees(db, pNew, pLeft, pRight); - return pNew; - } -} - -/* -** Construct a new expression node for a function with multiple -** arguments. -*/ -SQLITE_PRIVATE Expr *sqlite3ExprFunction(Parse *pParse, ExprList *pList, Token *pToken){ - Expr *pNew; - sqlite3 *db = pParse->db; - assert( pToken ); - pNew = sqlite3ExprAlloc(db, TK_FUNCTION, pToken, 1); - if( pNew==0 ){ - sqlite3ExprListDelete(db, pList); /* Avoid memory leak when malloc fails */ - return 0; - } - pNew->x.pList = pList; - assert( !ExprHasProperty(pNew, EP_xIsSelect) ); - sqlite3ExprSetHeight(pParse, pNew); - return pNew; -} - -/* -** Assign a variable number to an expression that encodes a wildcard -** in the original SQL statement. -** -** Wildcards consisting of a single "?" are assigned the next sequential -** variable number. -** -** Wildcards of the form "?nnn" are assigned the number "nnn". We make -** sure "nnn" is not too be to avoid a denial of service attack when -** the SQL statement comes from an external source. -** -** Wildcards of the form ":aaa", "@aaa", or "$aaa" are assigned the same number -** as the previous instance of the same wildcard. Or if this is the first -** instance of the wildcard, the next sequenial variable number is -** assigned. -*/ -SQLITE_PRIVATE void sqlite3ExprAssignVarNumber(Parse *pParse, Expr *pExpr){ - sqlite3 *db = pParse->db; - const char *z; - - if( pExpr==0 ) return; - assert( !ExprHasProperty(pExpr, EP_IntValue|EP_Reduced|EP_TokenOnly) ); - z = pExpr->u.zToken; - assert( z!=0 ); - assert( z[0]!=0 ); - if( z[1]==0 ){ - /* Wildcard of the form "?". Assign the next variable number */ - assert( z[0]=='?' ); - pExpr->iColumn = (ynVar)(++pParse->nVar); - }else{ - ynVar x = 0; - u32 n = sqlite3Strlen30(z); - if( z[0]=='?' ){ - /* Wildcard of the form "?nnn". Convert "nnn" to an integer and - ** use it as the variable number */ - i64 i; - int bOk = 0==sqlite3Atoi64(&z[1], &i, n-1, SQLITE_UTF8); - pExpr->iColumn = x = (ynVar)i; - testcase( i==0 ); - testcase( i==1 ); - testcase( i==db->aLimit[SQLITE_LIMIT_VARIABLE_NUMBER]-1 ); - testcase( i==db->aLimit[SQLITE_LIMIT_VARIABLE_NUMBER] ); - if( bOk==0 || i<1 || i>db->aLimit[SQLITE_LIMIT_VARIABLE_NUMBER] ){ - sqlite3ErrorMsg(pParse, "variable number must be between ?1 and ?%d", - db->aLimit[SQLITE_LIMIT_VARIABLE_NUMBER]); - x = 0; - } - if( i>pParse->nVar ){ - pParse->nVar = (int)i; - } - }else{ - /* Wildcards like ":aaa", "$aaa" or "@aaa". Reuse the same variable - ** number as the prior appearance of the same name, or if the name - ** has never appeared before, reuse the same variable number - */ - ynVar i; - for(i=0; inzVar; i++){ - if( pParse->azVar[i] && strcmp(pParse->azVar[i],z)==0 ){ - pExpr->iColumn = x = (ynVar)i+1; - break; - } - } - if( x==0 ) x = pExpr->iColumn = (ynVar)(++pParse->nVar); - } - if( x>0 ){ - if( x>pParse->nzVar ){ - char **a; - a = sqlite3DbRealloc(db, pParse->azVar, x*sizeof(a[0])); - if( a==0 ) return; /* Error reported through db->mallocFailed */ - pParse->azVar = a; - memset(&a[pParse->nzVar], 0, (x-pParse->nzVar)*sizeof(a[0])); - pParse->nzVar = x; - } - if( z[0]!='?' 
|| pParse->azVar[x-1]==0 ){ - sqlite3DbFree(db, pParse->azVar[x-1]); - pParse->azVar[x-1] = sqlite3DbStrNDup(db, z, n); - } - } - } - if( !pParse->nErr && pParse->nVar>db->aLimit[SQLITE_LIMIT_VARIABLE_NUMBER] ){ - sqlite3ErrorMsg(pParse, "too many SQL variables"); - } -} - -/* -** Recursively delete an expression tree. -*/ -SQLITE_PRIVATE void sqlite3ExprDelete(sqlite3 *db, Expr *p){ - if( p==0 ) return; - /* Sanity check: Assert that the IntValue is non-negative if it exists */ - assert( !ExprHasProperty(p, EP_IntValue) || p->u.iValue>=0 ); - if( !ExprHasProperty(p, EP_TokenOnly) ){ - /* The Expr.x union is never used at the same time as Expr.pRight */ - assert( p->x.pList==0 || p->pRight==0 ); - sqlite3ExprDelete(db, p->pLeft); - sqlite3ExprDelete(db, p->pRight); - if( ExprHasProperty(p, EP_MemToken) ) sqlite3DbFree(db, p->u.zToken); - if( ExprHasProperty(p, EP_xIsSelect) ){ - sqlite3SelectDelete(db, p->x.pSelect); - }else{ - sqlite3ExprListDelete(db, p->x.pList); - } - } - if( !ExprHasProperty(p, EP_Static) ){ - sqlite3DbFree(db, p); - } -} - -/* -** Return the number of bytes allocated for the expression structure -** passed as the first argument. This is always one of EXPR_FULLSIZE, -** EXPR_REDUCEDSIZE or EXPR_TOKENONLYSIZE. -*/ -static int exprStructSize(Expr *p){ - if( ExprHasProperty(p, EP_TokenOnly) ) return EXPR_TOKENONLYSIZE; - if( ExprHasProperty(p, EP_Reduced) ) return EXPR_REDUCEDSIZE; - return EXPR_FULLSIZE; -} - -/* -** The dupedExpr*Size() routines each return the number of bytes required -** to store a copy of an expression or expression tree. They differ in -** how much of the tree is measured. -** -** dupedExprStructSize() Size of only the Expr structure -** dupedExprNodeSize() Size of Expr + space for token -** dupedExprSize() Expr + token + subtree components -** -*************************************************************************** -** -** The dupedExprStructSize() function returns two values OR-ed together: -** (1) the space required for a copy of the Expr structure only and -** (2) the EP_xxx flags that indicate what the structure size should be. -** The return values is always one of: -** -** EXPR_FULLSIZE -** EXPR_REDUCEDSIZE | EP_Reduced -** EXPR_TOKENONLYSIZE | EP_TokenOnly -** -** The size of the structure can be found by masking the return value -** of this routine with 0xfff. The flags can be found by masking the -** return value with EP_Reduced|EP_TokenOnly. -** -** Note that with flags==EXPRDUP_REDUCE, this routines works on full-size -** (unreduced) Expr objects as they or originally constructed by the parser. -** During expression analysis, extra information is computed and moved into -** later parts of teh Expr object and that extra information might get chopped -** off if the expression is reduced. Note also that it does not work to -** make a EXPRDUP_REDUCE copy of a reduced expression. It is only legal -** to reduce a pristine expression tree from the parser. The implementation -** of dupedExprStructSize() contain multiple assert() statements that attempt -** to enforce this constraint. 
-*/ -static int dupedExprStructSize(Expr *p, int flags){ - int nSize; - assert( flags==EXPRDUP_REDUCE || flags==0 ); /* Only one flag value allowed */ - assert( EXPR_FULLSIZE<=0xfff ); - assert( (0xfff & (EP_Reduced|EP_TokenOnly))==0 ); - if( 0==(flags&EXPRDUP_REDUCE) ){ - nSize = EXPR_FULLSIZE; - }else{ - assert( !ExprHasProperty(p, EP_TokenOnly|EP_Reduced) ); - assert( !ExprHasProperty(p, EP_FromJoin) ); - assert( !ExprHasProperty(p, EP_MemToken) ); - assert( !ExprHasProperty(p, EP_NoReduce) ); - if( p->pLeft || p->x.pList ){ - nSize = EXPR_REDUCEDSIZE | EP_Reduced; - }else{ - assert( p->pRight==0 ); - nSize = EXPR_TOKENONLYSIZE | EP_TokenOnly; - } - } - return nSize; -} - -/* -** This function returns the space in bytes required to store the copy -** of the Expr structure and a copy of the Expr.u.zToken string (if that -** string is defined.) -*/ -static int dupedExprNodeSize(Expr *p, int flags){ - int nByte = dupedExprStructSize(p, flags) & 0xfff; - if( !ExprHasProperty(p, EP_IntValue) && p->u.zToken ){ - nByte += sqlite3Strlen30(p->u.zToken)+1; - } - return ROUND8(nByte); -} - -/* -** Return the number of bytes required to create a duplicate of the -** expression passed as the first argument. The second argument is a -** mask containing EXPRDUP_XXX flags. -** -** The value returned includes space to create a copy of the Expr struct -** itself and the buffer referred to by Expr.u.zToken, if any. -** -** If the EXPRDUP_REDUCE flag is set, then the return value includes -** space to duplicate all Expr nodes in the tree formed by Expr.pLeft -** and Expr.pRight variables (but not for any structures pointed to or -** descended from the Expr.x.pList or Expr.x.pSelect variables). -*/ -static int dupedExprSize(Expr *p, int flags){ - int nByte = 0; - if( p ){ - nByte = dupedExprNodeSize(p, flags); - if( flags&EXPRDUP_REDUCE ){ - nByte += dupedExprSize(p->pLeft, flags) + dupedExprSize(p->pRight, flags); - } - } - return nByte; -} - -/* -** This function is similar to sqlite3ExprDup(), except that if pzBuffer -** is not NULL then *pzBuffer is assumed to point to a buffer large enough -** to store the copy of expression p, the copies of p->u.zToken -** (if applicable), and the copies of the p->pLeft and p->pRight expressions, -** if any. Before returning, *pzBuffer is set to the first byte passed the -** portion of the buffer copied into by this function. -*/ -static Expr *exprDup(sqlite3 *db, Expr *p, int flags, u8 **pzBuffer){ - Expr *pNew = 0; /* Value to return */ - if( p ){ - const int isReduced = (flags&EXPRDUP_REDUCE); - u8 *zAlloc; - u32 staticFlag = 0; - - assert( pzBuffer==0 || isReduced ); - - /* Figure out where to write the new Expr structure. */ - if( pzBuffer ){ - zAlloc = *pzBuffer; - staticFlag = EP_Static; - }else{ - zAlloc = sqlite3DbMallocRaw(db, dupedExprSize(p, flags)); - } - pNew = (Expr *)zAlloc; - - if( pNew ){ - /* Set nNewSize to the size allocated for the structure pointed to - ** by pNew. This is either EXPR_FULLSIZE, EXPR_REDUCEDSIZE or - ** EXPR_TOKENONLYSIZE. nToken is set to the number of bytes consumed - ** by the copy of the p->u.zToken string (if any). 
- */ - const unsigned nStructSize = dupedExprStructSize(p, flags); - const int nNewSize = nStructSize & 0xfff; - int nToken; - if( !ExprHasProperty(p, EP_IntValue) && p->u.zToken ){ - nToken = sqlite3Strlen30(p->u.zToken) + 1; - }else{ - nToken = 0; - } - if( isReduced ){ - assert( ExprHasProperty(p, EP_Reduced)==0 ); - memcpy(zAlloc, p, nNewSize); - }else{ - int nSize = exprStructSize(p); - memcpy(zAlloc, p, nSize); - memset(&zAlloc[nSize], 0, EXPR_FULLSIZE-nSize); - } - - /* Set the EP_Reduced, EP_TokenOnly, and EP_Static flags appropriately. */ - pNew->flags &= ~(EP_Reduced|EP_TokenOnly|EP_Static|EP_MemToken); - pNew->flags |= nStructSize & (EP_Reduced|EP_TokenOnly); - pNew->flags |= staticFlag; - - /* Copy the p->u.zToken string, if any. */ - if( nToken ){ - char *zToken = pNew->u.zToken = (char*)&zAlloc[nNewSize]; - memcpy(zToken, p->u.zToken, nToken); - } - - if( 0==((p->flags|pNew->flags) & EP_TokenOnly) ){ - /* Fill in the pNew->x.pSelect or pNew->x.pList member. */ - if( ExprHasProperty(p, EP_xIsSelect) ){ - pNew->x.pSelect = sqlite3SelectDup(db, p->x.pSelect, isReduced); - }else{ - pNew->x.pList = sqlite3ExprListDup(db, p->x.pList, isReduced); - } - } - - /* Fill in pNew->pLeft and pNew->pRight. */ - if( ExprHasProperty(pNew, EP_Reduced|EP_TokenOnly) ){ - zAlloc += dupedExprNodeSize(p, flags); - if( ExprHasProperty(pNew, EP_Reduced) ){ - pNew->pLeft = exprDup(db, p->pLeft, EXPRDUP_REDUCE, &zAlloc); - pNew->pRight = exprDup(db, p->pRight, EXPRDUP_REDUCE, &zAlloc); - } - if( pzBuffer ){ - *pzBuffer = zAlloc; - } - }else{ - if( !ExprHasProperty(p, EP_TokenOnly) ){ - pNew->pLeft = sqlite3ExprDup(db, p->pLeft, 0); - pNew->pRight = sqlite3ExprDup(db, p->pRight, 0); - } - } - - } - } - return pNew; -} - -/* -** Create and return a deep copy of the object passed as the second -** argument. If an OOM condition is encountered, NULL is returned -** and the db->mallocFailed flag set. -*/ -#ifndef SQLITE_OMIT_CTE -static With *withDup(sqlite3 *db, With *p){ - With *pRet = 0; - if( p ){ - int nByte = sizeof(*p) + sizeof(p->a[0]) * (p->nCte-1); - pRet = sqlite3DbMallocZero(db, nByte); - if( pRet ){ - int i; - pRet->nCte = p->nCte; - for(i=0; inCte; i++){ - pRet->a[i].pSelect = sqlite3SelectDup(db, p->a[i].pSelect, 0); - pRet->a[i].pCols = sqlite3ExprListDup(db, p->a[i].pCols, 0); - pRet->a[i].zName = sqlite3DbStrDup(db, p->a[i].zName); - } - } - } - return pRet; -} -#else -# define withDup(x,y) 0 -#endif - -/* -** The following group of routines make deep copies of expressions, -** expression lists, ID lists, and select statements. The copies can -** be deleted (by being passed to their respective ...Delete() routines) -** without effecting the originals. -** -** The expression list, ID, and source lists return by sqlite3ExprListDup(), -** sqlite3IdListDup(), and sqlite3SrcListDup() can not be further expanded -** by subsequent calls to sqlite*ListAppend() routines. -** -** Any tables that the SrcList might point to are not duplicated. -** -** The flags parameter contains a combination of the EXPRDUP_XXX flags. -** If the EXPRDUP_REDUCE flag is set, then the structure returned is a -** truncated version of the usual Expr structure that will be stored as -** part of the in-memory representation of the database schema. 
-*/ -SQLITE_PRIVATE Expr *sqlite3ExprDup(sqlite3 *db, Expr *p, int flags){ - return exprDup(db, p, flags, 0); -} -SQLITE_PRIVATE ExprList *sqlite3ExprListDup(sqlite3 *db, ExprList *p, int flags){ - ExprList *pNew; - struct ExprList_item *pItem, *pOldItem; - int i; - if( p==0 ) return 0; - pNew = sqlite3DbMallocRaw(db, sizeof(*pNew) ); - if( pNew==0 ) return 0; - pNew->nExpr = i = p->nExpr; - if( (flags & EXPRDUP_REDUCE)==0 ) for(i=1; inExpr; i+=i){} - pNew->a = pItem = sqlite3DbMallocRaw(db, i*sizeof(p->a[0]) ); - if( pItem==0 ){ - sqlite3DbFree(db, pNew); - return 0; - } - pOldItem = p->a; - for(i=0; inExpr; i++, pItem++, pOldItem++){ - Expr *pOldExpr = pOldItem->pExpr; - pItem->pExpr = sqlite3ExprDup(db, pOldExpr, flags); - pItem->zName = sqlite3DbStrDup(db, pOldItem->zName); - pItem->zSpan = sqlite3DbStrDup(db, pOldItem->zSpan); - pItem->sortOrder = pOldItem->sortOrder; - pItem->done = 0; - pItem->bSpanIsTab = pOldItem->bSpanIsTab; - pItem->u = pOldItem->u; - } - return pNew; -} - -/* -** If cursors, triggers, views and subqueries are all omitted from -** the build, then none of the following routines, except for -** sqlite3SelectDup(), can be called. sqlite3SelectDup() is sometimes -** called with a NULL argument. -*/ -#if !defined(SQLITE_OMIT_VIEW) || !defined(SQLITE_OMIT_TRIGGER) \ - || !defined(SQLITE_OMIT_SUBQUERY) -SQLITE_PRIVATE SrcList *sqlite3SrcListDup(sqlite3 *db, SrcList *p, int flags){ - SrcList *pNew; - int i; - int nByte; - if( p==0 ) return 0; - nByte = sizeof(*p) + (p->nSrc>0 ? sizeof(p->a[0]) * (p->nSrc-1) : 0); - pNew = sqlite3DbMallocRaw(db, nByte ); - if( pNew==0 ) return 0; - pNew->nSrc = pNew->nAlloc = p->nSrc; - for(i=0; inSrc; i++){ - struct SrcList_item *pNewItem = &pNew->a[i]; - struct SrcList_item *pOldItem = &p->a[i]; - Table *pTab; - pNewItem->pSchema = pOldItem->pSchema; - pNewItem->zDatabase = sqlite3DbStrDup(db, pOldItem->zDatabase); - pNewItem->zName = sqlite3DbStrDup(db, pOldItem->zName); - pNewItem->zAlias = sqlite3DbStrDup(db, pOldItem->zAlias); - pNewItem->jointype = pOldItem->jointype; - pNewItem->iCursor = pOldItem->iCursor; - pNewItem->addrFillSub = pOldItem->addrFillSub; - pNewItem->regReturn = pOldItem->regReturn; - pNewItem->isCorrelated = pOldItem->isCorrelated; - pNewItem->viaCoroutine = pOldItem->viaCoroutine; - pNewItem->isRecursive = pOldItem->isRecursive; - pNewItem->zIndex = sqlite3DbStrDup(db, pOldItem->zIndex); - pNewItem->notIndexed = pOldItem->notIndexed; - pNewItem->pIndex = pOldItem->pIndex; - pTab = pNewItem->pTab = pOldItem->pTab; - if( pTab ){ - pTab->nRef++; - } - pNewItem->pSelect = sqlite3SelectDup(db, pOldItem->pSelect, flags); - pNewItem->pOn = sqlite3ExprDup(db, pOldItem->pOn, flags); - pNewItem->pUsing = sqlite3IdListDup(db, pOldItem->pUsing); - pNewItem->colUsed = pOldItem->colUsed; - } - return pNew; -} -SQLITE_PRIVATE IdList *sqlite3IdListDup(sqlite3 *db, IdList *p){ - IdList *pNew; - int i; - if( p==0 ) return 0; - pNew = sqlite3DbMallocRaw(db, sizeof(*pNew) ); - if( pNew==0 ) return 0; - pNew->nId = p->nId; - pNew->a = sqlite3DbMallocRaw(db, p->nId*sizeof(p->a[0]) ); - if( pNew->a==0 ){ - sqlite3DbFree(db, pNew); - return 0; - } - /* Note that because the size of the allocation for p->a[] is not - ** necessarily a power of two, sqlite3IdListAppend() may not be called - ** on the duplicate created by this function. 
*/ - for(i=0; inId; i++){ - struct IdList_item *pNewItem = &pNew->a[i]; - struct IdList_item *pOldItem = &p->a[i]; - pNewItem->zName = sqlite3DbStrDup(db, pOldItem->zName); - pNewItem->idx = pOldItem->idx; - } - return pNew; -} -SQLITE_PRIVATE Select *sqlite3SelectDup(sqlite3 *db, Select *p, int flags){ - Select *pNew, *pPrior; - if( p==0 ) return 0; - pNew = sqlite3DbMallocRaw(db, sizeof(*p) ); - if( pNew==0 ) return 0; - pNew->pEList = sqlite3ExprListDup(db, p->pEList, flags); - pNew->pSrc = sqlite3SrcListDup(db, p->pSrc, flags); - pNew->pWhere = sqlite3ExprDup(db, p->pWhere, flags); - pNew->pGroupBy = sqlite3ExprListDup(db, p->pGroupBy, flags); - pNew->pHaving = sqlite3ExprDup(db, p->pHaving, flags); - pNew->pOrderBy = sqlite3ExprListDup(db, p->pOrderBy, flags); - pNew->op = p->op; - pNew->pPrior = pPrior = sqlite3SelectDup(db, p->pPrior, flags); - if( pPrior ) pPrior->pNext = pNew; - pNew->pNext = 0; - pNew->pLimit = sqlite3ExprDup(db, p->pLimit, flags); - pNew->pOffset = sqlite3ExprDup(db, p->pOffset, flags); - pNew->iLimit = 0; - pNew->iOffset = 0; - pNew->selFlags = p->selFlags & ~SF_UsesEphemeral; - pNew->addrOpenEphm[0] = -1; - pNew->addrOpenEphm[1] = -1; - pNew->nSelectRow = p->nSelectRow; - pNew->pWith = withDup(db, p->pWith); - return pNew; -} -#else -SQLITE_PRIVATE Select *sqlite3SelectDup(sqlite3 *db, Select *p, int flags){ - assert( p==0 ); - return 0; -} -#endif - - -/* -** Add a new element to the end of an expression list. If pList is -** initially NULL, then create a new expression list. -** -** If a memory allocation error occurs, the entire list is freed and -** NULL is returned. If non-NULL is returned, then it is guaranteed -** that the new entry was successfully appended. -*/ -SQLITE_PRIVATE ExprList *sqlite3ExprListAppend( - Parse *pParse, /* Parsing context */ - ExprList *pList, /* List to which to append. Might be NULL */ - Expr *pExpr /* Expression to be appended. Might be NULL */ -){ - sqlite3 *db = pParse->db; - if( pList==0 ){ - pList = sqlite3DbMallocZero(db, sizeof(ExprList) ); - if( pList==0 ){ - goto no_mem; - } - pList->a = sqlite3DbMallocRaw(db, sizeof(pList->a[0])); - if( pList->a==0 ) goto no_mem; - }else if( (pList->nExpr & (pList->nExpr-1))==0 ){ - struct ExprList_item *a; - assert( pList->nExpr>0 ); - a = sqlite3DbRealloc(db, pList->a, pList->nExpr*2*sizeof(pList->a[0])); - if( a==0 ){ - goto no_mem; - } - pList->a = a; - } - assert( pList->a!=0 ); - if( 1 ){ - struct ExprList_item *pItem = &pList->a[pList->nExpr++]; - memset(pItem, 0, sizeof(*pItem)); - pItem->pExpr = pExpr; - } - return pList; - -no_mem: - /* Avoid leaking memory if malloc has failed. */ - sqlite3ExprDelete(db, pExpr); - sqlite3ExprListDelete(db, pList); - return 0; -} - -/* -** Set the ExprList.a[].zName element of the most recently added item -** on the expression list. -** -** pList might be NULL following an OOM error. But pName should never be -** NULL. If a memory allocation fails, the pParse->db->mallocFailed flag -** is set. -*/ -SQLITE_PRIVATE void sqlite3ExprListSetName( - Parse *pParse, /* Parsing context */ - ExprList *pList, /* List to which to add the span. 
*/ - Token *pName, /* Name to be added */ - int dequote /* True to cause the name to be dequoted */ -){ - assert( pList!=0 || pParse->db->mallocFailed!=0 ); - if( pList ){ - struct ExprList_item *pItem; - assert( pList->nExpr>0 ); - pItem = &pList->a[pList->nExpr-1]; - assert( pItem->zName==0 ); - pItem->zName = sqlite3DbStrNDup(pParse->db, pName->z, pName->n); - if( dequote && pItem->zName ) sqlite3Dequote(pItem->zName); - } -} - -/* -** Set the ExprList.a[].zSpan element of the most recently added item -** on the expression list. -** -** pList might be NULL following an OOM error. But pSpan should never be -** NULL. If a memory allocation fails, the pParse->db->mallocFailed flag -** is set. -*/ -SQLITE_PRIVATE void sqlite3ExprListSetSpan( - Parse *pParse, /* Parsing context */ - ExprList *pList, /* List to which to add the span. */ - ExprSpan *pSpan /* The span to be added */ -){ - sqlite3 *db = pParse->db; - assert( pList!=0 || db->mallocFailed!=0 ); - if( pList ){ - struct ExprList_item *pItem = &pList->a[pList->nExpr-1]; - assert( pList->nExpr>0 ); - assert( db->mallocFailed || pItem->pExpr==pSpan->pExpr ); - sqlite3DbFree(db, pItem->zSpan); - pItem->zSpan = sqlite3DbStrNDup(db, (char*)pSpan->zStart, - (int)(pSpan->zEnd - pSpan->zStart)); - } -} - -/* -** If the expression list pEList contains more than iLimit elements, -** leave an error message in pParse. -*/ -SQLITE_PRIVATE void sqlite3ExprListCheckLength( - Parse *pParse, - ExprList *pEList, - const char *zObject -){ - int mx = pParse->db->aLimit[SQLITE_LIMIT_COLUMN]; - testcase( pEList && pEList->nExpr==mx ); - testcase( pEList && pEList->nExpr==mx+1 ); - if( pEList && pEList->nExpr>mx ){ - sqlite3ErrorMsg(pParse, "too many columns in %s", zObject); - } -} - -/* -** Delete an entire expression list. -*/ -SQLITE_PRIVATE void sqlite3ExprListDelete(sqlite3 *db, ExprList *pList){ - int i; - struct ExprList_item *pItem; - if( pList==0 ) return; - assert( pList->a!=0 || pList->nExpr==0 ); - for(pItem=pList->a, i=0; inExpr; i++, pItem++){ - sqlite3ExprDelete(db, pItem->pExpr); - sqlite3DbFree(db, pItem->zName); - sqlite3DbFree(db, pItem->zSpan); - } - sqlite3DbFree(db, pList->a); - sqlite3DbFree(db, pList); -} - -/* -** These routines are Walker callbacks. Walker.u.pi is a pointer -** to an integer. These routines are checking an expression to see -** if it is a constant. Set *Walker.u.pi to 0 if the expression is -** not constant. -** -** These callback routines are used to implement the following: -** -** sqlite3ExprIsConstant() -** sqlite3ExprIsConstantNotJoin() -** sqlite3ExprIsConstantOrFunction() -** -*/ -static int exprNodeIsConstant(Walker *pWalker, Expr *pExpr){ - - /* If pWalker->u.i is 3 then any term of the expression that comes from - ** the ON or USING clauses of a join disqualifies the expression - ** from being considered constant. */ - if( pWalker->u.i==3 && ExprHasProperty(pExpr, EP_FromJoin) ){ - pWalker->u.i = 0; - return WRC_Abort; - } - - switch( pExpr->op ){ - /* Consider functions to be constant if all their arguments are constant - ** and either pWalker->u.i==2 or the function as the SQLITE_FUNC_CONST - ** flag. 
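exprNodeIsConstant() and friends below answer "is this expression compile-time constant?" by walking the tree and aborting on anything row- or parameter-dependent. The same idea written as a plain recursive check over a simplified node type; the opcodes and struct are invented for illustration, and the Walker plumbing and u.i mode flags are omitted:

/* Simplified opcodes and node, for illustration only. */
enum { OP_LITERAL, OP_COLUMN, OP_VARIABLE, OP_FUNCTION, OP_BINOP };

typedef struct XNode XNode;
struct XNode { int op; XNode *left; XNode *right; };

/* Return 1 if the tree contains only literals and operators, i.e. its
** value cannot change between rows or between executions.            */
static int expr_is_constant(const XNode *p){
  if( p==0 ) return 1;
  switch( p->op ){
    case OP_LITERAL:  return 1;
    case OP_COLUMN:      /* depends on the current row                 */
    case OP_VARIABLE:    /* depends on a bound parameter               */
    case OP_FUNCTION:    /* treated as non-constant by default, as the
                         ** deleted code does                          */
      return 0;
    default:
      return expr_is_constant(p->left) && expr_is_constant(p->right);
  }
}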
*/ - case TK_FUNCTION: - if( pWalker->u.i==2 || ExprHasProperty(pExpr,EP_Constant) ){ - return WRC_Continue; - } - /* Fall through */ - case TK_ID: - case TK_COLUMN: - case TK_AGG_FUNCTION: - case TK_AGG_COLUMN: - testcase( pExpr->op==TK_ID ); - testcase( pExpr->op==TK_COLUMN ); - testcase( pExpr->op==TK_AGG_FUNCTION ); - testcase( pExpr->op==TK_AGG_COLUMN ); - pWalker->u.i = 0; - return WRC_Abort; - default: - testcase( pExpr->op==TK_SELECT ); /* selectNodeIsConstant will disallow */ - testcase( pExpr->op==TK_EXISTS ); /* selectNodeIsConstant will disallow */ - return WRC_Continue; - } -} -static int selectNodeIsConstant(Walker *pWalker, Select *NotUsed){ - UNUSED_PARAMETER(NotUsed); - pWalker->u.i = 0; - return WRC_Abort; -} -static int exprIsConst(Expr *p, int initFlag){ - Walker w; - memset(&w, 0, sizeof(w)); - w.u.i = initFlag; - w.xExprCallback = exprNodeIsConstant; - w.xSelectCallback = selectNodeIsConstant; - sqlite3WalkExpr(&w, p); - return w.u.i; -} - -/* -** Walk an expression tree. Return 1 if the expression is constant -** and 0 if it involves variables or function calls. -** -** For the purposes of this function, a double-quoted string (ex: "abc") -** is considered a variable but a single-quoted string (ex: 'abc') is -** a constant. -*/ -SQLITE_PRIVATE int sqlite3ExprIsConstant(Expr *p){ - return exprIsConst(p, 1); -} - -/* -** Walk an expression tree. Return 1 if the expression is constant -** that does no originate from the ON or USING clauses of a join. -** Return 0 if it involves variables or function calls or terms from -** an ON or USING clause. -*/ -SQLITE_PRIVATE int sqlite3ExprIsConstantNotJoin(Expr *p){ - return exprIsConst(p, 3); -} - -/* -** Walk an expression tree. Return 1 if the expression is constant -** or a function call with constant arguments. Return and 0 if there -** are any variables. -** -** For the purposes of this function, a double-quoted string (ex: "abc") -** is considered a variable but a single-quoted string (ex: 'abc') is -** a constant. -*/ -SQLITE_PRIVATE int sqlite3ExprIsConstantOrFunction(Expr *p){ - return exprIsConst(p, 2); -} - -/* -** If the expression p codes a constant integer that is small enough -** to fit in a 32-bit integer, return 1 and put the value of the integer -** in *pValue. If the expression is not an integer or if it is too big -** to fit in a signed 32-bit integer, return 0 and leave *pValue unchanged. -*/ -SQLITE_PRIVATE int sqlite3ExprIsInteger(Expr *p, int *pValue){ - int rc = 0; - - /* If an expression is an integer literal that fits in a signed 32-bit - ** integer, then the EP_IntValue flag will have already been set */ - assert( p->op!=TK_INTEGER || (p->flags & EP_IntValue)!=0 - || sqlite3GetInt32(p->u.zToken, &rc)==0 ); - - if( p->flags & EP_IntValue ){ - *pValue = p->u.iValue; - return 1; - } - switch( p->op ){ - case TK_UPLUS: { - rc = sqlite3ExprIsInteger(p->pLeft, pValue); - break; - } - case TK_UMINUS: { - int v; - if( sqlite3ExprIsInteger(p->pLeft, &v) ){ - assert( v!=(-2147483647-1) ); - *pValue = -v; - rc = 1; - } - break; - } - default: break; - } - return rc; -} - -/* -** Return FALSE if there is no chance that the expression can be NULL. -** -** If the expression might be NULL or if the expression is too complex -** to tell return TRUE. -** -** This routine is used as an optimization, to skip OP_IsNull opcodes -** when we know that a value cannot be NULL. 
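sqlite3ExprCanBeNull() deliberately errs toward "yes": only literal values are reported as never NULL. A trimmed sketch of that conservative shape, with placeholder token codes rather than SQLite's TK_* values:

enum { T_INTEGER, T_FLOAT, T_STRING, T_BLOB, T_COLUMN, T_OTHER };

/* Return 0 only when the expression provably cannot be NULL;
** when in doubt, return 1, which is harmless per the comment above. */
static int can_be_null(int op){
  switch( op ){
    case T_INTEGER:
    case T_FLOAT:
    case T_STRING:
    case T_BLOB:
      return 0;        /* literals are never NULL               */
    default:
      return 1;        /* columns and everything else might be  */
  }
}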
Hence, a false positive -** (returning TRUE when in fact the expression can never be NULL) might -** be a small performance hit but is otherwise harmless. On the other -** hand, a false negative (returning FALSE when the result could be NULL) -** will likely result in an incorrect answer. So when in doubt, return -** TRUE. -*/ -SQLITE_PRIVATE int sqlite3ExprCanBeNull(const Expr *p){ - u8 op; - while( p->op==TK_UPLUS || p->op==TK_UMINUS ){ p = p->pLeft; } - op = p->op; - if( op==TK_REGISTER ) op = p->op2; - switch( op ){ - case TK_INTEGER: - case TK_STRING: - case TK_FLOAT: - case TK_BLOB: - return 0; - default: - return 1; - } -} - -/* -** Return TRUE if the given expression is a constant which would be -** unchanged by OP_Affinity with the affinity given in the second -** argument. -** -** This routine is used to determine if the OP_Affinity operation -** can be omitted. When in doubt return FALSE. A false negative -** is harmless. A false positive, however, can result in the wrong -** answer. -*/ -SQLITE_PRIVATE int sqlite3ExprNeedsNoAffinityChange(const Expr *p, char aff){ - u8 op; - if( aff==SQLITE_AFF_NONE ) return 1; - while( p->op==TK_UPLUS || p->op==TK_UMINUS ){ p = p->pLeft; } - op = p->op; - if( op==TK_REGISTER ) op = p->op2; - switch( op ){ - case TK_INTEGER: { - return aff==SQLITE_AFF_INTEGER || aff==SQLITE_AFF_NUMERIC; - } - case TK_FLOAT: { - return aff==SQLITE_AFF_REAL || aff==SQLITE_AFF_NUMERIC; - } - case TK_STRING: { - return aff==SQLITE_AFF_TEXT; - } - case TK_BLOB: { - return 1; - } - case TK_COLUMN: { - assert( p->iTable>=0 ); /* p cannot be part of a CHECK constraint */ - return p->iColumn<0 - && (aff==SQLITE_AFF_INTEGER || aff==SQLITE_AFF_NUMERIC); - } - default: { - return 0; - } - } -} - -/* -** Return TRUE if the given string is a row-id column name. -*/ -SQLITE_PRIVATE int sqlite3IsRowid(const char *z){ - if( sqlite3StrICmp(z, "_ROWID_")==0 ) return 1; - if( sqlite3StrICmp(z, "ROWID")==0 ) return 1; - if( sqlite3StrICmp(z, "OID")==0 ) return 1; - return 0; -} - -/* -** Return true if we are able to the IN operator optimization on a -** query of the form -** -** x IN (SELECT ...) -** -** Where the SELECT... clause is as specified by the parameter to this -** routine. -** -** The Select object passed in has already been preprocessed and no -** errors have been found. 
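isCandidateForInOpt() below is essentially a checklist: the subquery may be answered from an existing table or index only if every complication is absent. A condensed sketch of those checks over a hypothetical summary struct; the field names are invented, and the real routine inspects the Select object directly:

#include <stdbool.h>

/* Hypothetical summary of an IN(...) subquery, for illustration only. */
typedef struct {
  bool is_compound;       /* has UNION/EXCEPT/INTERSECT parts          */
  bool has_distinct;      /* SELECT DISTINCT ...                       */
  bool has_aggregate;     /* uses aggregate functions                  */
  bool has_limit;         /* has a LIMIT clause                        */
  bool has_where;         /* has a WHERE clause                        */
  int  n_from;            /* number of FROM-clause terms               */
  bool from_is_subquery;  /* FROM term is itself a subquery or view    */
  bool from_is_virtual;   /* FROM term is a virtual table              */
  int  n_result_cols;     /* number of result columns                  */
  bool result_is_column;  /* single result column is a bare column ref */
} SubquerySummary;

/* Mirror the rejection rules of the deleted isCandidateForInOpt(). */
static bool in_opt_candidate(const SubquerySummary *s){
  if( s->is_compound )                              return false;
  if( s->has_distinct || s->has_aggregate )         return false;
  if( s->has_limit || s->has_where )                return false;
  if( s->n_from!=1 )                                return false;
  if( s->from_is_subquery || s->from_is_virtual )   return false;
  if( s->n_result_cols!=1 || !s->result_is_column ) return false;
  return true;
}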
-*/ -#ifndef SQLITE_OMIT_SUBQUERY -static int isCandidateForInOpt(Select *p){ - SrcList *pSrc; - ExprList *pEList; - Table *pTab; - if( p==0 ) return 0; /* right-hand side of IN is SELECT */ - if( p->pPrior ) return 0; /* Not a compound SELECT */ - if( p->selFlags & (SF_Distinct|SF_Aggregate) ){ - testcase( (p->selFlags & (SF_Distinct|SF_Aggregate))==SF_Distinct ); - testcase( (p->selFlags & (SF_Distinct|SF_Aggregate))==SF_Aggregate ); - return 0; /* No DISTINCT keyword and no aggregate functions */ - } - assert( p->pGroupBy==0 ); /* Has no GROUP BY clause */ - if( p->pLimit ) return 0; /* Has no LIMIT clause */ - assert( p->pOffset==0 ); /* No LIMIT means no OFFSET */ - if( p->pWhere ) return 0; /* Has no WHERE clause */ - pSrc = p->pSrc; - assert( pSrc!=0 ); - if( pSrc->nSrc!=1 ) return 0; /* Single term in FROM clause */ - if( pSrc->a[0].pSelect ) return 0; /* FROM is not a subquery or view */ - pTab = pSrc->a[0].pTab; - if( NEVER(pTab==0) ) return 0; - assert( pTab->pSelect==0 ); /* FROM clause is not a view */ - if( IsVirtual(pTab) ) return 0; /* FROM clause not a virtual table */ - pEList = p->pEList; - if( pEList->nExpr!=1 ) return 0; /* One column in the result set */ - if( pEList->a[0].pExpr->op!=TK_COLUMN ) return 0; /* Result is a column */ - return 1; -} -#endif /* SQLITE_OMIT_SUBQUERY */ - -/* -** Code an OP_Once instruction and allocate space for its flag. Return the -** address of the new instruction. -*/ -SQLITE_PRIVATE int sqlite3CodeOnce(Parse *pParse){ - Vdbe *v = sqlite3GetVdbe(pParse); /* Virtual machine being coded */ - return sqlite3VdbeAddOp1(v, OP_Once, pParse->nOnce++); -} - -/* -** This function is used by the implementation of the IN (...) operator. -** The pX parameter is the expression on the RHS of the IN operator, which -** might be either a list of expressions or a subquery. -** -** The job of this routine is to find or create a b-tree object that can -** be used either to test for membership in the RHS set or to iterate through -** all members of the RHS set, skipping duplicates. -** -** A cursor is opened on the b-tree object that the RHS of the IN operator -** and pX->iTable is set to the index of that cursor. -** -** The returned value of this function indicates the b-tree type, as follows: -** -** IN_INDEX_ROWID - The cursor was opened on a database table. -** IN_INDEX_INDEX_ASC - The cursor was opened on an ascending index. -** IN_INDEX_INDEX_DESC - The cursor was opened on a descending index. -** IN_INDEX_EPH - The cursor was opened on a specially created and -** populated epheremal table. -** -** An existing b-tree might be used if the RHS expression pX is a simple -** subquery such as: -** -** SELECT FROM -** -** If the RHS of the IN operator is a list or a more complex subquery, then -** an ephemeral table might need to be generated from the RHS and then -** pX->iTable made to point to the ephermeral table instead of an -** existing table. -** -** If the prNotFound parameter is 0, then the b-tree will be used to iterate -** through the set members, skipping any duplicates. In this case an -** epheremal table must be used unless the selected is guaranteed -** to be unique - either because it is an INTEGER PRIMARY KEY or it -** has a UNIQUE constraint or UNIQUE index. -** -** If the prNotFound parameter is not 0, then the b-tree will be used -** for fast set membership tests. In this case an epheremal table must -** be used unless is an INTEGER PRIMARY KEY or an index can -** be found with as its left-most column. 
-** -** When the b-tree is being used for membership tests, the calling function -** needs to know whether or not the structure contains an SQL NULL -** value in order to correctly evaluate expressions like "X IN (Y, Z)". -** If there is any chance that the (...) might contain a NULL value at -** runtime, then a register is allocated and the register number written -** to *prNotFound. If there is no chance that the (...) contains a -** NULL value, then *prNotFound is left unchanged. -** -** If a register is allocated and its location stored in *prNotFound, then -** its initial value is NULL. If the (...) does not remain constant -** for the duration of the query (i.e. the SELECT within the (...) -** is a correlated subquery) then the value of the allocated register is -** reset to NULL each time the subquery is rerun. This allows the -** caller to use vdbe code equivalent to the following: -** -** if( register==NULL ){ -** has_null = -** register = 1 -** } -** -** in order to avoid running the -** test more often than is necessary. -*/ -#ifndef SQLITE_OMIT_SUBQUERY -SQLITE_PRIVATE int sqlite3FindInIndex(Parse *pParse, Expr *pX, int *prNotFound){ - Select *p; /* SELECT to the right of IN operator */ - int eType = 0; /* Type of RHS table. IN_INDEX_* */ - int iTab = pParse->nTab++; /* Cursor of the RHS table */ - int mustBeUnique = (prNotFound==0); /* True if RHS must be unique */ - Vdbe *v = sqlite3GetVdbe(pParse); /* Virtual machine being coded */ - - assert( pX->op==TK_IN ); - - /* Check to see if an existing table or index can be used to - ** satisfy the query. This is preferable to generating a new - ** ephemeral table. - */ - p = (ExprHasProperty(pX, EP_xIsSelect) ? pX->x.pSelect : 0); - if( ALWAYS(pParse->nErr==0) && isCandidateForInOpt(p) ){ - sqlite3 *db = pParse->db; /* Database connection */ - Table *pTab; /* Table
    . */ - Expr *pExpr; /* Expression */ - i16 iCol; /* Index of column */ - i16 iDb; /* Database idx for pTab */ - - assert( p ); /* Because of isCandidateForInOpt(p) */ - assert( p->pEList!=0 ); /* Because of isCandidateForInOpt(p) */ - assert( p->pEList->a[0].pExpr!=0 ); /* Because of isCandidateForInOpt(p) */ - assert( p->pSrc!=0 ); /* Because of isCandidateForInOpt(p) */ - pTab = p->pSrc->a[0].pTab; - pExpr = p->pEList->a[0].pExpr; - iCol = (i16)pExpr->iColumn; - - /* Code an OP_Transaction and OP_TableLock for
    . */ - iDb = sqlite3SchemaToIndex(db, pTab->pSchema); - sqlite3CodeVerifySchema(pParse, iDb); - sqlite3TableLock(pParse, iDb, pTab->tnum, 0, pTab->zName); - - /* This function is only called from two places. In both cases the vdbe - ** has already been allocated. So assume sqlite3GetVdbe() is always - ** successful here. - */ - assert(v); - if( iCol<0 ){ - int iAddr = sqlite3CodeOnce(pParse); - VdbeCoverage(v); - - sqlite3OpenTable(pParse, iTab, iDb, pTab, OP_OpenRead); - eType = IN_INDEX_ROWID; - - sqlite3VdbeJumpHere(v, iAddr); - }else{ - Index *pIdx; /* Iterator variable */ - - /* The collation sequence used by the comparison. If an index is to - ** be used in place of a temp-table, it must be ordered according - ** to this collation sequence. */ - CollSeq *pReq = sqlite3BinaryCompareCollSeq(pParse, pX->pLeft, pExpr); - - /* Check that the affinity that will be used to perform the - ** comparison is the same as the affinity of the column. If - ** it is not, it is not possible to use any index. - */ - int affinity_ok = sqlite3IndexAffinityOk(pX, pTab->aCol[iCol].affinity); - - for(pIdx=pTab->pIndex; pIdx && eType==0 && affinity_ok; pIdx=pIdx->pNext){ - if( (pIdx->aiColumn[0]==iCol) - && sqlite3FindCollSeq(db, ENC(db), pIdx->azColl[0], 0)==pReq - && (!mustBeUnique || (pIdx->nKeyCol==1 && pIdx->onError!=OE_None)) - ){ - int iAddr = sqlite3CodeOnce(pParse); VdbeCoverage(v); - sqlite3VdbeAddOp3(v, OP_OpenRead, iTab, pIdx->tnum, iDb); - sqlite3VdbeSetP4KeyInfo(pParse, pIdx); - VdbeComment((v, "%s", pIdx->zName)); - assert( IN_INDEX_INDEX_DESC == IN_INDEX_INDEX_ASC+1 ); - eType = IN_INDEX_INDEX_ASC + pIdx->aSortOrder[0]; - - if( prNotFound && !pTab->aCol[iCol].notNull ){ - *prNotFound = ++pParse->nMem; - sqlite3VdbeAddOp2(v, OP_Null, 0, *prNotFound); - } - sqlite3VdbeJumpHere(v, iAddr); - } - } - } - } - - if( eType==0 ){ - /* Could not found an existing table or index to use as the RHS b-tree. - ** We will have to generate an ephemeral table to do the job. - */ - u32 savedNQueryLoop = pParse->nQueryLoop; - int rMayHaveNull = 0; - eType = IN_INDEX_EPH; - if( prNotFound ){ - *prNotFound = rMayHaveNull = ++pParse->nMem; - sqlite3VdbeAddOp2(v, OP_Null, 0, *prNotFound); - }else{ - pParse->nQueryLoop = 0; - if( pX->pLeft->iColumn<0 && !ExprHasProperty(pX, EP_xIsSelect) ){ - eType = IN_INDEX_ROWID; - } - } - sqlite3CodeSubselect(pParse, pX, rMayHaveNull, eType==IN_INDEX_ROWID); - pParse->nQueryLoop = savedNQueryLoop; - }else{ - pX->iTable = iTab; - } - return eType; -} -#endif - -/* -** Generate code for scalar subqueries used as a subquery expression, EXISTS, -** or IN operators. Examples: -** -** (SELECT a FROM b) -- subquery -** EXISTS (SELECT a FROM b) -- EXISTS subquery -** x IN (4,5,11) -- IN operator with list on right-hand side -** x IN (SELECT a FROM b) -- IN operator with subquery on the right -** -** The pExpr parameter describes the expression that contains the IN -** operator or subquery. -** -** If parameter isRowid is non-zero, then expression pExpr is guaranteed -** to be of the form " IN (?, ?, ?)", where is a reference -** to some integer key column of a table B-Tree. In this case, use an -** intkey B-Tree to store the set of IN(...) values instead of the usual -** (slower) variable length keys B-Tree. -** -** If rMayHaveNull is non-zero, that means that the operation is an IN -** (not a SELECT or EXISTS) and that the RHS might contains NULLs. 
-** Furthermore, the IN is in a WHERE clause and that we really want -** to iterate over the RHS of the IN operator in order to quickly locate -** all corresponding LHS elements. All this routine does is initialize -** the register given by rMayHaveNull to NULL. Calling routines will take -** care of changing this register value to non-NULL if the RHS is NULL-free. -** -** If rMayHaveNull is zero, that means that the subquery is being used -** for membership testing only. There is no need to initialize any -** registers to indicate the presence or absence of NULLs on the RHS. -** -** For a SELECT or EXISTS operator, return the register that holds the -** result. For IN operators or if an error occurs, the return value is 0. -*/ -#ifndef SQLITE_OMIT_SUBQUERY -SQLITE_PRIVATE int sqlite3CodeSubselect( - Parse *pParse, /* Parsing context */ - Expr *pExpr, /* The IN, SELECT, or EXISTS operator */ - int rMayHaveNull, /* Register that records whether NULLs exist in RHS */ - int isRowid /* If true, LHS of IN operator is a rowid */ -){ - int testAddr = -1; /* One-time test address */ - int rReg = 0; /* Register storing resulting */ - Vdbe *v = sqlite3GetVdbe(pParse); - if( NEVER(v==0) ) return 0; - sqlite3ExprCachePush(pParse); - - /* This code must be run in its entirety every time it is encountered - ** if any of the following is true: - ** - ** * The right-hand side is a correlated subquery - ** * The right-hand side is an expression list containing variables - ** * We are inside a trigger - ** - ** If all of the above are false, then we can run this code just once - ** save the results, and reuse the same result on subsequent invocations. - */ - if( !ExprHasProperty(pExpr, EP_VarSelect) ){ - testAddr = sqlite3CodeOnce(pParse); VdbeCoverage(v); - } - -#ifndef SQLITE_OMIT_EXPLAIN - if( pParse->explain==2 ){ - char *zMsg = sqlite3MPrintf( - pParse->db, "EXECUTE %s%s SUBQUERY %d", testAddr>=0?"":"CORRELATED ", - pExpr->op==TK_IN?"LIST":"SCALAR", pParse->iNextSelectId - ); - sqlite3VdbeAddOp4(v, OP_Explain, pParse->iSelectId, 0, 0, zMsg, P4_DYNAMIC); - } -#endif - - switch( pExpr->op ){ - case TK_IN: { - char affinity; /* Affinity of the LHS of the IN */ - int addr; /* Address of OP_OpenEphemeral instruction */ - Expr *pLeft = pExpr->pLeft; /* the LHS of the IN operator */ - KeyInfo *pKeyInfo = 0; /* Key information */ - - if( rMayHaveNull ){ - sqlite3VdbeAddOp2(v, OP_Null, 0, rMayHaveNull); - } - - affinity = sqlite3ExprAffinity(pLeft); - - /* Whether this is an 'x IN(SELECT...)' or an 'x IN()' - ** expression it is handled the same way. An ephemeral table is - ** filled with single-field index keys representing the results - ** from the SELECT or the . - ** - ** If the 'x' expression is a column value, or the SELECT... - ** statement returns a column value, then the affinity of that - ** column is used to build the index keys. If both 'x' and the - ** SELECT... statement are columns, then numeric affinity is used - ** if either column has NUMERIC or INTEGER affinity. If neither - ** 'x' nor the SELECT... statement are columns, then numeric affinity - ** is used. - */ - pExpr->iTable = pParse->nTab++; - addr = sqlite3VdbeAddOp2(v, OP_OpenEphemeral, pExpr->iTable, !isRowid); - pKeyInfo = isRowid ? 0 : sqlite3KeyInfoAlloc(pParse->db, 1, 1); - - if( ExprHasProperty(pExpr, EP_xIsSelect) ){ - /* Case 1: expr IN (SELECT ...) - ** - ** Generate code to write the results of the select into the temporary - ** table allocated and opened above. 
- */ - SelectDest dest; - ExprList *pEList; - - assert( !isRowid ); - sqlite3SelectDestInit(&dest, SRT_Set, pExpr->iTable); - dest.affSdst = (u8)affinity; - assert( (pExpr->iTable&0x0000FFFF)==pExpr->iTable ); - pExpr->x.pSelect->iLimit = 0; - testcase( pKeyInfo==0 ); /* Caused by OOM in sqlite3KeyInfoAlloc() */ - if( sqlite3Select(pParse, pExpr->x.pSelect, &dest) ){ - sqlite3KeyInfoUnref(pKeyInfo); - return 0; - } - pEList = pExpr->x.pSelect->pEList; - assert( pKeyInfo!=0 ); /* OOM will cause exit after sqlite3Select() */ - assert( pEList!=0 ); - assert( pEList->nExpr>0 ); - assert( sqlite3KeyInfoIsWriteable(pKeyInfo) ); - pKeyInfo->aColl[0] = sqlite3BinaryCompareCollSeq(pParse, pExpr->pLeft, - pEList->a[0].pExpr); - }else if( ALWAYS(pExpr->x.pList!=0) ){ - /* Case 2: expr IN (exprlist) - ** - ** For each expression, build an index key from the evaluation and - ** store it in the temporary table. If is a column, then use - ** that columns affinity when building index keys. If is not - ** a column, use numeric affinity. - */ - int i; - ExprList *pList = pExpr->x.pList; - struct ExprList_item *pItem; - int r1, r2, r3; - - if( !affinity ){ - affinity = SQLITE_AFF_NONE; - } - if( pKeyInfo ){ - assert( sqlite3KeyInfoIsWriteable(pKeyInfo) ); - pKeyInfo->aColl[0] = sqlite3ExprCollSeq(pParse, pExpr->pLeft); - } - - /* Loop through each expression in . */ - r1 = sqlite3GetTempReg(pParse); - r2 = sqlite3GetTempReg(pParse); - sqlite3VdbeAddOp2(v, OP_Null, 0, r2); - for(i=pList->nExpr, pItem=pList->a; i>0; i--, pItem++){ - Expr *pE2 = pItem->pExpr; - int iValToIns; - - /* If the expression is not constant then we will need to - ** disable the test that was generated above that makes sure - ** this code only executes once. Because for a non-constant - ** expression we need to rerun this code each time. - */ - if( testAddr>=0 && !sqlite3ExprIsConstant(pE2) ){ - sqlite3VdbeChangeToNoop(v, testAddr); - testAddr = -1; - } - - /* Evaluate the expression and insert it into the temp table */ - if( isRowid && sqlite3ExprIsInteger(pE2, &iValToIns) ){ - sqlite3VdbeAddOp3(v, OP_InsertInt, pExpr->iTable, r2, iValToIns); - }else{ - r3 = sqlite3ExprCodeTarget(pParse, pE2, r1); - if( isRowid ){ - sqlite3VdbeAddOp2(v, OP_MustBeInt, r3, - sqlite3VdbeCurrentAddr(v)+2); - VdbeCoverage(v); - sqlite3VdbeAddOp3(v, OP_Insert, pExpr->iTable, r2, r3); - }else{ - sqlite3VdbeAddOp4(v, OP_MakeRecord, r3, 1, r2, &affinity, 1); - sqlite3ExprCacheAffinityChange(pParse, r3, 1); - sqlite3VdbeAddOp2(v, OP_IdxInsert, pExpr->iTable, r2); - } - } - } - sqlite3ReleaseTempReg(pParse, r1); - sqlite3ReleaseTempReg(pParse, r2); - } - if( pKeyInfo ){ - sqlite3VdbeChangeP4(v, addr, (void *)pKeyInfo, P4_KEYINFO); - } - break; - } - - case TK_EXISTS: - case TK_SELECT: - default: { - /* If this has to be a scalar SELECT. Generate code to put the - ** value of this select in a memory cell and record the number - ** of the memory cell in iColumn. If this is an EXISTS, write - ** an integer 0 (not exists) or 1 (exists) into a memory cell - ** and record that memory cell in iColumn. 
- */ - Select *pSel; /* SELECT statement to encode */ - SelectDest dest; /* How to deal with SELECt result */ - - testcase( pExpr->op==TK_EXISTS ); - testcase( pExpr->op==TK_SELECT ); - assert( pExpr->op==TK_EXISTS || pExpr->op==TK_SELECT ); - - assert( ExprHasProperty(pExpr, EP_xIsSelect) ); - pSel = pExpr->x.pSelect; - sqlite3SelectDestInit(&dest, 0, ++pParse->nMem); - if( pExpr->op==TK_SELECT ){ - dest.eDest = SRT_Mem; - sqlite3VdbeAddOp2(v, OP_Null, 0, dest.iSDParm); - VdbeComment((v, "Init subquery result")); - }else{ - dest.eDest = SRT_Exists; - sqlite3VdbeAddOp2(v, OP_Integer, 0, dest.iSDParm); - VdbeComment((v, "Init EXISTS result")); - } - sqlite3ExprDelete(pParse->db, pSel->pLimit); - pSel->pLimit = sqlite3PExpr(pParse, TK_INTEGER, 0, 0, - &sqlite3IntTokens[1]); - pSel->iLimit = 0; - if( sqlite3Select(pParse, pSel, &dest) ){ - return 0; - } - rReg = dest.iSDParm; - ExprSetVVAProperty(pExpr, EP_NoReduce); - break; - } - } - - if( testAddr>=0 ){ - sqlite3VdbeJumpHere(v, testAddr); - } - sqlite3ExprCachePop(pParse); - - return rReg; -} -#endif /* SQLITE_OMIT_SUBQUERY */ - -#ifndef SQLITE_OMIT_SUBQUERY -/* -** Generate code for an IN expression. -** -** x IN (SELECT ...) -** x IN (value, value, ...) -** -** The left-hand side (LHS) is a scalar expression. The right-hand side (RHS) -** is an array of zero or more values. The expression is true if the LHS is -** contained within the RHS. The value of the expression is unknown (NULL) -** if the LHS is NULL or if the LHS is not contained within the RHS and the -** RHS contains one or more NULL values. -** -** This routine generates code will jump to destIfFalse if the LHS is not -** contained within the RHS. If due to NULLs we cannot determine if the LHS -** is contained in the RHS then jump to destIfNull. If the LHS is contained -** within the RHS then fall through. -*/ -static void sqlite3ExprCodeIN( - Parse *pParse, /* Parsing and code generating context */ - Expr *pExpr, /* The IN expression */ - int destIfFalse, /* Jump here if LHS is not contained in the RHS */ - int destIfNull /* Jump here if the results are unknown due to NULLs */ -){ - int rRhsHasNull = 0; /* Register that is true if RHS contains NULL values */ - char affinity; /* Comparison affinity to use */ - int eType; /* Type of the RHS */ - int r1; /* Temporary use register */ - Vdbe *v; /* Statement under construction */ - - /* Compute the RHS. After this step, the table with cursor - ** pExpr->iTable will contains the values that make up the RHS. - */ - v = pParse->pVdbe; - assert( v!=0 ); /* OOM detected prior to this routine */ - VdbeNoopComment((v, "begin IN expr")); - eType = sqlite3FindInIndex(pParse, pExpr, &rRhsHasNull); - - /* Figure out the affinity to use to create a key from the results - ** of the expression. affinityStr stores a static string suitable for - ** P4 of OP_MakeRecord. - */ - affinity = comparisonAffinity(pExpr); - - /* Code the LHS, the from " IN (...)". - */ - sqlite3ExprCachePush(pParse); - r1 = sqlite3GetTempReg(pParse); - sqlite3ExprCode(pParse, pExpr->pLeft, r1); - - /* If the LHS is NULL, then the result is either false or NULL depending - ** on whether the RHS is empty or not, respectively. - */ - if( destIfNull==destIfFalse ){ - /* Shortcut for the common case where the false and NULL outcomes are - ** the same. 
*/ - sqlite3VdbeAddOp2(v, OP_IsNull, r1, destIfNull); VdbeCoverage(v); - }else{ - int addr1 = sqlite3VdbeAddOp1(v, OP_NotNull, r1); VdbeCoverage(v); - sqlite3VdbeAddOp2(v, OP_Rewind, pExpr->iTable, destIfFalse); - VdbeCoverage(v); - sqlite3VdbeAddOp2(v, OP_Goto, 0, destIfNull); - sqlite3VdbeJumpHere(v, addr1); - } - - if( eType==IN_INDEX_ROWID ){ - /* In this case, the RHS is the ROWID of table b-tree - */ - sqlite3VdbeAddOp2(v, OP_MustBeInt, r1, destIfFalse); VdbeCoverage(v); - sqlite3VdbeAddOp3(v, OP_NotExists, pExpr->iTable, destIfFalse, r1); - VdbeCoverage(v); - }else{ - /* In this case, the RHS is an index b-tree. - */ - sqlite3VdbeAddOp4(v, OP_Affinity, r1, 1, 0, &affinity, 1); - - /* If the set membership test fails, then the result of the - ** "x IN (...)" expression must be either 0 or NULL. If the set - ** contains no NULL values, then the result is 0. If the set - ** contains one or more NULL values, then the result of the - ** expression is also NULL. - */ - if( rRhsHasNull==0 || destIfFalse==destIfNull ){ - /* This branch runs if it is known at compile time that the RHS - ** cannot contain NULL values. This happens as the result - ** of a "NOT NULL" constraint in the database schema. - ** - ** Also run this branch if NULL is equivalent to FALSE - ** for this particular IN operator. - */ - sqlite3VdbeAddOp4Int(v, OP_NotFound, pExpr->iTable, destIfFalse, r1, 1); - VdbeCoverage(v); - }else{ - /* In this branch, the RHS of the IN might contain a NULL and - ** the presence of a NULL on the RHS makes a difference in the - ** outcome. - */ - int j1, j2; - - /* First check to see if the LHS is contained in the RHS. If so, - ** then the presence of NULLs in the RHS does not matter, so jump - ** over all of the code that follows. - */ - j1 = sqlite3VdbeAddOp4Int(v, OP_Found, pExpr->iTable, 0, r1, 1); - VdbeCoverage(v); - - /* Here we begin generating code that runs if the LHS is not - ** contained within the RHS. Generate additional code that - ** tests the RHS for NULLs. If the RHS contains a NULL then - ** jump to destIfNull. If there are no NULLs in the RHS then - ** jump to destIfFalse. - */ - sqlite3VdbeAddOp2(v, OP_If, rRhsHasNull, destIfNull); VdbeCoverage(v); - sqlite3VdbeAddOp2(v, OP_IfNot, rRhsHasNull, destIfFalse); VdbeCoverage(v); - j2 = sqlite3VdbeAddOp4Int(v, OP_Found, pExpr->iTable, 0, rRhsHasNull, 1); - VdbeCoverage(v); - sqlite3VdbeAddOp2(v, OP_Integer, 0, rRhsHasNull); - sqlite3VdbeAddOp2(v, OP_Goto, 0, destIfFalse); - sqlite3VdbeJumpHere(v, j2); - sqlite3VdbeAddOp2(v, OP_Integer, 1, rRhsHasNull); - sqlite3VdbeAddOp2(v, OP_Goto, 0, destIfNull); - - /* The OP_Found at the top of this branch jumps here when true, - ** causing the overall IN expression evaluation to fall through. - */ - sqlite3VdbeJumpHere(v, j1); - } - } - sqlite3ReleaseTempReg(pParse, r1); - sqlite3ExprCachePop(pParse); - VdbeComment((v, "end IN expr")); -} -#endif /* SQLITE_OMIT_SUBQUERY */ - -/* -** Duplicate an 8-byte value -*/ -static char *dup8bytes(Vdbe *v, const char *in){ - char *out = sqlite3DbMallocRaw(sqlite3VdbeDb(v), 8); - if( out ){ - memcpy(out, in, 8); - } - return out; -} - -#ifndef SQLITE_OMIT_FLOATING_POINT -/* -** Generate an instruction that will put the floating point -** value described by z[0..n-1] into register iMem. -** -** The z[] string will probably not be zero-terminated. But the -** z[n] character is guaranteed to be something that does not look -** like the continuation of the number. 
-*/ -static void codeReal(Vdbe *v, const char *z, int negateFlag, int iMem){ - if( ALWAYS(z!=0) ){ - double value; - char *zV; - sqlite3AtoF(z, &value, sqlite3Strlen30(z), SQLITE_UTF8); - assert( !sqlite3IsNaN(value) ); /* The new AtoF never returns NaN */ - if( negateFlag ) value = -value; - zV = dup8bytes(v, (char*)&value); - sqlite3VdbeAddOp4(v, OP_Real, 0, iMem, 0, zV, P4_REAL); - } -} -#endif - - -/* -** Generate an instruction that will put the integer describe by -** text z[0..n-1] into register iMem. -** -** Expr.u.zToken is always UTF8 and zero-terminated. -*/ -static void codeInteger(Parse *pParse, Expr *pExpr, int negFlag, int iMem){ - Vdbe *v = pParse->pVdbe; - if( pExpr->flags & EP_IntValue ){ - int i = pExpr->u.iValue; - assert( i>=0 ); - if( negFlag ) i = -i; - sqlite3VdbeAddOp2(v, OP_Integer, i, iMem); - }else{ - int c; - i64 value; - const char *z = pExpr->u.zToken; - assert( z!=0 ); - c = sqlite3Atoi64(z, &value, sqlite3Strlen30(z), SQLITE_UTF8); - if( c==0 || (c==2 && negFlag) ){ - char *zV; - if( negFlag ){ value = c==2 ? SMALLEST_INT64 : -value; } - zV = dup8bytes(v, (char*)&value); - sqlite3VdbeAddOp4(v, OP_Int64, 0, iMem, 0, zV, P4_INT64); - }else{ -#ifdef SQLITE_OMIT_FLOATING_POINT - sqlite3ErrorMsg(pParse, "oversized integer: %s%s", negFlag ? "-" : "", z); -#else - codeReal(v, z, negFlag, iMem); -#endif - } - } -} - -/* -** Clear a cache entry. -*/ -static void cacheEntryClear(Parse *pParse, struct yColCache *p){ - if( p->tempReg ){ - if( pParse->nTempRegaTempReg) ){ - pParse->aTempReg[pParse->nTempReg++] = p->iReg; - } - p->tempReg = 0; - } -} - - -/* -** Record in the column cache that a particular column from a -** particular table is stored in a particular register. -*/ -SQLITE_PRIVATE void sqlite3ExprCacheStore(Parse *pParse, int iTab, int iCol, int iReg){ - int i; - int minLru; - int idxLru; - struct yColCache *p; - - assert( iReg>0 ); /* Register numbers are always positive */ - assert( iCol>=-1 && iCol<32768 ); /* Finite column numbers */ - - /* The SQLITE_ColumnCache flag disables the column cache. This is used - ** for testing only - to verify that SQLite always gets the same answer - ** with and without the column cache. - */ - if( OptimizationDisabled(pParse->db, SQLITE_ColumnCache) ) return; - - /* First replace any existing entry. - ** - ** Actually, the way the column cache is currently used, we are guaranteed - ** that the object will never already be in cache. Verify this guarantee. - */ -#ifndef NDEBUG - for(i=0, p=pParse->aColCache; iiReg==0 || p->iTable!=iTab || p->iColumn!=iCol ); - } -#endif - - /* Find an empty slot and replace it */ - for(i=0, p=pParse->aColCache; iiReg==0 ){ - p->iLevel = pParse->iCacheLevel; - p->iTable = iTab; - p->iColumn = iCol; - p->iReg = iReg; - p->tempReg = 0; - p->lru = pParse->iCacheCnt++; - return; - } - } - - /* Replace the last recently used */ - minLru = 0x7fffffff; - idxLru = -1; - for(i=0, p=pParse->aColCache; ilrulru; - } - } - if( ALWAYS(idxLru>=0) ){ - p = &pParse->aColCache[idxLru]; - p->iLevel = pParse->iCacheLevel; - p->iTable = iTab; - p->iColumn = iCol; - p->iReg = iReg; - p->tempReg = 0; - p->lru = pParse->iCacheCnt++; - return; - } -} - -/* -** Indicate that registers between iReg..iReg+nReg-1 are being overwritten. -** Purge the range of registers from the column cache. 
-*/ -SQLITE_PRIVATE void sqlite3ExprCacheRemove(Parse *pParse, int iReg, int nReg){ - int i; - int iLast = iReg + nReg - 1; - struct yColCache *p; - for(i=0, p=pParse->aColCache; iiReg; - if( r>=iReg && r<=iLast ){ - cacheEntryClear(pParse, p); - p->iReg = 0; - } - } -} - -/* -** Remember the current column cache context. Any new entries added -** added to the column cache after this call are removed when the -** corresponding pop occurs. -*/ -SQLITE_PRIVATE void sqlite3ExprCachePush(Parse *pParse){ - pParse->iCacheLevel++; -#ifdef SQLITE_DEBUG - if( pParse->db->flags & SQLITE_VdbeAddopTrace ){ - printf("PUSH to %d\n", pParse->iCacheLevel); - } -#endif -} - -/* -** Remove from the column cache any entries that were added since the -** the previous sqlite3ExprCachePush operation. In other words, restore -** the cache to the state it was in prior the most recent Push. -*/ -SQLITE_PRIVATE void sqlite3ExprCachePop(Parse *pParse){ - int i; - struct yColCache *p; - assert( pParse->iCacheLevel>=1 ); - pParse->iCacheLevel--; -#ifdef SQLITE_DEBUG - if( pParse->db->flags & SQLITE_VdbeAddopTrace ){ - printf("POP to %d\n", pParse->iCacheLevel); - } -#endif - for(i=0, p=pParse->aColCache; iiReg && p->iLevel>pParse->iCacheLevel ){ - cacheEntryClear(pParse, p); - p->iReg = 0; - } - } -} - -/* -** When a cached column is reused, make sure that its register is -** no longer available as a temp register. ticket #3879: that same -** register might be in the cache in multiple places, so be sure to -** get them all. -*/ -static void sqlite3ExprCachePinRegister(Parse *pParse, int iReg){ - int i; - struct yColCache *p; - for(i=0, p=pParse->aColCache; iiReg==iReg ){ - p->tempReg = 0; - } - } -} - -/* -** Generate code to extract the value of the iCol-th column of a table. -*/ -SQLITE_PRIVATE void sqlite3ExprCodeGetColumnOfTable( - Vdbe *v, /* The VDBE under construction */ - Table *pTab, /* The table containing the value */ - int iTabCur, /* The table cursor. Or the PK cursor for WITHOUT ROWID */ - int iCol, /* Index of the column to extract */ - int regOut /* Extract the value into this register */ -){ - if( iCol<0 || iCol==pTab->iPKey ){ - sqlite3VdbeAddOp2(v, OP_Rowid, iTabCur, regOut); - }else{ - int op = IsVirtual(pTab) ? OP_VColumn : OP_Column; - int x = iCol; - if( !HasRowid(pTab) ){ - x = sqlite3ColumnOfIndex(sqlite3PrimaryKeyIndex(pTab), iCol); - } - sqlite3VdbeAddOp3(v, op, iTabCur, x, regOut); - } - if( iCol>=0 ){ - sqlite3ColumnDefault(v, pTab, iCol, regOut); - } -} - -/* -** Generate code that will extract the iColumn-th column from -** table pTab and store the column value in a register. An effort -** is made to store the column value in register iReg, but this is -** not guaranteed. The location of the column value is returned. -** -** There must be an open cursor to pTab in iTable when this routine -** is called. If iColumn<0 then code is generated that extracts the rowid. 
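The replacement policy used by sqlite3ExprCacheStore above reduces to two steps: take an empty slot if one exists, otherwise evict the entry with the smallest lru stamp. The following is a simplified standalone sketch of that policy; the struct below is a stand-in for the real yColCache entry, not the actual definition.

/* Stand-in for one column-cache entry: iReg==0 means the slot is unused. */
struct CacheSlot {
  int iReg;   /* Register holding the cached column, or 0 if empty */
  int lru;    /* Least-recently-used stamp; smaller means older    */
};

/* Pick the slot that the store routine above would overwrite. */
static struct CacheSlot *pickSlot(struct CacheSlot *aSlot, int nSlot){
  int i;
  int idxLru = 0;
  int minLru = 0x7fffffff;
  for(i=0; i<nSlot; i++){
    if( aSlot[i].iReg==0 ) return &aSlot[i];   /* reuse an empty slot first */
    if( aSlot[i].lru<minLru ){
      minLru = aSlot[i].lru;
      idxLru = i;
    }
  }
  return &aSlot[idxLru];                       /* otherwise evict the LRU entry */
}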
-*/ -SQLITE_PRIVATE int sqlite3ExprCodeGetColumn( - Parse *pParse, /* Parsing and code generating context */ - Table *pTab, /* Description of the table we are reading from */ - int iColumn, /* Index of the table column */ - int iTable, /* The cursor pointing to the table */ - int iReg, /* Store results here */ - u8 p5 /* P5 value for OP_Column */ -){ - Vdbe *v = pParse->pVdbe; - int i; - struct yColCache *p; - - for(i=0, p=pParse->aColCache; iiReg>0 && p->iTable==iTable && p->iColumn==iColumn ){ - p->lru = pParse->iCacheCnt++; - sqlite3ExprCachePinRegister(pParse, p->iReg); - return p->iReg; - } - } - assert( v!=0 ); - sqlite3ExprCodeGetColumnOfTable(v, pTab, iTable, iColumn, iReg); - if( p5 ){ - sqlite3VdbeChangeP5(v, p5); - }else{ - sqlite3ExprCacheStore(pParse, iTable, iColumn, iReg); - } - return iReg; -} - -/* -** Clear all column cache entries. -*/ -SQLITE_PRIVATE void sqlite3ExprCacheClear(Parse *pParse){ - int i; - struct yColCache *p; - -#if SQLITE_DEBUG - if( pParse->db->flags & SQLITE_VdbeAddopTrace ){ - printf("CLEAR\n"); - } -#endif - for(i=0, p=pParse->aColCache; iiReg ){ - cacheEntryClear(pParse, p); - p->iReg = 0; - } - } -} - -/* -** Record the fact that an affinity change has occurred on iCount -** registers starting with iStart. -*/ -SQLITE_PRIVATE void sqlite3ExprCacheAffinityChange(Parse *pParse, int iStart, int iCount){ - sqlite3ExprCacheRemove(pParse, iStart, iCount); -} - -/* -** Generate code to move content from registers iFrom...iFrom+nReg-1 -** over to iTo..iTo+nReg-1. Keep the column cache up-to-date. -*/ -SQLITE_PRIVATE void sqlite3ExprCodeMove(Parse *pParse, int iFrom, int iTo, int nReg){ - int i; - struct yColCache *p; - assert( iFrom>=iTo+nReg || iFrom+nReg<=iTo ); - sqlite3VdbeAddOp3(pParse->pVdbe, OP_Move, iFrom, iTo, nReg); - for(i=0, p=pParse->aColCache; iiReg; - if( x>=iFrom && xiReg += iTo-iFrom; - } - } -} - -#if defined(SQLITE_DEBUG) || defined(SQLITE_COVERAGE_TEST) -/* -** Return true if any register in the range iFrom..iTo (inclusive) -** is used as part of the column cache. -** -** This routine is used within assert() and testcase() macros only -** and does not appear in a normal build. -*/ -static int usedAsColumnCache(Parse *pParse, int iFrom, int iTo){ - int i; - struct yColCache *p; - for(i=0, p=pParse->aColCache; iiReg; - if( r>=iFrom && r<=iTo ) return 1; /*NO_TEST*/ - } - return 0; -} -#endif /* SQLITE_DEBUG || SQLITE_COVERAGE_TEST */ - -/* -** Convert an expression node to a TK_REGISTER -*/ -static void exprToRegister(Expr *p, int iReg){ - p->op2 = p->op; - p->op = TK_REGISTER; - p->iTable = iReg; - ExprClearProperty(p, EP_Skip); -} - -/* -** Generate code into the current Vdbe to evaluate the given -** expression. Attempt to store the results in register "target". -** Return the register where results are stored. -** -** With this routine, there is no guarantee that results will -** be stored in target. The result might be stored in some other -** register if it is convenient to do so. The calling function -** must check the return code and move the results to the desired -** register. 
-*/ -SQLITE_PRIVATE int sqlite3ExprCodeTarget(Parse *pParse, Expr *pExpr, int target){ - Vdbe *v = pParse->pVdbe; /* The VM under construction */ - int op; /* The opcode being coded */ - int inReg = target; /* Results stored in register inReg */ - int regFree1 = 0; /* If non-zero free this temporary register */ - int regFree2 = 0; /* If non-zero free this temporary register */ - int r1, r2, r3, r4; /* Various register numbers */ - sqlite3 *db = pParse->db; /* The database connection */ - Expr tempX; /* Temporary expression node */ - - assert( target>0 && target<=pParse->nMem ); - if( v==0 ){ - assert( pParse->db->mallocFailed ); - return 0; - } - - if( pExpr==0 ){ - op = TK_NULL; - }else{ - op = pExpr->op; - } - switch( op ){ - case TK_AGG_COLUMN: { - AggInfo *pAggInfo = pExpr->pAggInfo; - struct AggInfo_col *pCol = &pAggInfo->aCol[pExpr->iAgg]; - if( !pAggInfo->directMode ){ - assert( pCol->iMem>0 ); - inReg = pCol->iMem; - break; - }else if( pAggInfo->useSortingIdx ){ - sqlite3VdbeAddOp3(v, OP_Column, pAggInfo->sortingIdxPTab, - pCol->iSorterColumn, target); - break; - } - /* Otherwise, fall thru into the TK_COLUMN case */ - } - case TK_COLUMN: { - int iTab = pExpr->iTable; - if( iTab<0 ){ - if( pParse->ckBase>0 ){ - /* Generating CHECK constraints or inserting into partial index */ - inReg = pExpr->iColumn + pParse->ckBase; - break; - }else{ - /* Deleting from a partial index */ - iTab = pParse->iPartIdxTab; - } - } - inReg = sqlite3ExprCodeGetColumn(pParse, pExpr->pTab, - pExpr->iColumn, iTab, target, - pExpr->op2); - break; - } - case TK_INTEGER: { - codeInteger(pParse, pExpr, 0, target); - break; - } -#ifndef SQLITE_OMIT_FLOATING_POINT - case TK_FLOAT: { - assert( !ExprHasProperty(pExpr, EP_IntValue) ); - codeReal(v, pExpr->u.zToken, 0, target); - break; - } -#endif - case TK_STRING: { - assert( !ExprHasProperty(pExpr, EP_IntValue) ); - sqlite3VdbeAddOp4(v, OP_String8, 0, target, 0, pExpr->u.zToken, 0); - break; - } - case TK_NULL: { - sqlite3VdbeAddOp2(v, OP_Null, 0, target); - break; - } -#ifndef SQLITE_OMIT_BLOB_LITERAL - case TK_BLOB: { - int n; - const char *z; - char *zBlob; - assert( !ExprHasProperty(pExpr, EP_IntValue) ); - assert( pExpr->u.zToken[0]=='x' || pExpr->u.zToken[0]=='X' ); - assert( pExpr->u.zToken[1]=='\'' ); - z = &pExpr->u.zToken[2]; - n = sqlite3Strlen30(z) - 1; - assert( z[n]=='\'' ); - zBlob = sqlite3HexToBlob(sqlite3VdbeDb(v), z, n); - sqlite3VdbeAddOp4(v, OP_Blob, n/2, target, 0, zBlob, P4_DYNAMIC); - break; - } -#endif - case TK_VARIABLE: { - assert( !ExprHasProperty(pExpr, EP_IntValue) ); - assert( pExpr->u.zToken!=0 ); - assert( pExpr->u.zToken[0]!=0 ); - sqlite3VdbeAddOp2(v, OP_Variable, pExpr->iColumn, target); - if( pExpr->u.zToken[1]!=0 ){ - assert( pExpr->u.zToken[0]=='?' 
- || strcmp(pExpr->u.zToken, pParse->azVar[pExpr->iColumn-1])==0 ); - sqlite3VdbeChangeP4(v, -1, pParse->azVar[pExpr->iColumn-1], P4_STATIC); - } - break; - } - case TK_REGISTER: { - inReg = pExpr->iTable; - break; - } - case TK_AS: { - inReg = sqlite3ExprCodeTarget(pParse, pExpr->pLeft, target); - break; - } -#ifndef SQLITE_OMIT_CAST - case TK_CAST: { - /* Expressions of the form: CAST(pLeft AS token) */ - int aff, to_op; - inReg = sqlite3ExprCodeTarget(pParse, pExpr->pLeft, target); - assert( !ExprHasProperty(pExpr, EP_IntValue) ); - aff = sqlite3AffinityType(pExpr->u.zToken, 0); - to_op = aff - SQLITE_AFF_TEXT + OP_ToText; - assert( to_op==OP_ToText || aff!=SQLITE_AFF_TEXT ); - assert( to_op==OP_ToBlob || aff!=SQLITE_AFF_NONE ); - assert( to_op==OP_ToNumeric || aff!=SQLITE_AFF_NUMERIC ); - assert( to_op==OP_ToInt || aff!=SQLITE_AFF_INTEGER ); - assert( to_op==OP_ToReal || aff!=SQLITE_AFF_REAL ); - testcase( to_op==OP_ToText ); - testcase( to_op==OP_ToBlob ); - testcase( to_op==OP_ToNumeric ); - testcase( to_op==OP_ToInt ); - testcase( to_op==OP_ToReal ); - if( inReg!=target ){ - sqlite3VdbeAddOp2(v, OP_SCopy, inReg, target); - inReg = target; - } - sqlite3VdbeAddOp1(v, to_op, inReg); - testcase( usedAsColumnCache(pParse, inReg, inReg) ); - sqlite3ExprCacheAffinityChange(pParse, inReg, 1); - break; - } -#endif /* SQLITE_OMIT_CAST */ - case TK_LT: - case TK_LE: - case TK_GT: - case TK_GE: - case TK_NE: - case TK_EQ: { - r1 = sqlite3ExprCodeTemp(pParse, pExpr->pLeft, ®Free1); - r2 = sqlite3ExprCodeTemp(pParse, pExpr->pRight, ®Free2); - codeCompare(pParse, pExpr->pLeft, pExpr->pRight, op, - r1, r2, inReg, SQLITE_STOREP2); - assert(TK_LT==OP_Lt); testcase(op==OP_Lt); VdbeCoverageIf(v,op==OP_Lt); - assert(TK_LE==OP_Le); testcase(op==OP_Le); VdbeCoverageIf(v,op==OP_Le); - assert(TK_GT==OP_Gt); testcase(op==OP_Gt); VdbeCoverageIf(v,op==OP_Gt); - assert(TK_GE==OP_Ge); testcase(op==OP_Ge); VdbeCoverageIf(v,op==OP_Ge); - assert(TK_EQ==OP_Eq); testcase(op==OP_Eq); VdbeCoverageIf(v,op==OP_Eq); - assert(TK_NE==OP_Ne); testcase(op==OP_Ne); VdbeCoverageIf(v,op==OP_Ne); - testcase( regFree1==0 ); - testcase( regFree2==0 ); - break; - } - case TK_IS: - case TK_ISNOT: { - testcase( op==TK_IS ); - testcase( op==TK_ISNOT ); - r1 = sqlite3ExprCodeTemp(pParse, pExpr->pLeft, ®Free1); - r2 = sqlite3ExprCodeTemp(pParse, pExpr->pRight, ®Free2); - op = (op==TK_IS) ? 
TK_EQ : TK_NE; - codeCompare(pParse, pExpr->pLeft, pExpr->pRight, op, - r1, r2, inReg, SQLITE_STOREP2 | SQLITE_NULLEQ); - VdbeCoverageIf(v, op==TK_EQ); - VdbeCoverageIf(v, op==TK_NE); - testcase( regFree1==0 ); - testcase( regFree2==0 ); - break; - } - case TK_AND: - case TK_OR: - case TK_PLUS: - case TK_STAR: - case TK_MINUS: - case TK_REM: - case TK_BITAND: - case TK_BITOR: - case TK_SLASH: - case TK_LSHIFT: - case TK_RSHIFT: - case TK_CONCAT: { - assert( TK_AND==OP_And ); testcase( op==TK_AND ); - assert( TK_OR==OP_Or ); testcase( op==TK_OR ); - assert( TK_PLUS==OP_Add ); testcase( op==TK_PLUS ); - assert( TK_MINUS==OP_Subtract ); testcase( op==TK_MINUS ); - assert( TK_REM==OP_Remainder ); testcase( op==TK_REM ); - assert( TK_BITAND==OP_BitAnd ); testcase( op==TK_BITAND ); - assert( TK_BITOR==OP_BitOr ); testcase( op==TK_BITOR ); - assert( TK_SLASH==OP_Divide ); testcase( op==TK_SLASH ); - assert( TK_LSHIFT==OP_ShiftLeft ); testcase( op==TK_LSHIFT ); - assert( TK_RSHIFT==OP_ShiftRight ); testcase( op==TK_RSHIFT ); - assert( TK_CONCAT==OP_Concat ); testcase( op==TK_CONCAT ); - r1 = sqlite3ExprCodeTemp(pParse, pExpr->pLeft, ®Free1); - r2 = sqlite3ExprCodeTemp(pParse, pExpr->pRight, ®Free2); - sqlite3VdbeAddOp3(v, op, r2, r1, target); - testcase( regFree1==0 ); - testcase( regFree2==0 ); - break; - } - case TK_UMINUS: { - Expr *pLeft = pExpr->pLeft; - assert( pLeft ); - if( pLeft->op==TK_INTEGER ){ - codeInteger(pParse, pLeft, 1, target); -#ifndef SQLITE_OMIT_FLOATING_POINT - }else if( pLeft->op==TK_FLOAT ){ - assert( !ExprHasProperty(pExpr, EP_IntValue) ); - codeReal(v, pLeft->u.zToken, 1, target); -#endif - }else{ - tempX.op = TK_INTEGER; - tempX.flags = EP_IntValue|EP_TokenOnly; - tempX.u.iValue = 0; - r1 = sqlite3ExprCodeTemp(pParse, &tempX, ®Free1); - r2 = sqlite3ExprCodeTemp(pParse, pExpr->pLeft, ®Free2); - sqlite3VdbeAddOp3(v, OP_Subtract, r2, r1, target); - testcase( regFree2==0 ); - } - inReg = target; - break; - } - case TK_BITNOT: - case TK_NOT: { - assert( TK_BITNOT==OP_BitNot ); testcase( op==TK_BITNOT ); - assert( TK_NOT==OP_Not ); testcase( op==TK_NOT ); - r1 = sqlite3ExprCodeTemp(pParse, pExpr->pLeft, ®Free1); - testcase( regFree1==0 ); - inReg = target; - sqlite3VdbeAddOp2(v, op, r1, inReg); - break; - } - case TK_ISNULL: - case TK_NOTNULL: { - int addr; - assert( TK_ISNULL==OP_IsNull ); testcase( op==TK_ISNULL ); - assert( TK_NOTNULL==OP_NotNull ); testcase( op==TK_NOTNULL ); - sqlite3VdbeAddOp2(v, OP_Integer, 1, target); - r1 = sqlite3ExprCodeTemp(pParse, pExpr->pLeft, ®Free1); - testcase( regFree1==0 ); - addr = sqlite3VdbeAddOp1(v, op, r1); - VdbeCoverageIf(v, op==TK_ISNULL); - VdbeCoverageIf(v, op==TK_NOTNULL); - sqlite3VdbeAddOp2(v, OP_AddImm, target, -1); - sqlite3VdbeJumpHere(v, addr); - break; - } - case TK_AGG_FUNCTION: { - AggInfo *pInfo = pExpr->pAggInfo; - if( pInfo==0 ){ - assert( !ExprHasProperty(pExpr, EP_IntValue) ); - sqlite3ErrorMsg(pParse, "misuse of aggregate: %s()", pExpr->u.zToken); - }else{ - inReg = pInfo->aFunc[pExpr->iAgg].iMem; - } - break; - } - case TK_FUNCTION: { - ExprList *pFarg; /* List of function arguments */ - int nFarg; /* Number of function arguments */ - FuncDef *pDef; /* The function definition object */ - int nId; /* Length of the function name in bytes */ - const char *zId; /* The function name */ - u32 constMask = 0; /* Mask of function arguments that are constant */ - int i; /* Loop counter */ - u8 enc = ENC(db); /* The text encoding used by this database */ - CollSeq *pColl = 0; /* A collating sequence */ - - assert( 
!ExprHasProperty(pExpr, EP_xIsSelect) ); - if( ExprHasProperty(pExpr, EP_TokenOnly) ){ - pFarg = 0; - }else{ - pFarg = pExpr->x.pList; - } - nFarg = pFarg ? pFarg->nExpr : 0; - assert( !ExprHasProperty(pExpr, EP_IntValue) ); - zId = pExpr->u.zToken; - nId = sqlite3Strlen30(zId); - pDef = sqlite3FindFunction(db, zId, nId, nFarg, enc, 0); - if( pDef==0 ){ - sqlite3ErrorMsg(pParse, "unknown function: %.*s()", nId, zId); - break; - } - - /* Attempt a direct implementation of the built-in COALESCE() and - ** IFNULL() functions. This avoids unnecessary evalation of - ** arguments past the first non-NULL argument. - */ - if( pDef->funcFlags & SQLITE_FUNC_COALESCE ){ - int endCoalesce = sqlite3VdbeMakeLabel(v); - assert( nFarg>=2 ); - sqlite3ExprCode(pParse, pFarg->a[0].pExpr, target); - for(i=1; ia[i].pExpr, target); - sqlite3ExprCachePop(pParse); - } - sqlite3VdbeResolveLabel(v, endCoalesce); - break; - } - - /* The UNLIKELY() function is a no-op. The result is the value - ** of the first argument. - */ - if( pDef->funcFlags & SQLITE_FUNC_UNLIKELY ){ - assert( nFarg>=1 ); - sqlite3ExprCode(pParse, pFarg->a[0].pExpr, target); - break; - } - - for(i=0; ia[i].pExpr) ){ - testcase( i==31 ); - constMask |= MASKBIT32(i); - } - if( (pDef->funcFlags & SQLITE_FUNC_NEEDCOLL)!=0 && !pColl ){ - pColl = sqlite3ExprCollSeq(pParse, pFarg->a[i].pExpr); - } - } - if( pFarg ){ - if( constMask ){ - r1 = pParse->nMem+1; - pParse->nMem += nFarg; - }else{ - r1 = sqlite3GetTempRange(pParse, nFarg); - } - - /* For length() and typeof() functions with a column argument, - ** set the P5 parameter to the OP_Column opcode to OPFLAG_LENGTHARG - ** or OPFLAG_TYPEOFARG respectively, to avoid unnecessary data - ** loading. - */ - if( (pDef->funcFlags & (SQLITE_FUNC_LENGTH|SQLITE_FUNC_TYPEOF))!=0 ){ - u8 exprOp; - assert( nFarg==1 ); - assert( pFarg->a[0].pExpr!=0 ); - exprOp = pFarg->a[0].pExpr->op; - if( exprOp==TK_COLUMN || exprOp==TK_AGG_COLUMN ){ - assert( SQLITE_FUNC_LENGTH==OPFLAG_LENGTHARG ); - assert( SQLITE_FUNC_TYPEOF==OPFLAG_TYPEOFARG ); - testcase( pDef->funcFlags & OPFLAG_LENGTHARG ); - pFarg->a[0].pExpr->op2 = - pDef->funcFlags & (OPFLAG_LENGTHARG|OPFLAG_TYPEOFARG); - } - } - - sqlite3ExprCachePush(pParse); /* Ticket 2ea2425d34be */ - sqlite3ExprCodeExprList(pParse, pFarg, r1, - SQLITE_ECEL_DUP|SQLITE_ECEL_FACTOR); - sqlite3ExprCachePop(pParse); /* Ticket 2ea2425d34be */ - }else{ - r1 = 0; - } -#ifndef SQLITE_OMIT_VIRTUALTABLE - /* Possibly overload the function if the first argument is - ** a virtual table column. - ** - ** For infix functions (LIKE, GLOB, REGEXP, and MATCH) use the - ** second argument, not the first, as the argument to test to - ** see if it is a column in a virtual table. This is done because - ** the left operand of infix functions (the operand we want to - ** control overloading) ends up as the second argument to the - ** function. The expression "A glob B" is equivalent to - ** "glob(B,A). We want to use the A in "A glob B" to test - ** for function overloading. But we use the B term in "glob(B,A)". 
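The COALESCE()/IFNULL() special case above exists so that arguments after the first non-NULL value are never evaluated at all. A rough C analogue of the jump structure it generates follows; the function-pointer type is only a stand-in for "evaluate one argument".

#include <stddef.h>

typedef const char *(*ArgFn)(void);   /* stand-in for evaluating one argument */

/* Evaluate arguments left to right, stopping at the first non-NULL result,
** just as the OP_NotNull jumps generated above cut the in-line COALESCE short. */
static const char *coalesceSketch(ArgFn *apArg, int nArg){
  const char *zVal = NULL;
  int i;
  for(i=0; i<nArg && zVal==NULL; i++){
    zVal = apArg[i]();    /* arguments after the first non-NULL are skipped */
  }
  return zVal;
}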
- */ - if( nFarg>=2 && (pExpr->flags & EP_InfixFunc) ){ - pDef = sqlite3VtabOverloadFunction(db, pDef, nFarg, pFarg->a[1].pExpr); - }else if( nFarg>0 ){ - pDef = sqlite3VtabOverloadFunction(db, pDef, nFarg, pFarg->a[0].pExpr); - } -#endif - if( pDef->funcFlags & SQLITE_FUNC_NEEDCOLL ){ - if( !pColl ) pColl = db->pDfltColl; - sqlite3VdbeAddOp4(v, OP_CollSeq, 0, 0, 0, (char *)pColl, P4_COLLSEQ); - } - sqlite3VdbeAddOp4(v, OP_Function, constMask, r1, target, - (char*)pDef, P4_FUNCDEF); - sqlite3VdbeChangeP5(v, (u8)nFarg); - if( nFarg && constMask==0 ){ - sqlite3ReleaseTempRange(pParse, r1, nFarg); - } - break; - } -#ifndef SQLITE_OMIT_SUBQUERY - case TK_EXISTS: - case TK_SELECT: { - testcase( op==TK_EXISTS ); - testcase( op==TK_SELECT ); - inReg = sqlite3CodeSubselect(pParse, pExpr, 0, 0); - break; - } - case TK_IN: { - int destIfFalse = sqlite3VdbeMakeLabel(v); - int destIfNull = sqlite3VdbeMakeLabel(v); - sqlite3VdbeAddOp2(v, OP_Null, 0, target); - sqlite3ExprCodeIN(pParse, pExpr, destIfFalse, destIfNull); - sqlite3VdbeAddOp2(v, OP_Integer, 1, target); - sqlite3VdbeResolveLabel(v, destIfFalse); - sqlite3VdbeAddOp2(v, OP_AddImm, target, 0); - sqlite3VdbeResolveLabel(v, destIfNull); - break; - } -#endif /* SQLITE_OMIT_SUBQUERY */ - - - /* - ** x BETWEEN y AND z - ** - ** This is equivalent to - ** - ** x>=y AND x<=z - ** - ** X is stored in pExpr->pLeft. - ** Y is stored in pExpr->pList->a[0].pExpr. - ** Z is stored in pExpr->pList->a[1].pExpr. - */ - case TK_BETWEEN: { - Expr *pLeft = pExpr->pLeft; - struct ExprList_item *pLItem = pExpr->x.pList->a; - Expr *pRight = pLItem->pExpr; - - r1 = sqlite3ExprCodeTemp(pParse, pLeft, ®Free1); - r2 = sqlite3ExprCodeTemp(pParse, pRight, ®Free2); - testcase( regFree1==0 ); - testcase( regFree2==0 ); - r3 = sqlite3GetTempReg(pParse); - r4 = sqlite3GetTempReg(pParse); - codeCompare(pParse, pLeft, pRight, OP_Ge, - r1, r2, r3, SQLITE_STOREP2); VdbeCoverage(v); - pLItem++; - pRight = pLItem->pExpr; - sqlite3ReleaseTempReg(pParse, regFree2); - r2 = sqlite3ExprCodeTemp(pParse, pRight, ®Free2); - testcase( regFree2==0 ); - codeCompare(pParse, pLeft, pRight, OP_Le, r1, r2, r4, SQLITE_STOREP2); - VdbeCoverage(v); - sqlite3VdbeAddOp3(v, OP_And, r3, r4, target); - sqlite3ReleaseTempReg(pParse, r3); - sqlite3ReleaseTempReg(pParse, r4); - break; - } - case TK_COLLATE: - case TK_UPLUS: { - inReg = sqlite3ExprCodeTarget(pParse, pExpr->pLeft, target); - break; - } - - case TK_TRIGGER: { - /* If the opcode is TK_TRIGGER, then the expression is a reference - ** to a column in the new.* or old.* pseudo-tables available to - ** trigger programs. In this case Expr.iTable is set to 1 for the - ** new.* pseudo-table, or 0 for the old.* pseudo-table. Expr.iColumn - ** is set to the column of the pseudo-table to read, or to -1 to - ** read the rowid field. - ** - ** The expression is implemented using an OP_Param opcode. The p1 - ** parameter is set to 0 for an old.rowid reference, or to (i+1) - ** to reference another column of the old.* pseudo-table, where - ** i is the index of the column. For a new.rowid reference, p1 is - ** set to (n+1), where n is the number of columns in each pseudo-table. - ** For a reference to any other column in the new.* pseudo-table, p1 - ** is set to (n+2+i), where n and i are as defined previously. 
For - ** example, if the table on which triggers are being fired is - ** declared as: - ** - ** CREATE TABLE t1(a, b); - ** - ** Then p1 is interpreted as follows: - ** - ** p1==0 -> old.rowid p1==3 -> new.rowid - ** p1==1 -> old.a p1==4 -> new.a - ** p1==2 -> old.b p1==5 -> new.b - */ - Table *pTab = pExpr->pTab; - int p1 = pExpr->iTable * (pTab->nCol+1) + 1 + pExpr->iColumn; - - assert( pExpr->iTable==0 || pExpr->iTable==1 ); - assert( pExpr->iColumn>=-1 && pExpr->iColumnnCol ); - assert( pTab->iPKey<0 || pExpr->iColumn!=pTab->iPKey ); - assert( p1>=0 && p1<(pTab->nCol*2+2) ); - - sqlite3VdbeAddOp2(v, OP_Param, p1, target); - VdbeComment((v, "%s.%s -> $%d", - (pExpr->iTable ? "new" : "old"), - (pExpr->iColumn<0 ? "rowid" : pExpr->pTab->aCol[pExpr->iColumn].zName), - target - )); - -#ifndef SQLITE_OMIT_FLOATING_POINT - /* If the column has REAL affinity, it may currently be stored as an - ** integer. Use OP_RealAffinity to make sure it is really real. */ - if( pExpr->iColumn>=0 - && pTab->aCol[pExpr->iColumn].affinity==SQLITE_AFF_REAL - ){ - sqlite3VdbeAddOp1(v, OP_RealAffinity, target); - } -#endif - break; - } - - - /* - ** Form A: - ** CASE x WHEN e1 THEN r1 WHEN e2 THEN r2 ... WHEN eN THEN rN ELSE y END - ** - ** Form B: - ** CASE WHEN e1 THEN r1 WHEN e2 THEN r2 ... WHEN eN THEN rN ELSE y END - ** - ** Form A is can be transformed into the equivalent form B as follows: - ** CASE WHEN x=e1 THEN r1 WHEN x=e2 THEN r2 ... - ** WHEN x=eN THEN rN ELSE y END - ** - ** X (if it exists) is in pExpr->pLeft. - ** Y is in the last element of pExpr->x.pList if pExpr->x.pList->nExpr is - ** odd. The Y is also optional. If the number of elements in x.pList - ** is even, then Y is omitted and the "otherwise" result is NULL. - ** Ei is in pExpr->pList->a[i*2] and Ri is pExpr->pList->a[i*2+1]. - ** - ** The result of the expression is the Ri for the first matching Ei, - ** or if there is no matching Ei, the ELSE term Y, or if there is - ** no ELSE term, NULL. - */ - default: assert( op==TK_CASE ); { - int endLabel; /* GOTO label for end of CASE stmt */ - int nextCase; /* GOTO label for next WHEN clause */ - int nExpr; /* 2x number of WHEN terms */ - int i; /* Loop counter */ - ExprList *pEList; /* List of WHEN terms */ - struct ExprList_item *aListelem; /* Array of WHEN terms */ - Expr opCompare; /* The X==Ei expression */ - Expr *pX; /* The X expression */ - Expr *pTest = 0; /* X==Ei (form A) or just Ei (form B) */ - VVA_ONLY( int iCacheLevel = pParse->iCacheLevel; ) - - assert( !ExprHasProperty(pExpr, EP_xIsSelect) && pExpr->x.pList ); - assert(pExpr->x.pList->nExpr > 0); - pEList = pExpr->x.pList; - aListelem = pEList->a; - nExpr = pEList->nExpr; - endLabel = sqlite3VdbeMakeLabel(v); - if( (pX = pExpr->pLeft)!=0 ){ - tempX = *pX; - testcase( pX->op==TK_COLUMN ); - exprToRegister(&tempX, sqlite3ExprCodeTemp(pParse, pX, ®Free1)); - testcase( regFree1==0 ); - opCompare.op = TK_EQ; - opCompare.pLeft = &tempX; - pTest = &opCompare; - /* Ticket b351d95f9cd5ef17e9d9dbae18f5ca8611190001: - ** The value in regFree1 might get SCopy-ed into the file result. - ** So make sure that the regFree1 register is not reused for other - ** purposes and possibly overwritten. 
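The Form A to Form B rewrite described in the comment above, shown on a concrete pair of statements; the two evaluate identically, with the CASE operand captured once in a register and compared against each WHEN expression in turn. The table and column names here are illustrative only.

/* Form A: compare a single operand against each WHEN expression. */
static const char *zCaseFormA =
  "SELECT CASE x WHEN 1 THEN 'one' WHEN 2 THEN 'two' ELSE 'many' END FROM t1;";

/* Form B: the equivalent rewrite that the code above evaluates. */
static const char *zCaseFormB =
  "SELECT CASE WHEN x=1 THEN 'one' WHEN x=2 THEN 'two' ELSE 'many' END FROM t1;";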
*/ - regFree1 = 0; - } - for(i=0; iop==TK_COLUMN ); - sqlite3ExprIfFalse(pParse, pTest, nextCase, SQLITE_JUMPIFNULL); - testcase( aListelem[i+1].pExpr->op==TK_COLUMN ); - sqlite3ExprCode(pParse, aListelem[i+1].pExpr, target); - sqlite3VdbeAddOp2(v, OP_Goto, 0, endLabel); - sqlite3ExprCachePop(pParse); - sqlite3VdbeResolveLabel(v, nextCase); - } - if( (nExpr&1)!=0 ){ - sqlite3ExprCachePush(pParse); - sqlite3ExprCode(pParse, pEList->a[nExpr-1].pExpr, target); - sqlite3ExprCachePop(pParse); - }else{ - sqlite3VdbeAddOp2(v, OP_Null, 0, target); - } - assert( db->mallocFailed || pParse->nErr>0 - || pParse->iCacheLevel==iCacheLevel ); - sqlite3VdbeResolveLabel(v, endLabel); - break; - } -#ifndef SQLITE_OMIT_TRIGGER - case TK_RAISE: { - assert( pExpr->affinity==OE_Rollback - || pExpr->affinity==OE_Abort - || pExpr->affinity==OE_Fail - || pExpr->affinity==OE_Ignore - ); - if( !pParse->pTriggerTab ){ - sqlite3ErrorMsg(pParse, - "RAISE() may only be used within a trigger-program"); - return 0; - } - if( pExpr->affinity==OE_Abort ){ - sqlite3MayAbort(pParse); - } - assert( !ExprHasProperty(pExpr, EP_IntValue) ); - if( pExpr->affinity==OE_Ignore ){ - sqlite3VdbeAddOp4( - v, OP_Halt, SQLITE_OK, OE_Ignore, 0, pExpr->u.zToken,0); - VdbeCoverage(v); - }else{ - sqlite3HaltConstraint(pParse, SQLITE_CONSTRAINT_TRIGGER, - pExpr->affinity, pExpr->u.zToken, 0, 0); - } - - break; - } -#endif - } - sqlite3ReleaseTempReg(pParse, regFree1); - sqlite3ReleaseTempReg(pParse, regFree2); - return inReg; -} - -/* -** Factor out the code of the given expression to initialization time. -*/ -SQLITE_PRIVATE void sqlite3ExprCodeAtInit( - Parse *pParse, /* Parsing context */ - Expr *pExpr, /* The expression to code when the VDBE initializes */ - int regDest, /* Store the value in this register */ - u8 reusable /* True if this expression is reusable */ -){ - ExprList *p; - assert( ConstFactorOk(pParse) ); - p = pParse->pConstExpr; - pExpr = sqlite3ExprDup(pParse->db, pExpr, 0); - p = sqlite3ExprListAppend(pParse, p, pExpr); - if( p ){ - struct ExprList_item *pItem = &p->a[p->nExpr-1]; - pItem->u.iConstExprReg = regDest; - pItem->reusable = reusable; - } - pParse->pConstExpr = p; -} - -/* -** Generate code to evaluate an expression and store the results -** into a register. Return the register number where the results -** are stored. -** -** If the register is a temporary register that can be deallocated, -** then write its number into *pReg. If the result register is not -** a temporary, then set *pReg to zero. -** -** If pExpr is a constant, then this routine might generate this -** code to fill the register in the initialization section of the -** VDBE program, in order to factor it out of the evaluation loop. 
-*/ -SQLITE_PRIVATE int sqlite3ExprCodeTemp(Parse *pParse, Expr *pExpr, int *pReg){ - int r2; - pExpr = sqlite3ExprSkipCollate(pExpr); - if( ConstFactorOk(pParse) - && pExpr->op!=TK_REGISTER - && sqlite3ExprIsConstantNotJoin(pExpr) - ){ - ExprList *p = pParse->pConstExpr; - int i; - *pReg = 0; - if( p ){ - struct ExprList_item *pItem; - for(pItem=p->a, i=p->nExpr; i>0; pItem++, i--){ - if( pItem->reusable && sqlite3ExprCompare(pItem->pExpr,pExpr,-1)==0 ){ - return pItem->u.iConstExprReg; - } - } - } - r2 = ++pParse->nMem; - sqlite3ExprCodeAtInit(pParse, pExpr, r2, 1); - }else{ - int r1 = sqlite3GetTempReg(pParse); - r2 = sqlite3ExprCodeTarget(pParse, pExpr, r1); - if( r2==r1 ){ - *pReg = r1; - }else{ - sqlite3ReleaseTempReg(pParse, r1); - *pReg = 0; - } - } - return r2; -} - -/* -** Generate code that will evaluate expression pExpr and store the -** results in register target. The results are guaranteed to appear -** in register target. -*/ -SQLITE_PRIVATE void sqlite3ExprCode(Parse *pParse, Expr *pExpr, int target){ - int inReg; - - assert( target>0 && target<=pParse->nMem ); - if( pExpr && pExpr->op==TK_REGISTER ){ - sqlite3VdbeAddOp2(pParse->pVdbe, OP_Copy, pExpr->iTable, target); - }else{ - inReg = sqlite3ExprCodeTarget(pParse, pExpr, target); - assert( pParse->pVdbe || pParse->db->mallocFailed ); - if( inReg!=target && pParse->pVdbe ){ - sqlite3VdbeAddOp2(pParse->pVdbe, OP_SCopy, inReg, target); - } - } -} - -/* -** Generate code that will evaluate expression pExpr and store the -** results in register target. The results are guaranteed to appear -** in register target. If the expression is constant, then this routine -** might choose to code the expression at initialization time. -*/ -SQLITE_PRIVATE void sqlite3ExprCodeFactorable(Parse *pParse, Expr *pExpr, int target){ - if( pParse->okConstFactor && sqlite3ExprIsConstant(pExpr) ){ - sqlite3ExprCodeAtInit(pParse, pExpr, target, 0); - }else{ - sqlite3ExprCode(pParse, pExpr, target); - } -} - -/* -** Generate code that evalutes the given expression and puts the result -** in register target. -** -** Also make a copy of the expression results into another "cache" register -** and modify the expression so that the next time it is evaluated, -** the result is a copy of the cache register. -** -** This routine is used for expressions that are used multiple -** times. They are evaluated once and the results of the expression -** are reused. -*/ -SQLITE_PRIVATE void sqlite3ExprCodeAndCache(Parse *pParse, Expr *pExpr, int target){ - Vdbe *v = pParse->pVdbe; - int iMem; - - assert( target>0 ); - assert( pExpr->op!=TK_REGISTER ); - sqlite3ExprCode(pParse, pExpr, target); - iMem = ++pParse->nMem; - sqlite3VdbeAddOp2(v, OP_Copy, target, iMem); - exprToRegister(pExpr, iMem); -} - -#if defined(SQLITE_ENABLE_TREE_EXPLAIN) -/* -** Generate a human-readable explanation of an expression tree. 
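What sqlite3ExprCodeAtInit and sqlite3ExprCodeTemp above buy is that a constant subexpression is coded once, in the initialization section of the VDBE program (via pParse->pConstExpr), instead of once per row of a scan. A loose C analogue of that hoisting, with purely illustrative names:

/* Without factoring: the constant product is recomputed on every row. */
static double scanUnfactored(const double *aRow, int nRow){
  double total = 0.0;
  int i;
  for(i=0; i<nRow; i++){
    total += aRow[i] * (2.54 * 12.0);   /* constant recomputed per row */
  }
  return total;
}

/* With factoring: the constant is evaluated once, before the loop, which is
** the effect of coding the expression in the init section and reusing it. */
static double scanFactored(const double *aRow, int nRow){
  const double kScale = 2.54 * 12.0;    /* "coded at init", reused below */
  double total = 0.0;
  int i;
  for(i=0; i<nRow; i++){
    total += aRow[i] * kScale;
  }
  return total;
}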
-*/ -SQLITE_PRIVATE void sqlite3ExplainExpr(Vdbe *pOut, Expr *pExpr){ - int op; /* The opcode being coded */ - const char *zBinOp = 0; /* Binary operator */ - const char *zUniOp = 0; /* Unary operator */ - if( pExpr==0 ){ - op = TK_NULL; - }else{ - op = pExpr->op; - } - switch( op ){ - case TK_AGG_COLUMN: { - sqlite3ExplainPrintf(pOut, "AGG{%d:%d}", - pExpr->iTable, pExpr->iColumn); - break; - } - case TK_COLUMN: { - if( pExpr->iTable<0 ){ - /* This only happens when coding check constraints */ - sqlite3ExplainPrintf(pOut, "COLUMN(%d)", pExpr->iColumn); - }else{ - sqlite3ExplainPrintf(pOut, "{%d:%d}", - pExpr->iTable, pExpr->iColumn); - } - break; - } - case TK_INTEGER: { - if( pExpr->flags & EP_IntValue ){ - sqlite3ExplainPrintf(pOut, "%d", pExpr->u.iValue); - }else{ - sqlite3ExplainPrintf(pOut, "%s", pExpr->u.zToken); - } - break; - } -#ifndef SQLITE_OMIT_FLOATING_POINT - case TK_FLOAT: { - sqlite3ExplainPrintf(pOut,"%s", pExpr->u.zToken); - break; - } -#endif - case TK_STRING: { - sqlite3ExplainPrintf(pOut,"%Q", pExpr->u.zToken); - break; - } - case TK_NULL: { - sqlite3ExplainPrintf(pOut,"NULL"); - break; - } -#ifndef SQLITE_OMIT_BLOB_LITERAL - case TK_BLOB: { - sqlite3ExplainPrintf(pOut,"%s", pExpr->u.zToken); - break; - } -#endif - case TK_VARIABLE: { - sqlite3ExplainPrintf(pOut,"VARIABLE(%s,%d)", - pExpr->u.zToken, pExpr->iColumn); - break; - } - case TK_REGISTER: { - sqlite3ExplainPrintf(pOut,"REGISTER(%d)", pExpr->iTable); - break; - } - case TK_AS: { - sqlite3ExplainExpr(pOut, pExpr->pLeft); - break; - } -#ifndef SQLITE_OMIT_CAST - case TK_CAST: { - /* Expressions of the form: CAST(pLeft AS token) */ - const char *zAff = "unk"; - switch( sqlite3AffinityType(pExpr->u.zToken, 0) ){ - case SQLITE_AFF_TEXT: zAff = "TEXT"; break; - case SQLITE_AFF_NONE: zAff = "NONE"; break; - case SQLITE_AFF_NUMERIC: zAff = "NUMERIC"; break; - case SQLITE_AFF_INTEGER: zAff = "INTEGER"; break; - case SQLITE_AFF_REAL: zAff = "REAL"; break; - } - sqlite3ExplainPrintf(pOut, "CAST-%s(", zAff); - sqlite3ExplainExpr(pOut, pExpr->pLeft); - sqlite3ExplainPrintf(pOut, ")"); - break; - } -#endif /* SQLITE_OMIT_CAST */ - case TK_LT: zBinOp = "LT"; break; - case TK_LE: zBinOp = "LE"; break; - case TK_GT: zBinOp = "GT"; break; - case TK_GE: zBinOp = "GE"; break; - case TK_NE: zBinOp = "NE"; break; - case TK_EQ: zBinOp = "EQ"; break; - case TK_IS: zBinOp = "IS"; break; - case TK_ISNOT: zBinOp = "ISNOT"; break; - case TK_AND: zBinOp = "AND"; break; - case TK_OR: zBinOp = "OR"; break; - case TK_PLUS: zBinOp = "ADD"; break; - case TK_STAR: zBinOp = "MUL"; break; - case TK_MINUS: zBinOp = "SUB"; break; - case TK_REM: zBinOp = "REM"; break; - case TK_BITAND: zBinOp = "BITAND"; break; - case TK_BITOR: zBinOp = "BITOR"; break; - case TK_SLASH: zBinOp = "DIV"; break; - case TK_LSHIFT: zBinOp = "LSHIFT"; break; - case TK_RSHIFT: zBinOp = "RSHIFT"; break; - case TK_CONCAT: zBinOp = "CONCAT"; break; - - case TK_UMINUS: zUniOp = "UMINUS"; break; - case TK_UPLUS: zUniOp = "UPLUS"; break; - case TK_BITNOT: zUniOp = "BITNOT"; break; - case TK_NOT: zUniOp = "NOT"; break; - case TK_ISNULL: zUniOp = "ISNULL"; break; - case TK_NOTNULL: zUniOp = "NOTNULL"; break; - - case TK_COLLATE: { - sqlite3ExplainExpr(pOut, pExpr->pLeft); - sqlite3ExplainPrintf(pOut,".COLLATE(%s)",pExpr->u.zToken); - break; - } - - case TK_AGG_FUNCTION: - case TK_FUNCTION: { - ExprList *pFarg; /* List of function arguments */ - if( ExprHasProperty(pExpr, EP_TokenOnly) ){ - pFarg = 0; - }else{ - pFarg = pExpr->x.pList; - } - if( op==TK_AGG_FUNCTION ){ - 
sqlite3ExplainPrintf(pOut, "AGG_FUNCTION%d:%s(", - pExpr->op2, pExpr->u.zToken); - }else{ - sqlite3ExplainPrintf(pOut, "FUNCTION:%s(", pExpr->u.zToken); - } - if( pFarg ){ - sqlite3ExplainExprList(pOut, pFarg); - } - sqlite3ExplainPrintf(pOut, ")"); - break; - } -#ifndef SQLITE_OMIT_SUBQUERY - case TK_EXISTS: { - sqlite3ExplainPrintf(pOut, "EXISTS("); - sqlite3ExplainSelect(pOut, pExpr->x.pSelect); - sqlite3ExplainPrintf(pOut,")"); - break; - } - case TK_SELECT: { - sqlite3ExplainPrintf(pOut, "("); - sqlite3ExplainSelect(pOut, pExpr->x.pSelect); - sqlite3ExplainPrintf(pOut, ")"); - break; - } - case TK_IN: { - sqlite3ExplainPrintf(pOut, "IN("); - sqlite3ExplainExpr(pOut, pExpr->pLeft); - sqlite3ExplainPrintf(pOut, ","); - if( ExprHasProperty(pExpr, EP_xIsSelect) ){ - sqlite3ExplainSelect(pOut, pExpr->x.pSelect); - }else{ - sqlite3ExplainExprList(pOut, pExpr->x.pList); - } - sqlite3ExplainPrintf(pOut, ")"); - break; - } -#endif /* SQLITE_OMIT_SUBQUERY */ - - /* - ** x BETWEEN y AND z - ** - ** This is equivalent to - ** - ** x>=y AND x<=z - ** - ** X is stored in pExpr->pLeft. - ** Y is stored in pExpr->pList->a[0].pExpr. - ** Z is stored in pExpr->pList->a[1].pExpr. - */ - case TK_BETWEEN: { - Expr *pX = pExpr->pLeft; - Expr *pY = pExpr->x.pList->a[0].pExpr; - Expr *pZ = pExpr->x.pList->a[1].pExpr; - sqlite3ExplainPrintf(pOut, "BETWEEN("); - sqlite3ExplainExpr(pOut, pX); - sqlite3ExplainPrintf(pOut, ","); - sqlite3ExplainExpr(pOut, pY); - sqlite3ExplainPrintf(pOut, ","); - sqlite3ExplainExpr(pOut, pZ); - sqlite3ExplainPrintf(pOut, ")"); - break; - } - case TK_TRIGGER: { - /* If the opcode is TK_TRIGGER, then the expression is a reference - ** to a column in the new.* or old.* pseudo-tables available to - ** trigger programs. In this case Expr.iTable is set to 1 for the - ** new.* pseudo-table, or 0 for the old.* pseudo-table. Expr.iColumn - ** is set to the column of the pseudo-table to read, or to -1 to - ** read the rowid field. - */ - sqlite3ExplainPrintf(pOut, "%s(%d)", - pExpr->iTable ? "NEW" : "OLD", pExpr->iColumn); - break; - } - case TK_CASE: { - sqlite3ExplainPrintf(pOut, "CASE("); - sqlite3ExplainExpr(pOut, pExpr->pLeft); - sqlite3ExplainPrintf(pOut, ","); - sqlite3ExplainExprList(pOut, pExpr->x.pList); - break; - } -#ifndef SQLITE_OMIT_TRIGGER - case TK_RAISE: { - const char *zType = "unk"; - switch( pExpr->affinity ){ - case OE_Rollback: zType = "rollback"; break; - case OE_Abort: zType = "abort"; break; - case OE_Fail: zType = "fail"; break; - case OE_Ignore: zType = "ignore"; break; - } - sqlite3ExplainPrintf(pOut, "RAISE-%s(%s)", zType, pExpr->u.zToken); - break; - } -#endif - } - if( zBinOp ){ - sqlite3ExplainPrintf(pOut,"%s(", zBinOp); - sqlite3ExplainExpr(pOut, pExpr->pLeft); - sqlite3ExplainPrintf(pOut,","); - sqlite3ExplainExpr(pOut, pExpr->pRight); - sqlite3ExplainPrintf(pOut,")"); - }else if( zUniOp ){ - sqlite3ExplainPrintf(pOut,"%s(", zUniOp); - sqlite3ExplainExpr(pOut, pExpr->pLeft); - sqlite3ExplainPrintf(pOut,")"); - } -} -#endif /* defined(SQLITE_ENABLE_TREE_EXPLAIN) */ - -#if defined(SQLITE_ENABLE_TREE_EXPLAIN) -/* -** Generate a human-readable explanation of an expression list. 
-*/ -SQLITE_PRIVATE void sqlite3ExplainExprList(Vdbe *pOut, ExprList *pList){ - int i; - if( pList==0 || pList->nExpr==0 ){ - sqlite3ExplainPrintf(pOut, "(empty-list)"); - return; - }else if( pList->nExpr==1 ){ - sqlite3ExplainExpr(pOut, pList->a[0].pExpr); - }else{ - sqlite3ExplainPush(pOut); - for(i=0; inExpr; i++){ - sqlite3ExplainPrintf(pOut, "item[%d] = ", i); - sqlite3ExplainPush(pOut); - sqlite3ExplainExpr(pOut, pList->a[i].pExpr); - sqlite3ExplainPop(pOut); - if( pList->a[i].zName ){ - sqlite3ExplainPrintf(pOut, " AS %s", pList->a[i].zName); - } - if( pList->a[i].bSpanIsTab ){ - sqlite3ExplainPrintf(pOut, " (%s)", pList->a[i].zSpan); - } - if( inExpr-1 ){ - sqlite3ExplainNL(pOut); - } - } - sqlite3ExplainPop(pOut); - } -} -#endif /* SQLITE_DEBUG */ - -/* -** Generate code that pushes the value of every element of the given -** expression list into a sequence of registers beginning at target. -** -** Return the number of elements evaluated. -** -** The SQLITE_ECEL_DUP flag prevents the arguments from being -** filled using OP_SCopy. OP_Copy must be used instead. -** -** The SQLITE_ECEL_FACTOR argument allows constant arguments to be -** factored out into initialization code. -*/ -SQLITE_PRIVATE int sqlite3ExprCodeExprList( - Parse *pParse, /* Parsing context */ - ExprList *pList, /* The expression list to be coded */ - int target, /* Where to write results */ - u8 flags /* SQLITE_ECEL_* flags */ -){ - struct ExprList_item *pItem; - int i, n; - u8 copyOp = (flags & SQLITE_ECEL_DUP) ? OP_Copy : OP_SCopy; - assert( pList!=0 ); - assert( target>0 ); - assert( pParse->pVdbe!=0 ); /* Never gets this far otherwise */ - n = pList->nExpr; - if( !ConstFactorOk(pParse) ) flags &= ~SQLITE_ECEL_FACTOR; - for(pItem=pList->a, i=0; ipExpr; - if( (flags & SQLITE_ECEL_FACTOR)!=0 && sqlite3ExprIsConstant(pExpr) ){ - sqlite3ExprCodeAtInit(pParse, pExpr, target+i, 0); - }else{ - int inReg = sqlite3ExprCodeTarget(pParse, pExpr, target+i); - if( inReg!=target+i ){ - VdbeOp *pOp; - Vdbe *v = pParse->pVdbe; - if( copyOp==OP_Copy - && (pOp=sqlite3VdbeGetOp(v, -1))->opcode==OP_Copy - && pOp->p1+pOp->p3+1==inReg - && pOp->p2+pOp->p3+1==target+i - ){ - pOp->p3++; - }else{ - sqlite3VdbeAddOp2(v, copyOp, inReg, target+i); - } - } - } - } - return n; -} - -/* -** Generate code for a BETWEEN operator. -** -** x BETWEEN y AND z -** -** The above is equivalent to -** -** x>=y AND x<=z -** -** Code it as such, taking care to do the common subexpression -** elementation of x. 
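The copy-coalescing step inside sqlite3ExprCodeExprList above avoids emitting a separate OP_Copy per element: when the previous instruction is already an OP_Copy and the new source and destination registers continue the same runs, the previous copy is widened instead. A loose sketch of that check; CopyOp and tryMergeCopy are stand-ins, not real VDBE structures.

/* Stand-in for an OP_Copy: registers p1.. are copied to p2.., with p3 counting
** the extra registers covered beyond the first. */
struct CopyOp { int p1; int p2; int p3; };

/* Return 1 and widen the previous copy if (srcReg, dstReg) extends it,
** mirroring the p1+p3+1 / p2+p3+1 test in the routine above. */
static int tryMergeCopy(struct CopyOp *pPrev, int srcReg, int dstReg){
  if( pPrev && pPrev->p1 + pPrev->p3 + 1 == srcReg
            && pPrev->p2 + pPrev->p3 + 1 == dstReg ){
    pPrev->p3++;          /* extend the previous copy by one register */
    return 1;
  }
  return 0;               /* caller emits a new copy instead */
}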
-*/
-static void exprCodeBetween(
-  Parse *pParse,    /* Parsing and code generating context */
-  Expr *pExpr,      /* The BETWEEN expression */
-  int dest,         /* Jump here if the jump is taken */
-  int jumpIfTrue,   /* Take the jump if the BETWEEN is true */
-  int jumpIfNull    /* Take the jump if the BETWEEN is NULL */
-){
-  Expr exprAnd;     /* The AND operator in x>=y AND x<=z */
-  Expr compLeft;    /* The x>=y term */
-  Expr compRight;   /* The x<=z term */
-  Expr exprX;       /* The x subexpression */
-  int regFree1 = 0; /* Temporary use register */
-
-  assert( !ExprHasProperty(pExpr, EP_xIsSelect) );
-  exprX = *pExpr->pLeft;
-  exprAnd.op = TK_AND;
-  exprAnd.pLeft = &compLeft;
-  exprAnd.pRight = &compRight;
-  compLeft.op = TK_GE;
-  compLeft.pLeft = &exprX;
-  compLeft.pRight = pExpr->x.pList->a[0].pExpr;
-  compRight.op = TK_LE;
-  compRight.pLeft = &exprX;
-  compRight.pRight = pExpr->x.pList->a[1].pExpr;
-  exprToRegister(&exprX, sqlite3ExprCodeTemp(pParse, &exprX, &regFree1));
-  if( jumpIfTrue ){
-    sqlite3ExprIfTrue(pParse, &exprAnd, dest, jumpIfNull);
-  }else{
-    sqlite3ExprIfFalse(pParse, &exprAnd, dest, jumpIfNull);
-  }
-  sqlite3ReleaseTempReg(pParse, regFree1);
-
-  /* Ensure adequate test coverage */
-  testcase( jumpIfTrue==0 && jumpIfNull==0 && regFree1==0 );
-  testcase( jumpIfTrue==0 && jumpIfNull==0 && regFree1!=0 );
-  testcase( jumpIfTrue==0 && jumpIfNull!=0 && regFree1==0 );
-  testcase( jumpIfTrue==0 && jumpIfNull!=0 && regFree1!=0 );
-  testcase( jumpIfTrue!=0 && jumpIfNull==0 && regFree1==0 );
-  testcase( jumpIfTrue!=0 && jumpIfNull==0 && regFree1!=0 );
-  testcase( jumpIfTrue!=0 && jumpIfNull!=0 && regFree1==0 );
-  testcase( jumpIfTrue!=0 && jumpIfNull!=0 && regFree1!=0 );
-}
-
-/*
-** Generate code for a boolean expression such that a jump is made
-** to the label "dest" if the expression is true but execution
-** continues straight thru if the expression is false.
-**
-** If the expression evaluates to NULL (neither true nor false), then
-** take the jump if the jumpIfNull flag is SQLITE_JUMPIFNULL.
-**
-** This code depends on the fact that certain token values (ex: TK_EQ)
-** are the same as opcode values (ex: OP_Eq) that implement the corresponding
-** operation. Special comments in vdbe.c and the mkopcodeh.awk script in
-** the make process cause these values to align. Assert()s in the code
-** below verify that the numbers are aligned correctly.
-*/ -SQLITE_PRIVATE void sqlite3ExprIfTrue(Parse *pParse, Expr *pExpr, int dest, int jumpIfNull){ - Vdbe *v = pParse->pVdbe; - int op = 0; - int regFree1 = 0; - int regFree2 = 0; - int r1, r2; - - assert( jumpIfNull==SQLITE_JUMPIFNULL || jumpIfNull==0 ); - if( NEVER(v==0) ) return; /* Existence of VDBE checked by caller */ - if( NEVER(pExpr==0) ) return; /* No way this can happen */ - op = pExpr->op; - switch( op ){ - case TK_AND: { - int d2 = sqlite3VdbeMakeLabel(v); - testcase( jumpIfNull==0 ); - sqlite3ExprIfFalse(pParse, pExpr->pLeft, d2,jumpIfNull^SQLITE_JUMPIFNULL); - sqlite3ExprCachePush(pParse); - sqlite3ExprIfTrue(pParse, pExpr->pRight, dest, jumpIfNull); - sqlite3VdbeResolveLabel(v, d2); - sqlite3ExprCachePop(pParse); - break; - } - case TK_OR: { - testcase( jumpIfNull==0 ); - sqlite3ExprIfTrue(pParse, pExpr->pLeft, dest, jumpIfNull); - sqlite3ExprCachePush(pParse); - sqlite3ExprIfTrue(pParse, pExpr->pRight, dest, jumpIfNull); - sqlite3ExprCachePop(pParse); - break; - } - case TK_NOT: { - testcase( jumpIfNull==0 ); - sqlite3ExprIfFalse(pParse, pExpr->pLeft, dest, jumpIfNull); - break; - } - case TK_LT: - case TK_LE: - case TK_GT: - case TK_GE: - case TK_NE: - case TK_EQ: { - testcase( jumpIfNull==0 ); - r1 = sqlite3ExprCodeTemp(pParse, pExpr->pLeft, ®Free1); - r2 = sqlite3ExprCodeTemp(pParse, pExpr->pRight, ®Free2); - codeCompare(pParse, pExpr->pLeft, pExpr->pRight, op, - r1, r2, dest, jumpIfNull); - assert(TK_LT==OP_Lt); testcase(op==OP_Lt); VdbeCoverageIf(v,op==OP_Lt); - assert(TK_LE==OP_Le); testcase(op==OP_Le); VdbeCoverageIf(v,op==OP_Le); - assert(TK_GT==OP_Gt); testcase(op==OP_Gt); VdbeCoverageIf(v,op==OP_Gt); - assert(TK_GE==OP_Ge); testcase(op==OP_Ge); VdbeCoverageIf(v,op==OP_Ge); - assert(TK_EQ==OP_Eq); testcase(op==OP_Eq); VdbeCoverageIf(v,op==OP_Eq); - assert(TK_NE==OP_Ne); testcase(op==OP_Ne); VdbeCoverageIf(v,op==OP_Ne); - testcase( regFree1==0 ); - testcase( regFree2==0 ); - break; - } - case TK_IS: - case TK_ISNOT: { - testcase( op==TK_IS ); - testcase( op==TK_ISNOT ); - r1 = sqlite3ExprCodeTemp(pParse, pExpr->pLeft, ®Free1); - r2 = sqlite3ExprCodeTemp(pParse, pExpr->pRight, ®Free2); - op = (op==TK_IS) ? TK_EQ : TK_NE; - codeCompare(pParse, pExpr->pLeft, pExpr->pRight, op, - r1, r2, dest, SQLITE_NULLEQ); - VdbeCoverageIf(v, op==TK_EQ); - VdbeCoverageIf(v, op==TK_NE); - testcase( regFree1==0 ); - testcase( regFree2==0 ); - break; - } - case TK_ISNULL: - case TK_NOTNULL: { - assert( TK_ISNULL==OP_IsNull ); testcase( op==TK_ISNULL ); - assert( TK_NOTNULL==OP_NotNull ); testcase( op==TK_NOTNULL ); - r1 = sqlite3ExprCodeTemp(pParse, pExpr->pLeft, ®Free1); - sqlite3VdbeAddOp2(v, op, r1, dest); - VdbeCoverageIf(v, op==TK_ISNULL); - VdbeCoverageIf(v, op==TK_NOTNULL); - testcase( regFree1==0 ); - break; - } - case TK_BETWEEN: { - testcase( jumpIfNull==0 ); - exprCodeBetween(pParse, pExpr, dest, 1, jumpIfNull); - break; - } -#ifndef SQLITE_OMIT_SUBQUERY - case TK_IN: { - int destIfFalse = sqlite3VdbeMakeLabel(v); - int destIfNull = jumpIfNull ? 
dest : destIfFalse; - sqlite3ExprCodeIN(pParse, pExpr, destIfFalse, destIfNull); - sqlite3VdbeAddOp2(v, OP_Goto, 0, dest); - sqlite3VdbeResolveLabel(v, destIfFalse); - break; - } -#endif - default: { - if( exprAlwaysTrue(pExpr) ){ - sqlite3VdbeAddOp2(v, OP_Goto, 0, dest); - }else if( exprAlwaysFalse(pExpr) ){ - /* No-op */ - }else{ - r1 = sqlite3ExprCodeTemp(pParse, pExpr, ®Free1); - sqlite3VdbeAddOp3(v, OP_If, r1, dest, jumpIfNull!=0); - VdbeCoverage(v); - testcase( regFree1==0 ); - testcase( jumpIfNull==0 ); - } - break; - } - } - sqlite3ReleaseTempReg(pParse, regFree1); - sqlite3ReleaseTempReg(pParse, regFree2); -} - -/* -** Generate code for a boolean expression such that a jump is made -** to the label "dest" if the expression is false but execution -** continues straight thru if the expression is true. -** -** If the expression evaluates to NULL (neither true nor false) then -** jump if jumpIfNull is SQLITE_JUMPIFNULL or fall through if jumpIfNull -** is 0. -*/ -SQLITE_PRIVATE void sqlite3ExprIfFalse(Parse *pParse, Expr *pExpr, int dest, int jumpIfNull){ - Vdbe *v = pParse->pVdbe; - int op = 0; - int regFree1 = 0; - int regFree2 = 0; - int r1, r2; - - assert( jumpIfNull==SQLITE_JUMPIFNULL || jumpIfNull==0 ); - if( NEVER(v==0) ) return; /* Existence of VDBE checked by caller */ - if( pExpr==0 ) return; - - /* The value of pExpr->op and op are related as follows: - ** - ** pExpr->op op - ** --------- ---------- - ** TK_ISNULL OP_NotNull - ** TK_NOTNULL OP_IsNull - ** TK_NE OP_Eq - ** TK_EQ OP_Ne - ** TK_GT OP_Le - ** TK_LE OP_Gt - ** TK_GE OP_Lt - ** TK_LT OP_Ge - ** - ** For other values of pExpr->op, op is undefined and unused. - ** The value of TK_ and OP_ constants are arranged such that we - ** can compute the mapping above using the following expression. - ** Assert()s verify that the computation is correct. 
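The mapping in the table above is computed arithmetically just below: since each TK_ value equals its OP_ counterpart and each operator sits next to its logical complement, the computation is a low-bit flip with a parity correction. A small illustration of that trick, with TK_ISNULL used only as the representative base value the source uses:

/* Flip an operator to its complement, assuming complementary operators occupy
** adjacent integer codes and tkBase supplies the parity of the first pair. */
static int complementOp(int tkOp, int tkBase){
  return ((tkOp + (tkBase & 1)) ^ 1) - (tkBase & 1);
}
/* Per the table above: complementOp(TK_ISNULL, TK_ISNULL)==OP_NotNull,
** complementOp(TK_EQ, TK_ISNULL)==OP_Ne, complementOp(TK_GT, TK_ISNULL)==OP_Le. */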
- */ - op = ((pExpr->op+(TK_ISNULL&1))^1)-(TK_ISNULL&1); - - /* Verify correct alignment of TK_ and OP_ constants - */ - assert( pExpr->op!=TK_ISNULL || op==OP_NotNull ); - assert( pExpr->op!=TK_NOTNULL || op==OP_IsNull ); - assert( pExpr->op!=TK_NE || op==OP_Eq ); - assert( pExpr->op!=TK_EQ || op==OP_Ne ); - assert( pExpr->op!=TK_LT || op==OP_Ge ); - assert( pExpr->op!=TK_LE || op==OP_Gt ); - assert( pExpr->op!=TK_GT || op==OP_Le ); - assert( pExpr->op!=TK_GE || op==OP_Lt ); - - switch( pExpr->op ){ - case TK_AND: { - testcase( jumpIfNull==0 ); - sqlite3ExprIfFalse(pParse, pExpr->pLeft, dest, jumpIfNull); - sqlite3ExprCachePush(pParse); - sqlite3ExprIfFalse(pParse, pExpr->pRight, dest, jumpIfNull); - sqlite3ExprCachePop(pParse); - break; - } - case TK_OR: { - int d2 = sqlite3VdbeMakeLabel(v); - testcase( jumpIfNull==0 ); - sqlite3ExprIfTrue(pParse, pExpr->pLeft, d2, jumpIfNull^SQLITE_JUMPIFNULL); - sqlite3ExprCachePush(pParse); - sqlite3ExprIfFalse(pParse, pExpr->pRight, dest, jumpIfNull); - sqlite3VdbeResolveLabel(v, d2); - sqlite3ExprCachePop(pParse); - break; - } - case TK_NOT: { - testcase( jumpIfNull==0 ); - sqlite3ExprIfTrue(pParse, pExpr->pLeft, dest, jumpIfNull); - break; - } - case TK_LT: - case TK_LE: - case TK_GT: - case TK_GE: - case TK_NE: - case TK_EQ: { - testcase( jumpIfNull==0 ); - r1 = sqlite3ExprCodeTemp(pParse, pExpr->pLeft, ®Free1); - r2 = sqlite3ExprCodeTemp(pParse, pExpr->pRight, ®Free2); - codeCompare(pParse, pExpr->pLeft, pExpr->pRight, op, - r1, r2, dest, jumpIfNull); - assert(TK_LT==OP_Lt); testcase(op==OP_Lt); VdbeCoverageIf(v,op==OP_Lt); - assert(TK_LE==OP_Le); testcase(op==OP_Le); VdbeCoverageIf(v,op==OP_Le); - assert(TK_GT==OP_Gt); testcase(op==OP_Gt); VdbeCoverageIf(v,op==OP_Gt); - assert(TK_GE==OP_Ge); testcase(op==OP_Ge); VdbeCoverageIf(v,op==OP_Ge); - assert(TK_EQ==OP_Eq); testcase(op==OP_Eq); VdbeCoverageIf(v,op==OP_Eq); - assert(TK_NE==OP_Ne); testcase(op==OP_Ne); VdbeCoverageIf(v,op==OP_Ne); - testcase( regFree1==0 ); - testcase( regFree2==0 ); - break; - } - case TK_IS: - case TK_ISNOT: { - testcase( pExpr->op==TK_IS ); - testcase( pExpr->op==TK_ISNOT ); - r1 = sqlite3ExprCodeTemp(pParse, pExpr->pLeft, ®Free1); - r2 = sqlite3ExprCodeTemp(pParse, pExpr->pRight, ®Free2); - op = (pExpr->op==TK_IS) ? 
TK_NE : TK_EQ; - codeCompare(pParse, pExpr->pLeft, pExpr->pRight, op, - r1, r2, dest, SQLITE_NULLEQ); - VdbeCoverageIf(v, op==TK_EQ); - VdbeCoverageIf(v, op==TK_NE); - testcase( regFree1==0 ); - testcase( regFree2==0 ); - break; - } - case TK_ISNULL: - case TK_NOTNULL: { - r1 = sqlite3ExprCodeTemp(pParse, pExpr->pLeft, ®Free1); - sqlite3VdbeAddOp2(v, op, r1, dest); - testcase( op==TK_ISNULL ); VdbeCoverageIf(v, op==TK_ISNULL); - testcase( op==TK_NOTNULL ); VdbeCoverageIf(v, op==TK_NOTNULL); - testcase( regFree1==0 ); - break; - } - case TK_BETWEEN: { - testcase( jumpIfNull==0 ); - exprCodeBetween(pParse, pExpr, dest, 0, jumpIfNull); - break; - } -#ifndef SQLITE_OMIT_SUBQUERY - case TK_IN: { - if( jumpIfNull ){ - sqlite3ExprCodeIN(pParse, pExpr, dest, dest); - }else{ - int destIfNull = sqlite3VdbeMakeLabel(v); - sqlite3ExprCodeIN(pParse, pExpr, dest, destIfNull); - sqlite3VdbeResolveLabel(v, destIfNull); - } - break; - } -#endif - default: { - if( exprAlwaysFalse(pExpr) ){ - sqlite3VdbeAddOp2(v, OP_Goto, 0, dest); - }else if( exprAlwaysTrue(pExpr) ){ - /* no-op */ - }else{ - r1 = sqlite3ExprCodeTemp(pParse, pExpr, ®Free1); - sqlite3VdbeAddOp3(v, OP_IfNot, r1, dest, jumpIfNull!=0); - VdbeCoverage(v); - testcase( regFree1==0 ); - testcase( jumpIfNull==0 ); - } - break; - } - } - sqlite3ReleaseTempReg(pParse, regFree1); - sqlite3ReleaseTempReg(pParse, regFree2); -} - -/* -** Do a deep comparison of two expression trees. Return 0 if the two -** expressions are completely identical. Return 1 if they differ only -** by a COLLATE operator at the top level. Return 2 if there are differences -** other than the top-level COLLATE operator. -** -** If any subelement of pB has Expr.iTable==(-1) then it is allowed -** to compare equal to an equivalent element in pA with Expr.iTable==iTab. -** -** The pA side might be using TK_REGISTER. If that is the case and pB is -** not using TK_REGISTER but is otherwise equivalent, then still return 0. -** -** Sometimes this routine will return 2 even if the two expressions -** really are equivalent. If we cannot prove that the expressions are -** identical, we return 2 just to be safe. So if this routine -** returns 2, then you do not really know for certain if the two -** expressions are the same. But if you get a 0 or 1 return, then you -** can be sure the expressions are the same. In the places where -** this routine is used, it does not hurt to get an extra 2 - that -** just might result in some slightly slower code. But returning -** an incorrect 0 or 1 could lead to a malfunction. -*/ -SQLITE_PRIVATE int sqlite3ExprCompare(Expr *pA, Expr *pB, int iTab){ - u32 combinedFlags; - if( pA==0 || pB==0 ){ - return pB==pA ? 0 : 2; - } - combinedFlags = pA->flags | pB->flags; - if( combinedFlags & EP_IntValue ){ - if( (pA->flags&pB->flags&EP_IntValue)!=0 && pA->u.iValue==pB->u.iValue ){ - return 0; - } - return 2; - } - if( pA->op!=pB->op ){ - if( pA->op==TK_COLLATE && sqlite3ExprCompare(pA->pLeft, pB, iTab)<2 ){ - return 1; - } - if( pB->op==TK_COLLATE && sqlite3ExprCompare(pA, pB->pLeft, iTab)<2 ){ - return 1; - } - return 2; - } - if( pA->op!=TK_COLUMN && ALWAYS(pA->op!=TK_AGG_COLUMN) && pA->u.zToken ){ - if( strcmp(pA->u.zToken,pB->u.zToken)!=0 ){ - return pA->op==TK_COLLATE ? 
1 : 2; - } - } - if( (pA->flags & EP_Distinct)!=(pB->flags & EP_Distinct) ) return 2; - if( ALWAYS((combinedFlags & EP_TokenOnly)==0) ){ - if( combinedFlags & EP_xIsSelect ) return 2; - if( sqlite3ExprCompare(pA->pLeft, pB->pLeft, iTab) ) return 2; - if( sqlite3ExprCompare(pA->pRight, pB->pRight, iTab) ) return 2; - if( sqlite3ExprListCompare(pA->x.pList, pB->x.pList, iTab) ) return 2; - if( ALWAYS((combinedFlags & EP_Reduced)==0) ){ - if( pA->iColumn!=pB->iColumn ) return 2; - if( pA->iTable!=pB->iTable - && (pA->iTable!=iTab || NEVER(pB->iTable>=0)) ) return 2; - } - } - return 0; -} - -/* -** Compare two ExprList objects. Return 0 if they are identical and -** non-zero if they differ in any way. -** -** If any subelement of pB has Expr.iTable==(-1) then it is allowed -** to compare equal to an equivalent element in pA with Expr.iTable==iTab. -** -** This routine might return non-zero for equivalent ExprLists. The -** only consequence will be disabled optimizations. But this routine -** must never return 0 if the two ExprList objects are different, or -** a malfunction will result. -** -** Two NULL pointers are considered to be the same. But a NULL pointer -** always differs from a non-NULL pointer. -*/ -SQLITE_PRIVATE int sqlite3ExprListCompare(ExprList *pA, ExprList *pB, int iTab){ - int i; - if( pA==0 && pB==0 ) return 0; - if( pA==0 || pB==0 ) return 1; - if( pA->nExpr!=pB->nExpr ) return 1; - for(i=0; inExpr; i++){ - Expr *pExprA = pA->a[i].pExpr; - Expr *pExprB = pB->a[i].pExpr; - if( pA->a[i].sortOrder!=pB->a[i].sortOrder ) return 1; - if( sqlite3ExprCompare(pExprA, pExprB, iTab) ) return 1; - } - return 0; -} - -/* -** Return true if we can prove the pE2 will always be true if pE1 is -** true. Return false if we cannot complete the proof or if pE2 might -** be false. Examples: -** -** pE1: x==5 pE2: x==5 Result: true -** pE1: x>0 pE2: x==5 Result: false -** pE1: x=21 pE2: x=21 OR y=43 Result: true -** pE1: x!=123 pE2: x IS NOT NULL Result: true -** pE1: x!=?1 pE2: x IS NOT NULL Result: true -** pE1: x IS NULL pE2: x IS NOT NULL Result: false -** pE1: x IS ?2 pE2: x IS NOT NULL Reuslt: false -** -** When comparing TK_COLUMN nodes between pE1 and pE2, if pE2 has -** Expr.iTable<0 then assume a table number given by iTab. -** -** When in doubt, return false. Returning true might give a performance -** improvement. Returning false might cause a performance reduction, but -** it will always give the correct answer and is hence always safe. -*/ -SQLITE_PRIVATE int sqlite3ExprImpliesExpr(Expr *pE1, Expr *pE2, int iTab){ - if( sqlite3ExprCompare(pE1, pE2, iTab)==0 ){ - return 1; - } - if( pE2->op==TK_OR - && (sqlite3ExprImpliesExpr(pE1, pE2->pLeft, iTab) - || sqlite3ExprImpliesExpr(pE1, pE2->pRight, iTab) ) - ){ - return 1; - } - if( pE2->op==TK_NOTNULL - && sqlite3ExprCompare(pE1->pLeft, pE2->pLeft, iTab)==0 - && (pE1->op!=TK_ISNULL && pE1->op!=TK_IS) - ){ - return 1; - } - return 0; -} - -/* -** An instance of the following structure is used by the tree walker -** to count references to table columns in the arguments of an -** aggregate function, in order to implement the -** sqlite3FunctionThisSrc() routine. -*/ -struct SrcCount { - SrcList *pSrc; /* One particular FROM clause in a nested query */ - int nThis; /* Number of references to columns in pSrcList */ - int nOther; /* Number of references to columns in other FROM clauses */ -}; - -/* -** Count the number of references to columns. 
-*/ -static int exprSrcCount(Walker *pWalker, Expr *pExpr){ - /* The NEVER() on the second term is because sqlite3FunctionUsesThisSrc() - ** is always called before sqlite3ExprAnalyzeAggregates() and so the - ** TK_COLUMNs have not yet been converted into TK_AGG_COLUMN. If - ** sqlite3FunctionUsesThisSrc() is used differently in the future, the - ** NEVER() will need to be removed. */ - if( pExpr->op==TK_COLUMN || NEVER(pExpr->op==TK_AGG_COLUMN) ){ - int i; - struct SrcCount *p = pWalker->u.pSrcCount; - SrcList *pSrc = p->pSrc; - for(i=0; inSrc; i++){ - if( pExpr->iTable==pSrc->a[i].iCursor ) break; - } - if( inSrc ){ - p->nThis++; - }else{ - p->nOther++; - } - } - return WRC_Continue; -} - -/* -** Determine if any of the arguments to the pExpr Function reference -** pSrcList. Return true if they do. Also return true if the function -** has no arguments or has only constant arguments. Return false if pExpr -** references columns but not columns of tables found in pSrcList. -*/ -SQLITE_PRIVATE int sqlite3FunctionUsesThisSrc(Expr *pExpr, SrcList *pSrcList){ - Walker w; - struct SrcCount cnt; - assert( pExpr->op==TK_AGG_FUNCTION ); - memset(&w, 0, sizeof(w)); - w.xExprCallback = exprSrcCount; - w.u.pSrcCount = &cnt; - cnt.pSrc = pSrcList; - cnt.nThis = 0; - cnt.nOther = 0; - sqlite3WalkExprList(&w, pExpr->x.pList); - return cnt.nThis>0 || cnt.nOther==0; -} - -/* -** Add a new element to the pAggInfo->aCol[] array. Return the index of -** the new element. Return a negative number if malloc fails. -*/ -static int addAggInfoColumn(sqlite3 *db, AggInfo *pInfo){ - int i; - pInfo->aCol = sqlite3ArrayAllocate( - db, - pInfo->aCol, - sizeof(pInfo->aCol[0]), - &pInfo->nColumn, - &i - ); - return i; -} - -/* -** Add a new element to the pAggInfo->aFunc[] array. Return the index of -** the new element. Return a negative number if malloc fails. -*/ -static int addAggInfoFunc(sqlite3 *db, AggInfo *pInfo){ - int i; - pInfo->aFunc = sqlite3ArrayAllocate( - db, - pInfo->aFunc, - sizeof(pInfo->aFunc[0]), - &pInfo->nFunc, - &i - ); - return i; -} - -/* -** This is the xExprCallback for a tree walker. It is used to -** implement sqlite3ExprAnalyzeAggregates(). See sqlite3ExprAnalyzeAggregates -** for additional information. -*/ -static int analyzeAggregate(Walker *pWalker, Expr *pExpr){ - int i; - NameContext *pNC = pWalker->u.pNC; - Parse *pParse = pNC->pParse; - SrcList *pSrcList = pNC->pSrcList; - AggInfo *pAggInfo = pNC->pAggInfo; - - switch( pExpr->op ){ - case TK_AGG_COLUMN: - case TK_COLUMN: { - testcase( pExpr->op==TK_AGG_COLUMN ); - testcase( pExpr->op==TK_COLUMN ); - /* Check to see if the column is in one of the tables in the FROM - ** clause of the aggregate query */ - if( ALWAYS(pSrcList!=0) ){ - struct SrcList_item *pItem = pSrcList->a; - for(i=0; inSrc; i++, pItem++){ - struct AggInfo_col *pCol; - assert( !ExprHasProperty(pExpr, EP_TokenOnly|EP_Reduced) ); - if( pExpr->iTable==pItem->iCursor ){ - /* If we reach this point, it means that pExpr refers to a table - ** that is in the FROM clause of the aggregate query. - ** - ** Make an entry for the column in pAggInfo->aCol[] if there - ** is not an entry there already. 
- */ - int k; - pCol = pAggInfo->aCol; - for(k=0; knColumn; k++, pCol++){ - if( pCol->iTable==pExpr->iTable && - pCol->iColumn==pExpr->iColumn ){ - break; - } - } - if( (k>=pAggInfo->nColumn) - && (k = addAggInfoColumn(pParse->db, pAggInfo))>=0 - ){ - pCol = &pAggInfo->aCol[k]; - pCol->pTab = pExpr->pTab; - pCol->iTable = pExpr->iTable; - pCol->iColumn = pExpr->iColumn; - pCol->iMem = ++pParse->nMem; - pCol->iSorterColumn = -1; - pCol->pExpr = pExpr; - if( pAggInfo->pGroupBy ){ - int j, n; - ExprList *pGB = pAggInfo->pGroupBy; - struct ExprList_item *pTerm = pGB->a; - n = pGB->nExpr; - for(j=0; jpExpr; - if( pE->op==TK_COLUMN && pE->iTable==pExpr->iTable && - pE->iColumn==pExpr->iColumn ){ - pCol->iSorterColumn = j; - break; - } - } - } - if( pCol->iSorterColumn<0 ){ - pCol->iSorterColumn = pAggInfo->nSortingColumn++; - } - } - /* There is now an entry for pExpr in pAggInfo->aCol[] (either - ** because it was there before or because we just created it). - ** Convert the pExpr to be a TK_AGG_COLUMN referring to that - ** pAggInfo->aCol[] entry. - */ - ExprSetVVAProperty(pExpr, EP_NoReduce); - pExpr->pAggInfo = pAggInfo; - pExpr->op = TK_AGG_COLUMN; - pExpr->iAgg = (i16)k; - break; - } /* endif pExpr->iTable==pItem->iCursor */ - } /* end loop over pSrcList */ - } - return WRC_Prune; - } - case TK_AGG_FUNCTION: { - if( (pNC->ncFlags & NC_InAggFunc)==0 - && pWalker->walkerDepth==pExpr->op2 - ){ - /* Check to see if pExpr is a duplicate of another aggregate - ** function that is already in the pAggInfo structure - */ - struct AggInfo_func *pItem = pAggInfo->aFunc; - for(i=0; inFunc; i++, pItem++){ - if( sqlite3ExprCompare(pItem->pExpr, pExpr, -1)==0 ){ - break; - } - } - if( i>=pAggInfo->nFunc ){ - /* pExpr is original. Make a new entry in pAggInfo->aFunc[] - */ - u8 enc = ENC(pParse->db); - i = addAggInfoFunc(pParse->db, pAggInfo); - if( i>=0 ){ - assert( !ExprHasProperty(pExpr, EP_xIsSelect) ); - pItem = &pAggInfo->aFunc[i]; - pItem->pExpr = pExpr; - pItem->iMem = ++pParse->nMem; - assert( !ExprHasProperty(pExpr, EP_IntValue) ); - pItem->pFunc = sqlite3FindFunction(pParse->db, - pExpr->u.zToken, sqlite3Strlen30(pExpr->u.zToken), - pExpr->x.pList ? pExpr->x.pList->nExpr : 0, enc, 0); - if( pExpr->flags & EP_Distinct ){ - pItem->iDistinct = pParse->nTab++; - }else{ - pItem->iDistinct = -1; - } - } - } - /* Make pExpr point to the appropriate pAggInfo->aFunc[] entry - */ - assert( !ExprHasProperty(pExpr, EP_TokenOnly|EP_Reduced) ); - ExprSetVVAProperty(pExpr, EP_NoReduce); - pExpr->iAgg = (i16)i; - pExpr->pAggInfo = pAggInfo; - return WRC_Prune; - }else{ - return WRC_Continue; - } - } - } - return WRC_Continue; -} -static int analyzeAggregatesInSelect(Walker *pWalker, Select *pSelect){ - UNUSED_PARAMETER(pWalker); - UNUSED_PARAMETER(pSelect); - return WRC_Continue; -} - -/* -** Analyze the pExpr expression looking for aggregate functions and -** for variables that need to be added to AggInfo object that pNC->pAggInfo -** points to. Additional entries are made on the AggInfo object as -** necessary. -** -** This routine should only be called after the expression has been -** analyzed by sqlite3ResolveExprNames(). -*/ -SQLITE_PRIVATE void sqlite3ExprAnalyzeAggregates(NameContext *pNC, Expr *pExpr){ - Walker w; - memset(&w, 0, sizeof(w)); - w.xExprCallback = analyzeAggregate; - w.xSelectCallback = analyzeAggregatesInSelect; - w.u.pNC = pNC; - assert( pNC->pSrcList!=0 ); - sqlite3WalkExpr(&w, pExpr); -} - -/* -** Call sqlite3ExprAnalyzeAggregates() for every expression in an -** expression list. 
Return the number of errors. -** -** If an error is found, the analysis is cut short. -*/ -SQLITE_PRIVATE void sqlite3ExprAnalyzeAggList(NameContext *pNC, ExprList *pList){ - struct ExprList_item *pItem; - int i; - if( pList ){ - for(pItem=pList->a, i=0; inExpr; i++, pItem++){ - sqlite3ExprAnalyzeAggregates(pNC, pItem->pExpr); - } - } -} - -/* -** Allocate a single new register for use to hold some intermediate result. -*/ -SQLITE_PRIVATE int sqlite3GetTempReg(Parse *pParse){ - if( pParse->nTempReg==0 ){ - return ++pParse->nMem; - } - return pParse->aTempReg[--pParse->nTempReg]; -} - -/* -** Deallocate a register, making available for reuse for some other -** purpose. -** -** If a register is currently being used by the column cache, then -** the dallocation is deferred until the column cache line that uses -** the register becomes stale. -*/ -SQLITE_PRIVATE void sqlite3ReleaseTempReg(Parse *pParse, int iReg){ - if( iReg && pParse->nTempRegaTempReg) ){ - int i; - struct yColCache *p; - for(i=0, p=pParse->aColCache; iiReg==iReg ){ - p->tempReg = 1; - return; - } - } - pParse->aTempReg[pParse->nTempReg++] = iReg; - } -} - -/* -** Allocate or deallocate a block of nReg consecutive registers -*/ -SQLITE_PRIVATE int sqlite3GetTempRange(Parse *pParse, int nReg){ - int i, n; - i = pParse->iRangeReg; - n = pParse->nRangeReg; - if( nReg<=n ){ - assert( !usedAsColumnCache(pParse, i, i+n-1) ); - pParse->iRangeReg += nReg; - pParse->nRangeReg -= nReg; - }else{ - i = pParse->nMem+1; - pParse->nMem += nReg; - } - return i; -} -SQLITE_PRIVATE void sqlite3ReleaseTempRange(Parse *pParse, int iReg, int nReg){ - sqlite3ExprCacheRemove(pParse, iReg, nReg); - if( nReg>pParse->nRangeReg ){ - pParse->nRangeReg = nReg; - pParse->iRangeReg = iReg; - } -} - -/* -** Mark all temporary registers as being unavailable for reuse. -*/ -SQLITE_PRIVATE void sqlite3ClearTempRegCache(Parse *pParse){ - pParse->nTempReg = 0; - pParse->nRangeReg = 0; -} - -/************** End of expr.c ************************************************/ -/************** Begin file alter.c *******************************************/ -/* -** 2005 February 15 -** -** The author disclaims copyright to this source code. In place of -** a legal notice, here is a blessing: -** -** May you do good and not evil. -** May you find forgiveness for yourself and forgive others. -** May you share freely, never taking more than you give. -** -************************************************************************* -** This file contains C code routines that used to generate VDBE code -** that implements the ALTER TABLE command. -*/ - -/* -** The code in this file only exists if we are not omitting the -** ALTER TABLE logic from the build. -*/ -#ifndef SQLITE_OMIT_ALTERTABLE - - -/* -** This function is used by SQL generated to implement the -** ALTER TABLE command. The first argument is the text of a CREATE TABLE or -** CREATE INDEX command. The second is a table name. The table name in -** the CREATE TABLE or CREATE INDEX statement is replaced with the third -** argument and the result returned. 
Examples: -** -** sqlite_rename_table('CREATE TABLE abc(a, b, c)', 'def') -** -> 'CREATE TABLE def(a, b, c)' -** -** sqlite_rename_table('CREATE INDEX i ON abc(a)', 'def') -** -> 'CREATE INDEX i ON def(a, b, c)' -*/ -static void renameTableFunc( - sqlite3_context *context, - int NotUsed, - sqlite3_value **argv -){ - unsigned char const *zSql = sqlite3_value_text(argv[0]); - unsigned char const *zTableName = sqlite3_value_text(argv[1]); - - int token; - Token tname; - unsigned char const *zCsr = zSql; - int len = 0; - char *zRet; - - sqlite3 *db = sqlite3_context_db_handle(context); - - UNUSED_PARAMETER(NotUsed); - - /* The principle used to locate the table name in the CREATE TABLE - ** statement is that the table name is the first non-space token that - ** is immediately followed by a TK_LP or TK_USING token. - */ - if( zSql ){ - do { - if( !*zCsr ){ - /* Ran out of input before finding an opening bracket. Return NULL. */ - return; - } - - /* Store the token that zCsr points to in tname. */ - tname.z = (char*)zCsr; - tname.n = len; - - /* Advance zCsr to the next token. Store that token type in 'token', - ** and its length in 'len' (to be used next iteration of this loop). - */ - do { - zCsr += len; - len = sqlite3GetToken(zCsr, &token); - } while( token==TK_SPACE ); - assert( len>0 ); - } while( token!=TK_LP && token!=TK_USING ); - - zRet = sqlite3MPrintf(db, "%.*s\"%w\"%s", (int)(((u8*)tname.z) - zSql), - zSql, zTableName, tname.z+tname.n); - sqlite3_result_text(context, zRet, -1, SQLITE_DYNAMIC); - } -} - -/* -** This C function implements an SQL user function that is used by SQL code -** generated by the ALTER TABLE ... RENAME command to modify the definition -** of any foreign key constraints that use the table being renamed as the -** parent table. It is passed three arguments: -** -** 1) The complete text of the CREATE TABLE statement being modified, -** 2) The old name of the table being renamed, and -** 3) The new name of the table being renamed. -** -** It returns the new CREATE TABLE statement. 
For example: -** -** sqlite_rename_parent('CREATE TABLE t1(a REFERENCES t2)', 't2', 't3') -** -> 'CREATE TABLE t1(a REFERENCES t3)' -*/ -#ifndef SQLITE_OMIT_FOREIGN_KEY -static void renameParentFunc( - sqlite3_context *context, - int NotUsed, - sqlite3_value **argv -){ - sqlite3 *db = sqlite3_context_db_handle(context); - char *zOutput = 0; - char *zResult; - unsigned char const *zInput = sqlite3_value_text(argv[0]); - unsigned char const *zOld = sqlite3_value_text(argv[1]); - unsigned char const *zNew = sqlite3_value_text(argv[2]); - - unsigned const char *z; /* Pointer to token */ - int n; /* Length of token z */ - int token; /* Type of token */ - - UNUSED_PARAMETER(NotUsed); - if( zInput==0 || zOld==0 ) return; - for(z=zInput; *z; z=z+n){ - n = sqlite3GetToken(z, &token); - if( token==TK_REFERENCES ){ - char *zParent; - do { - z += n; - n = sqlite3GetToken(z, &token); - }while( token==TK_SPACE ); - - zParent = sqlite3DbStrNDup(db, (const char *)z, n); - if( zParent==0 ) break; - sqlite3Dequote(zParent); - if( 0==sqlite3StrICmp((const char *)zOld, zParent) ){ - char *zOut = sqlite3MPrintf(db, "%s%.*s\"%w\"", - (zOutput?zOutput:""), (int)(z-zInput), zInput, (const char *)zNew - ); - sqlite3DbFree(db, zOutput); - zOutput = zOut; - zInput = &z[n]; - } - sqlite3DbFree(db, zParent); - } - } - - zResult = sqlite3MPrintf(db, "%s%s", (zOutput?zOutput:""), zInput), - sqlite3_result_text(context, zResult, -1, SQLITE_DYNAMIC); - sqlite3DbFree(db, zOutput); -} -#endif - -#ifndef SQLITE_OMIT_TRIGGER -/* This function is used by SQL generated to implement the -** ALTER TABLE command. The first argument is the text of a CREATE TRIGGER -** statement. The second is a table name. The table name in the CREATE -** TRIGGER statement is replaced with the third argument and the result -** returned. This is analagous to renameTableFunc() above, except for CREATE -** TRIGGER, not CREATE INDEX and CREATE TABLE. -*/ -static void renameTriggerFunc( - sqlite3_context *context, - int NotUsed, - sqlite3_value **argv -){ - unsigned char const *zSql = sqlite3_value_text(argv[0]); - unsigned char const *zTableName = sqlite3_value_text(argv[1]); - - int token; - Token tname; - int dist = 3; - unsigned char const *zCsr = zSql; - int len = 0; - char *zRet; - sqlite3 *db = sqlite3_context_db_handle(context); - - UNUSED_PARAMETER(NotUsed); - - /* The principle used to locate the table name in the CREATE TRIGGER - ** statement is that the table name is the first token that is immediatedly - ** preceded by either TK_ON or TK_DOT and immediatedly followed by one - ** of TK_WHEN, TK_BEGIN or TK_FOR. - */ - if( zSql ){ - do { - - if( !*zCsr ){ - /* Ran out of input before finding the table name. Return NULL. */ - return; - } - - /* Store the token that zCsr points to in tname. */ - tname.z = (char*)zCsr; - tname.n = len; - - /* Advance zCsr to the next token. Store that token type in 'token', - ** and its length in 'len' (to be used next iteration of this loop). - */ - do { - zCsr += len; - len = sqlite3GetToken(zCsr, &token); - }while( token==TK_SPACE ); - assert( len>0 ); - - /* Variable 'dist' stores the number of tokens read since the most - ** recent TK_DOT or TK_ON. This means that when a WHEN, FOR or BEGIN - ** token is read and 'dist' equals 2, the condition stated above - ** to be met. - ** - ** Note that ON cannot be a database, table or column name, so - ** there is no need to worry about syntax like - ** "CREATE TRIGGER ... ON ON.ON BEGIN ..." etc. 
- */ - dist++; - if( token==TK_DOT || token==TK_ON ){ - dist = 0; - } - } while( dist!=2 || (token!=TK_WHEN && token!=TK_FOR && token!=TK_BEGIN) ); - - /* Variable tname now contains the token that is the old table-name - ** in the CREATE TRIGGER statement. - */ - zRet = sqlite3MPrintf(db, "%.*s\"%w\"%s", (int)(((u8*)tname.z) - zSql), - zSql, zTableName, tname.z+tname.n); - sqlite3_result_text(context, zRet, -1, SQLITE_DYNAMIC); - } -} -#endif /* !SQLITE_OMIT_TRIGGER */ - -/* -** Register built-in functions used to help implement ALTER TABLE -*/ -SQLITE_PRIVATE void sqlite3AlterFunctions(void){ - static SQLITE_WSD FuncDef aAlterTableFuncs[] = { - FUNCTION(sqlite_rename_table, 2, 0, 0, renameTableFunc), -#ifndef SQLITE_OMIT_TRIGGER - FUNCTION(sqlite_rename_trigger, 2, 0, 0, renameTriggerFunc), -#endif -#ifndef SQLITE_OMIT_FOREIGN_KEY - FUNCTION(sqlite_rename_parent, 3, 0, 0, renameParentFunc), -#endif - }; - int i; - FuncDefHash *pHash = &GLOBAL(FuncDefHash, sqlite3GlobalFunctions); - FuncDef *aFunc = (FuncDef*)&GLOBAL(FuncDef, aAlterTableFuncs); - - for(i=0; i OR name= OR ... -** -** If argument zWhere is NULL, then a pointer string containing the text -** "name=" is returned, where is the quoted version -** of the string passed as argument zConstant. The returned buffer is -** allocated using sqlite3DbMalloc(). It is the responsibility of the -** caller to ensure that it is eventually freed. -** -** If argument zWhere is not NULL, then the string returned is -** " OR name=", where is the contents of zWhere. -** In this case zWhere is passed to sqlite3DbFree() before returning. -** -*/ -static char *whereOrName(sqlite3 *db, char *zWhere, char *zConstant){ - char *zNew; - if( !zWhere ){ - zNew = sqlite3MPrintf(db, "name=%Q", zConstant); - }else{ - zNew = sqlite3MPrintf(db, "%s OR name=%Q", zWhere, zConstant); - sqlite3DbFree(db, zWhere); - } - return zNew; -} - -#if !defined(SQLITE_OMIT_FOREIGN_KEY) && !defined(SQLITE_OMIT_TRIGGER) -/* -** Generate the text of a WHERE expression which can be used to select all -** tables that have foreign key constraints that refer to table pTab (i.e. -** constraints for which pTab is the parent table) from the sqlite_master -** table. -*/ -static char *whereForeignKeys(Parse *pParse, Table *pTab){ - FKey *p; - char *zWhere = 0; - for(p=sqlite3FkReferences(pTab); p; p=p->pNextTo){ - zWhere = whereOrName(pParse->db, zWhere, p->pFrom->zName); - } - return zWhere; -} -#endif - -/* -** Generate the text of a WHERE expression which can be used to select all -** temporary triggers on table pTab from the sqlite_temp_master table. If -** table pTab has no temporary triggers, or is itself stored in the -** temporary database, NULL is returned. -*/ -static char *whereTempTriggers(Parse *pParse, Table *pTab){ - Trigger *pTrig; - char *zWhere = 0; - const Schema *pTempSchema = pParse->db->aDb[1].pSchema; /* Temp db schema */ - - /* If the table is not located in the temp-db (in which case NULL is - ** returned, loop through the tables list of triggers. For each trigger - ** that is not part of the temp-db schema, add a clause to the WHERE - ** expression being built up in zWhere. 
- */ - if( pTab->pSchema!=pTempSchema ){ - sqlite3 *db = pParse->db; - for(pTrig=sqlite3TriggerList(pParse, pTab); pTrig; pTrig=pTrig->pNext){ - if( pTrig->pSchema==pTempSchema ){ - zWhere = whereOrName(db, zWhere, pTrig->zName); - } - } - } - if( zWhere ){ - char *zNew = sqlite3MPrintf(pParse->db, "type='trigger' AND (%s)", zWhere); - sqlite3DbFree(pParse->db, zWhere); - zWhere = zNew; - } - return zWhere; -} - -/* -** Generate code to drop and reload the internal representation of table -** pTab from the database, including triggers and temporary triggers. -** Argument zName is the name of the table in the database schema at -** the time the generated code is executed. This can be different from -** pTab->zName if this function is being called to code part of an -** "ALTER TABLE RENAME TO" statement. -*/ -static void reloadTableSchema(Parse *pParse, Table *pTab, const char *zName){ - Vdbe *v; - char *zWhere; - int iDb; /* Index of database containing pTab */ -#ifndef SQLITE_OMIT_TRIGGER - Trigger *pTrig; -#endif - - v = sqlite3GetVdbe(pParse); - if( NEVER(v==0) ) return; - assert( sqlite3BtreeHoldsAllMutexes(pParse->db) ); - iDb = sqlite3SchemaToIndex(pParse->db, pTab->pSchema); - assert( iDb>=0 ); - -#ifndef SQLITE_OMIT_TRIGGER - /* Drop any table triggers from the internal schema. */ - for(pTrig=sqlite3TriggerList(pParse, pTab); pTrig; pTrig=pTrig->pNext){ - int iTrigDb = sqlite3SchemaToIndex(pParse->db, pTrig->pSchema); - assert( iTrigDb==iDb || iTrigDb==1 ); - sqlite3VdbeAddOp4(v, OP_DropTrigger, iTrigDb, 0, 0, pTrig->zName, 0); - } -#endif - - /* Drop the table and index from the internal schema. */ - sqlite3VdbeAddOp4(v, OP_DropTable, iDb, 0, 0, pTab->zName, 0); - - /* Reload the table, index and permanent trigger schemas. */ - zWhere = sqlite3MPrintf(pParse->db, "tbl_name=%Q", zName); - if( !zWhere ) return; - sqlite3VdbeAddParseSchemaOp(v, iDb, zWhere); - -#ifndef SQLITE_OMIT_TRIGGER - /* Now, if the table is not stored in the temp database, reload any temp - ** triggers. Don't use IN(...) in case SQLITE_OMIT_SUBQUERY is defined. - */ - if( (zWhere=whereTempTriggers(pParse, pTab))!=0 ){ - sqlite3VdbeAddParseSchemaOp(v, 1, zWhere); - } -#endif -} - -/* -** Parameter zName is the name of a table that is about to be altered -** (either with ALTER TABLE ... RENAME TO or ALTER TABLE ... ADD COLUMN). -** If the table is a system table, this function leaves an error message -** in pParse->zErr (system tables may not be altered) and returns non-zero. -** -** Or, if zName is not a system table, zero is returned. -*/ -static int isSystemTable(Parse *pParse, const char *zName){ - if( sqlite3Strlen30(zName)>6 && 0==sqlite3StrNICmp(zName, "sqlite_", 7) ){ - sqlite3ErrorMsg(pParse, "table %s may not be altered", zName); - return 1; - } - return 0; -} - -/* -** Generate code to implement the "ALTER TABLE xxx RENAME TO yyy" -** command. -*/ -SQLITE_PRIVATE void sqlite3AlterRenameTable( - Parse *pParse, /* Parser context. */ - SrcList *pSrc, /* The table to rename. */ - Token *pName /* The new table name. 
*/ -){ - int iDb; /* Database that contains the table */ - char *zDb; /* Name of database iDb */ - Table *pTab; /* Table being renamed */ - char *zName = 0; /* NULL-terminated version of pName */ - sqlite3 *db = pParse->db; /* Database connection */ - int nTabName; /* Number of UTF-8 characters in zTabName */ - const char *zTabName; /* Original name of the table */ - Vdbe *v; -#ifndef SQLITE_OMIT_TRIGGER - char *zWhere = 0; /* Where clause to locate temp triggers */ -#endif - VTable *pVTab = 0; /* Non-zero if this is a v-tab with an xRename() */ - int savedDbFlags; /* Saved value of db->flags */ - - savedDbFlags = db->flags; - if( NEVER(db->mallocFailed) ) goto exit_rename_table; - assert( pSrc->nSrc==1 ); - assert( sqlite3BtreeHoldsAllMutexes(pParse->db) ); - - pTab = sqlite3LocateTableItem(pParse, 0, &pSrc->a[0]); - if( !pTab ) goto exit_rename_table; - iDb = sqlite3SchemaToIndex(pParse->db, pTab->pSchema); - zDb = db->aDb[iDb].zName; - db->flags |= SQLITE_PreferBuiltin; - - /* Get a NULL terminated version of the new table name. */ - zName = sqlite3NameFromToken(db, pName); - if( !zName ) goto exit_rename_table; - - /* Check that a table or index named 'zName' does not already exist - ** in database iDb. If so, this is an error. - */ - if( sqlite3FindTable(db, zName, zDb) || sqlite3FindIndex(db, zName, zDb) ){ - sqlite3ErrorMsg(pParse, - "there is already another table or index with this name: %s", zName); - goto exit_rename_table; - } - - /* Make sure it is not a system table being altered, or a reserved name - ** that the table is being renamed to. - */ - if( SQLITE_OK!=isSystemTable(pParse, pTab->zName) ){ - goto exit_rename_table; - } - if( SQLITE_OK!=sqlite3CheckObjectName(pParse, zName) ){ goto - exit_rename_table; - } - -#ifndef SQLITE_OMIT_VIEW - if( pTab->pSelect ){ - sqlite3ErrorMsg(pParse, "view %s may not be altered", pTab->zName); - goto exit_rename_table; - } -#endif - -#ifndef SQLITE_OMIT_AUTHORIZATION - /* Invoke the authorization callback. */ - if( sqlite3AuthCheck(pParse, SQLITE_ALTER_TABLE, zDb, pTab->zName, 0) ){ - goto exit_rename_table; - } -#endif - -#ifndef SQLITE_OMIT_VIRTUALTABLE - if( sqlite3ViewGetColumnNames(pParse, pTab) ){ - goto exit_rename_table; - } - if( IsVirtual(pTab) ){ - pVTab = sqlite3GetVTable(db, pTab); - if( pVTab->pVtab->pModule->xRename==0 ){ - pVTab = 0; - } - } -#endif - - /* Begin a transaction for database iDb. - ** Then modify the schema cookie (since the ALTER TABLE modifies the - ** schema). Open a statement transaction if the table is a virtual - ** table. - */ - v = sqlite3GetVdbe(pParse); - if( v==0 ){ - goto exit_rename_table; - } - sqlite3BeginWriteOperation(pParse, pVTab!=0, iDb); - sqlite3ChangeCookie(pParse, iDb); - - /* If this is a virtual table, invoke the xRename() function if - ** one is defined. The xRename() callback will modify the names - ** of any resources used by the v-table implementation (including other - ** SQLite tables) that are identified by the name of the virtual table. 
- */ -#ifndef SQLITE_OMIT_VIRTUALTABLE - if( pVTab ){ - int i = ++pParse->nMem; - sqlite3VdbeAddOp4(v, OP_String8, 0, i, 0, zName, 0); - sqlite3VdbeAddOp4(v, OP_VRename, i, 0, 0,(const char*)pVTab, P4_VTAB); - sqlite3MayAbort(pParse); - } -#endif - - /* figure out how many UTF-8 characters are in zName */ - zTabName = pTab->zName; - nTabName = sqlite3Utf8CharLen(zTabName, -1); - -#if !defined(SQLITE_OMIT_FOREIGN_KEY) && !defined(SQLITE_OMIT_TRIGGER) - if( db->flags&SQLITE_ForeignKeys ){ - /* If foreign-key support is enabled, rewrite the CREATE TABLE - ** statements corresponding to all child tables of foreign key constraints - ** for which the renamed table is the parent table. */ - if( (zWhere=whereForeignKeys(pParse, pTab))!=0 ){ - sqlite3NestedParse(pParse, - "UPDATE \"%w\".%s SET " - "sql = sqlite_rename_parent(sql, %Q, %Q) " - "WHERE %s;", zDb, SCHEMA_TABLE(iDb), zTabName, zName, zWhere); - sqlite3DbFree(db, zWhere); - } - } -#endif - - /* Modify the sqlite_master table to use the new table name. */ - sqlite3NestedParse(pParse, - "UPDATE %Q.%s SET " -#ifdef SQLITE_OMIT_TRIGGER - "sql = sqlite_rename_table(sql, %Q), " -#else - "sql = CASE " - "WHEN type = 'trigger' THEN sqlite_rename_trigger(sql, %Q)" - "ELSE sqlite_rename_table(sql, %Q) END, " -#endif - "tbl_name = %Q, " - "name = CASE " - "WHEN type='table' THEN %Q " - "WHEN name LIKE 'sqlite_autoindex%%' AND type='index' THEN " - "'sqlite_autoindex_' || %Q || substr(name,%d+18) " - "ELSE name END " - "WHERE tbl_name=%Q COLLATE nocase AND " - "(type='table' OR type='index' OR type='trigger');", - zDb, SCHEMA_TABLE(iDb), zName, zName, zName, -#ifndef SQLITE_OMIT_TRIGGER - zName, -#endif - zName, nTabName, zTabName - ); - -#ifndef SQLITE_OMIT_AUTOINCREMENT - /* If the sqlite_sequence table exists in this database, then update - ** it with the new table name. - */ - if( sqlite3FindTable(db, "sqlite_sequence", zDb) ){ - sqlite3NestedParse(pParse, - "UPDATE \"%w\".sqlite_sequence set name = %Q WHERE name = %Q", - zDb, zName, pTab->zName); - } -#endif - -#ifndef SQLITE_OMIT_TRIGGER - /* If there are TEMP triggers on this table, modify the sqlite_temp_master - ** table. Don't do this if the table being ALTERed is itself located in - ** the temp database. - */ - if( (zWhere=whereTempTriggers(pParse, pTab))!=0 ){ - sqlite3NestedParse(pParse, - "UPDATE sqlite_temp_master SET " - "sql = sqlite_rename_trigger(sql, %Q), " - "tbl_name = %Q " - "WHERE %s;", zName, zName, zWhere); - sqlite3DbFree(db, zWhere); - } -#endif - -#if !defined(SQLITE_OMIT_FOREIGN_KEY) && !defined(SQLITE_OMIT_TRIGGER) - if( db->flags&SQLITE_ForeignKeys ){ - FKey *p; - for(p=sqlite3FkReferences(pTab); p; p=p->pNextTo){ - Table *pFrom = p->pFrom; - if( pFrom!=pTab ){ - reloadTableSchema(pParse, p->pFrom, pFrom->zName); - } - } - } -#endif - - /* Drop and reload the internal table schema. */ - reloadTableSchema(pParse, pTab, zName); - -exit_rename_table: - sqlite3SrcListDelete(db, pSrc); - sqlite3DbFree(db, zName); - db->flags = savedDbFlags; -} - - -/* -** Generate code to make sure the file format number is at least minFormat. -** The generated code will increase the file format number if necessary. -*/ -SQLITE_PRIVATE void sqlite3MinimumFileFormat(Parse *pParse, int iDb, int minFormat){ - Vdbe *v; - v = sqlite3GetVdbe(pParse); - /* The VDBE should have been allocated before this routine is called. 
- ** If that allocation failed, we would have quit before reaching this - ** point */ - if( ALWAYS(v) ){ - int r1 = sqlite3GetTempReg(pParse); - int r2 = sqlite3GetTempReg(pParse); - int j1; - sqlite3VdbeAddOp3(v, OP_ReadCookie, iDb, r1, BTREE_FILE_FORMAT); - sqlite3VdbeUsesBtree(v, iDb); - sqlite3VdbeAddOp2(v, OP_Integer, minFormat, r2); - j1 = sqlite3VdbeAddOp3(v, OP_Ge, r2, 0, r1); - sqlite3VdbeChangeP5(v, SQLITE_NOTNULL); VdbeCoverage(v); - sqlite3VdbeAddOp3(v, OP_SetCookie, iDb, BTREE_FILE_FORMAT, r2); - sqlite3VdbeJumpHere(v, j1); - sqlite3ReleaseTempReg(pParse, r1); - sqlite3ReleaseTempReg(pParse, r2); - } -} - -/* -** This function is called after an "ALTER TABLE ... ADD" statement -** has been parsed. Argument pColDef contains the text of the new -** column definition. -** -** The Table structure pParse->pNewTable was extended to include -** the new column during parsing. -*/ -SQLITE_PRIVATE void sqlite3AlterFinishAddColumn(Parse *pParse, Token *pColDef){ - Table *pNew; /* Copy of pParse->pNewTable */ - Table *pTab; /* Table being altered */ - int iDb; /* Database number */ - const char *zDb; /* Database name */ - const char *zTab; /* Table name */ - char *zCol; /* Null-terminated column definition */ - Column *pCol; /* The new column */ - Expr *pDflt; /* Default value for the new column */ - sqlite3 *db; /* The database connection; */ - - db = pParse->db; - if( pParse->nErr || db->mallocFailed ) return; - pNew = pParse->pNewTable; - assert( pNew ); - - assert( sqlite3BtreeHoldsAllMutexes(db) ); - iDb = sqlite3SchemaToIndex(db, pNew->pSchema); - zDb = db->aDb[iDb].zName; - zTab = &pNew->zName[16]; /* Skip the "sqlite_altertab_" prefix on the name */ - pCol = &pNew->aCol[pNew->nCol-1]; - pDflt = pCol->pDflt; - pTab = sqlite3FindTable(db, zTab, zDb); - assert( pTab ); - -#ifndef SQLITE_OMIT_AUTHORIZATION - /* Invoke the authorization callback. */ - if( sqlite3AuthCheck(pParse, SQLITE_ALTER_TABLE, zDb, pTab->zName, 0) ){ - return; - } -#endif - - /* If the default value for the new column was specified with a - ** literal NULL, then set pDflt to 0. This simplifies checking - ** for an SQL NULL default below. - */ - if( pDflt && pDflt->op==TK_NULL ){ - pDflt = 0; - } - - /* Check that the new column is not specified as PRIMARY KEY or UNIQUE. - ** If there is a NOT NULL constraint, then the default value for the - ** column must not be NULL. - */ - if( pCol->colFlags & COLFLAG_PRIMKEY ){ - sqlite3ErrorMsg(pParse, "Cannot add a PRIMARY KEY column"); - return; - } - if( pNew->pIndex ){ - sqlite3ErrorMsg(pParse, "Cannot add a UNIQUE column"); - return; - } - if( (db->flags&SQLITE_ForeignKeys) && pNew->pFKey && pDflt ){ - sqlite3ErrorMsg(pParse, - "Cannot add a REFERENCES column with non-NULL default value"); - return; - } - if( pCol->notNull && !pDflt ){ - sqlite3ErrorMsg(pParse, - "Cannot add a NOT NULL column with default value NULL"); - return; - } - - /* Ensure the default expression is something that sqlite3ValueFromExpr() - ** can handle (i.e. not CURRENT_TIME etc.) - */ - if( pDflt ){ - sqlite3_value *pVal = 0; - if( sqlite3ValueFromExpr(db, pDflt, SQLITE_UTF8, SQLITE_AFF_NONE, &pVal) ){ - db->mallocFailed = 1; - return; - } - if( !pVal ){ - sqlite3ErrorMsg(pParse, "Cannot add a column with non-constant default"); - return; - } - sqlite3ValueFree(pVal); - } - - /* Modify the CREATE TABLE statement. 
*/ - zCol = sqlite3DbStrNDup(db, (char*)pColDef->z, pColDef->n); - if( zCol ){ - char *zEnd = &zCol[pColDef->n-1]; - int savedDbFlags = db->flags; - while( zEnd>zCol && (*zEnd==';' || sqlite3Isspace(*zEnd)) ){ - *zEnd-- = '\0'; - } - db->flags |= SQLITE_PreferBuiltin; - sqlite3NestedParse(pParse, - "UPDATE \"%w\".%s SET " - "sql = substr(sql,1,%d) || ', ' || %Q || substr(sql,%d) " - "WHERE type = 'table' AND name = %Q", - zDb, SCHEMA_TABLE(iDb), pNew->addColOffset, zCol, pNew->addColOffset+1, - zTab - ); - sqlite3DbFree(db, zCol); - db->flags = savedDbFlags; - } - - /* If the default value of the new column is NULL, then set the file - ** format to 2. If the default value of the new column is not NULL, - ** the file format becomes 3. - */ - sqlite3MinimumFileFormat(pParse, iDb, pDflt ? 3 : 2); - - /* Reload the schema of the modified table. */ - reloadTableSchema(pParse, pTab, pTab->zName); -} - -/* -** This function is called by the parser after the table-name in -** an "ALTER TABLE ADD" statement is parsed. Argument -** pSrc is the full-name of the table being altered. -** -** This routine makes a (partial) copy of the Table structure -** for the table being altered and sets Parse.pNewTable to point -** to it. Routines called by the parser as the column definition -** is parsed (i.e. sqlite3AddColumn()) add the new Column data to -** the copy. The copy of the Table structure is deleted by tokenize.c -** after parsing is finished. -** -** Routine sqlite3AlterFinishAddColumn() will be called to complete -** coding the "ALTER TABLE ... ADD" statement. -*/ -SQLITE_PRIVATE void sqlite3AlterBeginAddColumn(Parse *pParse, SrcList *pSrc){ - Table *pNew; - Table *pTab; - Vdbe *v; - int iDb; - int i; - int nAlloc; - sqlite3 *db = pParse->db; - - /* Look up the table being altered. */ - assert( pParse->pNewTable==0 ); - assert( sqlite3BtreeHoldsAllMutexes(db) ); - if( db->mallocFailed ) goto exit_begin_add_column; - pTab = sqlite3LocateTableItem(pParse, 0, &pSrc->a[0]); - if( !pTab ) goto exit_begin_add_column; - -#ifndef SQLITE_OMIT_VIRTUALTABLE - if( IsVirtual(pTab) ){ - sqlite3ErrorMsg(pParse, "virtual tables may not be altered"); - goto exit_begin_add_column; - } -#endif - - /* Make sure this is not an attempt to ALTER a view. */ - if( pTab->pSelect ){ - sqlite3ErrorMsg(pParse, "Cannot add a column to a view"); - goto exit_begin_add_column; - } - if( SQLITE_OK!=isSystemTable(pParse, pTab->zName) ){ - goto exit_begin_add_column; - } - - assert( pTab->addColOffset>0 ); - iDb = sqlite3SchemaToIndex(db, pTab->pSchema); - - /* Put a copy of the Table struct in Parse.pNewTable for the - ** sqlite3AddColumn() function and friends to modify. But modify - ** the name by adding an "sqlite_altertab_" prefix. By adding this - ** prefix, we insure that the name will not collide with an existing - ** table because user table are not allowed to have the "sqlite_" - ** prefix on their name. 
- */ - pNew = (Table*)sqlite3DbMallocZero(db, sizeof(Table)); - if( !pNew ) goto exit_begin_add_column; - pParse->pNewTable = pNew; - pNew->nRef = 1; - pNew->nCol = pTab->nCol; - assert( pNew->nCol>0 ); - nAlloc = (((pNew->nCol-1)/8)*8)+8; - assert( nAlloc>=pNew->nCol && nAlloc%8==0 && nAlloc-pNew->nCol<8 ); - pNew->aCol = (Column*)sqlite3DbMallocZero(db, sizeof(Column)*nAlloc); - pNew->zName = sqlite3MPrintf(db, "sqlite_altertab_%s", pTab->zName); - if( !pNew->aCol || !pNew->zName ){ - db->mallocFailed = 1; - goto exit_begin_add_column; - } - memcpy(pNew->aCol, pTab->aCol, sizeof(Column)*pNew->nCol); - for(i=0; inCol; i++){ - Column *pCol = &pNew->aCol[i]; - pCol->zName = sqlite3DbStrDup(db, pCol->zName); - pCol->zColl = 0; - pCol->zType = 0; - pCol->pDflt = 0; - pCol->zDflt = 0; - } - pNew->pSchema = db->aDb[iDb].pSchema; - pNew->addColOffset = pTab->addColOffset; - pNew->nRef = 1; - - /* Begin a transaction and increment the schema cookie. */ - sqlite3BeginWriteOperation(pParse, 0, iDb); - v = sqlite3GetVdbe(pParse); - if( !v ) goto exit_begin_add_column; - sqlite3ChangeCookie(pParse, iDb); - -exit_begin_add_column: - sqlite3SrcListDelete(db, pSrc); - return; -} -#endif /* SQLITE_ALTER_TABLE */ - -/************** End of alter.c ***********************************************/ -/************** Begin file analyze.c *****************************************/ -/* -** 2005-07-08 -** -** The author disclaims copyright to this source code. In place of -** a legal notice, here is a blessing: -** -** May you do good and not evil. -** May you find forgiveness for yourself and forgive others. -** May you share freely, never taking more than you give. -** -************************************************************************* -** This file contains code associated with the ANALYZE command. -** -** The ANALYZE command gather statistics about the content of tables -** and indices. These statistics are made available to the query planner -** to help it make better decisions about how to perform queries. -** -** The following system tables are or have been supported: -** -** CREATE TABLE sqlite_stat1(tbl, idx, stat); -** CREATE TABLE sqlite_stat2(tbl, idx, sampleno, sample); -** CREATE TABLE sqlite_stat3(tbl, idx, nEq, nLt, nDLt, sample); -** CREATE TABLE sqlite_stat4(tbl, idx, nEq, nLt, nDLt, sample); -** -** Additional tables might be added in future releases of SQLite. -** The sqlite_stat2 table is not created or used unless the SQLite version -** is between 3.6.18 and 3.7.8, inclusive, and unless SQLite is compiled -** with SQLITE_ENABLE_STAT2. The sqlite_stat2 table is deprecated. -** The sqlite_stat2 table is superseded by sqlite_stat3, which is only -** created and used by SQLite versions 3.7.9 and later and with -** SQLITE_ENABLE_STAT3 defined. The functionality of sqlite_stat3 -** is a superset of sqlite_stat2. The sqlite_stat4 is an enhanced -** version of sqlite_stat3 and is only available when compiled with -** SQLITE_ENABLE_STAT4 and in SQLite versions 3.8.1 and later. It is -** not possible to enable both STAT3 and STAT4 at the same time. If they -** are both enabled, then STAT4 takes precedence. -** -** For most applications, sqlite_stat1 provides all the statisics required -** for the query planner to make good choices. -** -** Format of sqlite_stat1: -** -** There is normally one row per index, with the index identified by the -** name in the idx column. The tbl column is the name of the table to -** which the index belongs. 
In each such row, the stat column will be -** a string consisting of a list of integers. The first integer in this -** list is the number of rows in the index. (This is the same as the -** number of rows in the table, except for partial indices.) The second -** integer is the average number of rows in the index that have the same -** value in the first column of the index. The third integer is the average -** number of rows in the index that have the same value for the first two -** columns. The N-th integer (for N>1) is the average number of rows in -** the index which have the same value for the first N-1 columns. For -** a K-column index, there will be K+1 integers in the stat column. If -** the index is unique, then the last integer will be 1. -** -** The list of integers in the stat column can optionally be followed -** by the keyword "unordered". The "unordered" keyword, if it is present, -** must be separated from the last integer by a single space. If the -** "unordered" keyword is present, then the query planner assumes that -** the index is unordered and will not use the index for a range query. -** -** If the sqlite_stat1.idx column is NULL, then the sqlite_stat1.stat -** column contains a single integer which is the (estimated) number of -** rows in the table identified by sqlite_stat1.tbl. -** -** Format of sqlite_stat2: -** -** The sqlite_stat2 is only created and is only used if SQLite is compiled -** with SQLITE_ENABLE_STAT2 and if the SQLite version number is between -** 3.6.18 and 3.7.8. The "stat2" table contains additional information -** about the distribution of keys within an index. The index is identified by -** the "idx" column and the "tbl" column is the name of the table to which -** the index belongs. There are usually 10 rows in the sqlite_stat2 -** table for each index. -** -** The sqlite_stat2 entries for an index that have sampleno between 0 and 9 -** inclusive are samples of the left-most key value in the index taken at -** evenly spaced points along the index. Let the number of samples be S -** (10 in the standard build) and let C be the number of rows in the index. -** Then the sampled rows are given by: -** -** rownumber = (i*C*2 + C)/(S*2) -** -** For i between 0 and S-1. Conceptually, the index space is divided into -** S uniform buckets and the samples are the middle row from each bucket. -** -** The format for sqlite_stat2 is recorded here for legacy reference. This -** version of SQLite does not support sqlite_stat2. It neither reads nor -** writes the sqlite_stat2 table. This version of SQLite only supports -** sqlite_stat3. -** -** Format for sqlite_stat3: -** -** The sqlite_stat3 format is a subset of sqlite_stat4. Hence, the -** sqlite_stat4 format will be described first. Further information -** about sqlite_stat3 follows the sqlite_stat4 description. -** -** Format for sqlite_stat4: -** -** As with sqlite_stat2, the sqlite_stat4 table contains histogram data -** to aid the query planner in choosing good indices based on the values -** that indexed columns are compared against in the WHERE clauses of -** queries. -** -** The sqlite_stat4 table contains multiple entries for each index. -** The idx column names the index and the tbl column is the table of the -** index. If the idx and tbl columns are the same, then the sample is -** of the INTEGER PRIMARY KEY. The sample column is a blob which is the -** binary encoding of a key from the index. The nEq column is a -** list of integers. 
The first integer is the approximate number -** of entries in the index whose left-most column exactly matches -** the left-most column of the sample. The second integer in nEq -** is the approximate number of entries in the index where the -** first two columns match the first two columns of the sample. -** And so forth. nLt is another list of integers that show the approximate -** number of entries that are strictly less than the sample. The first -** integer in nLt contains the number of entries in the index where the -** left-most column is less than the left-most column of the sample. -** The K-th integer in the nLt entry is the number of index entries -** where the first K columns are less than the first K columns of the -** sample. The nDLt column is like nLt except that it contains the -** number of distinct entries in the index that are less than the -** sample. -** -** There can be an arbitrary number of sqlite_stat4 entries per index. -** The ANALYZE command will typically generate sqlite_stat4 tables -** that contain between 10 and 40 samples which are distributed across -** the key space, though not uniformly, and which include samples with -** large nEq values. -** -** Format for sqlite_stat3 redux: -** -** The sqlite_stat3 table is like sqlite_stat4 except that it only -** looks at the left-most column of the index. The sqlite_stat3.sample -** column contains the actual value of the left-most column instead -** of a blob encoding of the complete index key as is found in -** sqlite_stat4.sample. The nEq, nLt, and nDLt entries of sqlite_stat3 -** all contain just a single integer which is the same as the first -** integer in the equivalent columns in sqlite_stat4. -*/ -#ifndef SQLITE_OMIT_ANALYZE - -#if defined(SQLITE_ENABLE_STAT4) -# define IsStat4 1 -# define IsStat3 0 -#elif defined(SQLITE_ENABLE_STAT3) -# define IsStat4 0 -# define IsStat3 1 -#else -# define IsStat4 0 -# define IsStat3 0 -# undef SQLITE_STAT4_SAMPLES -# define SQLITE_STAT4_SAMPLES 1 -#endif -#define IsStat34 (IsStat3+IsStat4) /* 1 for STAT3 or STAT4. 0 otherwise */ - -/* -** This routine generates code that opens the sqlite_statN tables. -** The sqlite_stat1 table is always relevant. sqlite_stat2 is now -** obsolete. sqlite_stat3 and sqlite_stat4 are only opened when -** appropriate compile-time options are provided. -** -** If the sqlite_statN tables do not previously exist, it is created. -** -** Argument zWhere may be a pointer to a buffer containing a table name, -** or it may be a NULL pointer. If it is not NULL, then all entries in -** the sqlite_statN tables associated with the named table are deleted. -** If zWhere==0, then code is generated to delete all stat table entries. 
-*/ -static void openStatTable( - Parse *pParse, /* Parsing context */ - int iDb, /* The database we are looking in */ - int iStatCur, /* Open the sqlite_stat1 table on this cursor */ - const char *zWhere, /* Delete entries for this table or index */ - const char *zWhereType /* Either "tbl" or "idx" */ -){ - static const struct { - const char *zName; - const char *zCols; - } aTable[] = { - { "sqlite_stat1", "tbl,idx,stat" }, -#if defined(SQLITE_ENABLE_STAT4) - { "sqlite_stat4", "tbl,idx,neq,nlt,ndlt,sample" }, - { "sqlite_stat3", 0 }, -#elif defined(SQLITE_ENABLE_STAT3) - { "sqlite_stat3", "tbl,idx,neq,nlt,ndlt,sample" }, - { "sqlite_stat4", 0 }, -#else - { "sqlite_stat3", 0 }, - { "sqlite_stat4", 0 }, -#endif - }; - int i; - sqlite3 *db = pParse->db; - Db *pDb; - Vdbe *v = sqlite3GetVdbe(pParse); - int aRoot[ArraySize(aTable)]; - u8 aCreateTbl[ArraySize(aTable)]; - - if( v==0 ) return; - assert( sqlite3BtreeHoldsAllMutexes(db) ); - assert( sqlite3VdbeDb(v)==db ); - pDb = &db->aDb[iDb]; - - /* Create new statistic tables if they do not exist, or clear them - ** if they do already exist. - */ - for(i=0; izName))==0 ){ - if( aTable[i].zCols ){ - /* The sqlite_statN table does not exist. Create it. Note that a - ** side-effect of the CREATE TABLE statement is to leave the rootpage - ** of the new table in register pParse->regRoot. This is important - ** because the OpenWrite opcode below will be needing it. */ - sqlite3NestedParse(pParse, - "CREATE TABLE %Q.%s(%s)", pDb->zName, zTab, aTable[i].zCols - ); - aRoot[i] = pParse->regRoot; - aCreateTbl[i] = OPFLAG_P2ISREG; - } - }else{ - /* The table already exists. If zWhere is not NULL, delete all entries - ** associated with the table zWhere. If zWhere is NULL, delete the - ** entire contents of the table. */ - aRoot[i] = pStat->tnum; - aCreateTbl[i] = 0; - sqlite3TableLock(pParse, iDb, aRoot[i], 1, zTab); - if( zWhere ){ - sqlite3NestedParse(pParse, - "DELETE FROM %Q.%s WHERE %s=%Q", - pDb->zName, zTab, zWhereType, zWhere - ); - }else{ - /* The sqlite_stat[134] table already exists. Delete all rows. */ - sqlite3VdbeAddOp2(v, OP_Clear, aRoot[i], iDb); - } - } - } - - /* Open the sqlite_stat[134] tables for writing. */ - for(i=0; aTable[i].zCols; i++){ - assert( inRowid ){ - sqlite3DbFree(db, p->u.aRowid); - p->nRowid = 0; - } -} -#endif - -/* Initialize the BLOB value of a ROWID -*/ -#ifdef SQLITE_ENABLE_STAT3_OR_STAT4 -static void sampleSetRowid(sqlite3 *db, Stat4Sample *p, int n, const u8 *pData){ - assert( db!=0 ); - if( p->nRowid ) sqlite3DbFree(db, p->u.aRowid); - p->u.aRowid = sqlite3DbMallocRaw(db, n); - if( p->u.aRowid ){ - p->nRowid = n; - memcpy(p->u.aRowid, pData, n); - }else{ - p->nRowid = 0; - } -} -#endif - -/* Initialize the INTEGER value of a ROWID. -*/ -#ifdef SQLITE_ENABLE_STAT3_OR_STAT4 -static void sampleSetRowidInt64(sqlite3 *db, Stat4Sample *p, i64 iRowid){ - assert( db!=0 ); - if( p->nRowid ) sqlite3DbFree(db, p->u.aRowid); - p->nRowid = 0; - p->u.iRowid = iRowid; -} -#endif - - -/* -** Copy the contents of object (*pFrom) into (*pTo). 
-*/ -#ifdef SQLITE_ENABLE_STAT3_OR_STAT4 -static void sampleCopy(Stat4Accum *p, Stat4Sample *pTo, Stat4Sample *pFrom){ - pTo->isPSample = pFrom->isPSample; - pTo->iCol = pFrom->iCol; - pTo->iHash = pFrom->iHash; - memcpy(pTo->anEq, pFrom->anEq, sizeof(tRowcnt)*p->nCol); - memcpy(pTo->anLt, pFrom->anLt, sizeof(tRowcnt)*p->nCol); - memcpy(pTo->anDLt, pFrom->anDLt, sizeof(tRowcnt)*p->nCol); - if( pFrom->nRowid ){ - sampleSetRowid(p->db, pTo, pFrom->nRowid, pFrom->u.aRowid); - }else{ - sampleSetRowidInt64(p->db, pTo, pFrom->u.iRowid); - } -} -#endif - -/* -** Reclaim all memory of a Stat4Accum structure. -*/ -static void stat4Destructor(void *pOld){ - Stat4Accum *p = (Stat4Accum*)pOld; -#ifdef SQLITE_ENABLE_STAT3_OR_STAT4 - int i; - for(i=0; inCol; i++) sampleClear(p->db, p->aBest+i); - for(i=0; imxSample; i++) sampleClear(p->db, p->a+i); - sampleClear(p->db, &p->current); -#endif - sqlite3DbFree(p->db, p); -} - -/* -** Implementation of the stat_init(N,C) SQL function. The two parameters -** are the number of rows in the table or index (C) and the number of columns -** in the index (N). The second argument (C) is only used for STAT3 and STAT4. -** -** This routine allocates the Stat4Accum object in heap memory. The return -** value is a pointer to the the Stat4Accum object encoded as a blob (i.e. -** the size of the blob is sizeof(void*) bytes). -*/ -static void statInit( - sqlite3_context *context, - int argc, - sqlite3_value **argv -){ - Stat4Accum *p; - int nCol; /* Number of columns in index being sampled */ - int nColUp; /* nCol rounded up for alignment */ - int n; /* Bytes of space to allocate */ - sqlite3 *db; /* Database connection */ -#ifdef SQLITE_ENABLE_STAT3_OR_STAT4 - int mxSample = SQLITE_STAT4_SAMPLES; -#endif - - /* Decode the three function arguments */ - UNUSED_PARAMETER(argc); - nCol = sqlite3_value_int(argv[0]); - assert( nCol>1 ); /* >1 because it includes the rowid column */ - nColUp = sizeof(tRowcnt)<8 ? 
(nCol+1)&~1 : nCol; - - /* Allocate the space required for the Stat4Accum object */ - n = sizeof(*p) - + sizeof(tRowcnt)*nColUp /* Stat4Accum.anEq */ - + sizeof(tRowcnt)*nColUp /* Stat4Accum.anDLt */ -#ifdef SQLITE_ENABLE_STAT3_OR_STAT4 - + sizeof(tRowcnt)*nColUp /* Stat4Accum.anLt */ - + sizeof(Stat4Sample)*(nCol+mxSample) /* Stat4Accum.aBest[], a[] */ - + sizeof(tRowcnt)*3*nColUp*(nCol+mxSample) -#endif - ; - db = sqlite3_context_db_handle(context); - p = sqlite3DbMallocZero(db, n); - if( p==0 ){ - sqlite3_result_error_nomem(context); - return; - } - - p->db = db; - p->nRow = 0; - p->nCol = nCol; - p->current.anDLt = (tRowcnt*)&p[1]; - p->current.anEq = &p->current.anDLt[nColUp]; - -#ifdef SQLITE_ENABLE_STAT3_OR_STAT4 - { - u8 *pSpace; /* Allocated space not yet assigned */ - int i; /* Used to iterate through p->aSample[] */ - - p->iGet = -1; - p->mxSample = mxSample; - p->nPSample = (tRowcnt)(sqlite3_value_int64(argv[1])/(mxSample/3+1) + 1); - p->current.anLt = &p->current.anEq[nColUp]; - p->iPrn = nCol*0x689e962d ^ sqlite3_value_int(argv[1])*0xd0944565; - - /* Set up the Stat4Accum.a[] and aBest[] arrays */ - p->a = (struct Stat4Sample*)&p->current.anLt[nColUp]; - p->aBest = &p->a[mxSample]; - pSpace = (u8*)(&p->a[mxSample+nCol]); - for(i=0; i<(mxSample+nCol); i++){ - p->a[i].anEq = (tRowcnt *)pSpace; pSpace += (sizeof(tRowcnt) * nColUp); - p->a[i].anLt = (tRowcnt *)pSpace; pSpace += (sizeof(tRowcnt) * nColUp); - p->a[i].anDLt = (tRowcnt *)pSpace; pSpace += (sizeof(tRowcnt) * nColUp); - } - assert( (pSpace - (u8*)p)==n ); - - for(i=0; iaBest[i].iCol = i; - } - } -#endif - - /* Return a pointer to the allocated object to the caller */ - sqlite3_result_blob(context, p, sizeof(p), stat4Destructor); -} -static const FuncDef statInitFuncdef = { - 1+IsStat34, /* nArg */ - SQLITE_UTF8, /* funcFlags */ - 0, /* pUserData */ - 0, /* pNext */ - statInit, /* xFunc */ - 0, /* xStep */ - 0, /* xFinalize */ - "stat_init", /* zName */ - 0, /* pHash */ - 0 /* pDestructor */ -}; - -#ifdef SQLITE_ENABLE_STAT4 -/* -** pNew and pOld are both candidate non-periodic samples selected for -** the same column (pNew->iCol==pOld->iCol). Ignoring this column and -** considering only any trailing columns and the sample hash value, this -** function returns true if sample pNew is to be preferred over pOld. -** In other words, if we assume that the cardinalities of the selected -** column for pNew and pOld are equal, is pNew to be preferred over pOld. -** -** This function assumes that for each argument sample, the contents of -** the anEq[] array from pSample->anEq[pSample->iCol+1] onwards are valid. -*/ -static int sampleIsBetterPost( - Stat4Accum *pAccum, - Stat4Sample *pNew, - Stat4Sample *pOld -){ - int nCol = pAccum->nCol; - int i; - assert( pNew->iCol==pOld->iCol ); - for(i=pNew->iCol+1; ianEq[i]>pOld->anEq[i] ) return 1; - if( pNew->anEq[i]anEq[i] ) return 0; - } - if( pNew->iHash>pOld->iHash ) return 1; - return 0; -} -#endif - -#ifdef SQLITE_ENABLE_STAT3_OR_STAT4 -/* -** Return true if pNew is to be preferred over pOld. -** -** This function assumes that for each argument sample, the contents of -** the anEq[] array from pSample->anEq[pSample->iCol] onwards are valid. 
-*/ -static int sampleIsBetter( - Stat4Accum *pAccum, - Stat4Sample *pNew, - Stat4Sample *pOld -){ - tRowcnt nEqNew = pNew->anEq[pNew->iCol]; - tRowcnt nEqOld = pOld->anEq[pOld->iCol]; - - assert( pOld->isPSample==0 && pNew->isPSample==0 ); - assert( IsStat4 || (pNew->iCol==0 && pOld->iCol==0) ); - - if( (nEqNew>nEqOld) ) return 1; -#ifdef SQLITE_ENABLE_STAT4 - if( nEqNew==nEqOld ){ - if( pNew->iColiCol ) return 1; - return (pNew->iCol==pOld->iCol && sampleIsBetterPost(pAccum, pNew, pOld)); - } - return 0; -#else - return (nEqNew==nEqOld && pNew->iHash>pOld->iHash); -#endif -} - -/* -** Copy the contents of sample *pNew into the p->a[] array. If necessary, -** remove the least desirable sample from p->a[] to make room. -*/ -static void sampleInsert(Stat4Accum *p, Stat4Sample *pNew, int nEqZero){ - Stat4Sample *pSample = 0; - int i; - - assert( IsStat4 || nEqZero==0 ); - -#ifdef SQLITE_ENABLE_STAT4 - if( pNew->isPSample==0 ){ - Stat4Sample *pUpgrade = 0; - assert( pNew->anEq[pNew->iCol]>0 ); - - /* This sample is being added because the prefix that ends in column - ** iCol occurs many times in the table. However, if we have already - ** added a sample that shares this prefix, there is no need to add - ** this one. Instead, upgrade the priority of the highest priority - ** existing sample that shares this prefix. */ - for(i=p->nSample-1; i>=0; i--){ - Stat4Sample *pOld = &p->a[i]; - if( pOld->anEq[pNew->iCol]==0 ){ - if( pOld->isPSample ) return; - assert( pOld->iCol>pNew->iCol ); - assert( sampleIsBetter(p, pNew, pOld) ); - if( pUpgrade==0 || sampleIsBetter(p, pOld, pUpgrade) ){ - pUpgrade = pOld; - } - } - } - if( pUpgrade ){ - pUpgrade->iCol = pNew->iCol; - pUpgrade->anEq[pUpgrade->iCol] = pNew->anEq[pUpgrade->iCol]; - goto find_new_min; - } - } -#endif - - /* If necessary, remove sample iMin to make room for the new sample. */ - if( p->nSample>=p->mxSample ){ - Stat4Sample *pMin = &p->a[p->iMin]; - tRowcnt *anEq = pMin->anEq; - tRowcnt *anLt = pMin->anLt; - tRowcnt *anDLt = pMin->anDLt; - sampleClear(p->db, pMin); - memmove(pMin, &pMin[1], sizeof(p->a[0])*(p->nSample-p->iMin-1)); - pSample = &p->a[p->nSample-1]; - pSample->nRowid = 0; - pSample->anEq = anEq; - pSample->anDLt = anDLt; - pSample->anLt = anLt; - p->nSample = p->mxSample-1; - } - - /* The "rows less-than" for the rowid column must be greater than that - ** for the last sample in the p->a[] array. Otherwise, the samples would - ** be out of order. */ -#ifdef SQLITE_ENABLE_STAT4 - assert( p->nSample==0 - || pNew->anLt[p->nCol-1] > p->a[p->nSample-1].anLt[p->nCol-1] ); -#endif - - /* Insert the new sample */ - pSample = &p->a[p->nSample]; - sampleCopy(p, pSample, pNew); - p->nSample++; - - /* Zero the first nEqZero entries in the anEq[] array. */ - memset(pSample->anEq, 0, sizeof(tRowcnt)*nEqZero); - -#ifdef SQLITE_ENABLE_STAT4 - find_new_min: -#endif - if( p->nSample>=p->mxSample ){ - int iMin = -1; - for(i=0; imxSample; i++){ - if( p->a[i].isPSample ) continue; - if( iMin<0 || sampleIsBetter(p, &p->a[iMin], &p->a[i]) ){ - iMin = i; - } - } - assert( iMin>=0 ); - p->iMin = iMin; - } -} -#endif /* SQLITE_ENABLE_STAT3_OR_STAT4 */ - -/* -** Field iChng of the index being scanned has changed. So at this point -** p->current contains a sample that reflects the previous row of the -** index. The value of anEq[iChng] and subsequent anEq[] elements are -** correct at this point. 
-*/ -static void samplePushPrevious(Stat4Accum *p, int iChng){ -#ifdef SQLITE_ENABLE_STAT4 - int i; - - /* Check if any samples from the aBest[] array should be pushed - ** into IndexSample.a[] at this point. */ - for(i=(p->nCol-2); i>=iChng; i--){ - Stat4Sample *pBest = &p->aBest[i]; - pBest->anEq[i] = p->current.anEq[i]; - if( p->nSamplemxSample || sampleIsBetter(p, pBest, &p->a[p->iMin]) ){ - sampleInsert(p, pBest, i); - } - } - - /* Update the anEq[] fields of any samples already collected. */ - for(i=p->nSample-1; i>=0; i--){ - int j; - for(j=iChng; jnCol; j++){ - if( p->a[i].anEq[j]==0 ) p->a[i].anEq[j] = p->current.anEq[j]; - } - } -#endif - -#if defined(SQLITE_ENABLE_STAT3) && !defined(SQLITE_ENABLE_STAT4) - if( iChng==0 ){ - tRowcnt nLt = p->current.anLt[0]; - tRowcnt nEq = p->current.anEq[0]; - - /* Check if this is to be a periodic sample. If so, add it. */ - if( (nLt/p->nPSample)!=(nLt+nEq)/p->nPSample ){ - p->current.isPSample = 1; - sampleInsert(p, &p->current, 0); - p->current.isPSample = 0; - }else - - /* Or if it is a non-periodic sample. Add it in this case too. */ - if( p->nSamplemxSample - || sampleIsBetter(p, &p->current, &p->a[p->iMin]) - ){ - sampleInsert(p, &p->current, 0); - } - } -#endif - -#ifndef SQLITE_ENABLE_STAT3_OR_STAT4 - UNUSED_PARAMETER( p ); - UNUSED_PARAMETER( iChng ); -#endif -} - -/* -** Implementation of the stat_push SQL function: stat_push(P,C,R) -** Arguments: -** -** P Pointer to the Stat4Accum object created by stat_init() -** C Index of left-most column to differ from previous row -** R Rowid for the current row. Might be a key record for -** WITHOUT ROWID tables. -** -** The SQL function always returns NULL. -** -** The R parameter is only used for STAT3 and STAT4 -*/ -static void statPush( - sqlite3_context *context, - int argc, - sqlite3_value **argv -){ - int i; - - /* The three function arguments */ - Stat4Accum *p = (Stat4Accum*)sqlite3_value_blob(argv[0]); - int iChng = sqlite3_value_int(argv[1]); - - UNUSED_PARAMETER( argc ); - UNUSED_PARAMETER( context ); - assert( p->nCol>1 ); /* Includes rowid field */ - assert( iChngnCol ); - - if( p->nRow==0 ){ - /* This is the first call to this function. Do initialization. */ - for(i=0; inCol; i++) p->current.anEq[i] = 1; - }else{ - /* Second and subsequent calls get processed here */ - samplePushPrevious(p, iChng); - - /* Update anDLt[], anLt[] and anEq[] to reflect the values that apply - ** to the current row of the index. */ - for(i=0; icurrent.anEq[i]++; - } - for(i=iChng; inCol; i++){ - p->current.anDLt[i]++; -#ifdef SQLITE_ENABLE_STAT3_OR_STAT4 - p->current.anLt[i] += p->current.anEq[i]; -#endif - p->current.anEq[i] = 1; - } - } - p->nRow++; -#ifdef SQLITE_ENABLE_STAT3_OR_STAT4 - if( sqlite3_value_type(argv[2])==SQLITE_INTEGER ){ - sampleSetRowidInt64(p->db, &p->current, sqlite3_value_int64(argv[2])); - }else{ - sampleSetRowid(p->db, &p->current, sqlite3_value_bytes(argv[2]), - sqlite3_value_blob(argv[2])); - } - p->current.iHash = p->iPrn = p->iPrn*1103515245 + 12345; -#endif - -#ifdef SQLITE_ENABLE_STAT4 - { - tRowcnt nLt = p->current.anLt[p->nCol-1]; - - /* Check if this is to be a periodic sample. If so, add it. */ - if( (nLt/p->nPSample)!=(nLt+1)/p->nPSample ){ - p->current.isPSample = 1; - p->current.iCol = 0; - sampleInsert(p, &p->current, p->nCol-1); - p->current.isPSample = 0; - } - - /* Update the aBest[] array. 
*/ - for(i=0; i<(p->nCol-1); i++){ - p->current.iCol = i; - if( i>=iChng || sampleIsBetterPost(p, &p->current, &p->aBest[i]) ){ - sampleCopy(p, &p->aBest[i], &p->current); - } - } - } -#endif -} -static const FuncDef statPushFuncdef = { - 2+IsStat34, /* nArg */ - SQLITE_UTF8, /* funcFlags */ - 0, /* pUserData */ - 0, /* pNext */ - statPush, /* xFunc */ - 0, /* xStep */ - 0, /* xFinalize */ - "stat_push", /* zName */ - 0, /* pHash */ - 0 /* pDestructor */ -}; - -#define STAT_GET_STAT1 0 /* "stat" column of stat1 table */ -#define STAT_GET_ROWID 1 /* "rowid" column of stat[34] entry */ -#define STAT_GET_NEQ 2 /* "neq" column of stat[34] entry */ -#define STAT_GET_NLT 3 /* "nlt" column of stat[34] entry */ -#define STAT_GET_NDLT 4 /* "ndlt" column of stat[34] entry */ - -/* -** Implementation of the stat_get(P,J) SQL function. This routine is -** used to query the results. Content is returned for parameter J -** which is one of the STAT_GET_xxxx values defined above. -** -** If neither STAT3 nor STAT4 are enabled, then J is always -** STAT_GET_STAT1 and is hence omitted and this routine becomes -** a one-parameter function, stat_get(P), that always returns the -** stat1 table entry information. -*/ -static void statGet( - sqlite3_context *context, - int argc, - sqlite3_value **argv -){ - Stat4Accum *p = (Stat4Accum*)sqlite3_value_blob(argv[0]); -#ifdef SQLITE_ENABLE_STAT3_OR_STAT4 - /* STAT3 and STAT4 have a parameter on this routine. */ - int eCall = sqlite3_value_int(argv[1]); - assert( argc==2 ); - assert( eCall==STAT_GET_STAT1 || eCall==STAT_GET_NEQ - || eCall==STAT_GET_ROWID || eCall==STAT_GET_NLT - || eCall==STAT_GET_NDLT - ); - if( eCall==STAT_GET_STAT1 ) -#else - assert( argc==1 ); -#endif - { - /* Return the value to store in the "stat" column of the sqlite_stat1 - ** table for this index. - ** - ** The value is a string composed of a list of integers describing - ** the index. The first integer in the list is the total number of - ** entries in the index. There is one additional integer in the list - ** for each indexed column. This additional integer is an estimate of - ** the number of rows matched by a stabbing query on the index using - ** a key with the corresponding number of fields. In other words, - ** if the index is on columns (a,b) and the sqlite_stat1 value is - ** "100 10 2", then SQLite estimates that: - ** - ** * the index contains 100 rows, - ** * "WHERE a=?" matches 10 rows, and - ** * "WHERE a=? AND b=?" matches 2 rows. 
- ** - ** If D is the count of distinct values and K is the total number of - ** rows, then each estimate is computed as: - ** - ** I = (K+D-1)/D - */ - char *z; - int i; - - char *zRet = sqlite3MallocZero(p->nCol * 25); - if( zRet==0 ){ - sqlite3_result_error_nomem(context); - return; - } - - sqlite3_snprintf(24, zRet, "%llu", (u64)p->nRow); - z = zRet + sqlite3Strlen30(zRet); - for(i=0; i<(p->nCol-1); i++){ - u64 nDistinct = p->current.anDLt[i] + 1; - u64 iVal = (p->nRow + nDistinct - 1) / nDistinct; - sqlite3_snprintf(24, z, " %llu", iVal); - z += sqlite3Strlen30(z); - assert( p->current.anEq[i] ); - } - assert( z[0]=='\0' && z>zRet ); - - sqlite3_result_text(context, zRet, -1, sqlite3_free); - } -#ifdef SQLITE_ENABLE_STAT3_OR_STAT4 - else if( eCall==STAT_GET_ROWID ){ - if( p->iGet<0 ){ - samplePushPrevious(p, 0); - p->iGet = 0; - } - if( p->iGetnSample ){ - Stat4Sample *pS = p->a + p->iGet; - if( pS->nRowid==0 ){ - sqlite3_result_int64(context, pS->u.iRowid); - }else{ - sqlite3_result_blob(context, pS->u.aRowid, pS->nRowid, - SQLITE_TRANSIENT); - } - } - }else{ - tRowcnt *aCnt = 0; - - assert( p->iGetnSample ); - switch( eCall ){ - case STAT_GET_NEQ: aCnt = p->a[p->iGet].anEq; break; - case STAT_GET_NLT: aCnt = p->a[p->iGet].anLt; break; - default: { - aCnt = p->a[p->iGet].anDLt; - p->iGet++; - break; - } - } - - if( IsStat3 ){ - sqlite3_result_int64(context, (i64)aCnt[0]); - }else{ - char *zRet = sqlite3MallocZero(p->nCol * 25); - if( zRet==0 ){ - sqlite3_result_error_nomem(context); - }else{ - int i; - char *z = zRet; - for(i=0; inCol; i++){ - sqlite3_snprintf(24, z, "%llu ", (u64)aCnt[i]); - z += sqlite3Strlen30(z); - } - assert( z[0]=='\0' && z>zRet ); - z[-1] = '\0'; - sqlite3_result_text(context, zRet, -1, sqlite3_free); - } - } - } -#endif /* SQLITE_ENABLE_STAT3_OR_STAT4 */ -#ifndef SQLITE_DEBUG - UNUSED_PARAMETER( argc ); -#endif -} -static const FuncDef statGetFuncdef = { - 1+IsStat34, /* nArg */ - SQLITE_UTF8, /* funcFlags */ - 0, /* pUserData */ - 0, /* pNext */ - statGet, /* xFunc */ - 0, /* xStep */ - 0, /* xFinalize */ - "stat_get", /* zName */ - 0, /* pHash */ - 0 /* pDestructor */ -}; - -static void callStatGet(Vdbe *v, int regStat4, int iParam, int regOut){ - assert( regOut!=regStat4 && regOut!=regStat4+1 ); -#ifdef SQLITE_ENABLE_STAT3_OR_STAT4 - sqlite3VdbeAddOp2(v, OP_Integer, iParam, regStat4+1); -#elif SQLITE_DEBUG - assert( iParam==STAT_GET_STAT1 ); -#else - UNUSED_PARAMETER( iParam ); -#endif - sqlite3VdbeAddOp3(v, OP_Function, 0, regStat4, regOut); - sqlite3VdbeChangeP4(v, -1, (char*)&statGetFuncdef, P4_FUNCDEF); - sqlite3VdbeChangeP5(v, 1 + IsStat34); -} - -/* -** Generate code to do an analysis of all indices associated with -** a single table. 
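The stat text described above can be reproduced outside the VDBE. The sketch below is hypothetical and not part of SQLite: it pushes a small sorted two-column index through the same per-row update that stat_push() performs (anEq[i]++ for the unchanged prefix, then anDLt[i]++ and anEq[i]=1 from the first changed column onward; the trailing rowid column is omitted for brevity) and prints the sqlite_stat1 string using I = (K+D-1)/D.

/* Hypothetical re-implementation of the stat accumulation for an index
** on (a,b).  iChng is the left-most column that differs from the
** previous row, exactly as passed to stat_push(). */
#include <stdio.h>

#define NCOL 2

int main(void){
  int rows[][NCOL] = { {1,1}, {1,2}, {2,2}, {2,2}, {3,1} };  /* sorted */
  int nRow = sizeof(rows)/sizeof(rows[0]);
  unsigned anEq[NCOL], anDLt[NCOL] = {0,0};

  for(int r=0; r<nRow; r++){
    if( r==0 ){
      for(int i=0; i<NCOL; i++) anEq[i] = 1;              /* first row */
    }else{
      int iChng = 0;
      while( iChng<NCOL && rows[r][iChng]==rows[r-1][iChng] ) iChng++;
      for(int i=0; i<iChng; i++) anEq[i]++;               /* prefix unchanged */
      for(int i=iChng; i<NCOL; i++){ anDLt[i]++; anEq[i] = 1; }
    }
  }

  /* "stat" column of sqlite_stat1: nRow, then (K+D-1)/D per indexed column */
  printf("%d", nRow);
  for(int i=0; i<NCOL; i++){
    unsigned nDistinct = anDLt[i] + 1;
    printf(" %u", ((unsigned)nRow + nDistinct - 1) / nDistinct);
  }
  printf("\n");    /* prints "5 2 2" for the rows above */
  return 0;
}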
-*/ -static void analyzeOneTable( - Parse *pParse, /* Parser context */ - Table *pTab, /* Table whose indices are to be analyzed */ - Index *pOnlyIdx, /* If not NULL, only analyze this one index */ - int iStatCur, /* Index of VdbeCursor that writes the sqlite_stat1 table */ - int iMem, /* Available memory locations begin here */ - int iTab /* Next available cursor */ -){ - sqlite3 *db = pParse->db; /* Database handle */ - Index *pIdx; /* An index to being analyzed */ - int iIdxCur; /* Cursor open on index being analyzed */ - int iTabCur; /* Table cursor */ - Vdbe *v; /* The virtual machine being built up */ - int i; /* Loop counter */ - int jZeroRows = -1; /* Jump from here if number of rows is zero */ - int iDb; /* Index of database containing pTab */ - u8 needTableCnt = 1; /* True to count the table */ - int regNewRowid = iMem++; /* Rowid for the inserted record */ - int regStat4 = iMem++; /* Register to hold Stat4Accum object */ - int regChng = iMem++; /* Index of changed index field */ -#ifdef SQLITE_ENABLE_STAT3_OR_STAT4 - int regRowid = iMem++; /* Rowid argument passed to stat_push() */ -#endif - int regTemp = iMem++; /* Temporary use register */ - int regTabname = iMem++; /* Register containing table name */ - int regIdxname = iMem++; /* Register containing index name */ - int regStat1 = iMem++; /* Value for the stat column of sqlite_stat1 */ - int regPrev = iMem; /* MUST BE LAST (see below) */ - - pParse->nMem = MAX(pParse->nMem, iMem); - v = sqlite3GetVdbe(pParse); - if( v==0 || NEVER(pTab==0) ){ - return; - } - if( pTab->tnum==0 ){ - /* Do not gather statistics on views or virtual tables */ - return; - } - if( sqlite3_strnicmp(pTab->zName, "sqlite_", 7)==0 ){ - /* Do not gather statistics on system tables */ - return; - } - assert( sqlite3BtreeHoldsAllMutexes(db) ); - iDb = sqlite3SchemaToIndex(db, pTab->pSchema); - assert( iDb>=0 ); - assert( sqlite3SchemaMutexHeld(db, iDb, 0) ); -#ifndef SQLITE_OMIT_AUTHORIZATION - if( sqlite3AuthCheck(pParse, SQLITE_ANALYZE, pTab->zName, 0, - db->aDb[iDb].zName ) ){ - return; - } -#endif - - /* Establish a read-lock on the table at the shared-cache level. - ** Open a read-only cursor on the table. Also allocate a cursor number - ** to use for scanning indexes (iIdxCur). No index cursor is opened at - ** this time though. */ - sqlite3TableLock(pParse, iDb, pTab->tnum, 0, pTab->zName); - iTabCur = iTab++; - iIdxCur = iTab++; - pParse->nTab = MAX(pParse->nTab, iTab); - sqlite3OpenTable(pParse, iTabCur, iDb, pTab, OP_OpenRead); - sqlite3VdbeAddOp4(v, OP_String8, 0, regTabname, 0, pTab->zName, 0); - - for(pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext){ - int nCol; /* Number of columns indexed by pIdx */ - int *aGotoChng; /* Array of jump instruction addresses */ - int addrRewind; /* Address of "OP_Rewind iIdxCur" */ - int addrGotoChng0; /* Address of "Goto addr_chng_0" */ - int addrNextRow; /* Address of "next_row:" */ - const char *zIdxName; /* Name of the index */ - - if( pOnlyIdx && pOnlyIdx!=pIdx ) continue; - if( pIdx->pPartIdxWhere==0 ) needTableCnt = 0; - VdbeNoopComment((v, "Begin analysis of %s", pIdx->zName)); - nCol = pIdx->nKeyCol; - aGotoChng = sqlite3DbMallocRaw(db, sizeof(int)*(nCol+1)); - if( aGotoChng==0 ) continue; - - /* Populate the register containing the index name. 
*/ - if( IsPrimaryKeyIndex(pIdx) && !HasRowid(pTab) ){ - zIdxName = pTab->zName; - }else{ - zIdxName = pIdx->zName; - } - sqlite3VdbeAddOp4(v, OP_String8, 0, regIdxname, 0, zIdxName, 0); - - /* - ** Pseudo-code for loop that calls stat_push(): - ** - ** Rewind csr - ** if eof(csr) goto end_of_scan; - ** regChng = 0 - ** goto chng_addr_0; - ** - ** next_row: - ** regChng = 0 - ** if( idx(0) != regPrev(0) ) goto chng_addr_0 - ** regChng = 1 - ** if( idx(1) != regPrev(1) ) goto chng_addr_1 - ** ... - ** regChng = N - ** goto chng_addr_N - ** - ** chng_addr_0: - ** regPrev(0) = idx(0) - ** chng_addr_1: - ** regPrev(1) = idx(1) - ** ... - ** - ** chng_addr_N: - ** regRowid = idx(rowid) - ** stat_push(P, regChng, regRowid) - ** Next csr - ** if !eof(csr) goto next_row; - ** - ** end_of_scan: - */ - - /* Make sure there are enough memory cells allocated to accommodate - ** the regPrev array and a trailing rowid (the rowid slot is required - ** when building a record to insert into the sample column of - ** the sqlite_stat4 table. */ - pParse->nMem = MAX(pParse->nMem, regPrev+nCol); - - /* Open a read-only cursor on the index being analyzed. */ - assert( iDb==sqlite3SchemaToIndex(db, pIdx->pSchema) ); - sqlite3VdbeAddOp3(v, OP_OpenRead, iIdxCur, pIdx->tnum, iDb); - sqlite3VdbeSetP4KeyInfo(pParse, pIdx); - VdbeComment((v, "%s", pIdx->zName)); - - /* Invoke the stat_init() function. The arguments are: - ** - ** (1) the number of columns in the index including the rowid, - ** (2) the number of rows in the index, - ** - ** The second argument is only used for STAT3 and STAT4 - */ -#ifdef SQLITE_ENABLE_STAT3_OR_STAT4 - sqlite3VdbeAddOp2(v, OP_Count, iIdxCur, regStat4+2); -#endif - sqlite3VdbeAddOp2(v, OP_Integer, nCol+1, regStat4+1); - sqlite3VdbeAddOp3(v, OP_Function, 0, regStat4+1, regStat4); - sqlite3VdbeChangeP4(v, -1, (char*)&statInitFuncdef, P4_FUNCDEF); - sqlite3VdbeChangeP5(v, 1+IsStat34); - - /* Implementation of the following: - ** - ** Rewind csr - ** if eof(csr) goto end_of_scan; - ** regChng = 0 - ** goto next_push_0; - ** - */ - addrRewind = sqlite3VdbeAddOp1(v, OP_Rewind, iIdxCur); - VdbeCoverage(v); - sqlite3VdbeAddOp2(v, OP_Integer, 0, regChng); - addrGotoChng0 = sqlite3VdbeAddOp0(v, OP_Goto); - - /* - ** next_row: - ** regChng = 0 - ** if( idx(0) != regPrev(0) ) goto chng_addr_0 - ** regChng = 1 - ** if( idx(1) != regPrev(1) ) goto chng_addr_1 - ** ... - ** regChng = N - ** goto chng_addr_N - */ - addrNextRow = sqlite3VdbeCurrentAddr(v); - for(i=0; iazColl[i]); - sqlite3VdbeAddOp2(v, OP_Integer, i, regChng); - sqlite3VdbeAddOp3(v, OP_Column, iIdxCur, i, regTemp); - aGotoChng[i] = - sqlite3VdbeAddOp4(v, OP_Ne, regTemp, 0, regPrev+i, pColl, P4_COLLSEQ); - sqlite3VdbeChangeP5(v, SQLITE_NULLEQ); - VdbeCoverage(v); - } - sqlite3VdbeAddOp2(v, OP_Integer, nCol, regChng); - aGotoChng[nCol] = sqlite3VdbeAddOp0(v, OP_Goto); - - /* - ** chng_addr_0: - ** regPrev(0) = idx(0) - ** chng_addr_1: - ** regPrev(1) = idx(1) - ** ... 
- */ - sqlite3VdbeJumpHere(v, addrGotoChng0); - for(i=0; ipTable); - int j, k, regKey; - regKey = sqlite3GetTempRange(pParse, pPk->nKeyCol); - for(j=0; jnKeyCol; j++){ - k = sqlite3ColumnOfIndex(pIdx, pPk->aiColumn[j]); - sqlite3VdbeAddOp3(v, OP_Column, iIdxCur, k, regKey+j); - VdbeComment((v, "%s", pTab->aCol[pPk->aiColumn[j]].zName)); - } - sqlite3VdbeAddOp3(v, OP_MakeRecord, regKey, pPk->nKeyCol, regRowid); - sqlite3ReleaseTempRange(pParse, regKey, pPk->nKeyCol); - } -#endif - assert( regChng==(regStat4+1) ); - sqlite3VdbeAddOp3(v, OP_Function, 1, regStat4, regTemp); - sqlite3VdbeChangeP4(v, -1, (char*)&statPushFuncdef, P4_FUNCDEF); - sqlite3VdbeChangeP5(v, 2+IsStat34); - sqlite3VdbeAddOp2(v, OP_Next, iIdxCur, addrNextRow); VdbeCoverage(v); - - /* Add the entry to the stat1 table. */ - callStatGet(v, regStat4, STAT_GET_STAT1, regStat1); - sqlite3VdbeAddOp4(v, OP_MakeRecord, regTabname, 3, regTemp, "aaa", 0); - sqlite3VdbeAddOp2(v, OP_NewRowid, iStatCur, regNewRowid); - sqlite3VdbeAddOp3(v, OP_Insert, iStatCur, regTemp, regNewRowid); - sqlite3VdbeChangeP5(v, OPFLAG_APPEND); - - /* Add the entries to the stat3 or stat4 table. */ -#ifdef SQLITE_ENABLE_STAT3_OR_STAT4 - { - int regEq = regStat1; - int regLt = regStat1+1; - int regDLt = regStat1+2; - int regSample = regStat1+3; - int regCol = regStat1+4; - int regSampleRowid = regCol + nCol; - int addrNext; - int addrIsNull; - u8 seekOp = HasRowid(pTab) ? OP_NotExists : OP_NotFound; - - pParse->nMem = MAX(pParse->nMem, regCol+nCol+1); - - addrNext = sqlite3VdbeCurrentAddr(v); - callStatGet(v, regStat4, STAT_GET_ROWID, regSampleRowid); - addrIsNull = sqlite3VdbeAddOp1(v, OP_IsNull, regSampleRowid); - VdbeCoverage(v); - callStatGet(v, regStat4, STAT_GET_NEQ, regEq); - callStatGet(v, regStat4, STAT_GET_NLT, regLt); - callStatGet(v, regStat4, STAT_GET_NDLT, regDLt); - sqlite3VdbeAddOp4Int(v, seekOp, iTabCur, addrNext, regSampleRowid, 0); - /* We know that the regSampleRowid row exists because it was read by - ** the previous loop. Thus the not-found jump of seekOp will never - ** be taken */ - VdbeCoverageNeverTaken(v); -#ifdef SQLITE_ENABLE_STAT3 - sqlite3ExprCodeGetColumnOfTable(v, pTab, iTabCur, - pIdx->aiColumn[0], regSample); -#else - for(i=0; iaiColumn[i]; - sqlite3ExprCodeGetColumnOfTable(v, pTab, iTabCur, iCol, regCol+i); - } - sqlite3VdbeAddOp3(v, OP_MakeRecord, regCol, nCol+1, regSample); -#endif - sqlite3VdbeAddOp3(v, OP_MakeRecord, regTabname, 6, regTemp); - sqlite3VdbeAddOp2(v, OP_NewRowid, iStatCur+1, regNewRowid); - sqlite3VdbeAddOp3(v, OP_Insert, iStatCur+1, regTemp, regNewRowid); - sqlite3VdbeAddOp2(v, OP_Goto, 1, addrNext); /* P1==1 for end-of-loop */ - sqlite3VdbeJumpHere(v, addrIsNull); - } -#endif /* SQLITE_ENABLE_STAT3_OR_STAT4 */ - - /* End of analysis */ - sqlite3VdbeJumpHere(v, addrRewind); - sqlite3DbFree(db, aGotoChng); - } - - - /* Create a single sqlite_stat1 entry containing NULL as the index - ** name and the row count as the content. 
- */ - if( pOnlyIdx==0 && needTableCnt ){ - VdbeComment((v, "%s", pTab->zName)); - sqlite3VdbeAddOp2(v, OP_Count, iTabCur, regStat1); - jZeroRows = sqlite3VdbeAddOp1(v, OP_IfNot, regStat1); VdbeCoverage(v); - sqlite3VdbeAddOp2(v, OP_Null, 0, regIdxname); - sqlite3VdbeAddOp4(v, OP_MakeRecord, regTabname, 3, regTemp, "aaa", 0); - sqlite3VdbeAddOp2(v, OP_NewRowid, iStatCur, regNewRowid); - sqlite3VdbeAddOp3(v, OP_Insert, iStatCur, regTemp, regNewRowid); - sqlite3VdbeChangeP5(v, OPFLAG_APPEND); - sqlite3VdbeJumpHere(v, jZeroRows); - } -} - - -/* -** Generate code that will cause the most recent index analysis to -** be loaded into internal hash tables where is can be used. -*/ -static void loadAnalysis(Parse *pParse, int iDb){ - Vdbe *v = sqlite3GetVdbe(pParse); - if( v ){ - sqlite3VdbeAddOp1(v, OP_LoadAnalysis, iDb); - } -} - -/* -** Generate code that will do an analysis of an entire database -*/ -static void analyzeDatabase(Parse *pParse, int iDb){ - sqlite3 *db = pParse->db; - Schema *pSchema = db->aDb[iDb].pSchema; /* Schema of database iDb */ - HashElem *k; - int iStatCur; - int iMem; - int iTab; - - sqlite3BeginWriteOperation(pParse, 0, iDb); - iStatCur = pParse->nTab; - pParse->nTab += 3; - openStatTable(pParse, iDb, iStatCur, 0, 0); - iMem = pParse->nMem+1; - iTab = pParse->nTab; - assert( sqlite3SchemaMutexHeld(db, iDb, 0) ); - for(k=sqliteHashFirst(&pSchema->tblHash); k; k=sqliteHashNext(k)){ - Table *pTab = (Table*)sqliteHashData(k); - analyzeOneTable(pParse, pTab, 0, iStatCur, iMem, iTab); - } - loadAnalysis(pParse, iDb); -} - -/* -** Generate code that will do an analysis of a single table in -** a database. If pOnlyIdx is not NULL then it is a single index -** in pTab that should be analyzed. -*/ -static void analyzeTable(Parse *pParse, Table *pTab, Index *pOnlyIdx){ - int iDb; - int iStatCur; - - assert( pTab!=0 ); - assert( sqlite3BtreeHoldsAllMutexes(pParse->db) ); - iDb = sqlite3SchemaToIndex(pParse->db, pTab->pSchema); - sqlite3BeginWriteOperation(pParse, 0, iDb); - iStatCur = pParse->nTab; - pParse->nTab += 3; - if( pOnlyIdx ){ - openStatTable(pParse, iDb, iStatCur, pOnlyIdx->zName, "idx"); - }else{ - openStatTable(pParse, iDb, iStatCur, pTab->zName, "tbl"); - } - analyzeOneTable(pParse, pTab, pOnlyIdx, iStatCur,pParse->nMem+1,pParse->nTab); - loadAnalysis(pParse, iDb); -} - -/* -** Generate code for the ANALYZE command. The parser calls this routine -** when it recognizes an ANALYZE command. -** -** ANALYZE -- 1 -** ANALYZE -- 2 -** ANALYZE ?.? -- 3 -** -** Form 1 causes all indices in all attached databases to be analyzed. -** Form 2 analyzes all indices the single database named. -** Form 3 analyzes all indices associated with the named table. -*/ -SQLITE_PRIVATE void sqlite3Analyze(Parse *pParse, Token *pName1, Token *pName2){ - sqlite3 *db = pParse->db; - int iDb; - int i; - char *z, *zDb; - Table *pTab; - Index *pIdx; - Token *pTableName; - - /* Read the database schema. If an error occurs, leave an error message - ** and code in pParse and return NULL. 
*/ - assert( sqlite3BtreeHoldsAllMutexes(pParse->db) ); - if( SQLITE_OK!=sqlite3ReadSchema(pParse) ){ - return; - } - - assert( pName2!=0 || pName1==0 ); - if( pName1==0 ){ - /* Form 1: Analyze everything */ - for(i=0; inDb; i++){ - if( i==1 ) continue; /* Do not analyze the TEMP database */ - analyzeDatabase(pParse, i); - } - }else if( pName2->n==0 ){ - /* Form 2: Analyze the database or table named */ - iDb = sqlite3FindDb(db, pName1); - if( iDb>=0 ){ - analyzeDatabase(pParse, iDb); - }else{ - z = sqlite3NameFromToken(db, pName1); - if( z ){ - if( (pIdx = sqlite3FindIndex(db, z, 0))!=0 ){ - analyzeTable(pParse, pIdx->pTable, pIdx); - }else if( (pTab = sqlite3LocateTable(pParse, 0, z, 0))!=0 ){ - analyzeTable(pParse, pTab, 0); - } - sqlite3DbFree(db, z); - } - } - }else{ - /* Form 3: Analyze the fully qualified table name */ - iDb = sqlite3TwoPartName(pParse, pName1, pName2, &pTableName); - if( iDb>=0 ){ - zDb = db->aDb[iDb].zName; - z = sqlite3NameFromToken(db, pTableName); - if( z ){ - if( (pIdx = sqlite3FindIndex(db, z, zDb))!=0 ){ - analyzeTable(pParse, pIdx->pTable, pIdx); - }else if( (pTab = sqlite3LocateTable(pParse, 0, z, zDb))!=0 ){ - analyzeTable(pParse, pTab, 0); - } - sqlite3DbFree(db, z); - } - } - } -} - -/* -** Used to pass information from the analyzer reader through to the -** callback routine. -*/ -typedef struct analysisInfo analysisInfo; -struct analysisInfo { - sqlite3 *db; - const char *zDatabase; -}; - -/* -** The first argument points to a nul-terminated string containing a -** list of space separated integers. Read the first nOut of these into -** the array aOut[]. -*/ -static void decodeIntArray( - char *zIntArray, /* String containing int array to decode */ - int nOut, /* Number of slots in aOut[] */ - tRowcnt *aOut, /* Store integers here */ - LogEst *aLog, /* Or, if aOut==0, here */ - Index *pIndex /* Handle extra flags for this index, if not NULL */ -){ - char *z = zIntArray; - int c; - int i; - tRowcnt v; - -#ifdef SQLITE_ENABLE_STAT3_OR_STAT4 - if( z==0 ) z = ""; -#else - if( NEVER(z==0) ) z = ""; -#endif - for(i=0; *z && i='0' && c<='9' ){ - v = v*10 + c - '0'; - z++; - } -#ifdef SQLITE_ENABLE_STAT3_OR_STAT4 - if( aOut ){ - aOut[i] = v; - }else -#else - assert( aOut==0 ); - UNUSED_PARAMETER(aOut); -#endif - { - aLog[i] = sqlite3LogEst(v); - } - if( *z==' ' ) z++; - } -#ifndef SQLITE_ENABLE_STAT3_OR_STAT4 - assert( pIndex!=0 ); -#else - if( pIndex ) -#endif - { - if( strcmp(z, "unordered")==0 ){ - pIndex->bUnordered = 1; - }else if( sqlite3_strglob("sz=[0-9]*", z)==0 ){ - int v32 = 0; - sqlite3GetInt32(z+3, &v32); - pIndex->szIdxRow = sqlite3LogEst(v32); - } - } -} - -/* -** This callback is invoked once for each index when reading the -** sqlite_stat1 table. -** -** argv[0] = name of the table -** argv[1] = name of the index (might be NULL) -** argv[2] = results of analysis - on integer for each column -** -** Entries for which argv[1]==NULL simply record the number of rows in -** the table. 
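Applications never call sqlite3Analyze() directly; the parser reaches it through the ANALYZE statement forms described above. A minimal usage sketch against the public API (the table and index names are hypothetical) that runs form 1 and then dumps the resulting sqlite_stat1 rows:

#include <stdio.h>
#include <sqlite3.h>

static int show(void *arg, int argc, char **argv, char **azCol){
  (void)arg; (void)argc; (void)azCol;
  /* Columns are tbl, idx (may be NULL for a row-count-only entry), stat */
  printf("tbl=%s idx=%s stat=%s\n",
         argv[0], argv[1] ? argv[1] : "(null)", argv[2]);
  return 0;
}

int main(void){
  sqlite3 *db;
  if( sqlite3_open(":memory:", &db)!=SQLITE_OK ) return 1;
  sqlite3_exec(db,
    "CREATE TABLE t(a,b);"
    "CREATE INDEX t_ab ON t(a,b);"
    "INSERT INTO t VALUES(1,1),(1,2),(2,2),(2,2),(3,1);"
    "ANALYZE;",                        /* form 1: analyze everything */
    0, 0, 0);
  /* For the rows above, the t_ab entry is expected to read "5 2 2". */
  sqlite3_exec(db, "SELECT tbl,idx,stat FROM sqlite_stat1", show, 0, 0);
  sqlite3_close(db);
  return 0;
}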
-*/ -static int analysisLoader(void *pData, int argc, char **argv, char **NotUsed){ - analysisInfo *pInfo = (analysisInfo*)pData; - Index *pIndex; - Table *pTable; - const char *z; - - assert( argc==3 ); - UNUSED_PARAMETER2(NotUsed, argc); - - if( argv==0 || argv[0]==0 || argv[2]==0 ){ - return 0; - } - pTable = sqlite3FindTable(pInfo->db, argv[0], pInfo->zDatabase); - if( pTable==0 ){ - return 0; - } - if( argv[1]==0 ){ - pIndex = 0; - }else if( sqlite3_stricmp(argv[0],argv[1])==0 ){ - pIndex = sqlite3PrimaryKeyIndex(pTable); - }else{ - pIndex = sqlite3FindIndex(pInfo->db, argv[1], pInfo->zDatabase); - } - z = argv[2]; - - if( pIndex ){ - decodeIntArray((char*)z, pIndex->nKeyCol+1, 0, pIndex->aiRowLogEst, pIndex); - if( pIndex->pPartIdxWhere==0 ) pTable->nRowLogEst = pIndex->aiRowLogEst[0]; - }else{ - Index fakeIdx; - fakeIdx.szIdxRow = pTable->szTabRow; - decodeIntArray((char*)z, 1, 0, &pTable->nRowLogEst, &fakeIdx); - pTable->szTabRow = fakeIdx.szIdxRow; - } - - return 0; -} - -/* -** If the Index.aSample variable is not NULL, delete the aSample[] array -** and its contents. -*/ -SQLITE_PRIVATE void sqlite3DeleteIndexSamples(sqlite3 *db, Index *pIdx){ -#ifdef SQLITE_ENABLE_STAT3_OR_STAT4 - if( pIdx->aSample ){ - int j; - for(j=0; jnSample; j++){ - IndexSample *p = &pIdx->aSample[j]; - sqlite3DbFree(db, p->p); - } - sqlite3DbFree(db, pIdx->aSample); - } - if( db && db->pnBytesFreed==0 ){ - pIdx->nSample = 0; - pIdx->aSample = 0; - } -#else - UNUSED_PARAMETER(db); - UNUSED_PARAMETER(pIdx); -#endif /* SQLITE_ENABLE_STAT3_OR_STAT4 */ -} - -#ifdef SQLITE_ENABLE_STAT3_OR_STAT4 -/* -** Populate the pIdx->aAvgEq[] array based on the samples currently -** stored in pIdx->aSample[]. -*/ -static void initAvgEq(Index *pIdx){ - if( pIdx ){ - IndexSample *aSample = pIdx->aSample; - IndexSample *pFinal = &aSample[pIdx->nSample-1]; - int iCol; - for(iCol=0; iColnKeyCol; iCol++){ - int i; /* Used to iterate through samples */ - tRowcnt sumEq = 0; /* Sum of the nEq values */ - tRowcnt nSum = 0; /* Number of terms contributing to sumEq */ - tRowcnt avgEq = 0; - tRowcnt nDLt = pFinal->anDLt[iCol]; - - /* Set nSum to the number of distinct (iCol+1) field prefixes that - ** occur in the stat4 table for this index before pFinal. Set - ** sumEq to the sum of the nEq values for column iCol for the same - ** set (adding the value only once where there exist dupicate - ** prefixes). */ - for(i=0; i<(pIdx->nSample-1); i++){ - if( aSample[i].anDLt[iCol]!=aSample[i+1].anDLt[iCol] ){ - sumEq += aSample[i].anEq[iCol]; - nSum++; - } - } - if( nDLt>nSum ){ - avgEq = (pFinal->anLt[iCol] - sumEq)/(nDLt - nSum); - } - if( avgEq==0 ) avgEq = 1; - pIdx->aAvgEq[iCol] = avgEq; - if( pIdx->nSampleCol==1 ) break; - } - } -} - -/* -** Look up an index by name. Or, if the name of a WITHOUT ROWID table -** is supplied instead, find the PRIMARY KEY index for that table. -*/ -static Index *findIndexOrPrimaryKey( - sqlite3 *db, - const char *zName, - const char *zDb -){ - Index *pIdx = sqlite3FindIndex(db, zName, zDb); - if( pIdx==0 ){ - Table *pTab = sqlite3FindTable(db, zName, zDb); - if( pTab && !HasRowid(pTab) ) pIdx = sqlite3PrimaryKeyIndex(pTab); - } - return pIdx; -} - -/* -** Load the content from either the sqlite_stat4 or sqlite_stat3 table -** into the relevant Index.aSample[] arrays. 
-** -** Arguments zSql1 and zSql2 must point to SQL statements that return -** data equivalent to the following (statements are different for stat3, -** see the caller of this function for details): -** -** zSql1: SELECT idx,count(*) FROM %Q.sqlite_stat4 GROUP BY idx -** zSql2: SELECT idx,neq,nlt,ndlt,sample FROM %Q.sqlite_stat4 -** -** where %Q is replaced with the database name before the SQL is executed. -*/ -static int loadStatTbl( - sqlite3 *db, /* Database handle */ - int bStat3, /* Assume single column records only */ - const char *zSql1, /* SQL statement 1 (see above) */ - const char *zSql2, /* SQL statement 2 (see above) */ - const char *zDb /* Database name (e.g. "main") */ -){ - int rc; /* Result codes from subroutines */ - sqlite3_stmt *pStmt = 0; /* An SQL statement being run */ - char *zSql; /* Text of the SQL statement */ - Index *pPrevIdx = 0; /* Previous index in the loop */ - IndexSample *pSample; /* A slot in pIdx->aSample[] */ - - assert( db->lookaside.bEnabled==0 ); - zSql = sqlite3MPrintf(db, zSql1, zDb); - if( !zSql ){ - return SQLITE_NOMEM; - } - rc = sqlite3_prepare(db, zSql, -1, &pStmt, 0); - sqlite3DbFree(db, zSql); - if( rc ) return rc; - - while( sqlite3_step(pStmt)==SQLITE_ROW ){ - int nIdxCol = 1; /* Number of columns in stat4 records */ - int nAvgCol = 1; /* Number of entries in Index.aAvgEq */ - - char *zIndex; /* Index name */ - Index *pIdx; /* Pointer to the index object */ - int nSample; /* Number of samples */ - int nByte; /* Bytes of space required */ - int i; /* Bytes of space required */ - tRowcnt *pSpace; - - zIndex = (char *)sqlite3_column_text(pStmt, 0); - if( zIndex==0 ) continue; - nSample = sqlite3_column_int(pStmt, 1); - pIdx = findIndexOrPrimaryKey(db, zIndex, zDb); - assert( pIdx==0 || bStat3 || pIdx->nSample==0 ); - /* Index.nSample is non-zero at this point if data has already been - ** loaded from the stat4 table. In this case ignore stat3 data. */ - if( pIdx==0 || pIdx->nSample ) continue; - if( bStat3==0 ){ - nIdxCol = pIdx->nKeyCol+1; - nAvgCol = pIdx->nKeyCol; - } - pIdx->nSampleCol = nIdxCol; - nByte = sizeof(IndexSample) * nSample; - nByte += sizeof(tRowcnt) * nIdxCol * 3 * nSample; - nByte += nAvgCol * sizeof(tRowcnt); /* Space for Index.aAvgEq[] */ - - pIdx->aSample = sqlite3DbMallocZero(db, nByte); - if( pIdx->aSample==0 ){ - sqlite3_finalize(pStmt); - return SQLITE_NOMEM; - } - pSpace = (tRowcnt*)&pIdx->aSample[nSample]; - pIdx->aAvgEq = pSpace; pSpace += nAvgCol; - for(i=0; iaSample[i].anEq = pSpace; pSpace += nIdxCol; - pIdx->aSample[i].anLt = pSpace; pSpace += nIdxCol; - pIdx->aSample[i].anDLt = pSpace; pSpace += nIdxCol; - } - assert( ((u8*)pSpace)-nByte==(u8*)(pIdx->aSample) ); - } - rc = sqlite3_finalize(pStmt); - if( rc ) return rc; - - zSql = sqlite3MPrintf(db, zSql2, zDb); - if( !zSql ){ - return SQLITE_NOMEM; - } - rc = sqlite3_prepare(db, zSql, -1, &pStmt, 0); - sqlite3DbFree(db, zSql); - if( rc ) return rc; - - while( sqlite3_step(pStmt)==SQLITE_ROW ){ - char *zIndex; /* Index name */ - Index *pIdx; /* Pointer to the index object */ - int nCol = 1; /* Number of columns in index */ - - zIndex = (char *)sqlite3_column_text(pStmt, 0); - if( zIndex==0 ) continue; - pIdx = findIndexOrPrimaryKey(db, zIndex, zDb); - if( pIdx==0 ) continue; - /* This next condition is true if data has already been loaded from - ** the sqlite_stat4 table. In this case ignore stat3 data. 
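One detail of loadStatTbl() above that is easy to miss: Index.aAvgEq and every sample's anEq/anLt/anDLt counters are carved out of a single allocation placed directly behind the IndexSample array, so a single free releases the whole structure. A generic sketch of that layout trick, with hypothetical simplified types (the aAvgEq slice is left out for brevity):

#include <stdio.h>
#include <stdlib.h>

typedef unsigned tRowcnt;
typedef struct { tRowcnt *anEq, *anLt, *anDLt; } Sample;

/* Allocate nSample headers plus 3*nCol counters per sample in one block,
** then point each header at its slice, as loadStatTbl() does. */
static Sample *allocSamples(int nSample, int nCol){
  size_t nByte = sizeof(Sample)*nSample + sizeof(tRowcnt)*nCol*3*nSample;
  Sample *a = calloc(1, nByte);
  if( a==0 ) return 0;
  tRowcnt *pSpace = (tRowcnt*)&a[nSample];  /* counters start after headers */
  for(int i=0; i<nSample; i++){
    a[i].anEq  = pSpace;  pSpace += nCol;
    a[i].anLt  = pSpace;  pSpace += nCol;
    a[i].anDLt = pSpace;  pSpace += nCol;
  }
  return a;                                 /* one free() releases it all */
}

int main(void){
  Sample *a = allocSamples(4, 3);
  if( a==0 ) return 1;
  a[2].anEq[1] = 42;
  printf("%u\n", a[2].anEq[1]);
  free(a);
  return 0;
}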
*/ - nCol = pIdx->nSampleCol; - if( bStat3 && nCol>1 ) continue; - if( pIdx!=pPrevIdx ){ - initAvgEq(pPrevIdx); - pPrevIdx = pIdx; - } - pSample = &pIdx->aSample[pIdx->nSample]; - decodeIntArray((char*)sqlite3_column_text(pStmt,1),nCol,pSample->anEq,0,0); - decodeIntArray((char*)sqlite3_column_text(pStmt,2),nCol,pSample->anLt,0,0); - decodeIntArray((char*)sqlite3_column_text(pStmt,3),nCol,pSample->anDLt,0,0); - - /* Take a copy of the sample. Add two 0x00 bytes the end of the buffer. - ** This is in case the sample record is corrupted. In that case, the - ** sqlite3VdbeRecordCompare() may read up to two varints past the - ** end of the allocated buffer before it realizes it is dealing with - ** a corrupt record. Adding the two 0x00 bytes prevents this from causing - ** a buffer overread. */ - pSample->n = sqlite3_column_bytes(pStmt, 4); - pSample->p = sqlite3DbMallocZero(db, pSample->n + 2); - if( pSample->p==0 ){ - sqlite3_finalize(pStmt); - return SQLITE_NOMEM; - } - memcpy(pSample->p, sqlite3_column_blob(pStmt, 4), pSample->n); - pIdx->nSample++; - } - rc = sqlite3_finalize(pStmt); - if( rc==SQLITE_OK ) initAvgEq(pPrevIdx); - return rc; -} - -/* -** Load content from the sqlite_stat4 and sqlite_stat3 tables into -** the Index.aSample[] arrays of all indices. -*/ -static int loadStat4(sqlite3 *db, const char *zDb){ - int rc = SQLITE_OK; /* Result codes from subroutines */ - - assert( db->lookaside.bEnabled==0 ); - if( sqlite3FindTable(db, "sqlite_stat4", zDb) ){ - rc = loadStatTbl(db, 0, - "SELECT idx,count(*) FROM %Q.sqlite_stat4 GROUP BY idx", - "SELECT idx,neq,nlt,ndlt,sample FROM %Q.sqlite_stat4", - zDb - ); - } - - if( rc==SQLITE_OK && sqlite3FindTable(db, "sqlite_stat3", zDb) ){ - rc = loadStatTbl(db, 1, - "SELECT idx,count(*) FROM %Q.sqlite_stat3 GROUP BY idx", - "SELECT idx,neq,nlt,ndlt,sqlite_record(sample) FROM %Q.sqlite_stat3", - zDb - ); - } - - return rc; -} -#endif /* SQLITE_ENABLE_STAT3_OR_STAT4 */ - -/* -** Load the content of the sqlite_stat1 and sqlite_stat3/4 tables. The -** contents of sqlite_stat1 are used to populate the Index.aiRowEst[] -** arrays. The contents of sqlite_stat3/4 are used to populate the -** Index.aSample[] arrays. -** -** If the sqlite_stat1 table is not present in the database, SQLITE_ERROR -** is returned. In this case, even if SQLITE_ENABLE_STAT3/4 was defined -** during compilation and the sqlite_stat3/4 table is present, no data is -** read from it. -** -** If SQLITE_ENABLE_STAT3/4 was defined during compilation and the -** sqlite_stat4 table is not present in the database, SQLITE_ERROR is -** returned. However, in this case, data is read from the sqlite_stat1 -** table (if it is present) before returning. -** -** If an OOM error occurs, this function always sets db->mallocFailed. -** This means if the caller does not care about other errors, the return -** code may be ignored. 
-*/ -SQLITE_PRIVATE int sqlite3AnalysisLoad(sqlite3 *db, int iDb){ - analysisInfo sInfo; - HashElem *i; - char *zSql; - int rc; - - assert( iDb>=0 && iDbnDb ); - assert( db->aDb[iDb].pBt!=0 ); - - /* Clear any prior statistics */ - assert( sqlite3SchemaMutexHeld(db, iDb, 0) ); - for(i=sqliteHashFirst(&db->aDb[iDb].pSchema->idxHash);i;i=sqliteHashNext(i)){ - Index *pIdx = sqliteHashData(i); - sqlite3DefaultRowEst(pIdx); -#ifdef SQLITE_ENABLE_STAT3_OR_STAT4 - sqlite3DeleteIndexSamples(db, pIdx); - pIdx->aSample = 0; -#endif - } - - /* Check to make sure the sqlite_stat1 table exists */ - sInfo.db = db; - sInfo.zDatabase = db->aDb[iDb].zName; - if( sqlite3FindTable(db, "sqlite_stat1", sInfo.zDatabase)==0 ){ - return SQLITE_ERROR; - } - - /* Load new statistics out of the sqlite_stat1 table */ - zSql = sqlite3MPrintf(db, - "SELECT tbl,idx,stat FROM %Q.sqlite_stat1", sInfo.zDatabase); - if( zSql==0 ){ - rc = SQLITE_NOMEM; - }else{ - rc = sqlite3_exec(db, zSql, analysisLoader, &sInfo, 0); - sqlite3DbFree(db, zSql); - } - - - /* Load the statistics from the sqlite_stat4 table. */ -#ifdef SQLITE_ENABLE_STAT3_OR_STAT4 - if( rc==SQLITE_OK ){ - int lookasideEnabled = db->lookaside.bEnabled; - db->lookaside.bEnabled = 0; - rc = loadStat4(db, sInfo.zDatabase); - db->lookaside.bEnabled = lookasideEnabled; - } -#endif - - if( rc==SQLITE_NOMEM ){ - db->mallocFailed = 1; - } - return rc; -} - - -#endif /* SQLITE_OMIT_ANALYZE */ - -/************** End of analyze.c *********************************************/ -/************** Begin file attach.c ******************************************/ -/* -** 2003 April 6 -** -** The author disclaims copyright to this source code. In place of -** a legal notice, here is a blessing: -** -** May you do good and not evil. -** May you find forgiveness for yourself and forgive others. -** May you share freely, never taking more than you give. -** -************************************************************************* -** This file contains code used to implement the ATTACH and DETACH commands. -*/ - -#ifndef SQLITE_OMIT_ATTACH -/* -** Resolve an expression that was part of an ATTACH or DETACH statement. This -** is slightly different from resolving a normal SQL expression, because simple -** identifiers are treated as strings, not possible column names or aliases. -** -** i.e. if the parser sees: -** -** ATTACH DATABASE abc AS def -** -** it treats the two expressions as literal strings 'abc' and 'def' instead of -** looking for columns of the same name. -** -** This only applies to the root node of pExpr, so the statement: -** -** ATTACH DATABASE abc||def AS 'db2' -** -** will fail because neither abc or def can be resolved. -*/ -static int resolveAttachExpr(NameContext *pName, Expr *pExpr) -{ - int rc = SQLITE_OK; - if( pExpr ){ - if( pExpr->op!=TK_ID ){ - rc = sqlite3ResolveExprNames(pName, pExpr); - }else{ - pExpr->op = TK_STRING; - } - } - return rc; -} - -/* -** An SQL user-function registered to do the work of an ATTACH statement. The -** three arguments to the function come directly from an attach statement: -** -** ATTACH DATABASE x AS y KEY z -** -** SELECT sqlite_attach(x, y, z) -** -** If the optional "KEY z" syntax is omitted, an SQL NULL is passed as the -** third argument. 
-*/ -static void attachFunc( - sqlite3_context *context, - int NotUsed, - sqlite3_value **argv -){ - int i; - int rc = 0; - sqlite3 *db = sqlite3_context_db_handle(context); - const char *zName; - const char *zFile; - char *zPath = 0; - char *zErr = 0; - unsigned int flags; - Db *aNew; - char *zErrDyn = 0; - sqlite3_vfs *pVfs; - - UNUSED_PARAMETER(NotUsed); - - zFile = (const char *)sqlite3_value_text(argv[0]); - zName = (const char *)sqlite3_value_text(argv[1]); - if( zFile==0 ) zFile = ""; - if( zName==0 ) zName = ""; - - /* Check for the following errors: - ** - ** * Too many attached databases, - ** * Transaction currently open - ** * Specified database name already being used. - */ - if( db->nDb>=db->aLimit[SQLITE_LIMIT_ATTACHED]+2 ){ - zErrDyn = sqlite3MPrintf(db, "too many attached databases - max %d", - db->aLimit[SQLITE_LIMIT_ATTACHED] - ); - goto attach_error; - } - if( !db->autoCommit ){ - zErrDyn = sqlite3MPrintf(db, "cannot ATTACH database within transaction"); - goto attach_error; - } - for(i=0; inDb; i++){ - char *z = db->aDb[i].zName; - assert( z && zName ); - if( sqlite3StrICmp(z, zName)==0 ){ - zErrDyn = sqlite3MPrintf(db, "database %s is already in use", zName); - goto attach_error; - } - } - - /* Allocate the new entry in the db->aDb[] array and initialize the schema - ** hash tables. - */ - if( db->aDb==db->aDbStatic ){ - aNew = sqlite3DbMallocRaw(db, sizeof(db->aDb[0])*3 ); - if( aNew==0 ) return; - memcpy(aNew, db->aDb, sizeof(db->aDb[0])*2); - }else{ - aNew = sqlite3DbRealloc(db, db->aDb, sizeof(db->aDb[0])*(db->nDb+1) ); - if( aNew==0 ) return; - } - db->aDb = aNew; - aNew = &db->aDb[db->nDb]; - memset(aNew, 0, sizeof(*aNew)); - - /* Open the database file. If the btree is successfully opened, use - ** it to obtain the database schema. At this point the schema may - ** or may not be initialized. 
- */ - flags = db->openFlags; - rc = sqlite3ParseUri(db->pVfs->zName, zFile, &flags, &pVfs, &zPath, &zErr); - if( rc!=SQLITE_OK ){ - if( rc==SQLITE_NOMEM ) db->mallocFailed = 1; - sqlite3_result_error(context, zErr, -1); - sqlite3_free(zErr); - return; - } - assert( pVfs ); - flags |= SQLITE_OPEN_MAIN_DB; - rc = sqlite3BtreeOpen(pVfs, zPath, db, &aNew->pBt, 0, flags); - sqlite3_free( zPath ); - db->nDb++; - if( rc==SQLITE_CONSTRAINT ){ - rc = SQLITE_ERROR; - zErrDyn = sqlite3MPrintf(db, "database is already attached"); - }else if( rc==SQLITE_OK ){ - Pager *pPager; - aNew->pSchema = sqlite3SchemaGet(db, aNew->pBt); - if( !aNew->pSchema ){ - rc = SQLITE_NOMEM; - }else if( aNew->pSchema->file_format && aNew->pSchema->enc!=ENC(db) ){ - zErrDyn = sqlite3MPrintf(db, - "attached databases must use the same text encoding as main database"); - rc = SQLITE_ERROR; - } - pPager = sqlite3BtreePager(aNew->pBt); - sqlite3PagerLockingMode(pPager, db->dfltLockMode); - sqlite3BtreeSecureDelete(aNew->pBt, - sqlite3BtreeSecureDelete(db->aDb[0].pBt,-1) ); -#ifndef SQLITE_OMIT_PAGER_PRAGMAS - sqlite3BtreeSetPagerFlags(aNew->pBt, 3 | (db->flags & PAGER_FLAGS_MASK)); -#endif - } - aNew->safety_level = 3; - aNew->zName = sqlite3DbStrDup(db, zName); - if( rc==SQLITE_OK && aNew->zName==0 ){ - rc = SQLITE_NOMEM; - } - - -#ifdef SQLITE_HAS_CODEC - if( rc==SQLITE_OK ){ - extern int sqlite3CodecAttach(sqlite3*, int, const void*, int); - extern void sqlite3CodecGetKey(sqlite3*, int, void**, int*); - int nKey; - char *zKey; - int t = sqlite3_value_type(argv[2]); - switch( t ){ - case SQLITE_INTEGER: - case SQLITE_FLOAT: - zErrDyn = sqlite3DbStrDup(db, "Invalid key value"); - rc = SQLITE_ERROR; - break; - - case SQLITE_TEXT: - case SQLITE_BLOB: - nKey = sqlite3_value_bytes(argv[2]); - zKey = (char *)sqlite3_value_blob(argv[2]); - rc = sqlite3CodecAttach(db, db->nDb-1, zKey, nKey); - break; - - case SQLITE_NULL: - /* No key specified. Use the key from the main database */ - sqlite3CodecGetKey(db, 0, (void**)&zKey, &nKey); - if( nKey>0 || sqlite3BtreeGetReserve(db->aDb[0].pBt)>0 ){ - rc = sqlite3CodecAttach(db, db->nDb-1, zKey, nKey); - } - break; - } - } -#endif - - /* If the file was opened successfully, read the schema for the new database. - ** If this fails, or if opening the file failed, then close the file and - ** remove the entry from the db->aDb[] array. i.e. put everything back the way - ** we found it. - */ - if( rc==SQLITE_OK ){ - sqlite3BtreeEnterAll(db); - rc = sqlite3Init(db, &zErrDyn); - sqlite3BtreeLeaveAll(db); - } - if( rc ){ - int iDb = db->nDb - 1; - assert( iDb>=2 ); - if( db->aDb[iDb].pBt ){ - sqlite3BtreeClose(db->aDb[iDb].pBt); - db->aDb[iDb].pBt = 0; - db->aDb[iDb].pSchema = 0; - } - sqlite3ResetAllSchemasOfConnection(db); - db->nDb = iDb; - if( rc==SQLITE_NOMEM || rc==SQLITE_IOERR_NOMEM ){ - db->mallocFailed = 1; - sqlite3DbFree(db, zErrDyn); - zErrDyn = sqlite3MPrintf(db, "out of memory"); - }else if( zErrDyn==0 ){ - zErrDyn = sqlite3MPrintf(db, "unable to open database: %s", zFile); - } - goto attach_error; - } - - return; - -attach_error: - /* Return an error if we get here */ - if( zErrDyn ){ - sqlite3_result_error(context, zErrDyn, -1); - sqlite3DbFree(db, zErrDyn); - } - if( rc ) sqlite3_result_error_code(context, rc); -} - -/* -** An SQL user-function registered to do the work of an DETACH statement. 
The -** three arguments to the function come directly from a detach statement: -** -** DETACH DATABASE x -** -** SELECT sqlite_detach(x) -*/ -static void detachFunc( - sqlite3_context *context, - int NotUsed, - sqlite3_value **argv -){ - const char *zName = (const char *)sqlite3_value_text(argv[0]); - sqlite3 *db = sqlite3_context_db_handle(context); - int i; - Db *pDb = 0; - char zErr[128]; - - UNUSED_PARAMETER(NotUsed); - - if( zName==0 ) zName = ""; - for(i=0; inDb; i++){ - pDb = &db->aDb[i]; - if( pDb->pBt==0 ) continue; - if( sqlite3StrICmp(pDb->zName, zName)==0 ) break; - } - - if( i>=db->nDb ){ - sqlite3_snprintf(sizeof(zErr),zErr, "no such database: %s", zName); - goto detach_error; - } - if( i<2 ){ - sqlite3_snprintf(sizeof(zErr),zErr, "cannot detach database %s", zName); - goto detach_error; - } - if( !db->autoCommit ){ - sqlite3_snprintf(sizeof(zErr), zErr, - "cannot DETACH database within transaction"); - goto detach_error; - } - if( sqlite3BtreeIsInReadTrans(pDb->pBt) || sqlite3BtreeIsInBackup(pDb->pBt) ){ - sqlite3_snprintf(sizeof(zErr),zErr, "database %s is locked", zName); - goto detach_error; - } - - sqlite3BtreeClose(pDb->pBt); - pDb->pBt = 0; - pDb->pSchema = 0; - sqlite3ResetAllSchemasOfConnection(db); - return; - -detach_error: - sqlite3_result_error(context, zErr, -1); -} - -/* -** This procedure generates VDBE code for a single invocation of either the -** sqlite_detach() or sqlite_attach() SQL user functions. -*/ -static void codeAttach( - Parse *pParse, /* The parser context */ - int type, /* Either SQLITE_ATTACH or SQLITE_DETACH */ - FuncDef const *pFunc,/* FuncDef wrapper for detachFunc() or attachFunc() */ - Expr *pAuthArg, /* Expression to pass to authorization callback */ - Expr *pFilename, /* Name of database file */ - Expr *pDbname, /* Name of the database to use internally */ - Expr *pKey /* Database key for encryption extension */ -){ - int rc; - NameContext sName; - Vdbe *v; - sqlite3* db = pParse->db; - int regArgs; - - memset(&sName, 0, sizeof(NameContext)); - sName.pParse = pParse; - - if( - SQLITE_OK!=(rc = resolveAttachExpr(&sName, pFilename)) || - SQLITE_OK!=(rc = resolveAttachExpr(&sName, pDbname)) || - SQLITE_OK!=(rc = resolveAttachExpr(&sName, pKey)) - ){ - pParse->nErr++; - goto attach_end; - } - -#ifndef SQLITE_OMIT_AUTHORIZATION - if( pAuthArg ){ - char *zAuthArg; - if( pAuthArg->op==TK_STRING ){ - zAuthArg = pAuthArg->u.zToken; - }else{ - zAuthArg = 0; - } - rc = sqlite3AuthCheck(pParse, type, zAuthArg, 0, 0); - if(rc!=SQLITE_OK ){ - goto attach_end; - } - } -#endif /* SQLITE_OMIT_AUTHORIZATION */ - - - v = sqlite3GetVdbe(pParse); - regArgs = sqlite3GetTempRange(pParse, 4); - sqlite3ExprCode(pParse, pFilename, regArgs); - sqlite3ExprCode(pParse, pDbname, regArgs+1); - sqlite3ExprCode(pParse, pKey, regArgs+2); - - assert( v || db->mallocFailed ); - if( v ){ - sqlite3VdbeAddOp3(v, OP_Function, 0, regArgs+3-pFunc->nArg, regArgs+3); - assert( pFunc->nArg==-1 || (pFunc->nArg&0xff)==pFunc->nArg ); - sqlite3VdbeChangeP5(v, (u8)(pFunc->nArg)); - sqlite3VdbeChangeP4(v, -1, (char *)pFunc, P4_FUNCDEF); - - /* Code an OP_Expire. For an ATTACH statement, set P1 to true (expire this - ** statement only). For DETACH, set it to false (expire all existing - ** statements). - */ - sqlite3VdbeAddOp1(v, OP_Expire, (type==SQLITE_ATTACH)); - } - -attach_end: - sqlite3ExprDelete(db, pFilename); - sqlite3ExprDelete(db, pDbname); - sqlite3ExprDelete(db, pKey); -} - -/* -** Called by the parser to compile a DETACH statement. 
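attachFunc() and detachFunc() above are only reachable through ATTACH and DETACH statements. A minimal usage sketch with hypothetical file names; the second ATTACH exercises one of the error paths checked above, since this version rejects ATTACH inside an open transaction:

#include <stdio.h>
#include <sqlite3.h>

static void run(sqlite3 *db, const char *zSql){
  char *zErr = 0;
  if( sqlite3_exec(db, zSql, 0, 0, &zErr)!=SQLITE_OK ){
    printf("error: %s\n", zErr);  /* e.g. "cannot ATTACH database within transaction" */
    sqlite3_free(zErr);
  }
}

int main(void){
  sqlite3 *db;
  if( sqlite3_open(":memory:", &db)!=SQLITE_OK ) return 1;
  run(db, "ATTACH DATABASE 'aux1.db' AS aux1");  /* sqlite_attach('aux1.db','aux1',NULL) */
  run(db, "CREATE TABLE aux1.t(x)");
  run(db, "BEGIN");
  run(db, "ATTACH DATABASE 'aux2.db' AS aux2");  /* rejected: transaction is open */
  run(db, "COMMIT");
  run(db, "DETACH DATABASE aux1");               /* sqlite_detach('aux1') */
  sqlite3_close(db);
  return 0;
}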
-** -** DETACH pDbname -*/ -SQLITE_PRIVATE void sqlite3Detach(Parse *pParse, Expr *pDbname){ - static const FuncDef detach_func = { - 1, /* nArg */ - SQLITE_UTF8, /* funcFlags */ - 0, /* pUserData */ - 0, /* pNext */ - detachFunc, /* xFunc */ - 0, /* xStep */ - 0, /* xFinalize */ - "sqlite_detach", /* zName */ - 0, /* pHash */ - 0 /* pDestructor */ - }; - codeAttach(pParse, SQLITE_DETACH, &detach_func, pDbname, 0, 0, pDbname); -} - -/* -** Called by the parser to compile an ATTACH statement. -** -** ATTACH p AS pDbname KEY pKey -*/ -SQLITE_PRIVATE void sqlite3Attach(Parse *pParse, Expr *p, Expr *pDbname, Expr *pKey){ - static const FuncDef attach_func = { - 3, /* nArg */ - SQLITE_UTF8, /* funcFlags */ - 0, /* pUserData */ - 0, /* pNext */ - attachFunc, /* xFunc */ - 0, /* xStep */ - 0, /* xFinalize */ - "sqlite_attach", /* zName */ - 0, /* pHash */ - 0 /* pDestructor */ - }; - codeAttach(pParse, SQLITE_ATTACH, &attach_func, p, p, pDbname, pKey); -} -#endif /* SQLITE_OMIT_ATTACH */ - -/* -** Initialize a DbFixer structure. This routine must be called prior -** to passing the structure to one of the sqliteFixAAAA() routines below. -*/ -SQLITE_PRIVATE void sqlite3FixInit( - DbFixer *pFix, /* The fixer to be initialized */ - Parse *pParse, /* Error messages will be written here */ - int iDb, /* This is the database that must be used */ - const char *zType, /* "view", "trigger", or "index" */ - const Token *pName /* Name of the view, trigger, or index */ -){ - sqlite3 *db; - - db = pParse->db; - assert( db->nDb>iDb ); - pFix->pParse = pParse; - pFix->zDb = db->aDb[iDb].zName; - pFix->pSchema = db->aDb[iDb].pSchema; - pFix->zType = zType; - pFix->pName = pName; - pFix->bVarOnly = (iDb==1); -} - -/* -** The following set of routines walk through the parse tree and assign -** a specific database to all table references where the database name -** was left unspecified in the original SQL statement. The pFix structure -** must have been initialized by a prior call to sqlite3FixInit(). -** -** These routines are used to make sure that an index, trigger, or -** view in one database does not refer to objects in a different database. -** (Exception: indices, triggers, and views in the TEMP database are -** allowed to refer to anything.) If a reference is explicitly made -** to an object in a different database, an error message is added to -** pParse->zErrMsg and these routines return non-zero. If everything -** checks out, these routines return 0. 
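The effect of the fixer described above is visible from plain SQL: an object stored in an attached (non-TEMP) database may not reference objects in another database. A small hypothetical sketch; the exact wording of the error is whatever the fix routines put into pParse->zErrMsg:

#include <stdio.h>
#include <sqlite3.h>

int main(void){
  sqlite3 *db;
  char *zErr = 0;
  if( sqlite3_open(":memory:", &db)!=SQLITE_OK ) return 1;
  sqlite3_exec(db, "CREATE TABLE t(x); ATTACH ':memory:' AS aux1;", 0, 0, 0);
  /* A view stored in aux1 that refers to main.t should be rejected with a
  ** "cannot reference objects in database main" style message. */
  if( sqlite3_exec(db, "CREATE VIEW aux1.v AS SELECT x FROM main.t",
                   0, 0, &zErr)!=SQLITE_OK ){
    printf("%s\n", zErr);
    sqlite3_free(zErr);
  }
  sqlite3_close(db);
  return 0;
}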
-*/ -SQLITE_PRIVATE int sqlite3FixSrcList( - DbFixer *pFix, /* Context of the fixation */ - SrcList *pList /* The Source list to check and modify */ -){ - int i; - const char *zDb; - struct SrcList_item *pItem; - - if( NEVER(pList==0) ) return 0; - zDb = pFix->zDb; - for(i=0, pItem=pList->a; inSrc; i++, pItem++){ - if( pFix->bVarOnly==0 ){ - if( pItem->zDatabase && sqlite3StrICmp(pItem->zDatabase, zDb) ){ - sqlite3ErrorMsg(pFix->pParse, - "%s %T cannot reference objects in database %s", - pFix->zType, pFix->pName, pItem->zDatabase); - return 1; - } - sqlite3DbFree(pFix->pParse->db, pItem->zDatabase); - pItem->zDatabase = 0; - pItem->pSchema = pFix->pSchema; - } -#if !defined(SQLITE_OMIT_VIEW) || !defined(SQLITE_OMIT_TRIGGER) - if( sqlite3FixSelect(pFix, pItem->pSelect) ) return 1; - if( sqlite3FixExpr(pFix, pItem->pOn) ) return 1; -#endif - } - return 0; -} -#if !defined(SQLITE_OMIT_VIEW) || !defined(SQLITE_OMIT_TRIGGER) -SQLITE_PRIVATE int sqlite3FixSelect( - DbFixer *pFix, /* Context of the fixation */ - Select *pSelect /* The SELECT statement to be fixed to one database */ -){ - while( pSelect ){ - if( sqlite3FixExprList(pFix, pSelect->pEList) ){ - return 1; - } - if( sqlite3FixSrcList(pFix, pSelect->pSrc) ){ - return 1; - } - if( sqlite3FixExpr(pFix, pSelect->pWhere) ){ - return 1; - } - if( sqlite3FixExprList(pFix, pSelect->pGroupBy) ){ - return 1; - } - if( sqlite3FixExpr(pFix, pSelect->pHaving) ){ - return 1; - } - if( sqlite3FixExprList(pFix, pSelect->pOrderBy) ){ - return 1; - } - if( sqlite3FixExpr(pFix, pSelect->pLimit) ){ - return 1; - } - if( sqlite3FixExpr(pFix, pSelect->pOffset) ){ - return 1; - } - pSelect = pSelect->pPrior; - } - return 0; -} -SQLITE_PRIVATE int sqlite3FixExpr( - DbFixer *pFix, /* Context of the fixation */ - Expr *pExpr /* The expression to be fixed to one database */ -){ - while( pExpr ){ - if( pExpr->op==TK_VARIABLE ){ - if( pFix->pParse->db->init.busy ){ - pExpr->op = TK_NULL; - }else{ - sqlite3ErrorMsg(pFix->pParse, "%s cannot use variables", pFix->zType); - return 1; - } - } - if( ExprHasProperty(pExpr, EP_TokenOnly) ) break; - if( ExprHasProperty(pExpr, EP_xIsSelect) ){ - if( sqlite3FixSelect(pFix, pExpr->x.pSelect) ) return 1; - }else{ - if( sqlite3FixExprList(pFix, pExpr->x.pList) ) return 1; - } - if( sqlite3FixExpr(pFix, pExpr->pRight) ){ - return 1; - } - pExpr = pExpr->pLeft; - } - return 0; -} -SQLITE_PRIVATE int sqlite3FixExprList( - DbFixer *pFix, /* Context of the fixation */ - ExprList *pList /* The expression to be fixed to one database */ -){ - int i; - struct ExprList_item *pItem; - if( pList==0 ) return 0; - for(i=0, pItem=pList->a; inExpr; i++, pItem++){ - if( sqlite3FixExpr(pFix, pItem->pExpr) ){ - return 1; - } - } - return 0; -} -#endif - -#ifndef SQLITE_OMIT_TRIGGER -SQLITE_PRIVATE int sqlite3FixTriggerStep( - DbFixer *pFix, /* Context of the fixation */ - TriggerStep *pStep /* The trigger step be fixed to one database */ -){ - while( pStep ){ - if( sqlite3FixSelect(pFix, pStep->pSelect) ){ - return 1; - } - if( sqlite3FixExpr(pFix, pStep->pWhere) ){ - return 1; - } - if( sqlite3FixExprList(pFix, pStep->pExprList) ){ - return 1; - } - pStep = pStep->pNext; - } - return 0; -} -#endif - -/************** End of attach.c **********************************************/ -/************** Begin file auth.c ********************************************/ -/* -** 2003 January 11 -** -** The author disclaims copyright to this source code. In place of -** a legal notice, here is a blessing: -** -** May you do good and not evil. 
-** May you find forgiveness for yourself and forgive others. -** May you share freely, never taking more than you give. -** -************************************************************************* -** This file contains code used to implement the sqlite3_set_authorizer() -** API. This facility is an optional feature of the library. Embedded -** systems that do not need this facility may omit it by recompiling -** the library with -DSQLITE_OMIT_AUTHORIZATION=1 -*/ - -/* -** All of the code in this file may be omitted by defining a single -** macro. -*/ -#ifndef SQLITE_OMIT_AUTHORIZATION - -/* -** Set or clear the access authorization function. -** -** The access authorization function is be called during the compilation -** phase to verify that the user has read and/or write access permission on -** various fields of the database. The first argument to the auth function -** is a copy of the 3rd argument to this routine. The second argument -** to the auth function is one of these constants: -** -** SQLITE_CREATE_INDEX -** SQLITE_CREATE_TABLE -** SQLITE_CREATE_TEMP_INDEX -** SQLITE_CREATE_TEMP_TABLE -** SQLITE_CREATE_TEMP_TRIGGER -** SQLITE_CREATE_TEMP_VIEW -** SQLITE_CREATE_TRIGGER -** SQLITE_CREATE_VIEW -** SQLITE_DELETE -** SQLITE_DROP_INDEX -** SQLITE_DROP_TABLE -** SQLITE_DROP_TEMP_INDEX -** SQLITE_DROP_TEMP_TABLE -** SQLITE_DROP_TEMP_TRIGGER -** SQLITE_DROP_TEMP_VIEW -** SQLITE_DROP_TRIGGER -** SQLITE_DROP_VIEW -** SQLITE_INSERT -** SQLITE_PRAGMA -** SQLITE_READ -** SQLITE_SELECT -** SQLITE_TRANSACTION -** SQLITE_UPDATE -** -** The third and fourth arguments to the auth function are the name of -** the table and the column that are being accessed. The auth function -** should return either SQLITE_OK, SQLITE_DENY, or SQLITE_IGNORE. If -** SQLITE_OK is returned, it means that access is allowed. SQLITE_DENY -** means that the SQL statement will never-run - the sqlite3_exec() call -** will return with an error. SQLITE_IGNORE means that the SQL statement -** should run but attempts to read the specified column will return NULL -** and attempts to write the column will be ignored. -** -** Setting the auth function to NULL disables this hook. The default -** setting of the auth function is NULL. -*/ -SQLITE_API int sqlite3_set_authorizer( - sqlite3 *db, - int (*xAuth)(void*,int,const char*,const char*,const char*,const char*), - void *pArg -){ - sqlite3_mutex_enter(db->mutex); - db->xAuth = xAuth; - db->pAuthArg = pArg; - sqlite3ExpirePreparedStatements(db); - sqlite3_mutex_leave(db->mutex); - return SQLITE_OK; -} - -/* -** Write an error message into pParse->zErrMsg that explains that the -** user-supplied authorization function returned an illegal value. -*/ -static void sqliteAuthBadReturnCode(Parse *pParse){ - sqlite3ErrorMsg(pParse, "authorizer malfunction"); - pParse->rc = SQLITE_ERROR; -} - -/* -** Invoke the authorization callback for permission to read column zCol from -** table zTab in database zDb. This function assumes that an authorization -** callback has been registered (i.e. that sqlite3.xAuth is not NULL). -** -** If SQLITE_IGNORE is returned and pExpr is not NULL, then pExpr is changed -** to an SQL NULL expression. Otherwise, if pExpr is NULL, then SQLITE_IGNORE -** is treated as SQLITE_DENY. In this case an error is left in pParse. -*/ -SQLITE_PRIVATE int sqlite3AuthReadCol( - Parse *pParse, /* The parser context */ - const char *zTab, /* Table name */ - const char *zCol, /* Column name */ - int iDb /* Index of containing database. 
*/ -){ - sqlite3 *db = pParse->db; /* Database handle */ - char *zDb = db->aDb[iDb].zName; /* Name of attached database */ - int rc; /* Auth callback return code */ - - rc = db->xAuth(db->pAuthArg, SQLITE_READ, zTab,zCol,zDb,pParse->zAuthContext); - if( rc==SQLITE_DENY ){ - if( db->nDb>2 || iDb!=0 ){ - sqlite3ErrorMsg(pParse, "access to %s.%s.%s is prohibited",zDb,zTab,zCol); - }else{ - sqlite3ErrorMsg(pParse, "access to %s.%s is prohibited", zTab, zCol); - } - pParse->rc = SQLITE_AUTH; - }else if( rc!=SQLITE_IGNORE && rc!=SQLITE_OK ){ - sqliteAuthBadReturnCode(pParse); - } - return rc; -} - -/* -** The pExpr should be a TK_COLUMN expression. The table referred to -** is in pTabList or else it is the NEW or OLD table of a trigger. -** Check to see if it is OK to read this particular column. -** -** If the auth function returns SQLITE_IGNORE, change the TK_COLUMN -** instruction into a TK_NULL. If the auth function returns SQLITE_DENY, -** then generate an error. -*/ -SQLITE_PRIVATE void sqlite3AuthRead( - Parse *pParse, /* The parser context */ - Expr *pExpr, /* The expression to check authorization on */ - Schema *pSchema, /* The schema of the expression */ - SrcList *pTabList /* All table that pExpr might refer to */ -){ - sqlite3 *db = pParse->db; - Table *pTab = 0; /* The table being read */ - const char *zCol; /* Name of the column of the table */ - int iSrc; /* Index in pTabList->a[] of table being read */ - int iDb; /* The index of the database the expression refers to */ - int iCol; /* Index of column in table */ - - if( db->xAuth==0 ) return; - iDb = sqlite3SchemaToIndex(pParse->db, pSchema); - if( iDb<0 ){ - /* An attempt to read a column out of a subquery or other - ** temporary table. */ - return; - } - - assert( pExpr->op==TK_COLUMN || pExpr->op==TK_TRIGGER ); - if( pExpr->op==TK_TRIGGER ){ - pTab = pParse->pTriggerTab; - }else{ - assert( pTabList ); - for(iSrc=0; ALWAYS(iSrcnSrc); iSrc++){ - if( pExpr->iTable==pTabList->a[iSrc].iCursor ){ - pTab = pTabList->a[iSrc].pTab; - break; - } - } - } - iCol = pExpr->iColumn; - if( NEVER(pTab==0) ) return; - - if( iCol>=0 ){ - assert( iColnCol ); - zCol = pTab->aCol[iCol].zName; - }else if( pTab->iPKey>=0 ){ - assert( pTab->iPKeynCol ); - zCol = pTab->aCol[pTab->iPKey].zName; - }else{ - zCol = "ROWID"; - } - assert( iDb>=0 && iDbnDb ); - if( SQLITE_IGNORE==sqlite3AuthReadCol(pParse, pTab->zName, zCol, iDb) ){ - pExpr->op = TK_NULL; - } -} - -/* -** Do an authorization check using the code and arguments given. Return -** either SQLITE_OK (zero) or SQLITE_IGNORE or SQLITE_DENY. If SQLITE_DENY -** is returned, then the error count and error message in pParse are -** modified appropriately. -*/ -SQLITE_PRIVATE int sqlite3AuthCheck( - Parse *pParse, - int code, - const char *zArg1, - const char *zArg2, - const char *zArg3 -){ - sqlite3 *db = pParse->db; - int rc; - - /* Don't do any authorization checks if the database is initialising - ** or if the parser is being invoked from within sqlite3_declare_vtab. - */ - if( db->init.busy || IN_DECLARE_VTAB ){ - return SQLITE_OK; - } - - if( db->xAuth==0 ){ - return SQLITE_OK; - } - rc = db->xAuth(db->pAuthArg, code, zArg1, zArg2, zArg3, pParse->zAuthContext); - if( rc==SQLITE_DENY ){ - sqlite3ErrorMsg(pParse, "not authorized"); - pParse->rc = SQLITE_AUTH; - }else if( rc!=SQLITE_OK && rc!=SQLITE_IGNORE ){ - rc = SQLITE_DENY; - sqliteAuthBadReturnCode(pParse); - } - return rc; -} - -/* -** Push an authorization context. 
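The callback contract spelled out above is easiest to see with a concrete authorizer. A minimal sketch with hypothetical table and column names: reads of t.secret are turned into NULL via SQLITE_IGNORE, and DELETE is refused via SQLITE_DENY, which surfaces as the "not authorized" error raised by sqlite3AuthCheck():

#include <stdio.h>
#include <string.h>
#include <sqlite3.h>

/* Hide reads of the "secret" column and refuse DELETE entirely. */
static int auth(void *pArg, int op, const char *z1, const char *z2,
                const char *z3, const char *z4){
  (void)pArg; (void)z1; (void)z3; (void)z4;
  if( op==SQLITE_READ && z2 && strcmp(z2, "secret")==0 ) return SQLITE_IGNORE;
  if( op==SQLITE_DELETE ) return SQLITE_DENY;
  return SQLITE_OK;
}

static int show(void *a, int n, char **v, char **c){
  (void)a; (void)n; (void)c;
  printf("x=%s secret=%s\n", v[0], v[1] ? v[1] : "(null)");
  return 0;
}

int main(void){
  sqlite3 *db;
  char *zErr = 0;
  if( sqlite3_open(":memory:", &db)!=SQLITE_OK ) return 1;
  sqlite3_exec(db, "CREATE TABLE t(x, secret);"
                   "INSERT INTO t VALUES(1,'hush');", 0, 0, 0);
  sqlite3_set_authorizer(db, auth, 0);
  sqlite3_exec(db, "SELECT x, secret FROM t", show, 0, 0);  /* secret reads as NULL */
  if( sqlite3_exec(db, "DELETE FROM t", 0, 0, &zErr)!=SQLITE_OK ){
    printf("delete: %s\n", zErr);                           /* "not authorized" */
    sqlite3_free(zErr);
  }
  sqlite3_close(db);
  return 0;
}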
After this routine is called, the -** zArg3 argument to authorization callbacks will be zContext until -** popped. Or if pParse==0, this routine is a no-op. -*/ -SQLITE_PRIVATE void sqlite3AuthContextPush( - Parse *pParse, - AuthContext *pContext, - const char *zContext -){ - assert( pParse ); - pContext->pParse = pParse; - pContext->zAuthContext = pParse->zAuthContext; - pParse->zAuthContext = zContext; -} - -/* -** Pop an authorization context that was previously pushed -** by sqlite3AuthContextPush -*/ -SQLITE_PRIVATE void sqlite3AuthContextPop(AuthContext *pContext){ - if( pContext->pParse ){ - pContext->pParse->zAuthContext = pContext->zAuthContext; - pContext->pParse = 0; - } -} - -#endif /* SQLITE_OMIT_AUTHORIZATION */ - -/************** End of auth.c ************************************************/ -/************** Begin file build.c *******************************************/ -/* -** 2001 September 15 -** -** The author disclaims copyright to this source code. In place of -** a legal notice, here is a blessing: -** -** May you do good and not evil. -** May you find forgiveness for yourself and forgive others. -** May you share freely, never taking more than you give. -** -************************************************************************* -** This file contains C code routines that are called by the SQLite parser -** when syntax rules are reduced. The routines in this file handle the -** following kinds of SQL syntax: -** -** CREATE TABLE -** DROP TABLE -** CREATE INDEX -** DROP INDEX -** creating ID lists -** BEGIN TRANSACTION -** COMMIT -** ROLLBACK -*/ - -/* -** This routine is called when a new SQL statement is beginning to -** be parsed. Initialize the pParse structure as needed. -*/ -SQLITE_PRIVATE void sqlite3BeginParse(Parse *pParse, int explainFlag){ - pParse->explain = (u8)explainFlag; - pParse->nVar = 0; -} - -#ifndef SQLITE_OMIT_SHARED_CACHE -/* -** The TableLock structure is only used by the sqlite3TableLock() and -** codeTableLocks() functions. -*/ -struct TableLock { - int iDb; /* The database containing the table to be locked */ - int iTab; /* The root page of the table to be locked */ - u8 isWriteLock; /* True for write lock. False for a read lock */ - const char *zName; /* Name of the table */ -}; - -/* -** Record the fact that we want to lock a table at run-time. -** -** The table to be locked has root page iTab and is found in database iDb. -** A read or a write lock can be taken depending on isWritelock. -** -** This routine just records the fact that the lock is desired. The -** code to make the lock occur is generated by a later call to -** codeTableLocks() which occurs during sqlite3FinishCoding(). 
-*/ -SQLITE_PRIVATE void sqlite3TableLock( - Parse *pParse, /* Parsing context */ - int iDb, /* Index of the database containing the table to lock */ - int iTab, /* Root page number of the table to be locked */ - u8 isWriteLock, /* True for a write lock */ - const char *zName /* Name of the table to be locked */ -){ - Parse *pToplevel = sqlite3ParseToplevel(pParse); - int i; - int nBytes; - TableLock *p; - assert( iDb>=0 ); - - for(i=0; inTableLock; i++){ - p = &pToplevel->aTableLock[i]; - if( p->iDb==iDb && p->iTab==iTab ){ - p->isWriteLock = (p->isWriteLock || isWriteLock); - return; - } - } - - nBytes = sizeof(TableLock) * (pToplevel->nTableLock+1); - pToplevel->aTableLock = - sqlite3DbReallocOrFree(pToplevel->db, pToplevel->aTableLock, nBytes); - if( pToplevel->aTableLock ){ - p = &pToplevel->aTableLock[pToplevel->nTableLock++]; - p->iDb = iDb; - p->iTab = iTab; - p->isWriteLock = isWriteLock; - p->zName = zName; - }else{ - pToplevel->nTableLock = 0; - pToplevel->db->mallocFailed = 1; - } -} - -/* -** Code an OP_TableLock instruction for each table locked by the -** statement (configured by calls to sqlite3TableLock()). -*/ -static void codeTableLocks(Parse *pParse){ - int i; - Vdbe *pVdbe; - - pVdbe = sqlite3GetVdbe(pParse); - assert( pVdbe!=0 ); /* sqlite3GetVdbe cannot fail: VDBE already allocated */ - - for(i=0; inTableLock; i++){ - TableLock *p = &pParse->aTableLock[i]; - int p1 = p->iDb; - sqlite3VdbeAddOp4(pVdbe, OP_TableLock, p1, p->iTab, p->isWriteLock, - p->zName, P4_STATIC); - } -} -#else - #define codeTableLocks(x) -#endif - -/* -** This routine is called after a single SQL statement has been -** parsed and a VDBE program to execute that statement has been -** prepared. This routine puts the finishing touches on the -** VDBE program and resets the pParse structure for the next -** parse. -** -** Note that if an error occurred, it might be the case that -** no VDBE code was generated. -*/ -SQLITE_PRIVATE void sqlite3FinishCoding(Parse *pParse){ - sqlite3 *db; - Vdbe *v; - - assert( pParse->pToplevel==0 ); - db = pParse->db; - if( db->mallocFailed ) return; - if( pParse->nested ) return; - if( pParse->nErr ) return; - - /* Begin by generating some termination code at the end of the - ** vdbe program - */ - v = sqlite3GetVdbe(pParse); - assert( !pParse->isMultiWrite - || sqlite3VdbeAssertMayAbort(v, pParse->mayAbort)); - if( v ){ - while( sqlite3VdbeDeletePriorOpcode(v, OP_Close) ){} - sqlite3VdbeAddOp0(v, OP_Halt); - - /* The cookie mask contains one bit for each database file open. - ** (Bit 0 is for main, bit 1 is for temp, and so forth.) Bits are - ** set for each database that is used. Generate code to start a - ** transaction on each used database and to verify the schema cookie - ** on each used database. 
- */ - if( db->mallocFailed==0 && (pParse->cookieMask || pParse->pConstExpr) ){ - yDbMask mask; - int iDb, i; - assert( sqlite3VdbeGetOp(v, 0)->opcode==OP_Init ); - sqlite3VdbeJumpHere(v, 0); - for(iDb=0, mask=1; iDbnDb; mask<<=1, iDb++){ - if( (mask & pParse->cookieMask)==0 ) continue; - sqlite3VdbeUsesBtree(v, iDb); - sqlite3VdbeAddOp4Int(v, - OP_Transaction, /* Opcode */ - iDb, /* P1 */ - (mask & pParse->writeMask)!=0, /* P2 */ - pParse->cookieValue[iDb], /* P3 */ - db->aDb[iDb].pSchema->iGeneration /* P4 */ - ); - if( db->init.busy==0 ) sqlite3VdbeChangeP5(v, 1); - } -#ifndef SQLITE_OMIT_VIRTUALTABLE - for(i=0; inVtabLock; i++){ - char *vtab = (char *)sqlite3GetVTable(db, pParse->apVtabLock[i]); - sqlite3VdbeAddOp4(v, OP_VBegin, 0, 0, 0, vtab, P4_VTAB); - } - pParse->nVtabLock = 0; -#endif - - /* Once all the cookies have been verified and transactions opened, - ** obtain the required table-locks. This is a no-op unless the - ** shared-cache feature is enabled. - */ - codeTableLocks(pParse); - - /* Initialize any AUTOINCREMENT data structures required. - */ - sqlite3AutoincrementBegin(pParse); - - /* Code constant expressions that where factored out of inner loops */ - if( pParse->pConstExpr ){ - ExprList *pEL = pParse->pConstExpr; - pParse->okConstFactor = 0; - for(i=0; inExpr; i++){ - sqlite3ExprCode(pParse, pEL->a[i].pExpr, pEL->a[i].u.iConstExprReg); - } - } - - /* Finally, jump back to the beginning of the executable code. */ - sqlite3VdbeAddOp2(v, OP_Goto, 0, 1); - } - } - - - /* Get the VDBE program ready for execution - */ - if( v && ALWAYS(pParse->nErr==0) && !db->mallocFailed ){ - assert( pParse->iCacheLevel==0 ); /* Disables and re-enables match */ - /* A minimum of one cursor is required if autoincrement is used - * See ticket [a696379c1f08866] */ - if( pParse->pAinc!=0 && pParse->nTab==0 ) pParse->nTab = 1; - sqlite3VdbeMakeReady(v, pParse); - pParse->rc = SQLITE_DONE; - pParse->colNamesSet = 0; - }else{ - pParse->rc = SQLITE_ERROR; - } - pParse->nTab = 0; - pParse->nMem = 0; - pParse->nSet = 0; - pParse->nVar = 0; - pParse->cookieMask = 0; -} - -/* -** Run the parser and code generator recursively in order to generate -** code for the SQL statement given onto the end of the pParse context -** currently under construction. When the parser is run recursively -** this way, the final OP_Halt is not appended and other initialization -** and finalization steps are omitted because those are handling by the -** outermost parser. -** -** Not everything is nestable. This facility is designed to permit -** INSERT, UPDATE, and DELETE operations against SQLITE_MASTER. Use -** care if you decide to try to use this routine for some other purposes. 
-*/ -SQLITE_PRIVATE void sqlite3NestedParse(Parse *pParse, const char *zFormat, ...){ - va_list ap; - char *zSql; - char *zErrMsg = 0; - sqlite3 *db = pParse->db; -# define SAVE_SZ (sizeof(Parse) - offsetof(Parse,nVar)) - char saveBuf[SAVE_SZ]; - - if( pParse->nErr ) return; - assert( pParse->nested<10 ); /* Nesting should only be of limited depth */ - va_start(ap, zFormat); - zSql = sqlite3VMPrintf(db, zFormat, ap); - va_end(ap); - if( zSql==0 ){ - return; /* A malloc must have failed */ - } - pParse->nested++; - memcpy(saveBuf, &pParse->nVar, SAVE_SZ); - memset(&pParse->nVar, 0, SAVE_SZ); - sqlite3RunParser(pParse, zSql, &zErrMsg); - sqlite3DbFree(db, zErrMsg); - sqlite3DbFree(db, zSql); - memcpy(&pParse->nVar, saveBuf, SAVE_SZ); - pParse->nested--; -} - -/* -** Locate the in-memory structure that describes a particular database -** table given the name of that table and (optionally) the name of the -** database containing the table. Return NULL if not found. -** -** If zDatabase is 0, all databases are searched for the table and the -** first matching table is returned. (No checking for duplicate table -** names is done.) The search order is TEMP first, then MAIN, then any -** auxiliary databases added using the ATTACH command. -** -** See also sqlite3LocateTable(). -*/ -SQLITE_PRIVATE Table *sqlite3FindTable(sqlite3 *db, const char *zName, const char *zDatabase){ - Table *p = 0; - int i; - int nName; - assert( zName!=0 ); - nName = sqlite3Strlen30(zName); - /* All mutexes are required for schema access. Make sure we hold them. */ - assert( zDatabase!=0 || sqlite3BtreeHoldsAllMutexes(db) ); - for(i=OMIT_TEMPDB; inDb; i++){ - int j = (i<2) ? i^1 : i; /* Search TEMP before MAIN */ - if( zDatabase!=0 && sqlite3StrICmp(zDatabase, db->aDb[j].zName) ) continue; - assert( sqlite3SchemaMutexHeld(db, j, 0) ); - p = sqlite3HashFind(&db->aDb[j].pSchema->tblHash, zName, nName); - if( p ) break; - } - return p; -} - -/* -** Locate the in-memory structure that describes a particular database -** table given the name of that table and (optionally) the name of the -** database containing the table. Return NULL if not found. Also leave an -** error message in pParse->zErrMsg. -** -** The difference between this routine and sqlite3FindTable() is that this -** routine leaves an error message in pParse->zErrMsg where -** sqlite3FindTable() does not. -*/ -SQLITE_PRIVATE Table *sqlite3LocateTable( - Parse *pParse, /* context in which to report errors */ - int isView, /* True if looking for a VIEW rather than a TABLE */ - const char *zName, /* Name of the table we are looking for */ - const char *zDbase /* Name of the database. Might be NULL */ -){ - Table *p; - - /* Read the database schema. If an error occurs, leave an error message - ** and code in pParse and return NULL. */ - if( SQLITE_OK!=sqlite3ReadSchema(pParse) ){ - return 0; - } - - p = sqlite3FindTable(pParse->db, zName, zDbase); - if( p==0 ){ - const char *zMsg = isView ? "no such view" : "no such table"; - if( zDbase ){ - sqlite3ErrorMsg(pParse, "%s: %s.%s", zMsg, zDbase, zName); - }else{ - sqlite3ErrorMsg(pParse, "%s: %s", zMsg, zName); - } - pParse->checkSchema = 1; - } - return p; -} - -/* -** Locate the table identified by *p. -** -** This is a wrapper around sqlite3LocateTable(). The difference between -** sqlite3LocateTable() and this function is that this function restricts -** the search to schema (p->pSchema) if it is not NULL. p->pSchema may be -** non-NULL if it is part of a view or trigger program definition. 
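/*
** Editorial sketch (not part of the original file): the TEMP-before-MAIN
** search order implemented by sqlite3FindTable() is visible from plain SQL.
** Assumes <sqlite3.h> and an open connection db; error handling omitted.
*/
static void tempShadowsMain(sqlite3 *db){
  sqlite3_exec(db, "CREATE TABLE t(x);",      0, 0, 0);   /* main.t */
  sqlite3_exec(db, "CREATE TEMP TABLE t(y);", 0, 0, 0);   /* temp.t */
  /* An unqualified "t" now resolves to temp.t; use "main.t" to reach the
  ** persistent table explicitly. */
  sqlite3_exec(db, "INSERT INTO t(y) VALUES(1);",      0, 0, 0);
  sqlite3_exec(db, "INSERT INTO main.t(x) VALUES(2);", 0, 0, 0);
}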
See -** sqlite3FixSrcList() for details. -*/ -SQLITE_PRIVATE Table *sqlite3LocateTableItem( - Parse *pParse, - int isView, - struct SrcList_item *p -){ - const char *zDb; - assert( p->pSchema==0 || p->zDatabase==0 ); - if( p->pSchema ){ - int iDb = sqlite3SchemaToIndex(pParse->db, p->pSchema); - zDb = pParse->db->aDb[iDb].zName; - }else{ - zDb = p->zDatabase; - } - return sqlite3LocateTable(pParse, isView, p->zName, zDb); -} - -/* -** Locate the in-memory structure that describes -** a particular index given the name of that index -** and the name of the database that contains the index. -** Return NULL if not found. -** -** If zDatabase is 0, all databases are searched for the -** table and the first matching index is returned. (No checking -** for duplicate index names is done.) The search order is -** TEMP first, then MAIN, then any auxiliary databases added -** using the ATTACH command. -*/ -SQLITE_PRIVATE Index *sqlite3FindIndex(sqlite3 *db, const char *zName, const char *zDb){ - Index *p = 0; - int i; - int nName = sqlite3Strlen30(zName); - /* All mutexes are required for schema access. Make sure we hold them. */ - assert( zDb!=0 || sqlite3BtreeHoldsAllMutexes(db) ); - for(i=OMIT_TEMPDB; inDb; i++){ - int j = (i<2) ? i^1 : i; /* Search TEMP before MAIN */ - Schema *pSchema = db->aDb[j].pSchema; - assert( pSchema ); - if( zDb && sqlite3StrICmp(zDb, db->aDb[j].zName) ) continue; - assert( sqlite3SchemaMutexHeld(db, j, 0) ); - p = sqlite3HashFind(&pSchema->idxHash, zName, nName); - if( p ) break; - } - return p; -} - -/* -** Reclaim the memory used by an index -*/ -static void freeIndex(sqlite3 *db, Index *p){ -#ifndef SQLITE_OMIT_ANALYZE - sqlite3DeleteIndexSamples(db, p); -#endif - if( db==0 || db->pnBytesFreed==0 ) sqlite3KeyInfoUnref(p->pKeyInfo); - sqlite3ExprDelete(db, p->pPartIdxWhere); - sqlite3DbFree(db, p->zColAff); - if( p->isResized ) sqlite3DbFree(db, p->azColl); - sqlite3DbFree(db, p); -} - -/* -** For the index called zIdxName which is found in the database iDb, -** unlike that index from its Table then remove the index from -** the index hash table and free all memory structures associated -** with the index. -*/ -SQLITE_PRIVATE void sqlite3UnlinkAndDeleteIndex(sqlite3 *db, int iDb, const char *zIdxName){ - Index *pIndex; - int len; - Hash *pHash; - - assert( sqlite3SchemaMutexHeld(db, iDb, 0) ); - pHash = &db->aDb[iDb].pSchema->idxHash; - len = sqlite3Strlen30(zIdxName); - pIndex = sqlite3HashInsert(pHash, zIdxName, len, 0); - if( ALWAYS(pIndex) ){ - if( pIndex->pTable->pIndex==pIndex ){ - pIndex->pTable->pIndex = pIndex->pNext; - }else{ - Index *p; - /* Justification of ALWAYS(); The index must be on the list of - ** indices. */ - p = pIndex->pTable->pIndex; - while( ALWAYS(p) && p->pNext!=pIndex ){ p = p->pNext; } - if( ALWAYS(p && p->pNext==pIndex) ){ - p->pNext = pIndex->pNext; - } - } - freeIndex(db, pIndex); - } - db->flags |= SQLITE_InternChanges; -} - -/* -** Look through the list of open database files in db->aDb[] and if -** any have been closed, remove them from the list. Reallocate the -** db->aDb[] structure to a smaller size, if possible. -** -** Entry 0 (the "main" database) and entry 1 (the "temp" database) -** are never candidates for being collapsed. 
-*/ -SQLITE_PRIVATE void sqlite3CollapseDatabaseArray(sqlite3 *db){ - int i, j; - for(i=j=2; inDb; i++){ - struct Db *pDb = &db->aDb[i]; - if( pDb->pBt==0 ){ - sqlite3DbFree(db, pDb->zName); - pDb->zName = 0; - continue; - } - if( jaDb[j] = db->aDb[i]; - } - j++; - } - memset(&db->aDb[j], 0, (db->nDb-j)*sizeof(db->aDb[j])); - db->nDb = j; - if( db->nDb<=2 && db->aDb!=db->aDbStatic ){ - memcpy(db->aDbStatic, db->aDb, 2*sizeof(db->aDb[0])); - sqlite3DbFree(db, db->aDb); - db->aDb = db->aDbStatic; - } -} - -/* -** Reset the schema for the database at index iDb. Also reset the -** TEMP schema. -*/ -SQLITE_PRIVATE void sqlite3ResetOneSchema(sqlite3 *db, int iDb){ - Db *pDb; - assert( iDbnDb ); - - /* Case 1: Reset the single schema identified by iDb */ - pDb = &db->aDb[iDb]; - assert( sqlite3SchemaMutexHeld(db, iDb, 0) ); - assert( pDb->pSchema!=0 ); - sqlite3SchemaClear(pDb->pSchema); - - /* If any database other than TEMP is reset, then also reset TEMP - ** since TEMP might be holding triggers that reference tables in the - ** other database. - */ - if( iDb!=1 ){ - pDb = &db->aDb[1]; - assert( pDb->pSchema!=0 ); - sqlite3SchemaClear(pDb->pSchema); - } - return; -} - -/* -** Erase all schema information from all attached databases (including -** "main" and "temp") for a single database connection. -*/ -SQLITE_PRIVATE void sqlite3ResetAllSchemasOfConnection(sqlite3 *db){ - int i; - sqlite3BtreeEnterAll(db); - for(i=0; inDb; i++){ - Db *pDb = &db->aDb[i]; - if( pDb->pSchema ){ - sqlite3SchemaClear(pDb->pSchema); - } - } - db->flags &= ~SQLITE_InternChanges; - sqlite3VtabUnlockList(db); - sqlite3BtreeLeaveAll(db); - sqlite3CollapseDatabaseArray(db); -} - -/* -** This routine is called when a commit occurs. -*/ -SQLITE_PRIVATE void sqlite3CommitInternalChanges(sqlite3 *db){ - db->flags &= ~SQLITE_InternChanges; -} - -/* -** Delete memory allocated for the column names of a table or view (the -** Table.aCol[] array). -*/ -static void sqliteDeleteColumnNames(sqlite3 *db, Table *pTable){ - int i; - Column *pCol; - assert( pTable!=0 ); - if( (pCol = pTable->aCol)!=0 ){ - for(i=0; inCol; i++, pCol++){ - sqlite3DbFree(db, pCol->zName); - sqlite3ExprDelete(db, pCol->pDflt); - sqlite3DbFree(db, pCol->zDflt); - sqlite3DbFree(db, pCol->zType); - sqlite3DbFree(db, pCol->zColl); - } - sqlite3DbFree(db, pTable->aCol); - } -} - -/* -** Remove the memory data structures associated with the given -** Table. No changes are made to disk by this routine. -** -** This routine just deletes the data structure. It does not unlink -** the table data structure from the hash table. But it does destroy -** memory structures of the indices and foreign keys associated with -** the table. -** -** The db parameter is optional. It is needed if the Table object -** contains lookaside memory. (Table objects in the schema do not use -** lookaside memory, but some ephemeral Table objects do.) Or the -** db parameter can be used with db->pnBytesFreed to measure the memory -** used by the Table object. -*/ -SQLITE_PRIVATE void sqlite3DeleteTable(sqlite3 *db, Table *pTable){ - Index *pIndex, *pNext; - TESTONLY( int nLookaside; ) /* Used to verify lookaside not used for schema */ - - assert( !pTable || pTable->nRef>0 ); - - /* Do not delete the table until the reference count reaches zero. */ - if( !pTable ) return; - if( ((!db || db->pnBytesFreed==0) && (--pTable->nRef)>0) ) return; - - /* Record the number of outstanding lookaside allocations in schema Tables - ** prior to doing any free() operations. 
Since schema Tables do not use - ** lookaside, this number should not change. */ - TESTONLY( nLookaside = (db && (pTable->tabFlags & TF_Ephemeral)==0) ? - db->lookaside.nOut : 0 ); - - /* Delete all indices associated with this table. */ - for(pIndex = pTable->pIndex; pIndex; pIndex=pNext){ - pNext = pIndex->pNext; - assert( pIndex->pSchema==pTable->pSchema ); - if( !db || db->pnBytesFreed==0 ){ - char *zName = pIndex->zName; - TESTONLY ( Index *pOld = ) sqlite3HashInsert( - &pIndex->pSchema->idxHash, zName, sqlite3Strlen30(zName), 0 - ); - assert( db==0 || sqlite3SchemaMutexHeld(db, 0, pIndex->pSchema) ); - assert( pOld==pIndex || pOld==0 ); - } - freeIndex(db, pIndex); - } - - /* Delete any foreign keys attached to this table. */ - sqlite3FkDelete(db, pTable); - - /* Delete the Table structure itself. - */ - sqliteDeleteColumnNames(db, pTable); - sqlite3DbFree(db, pTable->zName); - sqlite3DbFree(db, pTable->zColAff); - sqlite3SelectDelete(db, pTable->pSelect); -#ifndef SQLITE_OMIT_CHECK - sqlite3ExprListDelete(db, pTable->pCheck); -#endif -#ifndef SQLITE_OMIT_VIRTUALTABLE - sqlite3VtabClear(db, pTable); -#endif - sqlite3DbFree(db, pTable); - - /* Verify that no lookaside memory was used by schema tables */ - assert( nLookaside==0 || nLookaside==db->lookaside.nOut ); -} - -/* -** Unlink the given table from the hash tables and the delete the -** table structure with all its indices and foreign keys. -*/ -SQLITE_PRIVATE void sqlite3UnlinkAndDeleteTable(sqlite3 *db, int iDb, const char *zTabName){ - Table *p; - Db *pDb; - - assert( db!=0 ); - assert( iDb>=0 && iDbnDb ); - assert( zTabName ); - assert( sqlite3SchemaMutexHeld(db, iDb, 0) ); - testcase( zTabName[0]==0 ); /* Zero-length table names are allowed */ - pDb = &db->aDb[iDb]; - p = sqlite3HashInsert(&pDb->pSchema->tblHash, zTabName, - sqlite3Strlen30(zTabName),0); - sqlite3DeleteTable(db, p); - db->flags |= SQLITE_InternChanges; -} - -/* -** Given a token, return a string that consists of the text of that -** token. Space to hold the returned string -** is obtained from sqliteMalloc() and must be freed by the calling -** function. -** -** Any quotation marks (ex: "name", 'name', [name], or `name`) that -** surround the body of the token are removed. -** -** Tokens are often just pointers into the original SQL text and so -** are not \000 terminated and are not persistent. The returned string -** is \000 terminated and is persistent. -*/ -SQLITE_PRIVATE char *sqlite3NameFromToken(sqlite3 *db, Token *pName){ - char *zName; - if( pName ){ - zName = sqlite3DbStrNDup(db, (char*)pName->z, pName->n); - sqlite3Dequote(zName); - }else{ - zName = 0; - } - return zName; -} - -/* -** Open the sqlite_master table stored in database number iDb for -** writing. The table is opened using cursor 0. -*/ -SQLITE_PRIVATE void sqlite3OpenMasterTable(Parse *p, int iDb){ - Vdbe *v = sqlite3GetVdbe(p); - sqlite3TableLock(p, iDb, MASTER_ROOT, 1, SCHEMA_TABLE(iDb)); - sqlite3VdbeAddOp4Int(v, OP_OpenWrite, 0, MASTER_ROOT, iDb, 5); - if( p->nTab==0 ){ - p->nTab = 1; - } -} - -/* -** Parameter zName points to a nul-terminated buffer containing the name -** of a database ("main", "temp" or the name of an attached db). This -** function returns the index of the named database in db->aDb[], or -** -1 if the named db cannot be found. 
-*/ -SQLITE_PRIVATE int sqlite3FindDbName(sqlite3 *db, const char *zName){ - int i = -1; /* Database number */ - if( zName ){ - Db *pDb; - int n = sqlite3Strlen30(zName); - for(i=(db->nDb-1), pDb=&db->aDb[i]; i>=0; i--, pDb--){ - if( (!OMIT_TEMPDB || i!=1 ) && n==sqlite3Strlen30(pDb->zName) && - 0==sqlite3StrICmp(pDb->zName, zName) ){ - break; - } - } - } - return i; -} - -/* -** The token *pName contains the name of a database (either "main" or -** "temp" or the name of an attached db). This routine returns the -** index of the named database in db->aDb[], or -1 if the named db -** does not exist. -*/ -SQLITE_PRIVATE int sqlite3FindDb(sqlite3 *db, Token *pName){ - int i; /* Database number */ - char *zName; /* Name we are searching for */ - zName = sqlite3NameFromToken(db, pName); - i = sqlite3FindDbName(db, zName); - sqlite3DbFree(db, zName); - return i; -} - -/* The table or view or trigger name is passed to this routine via tokens -** pName1 and pName2. If the table name was fully qualified, for example: -** -** CREATE TABLE xxx.yyy (...); -** -** Then pName1 is set to "xxx" and pName2 "yyy". On the other hand if -** the table name is not fully qualified, i.e.: -** -** CREATE TABLE yyy(...); -** -** Then pName1 is set to "yyy" and pName2 is "". -** -** This routine sets the *ppUnqual pointer to point at the token (pName1 or -** pName2) that stores the unqualified table name. The index of the -** database "xxx" is returned. -*/ -SQLITE_PRIVATE int sqlite3TwoPartName( - Parse *pParse, /* Parsing and code generating context */ - Token *pName1, /* The "xxx" in the name "xxx.yyy" or "xxx" */ - Token *pName2, /* The "yyy" in the name "xxx.yyy" */ - Token **pUnqual /* Write the unqualified object name here */ -){ - int iDb; /* Database holding the object */ - sqlite3 *db = pParse->db; - - if( ALWAYS(pName2!=0) && pName2->n>0 ){ - if( db->init.busy ) { - sqlite3ErrorMsg(pParse, "corrupt database"); - pParse->nErr++; - return -1; - } - *pUnqual = pName2; - iDb = sqlite3FindDb(db, pName1); - if( iDb<0 ){ - sqlite3ErrorMsg(pParse, "unknown database %T", pName1); - pParse->nErr++; - return -1; - } - }else{ - assert( db->init.iDb==0 || db->init.busy ); - iDb = db->init.iDb; - *pUnqual = pName1; - } - return iDb; -} - -/* -** This routine is used to check if the UTF-8 string zName is a legal -** unqualified name for a new schema object (table, index, view or -** trigger). All names are legal except those that begin with the string -** "sqlite_" (in upper, lower or mixed case). This portion of the namespace -** is reserved for internal use. -*/ -SQLITE_PRIVATE int sqlite3CheckObjectName(Parse *pParse, const char *zName){ - if( !pParse->db->init.busy && pParse->nested==0 - && (pParse->db->flags & SQLITE_WriteSchema)==0 - && 0==sqlite3StrNICmp(zName, "sqlite_", 7) ){ - sqlite3ErrorMsg(pParse, "object name reserved for internal use: %s", zName); - return SQLITE_ERROR; - } - return SQLITE_OK; -} - -/* -** Return the PRIMARY KEY index of a table -*/ -SQLITE_PRIVATE Index *sqlite3PrimaryKeyIndex(Table *pTab){ - Index *p; - for(p=pTab->pIndex; p && !IsPrimaryKeyIndex(p); p=p->pNext){} - return p; -} - -/* -** Return the column of index pIdx that corresponds to table -** column iCol. Return -1 if not found. -*/ -SQLITE_PRIVATE i16 sqlite3ColumnOfIndex(Index *pIdx, i16 iCol){ - int i; - for(i=0; inColumn; i++){ - if( iCol==pIdx->aiColumn[i] ) return i; - } - return -1; -} - -/* -** Begin constructing a new table representation in memory. 
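/*
** Editorial sketch (not part of the original file): the two-part names
** resolved by sqlite3TwoPartName() come from qualified DDL such as the
** statements below, and sqlite3CheckObjectName() rejects names in the
** reserved "sqlite_" namespace.  The file name "aux.db" is illustrative;
** assumes <sqlite3.h>.
*/
static void qualifiedNamesDemo(sqlite3 *db){
  sqlite3_exec(db, "ATTACH DATABASE 'aux.db' AS aux;", 0, 0, 0);
  sqlite3_exec(db, "CREATE TABLE aux.log(msg TEXT);",  0, 0, 0); /* "aux"."log" */
  sqlite3_exec(db, "CREATE TEMP TABLE scratch(v);",    0, 0, 0); /* database temp */
  /* Fails with "object name reserved for internal use": */
  sqlite3_exec(db, "CREATE TABLE sqlite_mine(a);",     0, 0, 0);
}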
This is -** the first of several action routines that get called in response -** to a CREATE TABLE statement. In particular, this routine is called -** after seeing tokens "CREATE" and "TABLE" and the table name. The isTemp -** flag is true if the table should be stored in the auxiliary database -** file instead of in the main database file. This is normally the case -** when the "TEMP" or "TEMPORARY" keyword occurs in between -** CREATE and TABLE. -** -** The new table record is initialized and put in pParse->pNewTable. -** As more of the CREATE TABLE statement is parsed, additional action -** routines will be called to add more information to this record. -** At the end of the CREATE TABLE statement, the sqlite3EndTable() routine -** is called to complete the construction of the new table record. -*/ -SQLITE_PRIVATE void sqlite3StartTable( - Parse *pParse, /* Parser context */ - Token *pName1, /* First part of the name of the table or view */ - Token *pName2, /* Second part of the name of the table or view */ - int isTemp, /* True if this is a TEMP table */ - int isView, /* True if this is a VIEW */ - int isVirtual, /* True if this is a VIRTUAL table */ - int noErr /* Do nothing if table already exists */ -){ - Table *pTable; - char *zName = 0; /* The name of the new table */ - sqlite3 *db = pParse->db; - Vdbe *v; - int iDb; /* Database number to create the table in */ - Token *pName; /* Unqualified name of the table to create */ - - /* The table or view name to create is passed to this routine via tokens - ** pName1 and pName2. If the table name was fully qualified, for example: - ** - ** CREATE TABLE xxx.yyy (...); - ** - ** Then pName1 is set to "xxx" and pName2 "yyy". On the other hand if - ** the table name is not fully qualified, i.e.: - ** - ** CREATE TABLE yyy(...); - ** - ** Then pName1 is set to "yyy" and pName2 is "". - ** - ** The call below sets the pName pointer to point at the token (pName1 or - ** pName2) that stores the unqualified table name. The variable iDb is - ** set to the index of the database that the table or view is to be - ** created in. - */ - iDb = sqlite3TwoPartName(pParse, pName1, pName2, &pName); - if( iDb<0 ) return; - if( !OMIT_TEMPDB && isTemp && pName2->n>0 && iDb!=1 ){ - /* If creating a temp table, the name may not be qualified. Unless - ** the database name is "temp" anyway. */ - sqlite3ErrorMsg(pParse, "temporary table name must be unqualified"); - return; - } - if( !OMIT_TEMPDB && isTemp ) iDb = 1; - - pParse->sNameToken = *pName; - zName = sqlite3NameFromToken(db, pName); - if( zName==0 ) return; - if( SQLITE_OK!=sqlite3CheckObjectName(pParse, zName) ){ - goto begin_table_error; - } - if( db->init.iDb==1 ) isTemp = 1; -#ifndef SQLITE_OMIT_AUTHORIZATION - assert( (isTemp & 1)==isTemp ); - { - int code; - char *zDb = db->aDb[iDb].zName; - if( sqlite3AuthCheck(pParse, SQLITE_INSERT, SCHEMA_TABLE(isTemp), 0, zDb) ){ - goto begin_table_error; - } - if( isView ){ - if( !OMIT_TEMPDB && isTemp ){ - code = SQLITE_CREATE_TEMP_VIEW; - }else{ - code = SQLITE_CREATE_VIEW; - } - }else{ - if( !OMIT_TEMPDB && isTemp ){ - code = SQLITE_CREATE_TEMP_TABLE; - }else{ - code = SQLITE_CREATE_TABLE; - } - } - if( !isVirtual && sqlite3AuthCheck(pParse, code, zName, 0, zDb) ){ - goto begin_table_error; - } - } -#endif - - /* Make sure the new table name does not collide with an existing - ** index or table name in the same database. Issue an error message if - ** it does. The exception is if the statement being parsed was passed - ** to an sqlite3_declare_vtab() call. 
In that case only the column names - ** and types will be used, so there is no need to test for namespace - ** collisions. - */ - if( !IN_DECLARE_VTAB ){ - char *zDb = db->aDb[iDb].zName; - if( SQLITE_OK!=sqlite3ReadSchema(pParse) ){ - goto begin_table_error; - } - pTable = sqlite3FindTable(db, zName, zDb); - if( pTable ){ - if( !noErr ){ - sqlite3ErrorMsg(pParse, "table %T already exists", pName); - }else{ - assert( !db->init.busy ); - sqlite3CodeVerifySchema(pParse, iDb); - } - goto begin_table_error; - } - if( sqlite3FindIndex(db, zName, zDb)!=0 ){ - sqlite3ErrorMsg(pParse, "there is already an index named %s", zName); - goto begin_table_error; - } - } - - pTable = sqlite3DbMallocZero(db, sizeof(Table)); - if( pTable==0 ){ - db->mallocFailed = 1; - pParse->rc = SQLITE_NOMEM; - pParse->nErr++; - goto begin_table_error; - } - pTable->zName = zName; - pTable->iPKey = -1; - pTable->pSchema = db->aDb[iDb].pSchema; - pTable->nRef = 1; - pTable->nRowLogEst = 200; assert( 200==sqlite3LogEst(1048576) ); - assert( pParse->pNewTable==0 ); - pParse->pNewTable = pTable; - - /* If this is the magic sqlite_sequence table used by autoincrement, - ** then record a pointer to this table in the main database structure - ** so that INSERT can find the table easily. - */ -#ifndef SQLITE_OMIT_AUTOINCREMENT - if( !pParse->nested && strcmp(zName, "sqlite_sequence")==0 ){ - assert( sqlite3SchemaMutexHeld(db, iDb, 0) ); - pTable->pSchema->pSeqTab = pTable; - } -#endif - - /* Begin generating the code that will insert the table record into - ** the SQLITE_MASTER table. Note in particular that we must go ahead - ** and allocate the record number for the table entry now. Before any - ** PRIMARY KEY or UNIQUE keywords are parsed. Those keywords will cause - ** indices to be created and the table record must come before the - ** indices. Hence, the record number for the table must be allocated - ** now. - */ - if( !db->init.busy && (v = sqlite3GetVdbe(pParse))!=0 ){ - int j1; - int fileFormat; - int reg1, reg2, reg3; - sqlite3BeginWriteOperation(pParse, 0, iDb); - -#ifndef SQLITE_OMIT_VIRTUALTABLE - if( isVirtual ){ - sqlite3VdbeAddOp0(v, OP_VBegin); - } -#endif - - /* If the file format and encoding in the database have not been set, - ** set them now. - */ - reg1 = pParse->regRowid = ++pParse->nMem; - reg2 = pParse->regRoot = ++pParse->nMem; - reg3 = ++pParse->nMem; - sqlite3VdbeAddOp3(v, OP_ReadCookie, iDb, reg3, BTREE_FILE_FORMAT); - sqlite3VdbeUsesBtree(v, iDb); - j1 = sqlite3VdbeAddOp1(v, OP_If, reg3); VdbeCoverage(v); - fileFormat = (db->flags & SQLITE_LegacyFileFmt)!=0 ? - 1 : SQLITE_MAX_FILE_FORMAT; - sqlite3VdbeAddOp2(v, OP_Integer, fileFormat, reg3); - sqlite3VdbeAddOp3(v, OP_SetCookie, iDb, BTREE_FILE_FORMAT, reg3); - sqlite3VdbeAddOp2(v, OP_Integer, ENC(db), reg3); - sqlite3VdbeAddOp3(v, OP_SetCookie, iDb, BTREE_TEXT_ENCODING, reg3); - sqlite3VdbeJumpHere(v, j1); - - /* This just creates a place-holder record in the sqlite_master table. - ** The record created does not contain anything yet. It will be replaced - ** by the real entry in code generated at sqlite3EndTable(). - ** - ** The rowid for the new entry is left in register pParse->regRowid. - ** The root page number of the new table is left in reg pParse->regRoot. - ** The rowid and root page number values are needed by the code that - ** sqlite3EndTable will generate. 
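/*
** Editorial sketch (not part of the original file): the noErr flag handled in
** sqlite3StartTable() above is what implements "IF NOT EXISTS".  Assumes
** <sqlite3.h> and an open connection db.
*/
static void ifNotExistsDemo(sqlite3 *db){
  sqlite3_exec(db, "CREATE TABLE cfg(k TEXT, v TEXT);", 0, 0, 0);
  /* Second plain CREATE fails: "table cfg already exists". */
  sqlite3_exec(db, "CREATE TABLE cfg(k TEXT, v TEXT);", 0, 0, 0);
  /* With IF NOT EXISTS the statement becomes a harmless no-op instead. */
  sqlite3_exec(db, "CREATE TABLE IF NOT EXISTS cfg(k TEXT, v TEXT);", 0, 0, 0);
}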
- */ -#if !defined(SQLITE_OMIT_VIEW) || !defined(SQLITE_OMIT_VIRTUALTABLE) - if( isView || isVirtual ){ - sqlite3VdbeAddOp2(v, OP_Integer, 0, reg2); - }else -#endif - { - pParse->addrCrTab = sqlite3VdbeAddOp2(v, OP_CreateTable, iDb, reg2); - } - sqlite3OpenMasterTable(pParse, iDb); - sqlite3VdbeAddOp2(v, OP_NewRowid, 0, reg1); - sqlite3VdbeAddOp2(v, OP_Null, 0, reg3); - sqlite3VdbeAddOp3(v, OP_Insert, 0, reg3, reg1); - sqlite3VdbeChangeP5(v, OPFLAG_APPEND); - sqlite3VdbeAddOp0(v, OP_Close); - } - - /* Normal (non-error) return. */ - return; - - /* If an error occurs, we jump here */ -begin_table_error: - sqlite3DbFree(db, zName); - return; -} - -/* -** This macro is used to compare two strings in a case-insensitive manner. -** It is slightly faster than calling sqlite3StrICmp() directly, but -** produces larger code. -** -** WARNING: This macro is not compatible with the strcmp() family. It -** returns true if the two strings are equal, otherwise false. -*/ -#define STRICMP(x, y) (\ -sqlite3UpperToLower[*(unsigned char *)(x)]== \ -sqlite3UpperToLower[*(unsigned char *)(y)] \ -&& sqlite3StrICmp((x)+1,(y)+1)==0 ) - -/* -** Add a new column to the table currently being constructed. -** -** The parser calls this routine once for each column declaration -** in a CREATE TABLE statement. sqlite3StartTable() gets called -** first to get things going. Then this routine is called for each -** column. -*/ -SQLITE_PRIVATE void sqlite3AddColumn(Parse *pParse, Token *pName){ - Table *p; - int i; - char *z; - Column *pCol; - sqlite3 *db = pParse->db; - if( (p = pParse->pNewTable)==0 ) return; -#if SQLITE_MAX_COLUMN - if( p->nCol+1>db->aLimit[SQLITE_LIMIT_COLUMN] ){ - sqlite3ErrorMsg(pParse, "too many columns on %s", p->zName); - return; - } -#endif - z = sqlite3NameFromToken(db, pName); - if( z==0 ) return; - for(i=0; inCol; i++){ - if( STRICMP(z, p->aCol[i].zName) ){ - sqlite3ErrorMsg(pParse, "duplicate column name: %s", z); - sqlite3DbFree(db, z); - return; - } - } - if( (p->nCol & 0x7)==0 ){ - Column *aNew; - aNew = sqlite3DbRealloc(db,p->aCol,(p->nCol+8)*sizeof(p->aCol[0])); - if( aNew==0 ){ - sqlite3DbFree(db, z); - return; - } - p->aCol = aNew; - } - pCol = &p->aCol[p->nCol]; - memset(pCol, 0, sizeof(p->aCol[0])); - pCol->zName = z; - - /* If there is no type specified, columns have the default affinity - ** 'NONE'. If there is a type specified, then sqlite3AddColumnType() will - ** be called next to set pCol->affinity correctly. - */ - pCol->affinity = SQLITE_AFF_NONE; - pCol->szEst = 1; - p->nCol++; -} - -/* -** This routine is called by the parser while in the middle of -** parsing a CREATE TABLE statement. A "NOT NULL" constraint has -** been seen on a column. This routine sets the notNull flag on -** the column currently under construction. -*/ -SQLITE_PRIVATE void sqlite3AddNotNull(Parse *pParse, int onError){ - Table *p; - p = pParse->pNewTable; - if( p==0 || NEVER(p->nCol<1) ) return; - p->aCol[p->nCol-1].notNull = (u8)onError; -} - -/* -** Scan the column type name zType (length nType) and return the -** associated affinity type. -** -** This routine does a case-independent search of zType for the -** substrings in the following table. If one of the substrings is -** found, the corresponding affinity is returned. If zType contains -** more than one of the substrings, entries toward the top of -** the table take priority. For example, if zType is 'BLOBINT', -** SQLITE_AFF_INTEGER is returned. 
-** -** Substring | Affinity -** -------------------------------- -** 'INT' | SQLITE_AFF_INTEGER -** 'CHAR' | SQLITE_AFF_TEXT -** 'CLOB' | SQLITE_AFF_TEXT -** 'TEXT' | SQLITE_AFF_TEXT -** 'BLOB' | SQLITE_AFF_NONE -** 'REAL' | SQLITE_AFF_REAL -** 'FLOA' | SQLITE_AFF_REAL -** 'DOUB' | SQLITE_AFF_REAL -** -** If none of the substrings in the above table are found, -** SQLITE_AFF_NUMERIC is returned. -*/ -SQLITE_PRIVATE char sqlite3AffinityType(const char *zIn, u8 *pszEst){ - u32 h = 0; - char aff = SQLITE_AFF_NUMERIC; - const char *zChar = 0; - - if( zIn==0 ) return aff; - while( zIn[0] ){ - h = (h<<8) + sqlite3UpperToLower[(*zIn)&0xff]; - zIn++; - if( h==(('c'<<24)+('h'<<16)+('a'<<8)+'r') ){ /* CHAR */ - aff = SQLITE_AFF_TEXT; - zChar = zIn; - }else if( h==(('c'<<24)+('l'<<16)+('o'<<8)+'b') ){ /* CLOB */ - aff = SQLITE_AFF_TEXT; - }else if( h==(('t'<<24)+('e'<<16)+('x'<<8)+'t') ){ /* TEXT */ - aff = SQLITE_AFF_TEXT; - }else if( h==(('b'<<24)+('l'<<16)+('o'<<8)+'b') /* BLOB */ - && (aff==SQLITE_AFF_NUMERIC || aff==SQLITE_AFF_REAL) ){ - aff = SQLITE_AFF_NONE; - if( zIn[0]=='(' ) zChar = zIn; -#ifndef SQLITE_OMIT_FLOATING_POINT - }else if( h==(('r'<<24)+('e'<<16)+('a'<<8)+'l') /* REAL */ - && aff==SQLITE_AFF_NUMERIC ){ - aff = SQLITE_AFF_REAL; - }else if( h==(('f'<<24)+('l'<<16)+('o'<<8)+'a') /* FLOA */ - && aff==SQLITE_AFF_NUMERIC ){ - aff = SQLITE_AFF_REAL; - }else if( h==(('d'<<24)+('o'<<16)+('u'<<8)+'b') /* DOUB */ - && aff==SQLITE_AFF_NUMERIC ){ - aff = SQLITE_AFF_REAL; -#endif - }else if( (h&0x00FFFFFF)==(('i'<<16)+('n'<<8)+'t') ){ /* INT */ - aff = SQLITE_AFF_INTEGER; - break; - } - } - - /* If pszEst is not NULL, store an estimate of the field size. The - ** estimate is scaled so that the size of an integer is 1. */ - if( pszEst ){ - *pszEst = 1; /* default size is approx 4 bytes */ - if( aff<=SQLITE_AFF_NONE ){ - if( zChar ){ - while( zChar[0] ){ - if( sqlite3Isdigit(zChar[0]) ){ - int v = 0; - sqlite3GetInt32(zChar, &v); - v = v/4 + 1; - if( v>255 ) v = 255; - *pszEst = v; /* BLOB(k), VARCHAR(k), CHAR(k) -> r=(k/4+1) */ - break; - } - zChar++; - } - }else{ - *pszEst = 5; /* BLOB, TEXT, CLOB -> r=5 (approx 20 bytes)*/ - } - } - } - return aff; -} - -/* -** This routine is called by the parser while in the middle of -** parsing a CREATE TABLE statement. The pFirst token is the first -** token in the sequence of tokens that describe the type of the -** column currently under construction. pLast is the last token -** in the sequence. Use this information to construct a string -** that contains the typename of the column and store that string -** in zType. -*/ -SQLITE_PRIVATE void sqlite3AddColumnType(Parse *pParse, Token *pType){ - Table *p; - Column *pCol; - - p = pParse->pNewTable; - if( p==0 || NEVER(p->nCol<1) ) return; - pCol = &p->aCol[p->nCol-1]; - assert( pCol->zType==0 ); - pCol->zType = sqlite3NameFromToken(pParse->db, pType); - pCol->affinity = sqlite3AffinityType(pCol->zType, &pCol->szEst); -} - -/* -** The expression is the default value for the most recently added column -** of the table currently under construction. -** -** Default value expressions must be constant. Raise an exception if this -** is not the case. -** -** This routine is called by the parser while in the middle of -** parsing a CREATE TABLE statement. 
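/*
** Editorial sketch (not part of the original file): the substring rules of
** sqlite3AffinityType() are easiest to observe through typeof().  Assumes
** <sqlite3.h> and an open connection db; result-row callbacks omitted.
*/
static void affinityDemo(sqlite3 *db){
  sqlite3_exec(db,
    "CREATE TABLE aff(a INT, b VARCHAR(10), c BLOB, d FLOAT, e);"
    "INSERT INTO aff VALUES('42','42','42','42','42');", 0, 0, 0);
  /* typeof(a)='integer'  -- 'INT'  -> INTEGER affinity
  ** typeof(b)='text'     -- 'CHAR' -> TEXT affinity
  ** typeof(c)='text'     -- 'BLOB' -> NONE affinity, text stays text
  ** typeof(d)='real'     -- 'FLOA' -> REAL affinity
  ** typeof(e)='text'     -- no declared type -> default NONE affinity  */
  sqlite3_exec(db,
    "SELECT typeof(a),typeof(b),typeof(c),typeof(d),typeof(e) FROM aff;",
    0, 0, 0);
}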
-*/ -SQLITE_PRIVATE void sqlite3AddDefaultValue(Parse *pParse, ExprSpan *pSpan){ - Table *p; - Column *pCol; - sqlite3 *db = pParse->db; - p = pParse->pNewTable; - if( p!=0 ){ - pCol = &(p->aCol[p->nCol-1]); - if( !sqlite3ExprIsConstantOrFunction(pSpan->pExpr) ){ - sqlite3ErrorMsg(pParse, "default value of column [%s] is not constant", - pCol->zName); - }else{ - /* A copy of pExpr is used instead of the original, as pExpr contains - ** tokens that point to volatile memory. The 'span' of the expression - ** is required by pragma table_info. - */ - sqlite3ExprDelete(db, pCol->pDflt); - pCol->pDflt = sqlite3ExprDup(db, pSpan->pExpr, EXPRDUP_REDUCE); - sqlite3DbFree(db, pCol->zDflt); - pCol->zDflt = sqlite3DbStrNDup(db, (char*)pSpan->zStart, - (int)(pSpan->zEnd - pSpan->zStart)); - } - } - sqlite3ExprDelete(db, pSpan->pExpr); -} - -/* -** Designate the PRIMARY KEY for the table. pList is a list of names -** of columns that form the primary key. If pList is NULL, then the -** most recently added column of the table is the primary key. -** -** A table can have at most one primary key. If the table already has -** a primary key (and this is the second primary key) then create an -** error. -** -** If the PRIMARY KEY is on a single column whose datatype is INTEGER, -** then we will try to use that column as the rowid. Set the Table.iPKey -** field of the table under construction to be the index of the -** INTEGER PRIMARY KEY column. Table.iPKey is set to -1 if there is -** no INTEGER PRIMARY KEY. -** -** If the key is not an INTEGER PRIMARY KEY, then create a unique -** index for the key. No index is created for INTEGER PRIMARY KEYs. -*/ -SQLITE_PRIVATE void sqlite3AddPrimaryKey( - Parse *pParse, /* Parsing context */ - ExprList *pList, /* List of field names to be indexed */ - int onError, /* What to do with a uniqueness conflict */ - int autoInc, /* True if the AUTOINCREMENT keyword is present */ - int sortOrder /* SQLITE_SO_ASC or SQLITE_SO_DESC */ -){ - Table *pTab = pParse->pNewTable; - char *zType = 0; - int iCol = -1, i; - int nTerm; - if( pTab==0 || IN_DECLARE_VTAB ) goto primary_key_exit; - if( pTab->tabFlags & TF_HasPrimaryKey ){ - sqlite3ErrorMsg(pParse, - "table \"%s\" has more than one primary key", pTab->zName); - goto primary_key_exit; - } - pTab->tabFlags |= TF_HasPrimaryKey; - if( pList==0 ){ - iCol = pTab->nCol - 1; - pTab->aCol[iCol].colFlags |= COLFLAG_PRIMKEY; - zType = pTab->aCol[iCol].zType; - nTerm = 1; - }else{ - nTerm = pList->nExpr; - for(i=0; inCol; iCol++){ - if( sqlite3StrICmp(pList->a[i].zName, pTab->aCol[iCol].zName)==0 ){ - pTab->aCol[iCol].colFlags |= COLFLAG_PRIMKEY; - zType = pTab->aCol[iCol].zType; - break; - } - } - } - } - if( nTerm==1 - && zType && sqlite3StrICmp(zType, "INTEGER")==0 - && sortOrder==SQLITE_SO_ASC - ){ - pTab->iPKey = iCol; - pTab->keyConf = (u8)onError; - assert( autoInc==0 || autoInc==1 ); - pTab->tabFlags |= autoInc*TF_Autoincrement; - if( pList ) pParse->iPkSortOrder = pList->a[0].sortOrder; - }else if( autoInc ){ -#ifndef SQLITE_OMIT_AUTOINCREMENT - sqlite3ErrorMsg(pParse, "AUTOINCREMENT is only allowed on an " - "INTEGER PRIMARY KEY"); -#endif - }else{ - Vdbe *v = pParse->pVdbe; - Index *p; - if( v ) pParse->addrSkipPK = sqlite3VdbeAddOp0(v, OP_Noop); - p = sqlite3CreateIndex(pParse, 0, 0, 0, pList, onError, 0, - 0, sortOrder, 0); - if( p ){ - p->idxType = SQLITE_IDXTYPE_PRIMARYKEY; - if( v ) sqlite3VdbeJumpHere(v, pParse->addrSkipPK); - } - pList = 0; - } - -primary_key_exit: - sqlite3ExprListDelete(pParse->db, pList); - return; -} - 
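/*
** Editorial sketch (not part of the original file): the three outcomes of
** sqlite3AddPrimaryKey() above, expressed as SQL.  Assumes <sqlite3.h> and
** an open connection db.
*/
static void primaryKeyForms(sqlite3 *db){
  /* Single INTEGER PRIMARY KEY: becomes an alias for the rowid (Table.iPKey). */
  sqlite3_exec(db, "CREATE TABLE a(id INTEGER PRIMARY KEY, v);", 0, 0, 0);
  /* Any other key: an implicit UNIQUE index is created for it instead. */
  sqlite3_exec(db, "CREATE TABLE b(name TEXT PRIMARY KEY, v);", 0, 0, 0);
  /* Rejected with "AUTOINCREMENT is only allowed on an INTEGER PRIMARY KEY": */
  sqlite3_exec(db, "CREATE TABLE c(name TEXT PRIMARY KEY AUTOINCREMENT, v);",
               0, 0, 0);
}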
-/* -** Add a new CHECK constraint to the table currently under construction. -*/ -SQLITE_PRIVATE void sqlite3AddCheckConstraint( - Parse *pParse, /* Parsing context */ - Expr *pCheckExpr /* The check expression */ -){ -#ifndef SQLITE_OMIT_CHECK - Table *pTab = pParse->pNewTable; - sqlite3 *db = pParse->db; - if( pTab && !IN_DECLARE_VTAB - && !sqlite3BtreeIsReadonly(db->aDb[db->init.iDb].pBt) - ){ - pTab->pCheck = sqlite3ExprListAppend(pParse, pTab->pCheck, pCheckExpr); - if( pParse->constraintName.n ){ - sqlite3ExprListSetName(pParse, pTab->pCheck, &pParse->constraintName, 1); - } - }else -#endif - { - sqlite3ExprDelete(pParse->db, pCheckExpr); - } -} - -/* -** Set the collation function of the most recently parsed table column -** to the CollSeq given. -*/ -SQLITE_PRIVATE void sqlite3AddCollateType(Parse *pParse, Token *pToken){ - Table *p; - int i; - char *zColl; /* Dequoted name of collation sequence */ - sqlite3 *db; - - if( (p = pParse->pNewTable)==0 ) return; - i = p->nCol-1; - db = pParse->db; - zColl = sqlite3NameFromToken(db, pToken); - if( !zColl ) return; - - if( sqlite3LocateCollSeq(pParse, zColl) ){ - Index *pIdx; - sqlite3DbFree(db, p->aCol[i].zColl); - p->aCol[i].zColl = zColl; - - /* If the column is declared as " PRIMARY KEY COLLATE ", - ** then an index may have been created on this column before the - ** collation type was added. Correct this if it is the case. - */ - for(pIdx=p->pIndex; pIdx; pIdx=pIdx->pNext){ - assert( pIdx->nKeyCol==1 ); - if( pIdx->aiColumn[0]==i ){ - pIdx->azColl[0] = p->aCol[i].zColl; - } - } - }else{ - sqlite3DbFree(db, zColl); - } -} - -/* -** This function returns the collation sequence for database native text -** encoding identified by the string zName, length nName. -** -** If the requested collation sequence is not available, or not available -** in the database native encoding, the collation factory is invoked to -** request it. If the collation factory does not supply such a sequence, -** and the sequence is available in another text encoding, then that is -** returned instead. -** -** If no versions of the requested collations sequence are available, or -** another error occurs, NULL is returned and an error message written into -** pParse. -** -** This routine is a wrapper around sqlite3FindCollSeq(). This routine -** invokes the collation factory if the named collation cannot be found -** and generates an error message. -** -** See also: sqlite3FindCollSeq(), sqlite3GetCollSeq() -*/ -SQLITE_PRIVATE CollSeq *sqlite3LocateCollSeq(Parse *pParse, const char *zName){ - sqlite3 *db = pParse->db; - u8 enc = ENC(db); - u8 initbusy = db->init.busy; - CollSeq *pColl; - - pColl = sqlite3FindCollSeq(db, enc, zName, initbusy); - if( !initbusy && (!pColl || !pColl->xCmp) ){ - pColl = sqlite3GetCollSeq(pParse, enc, pColl, zName); - } - - return pColl; -} - - -/* -** Generate code that will increment the schema cookie. -** -** The schema cookie is used to determine when the schema for the -** database changes. After each schema change, the cookie value -** changes. When a process first reads the schema it records the -** cookie. Thereafter, whenever it goes to access the database, -** it checks the cookie to make sure the schema has not changed -** since it was last read. -** -** This plan is not completely bullet-proof. It is possible for -** the schema to change multiple times and for the cookie to be -** set back to prior value. But schema changes are infrequent -** and the probability of hitting the same cookie value is only -** 1 chance in 2^32. 
So we're safe enough. -*/ -SQLITE_PRIVATE void sqlite3ChangeCookie(Parse *pParse, int iDb){ - int r1 = sqlite3GetTempReg(pParse); - sqlite3 *db = pParse->db; - Vdbe *v = pParse->pVdbe; - assert( sqlite3SchemaMutexHeld(db, iDb, 0) ); - sqlite3VdbeAddOp2(v, OP_Integer, db->aDb[iDb].pSchema->schema_cookie+1, r1); - sqlite3VdbeAddOp3(v, OP_SetCookie, iDb, BTREE_SCHEMA_VERSION, r1); - sqlite3ReleaseTempReg(pParse, r1); -} - -/* -** Measure the number of characters needed to output the given -** identifier. The number returned includes any quotes used -** but does not include the null terminator. -** -** The estimate is conservative. It might be larger that what is -** really needed. -*/ -static int identLength(const char *z){ - int n; - for(n=0; *z; n++, z++){ - if( *z=='"' ){ n++; } - } - return n + 2; -} - -/* -** The first parameter is a pointer to an output buffer. The second -** parameter is a pointer to an integer that contains the offset at -** which to write into the output buffer. This function copies the -** nul-terminated string pointed to by the third parameter, zSignedIdent, -** to the specified offset in the buffer and updates *pIdx to refer -** to the first byte after the last byte written before returning. -** -** If the string zSignedIdent consists entirely of alpha-numeric -** characters, does not begin with a digit and is not an SQL keyword, -** then it is copied to the output buffer exactly as it is. Otherwise, -** it is quoted using double-quotes. -*/ -static void identPut(char *z, int *pIdx, char *zSignedIdent){ - unsigned char *zIdent = (unsigned char*)zSignedIdent; - int i, j, needQuote; - i = *pIdx; - - for(j=0; zIdent[j]; j++){ - if( !sqlite3Isalnum(zIdent[j]) && zIdent[j]!='_' ) break; - } - needQuote = sqlite3Isdigit(zIdent[0]) - || sqlite3KeywordCode(zIdent, j)!=TK_ID - || zIdent[j]!=0 - || j==0; - - if( needQuote ) z[i++] = '"'; - for(j=0; zIdent[j]; j++){ - z[i++] = zIdent[j]; - if( zIdent[j]=='"' ) z[i++] = '"'; - } - if( needQuote ) z[i++] = '"'; - z[i] = 0; - *pIdx = i; -} - -/* -** Generate a CREATE TABLE statement appropriate for the given -** table. Memory to hold the text of the statement is obtained -** from sqliteMalloc() and must be freed by the calling function. 
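/*
** Editorial sketch (not part of the original file): the cookie bumped by
** sqlite3ChangeCookie() above is exposed as PRAGMA schema_version, so the
** effect of a schema change can be observed directly.  Assumes <sqlite3.h>
** and an open connection db.
*/
static void schemaCookieDemo(sqlite3 *db){
  sqlite3_exec(db, "PRAGMA schema_version;", 0, 0, 0);  /* e.g. 7 */
  sqlite3_exec(db, "CREATE TABLE bump(x);",  0, 0, 0);  /* schema change */
  sqlite3_exec(db, "PRAGMA schema_version;", 0, 0, 0);  /* now 8 */
}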
-*/ -static char *createTableStmt(sqlite3 *db, Table *p){ - int i, k, n; - char *zStmt; - char *zSep, *zSep2, *zEnd; - Column *pCol; - n = 0; - for(pCol = p->aCol, i=0; inCol; i++, pCol++){ - n += identLength(pCol->zName) + 5; - } - n += identLength(p->zName); - if( n<50 ){ - zSep = ""; - zSep2 = ","; - zEnd = ")"; - }else{ - zSep = "\n "; - zSep2 = ",\n "; - zEnd = "\n)"; - } - n += 35 + 6*p->nCol; - zStmt = sqlite3DbMallocRaw(0, n); - if( zStmt==0 ){ - db->mallocFailed = 1; - return 0; - } - sqlite3_snprintf(n, zStmt, "CREATE TABLE "); - k = sqlite3Strlen30(zStmt); - identPut(zStmt, &k, p->zName); - zStmt[k++] = '('; - for(pCol=p->aCol, i=0; inCol; i++, pCol++){ - static const char * const azType[] = { - /* SQLITE_AFF_TEXT */ " TEXT", - /* SQLITE_AFF_NONE */ "", - /* SQLITE_AFF_NUMERIC */ " NUM", - /* SQLITE_AFF_INTEGER */ " INT", - /* SQLITE_AFF_REAL */ " REAL" - }; - int len; - const char *zType; - - sqlite3_snprintf(n-k, &zStmt[k], zSep); - k += sqlite3Strlen30(&zStmt[k]); - zSep = zSep2; - identPut(zStmt, &k, pCol->zName); - assert( pCol->affinity-SQLITE_AFF_TEXT >= 0 ); - assert( pCol->affinity-SQLITE_AFF_TEXT < ArraySize(azType) ); - testcase( pCol->affinity==SQLITE_AFF_TEXT ); - testcase( pCol->affinity==SQLITE_AFF_NONE ); - testcase( pCol->affinity==SQLITE_AFF_NUMERIC ); - testcase( pCol->affinity==SQLITE_AFF_INTEGER ); - testcase( pCol->affinity==SQLITE_AFF_REAL ); - - zType = azType[pCol->affinity - SQLITE_AFF_TEXT]; - len = sqlite3Strlen30(zType); - assert( pCol->affinity==SQLITE_AFF_NONE - || pCol->affinity==sqlite3AffinityType(zType, 0) ); - memcpy(&zStmt[k], zType, len); - k += len; - assert( k<=n ); - } - sqlite3_snprintf(n-k, &zStmt[k], "%s", zEnd); - return zStmt; -} - -/* -** Resize an Index object to hold N columns total. Return SQLITE_OK -** on success and SQLITE_NOMEM on an OOM error. -*/ -static int resizeIndexObject(sqlite3 *db, Index *pIdx, int N){ - char *zExtra; - int nByte; - if( pIdx->nColumn>=N ) return SQLITE_OK; - assert( pIdx->isResized==0 ); - nByte = (sizeof(char*) + sizeof(i16) + 1)*N; - zExtra = sqlite3DbMallocZero(db, nByte); - if( zExtra==0 ) return SQLITE_NOMEM; - memcpy(zExtra, pIdx->azColl, sizeof(char*)*pIdx->nColumn); - pIdx->azColl = (char**)zExtra; - zExtra += sizeof(char*)*N; - memcpy(zExtra, pIdx->aiColumn, sizeof(i16)*pIdx->nColumn); - pIdx->aiColumn = (i16*)zExtra; - zExtra += sizeof(i16)*N; - memcpy(zExtra, pIdx->aSortOrder, pIdx->nColumn); - pIdx->aSortOrder = (u8*)zExtra; - pIdx->nColumn = N; - pIdx->isResized = 1; - return SQLITE_OK; -} - -/* -** Estimate the total row width for a table. -*/ -static void estimateTableWidth(Table *pTab){ - unsigned wTable = 0; - const Column *pTabCol; - int i; - for(i=pTab->nCol, pTabCol=pTab->aCol; i>0; i--, pTabCol++){ - wTable += pTabCol->szEst; - } - if( pTab->iPKey<0 ) wTable++; - pTab->szTabRow = sqlite3LogEst(wTable*4); -} - -/* -** Estimate the average size of a row for an index. -*/ -static void estimateIndexWidth(Index *pIdx){ - unsigned wIndex = 0; - int i; - const Column *aCol = pIdx->pTable->aCol; - for(i=0; inColumn; i++){ - i16 x = pIdx->aiColumn[i]; - assert( xpTable->nCol ); - wIndex += x<0 ? 
1 : aCol[pIdx->aiColumn[i]].szEst; - } - pIdx->szIdxRow = sqlite3LogEst(wIndex*4); -} - -/* Return true if value x is found any of the first nCol entries of aiCol[] -*/ -static int hasColumn(const i16 *aiCol, int nCol, int x){ - while( nCol-- > 0 ) if( x==*(aiCol++) ) return 1; - return 0; -} - -/* -** This routine runs at the end of parsing a CREATE TABLE statement that -** has a WITHOUT ROWID clause. The job of this routine is to convert both -** internal schema data structures and the generated VDBE code so that they -** are appropriate for a WITHOUT ROWID table instead of a rowid table. -** Changes include: -** -** (1) Convert the OP_CreateTable into an OP_CreateIndex. There is -** no rowid btree for a WITHOUT ROWID. Instead, the canonical -** data storage is a covering index btree. -** (2) Bypass the creation of the sqlite_master table entry -** for the PRIMARY KEY as the the primary key index is now -** identified by the sqlite_master table entry of the table itself. -** (3) Set the Index.tnum of the PRIMARY KEY Index object in the -** schema to the rootpage from the main table. -** (4) Set all columns of the PRIMARY KEY schema object to be NOT NULL. -** (5) Add all table columns to the PRIMARY KEY Index object -** so that the PRIMARY KEY is a covering index. The surplus -** columns are part of KeyInfo.nXField and are not used for -** sorting or lookup or uniqueness checks. -** (6) Replace the rowid tail on all automatically generated UNIQUE -** indices with the PRIMARY KEY columns. -*/ -static void convertToWithoutRowidTable(Parse *pParse, Table *pTab){ - Index *pIdx; - Index *pPk; - int nPk; - int i, j; - sqlite3 *db = pParse->db; - Vdbe *v = pParse->pVdbe; - - /* Convert the OP_CreateTable opcode that would normally create the - ** root-page for the table into a OP_CreateIndex opcode. The index - ** created will become the PRIMARY KEY index. - */ - if( pParse->addrCrTab ){ - assert( v ); - sqlite3VdbeGetOp(v, pParse->addrCrTab)->opcode = OP_CreateIndex; - } - - /* Bypass the creation of the PRIMARY KEY btree and the sqlite_master - ** table entry. - */ - if( pParse->addrSkipPK ){ - assert( v ); - sqlite3VdbeGetOp(v, pParse->addrSkipPK)->opcode = OP_Goto; - } - - /* Locate the PRIMARY KEY index. Or, if this table was originally - ** an INTEGER PRIMARY KEY table, create a new PRIMARY KEY index. - */ - if( pTab->iPKey>=0 ){ - ExprList *pList; - pList = sqlite3ExprListAppend(pParse, 0, 0); - if( pList==0 ) return; - pList->a[0].zName = sqlite3DbStrDup(pParse->db, - pTab->aCol[pTab->iPKey].zName); - pList->a[0].sortOrder = pParse->iPkSortOrder; - assert( pParse->pNewTable==pTab ); - pPk = sqlite3CreateIndex(pParse, 0, 0, 0, pList, pTab->keyConf, 0, 0, 0, 0); - if( pPk==0 ) return; - pPk->idxType = SQLITE_IDXTYPE_PRIMARYKEY; - pTab->iPKey = -1; - }else{ - pPk = sqlite3PrimaryKeyIndex(pTab); - } - pPk->isCovering = 1; - assert( pPk!=0 ); - nPk = pPk->nKeyCol; - - /* Make sure every column of the PRIMARY KEY is NOT NULL */ - for(i=0; iaCol[pPk->aiColumn[i]].notNull = 1; - } - pPk->uniqNotNull = 1; - - /* The root page of the PRIMARY KEY is the table root page */ - pPk->tnum = pTab->tnum; - - /* Update the in-memory representation of all UNIQUE indices by converting - ** the final rowid column into one or more columns of the PRIMARY KEY. 
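/*
** Editorial sketch (not part of the original file): the conversion steps
** listed above are triggered by the WITHOUT ROWID clause, which also requires
** an explicit PRIMARY KEY and forbids AUTOINCREMENT (both enforced by
** sqlite3EndTable() below).  Assumes <sqlite3.h> and an open connection db.
*/
static void withoutRowidDemo(sqlite3 *db){
  /* The PRIMARY KEY becomes the clustered storage; there is no rowid column. */
  sqlite3_exec(db, "CREATE TABLE kv(k TEXT PRIMARY KEY, v) WITHOUT ROWID;",
               0, 0, 0);
  /* Fails: "PRIMARY KEY missing on table nokey" */
  sqlite3_exec(db, "CREATE TABLE nokey(a, b) WITHOUT ROWID;", 0, 0, 0);
  /* Fails: "AUTOINCREMENT not allowed on WITHOUT ROWID tables" */
  sqlite3_exec(db,
    "CREATE TABLE bad(id INTEGER PRIMARY KEY AUTOINCREMENT) WITHOUT ROWID;",
    0, 0, 0);
}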
- */ - for(pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext){ - int n; - if( IsPrimaryKeyIndex(pIdx) ) continue; - for(i=n=0; iaiColumn, pIdx->nKeyCol, pPk->aiColumn[i]) ) n++; - } - if( n==0 ){ - /* This index is a superset of the primary key */ - pIdx->nColumn = pIdx->nKeyCol; - continue; - } - if( resizeIndexObject(db, pIdx, pIdx->nKeyCol+n) ) return; - for(i=0, j=pIdx->nKeyCol; iaiColumn, pIdx->nKeyCol, pPk->aiColumn[i]) ){ - pIdx->aiColumn[j] = pPk->aiColumn[i]; - pIdx->azColl[j] = pPk->azColl[i]; - j++; - } - } - assert( pIdx->nColumn>=pIdx->nKeyCol+n ); - assert( pIdx->nColumn>=j ); - } - - /* Add all table columns to the PRIMARY KEY index - */ - if( nPknCol ){ - if( resizeIndexObject(db, pPk, pTab->nCol) ) return; - for(i=0, j=nPk; inCol; i++){ - if( !hasColumn(pPk->aiColumn, j, i) ){ - assert( jnColumn ); - pPk->aiColumn[j] = i; - pPk->azColl[j] = "BINARY"; - j++; - } - } - assert( pPk->nColumn==j ); - assert( pTab->nCol==j ); - }else{ - pPk->nColumn = pTab->nCol; - } -} - -/* -** This routine is called to report the final ")" that terminates -** a CREATE TABLE statement. -** -** The table structure that other action routines have been building -** is added to the internal hash tables, assuming no errors have -** occurred. -** -** An entry for the table is made in the master table on disk, unless -** this is a temporary table or db->init.busy==1. When db->init.busy==1 -** it means we are reading the sqlite_master table because we just -** connected to the database or because the sqlite_master table has -** recently changed, so the entry for this table already exists in -** the sqlite_master table. We do not want to create it again. -** -** If the pSelect argument is not NULL, it means that this routine -** was called to create a table generated from a -** "CREATE TABLE ... AS SELECT ..." statement. The column names of -** the new table will match the result set of the SELECT. -*/ -SQLITE_PRIVATE void sqlite3EndTable( - Parse *pParse, /* Parse context */ - Token *pCons, /* The ',' token after the last column defn. */ - Token *pEnd, /* The ')' before options in the CREATE TABLE */ - u8 tabOpts, /* Extra table options. Usually 0. */ - Select *pSelect /* Select from a "CREATE ... AS SELECT" */ -){ - Table *p; /* The new table */ - sqlite3 *db = pParse->db; /* The database connection */ - int iDb; /* Database in which the table lives */ - Index *pIdx; /* An implied index of the table */ - - if( (pEnd==0 && pSelect==0) || db->mallocFailed ){ - return; - } - p = pParse->pNewTable; - if( p==0 ) return; - - assert( !db->init.busy || !pSelect ); - - /* If the db->init.busy is 1 it means we are reading the SQL off the - ** "sqlite_master" or "sqlite_temp_master" table on the disk. - ** So do not write to the disk again. Extract the root page number - ** for the table from the db->init.newTnum field. (The page number - ** should have been put there by the sqliteOpenCb routine.) - */ - if( db->init.busy ){ - p->tnum = db->init.newTnum; - } - - /* Special processing for WITHOUT ROWID Tables */ - if( tabOpts & TF_WithoutRowid ){ - if( (p->tabFlags & TF_Autoincrement) ){ - sqlite3ErrorMsg(pParse, - "AUTOINCREMENT not allowed on WITHOUT ROWID tables"); - return; - } - if( (p->tabFlags & TF_HasPrimaryKey)==0 ){ - sqlite3ErrorMsg(pParse, "PRIMARY KEY missing on table %s", p->zName); - }else{ - p->tabFlags |= TF_WithoutRowid; - convertToWithoutRowidTable(pParse, p); - } - } - - iDb = sqlite3SchemaToIndex(db, p->pSchema); - -#ifndef SQLITE_OMIT_CHECK - /* Resolve names in all CHECK constraint expressions. 
- */ - if( p->pCheck ){ - sqlite3ResolveSelfReference(pParse, p, NC_IsCheck, 0, p->pCheck); - } -#endif /* !defined(SQLITE_OMIT_CHECK) */ - - /* Estimate the average row size for the table and for all implied indices */ - estimateTableWidth(p); - for(pIdx=p->pIndex; pIdx; pIdx=pIdx->pNext){ - estimateIndexWidth(pIdx); - } - - /* If not initializing, then create a record for the new table - ** in the SQLITE_MASTER table of the database. - ** - ** If this is a TEMPORARY table, write the entry into the auxiliary - ** file instead of into the main database file. - */ - if( !db->init.busy ){ - int n; - Vdbe *v; - char *zType; /* "view" or "table" */ - char *zType2; /* "VIEW" or "TABLE" */ - char *zStmt; /* Text of the CREATE TABLE or CREATE VIEW statement */ - - v = sqlite3GetVdbe(pParse); - if( NEVER(v==0) ) return; - - sqlite3VdbeAddOp1(v, OP_Close, 0); - - /* - ** Initialize zType for the new view or table. - */ - if( p->pSelect==0 ){ - /* A regular table */ - zType = "table"; - zType2 = "TABLE"; -#ifndef SQLITE_OMIT_VIEW - }else{ - /* A view */ - zType = "view"; - zType2 = "VIEW"; -#endif - } - - /* If this is a CREATE TABLE xx AS SELECT ..., execute the SELECT - ** statement to populate the new table. The root-page number for the - ** new table is in register pParse->regRoot. - ** - ** Once the SELECT has been coded by sqlite3Select(), it is in a - ** suitable state to query for the column names and types to be used - ** by the new table. - ** - ** A shared-cache write-lock is not required to write to the new table, - ** as a schema-lock must have already been obtained to create it. Since - ** a schema-lock excludes all other database users, the write-lock would - ** be redundant. - */ - if( pSelect ){ - SelectDest dest; - Table *pSelTab; - - assert(pParse->nTab==1); - sqlite3VdbeAddOp3(v, OP_OpenWrite, 1, pParse->regRoot, iDb); - sqlite3VdbeChangeP5(v, OPFLAG_P2ISREG); - pParse->nTab = 2; - sqlite3SelectDestInit(&dest, SRT_Table, 1); - sqlite3Select(pParse, pSelect, &dest); - sqlite3VdbeAddOp1(v, OP_Close, 1); - if( pParse->nErr==0 ){ - pSelTab = sqlite3ResultSetOfSelect(pParse, pSelect); - if( pSelTab==0 ) return; - assert( p->aCol==0 ); - p->nCol = pSelTab->nCol; - p->aCol = pSelTab->aCol; - pSelTab->nCol = 0; - pSelTab->aCol = 0; - sqlite3DeleteTable(db, pSelTab); - } - } - - /* Compute the complete text of the CREATE statement */ - if( pSelect ){ - zStmt = createTableStmt(db, p); - }else{ - Token *pEnd2 = tabOpts ? &pParse->sLastToken : pEnd; - n = (int)(pEnd2->z - pParse->sNameToken.z); - if( pEnd2->z[0]!=';' ) n += pEnd2->n; - zStmt = sqlite3MPrintf(db, - "CREATE %s %.*s", zType2, n, pParse->sNameToken.z - ); - } - - /* A slot for the record has already been allocated in the - ** SQLITE_MASTER table. We just need to update that slot with all - ** the information we've collected. - */ - sqlite3NestedParse(pParse, - "UPDATE %Q.%s " - "SET type='%s', name=%Q, tbl_name=%Q, rootpage=#%d, sql=%Q " - "WHERE rowid=#%d", - db->aDb[iDb].zName, SCHEMA_TABLE(iDb), - zType, - p->zName, - p->zName, - pParse->regRoot, - zStmt, - pParse->regRowid - ); - sqlite3DbFree(db, zStmt); - sqlite3ChangeCookie(pParse, iDb); - -#ifndef SQLITE_OMIT_AUTOINCREMENT - /* Check to see if we need to create an sqlite_sequence table for - ** keeping track of autoincrement keys. 
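/*
** Editorial sketch (not part of the original file): the sqlite_sequence table
** mentioned above is created lazily when the first AUTOINCREMENT table is
** defined, and it can be read like any other table.  Assumes <sqlite3.h> and
** an open connection db.
*/
static void sequenceTableDemo(sqlite3 *db){
  sqlite3_exec(db,
    "CREATE TABLE jobs(id INTEGER PRIMARY KEY AUTOINCREMENT, what TEXT);"
    "INSERT INTO jobs(what) VALUES('build');", 0, 0, 0);
  /* One row per AUTOINCREMENT table: name='jobs', seq=1 after the insert. */
  sqlite3_exec(db, "SELECT name, seq FROM sqlite_sequence;", 0, 0, 0);
}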
- */ - if( p->tabFlags & TF_Autoincrement ){ - Db *pDb = &db->aDb[iDb]; - assert( sqlite3SchemaMutexHeld(db, iDb, 0) ); - if( pDb->pSchema->pSeqTab==0 ){ - sqlite3NestedParse(pParse, - "CREATE TABLE %Q.sqlite_sequence(name,seq)", - pDb->zName - ); - } - } -#endif - - /* Reparse everything to update our internal data structures */ - sqlite3VdbeAddParseSchemaOp(v, iDb, - sqlite3MPrintf(db, "tbl_name='%q' AND type!='trigger'", p->zName)); - } - - - /* Add the table to the in-memory representation of the database. - */ - if( db->init.busy ){ - Table *pOld; - Schema *pSchema = p->pSchema; - assert( sqlite3SchemaMutexHeld(db, iDb, 0) ); - pOld = sqlite3HashInsert(&pSchema->tblHash, p->zName, - sqlite3Strlen30(p->zName),p); - if( pOld ){ - assert( p==pOld ); /* Malloc must have failed inside HashInsert() */ - db->mallocFailed = 1; - return; - } - pParse->pNewTable = 0; - db->flags |= SQLITE_InternChanges; - -#ifndef SQLITE_OMIT_ALTERTABLE - if( !p->pSelect ){ - const char *zName = (const char *)pParse->sNameToken.z; - int nName; - assert( !pSelect && pCons && pEnd ); - if( pCons->z==0 ){ - pCons = pEnd; - } - nName = (int)((const char *)pCons->z - zName); - p->addColOffset = 13 + sqlite3Utf8CharLen(zName, nName); - } -#endif - } -} - -#ifndef SQLITE_OMIT_VIEW -/* -** The parser calls this routine in order to create a new VIEW -*/ -SQLITE_PRIVATE void sqlite3CreateView( - Parse *pParse, /* The parsing context */ - Token *pBegin, /* The CREATE token that begins the statement */ - Token *pName1, /* The token that holds the name of the view */ - Token *pName2, /* The token that holds the name of the view */ - Select *pSelect, /* A SELECT statement that will become the new view */ - int isTemp, /* TRUE for a TEMPORARY view */ - int noErr /* Suppress error messages if VIEW already exists */ -){ - Table *p; - int n; - const char *z; - Token sEnd; - DbFixer sFix; - Token *pName = 0; - int iDb; - sqlite3 *db = pParse->db; - - if( pParse->nVar>0 ){ - sqlite3ErrorMsg(pParse, "parameters are not allowed in views"); - sqlite3SelectDelete(db, pSelect); - return; - } - sqlite3StartTable(pParse, pName1, pName2, isTemp, 1, 0, noErr); - p = pParse->pNewTable; - if( p==0 || pParse->nErr ){ - sqlite3SelectDelete(db, pSelect); - return; - } - sqlite3TwoPartName(pParse, pName1, pName2, &pName); - iDb = sqlite3SchemaToIndex(db, p->pSchema); - sqlite3FixInit(&sFix, pParse, iDb, "view", pName); - if( sqlite3FixSelect(&sFix, pSelect) ){ - sqlite3SelectDelete(db, pSelect); - return; - } - - /* Make a copy of the entire SELECT statement that defines the view. - ** This will force all the Expr.token.z values to be dynamically - ** allocated rather than point to the input string - which means that - ** they will persist after the current sqlite3_exec() call returns. - */ - p->pSelect = sqlite3SelectDup(db, pSelect, EXPRDUP_REDUCE); - sqlite3SelectDelete(db, pSelect); - if( db->mallocFailed ){ - return; - } - if( !db->init.busy ){ - sqlite3ViewGetColumnNames(pParse, p); - } - - /* Locate the end of the CREATE VIEW statement. Make sEnd point to - ** the end. 
- */ - sEnd = pParse->sLastToken; - if( ALWAYS(sEnd.z[0]!=0) && sEnd.z[0]!=';' ){ - sEnd.z += sEnd.n; - } - sEnd.n = 0; - n = (int)(sEnd.z - pBegin->z); - z = pBegin->z; - while( ALWAYS(n>0) && sqlite3Isspace(z[n-1]) ){ n--; } - sEnd.z = &z[n-1]; - sEnd.n = 1; - - /* Use sqlite3EndTable() to add the view to the SQLITE_MASTER table */ - sqlite3EndTable(pParse, 0, &sEnd, 0, 0); - return; -} -#endif /* SQLITE_OMIT_VIEW */ - -#if !defined(SQLITE_OMIT_VIEW) || !defined(SQLITE_OMIT_VIRTUALTABLE) -/* -** The Table structure pTable is really a VIEW. Fill in the names of -** the columns of the view in the pTable structure. Return the number -** of errors. If an error is seen leave an error message in pParse->zErrMsg. -*/ -SQLITE_PRIVATE int sqlite3ViewGetColumnNames(Parse *pParse, Table *pTable){ - Table *pSelTab; /* A fake table from which we get the result set */ - Select *pSel; /* Copy of the SELECT that implements the view */ - int nErr = 0; /* Number of errors encountered */ - int n; /* Temporarily holds the number of cursors assigned */ - sqlite3 *db = pParse->db; /* Database connection for malloc errors */ - int (*xAuth)(void*,int,const char*,const char*,const char*,const char*); - - assert( pTable ); - -#ifndef SQLITE_OMIT_VIRTUALTABLE - if( sqlite3VtabCallConnect(pParse, pTable) ){ - return SQLITE_ERROR; - } - if( IsVirtual(pTable) ) return 0; -#endif - -#ifndef SQLITE_OMIT_VIEW - /* A positive nCol means the columns names for this view are - ** already known. - */ - if( pTable->nCol>0 ) return 0; - - /* A negative nCol is a special marker meaning that we are currently - ** trying to compute the column names. If we enter this routine with - ** a negative nCol, it means two or more views form a loop, like this: - ** - ** CREATE VIEW one AS SELECT * FROM two; - ** CREATE VIEW two AS SELECT * FROM one; - ** - ** Actually, the error above is now caught prior to reaching this point. - ** But the following test is still important as it does come up - ** in the following: - ** - ** CREATE TABLE main.ex1(a); - ** CREATE TEMP VIEW ex1 AS SELECT a FROM ex1; - ** SELECT * FROM temp.ex1; - */ - if( pTable->nCol<0 ){ - sqlite3ErrorMsg(pParse, "view %s is circularly defined", pTable->zName); - return 1; - } - assert( pTable->nCol>=0 ); - - /* If we get this far, it means we need to compute the table names. - ** Note that the call to sqlite3ResultSetOfSelect() will expand any - ** "*" elements in the results set of the view and will assign cursors - ** to the elements of the FROM clause. But we do not want these changes - ** to be permanent. So the computation is done on a copy of the SELECT - ** statement that defines the view. 
- */ - assert( pTable->pSelect ); - pSel = sqlite3SelectDup(db, pTable->pSelect, 0); - if( pSel ){ - u8 enableLookaside = db->lookaside.bEnabled; - n = pParse->nTab; - sqlite3SrcListAssignCursors(pParse, pSel->pSrc); - pTable->nCol = -1; - db->lookaside.bEnabled = 0; -#ifndef SQLITE_OMIT_AUTHORIZATION - xAuth = db->xAuth; - db->xAuth = 0; - pSelTab = sqlite3ResultSetOfSelect(pParse, pSel); - db->xAuth = xAuth; -#else - pSelTab = sqlite3ResultSetOfSelect(pParse, pSel); -#endif - db->lookaside.bEnabled = enableLookaside; - pParse->nTab = n; - if( pSelTab ){ - assert( pTable->aCol==0 ); - pTable->nCol = pSelTab->nCol; - pTable->aCol = pSelTab->aCol; - pSelTab->nCol = 0; - pSelTab->aCol = 0; - sqlite3DeleteTable(db, pSelTab); - assert( sqlite3SchemaMutexHeld(db, 0, pTable->pSchema) ); - pTable->pSchema->flags |= DB_UnresetViews; - }else{ - pTable->nCol = 0; - nErr++; - } - sqlite3SelectDelete(db, pSel); - } else { - nErr++; - } -#endif /* SQLITE_OMIT_VIEW */ - return nErr; -} -#endif /* !defined(SQLITE_OMIT_VIEW) || !defined(SQLITE_OMIT_VIRTUALTABLE) */ - -#ifndef SQLITE_OMIT_VIEW -/* -** Clear the column names from every VIEW in database idx. -*/ -static void sqliteViewResetAll(sqlite3 *db, int idx){ - HashElem *i; - assert( sqlite3SchemaMutexHeld(db, idx, 0) ); - if( !DbHasProperty(db, idx, DB_UnresetViews) ) return; - for(i=sqliteHashFirst(&db->aDb[idx].pSchema->tblHash); i;i=sqliteHashNext(i)){ - Table *pTab = sqliteHashData(i); - if( pTab->pSelect ){ - sqliteDeleteColumnNames(db, pTab); - pTab->aCol = 0; - pTab->nCol = 0; - } - } - DbClearProperty(db, idx, DB_UnresetViews); -} -#else -# define sqliteViewResetAll(A,B) -#endif /* SQLITE_OMIT_VIEW */ - -/* -** This function is called by the VDBE to adjust the internal schema -** used by SQLite when the btree layer moves a table root page. The -** root-page of a table or index in database iDb has changed from iFrom -** to iTo. -** -** Ticket #1728: The symbol table might still contain information -** on tables and/or indices that are the process of being deleted. -** If you are unlucky, one of those deleted indices or tables might -** have the same rootpage number as the real table or index that is -** being moved. So we cannot stop searching after the first match -** because the first match might be for one of the deleted indices -** or tables and not the table/index that is actually being moved. -** We must continue looping until all tables and indices with -** rootpage==iFrom have been converted to have a rootpage of iTo -** in order to be certain that we got the right one. -*/ -#ifndef SQLITE_OMIT_AUTOVACUUM -SQLITE_PRIVATE void sqlite3RootPageMoved(sqlite3 *db, int iDb, int iFrom, int iTo){ - HashElem *pElem; - Hash *pHash; - Db *pDb; - - assert( sqlite3SchemaMutexHeld(db, iDb, 0) ); - pDb = &db->aDb[iDb]; - pHash = &pDb->pSchema->tblHash; - for(pElem=sqliteHashFirst(pHash); pElem; pElem=sqliteHashNext(pElem)){ - Table *pTab = sqliteHashData(pElem); - if( pTab->tnum==iFrom ){ - pTab->tnum = iTo; - } - } - pHash = &pDb->pSchema->idxHash; - for(pElem=sqliteHashFirst(pHash); pElem; pElem=sqliteHashNext(pElem)){ - Index *pIdx = sqliteHashData(pElem); - if( pIdx->tnum==iFrom ){ - pIdx->tnum = iTo; - } - } -} -#endif - -/* -** Write code to erase the table with root-page iTable from database iDb. -** Also write code to modify the sqlite_master table and internal schema -** if a root-page of another table is moved by the btree-layer whilst -** erasing iTable (this can happen with an auto-vacuum database). 
-*/ -static void destroyRootPage(Parse *pParse, int iTable, int iDb){ - Vdbe *v = sqlite3GetVdbe(pParse); - int r1 = sqlite3GetTempReg(pParse); - sqlite3VdbeAddOp3(v, OP_Destroy, iTable, r1, iDb); - sqlite3MayAbort(pParse); -#ifndef SQLITE_OMIT_AUTOVACUUM - /* OP_Destroy stores an in integer r1. If this integer - ** is non-zero, then it is the root page number of a table moved to - ** location iTable. The following code modifies the sqlite_master table to - ** reflect this. - ** - ** The "#NNN" in the SQL is a special constant that means whatever value - ** is in register NNN. See grammar rules associated with the TK_REGISTER - ** token for additional information. - */ - sqlite3NestedParse(pParse, - "UPDATE %Q.%s SET rootpage=%d WHERE #%d AND rootpage=#%d", - pParse->db->aDb[iDb].zName, SCHEMA_TABLE(iDb), iTable, r1, r1); -#endif - sqlite3ReleaseTempReg(pParse, r1); -} - -/* -** Write VDBE code to erase table pTab and all associated indices on disk. -** Code to update the sqlite_master tables and internal schema definitions -** in case a root-page belonging to another table is moved by the btree layer -** is also added (this can happen with an auto-vacuum database). -*/ -static void destroyTable(Parse *pParse, Table *pTab){ -#ifdef SQLITE_OMIT_AUTOVACUUM - Index *pIdx; - int iDb = sqlite3SchemaToIndex(pParse->db, pTab->pSchema); - destroyRootPage(pParse, pTab->tnum, iDb); - for(pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext){ - destroyRootPage(pParse, pIdx->tnum, iDb); - } -#else - /* If the database may be auto-vacuum capable (if SQLITE_OMIT_AUTOVACUUM - ** is not defined), then it is important to call OP_Destroy on the - ** table and index root-pages in order, starting with the numerically - ** largest root-page number. This guarantees that none of the root-pages - ** to be destroyed is relocated by an earlier OP_Destroy. i.e. if the - ** following were coded: - ** - ** OP_Destroy 4 0 - ** ... - ** OP_Destroy 5 0 - ** - ** and root page 5 happened to be the largest root-page number in the - ** database, then root page 5 would be moved to page 4 by the - ** "OP_Destroy 4 0" opcode. The subsequent "OP_Destroy 5 0" would hit - ** a free-list page. - */ - int iTab = pTab->tnum; - int iDestroyed = 0; - - while( 1 ){ - Index *pIdx; - int iLargest = 0; - - if( iDestroyed==0 || iTabpIndex; pIdx; pIdx=pIdx->pNext){ - int iIdx = pIdx->tnum; - assert( pIdx->pSchema==pTab->pSchema ); - if( (iDestroyed==0 || (iIdxiLargest ){ - iLargest = iIdx; - } - } - if( iLargest==0 ){ - return; - }else{ - int iDb = sqlite3SchemaToIndex(pParse->db, pTab->pSchema); - assert( iDb>=0 && iDbdb->nDb ); - destroyRootPage(pParse, iLargest, iDb); - iDestroyed = iLargest; - } - } -#endif -} - -/* -** Remove entries from the sqlite_statN tables (for N in (1,2,3)) -** after a DROP INDEX or DROP TABLE command. -*/ -static void sqlite3ClearStatTables( - Parse *pParse, /* The parsing context */ - int iDb, /* The database number */ - const char *zType, /* "idx" or "tbl" */ - const char *zName /* Name of index or table */ -){ - int i; - const char *zDbName = pParse->db->aDb[iDb].zName; - for(i=1; i<=4; i++){ - char zTab[24]; - sqlite3_snprintf(sizeof(zTab),zTab,"sqlite_stat%d",i); - if( sqlite3FindTable(pParse->db, zTab, zDbName) ){ - sqlite3NestedParse(pParse, - "DELETE FROM %Q.%s WHERE %s=%Q", - zDbName, zTab, zType, zName - ); - } - } -} - -/* -** Generate code to drop a table. 
-*/ -SQLITE_PRIVATE void sqlite3CodeDropTable(Parse *pParse, Table *pTab, int iDb, int isView){ - Vdbe *v; - sqlite3 *db = pParse->db; - Trigger *pTrigger; - Db *pDb = &db->aDb[iDb]; - - v = sqlite3GetVdbe(pParse); - assert( v!=0 ); - sqlite3BeginWriteOperation(pParse, 1, iDb); - -#ifndef SQLITE_OMIT_VIRTUALTABLE - if( IsVirtual(pTab) ){ - sqlite3VdbeAddOp0(v, OP_VBegin); - } -#endif - - /* Drop all triggers associated with the table being dropped. Code - ** is generated to remove entries from sqlite_master and/or - ** sqlite_temp_master if required. - */ - pTrigger = sqlite3TriggerList(pParse, pTab); - while( pTrigger ){ - assert( pTrigger->pSchema==pTab->pSchema || - pTrigger->pSchema==db->aDb[1].pSchema ); - sqlite3DropTriggerPtr(pParse, pTrigger); - pTrigger = pTrigger->pNext; - } - -#ifndef SQLITE_OMIT_AUTOINCREMENT - /* Remove any entries of the sqlite_sequence table associated with - ** the table being dropped. This is done before the table is dropped - ** at the btree level, in case the sqlite_sequence table needs to - ** move as a result of the drop (can happen in auto-vacuum mode). - */ - if( pTab->tabFlags & TF_Autoincrement ){ - sqlite3NestedParse(pParse, - "DELETE FROM %Q.sqlite_sequence WHERE name=%Q", - pDb->zName, pTab->zName - ); - } -#endif - - /* Drop all SQLITE_MASTER table and index entries that refer to the - ** table. The program name loops through the master table and deletes - ** every row that refers to a table of the same name as the one being - ** dropped. Triggers are handled separately because a trigger can be - ** created in the temp database that refers to a table in another - ** database. - */ - sqlite3NestedParse(pParse, - "DELETE FROM %Q.%s WHERE tbl_name=%Q and type!='trigger'", - pDb->zName, SCHEMA_TABLE(iDb), pTab->zName); - if( !isView && !IsVirtual(pTab) ){ - destroyTable(pParse, pTab); - } - - /* Remove the table entry from SQLite's internal schema and modify - ** the schema cookie. - */ - if( IsVirtual(pTab) ){ - sqlite3VdbeAddOp4(v, OP_VDestroy, iDb, 0, 0, pTab->zName, 0); - } - sqlite3VdbeAddOp4(v, OP_DropTable, iDb, 0, 0, pTab->zName, 0); - sqlite3ChangeCookie(pParse, iDb); - sqliteViewResetAll(db, iDb); -} - -/* -** This routine is called to do the work of a DROP TABLE statement. -** pName is the name of the table to be dropped. -*/ -SQLITE_PRIVATE void sqlite3DropTable(Parse *pParse, SrcList *pName, int isView, int noErr){ - Table *pTab; - Vdbe *v; - sqlite3 *db = pParse->db; - int iDb; - - if( db->mallocFailed ){ - goto exit_drop_table; - } - assert( pParse->nErr==0 ); - assert( pName->nSrc==1 ); - if( noErr ) db->suppressErr++; - pTab = sqlite3LocateTableItem(pParse, isView, &pName->a[0]); - if( noErr ) db->suppressErr--; - - if( pTab==0 ){ - if( noErr ) sqlite3CodeVerifyNamedSchema(pParse, pName->a[0].zDatabase); - goto exit_drop_table; - } - iDb = sqlite3SchemaToIndex(db, pTab->pSchema); - assert( iDb>=0 && iDbnDb ); - - /* If pTab is a virtual table, call ViewGetColumnNames() to ensure - ** it is initialized. 
- */ - if( IsVirtual(pTab) && sqlite3ViewGetColumnNames(pParse, pTab) ){ - goto exit_drop_table; - } -#ifndef SQLITE_OMIT_AUTHORIZATION - { - int code; - const char *zTab = SCHEMA_TABLE(iDb); - const char *zDb = db->aDb[iDb].zName; - const char *zArg2 = 0; - if( sqlite3AuthCheck(pParse, SQLITE_DELETE, zTab, 0, zDb)){ - goto exit_drop_table; - } - if( isView ){ - if( !OMIT_TEMPDB && iDb==1 ){ - code = SQLITE_DROP_TEMP_VIEW; - }else{ - code = SQLITE_DROP_VIEW; - } -#ifndef SQLITE_OMIT_VIRTUALTABLE - }else if( IsVirtual(pTab) ){ - code = SQLITE_DROP_VTABLE; - zArg2 = sqlite3GetVTable(db, pTab)->pMod->zName; -#endif - }else{ - if( !OMIT_TEMPDB && iDb==1 ){ - code = SQLITE_DROP_TEMP_TABLE; - }else{ - code = SQLITE_DROP_TABLE; - } - } - if( sqlite3AuthCheck(pParse, code, pTab->zName, zArg2, zDb) ){ - goto exit_drop_table; - } - if( sqlite3AuthCheck(pParse, SQLITE_DELETE, pTab->zName, 0, zDb) ){ - goto exit_drop_table; - } - } -#endif - if( sqlite3StrNICmp(pTab->zName, "sqlite_", 7)==0 - && sqlite3StrNICmp(pTab->zName, "sqlite_stat", 11)!=0 ){ - sqlite3ErrorMsg(pParse, "table %s may not be dropped", pTab->zName); - goto exit_drop_table; - } - -#ifndef SQLITE_OMIT_VIEW - /* Ensure DROP TABLE is not used on a view, and DROP VIEW is not used - ** on a table. - */ - if( isView && pTab->pSelect==0 ){ - sqlite3ErrorMsg(pParse, "use DROP TABLE to delete table %s", pTab->zName); - goto exit_drop_table; - } - if( !isView && pTab->pSelect ){ - sqlite3ErrorMsg(pParse, "use DROP VIEW to delete view %s", pTab->zName); - goto exit_drop_table; - } -#endif - - /* Generate code to remove the table from the master table - ** on disk. - */ - v = sqlite3GetVdbe(pParse); - if( v ){ - sqlite3BeginWriteOperation(pParse, 1, iDb); - sqlite3ClearStatTables(pParse, iDb, "tbl", pTab->zName); - sqlite3FkDropTable(pParse, pName, pTab); - sqlite3CodeDropTable(pParse, pTab, iDb, isView); - } - -exit_drop_table: - sqlite3SrcListDelete(db, pName); -} - -/* -** This routine is called to create a new foreign key on the table -** currently under construction. pFromCol determines which columns -** in the current table point to the foreign key. If pFromCol==0 then -** connect the key to the last column inserted. pTo is the name of -** the table referred to (a.k.a the "parent" table). pToCol is a list -** of tables in the parent pTo table. flags contains all -** information about the conflict resolution algorithms specified -** in the ON DELETE, ON UPDATE and ON INSERT clauses. -** -** An FKey structure is created and added to the table currently -** under construction in the pParse->pNewTable field. -** -** The foreign key is set for IMMEDIATE processing. A subsequent call -** to sqlite3DeferForeignKey() might change this to DEFERRED. -*/ -SQLITE_PRIVATE void sqlite3CreateForeignKey( - Parse *pParse, /* Parsing context */ - ExprList *pFromCol, /* Columns in this table that point to other table */ - Token *pTo, /* Name of the other table */ - ExprList *pToCol, /* Columns in the other table */ - int flags /* Conflict resolution algorithms. 
*/ -){ - sqlite3 *db = pParse->db; -#ifndef SQLITE_OMIT_FOREIGN_KEY - FKey *pFKey = 0; - FKey *pNextTo; - Table *p = pParse->pNewTable; - int nByte; - int i; - int nCol; - char *z; - - assert( pTo!=0 ); - if( p==0 || IN_DECLARE_VTAB ) goto fk_end; - if( pFromCol==0 ){ - int iCol = p->nCol-1; - if( NEVER(iCol<0) ) goto fk_end; - if( pToCol && pToCol->nExpr!=1 ){ - sqlite3ErrorMsg(pParse, "foreign key on %s" - " should reference only one column of table %T", - p->aCol[iCol].zName, pTo); - goto fk_end; - } - nCol = 1; - }else if( pToCol && pToCol->nExpr!=pFromCol->nExpr ){ - sqlite3ErrorMsg(pParse, - "number of columns in foreign key does not match the number of " - "columns in the referenced table"); - goto fk_end; - }else{ - nCol = pFromCol->nExpr; - } - nByte = sizeof(*pFKey) + (nCol-1)*sizeof(pFKey->aCol[0]) + pTo->n + 1; - if( pToCol ){ - for(i=0; inExpr; i++){ - nByte += sqlite3Strlen30(pToCol->a[i].zName) + 1; - } - } - pFKey = sqlite3DbMallocZero(db, nByte ); - if( pFKey==0 ){ - goto fk_end; - } - pFKey->pFrom = p; - pFKey->pNextFrom = p->pFKey; - z = (char*)&pFKey->aCol[nCol]; - pFKey->zTo = z; - memcpy(z, pTo->z, pTo->n); - z[pTo->n] = 0; - sqlite3Dequote(z); - z += pTo->n+1; - pFKey->nCol = nCol; - if( pFromCol==0 ){ - pFKey->aCol[0].iFrom = p->nCol-1; - }else{ - for(i=0; inCol; j++){ - if( sqlite3StrICmp(p->aCol[j].zName, pFromCol->a[i].zName)==0 ){ - pFKey->aCol[i].iFrom = j; - break; - } - } - if( j>=p->nCol ){ - sqlite3ErrorMsg(pParse, - "unknown column \"%s\" in foreign key definition", - pFromCol->a[i].zName); - goto fk_end; - } - } - } - if( pToCol ){ - for(i=0; ia[i].zName); - pFKey->aCol[i].zCol = z; - memcpy(z, pToCol->a[i].zName, n); - z[n] = 0; - z += n+1; - } - } - pFKey->isDeferred = 0; - pFKey->aAction[0] = (u8)(flags & 0xff); /* ON DELETE action */ - pFKey->aAction[1] = (u8)((flags >> 8 ) & 0xff); /* ON UPDATE action */ - - assert( sqlite3SchemaMutexHeld(db, 0, p->pSchema) ); - pNextTo = (FKey *)sqlite3HashInsert(&p->pSchema->fkeyHash, - pFKey->zTo, sqlite3Strlen30(pFKey->zTo), (void *)pFKey - ); - if( pNextTo==pFKey ){ - db->mallocFailed = 1; - goto fk_end; - } - if( pNextTo ){ - assert( pNextTo->pPrevTo==0 ); - pFKey->pNextTo = pNextTo; - pNextTo->pPrevTo = pFKey; - } - - /* Link the foreign key to the table as the last step. - */ - p->pFKey = pFKey; - pFKey = 0; - -fk_end: - sqlite3DbFree(db, pFKey); -#endif /* !defined(SQLITE_OMIT_FOREIGN_KEY) */ - sqlite3ExprListDelete(db, pFromCol); - sqlite3ExprListDelete(db, pToCol); -} - -/* -** This routine is called when an INITIALLY IMMEDIATE or INITIALLY DEFERRED -** clause is seen as part of a foreign key definition. The isDeferred -** parameter is 1 for INITIALLY DEFERRED and 0 for INITIALLY IMMEDIATE. -** The behavior of the most recently created foreign key is adjusted -** accordingly. -*/ -SQLITE_PRIVATE void sqlite3DeferForeignKey(Parse *pParse, int isDeferred){ -#ifndef SQLITE_OMIT_FOREIGN_KEY - Table *pTab; - FKey *pFKey; - if( (pTab = pParse->pNewTable)==0 || (pFKey = pTab->pFKey)==0 ) return; - assert( isDeferred==0 || isDeferred==1 ); /* EV: R-30323-21917 */ - pFKey->isDeferred = (u8)isDeferred; -#endif -} - -/* -** Generate code that will erase and refill index *pIdx. This is -** used to initialize a newly created index or to recompute the -** content of an index in response to a REINDEX command. -** -** if memRootPage is not negative, it means that the index is newly -** created. The register specified by memRootPage contains the -** root page number of the index. 
If memRootPage is negative, then -** the index already exists and must be cleared before being refilled and -** the root page number of the index is taken from pIndex->tnum. -*/ -static void sqlite3RefillIndex(Parse *pParse, Index *pIndex, int memRootPage){ - Table *pTab = pIndex->pTable; /* The table that is indexed */ - int iTab = pParse->nTab++; /* Btree cursor used for pTab */ - int iIdx = pParse->nTab++; /* Btree cursor used for pIndex */ - int iSorter; /* Cursor opened by OpenSorter (if in use) */ - int addr1; /* Address of top of loop */ - int addr2; /* Address to jump to for next iteration */ - int tnum; /* Root page of index */ - int iPartIdxLabel; /* Jump to this label to skip a row */ - Vdbe *v; /* Generate code into this virtual machine */ - KeyInfo *pKey; /* KeyInfo for index */ - int regRecord; /* Register holding assemblied index record */ - sqlite3 *db = pParse->db; /* The database connection */ - int iDb = sqlite3SchemaToIndex(db, pIndex->pSchema); - -#ifndef SQLITE_OMIT_AUTHORIZATION - if( sqlite3AuthCheck(pParse, SQLITE_REINDEX, pIndex->zName, 0, - db->aDb[iDb].zName ) ){ - return; - } -#endif - - /* Require a write-lock on the table to perform this operation */ - sqlite3TableLock(pParse, iDb, pTab->tnum, 1, pTab->zName); - - v = sqlite3GetVdbe(pParse); - if( v==0 ) return; - if( memRootPage>=0 ){ - tnum = memRootPage; - }else{ - tnum = pIndex->tnum; - } - pKey = sqlite3KeyInfoOfIndex(pParse, pIndex); - - /* Open the sorter cursor if we are to use one. */ - iSorter = pParse->nTab++; - sqlite3VdbeAddOp4(v, OP_SorterOpen, iSorter, 0, 0, (char*) - sqlite3KeyInfoRef(pKey), P4_KEYINFO); - - /* Open the table. Loop through all rows of the table, inserting index - ** records into the sorter. */ - sqlite3OpenTable(pParse, iTab, iDb, pTab, OP_OpenRead); - addr1 = sqlite3VdbeAddOp2(v, OP_Rewind, iTab, 0); VdbeCoverage(v); - regRecord = sqlite3GetTempReg(pParse); - - sqlite3GenerateIndexKey(pParse,pIndex,iTab,regRecord,0,&iPartIdxLabel,0,0); - sqlite3VdbeAddOp2(v, OP_SorterInsert, iSorter, regRecord); - sqlite3ResolvePartIdxLabel(pParse, iPartIdxLabel); - sqlite3VdbeAddOp2(v, OP_Next, iTab, addr1+1); VdbeCoverage(v); - sqlite3VdbeJumpHere(v, addr1); - if( memRootPage<0 ) sqlite3VdbeAddOp2(v, OP_Clear, tnum, iDb); - sqlite3VdbeAddOp4(v, OP_OpenWrite, iIdx, tnum, iDb, - (char *)pKey, P4_KEYINFO); - sqlite3VdbeChangeP5(v, OPFLAG_BULKCSR|((memRootPage>=0)?OPFLAG_P2ISREG:0)); - - addr1 = sqlite3VdbeAddOp2(v, OP_SorterSort, iSorter, 0); VdbeCoverage(v); - assert( pKey!=0 || db->mallocFailed || pParse->nErr ); - if( pIndex->onError!=OE_None && pKey!=0 ){ - int j2 = sqlite3VdbeCurrentAddr(v) + 3; - sqlite3VdbeAddOp2(v, OP_Goto, 0, j2); - addr2 = sqlite3VdbeCurrentAddr(v); - sqlite3VdbeAddOp4Int(v, OP_SorterCompare, iSorter, j2, regRecord, - pKey->nField - pIndex->nKeyCol); VdbeCoverage(v); - sqlite3UniqueConstraint(pParse, OE_Abort, pIndex); - }else{ - addr2 = sqlite3VdbeCurrentAddr(v); - } - sqlite3VdbeAddOp2(v, OP_SorterData, iSorter, regRecord); - sqlite3VdbeAddOp3(v, OP_IdxInsert, iIdx, regRecord, 1); - sqlite3VdbeChangeP5(v, OPFLAG_USESEEKRESULT); - sqlite3ReleaseTempReg(pParse, regRecord); - sqlite3VdbeAddOp2(v, OP_SorterNext, iSorter, addr2); VdbeCoverage(v); - sqlite3VdbeJumpHere(v, addr1); - - sqlite3VdbeAddOp1(v, OP_Close, iTab); - sqlite3VdbeAddOp1(v, OP_Close, iIdx); - sqlite3VdbeAddOp1(v, OP_Close, iSorter); -} - -/* -** Allocate heap space to hold an Index object with nCol columns. 
-** -** Increase the allocation size to provide an extra nExtra bytes -** of 8-byte aligned space after the Index object and return a -** pointer to this extra space in *ppExtra. -*/ -SQLITE_PRIVATE Index *sqlite3AllocateIndexObject( - sqlite3 *db, /* Database connection */ - i16 nCol, /* Total number of columns in the index */ - int nExtra, /* Number of bytes of extra space to alloc */ - char **ppExtra /* Pointer to the "extra" space */ -){ - Index *p; /* Allocated index object */ - int nByte; /* Bytes of space for Index object + arrays */ - - nByte = ROUND8(sizeof(Index)) + /* Index structure */ - ROUND8(sizeof(char*)*nCol) + /* Index.azColl */ - ROUND8(sizeof(LogEst)*(nCol+1) + /* Index.aiRowLogEst */ - sizeof(i16)*nCol + /* Index.aiColumn */ - sizeof(u8)*nCol); /* Index.aSortOrder */ - p = sqlite3DbMallocZero(db, nByte + nExtra); - if( p ){ - char *pExtra = ((char*)p)+ROUND8(sizeof(Index)); - p->azColl = (char**)pExtra; pExtra += ROUND8(sizeof(char*)*nCol); - p->aiRowLogEst = (LogEst*)pExtra; pExtra += sizeof(LogEst)*(nCol+1); - p->aiColumn = (i16*)pExtra; pExtra += sizeof(i16)*nCol; - p->aSortOrder = (u8*)pExtra; - p->nColumn = nCol; - p->nKeyCol = nCol - 1; - *ppExtra = ((char*)p) + nByte; - } - return p; -} - -/* -** Create a new index for an SQL table. pName1.pName2 is the name of the index -** and pTblList is the name of the table that is to be indexed. Both will -** be NULL for a primary key or an index that is created to satisfy a -** UNIQUE constraint. If pTable and pIndex are NULL, use pParse->pNewTable -** as the table to be indexed. pParse->pNewTable is a table that is -** currently being constructed by a CREATE TABLE statement. -** -** pList is a list of columns to be indexed. pList will be NULL if this -** is a primary key or unique-constraint on the most recent column added -** to the table currently under construction. -** -** If the index is created successfully, return a pointer to the new Index -** structure. This is used by sqlite3AddPrimaryKey() to mark the index -** as the tables primary key (Index.idxType==SQLITE_IDXTYPE_PRIMARYKEY) -*/ -SQLITE_PRIVATE Index *sqlite3CreateIndex( - Parse *pParse, /* All information about this parse */ - Token *pName1, /* First part of index name. May be NULL */ - Token *pName2, /* Second part of index name. May be NULL */ - SrcList *pTblName, /* Table to index. Use pParse->pNewTable if 0 */ - ExprList *pList, /* A list of columns to be indexed */ - int onError, /* OE_Abort, OE_Ignore, OE_Replace, or OE_None */ - Token *pStart, /* The CREATE token that begins this statement */ - Expr *pPIWhere, /* WHERE clause for partial indices */ - int sortOrder, /* Sort order of primary key when pList==NULL */ - int ifNotExist /* Omit error if index already exists */ -){ - Index *pRet = 0; /* Pointer to return */ - Table *pTab = 0; /* Table to be indexed */ - Index *pIndex = 0; /* The index to be created */ - char *zName = 0; /* Name of the index */ - int nName; /* Number of characters in zName */ - int i, j; - DbFixer sFix; /* For assigning database names to pTable */ - int sortOrderMask; /* 1 to honor DESC in index. 0 to ignore. 
*/ - sqlite3 *db = pParse->db; - Db *pDb; /* The specific table containing the indexed database */ - int iDb; /* Index of the database that is being written */ - Token *pName = 0; /* Unqualified name of the index to create */ - struct ExprList_item *pListItem; /* For looping over pList */ - const Column *pTabCol; /* A column in the table */ - int nExtra = 0; /* Space allocated for zExtra[] */ - int nExtraCol; /* Number of extra columns needed */ - char *zExtra = 0; /* Extra space after the Index object */ - Index *pPk = 0; /* PRIMARY KEY index for WITHOUT ROWID tables */ - - assert( pParse->nErr==0 ); /* Never called with prior errors */ - if( db->mallocFailed || IN_DECLARE_VTAB ){ - goto exit_create_index; - } - if( SQLITE_OK!=sqlite3ReadSchema(pParse) ){ - goto exit_create_index; - } - - /* - ** Find the table that is to be indexed. Return early if not found. - */ - if( pTblName!=0 ){ - - /* Use the two-part index name to determine the database - ** to search for the table. 'Fix' the table name to this db - ** before looking up the table. - */ - assert( pName1 && pName2 ); - iDb = sqlite3TwoPartName(pParse, pName1, pName2, &pName); - if( iDb<0 ) goto exit_create_index; - assert( pName && pName->z ); - -#ifndef SQLITE_OMIT_TEMPDB - /* If the index name was unqualified, check if the table - ** is a temp table. If so, set the database to 1. Do not do this - ** if initialising a database schema. - */ - if( !db->init.busy ){ - pTab = sqlite3SrcListLookup(pParse, pTblName); - if( pName2->n==0 && pTab && pTab->pSchema==db->aDb[1].pSchema ){ - iDb = 1; - } - } -#endif - - sqlite3FixInit(&sFix, pParse, iDb, "index", pName); - if( sqlite3FixSrcList(&sFix, pTblName) ){ - /* Because the parser constructs pTblName from a single identifier, - ** sqlite3FixSrcList can never fail. */ - assert(0); - } - pTab = sqlite3LocateTableItem(pParse, 0, &pTblName->a[0]); - assert( db->mallocFailed==0 || pTab==0 ); - if( pTab==0 ) goto exit_create_index; - if( iDb==1 && db->aDb[iDb].pSchema!=pTab->pSchema ){ - sqlite3ErrorMsg(pParse, - "cannot create a TEMP index on non-TEMP table \"%s\"", - pTab->zName); - goto exit_create_index; - } - if( !HasRowid(pTab) ) pPk = sqlite3PrimaryKeyIndex(pTab); - }else{ - assert( pName==0 ); - assert( pStart==0 ); - pTab = pParse->pNewTable; - if( !pTab ) goto exit_create_index; - iDb = sqlite3SchemaToIndex(db, pTab->pSchema); - } - pDb = &db->aDb[iDb]; - - assert( pTab!=0 ); - assert( pParse->nErr==0 ); - if( sqlite3StrNICmp(pTab->zName, "sqlite_", 7)==0 - && sqlite3StrNICmp(&pTab->zName[7],"altertab_",9)!=0 ){ - sqlite3ErrorMsg(pParse, "table %s may not be indexed", pTab->zName); - goto exit_create_index; - } -#ifndef SQLITE_OMIT_VIEW - if( pTab->pSelect ){ - sqlite3ErrorMsg(pParse, "views may not be indexed"); - goto exit_create_index; - } -#endif -#ifndef SQLITE_OMIT_VIRTUALTABLE - if( IsVirtual(pTab) ){ - sqlite3ErrorMsg(pParse, "virtual tables may not be indexed"); - goto exit_create_index; - } -#endif - - /* - ** Find the name of the index. Make sure there is not already another - ** index or table with the same name. - ** - ** Exception: If we are reading the names of permanent indices from the - ** sqlite_master table (because some other process changed the schema) and - ** one of the index names collides with the name of a temporary table or - ** index, then we will continue to process this index. - ** - ** If pName==0 it means that we are - ** dealing with a primary key or UNIQUE constraint. We have to invent our - ** own name. 
- */ - if( pName ){ - zName = sqlite3NameFromToken(db, pName); - if( zName==0 ) goto exit_create_index; - assert( pName->z!=0 ); - if( SQLITE_OK!=sqlite3CheckObjectName(pParse, zName) ){ - goto exit_create_index; - } - if( !db->init.busy ){ - if( sqlite3FindTable(db, zName, 0)!=0 ){ - sqlite3ErrorMsg(pParse, "there is already a table named %s", zName); - goto exit_create_index; - } - } - if( sqlite3FindIndex(db, zName, pDb->zName)!=0 ){ - if( !ifNotExist ){ - sqlite3ErrorMsg(pParse, "index %s already exists", zName); - }else{ - assert( !db->init.busy ); - sqlite3CodeVerifySchema(pParse, iDb); - } - goto exit_create_index; - } - }else{ - int n; - Index *pLoop; - for(pLoop=pTab->pIndex, n=1; pLoop; pLoop=pLoop->pNext, n++){} - zName = sqlite3MPrintf(db, "sqlite_autoindex_%s_%d", pTab->zName, n); - if( zName==0 ){ - goto exit_create_index; - } - } - - /* Check for authorization to create an index. - */ -#ifndef SQLITE_OMIT_AUTHORIZATION - { - const char *zDb = pDb->zName; - if( sqlite3AuthCheck(pParse, SQLITE_INSERT, SCHEMA_TABLE(iDb), 0, zDb) ){ - goto exit_create_index; - } - i = SQLITE_CREATE_INDEX; - if( !OMIT_TEMPDB && iDb==1 ) i = SQLITE_CREATE_TEMP_INDEX; - if( sqlite3AuthCheck(pParse, i, zName, pTab->zName, zDb) ){ - goto exit_create_index; - } - } -#endif - - /* If pList==0, it means this routine was called to make a primary - ** key out of the last column added to the table under construction. - ** So create a fake list to simulate this. - */ - if( pList==0 ){ - pList = sqlite3ExprListAppend(pParse, 0, 0); - if( pList==0 ) goto exit_create_index; - pList->a[0].zName = sqlite3DbStrDup(pParse->db, - pTab->aCol[pTab->nCol-1].zName); - pList->a[0].sortOrder = (u8)sortOrder; - } - - /* Figure out how many bytes of space are required to store explicitly - ** specified collation sequence names. - */ - for(i=0; inExpr; i++){ - Expr *pExpr = pList->a[i].pExpr; - if( pExpr ){ - assert( pExpr->op==TK_COLLATE ); - nExtra += (1 + sqlite3Strlen30(pExpr->u.zToken)); - } - } - - /* - ** Allocate the index structure. - */ - nName = sqlite3Strlen30(zName); - nExtraCol = pPk ? pPk->nKeyCol : 1; - pIndex = sqlite3AllocateIndexObject(db, pList->nExpr + nExtraCol, - nName + nExtra + 1, &zExtra); - if( db->mallocFailed ){ - goto exit_create_index; - } - assert( EIGHT_BYTE_ALIGNMENT(pIndex->aiRowLogEst) ); - assert( EIGHT_BYTE_ALIGNMENT(pIndex->azColl) ); - pIndex->zName = zExtra; - zExtra += nName + 1; - memcpy(pIndex->zName, zName, nName+1); - pIndex->pTable = pTab; - pIndex->onError = (u8)onError; - pIndex->uniqNotNull = onError!=OE_None; - pIndex->idxType = pName ? SQLITE_IDXTYPE_APPDEF : SQLITE_IDXTYPE_UNIQUE; - pIndex->pSchema = db->aDb[iDb].pSchema; - pIndex->nKeyCol = pList->nExpr; - if( pPIWhere ){ - sqlite3ResolveSelfReference(pParse, pTab, NC_PartIdx, pPIWhere, 0); - pIndex->pPartIdxWhere = pPIWhere; - pPIWhere = 0; - } - assert( sqlite3SchemaMutexHeld(db, iDb, 0) ); - - /* Check to see if we should honor DESC requests on index columns - */ - if( pDb->pSchema->file_format>=4 ){ - sortOrderMask = -1; /* Honor DESC */ - }else{ - sortOrderMask = 0; /* Ignore DESC */ - } - - /* Scan the names of the columns of the table to be indexed and - ** load the column indices into the Index structure. Report an error - ** if any column is not found. - ** - ** TODO: Add a test to make sure that the same column is not named - ** more than once within the same index. Only the first instance of - ** the column will ever be used by the optimizer. 
Note that using the - ** same column more than once cannot be an error because that would - ** break backwards compatibility - it needs to be a warning. - */ - for(i=0, pListItem=pList->a; inExpr; i++, pListItem++){ - const char *zColName = pListItem->zName; - int requestedSortOrder; - char *zColl; /* Collation sequence name */ - - for(j=0, pTabCol=pTab->aCol; jnCol; j++, pTabCol++){ - if( sqlite3StrICmp(zColName, pTabCol->zName)==0 ) break; - } - if( j>=pTab->nCol ){ - sqlite3ErrorMsg(pParse, "table %s has no column named %s", - pTab->zName, zColName); - pParse->checkSchema = 1; - goto exit_create_index; - } - assert( pTab->nCol<=0x7fff && j<=0x7fff ); - pIndex->aiColumn[i] = (i16)j; - if( pListItem->pExpr ){ - int nColl; - assert( pListItem->pExpr->op==TK_COLLATE ); - zColl = pListItem->pExpr->u.zToken; - nColl = sqlite3Strlen30(zColl) + 1; - assert( nExtra>=nColl ); - memcpy(zExtra, zColl, nColl); - zColl = zExtra; - zExtra += nColl; - nExtra -= nColl; - }else{ - zColl = pTab->aCol[j].zColl; - if( !zColl ) zColl = "BINARY"; - } - if( !db->init.busy && !sqlite3LocateCollSeq(pParse, zColl) ){ - goto exit_create_index; - } - pIndex->azColl[i] = zColl; - requestedSortOrder = pListItem->sortOrder & sortOrderMask; - pIndex->aSortOrder[i] = (u8)requestedSortOrder; - if( pTab->aCol[j].notNull==0 ) pIndex->uniqNotNull = 0; - } - if( pPk ){ - for(j=0; jnKeyCol; j++){ - int x = pPk->aiColumn[j]; - if( hasColumn(pIndex->aiColumn, pIndex->nKeyCol, x) ){ - pIndex->nColumn--; - }else{ - pIndex->aiColumn[i] = x; - pIndex->azColl[i] = pPk->azColl[j]; - pIndex->aSortOrder[i] = pPk->aSortOrder[j]; - i++; - } - } - assert( i==pIndex->nColumn ); - }else{ - pIndex->aiColumn[i] = -1; - pIndex->azColl[i] = "BINARY"; - } - sqlite3DefaultRowEst(pIndex); - if( pParse->pNewTable==0 ) estimateIndexWidth(pIndex); - - if( pTab==pParse->pNewTable ){ - /* This routine has been called to create an automatic index as a - ** result of a PRIMARY KEY or UNIQUE clause on a column definition, or - ** a PRIMARY KEY or UNIQUE clause following the column definitions. - ** i.e. one of: - ** - ** CREATE TABLE t(x PRIMARY KEY, y); - ** CREATE TABLE t(x, y, UNIQUE(x, y)); - ** - ** Either way, check to see if the table already has such an index. If - ** so, don't bother creating this one. This only applies to - ** automatically created indices. Users can do as they wish with - ** explicit indices. - ** - ** Two UNIQUE or PRIMARY KEY constraints are considered equivalent - ** (and thus suppressing the second one) even if they have different - ** sort orders. - ** - ** If there are different collating sequences or if the columns of - ** the constraint occur in different orders, then the constraints are - ** considered distinct and both result in separate indices. - */ - Index *pIdx; - for(pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext){ - int k; - assert( pIdx->onError!=OE_None ); - assert( pIdx->idxType!=SQLITE_IDXTYPE_APPDEF ); - assert( pIndex->onError!=OE_None ); - - if( pIdx->nKeyCol!=pIndex->nKeyCol ) continue; - for(k=0; knKeyCol; k++){ - const char *z1; - const char *z2; - if( pIdx->aiColumn[k]!=pIndex->aiColumn[k] ) break; - z1 = pIdx->azColl[k]; - z2 = pIndex->azColl[k]; - if( z1!=z2 && sqlite3StrICmp(z1, z2) ) break; - } - if( k==pIdx->nKeyCol ){ - if( pIdx->onError!=pIndex->onError ){ - /* This constraint creates the same index as a previous - ** constraint specified somewhere in the CREATE TABLE statement. - ** However the ON CONFLICT clauses are different. 
If both this - ** constraint and the previous equivalent constraint have explicit - ** ON CONFLICT clauses this is an error. Otherwise, use the - ** explicitly specified behavior for the index. - */ - if( !(pIdx->onError==OE_Default || pIndex->onError==OE_Default) ){ - sqlite3ErrorMsg(pParse, - "conflicting ON CONFLICT clauses specified", 0); - } - if( pIdx->onError==OE_Default ){ - pIdx->onError = pIndex->onError; - } - } - goto exit_create_index; - } - } - } - - /* Link the new Index structure to its table and to the other - ** in-memory database structures. - */ - if( db->init.busy ){ - Index *p; - assert( sqlite3SchemaMutexHeld(db, 0, pIndex->pSchema) ); - p = sqlite3HashInsert(&pIndex->pSchema->idxHash, - pIndex->zName, sqlite3Strlen30(pIndex->zName), - pIndex); - if( p ){ - assert( p==pIndex ); /* Malloc must have failed */ - db->mallocFailed = 1; - goto exit_create_index; - } - db->flags |= SQLITE_InternChanges; - if( pTblName!=0 ){ - pIndex->tnum = db->init.newTnum; - } - } - - /* If this is the initial CREATE INDEX statement (or CREATE TABLE if the - ** index is an implied index for a UNIQUE or PRIMARY KEY constraint) then - ** emit code to allocate the index rootpage on disk and make an entry for - ** the index in the sqlite_master table and populate the index with - ** content. But, do not do this if we are simply reading the sqlite_master - ** table to parse the schema, or if this index is the PRIMARY KEY index - ** of a WITHOUT ROWID table. - ** - ** If pTblName==0 it means this index is generated as an implied PRIMARY KEY - ** or UNIQUE index in a CREATE TABLE statement. Since the table - ** has just been created, it contains no data and the index initialization - ** step can be skipped. - */ - else if( pParse->nErr==0 && (HasRowid(pTab) || pTblName!=0) ){ - Vdbe *v; - char *zStmt; - int iMem = ++pParse->nMem; - - v = sqlite3GetVdbe(pParse); - if( v==0 ) goto exit_create_index; - - - /* Create the rootpage for the index - */ - sqlite3BeginWriteOperation(pParse, 1, iDb); - sqlite3VdbeAddOp2(v, OP_CreateIndex, iDb, iMem); - - /* Gather the complete text of the CREATE INDEX statement into - ** the zStmt variable - */ - if( pStart ){ - int n = (int)(pParse->sLastToken.z - pName->z) + pParse->sLastToken.n; - if( pName->z[n-1]==';' ) n--; - /* A named index with an explicit CREATE INDEX statement */ - zStmt = sqlite3MPrintf(db, "CREATE%s INDEX %.*s", - onError==OE_None ? "" : " UNIQUE", n, pName->z); - }else{ - /* An automatic index created by a PRIMARY KEY or UNIQUE constraint */ - /* zStmt = sqlite3MPrintf(""); */ - zStmt = 0; - } - - /* Add an entry in sqlite_master for this index - */ - sqlite3NestedParse(pParse, - "INSERT INTO %Q.%s VALUES('index',%Q,%Q,#%d,%Q);", - db->aDb[iDb].zName, SCHEMA_TABLE(iDb), - pIndex->zName, - pTab->zName, - iMem, - zStmt - ); - sqlite3DbFree(db, zStmt); - - /* Fill the index with data and reparse the schema. Code an OP_Expire - ** to invalidate all pre-compiled statements. - */ - if( pTblName ){ - sqlite3RefillIndex(pParse, pIndex, iMem); - sqlite3ChangeCookie(pParse, iDb); - sqlite3VdbeAddParseSchemaOp(v, iDb, - sqlite3MPrintf(db, "name='%q' AND type='index'", pIndex->zName)); - sqlite3VdbeAddOp1(v, OP_Expire, 0); - } - } - - /* When adding an index to the list of indices for a table, make - ** sure all indices labeled OE_Replace come after all those labeled - ** OE_Ignore. This is necessary for the correct constraint check - ** processing (in sqlite3GenerateConstraintChecks()) as part of - ** UPDATE and INSERT statements. 
- */ - if( db->init.busy || pTblName==0 ){ - if( onError!=OE_Replace || pTab->pIndex==0 - || pTab->pIndex->onError==OE_Replace){ - pIndex->pNext = pTab->pIndex; - pTab->pIndex = pIndex; - }else{ - Index *pOther = pTab->pIndex; - while( pOther->pNext && pOther->pNext->onError!=OE_Replace ){ - pOther = pOther->pNext; - } - pIndex->pNext = pOther->pNext; - pOther->pNext = pIndex; - } - pRet = pIndex; - pIndex = 0; - } - - /* Clean up before exiting */ -exit_create_index: - if( pIndex ) freeIndex(db, pIndex); - sqlite3ExprDelete(db, pPIWhere); - sqlite3ExprListDelete(db, pList); - sqlite3SrcListDelete(db, pTblName); - sqlite3DbFree(db, zName); - return pRet; -} - -/* -** Fill the Index.aiRowEst[] array with default information - information -** to be used when we have not run the ANALYZE command. -** -** aiRowEst[0] is suppose to contain the number of elements in the index. -** Since we do not know, guess 1 million. aiRowEst[1] is an estimate of the -** number of rows in the table that match any particular value of the -** first column of the index. aiRowEst[2] is an estimate of the number -** of rows that match any particular combination of the first 2 columns -** of the index. And so forth. It must always be the case that -* -** aiRowEst[N]<=aiRowEst[N-1] -** aiRowEst[N]>=1 -** -** Apart from that, we have little to go on besides intuition as to -** how aiRowEst[] should be initialized. The numbers generated here -** are based on typical values found in actual indices. -*/ -SQLITE_PRIVATE void sqlite3DefaultRowEst(Index *pIdx){ - /* 10, 9, 8, 7, 6 */ - LogEst aVal[] = { 33, 32, 30, 28, 26 }; - LogEst *a = pIdx->aiRowLogEst; - int nCopy = MIN(ArraySize(aVal), pIdx->nKeyCol); - int i; - - /* Set the first entry (number of rows in the index) to the estimated - ** number of rows in the table. Or 10, if the estimated number of rows - ** in the table is less than that. */ - a[0] = pIdx->pTable->nRowLogEst; - if( a[0]<33 ) a[0] = 33; assert( 33==sqlite3LogEst(10) ); - - /* Estimate that a[1] is 10, a[2] is 9, a[3] is 8, a[4] is 7, a[5] is - ** 6 and each subsequent value (if any) is 5. */ - memcpy(&a[1], aVal, nCopy*sizeof(LogEst)); - for(i=nCopy+1; i<=pIdx->nKeyCol; i++){ - a[i] = 23; assert( 23==sqlite3LogEst(5) ); - } - - assert( 0==sqlite3LogEst(1) ); - if( pIdx->onError!=OE_None ) a[pIdx->nKeyCol] = 0; -} - -/* -** This routine will drop an existing named index. This routine -** implements the DROP INDEX statement. 
-*/ -SQLITE_PRIVATE void sqlite3DropIndex(Parse *pParse, SrcList *pName, int ifExists){ - Index *pIndex; - Vdbe *v; - sqlite3 *db = pParse->db; - int iDb; - - assert( pParse->nErr==0 ); /* Never called with prior errors */ - if( db->mallocFailed ){ - goto exit_drop_index; - } - assert( pName->nSrc==1 ); - if( SQLITE_OK!=sqlite3ReadSchema(pParse) ){ - goto exit_drop_index; - } - pIndex = sqlite3FindIndex(db, pName->a[0].zName, pName->a[0].zDatabase); - if( pIndex==0 ){ - if( !ifExists ){ - sqlite3ErrorMsg(pParse, "no such index: %S", pName, 0); - }else{ - sqlite3CodeVerifyNamedSchema(pParse, pName->a[0].zDatabase); - } - pParse->checkSchema = 1; - goto exit_drop_index; - } - if( pIndex->idxType!=SQLITE_IDXTYPE_APPDEF ){ - sqlite3ErrorMsg(pParse, "index associated with UNIQUE " - "or PRIMARY KEY constraint cannot be dropped", 0); - goto exit_drop_index; - } - iDb = sqlite3SchemaToIndex(db, pIndex->pSchema); -#ifndef SQLITE_OMIT_AUTHORIZATION - { - int code = SQLITE_DROP_INDEX; - Table *pTab = pIndex->pTable; - const char *zDb = db->aDb[iDb].zName; - const char *zTab = SCHEMA_TABLE(iDb); - if( sqlite3AuthCheck(pParse, SQLITE_DELETE, zTab, 0, zDb) ){ - goto exit_drop_index; - } - if( !OMIT_TEMPDB && iDb ) code = SQLITE_DROP_TEMP_INDEX; - if( sqlite3AuthCheck(pParse, code, pIndex->zName, pTab->zName, zDb) ){ - goto exit_drop_index; - } - } -#endif - - /* Generate code to remove the index and from the master table */ - v = sqlite3GetVdbe(pParse); - if( v ){ - sqlite3BeginWriteOperation(pParse, 1, iDb); - sqlite3NestedParse(pParse, - "DELETE FROM %Q.%s WHERE name=%Q AND type='index'", - db->aDb[iDb].zName, SCHEMA_TABLE(iDb), pIndex->zName - ); - sqlite3ClearStatTables(pParse, iDb, "idx", pIndex->zName); - sqlite3ChangeCookie(pParse, iDb); - destroyRootPage(pParse, pIndex->tnum, iDb); - sqlite3VdbeAddOp4(v, OP_DropIndex, iDb, 0, 0, pIndex->zName, 0); - } - -exit_drop_index: - sqlite3SrcListDelete(db, pName); -} - -/* -** pArray is a pointer to an array of objects. Each object in the -** array is szEntry bytes in size. This routine uses sqlite3DbRealloc() -** to extend the array so that there is space for a new object at the end. -** -** When this function is called, *pnEntry contains the current size of -** the array (in entries - so the allocation is ((*pnEntry) * szEntry) bytes -** in total). -** -** If the realloc() is successful (i.e. if no OOM condition occurs), the -** space allocated for the new object is zeroed, *pnEntry updated to -** reflect the new size of the array and a pointer to the new allocation -** returned. *pIdx is set to the index of the new array entry in this case. -** -** Otherwise, if the realloc() fails, *pIdx is set to -1, *pnEntry remains -** unchanged and a copy of pArray returned. -*/ -SQLITE_PRIVATE void *sqlite3ArrayAllocate( - sqlite3 *db, /* Connection to notify of malloc failures */ - void *pArray, /* Array of objects. Might be reallocated */ - int szEntry, /* Size of each object in the array */ - int *pnEntry, /* Number of objects currently in use */ - int *pIdx /* Write the index of a new slot here */ -){ - char *z; - int n = *pnEntry; - if( (n & (n-1))==0 ){ - int sz = (n==0) ? 1 : 2*n; - void *pNew = sqlite3DbRealloc(db, pArray, sz*szEntry); - if( pNew==0 ){ - *pIdx = -1; - return pArray; - } - pArray = pNew; - } - z = (char*)pArray; - memset(&z[n * szEntry], 0, szEntry); - *pIdx = n; - ++*pnEntry; - return pArray; -} - -/* -** Append a new element to the given IdList. Create a new IdList if -** need be. 
-** -** A new IdList is returned, or NULL if malloc() fails. -*/ -SQLITE_PRIVATE IdList *sqlite3IdListAppend(sqlite3 *db, IdList *pList, Token *pToken){ - int i; - if( pList==0 ){ - pList = sqlite3DbMallocZero(db, sizeof(IdList) ); - if( pList==0 ) return 0; - } - pList->a = sqlite3ArrayAllocate( - db, - pList->a, - sizeof(pList->a[0]), - &pList->nId, - &i - ); - if( i<0 ){ - sqlite3IdListDelete(db, pList); - return 0; - } - pList->a[i].zName = sqlite3NameFromToken(db, pToken); - return pList; -} - -/* -** Delete an IdList. -*/ -SQLITE_PRIVATE void sqlite3IdListDelete(sqlite3 *db, IdList *pList){ - int i; - if( pList==0 ) return; - for(i=0; inId; i++){ - sqlite3DbFree(db, pList->a[i].zName); - } - sqlite3DbFree(db, pList->a); - sqlite3DbFree(db, pList); -} - -/* -** Return the index in pList of the identifier named zId. Return -1 -** if not found. -*/ -SQLITE_PRIVATE int sqlite3IdListIndex(IdList *pList, const char *zName){ - int i; - if( pList==0 ) return -1; - for(i=0; inId; i++){ - if( sqlite3StrICmp(pList->a[i].zName, zName)==0 ) return i; - } - return -1; -} - -/* -** Expand the space allocated for the given SrcList object by -** creating nExtra new slots beginning at iStart. iStart is zero based. -** New slots are zeroed. -** -** For example, suppose a SrcList initially contains two entries: A,B. -** To append 3 new entries onto the end, do this: -** -** sqlite3SrcListEnlarge(db, pSrclist, 3, 2); -** -** After the call above it would contain: A, B, nil, nil, nil. -** If the iStart argument had been 1 instead of 2, then the result -** would have been: A, nil, nil, nil, B. To prepend the new slots, -** the iStart value would be 0. The result then would -** be: nil, nil, nil, A, B. -** -** If a memory allocation fails the SrcList is unchanged. The -** db->mallocFailed flag will be set to true. -*/ -SQLITE_PRIVATE SrcList *sqlite3SrcListEnlarge( - sqlite3 *db, /* Database connection to notify of OOM errors */ - SrcList *pSrc, /* The SrcList to be enlarged */ - int nExtra, /* Number of new slots to add to pSrc->a[] */ - int iStart /* Index in pSrc->a[] of first new slot */ -){ - int i; - - /* Sanity checking on calling parameters */ - assert( iStart>=0 ); - assert( nExtra>=1 ); - assert( pSrc!=0 ); - assert( iStart<=pSrc->nSrc ); - - /* Allocate additional space if needed */ - if( (u32)pSrc->nSrc+nExtra>pSrc->nAlloc ){ - SrcList *pNew; - int nAlloc = pSrc->nSrc+nExtra; - int nGot; - pNew = sqlite3DbRealloc(db, pSrc, - sizeof(*pSrc) + (nAlloc-1)*sizeof(pSrc->a[0]) ); - if( pNew==0 ){ - assert( db->mallocFailed ); - return pSrc; - } - pSrc = pNew; - nGot = (sqlite3DbMallocSize(db, pNew) - sizeof(*pSrc))/sizeof(pSrc->a[0])+1; - pSrc->nAlloc = nGot; - } - - /* Move existing slots that come after the newly inserted slots - ** out of the way */ - for(i=pSrc->nSrc-1; i>=iStart; i--){ - pSrc->a[i+nExtra] = pSrc->a[i]; - } - pSrc->nSrc += nExtra; - - /* Zero the newly allocated slots */ - memset(&pSrc->a[iStart], 0, sizeof(pSrc->a[0])*nExtra); - for(i=iStart; ia[i].iCursor = -1; - } - - /* Return a pointer to the enlarged SrcList */ - return pSrc; -} - - -/* -** Append a new table name to the given SrcList. Create a new SrcList if -** need be. A new entry is created in the SrcList even if pTable is NULL. -** -** A SrcList is returned, or NULL if there is an OOM error. The returned -** SrcList might be the same as the SrcList that was input or it might be -** a new one. If an OOM error does occurs, then the prior value of pList -** that is input to this routine is automatically freed. 
-** -** If pDatabase is not null, it means that the table has an optional -** database name prefix. Like this: "database.table". The pDatabase -** points to the table name and the pTable points to the database name. -** The SrcList.a[].zName field is filled with the table name which might -** come from pTable (if pDatabase is NULL) or from pDatabase. -** SrcList.a[].zDatabase is filled with the database name from pTable, -** or with NULL if no database is specified. -** -** In other words, if call like this: -** -** sqlite3SrcListAppend(D,A,B,0); -** -** Then B is a table name and the database name is unspecified. If called -** like this: -** -** sqlite3SrcListAppend(D,A,B,C); -** -** Then C is the table name and B is the database name. If C is defined -** then so is B. In other words, we never have a case where: -** -** sqlite3SrcListAppend(D,A,0,C); -** -** Both pTable and pDatabase are assumed to be quoted. They are dequoted -** before being added to the SrcList. -*/ -SQLITE_PRIVATE SrcList *sqlite3SrcListAppend( - sqlite3 *db, /* Connection to notify of malloc failures */ - SrcList *pList, /* Append to this SrcList. NULL creates a new SrcList */ - Token *pTable, /* Table to append */ - Token *pDatabase /* Database of the table */ -){ - struct SrcList_item *pItem; - assert( pDatabase==0 || pTable!=0 ); /* Cannot have C without B */ - if( pList==0 ){ - pList = sqlite3DbMallocZero(db, sizeof(SrcList) ); - if( pList==0 ) return 0; - pList->nAlloc = 1; - } - pList = sqlite3SrcListEnlarge(db, pList, 1, pList->nSrc); - if( db->mallocFailed ){ - sqlite3SrcListDelete(db, pList); - return 0; - } - pItem = &pList->a[pList->nSrc-1]; - if( pDatabase && pDatabase->z==0 ){ - pDatabase = 0; - } - if( pDatabase ){ - Token *pTemp = pDatabase; - pDatabase = pTable; - pTable = pTemp; - } - pItem->zName = sqlite3NameFromToken(db, pTable); - pItem->zDatabase = sqlite3NameFromToken(db, pDatabase); - return pList; -} - -/* -** Assign VdbeCursor index numbers to all tables in a SrcList -*/ -SQLITE_PRIVATE void sqlite3SrcListAssignCursors(Parse *pParse, SrcList *pList){ - int i; - struct SrcList_item *pItem; - assert(pList || pParse->db->mallocFailed ); - if( pList ){ - for(i=0, pItem=pList->a; inSrc; i++, pItem++){ - if( pItem->iCursor>=0 ) break; - pItem->iCursor = pParse->nTab++; - if( pItem->pSelect ){ - sqlite3SrcListAssignCursors(pParse, pItem->pSelect->pSrc); - } - } - } -} - -/* -** Delete an entire SrcList including all its substructure. -*/ -SQLITE_PRIVATE void sqlite3SrcListDelete(sqlite3 *db, SrcList *pList){ - int i; - struct SrcList_item *pItem; - if( pList==0 ) return; - for(pItem=pList->a, i=0; inSrc; i++, pItem++){ - sqlite3DbFree(db, pItem->zDatabase); - sqlite3DbFree(db, pItem->zName); - sqlite3DbFree(db, pItem->zAlias); - sqlite3DbFree(db, pItem->zIndex); - sqlite3DeleteTable(db, pItem->pTab); - sqlite3SelectDelete(db, pItem->pSelect); - sqlite3ExprDelete(db, pItem->pOn); - sqlite3IdListDelete(db, pItem->pUsing); - } - sqlite3DbFree(db, pList); -} - -/* -** This routine is called by the parser to add a new term to the -** end of a growing FROM clause. The "p" parameter is the part of -** the FROM clause that has already been constructed. "p" is NULL -** if this is the first term of the FROM clause. pTable and pDatabase -** are the name of the table and database named in the FROM clause term. -** pDatabase is NULL if the database name qualifier is missing - the -** usual case. If the term has a alias, then pAlias points to the -** alias token. 
If the term is a subquery, then pSubquery is the -** SELECT statement that the subquery encodes. The pTable and -** pDatabase parameters are NULL for subqueries. The pOn and pUsing -** parameters are the content of the ON and USING clauses. -** -** Return a new SrcList which encodes is the FROM with the new -** term added. -*/ -SQLITE_PRIVATE SrcList *sqlite3SrcListAppendFromTerm( - Parse *pParse, /* Parsing context */ - SrcList *p, /* The left part of the FROM clause already seen */ - Token *pTable, /* Name of the table to add to the FROM clause */ - Token *pDatabase, /* Name of the database containing pTable */ - Token *pAlias, /* The right-hand side of the AS subexpression */ - Select *pSubquery, /* A subquery used in place of a table name */ - Expr *pOn, /* The ON clause of a join */ - IdList *pUsing /* The USING clause of a join */ -){ - struct SrcList_item *pItem; - sqlite3 *db = pParse->db; - if( !p && (pOn || pUsing) ){ - sqlite3ErrorMsg(pParse, "a JOIN clause is required before %s", - (pOn ? "ON" : "USING") - ); - goto append_from_error; - } - p = sqlite3SrcListAppend(db, p, pTable, pDatabase); - if( p==0 || NEVER(p->nSrc==0) ){ - goto append_from_error; - } - pItem = &p->a[p->nSrc-1]; - assert( pAlias!=0 ); - if( pAlias->n ){ - pItem->zAlias = sqlite3NameFromToken(db, pAlias); - } - pItem->pSelect = pSubquery; - pItem->pOn = pOn; - pItem->pUsing = pUsing; - return p; - - append_from_error: - assert( p==0 ); - sqlite3ExprDelete(db, pOn); - sqlite3IdListDelete(db, pUsing); - sqlite3SelectDelete(db, pSubquery); - return 0; -} - -/* -** Add an INDEXED BY or NOT INDEXED clause to the most recently added -** element of the source-list passed as the second argument. -*/ -SQLITE_PRIVATE void sqlite3SrcListIndexedBy(Parse *pParse, SrcList *p, Token *pIndexedBy){ - assert( pIndexedBy!=0 ); - if( p && ALWAYS(p->nSrc>0) ){ - struct SrcList_item *pItem = &p->a[p->nSrc-1]; - assert( pItem->notIndexed==0 && pItem->zIndex==0 ); - if( pIndexedBy->n==1 && !pIndexedBy->z ){ - /* A "NOT INDEXED" clause was supplied. See parse.y - ** construct "indexed_opt" for details. */ - pItem->notIndexed = 1; - }else{ - pItem->zIndex = sqlite3NameFromToken(pParse->db, pIndexedBy); - } - } -} - -/* -** When building up a FROM clause in the parser, the join operator -** is initially attached to the left operand. But the code generator -** expects the join operator to be on the right operand. This routine -** Shifts all join operators from left to right for an entire FROM -** clause. -** -** Example: Suppose the join is like this: -** -** A natural cross join B -** -** The operator is "natural cross join". The A and B operands are stored -** in p->a[0] and p->a[1], respectively. The parser initially stores the -** operator with A. This routine shifts that operator over to B. 
-*/ -SQLITE_PRIVATE void sqlite3SrcListShiftJoinType(SrcList *p){ - if( p ){ - int i; - assert( p->a || p->nSrc==0 ); - for(i=p->nSrc-1; i>0; i--){ - p->a[i].jointype = p->a[i-1].jointype; - } - p->a[0].jointype = 0; - } -} - -/* -** Begin a transaction -*/ -SQLITE_PRIVATE void sqlite3BeginTransaction(Parse *pParse, int type){ - sqlite3 *db; - Vdbe *v; - int i; - - assert( pParse!=0 ); - db = pParse->db; - assert( db!=0 ); -/* if( db->aDb[0].pBt==0 ) return; */ - if( sqlite3AuthCheck(pParse, SQLITE_TRANSACTION, "BEGIN", 0, 0) ){ - return; - } - v = sqlite3GetVdbe(pParse); - if( !v ) return; - if( type!=TK_DEFERRED ){ - for(i=0; inDb; i++){ - sqlite3VdbeAddOp2(v, OP_Transaction, i, (type==TK_EXCLUSIVE)+1); - sqlite3VdbeUsesBtree(v, i); - } - } - sqlite3VdbeAddOp2(v, OP_AutoCommit, 0, 0); -} - -/* -** Commit a transaction -*/ -SQLITE_PRIVATE void sqlite3CommitTransaction(Parse *pParse){ - Vdbe *v; - - assert( pParse!=0 ); - assert( pParse->db!=0 ); - if( sqlite3AuthCheck(pParse, SQLITE_TRANSACTION, "COMMIT", 0, 0) ){ - return; - } - v = sqlite3GetVdbe(pParse); - if( v ){ - sqlite3VdbeAddOp2(v, OP_AutoCommit, 1, 0); - } -} - -/* -** Rollback a transaction -*/ -SQLITE_PRIVATE void sqlite3RollbackTransaction(Parse *pParse){ - Vdbe *v; - - assert( pParse!=0 ); - assert( pParse->db!=0 ); - if( sqlite3AuthCheck(pParse, SQLITE_TRANSACTION, "ROLLBACK", 0, 0) ){ - return; - } - v = sqlite3GetVdbe(pParse); - if( v ){ - sqlite3VdbeAddOp2(v, OP_AutoCommit, 1, 1); - } -} - -/* -** This function is called by the parser when it parses a command to create, -** release or rollback an SQL savepoint. -*/ -SQLITE_PRIVATE void sqlite3Savepoint(Parse *pParse, int op, Token *pName){ - char *zName = sqlite3NameFromToken(pParse->db, pName); - if( zName ){ - Vdbe *v = sqlite3GetVdbe(pParse); -#ifndef SQLITE_OMIT_AUTHORIZATION - static const char * const az[] = { "BEGIN", "RELEASE", "ROLLBACK" }; - assert( !SAVEPOINT_BEGIN && SAVEPOINT_RELEASE==1 && SAVEPOINT_ROLLBACK==2 ); -#endif - if( !v || sqlite3AuthCheck(pParse, SQLITE_SAVEPOINT, az[op], zName, 0) ){ - sqlite3DbFree(pParse->db, zName); - return; - } - sqlite3VdbeAddOp4(v, OP_Savepoint, op, 0, 0, zName, P4_DYNAMIC); - } -} - -/* -** Make sure the TEMP database is open and available for use. Return -** the number of errors. Leave any error messages in the pParse structure. -*/ -SQLITE_PRIVATE int sqlite3OpenTempDatabase(Parse *pParse){ - sqlite3 *db = pParse->db; - if( db->aDb[1].pBt==0 && !pParse->explain ){ - int rc; - Btree *pBt; - static const int flags = - SQLITE_OPEN_READWRITE | - SQLITE_OPEN_CREATE | - SQLITE_OPEN_EXCLUSIVE | - SQLITE_OPEN_DELETEONCLOSE | - SQLITE_OPEN_TEMP_DB; - - rc = sqlite3BtreeOpen(db->pVfs, 0, db, &pBt, 0, flags); - if( rc!=SQLITE_OK ){ - sqlite3ErrorMsg(pParse, "unable to open a temporary database " - "file for storing temporary tables"); - pParse->rc = rc; - return 1; - } - db->aDb[1].pBt = pBt; - assert( db->aDb[1].pSchema ); - if( SQLITE_NOMEM==sqlite3BtreeSetPageSize(pBt, db->nextPagesize, -1, 0) ){ - db->mallocFailed = 1; - return 1; - } - } - return 0; -} - -/* -** Record the fact that the schema cookie will need to be verified -** for database iDb. The code to actually verify the schema cookie -** will occur at the end of the top-level VDBE and will be generated -** later, by sqlite3FinishCoding(). 
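**
** [Illustrative sketch] The transaction and savepoint routines above are
** reached from these statement forms.  A minimal public-API example
** (assumes <sqlite3.h> and an open handle "db"; "t" and "sp1" are
** hypothetical names):
**
**      static int transaction_forms(sqlite3 *db){
**        return sqlite3_exec(db,
**          "BEGIN;"                  // sqlite3BeginTransaction()
**          "CREATE TABLE IF NOT EXISTS t(x);"
**          "SAVEPOINT sp1;"          // sqlite3Savepoint(SAVEPOINT_BEGIN)
**          "INSERT INTO t VALUES(1);"
**          "ROLLBACK TO sp1;"        // sqlite3Savepoint(SAVEPOINT_ROLLBACK)
**          "RELEASE sp1;"            // sqlite3Savepoint(SAVEPOINT_RELEASE)
**          "COMMIT;",                // sqlite3CommitTransaction()
**          0, 0, 0);
**      }
**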
-*/ -SQLITE_PRIVATE void sqlite3CodeVerifySchema(Parse *pParse, int iDb){ - Parse *pToplevel = sqlite3ParseToplevel(pParse); - sqlite3 *db = pToplevel->db; - yDbMask mask; - - assert( iDb>=0 && iDbnDb ); - assert( db->aDb[iDb].pBt!=0 || iDb==1 ); - assert( iDbcookieMask & mask)==0 ){ - pToplevel->cookieMask |= mask; - pToplevel->cookieValue[iDb] = db->aDb[iDb].pSchema->schema_cookie; - if( !OMIT_TEMPDB && iDb==1 ){ - sqlite3OpenTempDatabase(pToplevel); - } - } -} - -/* -** If argument zDb is NULL, then call sqlite3CodeVerifySchema() for each -** attached database. Otherwise, invoke it for the database named zDb only. -*/ -SQLITE_PRIVATE void sqlite3CodeVerifyNamedSchema(Parse *pParse, const char *zDb){ - sqlite3 *db = pParse->db; - int i; - for(i=0; inDb; i++){ - Db *pDb = &db->aDb[i]; - if( pDb->pBt && (!zDb || 0==sqlite3StrICmp(zDb, pDb->zName)) ){ - sqlite3CodeVerifySchema(pParse, i); - } - } -} - -/* -** Generate VDBE code that prepares for doing an operation that -** might change the database. -** -** This routine starts a new transaction if we are not already within -** a transaction. If we are already within a transaction, then a checkpoint -** is set if the setStatement parameter is true. A checkpoint should -** be set for operations that might fail (due to a constraint) part of -** the way through and which will need to undo some writes without having to -** rollback the whole transaction. For operations where all constraints -** can be checked before any changes are made to the database, it is never -** necessary to undo a write and the checkpoint should not be set. -*/ -SQLITE_PRIVATE void sqlite3BeginWriteOperation(Parse *pParse, int setStatement, int iDb){ - Parse *pToplevel = sqlite3ParseToplevel(pParse); - sqlite3CodeVerifySchema(pParse, iDb); - pToplevel->writeMask |= ((yDbMask)1)<isMultiWrite |= setStatement; -} - -/* -** Indicate that the statement currently under construction might write -** more than one entry (example: deleting one row then inserting another, -** inserting multiple rows in a table, or inserting a row and index entries.) -** If an abort occurs after some of these writes have completed, then it will -** be necessary to undo the completed writes. -*/ -SQLITE_PRIVATE void sqlite3MultiWrite(Parse *pParse){ - Parse *pToplevel = sqlite3ParseToplevel(pParse); - pToplevel->isMultiWrite = 1; -} - -/* -** The code generator calls this routine if is discovers that it is -** possible to abort a statement prior to completion. In order to -** perform this abort without corrupting the database, we need to make -** sure that the statement is protected by a statement transaction. -** -** Technically, we only need to set the mayAbort flag if the -** isMultiWrite flag was previously set. There is a time dependency -** such that the abort must occur after the multiwrite. This makes -** some statements involving the REPLACE conflict resolution algorithm -** go a little faster. But taking advantage of this time dependency -** makes it more difficult to prove that the code is correct (in -** particular, it prevents us from writing an effective -** implementation of sqlite3AssertMayAbort()) and so we have chosen -** to take the safe route and skip the optimization. -*/ -SQLITE_PRIVATE void sqlite3MayAbort(Parse *pParse){ - Parse *pToplevel = sqlite3ParseToplevel(pParse); - pToplevel->mayAbort = 1; -} - -/* -** Code an OP_Halt that causes the vdbe to return an SQLITE_CONSTRAINT -** error. 
The onError parameter determines which (if any) of the statement -** and/or current transaction is rolled back. -*/ -SQLITE_PRIVATE void sqlite3HaltConstraint( - Parse *pParse, /* Parsing context */ - int errCode, /* extended error code */ - int onError, /* Constraint type */ - char *p4, /* Error message */ - i8 p4type, /* P4_STATIC or P4_TRANSIENT */ - u8 p5Errmsg /* P5_ErrMsg type */ -){ - Vdbe *v = sqlite3GetVdbe(pParse); - assert( (errCode&0xff)==SQLITE_CONSTRAINT ); - if( onError==OE_Abort ){ - sqlite3MayAbort(pParse); - } - sqlite3VdbeAddOp4(v, OP_Halt, errCode, onError, 0, p4, p4type); - if( p5Errmsg ) sqlite3VdbeChangeP5(v, p5Errmsg); -} - -/* -** Code an OP_Halt due to UNIQUE or PRIMARY KEY constraint violation. -*/ -SQLITE_PRIVATE void sqlite3UniqueConstraint( - Parse *pParse, /* Parsing context */ - int onError, /* Constraint type */ - Index *pIdx /* The index that triggers the constraint */ -){ - char *zErr; - int j; - StrAccum errMsg; - Table *pTab = pIdx->pTable; - - sqlite3StrAccumInit(&errMsg, 0, 0, 200); - errMsg.db = pParse->db; - for(j=0; jnKeyCol; j++){ - char *zCol = pTab->aCol[pIdx->aiColumn[j]].zName; - if( j ) sqlite3StrAccumAppend(&errMsg, ", ", 2); - sqlite3StrAccumAppendAll(&errMsg, pTab->zName); - sqlite3StrAccumAppend(&errMsg, ".", 1); - sqlite3StrAccumAppendAll(&errMsg, zCol); - } - zErr = sqlite3StrAccumFinish(&errMsg); - sqlite3HaltConstraint(pParse, - IsPrimaryKeyIndex(pIdx) ? SQLITE_CONSTRAINT_PRIMARYKEY - : SQLITE_CONSTRAINT_UNIQUE, - onError, zErr, P4_DYNAMIC, P5_ConstraintUnique); -} - - -/* -** Code an OP_Halt due to non-unique rowid. -*/ -SQLITE_PRIVATE void sqlite3RowidConstraint( - Parse *pParse, /* Parsing context */ - int onError, /* Conflict resolution algorithm */ - Table *pTab /* The table with the non-unique rowid */ -){ - char *zMsg; - int rc; - if( pTab->iPKey>=0 ){ - zMsg = sqlite3MPrintf(pParse->db, "%s.%s", pTab->zName, - pTab->aCol[pTab->iPKey].zName); - rc = SQLITE_CONSTRAINT_PRIMARYKEY; - }else{ - zMsg = sqlite3MPrintf(pParse->db, "%s.rowid", pTab->zName); - rc = SQLITE_CONSTRAINT_ROWID; - } - sqlite3HaltConstraint(pParse, rc, onError, zMsg, P4_DYNAMIC, - P5_ConstraintUnique); -} - -/* -** Check to see if pIndex uses the collating sequence pColl. Return -** true if it does and false if it does not. -*/ -#ifndef SQLITE_OMIT_REINDEX -static int collationMatch(const char *zColl, Index *pIndex){ - int i; - assert( zColl!=0 ); - for(i=0; inColumn; i++){ - const char *z = pIndex->azColl[i]; - assert( z!=0 || pIndex->aiColumn[i]<0 ); - if( pIndex->aiColumn[i]>=0 && 0==sqlite3StrICmp(z, zColl) ){ - return 1; - } - } - return 0; -} -#endif - -/* -** Recompute all indices of pTab that use the collating sequence pColl. -** If pColl==0 then recompute all indices of pTab. -*/ -#ifndef SQLITE_OMIT_REINDEX -static void reindexTable(Parse *pParse, Table *pTab, char const *zColl){ - Index *pIndex; /* An index associated with pTab */ - - for(pIndex=pTab->pIndex; pIndex; pIndex=pIndex->pNext){ - if( zColl==0 || collationMatch(zColl, pIndex) ){ - int iDb = sqlite3SchemaToIndex(pParse->db, pTab->pSchema); - sqlite3BeginWriteOperation(pParse, 0, iDb); - sqlite3RefillIndex(pParse, pIndex, -1); - } - } -} -#endif - -/* -** Recompute all indices of all tables in all databases where the -** indices use the collating sequence pColl. If pColl==0 then recompute -** all indices everywhere. 
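**
** [Illustrative sketch] sqlite3UniqueConstraint() and
** sqlite3RowidConstraint() above assemble the "table.column" text reported
** when a UNIQUE or PRIMARY KEY constraint fails.  A rough public-API
** example that provokes such a failure and reads the message (exact
** wording varies between SQLite versions; assumes <sqlite3.h>, <stdio.h>
** and an open handle "db"):
**
**      static void unique_constraint_demo(sqlite3 *db){
**        char *zErr = 0;
**        sqlite3_exec(db,
**          "CREATE TABLE u(a PRIMARY KEY);"
**          "INSERT INTO u VALUES(1);"
**          "INSERT INTO u VALUES(1);",   // second insert violates the PK
**          0, 0, &zErr);
**        if( zErr ){
**          fprintf(stderr, "constraint failed: %s\n", zErr); // mentions u.a
**          sqlite3_free(zErr);
**        }
**      }
**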
-*/ -#ifndef SQLITE_OMIT_REINDEX -static void reindexDatabases(Parse *pParse, char const *zColl){ - Db *pDb; /* A single database */ - int iDb; /* The database index number */ - sqlite3 *db = pParse->db; /* The database connection */ - HashElem *k; /* For looping over tables in pDb */ - Table *pTab; /* A table in the database */ - - assert( sqlite3BtreeHoldsAllMutexes(db) ); /* Needed for schema access */ - for(iDb=0, pDb=db->aDb; iDbnDb; iDb++, pDb++){ - assert( pDb!=0 ); - for(k=sqliteHashFirst(&pDb->pSchema->tblHash); k; k=sqliteHashNext(k)){ - pTab = (Table*)sqliteHashData(k); - reindexTable(pParse, pTab, zColl); - } - } -} -#endif - -/* -** Generate code for the REINDEX command. -** -** REINDEX -- 1 -** REINDEX -- 2 -** REINDEX ?.? -- 3 -** REINDEX ?.? -- 4 -** -** Form 1 causes all indices in all attached databases to be rebuilt. -** Form 2 rebuilds all indices in all databases that use the named -** collating function. Forms 3 and 4 rebuild the named index or all -** indices associated with the named table. -*/ -#ifndef SQLITE_OMIT_REINDEX -SQLITE_PRIVATE void sqlite3Reindex(Parse *pParse, Token *pName1, Token *pName2){ - CollSeq *pColl; /* Collating sequence to be reindexed, or NULL */ - char *z; /* Name of a table or index */ - const char *zDb; /* Name of the database */ - Table *pTab; /* A table in the database */ - Index *pIndex; /* An index associated with pTab */ - int iDb; /* The database index number */ - sqlite3 *db = pParse->db; /* The database connection */ - Token *pObjName; /* Name of the table or index to be reindexed */ - - /* Read the database schema. If an error occurs, leave an error message - ** and code in pParse and return NULL. */ - if( SQLITE_OK!=sqlite3ReadSchema(pParse) ){ - return; - } - - if( pName1==0 ){ - reindexDatabases(pParse, 0); - return; - }else if( NEVER(pName2==0) || pName2->z==0 ){ - char *zColl; - assert( pName1->z ); - zColl = sqlite3NameFromToken(pParse->db, pName1); - if( !zColl ) return; - pColl = sqlite3FindCollSeq(db, ENC(db), zColl, 0); - if( pColl ){ - reindexDatabases(pParse, zColl); - sqlite3DbFree(db, zColl); - return; - } - sqlite3DbFree(db, zColl); - } - iDb = sqlite3TwoPartName(pParse, pName1, pName2, &pObjName); - if( iDb<0 ) return; - z = sqlite3NameFromToken(db, pObjName); - if( z==0 ) return; - zDb = db->aDb[iDb].zName; - pTab = sqlite3FindTable(db, z, zDb); - if( pTab ){ - reindexTable(pParse, pTab, 0); - sqlite3DbFree(db, z); - return; - } - pIndex = sqlite3FindIndex(db, z, zDb); - sqlite3DbFree(db, z); - if( pIndex ){ - sqlite3BeginWriteOperation(pParse, 0, iDb); - sqlite3RefillIndex(pParse, pIndex, -1); - return; - } - sqlite3ErrorMsg(pParse, "unable to identify the object to be reindexed"); -} -#endif - -/* -** Return a KeyInfo structure that is appropriate for the given Index. -** -** The KeyInfo structure for an index is cached in the Index object. -** So there might be multiple references to the returned pointer. The -** caller should not try to modify the KeyInfo object. -** -** The caller should invoke sqlite3KeyInfoUnref() on the returned object -** when it has finished using it. 
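**
** [Illustrative sketch] sqlite3Reindex() above accepts four statement
** forms: REINDEX; REINDEX <collation-name>; REINDEX [database.]<table>;
** and REINDEX [database.]<index>.  A minimal public-API example (object
** names are hypothetical; assumes <sqlite3.h> and an open handle "db"):
**
**      static int reindex_forms(sqlite3 *db){
**        return sqlite3_exec(db,
**          "CREATE TABLE r(a COLLATE NOCASE); CREATE INDEX ri ON r(a);"
**          "REINDEX;"           // form 1: every index in every attached db
**          "REINDEX NOCASE;"    // form 2: indices that use collation NOCASE
**          "REINDEX main.r;"    // form 3: all indices on table r
**          "REINDEX main.ri;",  // form 4: just the index ri
**          0, 0, 0);
**      }
**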
-*/ -SQLITE_PRIVATE KeyInfo *sqlite3KeyInfoOfIndex(Parse *pParse, Index *pIdx){ - if( pParse->nErr ) return 0; -#ifndef SQLITE_OMIT_SHARED_CACHE - if( pIdx->pKeyInfo && pIdx->pKeyInfo->db!=pParse->db ){ - sqlite3KeyInfoUnref(pIdx->pKeyInfo); - pIdx->pKeyInfo = 0; - } -#endif - if( pIdx->pKeyInfo==0 ){ - int i; - int nCol = pIdx->nColumn; - int nKey = pIdx->nKeyCol; - KeyInfo *pKey; - if( pIdx->uniqNotNull ){ - pKey = sqlite3KeyInfoAlloc(pParse->db, nKey, nCol-nKey); - }else{ - pKey = sqlite3KeyInfoAlloc(pParse->db, nCol, 0); - } - if( pKey ){ - assert( sqlite3KeyInfoIsWriteable(pKey) ); - for(i=0; iazColl[i]; - assert( zColl!=0 ); - pKey->aColl[i] = strcmp(zColl,"BINARY")==0 ? 0 : - sqlite3LocateCollSeq(pParse, zColl); - pKey->aSortOrder[i] = pIdx->aSortOrder[i]; - } - if( pParse->nErr ){ - sqlite3KeyInfoUnref(pKey); - }else{ - pIdx->pKeyInfo = pKey; - } - } - } - return sqlite3KeyInfoRef(pIdx->pKeyInfo); -} - -#ifndef SQLITE_OMIT_CTE -/* -** This routine is invoked once per CTE by the parser while parsing a -** WITH clause. -*/ -SQLITE_PRIVATE With *sqlite3WithAdd( - Parse *pParse, /* Parsing context */ - With *pWith, /* Existing WITH clause, or NULL */ - Token *pName, /* Name of the common-table */ - ExprList *pArglist, /* Optional column name list for the table */ - Select *pQuery /* Query used to initialize the table */ -){ - sqlite3 *db = pParse->db; - With *pNew; - char *zName; - - /* Check that the CTE name is unique within this WITH clause. If - ** not, store an error in the Parse structure. */ - zName = sqlite3NameFromToken(pParse->db, pName); - if( zName && pWith ){ - int i; - for(i=0; inCte; i++){ - if( sqlite3StrICmp(zName, pWith->a[i].zName)==0 ){ - sqlite3ErrorMsg(pParse, "duplicate WITH table name: %s", zName); - } - } - } - - if( pWith ){ - int nByte = sizeof(*pWith) + (sizeof(pWith->a[1]) * pWith->nCte); - pNew = sqlite3DbRealloc(db, pWith, nByte); - }else{ - pNew = sqlite3DbMallocZero(db, sizeof(*pWith)); - } - assert( zName!=0 || pNew==0 ); - assert( db->mallocFailed==0 || pNew==0 ); - - if( pNew==0 ){ - sqlite3ExprListDelete(db, pArglist); - sqlite3SelectDelete(db, pQuery); - sqlite3DbFree(db, zName); - pNew = pWith; - }else{ - pNew->a[pNew->nCte].pSelect = pQuery; - pNew->a[pNew->nCte].pCols = pArglist; - pNew->a[pNew->nCte].zName = zName; - pNew->a[pNew->nCte].zErr = 0; - pNew->nCte++; - } - - return pNew; -} - -/* -** Free the contents of the With object passed as the second argument. -*/ -SQLITE_PRIVATE void sqlite3WithDelete(sqlite3 *db, With *pWith){ - if( pWith ){ - int i; - for(i=0; inCte; i++){ - struct Cte *pCte = &pWith->a[i]; - sqlite3ExprListDelete(db, pCte->pCols); - sqlite3SelectDelete(db, pCte->pSelect); - sqlite3DbFree(db, pCte->zName); - } - sqlite3DbFree(db, pWith); - } -} -#endif /* !defined(SQLITE_OMIT_CTE) */ - -/************** End of build.c ***********************************************/ -/************** Begin file callback.c ****************************************/ -/* -** 2005 May 23 -** -** The author disclaims copyright to this source code. In place of -** a legal notice, here is a blessing: -** -** May you do good and not evil. -** May you find forgiveness for yourself and forgive others. -** May you share freely, never taking more than you give. -** -************************************************************************* -** -** This file contains functions used to access the internal hash tables -** of user defined functions and collation sequences. 
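**
** [Illustrative sketch] sqlite3WithAdd() earlier above collects the common
** table expressions of a WITH clause and rejects duplicate CTE names.  A
** minimal public-API example of the SQL it parses (names are hypothetical;
** assumes <sqlite3.h> and an open handle "db"):
**
**      static int with_clause_demo(sqlite3 *db){
**        // Naming both CTEs "cnt" would fail with
**        // "duplicate WITH table name: cnt".
**        return sqlite3_exec(db,
**          "WITH RECURSIVE"
**          "  cnt(x) AS (SELECT 1 UNION ALL SELECT x+1 FROM cnt WHERE x<5),"
**          "  sq(x)  AS (SELECT x*x FROM cnt)"
**          " SELECT x FROM sq;",
**          0, 0, 0);
**      }
**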
-*/ - - -/* -** Invoke the 'collation needed' callback to request a collation sequence -** in the encoding enc of name zName, length nName. -*/ -static void callCollNeeded(sqlite3 *db, int enc, const char *zName){ - assert( !db->xCollNeeded || !db->xCollNeeded16 ); - if( db->xCollNeeded ){ - char *zExternal = sqlite3DbStrDup(db, zName); - if( !zExternal ) return; - db->xCollNeeded(db->pCollNeededArg, db, enc, zExternal); - sqlite3DbFree(db, zExternal); - } -#ifndef SQLITE_OMIT_UTF16 - if( db->xCollNeeded16 ){ - char const *zExternal; - sqlite3_value *pTmp = sqlite3ValueNew(db); - sqlite3ValueSetStr(pTmp, -1, zName, SQLITE_UTF8, SQLITE_STATIC); - zExternal = sqlite3ValueText(pTmp, SQLITE_UTF16NATIVE); - if( zExternal ){ - db->xCollNeeded16(db->pCollNeededArg, db, (int)ENC(db), zExternal); - } - sqlite3ValueFree(pTmp); - } -#endif -} - -/* -** This routine is called if the collation factory fails to deliver a -** collation function in the best encoding but there may be other versions -** of this collation function (for other text encodings) available. Use one -** of these instead if they exist. Avoid a UTF-8 <-> UTF-16 conversion if -** possible. -*/ -static int synthCollSeq(sqlite3 *db, CollSeq *pColl){ - CollSeq *pColl2; - char *z = pColl->zName; - int i; - static const u8 aEnc[] = { SQLITE_UTF16BE, SQLITE_UTF16LE, SQLITE_UTF8 }; - for(i=0; i<3; i++){ - pColl2 = sqlite3FindCollSeq(db, aEnc[i], z, 0); - if( pColl2->xCmp!=0 ){ - memcpy(pColl, pColl2, sizeof(CollSeq)); - pColl->xDel = 0; /* Do not copy the destructor */ - return SQLITE_OK; - } - } - return SQLITE_ERROR; -} - -/* -** This function is responsible for invoking the collation factory callback -** or substituting a collation sequence of a different encoding when the -** requested collation sequence is not available in the desired encoding. -** -** If it is not NULL, then pColl must point to the database native encoding -** collation sequence with name zName, length nName. -** -** The return value is either the collation sequence to be used in database -** db for collation type name zName, length nName, or NULL, if no collation -** sequence can be found. If no collation is found, leave an error message. -** -** See also: sqlite3LocateCollSeq(), sqlite3FindCollSeq() -*/ -SQLITE_PRIVATE CollSeq *sqlite3GetCollSeq( - Parse *pParse, /* Parsing context */ - u8 enc, /* The desired encoding for the collating sequence */ - CollSeq *pColl, /* Collating sequence with native encoding, or NULL */ - const char *zName /* Collating sequence name */ -){ - CollSeq *p; - sqlite3 *db = pParse->db; - - p = pColl; - if( !p ){ - p = sqlite3FindCollSeq(db, enc, zName, 0); - } - if( !p || !p->xCmp ){ - /* No collation sequence of this type for this encoding is registered. - ** Call the collation factory to see if it can supply us with one. - */ - callCollNeeded(db, enc, zName); - p = sqlite3FindCollSeq(db, enc, zName, 0); - } - if( p && !p->xCmp && synthCollSeq(db, p) ){ - p = 0; - } - assert( !p || p->xCmp ); - if( p==0 ){ - sqlite3ErrorMsg(pParse, "no such collation sequence: %s", zName); - } - return p; -} - -/* -** This routine is called on a collation sequence before it is used to -** check that it is defined. An undefined collation sequence exists when -** a database is loaded that contains references to collation sequences -** that have not been defined by sqlite3_create_collation() etc. -** -** If required, this routine calls the 'collation needed' callback to -** request a definition of the collating sequence. 
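**
** [Illustrative sketch] sqlite3GetCollSeq() above invokes the "collation
** needed" callback when a statement uses a collation that has not been
** registered yet.  A rough public-API sketch of that handshake
** ("coll_needed" and "fold_cmp" are hypothetical; assumes <sqlite3.h>):
**
**      static int fold_cmp(void *pArg, int n1, const void *z1,
**                          int n2, const void *z2){
**        int n = n1<n2 ? n1 : n2;
**        int c = sqlite3_strnicmp((const char*)z1, (const char*)z2, n);
**        (void)pArg;
**        return c ? c : n1-n2;
**      }
**      static void coll_needed(void *pArg, sqlite3 *db, int eTextRep,
**                              const char *zName){
**        (void)pArg; (void)eTextRep;
**        sqlite3_create_collation(db, zName, SQLITE_UTF8, 0, fold_cmp);
**      }
**      // At startup:  sqlite3_collation_needed(db, 0, coll_needed);
**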
If this doesn't work, -** an equivalent collating sequence that uses a text encoding different -** from the main database is substituted, if one is available. -*/ -SQLITE_PRIVATE int sqlite3CheckCollSeq(Parse *pParse, CollSeq *pColl){ - if( pColl ){ - const char *zName = pColl->zName; - sqlite3 *db = pParse->db; - CollSeq *p = sqlite3GetCollSeq(pParse, ENC(db), pColl, zName); - if( !p ){ - return SQLITE_ERROR; - } - assert( p==pColl ); - } - return SQLITE_OK; -} - - - -/* -** Locate and return an entry from the db.aCollSeq hash table. If the entry -** specified by zName and nName is not found and parameter 'create' is -** true, then create a new entry. Otherwise return NULL. -** -** Each pointer stored in the sqlite3.aCollSeq hash table contains an -** array of three CollSeq structures. The first is the collation sequence -** prefferred for UTF-8, the second UTF-16le, and the third UTF-16be. -** -** Stored immediately after the three collation sequences is a copy of -** the collation sequence name. A pointer to this string is stored in -** each collation sequence structure. -*/ -static CollSeq *findCollSeqEntry( - sqlite3 *db, /* Database connection */ - const char *zName, /* Name of the collating sequence */ - int create /* Create a new entry if true */ -){ - CollSeq *pColl; - int nName = sqlite3Strlen30(zName); - pColl = sqlite3HashFind(&db->aCollSeq, zName, nName); - - if( 0==pColl && create ){ - pColl = sqlite3DbMallocZero(db, 3*sizeof(*pColl) + nName + 1 ); - if( pColl ){ - CollSeq *pDel = 0; - pColl[0].zName = (char*)&pColl[3]; - pColl[0].enc = SQLITE_UTF8; - pColl[1].zName = (char*)&pColl[3]; - pColl[1].enc = SQLITE_UTF16LE; - pColl[2].zName = (char*)&pColl[3]; - pColl[2].enc = SQLITE_UTF16BE; - memcpy(pColl[0].zName, zName, nName); - pColl[0].zName[nName] = 0; - pDel = sqlite3HashInsert(&db->aCollSeq, pColl[0].zName, nName, pColl); - - /* If a malloc() failure occurred in sqlite3HashInsert(), it will - ** return the pColl pointer to be deleted (because it wasn't added - ** to the hash table). - */ - assert( pDel==0 || pDel==pColl ); - if( pDel!=0 ){ - db->mallocFailed = 1; - sqlite3DbFree(db, pDel); - pColl = 0; - } - } - } - return pColl; -} - -/* -** Parameter zName points to a UTF-8 encoded string nName bytes long. -** Return the CollSeq* pointer for the collation sequence named zName -** for the encoding 'enc' from the database 'db'. -** -** If the entry specified is not found and 'create' is true, then create a -** new entry. Otherwise return NULL. -** -** A separate function sqlite3LocateCollSeq() is a wrapper around -** this routine. sqlite3LocateCollSeq() invokes the collation factory -** if necessary and generates an error message if the collating sequence -** cannot be found. -** -** See also: sqlite3LocateCollSeq(), sqlite3GetCollSeq() -*/ -SQLITE_PRIVATE CollSeq *sqlite3FindCollSeq( - sqlite3 *db, - u8 enc, - const char *zName, - int create -){ - CollSeq *pColl; - if( zName ){ - pColl = findCollSeqEntry(db, zName, create); - }else{ - pColl = db->pDfltColl; - } - assert( SQLITE_UTF8==1 && SQLITE_UTF16LE==2 && SQLITE_UTF16BE==3 ); - assert( enc>=SQLITE_UTF8 && enc<=SQLITE_UTF16BE ); - if( pColl ) pColl += enc-1; - return pColl; -} - -/* During the search for the best function definition, this procedure -** is called to test how well the function passed as the first argument -** matches the request for a function with nArg arguments in a system -** that uses encoding enc. The value returned indicates how well the -** request is matched. 
A higher value indicates a better match. -** -** If nArg is -1 that means to only return a match (non-zero) if p->nArg -** is also -1. In other words, we are searching for a function that -** takes a variable number of arguments. -** -** If nArg is -2 that means that we are searching for any function -** regardless of the number of arguments it uses, so return a positive -** match score for any -** -** The returned value is always between 0 and 6, as follows: -** -** 0: Not a match. -** 1: UTF8/16 conversion required and function takes any number of arguments. -** 2: UTF16 byte order change required and function takes any number of args. -** 3: encoding matches and function takes any number of arguments -** 4: UTF8/16 conversion required - argument count matches exactly -** 5: UTF16 byte order conversion required - argument count matches exactly -** 6: Perfect match: encoding and argument count match exactly. -** -** If nArg==(-2) then any function with a non-null xStep or xFunc is -** a perfect match and any function with both xStep and xFunc NULL is -** a non-match. -*/ -#define FUNC_PERFECT_MATCH 6 /* The score for a perfect match */ -static int matchQuality( - FuncDef *p, /* The function we are evaluating for match quality */ - int nArg, /* Desired number of arguments. (-1)==any */ - u8 enc /* Desired text encoding */ -){ - int match; - - /* nArg of -2 is a special case */ - if( nArg==(-2) ) return (p->xFunc==0 && p->xStep==0) ? 0 : FUNC_PERFECT_MATCH; - - /* Wrong number of arguments means "no match" */ - if( p->nArg!=nArg && p->nArg>=0 ) return 0; - - /* Give a better score to a function with a specific number of arguments - ** than to function that accepts any number of arguments. */ - if( p->nArg==nArg ){ - match = 4; - }else{ - match = 1; - } - - /* Bonus points if the text encoding matches */ - if( enc==(p->funcFlags & SQLITE_FUNC_ENCMASK) ){ - match += 2; /* Exact encoding match */ - }else if( (enc & p->funcFlags & 2)!=0 ){ - match += 1; /* Both are UTF16, but with different byte orders */ - } - - return match; -} - -/* -** Search a FuncDefHash for a function with the given name. Return -** a pointer to the matching FuncDef if found, or 0 if there is no match. -*/ -static FuncDef *functionSearch( - FuncDefHash *pHash, /* Hash table to search */ - int h, /* Hash of the name */ - const char *zFunc, /* Name of function */ - int nFunc /* Number of bytes in zFunc */ -){ - FuncDef *p; - for(p=pHash->a[h]; p; p=p->pHash){ - if( sqlite3StrNICmp(p->zName, zFunc, nFunc)==0 && p->zName[nFunc]==0 ){ - return p; - } - } - return 0; -} - -/* -** Insert a new FuncDef into a FuncDefHash hash table. -*/ -SQLITE_PRIVATE void sqlite3FuncDefInsert( - FuncDefHash *pHash, /* The hash table into which to insert */ - FuncDef *pDef /* The function definition to insert */ -){ - FuncDef *pOther; - int nName = sqlite3Strlen30(pDef->zName); - u8 c1 = (u8)pDef->zName[0]; - int h = (sqlite3UpperToLower[c1] + nName) % ArraySize(pHash->a); - pOther = functionSearch(pHash, h, pDef->zName, nName); - if( pOther ){ - assert( pOther!=pDef && pOther->pNext!=pDef ); - pDef->pNext = pOther->pNext; - pOther->pNext = pDef; - }else{ - pDef->pNext = 0; - pDef->pHash = pHash->a[h]; - pHash->a[h] = pDef; - } -} - - - -/* -** Locate a user function given a name, a number of arguments and a flag -** indicating whether the function prefers UTF-16 over UTF-8. Return a -** pointer to the FuncDef structure that defines that function, or return -** NULL if the function does not exist. 
-** -** If the createFlag argument is true, then a new (blank) FuncDef -** structure is created and liked into the "db" structure if a -** no matching function previously existed. -** -** If nArg is -2, then the first valid function found is returned. A -** function is valid if either xFunc or xStep is non-zero. The nArg==(-2) -** case is used to see if zName is a valid function name for some number -** of arguments. If nArg is -2, then createFlag must be 0. -** -** If createFlag is false, then a function with the required name and -** number of arguments may be returned even if the eTextRep flag does not -** match that requested. -*/ -SQLITE_PRIVATE FuncDef *sqlite3FindFunction( - sqlite3 *db, /* An open database */ - const char *zName, /* Name of the function. Not null-terminated */ - int nName, /* Number of characters in the name */ - int nArg, /* Number of arguments. -1 means any number */ - u8 enc, /* Preferred text encoding */ - u8 createFlag /* Create new entry if true and does not otherwise exist */ -){ - FuncDef *p; /* Iterator variable */ - FuncDef *pBest = 0; /* Best match found so far */ - int bestScore = 0; /* Score of best match */ - int h; /* Hash value */ - - assert( nArg>=(-2) ); - assert( nArg>=(-1) || createFlag==0 ); - h = (sqlite3UpperToLower[(u8)zName[0]] + nName) % ArraySize(db->aFunc.a); - - /* First search for a match amongst the application-defined functions. - */ - p = functionSearch(&db->aFunc, h, zName, nName); - while( p ){ - int score = matchQuality(p, nArg, enc); - if( score>bestScore ){ - pBest = p; - bestScore = score; - } - p = p->pNext; - } - - /* If no match is found, search the built-in functions. - ** - ** If the SQLITE_PreferBuiltin flag is set, then search the built-in - ** functions even if a prior app-defined function was found. And give - ** priority to built-in functions. - ** - ** Except, if createFlag is true, that means that we are trying to - ** install a new function. Whatever FuncDef structure is returned it will - ** have fields overwritten with new information appropriate for the - ** new function. But the FuncDefs for built-in functions are read-only. - ** So we must not search for built-ins when creating a new function. - */ - if( !createFlag && (pBest==0 || (db->flags & SQLITE_PreferBuiltin)!=0) ){ - FuncDefHash *pHash = &GLOBAL(FuncDefHash, sqlite3GlobalFunctions); - bestScore = 0; - p = functionSearch(pHash, h, zName, nName); - while( p ){ - int score = matchQuality(p, nArg, enc); - if( score>bestScore ){ - pBest = p; - bestScore = score; - } - p = p->pNext; - } - } - - /* If the createFlag parameter is true and the search did not reveal an - ** exact match for the name, number of arguments and encoding, then add a - ** new entry to the hash table and return it. - */ - if( createFlag && bestScorezName = (char *)&pBest[1]; - pBest->nArg = (u16)nArg; - pBest->funcFlags = enc; - memcpy(pBest->zName, zName, nName); - pBest->zName[nName] = 0; - sqlite3FuncDefInsert(&db->aFunc, pBest); - } - - if( pBest && (pBest->xStep || pBest->xFunc || createFlag) ){ - return pBest; - } - return 0; -} - -/* -** Free all resources held by the schema structure. The void* argument points -** at a Schema struct. This function does not call sqlite3DbFree(db, ) on the -** pointer itself, it just cleans up subsidiary resources (i.e. the contents -** of the schema hash tables). -** -** The Schema.cache_size variable is not cleared. 
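**
** [Illustrative sketch] sqlite3FindFunction() above resolves a call site to
** the best-matching FuncDef by name, argument count and text encoding.
** Application-defined functions enter that search through the public API
** ("add_one" is a hypothetical scalar function; assumes <sqlite3.h>):
**
**      static void add_one_func(sqlite3_context *ctx, int argc,
**                               sqlite3_value **argv){
**        (void)argc;
**        sqlite3_result_int(ctx, sqlite3_value_int(argv[0]) + 1);
**      }
**      // nArg==1 registers an exact-arity match; nArg==-1 would register a
**      // variadic function, which matchQuality() scores below an exact fit.
**      static int register_add_one(sqlite3 *db){
**        return sqlite3_create_function(db, "add_one", 1, SQLITE_UTF8, 0,
**                                       add_one_func, 0, 0);
**      }
**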
-*/ -SQLITE_PRIVATE void sqlite3SchemaClear(void *p){ - Hash temp1; - Hash temp2; - HashElem *pElem; - Schema *pSchema = (Schema *)p; - - temp1 = pSchema->tblHash; - temp2 = pSchema->trigHash; - sqlite3HashInit(&pSchema->trigHash); - sqlite3HashClear(&pSchema->idxHash); - for(pElem=sqliteHashFirst(&temp2); pElem; pElem=sqliteHashNext(pElem)){ - sqlite3DeleteTrigger(0, (Trigger*)sqliteHashData(pElem)); - } - sqlite3HashClear(&temp2); - sqlite3HashInit(&pSchema->tblHash); - for(pElem=sqliteHashFirst(&temp1); pElem; pElem=sqliteHashNext(pElem)){ - Table *pTab = sqliteHashData(pElem); - sqlite3DeleteTable(0, pTab); - } - sqlite3HashClear(&temp1); - sqlite3HashClear(&pSchema->fkeyHash); - pSchema->pSeqTab = 0; - if( pSchema->flags & DB_SchemaLoaded ){ - pSchema->iGeneration++; - pSchema->flags &= ~DB_SchemaLoaded; - } -} - -/* -** Find and return the schema associated with a BTree. Create -** a new one if necessary. -*/ -SQLITE_PRIVATE Schema *sqlite3SchemaGet(sqlite3 *db, Btree *pBt){ - Schema * p; - if( pBt ){ - p = (Schema *)sqlite3BtreeSchema(pBt, sizeof(Schema), sqlite3SchemaClear); - }else{ - p = (Schema *)sqlite3DbMallocZero(0, sizeof(Schema)); - } - if( !p ){ - db->mallocFailed = 1; - }else if ( 0==p->file_format ){ - sqlite3HashInit(&p->tblHash); - sqlite3HashInit(&p->idxHash); - sqlite3HashInit(&p->trigHash); - sqlite3HashInit(&p->fkeyHash); - p->enc = SQLITE_UTF8; - } - return p; -} - -/************** End of callback.c ********************************************/ -/************** Begin file delete.c ******************************************/ -/* -** 2001 September 15 -** -** The author disclaims copyright to this source code. In place of -** a legal notice, here is a blessing: -** -** May you do good and not evil. -** May you find forgiveness for yourself and forgive others. -** May you share freely, never taking more than you give. -** -************************************************************************* -** This file contains C code routines that are called by the parser -** in order to generate code for DELETE FROM statements. -*/ - -/* -** While a SrcList can in general represent multiple tables and subqueries -** (as in the FROM clause of a SELECT statement) in this case it contains -** the name of a single table, as one might find in an INSERT, DELETE, -** or UPDATE statement. Look up that table in the symbol table and -** return a pointer. Set an error message and return NULL if the table -** name is not found or if any other error occurs. -** -** The following fields are initialized appropriate in pSrc: -** -** pSrc->a[0].pTab Pointer to the Table object -** pSrc->a[0].pIndex Pointer to the INDEXED BY index, if there is one -** -*/ -SQLITE_PRIVATE Table *sqlite3SrcListLookup(Parse *pParse, SrcList *pSrc){ - struct SrcList_item *pItem = pSrc->a; - Table *pTab; - assert( pItem && pSrc->nSrc==1 ); - pTab = sqlite3LocateTableItem(pParse, 0, pItem); - sqlite3DeleteTable(pParse->db, pItem->pTab); - pItem->pTab = pTab; - if( pTab ){ - pTab->nRef++; - } - if( sqlite3IndexedByLookup(pParse, pItem) ){ - pTab = 0; - } - return pTab; -} - -/* -** Check to make sure the given table is writable. If it is not -** writable, generate an error message and return 1. If it is -** writable return 0; -*/ -SQLITE_PRIVATE int sqlite3IsReadOnly(Parse *pParse, Table *pTab, int viewOk){ - /* A table is not writable under the following circumstances: - ** - ** 1) It is a virtual table and no implementation of the xUpdate method - ** has been provided, or - ** 2) It is a system table (i.e. 
sqlite_master), this call is not - ** part of a nested parse and writable_schema pragma has not - ** been specified. - ** - ** In either case leave an error message in pParse and return non-zero. - */ - if( ( IsVirtual(pTab) - && sqlite3GetVTable(pParse->db, pTab)->pMod->pModule->xUpdate==0 ) - || ( (pTab->tabFlags & TF_Readonly)!=0 - && (pParse->db->flags & SQLITE_WriteSchema)==0 - && pParse->nested==0 ) - ){ - sqlite3ErrorMsg(pParse, "table %s may not be modified", pTab->zName); - return 1; - } - -#ifndef SQLITE_OMIT_VIEW - if( !viewOk && pTab->pSelect ){ - sqlite3ErrorMsg(pParse,"cannot modify %s because it is a view",pTab->zName); - return 1; - } -#endif - return 0; -} - - -#if !defined(SQLITE_OMIT_VIEW) && !defined(SQLITE_OMIT_TRIGGER) -/* -** Evaluate a view and store its result in an ephemeral table. The -** pWhere argument is an optional WHERE clause that restricts the -** set of rows in the view that are to be added to the ephemeral table. -*/ -SQLITE_PRIVATE void sqlite3MaterializeView( - Parse *pParse, /* Parsing context */ - Table *pView, /* View definition */ - Expr *pWhere, /* Optional WHERE clause to be added */ - int iCur /* Cursor number for ephemerial table */ -){ - SelectDest dest; - Select *pSel; - SrcList *pFrom; - sqlite3 *db = pParse->db; - int iDb = sqlite3SchemaToIndex(db, pView->pSchema); - pWhere = sqlite3ExprDup(db, pWhere, 0); - pFrom = sqlite3SrcListAppend(db, 0, 0, 0); - if( pFrom ){ - assert( pFrom->nSrc==1 ); - pFrom->a[0].zName = sqlite3DbStrDup(db, pView->zName); - pFrom->a[0].zDatabase = sqlite3DbStrDup(db, db->aDb[iDb].zName); - assert( pFrom->a[0].pOn==0 ); - assert( pFrom->a[0].pUsing==0 ); - } - pSel = sqlite3SelectNew(pParse, 0, pFrom, pWhere, 0, 0, 0, 0, 0, 0); - sqlite3SelectDestInit(&dest, SRT_EphemTab, iCur); - sqlite3Select(pParse, pSel, &dest); - sqlite3SelectDelete(db, pSel); -} -#endif /* !defined(SQLITE_OMIT_VIEW) && !defined(SQLITE_OMIT_TRIGGER) */ - -#if defined(SQLITE_ENABLE_UPDATE_DELETE_LIMIT) && !defined(SQLITE_OMIT_SUBQUERY) -/* -** Generate an expression tree to implement the WHERE, ORDER BY, -** and LIMIT/OFFSET portion of DELETE and UPDATE statements. -** -** DELETE FROM table_wxyz WHERE a<5 ORDER BY a LIMIT 1; -** \__________________________/ -** pLimitWhere (pInClause) -*/ -SQLITE_PRIVATE Expr *sqlite3LimitWhere( - Parse *pParse, /* The parser context */ - SrcList *pSrc, /* the FROM clause -- which tables to scan */ - Expr *pWhere, /* The WHERE clause. May be null */ - ExprList *pOrderBy, /* The ORDER BY clause. May be null */ - Expr *pLimit, /* The LIMIT clause. May be null */ - Expr *pOffset, /* The OFFSET clause. May be null */ - char *zStmtType /* Either DELETE or UPDATE. For err msgs. */ -){ - Expr *pWhereRowid = NULL; /* WHERE rowid .. */ - Expr *pInClause = NULL; /* WHERE rowid IN ( select ) */ - Expr *pSelectRowid = NULL; /* SELECT rowid ... */ - ExprList *pEList = NULL; /* Expression list contaning only pSelectRowid */ - SrcList *pSelectSrc = NULL; /* SELECT rowid FROM x ... (dup of pSrc) */ - Select *pSelect = NULL; /* Complete SELECT tree */ - - /* Check that there isn't an ORDER BY without a LIMIT clause. - */ - if( pOrderBy && (pLimit == 0) ) { - sqlite3ErrorMsg(pParse, "ORDER BY without LIMIT on %s", zStmtType); - goto limit_where_cleanup_2; - } - - /* We only need to generate a select expression if there - ** is a limit/offset term to enforce. - */ - if( pLimit == 0 ) { - /* if pLimit is null, pOffset will always be null as well. 
*/ - assert( pOffset == 0 ); - return pWhere; - } - - /* Generate a select expression tree to enforce the limit/offset - ** term for the DELETE or UPDATE statement. For example: - ** DELETE FROM table_a WHERE col1=1 ORDER BY col2 LIMIT 1 OFFSET 1 - ** becomes: - ** DELETE FROM table_a WHERE rowid IN ( - ** SELECT rowid FROM table_a WHERE col1=1 ORDER BY col2 LIMIT 1 OFFSET 1 - ** ); - */ - - pSelectRowid = sqlite3PExpr(pParse, TK_ROW, 0, 0, 0); - if( pSelectRowid == 0 ) goto limit_where_cleanup_2; - pEList = sqlite3ExprListAppend(pParse, 0, pSelectRowid); - if( pEList == 0 ) goto limit_where_cleanup_2; - - /* duplicate the FROM clause as it is needed by both the DELETE/UPDATE tree - ** and the SELECT subtree. */ - pSelectSrc = sqlite3SrcListDup(pParse->db, pSrc, 0); - if( pSelectSrc == 0 ) { - sqlite3ExprListDelete(pParse->db, pEList); - goto limit_where_cleanup_2; - } - - /* generate the SELECT expression tree. */ - pSelect = sqlite3SelectNew(pParse,pEList,pSelectSrc,pWhere,0,0, - pOrderBy,0,pLimit,pOffset); - if( pSelect == 0 ) return 0; - - /* now generate the new WHERE rowid IN clause for the DELETE/UDPATE */ - pWhereRowid = sqlite3PExpr(pParse, TK_ROW, 0, 0, 0); - if( pWhereRowid == 0 ) goto limit_where_cleanup_1; - pInClause = sqlite3PExpr(pParse, TK_IN, pWhereRowid, 0, 0); - if( pInClause == 0 ) goto limit_where_cleanup_1; - - pInClause->x.pSelect = pSelect; - pInClause->flags |= EP_xIsSelect; - sqlite3ExprSetHeight(pParse, pInClause); - return pInClause; - - /* something went wrong. clean up anything allocated. */ -limit_where_cleanup_1: - sqlite3SelectDelete(pParse->db, pSelect); - return 0; - -limit_where_cleanup_2: - sqlite3ExprDelete(pParse->db, pWhere); - sqlite3ExprListDelete(pParse->db, pOrderBy); - sqlite3ExprDelete(pParse->db, pLimit); - sqlite3ExprDelete(pParse->db, pOffset); - return 0; -} -#endif /* defined(SQLITE_ENABLE_UPDATE_DELETE_LIMIT) */ - /* && !defined(SQLITE_OMIT_SUBQUERY) */ - -/* -** Generate code for a DELETE FROM statement. -** -** DELETE FROM table_wxyz WHERE a<5 AND b NOT NULL; -** \________/ \________________/ -** pTabList pWhere -*/ -SQLITE_PRIVATE void sqlite3DeleteFrom( - Parse *pParse, /* The parser context */ - SrcList *pTabList, /* The table from which we should delete things */ - Expr *pWhere /* The WHERE clause. 
May be null */ -){ - Vdbe *v; /* The virtual database engine */ - Table *pTab; /* The table from which records will be deleted */ - const char *zDb; /* Name of database holding pTab */ - int i; /* Loop counter */ - WhereInfo *pWInfo; /* Information about the WHERE clause */ - Index *pIdx; /* For looping over indices of the table */ - int iTabCur; /* Cursor number for the table */ - int iDataCur; /* VDBE cursor for the canonical data source */ - int iIdxCur; /* Cursor number of the first index */ - int nIdx; /* Number of indices */ - sqlite3 *db; /* Main database structure */ - AuthContext sContext; /* Authorization context */ - NameContext sNC; /* Name context to resolve expressions in */ - int iDb; /* Database number */ - int memCnt = -1; /* Memory cell used for change counting */ - int rcauth; /* Value returned by authorization callback */ - int okOnePass; /* True for one-pass algorithm without the FIFO */ - int aiCurOnePass[2]; /* The write cursors opened by WHERE_ONEPASS */ - u8 *aToOpen = 0; /* Open cursor iTabCur+j if aToOpen[j] is true */ - Index *pPk; /* The PRIMARY KEY index on the table */ - int iPk = 0; /* First of nPk registers holding PRIMARY KEY value */ - i16 nPk = 1; /* Number of columns in the PRIMARY KEY */ - int iKey; /* Memory cell holding key of row to be deleted */ - i16 nKey; /* Number of memory cells in the row key */ - int iEphCur = 0; /* Ephemeral table holding all primary key values */ - int iRowSet = 0; /* Register for rowset of rows to delete */ - int addrBypass = 0; /* Address of jump over the delete logic */ - int addrLoop = 0; /* Top of the delete loop */ - int addrDelete = 0; /* Jump directly to the delete logic */ - int addrEphOpen = 0; /* Instruction to open the Ephermeral table */ - -#ifndef SQLITE_OMIT_TRIGGER - int isView; /* True if attempting to delete from a view */ - Trigger *pTrigger; /* List of table triggers, if required */ -#endif - - memset(&sContext, 0, sizeof(sContext)); - db = pParse->db; - if( pParse->nErr || db->mallocFailed ){ - goto delete_from_cleanup; - } - assert( pTabList->nSrc==1 ); - - /* Locate the table which we want to delete. This table has to be - ** put in an SrcList structure because some of the subroutines we - ** will be calling are designed to work with multiple tables and expect - ** an SrcList* parameter instead of just a Table* parameter. - */ - pTab = sqlite3SrcListLookup(pParse, pTabList); - if( pTab==0 ) goto delete_from_cleanup; - - /* Figure out if we have any triggers and if the table being - ** deleted from is a view - */ -#ifndef SQLITE_OMIT_TRIGGER - pTrigger = sqlite3TriggersExist(pParse, pTab, TK_DELETE, 0, 0); - isView = pTab->pSelect!=0; -#else -# define pTrigger 0 -# define isView 0 -#endif -#ifdef SQLITE_OMIT_VIEW -# undef isView -# define isView 0 -#endif - - /* If pTab is really a view, make sure it has been initialized. - */ - if( sqlite3ViewGetColumnNames(pParse, pTab) ){ - goto delete_from_cleanup; - } - - if( sqlite3IsReadOnly(pParse, pTab, (pTrigger?1:0)) ){ - goto delete_from_cleanup; - } - iDb = sqlite3SchemaToIndex(db, pTab->pSchema); - assert( iDbnDb ); - zDb = db->aDb[iDb].zName; - rcauth = sqlite3AuthCheck(pParse, SQLITE_DELETE, pTab->zName, 0, zDb); - assert( rcauth==SQLITE_OK || rcauth==SQLITE_DENY || rcauth==SQLITE_IGNORE ); - if( rcauth==SQLITE_DENY ){ - goto delete_from_cleanup; - } - assert(!isView || pTrigger); - - /* Assign cursor numbers to the table and all its indices. 
- */ - assert( pTabList->nSrc==1 ); - iTabCur = pTabList->a[0].iCursor = pParse->nTab++; - for(nIdx=0, pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext, nIdx++){ - pParse->nTab++; - } - - /* Start the view context - */ - if( isView ){ - sqlite3AuthContextPush(pParse, &sContext, pTab->zName); - } - - /* Begin generating code. - */ - v = sqlite3GetVdbe(pParse); - if( v==0 ){ - goto delete_from_cleanup; - } - if( pParse->nested==0 ) sqlite3VdbeCountChanges(v); - sqlite3BeginWriteOperation(pParse, 1, iDb); - - /* If we are trying to delete from a view, realize that view into - ** a ephemeral table. - */ -#if !defined(SQLITE_OMIT_VIEW) && !defined(SQLITE_OMIT_TRIGGER) - if( isView ){ - sqlite3MaterializeView(pParse, pTab, pWhere, iTabCur); - iDataCur = iIdxCur = iTabCur; - } -#endif - - /* Resolve the column names in the WHERE clause. - */ - memset(&sNC, 0, sizeof(sNC)); - sNC.pParse = pParse; - sNC.pSrcList = pTabList; - if( sqlite3ResolveExprNames(&sNC, pWhere) ){ - goto delete_from_cleanup; - } - - /* Initialize the counter of the number of rows deleted, if - ** we are counting rows. - */ - if( db->flags & SQLITE_CountRows ){ - memCnt = ++pParse->nMem; - sqlite3VdbeAddOp2(v, OP_Integer, 0, memCnt); - } - -#ifndef SQLITE_OMIT_TRUNCATE_OPTIMIZATION - /* Special case: A DELETE without a WHERE clause deletes everything. - ** It is easier just to erase the whole table. Prior to version 3.6.5, - ** this optimization caused the row change count (the value returned by - ** API function sqlite3_count_changes) to be set incorrectly. */ - if( rcauth==SQLITE_OK && pWhere==0 && !pTrigger && !IsVirtual(pTab) - && 0==sqlite3FkRequired(pParse, pTab, 0, 0) - ){ - assert( !isView ); - sqlite3TableLock(pParse, iDb, pTab->tnum, 1, pTab->zName); - if( HasRowid(pTab) ){ - sqlite3VdbeAddOp4(v, OP_Clear, pTab->tnum, iDb, memCnt, - pTab->zName, P4_STATIC); - } - for(pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext){ - assert( pIdx->pSchema==pTab->pSchema ); - sqlite3VdbeAddOp2(v, OP_Clear, pIdx->tnum, iDb); - } - }else -#endif /* SQLITE_OMIT_TRUNCATE_OPTIMIZATION */ - { - if( HasRowid(pTab) ){ - /* For a rowid table, initialize the RowSet to an empty set */ - pPk = 0; - nPk = 1; - iRowSet = ++pParse->nMem; - sqlite3VdbeAddOp2(v, OP_Null, 0, iRowSet); - }else{ - /* For a WITHOUT ROWID table, create an ephermeral table used to - ** hold all primary keys for rows to be deleted. */ - pPk = sqlite3PrimaryKeyIndex(pTab); - assert( pPk!=0 ); - nPk = pPk->nKeyCol; - iPk = pParse->nMem+1; - pParse->nMem += nPk; - iEphCur = pParse->nTab++; - addrEphOpen = sqlite3VdbeAddOp2(v, OP_OpenEphemeral, iEphCur, nPk); - sqlite3VdbeSetP4KeyInfo(pParse, pPk); - } - - /* Construct a query to find the rowid or primary key for every row - ** to be deleted, based on the WHERE clause. - */ - pWInfo = sqlite3WhereBegin(pParse, pTabList, pWhere, 0, 0, - WHERE_ONEPASS_DESIRED|WHERE_DUPLICATES_OK, - iTabCur+1); - if( pWInfo==0 ) goto delete_from_cleanup; - okOnePass = sqlite3WhereOkOnePass(pWInfo, aiCurOnePass); - - /* Keep track of the number of rows to be deleted */ - if( db->flags & SQLITE_CountRows ){ - sqlite3VdbeAddOp2(v, OP_AddImm, memCnt, 1); - } - - /* Extract the rowid or primary key for the current row */ - if( pPk ){ - for(i=0; iaiColumn[i], iPk+i); - } - iKey = iPk; - }else{ - iKey = pParse->nMem + 1; - iKey = sqlite3ExprCodeGetColumn(pParse, pTab, -1, iTabCur, iKey, 0); - if( iKey>pParse->nMem ) pParse->nMem = iKey; - } - - if( okOnePass ){ - /* For ONEPASS, no need to store the rowid/primary-key. 
There is only - ** one, so just keep it in its register(s) and fall through to the - ** delete code. - */ - nKey = nPk; /* OP_Found will use an unpacked key */ - aToOpen = sqlite3DbMallocRaw(db, nIdx+2); - if( aToOpen==0 ){ - sqlite3WhereEnd(pWInfo); - goto delete_from_cleanup; - } - memset(aToOpen, 1, nIdx+1); - aToOpen[nIdx+1] = 0; - if( aiCurOnePass[0]>=0 ) aToOpen[aiCurOnePass[0]-iTabCur] = 0; - if( aiCurOnePass[1]>=0 ) aToOpen[aiCurOnePass[1]-iTabCur] = 0; - if( addrEphOpen ) sqlite3VdbeChangeToNoop(v, addrEphOpen); - addrDelete = sqlite3VdbeAddOp0(v, OP_Goto); /* Jump to DELETE logic */ - }else if( pPk ){ - /* Construct a composite key for the row to be deleted and remember it */ - iKey = ++pParse->nMem; - nKey = 0; /* Zero tells OP_Found to use a composite key */ - sqlite3VdbeAddOp4(v, OP_MakeRecord, iPk, nPk, iKey, - sqlite3IndexAffinityStr(v, pPk), nPk); - sqlite3VdbeAddOp2(v, OP_IdxInsert, iEphCur, iKey); - }else{ - /* Get the rowid of the row to be deleted and remember it in the RowSet */ - nKey = 1; /* OP_Seek always uses a single rowid */ - sqlite3VdbeAddOp2(v, OP_RowSetAdd, iRowSet, iKey); - } - - /* End of the WHERE loop */ - sqlite3WhereEnd(pWInfo); - if( okOnePass ){ - /* Bypass the delete logic below if the WHERE loop found zero rows */ - addrBypass = sqlite3VdbeMakeLabel(v); - sqlite3VdbeAddOp2(v, OP_Goto, 0, addrBypass); - sqlite3VdbeJumpHere(v, addrDelete); - } - - /* Unless this is a view, open cursors for the table we are - ** deleting from and all its indices. If this is a view, then the - ** only effect this statement has is to fire the INSTEAD OF - ** triggers. - */ - if( !isView ){ - sqlite3OpenTableAndIndices(pParse, pTab, OP_OpenWrite, iTabCur, aToOpen, - &iDataCur, &iIdxCur); - assert( pPk || iDataCur==iTabCur ); - assert( pPk || iIdxCur==iDataCur+1 ); - } - - /* Set up a loop over the rowids/primary-keys that were found in the - ** where-clause loop above. - */ - if( okOnePass ){ - /* Just one row. Hence the top-of-loop is a no-op */ - assert( nKey==nPk ); /* OP_Found will use an unpacked key */ - if( aToOpen[iDataCur-iTabCur] ){ - assert( pPk!=0 ); - sqlite3VdbeAddOp4Int(v, OP_NotFound, iDataCur, addrBypass, iKey, nKey); - VdbeCoverage(v); - } - }else if( pPk ){ - addrLoop = sqlite3VdbeAddOp1(v, OP_Rewind, iEphCur); VdbeCoverage(v); - sqlite3VdbeAddOp2(v, OP_RowKey, iEphCur, iKey); - assert( nKey==0 ); /* OP_Found will use a composite key */ - }else{ - addrLoop = sqlite3VdbeAddOp3(v, OP_RowSetRead, iRowSet, 0, iKey); - VdbeCoverage(v); - assert( nKey==1 ); - } - - /* Delete the row */ -#ifndef SQLITE_OMIT_VIRTUALTABLE - if( IsVirtual(pTab) ){ - const char *pVTab = (const char *)sqlite3GetVTable(db, pTab); - sqlite3VtabMakeWritable(pParse, pTab); - sqlite3VdbeAddOp4(v, OP_VUpdate, 0, 1, iKey, pVTab, P4_VTAB); - sqlite3VdbeChangeP5(v, OE_Abort); - sqlite3MayAbort(pParse); - }else -#endif - { - int count = (pParse->nested==0); /* True to count changes */ - sqlite3GenerateRowDelete(pParse, pTab, pTrigger, iDataCur, iIdxCur, - iKey, nKey, count, OE_Default, okOnePass); - } - - /* End of the loop over all rowids/primary-keys. */ - if( okOnePass ){ - sqlite3VdbeResolveLabel(v, addrBypass); - }else if( pPk ){ - sqlite3VdbeAddOp2(v, OP_Next, iEphCur, addrLoop+1); VdbeCoverage(v); - sqlite3VdbeJumpHere(v, addrLoop); - }else{ - sqlite3VdbeAddOp2(v, OP_Goto, 0, addrLoop); - sqlite3VdbeJumpHere(v, addrLoop); - } - - /* Close the cursors open on the table and its indexes. 
*/ - if( !isView && !IsVirtual(pTab) ){ - if( !pPk ) sqlite3VdbeAddOp1(v, OP_Close, iDataCur); - for(i=0, pIdx=pTab->pIndex; pIdx; i++, pIdx=pIdx->pNext){ - sqlite3VdbeAddOp1(v, OP_Close, iIdxCur + i); - } - } - } /* End non-truncate path */ - - /* Update the sqlite_sequence table by storing the content of the - ** maximum rowid counter values recorded while inserting into - ** autoincrement tables. - */ - if( pParse->nested==0 && pParse->pTriggerTab==0 ){ - sqlite3AutoincrementEnd(pParse); - } - - /* Return the number of rows that were deleted. If this routine is - ** generating code because of a call to sqlite3NestedParse(), do not - ** invoke the callback function. - */ - if( (db->flags&SQLITE_CountRows) && !pParse->nested && !pParse->pTriggerTab ){ - sqlite3VdbeAddOp2(v, OP_ResultRow, memCnt, 1); - sqlite3VdbeSetNumCols(v, 1); - sqlite3VdbeSetColName(v, 0, COLNAME_NAME, "rows deleted", SQLITE_STATIC); - } - -delete_from_cleanup: - sqlite3AuthContextPop(&sContext); - sqlite3SrcListDelete(db, pTabList); - sqlite3ExprDelete(db, pWhere); - sqlite3DbFree(db, aToOpen); - return; -} -/* Make sure "isView" and other macros defined above are undefined. Otherwise -** thely may interfere with compilation of other functions in this file -** (or in another file, if this file becomes part of the amalgamation). */ -#ifdef isView - #undef isView -#endif -#ifdef pTrigger - #undef pTrigger -#endif - -/* -** This routine generates VDBE code that causes a single row of a -** single table to be deleted. Both the original table entry and -** all indices are removed. -** -** Preconditions: -** -** 1. iDataCur is an open cursor on the btree that is the canonical data -** store for the table. (This will be either the table itself, -** in the case of a rowid table, or the PRIMARY KEY index in the case -** of a WITHOUT ROWID table.) -** -** 2. Read/write cursors for all indices of pTab must be open as -** cursor number iIdxCur+i for the i-th index. -** -** 3. The primary key for the row to be deleted must be stored in a -** sequence of nPk memory cells starting at iPk. If nPk==0 that means -** that a search record formed from OP_MakeRecord is contained in the -** single memory location iPk. -*/ -SQLITE_PRIVATE void sqlite3GenerateRowDelete( - Parse *pParse, /* Parsing context */ - Table *pTab, /* Table containing the row to be deleted */ - Trigger *pTrigger, /* List of triggers to (potentially) fire */ - int iDataCur, /* Cursor from which column data is extracted */ - int iIdxCur, /* First index cursor */ - int iPk, /* First memory cell containing the PRIMARY KEY */ - i16 nPk, /* Number of PRIMARY KEY memory cells */ - u8 count, /* If non-zero, increment the row change counter */ - u8 onconf, /* Default ON CONFLICT policy for triggers */ - u8 bNoSeek /* iDataCur is already pointing to the row to delete */ -){ - Vdbe *v = pParse->pVdbe; /* Vdbe */ - int iOld = 0; /* First register in OLD.* array */ - int iLabel; /* Label resolved to end of generated code */ - u8 opSeek; /* Seek opcode */ - - /* Vdbe is guaranteed to have been allocated by this stage. */ - assert( v ); - VdbeModuleComment((v, "BEGIN: GenRowDel(%d,%d,%d,%d)", - iDataCur, iIdxCur, iPk, (int)nPk)); - - /* Seek cursor iCur to the row to delete. If this row no longer exists - ** (this can happen if a trigger program has already deleted it), do - ** not attempt to delete it or fire any DELETE triggers. */ - iLabel = sqlite3VdbeMakeLabel(v); - opSeek = HasRowid(pTab) ? 
OP_NotExists : OP_NotFound; - if( !bNoSeek ){ - sqlite3VdbeAddOp4Int(v, opSeek, iDataCur, iLabel, iPk, nPk); - VdbeCoverageIf(v, opSeek==OP_NotExists); - VdbeCoverageIf(v, opSeek==OP_NotFound); - } - - /* If there are any triggers to fire, allocate a range of registers to - ** use for the old.* references in the triggers. */ - if( sqlite3FkRequired(pParse, pTab, 0, 0) || pTrigger ){ - u32 mask; /* Mask of OLD.* columns in use */ - int iCol; /* Iterator used while populating OLD.* */ - int addrStart; /* Start of BEFORE trigger programs */ - - /* TODO: Could use temporary registers here. Also could attempt to - ** avoid copying the contents of the rowid register. */ - mask = sqlite3TriggerColmask( - pParse, pTrigger, 0, 0, TRIGGER_BEFORE|TRIGGER_AFTER, pTab, onconf - ); - mask |= sqlite3FkOldmask(pParse, pTab); - iOld = pParse->nMem+1; - pParse->nMem += (1 + pTab->nCol); - - /* Populate the OLD.* pseudo-table register array. These values will be - ** used by any BEFORE and AFTER triggers that exist. */ - sqlite3VdbeAddOp2(v, OP_Copy, iPk, iOld); - for(iCol=0; iColnCol; iCol++){ - testcase( mask!=0xffffffff && iCol==31 ); - testcase( mask!=0xffffffff && iCol==32 ); - if( mask==0xffffffff || (iCol<=31 && (mask & MASKBIT32(iCol))!=0) ){ - sqlite3ExprCodeGetColumnOfTable(v, pTab, iDataCur, iCol, iOld+iCol+1); - } - } - - /* Invoke BEFORE DELETE trigger programs. */ - addrStart = sqlite3VdbeCurrentAddr(v); - sqlite3CodeRowTrigger(pParse, pTrigger, - TK_DELETE, 0, TRIGGER_BEFORE, pTab, iOld, onconf, iLabel - ); - - /* If any BEFORE triggers were coded, then seek the cursor to the - ** row to be deleted again. It may be that the BEFORE triggers moved - ** the cursor or of already deleted the row that the cursor was - ** pointing to. - */ - if( addrStartpSelect==0 ){ - sqlite3GenerateRowIndexDelete(pParse, pTab, iDataCur, iIdxCur, 0); - sqlite3VdbeAddOp2(v, OP_Delete, iDataCur, (count?OPFLAG_NCHANGE:0)); - if( count ){ - sqlite3VdbeChangeP4(v, -1, pTab->zName, P4_TRANSIENT); - } - } - - /* Do any ON CASCADE, SET NULL or SET DEFAULT operations required to - ** handle rows (possibly in other tables) that refer via a foreign key - ** to the row just deleted. */ - sqlite3FkActions(pParse, pTab, 0, iOld, 0, 0); - - /* Invoke AFTER DELETE trigger programs. */ - sqlite3CodeRowTrigger(pParse, pTrigger, - TK_DELETE, 0, TRIGGER_AFTER, pTab, iOld, onconf, iLabel - ); - - /* Jump here if the row had already been deleted before any BEFORE - ** trigger programs were invoked. Or if a trigger program throws a - ** RAISE(IGNORE) exception. */ - sqlite3VdbeResolveLabel(v, iLabel); - VdbeModuleComment((v, "END: GenRowDel()")); -} - -/* -** This routine generates VDBE code that causes the deletion of all -** index entries associated with a single row of a single table, pTab -** -** Preconditions: -** -** 1. A read/write cursor "iDataCur" must be open on the canonical storage -** btree for the table pTab. (This will be either the table itself -** for rowid tables or to the primary key index for WITHOUT ROWID -** tables.) -** -** 2. Read/write cursors for all indices of pTab must be open as -** cursor number iIdxCur+i for the i-th index. (The pTab->pIndex -** index is the 0-th index.) -** -** 3. The "iDataCur" cursor must be already be positioned on the row -** that is to be deleted. -*/ -SQLITE_PRIVATE void sqlite3GenerateRowIndexDelete( - Parse *pParse, /* Parsing and code generating context */ - Table *pTab, /* Table containing the row to be deleted */ - int iDataCur, /* Cursor of table holding data. 
*/ - int iIdxCur, /* First index cursor */ - int *aRegIdx /* Only delete if aRegIdx!=0 && aRegIdx[i]>0 */ -){ - int i; /* Index loop counter */ - int r1 = -1; /* Register holding an index key */ - int iPartIdxLabel; /* Jump destination for skipping partial index entries */ - Index *pIdx; /* Current index */ - Index *pPrior = 0; /* Prior index */ - Vdbe *v; /* The prepared statement under construction */ - Index *pPk; /* PRIMARY KEY index, or NULL for rowid tables */ - - v = pParse->pVdbe; - pPk = HasRowid(pTab) ? 0 : sqlite3PrimaryKeyIndex(pTab); - for(i=0, pIdx=pTab->pIndex; pIdx; i++, pIdx=pIdx->pNext){ - assert( iIdxCur+i!=iDataCur || pPk==pIdx ); - if( aRegIdx!=0 && aRegIdx[i]==0 ) continue; - if( pIdx==pPk ) continue; - VdbeModuleComment((v, "GenRowIdxDel for %s", pIdx->zName)); - r1 = sqlite3GenerateIndexKey(pParse, pIdx, iDataCur, 0, 1, - &iPartIdxLabel, pPrior, r1); - sqlite3VdbeAddOp3(v, OP_IdxDelete, iIdxCur+i, r1, - pIdx->uniqNotNull ? pIdx->nKeyCol : pIdx->nColumn); - sqlite3ResolvePartIdxLabel(pParse, iPartIdxLabel); - pPrior = pIdx; - } -} - -/* -** Generate code that will assemble an index key and stores it in register -** regOut. The key with be for index pIdx which is an index on pTab. -** iCur is the index of a cursor open on the pTab table and pointing to -** the entry that needs indexing. If pTab is a WITHOUT ROWID table, then -** iCur must be the cursor of the PRIMARY KEY index. -** -** Return a register number which is the first in a block of -** registers that holds the elements of the index key. The -** block of registers has already been deallocated by the time -** this routine returns. -** -** If *piPartIdxLabel is not NULL, fill it in with a label and jump -** to that label if pIdx is a partial index that should be skipped. -** The label should be resolved using sqlite3ResolvePartIdxLabel(). -** A partial index should be skipped if its WHERE clause evaluates -** to false or null. If pIdx is not a partial index, *piPartIdxLabel -** will be set to zero which is an empty label that is ignored by -** sqlite3ResolvePartIdxLabel(). -** -** The pPrior and regPrior parameters are used to implement a cache to -** avoid unnecessary register loads. If pPrior is not NULL, then it is -** a pointer to a different index for which an index key has just been -** computed into register regPrior. If the current pIdx index is generating -** its key into the same sequence of registers and if pPrior and pIdx share -** a column in common, then the register corresponding to that column already -** holds the correct value and the loading of that register is skipped. -** This optimization is helpful when doing a DELETE or an INTEGRITY_CHECK -** on a table with multiple indices, and especially with the ROWID or -** PRIMARY KEY columns of the index. 
-*/ -SQLITE_PRIVATE int sqlite3GenerateIndexKey( - Parse *pParse, /* Parsing context */ - Index *pIdx, /* The index for which to generate a key */ - int iDataCur, /* Cursor number from which to take column data */ - int regOut, /* Put the new key into this register if not 0 */ - int prefixOnly, /* Compute only a unique prefix of the key */ - int *piPartIdxLabel, /* OUT: Jump to this label to skip partial index */ - Index *pPrior, /* Previously generated index key */ - int regPrior /* Register holding previous generated key */ -){ - Vdbe *v = pParse->pVdbe; - int j; - Table *pTab = pIdx->pTable; - int regBase; - int nCol; - - if( piPartIdxLabel ){ - if( pIdx->pPartIdxWhere ){ - *piPartIdxLabel = sqlite3VdbeMakeLabel(v); - pParse->iPartIdxTab = iDataCur; - sqlite3ExprCachePush(pParse); - sqlite3ExprIfFalse(pParse, pIdx->pPartIdxWhere, *piPartIdxLabel, - SQLITE_JUMPIFNULL); - }else{ - *piPartIdxLabel = 0; - } - } - nCol = (prefixOnly && pIdx->uniqNotNull) ? pIdx->nKeyCol : pIdx->nColumn; - regBase = sqlite3GetTempRange(pParse, nCol); - if( pPrior && (regBase!=regPrior || pPrior->pPartIdxWhere) ) pPrior = 0; - for(j=0; jaiColumn[j]==pIdx->aiColumn[j] ) continue; - sqlite3ExprCodeGetColumnOfTable(v, pTab, iDataCur, pIdx->aiColumn[j], - regBase+j); - /* If the column affinity is REAL but the number is an integer, then it - ** might be stored in the table as an integer (using a compact - ** representation) then converted to REAL by an OP_RealAffinity opcode. - ** But we are getting ready to store this value back into an index, where - ** it should be converted by to INTEGER again. So omit the OP_RealAffinity - ** opcode if it is present */ - sqlite3VdbeDeletePriorOpcode(v, OP_RealAffinity); - } - if( regOut ){ - sqlite3VdbeAddOp3(v, OP_MakeRecord, regBase, nCol, regOut); - } - sqlite3ReleaseTempRange(pParse, regBase, nCol); - return regBase; -} - -/* -** If a prior call to sqlite3GenerateIndexKey() generated a jump-over label -** because it was a partial index, then this routine should be called to -** resolve that label. -*/ -SQLITE_PRIVATE void sqlite3ResolvePartIdxLabel(Parse *pParse, int iLabel){ - if( iLabel ){ - sqlite3VdbeResolveLabel(pParse->pVdbe, iLabel); - sqlite3ExprCachePop(pParse); - } -} - -/************** End of delete.c **********************************************/ -/************** Begin file func.c ********************************************/ -/* -** 2002 February 23 -** -** The author disclaims copyright to this source code. In place of -** a legal notice, here is a blessing: -** -** May you do good and not evil. -** May you find forgiveness for yourself and forgive others. -** May you share freely, never taking more than you give. -** -************************************************************************* -** This file contains the C functions that implement various SQL -** functions of SQLite. -** -** There is only one exported symbol in this file - the function -** sqliteRegisterBuildinFunctions() found at the bottom of the file. -** All other code has file scope. -*/ -/* #include */ -/* #include */ - -/* -** Return the collating function associated with a function. -*/ -static CollSeq *sqlite3GetFuncCollSeq(sqlite3_context *context){ - return context->pColl; -} - -/* -** Indicate that the accumulator load should be skipped on this -** iteration of the aggregate loop. 
-*/ -static void sqlite3SkipAccumulatorLoad(sqlite3_context *context){ - context->skipFlag = 1; -} - -/* -** Implementation of the non-aggregate min() and max() functions -*/ -static void minmaxFunc( - sqlite3_context *context, - int argc, - sqlite3_value **argv -){ - int i; - int mask; /* 0 for min() or 0xffffffff for max() */ - int iBest; - CollSeq *pColl; - - assert( argc>1 ); - mask = sqlite3_user_data(context)==0 ? 0 : -1; - pColl = sqlite3GetFuncCollSeq(context); - assert( pColl ); - assert( mask==-1 || mask==0 ); - iBest = 0; - if( sqlite3_value_type(argv[0])==SQLITE_NULL ) return; - for(i=1; i=0 ){ - testcase( mask==0 ); - iBest = i; - } - } - sqlite3_result_value(context, argv[iBest]); -} - -/* -** Return the type of the argument. -*/ -static void typeofFunc( - sqlite3_context *context, - int NotUsed, - sqlite3_value **argv -){ - const char *z = 0; - UNUSED_PARAMETER(NotUsed); - switch( sqlite3_value_type(argv[0]) ){ - case SQLITE_INTEGER: z = "integer"; break; - case SQLITE_TEXT: z = "text"; break; - case SQLITE_FLOAT: z = "real"; break; - case SQLITE_BLOB: z = "blob"; break; - default: z = "null"; break; - } - sqlite3_result_text(context, z, -1, SQLITE_STATIC); -} - - -/* -** Implementation of the length() function -*/ -static void lengthFunc( - sqlite3_context *context, - int argc, - sqlite3_value **argv -){ - int len; - - assert( argc==1 ); - UNUSED_PARAMETER(argc); - switch( sqlite3_value_type(argv[0]) ){ - case SQLITE_BLOB: - case SQLITE_INTEGER: - case SQLITE_FLOAT: { - sqlite3_result_int(context, sqlite3_value_bytes(argv[0])); - break; - } - case SQLITE_TEXT: { - const unsigned char *z = sqlite3_value_text(argv[0]); - if( z==0 ) return; - len = 0; - while( *z ){ - len++; - SQLITE_SKIP_UTF8(z); - } - sqlite3_result_int(context, len); - break; - } - default: { - sqlite3_result_null(context); - break; - } - } -} - -/* -** Implementation of the abs() function. -** -** IMP: R-23979-26855 The abs(X) function returns the absolute value of -** the numeric argument X. -*/ -static void absFunc(sqlite3_context *context, int argc, sqlite3_value **argv){ - assert( argc==1 ); - UNUSED_PARAMETER(argc); - switch( sqlite3_value_type(argv[0]) ){ - case SQLITE_INTEGER: { - i64 iVal = sqlite3_value_int64(argv[0]); - if( iVal<0 ){ - if( iVal==SMALLEST_INT64 ){ - /* IMP: R-31676-45509 If X is the integer -9223372036854775808 - ** then abs(X) throws an integer overflow error since there is no - ** equivalent positive 64-bit two complement value. */ - sqlite3_result_error(context, "integer overflow", -1); - return; - } - iVal = -iVal; - } - sqlite3_result_int64(context, iVal); - break; - } - case SQLITE_NULL: { - /* IMP: R-37434-19929 Abs(X) returns NULL if X is NULL. */ - sqlite3_result_null(context); - break; - } - default: { - /* Because sqlite3_value_double() returns 0.0 if the argument is not - ** something that can be converted into a number, we have: - ** IMP: R-57326-31541 Abs(X) return 0.0 if X is a string or blob that - ** cannot be converted to a numeric value. - */ - double rVal = sqlite3_value_double(argv[0]); - if( rVal<0 ) rVal = -rVal; - sqlite3_result_double(context, rVal); - break; - } - } -} - -/* -** Implementation of the instr() function. -** -** instr(haystack,needle) finds the first occurrence of needle -** in haystack and returns the number of previous characters plus 1, -** or 0 if needle does not occur within haystack. 
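Editor's note: to make the scalar behaviors documented above concrete (typeof() naming the storage class, length() counting UTF-8 characters for text but bytes for blobs, abs() passing NULL through), here is a small illustrative query run through sqlite3_exec(); nothing in it is specific to this codebase.

#include <stdio.h>
#include <sqlite3.h>

static int print_row(void *u, int argc, char **argv, char **col){
  (void)u;
  for(int i = 0; i < argc; i++)
    printf("%s = %s\n", col[i], argv[i] ? argv[i] : "NULL");
  return 0;
}

int main(void){
  sqlite3 *db = 0;
  if( sqlite3_open(":memory:", &db)!=SQLITE_OK ) return 1;
  /* length() counts characters for text and raw bytes for blobs;
  ** typeof() reports the storage class; abs(NULL) stays NULL. */
  sqlite3_exec(db,
      "SELECT typeof(3.0)     AS t_real,"
      "       typeof(NULL)    AS t_null,"
      "       length('hello') AS chars,"
      "       length(X'0102') AS bytes,"
      "       abs(-42)        AS absval;",
      print_row, 0, 0);
  sqlite3_close(db);
  return 0;
}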
-** -** If both haystack and needle are BLOBs, then the result is one more than -** the number of bytes in haystack prior to the first occurrence of needle, -** or 0 if needle never occurs in haystack. -*/ -static void instrFunc( - sqlite3_context *context, - int argc, - sqlite3_value **argv -){ - const unsigned char *zHaystack; - const unsigned char *zNeedle; - int nHaystack; - int nNeedle; - int typeHaystack, typeNeedle; - int N = 1; - int isText; - - UNUSED_PARAMETER(argc); - typeHaystack = sqlite3_value_type(argv[0]); - typeNeedle = sqlite3_value_type(argv[1]); - if( typeHaystack==SQLITE_NULL || typeNeedle==SQLITE_NULL ) return; - nHaystack = sqlite3_value_bytes(argv[0]); - nNeedle = sqlite3_value_bytes(argv[1]); - if( typeHaystack==SQLITE_BLOB && typeNeedle==SQLITE_BLOB ){ - zHaystack = sqlite3_value_blob(argv[0]); - zNeedle = sqlite3_value_blob(argv[1]); - isText = 0; - }else{ - zHaystack = sqlite3_value_text(argv[0]); - zNeedle = sqlite3_value_text(argv[1]); - isText = 1; - } - while( nNeedle<=nHaystack && memcmp(zHaystack, zNeedle, nNeedle)!=0 ){ - N++; - do{ - nHaystack--; - zHaystack++; - }while( isText && (zHaystack[0]&0xc0)==0x80 ); - } - if( nNeedle>nHaystack ) N = 0; - sqlite3_result_int(context, N); -} - -/* -** Implementation of the printf() function. -*/ -static void printfFunc( - sqlite3_context *context, - int argc, - sqlite3_value **argv -){ - PrintfArguments x; - StrAccum str; - const char *zFormat; - int n; - - if( argc>=1 && (zFormat = (const char*)sqlite3_value_text(argv[0]))!=0 ){ - x.nArg = argc-1; - x.nUsed = 0; - x.apArg = argv+1; - sqlite3StrAccumInit(&str, 0, 0, SQLITE_MAX_LENGTH); - str.db = sqlite3_context_db_handle(context); - sqlite3XPrintf(&str, SQLITE_PRINTF_SQLFUNC, zFormat, &x); - n = str.nChar; - sqlite3_result_text(context, sqlite3StrAccumFinish(&str), n, - SQLITE_DYNAMIC); - } -} - -/* -** Implementation of the substr() function. -** -** substr(x,p1,p2) returns p2 characters of x[] beginning with p1. -** p1 is 1-indexed. So substr(x,1,1) returns the first character -** of x. If x is text, then we actually count UTF-8 characters. -** If x is a blob, then we count bytes. -** -** If p1 is negative, then we begin abs(p1) from the end of x[]. -** -** If p2 is negative, return the p2 characters preceding p1. 
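Editor's note: a short sketch of the instr() and printf() behavior implemented above, showing character-based positions for text, byte-based positions when both arguments are blobs, and 0 when the needle is absent (all values illustrative).

#include <stdio.h>
#include <sqlite3.h>

static int print_row(void *u, int argc, char **argv, char **col){
  (void)u;
  for(int i = 0; i < argc; i++)
    printf("%s = %s\n", col[i], argv[i] ? argv[i] : "NULL");
  return 0;
}

int main(void){
  sqlite3 *db = 0;
  if( sqlite3_open(":memory:", &db)!=SQLITE_OK ) return 1;
  sqlite3_exec(db,
      "SELECT instr('checks-out','out')   AS pos_text,"   /* 8 */
      "       instr('checks-out','xyz')   AS missing,"    /* 0 */
      "       instr(X'61626364',X'6364')  AS pos_blob,"   /* 3: byte offset + 1 */
      "       printf('%05d', 42)          AS padded;",    /* '00042' */
      print_row, 0, 0);
  sqlite3_close(db);
  return 0;
}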
-*/ -static void substrFunc( - sqlite3_context *context, - int argc, - sqlite3_value **argv -){ - const unsigned char *z; - const unsigned char *z2; - int len; - int p0type; - i64 p1, p2; - int negP2 = 0; - - assert( argc==3 || argc==2 ); - if( sqlite3_value_type(argv[1])==SQLITE_NULL - || (argc==3 && sqlite3_value_type(argv[2])==SQLITE_NULL) - ){ - return; - } - p0type = sqlite3_value_type(argv[0]); - p1 = sqlite3_value_int(argv[1]); - if( p0type==SQLITE_BLOB ){ - len = sqlite3_value_bytes(argv[0]); - z = sqlite3_value_blob(argv[0]); - if( z==0 ) return; - assert( len==sqlite3_value_bytes(argv[0]) ); - }else{ - z = sqlite3_value_text(argv[0]); - if( z==0 ) return; - len = 0; - if( p1<0 ){ - for(z2=z; *z2; len++){ - SQLITE_SKIP_UTF8(z2); - } - } - } - if( argc==3 ){ - p2 = sqlite3_value_int(argv[2]); - if( p2<0 ){ - p2 = -p2; - negP2 = 1; - } - }else{ - p2 = sqlite3_context_db_handle(context)->aLimit[SQLITE_LIMIT_LENGTH]; - } - if( p1<0 ){ - p1 += len; - if( p1<0 ){ - p2 += p1; - if( p2<0 ) p2 = 0; - p1 = 0; - } - }else if( p1>0 ){ - p1--; - }else if( p2>0 ){ - p2--; - } - if( negP2 ){ - p1 -= p2; - if( p1<0 ){ - p2 += p1; - p1 = 0; - } - } - assert( p1>=0 && p2>=0 ); - if( p0type!=SQLITE_BLOB ){ - while( *z && p1 ){ - SQLITE_SKIP_UTF8(z); - p1--; - } - for(z2=z; *z2 && p2; p2--){ - SQLITE_SKIP_UTF8(z2); - } - sqlite3_result_text(context, (char*)z, (int)(z2-z), SQLITE_TRANSIENT); - }else{ - if( p1+p2>len ){ - p2 = len-p1; - if( p2<0 ) p2 = 0; - } - sqlite3_result_blob(context, (char*)&z[p1], (int)p2, SQLITE_TRANSIENT); - } -} - -/* -** Implementation of the round() function -*/ -#ifndef SQLITE_OMIT_FLOATING_POINT -static void roundFunc(sqlite3_context *context, int argc, sqlite3_value **argv){ - int n = 0; - double r; - char *zBuf; - assert( argc==1 || argc==2 ); - if( argc==2 ){ - if( SQLITE_NULL==sqlite3_value_type(argv[1]) ) return; - n = sqlite3_value_int(argv[1]); - if( n>30 ) n = 30; - if( n<0 ) n = 0; - } - if( sqlite3_value_type(argv[0])==SQLITE_NULL ) return; - r = sqlite3_value_double(argv[0]); - /* If Y==0 and X will fit in a 64-bit int, - ** handle the rounding directly, - ** otherwise use printf. - */ - if( n==0 && r>=0 && r0 ); - testcase( nByte==db->aLimit[SQLITE_LIMIT_LENGTH] ); - testcase( nByte==db->aLimit[SQLITE_LIMIT_LENGTH]+1 ); - if( nByte>db->aLimit[SQLITE_LIMIT_LENGTH] ){ - sqlite3_result_error_toobig(context); - z = 0; - }else{ - z = sqlite3Malloc((int)nByte); - if( !z ){ - sqlite3_result_error_nomem(context); - } - } - return z; -} - -/* -** Implementation of the upper() and lower() SQL functions. 
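Editor's note: the negative-index handling in substrFunc() and the digit clamping in roundFunc() above are easiest to see from SQL; a tiny illustrative check using a prepared statement follows.

#include <stdio.h>
#include <sqlite3.h>

int main(void){
  sqlite3 *db = 0;
  sqlite3_stmt *pStmt = 0;
  if( sqlite3_open(":memory:", &db)!=SQLITE_OK ) return 1;
  sqlite3_prepare_v2(db,
      "SELECT substr('checks-out', 1, 6),"   /* 'checks' */
      "       substr('checks-out', -3),"     /* 'out'    */
      "       substr('checks-out', 8, -3),"  /* 'ks-' : the 3 chars before position 8 */
      "       round(3.14159, 2);",           /* 3.14     */
      -1, &pStmt, 0);
  if( sqlite3_step(pStmt)==SQLITE_ROW ){
    for(int i = 0; i < sqlite3_column_count(pStmt); i++)
      printf("%s\n", (const char*)sqlite3_column_text(pStmt, i));
  }
  sqlite3_finalize(pStmt);
  sqlite3_close(db);
  return 0;
}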
-*/ -static void upperFunc(sqlite3_context *context, int argc, sqlite3_value **argv){ - char *z1; - const char *z2; - int i, n; - UNUSED_PARAMETER(argc); - z2 = (char*)sqlite3_value_text(argv[0]); - n = sqlite3_value_bytes(argv[0]); - /* Verify that the call to _bytes() does not invalidate the _text() pointer */ - assert( z2==(char*)sqlite3_value_text(argv[0]) ); - if( z2 ){ - z1 = contextMalloc(context, ((i64)n)+1); - if( z1 ){ - for(i=0; imatchOne; - u8 matchAll = pInfo->matchAll; - u8 matchSet = pInfo->matchSet; - u8 noCase = pInfo->noCase; - int prevEscape = 0; /* True if the previous character was 'escape' */ - - while( (c = sqlite3Utf8Read(&zPattern))!=0 ){ - if( c==matchAll && !prevEscape ){ - while( (c=sqlite3Utf8Read(&zPattern)) == matchAll - || c == matchOne ){ - if( c==matchOne && sqlite3Utf8Read(&zString)==0 ){ - return 0; - } - } - if( c==0 ){ - return 1; - }else if( c==esc ){ - c = sqlite3Utf8Read(&zPattern); - if( c==0 ){ - return 0; - } - }else if( c==matchSet ){ - assert( esc==0 ); /* This is GLOB, not LIKE */ - assert( matchSet<0x80 ); /* '[' is a single-byte character */ - while( *zString && patternCompare(&zPattern[-1],zString,pInfo,esc)==0 ){ - SQLITE_SKIP_UTF8(zString); - } - return *zString!=0; - } - while( (c2 = sqlite3Utf8Read(&zString))!=0 ){ - if( noCase ){ - GlobUpperToLower(c2); - GlobUpperToLower(c); - while( c2 != 0 && c2 != c ){ - c2 = sqlite3Utf8Read(&zString); - GlobUpperToLower(c2); - } - }else{ - while( c2 != 0 && c2 != c ){ - c2 = sqlite3Utf8Read(&zString); - } - } - if( c2==0 ) return 0; - if( patternCompare(zPattern,zString,pInfo,esc) ) return 1; - } - return 0; - }else if( c==matchOne && !prevEscape ){ - if( sqlite3Utf8Read(&zString)==0 ){ - return 0; - } - }else if( c==matchSet ){ - u32 prior_c = 0; - assert( esc==0 ); /* This only occurs for GLOB, not LIKE */ - seen = 0; - invert = 0; - c = sqlite3Utf8Read(&zString); - if( c==0 ) return 0; - c2 = sqlite3Utf8Read(&zPattern); - if( c2=='^' ){ - invert = 1; - c2 = sqlite3Utf8Read(&zPattern); - } - if( c2==']' ){ - if( c==']' ) seen = 1; - c2 = sqlite3Utf8Read(&zPattern); - } - while( c2 && c2!=']' ){ - if( c2=='-' && zPattern[0]!=']' && zPattern[0]!=0 && prior_c>0 ){ - c2 = sqlite3Utf8Read(&zPattern); - if( c>=prior_c && c<=c2 ) seen = 1; - prior_c = 0; - }else{ - if( c==c2 ){ - seen = 1; - } - prior_c = c2; - } - c2 = sqlite3Utf8Read(&zPattern); - } - if( c2==0 || (seen ^ invert)==0 ){ - return 0; - } - }else if( esc==c && !prevEscape ){ - prevEscape = 1; - }else{ - c2 = sqlite3Utf8Read(&zString); - if( noCase ){ - GlobUpperToLower(c); - GlobUpperToLower(c2); - } - if( c!=c2 ){ - return 0; - } - prevEscape = 0; - } - } - return *zString==0; -} - -/* -** The sqlite3_strglob() interface. -*/ -SQLITE_API int sqlite3_strglob(const char *zGlobPattern, const char *zString){ - return patternCompare((u8*)zGlobPattern, (u8*)zString, &globInfo, 0)==0; -} - -/* -** Count the number of times that the LIKE operator (or GLOB which is -** just a variation of LIKE) gets called. This is used for testing -** only. -*/ -#ifdef SQLITE_TEST -SQLITE_API int sqlite3_like_count = 0; -#endif - - -/* -** Implementation of the like() SQL function. This function implements -** the build-in LIKE operator. The first argument to the function is the -** pattern and the second argument is the string. So, the SQL statements: -** -** A LIKE B -** -** is implemented as like(B,A). -** -** This same function (with a different compareInfo structure) computes -** the GLOB operator. 
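Editor's note: patternCompare() above also backs the public sqlite3_strglob() entry point, so GLOB semantics (case-sensitive matching with '*', '?', and '[...]' classes) can be exercised directly from C without opening a database. A minimal sketch:

#include <stdio.h>
#include <sqlite3.h>

int main(void){
  /* sqlite3_strglob() returns 0 when the string matches the GLOB pattern
  ** and non-zero otherwise; matching is case-sensitive, unlike LIKE. */
  printf("%d\n", sqlite3_strglob("*.c", "delete.c"));      /* 0: match    */
  printf("%d\n", sqlite3_strglob("*.c", "delete.go"));     /* non-zero    */
  printf("%d\n", sqlite3_strglob("func.[ch]", "func.h"));  /* 0: match    */
  printf("%d\n", sqlite3_strglob("ABC", "abc"));           /* non-zero    */
  return 0;
}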
-*/ -static void likeFunc( - sqlite3_context *context, - int argc, - sqlite3_value **argv -){ - const unsigned char *zA, *zB; - u32 escape = 0; - int nPat; - sqlite3 *db = sqlite3_context_db_handle(context); - - zB = sqlite3_value_text(argv[0]); - zA = sqlite3_value_text(argv[1]); - - /* Limit the length of the LIKE or GLOB pattern to avoid problems - ** of deep recursion and N*N behavior in patternCompare(). - */ - nPat = sqlite3_value_bytes(argv[0]); - testcase( nPat==db->aLimit[SQLITE_LIMIT_LIKE_PATTERN_LENGTH] ); - testcase( nPat==db->aLimit[SQLITE_LIMIT_LIKE_PATTERN_LENGTH]+1 ); - if( nPat > db->aLimit[SQLITE_LIMIT_LIKE_PATTERN_LENGTH] ){ - sqlite3_result_error(context, "LIKE or GLOB pattern too complex", -1); - return; - } - assert( zB==sqlite3_value_text(argv[0]) ); /* Encoding did not change */ - - if( argc==3 ){ - /* The escape character string must consist of a single UTF-8 character. - ** Otherwise, return an error. - */ - const unsigned char *zEsc = sqlite3_value_text(argv[2]); - if( zEsc==0 ) return; - if( sqlite3Utf8CharLen((char*)zEsc, -1)!=1 ){ - sqlite3_result_error(context, - "ESCAPE expression must be a single character", -1); - return; - } - escape = sqlite3Utf8Read(&zEsc); - } - if( zA && zB ){ - struct compareInfo *pInfo = sqlite3_user_data(context); -#ifdef SQLITE_TEST - sqlite3_like_count++; -#endif - - sqlite3_result_int(context, patternCompare(zB, zA, pInfo, escape)); - } -} - -/* -** Implementation of the NULLIF(x,y) function. The result is the first -** argument if the arguments are different. The result is NULL if the -** arguments are equal to each other. -*/ -static void nullifFunc( - sqlite3_context *context, - int NotUsed, - sqlite3_value **argv -){ - CollSeq *pColl = sqlite3GetFuncCollSeq(context); - UNUSED_PARAMETER(NotUsed); - if( sqlite3MemCompare(argv[0], argv[1], pColl)!=0 ){ - sqlite3_result_value(context, argv[0]); - } -} - -/* -** Implementation of the sqlite_version() function. The result is the version -** of the SQLite library that is running. -*/ -static void versionFunc( - sqlite3_context *context, - int NotUsed, - sqlite3_value **NotUsed2 -){ - UNUSED_PARAMETER2(NotUsed, NotUsed2); - /* IMP: R-48699-48617 This function is an SQL wrapper around the - ** sqlite3_libversion() C-interface. */ - sqlite3_result_text(context, sqlite3_libversion(), -1, SQLITE_STATIC); -} - -/* -** Implementation of the sqlite_source_id() function. The result is a string -** that identifies the particular version of the source code used to build -** SQLite. -*/ -static void sourceidFunc( - sqlite3_context *context, - int NotUsed, - sqlite3_value **NotUsed2 -){ - UNUSED_PARAMETER2(NotUsed, NotUsed2); - /* IMP: R-24470-31136 This function is an SQL wrapper around the - ** sqlite3_sourceid() C interface. */ - sqlite3_result_text(context, sqlite3_sourceid(), -1, SQLITE_STATIC); -} - -/* -** Implementation of the sqlite_log() function. This is a wrapper around -** sqlite3_log(). The return value is NULL. The function exists purely for -** its side-effects. -*/ -static void errlogFunc( - sqlite3_context *context, - int argc, - sqlite3_value **argv -){ - UNUSED_PARAMETER(argc); - UNUSED_PARAMETER(context); - sqlite3_log(sqlite3_value_int(argv[0]), "%s", sqlite3_value_text(argv[1])); -} - -/* -** Implementation of the sqlite_compileoption_used() function. -** The result is an integer that identifies if the compiler option -** was used to build SQLite. 
-*/ -#ifndef SQLITE_OMIT_COMPILEOPTION_DIAGS -static void compileoptionusedFunc( - sqlite3_context *context, - int argc, - sqlite3_value **argv -){ - const char *zOptName; - assert( argc==1 ); - UNUSED_PARAMETER(argc); - /* IMP: R-39564-36305 The sqlite_compileoption_used() SQL - ** function is a wrapper around the sqlite3_compileoption_used() C/C++ - ** function. - */ - if( (zOptName = (const char*)sqlite3_value_text(argv[0]))!=0 ){ - sqlite3_result_int(context, sqlite3_compileoption_used(zOptName)); - } -} -#endif /* SQLITE_OMIT_COMPILEOPTION_DIAGS */ - -/* -** Implementation of the sqlite_compileoption_get() function. -** The result is a string that identifies the compiler options -** used to build SQLite. -*/ -#ifndef SQLITE_OMIT_COMPILEOPTION_DIAGS -static void compileoptiongetFunc( - sqlite3_context *context, - int argc, - sqlite3_value **argv -){ - int n; - assert( argc==1 ); - UNUSED_PARAMETER(argc); - /* IMP: R-04922-24076 The sqlite_compileoption_get() SQL function - ** is a wrapper around the sqlite3_compileoption_get() C/C++ function. - */ - n = sqlite3_value_int(argv[0]); - sqlite3_result_text(context, sqlite3_compileoption_get(n), -1, SQLITE_STATIC); -} -#endif /* SQLITE_OMIT_COMPILEOPTION_DIAGS */ - -/* Array for converting from half-bytes (nybbles) into ASCII hex -** digits. */ -static const char hexdigits[] = { - '0', '1', '2', '3', '4', '5', '6', '7', - '8', '9', 'A', 'B', 'C', 'D', 'E', 'F' -}; - -/* -** Implementation of the QUOTE() function. This function takes a single -** argument. If the argument is numeric, the return value is the same as -** the argument. If the argument is NULL, the return value is the string -** "NULL". Otherwise, the argument is enclosed in single quotes with -** single-quote escapes. -*/ -static void quoteFunc(sqlite3_context *context, int argc, sqlite3_value **argv){ - assert( argc==1 ); - UNUSED_PARAMETER(argc); - switch( sqlite3_value_type(argv[0]) ){ - case SQLITE_FLOAT: { - double r1, r2; - char zBuf[50]; - r1 = sqlite3_value_double(argv[0]); - sqlite3_snprintf(sizeof(zBuf), zBuf, "%!.15g", r1); - sqlite3AtoF(zBuf, &r2, 20, SQLITE_UTF8); - if( r1!=r2 ){ - sqlite3_snprintf(sizeof(zBuf), zBuf, "%!.20e", r1); - } - sqlite3_result_text(context, zBuf, -1, SQLITE_TRANSIENT); - break; - } - case SQLITE_INTEGER: { - sqlite3_result_value(context, argv[0]); - break; - } - case SQLITE_BLOB: { - char *zText = 0; - char const *zBlob = sqlite3_value_blob(argv[0]); - int nBlob = sqlite3_value_bytes(argv[0]); - assert( zBlob==sqlite3_value_blob(argv[0]) ); /* No encoding change */ - zText = (char *)contextMalloc(context, (2*(i64)nBlob)+4); - if( zText ){ - int i; - for(i=0; i>4)&0x0F]; - zText[(i*2)+3] = hexdigits[(zBlob[i])&0x0F]; - } - zText[(nBlob*2)+2] = '\''; - zText[(nBlob*2)+3] = '\0'; - zText[0] = 'X'; - zText[1] = '\''; - sqlite3_result_text(context, zText, -1, SQLITE_TRANSIENT); - sqlite3_free(zText); - } - break; - } - case SQLITE_TEXT: { - int i,j; - u64 n; - const unsigned char *zArg = sqlite3_value_text(argv[0]); - char *z; - - if( zArg==0 ) return; - for(i=0, n=0; zArg[i]; i++){ if( zArg[i]=='\'' ) n++; } - z = contextMalloc(context, ((i64)i)+((i64)n)+3); - if( z ){ - z[0] = '\''; - for(i=0, j=1; zArg[i]; i++){ - z[j++] = zArg[i]; - if( zArg[i]=='\'' ){ - z[j++] = '\''; - } - } - z[j++] = '\''; - z[j] = 0; - sqlite3_result_text(context, z, j, sqlite3_free); - } - break; - } - default: { - assert( sqlite3_value_type(argv[0])==SQLITE_NULL ); - sqlite3_result_text(context, "NULL", 4, SQLITE_STATIC); - break; - } - } -} - -/* -** The unicode() 
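Editor's note: a quick illustration of the quote() rendering rules implemented above, namely single-quote doubling for text, an X'...' hex literal for blobs, and the bare word NULL for NULL (values are arbitrary).

#include <stdio.h>
#include <sqlite3.h>

static int print_row(void *u, int argc, char **argv, char **col){
  (void)u;
  for(int i = 0; i < argc; i++)
    printf("%s = %s\n", col[i], argv[i] ? argv[i] : "NULL");
  return 0;
}

int main(void){
  sqlite3 *db = 0;
  if( sqlite3_open(":memory:", &db)!=SQLITE_OK ) return 1;
  sqlite3_exec(db,
      "SELECT quote('it''s')    AS text_q,"   /* 'it''s'   */
      "       quote(X'53514C')  AS blob_q,"   /* X'53514C' */
      "       quote(NULL)       AS null_q,"   /* NULL      */
      "       quote(3.0)        AS real_q;",  /* 3.0       */
      print_row, 0, 0);
  sqlite3_close(db);
  return 0;
}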
function. Return the integer unicode code-point value -** for the first character of the input string. -*/ -static void unicodeFunc( - sqlite3_context *context, - int argc, - sqlite3_value **argv -){ - const unsigned char *z = sqlite3_value_text(argv[0]); - (void)argc; - if( z && z[0] ) sqlite3_result_int(context, sqlite3Utf8Read(&z)); -} - -/* -** The char() function takes zero or more arguments, each of which is -** an integer. It constructs a string where each character of the string -** is the unicode character for the corresponding integer argument. -*/ -static void charFunc( - sqlite3_context *context, - int argc, - sqlite3_value **argv -){ - unsigned char *z, *zOut; - int i; - zOut = z = sqlite3_malloc( argc*4+1 ); - if( z==0 ){ - sqlite3_result_error_nomem(context); - return; - } - for(i=0; i0x10ffff ) x = 0xfffd; - c = (unsigned)(x & 0x1fffff); - if( c<0x00080 ){ - *zOut++ = (u8)(c&0xFF); - }else if( c<0x00800 ){ - *zOut++ = 0xC0 + (u8)((c>>6)&0x1F); - *zOut++ = 0x80 + (u8)(c & 0x3F); - }else if( c<0x10000 ){ - *zOut++ = 0xE0 + (u8)((c>>12)&0x0F); - *zOut++ = 0x80 + (u8)((c>>6) & 0x3F); - *zOut++ = 0x80 + (u8)(c & 0x3F); - }else{ - *zOut++ = 0xF0 + (u8)((c>>18) & 0x07); - *zOut++ = 0x80 + (u8)((c>>12) & 0x3F); - *zOut++ = 0x80 + (u8)((c>>6) & 0x3F); - *zOut++ = 0x80 + (u8)(c & 0x3F); - } \ - } - sqlite3_result_text(context, (char*)z, (int)(zOut-z), sqlite3_free); -} - -/* -** The hex() function. Interpret the argument as a blob. Return -** a hexadecimal rendering as text. -*/ -static void hexFunc( - sqlite3_context *context, - int argc, - sqlite3_value **argv -){ - int i, n; - const unsigned char *pBlob; - char *zHex, *z; - assert( argc==1 ); - UNUSED_PARAMETER(argc); - pBlob = sqlite3_value_blob(argv[0]); - n = sqlite3_value_bytes(argv[0]); - assert( pBlob==sqlite3_value_blob(argv[0]) ); /* No encoding change */ - z = zHex = contextMalloc(context, ((i64)n)*2 + 1); - if( zHex ){ - for(i=0; i>4)&0xf]; - *(z++) = hexdigits[c&0xf]; - } - *z = 0; - sqlite3_result_text(context, zHex, n*2, sqlite3_free); - } -} - -/* -** The zeroblob(N) function returns a zero-filled blob of size N bytes. -*/ -static void zeroblobFunc( - sqlite3_context *context, - int argc, - sqlite3_value **argv -){ - i64 n; - sqlite3 *db = sqlite3_context_db_handle(context); - assert( argc==1 ); - UNUSED_PARAMETER(argc); - n = sqlite3_value_int64(argv[0]); - testcase( n==db->aLimit[SQLITE_LIMIT_LENGTH] ); - testcase( n==db->aLimit[SQLITE_LIMIT_LENGTH]+1 ); - if( n>db->aLimit[SQLITE_LIMIT_LENGTH] ){ - sqlite3_result_error_toobig(context); - }else{ - sqlite3_result_zeroblob(context, (int)n); /* IMP: R-00293-64994 */ - } -} - -/* -** The replace() function. Three arguments are all strings: call -** them A, B, and C. The result is also a string which is derived -** from A by replacing every occurrence of B with C. The match -** must be exact. Collating sequences are not used. 
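Editor's note: the character and blob helpers above pair up naturally; the sketch below shows char() and unicode() round-tripping code points, hex() rendering bytes, and zeroblob() producing an N-byte blob (all values illustrative).

#include <stdio.h>
#include <sqlite3.h>

static int print_row(void *u, int argc, char **argv, char **col){
  (void)u;
  for(int i = 0; i < argc; i++)
    printf("%s = %s\n", col[i], argv[i] ? argv[i] : "NULL");
  return 0;
}

int main(void){
  sqlite3 *db = 0;
  if( sqlite3_open(":memory:", &db)!=SQLITE_OK ) return 1;
  sqlite3_exec(db,
      "SELECT char(83,81,76,105,116,101) AS word,"       /* 'SQLite' */
      "       unicode('S')               AS codepoint,"  /* 83       */
      "       hex('abc')                 AS hexed,"      /* '616263' */
      "       length(zeroblob(16))       AS nbytes;",    /* 16       */
      print_row, 0, 0);
  sqlite3_close(db);
  return 0;
}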
-*/ -static void replaceFunc( - sqlite3_context *context, - int argc, - sqlite3_value **argv -){ - const unsigned char *zStr; /* The input string A */ - const unsigned char *zPattern; /* The pattern string B */ - const unsigned char *zRep; /* The replacement string C */ - unsigned char *zOut; /* The output */ - int nStr; /* Size of zStr */ - int nPattern; /* Size of zPattern */ - int nRep; /* Size of zRep */ - i64 nOut; /* Maximum size of zOut */ - int loopLimit; /* Last zStr[] that might match zPattern[] */ - int i, j; /* Loop counters */ - - assert( argc==3 ); - UNUSED_PARAMETER(argc); - zStr = sqlite3_value_text(argv[0]); - if( zStr==0 ) return; - nStr = sqlite3_value_bytes(argv[0]); - assert( zStr==sqlite3_value_text(argv[0]) ); /* No encoding change */ - zPattern = sqlite3_value_text(argv[1]); - if( zPattern==0 ){ - assert( sqlite3_value_type(argv[1])==SQLITE_NULL - || sqlite3_context_db_handle(context)->mallocFailed ); - return; - } - if( zPattern[0]==0 ){ - assert( sqlite3_value_type(argv[1])!=SQLITE_NULL ); - sqlite3_result_value(context, argv[0]); - return; - } - nPattern = sqlite3_value_bytes(argv[1]); - assert( zPattern==sqlite3_value_text(argv[1]) ); /* No encoding change */ - zRep = sqlite3_value_text(argv[2]); - if( zRep==0 ) return; - nRep = sqlite3_value_bytes(argv[2]); - assert( zRep==sqlite3_value_text(argv[2]) ); - nOut = nStr + 1; - assert( nOutaLimit[SQLITE_LIMIT_LENGTH] ); - testcase( nOut-2==db->aLimit[SQLITE_LIMIT_LENGTH] ); - if( nOut-1>db->aLimit[SQLITE_LIMIT_LENGTH] ){ - sqlite3_result_error_toobig(context); - sqlite3_free(zOut); - return; - } - zOld = zOut; - zOut = sqlite3_realloc(zOut, (int)nOut); - if( zOut==0 ){ - sqlite3_result_error_nomem(context); - sqlite3_free(zOld); - return; - } - memcpy(&zOut[j], zRep, nRep); - j += nRep; - i += nPattern-1; - } - } - assert( j+nStr-i+1==nOut ); - memcpy(&zOut[j], &zStr[i], nStr-i); - j += nStr - i; - assert( j<=nOut ); - zOut[j] = 0; - sqlite3_result_text(context, (char*)zOut, j, sqlite3_free); -} - -/* -** Implementation of the TRIM(), LTRIM(), and RTRIM() functions. -** The userdata is 0x1 for left trim, 0x2 for right trim, 0x3 for both. 
-*/ -static void trimFunc( - sqlite3_context *context, - int argc, - sqlite3_value **argv -){ - const unsigned char *zIn; /* Input string */ - const unsigned char *zCharSet; /* Set of characters to trim */ - int nIn; /* Number of bytes in input */ - int flags; /* 1: trimleft 2: trimright 3: trim */ - int i; /* Loop counter */ - unsigned char *aLen = 0; /* Length of each character in zCharSet */ - unsigned char **azChar = 0; /* Individual characters in zCharSet */ - int nChar; /* Number of characters in zCharSet */ - - if( sqlite3_value_type(argv[0])==SQLITE_NULL ){ - return; - } - zIn = sqlite3_value_text(argv[0]); - if( zIn==0 ) return; - nIn = sqlite3_value_bytes(argv[0]); - assert( zIn==sqlite3_value_text(argv[0]) ); - if( argc==1 ){ - static const unsigned char lenOne[] = { 1 }; - static unsigned char * const azOne[] = { (u8*)" " }; - nChar = 1; - aLen = (u8*)lenOne; - azChar = (unsigned char **)azOne; - zCharSet = 0; - }else if( (zCharSet = sqlite3_value_text(argv[1]))==0 ){ - return; - }else{ - const unsigned char *z; - for(z=zCharSet, nChar=0; *z; nChar++){ - SQLITE_SKIP_UTF8(z); - } - if( nChar>0 ){ - azChar = contextMalloc(context, ((i64)nChar)*(sizeof(char*)+1)); - if( azChar==0 ){ - return; - } - aLen = (unsigned char*)&azChar[nChar]; - for(z=zCharSet, nChar=0; *z; nChar++){ - azChar[nChar] = (unsigned char *)z; - SQLITE_SKIP_UTF8(z); - aLen[nChar] = (u8)(z - azChar[nChar]); - } - } - } - if( nChar>0 ){ - flags = SQLITE_PTR_TO_INT(sqlite3_user_data(context)); - if( flags & 1 ){ - while( nIn>0 ){ - int len = 0; - for(i=0; i=nChar ) break; - zIn += len; - nIn -= len; - } - } - if( flags & 2 ){ - while( nIn>0 ){ - int len = 0; - for(i=0; i=nChar ) break; - nIn -= len; - } - } - if( zCharSet ){ - sqlite3_free(azChar); - } - } - sqlite3_result_text(context, (char*)zIn, nIn, SQLITE_TRANSIENT); -} - - -/* IMP: R-25361-16150 This function is omitted from SQLite by default. It -** is only available if the SQLITE_SOUNDEX compile-time option is used -** when SQLite is built. -*/ -#ifdef SQLITE_SOUNDEX -/* -** Compute the soundex encoding of a word. -** -** IMP: R-59782-00072 The soundex(X) function returns a string that is the -** soundex encoding of the string X. -*/ -static void soundexFunc( - sqlite3_context *context, - int argc, - sqlite3_value **argv -){ - char zResult[8]; - const u8 *zIn; - int i, j; - static const unsigned char iCode[] = { - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 1, 2, 3, 0, 1, 2, 0, 0, 2, 2, 4, 5, 5, 0, - 1, 2, 6, 2, 3, 0, 1, 0, 2, 0, 2, 0, 0, 0, 0, 0, - 0, 0, 1, 2, 3, 0, 1, 2, 0, 0, 2, 2, 4, 5, 5, 0, - 1, 2, 6, 2, 3, 0, 1, 0, 2, 0, 2, 0, 0, 0, 0, 0, - }; - assert( argc==1 ); - zIn = (u8*)sqlite3_value_text(argv[0]); - if( zIn==0 ) zIn = (u8*)""; - for(i=0; zIn[i] && !sqlite3Isalpha(zIn[i]); i++){} - if( zIn[i] ){ - u8 prevcode = iCode[zIn[i]&0x7f]; - zResult[0] = sqlite3Toupper(zIn[i]); - for(j=1; j<4 && zIn[i]; i++){ - int code = iCode[zIn[i]&0x7f]; - if( code>0 ){ - if( code!=prevcode ){ - prevcode = code; - zResult[j++] = code + '0'; - } - }else{ - prevcode = 0; - } - } - while( j<4 ){ - zResult[j++] = '0'; - } - zResult[j] = 0; - sqlite3_result_text(context, zResult, 4, SQLITE_TRANSIENT); - }else{ - /* IMP: R-64894-50321 The string "?000" is returned if the argument - ** is NULL or contains no ASCII alphabetic characters. 
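Editor's note: trimFunc() treats its optional second argument as a set of characters rather than a substring; a small illustrative check of trim(), ltrim(), and rtrim() follows.

#include <stdio.h>
#include <sqlite3.h>

static int print_row(void *u, int argc, char **argv, char **col){
  (void)u;
  for(int i = 0; i < argc; i++)
    printf("%s = %s\n", col[i], argv[i] ? argv[i] : "NULL");
  return 0;
}

int main(void){
  sqlite3 *db = 0;
  if( sqlite3_open(":memory:", &db)!=SQLITE_OK ) return 1;
  sqlite3_exec(db,
      "SELECT trim('  padded  ')     AS spaces,"   /* 'padded' */
      "       trim('xxyhiyxx','xy')  AS charset,"  /* 'hi'     */
      "       ltrim('__note','_')    AS left_t,"   /* 'note'   */
      "       rtrim('value;;',';')   AS right_t;", /* 'value'  */
      print_row, 0, 0);
  sqlite3_close(db);
  return 0;
}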
*/ - sqlite3_result_text(context, "?000", 4, SQLITE_STATIC); - } -} -#endif /* SQLITE_SOUNDEX */ - -#ifndef SQLITE_OMIT_LOAD_EXTENSION -/* -** A function that loads a shared-library extension then returns NULL. -*/ -static void loadExt(sqlite3_context *context, int argc, sqlite3_value **argv){ - const char *zFile = (const char *)sqlite3_value_text(argv[0]); - const char *zProc; - sqlite3 *db = sqlite3_context_db_handle(context); - char *zErrMsg = 0; - - if( argc==2 ){ - zProc = (const char *)sqlite3_value_text(argv[1]); - }else{ - zProc = 0; - } - if( zFile && sqlite3_load_extension(db, zFile, zProc, &zErrMsg) ){ - sqlite3_result_error(context, zErrMsg, -1); - sqlite3_free(zErrMsg); - } -} -#endif - - -/* -** An instance of the following structure holds the context of a -** sum() or avg() aggregate computation. -*/ -typedef struct SumCtx SumCtx; -struct SumCtx { - double rSum; /* Floating point sum */ - i64 iSum; /* Integer sum */ - i64 cnt; /* Number of elements summed */ - u8 overflow; /* True if integer overflow seen */ - u8 approx; /* True if non-integer value was input to the sum */ -}; - -/* -** Routines used to compute the sum, average, and total. -** -** The SUM() function follows the (broken) SQL standard which means -** that it returns NULL if it sums over no inputs. TOTAL returns -** 0.0 in that case. In addition, TOTAL always returns a float where -** SUM might return an integer if it never encounters a floating point -** value. TOTAL never fails, but SUM might through an exception if -** it overflows an integer. -*/ -static void sumStep(sqlite3_context *context, int argc, sqlite3_value **argv){ - SumCtx *p; - int type; - assert( argc==1 ); - UNUSED_PARAMETER(argc); - p = sqlite3_aggregate_context(context, sizeof(*p)); - type = sqlite3_value_numeric_type(argv[0]); - if( p && type!=SQLITE_NULL ){ - p->cnt++; - if( type==SQLITE_INTEGER ){ - i64 v = sqlite3_value_int64(argv[0]); - p->rSum += v; - if( (p->approx|p->overflow)==0 && sqlite3AddInt64(&p->iSum, v) ){ - p->overflow = 1; - } - }else{ - p->rSum += sqlite3_value_double(argv[0]); - p->approx = 1; - } - } -} -static void sumFinalize(sqlite3_context *context){ - SumCtx *p; - p = sqlite3_aggregate_context(context, 0); - if( p && p->cnt>0 ){ - if( p->overflow ){ - sqlite3_result_error(context,"integer overflow",-1); - }else if( p->approx ){ - sqlite3_result_double(context, p->rSum); - }else{ - sqlite3_result_int64(context, p->iSum); - } - } -} -static void avgFinalize(sqlite3_context *context){ - SumCtx *p; - p = sqlite3_aggregate_context(context, 0); - if( p && p->cnt>0 ){ - sqlite3_result_double(context, p->rSum/(double)p->cnt); - } -} -static void totalFinalize(sqlite3_context *context){ - SumCtx *p; - p = sqlite3_aggregate_context(context, 0); - /* (double)0 In case of SQLITE_OMIT_FLOATING_POINT... */ - sqlite3_result_double(context, p ? p->rSum : (double)0); -} - -/* -** The following structure keeps track of state information for the -** count() aggregate function. -*/ -typedef struct CountCtx CountCtx; -struct CountCtx { - i64 n; -}; - -/* -** Routines to implement the count() aggregate function. -*/ -static void countStep(sqlite3_context *context, int argc, sqlite3_value **argv){ - CountCtx *p; - p = sqlite3_aggregate_context(context, sizeof(*p)); - if( (argc==0 || SQLITE_NULL!=sqlite3_value_type(argv[0])) && p ){ - p->n++; - } - -#ifndef SQLITE_OMIT_DEPRECATED - /* The sqlite3_aggregate_count() function is deprecated. 
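Editor's note: the sum()/total() distinction spelled out in the comment above (NULL versus 0.0 over an empty input, integer versus floating-point results) is easy to confirm; the table name t below is made up for illustration.

#include <stdio.h>
#include <sqlite3.h>

static int print_row(void *u, int argc, char **argv, char **col){
  (void)u;
  for(int i = 0; i < argc; i++)
    printf("%s = %s\n", col[i], argv[i] ? argv[i] : "NULL");
  return 0;
}

int main(void){
  sqlite3 *db = 0;
  if( sqlite3_open(":memory:", &db)!=SQLITE_OK ) return 1;
  sqlite3_exec(db, "CREATE TABLE t(x INTEGER);", 0, 0, 0);
  /* Over zero rows: sum() follows the SQL standard and yields NULL,
  ** while total() yields 0.0 instead. */
  sqlite3_exec(db, "SELECT sum(x) AS s, total(x) AS tot FROM t;",
               print_row, 0, 0);
  sqlite3_exec(db, "INSERT INTO t VALUES(1),(2),(3);", 0, 0, 0);
  /* With integer inputs sum() stays integer (6); total() is always real (6.0). */
  sqlite3_exec(db, "SELECT sum(x) AS s, total(x) AS tot FROM t;",
               print_row, 0, 0);
  sqlite3_close(db);
  return 0;
}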
But just to make - ** sure it still operates correctly, verify that its count agrees with our - ** internal count when using count(*) and when the total count can be - ** expressed as a 32-bit integer. */ - assert( argc==1 || p==0 || p->n>0x7fffffff - || p->n==sqlite3_aggregate_count(context) ); -#endif -} -static void countFinalize(sqlite3_context *context){ - CountCtx *p; - p = sqlite3_aggregate_context(context, 0); - sqlite3_result_int64(context, p ? p->n : 0); -} - -/* -** Routines to implement min() and max() aggregate functions. -*/ -static void minmaxStep( - sqlite3_context *context, - int NotUsed, - sqlite3_value **argv -){ - Mem *pArg = (Mem *)argv[0]; - Mem *pBest; - UNUSED_PARAMETER(NotUsed); - - pBest = (Mem *)sqlite3_aggregate_context(context, sizeof(*pBest)); - if( !pBest ) return; - - if( sqlite3_value_type(argv[0])==SQLITE_NULL ){ - if( pBest->flags ) sqlite3SkipAccumulatorLoad(context); - }else if( pBest->flags ){ - int max; - int cmp; - CollSeq *pColl = sqlite3GetFuncCollSeq(context); - /* This step function is used for both the min() and max() aggregates, - ** the only difference between the two being that the sense of the - ** comparison is inverted. For the max() aggregate, the - ** sqlite3_user_data() function returns (void *)-1. For min() it - ** returns (void *)db, where db is the sqlite3* database pointer. - ** Therefore the next statement sets variable 'max' to 1 for the max() - ** aggregate, or 0 for min(). - */ - max = sqlite3_user_data(context)!=0; - cmp = sqlite3MemCompare(pBest, pArg, pColl); - if( (max && cmp<0) || (!max && cmp>0) ){ - sqlite3VdbeMemCopy(pBest, pArg); - }else{ - sqlite3SkipAccumulatorLoad(context); - } - }else{ - sqlite3VdbeMemCopy(pBest, pArg); - } -} -static void minMaxFinalize(sqlite3_context *context){ - sqlite3_value *pRes; - pRes = (sqlite3_value *)sqlite3_aggregate_context(context, 0); - if( pRes ){ - if( pRes->flags ){ - sqlite3_result_value(context, pRes); - } - sqlite3VdbeMemRelease(pRes); - } -} - -/* -** group_concat(EXPR, ?SEPARATOR?) -*/ -static void groupConcatStep( - sqlite3_context *context, - int argc, - sqlite3_value **argv -){ - const char *zVal; - StrAccum *pAccum; - const char *zSep; - int nVal, nSep; - assert( argc==1 || argc==2 ); - if( sqlite3_value_type(argv[0])==SQLITE_NULL ) return; - pAccum = (StrAccum*)sqlite3_aggregate_context(context, sizeof(*pAccum)); - - if( pAccum ){ - sqlite3 *db = sqlite3_context_db_handle(context); - int firstTerm = pAccum->useMalloc==0; - pAccum->useMalloc = 2; - pAccum->mxAlloc = db->aLimit[SQLITE_LIMIT_LENGTH]; - if( !firstTerm ){ - if( argc==2 ){ - zSep = (char*)sqlite3_value_text(argv[1]); - nSep = sqlite3_value_bytes(argv[1]); - }else{ - zSep = ","; - nSep = 1; - } - if( nSep ) sqlite3StrAccumAppend(pAccum, zSep, nSep); - } - zVal = (char*)sqlite3_value_text(argv[0]); - nVal = sqlite3_value_bytes(argv[0]); - if( zVal ) sqlite3StrAccumAppend(pAccum, zVal, nVal); - } -} -static void groupConcatFinalize(sqlite3_context *context){ - StrAccum *pAccum; - pAccum = sqlite3_aggregate_context(context, 0); - if( pAccum ){ - if( pAccum->accError==STRACCUM_TOOBIG ){ - sqlite3_result_error_toobig(context); - }else if( pAccum->accError==STRACCUM_NOMEM ){ - sqlite3_result_error_nomem(context); - }else{ - sqlite3_result_text(context, sqlite3StrAccumFinish(pAccum), -1, - sqlite3_free); - } - } -} - -/* -** This routine does per-connection function registration. Most -** of the built-in functions above are part of the global function set. -** This routine only deals with those that are not global. 
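Editor's note: groupConcatStep() only inserts the separator before the second and later terms, which is why a single-row group produces no separator at all. A small illustrative example, with a made-up table name names:

#include <stdio.h>
#include <sqlite3.h>

static int print_row(void *u, int argc, char **argv, char **col){
  (void)u;
  for(int i = 0; i < argc; i++)
    printf("%s = %s\n", col[i], argv[i] ? argv[i] : "NULL");
  return 0;
}

int main(void){
  sqlite3 *db = 0;
  if( sqlite3_open(":memory:", &db)!=SQLITE_OK ) return 1;
  sqlite3_exec(db, "CREATE TABLE names(n TEXT);"
                   "INSERT INTO names VALUES('ann'),('bob'),('cai');", 0, 0, 0);
  /* The default separator is ','; the optional second argument overrides it. */
  sqlite3_exec(db,
      "SELECT group_concat(n)        AS dflt,"    /* ann,bob,cai     */
      "       group_concat(n, ' | ') AS custom"   /* ann | bob | cai */
      " FROM names;",
      print_row, 0, 0);
  sqlite3_close(db);
  return 0;
}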
-*/ -SQLITE_PRIVATE void sqlite3RegisterBuiltinFunctions(sqlite3 *db){ - int rc = sqlite3_overload_function(db, "MATCH", 2); - assert( rc==SQLITE_NOMEM || rc==SQLITE_OK ); - if( rc==SQLITE_NOMEM ){ - db->mallocFailed = 1; - } -} - -/* -** Set the LIKEOPT flag on the 2-argument function with the given name. -*/ -static void setLikeOptFlag(sqlite3 *db, const char *zName, u8 flagVal){ - FuncDef *pDef; - pDef = sqlite3FindFunction(db, zName, sqlite3Strlen30(zName), - 2, SQLITE_UTF8, 0); - if( ALWAYS(pDef) ){ - pDef->funcFlags |= flagVal; - } -} - -/* -** Register the built-in LIKE and GLOB functions. The caseSensitive -** parameter determines whether or not the LIKE operator is case -** sensitive. GLOB is always case sensitive. -*/ -SQLITE_PRIVATE void sqlite3RegisterLikeFunctions(sqlite3 *db, int caseSensitive){ - struct compareInfo *pInfo; - if( caseSensitive ){ - pInfo = (struct compareInfo*)&likeInfoAlt; - }else{ - pInfo = (struct compareInfo*)&likeInfoNorm; - } - sqlite3CreateFunc(db, "like", 2, SQLITE_UTF8, pInfo, likeFunc, 0, 0, 0); - sqlite3CreateFunc(db, "like", 3, SQLITE_UTF8, pInfo, likeFunc, 0, 0, 0); - sqlite3CreateFunc(db, "glob", 2, SQLITE_UTF8, - (struct compareInfo*)&globInfo, likeFunc, 0, 0, 0); - setLikeOptFlag(db, "glob", SQLITE_FUNC_LIKE | SQLITE_FUNC_CASE); - setLikeOptFlag(db, "like", - caseSensitive ? (SQLITE_FUNC_LIKE | SQLITE_FUNC_CASE) : SQLITE_FUNC_LIKE); -} - -/* -** pExpr points to an expression which implements a function. If -** it is appropriate to apply the LIKE optimization to that function -** then set aWc[0] through aWc[2] to the wildcard characters and -** return TRUE. If the function is not a LIKE-style function then -** return FALSE. -*/ -SQLITE_PRIVATE int sqlite3IsLikeFunction(sqlite3 *db, Expr *pExpr, int *pIsNocase, char *aWc){ - FuncDef *pDef; - if( pExpr->op!=TK_FUNCTION - || !pExpr->x.pList - || pExpr->x.pList->nExpr!=2 - ){ - return 0; - } - assert( !ExprHasProperty(pExpr, EP_xIsSelect) ); - pDef = sqlite3FindFunction(db, pExpr->u.zToken, - sqlite3Strlen30(pExpr->u.zToken), - 2, SQLITE_UTF8, 0); - if( NEVER(pDef==0) || (pDef->funcFlags & SQLITE_FUNC_LIKE)==0 ){ - return 0; - } - - /* The memcpy() statement assumes that the wildcard characters are - ** the first three statements in the compareInfo structure. The - ** asserts() that follow verify that assumption - */ - memcpy(aWc, pDef->pUserData, 3); - assert( (char*)&likeInfoAlt == (char*)&likeInfoAlt.matchAll ); - assert( &((char*)&likeInfoAlt)[1] == (char*)&likeInfoAlt.matchOne ); - assert( &((char*)&likeInfoAlt)[2] == (char*)&likeInfoAlt.matchSet ); - *pIsNocase = (pDef->funcFlags & SQLITE_FUNC_CASE)==0; - return 1; -} - -/* -** All all of the FuncDef structures in the aBuiltinFunc[] array above -** to the global function hash table. This occurs at start-time (as -** a consequence of calling sqlite3_initialize()). -** -** After this routine runs -*/ -SQLITE_PRIVATE void sqlite3RegisterGlobalFunctions(void){ - /* - ** The following array holds FuncDef structures for all of the functions - ** defined in this file. - ** - ** The array cannot be constant since changes are made to the - ** FuncDef.pHash elements at start-time. The elements of this array - ** are read-only after initialization is complete. 
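Editor's note: the FuncDef tables above are how the built-ins reach the global function hash; an application adds its own scalar through the public sqlite3_create_function() route instead. A minimal sketch registering a hypothetical cube() function (the name and body are made up for illustration):

#include <stdio.h>
#include <sqlite3.h>

/* xFunc callback: compute x*x*x for a single numeric argument. */
static void cubeFunc(sqlite3_context *ctx, int argc, sqlite3_value **argv){
  (void)argc;
  if( sqlite3_value_type(argv[0])==SQLITE_NULL ){
    sqlite3_result_null(ctx);      /* propagate NULL like the built-ins do */
  }else{
    double v = sqlite3_value_double(argv[0]);
    sqlite3_result_double(ctx, v*v*v);
  }
}

int main(void){
  sqlite3 *db = 0;
  if( sqlite3_open(":memory:", &db)!=SQLITE_OK ) return 1;
  /* One required argument, UTF-8 text representation, no aggregate callbacks. */
  sqlite3_create_function(db, "cube", 1, SQLITE_UTF8, 0, cubeFunc, 0, 0);
  sqlite3_exec(db, "SELECT cube(3);", 0, 0, 0);   /* evaluates to 27.0 */
  sqlite3_close(db);
  return 0;
}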
- */ - static SQLITE_WSD FuncDef aBuiltinFunc[] = { - FUNCTION(ltrim, 1, 1, 0, trimFunc ), - FUNCTION(ltrim, 2, 1, 0, trimFunc ), - FUNCTION(rtrim, 1, 2, 0, trimFunc ), - FUNCTION(rtrim, 2, 2, 0, trimFunc ), - FUNCTION(trim, 1, 3, 0, trimFunc ), - FUNCTION(trim, 2, 3, 0, trimFunc ), - FUNCTION(min, -1, 0, 1, minmaxFunc ), - FUNCTION(min, 0, 0, 1, 0 ), - AGGREGATE(min, 1, 0, 1, minmaxStep, minMaxFinalize ), - FUNCTION(max, -1, 1, 1, minmaxFunc ), - FUNCTION(max, 0, 1, 1, 0 ), - AGGREGATE(max, 1, 1, 1, minmaxStep, minMaxFinalize ), - FUNCTION2(typeof, 1, 0, 0, typeofFunc, SQLITE_FUNC_TYPEOF), - FUNCTION2(length, 1, 0, 0, lengthFunc, SQLITE_FUNC_LENGTH), - FUNCTION(instr, 2, 0, 0, instrFunc ), - FUNCTION(substr, 2, 0, 0, substrFunc ), - FUNCTION(substr, 3, 0, 0, substrFunc ), - FUNCTION(printf, -1, 0, 0, printfFunc ), - FUNCTION(unicode, 1, 0, 0, unicodeFunc ), - FUNCTION(char, -1, 0, 0, charFunc ), - FUNCTION(abs, 1, 0, 0, absFunc ), -#ifndef SQLITE_OMIT_FLOATING_POINT - FUNCTION(round, 1, 0, 0, roundFunc ), - FUNCTION(round, 2, 0, 0, roundFunc ), -#endif - FUNCTION(upper, 1, 0, 0, upperFunc ), - FUNCTION(lower, 1, 0, 0, lowerFunc ), - FUNCTION(coalesce, 1, 0, 0, 0 ), - FUNCTION(coalesce, 0, 0, 0, 0 ), - FUNCTION2(coalesce, -1, 0, 0, noopFunc, SQLITE_FUNC_COALESCE), - FUNCTION(hex, 1, 0, 0, hexFunc ), - FUNCTION2(ifnull, 2, 0, 0, noopFunc, SQLITE_FUNC_COALESCE), - FUNCTION2(unlikely, 1, 0, 0, noopFunc, SQLITE_FUNC_UNLIKELY), - FUNCTION2(likelihood, 2, 0, 0, noopFunc, SQLITE_FUNC_UNLIKELY), - VFUNCTION(random, 0, 0, 0, randomFunc ), - VFUNCTION(randomblob, 1, 0, 0, randomBlob ), - FUNCTION(nullif, 2, 0, 1, nullifFunc ), - FUNCTION(sqlite_version, 0, 0, 0, versionFunc ), - FUNCTION(sqlite_source_id, 0, 0, 0, sourceidFunc ), - FUNCTION(sqlite_log, 2, 0, 0, errlogFunc ), -#ifndef SQLITE_OMIT_COMPILEOPTION_DIAGS - FUNCTION(sqlite_compileoption_used,1, 0, 0, compileoptionusedFunc ), - FUNCTION(sqlite_compileoption_get, 1, 0, 0, compileoptiongetFunc ), -#endif /* SQLITE_OMIT_COMPILEOPTION_DIAGS */ - FUNCTION(quote, 1, 0, 0, quoteFunc ), - VFUNCTION(last_insert_rowid, 0, 0, 0, last_insert_rowid), - VFUNCTION(changes, 0, 0, 0, changes ), - VFUNCTION(total_changes, 0, 0, 0, total_changes ), - FUNCTION(replace, 3, 0, 0, replaceFunc ), - FUNCTION(zeroblob, 1, 0, 0, zeroblobFunc ), - #ifdef SQLITE_SOUNDEX - FUNCTION(soundex, 1, 0, 0, soundexFunc ), - #endif - #ifndef SQLITE_OMIT_LOAD_EXTENSION - FUNCTION(load_extension, 1, 0, 0, loadExt ), - FUNCTION(load_extension, 2, 0, 0, loadExt ), - #endif - AGGREGATE(sum, 1, 0, 0, sumStep, sumFinalize ), - AGGREGATE(total, 1, 0, 0, sumStep, totalFinalize ), - AGGREGATE(avg, 1, 0, 0, sumStep, avgFinalize ), - /* AGGREGATE(count, 0, 0, 0, countStep, countFinalize ), */ - {0,SQLITE_UTF8|SQLITE_FUNC_COUNT,0,0,0,countStep,countFinalize,"count",0,0}, - AGGREGATE(count, 1, 0, 0, countStep, countFinalize ), - AGGREGATE(group_concat, 1, 0, 0, groupConcatStep, groupConcatFinalize), - AGGREGATE(group_concat, 2, 0, 0, groupConcatStep, groupConcatFinalize), - - LIKEFUNC(glob, 2, &globInfo, SQLITE_FUNC_LIKE|SQLITE_FUNC_CASE), - #ifdef SQLITE_CASE_SENSITIVE_LIKE - LIKEFUNC(like, 2, &likeInfoAlt, SQLITE_FUNC_LIKE|SQLITE_FUNC_CASE), - LIKEFUNC(like, 3, &likeInfoAlt, SQLITE_FUNC_LIKE|SQLITE_FUNC_CASE), - #else - LIKEFUNC(like, 2, &likeInfoNorm, SQLITE_FUNC_LIKE), - LIKEFUNC(like, 3, &likeInfoNorm, SQLITE_FUNC_LIKE), - #endif - }; - - int i; - FuncDefHash *pHash = &GLOBAL(FuncDefHash, sqlite3GlobalFunctions); - FuncDef *aFunc = (FuncDef*)&GLOBAL(FuncDef, aBuiltinFunc); - - for(i=0; 
idb->mallocFailed flag is set. -*/ -SQLITE_PRIVATE int sqlite3FkLocateIndex( - Parse *pParse, /* Parse context to store any error in */ - Table *pParent, /* Parent table of FK constraint pFKey */ - FKey *pFKey, /* Foreign key to find index for */ - Index **ppIdx, /* OUT: Unique index on parent table */ - int **paiCol /* OUT: Map of index columns in pFKey */ -){ - Index *pIdx = 0; /* Value to return via *ppIdx */ - int *aiCol = 0; /* Value to return via *paiCol */ - int nCol = pFKey->nCol; /* Number of columns in parent key */ - char *zKey = pFKey->aCol[0].zCol; /* Name of left-most parent key column */ - - /* The caller is responsible for zeroing output parameters. */ - assert( ppIdx && *ppIdx==0 ); - assert( !paiCol || *paiCol==0 ); - assert( pParse ); - - /* If this is a non-composite (single column) foreign key, check if it - ** maps to the INTEGER PRIMARY KEY of table pParent. If so, leave *ppIdx - ** and *paiCol set to zero and return early. - ** - ** Otherwise, for a composite foreign key (more than one column), allocate - ** space for the aiCol array (returned via output parameter *paiCol). - ** Non-composite foreign keys do not require the aiCol array. - */ - if( nCol==1 ){ - /* The FK maps to the IPK if any of the following are true: - ** - ** 1) There is an INTEGER PRIMARY KEY column and the FK is implicitly - ** mapped to the primary key of table pParent, or - ** 2) The FK is explicitly mapped to a column declared as INTEGER - ** PRIMARY KEY. - */ - if( pParent->iPKey>=0 ){ - if( !zKey ) return 0; - if( !sqlite3StrICmp(pParent->aCol[pParent->iPKey].zName, zKey) ) return 0; - } - }else if( paiCol ){ - assert( nCol>1 ); - aiCol = (int *)sqlite3DbMallocRaw(pParse->db, nCol*sizeof(int)); - if( !aiCol ) return 1; - *paiCol = aiCol; - } - - for(pIdx=pParent->pIndex; pIdx; pIdx=pIdx->pNext){ - if( pIdx->nKeyCol==nCol && pIdx->onError!=OE_None ){ - /* pIdx is a UNIQUE index (or a PRIMARY KEY) and has the right number - ** of columns. If each indexed column corresponds to a foreign key - ** column of pFKey, then this index is a winner. */ - - if( zKey==0 ){ - /* If zKey is NULL, then this foreign key is implicitly mapped to - ** the PRIMARY KEY of table pParent. The PRIMARY KEY index may be - ** identified by the test. */ - if( IsPrimaryKeyIndex(pIdx) ){ - if( aiCol ){ - int i; - for(i=0; iaCol[i].iFrom; - } - break; - } - }else{ - /* If zKey is non-NULL, then this foreign key was declared to - ** map to an explicit list of columns in table pParent. Check if this - ** index matches those columns. Also, check that the index uses - ** the default collation sequences for each column. */ - int i, j; - for(i=0; iaiColumn[i]; /* Index of column in parent tbl */ - char *zDfltColl; /* Def. collation for column */ - char *zIdxCol; /* Name of indexed column */ - - /* If the index uses a collation sequence that is different from - ** the default collation sequence for the column, this index is - ** unusable. Bail out early in this case. 
*/ - zDfltColl = pParent->aCol[iCol].zColl; - if( !zDfltColl ){ - zDfltColl = "BINARY"; - } - if( sqlite3StrICmp(pIdx->azColl[i], zDfltColl) ) break; - - zIdxCol = pParent->aCol[iCol].zName; - for(j=0; jaCol[j].zCol, zIdxCol)==0 ){ - if( aiCol ) aiCol[i] = pFKey->aCol[j].iFrom; - break; - } - } - if( j==nCol ) break; - } - if( i==nCol ) break; /* pIdx is usable */ - } - } - } - - if( !pIdx ){ - if( !pParse->disableTriggers ){ - sqlite3ErrorMsg(pParse, - "foreign key mismatch - \"%w\" referencing \"%w\"", - pFKey->pFrom->zName, pFKey->zTo); - } - sqlite3DbFree(pParse->db, aiCol); - return 1; - } - - *ppIdx = pIdx; - return 0; -} - -/* -** This function is called when a row is inserted into or deleted from the -** child table of foreign key constraint pFKey. If an SQL UPDATE is executed -** on the child table of pFKey, this function is invoked twice for each row -** affected - once to "delete" the old row, and then again to "insert" the -** new row. -** -** Each time it is called, this function generates VDBE code to locate the -** row in the parent table that corresponds to the row being inserted into -** or deleted from the child table. If the parent row can be found, no -** special action is taken. Otherwise, if the parent row can *not* be -** found in the parent table: -** -** Operation | FK type | Action taken -** -------------------------------------------------------------------------- -** INSERT immediate Increment the "immediate constraint counter". -** -** DELETE immediate Decrement the "immediate constraint counter". -** -** INSERT deferred Increment the "deferred constraint counter". -** -** DELETE deferred Decrement the "deferred constraint counter". -** -** These operations are identified in the comment at the top of this file -** (fkey.c) as "I.1" and "D.1". -*/ -static void fkLookupParent( - Parse *pParse, /* Parse context */ - int iDb, /* Index of database housing pTab */ - Table *pTab, /* Parent table of FK pFKey */ - Index *pIdx, /* Unique index on parent key columns in pTab */ - FKey *pFKey, /* Foreign key constraint */ - int *aiCol, /* Map from parent key columns to child table columns */ - int regData, /* Address of array containing child table row */ - int nIncr, /* Increment constraint counter by this */ - int isIgnore /* If true, pretend pTab contains all NULL values */ -){ - int i; /* Iterator variable */ - Vdbe *v = sqlite3GetVdbe(pParse); /* Vdbe to add code to */ - int iCur = pParse->nTab - 1; /* Cursor number to use */ - int iOk = sqlite3VdbeMakeLabel(v); /* jump here if parent key found */ - - /* If nIncr is less than zero, then check at runtime if there are any - ** outstanding constraints to resolve. If there are not, there is no need - ** to check if deleting this row resolves any outstanding violations. - ** - ** Check if any of the key columns in the child table row are NULL. If - ** any are, then the constraint is considered satisfied. No need to - ** search for a matching row in the parent table. */ - if( nIncr<0 ){ - sqlite3VdbeAddOp2(v, OP_FkIfZero, pFKey->isDeferred, iOk); - VdbeCoverage(v); - } - for(i=0; inCol; i++){ - int iReg = aiCol[i] + regData + 1; - sqlite3VdbeAddOp2(v, OP_IsNull, iReg, iOk); VdbeCoverage(v); - } - - if( isIgnore==0 ){ - if( pIdx==0 ){ - /* If pIdx is NULL, then the parent key is the INTEGER PRIMARY KEY - ** column of the parent table (table pTab). */ - int iMustBeInt; /* Address of MustBeInt instruction */ - int regTemp = sqlite3GetTempReg(pParse); - - /* Invoke MustBeInt to coerce the child key value to an integer (i.e. 
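Editor's note: the "foreign key mismatch" error raised above fires when the referenced parent columns are covered by neither the INTEGER PRIMARY KEY nor a UNIQUE index. The sketch below (table names parent/child are made up) reproduces the error and notes the fix.

#include <stdio.h>
#include <sqlite3.h>

int main(void){
  sqlite3 *db = 0;
  char *zErr = 0;
  if( sqlite3_open(":memory:", &db)!=SQLITE_OK ) return 1;
  sqlite3_exec(db, "PRAGMA foreign_keys=ON;", 0, 0, 0);
  /* parent.a is neither the INTEGER PRIMARY KEY nor UNIQUE, so no usable
  ** parent-key index exists for the constraint declared on child.x. */
  sqlite3_exec(db, "CREATE TABLE parent(a, b);"
                   "CREATE TABLE child(x REFERENCES parent(a));", 0, 0, 0);
  if( sqlite3_exec(db, "INSERT INTO child VALUES(1);", 0, 0, &zErr)!=SQLITE_OK ){
    /* expected: foreign key mismatch - "child" referencing "parent" */
    fprintf(stderr, "%s\n", zErr);
    sqlite3_free(zErr);
  }
  /* Declaring the parent key UNIQUE (or PRIMARY KEY) resolves the mismatch. */
  sqlite3_exec(db, "CREATE UNIQUE INDEX parent_a ON parent(a);", 0, 0, 0);
  sqlite3_close(db);
  return 0;
}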
- ** apply the affinity of the parent key). If this fails, then there - ** is no matching parent key. Before using MustBeInt, make a copy of - ** the value. Otherwise, the value inserted into the child key column - ** will have INTEGER affinity applied to it, which may not be correct. */ - sqlite3VdbeAddOp2(v, OP_SCopy, aiCol[0]+1+regData, regTemp); - iMustBeInt = sqlite3VdbeAddOp2(v, OP_MustBeInt, regTemp, 0); - VdbeCoverage(v); - - /* If the parent table is the same as the child table, and we are about - ** to increment the constraint-counter (i.e. this is an INSERT operation), - ** then check if the row being inserted matches itself. If so, do not - ** increment the constraint-counter. */ - if( pTab==pFKey->pFrom && nIncr==1 ){ - sqlite3VdbeAddOp3(v, OP_Eq, regData, iOk, regTemp); VdbeCoverage(v); - sqlite3VdbeChangeP5(v, SQLITE_NOTNULL); - } - - sqlite3OpenTable(pParse, iCur, iDb, pTab, OP_OpenRead); - sqlite3VdbeAddOp3(v, OP_NotExists, iCur, 0, regTemp); VdbeCoverage(v); - sqlite3VdbeAddOp2(v, OP_Goto, 0, iOk); - sqlite3VdbeJumpHere(v, sqlite3VdbeCurrentAddr(v)-2); - sqlite3VdbeJumpHere(v, iMustBeInt); - sqlite3ReleaseTempReg(pParse, regTemp); - }else{ - int nCol = pFKey->nCol; - int regTemp = sqlite3GetTempRange(pParse, nCol); - int regRec = sqlite3GetTempReg(pParse); - - sqlite3VdbeAddOp3(v, OP_OpenRead, iCur, pIdx->tnum, iDb); - sqlite3VdbeSetP4KeyInfo(pParse, pIdx); - for(i=0; ipFrom && nIncr==1 ){ - int iJump = sqlite3VdbeCurrentAddr(v) + nCol + 1; - for(i=0; iaiColumn[i]+1+regData; - assert( aiCol[i]!=pTab->iPKey ); - if( pIdx->aiColumn[i]==pTab->iPKey ){ - /* The parent key is a composite key that includes the IPK column */ - iParent = regData; - } - sqlite3VdbeAddOp3(v, OP_Ne, iChild, iJump, iParent); VdbeCoverage(v); - sqlite3VdbeChangeP5(v, SQLITE_JUMPIFNULL); - } - sqlite3VdbeAddOp2(v, OP_Goto, 0, iOk); - } - - sqlite3VdbeAddOp4(v, OP_MakeRecord, regTemp, nCol, regRec, - sqlite3IndexAffinityStr(v,pIdx), nCol); - sqlite3VdbeAddOp4Int(v, OP_Found, iCur, iOk, regRec, 0); VdbeCoverage(v); - - sqlite3ReleaseTempReg(pParse, regRec); - sqlite3ReleaseTempRange(pParse, regTemp, nCol); - } - } - - if( !pFKey->isDeferred && !(pParse->db->flags & SQLITE_DeferFKs) - && !pParse->pToplevel - && !pParse->isMultiWrite - ){ - /* Special case: If this is an INSERT statement that will insert exactly - ** one row into the table, raise a constraint immediately instead of - ** incrementing a counter. This is necessary as the VM code is being - ** generated for will not open a statement transaction. */ - assert( nIncr==1 ); - sqlite3HaltConstraint(pParse, SQLITE_CONSTRAINT_FOREIGNKEY, - OE_Abort, 0, P4_STATIC, P5_ConstraintFK); - }else{ - if( nIncr>0 && pFKey->isDeferred==0 ){ - sqlite3ParseToplevel(pParse)->mayAbort = 1; - } - sqlite3VdbeAddOp2(v, OP_FkCounter, pFKey->isDeferred, nIncr); - } - - sqlite3VdbeResolveLabel(v, iOk); - sqlite3VdbeAddOp1(v, OP_Close, iCur); -} - - -/* -** Return an Expr object that refers to a memory register corresponding -** to column iCol of table pTab. -** -** regBase is the first of an array of register that contains the data -** for pTab. regBase itself holds the rowid. regBase+1 holds the first -** column. regBase+2 holds the second column, and so forth. -*/ -static Expr *exprTableRegister( - Parse *pParse, /* Parsing and code generating context */ - Table *pTab, /* The table whose content is at r[regBase]... 
*/ - int regBase, /* Contents of table pTab */ - i16 iCol /* Which column of pTab is desired */ -){ - Expr *pExpr; - Column *pCol; - const char *zColl; - sqlite3 *db = pParse->db; - - pExpr = sqlite3Expr(db, TK_REGISTER, 0); - if( pExpr ){ - if( iCol>=0 && iCol!=pTab->iPKey ){ - pCol = &pTab->aCol[iCol]; - pExpr->iTable = regBase + iCol + 1; - pExpr->affinity = pCol->affinity; - zColl = pCol->zColl; - if( zColl==0 ) zColl = db->pDfltColl->zName; - pExpr = sqlite3ExprAddCollateString(pParse, pExpr, zColl); - }else{ - pExpr->iTable = regBase; - pExpr->affinity = SQLITE_AFF_INTEGER; - } - } - return pExpr; -} - -/* -** Return an Expr object that refers to column iCol of table pTab which -** has cursor iCur. -*/ -static Expr *exprTableColumn( - sqlite3 *db, /* The database connection */ - Table *pTab, /* The table whose column is desired */ - int iCursor, /* The open cursor on the table */ - i16 iCol /* The column that is wanted */ -){ - Expr *pExpr = sqlite3Expr(db, TK_COLUMN, 0); - if( pExpr ){ - pExpr->pTab = pTab; - pExpr->iTable = iCursor; - pExpr->iColumn = iCol; - } - return pExpr; -} - -/* -** This function is called to generate code executed when a row is deleted -** from the parent table of foreign key constraint pFKey and, if pFKey is -** deferred, when a row is inserted into the same table. When generating -** code for an SQL UPDATE operation, this function may be called twice - -** once to "delete" the old row and once to "insert" the new row. -** -** The code generated by this function scans through the rows in the child -** table that correspond to the parent table row being deleted or inserted. -** For each child row found, one of the following actions is taken: -** -** Operation | FK type | Action taken -** -------------------------------------------------------------------------- -** DELETE immediate Increment the "immediate constraint counter". -** Or, if the ON (UPDATE|DELETE) action is RESTRICT, -** throw a "FOREIGN KEY constraint failed" exception. -** -** INSERT immediate Decrement the "immediate constraint counter". -** -** DELETE deferred Increment the "deferred constraint counter". -** Or, if the ON (UPDATE|DELETE) action is RESTRICT, -** throw a "FOREIGN KEY constraint failed" exception. -** -** INSERT deferred Decrement the "deferred constraint counter". -** -** These operations are identified in the comment at the top of this file -** (fkey.c) as "I.2" and "D.2". 
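The counter bookkeeping described in the two tables above amounts to a pair of counters: one checked at the end of each statement, one only when the transaction commits. A minimal standalone sketch of that idea (not part of sqlite3.c; FkCounters and its helper names are hypothetical):

#include <stdio.h>

typedef struct FkCounters {
  long immediate;    /* checked at the end of each statement */
  long deferred;     /* checked only when the transaction commits */
} FkCounters;

/* nIncr is +1 for a child row written without a matching parent row,
** and -1 when such a row is removed again. */
static void fk_count(FkCounters *c, int isDeferred, int nIncr){
  if( isDeferred ) c->deferred += nIncr; else c->immediate += nIncr;
}

static int fk_statement_ok(const FkCounters *c){ return c->immediate==0; }
static int fk_commit_ok(const FkCounters *c){ return c->deferred==0; }

int main(void){
  FkCounters c = {0, 0};
  fk_count(&c, 1, +1);               /* deferred FK: child row inserted, parent missing */
  printf("commit allowed: %d\n", fk_commit_ok(&c));   /* prints 0 */
  fk_count(&c, 1, -1);               /* offending child row deleted again */
  printf("commit allowed: %d\n", fk_commit_ok(&c));   /* prints 1 */
  return fk_statement_ok(&c) ? 0 : 1;
}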
-*/ -static void fkScanChildren( - Parse *pParse, /* Parse context */ - SrcList *pSrc, /* The child table to be scanned */ - Table *pTab, /* The parent table */ - Index *pIdx, /* Index on parent covering the foreign key */ - FKey *pFKey, /* The foreign key linking pSrc to pTab */ - int *aiCol, /* Map from pIdx cols to child table cols */ - int regData, /* Parent row data starts here */ - int nIncr /* Amount to increment deferred counter by */ -){ - sqlite3 *db = pParse->db; /* Database handle */ - int i; /* Iterator variable */ - Expr *pWhere = 0; /* WHERE clause to scan with */ - NameContext sNameContext; /* Context used to resolve WHERE clause */ - WhereInfo *pWInfo; /* Context used by sqlite3WhereXXX() */ - int iFkIfZero = 0; /* Address of OP_FkIfZero */ - Vdbe *v = sqlite3GetVdbe(pParse); - - assert( pIdx==0 || pIdx->pTable==pTab ); - assert( pIdx==0 || pIdx->nKeyCol==pFKey->nCol ); - assert( pIdx!=0 || pFKey->nCol==1 ); - assert( pIdx!=0 || HasRowid(pTab) ); - - if( nIncr<0 ){ - iFkIfZero = sqlite3VdbeAddOp2(v, OP_FkIfZero, pFKey->isDeferred, 0); - VdbeCoverage(v); - } - - /* Create an Expr object representing an SQL expression like: - ** - ** = AND = ... - ** - ** The collation sequence used for the comparison should be that of - ** the parent key columns. The affinity of the parent key column should - ** be applied to each child key value before the comparison takes place. - */ - for(i=0; inCol; i++){ - Expr *pLeft; /* Value from parent table row */ - Expr *pRight; /* Column ref to child table */ - Expr *pEq; /* Expression (pLeft = pRight) */ - i16 iCol; /* Index of column in child table */ - const char *zCol; /* Name of column in child table */ - - iCol = pIdx ? pIdx->aiColumn[i] : -1; - pLeft = exprTableRegister(pParse, pTab, regData, iCol); - iCol = aiCol ? aiCol[i] : pFKey->aCol[0].iFrom; - assert( iCol>=0 ); - zCol = pFKey->pFrom->aCol[iCol].zName; - pRight = sqlite3Expr(db, TK_ID, zCol); - pEq = sqlite3PExpr(pParse, TK_EQ, pLeft, pRight, 0); - pWhere = sqlite3ExprAnd(db, pWhere, pEq); - } - - /* If the child table is the same as the parent table, then add terms - ** to the WHERE clause that prevent this entry from being scanned. - ** The added WHERE clause terms are like this: - ** - ** $current_rowid!=rowid - ** NOT( $current_a==a AND $current_b==b AND ... ) - ** - ** The first form is used for rowid tables. The second form is used - ** for WITHOUT ROWID tables. In the second form, the primary key is - ** (a,b,...) - */ - if( pTab==pFKey->pFrom && nIncr>0 ){ - Expr *pNe; /* Expression (pLeft != pRight) */ - Expr *pLeft; /* Value from parent table row */ - Expr *pRight; /* Column ref to child table */ - if( HasRowid(pTab) ){ - pLeft = exprTableRegister(pParse, pTab, regData, -1); - pRight = exprTableColumn(db, pTab, pSrc->a[0].iCursor, -1); - pNe = sqlite3PExpr(pParse, TK_NE, pLeft, pRight, 0); - }else{ - Expr *pEq, *pAll = 0; - Index *pPk = sqlite3PrimaryKeyIndex(pTab); - assert( pIdx!=0 ); - for(i=0; inKeyCol; i++){ - i16 iCol = pIdx->aiColumn[i]; - pLeft = exprTableRegister(pParse, pTab, regData, iCol); - pRight = exprTableColumn(db, pTab, pSrc->a[0].iCursor, iCol); - pEq = sqlite3PExpr(pParse, TK_EQ, pLeft, pRight, 0); - pAll = sqlite3ExprAnd(db, pAll, pEq); - } - pNe = sqlite3PExpr(pParse, TK_NOT, pAll, 0, 0); - } - pWhere = sqlite3ExprAnd(db, pWhere, pNe); - } - - /* Resolve the references in the WHERE clause. 
*/ - memset(&sNameContext, 0, sizeof(NameContext)); - sNameContext.pSrcList = pSrc; - sNameContext.pParse = pParse; - sqlite3ResolveExprNames(&sNameContext, pWhere); - - /* Create VDBE to loop through the entries in pSrc that match the WHERE - ** clause. If the constraint is not deferred, throw an exception for - ** each row found. Otherwise, for deferred constraints, increment the - ** deferred constraint counter by nIncr for each row selected. */ - pWInfo = sqlite3WhereBegin(pParse, pSrc, pWhere, 0, 0, 0, 0); - if( nIncr>0 && pFKey->isDeferred==0 ){ - sqlite3ParseToplevel(pParse)->mayAbort = 1; - } - sqlite3VdbeAddOp2(v, OP_FkCounter, pFKey->isDeferred, nIncr); - if( pWInfo ){ - sqlite3WhereEnd(pWInfo); - } - - /* Clean up the WHERE clause constructed above. */ - sqlite3ExprDelete(db, pWhere); - if( iFkIfZero ){ - sqlite3VdbeJumpHere(v, iFkIfZero); - } -} - -/* -** This function returns a linked list of FKey objects (connected by -** FKey.pNextTo) holding all children of table pTab. For example, -** given the following schema: -** -** CREATE TABLE t1(a PRIMARY KEY); -** CREATE TABLE t2(b REFERENCES t1(a); -** -** Calling this function with table "t1" as an argument returns a pointer -** to the FKey structure representing the foreign key constraint on table -** "t2". Calling this function with "t2" as the argument would return a -** NULL pointer (as there are no FK constraints for which t2 is the parent -** table). -*/ -SQLITE_PRIVATE FKey *sqlite3FkReferences(Table *pTab){ - int nName = sqlite3Strlen30(pTab->zName); - return (FKey *)sqlite3HashFind(&pTab->pSchema->fkeyHash, pTab->zName, nName); -} - -/* -** The second argument is a Trigger structure allocated by the -** fkActionTrigger() routine. This function deletes the Trigger structure -** and all of its sub-components. -** -** The Trigger structure or any of its sub-components may be allocated from -** the lookaside buffer belonging to database handle dbMem. -*/ -static void fkTriggerDelete(sqlite3 *dbMem, Trigger *p){ - if( p ){ - TriggerStep *pStep = p->step_list; - sqlite3ExprDelete(dbMem, pStep->pWhere); - sqlite3ExprListDelete(dbMem, pStep->pExprList); - sqlite3SelectDelete(dbMem, pStep->pSelect); - sqlite3ExprDelete(dbMem, p->pWhen); - sqlite3DbFree(dbMem, p); - } -} - -/* -** This function is called to generate code that runs when table pTab is -** being dropped from the database. The SrcList passed as the second argument -** to this function contains a single entry guaranteed to resolve to -** table pTab. -** -** Normally, no code is required. However, if either -** -** (a) The table is the parent table of a FK constraint, or -** (b) The table is the child table of a deferred FK constraint and it is -** determined at runtime that there are outstanding deferred FK -** constraint violations in the database, -** -** then the equivalent of "DELETE FROM " is executed before dropping -** the table from the database. Triggers are disabled while running this -** DELETE, but foreign key actions are not. -*/ -SQLITE_PRIVATE void sqlite3FkDropTable(Parse *pParse, SrcList *pName, Table *pTab){ - sqlite3 *db = pParse->db; - if( (db->flags&SQLITE_ForeignKeys) && !IsVirtual(pTab) && !pTab->pSelect ){ - int iSkip = 0; - Vdbe *v = sqlite3GetVdbe(pParse); - - assert( v ); /* VDBE has already been allocated */ - if( sqlite3FkReferences(pTab)==0 ){ - /* Search for a deferred foreign key constraint for which this table - ** is the child table. If one cannot be found, return without - ** generating any VDBE code. 
If one can be found, then jump over - ** the entire DELETE if there are no outstanding deferred constraints - ** when this statement is run. */ - FKey *p; - for(p=pTab->pFKey; p; p=p->pNextFrom){ - if( p->isDeferred || (db->flags & SQLITE_DeferFKs) ) break; - } - if( !p ) return; - iSkip = sqlite3VdbeMakeLabel(v); - sqlite3VdbeAddOp2(v, OP_FkIfZero, 1, iSkip); VdbeCoverage(v); - } - - pParse->disableTriggers = 1; - sqlite3DeleteFrom(pParse, sqlite3SrcListDup(db, pName, 0), 0); - pParse->disableTriggers = 0; - - /* If the DELETE has generated immediate foreign key constraint - ** violations, halt the VDBE and return an error at this point, before - ** any modifications to the schema are made. This is because statement - ** transactions are not able to rollback schema changes. - ** - ** If the SQLITE_DeferFKs flag is set, then this is not required, as - ** the statement transaction will not be rolled back even if FK - ** constraints are violated. - */ - if( (db->flags & SQLITE_DeferFKs)==0 ){ - sqlite3VdbeAddOp2(v, OP_FkIfZero, 0, sqlite3VdbeCurrentAddr(v)+2); - VdbeCoverage(v); - sqlite3HaltConstraint(pParse, SQLITE_CONSTRAINT_FOREIGNKEY, - OE_Abort, 0, P4_STATIC, P5_ConstraintFK); - } - - if( iSkip ){ - sqlite3VdbeResolveLabel(v, iSkip); - } - } -} - - -/* -** The second argument points to an FKey object representing a foreign key -** for which pTab is the child table. An UPDATE statement against pTab -** is currently being processed. For each column of the table that is -** actually updated, the corresponding element in the aChange[] array -** is zero or greater (if a column is unmodified the corresponding element -** is set to -1). If the rowid column is modified by the UPDATE statement -** the bChngRowid argument is non-zero. -** -** This function returns true if any of the columns that are part of the -** child key for FK constraint *p are modified. -*/ -static int fkChildIsModified( - Table *pTab, /* Table being updated */ - FKey *p, /* Foreign key for which pTab is the child */ - int *aChange, /* Array indicating modified columns */ - int bChngRowid /* True if rowid is modified by this update */ -){ - int i; - for(i=0; inCol; i++){ - int iChildKey = p->aCol[i].iFrom; - if( aChange[iChildKey]>=0 ) return 1; - if( iChildKey==pTab->iPKey && bChngRowid ) return 1; - } - return 0; -} - -/* -** The second argument points to an FKey object representing a foreign key -** for which pTab is the parent table. An UPDATE statement against pTab -** is currently being processed. For each column of the table that is -** actually updated, the corresponding element in the aChange[] array -** is zero or greater (if a column is unmodified the corresponding element -** is set to -1). If the rowid column is modified by the UPDATE statement -** the bChngRowid argument is non-zero. -** -** This function returns true if any of the columns that are part of the -** parent key for FK constraint *p are modified. 
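Both helpers rely on the aChange[] convention described above: for an UPDATE, entry i is >= 0 if column i is assigned by the statement and -1 otherwise. A small sketch of that check in isolation (illustrative only; key_is_modified and the sample columns are invented):

#include <stdio.h>

static int key_is_modified(const int *aChange, int nTabCol,
                           const int *aKeyCol, int nKeyCol){
  int i;
  for(i=0; i<nKeyCol; i++){
    int iCol = aKeyCol[i];
    if( iCol>=0 && iCol<nTabCol && aChange[iCol]>=0 ) return 1;
  }
  return 0;
}

int main(void){
  int aChange[4] = { -1, -1, 0, -1 };  /* 4-column table; UPDATE assigns column 2 only */
  int fkCols[2]  = { 1, 3 };           /* a child key built from columns 1 and 3 */
  int pkCols[1]  = { 2 };              /* a parent key built from column 2 */
  printf("child key modified:  %d\n", key_is_modified(aChange, 4, fkCols, 2)); /* 0 */
  printf("parent key modified: %d\n", key_is_modified(aChange, 4, pkCols, 1)); /* 1 */
  return 0;
}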
-*/ -static int fkParentIsModified( - Table *pTab, - FKey *p, - int *aChange, - int bChngRowid -){ - int i; - for(i=0; inCol; i++){ - char *zKey = p->aCol[i].zCol; - int iKey; - for(iKey=0; iKeynCol; iKey++){ - if( aChange[iKey]>=0 || (iKey==pTab->iPKey && bChngRowid) ){ - Column *pCol = &pTab->aCol[iKey]; - if( zKey ){ - if( 0==sqlite3StrICmp(pCol->zName, zKey) ) return 1; - }else if( pCol->colFlags & COLFLAG_PRIMKEY ){ - return 1; - } - } - } - } - return 0; -} - -/* -** This function is called when inserting, deleting or updating a row of -** table pTab to generate VDBE code to perform foreign key constraint -** processing for the operation. -** -** For a DELETE operation, parameter regOld is passed the index of the -** first register in an array of (pTab->nCol+1) registers containing the -** rowid of the row being deleted, followed by each of the column values -** of the row being deleted, from left to right. Parameter regNew is passed -** zero in this case. -** -** For an INSERT operation, regOld is passed zero and regNew is passed the -** first register of an array of (pTab->nCol+1) registers containing the new -** row data. -** -** For an UPDATE operation, this function is called twice. Once before -** the original record is deleted from the table using the calling convention -** described for DELETE. Then again after the original record is deleted -** but before the new record is inserted using the INSERT convention. -*/ -SQLITE_PRIVATE void sqlite3FkCheck( - Parse *pParse, /* Parse context */ - Table *pTab, /* Row is being deleted from this table */ - int regOld, /* Previous row data is stored here */ - int regNew, /* New row data is stored here */ - int *aChange, /* Array indicating UPDATEd columns (or 0) */ - int bChngRowid /* True if rowid is UPDATEd */ -){ - sqlite3 *db = pParse->db; /* Database handle */ - FKey *pFKey; /* Used to iterate through FKs */ - int iDb; /* Index of database containing pTab */ - const char *zDb; /* Name of database containing pTab */ - int isIgnoreErrors = pParse->disableTriggers; - - /* Exactly one of regOld and regNew should be non-zero. */ - assert( (regOld==0)!=(regNew==0) ); - - /* If foreign-keys are disabled, this function is a no-op. */ - if( (db->flags&SQLITE_ForeignKeys)==0 ) return; - - iDb = sqlite3SchemaToIndex(db, pTab->pSchema); - zDb = db->aDb[iDb].zName; - - /* Loop through all the foreign key constraints for which pTab is the - ** child table (the table that the foreign key definition is part of). */ - for(pFKey=pTab->pFKey; pFKey; pFKey=pFKey->pNextFrom){ - Table *pTo; /* Parent table of foreign key pFKey */ - Index *pIdx = 0; /* Index on key columns in pTo */ - int *aiFree = 0; - int *aiCol; - int iCol; - int i; - int isIgnore = 0; - - if( aChange - && sqlite3_stricmp(pTab->zName, pFKey->zTo)!=0 - && fkChildIsModified(pTab, pFKey, aChange, bChngRowid)==0 - ){ - continue; - } - - /* Find the parent table of this foreign key. Also find a unique index - ** on the parent key columns in the parent table. If either of these - ** schema items cannot be located, set an error in pParse and return - ** early. */ - if( pParse->disableTriggers ){ - pTo = sqlite3FindTable(db, pFKey->zTo, zDb); - }else{ - pTo = sqlite3LocateTable(pParse, 0, pFKey->zTo, zDb); - } - if( !pTo || sqlite3FkLocateIndex(pParse, pTo, pFKey, &pIdx, &aiFree) ){ - assert( isIgnoreErrors==0 || (regOld!=0 && regNew==0) ); - if( !isIgnoreErrors || db->mallocFailed ) return; - if( pTo==0 ){ - /* If isIgnoreErrors is true, then a table is being dropped. 
In this - ** case SQLite runs a "DELETE FROM xxx" on the table being dropped - ** before actually dropping it in order to check FK constraints. - ** If the parent table of an FK constraint on the current table is - ** missing, behave as if it is empty. i.e. decrement the relevant - ** FK counter for each row of the current table with non-NULL keys. - */ - Vdbe *v = sqlite3GetVdbe(pParse); - int iJump = sqlite3VdbeCurrentAddr(v) + pFKey->nCol + 1; - for(i=0; inCol; i++){ - int iReg = pFKey->aCol[i].iFrom + regOld + 1; - sqlite3VdbeAddOp2(v, OP_IsNull, iReg, iJump); VdbeCoverage(v); - } - sqlite3VdbeAddOp2(v, OP_FkCounter, pFKey->isDeferred, -1); - } - continue; - } - assert( pFKey->nCol==1 || (aiFree && pIdx) ); - - if( aiFree ){ - aiCol = aiFree; - }else{ - iCol = pFKey->aCol[0].iFrom; - aiCol = &iCol; - } - for(i=0; inCol; i++){ - if( aiCol[i]==pTab->iPKey ){ - aiCol[i] = -1; - } -#ifndef SQLITE_OMIT_AUTHORIZATION - /* Request permission to read the parent key columns. If the - ** authorization callback returns SQLITE_IGNORE, behave as if any - ** values read from the parent table are NULL. */ - if( db->xAuth ){ - int rcauth; - char *zCol = pTo->aCol[pIdx ? pIdx->aiColumn[i] : pTo->iPKey].zName; - rcauth = sqlite3AuthReadCol(pParse, pTo->zName, zCol, iDb); - isIgnore = (rcauth==SQLITE_IGNORE); - } -#endif - } - - /* Take a shared-cache advisory read-lock on the parent table. Allocate - ** a cursor to use to search the unique index on the parent key columns - ** in the parent table. */ - sqlite3TableLock(pParse, iDb, pTo->tnum, 0, pTo->zName); - pParse->nTab++; - - if( regOld!=0 ){ - /* A row is being removed from the child table. Search for the parent. - ** If the parent does not exist, removing the child row resolves an - ** outstanding foreign key constraint violation. */ - fkLookupParent(pParse, iDb, pTo, pIdx, pFKey, aiCol, regOld, -1,isIgnore); - } - if( regNew!=0 ){ - /* A row is being added to the child table. If a parent row cannot - ** be found, adding the child row has violated the FK constraint. */ - fkLookupParent(pParse, iDb, pTo, pIdx, pFKey, aiCol, regNew, +1,isIgnore); - } - - sqlite3DbFree(db, aiFree); - } - - /* Loop through all the foreign key constraints that refer to this table. - ** (the "child" constraints) */ - for(pFKey = sqlite3FkReferences(pTab); pFKey; pFKey=pFKey->pNextTo){ - Index *pIdx = 0; /* Foreign key index for pFKey */ - SrcList *pSrc; - int *aiCol = 0; - - if( aChange && fkParentIsModified(pTab, pFKey, aChange, bChngRowid)==0 ){ - continue; - } - - if( !pFKey->isDeferred && !(db->flags & SQLITE_DeferFKs) - && !pParse->pToplevel && !pParse->isMultiWrite - ){ - assert( regOld==0 && regNew!=0 ); - /* Inserting a single row into a parent table cannot cause an immediate - ** foreign key violation. So do nothing in this case. */ - continue; - } - - if( sqlite3FkLocateIndex(pParse, pTab, pFKey, &pIdx, &aiCol) ){ - if( !isIgnoreErrors || db->mallocFailed ) return; - continue; - } - assert( aiCol || pFKey->nCol==1 ); - - /* Create a SrcList structure containing the child table. 
We need the - ** child table as a SrcList for sqlite3WhereBegin() */ - pSrc = sqlite3SrcListAppend(db, 0, 0, 0); - if( pSrc ){ - struct SrcList_item *pItem = pSrc->a; - pItem->pTab = pFKey->pFrom; - pItem->zName = pFKey->pFrom->zName; - pItem->pTab->nRef++; - pItem->iCursor = pParse->nTab++; - - if( regNew!=0 ){ - fkScanChildren(pParse, pSrc, pTab, pIdx, pFKey, aiCol, regNew, -1); - } - if( regOld!=0 ){ - /* If there is a RESTRICT action configured for the current operation - ** on the parent table of this FK, then throw an exception - ** immediately if the FK constraint is violated, even if this is a - ** deferred trigger. That's what RESTRICT means. To defer checking - ** the constraint, the FK should specify NO ACTION (represented - ** using OE_None). NO ACTION is the default. */ - fkScanChildren(pParse, pSrc, pTab, pIdx, pFKey, aiCol, regOld, 1); - } - pItem->zName = 0; - sqlite3SrcListDelete(db, pSrc); - } - sqlite3DbFree(db, aiCol); - } -} - -#define COLUMN_MASK(x) (((x)>31) ? 0xffffffff : ((u32)1<<(x))) - -/* -** This function is called before generating code to update or delete a -** row contained in table pTab. -*/ -SQLITE_PRIVATE u32 sqlite3FkOldmask( - Parse *pParse, /* Parse context */ - Table *pTab /* Table being modified */ -){ - u32 mask = 0; - if( pParse->db->flags&SQLITE_ForeignKeys ){ - FKey *p; - int i; - for(p=pTab->pFKey; p; p=p->pNextFrom){ - for(i=0; inCol; i++) mask |= COLUMN_MASK(p->aCol[i].iFrom); - } - for(p=sqlite3FkReferences(pTab); p; p=p->pNextTo){ - Index *pIdx = 0; - sqlite3FkLocateIndex(pParse, pTab, p, &pIdx, 0); - if( pIdx ){ - for(i=0; inKeyCol; i++) mask |= COLUMN_MASK(pIdx->aiColumn[i]); - } - } - } - return mask; -} - - -/* -** This function is called before generating code to update or delete a -** row contained in table pTab. If the operation is a DELETE, then -** parameter aChange is passed a NULL value. For an UPDATE, aChange points -** to an array of size N, where N is the number of columns in table pTab. -** If the i'th column is not modified by the UPDATE, then the corresponding -** entry in the aChange[] array is set to -1. If the column is modified, -** the value is 0 or greater. Parameter chngRowid is set to true if the -** UPDATE statement modifies the rowid fields of the table. -** -** If any foreign key processing will be required, this function returns -** true. If there is no foreign key related processing, this function -** returns false. -*/ -SQLITE_PRIVATE int sqlite3FkRequired( - Parse *pParse, /* Parse context */ - Table *pTab, /* Table being modified */ - int *aChange, /* Non-NULL for UPDATE operations */ - int chngRowid /* True for UPDATE that affects rowid */ -){ - if( pParse->db->flags&SQLITE_ForeignKeys ){ - if( !aChange ){ - /* A DELETE operation. Foreign key processing is required if the - ** table in question is either the child or parent table for any - ** foreign key constraint. */ - return (sqlite3FkReferences(pTab) || pTab->pFKey); - }else{ - /* This is an UPDATE. Foreign key processing is only required if the - ** operation modifies one or more child or parent key columns. */ - FKey *p; - - /* Check if any child key columns are being modified. */ - for(p=pTab->pFKey; p; p=p->pNextFrom){ - if( fkChildIsModified(pTab, p, aChange, chngRowid) ) return 1; - } - - /* Check if any parent key columns are being modified. 
*/ - for(p=sqlite3FkReferences(pTab); p; p=p->pNextTo){ - if( fkParentIsModified(pTab, p, aChange, chngRowid) ) return 1; - } - } - } - return 0; -} - -/* -** This function is called when an UPDATE or DELETE operation is being -** compiled on table pTab, which is the parent table of foreign-key pFKey. -** If the current operation is an UPDATE, then the pChanges parameter is -** passed a pointer to the list of columns being modified. If it is a -** DELETE, pChanges is passed a NULL pointer. -** -** It returns a pointer to a Trigger structure containing a trigger -** equivalent to the ON UPDATE or ON DELETE action specified by pFKey. -** If the action is "NO ACTION" or "RESTRICT", then a NULL pointer is -** returned (these actions require no special handling by the triggers -** sub-system, code for them is created by fkScanChildren()). -** -** For example, if pFKey is the foreign key and pTab is table "p" in -** the following schema: -** -** CREATE TABLE p(pk PRIMARY KEY); -** CREATE TABLE c(ck REFERENCES p ON DELETE CASCADE); -** -** then the returned trigger structure is equivalent to: -** -** CREATE TRIGGER ... DELETE ON p BEGIN -** DELETE FROM c WHERE ck = old.pk; -** END; -** -** The returned pointer is cached as part of the foreign key object. It -** is eventually freed along with the rest of the foreign key object by -** sqlite3FkDelete(). -*/ -static Trigger *fkActionTrigger( - Parse *pParse, /* Parse context */ - Table *pTab, /* Table being updated or deleted from */ - FKey *pFKey, /* Foreign key to get action for */ - ExprList *pChanges /* Change-list for UPDATE, NULL for DELETE */ -){ - sqlite3 *db = pParse->db; /* Database handle */ - int action; /* One of OE_None, OE_Cascade etc. */ - Trigger *pTrigger; /* Trigger definition to return */ - int iAction = (pChanges!=0); /* 1 for UPDATE, 0 for DELETE */ - - action = pFKey->aAction[iAction]; - pTrigger = pFKey->apTrigger[iAction]; - - if( action!=OE_None && !pTrigger ){ - u8 enableLookaside; /* Copy of db->lookaside.bEnabled */ - char const *zFrom; /* Name of child table */ - int nFrom; /* Length in bytes of zFrom */ - Index *pIdx = 0; /* Parent key index for this FK */ - int *aiCol = 0; /* child table cols -> parent key cols */ - TriggerStep *pStep = 0; /* First (only) step of trigger program */ - Expr *pWhere = 0; /* WHERE clause of trigger step */ - ExprList *pList = 0; /* Changes list if ON UPDATE CASCADE */ - Select *pSelect = 0; /* If RESTRICT, "SELECT RAISE(...)" */ - int i; /* Iterator variable */ - Expr *pWhen = 0; /* WHEN clause for the trigger */ - - if( sqlite3FkLocateIndex(pParse, pTab, pFKey, &pIdx, &aiCol) ) return 0; - assert( aiCol || pFKey->nCol==1 ); - - for(i=0; inCol; i++){ - Token tOld = { "old", 3 }; /* Literal "old" token */ - Token tNew = { "new", 3 }; /* Literal "new" token */ - Token tFromCol; /* Name of column in child table */ - Token tToCol; /* Name of column in parent table */ - int iFromCol; /* Idx of column in child table */ - Expr *pEq; /* tFromCol = OLD.tToCol */ - - iFromCol = aiCol ? aiCol[i] : pFKey->aCol[0].iFrom; - assert( iFromCol>=0 ); - tToCol.z = pIdx ? pTab->aCol[pIdx->aiColumn[i]].zName : "oid"; - tFromCol.z = pFKey->pFrom->aCol[iFromCol].zName; - - tToCol.n = sqlite3Strlen30(tToCol.z); - tFromCol.n = sqlite3Strlen30(tFromCol.z); - - /* Create the expression "OLD.zToCol = zFromCol". It is important - ** that the "OLD.zToCol" term is on the LHS of the = operator, so - ** that the affinity and collation sequence associated with the - ** parent table are used for the comparison. 
*/ - pEq = sqlite3PExpr(pParse, TK_EQ, - sqlite3PExpr(pParse, TK_DOT, - sqlite3PExpr(pParse, TK_ID, 0, 0, &tOld), - sqlite3PExpr(pParse, TK_ID, 0, 0, &tToCol) - , 0), - sqlite3PExpr(pParse, TK_ID, 0, 0, &tFromCol) - , 0); - pWhere = sqlite3ExprAnd(db, pWhere, pEq); - - /* For ON UPDATE, construct the next term of the WHEN clause. - ** The final WHEN clause will be like this: - ** - ** WHEN NOT(old.col1 IS new.col1 AND ... AND old.colN IS new.colN) - */ - if( pChanges ){ - pEq = sqlite3PExpr(pParse, TK_IS, - sqlite3PExpr(pParse, TK_DOT, - sqlite3PExpr(pParse, TK_ID, 0, 0, &tOld), - sqlite3PExpr(pParse, TK_ID, 0, 0, &tToCol), - 0), - sqlite3PExpr(pParse, TK_DOT, - sqlite3PExpr(pParse, TK_ID, 0, 0, &tNew), - sqlite3PExpr(pParse, TK_ID, 0, 0, &tToCol), - 0), - 0); - pWhen = sqlite3ExprAnd(db, pWhen, pEq); - } - - if( action!=OE_Restrict && (action!=OE_Cascade || pChanges) ){ - Expr *pNew; - if( action==OE_Cascade ){ - pNew = sqlite3PExpr(pParse, TK_DOT, - sqlite3PExpr(pParse, TK_ID, 0, 0, &tNew), - sqlite3PExpr(pParse, TK_ID, 0, 0, &tToCol) - , 0); - }else if( action==OE_SetDflt ){ - Expr *pDflt = pFKey->pFrom->aCol[iFromCol].pDflt; - if( pDflt ){ - pNew = sqlite3ExprDup(db, pDflt, 0); - }else{ - pNew = sqlite3PExpr(pParse, TK_NULL, 0, 0, 0); - } - }else{ - pNew = sqlite3PExpr(pParse, TK_NULL, 0, 0, 0); - } - pList = sqlite3ExprListAppend(pParse, pList, pNew); - sqlite3ExprListSetName(pParse, pList, &tFromCol, 0); - } - } - sqlite3DbFree(db, aiCol); - - zFrom = pFKey->pFrom->zName; - nFrom = sqlite3Strlen30(zFrom); - - if( action==OE_Restrict ){ - Token tFrom; - Expr *pRaise; - - tFrom.z = zFrom; - tFrom.n = nFrom; - pRaise = sqlite3Expr(db, TK_RAISE, "FOREIGN KEY constraint failed"); - if( pRaise ){ - pRaise->affinity = OE_Abort; - } - pSelect = sqlite3SelectNew(pParse, - sqlite3ExprListAppend(pParse, 0, pRaise), - sqlite3SrcListAppend(db, 0, &tFrom, 0), - pWhere, - 0, 0, 0, 0, 0, 0 - ); - pWhere = 0; - } - - /* Disable lookaside memory allocation */ - enableLookaside = db->lookaside.bEnabled; - db->lookaside.bEnabled = 0; - - pTrigger = (Trigger *)sqlite3DbMallocZero(db, - sizeof(Trigger) + /* struct Trigger */ - sizeof(TriggerStep) + /* Single step in trigger program */ - nFrom + 1 /* Space for pStep->target.z */ - ); - if( pTrigger ){ - pStep = pTrigger->step_list = (TriggerStep *)&pTrigger[1]; - pStep->target.z = (char *)&pStep[1]; - pStep->target.n = nFrom; - memcpy((char *)pStep->target.z, zFrom, nFrom); - - pStep->pWhere = sqlite3ExprDup(db, pWhere, EXPRDUP_REDUCE); - pStep->pExprList = sqlite3ExprListDup(db, pList, EXPRDUP_REDUCE); - pStep->pSelect = sqlite3SelectDup(db, pSelect, EXPRDUP_REDUCE); - if( pWhen ){ - pWhen = sqlite3PExpr(pParse, TK_NOT, pWhen, 0, 0); - pTrigger->pWhen = sqlite3ExprDup(db, pWhen, EXPRDUP_REDUCE); - } - } - - /* Re-enable the lookaside buffer, if it was disabled earlier. */ - db->lookaside.bEnabled = enableLookaside; - - sqlite3ExprDelete(db, pWhere); - sqlite3ExprDelete(db, pWhen); - sqlite3ExprListDelete(db, pList); - sqlite3SelectDelete(db, pSelect); - if( db->mallocFailed==1 ){ - fkTriggerDelete(db, pTrigger); - return 0; - } - assert( pStep!=0 ); - - switch( action ){ - case OE_Restrict: - pStep->op = TK_SELECT; - break; - case OE_Cascade: - if( !pChanges ){ - pStep->op = TK_DELETE; - break; - } - default: - pStep->op = TK_UPDATE; - } - pStep->pTrig = pTrigger; - pTrigger->pSchema = pTab->pSchema; - pTrigger->pTabSchema = pTab->pSchema; - pFKey->apTrigger[iAction] = pTrigger; - pTrigger->op = (pChanges ? 
TK_UPDATE : TK_DELETE); - } - - return pTrigger; -} - -/* -** This function is called when deleting or updating a row to implement -** any required CASCADE, SET NULL or SET DEFAULT actions. -*/ -SQLITE_PRIVATE void sqlite3FkActions( - Parse *pParse, /* Parse context */ - Table *pTab, /* Table being updated or deleted from */ - ExprList *pChanges, /* Change-list for UPDATE, NULL for DELETE */ - int regOld, /* Address of array containing old row */ - int *aChange, /* Array indicating UPDATEd columns (or 0) */ - int bChngRowid /* True if rowid is UPDATEd */ -){ - /* If foreign-key support is enabled, iterate through all FKs that - ** refer to table pTab. If there is an action associated with the FK - ** for this operation (either update or delete), invoke the associated - ** trigger sub-program. */ - if( pParse->db->flags&SQLITE_ForeignKeys ){ - FKey *pFKey; /* Iterator variable */ - for(pFKey = sqlite3FkReferences(pTab); pFKey; pFKey=pFKey->pNextTo){ - if( aChange==0 || fkParentIsModified(pTab, pFKey, aChange, bChngRowid) ){ - Trigger *pAct = fkActionTrigger(pParse, pTab, pFKey, pChanges); - if( pAct ){ - sqlite3CodeRowTriggerDirect(pParse, pAct, pTab, regOld, OE_Abort, 0); - } - } - } - } -} - -#endif /* ifndef SQLITE_OMIT_TRIGGER */ - -/* -** Free all memory associated with foreign key definitions attached to -** table pTab. Remove the deleted foreign keys from the Schema.fkeyHash -** hash table. -*/ -SQLITE_PRIVATE void sqlite3FkDelete(sqlite3 *db, Table *pTab){ - FKey *pFKey; /* Iterator variable */ - FKey *pNext; /* Copy of pFKey->pNextFrom */ - - assert( db==0 || sqlite3SchemaMutexHeld(db, 0, pTab->pSchema) ); - for(pFKey=pTab->pFKey; pFKey; pFKey=pNext){ - - /* Remove the FK from the fkeyHash hash table. */ - if( !db || db->pnBytesFreed==0 ){ - if( pFKey->pPrevTo ){ - pFKey->pPrevTo->pNextTo = pFKey->pNextTo; - }else{ - void *p = (void *)pFKey->pNextTo; - const char *z = (p ? pFKey->pNextTo->zTo : pFKey->zTo); - sqlite3HashInsert(&pTab->pSchema->fkeyHash, z, sqlite3Strlen30(z), p); - } - if( pFKey->pNextTo ){ - pFKey->pNextTo->pPrevTo = pFKey->pPrevTo; - } - } - - /* EV: R-30323-21917 Each foreign key constraint in SQLite is - ** classified as either immediate or deferred. - */ - assert( pFKey->isDeferred==0 || pFKey->isDeferred==1 ); - - /* Delete any triggers created to implement actions for this FK. */ -#ifndef SQLITE_OMIT_TRIGGER - fkTriggerDelete(db, pFKey->apTrigger[0]); - fkTriggerDelete(db, pFKey->apTrigger[1]); -#endif - - pNext = pFKey->pNextFrom; - sqlite3DbFree(db, pFKey); - } -} -#endif /* ifndef SQLITE_OMIT_FOREIGN_KEY */ - -/************** End of fkey.c ************************************************/ -/************** Begin file insert.c ******************************************/ -/* -** 2001 September 15 -** -** The author disclaims copyright to this source code. In place of -** a legal notice, here is a blessing: -** -** May you do good and not evil. -** May you find forgiveness for yourself and forgive others. -** May you share freely, never taking more than you give. -** -************************************************************************* -** This file contains C code routines that are called by the parser -** to handle INSERT statements in SQLite. -*/ - -/* -** Generate code that will -** -** (1) acquire a lock for table pTab then -** (2) open pTab as cursor iCur. -** -** If pTab is a WITHOUT ROWID table, then it is the PRIMARY KEY index -** for that table that is actually opened. 
-*/ -SQLITE_PRIVATE void sqlite3OpenTable( - Parse *pParse, /* Generate code into this VDBE */ - int iCur, /* The cursor number of the table */ - int iDb, /* The database index in sqlite3.aDb[] */ - Table *pTab, /* The table to be opened */ - int opcode /* OP_OpenRead or OP_OpenWrite */ -){ - Vdbe *v; - assert( !IsVirtual(pTab) ); - v = sqlite3GetVdbe(pParse); - assert( opcode==OP_OpenWrite || opcode==OP_OpenRead ); - sqlite3TableLock(pParse, iDb, pTab->tnum, - (opcode==OP_OpenWrite)?1:0, pTab->zName); - if( HasRowid(pTab) ){ - sqlite3VdbeAddOp4Int(v, opcode, iCur, pTab->tnum, iDb, pTab->nCol); - VdbeComment((v, "%s", pTab->zName)); - }else{ - Index *pPk = sqlite3PrimaryKeyIndex(pTab); - assert( pPk!=0 ); - assert( pPk->tnum=pTab->tnum ); - sqlite3VdbeAddOp3(v, opcode, iCur, pPk->tnum, iDb); - sqlite3VdbeSetP4KeyInfo(pParse, pPk); - VdbeComment((v, "%s", pTab->zName)); - } -} - -/* -** Return a pointer to the column affinity string associated with index -** pIdx. A column affinity string has one character for each column in -** the table, according to the affinity of the column: -** -** Character Column affinity -** ------------------------------ -** 'a' TEXT -** 'b' NONE -** 'c' NUMERIC -** 'd' INTEGER -** 'e' REAL -** -** An extra 'd' is appended to the end of the string to cover the -** rowid that appears as the last column in every index. -** -** Memory for the buffer containing the column index affinity string -** is managed along with the rest of the Index structure. It will be -** released when sqlite3DeleteIndex() is called. -*/ -SQLITE_PRIVATE const char *sqlite3IndexAffinityStr(Vdbe *v, Index *pIdx){ - if( !pIdx->zColAff ){ - /* The first time a column affinity string for a particular index is - ** required, it is allocated and populated here. It is then stored as - ** a member of the Index structure for subsequent use. - ** - ** The column affinity string will eventually be deleted by - ** sqliteDeleteIndex() when the Index structure itself is cleaned - ** up. - */ - int n; - Table *pTab = pIdx->pTable; - sqlite3 *db = sqlite3VdbeDb(v); - pIdx->zColAff = (char *)sqlite3DbMallocRaw(0, pIdx->nColumn+1); - if( !pIdx->zColAff ){ - db->mallocFailed = 1; - return 0; - } - for(n=0; nnColumn; n++){ - i16 x = pIdx->aiColumn[n]; - pIdx->zColAff[n] = x<0 ? SQLITE_AFF_INTEGER : pTab->aCol[x].affinity; - } - pIdx->zColAff[n] = 0; - } - - return pIdx->zColAff; -} - -/* -** Compute the affinity string for table pTab, if it has not already been -** computed. As an optimization, omit trailing SQLITE_AFF_NONE affinities. -** -** If the affinity exists (if it is no entirely SQLITE_AFF_NONE values) and -** if iReg>0 then code an OP_Affinity opcode that will set the affinities -** for register iReg and following. Or if affinities exists and iReg==0, -** then just set the P4 operand of the previous opcode (which should be -** an OP_MakeRecord) to the affinity string. 
-** -** A column affinity string has one character per column: -** -** Character Column affinity -** ------------------------------ -** 'a' TEXT -** 'b' NONE -** 'c' NUMERIC -** 'd' INTEGER -** 'e' REAL -*/ -SQLITE_PRIVATE void sqlite3TableAffinity(Vdbe *v, Table *pTab, int iReg){ - int i; - char *zColAff = pTab->zColAff; - if( zColAff==0 ){ - sqlite3 *db = sqlite3VdbeDb(v); - zColAff = (char *)sqlite3DbMallocRaw(0, pTab->nCol+1); - if( !zColAff ){ - db->mallocFailed = 1; - return; - } - - for(i=0; inCol; i++){ - zColAff[i] = pTab->aCol[i].affinity; - } - do{ - zColAff[i--] = 0; - }while( i>=0 && zColAff[i]==SQLITE_AFF_NONE ); - pTab->zColAff = zColAff; - } - i = sqlite3Strlen30(zColAff); - if( i ){ - if( iReg ){ - sqlite3VdbeAddOp4(v, OP_Affinity, iReg, i, 0, zColAff, i); - }else{ - sqlite3VdbeChangeP4(v, -1, zColAff, i); - } - } -} - -/* -** Return non-zero if the table pTab in database iDb or any of its indices -** have been opened at any point in the VDBE program. This is used to see if -** a statement of the form "INSERT INTO SELECT ..." can -** run without using a temporary table for the results of the SELECT. -*/ -static int readsTable(Parse *p, int iDb, Table *pTab){ - Vdbe *v = sqlite3GetVdbe(p); - int i; - int iEnd = sqlite3VdbeCurrentAddr(v); -#ifndef SQLITE_OMIT_VIRTUALTABLE - VTable *pVTab = IsVirtual(pTab) ? sqlite3GetVTable(p->db, pTab) : 0; -#endif - - for(i=1; iopcode==OP_OpenRead && pOp->p3==iDb ){ - Index *pIndex; - int tnum = pOp->p2; - if( tnum==pTab->tnum ){ - return 1; - } - for(pIndex=pTab->pIndex; pIndex; pIndex=pIndex->pNext){ - if( tnum==pIndex->tnum ){ - return 1; - } - } - } -#ifndef SQLITE_OMIT_VIRTUALTABLE - if( pOp->opcode==OP_VOpen && pOp->p4.pVtab==pVTab ){ - assert( pOp->p4.pVtab!=0 ); - assert( pOp->p4type==P4_VTAB ); - return 1; - } -#endif - } - return 0; -} - -#ifndef SQLITE_OMIT_AUTOINCREMENT -/* -** Locate or create an AutoincInfo structure associated with table pTab -** which is in database iDb. Return the register number for the register -** that holds the maximum rowid. -** -** There is at most one AutoincInfo structure per table even if the -** same table is autoincremented multiple times due to inserts within -** triggers. A new AutoincInfo structure is created if this is the -** first use of table pTab. On 2nd and subsequent uses, the original -** AutoincInfo structure is used. -** -** Three memory locations are allocated: -** -** (1) Register to hold the name of the pTab table. -** (2) Register to hold the maximum ROWID of pTab. -** (3) Register to hold the rowid in sqlite_sequence of pTab -** -** The 2nd register is the one that is returned. That is all the -** insert routine needs to know about. 
-*/ -static int autoIncBegin( - Parse *pParse, /* Parsing context */ - int iDb, /* Index of the database holding pTab */ - Table *pTab /* The table we are writing to */ -){ - int memId = 0; /* Register holding maximum rowid */ - if( pTab->tabFlags & TF_Autoincrement ){ - Parse *pToplevel = sqlite3ParseToplevel(pParse); - AutoincInfo *pInfo; - - pInfo = pToplevel->pAinc; - while( pInfo && pInfo->pTab!=pTab ){ pInfo = pInfo->pNext; } - if( pInfo==0 ){ - pInfo = sqlite3DbMallocRaw(pParse->db, sizeof(*pInfo)); - if( pInfo==0 ) return 0; - pInfo->pNext = pToplevel->pAinc; - pToplevel->pAinc = pInfo; - pInfo->pTab = pTab; - pInfo->iDb = iDb; - pToplevel->nMem++; /* Register to hold name of table */ - pInfo->regCtr = ++pToplevel->nMem; /* Max rowid register */ - pToplevel->nMem++; /* Rowid in sqlite_sequence */ - } - memId = pInfo->regCtr; - } - return memId; -} - -/* -** This routine generates code that will initialize all of the -** register used by the autoincrement tracker. -*/ -SQLITE_PRIVATE void sqlite3AutoincrementBegin(Parse *pParse){ - AutoincInfo *p; /* Information about an AUTOINCREMENT */ - sqlite3 *db = pParse->db; /* The database connection */ - Db *pDb; /* Database only autoinc table */ - int memId; /* Register holding max rowid */ - int addr; /* A VDBE address */ - Vdbe *v = pParse->pVdbe; /* VDBE under construction */ - - /* This routine is never called during trigger-generation. It is - ** only called from the top-level */ - assert( pParse->pTriggerTab==0 ); - assert( pParse==sqlite3ParseToplevel(pParse) ); - - assert( v ); /* We failed long ago if this is not so */ - for(p = pParse->pAinc; p; p = p->pNext){ - pDb = &db->aDb[p->iDb]; - memId = p->regCtr; - assert( sqlite3SchemaMutexHeld(db, 0, pDb->pSchema) ); - sqlite3OpenTable(pParse, 0, p->iDb, pDb->pSchema->pSeqTab, OP_OpenRead); - sqlite3VdbeAddOp3(v, OP_Null, 0, memId, memId+1); - addr = sqlite3VdbeCurrentAddr(v); - sqlite3VdbeAddOp4(v, OP_String8, 0, memId-1, 0, p->pTab->zName, 0); - sqlite3VdbeAddOp2(v, OP_Rewind, 0, addr+9); VdbeCoverage(v); - sqlite3VdbeAddOp3(v, OP_Column, 0, 0, memId); - sqlite3VdbeAddOp3(v, OP_Ne, memId-1, addr+7, memId); VdbeCoverage(v); - sqlite3VdbeChangeP5(v, SQLITE_JUMPIFNULL); - sqlite3VdbeAddOp2(v, OP_Rowid, 0, memId+1); - sqlite3VdbeAddOp3(v, OP_Column, 0, 1, memId); - sqlite3VdbeAddOp2(v, OP_Goto, 0, addr+9); - sqlite3VdbeAddOp2(v, OP_Next, 0, addr+2); VdbeCoverage(v); - sqlite3VdbeAddOp2(v, OP_Integer, 0, memId); - sqlite3VdbeAddOp0(v, OP_Close); - } -} - -/* -** Update the maximum rowid for an autoincrement calculation. -** -** This routine should be called when the top of the stack holds a -** new rowid that is about to be inserted. If that new rowid is -** larger than the maximum rowid in the memId memory cell, then the -** memory cell is updated. The stack is unchanged. -*/ -static void autoIncStep(Parse *pParse, int memId, int regRowid){ - if( memId>0 ){ - sqlite3VdbeAddOp2(pParse->pVdbe, OP_MemMax, memId, regRowid); - } -} - -/* -** This routine generates the code needed to write autoincrement -** maximum rowid values back into the sqlite_sequence register. -** Every statement that might do an INSERT into an autoincrement -** table (either directly or through triggers) needs to call this -** routine just before the "exit" code. 
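Stripped of the VDBE details, the bookkeeping described above is: read the stored maximum at statement start, raise it as rows are inserted, and write it back once just before the statement exits. A rough standalone sketch (not part of sqlite3.c; SeqEntry and seq_step are hypothetical stand-ins for a sqlite_sequence row and the OP_MemMax step):

#include <stdio.h>

typedef long long i64;

typedef struct SeqEntry { i64 maxRowid; } SeqEntry;

/* autoIncStep analogue: remember the largest rowid seen so far. */
static void seq_step(i64 *pMax, i64 rowid){
  if( rowid>*pMax ) *pMax = rowid;
}

int main(void){
  SeqEntry seq = { 7 };           /* value read from sqlite_sequence at statement start */
  i64 mem = seq.maxRowid;         /* register initialised by the "begin" step */
  i64 newRows[] = { 8, 12, 9 };   /* rowids assigned during this INSERT */
  int i;
  for(i=0; i<3; i++) seq_step(&mem, newRows[i]);
  seq.maxRowid = mem;             /* written back just before the "exit" code */
  printf("sqlite_sequence now records %lld\n", seq.maxRowid);   /* 12 */
  return 0;
}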
-*/ -SQLITE_PRIVATE void sqlite3AutoincrementEnd(Parse *pParse){ - AutoincInfo *p; - Vdbe *v = pParse->pVdbe; - sqlite3 *db = pParse->db; - - assert( v ); - for(p = pParse->pAinc; p; p = p->pNext){ - Db *pDb = &db->aDb[p->iDb]; - int j1; - int iRec; - int memId = p->regCtr; - - iRec = sqlite3GetTempReg(pParse); - assert( sqlite3SchemaMutexHeld(db, 0, pDb->pSchema) ); - sqlite3OpenTable(pParse, 0, p->iDb, pDb->pSchema->pSeqTab, OP_OpenWrite); - j1 = sqlite3VdbeAddOp1(v, OP_NotNull, memId+1); VdbeCoverage(v); - sqlite3VdbeAddOp2(v, OP_NewRowid, 0, memId+1); - sqlite3VdbeJumpHere(v, j1); - sqlite3VdbeAddOp3(v, OP_MakeRecord, memId-1, 2, iRec); - sqlite3VdbeAddOp3(v, OP_Insert, 0, iRec, memId+1); - sqlite3VdbeChangeP5(v, OPFLAG_APPEND); - sqlite3VdbeAddOp0(v, OP_Close); - sqlite3ReleaseTempReg(pParse, iRec); - } -} -#else -/* -** If SQLITE_OMIT_AUTOINCREMENT is defined, then the three routines -** above are all no-ops -*/ -# define autoIncBegin(A,B,C) (0) -# define autoIncStep(A,B,C) -#endif /* SQLITE_OMIT_AUTOINCREMENT */ - - -/* Forward declaration */ -static int xferOptimization( - Parse *pParse, /* Parser context */ - Table *pDest, /* The table we are inserting into */ - Select *pSelect, /* A SELECT statement to use as the data source */ - int onError, /* How to handle constraint errors */ - int iDbDest /* The database of pDest */ -); - -/* -** This routine is called to handle SQL of the following forms: -** -** insert into TABLE (IDLIST) values(EXPRLIST) -** insert into TABLE (IDLIST) select -** -** The IDLIST following the table name is always optional. If omitted, -** then a list of all columns for the table is substituted. The IDLIST -** appears in the pColumn parameter. pColumn is NULL if IDLIST is omitted. -** -** The pList parameter holds EXPRLIST in the first form of the INSERT -** statement above, and pSelect is NULL. For the second form, pList is -** NULL and pSelect is a pointer to the select statement used to generate -** data for the insert. -** -** The code generated follows one of four templates. For a simple -** insert with data coming from a VALUES clause, the code executes -** once straight down through. Pseudo-code follows (we call this -** the "1st template"): -** -** open write cursor to
<table> and its indices -** put VALUES clause expressions into registers -** write the resulting record into <table>
-** cleanup -** -** The three remaining templates assume the statement is of the form -** -** INSERT INTO <table>
SELECT ... -** -** If the SELECT clause is of the restricted form "SELECT * FROM <table2>" - -** in other words if the SELECT pulls all columns from a single table -** and there is no WHERE or LIMIT or GROUP BY or ORDER BY clauses, and -** if <table2> and <table1> are distinct tables but have identical -** schemas, including all the same indices, then a special optimization -** is invoked that copies raw records from <table2> over to <table1>. -** See the xferOptimization() function for the implementation of this -** template. This is the 2nd template. -** -** open a write cursor to <table1>
-** open read cursor on <table2> -** transfer all records in <table2> over to <table1>
-** close cursors -** foreach index on <table1>
-** open a write cursor on the <table1>
index -** open a read cursor on the corresponding <table2> index -** transfer all records from the read to the write cursors -** close cursors -** end foreach -** -** The 3rd template is for when the second template does not apply -** and the SELECT clause does not read from <table>
at any time. -** The generated code follows this template: -** -** X <- A -** goto B -** A: setup for the SELECT -** loop over the rows in the SELECT -** load values into registers R..R+n -** yield X -** end loop -** cleanup after the SELECT -** end-coroutine X -** B: open write cursor to <table>
and its indices -** C: yield X, at EOF goto D -** insert the select result into <table>
from R..R+n -** goto C -** D: cleanup -** -** The 4th template is used if the insert statement takes its -** values from a SELECT but the data is being inserted into a table -** that is also read as part of the SELECT. In the third form, -** we have to use an intermediate table to store the results of -** the select. The template is like this: -** -** X <- A -** goto B -** A: setup for the SELECT -** loop over the tables in the SELECT -** load value into register R..R+n -** yield X -** end loop -** cleanup after the SELECT -** end co-routine R -** B: open temp table -** L: yield X, at EOF goto M -** insert row from R..R+n into temp table -** goto L -** M: open write cursor to <table>
and its indices -** rewind temp table -** C: loop over rows of intermediate table -** transfer values from intermediate table into <table>
    -** end loop -** D: cleanup -*/ -SQLITE_PRIVATE void sqlite3Insert( - Parse *pParse, /* Parser context */ - SrcList *pTabList, /* Name of table into which we are inserting */ - Select *pSelect, /* A SELECT statement to use as the data source */ - IdList *pColumn, /* Column names corresponding to IDLIST. */ - int onError /* How to handle constraint errors */ -){ - sqlite3 *db; /* The main database structure */ - Table *pTab; /* The table to insert into. aka TABLE */ - char *zTab; /* Name of the table into which we are inserting */ - const char *zDb; /* Name of the database holding this table */ - int i, j, idx; /* Loop counters */ - Vdbe *v; /* Generate code into this virtual machine */ - Index *pIdx; /* For looping over indices of the table */ - int nColumn; /* Number of columns in the data */ - int nHidden = 0; /* Number of hidden columns if TABLE is virtual */ - int iDataCur = 0; /* VDBE cursor that is the main data repository */ - int iIdxCur = 0; /* First index cursor */ - int ipkColumn = -1; /* Column that is the INTEGER PRIMARY KEY */ - int endOfLoop; /* Label for the end of the insertion loop */ - int srcTab = 0; /* Data comes from this temporary cursor if >=0 */ - int addrInsTop = 0; /* Jump to label "D" */ - int addrCont = 0; /* Top of insert loop. Label "C" in templates 3 and 4 */ - SelectDest dest; /* Destination for SELECT on rhs of INSERT */ - int iDb; /* Index of database holding TABLE */ - Db *pDb; /* The database containing table being inserted into */ - u8 useTempTable = 0; /* Store SELECT results in intermediate table */ - u8 appendFlag = 0; /* True if the insert is likely to be an append */ - u8 withoutRowid; /* 0 for normal table. 1 for WITHOUT ROWID table */ - u8 bIdListInOrder = 1; /* True if IDLIST is in table order */ - ExprList *pList = 0; /* List of VALUES() to be inserted */ - - /* Register allocations */ - int regFromSelect = 0;/* Base register for data coming from SELECT */ - int regAutoinc = 0; /* Register holding the AUTOINCREMENT counter */ - int regRowCount = 0; /* Memory cell used for the row counter */ - int regIns; /* Block of regs holding rowid+data being inserted */ - int regRowid; /* registers holding insert rowid */ - int regData; /* register holding first column to insert */ - int *aRegIdx = 0; /* One register allocated to each index */ - -#ifndef SQLITE_OMIT_TRIGGER - int isView; /* True if attempting to insert into a view */ - Trigger *pTrigger; /* List of triggers on pTab, if required */ - int tmask; /* Mask of trigger times */ -#endif - - db = pParse->db; - memset(&dest, 0, sizeof(dest)); - if( pParse->nErr || db->mallocFailed ){ - goto insert_cleanup; - } - - /* If the Select object is really just a simple VALUES() list with a - ** single row values (the common case) then keep that one row of values - ** and go ahead and discard the Select object - */ - if( pSelect && (pSelect->selFlags & SF_Values)!=0 && pSelect->pPrior==0 ){ - pList = pSelect->pEList; - pSelect->pEList = 0; - sqlite3SelectDelete(db, pSelect); - pSelect = 0; - } - - /* Locate the table into which we will be inserting new information. 
- */ - assert( pTabList->nSrc==1 ); - zTab = pTabList->a[0].zName; - if( NEVER(zTab==0) ) goto insert_cleanup; - pTab = sqlite3SrcListLookup(pParse, pTabList); - if( pTab==0 ){ - goto insert_cleanup; - } - iDb = sqlite3SchemaToIndex(db, pTab->pSchema); - assert( iDbnDb ); - pDb = &db->aDb[iDb]; - zDb = pDb->zName; - if( sqlite3AuthCheck(pParse, SQLITE_INSERT, pTab->zName, 0, zDb) ){ - goto insert_cleanup; - } - withoutRowid = !HasRowid(pTab); - - /* Figure out if we have any triggers and if the table being - ** inserted into is a view - */ -#ifndef SQLITE_OMIT_TRIGGER - pTrigger = sqlite3TriggersExist(pParse, pTab, TK_INSERT, 0, &tmask); - isView = pTab->pSelect!=0; -#else -# define pTrigger 0 -# define tmask 0 -# define isView 0 -#endif -#ifdef SQLITE_OMIT_VIEW -# undef isView -# define isView 0 -#endif - assert( (pTrigger && tmask) || (pTrigger==0 && tmask==0) ); - - /* If pTab is really a view, make sure it has been initialized. - ** ViewGetColumnNames() is a no-op if pTab is not a view. - */ - if( sqlite3ViewGetColumnNames(pParse, pTab) ){ - goto insert_cleanup; - } - - /* Cannot insert into a read-only table. - */ - if( sqlite3IsReadOnly(pParse, pTab, tmask) ){ - goto insert_cleanup; - } - - /* Allocate a VDBE - */ - v = sqlite3GetVdbe(pParse); - if( v==0 ) goto insert_cleanup; - if( pParse->nested==0 ) sqlite3VdbeCountChanges(v); - sqlite3BeginWriteOperation(pParse, pSelect || pTrigger, iDb); - -#ifndef SQLITE_OMIT_XFER_OPT - /* If the statement is of the form - ** - ** INSERT INTO SELECT * FROM ; - ** - ** Then special optimizations can be applied that make the transfer - ** very fast and which reduce fragmentation of indices. - ** - ** This is the 2nd template. - */ - if( pColumn==0 && xferOptimization(pParse, pTab, pSelect, onError, iDb) ){ - assert( !pTrigger ); - assert( pList==0 ); - goto insert_end; - } -#endif /* SQLITE_OMIT_XFER_OPT */ - - /* If this is an AUTOINCREMENT table, look up the sequence number in the - ** sqlite_sequence table and store it in memory cell regAutoinc. - */ - regAutoinc = autoIncBegin(pParse, iDb, pTab); - - /* Allocate registers for holding the rowid of the new row, - ** the content of the new row, and the assemblied row record. - */ - regRowid = regIns = pParse->nMem+1; - pParse->nMem += pTab->nCol + 1; - if( IsVirtual(pTab) ){ - regRowid++; - pParse->nMem++; - } - regData = regRowid+1; - - /* If the INSERT statement included an IDLIST term, then make sure - ** all elements of the IDLIST really are columns of the table and - ** remember the column indices. - ** - ** If the table has an INTEGER PRIMARY KEY column and that column - ** is named in the IDLIST, then record in the ipkColumn variable - ** the index into IDLIST of the primary key column. ipkColumn is - ** the index of the primary key as it appears in IDLIST, not as - ** is appears in the original table. (The index of the INTEGER - ** PRIMARY KEY in the original table is pTab->iPKey.) 
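A minimal sketch of that IDLIST resolution in isolation (illustrative only; resolve_idlist and the sample schema are invented, and the comparison is case-sensitive here whereas SQLite compares names case-insensitively):

#include <stdio.h>
#include <string.h>

/* Map each IDLIST name to its column index in the table, or -1 if the
** name does not belong to the table.  Returns the IDLIST position of the
** first unknown name, or -1 if everything resolved. */
static int resolve_idlist(const char **azTabCol, int nTabCol,
                          const char **azIdList, int nId, int *aIdx){
  int i, j;
  for(i=0; i<nId; i++){
    aIdx[i] = -1;
    for(j=0; j<nTabCol; j++){
      if( strcmp(azIdList[i], azTabCol[j])==0 ){ aIdx[i] = j; break; }
    }
    if( aIdx[i]<0 ) return i;
  }
  return -1;
}

int main(void){
  const char *tab[] = { "id", "name", "qty" };  /* CREATE TABLE t(id,name,qty) */
  const char *ids[] = { "qty", "id" };          /* INSERT INTO t(qty,id) VALUES(...) */
  int aIdx[2];
  int bad = resolve_idlist(tab, 3, ids, 2, aIdx);
  printf("bad=%d map=[%d,%d]\n", bad, aIdx[0], aIdx[1]);  /* bad=-1 map=[2,0] */
  return 0;
}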
- */ - if( pColumn ){ - for(i=0; inId; i++){ - pColumn->a[i].idx = -1; - } - for(i=0; inId; i++){ - for(j=0; jnCol; j++){ - if( sqlite3StrICmp(pColumn->a[i].zName, pTab->aCol[j].zName)==0 ){ - pColumn->a[i].idx = j; - if( i!=j ) bIdListInOrder = 0; - if( j==pTab->iPKey ){ - ipkColumn = i; assert( !withoutRowid ); - } - break; - } - } - if( j>=pTab->nCol ){ - if( sqlite3IsRowid(pColumn->a[i].zName) && !withoutRowid ){ - ipkColumn = i; - bIdListInOrder = 0; - }else{ - sqlite3ErrorMsg(pParse, "table %S has no column named %s", - pTabList, 0, pColumn->a[i].zName); - pParse->checkSchema = 1; - goto insert_cleanup; - } - } - } - } - - /* Figure out how many columns of data are supplied. If the data - ** is coming from a SELECT statement, then generate a co-routine that - ** produces a single row of the SELECT on each invocation. The - ** co-routine is the common header to the 3rd and 4th templates. - */ - if( pSelect ){ - /* Data is coming from a SELECT. Generate a co-routine to run the SELECT */ - int regYield; /* Register holding co-routine entry-point */ - int addrTop; /* Top of the co-routine */ - int rc; /* Result code */ - - regYield = ++pParse->nMem; - addrTop = sqlite3VdbeCurrentAddr(v) + 1; - sqlite3VdbeAddOp3(v, OP_InitCoroutine, regYield, 0, addrTop); - sqlite3SelectDestInit(&dest, SRT_Coroutine, regYield); - dest.iSdst = bIdListInOrder ? regData : 0; - dest.nSdst = pTab->nCol; - rc = sqlite3Select(pParse, pSelect, &dest); - regFromSelect = dest.iSdst; - assert( pParse->nErr==0 || rc ); - if( rc || db->mallocFailed ) goto insert_cleanup; - sqlite3VdbeAddOp1(v, OP_EndCoroutine, regYield); - sqlite3VdbeJumpHere(v, addrTop - 1); /* label B: */ - assert( pSelect->pEList ); - nColumn = pSelect->pEList->nExpr; - - /* Set useTempTable to TRUE if the result of the SELECT statement - ** should be written into a temporary table (template 4). Set to - ** FALSE if each output row of the SELECT can be written directly into - ** the destination table (template 3). - ** - ** A temp table must be used if the table being updated is also one - ** of the tables being read by the SELECT statement. Also use a - ** temp table in the case of row triggers. - */ - if( pTrigger || readsTable(pParse, iDb, pTab) ){ - useTempTable = 1; - } - - if( useTempTable ){ - /* Invoke the coroutine to extract information from the SELECT - ** and add it to a transient table srcTab. The code generated - ** here is from the 4th template: - ** - ** B: open temp table - ** L: yield X, goto M at EOF - ** insert row from R..R+n into temp table - ** goto L - ** M: ... 
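The yield loop used by templates 3 and 4 can be pictured as an ordinary row generator, as in the sketch below (this is not how OP_InitCoroutine/OP_Yield are implemented; RowSource and source_yield are hypothetical):

#include <stdio.h>

typedef struct RowSource {
  int next;      /* index of the next row to produce */
  int nRow;      /* total number of rows the SELECT yields */
} RowSource;

/* "yield X": produce one row, or return 0 at EOF. */
static int source_yield(RowSource *p, int *pValue){
  if( p->next>=p->nRow ) return 0;           /* at EOF goto D */
  *pValue = 100 + p->next;                   /* load values into registers */
  p->next++;
  return 1;
}

int main(void){
  RowSource src = {0, 3};                    /* A: setup for the SELECT */
  int v;
  while( source_yield(&src, &v) ){           /* C: yield X, at EOF goto D */
    printf("insert row with value %d\n", v); /* insert the result row */
  }                                          /* goto C */
  return 0;                                  /* D: cleanup */
}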
- */ - int regRec; /* Register to hold packed record */ - int regTempRowid; /* Register to hold temp table ROWID */ - int addrL; /* Label "L" */ - - srcTab = pParse->nTab++; - regRec = sqlite3GetTempReg(pParse); - regTempRowid = sqlite3GetTempReg(pParse); - sqlite3VdbeAddOp2(v, OP_OpenEphemeral, srcTab, nColumn); - addrL = sqlite3VdbeAddOp1(v, OP_Yield, dest.iSDParm); VdbeCoverage(v); - sqlite3VdbeAddOp3(v, OP_MakeRecord, regFromSelect, nColumn, regRec); - sqlite3VdbeAddOp2(v, OP_NewRowid, srcTab, regTempRowid); - sqlite3VdbeAddOp3(v, OP_Insert, srcTab, regRec, regTempRowid); - sqlite3VdbeAddOp2(v, OP_Goto, 0, addrL); - sqlite3VdbeJumpHere(v, addrL); - sqlite3ReleaseTempReg(pParse, regRec); - sqlite3ReleaseTempReg(pParse, regTempRowid); - } - }else{ - /* This is the case if the data for the INSERT is coming from a VALUES - ** clause - */ - NameContext sNC; - memset(&sNC, 0, sizeof(sNC)); - sNC.pParse = pParse; - srcTab = -1; - assert( useTempTable==0 ); - nColumn = pList ? pList->nExpr : 0; - for(i=0; ia[i].pExpr) ){ - goto insert_cleanup; - } - } - } - - /* If there is no IDLIST term but the table has an integer primary - ** key, the set the ipkColumn variable to the integer primary key - ** column index in the original table definition. - */ - if( pColumn==0 && nColumn>0 ){ - ipkColumn = pTab->iPKey; - } - - /* Make sure the number of columns in the source data matches the number - ** of columns to be inserted into the table. - */ - if( IsVirtual(pTab) ){ - for(i=0; inCol; i++){ - nHidden += (IsHiddenColumn(&pTab->aCol[i]) ? 1 : 0); - } - } - if( pColumn==0 && nColumn && nColumn!=(pTab->nCol-nHidden) ){ - sqlite3ErrorMsg(pParse, - "table %S has %d columns but %d values were supplied", - pTabList, 0, pTab->nCol-nHidden, nColumn); - goto insert_cleanup; - } - if( pColumn!=0 && nColumn!=pColumn->nId ){ - sqlite3ErrorMsg(pParse, "%d values for %d columns", nColumn, pColumn->nId); - goto insert_cleanup; - } - - /* Initialize the count of rows to be inserted - */ - if( db->flags & SQLITE_CountRows ){ - regRowCount = ++pParse->nMem; - sqlite3VdbeAddOp2(v, OP_Integer, 0, regRowCount); - } - - /* If this is not a view, open the table and and all indices */ - if( !isView ){ - int nIdx; - nIdx = sqlite3OpenTableAndIndices(pParse, pTab, OP_OpenWrite, -1, 0, - &iDataCur, &iIdxCur); - aRegIdx = sqlite3DbMallocRaw(db, sizeof(int)*(nIdx+1)); - if( aRegIdx==0 ){ - goto insert_cleanup; - } - for(i=0; inMem; - } - } - - /* This is the top of the main insertion loop */ - if( useTempTable ){ - /* This block codes the top of loop only. The complete loop is the - ** following pseudocode (template 4): - ** - ** rewind temp table, if empty goto D - ** C: loop over rows of intermediate table - ** transfer values form intermediate table into
<table>     - **         end loop - **      D: ... - */ - addrInsTop = sqlite3VdbeAddOp1(v, OP_Rewind, srcTab); VdbeCoverage(v); - addrCont = sqlite3VdbeCurrentAddr(v); - }else if( pSelect ){ - /* This block codes the top of loop only. The complete loop is the - ** following pseudocode (template 3): - ** - ** C: yield X, at EOF goto D - ** insert the select result into <table>
    from R..R+n - ** goto C - ** D: ... - */ - addrInsTop = addrCont = sqlite3VdbeAddOp1(v, OP_Yield, dest.iSDParm); - VdbeCoverage(v); - } - - /* Run the BEFORE and INSTEAD OF triggers, if there are any - */ - endOfLoop = sqlite3VdbeMakeLabel(v); - if( tmask & TRIGGER_BEFORE ){ - int regCols = sqlite3GetTempRange(pParse, pTab->nCol+1); - - /* build the NEW.* reference row. Note that if there is an INTEGER - ** PRIMARY KEY into which a NULL is being inserted, that NULL will be - ** translated into a unique ID for the row. But on a BEFORE trigger, - ** we do not know what the unique ID will be (because the insert has - ** not happened yet) so we substitute a rowid of -1 - */ - if( ipkColumn<0 ){ - sqlite3VdbeAddOp2(v, OP_Integer, -1, regCols); - }else{ - int j1; - assert( !withoutRowid ); - if( useTempTable ){ - sqlite3VdbeAddOp3(v, OP_Column, srcTab, ipkColumn, regCols); - }else{ - assert( pSelect==0 ); /* Otherwise useTempTable is true */ - sqlite3ExprCode(pParse, pList->a[ipkColumn].pExpr, regCols); - } - j1 = sqlite3VdbeAddOp1(v, OP_NotNull, regCols); VdbeCoverage(v); - sqlite3VdbeAddOp2(v, OP_Integer, -1, regCols); - sqlite3VdbeJumpHere(v, j1); - sqlite3VdbeAddOp1(v, OP_MustBeInt, regCols); VdbeCoverage(v); - } - - /* Cannot have triggers on a virtual table. If it were possible, - ** this block would have to account for hidden column. - */ - assert( !IsVirtual(pTab) ); - - /* Create the new column data - */ - for(i=0; inCol; i++){ - if( pColumn==0 ){ - j = i; - }else{ - for(j=0; jnId; j++){ - if( pColumn->a[j].idx==i ) break; - } - } - if( (!useTempTable && !pList) || (pColumn && j>=pColumn->nId) ){ - sqlite3ExprCode(pParse, pTab->aCol[i].pDflt, regCols+i+1); - }else if( useTempTable ){ - sqlite3VdbeAddOp3(v, OP_Column, srcTab, j, regCols+i+1); - }else{ - assert( pSelect==0 ); /* Otherwise useTempTable is true */ - sqlite3ExprCodeAndCache(pParse, pList->a[j].pExpr, regCols+i+1); - } - } - - /* If this is an INSERT on a view with an INSTEAD OF INSERT trigger, - ** do not attempt any conversions before assembling the record. - ** If this is a real table, attempt conversions as required by the - ** table column affinities. - */ - if( !isView ){ - sqlite3TableAffinity(v, pTab, regCols+1); - } - - /* Fire BEFORE or INSTEAD OF triggers */ - sqlite3CodeRowTrigger(pParse, pTrigger, TK_INSERT, 0, TRIGGER_BEFORE, - pTab, regCols-pTab->nCol-1, onError, endOfLoop); - - sqlite3ReleaseTempRange(pParse, regCols, pTab->nCol+1); - } - - /* Compute the content of the next row to insert into a range of - ** registers beginning at regIns. - */ - if( !isView ){ - if( IsVirtual(pTab) ){ - /* The row that the VUpdate opcode will delete: none */ - sqlite3VdbeAddOp2(v, OP_Null, 0, regIns); - } - if( ipkColumn>=0 ){ - if( useTempTable ){ - sqlite3VdbeAddOp3(v, OP_Column, srcTab, ipkColumn, regRowid); - }else if( pSelect ){ - sqlite3VdbeAddOp2(v, OP_Copy, regFromSelect+ipkColumn, regRowid); - }else{ - VdbeOp *pOp; - sqlite3ExprCode(pParse, pList->a[ipkColumn].pExpr, regRowid); - pOp = sqlite3VdbeGetOp(v, -1); - if( ALWAYS(pOp) && pOp->opcode==OP_Null && !IsVirtual(pTab) ){ - appendFlag = 1; - pOp->opcode = OP_NewRowid; - pOp->p1 = iDataCur; - pOp->p2 = regRowid; - pOp->p3 = regAutoinc; - } - } - /* If the PRIMARY KEY expression is NULL, then use OP_NewRowid - ** to generate a unique primary key value. 
- */ - if( !appendFlag ){ - int j1; - if( !IsVirtual(pTab) ){ - j1 = sqlite3VdbeAddOp1(v, OP_NotNull, regRowid); VdbeCoverage(v); - sqlite3VdbeAddOp3(v, OP_NewRowid, iDataCur, regRowid, regAutoinc); - sqlite3VdbeJumpHere(v, j1); - }else{ - j1 = sqlite3VdbeCurrentAddr(v); - sqlite3VdbeAddOp2(v, OP_IsNull, regRowid, j1+2); VdbeCoverage(v); - } - sqlite3VdbeAddOp1(v, OP_MustBeInt, regRowid); VdbeCoverage(v); - } - }else if( IsVirtual(pTab) || withoutRowid ){ - sqlite3VdbeAddOp2(v, OP_Null, 0, regRowid); - }else{ - sqlite3VdbeAddOp3(v, OP_NewRowid, iDataCur, regRowid, regAutoinc); - appendFlag = 1; - } - autoIncStep(pParse, regAutoinc, regRowid); - - /* Compute data for all columns of the new entry, beginning - ** with the first column. - */ - nHidden = 0; - for(i=0; inCol; i++){ - int iRegStore = regRowid+1+i; - if( i==pTab->iPKey ){ - /* The value of the INTEGER PRIMARY KEY column is always a NULL. - ** Whenever this column is read, the rowid will be substituted - ** in its place. Hence, fill this column with a NULL to avoid - ** taking up data space with information that will never be used. - ** As there may be shallow copies of this value, make it a soft-NULL */ - sqlite3VdbeAddOp1(v, OP_SoftNull, iRegStore); - continue; - } - if( pColumn==0 ){ - if( IsHiddenColumn(&pTab->aCol[i]) ){ - assert( IsVirtual(pTab) ); - j = -1; - nHidden++; - }else{ - j = i - nHidden; - } - }else{ - for(j=0; jnId; j++){ - if( pColumn->a[j].idx==i ) break; - } - } - if( j<0 || nColumn==0 || (pColumn && j>=pColumn->nId) ){ - sqlite3ExprCodeFactorable(pParse, pTab->aCol[i].pDflt, iRegStore); - }else if( useTempTable ){ - sqlite3VdbeAddOp3(v, OP_Column, srcTab, j, iRegStore); - }else if( pSelect ){ - if( regFromSelect!=regData ){ - sqlite3VdbeAddOp2(v, OP_SCopy, regFromSelect+j, iRegStore); - } - }else{ - sqlite3ExprCode(pParse, pList->a[j].pExpr, iRegStore); - } - } - - /* Generate code to check constraints and generate index keys and - ** do the insertion. - */ -#ifndef SQLITE_OMIT_VIRTUALTABLE - if( IsVirtual(pTab) ){ - const char *pVTab = (const char *)sqlite3GetVTable(db, pTab); - sqlite3VtabMakeWritable(pParse, pTab); - sqlite3VdbeAddOp4(v, OP_VUpdate, 1, pTab->nCol+2, regIns, pVTab, P4_VTAB); - sqlite3VdbeChangeP5(v, onError==OE_Default ? OE_Abort : onError); - sqlite3MayAbort(pParse); - }else -#endif - { - int isReplace; /* Set to true if constraints may cause a replace */ - sqlite3GenerateConstraintChecks(pParse, pTab, aRegIdx, iDataCur, iIdxCur, - regIns, 0, ipkColumn>=0, onError, endOfLoop, &isReplace - ); - sqlite3FkCheck(pParse, pTab, 0, regIns, 0, 0); - sqlite3CompleteInsertion(pParse, pTab, iDataCur, iIdxCur, - regIns, aRegIdx, 0, appendFlag, isReplace==0); - } - } - - /* Update the count of rows that are inserted - */ - if( (db->flags & SQLITE_CountRows)!=0 ){ - sqlite3VdbeAddOp2(v, OP_AddImm, regRowCount, 1); - } - - if( pTrigger ){ - /* Code AFTER triggers */ - sqlite3CodeRowTrigger(pParse, pTrigger, TK_INSERT, 0, TRIGGER_AFTER, - pTab, regData-2-pTab->nCol, onError, endOfLoop); - } - - /* The bottom of the main insertion loop, if the data source - ** is a SELECT statement. 
- */ - sqlite3VdbeResolveLabel(v, endOfLoop); - if( useTempTable ){ - sqlite3VdbeAddOp2(v, OP_Next, srcTab, addrCont); VdbeCoverage(v); - sqlite3VdbeJumpHere(v, addrInsTop); - sqlite3VdbeAddOp1(v, OP_Close, srcTab); - }else if( pSelect ){ - sqlite3VdbeAddOp2(v, OP_Goto, 0, addrCont); - sqlite3VdbeJumpHere(v, addrInsTop); - } - - if( !IsVirtual(pTab) && !isView ){ - /* Close all tables opened */ - if( iDataCurpIndex; pIdx; pIdx=pIdx->pNext, idx++){ - sqlite3VdbeAddOp1(v, OP_Close, idx+iIdxCur); - } - } - -insert_end: - /* Update the sqlite_sequence table by storing the content of the - ** maximum rowid counter values recorded while inserting into - ** autoincrement tables. - */ - if( pParse->nested==0 && pParse->pTriggerTab==0 ){ - sqlite3AutoincrementEnd(pParse); - } - - /* - ** Return the number of rows inserted. If this routine is - ** generating code because of a call to sqlite3NestedParse(), do not - ** invoke the callback function. - */ - if( (db->flags&SQLITE_CountRows) && !pParse->nested && !pParse->pTriggerTab ){ - sqlite3VdbeAddOp2(v, OP_ResultRow, regRowCount, 1); - sqlite3VdbeSetNumCols(v, 1); - sqlite3VdbeSetColName(v, 0, COLNAME_NAME, "rows inserted", SQLITE_STATIC); - } - -insert_cleanup: - sqlite3SrcListDelete(db, pTabList); - sqlite3ExprListDelete(db, pList); - sqlite3SelectDelete(db, pSelect); - sqlite3IdListDelete(db, pColumn); - sqlite3DbFree(db, aRegIdx); -} - -/* Make sure "isView" and other macros defined above are undefined. Otherwise -** thely may interfere with compilation of other functions in this file -** (or in another file, if this file becomes part of the amalgamation). */ -#ifdef isView - #undef isView -#endif -#ifdef pTrigger - #undef pTrigger -#endif -#ifdef tmask - #undef tmask -#endif - -/* -** Generate code to do constraint checks prior to an INSERT or an UPDATE -** on table pTab. -** -** The regNewData parameter is the first register in a range that contains -** the data to be inserted or the data after the update. There will be -** pTab->nCol+1 registers in this range. The first register (the one -** that regNewData points to) will contain the new rowid, or NULL in the -** case of a WITHOUT ROWID table. The second register in the range will -** contain the content of the first table column. The third register will -** contain the content of the second table column. And so forth. -** -** The regOldData parameter is similar to regNewData except that it contains -** the data prior to an UPDATE rather than afterwards. regOldData is zero -** for an INSERT. This routine can distinguish between UPDATE and INSERT by -** checking regOldData for zero. -** -** For an UPDATE, the pkChng boolean is true if the true primary key (the -** rowid for a normal table or the PRIMARY KEY for a WITHOUT ROWID table) -** might be modified by the UPDATE. If pkChng is false, then the key of -** the iDataCur content table is guaranteed to be unchanged by the UPDATE. -** -** For an INSERT, the pkChng boolean indicates whether or not the rowid -** was explicitly specified as part of the INSERT statement. If pkChng -** is zero, it means that the either rowid is computed automatically or -** that the table is a WITHOUT ROWID table and has no rowid. On an INSERT, -** pkChng will only be true if the INSERT statement provides an integer -** value for either the rowid column or its INTEGER PRIMARY KEY alias. -** -** The code generated by this routine will store new index entries into -** registers identified by aRegIdx[]. No index entry is created for -** indices where aRegIdx[i]==0. 
The order of indices in aRegIdx[] is -** the same as the order of indices on the linked list of indices -** at pTab->pIndex. -** -** The caller must have already opened writeable cursors on the main -** table and all applicable indices (that is to say, all indices for which -** aRegIdx[] is not zero). iDataCur is the cursor for the main table when -** inserting or updating a rowid table, or the cursor for the PRIMARY KEY -** index when operating on a WITHOUT ROWID table. iIdxCur is the cursor -** for the first index in the pTab->pIndex list. Cursors for other indices -** are at iIdxCur+N for the N-th element of the pTab->pIndex list. -** -** This routine also generates code to check constraints. NOT NULL, -** CHECK, and UNIQUE constraints are all checked. If a constraint fails, -** then the appropriate action is performed. There are five possible -** actions: ROLLBACK, ABORT, FAIL, REPLACE, and IGNORE. -** -** Constraint type Action What Happens -** --------------- ---------- ---------------------------------------- -** any ROLLBACK The current transaction is rolled back and -** sqlite3_step() returns immediately with a -** return code of SQLITE_CONSTRAINT. -** -** any ABORT Back out changes from the current command -** only (do not do a complete rollback) then -** cause sqlite3_step() to return immediately -** with SQLITE_CONSTRAINT. -** -** any FAIL Sqlite3_step() returns immediately with a -** return code of SQLITE_CONSTRAINT. The -** transaction is not rolled back and any -** changes to prior rows are retained. -** -** any IGNORE The attempt in insert or update the current -** row is skipped, without throwing an error. -** Processing continues with the next row. -** (There is an immediate jump to ignoreDest.) -** -** NOT NULL REPLACE The NULL value is replace by the default -** value for that column. If the default value -** is NULL, the action is the same as ABORT. -** -** UNIQUE REPLACE The other row that conflicts with the row -** being inserted is removed. -** -** CHECK REPLACE Illegal. The results in an exception. -** -** Which action to take is determined by the overrideError parameter. -** Or if overrideError==OE_Default, then the pParse->onError parameter -** is used. Or if pParse->onError==OE_Default then the onError value -** for the constraint is used. -*/ -SQLITE_PRIVATE void sqlite3GenerateConstraintChecks( - Parse *pParse, /* The parser context */ - Table *pTab, /* The table being inserted or updated */ - int *aRegIdx, /* Use register aRegIdx[i] for index i. 0 for unused */ - int iDataCur, /* Canonical data cursor (main table or PK index) */ - int iIdxCur, /* First index cursor */ - int regNewData, /* First register in a range holding values to insert */ - int regOldData, /* Previous content. 
0 for INSERTs */ - u8 pkChng, /* Non-zero if the rowid or PRIMARY KEY changed */ - u8 overrideError, /* Override onError to this if not OE_Default */ - int ignoreDest, /* Jump to this label on an OE_Ignore resolution */ - int *pbMayReplace /* OUT: Set to true if constraint may cause a replace */ -){ - Vdbe *v; /* VDBE under constrution */ - Index *pIdx; /* Pointer to one of the indices */ - Index *pPk = 0; /* The PRIMARY KEY index */ - sqlite3 *db; /* Database connection */ - int i; /* loop counter */ - int ix; /* Index loop counter */ - int nCol; /* Number of columns */ - int onError; /* Conflict resolution strategy */ - int j1; /* Addresss of jump instruction */ - int seenReplace = 0; /* True if REPLACE is used to resolve INT PK conflict */ - int nPkField; /* Number of fields in PRIMARY KEY. 1 for ROWID tables */ - int ipkTop = 0; /* Top of the rowid change constraint check */ - int ipkBottom = 0; /* Bottom of the rowid change constraint check */ - u8 isUpdate; /* True if this is an UPDATE operation */ - u8 bAffinityDone = 0; /* True if the OP_Affinity operation has been run */ - int regRowid = -1; /* Register holding ROWID value */ - - isUpdate = regOldData!=0; - db = pParse->db; - v = sqlite3GetVdbe(pParse); - assert( v!=0 ); - assert( pTab->pSelect==0 ); /* This table is not a VIEW */ - nCol = pTab->nCol; - - /* pPk is the PRIMARY KEY index for WITHOUT ROWID tables and NULL for - ** normal rowid tables. nPkField is the number of key fields in the - ** pPk index or 1 for a rowid table. In other words, nPkField is the - ** number of fields in the true primary key of the table. */ - if( HasRowid(pTab) ){ - pPk = 0; - nPkField = 1; - }else{ - pPk = sqlite3PrimaryKeyIndex(pTab); - nPkField = pPk->nKeyCol; - } - - /* Record that this module has started */ - VdbeModuleComment((v, "BEGIN: GenCnstCks(%d,%d,%d,%d,%d)", - iDataCur, iIdxCur, regNewData, regOldData, pkChng)); - - /* Test all NOT NULL constraints. - */ - for(i=0; iiPKey ){ - continue; - } - onError = pTab->aCol[i].notNull; - if( onError==OE_None ) continue; - if( overrideError!=OE_Default ){ - onError = overrideError; - }else if( onError==OE_Default ){ - onError = OE_Abort; - } - if( onError==OE_Replace && pTab->aCol[i].pDflt==0 ){ - onError = OE_Abort; - } - assert( onError==OE_Rollback || onError==OE_Abort || onError==OE_Fail - || onError==OE_Ignore || onError==OE_Replace ); - switch( onError ){ - case OE_Abort: - sqlite3MayAbort(pParse); - /* Fall through */ - case OE_Rollback: - case OE_Fail: { - char *zMsg = sqlite3MPrintf(db, "%s.%s", pTab->zName, - pTab->aCol[i].zName); - sqlite3VdbeAddOp4(v, OP_HaltIfNull, SQLITE_CONSTRAINT_NOTNULL, onError, - regNewData+1+i, zMsg, P4_DYNAMIC); - sqlite3VdbeChangeP5(v, P5_ConstraintNotNull); - VdbeCoverage(v); - break; - } - case OE_Ignore: { - sqlite3VdbeAddOp2(v, OP_IsNull, regNewData+1+i, ignoreDest); - VdbeCoverage(v); - break; - } - default: { - assert( onError==OE_Replace ); - j1 = sqlite3VdbeAddOp1(v, OP_NotNull, regNewData+1+i); VdbeCoverage(v); - sqlite3ExprCode(pParse, pTab->aCol[i].pDflt, regNewData+1+i); - sqlite3VdbeJumpHere(v, j1); - break; - } - } - } - - /* Test all CHECK constraints - */ -#ifndef SQLITE_OMIT_CHECK - if( pTab->pCheck && (db->flags & SQLITE_IgnoreChecks)==0 ){ - ExprList *pCheck = pTab->pCheck; - pParse->ckBase = regNewData+1; - onError = overrideError!=OE_Default ? 
overrideError : OE_Abort; - for(i=0; inExpr; i++){ - int allOk = sqlite3VdbeMakeLabel(v); - sqlite3ExprIfTrue(pParse, pCheck->a[i].pExpr, allOk, SQLITE_JUMPIFNULL); - if( onError==OE_Ignore ){ - sqlite3VdbeAddOp2(v, OP_Goto, 0, ignoreDest); - }else{ - char *zName = pCheck->a[i].zName; - if( zName==0 ) zName = pTab->zName; - if( onError==OE_Replace ) onError = OE_Abort; /* IMP: R-15569-63625 */ - sqlite3HaltConstraint(pParse, SQLITE_CONSTRAINT_CHECK, - onError, zName, P4_TRANSIENT, - P5_ConstraintCheck); - } - sqlite3VdbeResolveLabel(v, allOk); - } - } -#endif /* !defined(SQLITE_OMIT_CHECK) */ - - /* If rowid is changing, make sure the new rowid does not previously - ** exist in the table. - */ - if( pkChng && pPk==0 ){ - int addrRowidOk = sqlite3VdbeMakeLabel(v); - - /* Figure out what action to take in case of a rowid collision */ - onError = pTab->keyConf; - if( overrideError!=OE_Default ){ - onError = overrideError; - }else if( onError==OE_Default ){ - onError = OE_Abort; - } - - if( isUpdate ){ - /* pkChng!=0 does not mean that the rowid has change, only that - ** it might have changed. Skip the conflict logic below if the rowid - ** is unchanged. */ - sqlite3VdbeAddOp3(v, OP_Eq, regNewData, addrRowidOk, regOldData); - sqlite3VdbeChangeP5(v, SQLITE_NOTNULL); - VdbeCoverage(v); - } - - /* If the response to a rowid conflict is REPLACE but the response - ** to some other UNIQUE constraint is FAIL or IGNORE, then we need - ** to defer the running of the rowid conflict checking until after - ** the UNIQUE constraints have run. - */ - if( onError==OE_Replace && overrideError!=OE_Replace ){ - for(pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext){ - if( pIdx->onError==OE_Ignore || pIdx->onError==OE_Fail ){ - ipkTop = sqlite3VdbeAddOp0(v, OP_Goto); - break; - } - } - } - - /* Check to see if the new rowid already exists in the table. Skip - ** the following conflict logic if it does not. */ - sqlite3VdbeAddOp3(v, OP_NotExists, iDataCur, addrRowidOk, regNewData); - VdbeCoverage(v); - - /* Generate code that deals with a rowid collision */ - switch( onError ){ - default: { - onError = OE_Abort; - /* Fall thru into the next case */ - } - case OE_Rollback: - case OE_Abort: - case OE_Fail: { - sqlite3RowidConstraint(pParse, onError, pTab); - break; - } - case OE_Replace: { - /* If there are DELETE triggers on this table and the - ** recursive-triggers flag is set, call GenerateRowDelete() to - ** remove the conflicting row from the table. This will fire - ** the triggers and remove both the table and index b-tree entries. - ** - ** Otherwise, if there are no triggers or the recursive-triggers - ** flag is not set, but the table has one or more indexes, call - ** GenerateRowIndexDelete(). This removes the index b-tree entries - ** only. The table b-tree entry will be replaced by the new entry - ** when it is inserted. - ** - ** If either GenerateRowDelete() or GenerateRowIndexDelete() is called, - ** also invoke MultiWrite() to indicate that this VDBE may require - ** statement rollback (if the statement is aborted after the delete - ** takes place). Earlier versions called sqlite3MultiWrite() regardless, - ** but being more selective here allows statements like: - ** - ** REPLACE INTO t(rowid) VALUES($newrowid) - ** - ** to run without a statement journal if there are no indexes on the - ** table. 
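A minimal illustrative sketch, not part of the deleted SQLite sources: how the conflict-resolution actions described above (ABORT, IGNORE, REPLACE, ...) are selected from SQL through the public sqlite3_exec() API. The table name t and the values are invented; error handling is abbreviated.

    #include <stdio.h>
    #include <sqlite3.h>

    int main(void){
      sqlite3 *db;
      char *zErr = 0;
      sqlite3_open(":memory:", &db);
      sqlite3_exec(db,
        "CREATE TABLE t(id INTEGER PRIMARY KEY, val TEXT UNIQUE);"
        "INSERT INTO t(id, val) VALUES(1, 'a');"
        "INSERT OR IGNORE  INTO t(id, val) VALUES(2, 'a');"   /* conflict on val: new row silently skipped */
        "INSERT OR REPLACE INTO t(id, val) VALUES(3, 'a');",  /* conflict on val: row 1 is deleted first   */
        0, 0, &zErr);
      if( zErr ){ fprintf(stderr, "%s\n", zErr); sqlite3_free(zErr); }
      sqlite3_close(db);
      return 0;
    }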
- */ - Trigger *pTrigger = 0; - if( db->flags&SQLITE_RecTriggers ){ - pTrigger = sqlite3TriggersExist(pParse, pTab, TK_DELETE, 0, 0); - } - if( pTrigger || sqlite3FkRequired(pParse, pTab, 0, 0) ){ - sqlite3MultiWrite(pParse); - sqlite3GenerateRowDelete(pParse, pTab, pTrigger, iDataCur, iIdxCur, - regNewData, 1, 0, OE_Replace, 1); - }else if( pTab->pIndex ){ - sqlite3MultiWrite(pParse); - sqlite3GenerateRowIndexDelete(pParse, pTab, iDataCur, iIdxCur, 0); - } - seenReplace = 1; - break; - } - case OE_Ignore: { - /*assert( seenReplace==0 );*/ - sqlite3VdbeAddOp2(v, OP_Goto, 0, ignoreDest); - break; - } - } - sqlite3VdbeResolveLabel(v, addrRowidOk); - if( ipkTop ){ - ipkBottom = sqlite3VdbeAddOp0(v, OP_Goto); - sqlite3VdbeJumpHere(v, ipkTop); - } - } - - /* Test all UNIQUE constraints by creating entries for each UNIQUE - ** index and making sure that duplicate entries do not already exist. - ** Compute the revised record entries for indices as we go. - ** - ** This loop also handles the case of the PRIMARY KEY index for a - ** WITHOUT ROWID table. - */ - for(ix=0, pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext, ix++){ - int regIdx; /* Range of registers hold conent for pIdx */ - int regR; /* Range of registers holding conflicting PK */ - int iThisCur; /* Cursor for this UNIQUE index */ - int addrUniqueOk; /* Jump here if the UNIQUE constraint is satisfied */ - - if( aRegIdx[ix]==0 ) continue; /* Skip indices that do not change */ - if( bAffinityDone==0 ){ - sqlite3TableAffinity(v, pTab, regNewData+1); - bAffinityDone = 1; - } - iThisCur = iIdxCur+ix; - addrUniqueOk = sqlite3VdbeMakeLabel(v); - - /* Skip partial indices for which the WHERE clause is not true */ - if( pIdx->pPartIdxWhere ){ - sqlite3VdbeAddOp2(v, OP_Null, 0, aRegIdx[ix]); - pParse->ckBase = regNewData+1; - sqlite3ExprIfFalse(pParse, pIdx->pPartIdxWhere, addrUniqueOk, - SQLITE_JUMPIFNULL); - pParse->ckBase = 0; - } - - /* Create a record for this index entry as it should appear after - ** the insert or update. Store that record in the aRegIdx[ix] register - */ - regIdx = sqlite3GetTempRange(pParse, pIdx->nColumn); - for(i=0; inColumn; i++){ - int iField = pIdx->aiColumn[i]; - int x; - if( iField<0 || iField==pTab->iPKey ){ - if( regRowid==regIdx+i ) continue; /* ROWID already in regIdx+i */ - x = regNewData; - regRowid = pIdx->pPartIdxWhere ? -1 : regIdx+i; - }else{ - x = iField + regNewData + 1; - } - sqlite3VdbeAddOp2(v, OP_SCopy, x, regIdx+i); - VdbeComment((v, "%s", iField<0 ? "rowid" : pTab->aCol[iField].zName)); - } - sqlite3VdbeAddOp3(v, OP_MakeRecord, regIdx, pIdx->nColumn, aRegIdx[ix]); - VdbeComment((v, "for %s", pIdx->zName)); - sqlite3ExprCacheAffinityChange(pParse, regIdx, pIdx->nColumn); - - /* In an UPDATE operation, if this index is the PRIMARY KEY index - ** of a WITHOUT ROWID table and there has been no change the - ** primary key, then no collision is possible. The collision detection - ** logic below can all be skipped. 
*/ - if( isUpdate && pPk==pIdx && pkChng==0 ){ - sqlite3VdbeResolveLabel(v, addrUniqueOk); - continue; - } - - /* Find out what action to take in case there is a uniqueness conflict */ - onError = pIdx->onError; - if( onError==OE_None ){ - sqlite3ReleaseTempRange(pParse, regIdx, pIdx->nColumn); - sqlite3VdbeResolveLabel(v, addrUniqueOk); - continue; /* pIdx is not a UNIQUE index */ - } - if( overrideError!=OE_Default ){ - onError = overrideError; - }else if( onError==OE_Default ){ - onError = OE_Abort; - } - - /* Check to see if the new index entry will be unique */ - sqlite3VdbeAddOp4Int(v, OP_NoConflict, iThisCur, addrUniqueOk, - regIdx, pIdx->nKeyCol); VdbeCoverage(v); - - /* Generate code to handle collisions */ - regR = (pIdx==pPk) ? regIdx : sqlite3GetTempRange(pParse, nPkField); - if( isUpdate || onError==OE_Replace ){ - if( HasRowid(pTab) ){ - sqlite3VdbeAddOp2(v, OP_IdxRowid, iThisCur, regR); - /* Conflict only if the rowid of the existing index entry - ** is different from old-rowid */ - if( isUpdate ){ - sqlite3VdbeAddOp3(v, OP_Eq, regR, addrUniqueOk, regOldData); - sqlite3VdbeChangeP5(v, SQLITE_NOTNULL); - VdbeCoverage(v); - } - }else{ - int x; - /* Extract the PRIMARY KEY from the end of the index entry and - ** store it in registers regR..regR+nPk-1 */ - if( pIdx!=pPk ){ - for(i=0; inKeyCol; i++){ - x = sqlite3ColumnOfIndex(pIdx, pPk->aiColumn[i]); - sqlite3VdbeAddOp3(v, OP_Column, iThisCur, x, regR+i); - VdbeComment((v, "%s.%s", pTab->zName, - pTab->aCol[pPk->aiColumn[i]].zName)); - } - } - if( isUpdate ){ - /* If currently processing the PRIMARY KEY of a WITHOUT ROWID - ** table, only conflict if the new PRIMARY KEY values are actually - ** different from the old. - ** - ** For a UNIQUE index, only conflict if the PRIMARY KEY values - ** of the matched index row are different from the original PRIMARY - ** KEY values of this row before the update. */ - int addrJump = sqlite3VdbeCurrentAddr(v)+pPk->nKeyCol; - int op = OP_Ne; - int regCmp = (IsPrimaryKeyIndex(pIdx) ? 
regIdx : regR); - - for(i=0; inKeyCol; i++){ - char *p4 = (char*)sqlite3LocateCollSeq(pParse, pPk->azColl[i]); - x = pPk->aiColumn[i]; - if( i==(pPk->nKeyCol-1) ){ - addrJump = addrUniqueOk; - op = OP_Eq; - } - sqlite3VdbeAddOp4(v, op, - regOldData+1+x, addrJump, regCmp+i, p4, P4_COLLSEQ - ); - sqlite3VdbeChangeP5(v, SQLITE_NOTNULL); - VdbeCoverageIf(v, op==OP_Eq); - VdbeCoverageIf(v, op==OP_Ne); - } - } - } - } - - /* Generate code that executes if the new index entry is not unique */ - assert( onError==OE_Rollback || onError==OE_Abort || onError==OE_Fail - || onError==OE_Ignore || onError==OE_Replace ); - switch( onError ){ - case OE_Rollback: - case OE_Abort: - case OE_Fail: { - sqlite3UniqueConstraint(pParse, onError, pIdx); - break; - } - case OE_Ignore: { - sqlite3VdbeAddOp2(v, OP_Goto, 0, ignoreDest); - break; - } - default: { - Trigger *pTrigger = 0; - assert( onError==OE_Replace ); - sqlite3MultiWrite(pParse); - if( db->flags&SQLITE_RecTriggers ){ - pTrigger = sqlite3TriggersExist(pParse, pTab, TK_DELETE, 0, 0); - } - sqlite3GenerateRowDelete(pParse, pTab, pTrigger, iDataCur, iIdxCur, - regR, nPkField, 0, OE_Replace, pIdx==pPk); - seenReplace = 1; - break; - } - } - sqlite3VdbeResolveLabel(v, addrUniqueOk); - sqlite3ReleaseTempRange(pParse, regIdx, pIdx->nColumn); - if( regR!=regIdx ) sqlite3ReleaseTempRange(pParse, regR, nPkField); - } - if( ipkTop ){ - sqlite3VdbeAddOp2(v, OP_Goto, 0, ipkTop+1); - sqlite3VdbeJumpHere(v, ipkBottom); - } - - *pbMayReplace = seenReplace; - VdbeModuleComment((v, "END: GenCnstCks(%d)", seenReplace)); -} - -/* -** This routine generates code to finish the INSERT or UPDATE operation -** that was started by a prior call to sqlite3GenerateConstraintChecks. -** A consecutive range of registers starting at regNewData contains the -** rowid and the content to be inserted. -** -** The arguments to this routine should be the same as the first six -** arguments to sqlite3GenerateConstraintChecks. -*/ -SQLITE_PRIVATE void sqlite3CompleteInsertion( - Parse *pParse, /* The parser context */ - Table *pTab, /* the table into which we are inserting */ - int iDataCur, /* Cursor of the canonical data source */ - int iIdxCur, /* First index cursor */ - int regNewData, /* Range of content */ - int *aRegIdx, /* Register used by each index. 
0 for unused indices */ - int isUpdate, /* True for UPDATE, False for INSERT */ - int appendBias, /* True if this is likely to be an append */ - int useSeekResult /* True to set the USESEEKRESULT flag on OP_[Idx]Insert */ -){ - Vdbe *v; /* Prepared statements under construction */ - Index *pIdx; /* An index being inserted or updated */ - u8 pik_flags; /* flag values passed to the btree insert */ - int regData; /* Content registers (after the rowid) */ - int regRec; /* Register holding assemblied record for the table */ - int i; /* Loop counter */ - u8 bAffinityDone = 0; /* True if OP_Affinity has been run already */ - - v = sqlite3GetVdbe(pParse); - assert( v!=0 ); - assert( pTab->pSelect==0 ); /* This table is not a VIEW */ - for(i=0, pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext, i++){ - if( aRegIdx[i]==0 ) continue; - bAffinityDone = 1; - if( pIdx->pPartIdxWhere ){ - sqlite3VdbeAddOp2(v, OP_IsNull, aRegIdx[i], sqlite3VdbeCurrentAddr(v)+2); - VdbeCoverage(v); - } - sqlite3VdbeAddOp2(v, OP_IdxInsert, iIdxCur+i, aRegIdx[i]); - pik_flags = 0; - if( useSeekResult ) pik_flags = OPFLAG_USESEEKRESULT; - if( IsPrimaryKeyIndex(pIdx) && !HasRowid(pTab) ){ - assert( pParse->nested==0 ); - pik_flags |= OPFLAG_NCHANGE; - } - if( pik_flags ) sqlite3VdbeChangeP5(v, pik_flags); - } - if( !HasRowid(pTab) ) return; - regData = regNewData + 1; - regRec = sqlite3GetTempReg(pParse); - sqlite3VdbeAddOp3(v, OP_MakeRecord, regData, pTab->nCol, regRec); - if( !bAffinityDone ) sqlite3TableAffinity(v, pTab, 0); - sqlite3ExprCacheAffinityChange(pParse, regData, pTab->nCol); - if( pParse->nested ){ - pik_flags = 0; - }else{ - pik_flags = OPFLAG_NCHANGE; - pik_flags |= (isUpdate?OPFLAG_ISUPDATE:OPFLAG_LASTROWID); - } - if( appendBias ){ - pik_flags |= OPFLAG_APPEND; - } - if( useSeekResult ){ - pik_flags |= OPFLAG_USESEEKRESULT; - } - sqlite3VdbeAddOp3(v, OP_Insert, iDataCur, regRec, regNewData); - if( !pParse->nested ){ - sqlite3VdbeChangeP4(v, -1, pTab->zName, P4_TRANSIENT); - } - sqlite3VdbeChangeP5(v, pik_flags); -} - -/* -** Allocate cursors for the pTab table and all its indices and generate -** code to open and initialized those cursors. -** -** The cursor for the object that contains the complete data (normally -** the table itself, but the PRIMARY KEY index in the case of a WITHOUT -** ROWID table) is returned in *piDataCur. The first index cursor is -** returned in *piIdxCur. The number of indices is returned. -** -** Use iBase as the first cursor (either the *piDataCur for rowid tables -** or the first index for WITHOUT ROWID tables) if it is non-negative. -** If iBase is negative, then allocate the next available cursor. -** -** For a rowid table, *piDataCur will be exactly one less than *piIdxCur. -** For a WITHOUT ROWID table, *piDataCur will be somewhere in the range -** of *piIdxCurs, depending on where the PRIMARY KEY index appears on the -** pTab->pIndex list. 
-*/ -SQLITE_PRIVATE int sqlite3OpenTableAndIndices( - Parse *pParse, /* Parsing context */ - Table *pTab, /* Table to be opened */ - int op, /* OP_OpenRead or OP_OpenWrite */ - int iBase, /* Use this for the table cursor, if there is one */ - u8 *aToOpen, /* If not NULL: boolean for each table and index */ - int *piDataCur, /* Write the database source cursor number here */ - int *piIdxCur /* Write the first index cursor number here */ -){ - int i; - int iDb; - int iDataCur; - Index *pIdx; - Vdbe *v; - - assert( op==OP_OpenRead || op==OP_OpenWrite ); - if( IsVirtual(pTab) ){ - assert( aToOpen==0 ); - *piDataCur = 0; - *piIdxCur = 1; - return 0; - } - iDb = sqlite3SchemaToIndex(pParse->db, pTab->pSchema); - v = sqlite3GetVdbe(pParse); - assert( v!=0 ); - if( iBase<0 ) iBase = pParse->nTab; - iDataCur = iBase++; - if( piDataCur ) *piDataCur = iDataCur; - if( HasRowid(pTab) && (aToOpen==0 || aToOpen[0]) ){ - sqlite3OpenTable(pParse, iDataCur, iDb, pTab, op); - }else{ - sqlite3TableLock(pParse, iDb, pTab->tnum, op==OP_OpenWrite, pTab->zName); - } - if( piIdxCur ) *piIdxCur = iBase; - for(i=0, pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext, i++){ - int iIdxCur = iBase++; - assert( pIdx->pSchema==pTab->pSchema ); - if( IsPrimaryKeyIndex(pIdx) && !HasRowid(pTab) && piDataCur ){ - *piDataCur = iIdxCur; - } - if( aToOpen==0 || aToOpen[i+1] ){ - sqlite3VdbeAddOp3(v, op, iIdxCur, pIdx->tnum, iDb); - sqlite3VdbeSetP4KeyInfo(pParse, pIdx); - VdbeComment((v, "%s", pIdx->zName)); - } - } - if( iBase>pParse->nTab ) pParse->nTab = iBase; - return i; -} - - -#ifdef SQLITE_TEST -/* -** The following global variable is incremented whenever the -** transfer optimization is used. This is used for testing -** purposes only - to make sure the transfer optimization really -** is happening when it is suppose to. -*/ -SQLITE_API int sqlite3_xferopt_count; -#endif /* SQLITE_TEST */ - - -#ifndef SQLITE_OMIT_XFER_OPT -/* -** Check to collation names to see if they are compatible. -*/ -static int xferCompatibleCollation(const char *z1, const char *z2){ - if( z1==0 ){ - return z2==0; - } - if( z2==0 ){ - return 0; - } - return sqlite3StrICmp(z1, z2)==0; -} - - -/* -** Check to see if index pSrc is compatible as a source of data -** for index pDest in an insert transfer optimization. 
The rules -** for a compatible index: -** -** * The index is over the same set of columns -** * The same DESC and ASC markings occurs on all columns -** * The same onError processing (OE_Abort, OE_Ignore, etc) -** * The same collating sequence on each column -** * The index has the exact same WHERE clause -*/ -static int xferCompatibleIndex(Index *pDest, Index *pSrc){ - int i; - assert( pDest && pSrc ); - assert( pDest->pTable!=pSrc->pTable ); - if( pDest->nKeyCol!=pSrc->nKeyCol ){ - return 0; /* Different number of columns */ - } - if( pDest->onError!=pSrc->onError ){ - return 0; /* Different conflict resolution strategies */ - } - for(i=0; inKeyCol; i++){ - if( pSrc->aiColumn[i]!=pDest->aiColumn[i] ){ - return 0; /* Different columns indexed */ - } - if( pSrc->aSortOrder[i]!=pDest->aSortOrder[i] ){ - return 0; /* Different sort orders */ - } - if( !xferCompatibleCollation(pSrc->azColl[i],pDest->azColl[i]) ){ - return 0; /* Different collating sequences */ - } - } - if( sqlite3ExprCompare(pSrc->pPartIdxWhere, pDest->pPartIdxWhere, -1) ){ - return 0; /* Different WHERE clauses */ - } - - /* If no test above fails then the indices must be compatible */ - return 1; -} - -/* -** Attempt the transfer optimization on INSERTs of the form -** -** INSERT INTO tab1 SELECT * FROM tab2; -** -** The xfer optimization transfers raw records from tab2 over to tab1. -** Columns are not decoded and reassemblied, which greatly improves -** performance. Raw index records are transferred in the same way. -** -** The xfer optimization is only attempted if tab1 and tab2 are compatible. -** There are lots of rules for determining compatibility - see comments -** embedded in the code for details. -** -** This routine returns TRUE if the optimization is guaranteed to be used. -** Sometimes the xfer optimization will only work if the destination table -** is empty - a factor that can only be determined at run-time. In that -** case, this routine generates code for the xfer optimization but also -** does a test to see if the destination table is empty and jumps over the -** xfer optimization code if the test fails. In that case, this routine -** returns FALSE so that the caller will know to go ahead and generate -** an unoptimized transfer. This routine also returns FALSE if there -** is no chance that the xfer optimization can be applied. -** -** This optimization is particularly useful at making VACUUM run faster. 
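A minimal illustrative sketch, not part of the deleted SQLite sources: the statement shape that can qualify for the xfer optimization described above, a bare INSERT INTO ... SELECT * between two identically declared tables. The names src and dst are invented; whether raw records are actually transferred still depends on the compatibility and emptiness checks listed in the comments.

    #include <sqlite3.h>

    /* Copy src into dst. Both tables are declared identically, so the final
    ** "INSERT INTO dst SELECT * FROM src" may be satisfied by copying raw
    ** records instead of decoding and re-encoding each row. */
    static int copy_table(sqlite3 *db){
      return sqlite3_exec(db,
        "CREATE TABLE src(a INTEGER PRIMARY KEY, b TEXT);"
        "CREATE TABLE dst(a INTEGER PRIMARY KEY, b TEXT);"
        "INSERT INTO src(a,b) VALUES(1,'x');"
        "INSERT INTO src(a,b) VALUES(2,'y');"
        "INSERT INTO dst SELECT * FROM src;",
        0, 0, 0);
    }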
-*/ -static int xferOptimization( - Parse *pParse, /* Parser context */ - Table *pDest, /* The table we are inserting into */ - Select *pSelect, /* A SELECT statement to use as the data source */ - int onError, /* How to handle constraint errors */ - int iDbDest /* The database of pDest */ -){ - ExprList *pEList; /* The result set of the SELECT */ - Table *pSrc; /* The table in the FROM clause of SELECT */ - Index *pSrcIdx, *pDestIdx; /* Source and destination indices */ - struct SrcList_item *pItem; /* An element of pSelect->pSrc */ - int i; /* Loop counter */ - int iDbSrc; /* The database of pSrc */ - int iSrc, iDest; /* Cursors from source and destination */ - int addr1, addr2; /* Loop addresses */ - int emptyDestTest = 0; /* Address of test for empty pDest */ - int emptySrcTest = 0; /* Address of test for empty pSrc */ - Vdbe *v; /* The VDBE we are building */ - int regAutoinc; /* Memory register used by AUTOINC */ - int destHasUniqueIdx = 0; /* True if pDest has a UNIQUE index */ - int regData, regRowid; /* Registers holding data and rowid */ - - if( pSelect==0 ){ - return 0; /* Must be of the form INSERT INTO ... SELECT ... */ - } - if( pParse->pWith || pSelect->pWith ){ - /* Do not attempt to process this query if there are an WITH clauses - ** attached to it. Proceeding may generate a false "no such table: xxx" - ** error if pSelect reads from a CTE named "xxx". */ - return 0; - } - if( sqlite3TriggerList(pParse, pDest) ){ - return 0; /* tab1 must not have triggers */ - } -#ifndef SQLITE_OMIT_VIRTUALTABLE - if( pDest->tabFlags & TF_Virtual ){ - return 0; /* tab1 must not be a virtual table */ - } -#endif - if( onError==OE_Default ){ - if( pDest->iPKey>=0 ) onError = pDest->keyConf; - if( onError==OE_Default ) onError = OE_Abort; - } - assert(pSelect->pSrc); /* allocated even if there is no FROM clause */ - if( pSelect->pSrc->nSrc!=1 ){ - return 0; /* FROM clause must have exactly one term */ - } - if( pSelect->pSrc->a[0].pSelect ){ - return 0; /* FROM clause cannot contain a subquery */ - } - if( pSelect->pWhere ){ - return 0; /* SELECT may not have a WHERE clause */ - } - if( pSelect->pOrderBy ){ - return 0; /* SELECT may not have an ORDER BY clause */ - } - /* Do not need to test for a HAVING clause. If HAVING is present but - ** there is no ORDER BY, we will get an error. */ - if( pSelect->pGroupBy ){ - return 0; /* SELECT may not have a GROUP BY clause */ - } - if( pSelect->pLimit ){ - return 0; /* SELECT may not have a LIMIT clause */ - } - assert( pSelect->pOffset==0 ); /* Must be so if pLimit==0 */ - if( pSelect->pPrior ){ - return 0; /* SELECT may not be a compound query */ - } - if( pSelect->selFlags & SF_Distinct ){ - return 0; /* SELECT may not be DISTINCT */ - } - pEList = pSelect->pEList; - assert( pEList!=0 ); - if( pEList->nExpr!=1 ){ - return 0; /* The result set must have exactly one column */ - } - assert( pEList->a[0].pExpr ); - if( pEList->a[0].pExpr->op!=TK_ALL ){ - return 0; /* The result set must be the special operator "*" */ - } - - /* At this point we have established that the statement is of the - ** correct syntactic form to participate in this optimization. Now - ** we have to check the semantics. 
- */ - pItem = pSelect->pSrc->a; - pSrc = sqlite3LocateTableItem(pParse, 0, pItem); - if( pSrc==0 ){ - return 0; /* FROM clause does not contain a real table */ - } - if( pSrc==pDest ){ - return 0; /* tab1 and tab2 may not be the same table */ - } - if( HasRowid(pDest)!=HasRowid(pSrc) ){ - return 0; /* source and destination must both be WITHOUT ROWID or not */ - } -#ifndef SQLITE_OMIT_VIRTUALTABLE - if( pSrc->tabFlags & TF_Virtual ){ - return 0; /* tab2 must not be a virtual table */ - } -#endif - if( pSrc->pSelect ){ - return 0; /* tab2 may not be a view */ - } - if( pDest->nCol!=pSrc->nCol ){ - return 0; /* Number of columns must be the same in tab1 and tab2 */ - } - if( pDest->iPKey!=pSrc->iPKey ){ - return 0; /* Both tables must have the same INTEGER PRIMARY KEY */ - } - for(i=0; inCol; i++){ - Column *pDestCol = &pDest->aCol[i]; - Column *pSrcCol = &pSrc->aCol[i]; - if( pDestCol->affinity!=pSrcCol->affinity ){ - return 0; /* Affinity must be the same on all columns */ - } - if( !xferCompatibleCollation(pDestCol->zColl, pSrcCol->zColl) ){ - return 0; /* Collating sequence must be the same on all columns */ - } - if( pDestCol->notNull && !pSrcCol->notNull ){ - return 0; /* tab2 must be NOT NULL if tab1 is */ - } - /* Default values for second and subsequent columns need to match. */ - if( i>0 - && ((pDestCol->zDflt==0)!=(pSrcCol->zDflt==0) - || (pDestCol->zDflt && strcmp(pDestCol->zDflt, pSrcCol->zDflt)!=0)) - ){ - return 0; /* Default values must be the same for all columns */ - } - } - for(pDestIdx=pDest->pIndex; pDestIdx; pDestIdx=pDestIdx->pNext){ - if( pDestIdx->onError!=OE_None ){ - destHasUniqueIdx = 1; - } - for(pSrcIdx=pSrc->pIndex; pSrcIdx; pSrcIdx=pSrcIdx->pNext){ - if( xferCompatibleIndex(pDestIdx, pSrcIdx) ) break; - } - if( pSrcIdx==0 ){ - return 0; /* pDestIdx has no corresponding index in pSrc */ - } - } -#ifndef SQLITE_OMIT_CHECK - if( pDest->pCheck && sqlite3ExprListCompare(pSrc->pCheck,pDest->pCheck,-1) ){ - return 0; /* Tables have different CHECK constraints. Ticket #2252 */ - } -#endif -#ifndef SQLITE_OMIT_FOREIGN_KEY - /* Disallow the transfer optimization if the destination table constains - ** any foreign key constraints. This is more restrictive than necessary. - ** But the main beneficiary of the transfer optimization is the VACUUM - ** command, and the VACUUM command disables foreign key constraints. So - ** the extra complication to make this rule less restrictive is probably - ** not worth the effort. Ticket [6284df89debdfa61db8073e062908af0c9b6118e] - */ - if( (pParse->db->flags & SQLITE_ForeignKeys)!=0 && pDest->pFKey!=0 ){ - return 0; - } -#endif - if( (pParse->db->flags & SQLITE_CountRows)!=0 ){ - return 0; /* xfer opt does not play well with PRAGMA count_changes */ - } - - /* If we get this far, it means that the xfer optimization is at - ** least a possibility, though it might only work if the destination - ** table (tab1) is initially empty. 
- */ -#ifdef SQLITE_TEST - sqlite3_xferopt_count++; -#endif - iDbSrc = sqlite3SchemaToIndex(pParse->db, pSrc->pSchema); - v = sqlite3GetVdbe(pParse); - sqlite3CodeVerifySchema(pParse, iDbSrc); - iSrc = pParse->nTab++; - iDest = pParse->nTab++; - regAutoinc = autoIncBegin(pParse, iDbDest, pDest); - regData = sqlite3GetTempReg(pParse); - regRowid = sqlite3GetTempReg(pParse); - sqlite3OpenTable(pParse, iDest, iDbDest, pDest, OP_OpenWrite); - assert( HasRowid(pDest) || destHasUniqueIdx ); - if( (pDest->iPKey<0 && pDest->pIndex!=0) /* (1) */ - || destHasUniqueIdx /* (2) */ - || (onError!=OE_Abort && onError!=OE_Rollback) /* (3) */ - ){ - /* In some circumstances, we are able to run the xfer optimization - ** only if the destination table is initially empty. This code makes - ** that determination. Conditions under which the destination must - ** be empty: - ** - ** (1) There is no INTEGER PRIMARY KEY but there are indices. - ** (If the destination is not initially empty, the rowid fields - ** of index entries might need to change.) - ** - ** (2) The destination has a unique index. (The xfer optimization - ** is unable to test uniqueness.) - ** - ** (3) onError is something other than OE_Abort and OE_Rollback. - */ - addr1 = sqlite3VdbeAddOp2(v, OP_Rewind, iDest, 0); VdbeCoverage(v); - emptyDestTest = sqlite3VdbeAddOp2(v, OP_Goto, 0, 0); - sqlite3VdbeJumpHere(v, addr1); - } - if( HasRowid(pSrc) ){ - sqlite3OpenTable(pParse, iSrc, iDbSrc, pSrc, OP_OpenRead); - emptySrcTest = sqlite3VdbeAddOp2(v, OP_Rewind, iSrc, 0); VdbeCoverage(v); - if( pDest->iPKey>=0 ){ - addr1 = sqlite3VdbeAddOp2(v, OP_Rowid, iSrc, regRowid); - addr2 = sqlite3VdbeAddOp3(v, OP_NotExists, iDest, 0, regRowid); - VdbeCoverage(v); - sqlite3RowidConstraint(pParse, onError, pDest); - sqlite3VdbeJumpHere(v, addr2); - autoIncStep(pParse, regAutoinc, regRowid); - }else if( pDest->pIndex==0 ){ - addr1 = sqlite3VdbeAddOp2(v, OP_NewRowid, iDest, regRowid); - }else{ - addr1 = sqlite3VdbeAddOp2(v, OP_Rowid, iSrc, regRowid); - assert( (pDest->tabFlags & TF_Autoincrement)==0 ); - } - sqlite3VdbeAddOp2(v, OP_RowData, iSrc, regData); - sqlite3VdbeAddOp3(v, OP_Insert, iDest, regData, regRowid); - sqlite3VdbeChangeP5(v, OPFLAG_NCHANGE|OPFLAG_LASTROWID|OPFLAG_APPEND); - sqlite3VdbeChangeP4(v, -1, pDest->zName, 0); - sqlite3VdbeAddOp2(v, OP_Next, iSrc, addr1); VdbeCoverage(v); - sqlite3VdbeAddOp2(v, OP_Close, iSrc, 0); - sqlite3VdbeAddOp2(v, OP_Close, iDest, 0); - }else{ - sqlite3TableLock(pParse, iDbDest, pDest->tnum, 1, pDest->zName); - sqlite3TableLock(pParse, iDbSrc, pSrc->tnum, 0, pSrc->zName); - } - for(pDestIdx=pDest->pIndex; pDestIdx; pDestIdx=pDestIdx->pNext){ - for(pSrcIdx=pSrc->pIndex; ALWAYS(pSrcIdx); pSrcIdx=pSrcIdx->pNext){ - if( xferCompatibleIndex(pDestIdx, pSrcIdx) ) break; - } - assert( pSrcIdx ); - sqlite3VdbeAddOp3(v, OP_OpenRead, iSrc, pSrcIdx->tnum, iDbSrc); - sqlite3VdbeSetP4KeyInfo(pParse, pSrcIdx); - VdbeComment((v, "%s", pSrcIdx->zName)); - sqlite3VdbeAddOp3(v, OP_OpenWrite, iDest, pDestIdx->tnum, iDbDest); - sqlite3VdbeSetP4KeyInfo(pParse, pDestIdx); - sqlite3VdbeChangeP5(v, OPFLAG_BULKCSR); - VdbeComment((v, "%s", pDestIdx->zName)); - addr1 = sqlite3VdbeAddOp2(v, OP_Rewind, iSrc, 0); VdbeCoverage(v); - sqlite3VdbeAddOp2(v, OP_RowKey, iSrc, regData); - sqlite3VdbeAddOp3(v, OP_IdxInsert, iDest, regData, 1); - sqlite3VdbeAddOp2(v, OP_Next, iSrc, addr1+1); VdbeCoverage(v); - sqlite3VdbeJumpHere(v, addr1); - sqlite3VdbeAddOp2(v, OP_Close, iSrc, 0); - sqlite3VdbeAddOp2(v, OP_Close, iDest, 0); - } - if( emptySrcTest ) 
sqlite3VdbeJumpHere(v, emptySrcTest); - sqlite3ReleaseTempReg(pParse, regRowid); - sqlite3ReleaseTempReg(pParse, regData); - if( emptyDestTest ){ - sqlite3VdbeAddOp2(v, OP_Halt, SQLITE_OK, 0); - sqlite3VdbeJumpHere(v, emptyDestTest); - sqlite3VdbeAddOp2(v, OP_Close, iDest, 0); - return 0; - }else{ - return 1; - } -} -#endif /* SQLITE_OMIT_XFER_OPT */ - -/************** End of insert.c **********************************************/ -/************** Begin file legacy.c ******************************************/ -/* -** 2001 September 15 -** -** The author disclaims copyright to this source code. In place of -** a legal notice, here is a blessing: -** -** May you do good and not evil. -** May you find forgiveness for yourself and forgive others. -** May you share freely, never taking more than you give. -** -************************************************************************* -** Main file for the SQLite library. The routines in this file -** implement the programmer interface to the library. Routines in -** other files are for internal use by SQLite and should not be -** accessed by users of the library. -*/ - - -/* -** Execute SQL code. Return one of the SQLITE_ success/failure -** codes. Also write an error message into memory obtained from -** malloc() and make *pzErrMsg point to that message. -** -** If the SQL is a query, then for each row in the query result -** the xCallback() function is called. pArg becomes the first -** argument to xCallback(). If xCallback=NULL then no callback -** is invoked, even for queries. -*/ -SQLITE_API int sqlite3_exec( - sqlite3 *db, /* The database on which the SQL executes */ - const char *zSql, /* The SQL to be executed */ - sqlite3_callback xCallback, /* Invoke this callback routine */ - void *pArg, /* First argument to xCallback() */ - char **pzErrMsg /* Write error messages here */ -){ - int rc = SQLITE_OK; /* Return code */ - const char *zLeftover; /* Tail of unprocessed SQL */ - sqlite3_stmt *pStmt = 0; /* The current SQL statement */ - char **azCols = 0; /* Names of result columns */ - int callbackIsInit; /* True if callback data is initialized */ - - if( !sqlite3SafetyCheckOk(db) ) return SQLITE_MISUSE_BKPT; - if( zSql==0 ) zSql = ""; - - sqlite3_mutex_enter(db->mutex); - sqlite3Error(db, SQLITE_OK, 0); - while( rc==SQLITE_OK && zSql[0] ){ - int nCol; - char **azVals = 0; - - pStmt = 0; - rc = sqlite3_prepare_v2(db, zSql, -1, &pStmt, &zLeftover); - assert( rc==SQLITE_OK || pStmt==0 ); - if( rc!=SQLITE_OK ){ - continue; - } - if( !pStmt ){ - /* this happens for a comment or white-space */ - zSql = zLeftover; - continue; - } - - callbackIsInit = 0; - nCol = sqlite3_column_count(pStmt); - - while( 1 ){ - int i; - rc = sqlite3_step(pStmt); - - /* Invoke the callback function if required */ - if( xCallback && (SQLITE_ROW==rc || - (SQLITE_DONE==rc && !callbackIsInit - && db->flags&SQLITE_NullCallback)) ){ - if( !callbackIsInit ){ - azCols = sqlite3DbMallocZero(db, 2*nCol*sizeof(const char*) + 1); - if( azCols==0 ){ - goto exec_out; - } - for(i=0; imallocFailed = 1; - goto exec_out; - } - } - } - if( xCallback(pArg, nCol, azVals, azCols) ){ - rc = SQLITE_ABORT; - sqlite3VdbeFinalize((Vdbe *)pStmt); - pStmt = 0; - sqlite3Error(db, SQLITE_ABORT, 0); - goto exec_out; - } - } - - if( rc!=SQLITE_ROW ){ - rc = sqlite3VdbeFinalize((Vdbe *)pStmt); - pStmt = 0; - zSql = zLeftover; - while( sqlite3Isspace(zSql[0]) ) zSql++; - break; - } - } - - sqlite3DbFree(db, azCols); - azCols = 0; - } - -exec_out: - if( pStmt ) sqlite3VdbeFinalize((Vdbe *)pStmt); - 
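A minimal illustrative sketch, not part of the deleted SQLite sources: typical caller-side use of the sqlite3_exec() interface implemented here. The callback runs once per result row; returning non-zero makes sqlite3_exec() stop and return SQLITE_ABORT. The names print_row and t are invented.

    #include <stdio.h>
    #include <sqlite3.h>

    static int print_row(void *pArg, int nCol, char **azVal, char **azCol){
      int i;
      (void)pArg;
      for(i=0; i<nCol; i++){
        printf("%s=%s  ", azCol[i], azVal[i] ? azVal[i] : "NULL");
      }
      printf("\n");
      return 0;   /* non-zero here would abort the query */
    }

    /* usage:  sqlite3_exec(db, "SELECT * FROM t;", print_row, 0, &zErrMsg);  */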
sqlite3DbFree(db, azCols); - - rc = sqlite3ApiExit(db, rc); - if( rc!=SQLITE_OK && ALWAYS(rc==sqlite3_errcode(db)) && pzErrMsg ){ - int nErrMsg = 1 + sqlite3Strlen30(sqlite3_errmsg(db)); - *pzErrMsg = sqlite3Malloc(nErrMsg); - if( *pzErrMsg ){ - memcpy(*pzErrMsg, sqlite3_errmsg(db), nErrMsg); - }else{ - rc = SQLITE_NOMEM; - sqlite3Error(db, SQLITE_NOMEM, 0); - } - }else if( pzErrMsg ){ - *pzErrMsg = 0; - } - - assert( (rc&db->errMask)==rc ); - sqlite3_mutex_leave(db->mutex); - return rc; -} - -/************** End of legacy.c **********************************************/ -/************** Begin file loadext.c *****************************************/ -/* -** 2006 June 7 -** -** The author disclaims copyright to this source code. In place of -** a legal notice, here is a blessing: -** -** May you do good and not evil. -** May you find forgiveness for yourself and forgive others. -** May you share freely, never taking more than you give. -** -************************************************************************* -** This file contains code used to dynamically load extensions into -** the SQLite library. -*/ - -#ifndef SQLITE_CORE - #define SQLITE_CORE 1 /* Disable the API redefinition in sqlite3ext.h */ -#endif -/************** Include sqlite3ext.h in the middle of loadext.c **************/ -/************** Begin file sqlite3ext.h **************************************/ -/* -** 2006 June 7 -** -** The author disclaims copyright to this source code. In place of -** a legal notice, here is a blessing: -** -** May you do good and not evil. -** May you find forgiveness for yourself and forgive others. -** May you share freely, never taking more than you give. -** -************************************************************************* -** This header file defines the SQLite interface for use by -** shared libraries that want to be imported as extensions into -** an SQLite instance. Shared libraries that intend to be loaded -** as extensions by SQLite should #include this file instead of -** sqlite3.h. -*/ -#ifndef _SQLITE3EXT_H_ -#define _SQLITE3EXT_H_ - -typedef struct sqlite3_api_routines sqlite3_api_routines; - -/* -** The following structure holds pointers to all of the SQLite API -** routines. -** -** WARNING: In order to maintain backwards compatibility, add new -** interfaces to the end of this structure only. If you insert new -** interfaces in the middle of this structure, then older different -** versions of SQLite will not be able to load each others' shared -** libraries! 
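A minimal illustrative sketch, not part of the deleted SQLite sources: the conventional skeleton of a loadable extension built against sqlite3ext.h, relying on the sqlite3_api_routines redirection described above. The entry-point body is a placeholder.

    #include <sqlite3ext.h>
    SQLITE_EXTENSION_INIT1

    #ifdef _WIN32
    __declspec(dllexport)
    #endif
    int sqlite3_extension_init(
      sqlite3 *db,
      char **pzErrMsg,
      const sqlite3_api_routines *pApi
    ){
      SQLITE_EXTENSION_INIT2(pApi);   /* route sqlite3_* calls through the host's API table */
      (void)db; (void)pzErrMsg;
      /* register application-defined functions, collations, vtabs, ... here */
      return SQLITE_OK;
    }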
-*/ -struct sqlite3_api_routines { - void * (*aggregate_context)(sqlite3_context*,int nBytes); - int (*aggregate_count)(sqlite3_context*); - int (*bind_blob)(sqlite3_stmt*,int,const void*,int n,void(*)(void*)); - int (*bind_double)(sqlite3_stmt*,int,double); - int (*bind_int)(sqlite3_stmt*,int,int); - int (*bind_int64)(sqlite3_stmt*,int,sqlite_int64); - int (*bind_null)(sqlite3_stmt*,int); - int (*bind_parameter_count)(sqlite3_stmt*); - int (*bind_parameter_index)(sqlite3_stmt*,const char*zName); - const char * (*bind_parameter_name)(sqlite3_stmt*,int); - int (*bind_text)(sqlite3_stmt*,int,const char*,int n,void(*)(void*)); - int (*bind_text16)(sqlite3_stmt*,int,const void*,int,void(*)(void*)); - int (*bind_value)(sqlite3_stmt*,int,const sqlite3_value*); - int (*busy_handler)(sqlite3*,int(*)(void*,int),void*); - int (*busy_timeout)(sqlite3*,int ms); - int (*changes)(sqlite3*); - int (*close)(sqlite3*); - int (*collation_needed)(sqlite3*,void*,void(*)(void*,sqlite3*, - int eTextRep,const char*)); - int (*collation_needed16)(sqlite3*,void*,void(*)(void*,sqlite3*, - int eTextRep,const void*)); - const void * (*column_blob)(sqlite3_stmt*,int iCol); - int (*column_bytes)(sqlite3_stmt*,int iCol); - int (*column_bytes16)(sqlite3_stmt*,int iCol); - int (*column_count)(sqlite3_stmt*pStmt); - const char * (*column_database_name)(sqlite3_stmt*,int); - const void * (*column_database_name16)(sqlite3_stmt*,int); - const char * (*column_decltype)(sqlite3_stmt*,int i); - const void * (*column_decltype16)(sqlite3_stmt*,int); - double (*column_double)(sqlite3_stmt*,int iCol); - int (*column_int)(sqlite3_stmt*,int iCol); - sqlite_int64 (*column_int64)(sqlite3_stmt*,int iCol); - const char * (*column_name)(sqlite3_stmt*,int); - const void * (*column_name16)(sqlite3_stmt*,int); - const char * (*column_origin_name)(sqlite3_stmt*,int); - const void * (*column_origin_name16)(sqlite3_stmt*,int); - const char * (*column_table_name)(sqlite3_stmt*,int); - const void * (*column_table_name16)(sqlite3_stmt*,int); - const unsigned char * (*column_text)(sqlite3_stmt*,int iCol); - const void * (*column_text16)(sqlite3_stmt*,int iCol); - int (*column_type)(sqlite3_stmt*,int iCol); - sqlite3_value* (*column_value)(sqlite3_stmt*,int iCol); - void * (*commit_hook)(sqlite3*,int(*)(void*),void*); - int (*complete)(const char*sql); - int (*complete16)(const void*sql); - int (*create_collation)(sqlite3*,const char*,int,void*, - int(*)(void*,int,const void*,int,const void*)); - int (*create_collation16)(sqlite3*,const void*,int,void*, - int(*)(void*,int,const void*,int,const void*)); - int (*create_function)(sqlite3*,const char*,int,int,void*, - void (*xFunc)(sqlite3_context*,int,sqlite3_value**), - void (*xStep)(sqlite3_context*,int,sqlite3_value**), - void (*xFinal)(sqlite3_context*)); - int (*create_function16)(sqlite3*,const void*,int,int,void*, - void (*xFunc)(sqlite3_context*,int,sqlite3_value**), - void (*xStep)(sqlite3_context*,int,sqlite3_value**), - void (*xFinal)(sqlite3_context*)); - int (*create_module)(sqlite3*,const char*,const sqlite3_module*,void*); - int (*data_count)(sqlite3_stmt*pStmt); - sqlite3 * (*db_handle)(sqlite3_stmt*); - int (*declare_vtab)(sqlite3*,const char*); - int (*enable_shared_cache)(int); - int (*errcode)(sqlite3*db); - const char * (*errmsg)(sqlite3*); - const void * (*errmsg16)(sqlite3*); - int (*exec)(sqlite3*,const char*,sqlite3_callback,void*,char**); - int (*expired)(sqlite3_stmt*); - int (*finalize)(sqlite3_stmt*pStmt); - void (*free)(void*); - void (*free_table)(char**result); - int 
(*get_autocommit)(sqlite3*); - void * (*get_auxdata)(sqlite3_context*,int); - int (*get_table)(sqlite3*,const char*,char***,int*,int*,char**); - int (*global_recover)(void); - void (*interruptx)(sqlite3*); - sqlite_int64 (*last_insert_rowid)(sqlite3*); - const char * (*libversion)(void); - int (*libversion_number)(void); - void *(*malloc)(int); - char * (*mprintf)(const char*,...); - int (*open)(const char*,sqlite3**); - int (*open16)(const void*,sqlite3**); - int (*prepare)(sqlite3*,const char*,int,sqlite3_stmt**,const char**); - int (*prepare16)(sqlite3*,const void*,int,sqlite3_stmt**,const void**); - void * (*profile)(sqlite3*,void(*)(void*,const char*,sqlite_uint64),void*); - void (*progress_handler)(sqlite3*,int,int(*)(void*),void*); - void *(*realloc)(void*,int); - int (*reset)(sqlite3_stmt*pStmt); - void (*result_blob)(sqlite3_context*,const void*,int,void(*)(void*)); - void (*result_double)(sqlite3_context*,double); - void (*result_error)(sqlite3_context*,const char*,int); - void (*result_error16)(sqlite3_context*,const void*,int); - void (*result_int)(sqlite3_context*,int); - void (*result_int64)(sqlite3_context*,sqlite_int64); - void (*result_null)(sqlite3_context*); - void (*result_text)(sqlite3_context*,const char*,int,void(*)(void*)); - void (*result_text16)(sqlite3_context*,const void*,int,void(*)(void*)); - void (*result_text16be)(sqlite3_context*,const void*,int,void(*)(void*)); - void (*result_text16le)(sqlite3_context*,const void*,int,void(*)(void*)); - void (*result_value)(sqlite3_context*,sqlite3_value*); - void * (*rollback_hook)(sqlite3*,void(*)(void*),void*); - int (*set_authorizer)(sqlite3*,int(*)(void*,int,const char*,const char*, - const char*,const char*),void*); - void (*set_auxdata)(sqlite3_context*,int,void*,void (*)(void*)); - char * (*snprintf)(int,char*,const char*,...); - int (*step)(sqlite3_stmt*); - int (*table_column_metadata)(sqlite3*,const char*,const char*,const char*, - char const**,char const**,int*,int*,int*); - void (*thread_cleanup)(void); - int (*total_changes)(sqlite3*); - void * (*trace)(sqlite3*,void(*xTrace)(void*,const char*),void*); - int (*transfer_bindings)(sqlite3_stmt*,sqlite3_stmt*); - void * (*update_hook)(sqlite3*,void(*)(void*,int ,char const*,char const*, - sqlite_int64),void*); - void * (*user_data)(sqlite3_context*); - const void * (*value_blob)(sqlite3_value*); - int (*value_bytes)(sqlite3_value*); - int (*value_bytes16)(sqlite3_value*); - double (*value_double)(sqlite3_value*); - int (*value_int)(sqlite3_value*); - sqlite_int64 (*value_int64)(sqlite3_value*); - int (*value_numeric_type)(sqlite3_value*); - const unsigned char * (*value_text)(sqlite3_value*); - const void * (*value_text16)(sqlite3_value*); - const void * (*value_text16be)(sqlite3_value*); - const void * (*value_text16le)(sqlite3_value*); - int (*value_type)(sqlite3_value*); - char *(*vmprintf)(const char*,va_list); - /* Added ??? 
*/ - int (*overload_function)(sqlite3*, const char *zFuncName, int nArg); - /* Added by 3.3.13 */ - int (*prepare_v2)(sqlite3*,const char*,int,sqlite3_stmt**,const char**); - int (*prepare16_v2)(sqlite3*,const void*,int,sqlite3_stmt**,const void**); - int (*clear_bindings)(sqlite3_stmt*); - /* Added by 3.4.1 */ - int (*create_module_v2)(sqlite3*,const char*,const sqlite3_module*,void*, - void (*xDestroy)(void *)); - /* Added by 3.5.0 */ - int (*bind_zeroblob)(sqlite3_stmt*,int,int); - int (*blob_bytes)(sqlite3_blob*); - int (*blob_close)(sqlite3_blob*); - int (*blob_open)(sqlite3*,const char*,const char*,const char*,sqlite3_int64, - int,sqlite3_blob**); - int (*blob_read)(sqlite3_blob*,void*,int,int); - int (*blob_write)(sqlite3_blob*,const void*,int,int); - int (*create_collation_v2)(sqlite3*,const char*,int,void*, - int(*)(void*,int,const void*,int,const void*), - void(*)(void*)); - int (*file_control)(sqlite3*,const char*,int,void*); - sqlite3_int64 (*memory_highwater)(int); - sqlite3_int64 (*memory_used)(void); - sqlite3_mutex *(*mutex_alloc)(int); - void (*mutex_enter)(sqlite3_mutex*); - void (*mutex_free)(sqlite3_mutex*); - void (*mutex_leave)(sqlite3_mutex*); - int (*mutex_try)(sqlite3_mutex*); - int (*open_v2)(const char*,sqlite3**,int,const char*); - int (*release_memory)(int); - void (*result_error_nomem)(sqlite3_context*); - void (*result_error_toobig)(sqlite3_context*); - int (*sleep)(int); - void (*soft_heap_limit)(int); - sqlite3_vfs *(*vfs_find)(const char*); - int (*vfs_register)(sqlite3_vfs*,int); - int (*vfs_unregister)(sqlite3_vfs*); - int (*xthreadsafe)(void); - void (*result_zeroblob)(sqlite3_context*,int); - void (*result_error_code)(sqlite3_context*,int); - int (*test_control)(int, ...); - void (*randomness)(int,void*); - sqlite3 *(*context_db_handle)(sqlite3_context*); - int (*extended_result_codes)(sqlite3*,int); - int (*limit)(sqlite3*,int,int); - sqlite3_stmt *(*next_stmt)(sqlite3*,sqlite3_stmt*); - const char *(*sql)(sqlite3_stmt*); - int (*status)(int,int*,int*,int); - int (*backup_finish)(sqlite3_backup*); - sqlite3_backup *(*backup_init)(sqlite3*,const char*,sqlite3*,const char*); - int (*backup_pagecount)(sqlite3_backup*); - int (*backup_remaining)(sqlite3_backup*); - int (*backup_step)(sqlite3_backup*,int); - const char *(*compileoption_get)(int); - int (*compileoption_used)(const char*); - int (*create_function_v2)(sqlite3*,const char*,int,int,void*, - void (*xFunc)(sqlite3_context*,int,sqlite3_value**), - void (*xStep)(sqlite3_context*,int,sqlite3_value**), - void (*xFinal)(sqlite3_context*), - void(*xDestroy)(void*)); - int (*db_config)(sqlite3*,int,...); - sqlite3_mutex *(*db_mutex)(sqlite3*); - int (*db_status)(sqlite3*,int,int*,int*,int); - int (*extended_errcode)(sqlite3*); - void (*log)(int,const char*,...); - sqlite3_int64 (*soft_heap_limit64)(sqlite3_int64); - const char *(*sourceid)(void); - int (*stmt_status)(sqlite3_stmt*,int,int); - int (*strnicmp)(const char*,const char*,int); - int (*unlock_notify)(sqlite3*,void(*)(void**,int),void*); - int (*wal_autocheckpoint)(sqlite3*,int); - int (*wal_checkpoint)(sqlite3*,const char*); - void *(*wal_hook)(sqlite3*,int(*)(void*,sqlite3*,const char*,int),void*); - int (*blob_reopen)(sqlite3_blob*,sqlite3_int64); - int (*vtab_config)(sqlite3*,int op,...); - int (*vtab_on_conflict)(sqlite3*); - /* Version 3.7.16 and later */ - int (*close_v2)(sqlite3*); - const char *(*db_filename)(sqlite3*,const char*); - int (*db_readonly)(sqlite3*,const char*); - int (*db_release_memory)(sqlite3*); - const char 
*(*errstr)(int); - int (*stmt_busy)(sqlite3_stmt*); - int (*stmt_readonly)(sqlite3_stmt*); - int (*stricmp)(const char*,const char*); - int (*uri_boolean)(const char*,const char*,int); - sqlite3_int64 (*uri_int64)(const char*,const char*,sqlite3_int64); - const char *(*uri_parameter)(const char*,const char*); - char *(*vsnprintf)(int,char*,const char*,va_list); - int (*wal_checkpoint_v2)(sqlite3*,const char*,int,int*,int*); -}; - -/* -** The following macros redefine the API routines so that they are -** redirected throught the global sqlite3_api structure. -** -** This header file is also used by the loadext.c source file -** (part of the main SQLite library - not an extension) so that -** it can get access to the sqlite3_api_routines structure -** definition. But the main library does not want to redefine -** the API. So the redefinition macros are only valid if the -** SQLITE_CORE macros is undefined. -*/ -#ifndef SQLITE_CORE -#define sqlite3_aggregate_context sqlite3_api->aggregate_context -#ifndef SQLITE_OMIT_DEPRECATED -#define sqlite3_aggregate_count sqlite3_api->aggregate_count -#endif -#define sqlite3_bind_blob sqlite3_api->bind_blob -#define sqlite3_bind_double sqlite3_api->bind_double -#define sqlite3_bind_int sqlite3_api->bind_int -#define sqlite3_bind_int64 sqlite3_api->bind_int64 -#define sqlite3_bind_null sqlite3_api->bind_null -#define sqlite3_bind_parameter_count sqlite3_api->bind_parameter_count -#define sqlite3_bind_parameter_index sqlite3_api->bind_parameter_index -#define sqlite3_bind_parameter_name sqlite3_api->bind_parameter_name -#define sqlite3_bind_text sqlite3_api->bind_text -#define sqlite3_bind_text16 sqlite3_api->bind_text16 -#define sqlite3_bind_value sqlite3_api->bind_value -#define sqlite3_busy_handler sqlite3_api->busy_handler -#define sqlite3_busy_timeout sqlite3_api->busy_timeout -#define sqlite3_changes sqlite3_api->changes -#define sqlite3_close sqlite3_api->close -#define sqlite3_collation_needed sqlite3_api->collation_needed -#define sqlite3_collation_needed16 sqlite3_api->collation_needed16 -#define sqlite3_column_blob sqlite3_api->column_blob -#define sqlite3_column_bytes sqlite3_api->column_bytes -#define sqlite3_column_bytes16 sqlite3_api->column_bytes16 -#define sqlite3_column_count sqlite3_api->column_count -#define sqlite3_column_database_name sqlite3_api->column_database_name -#define sqlite3_column_database_name16 sqlite3_api->column_database_name16 -#define sqlite3_column_decltype sqlite3_api->column_decltype -#define sqlite3_column_decltype16 sqlite3_api->column_decltype16 -#define sqlite3_column_double sqlite3_api->column_double -#define sqlite3_column_int sqlite3_api->column_int -#define sqlite3_column_int64 sqlite3_api->column_int64 -#define sqlite3_column_name sqlite3_api->column_name -#define sqlite3_column_name16 sqlite3_api->column_name16 -#define sqlite3_column_origin_name sqlite3_api->column_origin_name -#define sqlite3_column_origin_name16 sqlite3_api->column_origin_name16 -#define sqlite3_column_table_name sqlite3_api->column_table_name -#define sqlite3_column_table_name16 sqlite3_api->column_table_name16 -#define sqlite3_column_text sqlite3_api->column_text -#define sqlite3_column_text16 sqlite3_api->column_text16 -#define sqlite3_column_type sqlite3_api->column_type -#define sqlite3_column_value sqlite3_api->column_value -#define sqlite3_commit_hook sqlite3_api->commit_hook -#define sqlite3_complete sqlite3_api->complete -#define sqlite3_complete16 sqlite3_api->complete16 -#define sqlite3_create_collation 
sqlite3_api->create_collation -#define sqlite3_create_collation16 sqlite3_api->create_collation16 -#define sqlite3_create_function sqlite3_api->create_function -#define sqlite3_create_function16 sqlite3_api->create_function16 -#define sqlite3_create_module sqlite3_api->create_module -#define sqlite3_create_module_v2 sqlite3_api->create_module_v2 -#define sqlite3_data_count sqlite3_api->data_count -#define sqlite3_db_handle sqlite3_api->db_handle -#define sqlite3_declare_vtab sqlite3_api->declare_vtab -#define sqlite3_enable_shared_cache sqlite3_api->enable_shared_cache -#define sqlite3_errcode sqlite3_api->errcode -#define sqlite3_errmsg sqlite3_api->errmsg -#define sqlite3_errmsg16 sqlite3_api->errmsg16 -#define sqlite3_exec sqlite3_api->exec -#ifndef SQLITE_OMIT_DEPRECATED -#define sqlite3_expired sqlite3_api->expired -#endif -#define sqlite3_finalize sqlite3_api->finalize -#define sqlite3_free sqlite3_api->free -#define sqlite3_free_table sqlite3_api->free_table -#define sqlite3_get_autocommit sqlite3_api->get_autocommit -#define sqlite3_get_auxdata sqlite3_api->get_auxdata -#define sqlite3_get_table sqlite3_api->get_table -#ifndef SQLITE_OMIT_DEPRECATED -#define sqlite3_global_recover sqlite3_api->global_recover -#endif -#define sqlite3_interrupt sqlite3_api->interruptx -#define sqlite3_last_insert_rowid sqlite3_api->last_insert_rowid -#define sqlite3_libversion sqlite3_api->libversion -#define sqlite3_libversion_number sqlite3_api->libversion_number -#define sqlite3_malloc sqlite3_api->malloc -#define sqlite3_mprintf sqlite3_api->mprintf -#define sqlite3_open sqlite3_api->open -#define sqlite3_open16 sqlite3_api->open16 -#define sqlite3_prepare sqlite3_api->prepare -#define sqlite3_prepare16 sqlite3_api->prepare16 -#define sqlite3_prepare_v2 sqlite3_api->prepare_v2 -#define sqlite3_prepare16_v2 sqlite3_api->prepare16_v2 -#define sqlite3_profile sqlite3_api->profile -#define sqlite3_progress_handler sqlite3_api->progress_handler -#define sqlite3_realloc sqlite3_api->realloc -#define sqlite3_reset sqlite3_api->reset -#define sqlite3_result_blob sqlite3_api->result_blob -#define sqlite3_result_double sqlite3_api->result_double -#define sqlite3_result_error sqlite3_api->result_error -#define sqlite3_result_error16 sqlite3_api->result_error16 -#define sqlite3_result_int sqlite3_api->result_int -#define sqlite3_result_int64 sqlite3_api->result_int64 -#define sqlite3_result_null sqlite3_api->result_null -#define sqlite3_result_text sqlite3_api->result_text -#define sqlite3_result_text16 sqlite3_api->result_text16 -#define sqlite3_result_text16be sqlite3_api->result_text16be -#define sqlite3_result_text16le sqlite3_api->result_text16le -#define sqlite3_result_value sqlite3_api->result_value -#define sqlite3_rollback_hook sqlite3_api->rollback_hook -#define sqlite3_set_authorizer sqlite3_api->set_authorizer -#define sqlite3_set_auxdata sqlite3_api->set_auxdata -#define sqlite3_snprintf sqlite3_api->snprintf -#define sqlite3_step sqlite3_api->step -#define sqlite3_table_column_metadata sqlite3_api->table_column_metadata -#define sqlite3_thread_cleanup sqlite3_api->thread_cleanup -#define sqlite3_total_changes sqlite3_api->total_changes -#define sqlite3_trace sqlite3_api->trace -#ifndef SQLITE_OMIT_DEPRECATED -#define sqlite3_transfer_bindings sqlite3_api->transfer_bindings -#endif -#define sqlite3_update_hook sqlite3_api->update_hook -#define sqlite3_user_data sqlite3_api->user_data -#define sqlite3_value_blob sqlite3_api->value_blob -#define sqlite3_value_bytes sqlite3_api->value_bytes 
-#define sqlite3_value_bytes16 sqlite3_api->value_bytes16 -#define sqlite3_value_double sqlite3_api->value_double -#define sqlite3_value_int sqlite3_api->value_int -#define sqlite3_value_int64 sqlite3_api->value_int64 -#define sqlite3_value_numeric_type sqlite3_api->value_numeric_type -#define sqlite3_value_text sqlite3_api->value_text -#define sqlite3_value_text16 sqlite3_api->value_text16 -#define sqlite3_value_text16be sqlite3_api->value_text16be -#define sqlite3_value_text16le sqlite3_api->value_text16le -#define sqlite3_value_type sqlite3_api->value_type -#define sqlite3_vmprintf sqlite3_api->vmprintf -#define sqlite3_overload_function sqlite3_api->overload_function -#define sqlite3_prepare_v2 sqlite3_api->prepare_v2 -#define sqlite3_prepare16_v2 sqlite3_api->prepare16_v2 -#define sqlite3_clear_bindings sqlite3_api->clear_bindings -#define sqlite3_bind_zeroblob sqlite3_api->bind_zeroblob -#define sqlite3_blob_bytes sqlite3_api->blob_bytes -#define sqlite3_blob_close sqlite3_api->blob_close -#define sqlite3_blob_open sqlite3_api->blob_open -#define sqlite3_blob_read sqlite3_api->blob_read -#define sqlite3_blob_write sqlite3_api->blob_write -#define sqlite3_create_collation_v2 sqlite3_api->create_collation_v2 -#define sqlite3_file_control sqlite3_api->file_control -#define sqlite3_memory_highwater sqlite3_api->memory_highwater -#define sqlite3_memory_used sqlite3_api->memory_used -#define sqlite3_mutex_alloc sqlite3_api->mutex_alloc -#define sqlite3_mutex_enter sqlite3_api->mutex_enter -#define sqlite3_mutex_free sqlite3_api->mutex_free -#define sqlite3_mutex_leave sqlite3_api->mutex_leave -#define sqlite3_mutex_try sqlite3_api->mutex_try -#define sqlite3_open_v2 sqlite3_api->open_v2 -#define sqlite3_release_memory sqlite3_api->release_memory -#define sqlite3_result_error_nomem sqlite3_api->result_error_nomem -#define sqlite3_result_error_toobig sqlite3_api->result_error_toobig -#define sqlite3_sleep sqlite3_api->sleep -#define sqlite3_soft_heap_limit sqlite3_api->soft_heap_limit -#define sqlite3_vfs_find sqlite3_api->vfs_find -#define sqlite3_vfs_register sqlite3_api->vfs_register -#define sqlite3_vfs_unregister sqlite3_api->vfs_unregister -#define sqlite3_threadsafe sqlite3_api->xthreadsafe -#define sqlite3_result_zeroblob sqlite3_api->result_zeroblob -#define sqlite3_result_error_code sqlite3_api->result_error_code -#define sqlite3_test_control sqlite3_api->test_control -#define sqlite3_randomness sqlite3_api->randomness -#define sqlite3_context_db_handle sqlite3_api->context_db_handle -#define sqlite3_extended_result_codes sqlite3_api->extended_result_codes -#define sqlite3_limit sqlite3_api->limit -#define sqlite3_next_stmt sqlite3_api->next_stmt -#define sqlite3_sql sqlite3_api->sql -#define sqlite3_status sqlite3_api->status -#define sqlite3_backup_finish sqlite3_api->backup_finish -#define sqlite3_backup_init sqlite3_api->backup_init -#define sqlite3_backup_pagecount sqlite3_api->backup_pagecount -#define sqlite3_backup_remaining sqlite3_api->backup_remaining -#define sqlite3_backup_step sqlite3_api->backup_step -#define sqlite3_compileoption_get sqlite3_api->compileoption_get -#define sqlite3_compileoption_used sqlite3_api->compileoption_used -#define sqlite3_create_function_v2 sqlite3_api->create_function_v2 -#define sqlite3_db_config sqlite3_api->db_config -#define sqlite3_db_mutex sqlite3_api->db_mutex -#define sqlite3_db_status sqlite3_api->db_status -#define sqlite3_extended_errcode sqlite3_api->extended_errcode -#define sqlite3_log sqlite3_api->log -#define 
sqlite3_soft_heap_limit64 sqlite3_api->soft_heap_limit64 -#define sqlite3_sourceid sqlite3_api->sourceid -#define sqlite3_stmt_status sqlite3_api->stmt_status -#define sqlite3_strnicmp sqlite3_api->strnicmp -#define sqlite3_unlock_notify sqlite3_api->unlock_notify -#define sqlite3_wal_autocheckpoint sqlite3_api->wal_autocheckpoint -#define sqlite3_wal_checkpoint sqlite3_api->wal_checkpoint -#define sqlite3_wal_hook sqlite3_api->wal_hook -#define sqlite3_blob_reopen sqlite3_api->blob_reopen -#define sqlite3_vtab_config sqlite3_api->vtab_config -#define sqlite3_vtab_on_conflict sqlite3_api->vtab_on_conflict -/* Version 3.7.16 and later */ -#define sqlite3_close_v2 sqlite3_api->close_v2 -#define sqlite3_db_filename sqlite3_api->db_filename -#define sqlite3_db_readonly sqlite3_api->db_readonly -#define sqlite3_db_release_memory sqlite3_api->db_release_memory -#define sqlite3_errstr sqlite3_api->errstr -#define sqlite3_stmt_busy sqlite3_api->stmt_busy -#define sqlite3_stmt_readonly sqlite3_api->stmt_readonly -#define sqlite3_stricmp sqlite3_api->stricmp -#define sqlite3_uri_boolean sqlite3_api->uri_boolean -#define sqlite3_uri_int64 sqlite3_api->uri_int64 -#define sqlite3_uri_parameter sqlite3_api->uri_parameter -#define sqlite3_uri_vsnprintf sqlite3_api->vsnprintf -#define sqlite3_wal_checkpoint_v2 sqlite3_api->wal_checkpoint_v2 -#endif /* SQLITE_CORE */ - -#ifndef SQLITE_CORE - /* This case when the file really is being compiled as a loadable - ** extension */ -# define SQLITE_EXTENSION_INIT1 const sqlite3_api_routines *sqlite3_api=0; -# define SQLITE_EXTENSION_INIT2(v) sqlite3_api=v; -# define SQLITE_EXTENSION_INIT3 \ - extern const sqlite3_api_routines *sqlite3_api; -#else - /* This case when the file is being statically linked into the - ** application */ -# define SQLITE_EXTENSION_INIT1 /*no-op*/ -# define SQLITE_EXTENSION_INIT2(v) (void)v; /* unused parameter */ -# define SQLITE_EXTENSION_INIT3 /*no-op*/ -#endif - -#endif /* _SQLITE3EXT_H_ */ - -/************** End of sqlite3ext.h ******************************************/ -/************** Continuing where we left off in loadext.c ********************/ -/* #include */ - -#ifndef SQLITE_OMIT_LOAD_EXTENSION - -/* -** Some API routines are omitted when various features are -** excluded from a build of SQLite. Substitute a NULL pointer -** for any missing APIs. 
-*/ -#ifndef SQLITE_ENABLE_COLUMN_METADATA -# define sqlite3_column_database_name 0 -# define sqlite3_column_database_name16 0 -# define sqlite3_column_table_name 0 -# define sqlite3_column_table_name16 0 -# define sqlite3_column_origin_name 0 -# define sqlite3_column_origin_name16 0 -# define sqlite3_table_column_metadata 0 -#endif - -#ifdef SQLITE_OMIT_AUTHORIZATION -# define sqlite3_set_authorizer 0 -#endif - -#ifdef SQLITE_OMIT_UTF16 -# define sqlite3_bind_text16 0 -# define sqlite3_collation_needed16 0 -# define sqlite3_column_decltype16 0 -# define sqlite3_column_name16 0 -# define sqlite3_column_text16 0 -# define sqlite3_complete16 0 -# define sqlite3_create_collation16 0 -# define sqlite3_create_function16 0 -# define sqlite3_errmsg16 0 -# define sqlite3_open16 0 -# define sqlite3_prepare16 0 -# define sqlite3_prepare16_v2 0 -# define sqlite3_result_error16 0 -# define sqlite3_result_text16 0 -# define sqlite3_result_text16be 0 -# define sqlite3_result_text16le 0 -# define sqlite3_value_text16 0 -# define sqlite3_value_text16be 0 -# define sqlite3_value_text16le 0 -# define sqlite3_column_database_name16 0 -# define sqlite3_column_table_name16 0 -# define sqlite3_column_origin_name16 0 -#endif - -#ifdef SQLITE_OMIT_COMPLETE -# define sqlite3_complete 0 -# define sqlite3_complete16 0 -#endif - -#ifdef SQLITE_OMIT_DECLTYPE -# define sqlite3_column_decltype16 0 -# define sqlite3_column_decltype 0 -#endif - -#ifdef SQLITE_OMIT_PROGRESS_CALLBACK -# define sqlite3_progress_handler 0 -#endif - -#ifdef SQLITE_OMIT_VIRTUALTABLE -# define sqlite3_create_module 0 -# define sqlite3_create_module_v2 0 -# define sqlite3_declare_vtab 0 -# define sqlite3_vtab_config 0 -# define sqlite3_vtab_on_conflict 0 -#endif - -#ifdef SQLITE_OMIT_SHARED_CACHE -# define sqlite3_enable_shared_cache 0 -#endif - -#ifdef SQLITE_OMIT_TRACE -# define sqlite3_profile 0 -# define sqlite3_trace 0 -#endif - -#ifdef SQLITE_OMIT_GET_TABLE -# define sqlite3_free_table 0 -# define sqlite3_get_table 0 -#endif - -#ifdef SQLITE_OMIT_INCRBLOB -#define sqlite3_bind_zeroblob 0 -#define sqlite3_blob_bytes 0 -#define sqlite3_blob_close 0 -#define sqlite3_blob_open 0 -#define sqlite3_blob_read 0 -#define sqlite3_blob_write 0 -#define sqlite3_blob_reopen 0 -#endif - -/* -** The following structure contains pointers to all SQLite API routines. -** A pointer to this structure is passed into extensions when they are -** loaded so that the extension can make calls back into the SQLite -** library. -** -** When adding new APIs, add them to the bottom of this structure -** in order to preserve backwards compatibility. -** -** Extensions that use newer APIs should first call the -** sqlite3_libversion_number() to make sure that the API they -** intend to use is supported by the library. Extensions should -** also check to make sure that the pointer to the function is -** not NULL before calling it. 
-*/ -static const sqlite3_api_routines sqlite3Apis = { - sqlite3_aggregate_context, -#ifndef SQLITE_OMIT_DEPRECATED - sqlite3_aggregate_count, -#else - 0, -#endif - sqlite3_bind_blob, - sqlite3_bind_double, - sqlite3_bind_int, - sqlite3_bind_int64, - sqlite3_bind_null, - sqlite3_bind_parameter_count, - sqlite3_bind_parameter_index, - sqlite3_bind_parameter_name, - sqlite3_bind_text, - sqlite3_bind_text16, - sqlite3_bind_value, - sqlite3_busy_handler, - sqlite3_busy_timeout, - sqlite3_changes, - sqlite3_close, - sqlite3_collation_needed, - sqlite3_collation_needed16, - sqlite3_column_blob, - sqlite3_column_bytes, - sqlite3_column_bytes16, - sqlite3_column_count, - sqlite3_column_database_name, - sqlite3_column_database_name16, - sqlite3_column_decltype, - sqlite3_column_decltype16, - sqlite3_column_double, - sqlite3_column_int, - sqlite3_column_int64, - sqlite3_column_name, - sqlite3_column_name16, - sqlite3_column_origin_name, - sqlite3_column_origin_name16, - sqlite3_column_table_name, - sqlite3_column_table_name16, - sqlite3_column_text, - sqlite3_column_text16, - sqlite3_column_type, - sqlite3_column_value, - sqlite3_commit_hook, - sqlite3_complete, - sqlite3_complete16, - sqlite3_create_collation, - sqlite3_create_collation16, - sqlite3_create_function, - sqlite3_create_function16, - sqlite3_create_module, - sqlite3_data_count, - sqlite3_db_handle, - sqlite3_declare_vtab, - sqlite3_enable_shared_cache, - sqlite3_errcode, - sqlite3_errmsg, - sqlite3_errmsg16, - sqlite3_exec, -#ifndef SQLITE_OMIT_DEPRECATED - sqlite3_expired, -#else - 0, -#endif - sqlite3_finalize, - sqlite3_free, - sqlite3_free_table, - sqlite3_get_autocommit, - sqlite3_get_auxdata, - sqlite3_get_table, - 0, /* Was sqlite3_global_recover(), but that function is deprecated */ - sqlite3_interrupt, - sqlite3_last_insert_rowid, - sqlite3_libversion, - sqlite3_libversion_number, - sqlite3_malloc, - sqlite3_mprintf, - sqlite3_open, - sqlite3_open16, - sqlite3_prepare, - sqlite3_prepare16, - sqlite3_profile, - sqlite3_progress_handler, - sqlite3_realloc, - sqlite3_reset, - sqlite3_result_blob, - sqlite3_result_double, - sqlite3_result_error, - sqlite3_result_error16, - sqlite3_result_int, - sqlite3_result_int64, - sqlite3_result_null, - sqlite3_result_text, - sqlite3_result_text16, - sqlite3_result_text16be, - sqlite3_result_text16le, - sqlite3_result_value, - sqlite3_rollback_hook, - sqlite3_set_authorizer, - sqlite3_set_auxdata, - sqlite3_snprintf, - sqlite3_step, - sqlite3_table_column_metadata, -#ifndef SQLITE_OMIT_DEPRECATED - sqlite3_thread_cleanup, -#else - 0, -#endif - sqlite3_total_changes, - sqlite3_trace, -#ifndef SQLITE_OMIT_DEPRECATED - sqlite3_transfer_bindings, -#else - 0, -#endif - sqlite3_update_hook, - sqlite3_user_data, - sqlite3_value_blob, - sqlite3_value_bytes, - sqlite3_value_bytes16, - sqlite3_value_double, - sqlite3_value_int, - sqlite3_value_int64, - sqlite3_value_numeric_type, - sqlite3_value_text, - sqlite3_value_text16, - sqlite3_value_text16be, - sqlite3_value_text16le, - sqlite3_value_type, - sqlite3_vmprintf, - /* - ** The original API set ends here. All extensions can call any - ** of the APIs above provided that the pointer is not NULL. But - ** before calling APIs that follow, extension should check the - ** sqlite3_libversion_number() to make sure they are dealing with - ** a library that is new enough to support that API. 
- ************************************************************************* - */ - sqlite3_overload_function, - - /* - ** Added after 3.3.13 - */ - sqlite3_prepare_v2, - sqlite3_prepare16_v2, - sqlite3_clear_bindings, - - /* - ** Added for 3.4.1 - */ - sqlite3_create_module_v2, - - /* - ** Added for 3.5.0 - */ - sqlite3_bind_zeroblob, - sqlite3_blob_bytes, - sqlite3_blob_close, - sqlite3_blob_open, - sqlite3_blob_read, - sqlite3_blob_write, - sqlite3_create_collation_v2, - sqlite3_file_control, - sqlite3_memory_highwater, - sqlite3_memory_used, -#ifdef SQLITE_MUTEX_OMIT - 0, - 0, - 0, - 0, - 0, -#else - sqlite3_mutex_alloc, - sqlite3_mutex_enter, - sqlite3_mutex_free, - sqlite3_mutex_leave, - sqlite3_mutex_try, -#endif - sqlite3_open_v2, - sqlite3_release_memory, - sqlite3_result_error_nomem, - sqlite3_result_error_toobig, - sqlite3_sleep, - sqlite3_soft_heap_limit, - sqlite3_vfs_find, - sqlite3_vfs_register, - sqlite3_vfs_unregister, - - /* - ** Added for 3.5.8 - */ - sqlite3_threadsafe, - sqlite3_result_zeroblob, - sqlite3_result_error_code, - sqlite3_test_control, - sqlite3_randomness, - sqlite3_context_db_handle, - - /* - ** Added for 3.6.0 - */ - sqlite3_extended_result_codes, - sqlite3_limit, - sqlite3_next_stmt, - sqlite3_sql, - sqlite3_status, - - /* - ** Added for 3.7.4 - */ - sqlite3_backup_finish, - sqlite3_backup_init, - sqlite3_backup_pagecount, - sqlite3_backup_remaining, - sqlite3_backup_step, -#ifndef SQLITE_OMIT_COMPILEOPTION_DIAGS - sqlite3_compileoption_get, - sqlite3_compileoption_used, -#else - 0, - 0, -#endif - sqlite3_create_function_v2, - sqlite3_db_config, - sqlite3_db_mutex, - sqlite3_db_status, - sqlite3_extended_errcode, - sqlite3_log, - sqlite3_soft_heap_limit64, - sqlite3_sourceid, - sqlite3_stmt_status, - sqlite3_strnicmp, -#ifdef SQLITE_ENABLE_UNLOCK_NOTIFY - sqlite3_unlock_notify, -#else - 0, -#endif -#ifndef SQLITE_OMIT_WAL - sqlite3_wal_autocheckpoint, - sqlite3_wal_checkpoint, - sqlite3_wal_hook, -#else - 0, - 0, - 0, -#endif - sqlite3_blob_reopen, - sqlite3_vtab_config, - sqlite3_vtab_on_conflict, - sqlite3_close_v2, - sqlite3_db_filename, - sqlite3_db_readonly, - sqlite3_db_release_memory, - sqlite3_errstr, - sqlite3_stmt_busy, - sqlite3_stmt_readonly, - sqlite3_stricmp, - sqlite3_uri_boolean, - sqlite3_uri_int64, - sqlite3_uri_parameter, - sqlite3_vsnprintf, - sqlite3_wal_checkpoint_v2 -}; - -/* -** Attempt to load an SQLite extension library contained in the file -** zFile. The entry point is zProc. zProc may be 0 in which case a -** default entry point name (sqlite3_extension_init) is used. Use -** of the default name is recommended. -** -** Return SQLITE_OK on success and SQLITE_ERROR if something goes wrong. -** -** If an error occurs and pzErrMsg is not 0, then fill *pzErrMsg with -** error message text. The calling function should free this memory -** by calling sqlite3DbFree(db, ). -*/ -static int sqlite3LoadExtension( - sqlite3 *db, /* Load the extension into this database connection */ - const char *zFile, /* Name of the shared library containing extension */ - const char *zProc, /* Entry point. 
Use "sqlite3_extension_init" if 0 */ - char **pzErrMsg /* Put error message here if not 0 */ -){ - sqlite3_vfs *pVfs = db->pVfs; - void *handle; - int (*xInit)(sqlite3*,char**,const sqlite3_api_routines*); - char *zErrmsg = 0; - const char *zEntry; - char *zAltEntry = 0; - void **aHandle; - int nMsg = 300 + sqlite3Strlen30(zFile); - int ii; - - /* Shared library endings to try if zFile cannot be loaded as written */ - static const char *azEndings[] = { -#if SQLITE_OS_WIN - "dll" -#elif defined(__APPLE__) - "dylib" -#else - "so" -#endif - }; - - - if( pzErrMsg ) *pzErrMsg = 0; - - /* Ticket #1863. To avoid a creating security problems for older - ** applications that relink against newer versions of SQLite, the - ** ability to run load_extension is turned off by default. One - ** must call sqlite3_enable_load_extension() to turn on extension - ** loading. Otherwise you get the following error. - */ - if( (db->flags & SQLITE_LoadExtension)==0 ){ - if( pzErrMsg ){ - *pzErrMsg = sqlite3_mprintf("not authorized"); - } - return SQLITE_ERROR; - } - - zEntry = zProc ? zProc : "sqlite3_extension_init"; - - handle = sqlite3OsDlOpen(pVfs, zFile); -#if SQLITE_OS_UNIX || SQLITE_OS_WIN - for(ii=0; ii sqlite3_example_init - ** C:/lib/mathfuncs.dll ==> sqlite3_mathfuncs_init - */ - if( xInit==0 && zProc==0 ){ - int iFile, iEntry, c; - int ncFile = sqlite3Strlen30(zFile); - zAltEntry = sqlite3_malloc(ncFile+30); - if( zAltEntry==0 ){ - sqlite3OsDlClose(pVfs, handle); - return SQLITE_NOMEM; - } - memcpy(zAltEntry, "sqlite3_", 8); - for(iFile=ncFile-1; iFile>=0 && zFile[iFile]!='/'; iFile--){} - iFile++; - if( sqlite3_strnicmp(zFile+iFile, "lib", 3)==0 ) iFile += 3; - for(iEntry=8; (c = zFile[iFile])!=0 && c!='.'; iFile++){ - if( sqlite3Isalpha(c) ){ - zAltEntry[iEntry++] = (char)sqlite3UpperToLower[(unsigned)c]; - } - } - memcpy(zAltEntry+iEntry, "_init", 6); - zEntry = zAltEntry; - xInit = (int(*)(sqlite3*,char**,const sqlite3_api_routines*)) - sqlite3OsDlSym(pVfs, handle, zEntry); - } - if( xInit==0 ){ - if( pzErrMsg ){ - nMsg += sqlite3Strlen30(zEntry); - *pzErrMsg = zErrmsg = sqlite3_malloc(nMsg); - if( zErrmsg ){ - sqlite3_snprintf(nMsg, zErrmsg, - "no entry point [%s] in shared library [%s]", zEntry, zFile); - sqlite3OsDlError(pVfs, nMsg-1, zErrmsg); - } - } - sqlite3OsDlClose(pVfs, handle); - sqlite3_free(zAltEntry); - return SQLITE_ERROR; - } - sqlite3_free(zAltEntry); - if( xInit(db, &zErrmsg, &sqlite3Apis) ){ - if( pzErrMsg ){ - *pzErrMsg = sqlite3_mprintf("error during initialization: %s", zErrmsg); - } - sqlite3_free(zErrmsg); - sqlite3OsDlClose(pVfs, handle); - return SQLITE_ERROR; - } - - /* Append the new shared library handle to the db->aExtension array. */ - aHandle = sqlite3DbMallocZero(db, sizeof(handle)*(db->nExtension+1)); - if( aHandle==0 ){ - return SQLITE_NOMEM; - } - if( db->nExtension>0 ){ - memcpy(aHandle, db->aExtension, sizeof(handle)*db->nExtension); - } - sqlite3DbFree(db, db->aExtension); - db->aExtension = aHandle; - - db->aExtension[db->nExtension++] = handle; - return SQLITE_OK; -} -SQLITE_API int sqlite3_load_extension( - sqlite3 *db, /* Load the extension into this database connection */ - const char *zFile, /* Name of the shared library containing extension */ - const char *zProc, /* Entry point. 
Use "sqlite3_extension_init" if 0 */ - char **pzErrMsg /* Put error message here if not 0 */ -){ - int rc; - sqlite3_mutex_enter(db->mutex); - rc = sqlite3LoadExtension(db, zFile, zProc, pzErrMsg); - rc = sqlite3ApiExit(db, rc); - sqlite3_mutex_leave(db->mutex); - return rc; -} - -/* -** Call this routine when the database connection is closing in order -** to clean up loaded extensions -*/ -SQLITE_PRIVATE void sqlite3CloseExtensions(sqlite3 *db){ - int i; - assert( sqlite3_mutex_held(db->mutex) ); - for(i=0; inExtension; i++){ - sqlite3OsDlClose(db->pVfs, db->aExtension[i]); - } - sqlite3DbFree(db, db->aExtension); -} - -/* -** Enable or disable extension loading. Extension loading is disabled by -** default so as not to open security holes in older applications. -*/ -SQLITE_API int sqlite3_enable_load_extension(sqlite3 *db, int onoff){ - sqlite3_mutex_enter(db->mutex); - if( onoff ){ - db->flags |= SQLITE_LoadExtension; - }else{ - db->flags &= ~SQLITE_LoadExtension; - } - sqlite3_mutex_leave(db->mutex); - return SQLITE_OK; -} - -#endif /* SQLITE_OMIT_LOAD_EXTENSION */ - -/* -** The auto-extension code added regardless of whether or not extension -** loading is supported. We need a dummy sqlite3Apis pointer for that -** code if regular extension loading is not available. This is that -** dummy pointer. -*/ -#ifdef SQLITE_OMIT_LOAD_EXTENSION -static const sqlite3_api_routines sqlite3Apis = { 0 }; -#endif - - -/* -** The following object holds the list of automatically loaded -** extensions. -** -** This list is shared across threads. The SQLITE_MUTEX_STATIC_MASTER -** mutex must be held while accessing this list. -*/ -typedef struct sqlite3AutoExtList sqlite3AutoExtList; -static SQLITE_WSD struct sqlite3AutoExtList { - int nExt; /* Number of entries in aExt[] */ - void (**aExt)(void); /* Pointers to the extension init functions */ -} sqlite3Autoext = { 0, 0 }; - -/* The "wsdAutoext" macro will resolve to the autoextension -** state vector. If writable static data is unsupported on the target, -** we have to locate the state vector at run-time. In the more common -** case where writable static data is supported, wsdStat can refer directly -** to the "sqlite3Autoext" state vector declared above. -*/ -#ifdef SQLITE_OMIT_WSD -# define wsdAutoextInit \ - sqlite3AutoExtList *x = &GLOBAL(sqlite3AutoExtList,sqlite3Autoext) -# define wsdAutoext x[0] -#else -# define wsdAutoextInit -# define wsdAutoext sqlite3Autoext -#endif - - -/* -** Register a statically linked extension that is automatically -** loaded by every new database connection. -*/ -SQLITE_API int sqlite3_auto_extension(void (*xInit)(void)){ - int rc = SQLITE_OK; -#ifndef SQLITE_OMIT_AUTOINIT - rc = sqlite3_initialize(); - if( rc ){ - return rc; - }else -#endif - { - int i; -#if SQLITE_THREADSAFE - sqlite3_mutex *mutex = sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_MASTER); -#endif - wsdAutoextInit; - sqlite3_mutex_enter(mutex); - for(i=0; i=0; i--){ - if( wsdAutoext.aExt[i]==xInit ){ - wsdAutoext.nExt--; - wsdAutoext.aExt[i] = wsdAutoext.aExt[wsdAutoext.nExt]; - n++; - break; - } - } - sqlite3_mutex_leave(mutex); - return n; -} - -/* -** Reset the automatic extension loading mechanism. 
-*/ -SQLITE_API void sqlite3_reset_auto_extension(void){ -#ifndef SQLITE_OMIT_AUTOINIT - if( sqlite3_initialize()==SQLITE_OK ) -#endif - { -#if SQLITE_THREADSAFE - sqlite3_mutex *mutex = sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_MASTER); -#endif - wsdAutoextInit; - sqlite3_mutex_enter(mutex); - sqlite3_free(wsdAutoext.aExt); - wsdAutoext.aExt = 0; - wsdAutoext.nExt = 0; - sqlite3_mutex_leave(mutex); - } -} - -/* -** Load all automatic extensions. -** -** If anything goes wrong, set an error in the database connection. -*/ -SQLITE_PRIVATE void sqlite3AutoLoadExtensions(sqlite3 *db){ - int i; - int go = 1; - int rc; - int (*xInit)(sqlite3*,char**,const sqlite3_api_routines*); - - wsdAutoextInit; - if( wsdAutoext.nExt==0 ){ - /* Common case: early out without every having to acquire a mutex */ - return; - } - for(i=0; go; i++){ - char *zErrmsg; -#if SQLITE_THREADSAFE - sqlite3_mutex *mutex = sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_MASTER); -#endif - sqlite3_mutex_enter(mutex); - if( i>=wsdAutoext.nExt ){ - xInit = 0; - go = 0; - }else{ - xInit = (int(*)(sqlite3*,char**,const sqlite3_api_routines*)) - wsdAutoext.aExt[i]; - } - sqlite3_mutex_leave(mutex); - zErrmsg = 0; - if( xInit && (rc = xInit(db, &zErrmsg, &sqlite3Apis))!=0 ){ - sqlite3Error(db, rc, - "automatic extension loading failed: %s", zErrmsg); - go = 0; - } - sqlite3_free(zErrmsg); - } -} - -/************** End of loadext.c *********************************************/ -/************** Begin file pragma.c ******************************************/ -/* -** 2003 April 6 -** -** The author disclaims copyright to this source code. In place of -** a legal notice, here is a blessing: -** -** May you do good and not evil. -** May you find forgiveness for yourself and forgive others. -** May you share freely, never taking more than you give. -** -************************************************************************* -** This file contains code used to implement the PRAGMA command. -*/ - -#if !defined(SQLITE_ENABLE_LOCKING_STYLE) -# if defined(__APPLE__) -# define SQLITE_ENABLE_LOCKING_STYLE 1 -# else -# define SQLITE_ENABLE_LOCKING_STYLE 0 -# endif -#endif - -/*************************************************************************** -** The next block of code, including the PragTyp_XXXX macro definitions and -** the aPragmaName[] object is composed of generated code. DO NOT EDIT. -** -** To add new pragmas, edit the code in ../tool/mkpragmatab.tcl and rerun -** that script. 
Then copy/paste the output in place of the following: -*/ -#define PragTyp_HEADER_VALUE 0 -#define PragTyp_AUTO_VACUUM 1 -#define PragTyp_FLAG 2 -#define PragTyp_BUSY_TIMEOUT 3 -#define PragTyp_CACHE_SIZE 4 -#define PragTyp_CASE_SENSITIVE_LIKE 5 -#define PragTyp_COLLATION_LIST 6 -#define PragTyp_COMPILE_OPTIONS 7 -#define PragTyp_DATA_STORE_DIRECTORY 8 -#define PragTyp_DATABASE_LIST 9 -#define PragTyp_DEFAULT_CACHE_SIZE 10 -#define PragTyp_ENCODING 11 -#define PragTyp_FOREIGN_KEY_CHECK 12 -#define PragTyp_FOREIGN_KEY_LIST 13 -#define PragTyp_INCREMENTAL_VACUUM 14 -#define PragTyp_INDEX_INFO 15 -#define PragTyp_INDEX_LIST 16 -#define PragTyp_INTEGRITY_CHECK 17 -#define PragTyp_JOURNAL_MODE 18 -#define PragTyp_JOURNAL_SIZE_LIMIT 19 -#define PragTyp_LOCK_PROXY_FILE 20 -#define PragTyp_LOCKING_MODE 21 -#define PragTyp_PAGE_COUNT 22 -#define PragTyp_MMAP_SIZE 23 -#define PragTyp_PAGE_SIZE 24 -#define PragTyp_SECURE_DELETE 25 -#define PragTyp_SHRINK_MEMORY 26 -#define PragTyp_SOFT_HEAP_LIMIT 27 -#define PragTyp_STATS 28 -#define PragTyp_SYNCHRONOUS 29 -#define PragTyp_TABLE_INFO 30 -#define PragTyp_TEMP_STORE 31 -#define PragTyp_TEMP_STORE_DIRECTORY 32 -#define PragTyp_WAL_AUTOCHECKPOINT 33 -#define PragTyp_WAL_CHECKPOINT 34 -#define PragTyp_ACTIVATE_EXTENSIONS 35 -#define PragTyp_HEXKEY 36 -#define PragTyp_KEY 37 -#define PragTyp_REKEY 38 -#define PragTyp_LOCK_STATUS 39 -#define PragTyp_PARSER_TRACE 40 -#define PragFlag_NeedSchema 0x01 -static const struct sPragmaNames { - const char *const zName; /* Name of pragma */ - u8 ePragTyp; /* PragTyp_XXX value */ - u8 mPragFlag; /* Zero or more PragFlag_XXX values */ - u32 iArg; /* Extra argument */ -} aPragmaNames[] = { -#if defined(SQLITE_HAS_CODEC) || defined(SQLITE_ENABLE_CEROD) - { /* zName: */ "activate_extensions", - /* ePragTyp: */ PragTyp_ACTIVATE_EXTENSIONS, - /* ePragFlag: */ 0, - /* iArg: */ 0 }, -#endif -#if !defined(SQLITE_OMIT_SCHEMA_VERSION_PRAGMAS) - { /* zName: */ "application_id", - /* ePragTyp: */ PragTyp_HEADER_VALUE, - /* ePragFlag: */ 0, - /* iArg: */ 0 }, -#endif -#if !defined(SQLITE_OMIT_AUTOVACUUM) - { /* zName: */ "auto_vacuum", - /* ePragTyp: */ PragTyp_AUTO_VACUUM, - /* ePragFlag: */ PragFlag_NeedSchema, - /* iArg: */ 0 }, -#endif -#if !defined(SQLITE_OMIT_FLAG_PRAGMAS) -#if !defined(SQLITE_OMIT_AUTOMATIC_INDEX) - { /* zName: */ "automatic_index", - /* ePragTyp: */ PragTyp_FLAG, - /* ePragFlag: */ 0, - /* iArg: */ SQLITE_AutoIndex }, -#endif -#endif - { /* zName: */ "busy_timeout", - /* ePragTyp: */ PragTyp_BUSY_TIMEOUT, - /* ePragFlag: */ 0, - /* iArg: */ 0 }, -#if !defined(SQLITE_OMIT_PAGER_PRAGMAS) - { /* zName: */ "cache_size", - /* ePragTyp: */ PragTyp_CACHE_SIZE, - /* ePragFlag: */ PragFlag_NeedSchema, - /* iArg: */ 0 }, -#endif -#if !defined(SQLITE_OMIT_FLAG_PRAGMAS) - { /* zName: */ "cache_spill", - /* ePragTyp: */ PragTyp_FLAG, - /* ePragFlag: */ 0, - /* iArg: */ SQLITE_CacheSpill }, -#endif - { /* zName: */ "case_sensitive_like", - /* ePragTyp: */ PragTyp_CASE_SENSITIVE_LIKE, - /* ePragFlag: */ 0, - /* iArg: */ 0 }, -#if !defined(SQLITE_OMIT_FLAG_PRAGMAS) - { /* zName: */ "checkpoint_fullfsync", - /* ePragTyp: */ PragTyp_FLAG, - /* ePragFlag: */ 0, - /* iArg: */ SQLITE_CkptFullFSync }, -#endif -#if !defined(SQLITE_OMIT_SCHEMA_PRAGMAS) - { /* zName: */ "collation_list", - /* ePragTyp: */ PragTyp_COLLATION_LIST, - /* ePragFlag: */ 0, - /* iArg: */ 0 }, -#endif -#if !defined(SQLITE_OMIT_COMPILEOPTION_DIAGS) - { /* zName: */ "compile_options", - /* ePragTyp: */ PragTyp_COMPILE_OPTIONS, - /* ePragFlag: */ 0, - /* 
iArg: */ 0 }, -#endif -#if !defined(SQLITE_OMIT_FLAG_PRAGMAS) - { /* zName: */ "count_changes", - /* ePragTyp: */ PragTyp_FLAG, - /* ePragFlag: */ 0, - /* iArg: */ SQLITE_CountRows }, -#endif -#if !defined(SQLITE_OMIT_PAGER_PRAGMAS) && SQLITE_OS_WIN - { /* zName: */ "data_store_directory", - /* ePragTyp: */ PragTyp_DATA_STORE_DIRECTORY, - /* ePragFlag: */ 0, - /* iArg: */ 0 }, -#endif -#if !defined(SQLITE_OMIT_SCHEMA_PRAGMAS) - { /* zName: */ "database_list", - /* ePragTyp: */ PragTyp_DATABASE_LIST, - /* ePragFlag: */ PragFlag_NeedSchema, - /* iArg: */ 0 }, -#endif -#if !defined(SQLITE_OMIT_PAGER_PRAGMAS) && !defined(SQLITE_OMIT_DEPRECATED) - { /* zName: */ "default_cache_size", - /* ePragTyp: */ PragTyp_DEFAULT_CACHE_SIZE, - /* ePragFlag: */ PragFlag_NeedSchema, - /* iArg: */ 0 }, -#endif -#if !defined(SQLITE_OMIT_FLAG_PRAGMAS) -#if !defined(SQLITE_OMIT_FOREIGN_KEY) && !defined(SQLITE_OMIT_TRIGGER) - { /* zName: */ "defer_foreign_keys", - /* ePragTyp: */ PragTyp_FLAG, - /* ePragFlag: */ 0, - /* iArg: */ SQLITE_DeferFKs }, -#endif -#endif -#if !defined(SQLITE_OMIT_FLAG_PRAGMAS) - { /* zName: */ "empty_result_callbacks", - /* ePragTyp: */ PragTyp_FLAG, - /* ePragFlag: */ 0, - /* iArg: */ SQLITE_NullCallback }, -#endif -#if !defined(SQLITE_OMIT_UTF16) - { /* zName: */ "encoding", - /* ePragTyp: */ PragTyp_ENCODING, - /* ePragFlag: */ 0, - /* iArg: */ 0 }, -#endif -#if !defined(SQLITE_OMIT_FOREIGN_KEY) && !defined(SQLITE_OMIT_TRIGGER) - { /* zName: */ "foreign_key_check", - /* ePragTyp: */ PragTyp_FOREIGN_KEY_CHECK, - /* ePragFlag: */ PragFlag_NeedSchema, - /* iArg: */ 0 }, -#endif -#if !defined(SQLITE_OMIT_FOREIGN_KEY) - { /* zName: */ "foreign_key_list", - /* ePragTyp: */ PragTyp_FOREIGN_KEY_LIST, - /* ePragFlag: */ PragFlag_NeedSchema, - /* iArg: */ 0 }, -#endif -#if !defined(SQLITE_OMIT_FLAG_PRAGMAS) -#if !defined(SQLITE_OMIT_FOREIGN_KEY) && !defined(SQLITE_OMIT_TRIGGER) - { /* zName: */ "foreign_keys", - /* ePragTyp: */ PragTyp_FLAG, - /* ePragFlag: */ 0, - /* iArg: */ SQLITE_ForeignKeys }, -#endif -#endif -#if !defined(SQLITE_OMIT_SCHEMA_VERSION_PRAGMAS) - { /* zName: */ "freelist_count", - /* ePragTyp: */ PragTyp_HEADER_VALUE, - /* ePragFlag: */ 0, - /* iArg: */ 0 }, -#endif -#if !defined(SQLITE_OMIT_FLAG_PRAGMAS) - { /* zName: */ "full_column_names", - /* ePragTyp: */ PragTyp_FLAG, - /* ePragFlag: */ 0, - /* iArg: */ SQLITE_FullColNames }, - { /* zName: */ "fullfsync", - /* ePragTyp: */ PragTyp_FLAG, - /* ePragFlag: */ 0, - /* iArg: */ SQLITE_FullFSync }, -#endif -#if defined(SQLITE_HAS_CODEC) - { /* zName: */ "hexkey", - /* ePragTyp: */ PragTyp_HEXKEY, - /* ePragFlag: */ 0, - /* iArg: */ 0 }, - { /* zName: */ "hexrekey", - /* ePragTyp: */ PragTyp_HEXKEY, - /* ePragFlag: */ 0, - /* iArg: */ 0 }, -#endif -#if !defined(SQLITE_OMIT_FLAG_PRAGMAS) -#if !defined(SQLITE_OMIT_CHECK) - { /* zName: */ "ignore_check_constraints", - /* ePragTyp: */ PragTyp_FLAG, - /* ePragFlag: */ 0, - /* iArg: */ SQLITE_IgnoreChecks }, -#endif -#endif -#if !defined(SQLITE_OMIT_AUTOVACUUM) - { /* zName: */ "incremental_vacuum", - /* ePragTyp: */ PragTyp_INCREMENTAL_VACUUM, - /* ePragFlag: */ PragFlag_NeedSchema, - /* iArg: */ 0 }, -#endif -#if !defined(SQLITE_OMIT_SCHEMA_PRAGMAS) - { /* zName: */ "index_info", - /* ePragTyp: */ PragTyp_INDEX_INFO, - /* ePragFlag: */ PragFlag_NeedSchema, - /* iArg: */ 0 }, - { /* zName: */ "index_list", - /* ePragTyp: */ PragTyp_INDEX_LIST, - /* ePragFlag: */ PragFlag_NeedSchema, - /* iArg: */ 0 }, -#endif -#if !defined(SQLITE_OMIT_INTEGRITY_CHECK) - { /* zName: */ 
"integrity_check", - /* ePragTyp: */ PragTyp_INTEGRITY_CHECK, - /* ePragFlag: */ PragFlag_NeedSchema, - /* iArg: */ 0 }, -#endif -#if !defined(SQLITE_OMIT_PAGER_PRAGMAS) - { /* zName: */ "journal_mode", - /* ePragTyp: */ PragTyp_JOURNAL_MODE, - /* ePragFlag: */ PragFlag_NeedSchema, - /* iArg: */ 0 }, - { /* zName: */ "journal_size_limit", - /* ePragTyp: */ PragTyp_JOURNAL_SIZE_LIMIT, - /* ePragFlag: */ 0, - /* iArg: */ 0 }, -#endif -#if defined(SQLITE_HAS_CODEC) - { /* zName: */ "key", - /* ePragTyp: */ PragTyp_KEY, - /* ePragFlag: */ 0, - /* iArg: */ 0 }, -#endif -#if !defined(SQLITE_OMIT_FLAG_PRAGMAS) - { /* zName: */ "legacy_file_format", - /* ePragTyp: */ PragTyp_FLAG, - /* ePragFlag: */ 0, - /* iArg: */ SQLITE_LegacyFileFmt }, -#endif -#if !defined(SQLITE_OMIT_PAGER_PRAGMAS) && SQLITE_ENABLE_LOCKING_STYLE - { /* zName: */ "lock_proxy_file", - /* ePragTyp: */ PragTyp_LOCK_PROXY_FILE, - /* ePragFlag: */ 0, - /* iArg: */ 0 }, -#endif -#if defined(SQLITE_DEBUG) || defined(SQLITE_TEST) - { /* zName: */ "lock_status", - /* ePragTyp: */ PragTyp_LOCK_STATUS, - /* ePragFlag: */ 0, - /* iArg: */ 0 }, -#endif -#if !defined(SQLITE_OMIT_PAGER_PRAGMAS) - { /* zName: */ "locking_mode", - /* ePragTyp: */ PragTyp_LOCKING_MODE, - /* ePragFlag: */ 0, - /* iArg: */ 0 }, - { /* zName: */ "max_page_count", - /* ePragTyp: */ PragTyp_PAGE_COUNT, - /* ePragFlag: */ PragFlag_NeedSchema, - /* iArg: */ 0 }, - { /* zName: */ "mmap_size", - /* ePragTyp: */ PragTyp_MMAP_SIZE, - /* ePragFlag: */ 0, - /* iArg: */ 0 }, - { /* zName: */ "page_count", - /* ePragTyp: */ PragTyp_PAGE_COUNT, - /* ePragFlag: */ PragFlag_NeedSchema, - /* iArg: */ 0 }, - { /* zName: */ "page_size", - /* ePragTyp: */ PragTyp_PAGE_SIZE, - /* ePragFlag: */ 0, - /* iArg: */ 0 }, -#endif -#if defined(SQLITE_DEBUG) - { /* zName: */ "parser_trace", - /* ePragTyp: */ PragTyp_PARSER_TRACE, - /* ePragFlag: */ 0, - /* iArg: */ 0 }, -#endif -#if !defined(SQLITE_OMIT_FLAG_PRAGMAS) - { /* zName: */ "query_only", - /* ePragTyp: */ PragTyp_FLAG, - /* ePragFlag: */ 0, - /* iArg: */ SQLITE_QueryOnly }, -#endif -#if !defined(SQLITE_OMIT_INTEGRITY_CHECK) - { /* zName: */ "quick_check", - /* ePragTyp: */ PragTyp_INTEGRITY_CHECK, - /* ePragFlag: */ PragFlag_NeedSchema, - /* iArg: */ 0 }, -#endif -#if !defined(SQLITE_OMIT_FLAG_PRAGMAS) - { /* zName: */ "read_uncommitted", - /* ePragTyp: */ PragTyp_FLAG, - /* ePragFlag: */ 0, - /* iArg: */ SQLITE_ReadUncommitted }, - { /* zName: */ "recursive_triggers", - /* ePragTyp: */ PragTyp_FLAG, - /* ePragFlag: */ 0, - /* iArg: */ SQLITE_RecTriggers }, -#endif -#if defined(SQLITE_HAS_CODEC) - { /* zName: */ "rekey", - /* ePragTyp: */ PragTyp_REKEY, - /* ePragFlag: */ 0, - /* iArg: */ 0 }, -#endif -#if !defined(SQLITE_OMIT_FLAG_PRAGMAS) - { /* zName: */ "reverse_unordered_selects", - /* ePragTyp: */ PragTyp_FLAG, - /* ePragFlag: */ 0, - /* iArg: */ SQLITE_ReverseOrder }, -#endif -#if !defined(SQLITE_OMIT_SCHEMA_VERSION_PRAGMAS) - { /* zName: */ "schema_version", - /* ePragTyp: */ PragTyp_HEADER_VALUE, - /* ePragFlag: */ 0, - /* iArg: */ 0 }, -#endif -#if !defined(SQLITE_OMIT_PAGER_PRAGMAS) - { /* zName: */ "secure_delete", - /* ePragTyp: */ PragTyp_SECURE_DELETE, - /* ePragFlag: */ 0, - /* iArg: */ 0 }, -#endif -#if !defined(SQLITE_OMIT_FLAG_PRAGMAS) - { /* zName: */ "short_column_names", - /* ePragTyp: */ PragTyp_FLAG, - /* ePragFlag: */ 0, - /* iArg: */ SQLITE_ShortColNames }, -#endif - { /* zName: */ "shrink_memory", - /* ePragTyp: */ PragTyp_SHRINK_MEMORY, - /* ePragFlag: */ 0, - /* iArg: */ 0 }, - { /* zName: */ 
"soft_heap_limit", - /* ePragTyp: */ PragTyp_SOFT_HEAP_LIMIT, - /* ePragFlag: */ 0, - /* iArg: */ 0 }, -#if !defined(SQLITE_OMIT_FLAG_PRAGMAS) -#if defined(SQLITE_DEBUG) - { /* zName: */ "sql_trace", - /* ePragTyp: */ PragTyp_FLAG, - /* ePragFlag: */ 0, - /* iArg: */ SQLITE_SqlTrace }, -#endif -#endif -#if !defined(SQLITE_OMIT_SCHEMA_PRAGMAS) - { /* zName: */ "stats", - /* ePragTyp: */ PragTyp_STATS, - /* ePragFlag: */ PragFlag_NeedSchema, - /* iArg: */ 0 }, -#endif -#if !defined(SQLITE_OMIT_PAGER_PRAGMAS) - { /* zName: */ "synchronous", - /* ePragTyp: */ PragTyp_SYNCHRONOUS, - /* ePragFlag: */ PragFlag_NeedSchema, - /* iArg: */ 0 }, -#endif -#if !defined(SQLITE_OMIT_SCHEMA_PRAGMAS) - { /* zName: */ "table_info", - /* ePragTyp: */ PragTyp_TABLE_INFO, - /* ePragFlag: */ PragFlag_NeedSchema, - /* iArg: */ 0 }, -#endif -#if !defined(SQLITE_OMIT_PAGER_PRAGMAS) - { /* zName: */ "temp_store", - /* ePragTyp: */ PragTyp_TEMP_STORE, - /* ePragFlag: */ 0, - /* iArg: */ 0 }, - { /* zName: */ "temp_store_directory", - /* ePragTyp: */ PragTyp_TEMP_STORE_DIRECTORY, - /* ePragFlag: */ 0, - /* iArg: */ 0 }, -#endif -#if !defined(SQLITE_OMIT_SCHEMA_VERSION_PRAGMAS) - { /* zName: */ "user_version", - /* ePragTyp: */ PragTyp_HEADER_VALUE, - /* ePragFlag: */ 0, - /* iArg: */ 0 }, -#endif -#if !defined(SQLITE_OMIT_FLAG_PRAGMAS) -#if defined(SQLITE_DEBUG) - { /* zName: */ "vdbe_addoptrace", - /* ePragTyp: */ PragTyp_FLAG, - /* ePragFlag: */ 0, - /* iArg: */ SQLITE_VdbeAddopTrace }, - { /* zName: */ "vdbe_debug", - /* ePragTyp: */ PragTyp_FLAG, - /* ePragFlag: */ 0, - /* iArg: */ SQLITE_SqlTrace|SQLITE_VdbeListing|SQLITE_VdbeTrace }, - { /* zName: */ "vdbe_eqp", - /* ePragTyp: */ PragTyp_FLAG, - /* ePragFlag: */ 0, - /* iArg: */ SQLITE_VdbeEQP }, - { /* zName: */ "vdbe_listing", - /* ePragTyp: */ PragTyp_FLAG, - /* ePragFlag: */ 0, - /* iArg: */ SQLITE_VdbeListing }, - { /* zName: */ "vdbe_trace", - /* ePragTyp: */ PragTyp_FLAG, - /* ePragFlag: */ 0, - /* iArg: */ SQLITE_VdbeTrace }, -#endif -#endif -#if !defined(SQLITE_OMIT_WAL) - { /* zName: */ "wal_autocheckpoint", - /* ePragTyp: */ PragTyp_WAL_AUTOCHECKPOINT, - /* ePragFlag: */ 0, - /* iArg: */ 0 }, - { /* zName: */ "wal_checkpoint", - /* ePragTyp: */ PragTyp_WAL_CHECKPOINT, - /* ePragFlag: */ PragFlag_NeedSchema, - /* iArg: */ 0 }, -#endif -#if !defined(SQLITE_OMIT_FLAG_PRAGMAS) - { /* zName: */ "writable_schema", - /* ePragTyp: */ PragTyp_FLAG, - /* ePragFlag: */ 0, - /* iArg: */ SQLITE_WriteSchema|SQLITE_RecoveryMode }, -#endif -}; -/* Number of pragmas: 56 on by default, 69 total. */ -/* End of the automatically generated pragma table. -***************************************************************************/ - -/* -** Interpret the given string as a safety level. Return 0 for OFF, -** 1 for ON or NORMAL and 2 for FULL. Return 1 for an empty or -** unrecognized string argument. The FULL option is disallowed -** if the omitFull parameter it 1. -** -** Note that the values returned are one less that the values that -** should be passed into sqlite3BtreeSetSafetyLevel(). The is done -** to support legacy SQL code. The safety level used to be boolean -** and older scripts may have used numbers 0 for OFF and 1 for ON. 
-*/ -static u8 getSafetyLevel(const char *z, int omitFull, int dflt){ - /* 123456789 123456789 */ - static const char zText[] = "onoffalseyestruefull"; - static const u8 iOffset[] = {0, 1, 2, 4, 9, 12, 16}; - static const u8 iLength[] = {2, 2, 3, 5, 3, 4, 4}; - static const u8 iValue[] = {1, 0, 0, 0, 1, 1, 2}; - int i, n; - if( sqlite3Isdigit(*z) ){ - return (u8)sqlite3Atoi(z); - } - n = sqlite3Strlen30(z); - for(i=0; i=0&&i<=2)?i:0); -} -#endif /* ifndef SQLITE_OMIT_AUTOVACUUM */ - -#ifndef SQLITE_OMIT_PAGER_PRAGMAS -/* -** Interpret the given string as a temp db location. Return 1 for file -** backed temporary databases, 2 for the Red-Black tree in memory database -** and 0 to use the compile-time default. -*/ -static int getTempStore(const char *z){ - if( z[0]>='0' && z[0]<='2' ){ - return z[0] - '0'; - }else if( sqlite3StrICmp(z, "file")==0 ){ - return 1; - }else if( sqlite3StrICmp(z, "memory")==0 ){ - return 2; - }else{ - return 0; - } -} -#endif /* SQLITE_PAGER_PRAGMAS */ - -#ifndef SQLITE_OMIT_PAGER_PRAGMAS -/* -** Invalidate temp storage, either when the temp storage is changed -** from default, or when 'file' and the temp_store_directory has changed -*/ -static int invalidateTempStorage(Parse *pParse){ - sqlite3 *db = pParse->db; - if( db->aDb[1].pBt!=0 ){ - if( !db->autoCommit || sqlite3BtreeIsInReadTrans(db->aDb[1].pBt) ){ - sqlite3ErrorMsg(pParse, "temporary storage cannot be changed " - "from within a transaction"); - return SQLITE_ERROR; - } - sqlite3BtreeClose(db->aDb[1].pBt); - db->aDb[1].pBt = 0; - sqlite3ResetAllSchemasOfConnection(db); - } - return SQLITE_OK; -} -#endif /* SQLITE_PAGER_PRAGMAS */ - -#ifndef SQLITE_OMIT_PAGER_PRAGMAS -/* -** If the TEMP database is open, close it and mark the database schema -** as needing reloading. This must be done when using the SQLITE_TEMP_STORE -** or DEFAULT_TEMP_STORE pragmas. -*/ -static int changeTempStorage(Parse *pParse, const char *zStorageType){ - int ts = getTempStore(zStorageType); - sqlite3 *db = pParse->db; - if( db->temp_store==ts ) return SQLITE_OK; - if( invalidateTempStorage( pParse ) != SQLITE_OK ){ - return SQLITE_ERROR; - } - db->temp_store = (u8)ts; - return SQLITE_OK; -} -#endif /* SQLITE_PAGER_PRAGMAS */ - -/* -** Generate code to return a single integer value. -*/ -static void returnSingleInt(Parse *pParse, const char *zLabel, i64 value){ - Vdbe *v = sqlite3GetVdbe(pParse); - int mem = ++pParse->nMem; - i64 *pI64 = sqlite3DbMallocRaw(pParse->db, sizeof(value)); - if( pI64 ){ - memcpy(pI64, &value, sizeof(value)); - } - sqlite3VdbeAddOp4(v, OP_Int64, 0, mem, 0, (char*)pI64, P4_INT64); - sqlite3VdbeSetNumCols(v, 1); - sqlite3VdbeSetColName(v, 0, COLNAME_NAME, zLabel, SQLITE_STATIC); - sqlite3VdbeAddOp2(v, OP_ResultRow, mem, 1); -} - - -/* -** Set the safety_level and pager flags for pager iDb. Or if iDb<0 -** set these values for all pagers. 
-*/ -#ifndef SQLITE_OMIT_PAGER_PRAGMAS -static void setAllPagerFlags(sqlite3 *db){ - if( db->autoCommit ){ - Db *pDb = db->aDb; - int n = db->nDb; - assert( SQLITE_FullFSync==PAGER_FULLFSYNC ); - assert( SQLITE_CkptFullFSync==PAGER_CKPT_FULLFSYNC ); - assert( SQLITE_CacheSpill==PAGER_CACHESPILL ); - assert( (PAGER_FULLFSYNC | PAGER_CKPT_FULLFSYNC | PAGER_CACHESPILL) - == PAGER_FLAGS_MASK ); - assert( (pDb->safety_level & PAGER_SYNCHRONOUS_MASK)==pDb->safety_level ); - while( (n--) > 0 ){ - if( pDb->pBt ){ - sqlite3BtreeSetPagerFlags(pDb->pBt, - pDb->safety_level | (db->flags & PAGER_FLAGS_MASK) ); - } - pDb++; - } - } -} -#else -# define setAllPagerFlags(X) /* no-op */ -#endif - - -/* -** Return a human-readable name for a constraint resolution action. -*/ -#ifndef SQLITE_OMIT_FOREIGN_KEY -static const char *actionName(u8 action){ - const char *zName; - switch( action ){ - case OE_SetNull: zName = "SET NULL"; break; - case OE_SetDflt: zName = "SET DEFAULT"; break; - case OE_Cascade: zName = "CASCADE"; break; - case OE_Restrict: zName = "RESTRICT"; break; - default: zName = "NO ACTION"; - assert( action==OE_None ); break; - } - return zName; -} -#endif - - -/* -** Parameter eMode must be one of the PAGER_JOURNALMODE_XXX constants -** defined in pager.h. This function returns the associated lowercase -** journal-mode name. -*/ -SQLITE_PRIVATE const char *sqlite3JournalModename(int eMode){ - static char * const azModeName[] = { - "delete", "persist", "off", "truncate", "memory" -#ifndef SQLITE_OMIT_WAL - , "wal" -#endif - }; - assert( PAGER_JOURNALMODE_DELETE==0 ); - assert( PAGER_JOURNALMODE_PERSIST==1 ); - assert( PAGER_JOURNALMODE_OFF==2 ); - assert( PAGER_JOURNALMODE_TRUNCATE==3 ); - assert( PAGER_JOURNALMODE_MEMORY==4 ); - assert( PAGER_JOURNALMODE_WAL==5 ); - assert( eMode>=0 && eMode<=ArraySize(azModeName) ); - - if( eMode==ArraySize(azModeName) ) return 0; - return azModeName[eMode]; -} - -/* -** Process a pragma statement. -** -** Pragmas are of this form: -** -** PRAGMA [database.]id [= value] -** -** The identifier might also be a string. The value is a string, and -** identifier, or a number. If minusFlag is true, then the value is -** a number that was preceded by a minus sign. -** -** If the left side is "database.id" then pId1 is the database name -** and pId2 is the id. If the left side is just "id" then pId1 is the -** id and pId2 is any empty string. -*/ -SQLITE_PRIVATE void sqlite3Pragma( - Parse *pParse, - Token *pId1, /* First part of [database.]id field */ - Token *pId2, /* Second part of [database.]id field, or NULL */ - Token *pValue, /* Token for , or NULL */ - int minusFlag /* True if a '-' sign preceded */ -){ - char *zLeft = 0; /* Nul-terminated UTF-8 string */ - char *zRight = 0; /* Nul-terminated UTF-8 string , or NULL */ - const char *zDb = 0; /* The database name */ - Token *pId; /* Pointer to token */ - char *aFcntl[4]; /* Argument to SQLITE_FCNTL_PRAGMA */ - int iDb; /* Database index for */ - int lwr, upr, mid; /* Binary search bounds */ - int rc; /* return value form SQLITE_FCNTL_PRAGMA */ - sqlite3 *db = pParse->db; /* The database connection */ - Db *pDb; /* The specific database being pragmaed */ - Vdbe *v = sqlite3GetVdbe(pParse); /* Prepared statement */ - - if( v==0 ) return; - sqlite3VdbeRunOnlyOnce(v); - pParse->nMem = 2; - - /* Interpret the [database.] part of the pragma statement. iDb is the - ** index of the database this pragma is being applied to in db.aDb[]. 
*/ - iDb = sqlite3TwoPartName(pParse, pId1, pId2, &pId); - if( iDb<0 ) return; - pDb = &db->aDb[iDb]; - - /* If the temp database has been explicitly named as part of the - ** pragma, make sure it is open. - */ - if( iDb==1 && sqlite3OpenTempDatabase(pParse) ){ - return; - } - - zLeft = sqlite3NameFromToken(db, pId); - if( !zLeft ) return; - if( minusFlag ){ - zRight = sqlite3MPrintf(db, "-%T", pValue); - }else{ - zRight = sqlite3NameFromToken(db, pValue); - } - - assert( pId2 ); - zDb = pId2->n>0 ? pDb->zName : 0; - if( sqlite3AuthCheck(pParse, SQLITE_PRAGMA, zLeft, zRight, zDb) ){ - goto pragma_out; - } - - /* Send an SQLITE_FCNTL_PRAGMA file-control to the underlying VFS - ** connection. If it returns SQLITE_OK, then assume that the VFS - ** handled the pragma and generate a no-op prepared statement. - */ - aFcntl[0] = 0; - aFcntl[1] = zLeft; - aFcntl[2] = zRight; - aFcntl[3] = 0; - db->busyHandler.nBusy = 0; - rc = sqlite3_file_control(db, zDb, SQLITE_FCNTL_PRAGMA, (void*)aFcntl); - if( rc==SQLITE_OK ){ - if( aFcntl[0] ){ - int mem = ++pParse->nMem; - sqlite3VdbeAddOp4(v, OP_String8, 0, mem, 0, aFcntl[0], 0); - sqlite3VdbeSetNumCols(v, 1); - sqlite3VdbeSetColName(v, 0, COLNAME_NAME, "result", SQLITE_STATIC); - sqlite3VdbeAddOp2(v, OP_ResultRow, mem, 1); - sqlite3_free(aFcntl[0]); - } - goto pragma_out; - } - if( rc!=SQLITE_NOTFOUND ){ - if( aFcntl[0] ){ - sqlite3ErrorMsg(pParse, "%s", aFcntl[0]); - sqlite3_free(aFcntl[0]); - } - pParse->nErr++; - pParse->rc = rc; - goto pragma_out; - } - - /* Locate the pragma in the lookup table */ - lwr = 0; - upr = ArraySize(aPragmaNames)-1; - while( lwr<=upr ){ - mid = (lwr+upr)/2; - rc = sqlite3_stricmp(zLeft, aPragmaNames[mid].zName); - if( rc==0 ) break; - if( rc<0 ){ - upr = mid - 1; - }else{ - lwr = mid + 1; - } - } - if( lwr>upr ) goto pragma_out; - - /* Make sure the database schema is loaded if the pragma requires that */ - if( (aPragmaNames[mid].mPragFlag & PragFlag_NeedSchema)!=0 ){ - if( sqlite3ReadSchema(pParse) ) goto pragma_out; - } - - /* Jump to the appropriate pragma handler */ - switch( aPragmaNames[mid].ePragTyp ){ - -#if !defined(SQLITE_OMIT_PAGER_PRAGMAS) && !defined(SQLITE_OMIT_DEPRECATED) - /* - ** PRAGMA [database.]default_cache_size - ** PRAGMA [database.]default_cache_size=N - ** - ** The first form reports the current persistent setting for the - ** page cache size. The value returned is the maximum number of - ** pages in the page cache. The second form sets both the current - ** page cache size value and the persistent page cache size value - ** stored in the database file. - ** - ** Older versions of SQLite would set the default cache size to a - ** negative number to indicate synchronous=OFF. These days, synchronous - ** is always on by default regardless of the sign of the default cache - ** size. But continue to take the absolute value of the default cache - ** size of historical compatibility. 
- */ - case PragTyp_DEFAULT_CACHE_SIZE: { - static const int iLn = VDBE_OFFSET_LINENO(2); - static const VdbeOpList getCacheSize[] = { - { OP_Transaction, 0, 0, 0}, /* 0 */ - { OP_ReadCookie, 0, 1, BTREE_DEFAULT_CACHE_SIZE}, /* 1 */ - { OP_IfPos, 1, 8, 0}, - { OP_Integer, 0, 2, 0}, - { OP_Subtract, 1, 2, 1}, - { OP_IfPos, 1, 8, 0}, - { OP_Integer, 0, 1, 0}, /* 6 */ - { OP_Noop, 0, 0, 0}, - { OP_ResultRow, 1, 1, 0}, - }; - int addr; - sqlite3VdbeUsesBtree(v, iDb); - if( !zRight ){ - sqlite3VdbeSetNumCols(v, 1); - sqlite3VdbeSetColName(v, 0, COLNAME_NAME, "cache_size", SQLITE_STATIC); - pParse->nMem += 2; - addr = sqlite3VdbeAddOpList(v, ArraySize(getCacheSize), getCacheSize,iLn); - sqlite3VdbeChangeP1(v, addr, iDb); - sqlite3VdbeChangeP1(v, addr+1, iDb); - sqlite3VdbeChangeP1(v, addr+6, SQLITE_DEFAULT_CACHE_SIZE); - }else{ - int size = sqlite3AbsInt32(sqlite3Atoi(zRight)); - sqlite3BeginWriteOperation(pParse, 0, iDb); - sqlite3VdbeAddOp2(v, OP_Integer, size, 1); - sqlite3VdbeAddOp3(v, OP_SetCookie, iDb, BTREE_DEFAULT_CACHE_SIZE, 1); - assert( sqlite3SchemaMutexHeld(db, iDb, 0) ); - pDb->pSchema->cache_size = size; - sqlite3BtreeSetCacheSize(pDb->pBt, pDb->pSchema->cache_size); - } - break; - } -#endif /* !SQLITE_OMIT_PAGER_PRAGMAS && !SQLITE_OMIT_DEPRECATED */ - -#if !defined(SQLITE_OMIT_PAGER_PRAGMAS) - /* - ** PRAGMA [database.]page_size - ** PRAGMA [database.]page_size=N - ** - ** The first form reports the current setting for the - ** database page size in bytes. The second form sets the - ** database page size value. The value can only be set if - ** the database has not yet been created. - */ - case PragTyp_PAGE_SIZE: { - Btree *pBt = pDb->pBt; - assert( pBt!=0 ); - if( !zRight ){ - int size = ALWAYS(pBt) ? sqlite3BtreeGetPageSize(pBt) : 0; - returnSingleInt(pParse, "page_size", size); - }else{ - /* Malloc may fail when setting the page-size, as there is an internal - ** buffer that the pager module resizes using sqlite3_realloc(). - */ - db->nextPagesize = sqlite3Atoi(zRight); - if( SQLITE_NOMEM==sqlite3BtreeSetPageSize(pBt, db->nextPagesize,-1,0) ){ - db->mallocFailed = 1; - } - } - break; - } - - /* - ** PRAGMA [database.]secure_delete - ** PRAGMA [database.]secure_delete=ON/OFF - ** - ** The first form reports the current setting for the - ** secure_delete flag. The second form changes the secure_delete - ** flag setting and reports thenew value. - */ - case PragTyp_SECURE_DELETE: { - Btree *pBt = pDb->pBt; - int b = -1; - assert( pBt!=0 ); - if( zRight ){ - b = sqlite3GetBoolean(zRight, 0); - } - if( pId2->n==0 && b>=0 ){ - int ii; - for(ii=0; iinDb; ii++){ - sqlite3BtreeSecureDelete(db->aDb[ii].pBt, b); - } - } - b = sqlite3BtreeSecureDelete(pBt, b); - returnSingleInt(pParse, "secure_delete", b); - break; - } - - /* - ** PRAGMA [database.]max_page_count - ** PRAGMA [database.]max_page_count=N - ** - ** The first form reports the current setting for the - ** maximum number of pages in the database file. The - ** second form attempts to change this setting. Both - ** forms return the current setting. - ** - ** The absolute value of N is used. This is undocumented and might - ** change. The only purpose is to provide an easy way to test - ** the sqlite3AbsInt32() function. - ** - ** PRAGMA [database.]page_count - ** - ** Return the number of pages in the specified database. 
- */ - case PragTyp_PAGE_COUNT: { - int iReg; - sqlite3CodeVerifySchema(pParse, iDb); - iReg = ++pParse->nMem; - if( sqlite3Tolower(zLeft[0])=='p' ){ - sqlite3VdbeAddOp2(v, OP_Pagecount, iDb, iReg); - }else{ - sqlite3VdbeAddOp3(v, OP_MaxPgcnt, iDb, iReg, - sqlite3AbsInt32(sqlite3Atoi(zRight))); - } - sqlite3VdbeAddOp2(v, OP_ResultRow, iReg, 1); - sqlite3VdbeSetNumCols(v, 1); - sqlite3VdbeSetColName(v, 0, COLNAME_NAME, zLeft, SQLITE_TRANSIENT); - break; - } - - /* - ** PRAGMA [database.]locking_mode - ** PRAGMA [database.]locking_mode = (normal|exclusive) - */ - case PragTyp_LOCKING_MODE: { - const char *zRet = "normal"; - int eMode = getLockingMode(zRight); - - if( pId2->n==0 && eMode==PAGER_LOCKINGMODE_QUERY ){ - /* Simple "PRAGMA locking_mode;" statement. This is a query for - ** the current default locking mode (which may be different to - ** the locking-mode of the main database). - */ - eMode = db->dfltLockMode; - }else{ - Pager *pPager; - if( pId2->n==0 ){ - /* This indicates that no database name was specified as part - ** of the PRAGMA command. In this case the locking-mode must be - ** set on all attached databases, as well as the main db file. - ** - ** Also, the sqlite3.dfltLockMode variable is set so that - ** any subsequently attached databases also use the specified - ** locking mode. - */ - int ii; - assert(pDb==&db->aDb[0]); - for(ii=2; iinDb; ii++){ - pPager = sqlite3BtreePager(db->aDb[ii].pBt); - sqlite3PagerLockingMode(pPager, eMode); - } - db->dfltLockMode = (u8)eMode; - } - pPager = sqlite3BtreePager(pDb->pBt); - eMode = sqlite3PagerLockingMode(pPager, eMode); - } - - assert( eMode==PAGER_LOCKINGMODE_NORMAL - || eMode==PAGER_LOCKINGMODE_EXCLUSIVE ); - if( eMode==PAGER_LOCKINGMODE_EXCLUSIVE ){ - zRet = "exclusive"; - } - sqlite3VdbeSetNumCols(v, 1); - sqlite3VdbeSetColName(v, 0, COLNAME_NAME, "locking_mode", SQLITE_STATIC); - sqlite3VdbeAddOp4(v, OP_String8, 0, 1, 0, zRet, 0); - sqlite3VdbeAddOp2(v, OP_ResultRow, 1, 1); - break; - } - - /* - ** PRAGMA [database.]journal_mode - ** PRAGMA [database.]journal_mode = - ** (delete|persist|off|truncate|memory|wal|off) - */ - case PragTyp_JOURNAL_MODE: { - int eMode; /* One of the PAGER_JOURNALMODE_XXX symbols */ - int ii; /* Loop counter */ - - sqlite3VdbeSetNumCols(v, 1); - sqlite3VdbeSetColName(v, 0, COLNAME_NAME, "journal_mode", SQLITE_STATIC); - - if( zRight==0 ){ - /* If there is no "=MODE" part of the pragma, do a query for the - ** current mode */ - eMode = PAGER_JOURNALMODE_QUERY; - }else{ - const char *zMode; - int n = sqlite3Strlen30(zRight); - for(eMode=0; (zMode = sqlite3JournalModename(eMode))!=0; eMode++){ - if( sqlite3StrNICmp(zRight, zMode, n)==0 ) break; - } - if( !zMode ){ - /* If the "=MODE" part does not match any known journal mode, - ** then do a query */ - eMode = PAGER_JOURNALMODE_QUERY; - } - } - if( eMode==PAGER_JOURNALMODE_QUERY && pId2->n==0 ){ - /* Convert "PRAGMA journal_mode" into "PRAGMA main.journal_mode" */ - iDb = 0; - pId2->n = 1; - } - for(ii=db->nDb-1; ii>=0; ii--){ - if( db->aDb[ii].pBt && (ii==iDb || pId2->n==0) ){ - sqlite3VdbeUsesBtree(v, ii); - sqlite3VdbeAddOp3(v, OP_JournalMode, ii, 1, eMode); - } - } - sqlite3VdbeAddOp2(v, OP_ResultRow, 1, 1); - break; - } - - /* - ** PRAGMA [database.]journal_size_limit - ** PRAGMA [database.]journal_size_limit=N - ** - ** Get or set the size limit on rollback journal files. 
- */ - case PragTyp_JOURNAL_SIZE_LIMIT: { - Pager *pPager = sqlite3BtreePager(pDb->pBt); - i64 iLimit = -2; - if( zRight ){ - sqlite3Atoi64(zRight, &iLimit, sqlite3Strlen30(zRight), SQLITE_UTF8); - if( iLimit<-1 ) iLimit = -1; - } - iLimit = sqlite3PagerJournalSizeLimit(pPager, iLimit); - returnSingleInt(pParse, "journal_size_limit", iLimit); - break; - } - -#endif /* SQLITE_OMIT_PAGER_PRAGMAS */ - - /* - ** PRAGMA [database.]auto_vacuum - ** PRAGMA [database.]auto_vacuum=N - ** - ** Get or set the value of the database 'auto-vacuum' parameter. - ** The value is one of: 0 NONE 1 FULL 2 INCREMENTAL - */ -#ifndef SQLITE_OMIT_AUTOVACUUM - case PragTyp_AUTO_VACUUM: { - Btree *pBt = pDb->pBt; - assert( pBt!=0 ); - if( !zRight ){ - returnSingleInt(pParse, "auto_vacuum", sqlite3BtreeGetAutoVacuum(pBt)); - }else{ - int eAuto = getAutoVacuum(zRight); - assert( eAuto>=0 && eAuto<=2 ); - db->nextAutovac = (u8)eAuto; - /* Call SetAutoVacuum() to set initialize the internal auto and - ** incr-vacuum flags. This is required in case this connection - ** creates the database file. It is important that it is created - ** as an auto-vacuum capable db. - */ - rc = sqlite3BtreeSetAutoVacuum(pBt, eAuto); - if( rc==SQLITE_OK && (eAuto==1 || eAuto==2) ){ - /* When setting the auto_vacuum mode to either "full" or - ** "incremental", write the value of meta[6] in the database - ** file. Before writing to meta[6], check that meta[3] indicates - ** that this really is an auto-vacuum capable database. - */ - static const int iLn = VDBE_OFFSET_LINENO(2); - static const VdbeOpList setMeta6[] = { - { OP_Transaction, 0, 1, 0}, /* 0 */ - { OP_ReadCookie, 0, 1, BTREE_LARGEST_ROOT_PAGE}, - { OP_If, 1, 0, 0}, /* 2 */ - { OP_Halt, SQLITE_OK, OE_Abort, 0}, /* 3 */ - { OP_Integer, 0, 1, 0}, /* 4 */ - { OP_SetCookie, 0, BTREE_INCR_VACUUM, 1}, /* 5 */ - }; - int iAddr; - iAddr = sqlite3VdbeAddOpList(v, ArraySize(setMeta6), setMeta6, iLn); - sqlite3VdbeChangeP1(v, iAddr, iDb); - sqlite3VdbeChangeP1(v, iAddr+1, iDb); - sqlite3VdbeChangeP2(v, iAddr+2, iAddr+4); - sqlite3VdbeChangeP1(v, iAddr+4, eAuto-1); - sqlite3VdbeChangeP1(v, iAddr+5, iDb); - sqlite3VdbeUsesBtree(v, iDb); - } - } - break; - } -#endif - - /* - ** PRAGMA [database.]incremental_vacuum(N) - ** - ** Do N steps of incremental vacuuming on a database. - */ -#ifndef SQLITE_OMIT_AUTOVACUUM - case PragTyp_INCREMENTAL_VACUUM: { - int iLimit, addr; - if( zRight==0 || !sqlite3GetInt32(zRight, &iLimit) || iLimit<=0 ){ - iLimit = 0x7fffffff; - } - sqlite3BeginWriteOperation(pParse, 0, iDb); - sqlite3VdbeAddOp2(v, OP_Integer, iLimit, 1); - addr = sqlite3VdbeAddOp1(v, OP_IncrVacuum, iDb); VdbeCoverage(v); - sqlite3VdbeAddOp1(v, OP_ResultRow, 1); - sqlite3VdbeAddOp2(v, OP_AddImm, 1, -1); - sqlite3VdbeAddOp2(v, OP_IfPos, 1, addr); VdbeCoverage(v); - sqlite3VdbeJumpHere(v, addr); - break; - } -#endif - -#ifndef SQLITE_OMIT_PAGER_PRAGMAS - /* - ** PRAGMA [database.]cache_size - ** PRAGMA [database.]cache_size=N - ** - ** The first form reports the current local setting for the - ** page cache size. The second form sets the local - ** page cache size value. If N is positive then that is the - ** number of pages in the cache. If N is negative, then the - ** number of pages is adjusted so that the cache uses -N kibibytes - ** of memory. 
- */ - case PragTyp_CACHE_SIZE: { - assert( sqlite3SchemaMutexHeld(db, iDb, 0) ); - if( !zRight ){ - returnSingleInt(pParse, "cache_size", pDb->pSchema->cache_size); - }else{ - int size = sqlite3Atoi(zRight); - pDb->pSchema->cache_size = size; - sqlite3BtreeSetCacheSize(pDb->pBt, pDb->pSchema->cache_size); - } - break; - } - - /* - ** PRAGMA [database.]mmap_size(N) - ** - ** Used to set mapping size limit. The mapping size limit is - ** used to limit the aggregate size of all memory mapped regions of the - ** database file. If this parameter is set to zero, then memory mapping - ** is not used at all. If N is negative, then the default memory map - ** limit determined by sqlite3_config(SQLITE_CONFIG_MMAP_SIZE) is set. - ** The parameter N is measured in bytes. - ** - ** This value is advisory. The underlying VFS is free to memory map - ** as little or as much as it wants. Except, if N is set to 0 then the - ** upper layers will never invoke the xFetch interfaces to the VFS. - */ - case PragTyp_MMAP_SIZE: { - sqlite3_int64 sz; -#if SQLITE_MAX_MMAP_SIZE>0 - assert( sqlite3SchemaMutexHeld(db, iDb, 0) ); - if( zRight ){ - int ii; - sqlite3Atoi64(zRight, &sz, sqlite3Strlen30(zRight), SQLITE_UTF8); - if( sz<0 ) sz = sqlite3GlobalConfig.szMmap; - if( pId2->n==0 ) db->szMmap = sz; - for(ii=db->nDb-1; ii>=0; ii--){ - if( db->aDb[ii].pBt && (ii==iDb || pId2->n==0) ){ - sqlite3BtreeSetMmapLimit(db->aDb[ii].pBt, sz); - } - } - } - sz = -1; - rc = sqlite3_file_control(db, zDb, SQLITE_FCNTL_MMAP_SIZE, &sz); -#else - sz = 0; - rc = SQLITE_OK; -#endif - if( rc==SQLITE_OK ){ - returnSingleInt(pParse, "mmap_size", sz); - }else if( rc!=SQLITE_NOTFOUND ){ - pParse->nErr++; - pParse->rc = rc; - } - break; - } - - /* - ** PRAGMA temp_store - ** PRAGMA temp_store = "default"|"memory"|"file" - ** - ** Return or set the local value of the temp_store flag. Changing - ** the local value does not make changes to the disk file and the default - ** value will be restored the next time the database is opened. - ** - ** Note that it is possible for the library compile-time options to - ** override this setting - */ - case PragTyp_TEMP_STORE: { - if( !zRight ){ - returnSingleInt(pParse, "temp_store", db->temp_store); - }else{ - changeTempStorage(pParse, zRight); - } - break; - } - - /* - ** PRAGMA temp_store_directory - ** PRAGMA temp_store_directory = ""|"directory_name" - ** - ** Return or set the local value of the temp_store_directory flag. Changing - ** the value sets a specific directory to be used for temporary files. - ** Setting to a null string reverts to the default temporary directory search. - ** If temporary directory is changed, then invalidateTempStorage. 
- ** - */ - case PragTyp_TEMP_STORE_DIRECTORY: { - if( !zRight ){ - if( sqlite3_temp_directory ){ - sqlite3VdbeSetNumCols(v, 1); - sqlite3VdbeSetColName(v, 0, COLNAME_NAME, - "temp_store_directory", SQLITE_STATIC); - sqlite3VdbeAddOp4(v, OP_String8, 0, 1, 0, sqlite3_temp_directory, 0); - sqlite3VdbeAddOp2(v, OP_ResultRow, 1, 1); - } - }else{ -#ifndef SQLITE_OMIT_WSD - if( zRight[0] ){ - int res; - rc = sqlite3OsAccess(db->pVfs, zRight, SQLITE_ACCESS_READWRITE, &res); - if( rc!=SQLITE_OK || res==0 ){ - sqlite3ErrorMsg(pParse, "not a writable directory"); - goto pragma_out; - } - } - if( SQLITE_TEMP_STORE==0 - || (SQLITE_TEMP_STORE==1 && db->temp_store<=1) - || (SQLITE_TEMP_STORE==2 && db->temp_store==1) - ){ - invalidateTempStorage(pParse); - } - sqlite3_free(sqlite3_temp_directory); - if( zRight[0] ){ - sqlite3_temp_directory = sqlite3_mprintf("%s", zRight); - }else{ - sqlite3_temp_directory = 0; - } -#endif /* SQLITE_OMIT_WSD */ - } - break; - } - -#if SQLITE_OS_WIN - /* - ** PRAGMA data_store_directory - ** PRAGMA data_store_directory = ""|"directory_name" - ** - ** Return or set the local value of the data_store_directory flag. Changing - ** the value sets a specific directory to be used for database files that - ** were specified with a relative pathname. Setting to a null string reverts - ** to the default database directory, which for database files specified with - ** a relative path will probably be based on the current directory for the - ** process. Database file specified with an absolute path are not impacted - ** by this setting, regardless of its value. - ** - */ - case PragTyp_DATA_STORE_DIRECTORY: { - if( !zRight ){ - if( sqlite3_data_directory ){ - sqlite3VdbeSetNumCols(v, 1); - sqlite3VdbeSetColName(v, 0, COLNAME_NAME, - "data_store_directory", SQLITE_STATIC); - sqlite3VdbeAddOp4(v, OP_String8, 0, 1, 0, sqlite3_data_directory, 0); - sqlite3VdbeAddOp2(v, OP_ResultRow, 1, 1); - } - }else{ -#ifndef SQLITE_OMIT_WSD - if( zRight[0] ){ - int res; - rc = sqlite3OsAccess(db->pVfs, zRight, SQLITE_ACCESS_READWRITE, &res); - if( rc!=SQLITE_OK || res==0 ){ - sqlite3ErrorMsg(pParse, "not a writable directory"); - goto pragma_out; - } - } - sqlite3_free(sqlite3_data_directory); - if( zRight[0] ){ - sqlite3_data_directory = sqlite3_mprintf("%s", zRight); - }else{ - sqlite3_data_directory = 0; - } -#endif /* SQLITE_OMIT_WSD */ - } - break; - } -#endif - -#if SQLITE_ENABLE_LOCKING_STYLE - /* - ** PRAGMA [database.]lock_proxy_file - ** PRAGMA [database.]lock_proxy_file = ":auto:"|"lock_file_path" - ** - ** Return or set the value of the lock_proxy_file flag. Changing - ** the value sets a specific file to be used for database access locks. 
- ** - */ - case PragTyp_LOCK_PROXY_FILE: { - if( !zRight ){ - Pager *pPager = sqlite3BtreePager(pDb->pBt); - char *proxy_file_path = NULL; - sqlite3_file *pFile = sqlite3PagerFile(pPager); - sqlite3OsFileControlHint(pFile, SQLITE_GET_LOCKPROXYFILE, - &proxy_file_path); - - if( proxy_file_path ){ - sqlite3VdbeSetNumCols(v, 1); - sqlite3VdbeSetColName(v, 0, COLNAME_NAME, - "lock_proxy_file", SQLITE_STATIC); - sqlite3VdbeAddOp4(v, OP_String8, 0, 1, 0, proxy_file_path, 0); - sqlite3VdbeAddOp2(v, OP_ResultRow, 1, 1); - } - }else{ - Pager *pPager = sqlite3BtreePager(pDb->pBt); - sqlite3_file *pFile = sqlite3PagerFile(pPager); - int res; - if( zRight[0] ){ - res=sqlite3OsFileControl(pFile, SQLITE_SET_LOCKPROXYFILE, - zRight); - } else { - res=sqlite3OsFileControl(pFile, SQLITE_SET_LOCKPROXYFILE, - NULL); - } - if( res!=SQLITE_OK ){ - sqlite3ErrorMsg(pParse, "failed to set lock proxy file"); - goto pragma_out; - } - } - break; - } -#endif /* SQLITE_ENABLE_LOCKING_STYLE */ - - /* - ** PRAGMA [database.]synchronous - ** PRAGMA [database.]synchronous=OFF|ON|NORMAL|FULL - ** - ** Return or set the local value of the synchronous flag. Changing - ** the local value does not make changes to the disk file and the - ** default value will be restored the next time the database is - ** opened. - */ - case PragTyp_SYNCHRONOUS: { - if( !zRight ){ - returnSingleInt(pParse, "synchronous", pDb->safety_level-1); - }else{ - if( !db->autoCommit ){ - sqlite3ErrorMsg(pParse, - "Safety level may not be changed inside a transaction"); - }else{ - pDb->safety_level = getSafetyLevel(zRight,0,1)+1; - setAllPagerFlags(db); - } - } - break; - } -#endif /* SQLITE_OMIT_PAGER_PRAGMAS */ - -#ifndef SQLITE_OMIT_FLAG_PRAGMAS - case PragTyp_FLAG: { - if( zRight==0 ){ - returnSingleInt(pParse, aPragmaNames[mid].zName, - (db->flags & aPragmaNames[mid].iArg)!=0 ); - }else{ - int mask = aPragmaNames[mid].iArg; /* Mask of bits to set or clear. */ - if( db->autoCommit==0 ){ - /* Foreign key support may not be enabled or disabled while not - ** in auto-commit mode. */ - mask &= ~(SQLITE_ForeignKeys); - } - - if( sqlite3GetBoolean(zRight, 0) ){ - db->flags |= mask; - }else{ - db->flags &= ~mask; - if( mask==SQLITE_DeferFKs ) db->nDeferredImmCons = 0; - } - - /* Many of the flag-pragmas modify the code generated by the SQL - ** compiler (eg. count_changes). So add an opcode to expire all - ** compiled SQL statements after modifying a pragma value. - */ - sqlite3VdbeAddOp2(v, OP_Expire, 0, 0); - setAllPagerFlags(db); - } - break; - } -#endif /* SQLITE_OMIT_FLAG_PRAGMAS */ - -#ifndef SQLITE_OMIT_SCHEMA_PRAGMAS - /* - ** PRAGMA table_info(
    ) - ** - ** Return a single row for each column of the named table. The columns of - ** the returned data set are: - ** - ** cid: Column id (numbered from left to right, starting at 0) - ** name: Column name - ** type: Column declaration type. - ** notnull: True if 'NOT NULL' is part of column declaration - ** dflt_value: The default value for the column, if any. - */ - case PragTyp_TABLE_INFO: if( zRight ){ - Table *pTab; - pTab = sqlite3FindTable(db, zRight, zDb); - if( pTab ){ - int i, k; - int nHidden = 0; - Column *pCol; - Index *pPk = sqlite3PrimaryKeyIndex(pTab); - sqlite3VdbeSetNumCols(v, 6); - pParse->nMem = 6; - sqlite3CodeVerifySchema(pParse, iDb); - sqlite3VdbeSetColName(v, 0, COLNAME_NAME, "cid", SQLITE_STATIC); - sqlite3VdbeSetColName(v, 1, COLNAME_NAME, "name", SQLITE_STATIC); - sqlite3VdbeSetColName(v, 2, COLNAME_NAME, "type", SQLITE_STATIC); - sqlite3VdbeSetColName(v, 3, COLNAME_NAME, "notnull", SQLITE_STATIC); - sqlite3VdbeSetColName(v, 4, COLNAME_NAME, "dflt_value", SQLITE_STATIC); - sqlite3VdbeSetColName(v, 5, COLNAME_NAME, "pk", SQLITE_STATIC); - sqlite3ViewGetColumnNames(pParse, pTab); - for(i=0, pCol=pTab->aCol; inCol; i++, pCol++){ - if( IsHiddenColumn(pCol) ){ - nHidden++; - continue; - } - sqlite3VdbeAddOp2(v, OP_Integer, i-nHidden, 1); - sqlite3VdbeAddOp4(v, OP_String8, 0, 2, 0, pCol->zName, 0); - sqlite3VdbeAddOp4(v, OP_String8, 0, 3, 0, - pCol->zType ? pCol->zType : "", 0); - sqlite3VdbeAddOp2(v, OP_Integer, (pCol->notNull ? 1 : 0), 4); - if( pCol->zDflt ){ - sqlite3VdbeAddOp4(v, OP_String8, 0, 5, 0, (char*)pCol->zDflt, 0); - }else{ - sqlite3VdbeAddOp2(v, OP_Null, 0, 5); - } - if( (pCol->colFlags & COLFLAG_PRIMKEY)==0 ){ - k = 0; - }else if( pPk==0 ){ - k = 1; - }else{ - for(k=1; ALWAYS(k<=pTab->nCol) && pPk->aiColumn[k-1]!=i; k++){} - } - sqlite3VdbeAddOp2(v, OP_Integer, k, 6); - sqlite3VdbeAddOp2(v, OP_ResultRow, 1, 6); - } - } - } - break; - - case PragTyp_STATS: { - Index *pIdx; - HashElem *i; - v = sqlite3GetVdbe(pParse); - sqlite3VdbeSetNumCols(v, 4); - pParse->nMem = 4; - sqlite3CodeVerifySchema(pParse, iDb); - sqlite3VdbeSetColName(v, 0, COLNAME_NAME, "table", SQLITE_STATIC); - sqlite3VdbeSetColName(v, 1, COLNAME_NAME, "index", SQLITE_STATIC); - sqlite3VdbeSetColName(v, 2, COLNAME_NAME, "width", SQLITE_STATIC); - sqlite3VdbeSetColName(v, 3, COLNAME_NAME, "height", SQLITE_STATIC); - for(i=sqliteHashFirst(&pDb->pSchema->tblHash); i; i=sqliteHashNext(i)){ - Table *pTab = sqliteHashData(i); - sqlite3VdbeAddOp4(v, OP_String8, 0, 1, 0, pTab->zName, 0); - sqlite3VdbeAddOp2(v, OP_Null, 0, 2); - sqlite3VdbeAddOp2(v, OP_Integer, - (int)sqlite3LogEstToInt(pTab->szTabRow), 3); - sqlite3VdbeAddOp2(v, OP_Integer, - (int)sqlite3LogEstToInt(pTab->nRowLogEst), 4); - sqlite3VdbeAddOp2(v, OP_ResultRow, 1, 4); - for(pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext){ - sqlite3VdbeAddOp4(v, OP_String8, 0, 2, 0, pIdx->zName, 0); - sqlite3VdbeAddOp2(v, OP_Integer, - (int)sqlite3LogEstToInt(pIdx->szIdxRow), 3); - sqlite3VdbeAddOp2(v, OP_Integer, - (int)sqlite3LogEstToInt(pIdx->aiRowLogEst[0]), 4); - sqlite3VdbeAddOp2(v, OP_ResultRow, 1, 4); - } - } - } - break; - - case PragTyp_INDEX_INFO: if( zRight ){ - Index *pIdx; - Table *pTab; - pIdx = sqlite3FindIndex(db, zRight, zDb); - if( pIdx ){ - int i; - pTab = pIdx->pTable; - sqlite3VdbeSetNumCols(v, 3); - pParse->nMem = 3; - sqlite3CodeVerifySchema(pParse, iDb); - sqlite3VdbeSetColName(v, 0, COLNAME_NAME, "seqno", SQLITE_STATIC); - sqlite3VdbeSetColName(v, 1, COLNAME_NAME, "cid", SQLITE_STATIC); - sqlite3VdbeSetColName(v, 2, 
COLNAME_NAME, "name", SQLITE_STATIC); - for(i=0; inKeyCol; i++){ - i16 cnum = pIdx->aiColumn[i]; - sqlite3VdbeAddOp2(v, OP_Integer, i, 1); - sqlite3VdbeAddOp2(v, OP_Integer, cnum, 2); - assert( pTab->nCol>cnum ); - sqlite3VdbeAddOp4(v, OP_String8, 0, 3, 0, pTab->aCol[cnum].zName, 0); - sqlite3VdbeAddOp2(v, OP_ResultRow, 1, 3); - } - } - } - break; - - case PragTyp_INDEX_LIST: if( zRight ){ - Index *pIdx; - Table *pTab; - int i; - pTab = sqlite3FindTable(db, zRight, zDb); - if( pTab ){ - v = sqlite3GetVdbe(pParse); - sqlite3VdbeSetNumCols(v, 3); - pParse->nMem = 3; - sqlite3CodeVerifySchema(pParse, iDb); - sqlite3VdbeSetColName(v, 0, COLNAME_NAME, "seq", SQLITE_STATIC); - sqlite3VdbeSetColName(v, 1, COLNAME_NAME, "name", SQLITE_STATIC); - sqlite3VdbeSetColName(v, 2, COLNAME_NAME, "unique", SQLITE_STATIC); - for(pIdx=pTab->pIndex, i=0; pIdx; pIdx=pIdx->pNext, i++){ - sqlite3VdbeAddOp2(v, OP_Integer, i, 1); - sqlite3VdbeAddOp4(v, OP_String8, 0, 2, 0, pIdx->zName, 0); - sqlite3VdbeAddOp2(v, OP_Integer, pIdx->onError!=OE_None, 3); - sqlite3VdbeAddOp2(v, OP_ResultRow, 1, 3); - } - } - } - break; - - case PragTyp_DATABASE_LIST: { - int i; - sqlite3VdbeSetNumCols(v, 3); - pParse->nMem = 3; - sqlite3VdbeSetColName(v, 0, COLNAME_NAME, "seq", SQLITE_STATIC); - sqlite3VdbeSetColName(v, 1, COLNAME_NAME, "name", SQLITE_STATIC); - sqlite3VdbeSetColName(v, 2, COLNAME_NAME, "file", SQLITE_STATIC); - for(i=0; inDb; i++){ - if( db->aDb[i].pBt==0 ) continue; - assert( db->aDb[i].zName!=0 ); - sqlite3VdbeAddOp2(v, OP_Integer, i, 1); - sqlite3VdbeAddOp4(v, OP_String8, 0, 2, 0, db->aDb[i].zName, 0); - sqlite3VdbeAddOp4(v, OP_String8, 0, 3, 0, - sqlite3BtreeGetFilename(db->aDb[i].pBt), 0); - sqlite3VdbeAddOp2(v, OP_ResultRow, 1, 3); - } - } - break; - - case PragTyp_COLLATION_LIST: { - int i = 0; - HashElem *p; - sqlite3VdbeSetNumCols(v, 2); - pParse->nMem = 2; - sqlite3VdbeSetColName(v, 0, COLNAME_NAME, "seq", SQLITE_STATIC); - sqlite3VdbeSetColName(v, 1, COLNAME_NAME, "name", SQLITE_STATIC); - for(p=sqliteHashFirst(&db->aCollSeq); p; p=sqliteHashNext(p)){ - CollSeq *pColl = (CollSeq *)sqliteHashData(p); - sqlite3VdbeAddOp2(v, OP_Integer, i++, 1); - sqlite3VdbeAddOp4(v, OP_String8, 0, 2, 0, pColl->zName, 0); - sqlite3VdbeAddOp2(v, OP_ResultRow, 1, 2); - } - } - break; -#endif /* SQLITE_OMIT_SCHEMA_PRAGMAS */ - -#ifndef SQLITE_OMIT_FOREIGN_KEY - case PragTyp_FOREIGN_KEY_LIST: if( zRight ){ - FKey *pFK; - Table *pTab; - pTab = sqlite3FindTable(db, zRight, zDb); - if( pTab ){ - v = sqlite3GetVdbe(pParse); - pFK = pTab->pFKey; - if( pFK ){ - int i = 0; - sqlite3VdbeSetNumCols(v, 8); - pParse->nMem = 8; - sqlite3CodeVerifySchema(pParse, iDb); - sqlite3VdbeSetColName(v, 0, COLNAME_NAME, "id", SQLITE_STATIC); - sqlite3VdbeSetColName(v, 1, COLNAME_NAME, "seq", SQLITE_STATIC); - sqlite3VdbeSetColName(v, 2, COLNAME_NAME, "table", SQLITE_STATIC); - sqlite3VdbeSetColName(v, 3, COLNAME_NAME, "from", SQLITE_STATIC); - sqlite3VdbeSetColName(v, 4, COLNAME_NAME, "to", SQLITE_STATIC); - sqlite3VdbeSetColName(v, 5, COLNAME_NAME, "on_update", SQLITE_STATIC); - sqlite3VdbeSetColName(v, 6, COLNAME_NAME, "on_delete", SQLITE_STATIC); - sqlite3VdbeSetColName(v, 7, COLNAME_NAME, "match", SQLITE_STATIC); - while(pFK){ - int j; - for(j=0; jnCol; j++){ - char *zCol = pFK->aCol[j].zCol; - char *zOnDelete = (char *)actionName(pFK->aAction[0]); - char *zOnUpdate = (char *)actionName(pFK->aAction[1]); - sqlite3VdbeAddOp2(v, OP_Integer, i, 1); - sqlite3VdbeAddOp2(v, OP_Integer, j, 2); - sqlite3VdbeAddOp4(v, OP_String8, 0, 3, 0, pFK->zTo, 0); - 
sqlite3VdbeAddOp4(v, OP_String8, 0, 4, 0, - pTab->aCol[pFK->aCol[j].iFrom].zName, 0); - sqlite3VdbeAddOp4(v, zCol ? OP_String8 : OP_Null, 0, 5, 0, zCol, 0); - sqlite3VdbeAddOp4(v, OP_String8, 0, 6, 0, zOnUpdate, 0); - sqlite3VdbeAddOp4(v, OP_String8, 0, 7, 0, zOnDelete, 0); - sqlite3VdbeAddOp4(v, OP_String8, 0, 8, 0, "NONE", 0); - sqlite3VdbeAddOp2(v, OP_ResultRow, 1, 8); - } - ++i; - pFK = pFK->pNextFrom; - } - } - } - } - break; -#endif /* !defined(SQLITE_OMIT_FOREIGN_KEY) */ - -#ifndef SQLITE_OMIT_FOREIGN_KEY -#ifndef SQLITE_OMIT_TRIGGER - case PragTyp_FOREIGN_KEY_CHECK: { - FKey *pFK; /* A foreign key constraint */ - Table *pTab; /* Child table contain "REFERENCES" keyword */ - Table *pParent; /* Parent table that child points to */ - Index *pIdx; /* Index in the parent table */ - int i; /* Loop counter: Foreign key number for pTab */ - int j; /* Loop counter: Field of the foreign key */ - HashElem *k; /* Loop counter: Next table in schema */ - int x; /* result variable */ - int regResult; /* 3 registers to hold a result row */ - int regKey; /* Register to hold key for checking the FK */ - int regRow; /* Registers to hold a row from pTab */ - int addrTop; /* Top of a loop checking foreign keys */ - int addrOk; /* Jump here if the key is OK */ - int *aiCols; /* child to parent column mapping */ - - regResult = pParse->nMem+1; - pParse->nMem += 4; - regKey = ++pParse->nMem; - regRow = ++pParse->nMem; - v = sqlite3GetVdbe(pParse); - sqlite3VdbeSetNumCols(v, 4); - sqlite3VdbeSetColName(v, 0, COLNAME_NAME, "table", SQLITE_STATIC); - sqlite3VdbeSetColName(v, 1, COLNAME_NAME, "rowid", SQLITE_STATIC); - sqlite3VdbeSetColName(v, 2, COLNAME_NAME, "parent", SQLITE_STATIC); - sqlite3VdbeSetColName(v, 3, COLNAME_NAME, "fkid", SQLITE_STATIC); - sqlite3CodeVerifySchema(pParse, iDb); - k = sqliteHashFirst(&db->aDb[iDb].pSchema->tblHash); - while( k ){ - if( zRight ){ - pTab = sqlite3LocateTable(pParse, 0, zRight, zDb); - k = 0; - }else{ - pTab = (Table*)sqliteHashData(k); - k = sqliteHashNext(k); - } - if( pTab==0 || pTab->pFKey==0 ) continue; - sqlite3TableLock(pParse, iDb, pTab->tnum, 0, pTab->zName); - if( pTab->nCol+regRow>pParse->nMem ) pParse->nMem = pTab->nCol + regRow; - sqlite3OpenTable(pParse, 0, iDb, pTab, OP_OpenRead); - sqlite3VdbeAddOp4(v, OP_String8, 0, regResult, 0, pTab->zName, - P4_TRANSIENT); - for(i=1, pFK=pTab->pFKey; pFK; i++, pFK=pFK->pNextFrom){ - pParent = sqlite3FindTable(db, pFK->zTo, zDb); - if( pParent==0 ) continue; - pIdx = 0; - sqlite3TableLock(pParse, iDb, pParent->tnum, 0, pParent->zName); - x = sqlite3FkLocateIndex(pParse, pParent, pFK, &pIdx, 0); - if( x==0 ){ - if( pIdx==0 ){ - sqlite3OpenTable(pParse, i, iDb, pParent, OP_OpenRead); - }else{ - sqlite3VdbeAddOp3(v, OP_OpenRead, i, pIdx->tnum, iDb); - sqlite3VdbeSetP4KeyInfo(pParse, pIdx); - } - }else{ - k = 0; - break; - } - } - assert( pParse->nErr>0 || pFK==0 ); - if( pFK ) break; - if( pParse->nTabnTab = i; - addrTop = sqlite3VdbeAddOp1(v, OP_Rewind, 0); VdbeCoverage(v); - for(i=1, pFK=pTab->pFKey; pFK; i++, pFK=pFK->pNextFrom){ - pParent = sqlite3FindTable(db, pFK->zTo, zDb); - pIdx = 0; - aiCols = 0; - if( pParent ){ - x = sqlite3FkLocateIndex(pParse, pParent, pFK, &pIdx, &aiCols); - assert( x==0 ); - } - addrOk = sqlite3VdbeMakeLabel(v); - if( pParent && pIdx==0 ){ - int iKey = pFK->aCol[0].iFrom; - assert( iKey>=0 && iKeynCol ); - if( iKey!=pTab->iPKey ){ - sqlite3VdbeAddOp3(v, OP_Column, 0, iKey, regRow); - sqlite3ColumnDefault(v, pTab, iKey, regRow); - sqlite3VdbeAddOp2(v, OP_IsNull, regRow, addrOk); 
VdbeCoverage(v); - sqlite3VdbeAddOp2(v, OP_MustBeInt, regRow, - sqlite3VdbeCurrentAddr(v)+3); VdbeCoverage(v); - }else{ - sqlite3VdbeAddOp2(v, OP_Rowid, 0, regRow); - } - sqlite3VdbeAddOp3(v, OP_NotExists, i, 0, regRow); VdbeCoverage(v); - sqlite3VdbeAddOp2(v, OP_Goto, 0, addrOk); - sqlite3VdbeJumpHere(v, sqlite3VdbeCurrentAddr(v)-2); - }else{ - for(j=0; jnCol; j++){ - sqlite3ExprCodeGetColumnOfTable(v, pTab, 0, - aiCols ? aiCols[j] : pFK->aCol[j].iFrom, regRow+j); - sqlite3VdbeAddOp2(v, OP_IsNull, regRow+j, addrOk); VdbeCoverage(v); - } - if( pParent ){ - sqlite3VdbeAddOp4(v, OP_MakeRecord, regRow, pFK->nCol, regKey, - sqlite3IndexAffinityStr(v,pIdx), pFK->nCol); - sqlite3VdbeAddOp4Int(v, OP_Found, i, addrOk, regKey, 0); - VdbeCoverage(v); - } - } - sqlite3VdbeAddOp2(v, OP_Rowid, 0, regResult+1); - sqlite3VdbeAddOp4(v, OP_String8, 0, regResult+2, 0, - pFK->zTo, P4_TRANSIENT); - sqlite3VdbeAddOp2(v, OP_Integer, i-1, regResult+3); - sqlite3VdbeAddOp2(v, OP_ResultRow, regResult, 4); - sqlite3VdbeResolveLabel(v, addrOk); - sqlite3DbFree(db, aiCols); - } - sqlite3VdbeAddOp2(v, OP_Next, 0, addrTop+1); VdbeCoverage(v); - sqlite3VdbeJumpHere(v, addrTop); - } - } - break; -#endif /* !defined(SQLITE_OMIT_TRIGGER) */ -#endif /* !defined(SQLITE_OMIT_FOREIGN_KEY) */ - -#ifndef NDEBUG - case PragTyp_PARSER_TRACE: { - if( zRight ){ - if( sqlite3GetBoolean(zRight, 0) ){ - sqlite3ParserTrace(stderr, "parser: "); - }else{ - sqlite3ParserTrace(0, 0); - } - } - } - break; -#endif - - /* Reinstall the LIKE and GLOB functions. The variant of LIKE - ** used will be case sensitive or not depending on the RHS. - */ - case PragTyp_CASE_SENSITIVE_LIKE: { - if( zRight ){ - sqlite3RegisterLikeFunctions(db, sqlite3GetBoolean(zRight, 0)); - } - } - break; - -#ifndef SQLITE_INTEGRITY_CHECK_ERROR_MAX -# define SQLITE_INTEGRITY_CHECK_ERROR_MAX 100 -#endif - -#ifndef SQLITE_OMIT_INTEGRITY_CHECK - /* Pragma "quick_check" is reduced version of - ** integrity_check designed to detect most database corruption - ** without most of the overhead of a full integrity-check. - */ - case PragTyp_INTEGRITY_CHECK: { - int i, j, addr, mxErr; - - /* Code that appears at the end of the integrity check. If no error - ** messages have been generated, output OK. Otherwise output the - ** error message - */ - static const int iLn = VDBE_OFFSET_LINENO(2); - static const VdbeOpList endCode[] = { - { OP_AddImm, 1, 0, 0}, /* 0 */ - { OP_IfNeg, 1, 0, 0}, /* 1 */ - { OP_String8, 0, 3, 0}, /* 2 */ - { OP_ResultRow, 3, 1, 0}, - }; - - int isQuick = (sqlite3Tolower(zLeft[0])=='q'); - - /* If the PRAGMA command was of the form "PRAGMA .integrity_check", - ** then iDb is set to the index of the database identified by . - ** In this case, the integrity of database iDb only is verified by - ** the VDBE created below. - ** - ** Otherwise, if the command was simply "PRAGMA integrity_check" (or - ** "PRAGMA quick_check"), then iDb is set to 0. In this case, set iDb - ** to -1 here, to indicate that the VDBE should verify the integrity - ** of all attached databases. 
*/ - assert( iDb>=0 ); - assert( iDb==0 || pId2->z ); - if( pId2->z==0 ) iDb = -1; - - /* Initialize the VDBE program */ - pParse->nMem = 6; - sqlite3VdbeSetNumCols(v, 1); - sqlite3VdbeSetColName(v, 0, COLNAME_NAME, "integrity_check", SQLITE_STATIC); - - /* Set the maximum error count */ - mxErr = SQLITE_INTEGRITY_CHECK_ERROR_MAX; - if( zRight ){ - sqlite3GetInt32(zRight, &mxErr); - if( mxErr<=0 ){ - mxErr = SQLITE_INTEGRITY_CHECK_ERROR_MAX; - } - } - sqlite3VdbeAddOp2(v, OP_Integer, mxErr, 1); /* reg[1] holds errors left */ - - /* Do an integrity check on each database file */ - for(i=0; inDb; i++){ - HashElem *x; - Hash *pTbls; - int cnt = 0; - - if( OMIT_TEMPDB && i==1 ) continue; - if( iDb>=0 && i!=iDb ) continue; - - sqlite3CodeVerifySchema(pParse, i); - addr = sqlite3VdbeAddOp1(v, OP_IfPos, 1); /* Halt if out of errors */ - VdbeCoverage(v); - sqlite3VdbeAddOp2(v, OP_Halt, 0, 0); - sqlite3VdbeJumpHere(v, addr); - - /* Do an integrity check of the B-Tree - ** - ** Begin by filling registers 2, 3, ... with the root pages numbers - ** for all tables and indices in the database. - */ - assert( sqlite3SchemaMutexHeld(db, i, 0) ); - pTbls = &db->aDb[i].pSchema->tblHash; - for(x=sqliteHashFirst(pTbls); x; x=sqliteHashNext(x)){ - Table *pTab = sqliteHashData(x); - Index *pIdx; - if( HasRowid(pTab) ){ - sqlite3VdbeAddOp2(v, OP_Integer, pTab->tnum, 2+cnt); - VdbeComment((v, "%s", pTab->zName)); - cnt++; - } - for(pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext){ - sqlite3VdbeAddOp2(v, OP_Integer, pIdx->tnum, 2+cnt); - VdbeComment((v, "%s", pIdx->zName)); - cnt++; - } - } - - /* Make sure sufficient number of registers have been allocated */ - pParse->nMem = MAX( pParse->nMem, cnt+8 ); - - /* Do the b-tree integrity checks */ - sqlite3VdbeAddOp3(v, OP_IntegrityCk, 2, cnt, 1); - sqlite3VdbeChangeP5(v, (u8)i); - addr = sqlite3VdbeAddOp1(v, OP_IsNull, 2); VdbeCoverage(v); - sqlite3VdbeAddOp4(v, OP_String8, 0, 3, 0, - sqlite3MPrintf(db, "*** in database %s ***\n", db->aDb[i].zName), - P4_DYNAMIC); - sqlite3VdbeAddOp3(v, OP_Move, 2, 4, 1); - sqlite3VdbeAddOp3(v, OP_Concat, 4, 3, 2); - sqlite3VdbeAddOp2(v, OP_ResultRow, 2, 1); - sqlite3VdbeJumpHere(v, addr); - - /* Make sure all the indices are constructed correctly. - */ - for(x=sqliteHashFirst(pTbls); x && !isQuick; x=sqliteHashNext(x)){ - Table *pTab = sqliteHashData(x); - Index *pIdx, *pPk; - Index *pPrior = 0; - int loopTop; - int iDataCur, iIdxCur; - int r1 = -1; - - if( pTab->pIndex==0 ) continue; - pPk = HasRowid(pTab) ? 
0 : sqlite3PrimaryKeyIndex(pTab); - addr = sqlite3VdbeAddOp1(v, OP_IfPos, 1); /* Stop if out of errors */ - VdbeCoverage(v); - sqlite3VdbeAddOp2(v, OP_Halt, 0, 0); - sqlite3VdbeJumpHere(v, addr); - sqlite3ExprCacheClear(pParse); - sqlite3OpenTableAndIndices(pParse, pTab, OP_OpenRead, - 1, 0, &iDataCur, &iIdxCur); - sqlite3VdbeAddOp2(v, OP_Integer, 0, 7); - for(j=0, pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext, j++){ - sqlite3VdbeAddOp2(v, OP_Integer, 0, 8+j); /* index entries counter */ - } - pParse->nMem = MAX(pParse->nMem, 8+j); - sqlite3VdbeAddOp2(v, OP_Rewind, iDataCur, 0); VdbeCoverage(v); - loopTop = sqlite3VdbeAddOp2(v, OP_AddImm, 7, 1); - for(j=0, pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext, j++){ - int jmp2, jmp3, jmp4; - if( pPk==pIdx ) continue; - r1 = sqlite3GenerateIndexKey(pParse, pIdx, iDataCur, 0, 0, &jmp3, - pPrior, r1); - pPrior = pIdx; - sqlite3VdbeAddOp2(v, OP_AddImm, 8+j, 1); /* increment entry count */ - jmp2 = sqlite3VdbeAddOp4Int(v, OP_Found, iIdxCur+j, 0, r1, - pIdx->nColumn); VdbeCoverage(v); - sqlite3VdbeAddOp2(v, OP_AddImm, 1, -1); /* Decrement error limit */ - sqlite3VdbeAddOp4(v, OP_String8, 0, 3, 0, "row ", P4_STATIC); - sqlite3VdbeAddOp3(v, OP_Concat, 7, 3, 3); - sqlite3VdbeAddOp4(v, OP_String8, 0, 4, 0, " missing from index ", - P4_STATIC); - sqlite3VdbeAddOp3(v, OP_Concat, 4, 3, 3); - sqlite3VdbeAddOp4(v, OP_String8, 0, 4, 0, pIdx->zName, P4_TRANSIENT); - sqlite3VdbeAddOp3(v, OP_Concat, 4, 3, 3); - sqlite3VdbeAddOp2(v, OP_ResultRow, 3, 1); - jmp4 = sqlite3VdbeAddOp1(v, OP_IfPos, 1); VdbeCoverage(v); - sqlite3VdbeAddOp0(v, OP_Halt); - sqlite3VdbeJumpHere(v, jmp4); - sqlite3VdbeJumpHere(v, jmp2); - sqlite3ResolvePartIdxLabel(pParse, jmp3); - } - sqlite3VdbeAddOp2(v, OP_Next, iDataCur, loopTop); VdbeCoverage(v); - sqlite3VdbeJumpHere(v, loopTop-1); -#ifndef SQLITE_OMIT_BTREECOUNT - sqlite3VdbeAddOp4(v, OP_String8, 0, 2, 0, - "wrong # of entries in index ", P4_STATIC); - for(j=0, pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext, j++){ - if( pPk==pIdx ) continue; - addr = sqlite3VdbeCurrentAddr(v); - sqlite3VdbeAddOp2(v, OP_IfPos, 1, addr+2); VdbeCoverage(v); - sqlite3VdbeAddOp2(v, OP_Halt, 0, 0); - sqlite3VdbeAddOp2(v, OP_Count, iIdxCur+j, 3); - sqlite3VdbeAddOp3(v, OP_Eq, 8+j, addr+8, 3); VdbeCoverage(v); - sqlite3VdbeChangeP5(v, SQLITE_NOTNULL); - sqlite3VdbeAddOp2(v, OP_AddImm, 1, -1); - sqlite3VdbeAddOp4(v, OP_String8, 0, 3, 0, pIdx->zName, P4_TRANSIENT); - sqlite3VdbeAddOp3(v, OP_Concat, 3, 2, 7); - sqlite3VdbeAddOp2(v, OP_ResultRow, 7, 1); - } -#endif /* SQLITE_OMIT_BTREECOUNT */ - } - } - addr = sqlite3VdbeAddOpList(v, ArraySize(endCode), endCode, iLn); - sqlite3VdbeChangeP2(v, addr, -mxErr); - sqlite3VdbeJumpHere(v, addr+1); - sqlite3VdbeChangeP4(v, addr+2, "ok", P4_STATIC); - } - break; -#endif /* SQLITE_OMIT_INTEGRITY_CHECK */ - -#ifndef SQLITE_OMIT_UTF16 - /* - ** PRAGMA encoding - ** PRAGMA encoding = "utf-8"|"utf-16"|"utf-16le"|"utf-16be" - ** - ** In its first form, this pragma returns the encoding of the main - ** database. If the database is not initialized, it is initialized now. - ** - ** The second form of this pragma is a no-op if the main database file - ** has not already been initialized. In this case it sets the default - ** encoding that will be used for the main database file if a new file - ** is created. If an existing main database file is opened, then the - ** default text encoding for the existing database is used. 
- ** - ** In all cases new databases created using the ATTACH command are - ** created to use the same default text encoding as the main database. If - ** the main database has not been initialized and/or created when ATTACH - ** is executed, this is done before the ATTACH operation. - ** - ** In the second form this pragma sets the text encoding to be used in - ** new database files created using this database handle. It is only - ** useful if invoked immediately after the main database i - */ - case PragTyp_ENCODING: { - static const struct EncName { - char *zName; - u8 enc; - } encnames[] = { - { "UTF8", SQLITE_UTF8 }, - { "UTF-8", SQLITE_UTF8 }, /* Must be element [1] */ - { "UTF-16le", SQLITE_UTF16LE }, /* Must be element [2] */ - { "UTF-16be", SQLITE_UTF16BE }, /* Must be element [3] */ - { "UTF16le", SQLITE_UTF16LE }, - { "UTF16be", SQLITE_UTF16BE }, - { "UTF-16", 0 }, /* SQLITE_UTF16NATIVE */ - { "UTF16", 0 }, /* SQLITE_UTF16NATIVE */ - { 0, 0 } - }; - const struct EncName *pEnc; - if( !zRight ){ /* "PRAGMA encoding" */ - if( sqlite3ReadSchema(pParse) ) goto pragma_out; - sqlite3VdbeSetNumCols(v, 1); - sqlite3VdbeSetColName(v, 0, COLNAME_NAME, "encoding", SQLITE_STATIC); - sqlite3VdbeAddOp2(v, OP_String8, 0, 1); - assert( encnames[SQLITE_UTF8].enc==SQLITE_UTF8 ); - assert( encnames[SQLITE_UTF16LE].enc==SQLITE_UTF16LE ); - assert( encnames[SQLITE_UTF16BE].enc==SQLITE_UTF16BE ); - sqlite3VdbeChangeP4(v, -1, encnames[ENC(pParse->db)].zName, P4_STATIC); - sqlite3VdbeAddOp2(v, OP_ResultRow, 1, 1); - }else{ /* "PRAGMA encoding = XXX" */ - /* Only change the value of sqlite.enc if the database handle is not - ** initialized. If the main database exists, the new sqlite.enc value - ** will be overwritten when the schema is next loaded. If it does not - ** already exists, it will be created to use the new encoding value. - */ - if( - !(DbHasProperty(db, 0, DB_SchemaLoaded)) || - DbHasProperty(db, 0, DB_Empty) - ){ - for(pEnc=&encnames[0]; pEnc->zName; pEnc++){ - if( 0==sqlite3StrICmp(zRight, pEnc->zName) ){ - ENC(pParse->db) = pEnc->enc ? pEnc->enc : SQLITE_UTF16NATIVE; - break; - } - } - if( !pEnc->zName ){ - sqlite3ErrorMsg(pParse, "unsupported encoding: %s", zRight); - } - } - } - } - break; -#endif /* SQLITE_OMIT_UTF16 */ - -#ifndef SQLITE_OMIT_SCHEMA_VERSION_PRAGMAS - /* - ** PRAGMA [database.]schema_version - ** PRAGMA [database.]schema_version = - ** - ** PRAGMA [database.]user_version - ** PRAGMA [database.]user_version = - ** - ** PRAGMA [database.]freelist_count = - ** - ** PRAGMA [database.]application_id - ** PRAGMA [database.]application_id = - ** - ** The pragma's schema_version and user_version are used to set or get - ** the value of the schema-version and user-version, respectively. Both - ** the schema-version and the user-version are 32-bit signed integers - ** stored in the database header. - ** - ** The schema-cookie is usually only manipulated internally by SQLite. It - ** is incremented by SQLite whenever the database schema is modified (by - ** creating or dropping a table or index). The schema version is used by - ** SQLite each time a query is executed to ensure that the internal cache - ** of the schema used when compiling the SQL query matches the schema of - ** the database against which the compiled query is actually executed. - ** Subverting this mechanism by using "PRAGMA schema_version" to modify - ** the schema-version is potentially dangerous and may lead to program - ** crashes or database corruption. Use with caution! 
- ** - ** The user-version is not used internally by SQLite. It may be used by - ** applications for any purpose. - */ - case PragTyp_HEADER_VALUE: { - int iCookie; /* Cookie index. 1 for schema-cookie, 6 for user-cookie. */ - sqlite3VdbeUsesBtree(v, iDb); - switch( zLeft[0] ){ - case 'a': case 'A': - iCookie = BTREE_APPLICATION_ID; - break; - case 'f': case 'F': - iCookie = BTREE_FREE_PAGE_COUNT; - break; - case 's': case 'S': - iCookie = BTREE_SCHEMA_VERSION; - break; - default: - iCookie = BTREE_USER_VERSION; - break; - } - - if( zRight && iCookie!=BTREE_FREE_PAGE_COUNT ){ - /* Write the specified cookie value */ - static const VdbeOpList setCookie[] = { - { OP_Transaction, 0, 1, 0}, /* 0 */ - { OP_Integer, 0, 1, 0}, /* 1 */ - { OP_SetCookie, 0, 0, 1}, /* 2 */ - }; - int addr = sqlite3VdbeAddOpList(v, ArraySize(setCookie), setCookie, 0); - sqlite3VdbeChangeP1(v, addr, iDb); - sqlite3VdbeChangeP1(v, addr+1, sqlite3Atoi(zRight)); - sqlite3VdbeChangeP1(v, addr+2, iDb); - sqlite3VdbeChangeP2(v, addr+2, iCookie); - }else{ - /* Read the specified cookie value */ - static const VdbeOpList readCookie[] = { - { OP_Transaction, 0, 0, 0}, /* 0 */ - { OP_ReadCookie, 0, 1, 0}, /* 1 */ - { OP_ResultRow, 1, 1, 0} - }; - int addr = sqlite3VdbeAddOpList(v, ArraySize(readCookie), readCookie, 0); - sqlite3VdbeChangeP1(v, addr, iDb); - sqlite3VdbeChangeP1(v, addr+1, iDb); - sqlite3VdbeChangeP3(v, addr+1, iCookie); - sqlite3VdbeSetNumCols(v, 1); - sqlite3VdbeSetColName(v, 0, COLNAME_NAME, zLeft, SQLITE_TRANSIENT); - } - } - break; -#endif /* SQLITE_OMIT_SCHEMA_VERSION_PRAGMAS */ - -#ifndef SQLITE_OMIT_COMPILEOPTION_DIAGS - /* - ** PRAGMA compile_options - ** - ** Return the names of all compile-time options used in this build, - ** one option per row. - */ - case PragTyp_COMPILE_OPTIONS: { - int i = 0; - const char *zOpt; - sqlite3VdbeSetNumCols(v, 1); - pParse->nMem = 1; - sqlite3VdbeSetColName(v, 0, COLNAME_NAME, "compile_option", SQLITE_STATIC); - while( (zOpt = sqlite3_compileoption_get(i++))!=0 ){ - sqlite3VdbeAddOp4(v, OP_String8, 0, 1, 0, zOpt, 0); - sqlite3VdbeAddOp2(v, OP_ResultRow, 1, 1); - } - } - break; -#endif /* SQLITE_OMIT_COMPILEOPTION_DIAGS */ - -#ifndef SQLITE_OMIT_WAL - /* - ** PRAGMA [database.]wal_checkpoint = passive|full|restart - ** - ** Checkpoint the database. - */ - case PragTyp_WAL_CHECKPOINT: { - int iBt = (pId2->z?iDb:SQLITE_MAX_ATTACHED); - int eMode = SQLITE_CHECKPOINT_PASSIVE; - if( zRight ){ - if( sqlite3StrICmp(zRight, "full")==0 ){ - eMode = SQLITE_CHECKPOINT_FULL; - }else if( sqlite3StrICmp(zRight, "restart")==0 ){ - eMode = SQLITE_CHECKPOINT_RESTART; - } - } - sqlite3VdbeSetNumCols(v, 3); - pParse->nMem = 3; - sqlite3VdbeSetColName(v, 0, COLNAME_NAME, "busy", SQLITE_STATIC); - sqlite3VdbeSetColName(v, 1, COLNAME_NAME, "log", SQLITE_STATIC); - sqlite3VdbeSetColName(v, 2, COLNAME_NAME, "checkpointed", SQLITE_STATIC); - - sqlite3VdbeAddOp3(v, OP_Checkpoint, iBt, eMode, 1); - sqlite3VdbeAddOp2(v, OP_ResultRow, 1, 3); - } - break; - - /* - ** PRAGMA wal_autocheckpoint - ** PRAGMA wal_autocheckpoint = N - ** - ** Configure a database connection to automatically checkpoint a database - ** after accumulating N frames in the log. Or query for the current value - ** of N. - */ - case PragTyp_WAL_AUTOCHECKPOINT: { - if( zRight ){ - sqlite3_wal_autocheckpoint(db, sqlite3Atoi(zRight)); - } - returnSingleInt(pParse, "wal_autocheckpoint", - db->xWalCallback==sqlite3WalDefaultHook ? 
- SQLITE_PTR_TO_INT(db->pWalArg) : 0); - } - break; -#endif - - /* - ** PRAGMA shrink_memory - ** - ** This pragma attempts to free as much memory as possible from the - ** current database connection. - */ - case PragTyp_SHRINK_MEMORY: { - sqlite3_db_release_memory(db); - break; - } - - /* - ** PRAGMA busy_timeout - ** PRAGMA busy_timeout = N - ** - ** Call sqlite3_busy_timeout(db, N). Return the current timeout value - ** if one is set. If no busy handler or a different busy handler is set - ** then 0 is returned. Setting the busy_timeout to 0 or negative - ** disables the timeout. - */ - /*case PragTyp_BUSY_TIMEOUT*/ default: { - assert( aPragmaNames[mid].ePragTyp==PragTyp_BUSY_TIMEOUT ); - if( zRight ){ - sqlite3_busy_timeout(db, sqlite3Atoi(zRight)); - } - returnSingleInt(pParse, "timeout", db->busyTimeout); - break; - } - - /* - ** PRAGMA soft_heap_limit - ** PRAGMA soft_heap_limit = N - ** - ** Call sqlite3_soft_heap_limit64(N). Return the result. If N is omitted, - ** use -1. - */ - case PragTyp_SOFT_HEAP_LIMIT: { - sqlite3_int64 N; - if( zRight && sqlite3Atoi64(zRight, &N, 1000000, SQLITE_UTF8)==SQLITE_OK ){ - sqlite3_soft_heap_limit64(N); - } - returnSingleInt(pParse, "soft_heap_limit", sqlite3_soft_heap_limit64(-1)); - break; - } - -#if defined(SQLITE_DEBUG) || defined(SQLITE_TEST) - /* - ** Report the current state of file logs for all databases - */ - case PragTyp_LOCK_STATUS: { - static const char *const azLockName[] = { - "unlocked", "shared", "reserved", "pending", "exclusive" - }; - int i; - sqlite3VdbeSetNumCols(v, 2); - pParse->nMem = 2; - sqlite3VdbeSetColName(v, 0, COLNAME_NAME, "database", SQLITE_STATIC); - sqlite3VdbeSetColName(v, 1, COLNAME_NAME, "status", SQLITE_STATIC); - for(i=0; inDb; i++){ - Btree *pBt; - const char *zState = "unknown"; - int j; - if( db->aDb[i].zName==0 ) continue; - sqlite3VdbeAddOp4(v, OP_String8, 0, 1, 0, db->aDb[i].zName, P4_STATIC); - pBt = db->aDb[i].pBt; - if( pBt==0 || sqlite3BtreePager(pBt)==0 ){ - zState = "closed"; - }else if( sqlite3_file_control(db, i ? db->aDb[i].zName : 0, - SQLITE_FCNTL_LOCKSTATE, &j)==SQLITE_OK ){ - zState = azLockName[j]; - } - sqlite3VdbeAddOp4(v, OP_String8, 0, 2, 0, zState, P4_STATIC); - sqlite3VdbeAddOp2(v, OP_ResultRow, 1, 2); - } - break; - } -#endif - -#ifdef SQLITE_HAS_CODEC - case PragTyp_KEY: { - if( zRight ) sqlite3_key_v2(db, zDb, zRight, sqlite3Strlen30(zRight)); - break; - } - case PragTyp_REKEY: { - if( zRight ) sqlite3_rekey_v2(db, zDb, zRight, sqlite3Strlen30(zRight)); - break; - } - case PragTyp_HEXKEY: { - if( zRight ){ - u8 iByte; - int i; - char zKey[40]; - for(i=0, iByte=0; idb; - if( !db->mallocFailed && (db->flags & SQLITE_RecoveryMode)==0 ){ - if( zObj==0 ) zObj = "?"; - sqlite3SetString(pData->pzErrMsg, db, - "malformed database schema (%s)", zObj); - if( zExtra ){ - *pData->pzErrMsg = sqlite3MAppendf(db, *pData->pzErrMsg, - "%s - %s", *pData->pzErrMsg, zExtra); - } - } - pData->rc = db->mallocFailed ? SQLITE_NOMEM : SQLITE_CORRUPT_BKPT; -} - -/* -** This is the callback routine for the code that initializes the -** database. See sqlite3Init() below for additional information. -** This routine is also called from the OP_ParseSchema opcode of the VDBE. -** -** Each callback contains the following information: -** -** argv[0] = name of thing being created -** argv[1] = root page number for table or index. 0 for trigger or view. -** argv[2] = SQL text for the CREATE statement. 
-** -*/ -SQLITE_PRIVATE int sqlite3InitCallback(void *pInit, int argc, char **argv, char **NotUsed){ - InitData *pData = (InitData*)pInit; - sqlite3 *db = pData->db; - int iDb = pData->iDb; - - assert( argc==3 ); - UNUSED_PARAMETER2(NotUsed, argc); - assert( sqlite3_mutex_held(db->mutex) ); - DbClearProperty(db, iDb, DB_Empty); - if( db->mallocFailed ){ - corruptSchema(pData, argv[0], 0); - return 1; - } - - assert( iDb>=0 && iDbnDb ); - if( argv==0 ) return 0; /* Might happen if EMPTY_RESULT_CALLBACKS are on */ - if( argv[1]==0 ){ - corruptSchema(pData, argv[0], 0); - }else if( argv[2] && argv[2][0] ){ - /* Call the parser to process a CREATE TABLE, INDEX or VIEW. - ** But because db->init.busy is set to 1, no VDBE code is generated - ** or executed. All the parser does is build the internal data - ** structures that describe the table, index, or view. - */ - int rc; - sqlite3_stmt *pStmt; - TESTONLY(int rcp); /* Return code from sqlite3_prepare() */ - - assert( db->init.busy ); - db->init.iDb = iDb; - db->init.newTnum = sqlite3Atoi(argv[1]); - db->init.orphanTrigger = 0; - TESTONLY(rcp = ) sqlite3_prepare(db, argv[2], -1, &pStmt, 0); - rc = db->errCode; - assert( (rc&0xFF)==(rcp&0xFF) ); - db->init.iDb = 0; - if( SQLITE_OK!=rc ){ - if( db->init.orphanTrigger ){ - assert( iDb==1 ); - }else{ - pData->rc = rc; - if( rc==SQLITE_NOMEM ){ - db->mallocFailed = 1; - }else if( rc!=SQLITE_INTERRUPT && (rc&0xFF)!=SQLITE_LOCKED ){ - corruptSchema(pData, argv[0], sqlite3_errmsg(db)); - } - } - } - sqlite3_finalize(pStmt); - }else if( argv[0]==0 ){ - corruptSchema(pData, 0, 0); - }else{ - /* If the SQL column is blank it means this is an index that - ** was created to be the PRIMARY KEY or to fulfill a UNIQUE - ** constraint for a CREATE TABLE. The index should have already - ** been created when we processed the CREATE TABLE. All we have - ** to do here is record the root page number for that index. - */ - Index *pIndex; - pIndex = sqlite3FindIndex(db, argv[0], db->aDb[iDb].zName); - if( pIndex==0 ){ - /* This can occur if there exists an index on a TEMP table which - ** has the same name as another index on a permanent index. Since - ** the permanent table is hidden by the TEMP table, we can also - ** safely ignore the index on the permanent table. - */ - /* Do Nothing */; - }else if( sqlite3GetInt32(argv[1], &pIndex->tnum)==0 ){ - corruptSchema(pData, argv[0], "invalid rootpage"); - } - } - return 0; -} - -/* -** Attempt to read the database schema and initialize internal -** data structures for a single database file. The index of the -** database file is given by iDb. iDb==0 is used for the main -** database. iDb==1 should never be used. iDb>=2 is used for -** auxiliary databases. Return one of the SQLITE_ error codes to -** indicate success or failure. 
-*/ -static int sqlite3InitOne(sqlite3 *db, int iDb, char **pzErrMsg){ - int rc; - int i; -#ifndef SQLITE_OMIT_DEPRECATED - int size; -#endif - Table *pTab; - Db *pDb; - char const *azArg[4]; - int meta[5]; - InitData initData; - char const *zMasterSchema; - char const *zMasterName; - int openedTransaction = 0; - - /* - ** The master database table has a structure like this - */ - static const char master_schema[] = - "CREATE TABLE sqlite_master(\n" - " type text,\n" - " name text,\n" - " tbl_name text,\n" - " rootpage integer,\n" - " sql text\n" - ")" - ; -#ifndef SQLITE_OMIT_TEMPDB - static const char temp_master_schema[] = - "CREATE TEMP TABLE sqlite_temp_master(\n" - " type text,\n" - " name text,\n" - " tbl_name text,\n" - " rootpage integer,\n" - " sql text\n" - ")" - ; -#else - #define temp_master_schema 0 -#endif - - assert( iDb>=0 && iDbnDb ); - assert( db->aDb[iDb].pSchema ); - assert( sqlite3_mutex_held(db->mutex) ); - assert( iDb==1 || sqlite3BtreeHoldsMutex(db->aDb[iDb].pBt) ); - - /* zMasterSchema and zInitScript are set to point at the master schema - ** and initialisation script appropriate for the database being - ** initialized. zMasterName is the name of the master table. - */ - if( !OMIT_TEMPDB && iDb==1 ){ - zMasterSchema = temp_master_schema; - }else{ - zMasterSchema = master_schema; - } - zMasterName = SCHEMA_TABLE(iDb); - - /* Construct the schema tables. */ - azArg[0] = zMasterName; - azArg[1] = "1"; - azArg[2] = zMasterSchema; - azArg[3] = 0; - initData.db = db; - initData.iDb = iDb; - initData.rc = SQLITE_OK; - initData.pzErrMsg = pzErrMsg; - sqlite3InitCallback(&initData, 3, (char **)azArg, 0); - if( initData.rc ){ - rc = initData.rc; - goto error_out; - } - pTab = sqlite3FindTable(db, zMasterName, db->aDb[iDb].zName); - if( ALWAYS(pTab) ){ - pTab->tabFlags |= TF_Readonly; - } - - /* Create a cursor to hold the database open - */ - pDb = &db->aDb[iDb]; - if( pDb->pBt==0 ){ - if( !OMIT_TEMPDB && ALWAYS(iDb==1) ){ - DbSetProperty(db, 1, DB_SchemaLoaded); - } - return SQLITE_OK; - } - - /* If there is not already a read-only (or read-write) transaction opened - ** on the b-tree database, open one now. If a transaction is opened, it - ** will be closed before this function returns. */ - sqlite3BtreeEnter(pDb->pBt); - if( !sqlite3BtreeIsInReadTrans(pDb->pBt) ){ - rc = sqlite3BtreeBeginTrans(pDb->pBt, 0); - if( rc!=SQLITE_OK ){ - sqlite3SetString(pzErrMsg, db, "%s", sqlite3ErrStr(rc)); - goto initone_error_out; - } - openedTransaction = 1; - } - - /* Get the database meta information. - ** - ** Meta values are as follows: - ** meta[0] Schema cookie. Changes with each schema change. - ** meta[1] File format of schema layer. - ** meta[2] Size of the page cache. - ** meta[3] Largest rootpage (auto/incr_vacuum mode) - ** meta[4] Db text encoding. 1:UTF-8 2:UTF-16LE 3:UTF-16BE - ** meta[5] User version - ** meta[6] Incremental vacuum mode - ** meta[7] unused - ** meta[8] unused - ** meta[9] unused - ** - ** Note: The #defined SQLITE_UTF* symbols in sqliteInt.h correspond to - ** the possible values of meta[4]. - */ - for(i=0; ipBt, i+1, (u32 *)&meta[i]); - } - pDb->pSchema->schema_cookie = meta[BTREE_SCHEMA_VERSION-1]; - - /* If opening a non-empty database, check the text encoding. For the - ** main database, set sqlite3.enc to the encoding of the main database. - ** For an attached db, it is an error if the encoding is not the same - ** as sqlite3.enc. 
- */ - if( meta[BTREE_TEXT_ENCODING-1] ){ /* text encoding */ - if( iDb==0 ){ -#ifndef SQLITE_OMIT_UTF16 - u8 encoding; - /* If opening the main database, set ENC(db). */ - encoding = (u8)meta[BTREE_TEXT_ENCODING-1] & 3; - if( encoding==0 ) encoding = SQLITE_UTF8; - ENC(db) = encoding; -#else - ENC(db) = SQLITE_UTF8; -#endif - }else{ - /* If opening an attached database, the encoding much match ENC(db) */ - if( meta[BTREE_TEXT_ENCODING-1]!=ENC(db) ){ - sqlite3SetString(pzErrMsg, db, "attached databases must use the same" - " text encoding as main database"); - rc = SQLITE_ERROR; - goto initone_error_out; - } - } - }else{ - DbSetProperty(db, iDb, DB_Empty); - } - pDb->pSchema->enc = ENC(db); - - if( pDb->pSchema->cache_size==0 ){ -#ifndef SQLITE_OMIT_DEPRECATED - size = sqlite3AbsInt32(meta[BTREE_DEFAULT_CACHE_SIZE-1]); - if( size==0 ){ size = SQLITE_DEFAULT_CACHE_SIZE; } - pDb->pSchema->cache_size = size; -#else - pDb->pSchema->cache_size = SQLITE_DEFAULT_CACHE_SIZE; -#endif - sqlite3BtreeSetCacheSize(pDb->pBt, pDb->pSchema->cache_size); - } - - /* - ** file_format==1 Version 3.0.0. - ** file_format==2 Version 3.1.3. // ALTER TABLE ADD COLUMN - ** file_format==3 Version 3.1.4. // ditto but with non-NULL defaults - ** file_format==4 Version 3.3.0. // DESC indices. Boolean constants - */ - pDb->pSchema->file_format = (u8)meta[BTREE_FILE_FORMAT-1]; - if( pDb->pSchema->file_format==0 ){ - pDb->pSchema->file_format = 1; - } - if( pDb->pSchema->file_format>SQLITE_MAX_FILE_FORMAT ){ - sqlite3SetString(pzErrMsg, db, "unsupported file format"); - rc = SQLITE_ERROR; - goto initone_error_out; - } - - /* Ticket #2804: When we open a database in the newer file format, - ** clear the legacy_file_format pragma flag so that a VACUUM will - ** not downgrade the database and thus invalidate any descending - ** indices that the user might have created. - */ - if( iDb==0 && meta[BTREE_FILE_FORMAT-1]>=4 ){ - db->flags &= ~SQLITE_LegacyFileFmt; - } - - /* Read the schema information out of the schema tables - */ - assert( db->init.busy ); - { - char *zSql; - zSql = sqlite3MPrintf(db, - "SELECT name, rootpage, sql FROM '%q'.%s ORDER BY rowid", - db->aDb[iDb].zName, zMasterName); -#ifndef SQLITE_OMIT_AUTHORIZATION - { - int (*xAuth)(void*,int,const char*,const char*,const char*,const char*); - xAuth = db->xAuth; - db->xAuth = 0; -#endif - rc = sqlite3_exec(db, zSql, sqlite3InitCallback, &initData, 0); -#ifndef SQLITE_OMIT_AUTHORIZATION - db->xAuth = xAuth; - } -#endif - if( rc==SQLITE_OK ) rc = initData.rc; - sqlite3DbFree(db, zSql); -#ifndef SQLITE_OMIT_ANALYZE - if( rc==SQLITE_OK ){ - sqlite3AnalysisLoad(db, iDb); - } -#endif - } - if( db->mallocFailed ){ - rc = SQLITE_NOMEM; - sqlite3ResetAllSchemasOfConnection(db); - } - if( rc==SQLITE_OK || (db->flags&SQLITE_RecoveryMode)){ - /* Black magic: If the SQLITE_RecoveryMode flag is set, then consider - ** the schema loaded, even if errors occurred. In this situation the - ** current sqlite3_prepare() operation will fail, but the following one - ** will attempt to compile the supplied statement against whatever subset - ** of the schema was loaded before the error occurred. The primary - ** purpose of this is to allow access to the sqlite_master table - ** even when its contents have been corrupted. - */ - DbSetProperty(db, iDb, DB_SchemaLoaded); - rc = SQLITE_OK; - } - - /* Jump here for an error that occurs after successfully allocating - ** curMain and calling sqlite3BtreeEnter(). For an error that occurs - ** before that point, jump to error_out. 
- */ -initone_error_out: - if( openedTransaction ){ - sqlite3BtreeCommit(pDb->pBt); - } - sqlite3BtreeLeave(pDb->pBt); - -error_out: - if( rc==SQLITE_NOMEM || rc==SQLITE_IOERR_NOMEM ){ - db->mallocFailed = 1; - } - return rc; -} - -/* -** Initialize all database files - the main database file, the file -** used to store temporary tables, and any additional database files -** created using ATTACH statements. Return a success code. If an -** error occurs, write an error message into *pzErrMsg. -** -** After a database is initialized, the DB_SchemaLoaded bit is set -** bit is set in the flags field of the Db structure. If the database -** file was of zero-length, then the DB_Empty flag is also set. -*/ -SQLITE_PRIVATE int sqlite3Init(sqlite3 *db, char **pzErrMsg){ - int i, rc; - int commit_internal = !(db->flags&SQLITE_InternChanges); - - assert( sqlite3_mutex_held(db->mutex) ); - rc = SQLITE_OK; - db->init.busy = 1; - for(i=0; rc==SQLITE_OK && inDb; i++){ - if( DbHasProperty(db, i, DB_SchemaLoaded) || i==1 ) continue; - rc = sqlite3InitOne(db, i, pzErrMsg); - if( rc ){ - sqlite3ResetOneSchema(db, i); - } - } - - /* Once all the other databases have been initialized, load the schema - ** for the TEMP database. This is loaded last, as the TEMP database - ** schema may contain references to objects in other databases. - */ -#ifndef SQLITE_OMIT_TEMPDB - if( rc==SQLITE_OK && ALWAYS(db->nDb>1) - && !DbHasProperty(db, 1, DB_SchemaLoaded) ){ - rc = sqlite3InitOne(db, 1, pzErrMsg); - if( rc ){ - sqlite3ResetOneSchema(db, 1); - } - } -#endif - - db->init.busy = 0; - if( rc==SQLITE_OK && commit_internal ){ - sqlite3CommitInternalChanges(db); - } - - return rc; -} - -/* -** This routine is a no-op if the database schema is already initialized. -** Otherwise, the schema is loaded. An error code is returned. -*/ -SQLITE_PRIVATE int sqlite3ReadSchema(Parse *pParse){ - int rc = SQLITE_OK; - sqlite3 *db = pParse->db; - assert( sqlite3_mutex_held(db->mutex) ); - if( !db->init.busy ){ - rc = sqlite3Init(db, &pParse->zErrMsg); - } - if( rc!=SQLITE_OK ){ - pParse->rc = rc; - pParse->nErr++; - } - return rc; -} - - -/* -** Check schema cookies in all databases. If any cookie is out -** of date set pParse->rc to SQLITE_SCHEMA. If all schema cookies -** make no changes to pParse->rc. -*/ -static void schemaIsValid(Parse *pParse){ - sqlite3 *db = pParse->db; - int iDb; - int rc; - int cookie; - - assert( pParse->checkSchema ); - assert( sqlite3_mutex_held(db->mutex) ); - for(iDb=0; iDbnDb; iDb++){ - int openedTransaction = 0; /* True if a transaction is opened */ - Btree *pBt = db->aDb[iDb].pBt; /* Btree database to read cookie from */ - if( pBt==0 ) continue; - - /* If there is not already a read-only (or read-write) transaction opened - ** on the b-tree database, open one now. If a transaction is opened, it - ** will be closed immediately after reading the meta-value. */ - if( !sqlite3BtreeIsInReadTrans(pBt) ){ - rc = sqlite3BtreeBeginTrans(pBt, 0); - if( rc==SQLITE_NOMEM || rc==SQLITE_IOERR_NOMEM ){ - db->mallocFailed = 1; - } - if( rc!=SQLITE_OK ) return; - openedTransaction = 1; - } - - /* Read the schema cookie from the database. If it does not match the - ** value stored as part of the in-memory schema representation, - ** set Parse.rc to SQLITE_SCHEMA. 
*/ - sqlite3BtreeGetMeta(pBt, BTREE_SCHEMA_VERSION, (u32 *)&cookie); - assert( sqlite3SchemaMutexHeld(db, iDb, 0) ); - if( cookie!=db->aDb[iDb].pSchema->schema_cookie ){ - sqlite3ResetOneSchema(db, iDb); - pParse->rc = SQLITE_SCHEMA; - } - - /* Close the transaction, if one was opened. */ - if( openedTransaction ){ - sqlite3BtreeCommit(pBt); - } - } -} - -/* -** Convert a schema pointer into the iDb index that indicates -** which database file in db->aDb[] the schema refers to. -** -** If the same database is attached more than once, the first -** attached database is returned. -*/ -SQLITE_PRIVATE int sqlite3SchemaToIndex(sqlite3 *db, Schema *pSchema){ - int i = -1000000; - - /* If pSchema is NULL, then return -1000000. This happens when code in - ** expr.c is trying to resolve a reference to a transient table (i.e. one - ** created by a sub-select). In this case the return value of this - ** function should never be used. - ** - ** We return -1000000 instead of the more usual -1 simply because using - ** -1000000 as the incorrect index into db->aDb[] is much - ** more likely to cause a segfault than -1 (of course there are assert() - ** statements too, but it never hurts to play the odds). - */ - assert( sqlite3_mutex_held(db->mutex) ); - if( pSchema ){ - for(i=0; ALWAYS(inDb); i++){ - if( db->aDb[i].pSchema==pSchema ){ - break; - } - } - assert( i>=0 && inDb ); - } - return i; -} - -/* -** Free all memory allocations in the pParse object -*/ -SQLITE_PRIVATE void sqlite3ParserReset(Parse *pParse){ - if( pParse ){ - sqlite3 *db = pParse->db; - sqlite3DbFree(db, pParse->aLabel); - sqlite3ExprListDelete(db, pParse->pConstExpr); - } -} - -/* -** Compile the UTF-8 encoded SQL statement zSql into a statement handle. -*/ -static int sqlite3Prepare( - sqlite3 *db, /* Database handle. */ - const char *zSql, /* UTF-8 encoded SQL statement. */ - int nBytes, /* Length of zSql in bytes. */ - int saveSqlFlag, /* True to copy SQL text into the sqlite3_stmt */ - Vdbe *pReprepare, /* VM being reprepared */ - sqlite3_stmt **ppStmt, /* OUT: A pointer to the prepared statement */ - const char **pzTail /* OUT: End of parsed string */ -){ - Parse *pParse; /* Parsing context */ - char *zErrMsg = 0; /* Error message */ - int rc = SQLITE_OK; /* Result code */ - int i; /* Loop counter */ - - /* Allocate the parsing context */ - pParse = sqlite3StackAllocZero(db, sizeof(*pParse)); - if( pParse==0 ){ - rc = SQLITE_NOMEM; - goto end_prepare; - } - pParse->pReprepare = pReprepare; - assert( ppStmt && *ppStmt==0 ); - assert( !db->mallocFailed ); - assert( sqlite3_mutex_held(db->mutex) ); - - /* Check to verify that it is possible to get a read lock on all - ** database schemas. The inability to get a read lock indicates that - ** some other database connection is holding a write-lock, which in - ** turn means that the other connection has made uncommitted changes - ** to the schema. - ** - ** Were we to proceed and prepare the statement against the uncommitted - ** schema changes and if those schema changes are subsequently rolled - ** back and different changes are made in their place, then when this - ** prepared statement goes to run the schema cookie would fail to detect - ** the schema change. Disaster would follow. - ** - ** This thread is currently holding mutexes on all Btrees (because - ** of the sqlite3BtreeEnterAll() in sqlite3LockAndPrepare()) so it - ** is not possible for another thread to start a new schema change - ** while this routine is running. 
Hence, we do not need to hold - ** locks on the schema, we just need to make sure nobody else is - ** holding them. - ** - ** Note that setting READ_UNCOMMITTED overrides most lock detection, - ** but it does *not* override schema lock detection, so this all still - ** works even if READ_UNCOMMITTED is set. - */ - for(i=0; inDb; i++) { - Btree *pBt = db->aDb[i].pBt; - if( pBt ){ - assert( sqlite3BtreeHoldsMutex(pBt) ); - rc = sqlite3BtreeSchemaLocked(pBt); - if( rc ){ - const char *zDb = db->aDb[i].zName; - sqlite3Error(db, rc, "database schema is locked: %s", zDb); - testcase( db->flags & SQLITE_ReadUncommitted ); - goto end_prepare; - } - } - } - - sqlite3VtabUnlockList(db); - - pParse->db = db; - pParse->nQueryLoop = 0; /* Logarithmic, so 0 really means 1 */ - if( nBytes>=0 && (nBytes==0 || zSql[nBytes-1]!=0) ){ - char *zSqlCopy; - int mxLen = db->aLimit[SQLITE_LIMIT_SQL_LENGTH]; - testcase( nBytes==mxLen ); - testcase( nBytes==mxLen+1 ); - if( nBytes>mxLen ){ - sqlite3Error(db, SQLITE_TOOBIG, "statement too long"); - rc = sqlite3ApiExit(db, SQLITE_TOOBIG); - goto end_prepare; - } - zSqlCopy = sqlite3DbStrNDup(db, zSql, nBytes); - if( zSqlCopy ){ - sqlite3RunParser(pParse, zSqlCopy, &zErrMsg); - sqlite3DbFree(db, zSqlCopy); - pParse->zTail = &zSql[pParse->zTail-zSqlCopy]; - }else{ - pParse->zTail = &zSql[nBytes]; - } - }else{ - sqlite3RunParser(pParse, zSql, &zErrMsg); - } - assert( 0==pParse->nQueryLoop ); - - if( db->mallocFailed ){ - pParse->rc = SQLITE_NOMEM; - } - if( pParse->rc==SQLITE_DONE ) pParse->rc = SQLITE_OK; - if( pParse->checkSchema ){ - schemaIsValid(pParse); - } - if( db->mallocFailed ){ - pParse->rc = SQLITE_NOMEM; - } - if( pzTail ){ - *pzTail = pParse->zTail; - } - rc = pParse->rc; - -#ifndef SQLITE_OMIT_EXPLAIN - if( rc==SQLITE_OK && pParse->pVdbe && pParse->explain ){ - static const char * const azColName[] = { - "addr", "opcode", "p1", "p2", "p3", "p4", "p5", "comment", - "selectid", "order", "from", "detail" - }; - int iFirst, mx; - if( pParse->explain==2 ){ - sqlite3VdbeSetNumCols(pParse->pVdbe, 4); - iFirst = 8; - mx = 12; - }else{ - sqlite3VdbeSetNumCols(pParse->pVdbe, 8); - iFirst = 0; - mx = 8; - } - for(i=iFirst; ipVdbe, i-iFirst, COLNAME_NAME, - azColName[i], SQLITE_STATIC); - } - } -#endif - - if( db->init.busy==0 ){ - Vdbe *pVdbe = pParse->pVdbe; - sqlite3VdbeSetSql(pVdbe, zSql, (int)(pParse->zTail-zSql), saveSqlFlag); - } - if( pParse->pVdbe && (rc!=SQLITE_OK || db->mallocFailed) ){ - sqlite3VdbeFinalize(pParse->pVdbe); - assert(!(*ppStmt)); - }else{ - *ppStmt = (sqlite3_stmt*)pParse->pVdbe; - } - - if( zErrMsg ){ - sqlite3Error(db, rc, "%s", zErrMsg); - sqlite3DbFree(db, zErrMsg); - }else{ - sqlite3Error(db, rc, 0); - } - - /* Delete any TriggerPrg structures allocated while parsing this statement. */ - while( pParse->pTriggerPrg ){ - TriggerPrg *pT = pParse->pTriggerPrg; - pParse->pTriggerPrg = pT->pNext; - sqlite3DbFree(db, pT); - } - -end_prepare: - - sqlite3ParserReset(pParse); - sqlite3StackFree(db, pParse); - rc = sqlite3ApiExit(db, rc); - assert( (rc&db->errMask)==rc ); - return rc; -} -static int sqlite3LockAndPrepare( - sqlite3 *db, /* Database handle. */ - const char *zSql, /* UTF-8 encoded SQL statement. */ - int nBytes, /* Length of zSql in bytes. 
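The mxLen check above is driven by the per-connection SQLITE_LIMIT_SQL_LENGTH value, which applications may lower with sqlite3_limit(). A small sketch that should trip the "statement too long" path when an explicit byte count larger than the limit is supplied (assuming the build in use lets the limit be lowered this far):

#include <stdio.h>
#include <string.h>
#include <sqlite3.h>

int main(void){
  sqlite3 *db;
  sqlite3_stmt *pStmt = 0;
  const char *zSql = "SELECT 1+2+3+4+5+6";
  int rc;

  if( sqlite3_open(":memory:", &db)!=SQLITE_OK ) return 1;
  /* Lower the per-connection ceiling on SQL text length to 10 bytes. */
  sqlite3_limit(db, SQLITE_LIMIT_SQL_LENGTH, 10);
  /* Pass an explicit length with no embedded terminator so the copy path
  ** (and therefore the length check) runs. */
  rc = sqlite3_prepare_v2(db, zSql, (int)strlen(zSql), &pStmt, 0);
  if( rc!=SQLITE_OK ){
    printf("rc=%d: %s\n", rc, sqlite3_errmsg(db));  /* expect "statement too long" */
  }
  sqlite3_finalize(pStmt);   /* finalizing a NULL statement is a harmless no-op */
  sqlite3_close(db);
  return 0;
}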
*/ - int saveSqlFlag, /* True to copy SQL text into the sqlite3_stmt */ - Vdbe *pOld, /* VM being reprepared */ - sqlite3_stmt **ppStmt, /* OUT: A pointer to the prepared statement */ - const char **pzTail /* OUT: End of parsed string */ -){ - int rc; - assert( ppStmt!=0 ); - *ppStmt = 0; - if( !sqlite3SafetyCheckOk(db) ){ - return SQLITE_MISUSE_BKPT; - } - sqlite3_mutex_enter(db->mutex); - sqlite3BtreeEnterAll(db); - rc = sqlite3Prepare(db, zSql, nBytes, saveSqlFlag, pOld, ppStmt, pzTail); - if( rc==SQLITE_SCHEMA ){ - sqlite3_finalize(*ppStmt); - rc = sqlite3Prepare(db, zSql, nBytes, saveSqlFlag, pOld, ppStmt, pzTail); - } - sqlite3BtreeLeaveAll(db); - sqlite3_mutex_leave(db->mutex); - assert( rc==SQLITE_OK || *ppStmt==0 ); - return rc; -} - -/* -** Rerun the compilation of a statement after a schema change. -** -** If the statement is successfully recompiled, return SQLITE_OK. Otherwise, -** if the statement cannot be recompiled because another connection has -** locked the sqlite3_master table, return SQLITE_LOCKED. If any other error -** occurs, return SQLITE_SCHEMA. -*/ -SQLITE_PRIVATE int sqlite3Reprepare(Vdbe *p){ - int rc; - sqlite3_stmt *pNew; - const char *zSql; - sqlite3 *db; - - assert( sqlite3_mutex_held(sqlite3VdbeDb(p)->mutex) ); - zSql = sqlite3_sql((sqlite3_stmt *)p); - assert( zSql!=0 ); /* Reprepare only called for prepare_v2() statements */ - db = sqlite3VdbeDb(p); - assert( sqlite3_mutex_held(db->mutex) ); - rc = sqlite3LockAndPrepare(db, zSql, -1, 0, p, &pNew, 0); - if( rc ){ - if( rc==SQLITE_NOMEM ){ - db->mallocFailed = 1; - } - assert( pNew==0 ); - return rc; - }else{ - assert( pNew!=0 ); - } - sqlite3VdbeSwap((Vdbe*)pNew, p); - sqlite3TransferBindings(pNew, (sqlite3_stmt*)p); - sqlite3VdbeResetStepResult((Vdbe*)pNew); - sqlite3VdbeFinalize((Vdbe*)pNew); - return SQLITE_OK; -} - - -/* -** Two versions of the official API. Legacy and new use. In the legacy -** version, the original SQL text is not saved in the prepared statement -** and so if a schema change occurs, SQLITE_SCHEMA is returned by -** sqlite3_step(). In the new version, the original SQL text is retained -** and the statement is automatically recompiled if an schema change -** occurs. -*/ -SQLITE_API int sqlite3_prepare( - sqlite3 *db, /* Database handle. */ - const char *zSql, /* UTF-8 encoded SQL statement. */ - int nBytes, /* Length of zSql in bytes. */ - sqlite3_stmt **ppStmt, /* OUT: A pointer to the prepared statement */ - const char **pzTail /* OUT: End of parsed string */ -){ - int rc; - rc = sqlite3LockAndPrepare(db,zSql,nBytes,0,0,ppStmt,pzTail); - assert( rc==SQLITE_OK || ppStmt==0 || *ppStmt==0 ); /* VERIFY: F13021 */ - return rc; -} -SQLITE_API int sqlite3_prepare_v2( - sqlite3 *db, /* Database handle. */ - const char *zSql, /* UTF-8 encoded SQL statement. */ - int nBytes, /* Length of zSql in bytes. */ - sqlite3_stmt **ppStmt, /* OUT: A pointer to the prepared statement */ - const char **pzTail /* OUT: End of parsed string */ -){ - int rc; - rc = sqlite3LockAndPrepare(db,zSql,nBytes,1,0,ppStmt,pzTail); - assert( rc==SQLITE_OK || ppStmt==0 || *ppStmt==0 ); /* VERIFY: F13021 */ - return rc; -} - - -#ifndef SQLITE_OMIT_UTF16 -/* -** Compile the UTF-16 encoded SQL statement zSql into a statement handle. -*/ -static int sqlite3Prepare16( - sqlite3 *db, /* Database handle. */ - const void *zSql, /* UTF-16 encoded SQL statement. */ - int nBytes, /* Length of zSql in bytes. 
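From the application side, the practical difference described above is that statements from sqlite3_prepare_v2() carry their SQL text and are re-prepared transparently after a schema change, so a routine loop never has to handle SQLITE_SCHEMA itself. A sketch of that typical loop (the users table is hypothetical):

#include <stdio.h>
#include <sqlite3.h>

/* Print every row of a hypothetical "users" table. */
static int printUsers(sqlite3 *db){
  sqlite3_stmt *pStmt;
  int rc = sqlite3_prepare_v2(db, "SELECT id, name FROM users", -1, &pStmt, 0);
  if( rc!=SQLITE_OK ) return rc;
  while( (rc = sqlite3_step(pStmt))==SQLITE_ROW ){
    printf("%d %s\n", sqlite3_column_int(pStmt, 0),
                      (const char*)sqlite3_column_text(pStmt, 1));
  }
  sqlite3_finalize(pStmt);
  return rc==SQLITE_DONE ? SQLITE_OK : rc;
}

With the legacy sqlite3_prepare() interface the same loop would instead have to catch SQLITE_SCHEMA from sqlite3_step() and re-prepare by hand.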
*/ - int saveSqlFlag, /* True to save SQL text into the sqlite3_stmt */ - sqlite3_stmt **ppStmt, /* OUT: A pointer to the prepared statement */ - const void **pzTail /* OUT: End of parsed string */ -){ - /* This function currently works by first transforming the UTF-16 - ** encoded string to UTF-8, then invoking sqlite3_prepare(). The - ** tricky bit is figuring out the pointer to return in *pzTail. - */ - char *zSql8; - const char *zTail8 = 0; - int rc = SQLITE_OK; - - assert( ppStmt ); - *ppStmt = 0; - if( !sqlite3SafetyCheckOk(db) ){ - return SQLITE_MISUSE_BKPT; - } - if( nBytes>=0 ){ - int sz; - const char *z = (const char*)zSql; - for(sz=0; szmutex); - zSql8 = sqlite3Utf16to8(db, zSql, nBytes, SQLITE_UTF16NATIVE); - if( zSql8 ){ - rc = sqlite3LockAndPrepare(db, zSql8, -1, saveSqlFlag, 0, ppStmt, &zTail8); - } - - if( zTail8 && pzTail ){ - /* If sqlite3_prepare returns a tail pointer, we calculate the - ** equivalent pointer into the UTF-16 string by counting the unicode - ** characters between zSql8 and zTail8, and then returning a pointer - ** the same number of characters into the UTF-16 string. - */ - int chars_parsed = sqlite3Utf8CharLen(zSql8, (int)(zTail8-zSql8)); - *pzTail = (u8 *)zSql + sqlite3Utf16ByteLen(zSql, chars_parsed); - } - sqlite3DbFree(db, zSql8); - rc = sqlite3ApiExit(db, rc); - sqlite3_mutex_leave(db->mutex); - return rc; -} - -/* -** Two versions of the official API. Legacy and new use. In the legacy -** version, the original SQL text is not saved in the prepared statement -** and so if a schema change occurs, SQLITE_SCHEMA is returned by -** sqlite3_step(). In the new version, the original SQL text is retained -** and the statement is automatically recompiled if an schema change -** occurs. -*/ -SQLITE_API int sqlite3_prepare16( - sqlite3 *db, /* Database handle. */ - const void *zSql, /* UTF-16 encoded SQL statement. */ - int nBytes, /* Length of zSql in bytes. */ - sqlite3_stmt **ppStmt, /* OUT: A pointer to the prepared statement */ - const void **pzTail /* OUT: End of parsed string */ -){ - int rc; - rc = sqlite3Prepare16(db,zSql,nBytes,0,ppStmt,pzTail); - assert( rc==SQLITE_OK || ppStmt==0 || *ppStmt==0 ); /* VERIFY: F13021 */ - return rc; -} -SQLITE_API int sqlite3_prepare16_v2( - sqlite3 *db, /* Database handle. */ - const void *zSql, /* UTF-16 encoded SQL statement. */ - int nBytes, /* Length of zSql in bytes. */ - sqlite3_stmt **ppStmt, /* OUT: A pointer to the prepared statement */ - const void **pzTail /* OUT: End of parsed string */ -){ - int rc; - rc = sqlite3Prepare16(db,zSql,nBytes,1,ppStmt,pzTail); - assert( rc==SQLITE_OK || ppStmt==0 || *ppStmt==0 ); /* VERIFY: F13021 */ - return rc; -} - -#endif /* SQLITE_OMIT_UTF16 */ - -/************** End of prepare.c *********************************************/ -/************** Begin file select.c ******************************************/ -/* -** 2001 September 15 -** -** The author disclaims copyright to this source code. In place of -** a legal notice, here is a blessing: -** -** May you do good and not evil. -** May you find forgiveness for yourself and forgive others. -** May you share freely, never taking more than you give. -** -************************************************************************* -** This file contains C code routines that are called by the parser -** to handle SELECT statements in SQLite. 
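As the comment above says, the UTF-16 entry points are thin wrappers: the text is converted to UTF-8 and handed to the UTF-8 path, with only the tail pointer translated back. A sketch of calling the 16-bit interface directly (it assumes a platform where short is 16 bits, which is how native-order UTF-16 is spelled here):

#include <stdio.h>
#include <sqlite3.h>

int main(void){
  sqlite3 *db;
  sqlite3_stmt *pStmt;
  /* "SELECT 1" as native-byte-order UTF-16, zero-terminated. */
  static const unsigned short aSql[] = { 'S','E','L','E','C','T',' ','1', 0 };

  if( sqlite3_open(":memory:", &db)!=SQLITE_OK ) return 1;
  if( sqlite3_prepare16_v2(db, aSql, -1, &pStmt, 0)==SQLITE_OK ){
    if( sqlite3_step(pStmt)==SQLITE_ROW ){
      printf("%d\n", sqlite3_column_int(pStmt, 0));   /* prints 1 */
    }
    sqlite3_finalize(pStmt);
  }
  sqlite3_close(db);
  return 0;
}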
-*/ - -/* -** An instance of the following object is used to record information about -** how to process the DISTINCT keyword, to simplify passing that information -** into the selectInnerLoop() routine. -*/ -typedef struct DistinctCtx DistinctCtx; -struct DistinctCtx { - u8 isTnct; /* True if the DISTINCT keyword is present */ - u8 eTnctType; /* One of the WHERE_DISTINCT_* operators */ - int tabTnct; /* Ephemeral table used for DISTINCT processing */ - int addrTnct; /* Address of OP_OpenEphemeral opcode for tabTnct */ -}; - -/* -** An instance of the following object is used to record information about -** the ORDER BY (or GROUP BY) clause of query is being coded. -*/ -typedef struct SortCtx SortCtx; -struct SortCtx { - ExprList *pOrderBy; /* The ORDER BY (or GROUP BY clause) */ - int nOBSat; /* Number of ORDER BY terms satisfied by indices */ - int iECursor; /* Cursor number for the sorter */ - int regReturn; /* Register holding block-output return address */ - int labelBkOut; /* Start label for the block-output subroutine */ - int addrSortIndex; /* Address of the OP_SorterOpen or OP_OpenEphemeral */ - u8 sortFlags; /* Zero or more SORTFLAG_* bits */ -}; -#define SORTFLAG_UseSorter 0x01 /* Use SorterOpen instead of OpenEphemeral */ - -/* -** Delete all the content of a Select structure but do not deallocate -** the select structure itself. -*/ -static void clearSelect(sqlite3 *db, Select *p){ - sqlite3ExprListDelete(db, p->pEList); - sqlite3SrcListDelete(db, p->pSrc); - sqlite3ExprDelete(db, p->pWhere); - sqlite3ExprListDelete(db, p->pGroupBy); - sqlite3ExprDelete(db, p->pHaving); - sqlite3ExprListDelete(db, p->pOrderBy); - sqlite3SelectDelete(db, p->pPrior); - sqlite3ExprDelete(db, p->pLimit); - sqlite3ExprDelete(db, p->pOffset); - sqlite3WithDelete(db, p->pWith); -} - -/* -** Initialize a SelectDest structure. -*/ -SQLITE_PRIVATE void sqlite3SelectDestInit(SelectDest *pDest, int eDest, int iParm){ - pDest->eDest = (u8)eDest; - pDest->iSDParm = iParm; - pDest->affSdst = 0; - pDest->iSdst = 0; - pDest->nSdst = 0; -} - - -/* -** Allocate a new Select structure and return a pointer to that -** structure. -*/ -SQLITE_PRIVATE Select *sqlite3SelectNew( - Parse *pParse, /* Parsing context */ - ExprList *pEList, /* which columns to include in the result */ - SrcList *pSrc, /* the FROM clause -- which tables to scan */ - Expr *pWhere, /* the WHERE clause */ - ExprList *pGroupBy, /* the GROUP BY clause */ - Expr *pHaving, /* the HAVING clause */ - ExprList *pOrderBy, /* the ORDER BY clause */ - u16 selFlags, /* Flag parameters, such as SF_Distinct */ - Expr *pLimit, /* LIMIT value. NULL means not used */ - Expr *pOffset /* OFFSET value. 
NULL means no offset */ -){ - Select *pNew; - Select standin; - sqlite3 *db = pParse->db; - pNew = sqlite3DbMallocZero(db, sizeof(*pNew) ); - assert( db->mallocFailed || !pOffset || pLimit ); /* OFFSET implies LIMIT */ - if( pNew==0 ){ - assert( db->mallocFailed ); - pNew = &standin; - memset(pNew, 0, sizeof(*pNew)); - } - if( pEList==0 ){ - pEList = sqlite3ExprListAppend(pParse, 0, sqlite3Expr(db,TK_ALL,0)); - } - pNew->pEList = pEList; - if( pSrc==0 ) pSrc = sqlite3DbMallocZero(db, sizeof(*pSrc)); - pNew->pSrc = pSrc; - pNew->pWhere = pWhere; - pNew->pGroupBy = pGroupBy; - pNew->pHaving = pHaving; - pNew->pOrderBy = pOrderBy; - pNew->selFlags = selFlags; - pNew->op = TK_SELECT; - pNew->pLimit = pLimit; - pNew->pOffset = pOffset; - assert( pOffset==0 || pLimit!=0 ); - pNew->addrOpenEphm[0] = -1; - pNew->addrOpenEphm[1] = -1; - if( db->mallocFailed ) { - clearSelect(db, pNew); - if( pNew!=&standin ) sqlite3DbFree(db, pNew); - pNew = 0; - }else{ - assert( pNew->pSrc!=0 || pParse->nErr>0 ); - } - assert( pNew!=&standin ); - return pNew; -} - -/* -** Delete the given Select structure and all of its substructures. -*/ -SQLITE_PRIVATE void sqlite3SelectDelete(sqlite3 *db, Select *p){ - if( p ){ - clearSelect(db, p); - sqlite3DbFree(db, p); - } -} - -/* -** Return a pointer to the right-most SELECT statement in a compound. -*/ -static Select *findRightmost(Select *p){ - while( p->pNext ) p = p->pNext; - return p; -} - -/* -** Given 1 to 3 identifiers preceding the JOIN keyword, determine the -** type of join. Return an integer constant that expresses that type -** in terms of the following bit values: -** -** JT_INNER -** JT_CROSS -** JT_OUTER -** JT_NATURAL -** JT_LEFT -** JT_RIGHT -** -** A full outer join is the combination of JT_LEFT and JT_RIGHT. -** -** If an illegal or unsupported join type is seen, then still return -** a join type, but put an error in the pParse structure. 
-*/ -SQLITE_PRIVATE int sqlite3JoinType(Parse *pParse, Token *pA, Token *pB, Token *pC){ - int jointype = 0; - Token *apAll[3]; - Token *p; - /* 0123456789 123456789 123456789 123 */ - static const char zKeyText[] = "naturaleftouterightfullinnercross"; - static const struct { - u8 i; /* Beginning of keyword text in zKeyText[] */ - u8 nChar; /* Length of the keyword in characters */ - u8 code; /* Join type mask */ - } aKeyword[] = { - /* natural */ { 0, 7, JT_NATURAL }, - /* left */ { 6, 4, JT_LEFT|JT_OUTER }, - /* outer */ { 10, 5, JT_OUTER }, - /* right */ { 14, 5, JT_RIGHT|JT_OUTER }, - /* full */ { 19, 4, JT_LEFT|JT_RIGHT|JT_OUTER }, - /* inner */ { 23, 5, JT_INNER }, - /* cross */ { 28, 5, JT_INNER|JT_CROSS }, - }; - int i, j; - apAll[0] = pA; - apAll[1] = pB; - apAll[2] = pC; - for(i=0; i<3 && apAll[i]; i++){ - p = apAll[i]; - for(j=0; jn==aKeyword[j].nChar - && sqlite3StrNICmp((char*)p->z, &zKeyText[aKeyword[j].i], p->n)==0 ){ - jointype |= aKeyword[j].code; - break; - } - } - testcase( j==0 || j==1 || j==2 || j==3 || j==4 || j==5 || j==6 ); - if( j>=ArraySize(aKeyword) ){ - jointype |= JT_ERROR; - break; - } - } - if( - (jointype & (JT_INNER|JT_OUTER))==(JT_INNER|JT_OUTER) || - (jointype & JT_ERROR)!=0 - ){ - const char *zSp = " "; - assert( pB!=0 ); - if( pC==0 ){ zSp++; } - sqlite3ErrorMsg(pParse, "unknown or unsupported join type: " - "%T %T%s%T", pA, pB, zSp, pC); - jointype = JT_INNER; - }else if( (jointype & JT_OUTER)!=0 - && (jointype & (JT_LEFT|JT_RIGHT))!=JT_LEFT ){ - sqlite3ErrorMsg(pParse, - "RIGHT and FULL OUTER JOINs are not currently supported"); - jointype = JT_INNER; - } - return jointype; -} - -/* -** Return the index of a column in a table. Return -1 if the column -** is not contained in the table. -*/ -static int columnIndex(Table *pTab, const char *zCol){ - int i; - for(i=0; inCol; i++){ - if( sqlite3StrICmp(pTab->aCol[i].zName, zCol)==0 ) return i; - } - return -1; -} - -/* -** Search the first N tables in pSrc, from left to right, looking for a -** table that has a column named zCol. -** -** When found, set *piTab and *piCol to the table index and column index -** of the matching column and return TRUE. -** -** If not found, return FALSE. -*/ -static int tableAndColumnIndex( - SrcList *pSrc, /* Array of tables to search */ - int N, /* Number of tables in pSrc->a[] to search */ - const char *zCol, /* Name of the column we are looking for */ - int *piTab, /* Write index of pSrc->a[] here */ - int *piCol /* Write index of pSrc->a[*piTab].pTab->aCol[] here */ -){ - int i; /* For looping over tables in pSrc */ - int iCol; /* Index of column matching zCol */ - - assert( (piTab==0)==(piCol==0) ); /* Both or neither are NULL */ - for(i=0; ia[i].pTab, zCol); - if( iCol>=0 ){ - if( piTab ){ - *piTab = i; - *piCol = iCol; - } - return 1; - } - } - return 0; -} - -/* -** This function is used to add terms implied by JOIN syntax to the -** WHERE clause expression of a SELECT statement. The new term, which -** is ANDed with the existing WHERE clause, is of the form: -** -** (tab1.col1 = tab2.col2) -** -** where tab1 is the iSrc'th table in SrcList pSrc and tab2 is the -** (iSrc+1)'th. Column col1 is column iColLeft of tab1, and col2 is -** column iColRight of tab2. 
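The error branch above is easy to observe through the public API. In the SQLite version vendored here a RIGHT or FULL OUTER JOIN is rejected at prepare time, while a LEFT OUTER JOIN is accepted; newer SQLite releases (3.39 and later) accept all of these, so which branch fires is version dependent. A sketch:

#include <stdio.h>
#include <sqlite3.h>

int main(void){
  sqlite3 *db;
  sqlite3_stmt *pStmt = 0;

  sqlite3_open(":memory:", &db);
  sqlite3_exec(db, "CREATE TABLE a(x); CREATE TABLE b(x);", 0, 0, 0);

  /* LEFT OUTER JOIN is supported and prepares cleanly. */
  if( sqlite3_prepare_v2(db, "SELECT * FROM a LEFT JOIN b ON a.x=b.x",
                         -1, &pStmt, 0)==SQLITE_OK ){
    sqlite3_finalize(pStmt);
  }

  /* RIGHT JOIN is rejected by sqlite3JoinType() in this vintage. */
  if( sqlite3_prepare_v2(db, "SELECT * FROM a RIGHT JOIN b ON a.x=b.x",
                         -1, &pStmt, 0)!=SQLITE_OK ){
    printf("%s\n", sqlite3_errmsg(db));  /* "RIGHT and FULL OUTER JOINs are not currently supported" */
  }else{
    sqlite3_finalize(pStmt);             /* reached on newer SQLite builds */
  }
  sqlite3_close(db);
  return 0;
}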
-*/ -static void addWhereTerm( - Parse *pParse, /* Parsing context */ - SrcList *pSrc, /* List of tables in FROM clause */ - int iLeft, /* Index of first table to join in pSrc */ - int iColLeft, /* Index of column in first table */ - int iRight, /* Index of second table in pSrc */ - int iColRight, /* Index of column in second table */ - int isOuterJoin, /* True if this is an OUTER join */ - Expr **ppWhere /* IN/OUT: The WHERE clause to add to */ -){ - sqlite3 *db = pParse->db; - Expr *pE1; - Expr *pE2; - Expr *pEq; - - assert( iLeftnSrc>iRight ); - assert( pSrc->a[iLeft].pTab ); - assert( pSrc->a[iRight].pTab ); - - pE1 = sqlite3CreateColumnExpr(db, pSrc, iLeft, iColLeft); - pE2 = sqlite3CreateColumnExpr(db, pSrc, iRight, iColRight); - - pEq = sqlite3PExpr(pParse, TK_EQ, pE1, pE2, 0); - if( pEq && isOuterJoin ){ - ExprSetProperty(pEq, EP_FromJoin); - assert( !ExprHasProperty(pEq, EP_TokenOnly|EP_Reduced) ); - ExprSetVVAProperty(pEq, EP_NoReduce); - pEq->iRightJoinTable = (i16)pE2->iTable; - } - *ppWhere = sqlite3ExprAnd(db, *ppWhere, pEq); -} - -/* -** Set the EP_FromJoin property on all terms of the given expression. -** And set the Expr.iRightJoinTable to iTable for every term in the -** expression. -** -** The EP_FromJoin property is used on terms of an expression to tell -** the LEFT OUTER JOIN processing logic that this term is part of the -** join restriction specified in the ON or USING clause and not a part -** of the more general WHERE clause. These terms are moved over to the -** WHERE clause during join processing but we need to remember that they -** originated in the ON or USING clause. -** -** The Expr.iRightJoinTable tells the WHERE clause processing that the -** expression depends on table iRightJoinTable even if that table is not -** explicitly mentioned in the expression. That information is needed -** for cases like this: -** -** SELECT * FROM t1 LEFT JOIN t2 ON t1.a=t2.b AND t1.x=5 -** -** The where clause needs to defer the handling of the t1.x=5 -** term until after the t2 loop of the join. In that way, a -** NULL t2 row will be inserted whenever t1.x!=5. If we do not -** defer the handling of t1.x=5, it will be processed immediately -** after the t1 loop and rows with t1.x!=5 will never appear in -** the output, which is incorrect. -*/ -static void setJoinExpr(Expr *p, int iTable){ - while( p ){ - ExprSetProperty(p, EP_FromJoin); - assert( !ExprHasProperty(p, EP_TokenOnly|EP_Reduced) ); - ExprSetVVAProperty(p, EP_NoReduce); - p->iRightJoinTable = (i16)iTable; - setJoinExpr(p->pLeft, iTable); - p = p->pRight; - } -} - -/* -** This routine processes the join information for a SELECT statement. -** ON and USING clauses are converted into extra terms of the WHERE clause. -** NATURAL joins also create extra WHERE clause terms. -** -** The terms of a FROM clause are contained in the Select.pSrc structure. -** The left most table is the first entry in Select.pSrc. The right-most -** table is the last entry. The join operator is held in the entry to -** the left. Thus entry 0 contains the join operator for the join between -** entries 0 and 1. Any ON or USING clauses associated with the join are -** also attached to the left entry. -** -** This routine returns the number of errors encountered. 
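The t1.x=5 discussion above is the classic place where ON and WHERE diverge for an outer join; running both spellings side by side makes the difference concrete. A self-contained sketch (table and column names follow the comment above):

#include <stdio.h>
#include <sqlite3.h>

/* Print each row prefixed with a tag identifying which query produced it. */
static int printRow(void *pTag, int nCol, char **azVal, char **azCol){
  int i;
  (void)azCol;
  printf("%s:", (const char*)pTag);
  for(i=0; i<nCol; i++) printf(" %s", azVal[i] ? azVal[i] : "NULL");
  printf("\n");
  return 0;
}

int main(void){
  sqlite3 *db;
  sqlite3_open(":memory:", &db);
  sqlite3_exec(db,
      "CREATE TABLE t1(a,x); CREATE TABLE t2(b);"
      "INSERT INTO t1 VALUES(1,5),(2,7);"
      "INSERT INTO t2 VALUES(1),(2);", 0, 0, 0);

  /* Constraint inside ON: every t1 row survives; non-matches get a NULL t2 row. */
  sqlite3_exec(db, "SELECT * FROM t1 LEFT JOIN t2 ON t1.a=t2.b AND t1.x=5",
               printRow, "ON   ", 0);

  /* Same constraint in WHERE: t1 rows with x!=5 are filtered out entirely. */
  sqlite3_exec(db, "SELECT * FROM t1 LEFT JOIN t2 ON t1.a=t2.b WHERE t1.x=5",
               printRow, "WHERE", 0);

  sqlite3_close(db);
  return 0;
}

The first query prints two rows (1 5 1 and 2 7 NULL); the second prints only 1 5 1, which is exactly the deferral behaviour the comment describes.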
-*/ -static int sqliteProcessJoin(Parse *pParse, Select *p){ - SrcList *pSrc; /* All tables in the FROM clause */ - int i, j; /* Loop counters */ - struct SrcList_item *pLeft; /* Left table being joined */ - struct SrcList_item *pRight; /* Right table being joined */ - - pSrc = p->pSrc; - pLeft = &pSrc->a[0]; - pRight = &pLeft[1]; - for(i=0; inSrc-1; i++, pRight++, pLeft++){ - Table *pLeftTab = pLeft->pTab; - Table *pRightTab = pRight->pTab; - int isOuter; - - if( NEVER(pLeftTab==0 || pRightTab==0) ) continue; - isOuter = (pRight->jointype & JT_OUTER)!=0; - - /* When the NATURAL keyword is present, add WHERE clause terms for - ** every column that the two tables have in common. - */ - if( pRight->jointype & JT_NATURAL ){ - if( pRight->pOn || pRight->pUsing ){ - sqlite3ErrorMsg(pParse, "a NATURAL join may not have " - "an ON or USING clause", 0); - return 1; - } - for(j=0; jnCol; j++){ - char *zName; /* Name of column in the right table */ - int iLeft; /* Matching left table */ - int iLeftCol; /* Matching column in the left table */ - - zName = pRightTab->aCol[j].zName; - if( tableAndColumnIndex(pSrc, i+1, zName, &iLeft, &iLeftCol) ){ - addWhereTerm(pParse, pSrc, iLeft, iLeftCol, i+1, j, - isOuter, &p->pWhere); - } - } - } - - /* Disallow both ON and USING clauses in the same join - */ - if( pRight->pOn && pRight->pUsing ){ - sqlite3ErrorMsg(pParse, "cannot have both ON and USING " - "clauses in the same join"); - return 1; - } - - /* Add the ON clause to the end of the WHERE clause, connected by - ** an AND operator. - */ - if( pRight->pOn ){ - if( isOuter ) setJoinExpr(pRight->pOn, pRight->iCursor); - p->pWhere = sqlite3ExprAnd(pParse->db, p->pWhere, pRight->pOn); - pRight->pOn = 0; - } - - /* Create extra terms on the WHERE clause for each column named - ** in the USING clause. Example: If the two tables to be joined are - ** A and B and the USING clause names X, Y, and Z, then add this - ** to the WHERE clause: A.X=B.X AND A.Y=B.Y AND A.Z=B.Z - ** Report an error if any column mentioned in the USING clause is - ** not contained in both tables to be joined. - */ - if( pRight->pUsing ){ - IdList *pList = pRight->pUsing; - for(j=0; jnId; j++){ - char *zName; /* Name of the term in the USING clause */ - int iLeft; /* Table on the left with matching column name */ - int iLeftCol; /* Column number of matching column on the left */ - int iRightCol; /* Column number of matching column on the right */ - - zName = pList->a[j].zName; - iRightCol = columnIndex(pRightTab, zName); - if( iRightCol<0 - || !tableAndColumnIndex(pSrc, i+1, zName, &iLeft, &iLeftCol) - ){ - sqlite3ErrorMsg(pParse, "cannot join using column %s - column " - "not present in both tables", zName); - return 1; - } - addWhereTerm(pParse, pSrc, iLeft, iLeftCol, i+1, iRightCol, - isOuter, &p->pWhere); - } - } - } - return 0; -} - -/* Forward reference */ -static KeyInfo *keyInfoFromExprList( - Parse *pParse, /* Parsing context */ - ExprList *pList, /* Form the KeyInfo object from this ExprList */ - int iStart, /* Begin with this column of pList */ - int nExtra /* Add this many extra columns to the end */ -); - -/* -** Insert code into "v" that will push the record in register regData -** into the sorter. 
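Because USING and NATURAL are rewritten into the same implicit equality terms described above, the three spellings below return identical rows. A small sketch (table and column names are made up for the illustration):

#include <stdio.h>
#include <sqlite3.h>

/* Count the rows produced by a query. */
static int countCb(void *pN, int nCol, char **azVal, char **azCol){
  (void)nCol; (void)azVal; (void)azCol;
  ++*(int*)pN;
  return 0;
}

static int countRows(sqlite3 *db, const char *zSql){
  int n = 0;
  sqlite3_exec(db, zSql, countCb, &n, 0);
  return n;
}

int main(void){
  sqlite3 *db;
  sqlite3_open(":memory:", &db);
  sqlite3_exec(db,
      "CREATE TABLE a(id, v); CREATE TABLE b(id, w);"
      "INSERT INTO a VALUES(1,'x'),(2,'y');"
      "INSERT INTO b VALUES(1,'p'),(3,'q');", 0, 0, 0);
  printf("%d %d %d\n",                                   /* prints: 1 1 1 */
      countRows(db, "SELECT * FROM a JOIN b ON a.id=b.id"),
      countRows(db, "SELECT * FROM a JOIN b USING(id)"),
      countRows(db, "SELECT * FROM a NATURAL JOIN b"));
  sqlite3_close(db);
  return 0;
}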
-*/ -static void pushOntoSorter( - Parse *pParse, /* Parser context */ - SortCtx *pSort, /* Information about the ORDER BY clause */ - Select *pSelect, /* The whole SELECT statement */ - int regData /* Register holding data to be sorted */ -){ - Vdbe *v = pParse->pVdbe; - int nExpr = pSort->pOrderBy->nExpr; - int regRecord = ++pParse->nMem; - int regBase = pParse->nMem+1; - int nOBSat = pSort->nOBSat; - int op; - - pParse->nMem += nExpr+2; /* nExpr+2 registers allocated at regBase */ - sqlite3ExprCacheClear(pParse); - sqlite3ExprCodeExprList(pParse, pSort->pOrderBy, regBase, 0); - sqlite3VdbeAddOp2(v, OP_Sequence, pSort->iECursor, regBase+nExpr); - sqlite3ExprCodeMove(pParse, regData, regBase+nExpr+1, 1); - sqlite3VdbeAddOp3(v, OP_MakeRecord, regBase+nOBSat, nExpr+2-nOBSat,regRecord); - if( nOBSat>0 ){ - int regPrevKey; /* The first nOBSat columns of the previous row */ - int addrFirst; /* Address of the OP_IfNot opcode */ - int addrJmp; /* Address of the OP_Jump opcode */ - VdbeOp *pOp; /* Opcode that opens the sorter */ - int nKey; /* Number of sorting key columns, including OP_Sequence */ - KeyInfo *pKI; /* Original KeyInfo on the sorter table */ - - regPrevKey = pParse->nMem+1; - pParse->nMem += pSort->nOBSat; - nKey = nExpr - pSort->nOBSat + 1; - addrFirst = sqlite3VdbeAddOp1(v, OP_IfNot, regBase+nExpr); VdbeCoverage(v); - sqlite3VdbeAddOp3(v, OP_Compare, regPrevKey, regBase, pSort->nOBSat); - pOp = sqlite3VdbeGetOp(v, pSort->addrSortIndex); - if( pParse->db->mallocFailed ) return; - pOp->p2 = nKey + 1; - pKI = pOp->p4.pKeyInfo; - memset(pKI->aSortOrder, 0, pKI->nField); /* Makes OP_Jump below testable */ - sqlite3VdbeChangeP4(v, -1, (char*)pKI, P4_KEYINFO); - pOp->p4.pKeyInfo = keyInfoFromExprList(pParse, pSort->pOrderBy, nOBSat, 1); - addrJmp = sqlite3VdbeCurrentAddr(v); - sqlite3VdbeAddOp3(v, OP_Jump, addrJmp+1, 0, addrJmp+1); VdbeCoverage(v); - pSort->labelBkOut = sqlite3VdbeMakeLabel(v); - pSort->regReturn = ++pParse->nMem; - sqlite3VdbeAddOp2(v, OP_Gosub, pSort->regReturn, pSort->labelBkOut); - sqlite3VdbeAddOp1(v, OP_ResetSorter, pSort->iECursor); - sqlite3VdbeJumpHere(v, addrFirst); - sqlite3VdbeAddOp3(v, OP_Move, regBase, regPrevKey, pSort->nOBSat); - sqlite3VdbeJumpHere(v, addrJmp); - } - if( pSort->sortFlags & SORTFLAG_UseSorter ){ - op = OP_SorterInsert; - }else{ - op = OP_IdxInsert; - } - sqlite3VdbeAddOp2(v, op, pSort->iECursor, regRecord); - if( pSelect->iLimit ){ - int addr1, addr2; - int iLimit; - if( pSelect->iOffset ){ - iLimit = pSelect->iOffset+1; - }else{ - iLimit = pSelect->iLimit; - } - addr1 = sqlite3VdbeAddOp1(v, OP_IfZero, iLimit); VdbeCoverage(v); - sqlite3VdbeAddOp2(v, OP_AddImm, iLimit, -1); - addr2 = sqlite3VdbeAddOp0(v, OP_Goto); - sqlite3VdbeJumpHere(v, addr1); - sqlite3VdbeAddOp1(v, OP_Last, pSort->iECursor); - sqlite3VdbeAddOp1(v, OP_Delete, pSort->iECursor); - sqlite3VdbeJumpHere(v, addr2); - } -} - -/* -** Add code to implement the OFFSET -*/ -static void codeOffset( - Vdbe *v, /* Generate code into this VM */ - int iOffset, /* Register holding the offset counter */ - int iContinue /* Jump here to skip the current record */ -){ - if( iOffset>0 ){ - int addr; - sqlite3VdbeAddOp2(v, OP_AddImm, iOffset, -1); - addr = sqlite3VdbeAddOp1(v, OP_IfNeg, iOffset); VdbeCoverage(v); - sqlite3VdbeAddOp2(v, OP_Goto, 0, iContinue); - VdbeComment((v, "skip OFFSET records")); - sqlite3VdbeJumpHere(v, addr); - } -} - -/* -** Add code that will check to make sure the N registers starting at iMem -** form a distinct entry. 
iTab is a sorting index that holds previously -** seen combinations of the N values. A new entry is made in iTab -** if the current N values are new. -** -** A jump to addrRepeat is made and the N+1 values are popped from the -** stack if the top N elements are not distinct. -*/ -static void codeDistinct( - Parse *pParse, /* Parsing and code generating context */ - int iTab, /* A sorting index used to test for distinctness */ - int addrRepeat, /* Jump to here if not distinct */ - int N, /* Number of elements */ - int iMem /* First element */ -){ - Vdbe *v; - int r1; - - v = pParse->pVdbe; - r1 = sqlite3GetTempReg(pParse); - sqlite3VdbeAddOp4Int(v, OP_Found, iTab, addrRepeat, iMem, N); VdbeCoverage(v); - sqlite3VdbeAddOp3(v, OP_MakeRecord, iMem, N, r1); - sqlite3VdbeAddOp2(v, OP_IdxInsert, iTab, r1); - sqlite3ReleaseTempReg(pParse, r1); -} - -#ifndef SQLITE_OMIT_SUBQUERY -/* -** Generate an error message when a SELECT is used within a subexpression -** (example: "a IN (SELECT * FROM table)") but it has more than 1 result -** column. We do this in a subroutine because the error used to occur -** in multiple places. (The error only occurs in one place now, but we -** retain the subroutine to minimize code disruption.) -*/ -static int checkForMultiColumnSelectError( - Parse *pParse, /* Parse context. */ - SelectDest *pDest, /* Destination of SELECT results */ - int nExpr /* Number of result columns returned by SELECT */ -){ - int eDest = pDest->eDest; - if( nExpr>1 && (eDest==SRT_Mem || eDest==SRT_Set) ){ - sqlite3ErrorMsg(pParse, "only a single result allowed for " - "a SELECT that is part of an expression"); - return 1; - }else{ - return 0; - } -} -#endif - -/* -** This routine generates the code for the inside of the inner loop -** of a SELECT. -** -** If srcTab is negative, then the pEList expressions -** are evaluated in order to get the data for this row. If srcTab is -** zero or more, then data is pulled from srcTab and pEList is used only -** to get number columns and the datatype for each column. -*/ -static void selectInnerLoop( - Parse *pParse, /* The parser context */ - Select *p, /* The complete select statement being coded */ - ExprList *pEList, /* List of values being extracted */ - int srcTab, /* Pull data from this table */ - SortCtx *pSort, /* If not NULL, info on how to process ORDER BY */ - DistinctCtx *pDistinct, /* If not NULL, info on how to process DISTINCT */ - SelectDest *pDest, /* How to dispose of the results */ - int iContinue, /* Jump here to continue with next row */ - int iBreak /* Jump here to break out of the inner loop */ -){ - Vdbe *v = pParse->pVdbe; - int i; - int hasDistinct; /* True if the DISTINCT keyword is present */ - int regResult; /* Start of memory holding result set */ - int eDest = pDest->eDest; /* How to dispose of results */ - int iParm = pDest->iSDParm; /* First argument to disposal method */ - int nResultCol; /* Number of result columns */ - - assert( v ); - assert( pEList!=0 ); - hasDistinct = pDistinct ? pDistinct->eTnctType : WHERE_DISTINCT_NOOP; - if( pSort && pSort->pOrderBy==0 ) pSort = 0; - if( pSort==0 && !hasDistinct ){ - assert( iContinue!=0 ); - codeOffset(v, p->iOffset, iContinue); - } - - /* Pull the requested columns. 
- */ - nResultCol = pEList->nExpr; - - if( pDest->iSdst==0 ){ - pDest->iSdst = pParse->nMem+1; - pParse->nMem += nResultCol; - }else if( pDest->iSdst+nResultCol > pParse->nMem ){ - /* This is an error condition that can result, for example, when a SELECT - ** on the right-hand side of an INSERT contains more result columns than - ** there are columns in the table on the left. The error will be caught - ** and reported later. But we need to make sure enough memory is allocated - ** to avoid other spurious errors in the meantime. */ - pParse->nMem += nResultCol; - } - pDest->nSdst = nResultCol; - regResult = pDest->iSdst; - if( srcTab>=0 ){ - for(i=0; ia[i].zName)); - } - }else if( eDest!=SRT_Exists ){ - /* If the destination is an EXISTS(...) expression, the actual - ** values returned by the SELECT are not required. - */ - sqlite3ExprCodeExprList(pParse, pEList, regResult, - (eDest==SRT_Output||eDest==SRT_Coroutine)?SQLITE_ECEL_DUP:0); - } - - /* If the DISTINCT keyword was present on the SELECT statement - ** and this row has been seen before, then do not make this row - ** part of the result. - */ - if( hasDistinct ){ - switch( pDistinct->eTnctType ){ - case WHERE_DISTINCT_ORDERED: { - VdbeOp *pOp; /* No longer required OpenEphemeral instr. */ - int iJump; /* Jump destination */ - int regPrev; /* Previous row content */ - - /* Allocate space for the previous row */ - regPrev = pParse->nMem+1; - pParse->nMem += nResultCol; - - /* Change the OP_OpenEphemeral coded earlier to an OP_Null - ** sets the MEM_Cleared bit on the first register of the - ** previous value. This will cause the OP_Ne below to always - ** fail on the first iteration of the loop even if the first - ** row is all NULLs. - */ - sqlite3VdbeChangeToNoop(v, pDistinct->addrTnct); - pOp = sqlite3VdbeGetOp(v, pDistinct->addrTnct); - pOp->opcode = OP_Null; - pOp->p1 = 1; - pOp->p2 = regPrev; - - iJump = sqlite3VdbeCurrentAddr(v) + nResultCol; - for(i=0; ia[i].pExpr); - if( iaddrTnct); - break; - } - - default: { - assert( pDistinct->eTnctType==WHERE_DISTINCT_UNORDERED ); - codeDistinct(pParse, pDistinct->tabTnct, iContinue, nResultCol, regResult); - break; - } - } - if( pSort==0 ){ - codeOffset(v, p->iOffset, iContinue); - } - } - - switch( eDest ){ - /* In this mode, write each query result to the key of the temporary - ** table iParm. - */ -#ifndef SQLITE_OMIT_COMPOUND_SELECT - case SRT_Union: { - int r1; - r1 = sqlite3GetTempReg(pParse); - sqlite3VdbeAddOp3(v, OP_MakeRecord, regResult, nResultCol, r1); - sqlite3VdbeAddOp2(v, OP_IdxInsert, iParm, r1); - sqlite3ReleaseTempReg(pParse, r1); - break; - } - - /* Construct a record from the query result, but instead of - ** saving that record, use it as a key to delete elements from - ** the temporary table iParm. - */ - case SRT_Except: { - sqlite3VdbeAddOp3(v, OP_IdxDelete, iParm, regResult, nResultCol); - break; - } -#endif /* SQLITE_OMIT_COMPOUND_SELECT */ - - /* Store the result as data using a unique key. - */ - case SRT_Fifo: - case SRT_DistFifo: - case SRT_Table: - case SRT_EphemTab: { - int r1 = sqlite3GetTempReg(pParse); - testcase( eDest==SRT_Table ); - testcase( eDest==SRT_EphemTab ); - sqlite3VdbeAddOp3(v, OP_MakeRecord, regResult, nResultCol, r1); -#ifndef SQLITE_OMIT_CTE - if( eDest==SRT_DistFifo ){ - /* If the destination is DistFifo, then cursor (iParm+1) is open - ** on an ephemeral index. If the current row is already present - ** in the index, do not write it to the output. 
If not, add the - ** current row to the index and proceed with writing it to the - ** output table as well. */ - int addr = sqlite3VdbeCurrentAddr(v) + 4; - sqlite3VdbeAddOp4Int(v, OP_Found, iParm+1, addr, r1, 0); VdbeCoverage(v); - sqlite3VdbeAddOp2(v, OP_IdxInsert, iParm+1, r1); - assert( pSort==0 ); - } -#endif - if( pSort ){ - pushOntoSorter(pParse, pSort, p, r1); - }else{ - int r2 = sqlite3GetTempReg(pParse); - sqlite3VdbeAddOp2(v, OP_NewRowid, iParm, r2); - sqlite3VdbeAddOp3(v, OP_Insert, iParm, r1, r2); - sqlite3VdbeChangeP5(v, OPFLAG_APPEND); - sqlite3ReleaseTempReg(pParse, r2); - } - sqlite3ReleaseTempReg(pParse, r1); - break; - } - -#ifndef SQLITE_OMIT_SUBQUERY - /* If we are creating a set for an "expr IN (SELECT ...)" construct, - ** then there should be a single item on the stack. Write this - ** item into the set table with bogus data. - */ - case SRT_Set: { - assert( nResultCol==1 ); - pDest->affSdst = - sqlite3CompareAffinity(pEList->a[0].pExpr, pDest->affSdst); - if( pSort ){ - /* At first glance you would think we could optimize out the - ** ORDER BY in this case since the order of entries in the set - ** does not matter. But there might be a LIMIT clause, in which - ** case the order does matter */ - pushOntoSorter(pParse, pSort, p, regResult); - }else{ - int r1 = sqlite3GetTempReg(pParse); - sqlite3VdbeAddOp4(v, OP_MakeRecord, regResult,1,r1, &pDest->affSdst, 1); - sqlite3ExprCacheAffinityChange(pParse, regResult, 1); - sqlite3VdbeAddOp2(v, OP_IdxInsert, iParm, r1); - sqlite3ReleaseTempReg(pParse, r1); - } - break; - } - - /* If any row exist in the result set, record that fact and abort. - */ - case SRT_Exists: { - sqlite3VdbeAddOp2(v, OP_Integer, 1, iParm); - /* The LIMIT clause will terminate the loop for us */ - break; - } - - /* If this is a scalar select that is part of an expression, then - ** store the results in the appropriate memory cell and break out - ** of the scan loop. - */ - case SRT_Mem: { - assert( nResultCol==1 ); - if( pSort ){ - pushOntoSorter(pParse, pSort, p, regResult); - }else{ - sqlite3ExprCodeMove(pParse, regResult, iParm, 1); - /* The LIMIT clause will jump out of the loop for us */ - } - break; - } -#endif /* #ifndef SQLITE_OMIT_SUBQUERY */ - - case SRT_Coroutine: /* Send data to a co-routine */ - case SRT_Output: { /* Return the results */ - testcase( eDest==SRT_Coroutine ); - testcase( eDest==SRT_Output ); - if( pSort ){ - int r1 = sqlite3GetTempReg(pParse); - sqlite3VdbeAddOp3(v, OP_MakeRecord, regResult, nResultCol, r1); - pushOntoSorter(pParse, pSort, p, r1); - sqlite3ReleaseTempReg(pParse, r1); - }else if( eDest==SRT_Coroutine ){ - sqlite3VdbeAddOp1(v, OP_Yield, pDest->iSDParm); - }else{ - sqlite3VdbeAddOp2(v, OP_ResultRow, regResult, nResultCol); - sqlite3ExprCacheAffinityChange(pParse, regResult, nResultCol); - } - break; - } - -#ifndef SQLITE_OMIT_CTE - /* Write the results into a priority queue that is order according to - ** pDest->pOrderBy (in pSO). pDest->iSDParm (in iParm) is the cursor for an - ** index with pSO->nExpr+2 columns. Build a key using pSO for the first - ** pSO->nExpr columns, then make sure all keys are unique by adding a - ** final OP_Sequence column. The last column is the record as a blob. 
- */ - case SRT_DistQueue: - case SRT_Queue: { - int nKey; - int r1, r2, r3; - int addrTest = 0; - ExprList *pSO; - pSO = pDest->pOrderBy; - assert( pSO ); - nKey = pSO->nExpr; - r1 = sqlite3GetTempReg(pParse); - r2 = sqlite3GetTempRange(pParse, nKey+2); - r3 = r2+nKey+1; - if( eDest==SRT_DistQueue ){ - /* If the destination is DistQueue, then cursor (iParm+1) is open - ** on a second ephemeral index that holds all values every previously - ** added to the queue. */ - addrTest = sqlite3VdbeAddOp4Int(v, OP_Found, iParm+1, 0, - regResult, nResultCol); - VdbeCoverage(v); - } - sqlite3VdbeAddOp3(v, OP_MakeRecord, regResult, nResultCol, r3); - if( eDest==SRT_DistQueue ){ - sqlite3VdbeAddOp2(v, OP_IdxInsert, iParm+1, r3); - sqlite3VdbeChangeP5(v, OPFLAG_USESEEKRESULT); - } - for(i=0; ia[i].u.x.iOrderByCol - 1, - r2+i); - } - sqlite3VdbeAddOp2(v, OP_Sequence, iParm, r2+nKey); - sqlite3VdbeAddOp3(v, OP_MakeRecord, r2, nKey+2, r1); - sqlite3VdbeAddOp2(v, OP_IdxInsert, iParm, r1); - if( addrTest ) sqlite3VdbeJumpHere(v, addrTest); - sqlite3ReleaseTempReg(pParse, r1); - sqlite3ReleaseTempRange(pParse, r2, nKey+2); - break; - } -#endif /* SQLITE_OMIT_CTE */ - - - -#if !defined(SQLITE_OMIT_TRIGGER) - /* Discard the results. This is used for SELECT statements inside - ** the body of a TRIGGER. The purpose of such selects is to call - ** user-defined functions that have side effects. We do not care - ** about the actual results of the select. - */ - default: { - assert( eDest==SRT_Discard ); - break; - } -#endif - } - - /* Jump to the end of the loop if the LIMIT is reached. Except, if - ** there is a sorter, in which case the sorter has already limited - ** the output for us. - */ - if( pSort==0 && p->iLimit ){ - sqlite3VdbeAddOp3(v, OP_IfZero, p->iLimit, iBreak, -1); VdbeCoverage(v); - } -} - -/* -** Allocate a KeyInfo object sufficient for an index of N key columns and -** X extra columns. -*/ -SQLITE_PRIVATE KeyInfo *sqlite3KeyInfoAlloc(sqlite3 *db, int N, int X){ - KeyInfo *p = sqlite3DbMallocZero(0, - sizeof(KeyInfo) + (N+X)*(sizeof(CollSeq*)+1)); - if( p ){ - p->aSortOrder = (u8*)&p->aColl[N+X]; - p->nField = (u16)N; - p->nXField = (u16)X; - p->enc = ENC(db); - p->db = db; - p->nRef = 1; - }else{ - db->mallocFailed = 1; - } - return p; -} - -/* -** Deallocate a KeyInfo object -*/ -SQLITE_PRIVATE void sqlite3KeyInfoUnref(KeyInfo *p){ - if( p ){ - assert( p->nRef>0 ); - p->nRef--; - if( p->nRef==0 ) sqlite3DbFree(0, p); - } -} - -/* -** Make a new pointer to a KeyInfo object -*/ -SQLITE_PRIVATE KeyInfo *sqlite3KeyInfoRef(KeyInfo *p){ - if( p ){ - assert( p->nRef>0 ); - p->nRef++; - } - return p; -} - -#ifdef SQLITE_DEBUG -/* -** Return TRUE if a KeyInfo object can be change. The KeyInfo object -** can only be changed if this is just a single reference to the object. -** -** This routine is used only inside of assert() statements. -*/ -SQLITE_PRIVATE int sqlite3KeyInfoIsWriteable(KeyInfo *p){ return p->nRef==1; } -#endif /* SQLITE_DEBUG */ - -/* -** Given an expression list, generate a KeyInfo structure that records -** the collating sequence for each expression in that expression list. -** -** If the ExprList is an ORDER BY or GROUP BY clause then the resulting -** KeyInfo structure is appropriate for initializing a virtual index to -** implement that clause. If the ExprList is the result set of a SELECT -** then the KeyInfo structure is appropriate for initializing a virtual -** index to implement a DISTINCT test. -** -** Space to hold the KeyInfo structure is obtain from malloc. 
The calling -** function is responsible for seeing that this structure is eventually -** freed. -*/ -static KeyInfo *keyInfoFromExprList( - Parse *pParse, /* Parsing context */ - ExprList *pList, /* Form the KeyInfo object from this ExprList */ - int iStart, /* Begin with this column of pList */ - int nExtra /* Add this many extra columns to the end */ -){ - int nExpr; - KeyInfo *pInfo; - struct ExprList_item *pItem; - sqlite3 *db = pParse->db; - int i; - - nExpr = pList->nExpr; - pInfo = sqlite3KeyInfoAlloc(db, nExpr+nExtra-iStart, 1); - if( pInfo ){ - assert( sqlite3KeyInfoIsWriteable(pInfo) ); - for(i=iStart, pItem=pList->a+iStart; ipExpr); - if( !pColl ) pColl = db->pDfltColl; - pInfo->aColl[i-iStart] = pColl; - pInfo->aSortOrder[i-iStart] = pItem->sortOrder; - } - } - return pInfo; -} - -#ifndef SQLITE_OMIT_COMPOUND_SELECT -/* -** Name of the connection operator, used for error messages. -*/ -static const char *selectOpName(int id){ - char *z; - switch( id ){ - case TK_ALL: z = "UNION ALL"; break; - case TK_INTERSECT: z = "INTERSECT"; break; - case TK_EXCEPT: z = "EXCEPT"; break; - default: z = "UNION"; break; - } - return z; -} -#endif /* SQLITE_OMIT_COMPOUND_SELECT */ - -#ifndef SQLITE_OMIT_EXPLAIN -/* -** Unless an "EXPLAIN QUERY PLAN" command is being processed, this function -** is a no-op. Otherwise, it adds a single row of output to the EQP result, -** where the caption is of the form: -** -** "USE TEMP B-TREE FOR xxx" -** -** where xxx is one of "DISTINCT", "ORDER BY" or "GROUP BY". Exactly which -** is determined by the zUsage argument. -*/ -static void explainTempTable(Parse *pParse, const char *zUsage){ - if( pParse->explain==2 ){ - Vdbe *v = pParse->pVdbe; - char *zMsg = sqlite3MPrintf(pParse->db, "USE TEMP B-TREE FOR %s", zUsage); - sqlite3VdbeAddOp4(v, OP_Explain, pParse->iSelectId, 0, 0, zMsg, P4_DYNAMIC); - } -} - -/* -** Assign expression b to lvalue a. A second, no-op, version of this macro -** is provided when SQLITE_OMIT_EXPLAIN is defined. This allows the code -** in sqlite3Select() to assign values to structure member variables that -** only exist if SQLITE_OMIT_EXPLAIN is not defined without polluting the -** code with #ifndef directives. -*/ -# define explainSetInteger(a, b) a = b - -#else -/* No-op versions of the explainXXX() functions and macros. */ -# define explainTempTable(y,z) -# define explainSetInteger(y,z) -#endif - -#if !defined(SQLITE_OMIT_EXPLAIN) && !defined(SQLITE_OMIT_COMPOUND_SELECT) -/* -** Unless an "EXPLAIN QUERY PLAN" command is being processed, this function -** is a no-op. Otherwise, it adds a single row of output to the EQP result, -** where the caption is of one of the two forms: -** -** "COMPOSITE SUBQUERIES iSub1 and iSub2 (op)" -** "COMPOSITE SUBQUERIES iSub1 and iSub2 USING TEMP B-TREE (op)" -** -** where iSub1 and iSub2 are the integers passed as the corresponding -** function parameters, and op is the text representation of the parameter -** of the same name. The parameter "op" must be one of TK_UNION, TK_EXCEPT, -** TK_INTERSECT or TK_ALL. The first form is used if argument bUseTmp is -** false, or the second form if it is true. -*/ -static void explainComposite( - Parse *pParse, /* Parse context */ - int op, /* One of TK_UNION, TK_EXCEPT etc. 
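The "USE TEMP B-TREE FOR ..." rows added by explainTempTable() show up in ordinary EXPLAIN QUERY PLAN output whenever a sort or DISTINCT cannot be satisfied by an index. A sketch that provokes one for ORDER BY (the wording of the other plan rows varies between SQLite releases):

#include <stdio.h>
#include <sqlite3.h>

int main(void){
  sqlite3 *db;
  sqlite3_stmt *pStmt;

  sqlite3_open(":memory:", &db);
  sqlite3_exec(db, "CREATE TABLE t(a,b);", 0, 0, 0);   /* no index on b */

  if( sqlite3_prepare_v2(db, "EXPLAIN QUERY PLAN SELECT a FROM t ORDER BY b",
                         -1, &pStmt, 0)==SQLITE_OK ){
    while( sqlite3_step(pStmt)==SQLITE_ROW ){
      /* The human-readable detail text is the last result column of each row. */
      int iDetail = sqlite3_column_count(pStmt)-1;
      printf("%s\n", (const char*)sqlite3_column_text(pStmt, iDetail));
    }
    sqlite3_finalize(pStmt);   /* output includes "USE TEMP B-TREE FOR ORDER BY" */
  }
  sqlite3_close(db);
  return 0;
}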
*/ - int iSub1, /* Subquery id 1 */ - int iSub2, /* Subquery id 2 */ - int bUseTmp /* True if a temp table was used */ -){ - assert( op==TK_UNION || op==TK_EXCEPT || op==TK_INTERSECT || op==TK_ALL ); - if( pParse->explain==2 ){ - Vdbe *v = pParse->pVdbe; - char *zMsg = sqlite3MPrintf( - pParse->db, "COMPOUND SUBQUERIES %d AND %d %s(%s)", iSub1, iSub2, - bUseTmp?"USING TEMP B-TREE ":"", selectOpName(op) - ); - sqlite3VdbeAddOp4(v, OP_Explain, pParse->iSelectId, 0, 0, zMsg, P4_DYNAMIC); - } -} -#else -/* No-op versions of the explainXXX() functions and macros. */ -# define explainComposite(v,w,x,y,z) -#endif - -/* -** If the inner loop was generated using a non-null pOrderBy argument, -** then the results were placed in a sorter. After the loop is terminated -** we need to run the sorter and output the results. The following -** routine generates the code needed to do that. -*/ -static void generateSortTail( - Parse *pParse, /* Parsing context */ - Select *p, /* The SELECT statement */ - SortCtx *pSort, /* Information on the ORDER BY clause */ - int nColumn, /* Number of columns of data */ - SelectDest *pDest /* Write the sorted results here */ -){ - Vdbe *v = pParse->pVdbe; /* The prepared statement */ - int addrBreak = sqlite3VdbeMakeLabel(v); /* Jump here to exit loop */ - int addrContinue = sqlite3VdbeMakeLabel(v); /* Jump here for next cycle */ - int addr; - int addrOnce = 0; - int iTab; - int pseudoTab = 0; - ExprList *pOrderBy = pSort->pOrderBy; - int eDest = pDest->eDest; - int iParm = pDest->iSDParm; - int regRow; - int regRowid; - int nKey; - - if( pSort->labelBkOut ){ - sqlite3VdbeAddOp2(v, OP_Gosub, pSort->regReturn, pSort->labelBkOut); - sqlite3VdbeAddOp2(v, OP_Goto, 0, addrBreak); - sqlite3VdbeResolveLabel(v, pSort->labelBkOut); - addrOnce = sqlite3CodeOnce(pParse); VdbeCoverage(v); - } - iTab = pSort->iECursor; - regRow = sqlite3GetTempReg(pParse); - if( eDest==SRT_Output || eDest==SRT_Coroutine ){ - pseudoTab = pParse->nTab++; - sqlite3VdbeAddOp3(v, OP_OpenPseudo, pseudoTab, regRow, nColumn); - regRowid = 0; - }else{ - regRowid = sqlite3GetTempReg(pParse); - } - nKey = pOrderBy->nExpr - pSort->nOBSat; - if( pSort->sortFlags & SORTFLAG_UseSorter ){ - int regSortOut = ++pParse->nMem; - int ptab2 = pParse->nTab++; - sqlite3VdbeAddOp3(v, OP_OpenPseudo, ptab2, regSortOut, nKey+2); - if( addrOnce ) sqlite3VdbeJumpHere(v, addrOnce); - addr = 1 + sqlite3VdbeAddOp2(v, OP_SorterSort, iTab, addrBreak); - VdbeCoverage(v); - codeOffset(v, p->iOffset, addrContinue); - sqlite3VdbeAddOp2(v, OP_SorterData, iTab, regSortOut); - sqlite3VdbeAddOp3(v, OP_Column, ptab2, nKey+1, regRow); - sqlite3VdbeChangeP5(v, OPFLAG_CLEARCACHE); - }else{ - if( addrOnce ) sqlite3VdbeJumpHere(v, addrOnce); - addr = 1 + sqlite3VdbeAddOp2(v, OP_Sort, iTab, addrBreak); VdbeCoverage(v); - codeOffset(v, p->iOffset, addrContinue); - sqlite3VdbeAddOp3(v, OP_Column, iTab, nKey+1, regRow); - } - switch( eDest ){ - case SRT_Table: - case SRT_EphemTab: { - testcase( eDest==SRT_Table ); - testcase( eDest==SRT_EphemTab ); - sqlite3VdbeAddOp2(v, OP_NewRowid, iParm, regRowid); - sqlite3VdbeAddOp3(v, OP_Insert, iParm, regRow, regRowid); - sqlite3VdbeChangeP5(v, OPFLAG_APPEND); - break; - } -#ifndef SQLITE_OMIT_SUBQUERY - case SRT_Set: { - assert( nColumn==1 ); - sqlite3VdbeAddOp4(v, OP_MakeRecord, regRow, 1, regRowid, - &pDest->affSdst, 1); - sqlite3ExprCacheAffinityChange(pParse, regRow, 1); - sqlite3VdbeAddOp2(v, OP_IdxInsert, iParm, regRowid); - break; - } - case SRT_Mem: { - assert( nColumn==1 ); - sqlite3ExprCodeMove(pParse, 
regRow, iParm, 1); - /* The LIMIT clause will terminate the loop for us */ - break; - } -#endif - default: { - int i; - assert( eDest==SRT_Output || eDest==SRT_Coroutine ); - testcase( eDest==SRT_Output ); - testcase( eDest==SRT_Coroutine ); - for(i=0; iiSdst+i ); - sqlite3VdbeAddOp3(v, OP_Column, pseudoTab, i, pDest->iSdst+i); - if( i==0 ){ - sqlite3VdbeChangeP5(v, OPFLAG_CLEARCACHE); - } - } - if( eDest==SRT_Output ){ - sqlite3VdbeAddOp2(v, OP_ResultRow, pDest->iSdst, nColumn); - sqlite3ExprCacheAffinityChange(pParse, pDest->iSdst, nColumn); - }else{ - sqlite3VdbeAddOp1(v, OP_Yield, pDest->iSDParm); - } - break; - } - } - sqlite3ReleaseTempReg(pParse, regRow); - sqlite3ReleaseTempReg(pParse, regRowid); - - /* The bottom of the loop - */ - sqlite3VdbeResolveLabel(v, addrContinue); - if( pSort->sortFlags & SORTFLAG_UseSorter ){ - sqlite3VdbeAddOp2(v, OP_SorterNext, iTab, addr); VdbeCoverage(v); - }else{ - sqlite3VdbeAddOp2(v, OP_Next, iTab, addr); VdbeCoverage(v); - } - if( pSort->regReturn ) sqlite3VdbeAddOp1(v, OP_Return, pSort->regReturn); - sqlite3VdbeResolveLabel(v, addrBreak); -} - -/* -** Return a pointer to a string containing the 'declaration type' of the -** expression pExpr. The string may be treated as static by the caller. -** -** Also try to estimate the size of the returned value and return that -** result in *pEstWidth. -** -** The declaration type is the exact datatype definition extracted from the -** original CREATE TABLE statement if the expression is a column. The -** declaration type for a ROWID field is INTEGER. Exactly when an expression -** is considered a column can be complex in the presence of subqueries. The -** result-set expression in all of the following SELECT statements is -** considered a column by this function. -** -** SELECT col FROM tbl; -** SELECT (SELECT col FROM tbl; -** SELECT (SELECT col FROM tbl); -** SELECT abc FROM (SELECT col AS abc FROM tbl); -** -** The declaration type for any expression other than a column is NULL. -** -** This routine has either 3 or 6 parameters depending on whether or not -** the SQLITE_ENABLE_COLUMN_METADATA compile-time option is used. -*/ -#ifdef SQLITE_ENABLE_COLUMN_METADATA -# define columnType(A,B,C,D,E,F) columnTypeImpl(A,B,C,D,E,F) -static const char *columnTypeImpl( - NameContext *pNC, - Expr *pExpr, - const char **pzOrigDb, - const char **pzOrigTab, - const char **pzOrigCol, - u8 *pEstWidth -){ - char const *zOrigDb = 0; - char const *zOrigTab = 0; - char const *zOrigCol = 0; -#else /* if !defined(SQLITE_ENABLE_COLUMN_METADATA) */ -# define columnType(A,B,C,D,E,F) columnTypeImpl(A,B,F) -static const char *columnTypeImpl( - NameContext *pNC, - Expr *pExpr, - u8 *pEstWidth -){ -#endif /* !defined(SQLITE_ENABLE_COLUMN_METADATA) */ - char const *zType = 0; - int j; - u8 estWidth = 1; - - if( NEVER(pExpr==0) || pNC->pSrcList==0 ) return 0; - switch( pExpr->op ){ - case TK_AGG_COLUMN: - case TK_COLUMN: { - /* The expression is a column. Locate the table the column is being - ** extracted from in NameContext.pSrcList. This table may be real - ** database table or a subquery. 
- */ - Table *pTab = 0; /* Table structure column is extracted from */ - Select *pS = 0; /* Select the column is extracted from */ - int iCol = pExpr->iColumn; /* Index of column in pTab */ - testcase( pExpr->op==TK_AGG_COLUMN ); - testcase( pExpr->op==TK_COLUMN ); - while( pNC && !pTab ){ - SrcList *pTabList = pNC->pSrcList; - for(j=0;jnSrc && pTabList->a[j].iCursor!=pExpr->iTable;j++); - if( jnSrc ){ - pTab = pTabList->a[j].pTab; - pS = pTabList->a[j].pSelect; - }else{ - pNC = pNC->pNext; - } - } - - if( pTab==0 ){ - /* At one time, code such as "SELECT new.x" within a trigger would - ** cause this condition to run. Since then, we have restructured how - ** trigger code is generated and so this condition is no longer - ** possible. However, it can still be true for statements like - ** the following: - ** - ** CREATE TABLE t1(col INTEGER); - ** SELECT (SELECT t1.col) FROM FROM t1; - ** - ** when columnType() is called on the expression "t1.col" in the - ** sub-select. In this case, set the column type to NULL, even - ** though it should really be "INTEGER". - ** - ** This is not a problem, as the column type of "t1.col" is never - ** used. When columnType() is called on the expression - ** "(SELECT t1.col)", the correct type is returned (see the TK_SELECT - ** branch below. */ - break; - } - - assert( pTab && pExpr->pTab==pTab ); - if( pS ){ - /* The "table" is actually a sub-select or a view in the FROM clause - ** of the SELECT statement. Return the declaration type and origin - ** data for the result-set column of the sub-select. - */ - if( iCol>=0 && ALWAYS(iColpEList->nExpr) ){ - /* If iCol is less than zero, then the expression requests the - ** rowid of the sub-select or view. This expression is legal (see - ** test case misc2.2.2) - it always evaluates to NULL. - */ - NameContext sNC; - Expr *p = pS->pEList->a[iCol].pExpr; - sNC.pSrcList = pS->pSrc; - sNC.pNext = pNC; - sNC.pParse = pNC->pParse; - zType = columnType(&sNC, p,&zOrigDb,&zOrigTab,&zOrigCol, &estWidth); - } - }else if( pTab->pSchema ){ - /* A real table */ - assert( !pS ); - if( iCol<0 ) iCol = pTab->iPKey; - assert( iCol==-1 || (iCol>=0 && iColnCol) ); -#ifdef SQLITE_ENABLE_COLUMN_METADATA - if( iCol<0 ){ - zType = "INTEGER"; - zOrigCol = "rowid"; - }else{ - zType = pTab->aCol[iCol].zType; - zOrigCol = pTab->aCol[iCol].zName; - estWidth = pTab->aCol[iCol].szEst; - } - zOrigTab = pTab->zName; - if( pNC->pParse ){ - int iDb = sqlite3SchemaToIndex(pNC->pParse->db, pTab->pSchema); - zOrigDb = pNC->pParse->db->aDb[iDb].zName; - } -#else - if( iCol<0 ){ - zType = "INTEGER"; - }else{ - zType = pTab->aCol[iCol].zType; - estWidth = pTab->aCol[iCol].szEst; - } -#endif - } - break; - } -#ifndef SQLITE_OMIT_SUBQUERY - case TK_SELECT: { - /* The expression is a sub-select. Return the declaration type and - ** origin info for the single column in the result set of the SELECT - ** statement. 
- */ - NameContext sNC; - Select *pS = pExpr->x.pSelect; - Expr *p = pS->pEList->a[0].pExpr; - assert( ExprHasProperty(pExpr, EP_xIsSelect) ); - sNC.pSrcList = pS->pSrc; - sNC.pNext = pNC; - sNC.pParse = pNC->pParse; - zType = columnType(&sNC, p, &zOrigDb, &zOrigTab, &zOrigCol, &estWidth); - break; - } -#endif - } - -#ifdef SQLITE_ENABLE_COLUMN_METADATA - if( pzOrigDb ){ - assert( pzOrigTab && pzOrigCol ); - *pzOrigDb = zOrigDb; - *pzOrigTab = zOrigTab; - *pzOrigCol = zOrigCol; - } -#endif - if( pEstWidth ) *pEstWidth = estWidth; - return zType; -} - -/* -** Generate code that will tell the VDBE the declaration types of columns -** in the result set. -*/ -static void generateColumnTypes( - Parse *pParse, /* Parser context */ - SrcList *pTabList, /* List of tables */ - ExprList *pEList /* Expressions defining the result set */ -){ -#ifndef SQLITE_OMIT_DECLTYPE - Vdbe *v = pParse->pVdbe; - int i; - NameContext sNC; - sNC.pSrcList = pTabList; - sNC.pParse = pParse; - for(i=0; inExpr; i++){ - Expr *p = pEList->a[i].pExpr; - const char *zType; -#ifdef SQLITE_ENABLE_COLUMN_METADATA - const char *zOrigDb = 0; - const char *zOrigTab = 0; - const char *zOrigCol = 0; - zType = columnType(&sNC, p, &zOrigDb, &zOrigTab, &zOrigCol, 0); - - /* The vdbe must make its own copy of the column-type and other - ** column specific strings, in case the schema is reset before this - ** virtual machine is deleted. - */ - sqlite3VdbeSetColName(v, i, COLNAME_DATABASE, zOrigDb, SQLITE_TRANSIENT); - sqlite3VdbeSetColName(v, i, COLNAME_TABLE, zOrigTab, SQLITE_TRANSIENT); - sqlite3VdbeSetColName(v, i, COLNAME_COLUMN, zOrigCol, SQLITE_TRANSIENT); -#else - zType = columnType(&sNC, p, 0, 0, 0, 0); -#endif - sqlite3VdbeSetColName(v, i, COLNAME_DECLTYPE, zType, SQLITE_TRANSIENT); - } -#endif /* !defined(SQLITE_OMIT_DECLTYPE) */ -} - -/* -** Generate code that will tell the VDBE the names of columns -** in the result set. This information is used to provide the -** azCol[] values in the callback. 
-*/ -static void generateColumnNames( - Parse *pParse, /* Parser context */ - SrcList *pTabList, /* List of tables */ - ExprList *pEList /* Expressions defining the result set */ -){ - Vdbe *v = pParse->pVdbe; - int i, j; - sqlite3 *db = pParse->db; - int fullNames, shortNames; - -#ifndef SQLITE_OMIT_EXPLAIN - /* If this is an EXPLAIN, skip this step */ - if( pParse->explain ){ - return; - } -#endif - - if( pParse->colNamesSet || NEVER(v==0) || db->mallocFailed ) return; - pParse->colNamesSet = 1; - fullNames = (db->flags & SQLITE_FullColNames)!=0; - shortNames = (db->flags & SQLITE_ShortColNames)!=0; - sqlite3VdbeSetNumCols(v, pEList->nExpr); - for(i=0; inExpr; i++){ - Expr *p; - p = pEList->a[i].pExpr; - if( NEVER(p==0) ) continue; - if( pEList->a[i].zName ){ - char *zName = pEList->a[i].zName; - sqlite3VdbeSetColName(v, i, COLNAME_NAME, zName, SQLITE_TRANSIENT); - }else if( (p->op==TK_COLUMN || p->op==TK_AGG_COLUMN) && pTabList ){ - Table *pTab; - char *zCol; - int iCol = p->iColumn; - for(j=0; ALWAYS(jnSrc); j++){ - if( pTabList->a[j].iCursor==p->iTable ) break; - } - assert( jnSrc ); - pTab = pTabList->a[j].pTab; - if( iCol<0 ) iCol = pTab->iPKey; - assert( iCol==-1 || (iCol>=0 && iColnCol) ); - if( iCol<0 ){ - zCol = "rowid"; - }else{ - zCol = pTab->aCol[iCol].zName; - } - if( !shortNames && !fullNames ){ - sqlite3VdbeSetColName(v, i, COLNAME_NAME, - sqlite3DbStrDup(db, pEList->a[i].zSpan), SQLITE_DYNAMIC); - }else if( fullNames ){ - char *zName = 0; - zName = sqlite3MPrintf(db, "%s.%s", pTab->zName, zCol); - sqlite3VdbeSetColName(v, i, COLNAME_NAME, zName, SQLITE_DYNAMIC); - }else{ - sqlite3VdbeSetColName(v, i, COLNAME_NAME, zCol, SQLITE_TRANSIENT); - } - }else{ - const char *z = pEList->a[i].zSpan; - z = z==0 ? sqlite3MPrintf(db, "column%d", i+1) : sqlite3DbStrDup(db, z); - sqlite3VdbeSetColName(v, i, COLNAME_NAME, z, SQLITE_DYNAMIC); - } - } - generateColumnTypes(pParse, pTabList, pEList); -} - -/* -** Given a an expression list (which is really the list of expressions -** that form the result set of a SELECT statement) compute appropriate -** column names for a table that would hold the expression list. -** -** All column names will be unique. -** -** Only the column names are computed. Column.zType, Column.zColl, -** and other fields of Column are zeroed. -** -** Return SQLITE_OK on success. If a memory allocation error occurs, -** store NULL in *paCol and 0 in *pnCol and return SQLITE_NOMEM. 
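The naming rules described above (an explicit "AS" alias wins, otherwise the underlying column name, otherwise the text of the expression) and the declaration types computed by columnType() are both observable through the public API. Below is a minimal standalone sketch, assuming a throwaway in-memory database and a made-up table t1; it is not part of the deleted file.

    #include <stdio.h>
    #include <sqlite3.h>

    int main(void){
      sqlite3 *db;
      sqlite3_stmt *pStmt;
      int i;
      sqlite3_open(":memory:", &db);
      sqlite3_exec(db, "CREATE TABLE t1(col INTEGER, x TEXT);", 0, 0, 0);
      /* Result columns: an AS alias, a plain column reference, an expression. */
      sqlite3_prepare_v2(db, "SELECT col AS c, x, col+1 FROM t1;", -1, &pStmt, 0);
      for(i=0; i<sqlite3_column_count(pStmt); i++){
        const char *zType = sqlite3_column_decltype(pStmt, i);  /* NULL for col+1 */
        printf("name=%-6s decltype=%s\n",
               sqlite3_column_name(pStmt, i),   /* "c", "x", and (typically) "col+1" */
               zType ? zType : "(null)");       /* "INTEGER", "TEXT", "(null)" */
      }
      sqlite3_finalize(pStmt);
      sqlite3_close(db);
      return 0;
    }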
-*/ -static int selectColumnsFromExprList( - Parse *pParse, /* Parsing context */ - ExprList *pEList, /* Expr list from which to derive column names */ - i16 *pnCol, /* Write the number of columns here */ - Column **paCol /* Write the new column list here */ -){ - sqlite3 *db = pParse->db; /* Database connection */ - int i, j; /* Loop counters */ - int cnt; /* Index added to make the name unique */ - Column *aCol, *pCol; /* For looping over result columns */ - int nCol; /* Number of columns in the result set */ - Expr *p; /* Expression for a single result column */ - char *zName; /* Column name */ - int nName; /* Size of name in zName[] */ - - if( pEList ){ - nCol = pEList->nExpr; - aCol = sqlite3DbMallocZero(db, sizeof(aCol[0])*nCol); - testcase( aCol==0 ); - }else{ - nCol = 0; - aCol = 0; - } - *pnCol = nCol; - *paCol = aCol; - - for(i=0, pCol=aCol; ia[i].pExpr); - if( (zName = pEList->a[i].zName)!=0 ){ - /* If the column contains an "AS " phrase, use as the name */ - zName = sqlite3DbStrDup(db, zName); - }else{ - Expr *pColExpr = p; /* The expression that is the result column name */ - Table *pTab; /* Table associated with this expression */ - while( pColExpr->op==TK_DOT ){ - pColExpr = pColExpr->pRight; - assert( pColExpr!=0 ); - } - if( pColExpr->op==TK_COLUMN && ALWAYS(pColExpr->pTab!=0) ){ - /* For columns use the column name name */ - int iCol = pColExpr->iColumn; - pTab = pColExpr->pTab; - if( iCol<0 ) iCol = pTab->iPKey; - zName = sqlite3MPrintf(db, "%s", - iCol>=0 ? pTab->aCol[iCol].zName : "rowid"); - }else if( pColExpr->op==TK_ID ){ - assert( !ExprHasProperty(pColExpr, EP_IntValue) ); - zName = sqlite3MPrintf(db, "%s", pColExpr->u.zToken); - }else{ - /* Use the original text of the column expression as its name */ - zName = sqlite3MPrintf(db, "%s", pEList->a[i].zSpan); - } - } - if( db->mallocFailed ){ - sqlite3DbFree(db, zName); - break; - } - - /* Make sure the column name is unique. If the name is not unique, - ** append a integer to the name so that it becomes unique. - */ - nName = sqlite3Strlen30(zName); - for(j=cnt=0; j1 && sqlite3Isdigit(zName[k]); k--){} - if( k>=0 && zName[k]==':' ) nName = k; - zName[nName] = 0; - zNewName = sqlite3MPrintf(db, "%s:%d", zName, ++cnt); - sqlite3DbFree(db, zName); - zName = zNewName; - j = -1; - if( zName==0 ) break; - } - } - pCol->zName = zName; - } - if( db->mallocFailed ){ - for(j=0; jdb; - NameContext sNC; - Column *pCol; - CollSeq *pColl; - int i; - Expr *p; - struct ExprList_item *a; - u64 szAll = 0; - - assert( pSelect!=0 ); - assert( (pSelect->selFlags & SF_Resolved)!=0 ); - assert( pTab->nCol==pSelect->pEList->nExpr || db->mallocFailed ); - if( db->mallocFailed ) return; - memset(&sNC, 0, sizeof(sNC)); - sNC.pSrcList = pSelect->pSrc; - a = pSelect->pEList->a; - for(i=0, pCol=pTab->aCol; inCol; i++, pCol++){ - p = a[i].pExpr; - pCol->zType = sqlite3DbStrDup(db, columnType(&sNC, p,0,0,0, &pCol->szEst)); - szAll += pCol->szEst; - pCol->affinity = sqlite3ExprAffinity(p); - if( pCol->affinity==0 ) pCol->affinity = SQLITE_AFF_NONE; - pColl = sqlite3ExprCollSeq(pParse, p); - if( pColl ){ - pCol->zColl = sqlite3DbStrDup(db, pColl->zName); - } - } - pTab->szTabRow = sqlite3LogEst(szAll*4); -} - -/* -** Given a SELECT statement, generate a Table structure that describes -** the result set of that SELECT. 
-*/ -SQLITE_PRIVATE Table *sqlite3ResultSetOfSelect(Parse *pParse, Select *pSelect){ - Table *pTab; - sqlite3 *db = pParse->db; - int savedFlags; - - savedFlags = db->flags; - db->flags &= ~SQLITE_FullColNames; - db->flags |= SQLITE_ShortColNames; - sqlite3SelectPrep(pParse, pSelect, 0); - if( pParse->nErr ) return 0; - while( pSelect->pPrior ) pSelect = pSelect->pPrior; - db->flags = savedFlags; - pTab = sqlite3DbMallocZero(db, sizeof(Table) ); - if( pTab==0 ){ - return 0; - } - /* The sqlite3ResultSetOfSelect() is only used n contexts where lookaside - ** is disabled */ - assert( db->lookaside.bEnabled==0 ); - pTab->nRef = 1; - pTab->zName = 0; - pTab->nRowLogEst = 200; assert( 200==sqlite3LogEst(1048576) ); - selectColumnsFromExprList(pParse, pSelect->pEList, &pTab->nCol, &pTab->aCol); - selectAddColumnTypeAndCollation(pParse, pTab, pSelect); - pTab->iPKey = -1; - if( db->mallocFailed ){ - sqlite3DeleteTable(db, pTab); - return 0; - } - return pTab; -} - -/* -** Get a VDBE for the given parser context. Create a new one if necessary. -** If an error occurs, return NULL and leave a message in pParse. -*/ -SQLITE_PRIVATE Vdbe *sqlite3GetVdbe(Parse *pParse){ - Vdbe *v = pParse->pVdbe; - if( v==0 ){ - v = pParse->pVdbe = sqlite3VdbeCreate(pParse); - if( v ) sqlite3VdbeAddOp0(v, OP_Init); - if( pParse->pToplevel==0 - && OptimizationEnabled(pParse->db,SQLITE_FactorOutConst) - ){ - pParse->okConstFactor = 1; - } - - } - return v; -} - - -/* -** Compute the iLimit and iOffset fields of the SELECT based on the -** pLimit and pOffset expressions. pLimit and pOffset hold the expressions -** that appear in the original SQL statement after the LIMIT and OFFSET -** keywords. Or NULL if those keywords are omitted. iLimit and iOffset -** are the integer memory register numbers for counters used to compute -** the limit and offset. If there is no limit and/or offset, then -** iLimit and iOffset are negative. -** -** This routine changes the values of iLimit and iOffset only if -** a limit or offset is defined by pLimit and pOffset. iLimit and -** iOffset should have been preset to appropriate default values (zero) -** prior to calling this routine. -** -** The iOffset register (if it exists) is initialized to the value -** of the OFFSET. The iLimit register is initialized to LIMIT. Register -** iOffset+1 is initialized to LIMIT+OFFSET. -** -** Only if pLimit!=0 or pOffset!=0 do the limit registers get -** redefined. The UNION ALL operator uses this property to force -** the reuse of the same limit and offset registers across multiple -** SELECT statements. -*/ -static void computeLimitRegisters(Parse *pParse, Select *p, int iBreak){ - Vdbe *v = 0; - int iLimit = 0; - int iOffset; - int addr1, n; - if( p->iLimit ) return; - - /* - ** "LIMIT -1" always shows all rows. There is some - ** controversy about what the correct behavior should be. - ** The current implementation interprets "LIMIT 0" to mean - ** no rows. 
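The LIMIT/OFFSET conventions spelled out above are easy to confirm from the public API: a negative LIMIT means "all rows", LIMIT 0 means none, and OFFSET rows are skipped before the LIMIT counter starts. A small standalone check follows, with a made-up table t and a row-counting callback; it is a sketch, not part of this file.

    #include <stdio.h>
    #include <sqlite3.h>

    static int countRow(void *pCount, int nCol, char **azVal, char **azName){
      (void)nCol; (void)azVal; (void)azName;
      ++*(int*)pCount;
      return 0;
    }

    int main(void){
      sqlite3 *db;
      int n;
      sqlite3_open(":memory:", &db);
      sqlite3_exec(db, "CREATE TABLE t(x); INSERT INTO t VALUES(1),(2),(3),(4);", 0, 0, 0);

      n = 0; sqlite3_exec(db, "SELECT x FROM t LIMIT -1;", countRow, &n, 0);
      printf("LIMIT -1         -> %d rows\n", n);   /* all 4 rows */

      n = 0; sqlite3_exec(db, "SELECT x FROM t LIMIT 0;", countRow, &n, 0);
      printf("LIMIT 0          -> %d rows\n", n);   /* 0 rows */

      n = 0; sqlite3_exec(db, "SELECT x FROM t LIMIT 2 OFFSET 1;", countRow, &n, 0);
      printf("LIMIT 2 OFFSET 1 -> %d rows\n", n);   /* 2 rows: the 2nd and 3rd */

      sqlite3_close(db);
      return 0;
    }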
- */ - sqlite3ExprCacheClear(pParse); - assert( p->pOffset==0 || p->pLimit!=0 ); - if( p->pLimit ){ - p->iLimit = iLimit = ++pParse->nMem; - v = sqlite3GetVdbe(pParse); - assert( v!=0 ); - if( sqlite3ExprIsInteger(p->pLimit, &n) ){ - sqlite3VdbeAddOp2(v, OP_Integer, n, iLimit); - VdbeComment((v, "LIMIT counter")); - if( n==0 ){ - sqlite3VdbeAddOp2(v, OP_Goto, 0, iBreak); - }else if( n>=0 && p->nSelectRow>(u64)n ){ - p->nSelectRow = n; - } - }else{ - sqlite3ExprCode(pParse, p->pLimit, iLimit); - sqlite3VdbeAddOp1(v, OP_MustBeInt, iLimit); VdbeCoverage(v); - VdbeComment((v, "LIMIT counter")); - sqlite3VdbeAddOp2(v, OP_IfZero, iLimit, iBreak); VdbeCoverage(v); - } - if( p->pOffset ){ - p->iOffset = iOffset = ++pParse->nMem; - pParse->nMem++; /* Allocate an extra register for limit+offset */ - sqlite3ExprCode(pParse, p->pOffset, iOffset); - sqlite3VdbeAddOp1(v, OP_MustBeInt, iOffset); VdbeCoverage(v); - VdbeComment((v, "OFFSET counter")); - addr1 = sqlite3VdbeAddOp1(v, OP_IfPos, iOffset); VdbeCoverage(v); - sqlite3VdbeAddOp2(v, OP_Integer, 0, iOffset); - sqlite3VdbeJumpHere(v, addr1); - sqlite3VdbeAddOp3(v, OP_Add, iLimit, iOffset, iOffset+1); - VdbeComment((v, "LIMIT+OFFSET")); - addr1 = sqlite3VdbeAddOp1(v, OP_IfPos, iLimit); VdbeCoverage(v); - sqlite3VdbeAddOp2(v, OP_Integer, -1, iOffset+1); - sqlite3VdbeJumpHere(v, addr1); - } - } -} - -#ifndef SQLITE_OMIT_COMPOUND_SELECT -/* -** Return the appropriate collating sequence for the iCol-th column of -** the result set for the compound-select statement "p". Return NULL if -** the column has no default collating sequence. -** -** The collating sequence for the compound select is taken from the -** left-most term of the select that has a collating sequence. -*/ -static CollSeq *multiSelectCollSeq(Parse *pParse, Select *p, int iCol){ - CollSeq *pRet; - if( p->pPrior ){ - pRet = multiSelectCollSeq(pParse, p->pPrior, iCol); - }else{ - pRet = 0; - } - assert( iCol>=0 ); - if( pRet==0 && iColpEList->nExpr ){ - pRet = sqlite3ExprCollSeq(pParse, p->pEList->a[iCol].pExpr); - } - return pRet; -} - -/* -** The select statement passed as the second parameter is a compound SELECT -** with an ORDER BY clause. This function allocates and returns a KeyInfo -** structure suitable for implementing the ORDER BY. -** -** Space to hold the KeyInfo structure is obtained from malloc. The calling -** function is responsible for ensuring that this structure is eventually -** freed. 
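The left-most-term rule implemented by multiSelectCollSeq() above has a visible effect on duplicate removal in compound selects. The standalone sketch below (made-up query, not part of the deleted file) relies on that rule: the COLLATE NOCASE on the left-most term supplies the comparison collation, so the UNION keeps only one of the two strings.

    #include <stdio.h>
    #include <sqlite3.h>

    static int printRow(void *p, int nCol, char **azVal, char **azName){
      (void)p; (void)nCol; (void)azName;
      printf("%s\n", azVal[0] ? azVal[0] : "NULL");
      return 0;
    }

    int main(void){
      sqlite3 *db;
      sqlite3_open(":memory:", &db);
      /* 'abc' and 'ABC' compare equal under NOCASE, so only one row survives;
      ** drop the COLLATE clause from the left-most term and both rows appear. */
      sqlite3_exec(db, "SELECT 'abc' COLLATE NOCASE UNION SELECT 'ABC';",
                   printRow, 0, 0);
      sqlite3_close(db);
      return 0;
    }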
-*/ -static KeyInfo *multiSelectOrderByKeyInfo(Parse *pParse, Select *p, int nExtra){ - ExprList *pOrderBy = p->pOrderBy; - int nOrderBy = p->pOrderBy->nExpr; - sqlite3 *db = pParse->db; - KeyInfo *pRet = sqlite3KeyInfoAlloc(db, nOrderBy+nExtra, 1); - if( pRet ){ - int i; - for(i=0; ia[i]; - Expr *pTerm = pItem->pExpr; - CollSeq *pColl; - - if( pTerm->flags & EP_Collate ){ - pColl = sqlite3ExprCollSeq(pParse, pTerm); - }else{ - pColl = multiSelectCollSeq(pParse, p, pItem->u.x.iOrderByCol-1); - if( pColl==0 ) pColl = db->pDfltColl; - pOrderBy->a[i].pExpr = - sqlite3ExprAddCollateString(pParse, pTerm, pColl->zName); - } - assert( sqlite3KeyInfoIsWriteable(pRet) ); - pRet->aColl[i] = pColl; - pRet->aSortOrder[i] = pOrderBy->a[i].sortOrder; - } - } - - return pRet; -} - -#ifndef SQLITE_OMIT_CTE -/* -** This routine generates VDBE code to compute the content of a WITH RECURSIVE -** query of the form: -** -** AS ( UNION [ALL] ) -** \___________/ \_______________/ -** p->pPrior p -** -** -** There is exactly one reference to the recursive-table in the FROM clause -** of recursive-query, marked with the SrcList->a[].isRecursive flag. -** -** The setup-query runs once to generate an initial set of rows that go -** into a Queue table. Rows are extracted from the Queue table one by -** one. Each row extracted from Queue is output to pDest. Then the single -** extracted row (now in the iCurrent table) becomes the content of the -** recursive-table for a recursive-query run. The output of the recursive-query -** is added back into the Queue table. Then another row is extracted from Queue -** and the iteration continues until the Queue table is empty. -** -** If the compound query operator is UNION then no duplicate rows are ever -** inserted into the Queue table. The iDistinct table keeps a copy of all rows -** that have ever been inserted into Queue and causes duplicates to be -** discarded. If the operator is UNION ALL, then duplicates are allowed. -** -** If the query has an ORDER BY, then entries in the Queue table are kept in -** ORDER BY order and the first entry is extracted for each cycle. Without -** an ORDER BY, the Queue table is just a FIFO. -** -** If a LIMIT clause is provided, then the iteration stops after LIMIT rows -** have been output to pDest. A LIMIT of zero means to output no rows and a -** negative LIMIT means to output all rows. If there is also an OFFSET clause -** with a positive value, then the first OFFSET outputs are discarded rather -** than being sent to pDest. The LIMIT count does not begin until after OFFSET -** rows have been skipped. 
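The query shape this routine handles is ordinary SQL. The standalone sketch below (made-up CTE named cnt, not part of the deleted file) shows a setup-query seeding the Queue and a recursive-query refilling it until the WHERE clause stops producing rows; with UNION instead of UNION ALL the iDistinct table described above would additionally drop duplicate rows.

    #include <stdio.h>
    #include <sqlite3.h>

    static int printRow(void *p, int nCol, char **azVal, char **azName){
      (void)p; (void)nCol; (void)azName;
      printf("%s\n", azVal[0] ? azVal[0] : "NULL");
      return 0;
    }

    int main(void){
      sqlite3 *db;
      sqlite3_open(":memory:", &db);
      sqlite3_exec(db,
        "WITH RECURSIVE cnt(x) AS ("
        "  SELECT 1"                        /* setup-query: seeds the Queue */
        "  UNION ALL"
        "  SELECT x+1 FROM cnt WHERE x<5"   /* recursive-query: refills the Queue */
        ") SELECT x FROM cnt;",
        printRow, 0, 0);                    /* prints 1 through 5 */
      sqlite3_close(db);
      return 0;
    }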
-*/ -static void generateWithRecursiveQuery( - Parse *pParse, /* Parsing context */ - Select *p, /* The recursive SELECT to be coded */ - SelectDest *pDest /* What to do with query results */ -){ - SrcList *pSrc = p->pSrc; /* The FROM clause of the recursive query */ - int nCol = p->pEList->nExpr; /* Number of columns in the recursive table */ - Vdbe *v = pParse->pVdbe; /* The prepared statement under construction */ - Select *pSetup = p->pPrior; /* The setup query */ - int addrTop; /* Top of the loop */ - int addrCont, addrBreak; /* CONTINUE and BREAK addresses */ - int iCurrent = 0; /* The Current table */ - int regCurrent; /* Register holding Current table */ - int iQueue; /* The Queue table */ - int iDistinct = 0; /* To ensure unique results if UNION */ - int eDest = SRT_Fifo; /* How to write to Queue */ - SelectDest destQueue; /* SelectDest targetting the Queue table */ - int i; /* Loop counter */ - int rc; /* Result code */ - ExprList *pOrderBy; /* The ORDER BY clause */ - Expr *pLimit, *pOffset; /* Saved LIMIT and OFFSET */ - int regLimit, regOffset; /* Registers used by LIMIT and OFFSET */ - - /* Obtain authorization to do a recursive query */ - if( sqlite3AuthCheck(pParse, SQLITE_RECURSIVE, 0, 0, 0) ) return; - - /* Process the LIMIT and OFFSET clauses, if they exist */ - addrBreak = sqlite3VdbeMakeLabel(v); - computeLimitRegisters(pParse, p, addrBreak); - pLimit = p->pLimit; - pOffset = p->pOffset; - regLimit = p->iLimit; - regOffset = p->iOffset; - p->pLimit = p->pOffset = 0; - p->iLimit = p->iOffset = 0; - pOrderBy = p->pOrderBy; - - /* Locate the cursor number of the Current table */ - for(i=0; ALWAYS(inSrc); i++){ - if( pSrc->a[i].isRecursive ){ - iCurrent = pSrc->a[i].iCursor; - break; - } - } - - /* Allocate cursors numbers for Queue and Distinct. The cursor number for - ** the Distinct table must be exactly one greater than Queue in order - ** for the SRT_DistFifo and SRT_DistQueue destinations to work. */ - iQueue = pParse->nTab++; - if( p->op==TK_UNION ){ - eDest = pOrderBy ? SRT_DistQueue : SRT_DistFifo; - iDistinct = pParse->nTab++; - }else{ - eDest = pOrderBy ? SRT_Queue : SRT_Fifo; - } - sqlite3SelectDestInit(&destQueue, eDest, iQueue); - - /* Allocate cursors for Current, Queue, and Distinct. */ - regCurrent = ++pParse->nMem; - sqlite3VdbeAddOp3(v, OP_OpenPseudo, iCurrent, regCurrent, nCol); - if( pOrderBy ){ - KeyInfo *pKeyInfo = multiSelectOrderByKeyInfo(pParse, p, 1); - sqlite3VdbeAddOp4(v, OP_OpenEphemeral, iQueue, pOrderBy->nExpr+2, 0, - (char*)pKeyInfo, P4_KEYINFO); - destQueue.pOrderBy = pOrderBy; - }else{ - sqlite3VdbeAddOp2(v, OP_OpenEphemeral, iQueue, nCol); - } - VdbeComment((v, "Queue table")); - if( iDistinct ){ - p->addrOpenEphm[0] = sqlite3VdbeAddOp2(v, OP_OpenEphemeral, iDistinct, 0); - p->selFlags |= SF_UsesEphemeral; - } - - /* Detach the ORDER BY clause from the compound SELECT */ - p->pOrderBy = 0; - - /* Store the results of the setup-query in Queue. 
*/ - pSetup->pNext = 0; - rc = sqlite3Select(pParse, pSetup, &destQueue); - pSetup->pNext = p; - if( rc ) goto end_of_recursive_query; - - /* Find the next row in the Queue and output that row */ - addrTop = sqlite3VdbeAddOp2(v, OP_Rewind, iQueue, addrBreak); VdbeCoverage(v); - - /* Transfer the next row in Queue over to Current */ - sqlite3VdbeAddOp1(v, OP_NullRow, iCurrent); /* To reset column cache */ - if( pOrderBy ){ - sqlite3VdbeAddOp3(v, OP_Column, iQueue, pOrderBy->nExpr+1, regCurrent); - }else{ - sqlite3VdbeAddOp2(v, OP_RowData, iQueue, regCurrent); - } - sqlite3VdbeAddOp1(v, OP_Delete, iQueue); - - /* Output the single row in Current */ - addrCont = sqlite3VdbeMakeLabel(v); - codeOffset(v, regOffset, addrCont); - selectInnerLoop(pParse, p, p->pEList, iCurrent, - 0, 0, pDest, addrCont, addrBreak); - if( regLimit ){ - sqlite3VdbeAddOp3(v, OP_IfZero, regLimit, addrBreak, -1); - VdbeCoverage(v); - } - sqlite3VdbeResolveLabel(v, addrCont); - - /* Execute the recursive SELECT taking the single row in Current as - ** the value for the recursive-table. Store the results in the Queue. - */ - p->pPrior = 0; - sqlite3Select(pParse, p, &destQueue); - assert( p->pPrior==0 ); - p->pPrior = pSetup; - - /* Keep running the loop until the Queue is empty */ - sqlite3VdbeAddOp2(v, OP_Goto, 0, addrTop); - sqlite3VdbeResolveLabel(v, addrBreak); - -end_of_recursive_query: - sqlite3ExprListDelete(pParse->db, p->pOrderBy); - p->pOrderBy = pOrderBy; - p->pLimit = pLimit; - p->pOffset = pOffset; - return; -} -#endif /* SQLITE_OMIT_CTE */ - -/* Forward references */ -static int multiSelectOrderBy( - Parse *pParse, /* Parsing context */ - Select *p, /* The right-most of SELECTs to be coded */ - SelectDest *pDest /* What to do with query results */ -); - - -/* -** This routine is called to process a compound query form from -** two or more separate queries using UNION, UNION ALL, EXCEPT, or -** INTERSECT -** -** "p" points to the right-most of the two queries. the query on the -** left is p->pPrior. The left query could also be a compound query -** in which case this routine will be called recursively. -** -** The results of the total query are to be written into a destination -** of type eDest with parameter iParm. -** -** Example 1: Consider a three-way compound SQL statement. -** -** SELECT a FROM t1 UNION SELECT b FROM t2 UNION SELECT c FROM t3 -** -** This statement is parsed up as follows: -** -** SELECT c FROM t3 -** | -** `-----> SELECT b FROM t2 -** | -** `------> SELECT a FROM t1 -** -** The arrows in the diagram above represent the Select.pPrior pointer. -** So if this routine is called with p equal to the t3 query, then -** pPrior will be the t2 query. p->op will be TK_UNION in this case. -** -** Notice that because of the way SQLite parses compound SELECTs, the -** individual selects always group from left to right. 
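The pPrior linked list described above can be pictured with a toy structure. The sketch below uses a hypothetical MiniSelect type (not SQLite's real Select) purely to show how the right-most term is the entry point and how walking pPrior reaches the left-most term, the same pattern the deleted code uses in "while( pFirst->pPrior ) pFirst = pFirst->pPrior;".

    #include <stdio.h>

    typedef struct MiniSelect MiniSelect;
    struct MiniSelect {
      const char *zSql;      /* Text of this simple SELECT, for illustration */
      MiniSelect *pPrior;    /* SELECT to the left, or NULL for the left-most */
    };

    int main(void){
      /* SELECT a FROM t1 UNION SELECT b FROM t2 UNION SELECT c FROM t3 */
      MiniSelect t1 = { "SELECT a FROM t1", 0 };
      MiniSelect t2 = { "SELECT b FROM t2", &t1 };
      MiniSelect t3 = { "SELECT c FROM t3", &t2 };   /* the parser hands back this one */

      /* Left-to-right grouping: evaluation starts from the left-most term. */
      const MiniSelect *pFirst = &t3;
      while( pFirst->pPrior ) pFirst = pFirst->pPrior;
      printf("left-most term: %s\n", pFirst->zSql);  /* SELECT a FROM t1 */
      return 0;
    }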
-*/ -static int multiSelect( - Parse *pParse, /* Parsing context */ - Select *p, /* The right-most of SELECTs to be coded */ - SelectDest *pDest /* What to do with query results */ -){ - int rc = SQLITE_OK; /* Success code from a subroutine */ - Select *pPrior; /* Another SELECT immediately to our left */ - Vdbe *v; /* Generate code to this VDBE */ - SelectDest dest; /* Alternative data destination */ - Select *pDelete = 0; /* Chain of simple selects to delete */ - sqlite3 *db; /* Database connection */ -#ifndef SQLITE_OMIT_EXPLAIN - int iSub1 = 0; /* EQP id of left-hand query */ - int iSub2 = 0; /* EQP id of right-hand query */ -#endif - - /* Make sure there is no ORDER BY or LIMIT clause on prior SELECTs. Only - ** the last (right-most) SELECT in the series may have an ORDER BY or LIMIT. - */ - assert( p && p->pPrior ); /* Calling function guarantees this much */ - assert( (p->selFlags & SF_Recursive)==0 || p->op==TK_ALL || p->op==TK_UNION ); - db = pParse->db; - pPrior = p->pPrior; - dest = *pDest; - if( pPrior->pOrderBy ){ - sqlite3ErrorMsg(pParse,"ORDER BY clause should come after %s not before", - selectOpName(p->op)); - rc = 1; - goto multi_select_end; - } - if( pPrior->pLimit ){ - sqlite3ErrorMsg(pParse,"LIMIT clause should come after %s not before", - selectOpName(p->op)); - rc = 1; - goto multi_select_end; - } - - v = sqlite3GetVdbe(pParse); - assert( v!=0 ); /* The VDBE already created by calling function */ - - /* Create the destination temporary table if necessary - */ - if( dest.eDest==SRT_EphemTab ){ - assert( p->pEList ); - sqlite3VdbeAddOp2(v, OP_OpenEphemeral, dest.iSDParm, p->pEList->nExpr); - sqlite3VdbeChangeP5(v, BTREE_UNORDERED); - dest.eDest = SRT_Table; - } - - /* Make sure all SELECTs in the statement have the same number of elements - ** in their result sets. - */ - assert( p->pEList && pPrior->pEList ); - if( p->pEList->nExpr!=pPrior->pEList->nExpr ){ - if( p->selFlags & SF_Values ){ - sqlite3ErrorMsg(pParse, "all VALUES must have the same number of terms"); - }else{ - sqlite3ErrorMsg(pParse, "SELECTs to the left and right of %s" - " do not have the same number of result columns", selectOpName(p->op)); - } - rc = 1; - goto multi_select_end; - } - -#ifndef SQLITE_OMIT_CTE - if( p->selFlags & SF_Recursive ){ - generateWithRecursiveQuery(pParse, p, &dest); - }else -#endif - - /* Compound SELECTs that have an ORDER BY clause are handled separately. - */ - if( p->pOrderBy ){ - return multiSelectOrderBy(pParse, p, pDest); - }else - - /* Generate code for the left and right SELECT statements. 
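Both error paths coded just above are reachable from ordinary SQL, so they can be exercised with nothing more than sqlite3_prepare_v2(). A standalone sketch follows (made-up statements and a helper tryStmt, not part of the deleted file); the column-count and ORDER-BY placement checks reject the second and third statements, though the precise wording can differ between SQLite versions.

    #include <stdio.h>
    #include <sqlite3.h>

    static void tryStmt(sqlite3 *db, const char *zSql){
      sqlite3_stmt *pStmt = 0;
      int rc = sqlite3_prepare_v2(db, zSql, -1, &pStmt, 0);
      printf("%-45s -> %s\n", zSql, rc==SQLITE_OK ? "ok" : sqlite3_errmsg(db));
      sqlite3_finalize(pStmt);
    }

    int main(void){
      sqlite3 *db;
      sqlite3_open(":memory:", &db);
      tryStmt(db, "SELECT 1 UNION SELECT 2");            /* ok */
      tryStmt(db, "SELECT 1 UNION SELECT 2, 3");         /* rejected: column counts differ */
      tryStmt(db, "SELECT 1 ORDER BY 1 UNION SELECT 2"); /* rejected: ORDER BY before UNION */
      tryStmt(db, "SELECT 1 UNION SELECT 2 ORDER BY 1"); /* ok: ORDER BY on the last arm */
      sqlite3_close(db);
      return 0;
    }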
- */ - switch( p->op ){ - case TK_ALL: { - int addr = 0; - int nLimit; - assert( !pPrior->pLimit ); - pPrior->iLimit = p->iLimit; - pPrior->iOffset = p->iOffset; - pPrior->pLimit = p->pLimit; - pPrior->pOffset = p->pOffset; - explainSetInteger(iSub1, pParse->iNextSelectId); - rc = sqlite3Select(pParse, pPrior, &dest); - p->pLimit = 0; - p->pOffset = 0; - if( rc ){ - goto multi_select_end; - } - p->pPrior = 0; - p->iLimit = pPrior->iLimit; - p->iOffset = pPrior->iOffset; - if( p->iLimit ){ - addr = sqlite3VdbeAddOp1(v, OP_IfZero, p->iLimit); VdbeCoverage(v); - VdbeComment((v, "Jump ahead if LIMIT reached")); - } - explainSetInteger(iSub2, pParse->iNextSelectId); - rc = sqlite3Select(pParse, p, &dest); - testcase( rc!=SQLITE_OK ); - pDelete = p->pPrior; - p->pPrior = pPrior; - p->nSelectRow += pPrior->nSelectRow; - if( pPrior->pLimit - && sqlite3ExprIsInteger(pPrior->pLimit, &nLimit) - && nLimit>0 && p->nSelectRow > (u64)nLimit - ){ - p->nSelectRow = nLimit; - } - if( addr ){ - sqlite3VdbeJumpHere(v, addr); - } - break; - } - case TK_EXCEPT: - case TK_UNION: { - int unionTab; /* Cursor number of the temporary table holding result */ - u8 op = 0; /* One of the SRT_ operations to apply to self */ - int priorOp; /* The SRT_ operation to apply to prior selects */ - Expr *pLimit, *pOffset; /* Saved values of p->nLimit and p->nOffset */ - int addr; - SelectDest uniondest; - - testcase( p->op==TK_EXCEPT ); - testcase( p->op==TK_UNION ); - priorOp = SRT_Union; - if( dest.eDest==priorOp ){ - /* We can reuse a temporary table generated by a SELECT to our - ** right. - */ - assert( p->pLimit==0 ); /* Not allowed on leftward elements */ - assert( p->pOffset==0 ); /* Not allowed on leftward elements */ - unionTab = dest.iSDParm; - }else{ - /* We will need to create our own temporary table to hold the - ** intermediate results. - */ - unionTab = pParse->nTab++; - assert( p->pOrderBy==0 ); - addr = sqlite3VdbeAddOp2(v, OP_OpenEphemeral, unionTab, 0); - assert( p->addrOpenEphm[0] == -1 ); - p->addrOpenEphm[0] = addr; - findRightmost(p)->selFlags |= SF_UsesEphemeral; - assert( p->pEList ); - } - - /* Code the SELECT statements to our left - */ - assert( !pPrior->pOrderBy ); - sqlite3SelectDestInit(&uniondest, priorOp, unionTab); - explainSetInteger(iSub1, pParse->iNextSelectId); - rc = sqlite3Select(pParse, pPrior, &uniondest); - if( rc ){ - goto multi_select_end; - } - - /* Code the current SELECT statement - */ - if( p->op==TK_EXCEPT ){ - op = SRT_Except; - }else{ - assert( p->op==TK_UNION ); - op = SRT_Union; - } - p->pPrior = 0; - pLimit = p->pLimit; - p->pLimit = 0; - pOffset = p->pOffset; - p->pOffset = 0; - uniondest.eDest = op; - explainSetInteger(iSub2, pParse->iNextSelectId); - rc = sqlite3Select(pParse, p, &uniondest); - testcase( rc!=SQLITE_OK ); - /* Query flattening in sqlite3Select() might refill p->pOrderBy. - ** Be sure to delete p->pOrderBy, therefore, to avoid a memory leak. */ - sqlite3ExprListDelete(db, p->pOrderBy); - pDelete = p->pPrior; - p->pPrior = pPrior; - p->pOrderBy = 0; - if( p->op==TK_UNION ) p->nSelectRow += pPrior->nSelectRow; - sqlite3ExprDelete(db, p->pLimit); - p->pLimit = pLimit; - p->pOffset = pOffset; - p->iLimit = 0; - p->iOffset = 0; - - /* Convert the data in the temporary table into whatever form - ** it is that we currently need. 
- */ - assert( unionTab==dest.iSDParm || dest.eDest!=priorOp ); - if( dest.eDest!=priorOp ){ - int iCont, iBreak, iStart; - assert( p->pEList ); - if( dest.eDest==SRT_Output ){ - Select *pFirst = p; - while( pFirst->pPrior ) pFirst = pFirst->pPrior; - generateColumnNames(pParse, 0, pFirst->pEList); - } - iBreak = sqlite3VdbeMakeLabel(v); - iCont = sqlite3VdbeMakeLabel(v); - computeLimitRegisters(pParse, p, iBreak); - sqlite3VdbeAddOp2(v, OP_Rewind, unionTab, iBreak); VdbeCoverage(v); - iStart = sqlite3VdbeCurrentAddr(v); - selectInnerLoop(pParse, p, p->pEList, unionTab, - 0, 0, &dest, iCont, iBreak); - sqlite3VdbeResolveLabel(v, iCont); - sqlite3VdbeAddOp2(v, OP_Next, unionTab, iStart); VdbeCoverage(v); - sqlite3VdbeResolveLabel(v, iBreak); - sqlite3VdbeAddOp2(v, OP_Close, unionTab, 0); - } - break; - } - default: assert( p->op==TK_INTERSECT ); { - int tab1, tab2; - int iCont, iBreak, iStart; - Expr *pLimit, *pOffset; - int addr; - SelectDest intersectdest; - int r1; - - /* INTERSECT is different from the others since it requires - ** two temporary tables. Hence it has its own case. Begin - ** by allocating the tables we will need. - */ - tab1 = pParse->nTab++; - tab2 = pParse->nTab++; - assert( p->pOrderBy==0 ); - - addr = sqlite3VdbeAddOp2(v, OP_OpenEphemeral, tab1, 0); - assert( p->addrOpenEphm[0] == -1 ); - p->addrOpenEphm[0] = addr; - findRightmost(p)->selFlags |= SF_UsesEphemeral; - assert( p->pEList ); - - /* Code the SELECTs to our left into temporary table "tab1". - */ - sqlite3SelectDestInit(&intersectdest, SRT_Union, tab1); - explainSetInteger(iSub1, pParse->iNextSelectId); - rc = sqlite3Select(pParse, pPrior, &intersectdest); - if( rc ){ - goto multi_select_end; - } - - /* Code the current SELECT into temporary table "tab2" - */ - addr = sqlite3VdbeAddOp2(v, OP_OpenEphemeral, tab2, 0); - assert( p->addrOpenEphm[1] == -1 ); - p->addrOpenEphm[1] = addr; - p->pPrior = 0; - pLimit = p->pLimit; - p->pLimit = 0; - pOffset = p->pOffset; - p->pOffset = 0; - intersectdest.iSDParm = tab2; - explainSetInteger(iSub2, pParse->iNextSelectId); - rc = sqlite3Select(pParse, p, &intersectdest); - testcase( rc!=SQLITE_OK ); - pDelete = p->pPrior; - p->pPrior = pPrior; - if( p->nSelectRow>pPrior->nSelectRow ) p->nSelectRow = pPrior->nSelectRow; - sqlite3ExprDelete(db, p->pLimit); - p->pLimit = pLimit; - p->pOffset = pOffset; - - /* Generate code to take the intersection of the two temporary - ** tables. - */ - assert( p->pEList ); - if( dest.eDest==SRT_Output ){ - Select *pFirst = p; - while( pFirst->pPrior ) pFirst = pFirst->pPrior; - generateColumnNames(pParse, 0, pFirst->pEList); - } - iBreak = sqlite3VdbeMakeLabel(v); - iCont = sqlite3VdbeMakeLabel(v); - computeLimitRegisters(pParse, p, iBreak); - sqlite3VdbeAddOp2(v, OP_Rewind, tab1, iBreak); VdbeCoverage(v); - r1 = sqlite3GetTempReg(pParse); - iStart = sqlite3VdbeAddOp2(v, OP_RowKey, tab1, r1); - sqlite3VdbeAddOp4Int(v, OP_NotFound, tab2, iCont, r1, 0); VdbeCoverage(v); - sqlite3ReleaseTempReg(pParse, r1); - selectInnerLoop(pParse, p, p->pEList, tab1, - 0, 0, &dest, iCont, iBreak); - sqlite3VdbeResolveLabel(v, iCont); - sqlite3VdbeAddOp2(v, OP_Next, tab1, iStart); VdbeCoverage(v); - sqlite3VdbeResolveLabel(v, iBreak); - sqlite3VdbeAddOp2(v, OP_Close, tab2, 0); - sqlite3VdbeAddOp2(v, OP_Close, tab1, 0); - break; - } - } - - explainComposite(pParse, p->op, iSub1, iSub2, p->op!=TK_ALL); - - /* Compute collating sequences used by - ** temporary tables needed to implement the compound select. 
- ** Attach the KeyInfo structure to all temporary tables. - ** - ** This section is run by the right-most SELECT statement only. - ** SELECT statements to the left always skip this part. The right-most - ** SELECT might also skip this part if it has no ORDER BY clause and - ** no temp tables are required. - */ - if( p->selFlags & SF_UsesEphemeral ){ - int i; /* Loop counter */ - KeyInfo *pKeyInfo; /* Collating sequence for the result set */ - Select *pLoop; /* For looping through SELECT statements */ - CollSeq **apColl; /* For looping through pKeyInfo->aColl[] */ - int nCol; /* Number of columns in result set */ - - assert( p->pNext==0 ); - nCol = p->pEList->nExpr; - pKeyInfo = sqlite3KeyInfoAlloc(db, nCol, 1); - if( !pKeyInfo ){ - rc = SQLITE_NOMEM; - goto multi_select_end; - } - for(i=0, apColl=pKeyInfo->aColl; ipDfltColl; - } - } - - for(pLoop=p; pLoop; pLoop=pLoop->pPrior){ - for(i=0; i<2; i++){ - int addr = pLoop->addrOpenEphm[i]; - if( addr<0 ){ - /* If [0] is unused then [1] is also unused. So we can - ** always safely abort as soon as the first unused slot is found */ - assert( pLoop->addrOpenEphm[1]<0 ); - break; - } - sqlite3VdbeChangeP2(v, addr, nCol); - sqlite3VdbeChangeP4(v, addr, (char*)sqlite3KeyInfoRef(pKeyInfo), - P4_KEYINFO); - pLoop->addrOpenEphm[i] = -1; - } - } - sqlite3KeyInfoUnref(pKeyInfo); - } - -multi_select_end: - pDest->iSdst = dest.iSdst; - pDest->nSdst = dest.nSdst; - sqlite3SelectDelete(db, pDelete); - return rc; -} -#endif /* SQLITE_OMIT_COMPOUND_SELECT */ - -/* -** Code an output subroutine for a coroutine implementation of a -** SELECT statment. -** -** The data to be output is contained in pIn->iSdst. There are -** pIn->nSdst columns to be output. pDest is where the output should -** be sent. -** -** regReturn is the number of the register holding the subroutine -** return address. -** -** If regPrev>0 then it is the first register in a vector that -** records the previous output. mem[regPrev] is a flag that is false -** if there has been no previous output. If regPrev>0 then code is -** generated to suppress duplicates. pKeyInfo is used for comparing -** keys. -** -** If the LIMIT found in p->iLimit is reached, jump immediately to -** iBreak. -*/ -static int generateOutputSubroutine( - Parse *pParse, /* Parsing context */ - Select *p, /* The SELECT statement */ - SelectDest *pIn, /* Coroutine supplying data */ - SelectDest *pDest, /* Where to send the data */ - int regReturn, /* The return address register */ - int regPrev, /* Previous result register. No uniqueness if 0 */ - KeyInfo *pKeyInfo, /* For comparing with previous entry */ - int iBreak /* Jump here if we hit the LIMIT */ -){ - Vdbe *v = pParse->pVdbe; - int iContinue; - int addr; - - addr = sqlite3VdbeCurrentAddr(v); - iContinue = sqlite3VdbeMakeLabel(v); - - /* Suppress duplicates for UNION, EXCEPT, and INTERSECT - */ - if( regPrev ){ - int j1, j2; - j1 = sqlite3VdbeAddOp1(v, OP_IfNot, regPrev); VdbeCoverage(v); - j2 = sqlite3VdbeAddOp4(v, OP_Compare, pIn->iSdst, regPrev+1, pIn->nSdst, - (char*)sqlite3KeyInfoRef(pKeyInfo), P4_KEYINFO); - sqlite3VdbeAddOp3(v, OP_Jump, j2+2, iContinue, j2+2); VdbeCoverage(v); - sqlite3VdbeJumpHere(v, j1); - sqlite3VdbeAddOp3(v, OP_Copy, pIn->iSdst, regPrev+1, pIn->nSdst-1); - sqlite3VdbeAddOp2(v, OP_Integer, 1, regPrev); - } - if( pParse->db->mallocFailed ) return 0; - - /* Suppress the first OFFSET entries if there is an OFFSET clause - */ - codeOffset(v, p->iOffset, iContinue); - - switch( pDest->eDest ){ - /* Store the result as data using a unique key. 
- */ - case SRT_Table: - case SRT_EphemTab: { - int r1 = sqlite3GetTempReg(pParse); - int r2 = sqlite3GetTempReg(pParse); - testcase( pDest->eDest==SRT_Table ); - testcase( pDest->eDest==SRT_EphemTab ); - sqlite3VdbeAddOp3(v, OP_MakeRecord, pIn->iSdst, pIn->nSdst, r1); - sqlite3VdbeAddOp2(v, OP_NewRowid, pDest->iSDParm, r2); - sqlite3VdbeAddOp3(v, OP_Insert, pDest->iSDParm, r1, r2); - sqlite3VdbeChangeP5(v, OPFLAG_APPEND); - sqlite3ReleaseTempReg(pParse, r2); - sqlite3ReleaseTempReg(pParse, r1); - break; - } - -#ifndef SQLITE_OMIT_SUBQUERY - /* If we are creating a set for an "expr IN (SELECT ...)" construct, - ** then there should be a single item on the stack. Write this - ** item into the set table with bogus data. - */ - case SRT_Set: { - int r1; - assert( pIn->nSdst==1 ); - pDest->affSdst = - sqlite3CompareAffinity(p->pEList->a[0].pExpr, pDest->affSdst); - r1 = sqlite3GetTempReg(pParse); - sqlite3VdbeAddOp4(v, OP_MakeRecord, pIn->iSdst, 1, r1, &pDest->affSdst,1); - sqlite3ExprCacheAffinityChange(pParse, pIn->iSdst, 1); - sqlite3VdbeAddOp2(v, OP_IdxInsert, pDest->iSDParm, r1); - sqlite3ReleaseTempReg(pParse, r1); - break; - } - -#if 0 /* Never occurs on an ORDER BY query */ - /* If any row exist in the result set, record that fact and abort. - */ - case SRT_Exists: { - sqlite3VdbeAddOp2(v, OP_Integer, 1, pDest->iSDParm); - /* The LIMIT clause will terminate the loop for us */ - break; - } -#endif - - /* If this is a scalar select that is part of an expression, then - ** store the results in the appropriate memory cell and break out - ** of the scan loop. - */ - case SRT_Mem: { - assert( pIn->nSdst==1 ); - sqlite3ExprCodeMove(pParse, pIn->iSdst, pDest->iSDParm, 1); - /* The LIMIT clause will jump out of the loop for us */ - break; - } -#endif /* #ifndef SQLITE_OMIT_SUBQUERY */ - - /* The results are stored in a sequence of registers - ** starting at pDest->iSdst. Then the co-routine yields. - */ - case SRT_Coroutine: { - if( pDest->iSdst==0 ){ - pDest->iSdst = sqlite3GetTempRange(pParse, pIn->nSdst); - pDest->nSdst = pIn->nSdst; - } - sqlite3ExprCodeMove(pParse, pIn->iSdst, pDest->iSdst, pDest->nSdst); - sqlite3VdbeAddOp1(v, OP_Yield, pDest->iSDParm); - break; - } - - /* If none of the above, then the result destination must be - ** SRT_Output. This routine is never called with any other - ** destination other than the ones handled above or SRT_Output. - ** - ** For SRT_Output, results are stored in a sequence of registers. - ** Then the OP_ResultRow opcode is used to cause sqlite3_step() to - ** return the next row of result. - */ - default: { - assert( pDest->eDest==SRT_Output ); - sqlite3VdbeAddOp2(v, OP_ResultRow, pIn->iSdst, pIn->nSdst); - sqlite3ExprCacheAffinityChange(pParse, pIn->iSdst, pIn->nSdst); - break; - } - } - - /* Jump to the end of the loop if the LIMIT is reached. - */ - if( p->iLimit ){ - sqlite3VdbeAddOp3(v, OP_IfZero, p->iLimit, iBreak, -1); VdbeCoverage(v); - } - - /* Generate the subroutine return - */ - sqlite3VdbeResolveLabel(v, iContinue); - sqlite3VdbeAddOp1(v, OP_Return, regReturn); - - return addr; -} - -/* -** Alternative compound select code generator for cases when there -** is an ORDER BY clause. -** -** We assume a query of the following form: -** -** ORDER BY -** -** is one of UNION ALL, UNION, EXCEPT, or INTERSECT. The idea -** is to code both and with the ORDER BY clause as -** co-routines. Then run the co-routines in parallel and merge the results -** into the output. 
In addition to the two coroutines (called selectA and
-** selectB) there are 7 subroutines:
-**
-**    outA:    Move the output of the selectA coroutine into the output
-**             of the compound query.
-**
-**    outB:    Move the output of the selectB coroutine into the output
-**             of the compound query.  (Only generated for UNION and
-**             UNION ALL.  EXCEPT and INTERSECT never output a row that
-**             appears only in B.)
-**
-**    AltB:    Called when there is data from both coroutines and A<B.
-**
-**    AeqB:    Called when there is data from both coroutines and A==B.
-**
-**    AgtB:    Called when there is data from both coroutines and A>B.
-**
-**    EofA:    Called when data is exhausted from selectA.
-**
-**    EofB:    Called when data is exhausted from selectB.
-**
-** The implementation of the latter five subroutines depend on which
-** <operator> is used:
-**
-**
-**             UNION ALL         UNION            EXCEPT          INTERSECT
-**          -------------  -----------------  --------------  -----------------
-**   AltB:   outA, nextA      outA, nextA       outA, nextA         nextA
-**
-**   AeqB:   outA, nextA         nextA             nextA         outA, nextA
-**
-**   AgtB:   outB, nextB      outB, nextB         nextB            nextB
-**
-**   EofA:   outB, nextB      outB, nextB          halt             halt
-**
-**   EofB:   outA, nextA      outA, nextA       outA, nextA         halt
-**
-** In the AltB, AeqB, and AgtB subroutines, an EOF on A following nextA
-** causes an immediate jump to EofA and an EOF on B following nextB causes
-** an immediate jump to EofB.  Within EofA and EofB, an EOF on entry or
-** following nextX causes a jump to the end of the select processing.
-**
-** Duplicate removal in the UNION, EXCEPT, and INTERSECT cases is handled
-** within the output subroutine.  The regPrev register set holds the previously
-** output value.  A comparison is made against this value and the output
-** is skipped if the next results would be the same as the previous.
-**
-** The implementation plan is to implement the two coroutines and seven
-** subroutines first, then put the control logic at the bottom.  Like this:
-**
-**          goto Init
-**     coA: coroutine for left query (A)
-**     coB: coroutine for right query (B)
-**    outA: output one row of A
-**    outB: output one row of B (UNION and UNION ALL only)
-**    EofA: ...
-**    EofB: ...
-**    AltB: ...
-**    AeqB: ...
-**    AgtB: ...
-**    Init: initialize coroutine registers
-**          yield coA
-**          if eof(A) goto EofA
-**          yield coB
-**          if eof(B) goto EofB
-**    Cmpr: Compare A, B
-**          Jump AltB, AeqB, AgtB
-**     End: ...
-**
-** We call AltB, AeqB, AgtB, EofA, and EofB "subroutines" but they are not
-** actually called using Gosub and they do not Return.  EofA and EofB loop
-** until all data is exhausted and then jump to the "end" label.  AltB, AeqB,
-** and AgtB jump to either L2 or to one of EofA or EofB.
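Stripped of the VDBE machinery, the control flow in the table above is an ordinary merge of two sorted streams. Below is a loose standalone analogue in plain C (not SQLite code, and without the regPrev duplicate check that the real output subroutine performs); the comments map each branch roughly onto the AltB/AeqB/AgtB/EofA/EofB cases.

    #include <stdio.h>

    static void mergeUnion(const int *a, int na, const int *b, int nb){
      int i = 0, j = 0;
      while( i<na && j<nb ){
        if( a[i]<b[j] ){            /* roughly AltB: outA, nextA */
          printf("%d\n", a[i++]);
        }else if( a[i]>b[j] ){      /* roughly AgtB: outB, nextB */
          printf("%d\n", b[j++]);
        }else{                      /* A==B: emit once; SQLite relies on regPrev */
          printf("%d\n", a[i]);
          i++; j++;
        }
      }
      while( i<na ) printf("%d\n", a[i++]);   /* EofB: drain the remaining A rows */
      while( j<nb ) printf("%d\n", b[j++]);   /* EofA: drain the remaining B rows */
    }

    int main(void){
      int a[] = {1, 3, 5, 7};
      int b[] = {3, 4, 5, 8};
      mergeUnion(a, 4, b, 4);   /* prints 1 3 4 5 7 8 */
      return 0;
    }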
-*/
-#ifndef SQLITE_OMIT_COMPOUND_SELECT
-static int multiSelectOrderBy(
-  Parse *pParse,        /* Parsing context */
-  Select *p,            /* The right-most of SELECTs to be coded */
-  SelectDest *pDest     /* What to do with query results */
-){
-  int i, j;             /* Loop counters */
-  Select *pPrior;       /* Another SELECT immediately to our left */
-  Vdbe *v;              /* Generate code to this VDBE */
-  SelectDest destA;     /* Destination for coroutine A */
-  SelectDest destB;     /* Destination for coroutine B */
-  int regAddrA;         /* Address register for select-A coroutine */
-  int regAddrB;         /* Address register for select-B coroutine */
-  int addrSelectA;      /* Address of the select-A coroutine */
-  int addrSelectB;      /* Address of the select-B coroutine */
-  int regOutA;          /* Address register for the output-A subroutine */
-  int regOutB;          /* Address register for the output-B subroutine */
-  int addrOutA;         /* Address of the output-A subroutine */
-  int addrOutB = 0;     /* Address of the output-B subroutine */
-  int addrEofA;         /* Address of the select-A-exhausted subroutine */
-  int addrEofA_noB;     /* Alternate addrEofA if B is uninitialized */
-  int addrEofB;         /* Address of the select-B-exhausted subroutine */
-  int addrAltB;         /* Address of the A<B subroutine */
-  int addrAeqB;         /* Address of the A==B subroutine */
-  int addrAgtB;         /* Address of the A>B subroutine */
-  int regLimitA;        /* Limit register for select-A */
-  int regLimitB;        /* Limit register for select-B */
-  int regPrev;          /* A range of registers to hold previous output */
-  int savedLimit;       /* Saved value of p->iLimit */
-  int savedOffset;      /* Saved value of p->iOffset */
-  int labelCmpr;        /* Label for the start of the merge algorithm */
-  int labelEnd;         /* Label for the end of the overall SELECT stmt */
-  int j1;               /* Jump instructions that get retargetted */
-  int op;               /* One of TK_ALL, TK_UNION, TK_EXCEPT, TK_INTERSECT */
-  KeyInfo *pKeyDup = 0; /* Comparison information for duplicate removal */
-  KeyInfo *pKeyMerge;   /* Comparison information for merging rows */
-  sqlite3 *db;          /* Database connection */
-  ExprList *pOrderBy;   /* The ORDER BY clause */
-  int nOrderBy;         /* Number of terms in the ORDER BY clause */
-  int *aPermute;        /* Mapping from ORDER BY terms to result set columns */
-#ifndef SQLITE_OMIT_EXPLAIN
-  int iSub1;            /* EQP id of left-hand query */
-  int iSub2;            /* EQP id of right-hand query */
-#endif
-
-  assert( p->pOrderBy!=0 );
-  assert( pKeyDup==0 ); /* "Managed" code needs this. Ticket #3382. */
-  db = pParse->db;
-  v = pParse->pVdbe;
-  assert( v!=0 );       /* Already thrown the error if VDBE alloc failed */
-  labelEnd = sqlite3VdbeMakeLabel(v);
-  labelCmpr = sqlite3VdbeMakeLabel(v);
-
-
-  /* Patch up the ORDER BY clause
-  */
-  op = p->op;
-  pPrior = p->pPrior;
-  assert( pPrior->pOrderBy==0 );
-  pOrderBy = p->pOrderBy;
-  assert( pOrderBy );
-  nOrderBy = pOrderBy->nExpr;
-
-  /* For operators other than UNION ALL we have to make sure that
-  ** the ORDER BY clause covers every term of the result set.  Add
-  ** terms to the ORDER BY clause as necessary.
- */ - if( op!=TK_ALL ){ - for(i=1; db->mallocFailed==0 && i<=p->pEList->nExpr; i++){ - struct ExprList_item *pItem; - for(j=0, pItem=pOrderBy->a; ju.x.iOrderByCol>0 ); - if( pItem->u.x.iOrderByCol==i ) break; - } - if( j==nOrderBy ){ - Expr *pNew = sqlite3Expr(db, TK_INTEGER, 0); - if( pNew==0 ) return SQLITE_NOMEM; - pNew->flags |= EP_IntValue; - pNew->u.iValue = i; - pOrderBy = sqlite3ExprListAppend(pParse, pOrderBy, pNew); - if( pOrderBy ) pOrderBy->a[nOrderBy++].u.x.iOrderByCol = (u16)i; - } - } - } - - /* Compute the comparison permutation and keyinfo that is used with - ** the permutation used to determine if the next - ** row of results comes from selectA or selectB. Also add explicit - ** collations to the ORDER BY clause terms so that when the subqueries - ** to the right and the left are evaluated, they use the correct - ** collation. - */ - aPermute = sqlite3DbMallocRaw(db, sizeof(int)*nOrderBy); - if( aPermute ){ - struct ExprList_item *pItem; - for(i=0, pItem=pOrderBy->a; iu.x.iOrderByCol>0 - && pItem->u.x.iOrderByCol<=p->pEList->nExpr ); - aPermute[i] = pItem->u.x.iOrderByCol - 1; - } - pKeyMerge = multiSelectOrderByKeyInfo(pParse, p, 1); - }else{ - pKeyMerge = 0; - } - - /* Reattach the ORDER BY clause to the query. - */ - p->pOrderBy = pOrderBy; - pPrior->pOrderBy = sqlite3ExprListDup(pParse->db, pOrderBy, 0); - - /* Allocate a range of temporary registers and the KeyInfo needed - ** for the logic that removes duplicate result rows when the - ** operator is UNION, EXCEPT, or INTERSECT (but not UNION ALL). - */ - if( op==TK_ALL ){ - regPrev = 0; - }else{ - int nExpr = p->pEList->nExpr; - assert( nOrderBy>=nExpr || db->mallocFailed ); - regPrev = pParse->nMem+1; - pParse->nMem += nExpr+1; - sqlite3VdbeAddOp2(v, OP_Integer, 0, regPrev); - pKeyDup = sqlite3KeyInfoAlloc(db, nExpr, 1); - if( pKeyDup ){ - assert( sqlite3KeyInfoIsWriteable(pKeyDup) ); - for(i=0; iaColl[i] = multiSelectCollSeq(pParse, p, i); - pKeyDup->aSortOrder[i] = 0; - } - } - } - - /* Separate the left and the right query from one another - */ - p->pPrior = 0; - pPrior->pNext = 0; - sqlite3ResolveOrderGroupBy(pParse, p, p->pOrderBy, "ORDER"); - if( pPrior->pPrior==0 ){ - sqlite3ResolveOrderGroupBy(pParse, pPrior, pPrior->pOrderBy, "ORDER"); - } - - /* Compute the limit registers */ - computeLimitRegisters(pParse, p, labelEnd); - if( p->iLimit && op==TK_ALL ){ - regLimitA = ++pParse->nMem; - regLimitB = ++pParse->nMem; - sqlite3VdbeAddOp2(v, OP_Copy, p->iOffset ? p->iOffset+1 : p->iLimit, - regLimitA); - sqlite3VdbeAddOp2(v, OP_Copy, regLimitA, regLimitB); - }else{ - regLimitA = regLimitB = 0; - } - sqlite3ExprDelete(db, p->pLimit); - p->pLimit = 0; - sqlite3ExprDelete(db, p->pOffset); - p->pOffset = 0; - - regAddrA = ++pParse->nMem; - regAddrB = ++pParse->nMem; - regOutA = ++pParse->nMem; - regOutB = ++pParse->nMem; - sqlite3SelectDestInit(&destA, SRT_Coroutine, regAddrA); - sqlite3SelectDestInit(&destB, SRT_Coroutine, regAddrB); - - /* Generate a coroutine to evaluate the SELECT statement to the - ** left of the compound operator - the "A" select. 
- */
-  addrSelectA = sqlite3VdbeCurrentAddr(v) + 1;
-  j1 = sqlite3VdbeAddOp3(v, OP_InitCoroutine, regAddrA, 0, addrSelectA);
-  VdbeComment((v, "left SELECT"));
-  pPrior->iLimit = regLimitA;
-  explainSetInteger(iSub1, pParse->iNextSelectId);
-  sqlite3Select(pParse, pPrior, &destA);
-  sqlite3VdbeAddOp1(v, OP_EndCoroutine, regAddrA);
-  sqlite3VdbeJumpHere(v, j1);
-
-  /* Generate a coroutine to evaluate the SELECT statement on
-  ** the right - the "B" select
-  */
-  addrSelectB = sqlite3VdbeCurrentAddr(v) + 1;
-  j1 = sqlite3VdbeAddOp3(v, OP_InitCoroutine, regAddrB, 0, addrSelectB);
-  VdbeComment((v, "right SELECT"));
-  savedLimit = p->iLimit;
-  savedOffset = p->iOffset;
-  p->iLimit = regLimitB;
-  p->iOffset = 0;
-  explainSetInteger(iSub2, pParse->iNextSelectId);
-  sqlite3Select(pParse, p, &destB);
-  p->iLimit = savedLimit;
-  p->iOffset = savedOffset;
-  sqlite3VdbeAddOp1(v, OP_EndCoroutine, regAddrB);
-
-  /* Generate a subroutine that outputs the current row of the A
-  ** select as the next output row of the compound select.
-  */
-  VdbeNoopComment((v, "Output routine for A"));
-  addrOutA = generateOutputSubroutine(pParse,
-                 p, &destA, pDest, regOutA,
-                 regPrev, pKeyDup, labelEnd);
-
-  /* Generate a subroutine that outputs the current row of the B
-  ** select as the next output row of the compound select.
-  */
-  if( op==TK_ALL || op==TK_UNION ){
-    VdbeNoopComment((v, "Output routine for B"));
-    addrOutB = generateOutputSubroutine(pParse,
-                 p, &destB, pDest, regOutB,
-                 regPrev, pKeyDup, labelEnd);
-  }
-  sqlite3KeyInfoUnref(pKeyDup);
-
-  /* Generate a subroutine to run when the results from select A
-  ** are exhausted and only data in select B remains.
-  */
-  if( op==TK_EXCEPT || op==TK_INTERSECT ){
-    addrEofA_noB = addrEofA = labelEnd;
-  }else{
-    VdbeNoopComment((v, "eof-A subroutine"));
-    addrEofA = sqlite3VdbeAddOp2(v, OP_Gosub, regOutB, addrOutB);
-    addrEofA_noB = sqlite3VdbeAddOp2(v, OP_Yield, regAddrB, labelEnd);
-    VdbeCoverage(v);
-    sqlite3VdbeAddOp2(v, OP_Goto, 0, addrEofA);
-    p->nSelectRow += pPrior->nSelectRow;
-  }
-
-  /* Generate a subroutine to run when the results from select B
-  ** are exhausted and only data in select A remains.
-  */
-  if( op==TK_INTERSECT ){
-    addrEofB = addrEofA;
-    if( p->nSelectRow > pPrior->nSelectRow ) p->nSelectRow = pPrior->nSelectRow;
-  }else{
-    VdbeNoopComment((v, "eof-B subroutine"));
-    addrEofB = sqlite3VdbeAddOp2(v, OP_Gosub, regOutA, addrOutA);
-    sqlite3VdbeAddOp2(v, OP_Yield, regAddrA, labelEnd); VdbeCoverage(v);
-    sqlite3VdbeAddOp2(v, OP_Goto, 0, addrEofB);
-  }
-
-  /* Generate code to handle the case of A<B
-  */
-  VdbeNoopComment((v, "A-lt-B subroutine"));
-  addrAltB = sqlite3VdbeCurrentAddr(v);
-  sqlite3VdbeAddOp2(v, OP_Gosub, regOutA, addrOutA);
-  sqlite3VdbeAddOp2(v, OP_Yield, regAddrA, addrEofA); VdbeCoverage(v);
-  sqlite3VdbeAddOp2(v, OP_Goto, 0, labelCmpr);
-
-  /* Generate code to handle the case of A==B
-  */
-  if( op==TK_ALL ){
-    addrAeqB = addrAltB;
-  }else if( op==TK_INTERSECT ){
-    addrAeqB = addrAltB;
-    addrAltB++;
-  }else{
-    VdbeNoopComment((v, "A-eq-B subroutine"));
-    addrAeqB =
-       sqlite3VdbeAddOp2(v, OP_Yield, regAddrA, addrEofA); VdbeCoverage(v);
-    sqlite3VdbeAddOp2(v, OP_Goto, 0, labelCmpr);
-  }
-
-  /* Generate code to handle the case of A>B
-  */
-  VdbeNoopComment((v, "A-gt-B subroutine"));
-  addrAgtB = sqlite3VdbeCurrentAddr(v);
-  if( op==TK_ALL || op==TK_UNION ){
-    sqlite3VdbeAddOp2(v, OP_Gosub, regOutB, addrOutB);
-  }
-  sqlite3VdbeAddOp2(v, OP_Yield, regAddrB, addrEofB); VdbeCoverage(v);
-  sqlite3VdbeAddOp2(v, OP_Goto, 0, labelCmpr);
-
-  /* This code runs once to initialize everything.
- */ - sqlite3VdbeJumpHere(v, j1); - sqlite3VdbeAddOp2(v, OP_Yield, regAddrA, addrEofA_noB); VdbeCoverage(v); - sqlite3VdbeAddOp2(v, OP_Yield, regAddrB, addrEofB); VdbeCoverage(v); - - /* Implement the main merge loop - */ - sqlite3VdbeResolveLabel(v, labelCmpr); - sqlite3VdbeAddOp4(v, OP_Permutation, 0, 0, 0, (char*)aPermute, P4_INTARRAY); - sqlite3VdbeAddOp4(v, OP_Compare, destA.iSdst, destB.iSdst, nOrderBy, - (char*)pKeyMerge, P4_KEYINFO); - sqlite3VdbeChangeP5(v, OPFLAG_PERMUTE); - sqlite3VdbeAddOp3(v, OP_Jump, addrAltB, addrAeqB, addrAgtB); VdbeCoverage(v); - - /* Jump to the this point in order to terminate the query. - */ - sqlite3VdbeResolveLabel(v, labelEnd); - - /* Set the number of output columns - */ - if( pDest->eDest==SRT_Output ){ - Select *pFirst = pPrior; - while( pFirst->pPrior ) pFirst = pFirst->pPrior; - generateColumnNames(pParse, 0, pFirst->pEList); - } - - /* Reassembly the compound query so that it will be freed correctly - ** by the calling function */ - if( p->pPrior ){ - sqlite3SelectDelete(db, p->pPrior); - } - p->pPrior = pPrior; - pPrior->pNext = p; - - /*** TBD: Insert subroutine calls to close cursors on incomplete - **** subqueries ****/ - explainComposite(pParse, p->op, iSub1, iSub2, 0); - return SQLITE_OK; -} -#endif - -#if !defined(SQLITE_OMIT_SUBQUERY) || !defined(SQLITE_OMIT_VIEW) -/* Forward Declarations */ -static void substExprList(sqlite3*, ExprList*, int, ExprList*); -static void substSelect(sqlite3*, Select *, int, ExprList *); - -/* -** Scan through the expression pExpr. Replace every reference to -** a column in table number iTable with a copy of the iColumn-th -** entry in pEList. (But leave references to the ROWID column -** unchanged.) -** -** This routine is part of the flattening procedure. A subquery -** whose result set is defined by pEList appears as entry in the -** FROM clause of a SELECT such that the VDBE cursor assigned to that -** FORM clause entry is iTable. This routine make the necessary -** changes to pExpr so that it refers directly to the source table -** of the subquery rather the result set of the subquery. 
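The substitution itself is a small recursive tree walk. Below is a toy sketch with a hypothetical Node type (not SQLite's Expr); where the sketch simply swaps a pointer, the deleted code duplicates the expression with sqlite3ExprDup() and frees the old node with sqlite3ExprDelete().

    #include <stdio.h>
    #include <stdlib.h>

    typedef struct Node Node;
    struct Node {
      const char *zOp;   /* operator or terminal text: ">", "+", "a", "5", ... */
      int iCol;          /* >=0 when this node references a subquery column */
      Node *pLeft, *pRight;
    };

    static Node *mkNode(const char *zOp, int iCol, Node *pL, Node *pR){
      Node *p = malloc(sizeof(*p));
      p->zOp = zOp; p->iCol = iCol; p->pLeft = pL; p->pRight = pR;
      return p;
    }

    /* Replace each subquery-column reference with the subquery's result expr. */
    static Node *subst(Node *p, Node **apSub){
      if( p==0 ) return 0;
      if( p->iCol>=0 ) return apSub[p->iCol];
      p->pLeft = subst(p->pLeft, apSub);
      p->pRight = subst(p->pRight, apSub);
      return p;
    }

    static void print(const Node *p){
      if( p==0 ) return;
      print(p->pLeft); printf("%s", p->zOp); print(p->pRight);
    }

    int main(void){
      /* Subquery: SELECT x+y AS a FROM t1 ...; its result column 0 is "x+y". */
      Node *apSub[1];
      apSub[0] = mkNode("+", -1, mkNode("x", -1, 0, 0), mkNode("y", -1, 0, 0));

      /* Outer WHERE clause "a>5", where "a" refers to subquery column 0. */
      Node *pWhere = mkNode(">", -1, mkNode("a", 0, 0, 0), mkNode("5", -1, 0, 0));

      pWhere = subst(pWhere, apSub);
      print(pWhere); printf("\n");   /* prints: x+y>5 */
      return 0;
    }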
-*/ -static Expr *substExpr( - sqlite3 *db, /* Report malloc errors to this connection */ - Expr *pExpr, /* Expr in which substitution occurs */ - int iTable, /* Table to be substituted */ - ExprList *pEList /* Substitute expressions */ -){ - if( pExpr==0 ) return 0; - if( pExpr->op==TK_COLUMN && pExpr->iTable==iTable ){ - if( pExpr->iColumn<0 ){ - pExpr->op = TK_NULL; - }else{ - Expr *pNew; - assert( pEList!=0 && pExpr->iColumnnExpr ); - assert( pExpr->pLeft==0 && pExpr->pRight==0 ); - pNew = sqlite3ExprDup(db, pEList->a[pExpr->iColumn].pExpr, 0); - sqlite3ExprDelete(db, pExpr); - pExpr = pNew; - } - }else{ - pExpr->pLeft = substExpr(db, pExpr->pLeft, iTable, pEList); - pExpr->pRight = substExpr(db, pExpr->pRight, iTable, pEList); - if( ExprHasProperty(pExpr, EP_xIsSelect) ){ - substSelect(db, pExpr->x.pSelect, iTable, pEList); - }else{ - substExprList(db, pExpr->x.pList, iTable, pEList); - } - } - return pExpr; -} -static void substExprList( - sqlite3 *db, /* Report malloc errors here */ - ExprList *pList, /* List to scan and in which to make substitutes */ - int iTable, /* Table to be substituted */ - ExprList *pEList /* Substitute values */ -){ - int i; - if( pList==0 ) return; - for(i=0; inExpr; i++){ - pList->a[i].pExpr = substExpr(db, pList->a[i].pExpr, iTable, pEList); - } -} -static void substSelect( - sqlite3 *db, /* Report malloc errors here */ - Select *p, /* SELECT statement in which to make substitutions */ - int iTable, /* Table to be replaced */ - ExprList *pEList /* Substitute values */ -){ - SrcList *pSrc; - struct SrcList_item *pItem; - int i; - if( !p ) return; - substExprList(db, p->pEList, iTable, pEList); - substExprList(db, p->pGroupBy, iTable, pEList); - substExprList(db, p->pOrderBy, iTable, pEList); - p->pHaving = substExpr(db, p->pHaving, iTable, pEList); - p->pWhere = substExpr(db, p->pWhere, iTable, pEList); - substSelect(db, p->pPrior, iTable, pEList); - pSrc = p->pSrc; - assert( pSrc ); /* Even for (SELECT 1) we have: pSrc!=0 but pSrc->nSrc==0 */ - if( ALWAYS(pSrc) ){ - for(i=pSrc->nSrc, pItem=pSrc->a; i>0; i--, pItem++){ - substSelect(db, pItem->pSelect, iTable, pEList); - } - } -} -#endif /* !defined(SQLITE_OMIT_SUBQUERY) || !defined(SQLITE_OMIT_VIEW) */ - -#if !defined(SQLITE_OMIT_SUBQUERY) || !defined(SQLITE_OMIT_VIEW) -/* -** This routine attempts to flatten subqueries as a performance optimization. -** This routine returns 1 if it makes changes and 0 if no flattening occurs. -** -** To understand the concept of flattening, consider the following -** query: -** -** SELECT a FROM (SELECT x+y AS a FROM t1 WHERE z<100) WHERE a>5 -** -** The default way of implementing this query is to execute the -** subquery first and store the results in a temporary table, then -** run the outer query on that temporary table. This requires two -** passes over the data. Furthermore, because the temporary table -** has no indices, the WHERE clause on the outer query cannot be -** optimized. -** -** This routine attempts to rewrite queries such as the above into -** a single flat select, like this: -** -** SELECT x+y AS a FROM t1 WHERE z<100 AND a>5 -** -** The code generated for this simpification gives the same result -** but only has to scan the data once. And because indices might -** exist on the table t1, a complete scan of the data might be -** avoided. -** -** Flattening is only attempted if all of the following are true: -** -** (1) The subquery and the outer query do not both use aggregates. 
-** -** (2) The subquery is not an aggregate or the outer query is not a join. -** -** (3) The subquery is not the right operand of a left outer join -** (Originally ticket #306. Strengthened by ticket #3300) -** -** (4) The subquery is not DISTINCT. -** -** (**) At one point restrictions (4) and (5) defined a subset of DISTINCT -** sub-queries that were excluded from this optimization. Restriction -** (4) has since been expanded to exclude all DISTINCT subqueries. -** -** (6) The subquery does not use aggregates or the outer query is not -** DISTINCT. -** -** (7) The subquery has a FROM clause. TODO: For subqueries without -** A FROM clause, consider adding a FROM close with the special -** table sqlite_once that consists of a single row containing a -** single NULL. -** -** (8) The subquery does not use LIMIT or the outer query is not a join. -** -** (9) The subquery does not use LIMIT or the outer query does not use -** aggregates. -** -** (10) The subquery does not use aggregates or the outer query does not -** use LIMIT. -** -** (11) The subquery and the outer query do not both have ORDER BY clauses. -** -** (**) Not implemented. Subsumed into restriction (3). Was previously -** a separate restriction deriving from ticket #350. -** -** (13) The subquery and outer query do not both use LIMIT. -** -** (14) The subquery does not use OFFSET. -** -** (15) The outer query is not part of a compound select or the -** subquery does not have a LIMIT clause. -** (See ticket #2339 and ticket [02a8e81d44]). -** -** (16) The outer query is not an aggregate or the subquery does -** not contain ORDER BY. (Ticket #2942) This used to not matter -** until we introduced the group_concat() function. -** -** (17) The sub-query is not a compound select, or it is a UNION ALL -** compound clause made up entirely of non-aggregate queries, and -** the parent query: -** -** * is not itself part of a compound select, -** * is not an aggregate or DISTINCT query, and -** * is not a join -** -** The parent and sub-query may contain WHERE clauses. Subject to -** rules (11), (13) and (14), they may also contain ORDER BY, -** LIMIT and OFFSET clauses. The subquery cannot use any compound -** operator other than UNION ALL because all the other compound -** operators have an implied DISTINCT which is disallowed by -** restriction (4). -** -** Also, each component of the sub-query must return the same number -** of result columns. This is actually a requirement for any compound -** SELECT statement, but all the code here does is make sure that no -** such (illegal) sub-query is flattened. The caller will detect the -** syntax error and return a detailed message. -** -** (18) If the sub-query is a compound select, then all terms of the -** ORDER by clause of the parent must be simple references to -** columns of the sub-query. -** -** (19) The subquery does not use LIMIT or the outer query does not -** have a WHERE clause. -** -** (20) If the sub-query is a compound select, then it must not use -** an ORDER BY clause. Ticket #3773. We could relax this constraint -** somewhat by saying that the terms of the ORDER BY clause must -** appear as unmodified result columns in the outer query. But we -** have other optimizations in mind to deal with that case. -** -** (21) The subquery does not use LIMIT or the outer query is not -** DISTINCT. (See ticket [752e1646fc]). -** -** (22) The subquery is not a recursive CTE. -** -** (23) The parent is not a recursive CTE, or the sub-query is not a -** compound query. 
This restriction is because transforming the -** parent to a compound query confuses the code that handles -** recursive queries in multiSelect(). -** -** -** In this routine, the "p" parameter is a pointer to the outer query. -** The subquery is p->pSrc->a[iFrom]. isAgg is true if the outer query -** uses aggregates and subqueryIsAgg is true if the subquery uses aggregates. -** -** If flattening is not attempted, this routine is a no-op and returns 0. -** If flattening is attempted this routine returns 1. -** -** All of the expression analysis must occur on both the outer query and -** the subquery before this routine runs. -*/ -static int flattenSubquery( - Parse *pParse, /* Parsing context */ - Select *p, /* The parent or outer SELECT statement */ - int iFrom, /* Index in p->pSrc->a[] of the inner subquery */ - int isAgg, /* True if outer SELECT uses aggregate functions */ - int subqueryIsAgg /* True if the subquery uses aggregate functions */ -){ - const char *zSavedAuthContext = pParse->zAuthContext; - Select *pParent; - Select *pSub; /* The inner query or "subquery" */ - Select *pSub1; /* Pointer to the rightmost select in sub-query */ - SrcList *pSrc; /* The FROM clause of the outer query */ - SrcList *pSubSrc; /* The FROM clause of the subquery */ - ExprList *pList; /* The result set of the outer query */ - int iParent; /* VDBE cursor number of the pSub result set temp table */ - int i; /* Loop counter */ - Expr *pWhere; /* The WHERE clause */ - struct SrcList_item *pSubitem; /* The subquery */ - sqlite3 *db = pParse->db; - - /* Check to see if flattening is permitted. Return 0 if not. - */ - assert( p!=0 ); - assert( p->pPrior==0 ); /* Unable to flatten compound queries */ - if( OptimizationDisabled(db, SQLITE_QueryFlattener) ) return 0; - pSrc = p->pSrc; - assert( pSrc && iFrom>=0 && iFromnSrc ); - pSubitem = &pSrc->a[iFrom]; - iParent = pSubitem->iCursor; - pSub = pSubitem->pSelect; - assert( pSub!=0 ); - if( isAgg && subqueryIsAgg ) return 0; /* Restriction (1) */ - if( subqueryIsAgg && pSrc->nSrc>1 ) return 0; /* Restriction (2) */ - pSubSrc = pSub->pSrc; - assert( pSubSrc ); - /* Prior to version 3.1.2, when LIMIT and OFFSET had to be simple constants, - ** not arbitrary expresssions, we allowed some combining of LIMIT and OFFSET - ** because they could be computed at compile-time. But when LIMIT and OFFSET - ** became arbitrary expressions, we were forced to add restrictions (13) - ** and (14). 
*/ - if( pSub->pLimit && p->pLimit ) return 0; /* Restriction (13) */ - if( pSub->pOffset ) return 0; /* Restriction (14) */ - if( (p->selFlags & SF_Compound)!=0 && pSub->pLimit ){ - return 0; /* Restriction (15) */ - } - if( pSubSrc->nSrc==0 ) return 0; /* Restriction (7) */ - if( pSub->selFlags & SF_Distinct ) return 0; /* Restriction (5) */ - if( pSub->pLimit && (pSrc->nSrc>1 || isAgg) ){ - return 0; /* Restrictions (8)(9) */ - } - if( (p->selFlags & SF_Distinct)!=0 && subqueryIsAgg ){ - return 0; /* Restriction (6) */ - } - if( p->pOrderBy && pSub->pOrderBy ){ - return 0; /* Restriction (11) */ - } - if( isAgg && pSub->pOrderBy ) return 0; /* Restriction (16) */ - if( pSub->pLimit && p->pWhere ) return 0; /* Restriction (19) */ - if( pSub->pLimit && (p->selFlags & SF_Distinct)!=0 ){ - return 0; /* Restriction (21) */ - } - if( pSub->selFlags & SF_Recursive ) return 0; /* Restriction (22) */ - if( (p->selFlags & SF_Recursive) && pSub->pPrior ) return 0; /* (23) */ - - /* OBSOLETE COMMENT 1: - ** Restriction 3: If the subquery is a join, make sure the subquery is - ** not used as the right operand of an outer join. Examples of why this - ** is not allowed: - ** - ** t1 LEFT OUTER JOIN (t2 JOIN t3) - ** - ** If we flatten the above, we would get - ** - ** (t1 LEFT OUTER JOIN t2) JOIN t3 - ** - ** which is not at all the same thing. - ** - ** OBSOLETE COMMENT 2: - ** Restriction 12: If the subquery is the right operand of a left outer - ** join, make sure the subquery has no WHERE clause. - ** An examples of why this is not allowed: - ** - ** t1 LEFT OUTER JOIN (SELECT * FROM t2 WHERE t2.x>0) - ** - ** If we flatten the above, we would get - ** - ** (t1 LEFT OUTER JOIN t2) WHERE t2.x>0 - ** - ** But the t2.x>0 test will always fail on a NULL row of t2, which - ** effectively converts the OUTER JOIN into an INNER JOIN. - ** - ** THIS OVERRIDES OBSOLETE COMMENTS 1 AND 2 ABOVE: - ** Ticket #3300 shows that flattening the right term of a LEFT JOIN - ** is fraught with danger. Best to avoid the whole thing. If the - ** subquery is the right term of a LEFT JOIN, then do not flatten. - */ - if( (pSubitem->jointype & JT_OUTER)!=0 ){ - return 0; - } - - /* Restriction 17: If the sub-query is a compound SELECT, then it must - ** use only the UNION ALL operator. And none of the simple select queries - ** that make up the compound SELECT are allowed to be aggregate or distinct - ** queries. - */ - if( pSub->pPrior ){ - if( pSub->pOrderBy ){ - return 0; /* Restriction 20 */ - } - if( isAgg || (p->selFlags & SF_Distinct)!=0 || pSrc->nSrc!=1 ){ - return 0; - } - for(pSub1=pSub; pSub1; pSub1=pSub1->pPrior){ - testcase( (pSub1->selFlags & (SF_Distinct|SF_Aggregate))==SF_Distinct ); - testcase( (pSub1->selFlags & (SF_Distinct|SF_Aggregate))==SF_Aggregate ); - assert( pSub->pSrc!=0 ); - if( (pSub1->selFlags & (SF_Distinct|SF_Aggregate))!=0 - || (pSub1->pPrior && pSub1->op!=TK_ALL) - || pSub1->pSrc->nSrc<1 - || pSub->pEList->nExpr!=pSub1->pEList->nExpr - ){ - return 0; - } - testcase( pSub1->pSrc->nSrc>1 ); - } - - /* Restriction 18. */ - if( p->pOrderBy ){ - int ii; - for(ii=0; iipOrderBy->nExpr; ii++){ - if( p->pOrderBy->a[ii].u.x.iOrderByCol==0 ) return 0; - } - } - } - - /***** If we reach this point, flattening is permitted. 
*****/ - - /* Authorize the subquery */ - pParse->zAuthContext = pSubitem->zName; - TESTONLY(i =) sqlite3AuthCheck(pParse, SQLITE_SELECT, 0, 0, 0); - testcase( i==SQLITE_DENY ); - pParse->zAuthContext = zSavedAuthContext; - - /* If the sub-query is a compound SELECT statement, then (by restrictions - ** 17 and 18 above) it must be a UNION ALL and the parent query must - ** be of the form: - ** - ** SELECT FROM () - ** - ** followed by any ORDER BY, LIMIT and/or OFFSET clauses. This block - ** creates N-1 copies of the parent query without any ORDER BY, LIMIT or - ** OFFSET clauses and joins them to the left-hand-side of the original - ** using UNION ALL operators. In this case N is the number of simple - ** select statements in the compound sub-query. - ** - ** Example: - ** - ** SELECT a+1 FROM ( - ** SELECT x FROM tab - ** UNION ALL - ** SELECT y FROM tab - ** UNION ALL - ** SELECT abs(z*2) FROM tab2 - ** ) WHERE a!=5 ORDER BY 1 - ** - ** Transformed into: - ** - ** SELECT x+1 FROM tab WHERE x+1!=5 - ** UNION ALL - ** SELECT y+1 FROM tab WHERE y+1!=5 - ** UNION ALL - ** SELECT abs(z*2)+1 FROM tab2 WHERE abs(z*2)+1!=5 - ** ORDER BY 1 - ** - ** We call this the "compound-subquery flattening". - */ - for(pSub=pSub->pPrior; pSub; pSub=pSub->pPrior){ - Select *pNew; - ExprList *pOrderBy = p->pOrderBy; - Expr *pLimit = p->pLimit; - Expr *pOffset = p->pOffset; - Select *pPrior = p->pPrior; - p->pOrderBy = 0; - p->pSrc = 0; - p->pPrior = 0; - p->pLimit = 0; - p->pOffset = 0; - pNew = sqlite3SelectDup(db, p, 0); - p->pOffset = pOffset; - p->pLimit = pLimit; - p->pOrderBy = pOrderBy; - p->pSrc = pSrc; - p->op = TK_ALL; - if( pNew==0 ){ - p->pPrior = pPrior; - }else{ - pNew->pPrior = pPrior; - if( pPrior ) pPrior->pNext = pNew; - pNew->pNext = p; - p->pPrior = pNew; - } - if( db->mallocFailed ) return 1; - } - - /* Begin flattening the iFrom-th entry of the FROM clause - ** in the outer query. - */ - pSub = pSub1 = pSubitem->pSelect; - - /* Delete the transient table structure associated with the - ** subquery - */ - sqlite3DbFree(db, pSubitem->zDatabase); - sqlite3DbFree(db, pSubitem->zName); - sqlite3DbFree(db, pSubitem->zAlias); - pSubitem->zDatabase = 0; - pSubitem->zName = 0; - pSubitem->zAlias = 0; - pSubitem->pSelect = 0; - - /* Defer deleting the Table object associated with the - ** subquery until code generation is - ** complete, since there may still exist Expr.pTab entries that - ** refer to the subquery even after flattening. Ticket #3346. - ** - ** pSubitem->pTab is always non-NULL by test restrictions and tests above. - */ - if( ALWAYS(pSubitem->pTab!=0) ){ - Table *pTabToDel = pSubitem->pTab; - if( pTabToDel->nRef==1 ){ - Parse *pToplevel = sqlite3ParseToplevel(pParse); - pTabToDel->pNextZombie = pToplevel->pZombieTab; - pToplevel->pZombieTab = pTabToDel; - }else{ - pTabToDel->nRef--; - } - pSubitem->pTab = 0; - } - - /* The following loop runs once for each term in a compound-subquery - ** flattening (as described above). If we are doing a different kind - ** of flattening - a flattening other than a compound-subquery flattening - - ** then this loop only runs once. - ** - ** This loop moves all of the FROM elements of the subquery into the - ** the FROM clause of the outer query. Before doing this, remember - ** the cursor number for the original outer query FROM element in - ** iParent. The iParent cursor will never be used. 
Subsequent code - ** will scan expressions looking for iParent references and replace - ** those references with expressions that resolve to the subquery FROM - ** elements we are now copying in. - */ - for(pParent=p; pParent; pParent=pParent->pPrior, pSub=pSub->pPrior){ - int nSubSrc; - u8 jointype = 0; - pSubSrc = pSub->pSrc; /* FROM clause of subquery */ - nSubSrc = pSubSrc->nSrc; /* Number of terms in subquery FROM clause */ - pSrc = pParent->pSrc; /* FROM clause of the outer query */ - - if( pSrc ){ - assert( pParent==p ); /* First time through the loop */ - jointype = pSubitem->jointype; - }else{ - assert( pParent!=p ); /* 2nd and subsequent times through the loop */ - pSrc = pParent->pSrc = sqlite3SrcListAppend(db, 0, 0, 0); - if( pSrc==0 ){ - assert( db->mallocFailed ); - break; - } - } - - /* The subquery uses a single slot of the FROM clause of the outer - ** query. If the subquery has more than one element in its FROM clause, - ** then expand the outer query to make space for it to hold all elements - ** of the subquery. - ** - ** Example: - ** - ** SELECT * FROM tabA, (SELECT * FROM sub1, sub2), tabB; - ** - ** The outer query has 3 slots in its FROM clause. One slot of the - ** outer query (the middle slot) is used by the subquery. The next - ** block of code will expand the out query to 4 slots. The middle - ** slot is expanded to two slots in order to make space for the - ** two elements in the FROM clause of the subquery. - */ - if( nSubSrc>1 ){ - pParent->pSrc = pSrc = sqlite3SrcListEnlarge(db, pSrc, nSubSrc-1,iFrom+1); - if( db->mallocFailed ){ - break; - } - } - - /* Transfer the FROM clause terms from the subquery into the - ** outer query. - */ - for(i=0; ia[i+iFrom].pUsing); - pSrc->a[i+iFrom] = pSubSrc->a[i]; - memset(&pSubSrc->a[i], 0, sizeof(pSubSrc->a[i])); - } - pSrc->a[iFrom].jointype = jointype; - - /* Now begin substituting subquery result set expressions for - ** references to the iParent in the outer query. - ** - ** Example: - ** - ** SELECT a+5, b*10 FROM (SELECT x*3 AS a, y+10 AS b FROM t1) WHERE a>b; - ** \ \_____________ subquery __________/ / - ** \_____________________ outer query ______________________________/ - ** - ** We look at every expression in the outer query and every place we see - ** "a" we substitute "x*3" and every place we see "b" we substitute "y+10". 
- */ - pList = pParent->pEList; - for(i=0; inExpr; i++){ - if( pList->a[i].zName==0 ){ - char *zName = sqlite3DbStrDup(db, pList->a[i].zSpan); - sqlite3Dequote(zName); - pList->a[i].zName = zName; - } - } - substExprList(db, pParent->pEList, iParent, pSub->pEList); - if( isAgg ){ - substExprList(db, pParent->pGroupBy, iParent, pSub->pEList); - pParent->pHaving = substExpr(db, pParent->pHaving, iParent, pSub->pEList); - } - if( pSub->pOrderBy ){ - assert( pParent->pOrderBy==0 ); - pParent->pOrderBy = pSub->pOrderBy; - pSub->pOrderBy = 0; - }else if( pParent->pOrderBy ){ - substExprList(db, pParent->pOrderBy, iParent, pSub->pEList); - } - if( pSub->pWhere ){ - pWhere = sqlite3ExprDup(db, pSub->pWhere, 0); - }else{ - pWhere = 0; - } - if( subqueryIsAgg ){ - assert( pParent->pHaving==0 ); - pParent->pHaving = pParent->pWhere; - pParent->pWhere = pWhere; - pParent->pHaving = substExpr(db, pParent->pHaving, iParent, pSub->pEList); - pParent->pHaving = sqlite3ExprAnd(db, pParent->pHaving, - sqlite3ExprDup(db, pSub->pHaving, 0)); - assert( pParent->pGroupBy==0 ); - pParent->pGroupBy = sqlite3ExprListDup(db, pSub->pGroupBy, 0); - }else{ - pParent->pWhere = substExpr(db, pParent->pWhere, iParent, pSub->pEList); - pParent->pWhere = sqlite3ExprAnd(db, pParent->pWhere, pWhere); - } - - /* The flattened query is distinct if either the inner or the - ** outer query is distinct. - */ - pParent->selFlags |= pSub->selFlags & SF_Distinct; - - /* - ** SELECT ... FROM (SELECT ... LIMIT a OFFSET b) LIMIT x OFFSET y; - ** - ** One is tempted to try to add a and b to combine the limits. But this - ** does not work if either limit is negative. - */ - if( pSub->pLimit ){ - pParent->pLimit = pSub->pLimit; - pSub->pLimit = 0; - } - } - - /* Finially, delete what is left of the subquery and return - ** success. - */ - sqlite3SelectDelete(db, pSub1); - - return 1; -} -#endif /* !defined(SQLITE_OMIT_SUBQUERY) || !defined(SQLITE_OMIT_VIEW) */ - -/* -** Based on the contents of the AggInfo structure indicated by the first -** argument, this function checks if the following are true: -** -** * the query contains just a single aggregate function, -** * the aggregate function is either min() or max(), and -** * the argument to the aggregate function is a column value. -** -** If all of the above are true, then WHERE_ORDERBY_MIN or WHERE_ORDERBY_MAX -** is returned as appropriate. Also, *ppMinMax is set to point to the -** list of arguments passed to the aggregate before returning. -** -** Or, if the conditions above are not met, *ppMinMax is set to 0 and -** WHERE_ORDERBY_NORMAL is returned. -*/ -static u8 minMaxQuery(AggInfo *pAggInfo, ExprList **ppMinMax){ - int eRet = WHERE_ORDERBY_NORMAL; /* Return value */ - - *ppMinMax = 0; - if( pAggInfo->nFunc==1 ){ - Expr *pExpr = pAggInfo->aFunc[0].pExpr; /* Aggregate function */ - ExprList *pEList = pExpr->x.pList; /* Arguments to agg function */ - - assert( pExpr->op==TK_AGG_FUNCTION ); - if( pEList && pEList->nExpr==1 && pEList->a[0].pExpr->op==TK_AGG_COLUMN ){ - const char *zFunc = pExpr->u.zToken; - if( sqlite3StrICmp(zFunc, "min")==0 ){ - eRet = WHERE_ORDERBY_MIN; - *ppMinMax = pEList; - }else if( sqlite3StrICmp(zFunc, "max")==0 ){ - eRet = WHERE_ORDERBY_MAX; - *ppMinMax = pEList; - } - } - } - - assert( *ppMinMax==0 || (*ppMinMax)->nExpr==1 ); - return eRet; -} - -/* -** The select statement passed as the first argument is an aggregate query. -** The second argment is the associated aggregate-info object. 
This -** function tests if the SELECT is of the form: -** -** SELECT count(*) FROM -** -** where table is a database table, not a sub-select or view. If the query -** does match this pattern, then a pointer to the Table object representing -** is returned. Otherwise, 0 is returned. -*/ -static Table *isSimpleCount(Select *p, AggInfo *pAggInfo){ - Table *pTab; - Expr *pExpr; - - assert( !p->pGroupBy ); - - if( p->pWhere || p->pEList->nExpr!=1 - || p->pSrc->nSrc!=1 || p->pSrc->a[0].pSelect - ){ - return 0; - } - pTab = p->pSrc->a[0].pTab; - pExpr = p->pEList->a[0].pExpr; - assert( pTab && !pTab->pSelect && pExpr ); - - if( IsVirtual(pTab) ) return 0; - if( pExpr->op!=TK_AGG_FUNCTION ) return 0; - if( NEVER(pAggInfo->nFunc==0) ) return 0; - if( (pAggInfo->aFunc[0].pFunc->funcFlags&SQLITE_FUNC_COUNT)==0 ) return 0; - if( pExpr->flags&EP_Distinct ) return 0; - - return pTab; -} - -/* -** If the source-list item passed as an argument was augmented with an -** INDEXED BY clause, then try to locate the specified index. If there -** was such a clause and the named index cannot be found, return -** SQLITE_ERROR and leave an error in pParse. Otherwise, populate -** pFrom->pIndex and return SQLITE_OK. -*/ -SQLITE_PRIVATE int sqlite3IndexedByLookup(Parse *pParse, struct SrcList_item *pFrom){ - if( pFrom->pTab && pFrom->zIndex ){ - Table *pTab = pFrom->pTab; - char *zIndex = pFrom->zIndex; - Index *pIdx; - for(pIdx=pTab->pIndex; - pIdx && sqlite3StrICmp(pIdx->zName, zIndex); - pIdx=pIdx->pNext - ); - if( !pIdx ){ - sqlite3ErrorMsg(pParse, "no such index: %s", zIndex, 0); - pParse->checkSchema = 1; - return SQLITE_ERROR; - } - pFrom->pIndex = pIdx; - } - return SQLITE_OK; -} -/* -** Detect compound SELECT statements that use an ORDER BY clause with -** an alternative collating sequence. -** -** SELECT ... FROM t1 EXCEPT SELECT ... FROM t2 ORDER BY .. COLLATE ... -** -** These are rewritten as a subquery: -** -** SELECT * FROM (SELECT ... FROM t1 EXCEPT SELECT ... FROM t2) -** ORDER BY ... COLLATE ... -** -** This transformation is necessary because the multiSelectOrderBy() routine -** above that generates the code for a compound SELECT with an ORDER BY clause -** uses a merge algorithm that requires the same collating sequence on the -** result columns as on the ORDER BY clause. See ticket -** http://www.sqlite.org/src/info/6709574d2a -** -** This transformation is only needed for EXCEPT, INTERSECT, and UNION. -** The UNION ALL operator works fine with multiSelectOrderBy() even when -** there are COLLATE terms in the ORDER BY. -*/ -static int convertCompoundSelectToSubquery(Walker *pWalker, Select *p){ - int i; - Select *pNew; - Select *pX; - sqlite3 *db; - struct ExprList_item *a; - SrcList *pNewSrc; - Parse *pParse; - Token dummy; - - if( p->pPrior==0 ) return WRC_Continue; - if( p->pOrderBy==0 ) return WRC_Continue; - for(pX=p; pX && (pX->op==TK_ALL || pX->op==TK_SELECT); pX=pX->pPrior){} - if( pX==0 ) return WRC_Continue; - a = p->pOrderBy->a; - for(i=p->pOrderBy->nExpr-1; i>=0; i--){ - if( a[i].pExpr->flags & EP_Collate ) break; - } - if( i<0 ) return WRC_Continue; - - /* If we reach this point, that means the transformation is required. 
*/ - - pParse = pWalker->pParse; - db = pParse->db; - pNew = sqlite3DbMallocZero(db, sizeof(*pNew) ); - if( pNew==0 ) return WRC_Abort; - memset(&dummy, 0, sizeof(dummy)); - pNewSrc = sqlite3SrcListAppendFromTerm(pParse,0,0,0,&dummy,pNew,0,0); - if( pNewSrc==0 ) return WRC_Abort; - *pNew = *p; - p->pSrc = pNewSrc; - p->pEList = sqlite3ExprListAppend(pParse, 0, sqlite3Expr(db, TK_ALL, 0)); - p->op = TK_SELECT; - p->pWhere = 0; - pNew->pGroupBy = 0; - pNew->pHaving = 0; - pNew->pOrderBy = 0; - p->pPrior = 0; - p->pNext = 0; - p->selFlags &= ~SF_Compound; - assert( pNew->pPrior!=0 ); - pNew->pPrior->pNext = pNew; - pNew->pLimit = 0; - pNew->pOffset = 0; - return WRC_Continue; -} - -#ifndef SQLITE_OMIT_CTE -/* -** Argument pWith (which may be NULL) points to a linked list of nested -** WITH contexts, from inner to outermost. If the table identified by -** FROM clause element pItem is really a common-table-expression (CTE) -** then return a pointer to the CTE definition for that table. Otherwise -** return NULL. -** -** If a non-NULL value is returned, set *ppContext to point to the With -** object that the returned CTE belongs to. -*/ -static struct Cte *searchWith( - With *pWith, /* Current outermost WITH clause */ - struct SrcList_item *pItem, /* FROM clause element to resolve */ - With **ppContext /* OUT: WITH clause return value belongs to */ -){ - const char *zName; - if( pItem->zDatabase==0 && (zName = pItem->zName)!=0 ){ - With *p; - for(p=pWith; p; p=p->pOuter){ - int i; - for(i=0; inCte; i++){ - if( sqlite3StrICmp(zName, p->a[i].zName)==0 ){ - *ppContext = p; - return &p->a[i]; - } - } - } - } - return 0; -} - -/* The code generator maintains a stack of active WITH clauses -** with the inner-most WITH clause being at the top of the stack. -** -** This routine pushes the WITH clause passed as the second argument -** onto the top of the stack. If argument bFree is true, then this -** WITH clause will never be popped from the stack. In this case it -** should be freed along with the Parse object. In other cases, when -** bFree==0, the With object will be freed along with the SELECT -** statement with which it is associated. -*/ -SQLITE_PRIVATE void sqlite3WithPush(Parse *pParse, With *pWith, u8 bFree){ - assert( bFree==0 || pParse->pWith==0 ); - if( pWith ){ - pWith->pOuter = pParse->pWith; - pParse->pWith = pWith; - pParse->bFreeWith = bFree; - } -} - -/* -** This function checks if argument pFrom refers to a CTE declared by -** a WITH clause on the stack currently maintained by the parser. And, -** if currently processing a CTE expression, if it is a recursive -** reference to the current CTE. -** -** If pFrom falls into either of the two categories above, pFrom->pTab -** and other fields are populated accordingly. The caller should check -** (pFrom->pTab!=0) to determine whether or not a successful match -** was found. -** -** Whether or not a match is found, SQLITE_OK is returned if no error -** occurs. If an error does occur, an error message is stored in the -** parser and some error code other than SQLITE_OK returned. 
-*/ -static int withExpand( - Walker *pWalker, - struct SrcList_item *pFrom -){ - Parse *pParse = pWalker->pParse; - sqlite3 *db = pParse->db; - struct Cte *pCte; /* Matched CTE (or NULL if no match) */ - With *pWith; /* WITH clause that pCte belongs to */ - - assert( pFrom->pTab==0 ); - - pCte = searchWith(pParse->pWith, pFrom, &pWith); - if( pCte ){ - Table *pTab; - ExprList *pEList; - Select *pSel; - Select *pLeft; /* Left-most SELECT statement */ - int bMayRecursive; /* True if compound joined by UNION [ALL] */ - With *pSavedWith; /* Initial value of pParse->pWith */ - - /* If pCte->zErr is non-NULL at this point, then this is an illegal - ** recursive reference to CTE pCte. Leave an error in pParse and return - ** early. If pCte->zErr is NULL, then this is not a recursive reference. - ** In this case, proceed. */ - if( pCte->zErr ){ - sqlite3ErrorMsg(pParse, pCte->zErr, pCte->zName); - return SQLITE_ERROR; - } - - assert( pFrom->pTab==0 ); - pFrom->pTab = pTab = sqlite3DbMallocZero(db, sizeof(Table)); - if( pTab==0 ) return WRC_Abort; - pTab->nRef = 1; - pTab->zName = sqlite3DbStrDup(db, pCte->zName); - pTab->iPKey = -1; - pTab->nRowLogEst = 200; assert( 200==sqlite3LogEst(1048576) ); - pTab->tabFlags |= TF_Ephemeral; - pFrom->pSelect = sqlite3SelectDup(db, pCte->pSelect, 0); - if( db->mallocFailed ) return SQLITE_NOMEM; - assert( pFrom->pSelect ); - - /* Check if this is a recursive CTE. */ - pSel = pFrom->pSelect; - bMayRecursive = ( pSel->op==TK_ALL || pSel->op==TK_UNION ); - if( bMayRecursive ){ - int i; - SrcList *pSrc = pFrom->pSelect->pSrc; - for(i=0; inSrc; i++){ - struct SrcList_item *pItem = &pSrc->a[i]; - if( pItem->zDatabase==0 - && pItem->zName!=0 - && 0==sqlite3StrICmp(pItem->zName, pCte->zName) - ){ - pItem->pTab = pTab; - pItem->isRecursive = 1; - pTab->nRef++; - pSel->selFlags |= SF_Recursive; - } - } - } - - /* Only one recursive reference is permitted. */ - if( pTab->nRef>2 ){ - sqlite3ErrorMsg( - pParse, "multiple references to recursive table: %s", pCte->zName - ); - return SQLITE_ERROR; - } - assert( pTab->nRef==1 || ((pSel->selFlags&SF_Recursive) && pTab->nRef==2 )); - - pCte->zErr = "circular reference: %s"; - pSavedWith = pParse->pWith; - pParse->pWith = pWith; - sqlite3WalkSelect(pWalker, bMayRecursive ? pSel->pPrior : pSel); - - for(pLeft=pSel; pLeft->pPrior; pLeft=pLeft->pPrior); - pEList = pLeft->pEList; - if( pCte->pCols ){ - if( pEList->nExpr!=pCte->pCols->nExpr ){ - sqlite3ErrorMsg(pParse, "table %s has %d values for %d columns", - pCte->zName, pEList->nExpr, pCte->pCols->nExpr - ); - pParse->pWith = pSavedWith; - return SQLITE_ERROR; - } - pEList = pCte->pCols; - } - - selectColumnsFromExprList(pParse, pEList, &pTab->nCol, &pTab->aCol); - if( bMayRecursive ){ - if( pSel->selFlags & SF_Recursive ){ - pCte->zErr = "multiple recursive references: %s"; - }else{ - pCte->zErr = "recursive reference in a subquery: %s"; - } - sqlite3WalkSelect(pWalker, pSel); - } - pCte->zErr = 0; - pParse->pWith = pSavedWith; - } - - return SQLITE_OK; -} -#endif - -#ifndef SQLITE_OMIT_CTE -/* -** If the SELECT passed as the second argument has an associated WITH -** clause, pop it from the stack stored as part of the Parse object. -** -** This function is used as the xSelectCallback2() callback by -** sqlite3SelectExpand() when walking a SELECT tree to resolve table -** names and other FROM clause elements. 
-*/ -static void selectPopWith(Walker *pWalker, Select *p){ - Parse *pParse = pWalker->pParse; - With *pWith = findRightmost(p)->pWith; - if( pWith!=0 ){ - assert( pParse->pWith==pWith ); - pParse->pWith = pWith->pOuter; - } -} -#else -#define selectPopWith 0 -#endif - -/* -** This routine is a Walker callback for "expanding" a SELECT statement. -** "Expanding" means to do the following: -** -** (1) Make sure VDBE cursor numbers have been assigned to every -** element of the FROM clause. -** -** (2) Fill in the pTabList->a[].pTab fields in the SrcList that -** defines FROM clause. When views appear in the FROM clause, -** fill pTabList->a[].pSelect with a copy of the SELECT statement -** that implements the view. A copy is made of the view's SELECT -** statement so that we can freely modify or delete that statement -** without worrying about messing up the presistent representation -** of the view. -** -** (3) Add terms to the WHERE clause to accomodate the NATURAL keyword -** on joins and the ON and USING clause of joins. -** -** (4) Scan the list of columns in the result set (pEList) looking -** for instances of the "*" operator or the TABLE.* operator. -** If found, expand each "*" to be every column in every table -** and TABLE.* to be every column in TABLE. -** -*/ -static int selectExpander(Walker *pWalker, Select *p){ - Parse *pParse = pWalker->pParse; - int i, j, k; - SrcList *pTabList; - ExprList *pEList; - struct SrcList_item *pFrom; - sqlite3 *db = pParse->db; - Expr *pE, *pRight, *pExpr; - u16 selFlags = p->selFlags; - - p->selFlags |= SF_Expanded; - if( db->mallocFailed ){ - return WRC_Abort; - } - if( NEVER(p->pSrc==0) || (selFlags & SF_Expanded)!=0 ){ - return WRC_Prune; - } - pTabList = p->pSrc; - pEList = p->pEList; - sqlite3WithPush(pParse, findRightmost(p)->pWith, 0); - - /* Make sure cursor numbers have been assigned to all entries in - ** the FROM clause of the SELECT statement. - */ - sqlite3SrcListAssignCursors(pParse, pTabList); - - /* Look up every table named in the FROM clause of the select. If - ** an entry of the FROM clause is a subquery instead of a table or view, - ** then create a transient table structure to describe the subquery. - */ - for(i=0, pFrom=pTabList->a; inSrc; i++, pFrom++){ - Table *pTab; - assert( pFrom->isRecursive==0 || pFrom->pTab ); - if( pFrom->isRecursive ) continue; - if( pFrom->pTab!=0 ){ - /* This statement has already been prepared. There is no need - ** to go further. 
*/ - assert( i==0 ); -#ifndef SQLITE_OMIT_CTE - selectPopWith(pWalker, p); -#endif - return WRC_Prune; - } -#ifndef SQLITE_OMIT_CTE - if( withExpand(pWalker, pFrom) ) return WRC_Abort; - if( pFrom->pTab ) {} else -#endif - if( pFrom->zName==0 ){ -#ifndef SQLITE_OMIT_SUBQUERY - Select *pSel = pFrom->pSelect; - /* A sub-query in the FROM clause of a SELECT */ - assert( pSel!=0 ); - assert( pFrom->pTab==0 ); - sqlite3WalkSelect(pWalker, pSel); - pFrom->pTab = pTab = sqlite3DbMallocZero(db, sizeof(Table)); - if( pTab==0 ) return WRC_Abort; - pTab->nRef = 1; - pTab->zName = sqlite3MPrintf(db, "sqlite_sq_%p", (void*)pTab); - while( pSel->pPrior ){ pSel = pSel->pPrior; } - selectColumnsFromExprList(pParse, pSel->pEList, &pTab->nCol, &pTab->aCol); - pTab->iPKey = -1; - pTab->nRowLogEst = 200; assert( 200==sqlite3LogEst(1048576) ); - pTab->tabFlags |= TF_Ephemeral; -#endif - }else{ - /* An ordinary table or view name in the FROM clause */ - assert( pFrom->pTab==0 ); - pFrom->pTab = pTab = sqlite3LocateTableItem(pParse, 0, pFrom); - if( pTab==0 ) return WRC_Abort; - if( pTab->nRef==0xffff ){ - sqlite3ErrorMsg(pParse, "too many references to \"%s\": max 65535", - pTab->zName); - pFrom->pTab = 0; - return WRC_Abort; - } - pTab->nRef++; -#if !defined(SQLITE_OMIT_VIEW) || !defined (SQLITE_OMIT_VIRTUALTABLE) - if( pTab->pSelect || IsVirtual(pTab) ){ - /* We reach here if the named table is a really a view */ - if( sqlite3ViewGetColumnNames(pParse, pTab) ) return WRC_Abort; - assert( pFrom->pSelect==0 ); - pFrom->pSelect = sqlite3SelectDup(db, pTab->pSelect, 0); - sqlite3WalkSelect(pWalker, pFrom->pSelect); - } -#endif - } - - /* Locate the index named by the INDEXED BY clause, if any. */ - if( sqlite3IndexedByLookup(pParse, pFrom) ){ - return WRC_Abort; - } - } - - /* Process NATURAL keywords, and ON and USING clauses of joins. - */ - if( db->mallocFailed || sqliteProcessJoin(pParse, p) ){ - return WRC_Abort; - } - - /* For every "*" that occurs in the column list, insert the names of - ** all columns in all tables. And for every TABLE.* insert the names - ** of all columns in TABLE. The parser inserted a special expression - ** with the TK_ALL operator for each "*" that it found in the column list. - ** The following code just has to locate the TK_ALL expressions and expand - ** each one to the list of all columns in all tables. - ** - ** The first loop just checks to see if there are any "*" operators - ** that need expanding. - */ - for(k=0; knExpr; k++){ - pE = pEList->a[k].pExpr; - if( pE->op==TK_ALL ) break; - assert( pE->op!=TK_DOT || pE->pRight!=0 ); - assert( pE->op!=TK_DOT || (pE->pLeft!=0 && pE->pLeft->op==TK_ID) ); - if( pE->op==TK_DOT && pE->pRight->op==TK_ALL ) break; - } - if( knExpr ){ - /* - ** If we get here it means the result set contains one or more "*" - ** operators that need to be expanded. Loop through each expression - ** in the result set and expand them one by one. - */ - struct ExprList_item *a = pEList->a; - ExprList *pNew = 0; - int flags = pParse->db->flags; - int longNames = (flags & SQLITE_FullColNames)!=0 - && (flags & SQLITE_ShortColNames)==0; - - /* When processing FROM-clause subqueries, it is always the case - ** that full_column_names=OFF and short_column_names=ON. The - ** sqlite3ResultSetOfSelect() routine makes it so. 
*/ - assert( (p->selFlags & SF_NestedFrom)==0 - || ((flags & SQLITE_FullColNames)==0 && - (flags & SQLITE_ShortColNames)!=0) ); - - for(k=0; knExpr; k++){ - pE = a[k].pExpr; - pRight = pE->pRight; - assert( pE->op!=TK_DOT || pRight!=0 ); - if( pE->op!=TK_ALL && (pE->op!=TK_DOT || pRight->op!=TK_ALL) ){ - /* This particular expression does not need to be expanded. - */ - pNew = sqlite3ExprListAppend(pParse, pNew, a[k].pExpr); - if( pNew ){ - pNew->a[pNew->nExpr-1].zName = a[k].zName; - pNew->a[pNew->nExpr-1].zSpan = a[k].zSpan; - a[k].zName = 0; - a[k].zSpan = 0; - } - a[k].pExpr = 0; - }else{ - /* This expression is a "*" or a "TABLE.*" and needs to be - ** expanded. */ - int tableSeen = 0; /* Set to 1 when TABLE matches */ - char *zTName = 0; /* text of name of TABLE */ - if( pE->op==TK_DOT ){ - assert( pE->pLeft!=0 ); - assert( !ExprHasProperty(pE->pLeft, EP_IntValue) ); - zTName = pE->pLeft->u.zToken; - } - for(i=0, pFrom=pTabList->a; inSrc; i++, pFrom++){ - Table *pTab = pFrom->pTab; - Select *pSub = pFrom->pSelect; - char *zTabName = pFrom->zAlias; - const char *zSchemaName = 0; - int iDb; - if( zTabName==0 ){ - zTabName = pTab->zName; - } - if( db->mallocFailed ) break; - if( pSub==0 || (pSub->selFlags & SF_NestedFrom)==0 ){ - pSub = 0; - if( zTName && sqlite3StrICmp(zTName, zTabName)!=0 ){ - continue; - } - iDb = sqlite3SchemaToIndex(db, pTab->pSchema); - zSchemaName = iDb>=0 ? db->aDb[iDb].zName : "*"; - } - for(j=0; jnCol; j++){ - char *zName = pTab->aCol[j].zName; - char *zColname; /* The computed column name */ - char *zToFree; /* Malloced string that needs to be freed */ - Token sColname; /* Computed column name as a token */ - - assert( zName ); - if( zTName && pSub - && sqlite3MatchSpanName(pSub->pEList->a[j].zSpan, 0, zTName, 0)==0 - ){ - continue; - } - - /* If a column is marked as 'hidden' (currently only possible - ** for virtual tables), do not include it in the expanded - ** result-set list. - */ - if( IsHiddenColumn(&pTab->aCol[j]) ){ - assert(IsVirtual(pTab)); - continue; - } - tableSeen = 1; - - if( i>0 && zTName==0 ){ - if( (pFrom->jointype & JT_NATURAL)!=0 - && tableAndColumnIndex(pTabList, i, zName, 0, 0) - ){ - /* In a NATURAL join, omit the join columns from the - ** table to the right of the join */ - continue; - } - if( sqlite3IdListIndex(pFrom->pUsing, zName)>=0 ){ - /* In a join with a USING clause, omit columns in the - ** using clause from the table on the right. 
*/ - continue; - } - } - pRight = sqlite3Expr(db, TK_ID, zName); - zColname = zName; - zToFree = 0; - if( longNames || pTabList->nSrc>1 ){ - Expr *pLeft; - pLeft = sqlite3Expr(db, TK_ID, zTabName); - pExpr = sqlite3PExpr(pParse, TK_DOT, pLeft, pRight, 0); - if( zSchemaName ){ - pLeft = sqlite3Expr(db, TK_ID, zSchemaName); - pExpr = sqlite3PExpr(pParse, TK_DOT, pLeft, pExpr, 0); - } - if( longNames ){ - zColname = sqlite3MPrintf(db, "%s.%s", zTabName, zName); - zToFree = zColname; - } - }else{ - pExpr = pRight; - } - pNew = sqlite3ExprListAppend(pParse, pNew, pExpr); - sColname.z = zColname; - sColname.n = sqlite3Strlen30(zColname); - sqlite3ExprListSetName(pParse, pNew, &sColname, 0); - if( pNew && (p->selFlags & SF_NestedFrom)!=0 ){ - struct ExprList_item *pX = &pNew->a[pNew->nExpr-1]; - if( pSub ){ - pX->zSpan = sqlite3DbStrDup(db, pSub->pEList->a[j].zSpan); - testcase( pX->zSpan==0 ); - }else{ - pX->zSpan = sqlite3MPrintf(db, "%s.%s.%s", - zSchemaName, zTabName, zColname); - testcase( pX->zSpan==0 ); - } - pX->bSpanIsTab = 1; - } - sqlite3DbFree(db, zToFree); - } - } - if( !tableSeen ){ - if( zTName ){ - sqlite3ErrorMsg(pParse, "no such table: %s", zTName); - }else{ - sqlite3ErrorMsg(pParse, "no tables specified"); - } - } - } - } - sqlite3ExprListDelete(db, pEList); - p->pEList = pNew; - } -#if SQLITE_MAX_COLUMN - if( p->pEList && p->pEList->nExpr>db->aLimit[SQLITE_LIMIT_COLUMN] ){ - sqlite3ErrorMsg(pParse, "too many columns in result set"); - } -#endif - return WRC_Continue; -} - -/* -** No-op routine for the parse-tree walker. -** -** When this routine is the Walker.xExprCallback then expression trees -** are walked without any actions being taken at each node. Presumably, -** when this routine is used for Walker.xExprCallback then -** Walker.xSelectCallback is set to do something useful for every -** subquery in the parser tree. -*/ -static int exprWalkNoop(Walker *NotUsed, Expr *NotUsed2){ - UNUSED_PARAMETER2(NotUsed, NotUsed2); - return WRC_Continue; -} - -/* -** This routine "expands" a SELECT statement and all of its subqueries. -** For additional information on what it means to "expand" a SELECT -** statement, see the comment on the selectExpand worker callback above. -** -** Expanding a SELECT statement is the first step in processing a -** SELECT statement. The SELECT statement must be expanded before -** name resolution is performed. -** -** If anything goes wrong, an error message is written into pParse. -** The calling function can detect the problem by looking at pParse->nErr -** and/or pParse->db->mallocFailed. -*/ -static void sqlite3SelectExpand(Parse *pParse, Select *pSelect){ - Walker w; - memset(&w, 0, sizeof(w)); - w.xExprCallback = exprWalkNoop; - w.pParse = pParse; - if( pParse->hasCompound ){ - w.xSelectCallback = convertCompoundSelectToSubquery; - sqlite3WalkSelect(&w, pSelect); - } - w.xSelectCallback = selectExpander; - w.xSelectCallback2 = selectPopWith; - sqlite3WalkSelect(&w, pSelect); -} - - -#ifndef SQLITE_OMIT_SUBQUERY -/* -** This is a Walker.xSelectCallback callback for the sqlite3SelectTypeInfo() -** interface. -** -** For each FROM-clause subquery, add Column.zType and Column.zColl -** information to the Table structure that represents the result set -** of that subquery. -** -** The Table structure that represents the result set was constructed -** by selectExpander() but the type and collation information was omitted -** at that point because identifiers had not yet been resolved. This -** routine is called after identifier resolution. 
-*/ -static void selectAddSubqueryTypeInfo(Walker *pWalker, Select *p){ - Parse *pParse; - int i; - SrcList *pTabList; - struct SrcList_item *pFrom; - - assert( p->selFlags & SF_Resolved ); - if( (p->selFlags & SF_HasTypeInfo)==0 ){ - p->selFlags |= SF_HasTypeInfo; - pParse = pWalker->pParse; - pTabList = p->pSrc; - for(i=0, pFrom=pTabList->a; inSrc; i++, pFrom++){ - Table *pTab = pFrom->pTab; - if( ALWAYS(pTab!=0) && (pTab->tabFlags & TF_Ephemeral)!=0 ){ - /* A sub-query in the FROM clause of a SELECT */ - Select *pSel = pFrom->pSelect; - if( pSel ){ - while( pSel->pPrior ) pSel = pSel->pPrior; - selectAddColumnTypeAndCollation(pParse, pTab, pSel); - } - } - } - } -} -#endif - - -/* -** This routine adds datatype and collating sequence information to -** the Table structures of all FROM-clause subqueries in a -** SELECT statement. -** -** Use this routine after name resolution. -*/ -static void sqlite3SelectAddTypeInfo(Parse *pParse, Select *pSelect){ -#ifndef SQLITE_OMIT_SUBQUERY - Walker w; - memset(&w, 0, sizeof(w)); - w.xSelectCallback2 = selectAddSubqueryTypeInfo; - w.xExprCallback = exprWalkNoop; - w.pParse = pParse; - sqlite3WalkSelect(&w, pSelect); -#endif -} - - -/* -** This routine sets up a SELECT statement for processing. The -** following is accomplished: -** -** * VDBE Cursor numbers are assigned to all FROM-clause terms. -** * Ephemeral Table objects are created for all FROM-clause subqueries. -** * ON and USING clauses are shifted into WHERE statements -** * Wildcards "*" and "TABLE.*" in result sets are expanded. -** * Identifiers in expression are matched to tables. -** -** This routine acts recursively on all subqueries within the SELECT. -*/ -SQLITE_PRIVATE void sqlite3SelectPrep( - Parse *pParse, /* The parser context */ - Select *p, /* The SELECT statement being coded. */ - NameContext *pOuterNC /* Name context for container */ -){ - sqlite3 *db; - if( NEVER(p==0) ) return; - db = pParse->db; - if( db->mallocFailed ) return; - if( p->selFlags & SF_HasTypeInfo ) return; - sqlite3SelectExpand(pParse, p); - if( pParse->nErr || db->mallocFailed ) return; - sqlite3ResolveSelectNames(pParse, p, pOuterNC); - if( pParse->nErr || db->mallocFailed ) return; - sqlite3SelectAddTypeInfo(pParse, p); -} - -/* -** Reset the aggregate accumulator. -** -** The aggregate accumulator is a set of memory cells that hold -** intermediate results while calculating an aggregate. This -** routine generates code that stores NULLs in all of those memory -** cells. 
-*/ -static void resetAccumulator(Parse *pParse, AggInfo *pAggInfo){ - Vdbe *v = pParse->pVdbe; - int i; - struct AggInfo_func *pFunc; - int nReg = pAggInfo->nFunc + pAggInfo->nColumn; - if( nReg==0 ) return; -#ifdef SQLITE_DEBUG - /* Verify that all AggInfo registers are within the range specified by - ** AggInfo.mnReg..AggInfo.mxReg */ - assert( nReg==pAggInfo->mxReg-pAggInfo->mnReg+1 ); - for(i=0; inColumn; i++){ - assert( pAggInfo->aCol[i].iMem>=pAggInfo->mnReg - && pAggInfo->aCol[i].iMem<=pAggInfo->mxReg ); - } - for(i=0; inFunc; i++){ - assert( pAggInfo->aFunc[i].iMem>=pAggInfo->mnReg - && pAggInfo->aFunc[i].iMem<=pAggInfo->mxReg ); - } -#endif - sqlite3VdbeAddOp3(v, OP_Null, 0, pAggInfo->mnReg, pAggInfo->mxReg); - for(pFunc=pAggInfo->aFunc, i=0; inFunc; i++, pFunc++){ - if( pFunc->iDistinct>=0 ){ - Expr *pE = pFunc->pExpr; - assert( !ExprHasProperty(pE, EP_xIsSelect) ); - if( pE->x.pList==0 || pE->x.pList->nExpr!=1 ){ - sqlite3ErrorMsg(pParse, "DISTINCT aggregates must have exactly one " - "argument"); - pFunc->iDistinct = -1; - }else{ - KeyInfo *pKeyInfo = keyInfoFromExprList(pParse, pE->x.pList, 0, 0); - sqlite3VdbeAddOp4(v, OP_OpenEphemeral, pFunc->iDistinct, 0, 0, - (char*)pKeyInfo, P4_KEYINFO); - } - } - } -} - -/* -** Invoke the OP_AggFinalize opcode for every aggregate function -** in the AggInfo structure. -*/ -static void finalizeAggFunctions(Parse *pParse, AggInfo *pAggInfo){ - Vdbe *v = pParse->pVdbe; - int i; - struct AggInfo_func *pF; - for(i=0, pF=pAggInfo->aFunc; inFunc; i++, pF++){ - ExprList *pList = pF->pExpr->x.pList; - assert( !ExprHasProperty(pF->pExpr, EP_xIsSelect) ); - sqlite3VdbeAddOp4(v, OP_AggFinal, pF->iMem, pList ? pList->nExpr : 0, 0, - (void*)pF->pFunc, P4_FUNCDEF); - } -} - -/* -** Update the accumulator memory cells for an aggregate based on -** the current cursor position. -*/ -static void updateAccumulator(Parse *pParse, AggInfo *pAggInfo){ - Vdbe *v = pParse->pVdbe; - int i; - int regHit = 0; - int addrHitTest = 0; - struct AggInfo_func *pF; - struct AggInfo_col *pC; - - pAggInfo->directMode = 1; - for(i=0, pF=pAggInfo->aFunc; inFunc; i++, pF++){ - int nArg; - int addrNext = 0; - int regAgg; - ExprList *pList = pF->pExpr->x.pList; - assert( !ExprHasProperty(pF->pExpr, EP_xIsSelect) ); - if( pList ){ - nArg = pList->nExpr; - regAgg = sqlite3GetTempRange(pParse, nArg); - sqlite3ExprCodeExprList(pParse, pList, regAgg, SQLITE_ECEL_DUP); - }else{ - nArg = 0; - regAgg = 0; - } - if( pF->iDistinct>=0 ){ - addrNext = sqlite3VdbeMakeLabel(v); - assert( nArg==1 ); - codeDistinct(pParse, pF->iDistinct, addrNext, 1, regAgg); - } - if( pF->pFunc->funcFlags & SQLITE_FUNC_NEEDCOLL ){ - CollSeq *pColl = 0; - struct ExprList_item *pItem; - int j; - assert( pList!=0 ); /* pList!=0 if pF->pFunc has NEEDCOLL */ - for(j=0, pItem=pList->a; !pColl && jpExpr); - } - if( !pColl ){ - pColl = pParse->db->pDfltColl; - } - if( regHit==0 && pAggInfo->nAccumulator ) regHit = ++pParse->nMem; - sqlite3VdbeAddOp4(v, OP_CollSeq, regHit, 0, 0, (char *)pColl, P4_COLLSEQ); - } - sqlite3VdbeAddOp4(v, OP_AggStep, 0, regAgg, pF->iMem, - (void*)pF->pFunc, P4_FUNCDEF); - sqlite3VdbeChangeP5(v, (u8)nArg); - sqlite3ExprCacheAffinityChange(pParse, regAgg, nArg); - sqlite3ReleaseTempRange(pParse, regAgg, nArg); - if( addrNext ){ - sqlite3VdbeResolveLabel(v, addrNext); - sqlite3ExprCacheClear(pParse); - } - } - - /* Before populating the accumulator registers, clear the column cache. 
- ** Otherwise, if any of the required column values are already present - ** in registers, sqlite3ExprCode() may use OP_SCopy to copy the value - ** to pC->iMem. But by the time the value is used, the original register - ** may have been used, invalidating the underlying buffer holding the - ** text or blob value. See ticket [883034dcb5]. - ** - ** Another solution would be to change the OP_SCopy used to copy cached - ** values to an OP_Copy. - */ - if( regHit ){ - addrHitTest = sqlite3VdbeAddOp1(v, OP_If, regHit); VdbeCoverage(v); - } - sqlite3ExprCacheClear(pParse); - for(i=0, pC=pAggInfo->aCol; inAccumulator; i++, pC++){ - sqlite3ExprCode(pParse, pC->pExpr, pC->iMem); - } - pAggInfo->directMode = 0; - sqlite3ExprCacheClear(pParse); - if( addrHitTest ){ - sqlite3VdbeJumpHere(v, addrHitTest); - } -} - -/* -** Add a single OP_Explain instruction to the VDBE to explain a simple -** count(*) query ("SELECT count(*) FROM pTab"). -*/ -#ifndef SQLITE_OMIT_EXPLAIN -static void explainSimpleCount( - Parse *pParse, /* Parse context */ - Table *pTab, /* Table being queried */ - Index *pIdx /* Index used to optimize scan, or NULL */ -){ - if( pParse->explain==2 ){ - int bCover = (pIdx!=0 && (HasRowid(pTab) || !IsPrimaryKeyIndex(pIdx))); - char *zEqp = sqlite3MPrintf(pParse->db, "SCAN TABLE %s%s%s", - pTab->zName, - bCover ? " USING COVERING INDEX " : "", - bCover ? pIdx->zName : "" - ); - sqlite3VdbeAddOp4( - pParse->pVdbe, OP_Explain, pParse->iSelectId, 0, 0, zEqp, P4_DYNAMIC - ); - } -} -#else -# define explainSimpleCount(a,b,c) -#endif - -/* -** Generate code for the SELECT statement given in the p argument. -** -** The results are returned according to the SelectDest structure. -** See comments in sqliteInt.h for further information. -** -** This routine returns the number of errors. If any errors are -** encountered, then an appropriate error message is left in -** pParse->zErrMsg. -** -** This routine does NOT free the Select structure passed in. The -** calling function needs to do that. -*/ -SQLITE_PRIVATE int sqlite3Select( - Parse *pParse, /* The parser context */ - Select *p, /* The SELECT statement being coded. */ - SelectDest *pDest /* What to do with the query results */ -){ - int i, j; /* Loop counters */ - WhereInfo *pWInfo; /* Return from sqlite3WhereBegin() */ - Vdbe *v; /* The virtual machine under construction */ - int isAgg; /* True for select lists like "count(*)" */ - ExprList *pEList; /* List of columns to extract. */ - SrcList *pTabList; /* List of tables to select from */ - Expr *pWhere; /* The WHERE clause. May be NULL */ - ExprList *pGroupBy; /* The GROUP BY clause. May be NULL */ - Expr *pHaving; /* The HAVING clause. 
May be NULL */ - int rc = 1; /* Value to return from this function */ - DistinctCtx sDistinct; /* Info on how to code the DISTINCT keyword */ - SortCtx sSort; /* Info on how to code the ORDER BY clause */ - AggInfo sAggInfo; /* Information used by aggregate queries */ - int iEnd; /* Address of the end of the query */ - sqlite3 *db; /* The database connection */ - -#ifndef SQLITE_OMIT_EXPLAIN - int iRestoreSelectId = pParse->iSelectId; - pParse->iSelectId = pParse->iNextSelectId++; -#endif - - db = pParse->db; - if( p==0 || db->mallocFailed || pParse->nErr ){ - return 1; - } - if( sqlite3AuthCheck(pParse, SQLITE_SELECT, 0, 0, 0) ) return 1; - memset(&sAggInfo, 0, sizeof(sAggInfo)); - - assert( p->pOrderBy==0 || pDest->eDest!=SRT_DistFifo ); - assert( p->pOrderBy==0 || pDest->eDest!=SRT_Fifo ); - assert( p->pOrderBy==0 || pDest->eDest!=SRT_DistQueue ); - assert( p->pOrderBy==0 || pDest->eDest!=SRT_Queue ); - if( IgnorableOrderby(pDest) ){ - assert(pDest->eDest==SRT_Exists || pDest->eDest==SRT_Union || - pDest->eDest==SRT_Except || pDest->eDest==SRT_Discard || - pDest->eDest==SRT_Queue || pDest->eDest==SRT_DistFifo || - pDest->eDest==SRT_DistQueue || pDest->eDest==SRT_Fifo); - /* If ORDER BY makes no difference in the output then neither does - ** DISTINCT so it can be removed too. */ - sqlite3ExprListDelete(db, p->pOrderBy); - p->pOrderBy = 0; - p->selFlags &= ~SF_Distinct; - } - sqlite3SelectPrep(pParse, p, 0); - memset(&sSort, 0, sizeof(sSort)); - sSort.pOrderBy = p->pOrderBy; - pTabList = p->pSrc; - pEList = p->pEList; - if( pParse->nErr || db->mallocFailed ){ - goto select_end; - } - isAgg = (p->selFlags & SF_Aggregate)!=0; - assert( pEList!=0 ); - - /* Begin generating code. - */ - v = sqlite3GetVdbe(pParse); - if( v==0 ) goto select_end; - - /* If writing to memory or generating a set - ** only a single column may be output. - */ -#ifndef SQLITE_OMIT_SUBQUERY - if( checkForMultiColumnSelectError(pParse, pDest, pEList->nExpr) ){ - goto select_end; - } -#endif - - /* Generate code for all sub-queries in the FROM clause - */ -#if !defined(SQLITE_OMIT_SUBQUERY) || !defined(SQLITE_OMIT_VIEW) - for(i=0; !p->pPrior && inSrc; i++){ - struct SrcList_item *pItem = &pTabList->a[i]; - SelectDest dest; - Select *pSub = pItem->pSelect; - int isAggSub; - - if( pSub==0 ) continue; - - /* Sometimes the code for a subquery will be generated more than - ** once, if the subquery is part of the WHERE clause in a LEFT JOIN, - ** for example. In that case, do not regenerate the code to manifest - ** a view or the co-routine to implement a view. The first instance - ** is sufficient, though the subroutine to manifest the view does need - ** to be invoked again. */ - if( pItem->addrFillSub ){ - if( pItem->viaCoroutine==0 ){ - sqlite3VdbeAddOp2(v, OP_Gosub, pItem->regReturn, pItem->addrFillSub); - } - continue; - } - - /* Increment Parse.nHeight by the height of the largest expression - ** tree referred to by this, the parent select. The child select - ** may contain expression trees of at most - ** (SQLITE_MAX_EXPR_DEPTH-Parse.nHeight) height. This is a bit - ** more conservative than necessary, but much easier than enforcing - ** an exact limit. - */ - pParse->nHeight += sqlite3SelectExprHeight(p); - - isAggSub = (pSub->selFlags & SF_Aggregate)!=0; - if( flattenSubquery(pParse, p, i, isAgg, isAggSub) ){ - /* This subquery can be absorbed into its parent. 
*/ - if( isAggSub ){ - isAgg = 1; - p->selFlags |= SF_Aggregate; - } - i = -1; - }else if( pTabList->nSrc==1 - && OptimizationEnabled(db, SQLITE_SubqCoroutine) - ){ - /* Implement a co-routine that will return a single row of the result - ** set on each invocation. - */ - int addrTop = sqlite3VdbeCurrentAddr(v)+1; - pItem->regReturn = ++pParse->nMem; - sqlite3VdbeAddOp3(v, OP_InitCoroutine, pItem->regReturn, 0, addrTop); - VdbeComment((v, "%s", pItem->pTab->zName)); - pItem->addrFillSub = addrTop; - sqlite3SelectDestInit(&dest, SRT_Coroutine, pItem->regReturn); - explainSetInteger(pItem->iSelectId, (u8)pParse->iNextSelectId); - sqlite3Select(pParse, pSub, &dest); - pItem->pTab->nRowLogEst = sqlite3LogEst(pSub->nSelectRow); - pItem->viaCoroutine = 1; - pItem->regResult = dest.iSdst; - sqlite3VdbeAddOp1(v, OP_EndCoroutine, pItem->regReturn); - sqlite3VdbeJumpHere(v, addrTop-1); - sqlite3ClearTempRegCache(pParse); - }else{ - /* Generate a subroutine that will fill an ephemeral table with - ** the content of this subquery. pItem->addrFillSub will point - ** to the address of the generated subroutine. pItem->regReturn - ** is a register allocated to hold the subroutine return address - */ - int topAddr; - int onceAddr = 0; - int retAddr; - assert( pItem->addrFillSub==0 ); - pItem->regReturn = ++pParse->nMem; - topAddr = sqlite3VdbeAddOp2(v, OP_Integer, 0, pItem->regReturn); - pItem->addrFillSub = topAddr+1; - if( pItem->isCorrelated==0 ){ - /* If the subquery is not correlated and if we are not inside of - ** a trigger, then we only need to compute the value of the subquery - ** once. */ - onceAddr = sqlite3CodeOnce(pParse); VdbeCoverage(v); - VdbeComment((v, "materialize \"%s\"", pItem->pTab->zName)); - }else{ - VdbeNoopComment((v, "materialize \"%s\"", pItem->pTab->zName)); - } - sqlite3SelectDestInit(&dest, SRT_EphemTab, pItem->iCursor); - explainSetInteger(pItem->iSelectId, (u8)pParse->iNextSelectId); - sqlite3Select(pParse, pSub, &dest); - pItem->pTab->nRowLogEst = sqlite3LogEst(pSub->nSelectRow); - if( onceAddr ) sqlite3VdbeJumpHere(v, onceAddr); - retAddr = sqlite3VdbeAddOp1(v, OP_Return, pItem->regReturn); - VdbeComment((v, "end %s", pItem->pTab->zName)); - sqlite3VdbeChangeP1(v, topAddr, retAddr); - sqlite3ClearTempRegCache(pParse); - } - if( /*pParse->nErr ||*/ db->mallocFailed ){ - goto select_end; - } - pParse->nHeight -= sqlite3SelectExprHeight(p); - pTabList = p->pSrc; - if( !IgnorableOrderby(pDest) ){ - sSort.pOrderBy = p->pOrderBy; - } - } - pEList = p->pEList; -#endif - pWhere = p->pWhere; - pGroupBy = p->pGroupBy; - pHaving = p->pHaving; - sDistinct.isTnct = (p->selFlags & SF_Distinct)!=0; - -#ifndef SQLITE_OMIT_COMPOUND_SELECT - /* If there is are a sequence of queries, do the earlier ones first. - */ - if( p->pPrior ){ - rc = multiSelect(pParse, p, pDest); - explainSetInteger(pParse->iSelectId, iRestoreSelectId); - return rc; - } -#endif - - /* If the query is DISTINCT with an ORDER BY but is not an aggregate, and - ** if the select-list is the same as the ORDER BY list, then this query - ** can be rewritten as a GROUP BY. In other words, this: - ** - ** SELECT DISTINCT xyz FROM ... ORDER BY xyz - ** - ** is transformed to: - ** - ** SELECT xyz FROM ... GROUP BY xyz - ** - ** The second form is preferred as a single index (or temp-table) may be - ** used for both the ORDER BY and DISTINCT processing. As originally - ** written the query must use a temp-table for at least one of the ORDER - ** BY and DISTINCT, and an index or separate temp-table for the other. 
- */ - if( (p->selFlags & (SF_Distinct|SF_Aggregate))==SF_Distinct - && sqlite3ExprListCompare(sSort.pOrderBy, p->pEList, -1)==0 - ){ - p->selFlags &= ~SF_Distinct; - p->pGroupBy = sqlite3ExprListDup(db, p->pEList, 0); - pGroupBy = p->pGroupBy; - sSort.pOrderBy = 0; - /* Notice that even thought SF_Distinct has been cleared from p->selFlags, - ** the sDistinct.isTnct is still set. Hence, isTnct represents the - ** original setting of the SF_Distinct flag, not the current setting */ - assert( sDistinct.isTnct ); - } - - /* If there is an ORDER BY clause, then this sorting - ** index might end up being unused if the data can be - ** extracted in pre-sorted order. If that is the case, then the - ** OP_OpenEphemeral instruction will be changed to an OP_Noop once - ** we figure out that the sorting index is not needed. The addrSortIndex - ** variable is used to facilitate that change. - */ - if( sSort.pOrderBy ){ - KeyInfo *pKeyInfo; - pKeyInfo = keyInfoFromExprList(pParse, sSort.pOrderBy, 0, 0); - sSort.iECursor = pParse->nTab++; - sSort.addrSortIndex = - sqlite3VdbeAddOp4(v, OP_OpenEphemeral, - sSort.iECursor, sSort.pOrderBy->nExpr+2, 0, - (char*)pKeyInfo, P4_KEYINFO); - }else{ - sSort.addrSortIndex = -1; - } - - /* If the output is destined for a temporary table, open that table. - */ - if( pDest->eDest==SRT_EphemTab ){ - sqlite3VdbeAddOp2(v, OP_OpenEphemeral, pDest->iSDParm, pEList->nExpr); - } - - /* Set the limiter. - */ - iEnd = sqlite3VdbeMakeLabel(v); - p->nSelectRow = LARGEST_INT64; - computeLimitRegisters(pParse, p, iEnd); - if( p->iLimit==0 && sSort.addrSortIndex>=0 ){ - sqlite3VdbeGetOp(v, sSort.addrSortIndex)->opcode = OP_SorterOpen; - sSort.sortFlags |= SORTFLAG_UseSorter; - } - - /* Open a virtual index to use for the distinct set. - */ - if( p->selFlags & SF_Distinct ){ - sDistinct.tabTnct = pParse->nTab++; - sDistinct.addrTnct = sqlite3VdbeAddOp4(v, OP_OpenEphemeral, - sDistinct.tabTnct, 0, 0, - (char*)keyInfoFromExprList(pParse, p->pEList,0,0), - P4_KEYINFO); - sqlite3VdbeChangeP5(v, BTREE_UNORDERED); - sDistinct.eTnctType = WHERE_DISTINCT_UNORDERED; - }else{ - sDistinct.eTnctType = WHERE_DISTINCT_NOOP; - } - - if( !isAgg && pGroupBy==0 ){ - /* No aggregate functions and no GROUP BY clause */ - u16 wctrlFlags = (sDistinct.isTnct ? WHERE_WANT_DISTINCT : 0); - - /* Begin the database scan. */ - pWInfo = sqlite3WhereBegin(pParse, pTabList, pWhere, sSort.pOrderBy, - p->pEList, wctrlFlags, 0); - if( pWInfo==0 ) goto select_end; - if( sqlite3WhereOutputRowCount(pWInfo) < p->nSelectRow ){ - p->nSelectRow = sqlite3WhereOutputRowCount(pWInfo); - } - if( sDistinct.isTnct && sqlite3WhereIsDistinct(pWInfo) ){ - sDistinct.eTnctType = sqlite3WhereIsDistinct(pWInfo); - } - if( sSort.pOrderBy ){ - sSort.nOBSat = sqlite3WhereIsOrdered(pWInfo); - if( sSort.nOBSat==sSort.pOrderBy->nExpr ){ - sSort.pOrderBy = 0; - } - } - - /* If sorting index that was created by a prior OP_OpenEphemeral - ** instruction ended up not being needed, then change the OP_OpenEphemeral - ** into an OP_Noop. - */ - if( sSort.addrSortIndex>=0 && sSort.pOrderBy==0 ){ - sqlite3VdbeChangeToNoop(v, sSort.addrSortIndex); - } - - /* Use the standard inner loop. */ - selectInnerLoop(pParse, p, pEList, -1, &sSort, &sDistinct, pDest, - sqlite3WhereContinueLabel(pWInfo), - sqlite3WhereBreakLabel(pWInfo)); - - /* End the database scan loop. 
- */ - sqlite3WhereEnd(pWInfo); - }else{ - /* This case when there exist aggregate functions or a GROUP BY clause - ** or both */ - NameContext sNC; /* Name context for processing aggregate information */ - int iAMem; /* First Mem address for storing current GROUP BY */ - int iBMem; /* First Mem address for previous GROUP BY */ - int iUseFlag; /* Mem address holding flag indicating that at least - ** one row of the input to the aggregator has been - ** processed */ - int iAbortFlag; /* Mem address which causes query abort if positive */ - int groupBySort; /* Rows come from source in GROUP BY order */ - int addrEnd; /* End of processing for this SELECT */ - int sortPTab = 0; /* Pseudotable used to decode sorting results */ - int sortOut = 0; /* Output register from the sorter */ - int orderByGrp = 0; /* True if the GROUP BY and ORDER BY are the same */ - - /* Remove any and all aliases between the result set and the - ** GROUP BY clause. - */ - if( pGroupBy ){ - int k; /* Loop counter */ - struct ExprList_item *pItem; /* For looping over expression in a list */ - - for(k=p->pEList->nExpr, pItem=p->pEList->a; k>0; k--, pItem++){ - pItem->u.x.iAlias = 0; - } - for(k=pGroupBy->nExpr, pItem=pGroupBy->a; k>0; k--, pItem++){ - pItem->u.x.iAlias = 0; - } - if( p->nSelectRow>100 ) p->nSelectRow = 100; - }else{ - p->nSelectRow = 1; - } - - - /* If there is both a GROUP BY and an ORDER BY clause and they are - ** identical, then it may be possible to disable the ORDER BY clause - ** on the grounds that the GROUP BY will cause elements to come out - ** in the correct order. It also may not - the GROUP BY may use a - ** database index that causes rows to be grouped together as required - ** but not actually sorted. Either way, record the fact that the - ** ORDER BY and GROUP BY clauses are the same by setting the orderByGrp - ** variable. */ - if( sqlite3ExprListCompare(pGroupBy, sSort.pOrderBy, -1)==0 ){ - orderByGrp = 1; - } - - /* Create a label to jump to when we want to abort the query */ - addrEnd = sqlite3VdbeMakeLabel(v); - - /* Convert TK_COLUMN nodes into TK_AGG_COLUMN and make entries in - ** sAggInfo for all TK_AGG_FUNCTION nodes in expressions of the - ** SELECT statement. - */ - memset(&sNC, 0, sizeof(sNC)); - sNC.pParse = pParse; - sNC.pSrcList = pTabList; - sNC.pAggInfo = &sAggInfo; - sAggInfo.mnReg = pParse->nMem+1; - sAggInfo.nSortingColumn = pGroupBy ? pGroupBy->nExpr+1 : 0; - sAggInfo.pGroupBy = pGroupBy; - sqlite3ExprAnalyzeAggList(&sNC, pEList); - sqlite3ExprAnalyzeAggList(&sNC, sSort.pOrderBy); - if( pHaving ){ - sqlite3ExprAnalyzeAggregates(&sNC, pHaving); - } - sAggInfo.nAccumulator = sAggInfo.nColumn; - for(i=0; ix.pList); - sNC.ncFlags &= ~NC_InAggFunc; - } - sAggInfo.mxReg = pParse->nMem; - if( db->mallocFailed ) goto select_end; - - /* Processing for aggregates with GROUP BY is very different and - ** much more complex than aggregates without a GROUP BY. 
- */ - if( pGroupBy ){ - KeyInfo *pKeyInfo; /* Keying information for the group by clause */ - int j1; /* A-vs-B comparision jump */ - int addrOutputRow; /* Start of subroutine that outputs a result row */ - int regOutputRow; /* Return address register for output subroutine */ - int addrSetAbort; /* Set the abort flag and return */ - int addrTopOfLoop; /* Top of the input loop */ - int addrSortingIdx; /* The OP_OpenEphemeral for the sorting index */ - int addrReset; /* Subroutine for resetting the accumulator */ - int regReset; /* Return address register for reset subroutine */ - - /* If there is a GROUP BY clause we might need a sorting index to - ** implement it. Allocate that sorting index now. If it turns out - ** that we do not need it after all, the OP_SorterOpen instruction - ** will be converted into a Noop. - */ - sAggInfo.sortingIdx = pParse->nTab++; - pKeyInfo = keyInfoFromExprList(pParse, pGroupBy, 0, 0); - addrSortingIdx = sqlite3VdbeAddOp4(v, OP_SorterOpen, - sAggInfo.sortingIdx, sAggInfo.nSortingColumn, - 0, (char*)pKeyInfo, P4_KEYINFO); - - /* Initialize memory locations used by GROUP BY aggregate processing - */ - iUseFlag = ++pParse->nMem; - iAbortFlag = ++pParse->nMem; - regOutputRow = ++pParse->nMem; - addrOutputRow = sqlite3VdbeMakeLabel(v); - regReset = ++pParse->nMem; - addrReset = sqlite3VdbeMakeLabel(v); - iAMem = pParse->nMem + 1; - pParse->nMem += pGroupBy->nExpr; - iBMem = pParse->nMem + 1; - pParse->nMem += pGroupBy->nExpr; - sqlite3VdbeAddOp2(v, OP_Integer, 0, iAbortFlag); - VdbeComment((v, "clear abort flag")); - sqlite3VdbeAddOp2(v, OP_Integer, 0, iUseFlag); - VdbeComment((v, "indicate accumulator empty")); - sqlite3VdbeAddOp3(v, OP_Null, 0, iAMem, iAMem+pGroupBy->nExpr-1); - - /* Begin a loop that will extract all source rows in GROUP BY order. - ** This might involve two separate loops with an OP_Sort in between, or - ** it might be a single loop that uses an index to extract information - ** in the right order to begin with. - */ - sqlite3VdbeAddOp2(v, OP_Gosub, regReset, addrReset); - pWInfo = sqlite3WhereBegin(pParse, pTabList, pWhere, pGroupBy, 0, - WHERE_GROUPBY | (orderByGrp ? WHERE_SORTBYGROUP : 0), 0 - ); - if( pWInfo==0 ) goto select_end; - if( sqlite3WhereIsOrdered(pWInfo)==pGroupBy->nExpr ){ - /* The optimizer is able to deliver rows in group by order so - ** we do not have to sort. The OP_OpenEphemeral table will be - ** cancelled later because we still need to use the pKeyInfo - */ - groupBySort = 0; - }else{ - /* Rows are coming out in undetermined order. We have to push - ** each row into a sorting index, terminate the first loop, - ** then loop over the sorting index in order to get the output - ** in sorted order - */ - int regBase; - int regRecord; - int nCol; - int nGroupBy; - - explainTempTable(pParse, - (sDistinct.isTnct && (p->selFlags&SF_Distinct)==0) ? 
- "DISTINCT" : "GROUP BY"); - - groupBySort = 1; - nGroupBy = pGroupBy->nExpr; - nCol = nGroupBy + 1; - j = nGroupBy+1; - for(i=0; i=j ){ - nCol++; - j++; - } - } - regBase = sqlite3GetTempRange(pParse, nCol); - sqlite3ExprCacheClear(pParse); - sqlite3ExprCodeExprList(pParse, pGroupBy, regBase, 0); - sqlite3VdbeAddOp2(v, OP_Sequence, sAggInfo.sortingIdx,regBase+nGroupBy); - j = nGroupBy+1; - for(i=0; iiSorterColumn>=j ){ - int r1 = j + regBase; - int r2; - - r2 = sqlite3ExprCodeGetColumn(pParse, - pCol->pTab, pCol->iColumn, pCol->iTable, r1, 0); - if( r1!=r2 ){ - sqlite3VdbeAddOp2(v, OP_SCopy, r2, r1); - } - j++; - } - } - regRecord = sqlite3GetTempReg(pParse); - sqlite3VdbeAddOp3(v, OP_MakeRecord, regBase, nCol, regRecord); - sqlite3VdbeAddOp2(v, OP_SorterInsert, sAggInfo.sortingIdx, regRecord); - sqlite3ReleaseTempReg(pParse, regRecord); - sqlite3ReleaseTempRange(pParse, regBase, nCol); - sqlite3WhereEnd(pWInfo); - sAggInfo.sortingIdxPTab = sortPTab = pParse->nTab++; - sortOut = sqlite3GetTempReg(pParse); - sqlite3VdbeAddOp3(v, OP_OpenPseudo, sortPTab, sortOut, nCol); - sqlite3VdbeAddOp2(v, OP_SorterSort, sAggInfo.sortingIdx, addrEnd); - VdbeComment((v, "GROUP BY sort")); VdbeCoverage(v); - sAggInfo.useSortingIdx = 1; - sqlite3ExprCacheClear(pParse); - - } - - /* If the index or temporary table used by the GROUP BY sort - ** will naturally deliver rows in the order required by the ORDER BY - ** clause, cancel the ephemeral table open coded earlier. - ** - ** This is an optimization - the correct answer should result regardless. - ** Use the SQLITE_GroupByOrder flag with SQLITE_TESTCTRL_OPTIMIZER to - ** disable this optimization for testing purposes. */ - if( orderByGrp && OptimizationEnabled(db, SQLITE_GroupByOrder) - && (groupBySort || sqlite3WhereIsSorted(pWInfo)) - ){ - sSort.pOrderBy = 0; - sqlite3VdbeChangeToNoop(v, sSort.addrSortIndex); - } - - /* Evaluate the current GROUP BY terms and store in b0, b1, b2... - ** (b0 is memory location iBMem+0, b1 is iBMem+1, and so forth) - ** Then compare the current GROUP BY terms against the GROUP BY terms - ** from the previous row currently stored in a0, a1, a2... - */ - addrTopOfLoop = sqlite3VdbeCurrentAddr(v); - sqlite3ExprCacheClear(pParse); - if( groupBySort ){ - sqlite3VdbeAddOp2(v, OP_SorterData, sAggInfo.sortingIdx, sortOut); - } - for(j=0; jnExpr; j++){ - if( groupBySort ){ - sqlite3VdbeAddOp3(v, OP_Column, sortPTab, j, iBMem+j); - if( j==0 ) sqlite3VdbeChangeP5(v, OPFLAG_CLEARCACHE); - }else{ - sAggInfo.directMode = 1; - sqlite3ExprCode(pParse, pGroupBy->a[j].pExpr, iBMem+j); - } - } - sqlite3VdbeAddOp4(v, OP_Compare, iAMem, iBMem, pGroupBy->nExpr, - (char*)sqlite3KeyInfoRef(pKeyInfo), P4_KEYINFO); - j1 = sqlite3VdbeCurrentAddr(v); - sqlite3VdbeAddOp3(v, OP_Jump, j1+1, 0, j1+1); VdbeCoverage(v); - - /* Generate code that runs whenever the GROUP BY changes. - ** Changes in the GROUP BY are detected by the previous code - ** block. If there were no changes, this block is skipped. - ** - ** This code copies current group by terms in b0,b1,b2,... - ** over to a0,a1,a2. It then calls the output subroutine - ** and resets the aggregate accumulator registers in preparation - ** for the next GROUP BY batch. 
- */ - sqlite3ExprCodeMove(pParse, iBMem, iAMem, pGroupBy->nExpr); - sqlite3VdbeAddOp2(v, OP_Gosub, regOutputRow, addrOutputRow); - VdbeComment((v, "output one row")); - sqlite3VdbeAddOp2(v, OP_IfPos, iAbortFlag, addrEnd); VdbeCoverage(v); - VdbeComment((v, "check abort flag")); - sqlite3VdbeAddOp2(v, OP_Gosub, regReset, addrReset); - VdbeComment((v, "reset accumulator")); - - /* Update the aggregate accumulators based on the content of - ** the current row - */ - sqlite3VdbeJumpHere(v, j1); - updateAccumulator(pParse, &sAggInfo); - sqlite3VdbeAddOp2(v, OP_Integer, 1, iUseFlag); - VdbeComment((v, "indicate data in accumulator")); - - /* End of the loop - */ - if( groupBySort ){ - sqlite3VdbeAddOp2(v, OP_SorterNext, sAggInfo.sortingIdx, addrTopOfLoop); - VdbeCoverage(v); - }else{ - sqlite3WhereEnd(pWInfo); - sqlite3VdbeChangeToNoop(v, addrSortingIdx); - } - - /* Output the final row of result - */ - sqlite3VdbeAddOp2(v, OP_Gosub, regOutputRow, addrOutputRow); - VdbeComment((v, "output final row")); - - /* Jump over the subroutines - */ - sqlite3VdbeAddOp2(v, OP_Goto, 0, addrEnd); - - /* Generate a subroutine that outputs a single row of the result - ** set. This subroutine first looks at the iUseFlag. If iUseFlag - ** is less than or equal to zero, the subroutine is a no-op. If - ** the processing calls for the query to abort, this subroutine - ** increments the iAbortFlag memory location before returning in - ** order to signal the caller to abort. - */ - addrSetAbort = sqlite3VdbeCurrentAddr(v); - sqlite3VdbeAddOp2(v, OP_Integer, 1, iAbortFlag); - VdbeComment((v, "set abort flag")); - sqlite3VdbeAddOp1(v, OP_Return, regOutputRow); - sqlite3VdbeResolveLabel(v, addrOutputRow); - addrOutputRow = sqlite3VdbeCurrentAddr(v); - sqlite3VdbeAddOp2(v, OP_IfPos, iUseFlag, addrOutputRow+2); VdbeCoverage(v); - VdbeComment((v, "Groupby result generator entry point")); - sqlite3VdbeAddOp1(v, OP_Return, regOutputRow); - finalizeAggFunctions(pParse, &sAggInfo); - sqlite3ExprIfFalse(pParse, pHaving, addrOutputRow+1, SQLITE_JUMPIFNULL); - selectInnerLoop(pParse, p, p->pEList, -1, &sSort, - &sDistinct, pDest, - addrOutputRow+1, addrSetAbort); - sqlite3VdbeAddOp1(v, OP_Return, regOutputRow); - VdbeComment((v, "end groupby result generator")); - - /* Generate a subroutine that will reset the group-by accumulator - */ - sqlite3VdbeResolveLabel(v, addrReset); - resetAccumulator(pParse, &sAggInfo); - sqlite3VdbeAddOp1(v, OP_Return, regReset); - - } /* endif pGroupBy. Begin aggregate queries without GROUP BY: */ - else { - ExprList *pDel = 0; -#ifndef SQLITE_OMIT_BTREECOUNT - Table *pTab; - if( (pTab = isSimpleCount(p, &sAggInfo))!=0 ){ - /* If isSimpleCount() returns a pointer to a Table structure, then - ** the SQL statement is of the form: - ** - ** SELECT count(*) FROM - ** - ** where the Table structure returned represents table . - ** - ** This statement is so common that it is optimized specially. The - ** OP_Count instruction is executed either on the intkey table that - ** contains the data for table or on one of its indexes. It - ** is better to execute the op on an index, as indexes are almost - ** always spread across less pages than their corresponding tables. 
- */ - const int iDb = sqlite3SchemaToIndex(pParse->db, pTab->pSchema); - const int iCsr = pParse->nTab++; /* Cursor to scan b-tree */ - Index *pIdx; /* Iterator variable */ - KeyInfo *pKeyInfo = 0; /* Keyinfo for scanned index */ - Index *pBest = 0; /* Best index found so far */ - int iRoot = pTab->tnum; /* Root page of scanned b-tree */ - - sqlite3CodeVerifySchema(pParse, iDb); - sqlite3TableLock(pParse, iDb, pTab->tnum, 0, pTab->zName); - - /* Search for the index that has the lowest scan cost. - ** - ** (2011-04-15) Do not do a full scan of an unordered index. - ** - ** (2013-10-03) Do not count the entries in a partial index. - ** - ** In practice the KeyInfo structure will not be used. It is only - ** passed to keep OP_OpenRead happy. - */ - if( !HasRowid(pTab) ) pBest = sqlite3PrimaryKeyIndex(pTab); - for(pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext){ - if( pIdx->bUnordered==0 - && pIdx->szIdxRowszTabRow - && pIdx->pPartIdxWhere==0 - && (!pBest || pIdx->szIdxRowszIdxRow) - ){ - pBest = pIdx; - } - } - if( pBest ){ - iRoot = pBest->tnum; - pKeyInfo = sqlite3KeyInfoOfIndex(pParse, pBest); - } - - /* Open a read-only cursor, execute the OP_Count, close the cursor. */ - sqlite3VdbeAddOp4Int(v, OP_OpenRead, iCsr, iRoot, iDb, 1); - if( pKeyInfo ){ - sqlite3VdbeChangeP4(v, -1, (char *)pKeyInfo, P4_KEYINFO); - } - sqlite3VdbeAddOp2(v, OP_Count, iCsr, sAggInfo.aFunc[0].iMem); - sqlite3VdbeAddOp1(v, OP_Close, iCsr); - explainSimpleCount(pParse, pTab, pBest); - }else -#endif /* SQLITE_OMIT_BTREECOUNT */ - { - /* Check if the query is of one of the following forms: - ** - ** SELECT min(x) FROM ... - ** SELECT max(x) FROM ... - ** - ** If it is, then ask the code in where.c to attempt to sort results - ** as if there was an "ORDER ON x" or "ORDER ON x DESC" clause. - ** If where.c is able to produce results sorted in this order, then - ** add vdbe code to break out of the processing loop after the - ** first iteration (since the first iteration of the loop is - ** guaranteed to operate on the row with the minimum or maximum - ** value of x, the only row required). - ** - ** A special flag must be passed to sqlite3WhereBegin() to slightly - ** modify behavior as follows: - ** - ** + If the query is a "SELECT min(x)", then the loop coded by - ** where.c should not iterate over any values with a NULL value - ** for x. - ** - ** + The optimizer code in where.c (the thing that decides which - ** index or indices to use) should place a different priority on - ** satisfying the 'ORDER BY' clause than it does in other cases. - ** Refer to code and comments in where.c for details. - */ - ExprList *pMinMax = 0; - u8 flag = WHERE_ORDERBY_NORMAL; - - assert( p->pGroupBy==0 ); - assert( flag==0 ); - if( p->pHaving==0 ){ - flag = minMaxQuery(&sAggInfo, &pMinMax); - } - assert( flag==0 || (pMinMax!=0 && pMinMax->nExpr==1) ); - - if( flag ){ - pMinMax = sqlite3ExprListDup(db, pMinMax, 0); - pDel = pMinMax; - if( pMinMax && !db->mallocFailed ){ - pMinMax->a[0].sortOrder = flag!=WHERE_ORDERBY_MIN ?1:0; - pMinMax->a[0].pExpr->op = TK_COLUMN; - } - } - - /* This case runs if the aggregate has no GROUP BY clause. The - ** processing is much simpler since there is only a single row - ** of output. 
- */ - resetAccumulator(pParse, &sAggInfo); - pWInfo = sqlite3WhereBegin(pParse, pTabList, pWhere, pMinMax,0,flag,0); - if( pWInfo==0 ){ - sqlite3ExprListDelete(db, pDel); - goto select_end; - } - updateAccumulator(pParse, &sAggInfo); - assert( pMinMax==0 || pMinMax->nExpr==1 ); - if( sqlite3WhereIsOrdered(pWInfo)>0 ){ - sqlite3VdbeAddOp2(v, OP_Goto, 0, sqlite3WhereBreakLabel(pWInfo)); - VdbeComment((v, "%s() by index", - (flag==WHERE_ORDERBY_MIN?"min":"max"))); - } - sqlite3WhereEnd(pWInfo); - finalizeAggFunctions(pParse, &sAggInfo); - } - - sSort.pOrderBy = 0; - sqlite3ExprIfFalse(pParse, pHaving, addrEnd, SQLITE_JUMPIFNULL); - selectInnerLoop(pParse, p, p->pEList, -1, 0, 0, - pDest, addrEnd, addrEnd); - sqlite3ExprListDelete(db, pDel); - } - sqlite3VdbeResolveLabel(v, addrEnd); - - } /* endif aggregate query */ - - if( sDistinct.eTnctType==WHERE_DISTINCT_UNORDERED ){ - explainTempTable(pParse, "DISTINCT"); - } - - /* If there is an ORDER BY clause, then we need to sort the results - ** and send them to the callback one by one. - */ - if( sSort.pOrderBy ){ - explainTempTable(pParse, sSort.nOBSat>0 ? "RIGHT PART OF ORDER BY":"ORDER BY"); - generateSortTail(pParse, p, &sSort, pEList->nExpr, pDest); - } - - /* Jump here to skip this query - */ - sqlite3VdbeResolveLabel(v, iEnd); - - /* The SELECT was successfully coded. Set the return code to 0 - ** to indicate no errors. - */ - rc = 0; - - /* Control jumps to here if an error is encountered above, or upon - ** successful coding of the SELECT. - */ -select_end: - explainSetInteger(pParse->iSelectId, iRestoreSelectId); - - /* Identify column names if results of the SELECT are to be output. - */ - if( rc==SQLITE_OK && pDest->eDest==SRT_Output ){ - generateColumnNames(pParse, pTabList, pEList); - } - - sqlite3DbFree(db, sAggInfo.aCol); - sqlite3DbFree(db, sAggInfo.aFunc); - return rc; -} - -#if defined(SQLITE_ENABLE_TREE_EXPLAIN) -/* -** Generate a human-readable description of a the Select object. 
-*/ -static void explainOneSelect(Vdbe *pVdbe, Select *p){ - sqlite3ExplainPrintf(pVdbe, "SELECT "); - if( p->selFlags & (SF_Distinct|SF_Aggregate) ){ - if( p->selFlags & SF_Distinct ){ - sqlite3ExplainPrintf(pVdbe, "DISTINCT "); - } - if( p->selFlags & SF_Aggregate ){ - sqlite3ExplainPrintf(pVdbe, "agg_flag "); - } - sqlite3ExplainNL(pVdbe); - sqlite3ExplainPrintf(pVdbe, " "); - } - sqlite3ExplainExprList(pVdbe, p->pEList); - sqlite3ExplainNL(pVdbe); - if( p->pSrc && p->pSrc->nSrc ){ - int i; - sqlite3ExplainPrintf(pVdbe, "FROM "); - sqlite3ExplainPush(pVdbe); - for(i=0; ipSrc->nSrc; i++){ - struct SrcList_item *pItem = &p->pSrc->a[i]; - sqlite3ExplainPrintf(pVdbe, "{%d,*} = ", pItem->iCursor); - if( pItem->pSelect ){ - sqlite3ExplainSelect(pVdbe, pItem->pSelect); - if( pItem->pTab ){ - sqlite3ExplainPrintf(pVdbe, " (tabname=%s)", pItem->pTab->zName); - } - }else if( pItem->zName ){ - sqlite3ExplainPrintf(pVdbe, "%s", pItem->zName); - } - if( pItem->zAlias ){ - sqlite3ExplainPrintf(pVdbe, " (AS %s)", pItem->zAlias); - } - if( pItem->jointype & JT_LEFT ){ - sqlite3ExplainPrintf(pVdbe, " LEFT-JOIN"); - } - sqlite3ExplainNL(pVdbe); - } - sqlite3ExplainPop(pVdbe); - } - if( p->pWhere ){ - sqlite3ExplainPrintf(pVdbe, "WHERE "); - sqlite3ExplainExpr(pVdbe, p->pWhere); - sqlite3ExplainNL(pVdbe); - } - if( p->pGroupBy ){ - sqlite3ExplainPrintf(pVdbe, "GROUPBY "); - sqlite3ExplainExprList(pVdbe, p->pGroupBy); - sqlite3ExplainNL(pVdbe); - } - if( p->pHaving ){ - sqlite3ExplainPrintf(pVdbe, "HAVING "); - sqlite3ExplainExpr(pVdbe, p->pHaving); - sqlite3ExplainNL(pVdbe); - } - if( p->pOrderBy ){ - sqlite3ExplainPrintf(pVdbe, "ORDERBY "); - sqlite3ExplainExprList(pVdbe, p->pOrderBy); - sqlite3ExplainNL(pVdbe); - } - if( p->pLimit ){ - sqlite3ExplainPrintf(pVdbe, "LIMIT "); - sqlite3ExplainExpr(pVdbe, p->pLimit); - sqlite3ExplainNL(pVdbe); - } - if( p->pOffset ){ - sqlite3ExplainPrintf(pVdbe, "OFFSET "); - sqlite3ExplainExpr(pVdbe, p->pOffset); - sqlite3ExplainNL(pVdbe); - } -} -SQLITE_PRIVATE void sqlite3ExplainSelect(Vdbe *pVdbe, Select *p){ - if( p==0 ){ - sqlite3ExplainPrintf(pVdbe, "(null-select)"); - return; - } - sqlite3ExplainPush(pVdbe); - while( p ){ - explainOneSelect(pVdbe, p); - p = p->pNext; - if( p==0 ) break; - sqlite3ExplainNL(pVdbe); - sqlite3ExplainPrintf(pVdbe, "%s\n", selectOpName(p->op)); - } - sqlite3ExplainPrintf(pVdbe, "END"); - sqlite3ExplainPop(pVdbe); -} - -/* End of the structure debug printing code -*****************************************************************************/ -#endif /* defined(SQLITE_ENABLE_TREE_EXPLAIN) */ - -/************** End of select.c **********************************************/ -/************** Begin file table.c *******************************************/ -/* -** 2001 September 15 -** -** The author disclaims copyright to this source code. In place of -** a legal notice, here is a blessing: -** -** May you do good and not evil. -** May you find forgiveness for yourself and forgive others. -** May you share freely, never taking more than you give. -** -************************************************************************* -** This file contains the sqlite3_get_table() and sqlite3_free_table() -** interface routines. These are just wrappers around the main -** interface routine of sqlite3_exec(). -** -** These routines are in a separate files so that they will not be linked -** if they are not used. 
-*/ -/* #include */ -/* #include */ - -#ifndef SQLITE_OMIT_GET_TABLE - -/* -** This structure is used to pass data from sqlite3_get_table() through -** to the callback function is uses to build the result. -*/ -typedef struct TabResult { - char **azResult; /* Accumulated output */ - char *zErrMsg; /* Error message text, if an error occurs */ - int nAlloc; /* Slots allocated for azResult[] */ - int nRow; /* Number of rows in the result */ - int nColumn; /* Number of columns in the result */ - int nData; /* Slots used in azResult[]. (nRow+1)*nColumn */ - int rc; /* Return code from sqlite3_exec() */ -} TabResult; - -/* -** This routine is called once for each row in the result table. Its job -** is to fill in the TabResult structure appropriately, allocating new -** memory as necessary. -*/ -static int sqlite3_get_table_cb(void *pArg, int nCol, char **argv, char **colv){ - TabResult *p = (TabResult*)pArg; /* Result accumulator */ - int need; /* Slots needed in p->azResult[] */ - int i; /* Loop counter */ - char *z; /* A single column of result */ - - /* Make sure there is enough space in p->azResult to hold everything - ** we need to remember from this invocation of the callback. - */ - if( p->nRow==0 && argv!=0 ){ - need = nCol*2; - }else{ - need = nCol; - } - if( p->nData + need > p->nAlloc ){ - char **azNew; - p->nAlloc = p->nAlloc*2 + need; - azNew = sqlite3_realloc( p->azResult, sizeof(char*)*p->nAlloc ); - if( azNew==0 ) goto malloc_failed; - p->azResult = azNew; - } - - /* If this is the first row, then generate an extra row containing - ** the names of all columns. - */ - if( p->nRow==0 ){ - p->nColumn = nCol; - for(i=0; iazResult[p->nData++] = z; - } - }else if( p->nColumn!=nCol ){ - sqlite3_free(p->zErrMsg); - p->zErrMsg = sqlite3_mprintf( - "sqlite3_get_table() called with two or more incompatible queries" - ); - p->rc = SQLITE_ERROR; - return 1; - } - - /* Copy over the row data - */ - if( argv!=0 ){ - for(i=0; iazResult[p->nData++] = z; - } - p->nRow++; - } - return 0; - -malloc_failed: - p->rc = SQLITE_NOMEM; - return 1; -} - -/* -** Query the database. But instead of invoking a callback for each row, -** malloc() for space to hold the result and return the entire results -** at the conclusion of the call. -** -** The result that is written to ***pazResult is held in memory obtained -** from malloc(). But the caller cannot free this memory directly. -** Instead, the entire table should be passed to sqlite3_free_table() when -** the calling procedure is finished using it. 
-*/ -SQLITE_API int sqlite3_get_table( - sqlite3 *db, /* The database on which the SQL executes */ - const char *zSql, /* The SQL to be executed */ - char ***pazResult, /* Write the result table here */ - int *pnRow, /* Write the number of rows in the result here */ - int *pnColumn, /* Write the number of columns of result here */ - char **pzErrMsg /* Write error messages here */ -){ - int rc; - TabResult res; - - *pazResult = 0; - if( pnColumn ) *pnColumn = 0; - if( pnRow ) *pnRow = 0; - if( pzErrMsg ) *pzErrMsg = 0; - res.zErrMsg = 0; - res.nRow = 0; - res.nColumn = 0; - res.nData = 1; - res.nAlloc = 20; - res.rc = SQLITE_OK; - res.azResult = sqlite3_malloc(sizeof(char*)*res.nAlloc ); - if( res.azResult==0 ){ - db->errCode = SQLITE_NOMEM; - return SQLITE_NOMEM; - } - res.azResult[0] = 0; - rc = sqlite3_exec(db, zSql, sqlite3_get_table_cb, &res, pzErrMsg); - assert( sizeof(res.azResult[0])>= sizeof(res.nData) ); - res.azResult[0] = SQLITE_INT_TO_PTR(res.nData); - if( (rc&0xff)==SQLITE_ABORT ){ - sqlite3_free_table(&res.azResult[1]); - if( res.zErrMsg ){ - if( pzErrMsg ){ - sqlite3_free(*pzErrMsg); - *pzErrMsg = sqlite3_mprintf("%s",res.zErrMsg); - } - sqlite3_free(res.zErrMsg); - } - db->errCode = res.rc; /* Assume 32-bit assignment is atomic */ - return res.rc; - } - sqlite3_free(res.zErrMsg); - if( rc!=SQLITE_OK ){ - sqlite3_free_table(&res.azResult[1]); - return rc; - } - if( res.nAlloc>res.nData ){ - char **azNew; - azNew = sqlite3_realloc( res.azResult, sizeof(char*)*res.nData ); - if( azNew==0 ){ - sqlite3_free_table(&res.azResult[1]); - db->errCode = SQLITE_NOMEM; - return SQLITE_NOMEM; - } - res.azResult = azNew; - } - *pazResult = &res.azResult[1]; - if( pnColumn ) *pnColumn = res.nColumn; - if( pnRow ) *pnRow = res.nRow; - return rc; -} - -/* -** This routine frees the space the sqlite3_get_table() malloced. -*/ -SQLITE_API void sqlite3_free_table( - char **azResult /* Result returned from from sqlite3_get_table() */ -){ - if( azResult ){ - int i, n; - azResult--; - assert( azResult!=0 ); - n = SQLITE_PTR_TO_INT(azResult[0]); - for(i=1; ipNext; - - sqlite3ExprDelete(db, pTmp->pWhere); - sqlite3ExprListDelete(db, pTmp->pExprList); - sqlite3SelectDelete(db, pTmp->pSelect); - sqlite3IdListDelete(db, pTmp->pIdList); - - sqlite3DbFree(db, pTmp); - } -} - -/* -** Given table pTab, return a list of all the triggers attached to -** the table. The list is connected by Trigger.pNext pointers. -** -** All of the triggers on pTab that are in the same database as pTab -** are already attached to pTab->pTrigger. But there might be additional -** triggers on pTab in the TEMP schema. This routine prepends all -** TEMP triggers on pTab to the beginning of the pTab->pTrigger list -** and returns the combined list. -** -** To state it another way: This routine returns a list of all triggers -** that fire off of pTab. The list will include any TEMP triggers on -** pTab as well as the triggers lised in pTab->pTrigger. 
-*/ -SQLITE_PRIVATE Trigger *sqlite3TriggerList(Parse *pParse, Table *pTab){ - Schema * const pTmpSchema = pParse->db->aDb[1].pSchema; - Trigger *pList = 0; /* List of triggers to return */ - - if( pParse->disableTriggers ){ - return 0; - } - - if( pTmpSchema!=pTab->pSchema ){ - HashElem *p; - assert( sqlite3SchemaMutexHeld(pParse->db, 0, pTmpSchema) ); - for(p=sqliteHashFirst(&pTmpSchema->trigHash); p; p=sqliteHashNext(p)){ - Trigger *pTrig = (Trigger *)sqliteHashData(p); - if( pTrig->pTabSchema==pTab->pSchema - && 0==sqlite3StrICmp(pTrig->table, pTab->zName) - ){ - pTrig->pNext = (pList ? pList : pTab->pTrigger); - pList = pTrig; - } - } - } - - return (pList ? pList : pTab->pTrigger); -} - -/* -** This is called by the parser when it sees a CREATE TRIGGER statement -** up to the point of the BEGIN before the trigger actions. A Trigger -** structure is generated based on the information available and stored -** in pParse->pNewTrigger. After the trigger actions have been parsed, the -** sqlite3FinishTrigger() function is called to complete the trigger -** construction process. -*/ -SQLITE_PRIVATE void sqlite3BeginTrigger( - Parse *pParse, /* The parse context of the CREATE TRIGGER statement */ - Token *pName1, /* The name of the trigger */ - Token *pName2, /* The name of the trigger */ - int tr_tm, /* One of TK_BEFORE, TK_AFTER, TK_INSTEAD */ - int op, /* One of TK_INSERT, TK_UPDATE, TK_DELETE */ - IdList *pColumns, /* column list if this is an UPDATE OF trigger */ - SrcList *pTableName,/* The name of the table/view the trigger applies to */ - Expr *pWhen, /* WHEN clause */ - int isTemp, /* True if the TEMPORARY keyword is present */ - int noErr /* Suppress errors if the trigger already exists */ -){ - Trigger *pTrigger = 0; /* The new trigger */ - Table *pTab; /* Table that the trigger fires off of */ - char *zName = 0; /* Name of the trigger */ - sqlite3 *db = pParse->db; /* The database connection */ - int iDb; /* The database to store the trigger in */ - Token *pName; /* The unqualified db name */ - DbFixer sFix; /* State vector for the DB fixer */ - int iTabDb; /* Index of the database holding pTab */ - - assert( pName1!=0 ); /* pName1->z might be NULL, but not pName1 itself */ - assert( pName2!=0 ); - assert( op==TK_INSERT || op==TK_UPDATE || op==TK_DELETE ); - assert( op>0 && op<0xff ); - if( isTemp ){ - /* If TEMP was specified, then the trigger name may not be qualified. */ - if( pName2->n>0 ){ - sqlite3ErrorMsg(pParse, "temporary trigger may not have qualified name"); - goto trigger_cleanup; - } - iDb = 1; - pName = pName1; - }else{ - /* Figure out the db that the trigger will be created in */ - iDb = sqlite3TwoPartName(pParse, pName1, pName2, &pName); - if( iDb<0 ){ - goto trigger_cleanup; - } - } - if( !pTableName || db->mallocFailed ){ - goto trigger_cleanup; - } - - /* A long-standing parser bug is that this syntax was allowed: - ** - ** CREATE TRIGGER attached.demo AFTER INSERT ON attached.tab .... - ** ^^^^^^^^ - ** - ** To maintain backwards compatibility, ignore the database - ** name on pTableName if we are reparsing our of SQLITE_MASTER. - */ - if( db->init.busy && iDb!=1 ){ - sqlite3DbFree(db, pTableName->a[0].zDatabase); - pTableName->a[0].zDatabase = 0; - } - - /* If the trigger name was unqualified, and the table is a temp table, - ** then set iDb to 1 to create the trigger in the temporary database. - ** If sqlite3SrcListLookup() returns 0, indicating the table does not - ** exist, the error is caught by the block below. 
- */ - pTab = sqlite3SrcListLookup(pParse, pTableName); - if( db->init.busy==0 && pName2->n==0 && pTab - && pTab->pSchema==db->aDb[1].pSchema ){ - iDb = 1; - } - - /* Ensure the table name matches database name and that the table exists */ - if( db->mallocFailed ) goto trigger_cleanup; - assert( pTableName->nSrc==1 ); - sqlite3FixInit(&sFix, pParse, iDb, "trigger", pName); - if( sqlite3FixSrcList(&sFix, pTableName) ){ - goto trigger_cleanup; - } - pTab = sqlite3SrcListLookup(pParse, pTableName); - if( !pTab ){ - /* The table does not exist. */ - if( db->init.iDb==1 ){ - /* Ticket #3810. - ** Normally, whenever a table is dropped, all associated triggers are - ** dropped too. But if a TEMP trigger is created on a non-TEMP table - ** and the table is dropped by a different database connection, the - ** trigger is not visible to the database connection that does the - ** drop so the trigger cannot be dropped. This results in an - ** "orphaned trigger" - a trigger whose associated table is missing. - */ - db->init.orphanTrigger = 1; - } - goto trigger_cleanup; - } - if( IsVirtual(pTab) ){ - sqlite3ErrorMsg(pParse, "cannot create triggers on virtual tables"); - goto trigger_cleanup; - } - - /* Check that the trigger name is not reserved and that no trigger of the - ** specified name exists */ - zName = sqlite3NameFromToken(db, pName); - if( !zName || SQLITE_OK!=sqlite3CheckObjectName(pParse, zName) ){ - goto trigger_cleanup; - } - assert( sqlite3SchemaMutexHeld(db, iDb, 0) ); - if( sqlite3HashFind(&(db->aDb[iDb].pSchema->trigHash), - zName, sqlite3Strlen30(zName)) ){ - if( !noErr ){ - sqlite3ErrorMsg(pParse, "trigger %T already exists", pName); - }else{ - assert( !db->init.busy ); - sqlite3CodeVerifySchema(pParse, iDb); - } - goto trigger_cleanup; - } - - /* Do not create a trigger on a system table */ - if( sqlite3StrNICmp(pTab->zName, "sqlite_", 7)==0 ){ - sqlite3ErrorMsg(pParse, "cannot create trigger on system table"); - pParse->nErr++; - goto trigger_cleanup; - } - - /* INSTEAD of triggers are only for views and views only support INSTEAD - ** of triggers. - */ - if( pTab->pSelect && tr_tm!=TK_INSTEAD ){ - sqlite3ErrorMsg(pParse, "cannot create %s trigger on view: %S", - (tr_tm == TK_BEFORE)?"BEFORE":"AFTER", pTableName, 0); - goto trigger_cleanup; - } - if( !pTab->pSelect && tr_tm==TK_INSTEAD ){ - sqlite3ErrorMsg(pParse, "cannot create INSTEAD OF" - " trigger on table: %S", pTableName, 0); - goto trigger_cleanup; - } - iTabDb = sqlite3SchemaToIndex(db, pTab->pSchema); - -#ifndef SQLITE_OMIT_AUTHORIZATION - { - int code = SQLITE_CREATE_TRIGGER; - const char *zDb = db->aDb[iTabDb].zName; - const char *zDbTrig = isTemp ? db->aDb[1].zName : zDb; - if( iTabDb==1 || isTemp ) code = SQLITE_CREATE_TEMP_TRIGGER; - if( sqlite3AuthCheck(pParse, code, zName, pTab->zName, zDbTrig) ){ - goto trigger_cleanup; - } - if( sqlite3AuthCheck(pParse, SQLITE_INSERT, SCHEMA_TABLE(iTabDb),0,zDb)){ - goto trigger_cleanup; - } - } -#endif - - /* INSTEAD OF triggers can only appear on views and BEFORE triggers - ** cannot appear on views. So we might as well translate every - ** INSTEAD OF trigger into a BEFORE trigger. It simplifies code - ** elsewhere. 
- */ - if (tr_tm == TK_INSTEAD){ - tr_tm = TK_BEFORE; - } - - /* Build the Trigger object */ - pTrigger = (Trigger*)sqlite3DbMallocZero(db, sizeof(Trigger)); - if( pTrigger==0 ) goto trigger_cleanup; - pTrigger->zName = zName; - zName = 0; - pTrigger->table = sqlite3DbStrDup(db, pTableName->a[0].zName); - pTrigger->pSchema = db->aDb[iDb].pSchema; - pTrigger->pTabSchema = pTab->pSchema; - pTrigger->op = (u8)op; - pTrigger->tr_tm = tr_tm==TK_BEFORE ? TRIGGER_BEFORE : TRIGGER_AFTER; - pTrigger->pWhen = sqlite3ExprDup(db, pWhen, EXPRDUP_REDUCE); - pTrigger->pColumns = sqlite3IdListDup(db, pColumns); - assert( pParse->pNewTrigger==0 ); - pParse->pNewTrigger = pTrigger; - -trigger_cleanup: - sqlite3DbFree(db, zName); - sqlite3SrcListDelete(db, pTableName); - sqlite3IdListDelete(db, pColumns); - sqlite3ExprDelete(db, pWhen); - if( !pParse->pNewTrigger ){ - sqlite3DeleteTrigger(db, pTrigger); - }else{ - assert( pParse->pNewTrigger==pTrigger ); - } -} - -/* -** This routine is called after all of the trigger actions have been parsed -** in order to complete the process of building the trigger. -*/ -SQLITE_PRIVATE void sqlite3FinishTrigger( - Parse *pParse, /* Parser context */ - TriggerStep *pStepList, /* The triggered program */ - Token *pAll /* Token that describes the complete CREATE TRIGGER */ -){ - Trigger *pTrig = pParse->pNewTrigger; /* Trigger being finished */ - char *zName; /* Name of trigger */ - sqlite3 *db = pParse->db; /* The database */ - DbFixer sFix; /* Fixer object */ - int iDb; /* Database containing the trigger */ - Token nameToken; /* Trigger name for error reporting */ - - pParse->pNewTrigger = 0; - if( NEVER(pParse->nErr) || !pTrig ) goto triggerfinish_cleanup; - zName = pTrig->zName; - iDb = sqlite3SchemaToIndex(pParse->db, pTrig->pSchema); - pTrig->step_list = pStepList; - while( pStepList ){ - pStepList->pTrig = pTrig; - pStepList = pStepList->pNext; - } - nameToken.z = pTrig->zName; - nameToken.n = sqlite3Strlen30(nameToken.z); - sqlite3FixInit(&sFix, pParse, iDb, "trigger", &nameToken); - if( sqlite3FixTriggerStep(&sFix, pTrig->step_list) - || sqlite3FixExpr(&sFix, pTrig->pWhen) - ){ - goto triggerfinish_cleanup; - } - - /* if we are not initializing, - ** build the sqlite_master entry - */ - if( !db->init.busy ){ - Vdbe *v; - char *z; - - /* Make an entry in the sqlite_master table */ - v = sqlite3GetVdbe(pParse); - if( v==0 ) goto triggerfinish_cleanup; - sqlite3BeginWriteOperation(pParse, 0, iDb); - z = sqlite3DbStrNDup(db, (char*)pAll->z, pAll->n); - sqlite3NestedParse(pParse, - "INSERT INTO %Q.%s VALUES('trigger',%Q,%Q,0,'CREATE TRIGGER %q')", - db->aDb[iDb].zName, SCHEMA_TABLE(iDb), zName, - pTrig->table, z); - sqlite3DbFree(db, z); - sqlite3ChangeCookie(pParse, iDb); - sqlite3VdbeAddParseSchemaOp(v, iDb, - sqlite3MPrintf(db, "type='trigger' AND name='%q'", zName)); - } - - if( db->init.busy ){ - Trigger *pLink = pTrig; - Hash *pHash = &db->aDb[iDb].pSchema->trigHash; - assert( sqlite3SchemaMutexHeld(db, iDb, 0) ); - pTrig = sqlite3HashInsert(pHash, zName, sqlite3Strlen30(zName), pTrig); - if( pTrig ){ - db->mallocFailed = 1; - }else if( pLink->pSchema==pLink->pTabSchema ){ - Table *pTab; - int n = sqlite3Strlen30(pLink->table); - pTab = sqlite3HashFind(&pLink->pTabSchema->tblHash, pLink->table, n); - assert( pTab!=0 ); - pLink->pNext = pTab->pTrigger; - pTab->pTrigger = pLink; - } - } - -triggerfinish_cleanup: - sqlite3DeleteTrigger(db, pTrig); - assert( !pParse->pNewTrigger ); - sqlite3DeleteTriggerStep(db, pStepList); -} - -/* -** Turn a SELECT statement (that 
the pSelect parameter points to) into -** a trigger step. Return a pointer to a TriggerStep structure. -** -** The parser calls this routine when it finds a SELECT statement in -** body of a TRIGGER. -*/ -SQLITE_PRIVATE TriggerStep *sqlite3TriggerSelectStep(sqlite3 *db, Select *pSelect){ - TriggerStep *pTriggerStep = sqlite3DbMallocZero(db, sizeof(TriggerStep)); - if( pTriggerStep==0 ) { - sqlite3SelectDelete(db, pSelect); - return 0; - } - pTriggerStep->op = TK_SELECT; - pTriggerStep->pSelect = pSelect; - pTriggerStep->orconf = OE_Default; - return pTriggerStep; -} - -/* -** Allocate space to hold a new trigger step. The allocated space -** holds both the TriggerStep object and the TriggerStep.target.z string. -** -** If an OOM error occurs, NULL is returned and db->mallocFailed is set. -*/ -static TriggerStep *triggerStepAllocate( - sqlite3 *db, /* Database connection */ - u8 op, /* Trigger opcode */ - Token *pName /* The target name */ -){ - TriggerStep *pTriggerStep; - - pTriggerStep = sqlite3DbMallocZero(db, sizeof(TriggerStep) + pName->n); - if( pTriggerStep ){ - char *z = (char*)&pTriggerStep[1]; - memcpy(z, pName->z, pName->n); - pTriggerStep->target.z = z; - pTriggerStep->target.n = pName->n; - pTriggerStep->op = op; - } - return pTriggerStep; -} - -/* -** Build a trigger step out of an INSERT statement. Return a pointer -** to the new trigger step. -** -** The parser calls this routine when it sees an INSERT inside the -** body of a trigger. -*/ -SQLITE_PRIVATE TriggerStep *sqlite3TriggerInsertStep( - sqlite3 *db, /* The database connection */ - Token *pTableName, /* Name of the table into which we insert */ - IdList *pColumn, /* List of columns in pTableName to insert into */ - Select *pSelect, /* A SELECT statement that supplies values */ - u8 orconf /* The conflict algorithm (OE_Abort, OE_Replace, etc.) */ -){ - TriggerStep *pTriggerStep; - - assert(pSelect != 0 || db->mallocFailed); - - pTriggerStep = triggerStepAllocate(db, TK_INSERT, pTableName); - if( pTriggerStep ){ - pTriggerStep->pSelect = sqlite3SelectDup(db, pSelect, EXPRDUP_REDUCE); - pTriggerStep->pIdList = pColumn; - pTriggerStep->orconf = orconf; - }else{ - sqlite3IdListDelete(db, pColumn); - } - sqlite3SelectDelete(db, pSelect); - - return pTriggerStep; -} - -/* -** Construct a trigger step that implements an UPDATE statement and return -** a pointer to that trigger step. The parser calls this routine when it -** sees an UPDATE statement inside the body of a CREATE TRIGGER. -*/ -SQLITE_PRIVATE TriggerStep *sqlite3TriggerUpdateStep( - sqlite3 *db, /* The database connection */ - Token *pTableName, /* Name of the table to be updated */ - ExprList *pEList, /* The SET clause: list of column and new values */ - Expr *pWhere, /* The WHERE clause */ - u8 orconf /* The conflict algorithm. (OE_Abort, OE_Ignore, etc) */ -){ - TriggerStep *pTriggerStep; - - pTriggerStep = triggerStepAllocate(db, TK_UPDATE, pTableName); - if( pTriggerStep ){ - pTriggerStep->pExprList = sqlite3ExprListDup(db, pEList, EXPRDUP_REDUCE); - pTriggerStep->pWhere = sqlite3ExprDup(db, pWhere, EXPRDUP_REDUCE); - pTriggerStep->orconf = orconf; - } - sqlite3ExprListDelete(db, pEList); - sqlite3ExprDelete(db, pWhere); - return pTriggerStep; -} - -/* -** Construct a trigger step that implements a DELETE statement and return -** a pointer to that trigger step. The parser calls this routine when it -** sees a DELETE statement inside the body of a CREATE TRIGGER. 
-*/ -SQLITE_PRIVATE TriggerStep *sqlite3TriggerDeleteStep( - sqlite3 *db, /* Database connection */ - Token *pTableName, /* The table from which rows are deleted */ - Expr *pWhere /* The WHERE clause */ -){ - TriggerStep *pTriggerStep; - - pTriggerStep = triggerStepAllocate(db, TK_DELETE, pTableName); - if( pTriggerStep ){ - pTriggerStep->pWhere = sqlite3ExprDup(db, pWhere, EXPRDUP_REDUCE); - pTriggerStep->orconf = OE_Default; - } - sqlite3ExprDelete(db, pWhere); - return pTriggerStep; -} - -/* -** Recursively delete a Trigger structure -*/ -SQLITE_PRIVATE void sqlite3DeleteTrigger(sqlite3 *db, Trigger *pTrigger){ - if( pTrigger==0 ) return; - sqlite3DeleteTriggerStep(db, pTrigger->step_list); - sqlite3DbFree(db, pTrigger->zName); - sqlite3DbFree(db, pTrigger->table); - sqlite3ExprDelete(db, pTrigger->pWhen); - sqlite3IdListDelete(db, pTrigger->pColumns); - sqlite3DbFree(db, pTrigger); -} - -/* -** This function is called to drop a trigger from the database schema. -** -** This may be called directly from the parser and therefore identifies -** the trigger by name. The sqlite3DropTriggerPtr() routine does the -** same job as this routine except it takes a pointer to the trigger -** instead of the trigger name. -**/ -SQLITE_PRIVATE void sqlite3DropTrigger(Parse *pParse, SrcList *pName, int noErr){ - Trigger *pTrigger = 0; - int i; - const char *zDb; - const char *zName; - int nName; - sqlite3 *db = pParse->db; - - if( db->mallocFailed ) goto drop_trigger_cleanup; - if( SQLITE_OK!=sqlite3ReadSchema(pParse) ){ - goto drop_trigger_cleanup; - } - - assert( pName->nSrc==1 ); - zDb = pName->a[0].zDatabase; - zName = pName->a[0].zName; - nName = sqlite3Strlen30(zName); - assert( zDb!=0 || sqlite3BtreeHoldsAllMutexes(db) ); - for(i=OMIT_TEMPDB; inDb; i++){ - int j = (i<2) ? i^1 : i; /* Search TEMP before MAIN */ - if( zDb && sqlite3StrICmp(db->aDb[j].zName, zDb) ) continue; - assert( sqlite3SchemaMutexHeld(db, j, 0) ); - pTrigger = sqlite3HashFind(&(db->aDb[j].pSchema->trigHash), zName, nName); - if( pTrigger ) break; - } - if( !pTrigger ){ - if( !noErr ){ - sqlite3ErrorMsg(pParse, "no such trigger: %S", pName, 0); - }else{ - sqlite3CodeVerifyNamedSchema(pParse, zDb); - } - pParse->checkSchema = 1; - goto drop_trigger_cleanup; - } - sqlite3DropTriggerPtr(pParse, pTrigger); - -drop_trigger_cleanup: - sqlite3SrcListDelete(db, pName); -} - -/* -** Return a pointer to the Table structure for the table that a trigger -** is set on. -*/ -static Table *tableOfTrigger(Trigger *pTrigger){ - int n = sqlite3Strlen30(pTrigger->table); - return sqlite3HashFind(&pTrigger->pTabSchema->tblHash, pTrigger->table, n); -} - - -/* -** Drop a trigger given a pointer to that trigger. -*/ -SQLITE_PRIVATE void sqlite3DropTriggerPtr(Parse *pParse, Trigger *pTrigger){ - Table *pTable; - Vdbe *v; - sqlite3 *db = pParse->db; - int iDb; - - iDb = sqlite3SchemaToIndex(pParse->db, pTrigger->pSchema); - assert( iDb>=0 && iDbnDb ); - pTable = tableOfTrigger(pTrigger); - assert( pTable ); - assert( pTable->pSchema==pTrigger->pSchema || iDb==1 ); -#ifndef SQLITE_OMIT_AUTHORIZATION - { - int code = SQLITE_DROP_TRIGGER; - const char *zDb = db->aDb[iDb].zName; - const char *zTab = SCHEMA_TABLE(iDb); - if( iDb==1 ) code = SQLITE_DROP_TEMP_TRIGGER; - if( sqlite3AuthCheck(pParse, code, pTrigger->zName, pTable->zName, zDb) || - sqlite3AuthCheck(pParse, SQLITE_DELETE, zTab, 0, zDb) ){ - return; - } - } -#endif - - /* Generate code to destroy the database record of the trigger. 
- */ - assert( pTable!=0 ); - if( (v = sqlite3GetVdbe(pParse))!=0 ){ - int base; - static const int iLn = VDBE_OFFSET_LINENO(2); - static const VdbeOpList dropTrigger[] = { - { OP_Rewind, 0, ADDR(9), 0}, - { OP_String8, 0, 1, 0}, /* 1 */ - { OP_Column, 0, 1, 2}, - { OP_Ne, 2, ADDR(8), 1}, - { OP_String8, 0, 1, 0}, /* 4: "trigger" */ - { OP_Column, 0, 0, 2}, - { OP_Ne, 2, ADDR(8), 1}, - { OP_Delete, 0, 0, 0}, - { OP_Next, 0, ADDR(1), 0}, /* 8 */ - }; - - sqlite3BeginWriteOperation(pParse, 0, iDb); - sqlite3OpenMasterTable(pParse, iDb); - base = sqlite3VdbeAddOpList(v, ArraySize(dropTrigger), dropTrigger, iLn); - sqlite3VdbeChangeP4(v, base+1, pTrigger->zName, P4_TRANSIENT); - sqlite3VdbeChangeP4(v, base+4, "trigger", P4_STATIC); - sqlite3ChangeCookie(pParse, iDb); - sqlite3VdbeAddOp2(v, OP_Close, 0, 0); - sqlite3VdbeAddOp4(v, OP_DropTrigger, iDb, 0, 0, pTrigger->zName, 0); - if( pParse->nMem<3 ){ - pParse->nMem = 3; - } - } -} - -/* -** Remove a trigger from the hash tables of the sqlite* pointer. -*/ -SQLITE_PRIVATE void sqlite3UnlinkAndDeleteTrigger(sqlite3 *db, int iDb, const char *zName){ - Trigger *pTrigger; - Hash *pHash; - - assert( sqlite3SchemaMutexHeld(db, iDb, 0) ); - pHash = &(db->aDb[iDb].pSchema->trigHash); - pTrigger = sqlite3HashInsert(pHash, zName, sqlite3Strlen30(zName), 0); - if( ALWAYS(pTrigger) ){ - if( pTrigger->pSchema==pTrigger->pTabSchema ){ - Table *pTab = tableOfTrigger(pTrigger); - Trigger **pp; - for(pp=&pTab->pTrigger; *pp!=pTrigger; pp=&((*pp)->pNext)); - *pp = (*pp)->pNext; - } - sqlite3DeleteTrigger(db, pTrigger); - db->flags |= SQLITE_InternChanges; - } -} - -/* -** pEList is the SET clause of an UPDATE statement. Each entry -** in pEList is of the format =. If any of the entries -** in pEList have an which matches an identifier in pIdList, -** then return TRUE. If pIdList==NULL, then it is considered a -** wildcard that matches anything. Likewise if pEList==NULL then -** it matches anything so always return true. Return false only -** if there is no match. -*/ -static int checkColumnOverlap(IdList *pIdList, ExprList *pEList){ - int e; - if( pIdList==0 || NEVER(pEList==0) ) return 1; - for(e=0; enExpr; e++){ - if( sqlite3IdListIndex(pIdList, pEList->a[e].zName)>=0 ) return 1; - } - return 0; -} - -/* -** Return a list of all triggers on table pTab if there exists at least -** one trigger that must be fired when an operation of type 'op' is -** performed on the table, and, if that operation is an UPDATE, if at -** least one of the columns in pChanges is being modified. -*/ -SQLITE_PRIVATE Trigger *sqlite3TriggersExist( - Parse *pParse, /* Parse context */ - Table *pTab, /* The table the contains the triggers */ - int op, /* one of TK_DELETE, TK_INSERT, TK_UPDATE */ - ExprList *pChanges, /* Columns that change in an UPDATE statement */ - int *pMask /* OUT: Mask of TRIGGER_BEFORE|TRIGGER_AFTER */ -){ - int mask = 0; - Trigger *pList = 0; - Trigger *p; - - if( (pParse->db->flags & SQLITE_EnableTrigger)!=0 ){ - pList = sqlite3TriggerList(pParse, pTab); - } - assert( pList==0 || IsVirtual(pTab)==0 ); - for(p=pList; p; p=p->pNext){ - if( p->op==op && checkColumnOverlap(p->pColumns, pChanges) ){ - mask |= p->tr_tm; - } - } - if( pMask ){ - *pMask = mask; - } - return (mask ? pList : 0); -} - -/* -** Convert the pStep->target token into a SrcList and return a pointer -** to that SrcList. -** -** This routine adds a specific database name, if needed, to the target when -** forming the SrcList. 
This prevents a trigger in one database from -** referring to a target in another database. An exception is when the -** trigger is in TEMP in which case it can refer to any other database it -** wants. -*/ -static SrcList *targetSrcList( - Parse *pParse, /* The parsing context */ - TriggerStep *pStep /* The trigger containing the target token */ -){ - int iDb; /* Index of the database to use */ - SrcList *pSrc; /* SrcList to be returned */ - - pSrc = sqlite3SrcListAppend(pParse->db, 0, &pStep->target, 0); - if( pSrc ){ - assert( pSrc->nSrc>0 ); - assert( pSrc->a!=0 ); - iDb = sqlite3SchemaToIndex(pParse->db, pStep->pTrig->pSchema); - if( iDb==0 || iDb>=2 ){ - sqlite3 *db = pParse->db; - assert( iDbdb->nDb ); - pSrc->a[pSrc->nSrc-1].zDatabase = sqlite3DbStrDup(db, db->aDb[iDb].zName); - } - } - return pSrc; -} - -/* -** Generate VDBE code for the statements inside the body of a single -** trigger. -*/ -static int codeTriggerProgram( - Parse *pParse, /* The parser context */ - TriggerStep *pStepList, /* List of statements inside the trigger body */ - int orconf /* Conflict algorithm. (OE_Abort, etc) */ -){ - TriggerStep *pStep; - Vdbe *v = pParse->pVdbe; - sqlite3 *db = pParse->db; - - assert( pParse->pTriggerTab && pParse->pToplevel ); - assert( pStepList ); - assert( v!=0 ); - for(pStep=pStepList; pStep; pStep=pStep->pNext){ - /* Figure out the ON CONFLICT policy that will be used for this step - ** of the trigger program. If the statement that caused this trigger - ** to fire had an explicit ON CONFLICT, then use it. Otherwise, use - ** the ON CONFLICT policy that was specified as part of the trigger - ** step statement. Example: - ** - ** CREATE TRIGGER AFTER INSERT ON t1 BEGIN; - ** INSERT OR REPLACE INTO t2 VALUES(new.a, new.b); - ** END; - ** - ** INSERT INTO t1 ... ; -- insert into t2 uses REPLACE policy - ** INSERT OR IGNORE INTO t1 ... ; -- insert into t2 uses IGNORE policy - */ - pParse->eOrconf = (orconf==OE_Default)?pStep->orconf:(u8)orconf; - assert( pParse->okConstFactor==0 ); - - switch( pStep->op ){ - case TK_UPDATE: { - sqlite3Update(pParse, - targetSrcList(pParse, pStep), - sqlite3ExprListDup(db, pStep->pExprList, 0), - sqlite3ExprDup(db, pStep->pWhere, 0), - pParse->eOrconf - ); - break; - } - case TK_INSERT: { - sqlite3Insert(pParse, - targetSrcList(pParse, pStep), - sqlite3SelectDup(db, pStep->pSelect, 0), - sqlite3IdListDup(db, pStep->pIdList), - pParse->eOrconf - ); - break; - } - case TK_DELETE: { - sqlite3DeleteFrom(pParse, - targetSrcList(pParse, pStep), - sqlite3ExprDup(db, pStep->pWhere, 0) - ); - break; - } - default: assert( pStep->op==TK_SELECT ); { - SelectDest sDest; - Select *pSelect = sqlite3SelectDup(db, pStep->pSelect, 0); - sqlite3SelectDestInit(&sDest, SRT_Discard, 0); - sqlite3Select(pParse, pSelect, &sDest); - sqlite3SelectDelete(db, pSelect); - break; - } - } - if( pStep->op!=TK_SELECT ){ - sqlite3VdbeAddOp0(v, OP_ResetCount); - } - } - - return 0; -} - -#ifdef SQLITE_ENABLE_EXPLAIN_COMMENTS -/* -** This function is used to add VdbeComment() annotations to a VDBE -** program. It is not used in production code, only for debugging. -*/ -static const char *onErrorText(int onError){ - switch( onError ){ - case OE_Abort: return "abort"; - case OE_Rollback: return "rollback"; - case OE_Fail: return "fail"; - case OE_Replace: return "replace"; - case OE_Ignore: return "ignore"; - case OE_Default: return "default"; - } - return "n/a"; -} -#endif - -/* -** Parse context structure pFrom has just been used to create a sub-vdbe -** (trigger program). 
If an error has occurred, transfer error information -** from pFrom to pTo. -*/ -static void transferParseError(Parse *pTo, Parse *pFrom){ - assert( pFrom->zErrMsg==0 || pFrom->nErr ); - assert( pTo->zErrMsg==0 || pTo->nErr ); - if( pTo->nErr==0 ){ - pTo->zErrMsg = pFrom->zErrMsg; - pTo->nErr = pFrom->nErr; - }else{ - sqlite3DbFree(pFrom->db, pFrom->zErrMsg); - } -} - -/* -** Create and populate a new TriggerPrg object with a sub-program -** implementing trigger pTrigger with ON CONFLICT policy orconf. -*/ -static TriggerPrg *codeRowTrigger( - Parse *pParse, /* Current parse context */ - Trigger *pTrigger, /* Trigger to code */ - Table *pTab, /* The table pTrigger is attached to */ - int orconf /* ON CONFLICT policy to code trigger program with */ -){ - Parse *pTop = sqlite3ParseToplevel(pParse); - sqlite3 *db = pParse->db; /* Database handle */ - TriggerPrg *pPrg; /* Value to return */ - Expr *pWhen = 0; /* Duplicate of trigger WHEN expression */ - Vdbe *v; /* Temporary VM */ - NameContext sNC; /* Name context for sub-vdbe */ - SubProgram *pProgram = 0; /* Sub-vdbe for trigger program */ - Parse *pSubParse; /* Parse context for sub-vdbe */ - int iEndTrigger = 0; /* Label to jump to if WHEN is false */ - - assert( pTrigger->zName==0 || pTab==tableOfTrigger(pTrigger) ); - assert( pTop->pVdbe ); - - /* Allocate the TriggerPrg and SubProgram objects. To ensure that they - ** are freed if an error occurs, link them into the Parse.pTriggerPrg - ** list of the top-level Parse object sooner rather than later. */ - pPrg = sqlite3DbMallocZero(db, sizeof(TriggerPrg)); - if( !pPrg ) return 0; - pPrg->pNext = pTop->pTriggerPrg; - pTop->pTriggerPrg = pPrg; - pPrg->pProgram = pProgram = sqlite3DbMallocZero(db, sizeof(SubProgram)); - if( !pProgram ) return 0; - sqlite3VdbeLinkSubProgram(pTop->pVdbe, pProgram); - pPrg->pTrigger = pTrigger; - pPrg->orconf = orconf; - pPrg->aColmask[0] = 0xffffffff; - pPrg->aColmask[1] = 0xffffffff; - - /* Allocate and populate a new Parse context to use for coding the - ** trigger sub-program. */ - pSubParse = sqlite3StackAllocZero(db, sizeof(Parse)); - if( !pSubParse ) return 0; - memset(&sNC, 0, sizeof(sNC)); - sNC.pParse = pSubParse; - pSubParse->db = db; - pSubParse->pTriggerTab = pTab; - pSubParse->pToplevel = pTop; - pSubParse->zAuthContext = pTrigger->zName; - pSubParse->eTriggerOp = pTrigger->op; - pSubParse->nQueryLoop = pParse->nQueryLoop; - - v = sqlite3GetVdbe(pSubParse); - if( v ){ - VdbeComment((v, "Start: %s.%s (%s %s%s%s ON %s)", - pTrigger->zName, onErrorText(orconf), - (pTrigger->tr_tm==TRIGGER_BEFORE ? "BEFORE" : "AFTER"), - (pTrigger->op==TK_UPDATE ? "UPDATE" : ""), - (pTrigger->op==TK_INSERT ? "INSERT" : ""), - (pTrigger->op==TK_DELETE ? "DELETE" : ""), - pTab->zName - )); -#ifndef SQLITE_OMIT_TRACE - sqlite3VdbeChangeP4(v, -1, - sqlite3MPrintf(db, "-- TRIGGER %s", pTrigger->zName), P4_DYNAMIC - ); -#endif - - /* If one was specified, code the WHEN clause. If it evaluates to false - ** (or NULL) the sub-vdbe is immediately halted by jumping to the - ** OP_Halt inserted at the end of the program. */ - if( pTrigger->pWhen ){ - pWhen = sqlite3ExprDup(db, pTrigger->pWhen, 0); - if( SQLITE_OK==sqlite3ResolveExprNames(&sNC, pWhen) - && db->mallocFailed==0 - ){ - iEndTrigger = sqlite3VdbeMakeLabel(v); - sqlite3ExprIfFalse(pSubParse, pWhen, iEndTrigger, SQLITE_JUMPIFNULL); - } - sqlite3ExprDelete(db, pWhen); - } - - /* Code the trigger program into the sub-vdbe. 
*/ - codeTriggerProgram(pSubParse, pTrigger->step_list, orconf); - - /* Insert an OP_Halt at the end of the sub-program. */ - if( iEndTrigger ){ - sqlite3VdbeResolveLabel(v, iEndTrigger); - } - sqlite3VdbeAddOp0(v, OP_Halt); - VdbeComment((v, "End: %s.%s", pTrigger->zName, onErrorText(orconf))); - - transferParseError(pParse, pSubParse); - if( db->mallocFailed==0 ){ - pProgram->aOp = sqlite3VdbeTakeOpArray(v, &pProgram->nOp, &pTop->nMaxArg); - } - pProgram->nMem = pSubParse->nMem; - pProgram->nCsr = pSubParse->nTab; - pProgram->nOnce = pSubParse->nOnce; - pProgram->token = (void *)pTrigger; - pPrg->aColmask[0] = pSubParse->oldmask; - pPrg->aColmask[1] = pSubParse->newmask; - sqlite3VdbeDelete(v); - } - - assert( !pSubParse->pAinc && !pSubParse->pZombieTab ); - assert( !pSubParse->pTriggerPrg && !pSubParse->nMaxArg ); - sqlite3ParserReset(pSubParse); - sqlite3StackFree(db, pSubParse); - - return pPrg; -} - -/* -** Return a pointer to a TriggerPrg object containing the sub-program for -** trigger pTrigger with default ON CONFLICT algorithm orconf. If no such -** TriggerPrg object exists, a new object is allocated and populated before -** being returned. -*/ -static TriggerPrg *getRowTrigger( - Parse *pParse, /* Current parse context */ - Trigger *pTrigger, /* Trigger to code */ - Table *pTab, /* The table trigger pTrigger is attached to */ - int orconf /* ON CONFLICT algorithm. */ -){ - Parse *pRoot = sqlite3ParseToplevel(pParse); - TriggerPrg *pPrg; - - assert( pTrigger->zName==0 || pTab==tableOfTrigger(pTrigger) ); - - /* It may be that this trigger has already been coded (or is in the - ** process of being coded). If this is the case, then an entry with - ** a matching TriggerPrg.pTrigger field will be present somewhere - ** in the Parse.pTriggerPrg list. Search for such an entry. */ - for(pPrg=pRoot->pTriggerPrg; - pPrg && (pPrg->pTrigger!=pTrigger || pPrg->orconf!=orconf); - pPrg=pPrg->pNext - ); - - /* If an existing TriggerPrg could not be located, create a new one. */ - if( !pPrg ){ - pPrg = codeRowTrigger(pParse, pTrigger, pTab, orconf); - } - - return pPrg; -} - -/* -** Generate code for the trigger program associated with trigger p on -** table pTab. The reg, orconf and ignoreJump parameters passed to this -** function are the same as those described in the header function for -** sqlite3CodeRowTrigger() -*/ -SQLITE_PRIVATE void sqlite3CodeRowTriggerDirect( - Parse *pParse, /* Parse context */ - Trigger *p, /* Trigger to code */ - Table *pTab, /* The table to code triggers from */ - int reg, /* Reg array containing OLD.* and NEW.* values */ - int orconf, /* ON CONFLICT policy */ - int ignoreJump /* Instruction to jump to for RAISE(IGNORE) */ -){ - Vdbe *v = sqlite3GetVdbe(pParse); /* Main VM */ - TriggerPrg *pPrg; - pPrg = getRowTrigger(pParse, p, pTab, orconf); - assert( pPrg || pParse->nErr || pParse->db->mallocFailed ); - - /* Code the OP_Program opcode in the parent VDBE. P4 of the OP_Program - ** is a pointer to the sub-vdbe containing the trigger program. */ - if( pPrg ){ - int bRecursive = (p->zName && 0==(pParse->db->flags&SQLITE_RecTriggers)); - - sqlite3VdbeAddOp3(v, OP_Program, reg, ignoreJump, ++pParse->nMem); - sqlite3VdbeChangeP4(v, -1, (const char *)pPrg->pProgram, P4_SUBPROGRAM); - VdbeComment( - (v, "Call: %s.%s", (p->zName?p->zName:"fkey"), onErrorText(orconf))); - - /* Set the P5 operand of the OP_Program instruction to non-zero if - ** recursive invocation of this trigger program is disallowed. 
Recursive - ** invocation is disallowed if (a) the sub-program is really a trigger, - ** not a foreign key action, and (b) the flag to enable recursive triggers - ** is clear. */ - sqlite3VdbeChangeP5(v, (u8)bRecursive); - } -} - -/* -** This is called to code the required FOR EACH ROW triggers for an operation -** on table pTab. The operation to code triggers for (INSERT, UPDATE or DELETE) -** is given by the op parameter. The tr_tm parameter determines whether the -** BEFORE or AFTER triggers are coded. If the operation is an UPDATE, then -** parameter pChanges is passed the list of columns being modified. -** -** If there are no triggers that fire at the specified time for the specified -** operation on pTab, this function is a no-op. -** -** The reg argument is the address of the first in an array of registers -** that contain the values substituted for the new.* and old.* references -** in the trigger program. If N is the number of columns in table pTab -** (a copy of pTab->nCol), then registers are populated as follows: -** -** Register Contains -** ------------------------------------------------------ -** reg+0 OLD.rowid -** reg+1 OLD.* value of left-most column of pTab -** ... ... -** reg+N OLD.* value of right-most column of pTab -** reg+N+1 NEW.rowid -** reg+N+2 OLD.* value of left-most column of pTab -** ... ... -** reg+N+N+1 NEW.* value of right-most column of pTab -** -** For ON DELETE triggers, the registers containing the NEW.* values will -** never be accessed by the trigger program, so they are not allocated or -** populated by the caller (there is no data to populate them with anyway). -** Similarly, for ON INSERT triggers the values stored in the OLD.* registers -** are never accessed, and so are not allocated by the caller. So, for an -** ON INSERT trigger, the value passed to this function as parameter reg -** is not a readable register, although registers (reg+N) through -** (reg+N+N+1) are. -** -** Parameter orconf is the default conflict resolution algorithm for the -** trigger program to use (REPLACE, IGNORE etc.). Parameter ignoreJump -** is the instruction that control should jump to if a trigger program -** raises an IGNORE exception. -*/ -SQLITE_PRIVATE void sqlite3CodeRowTrigger( - Parse *pParse, /* Parse context */ - Trigger *pTrigger, /* List of triggers on table pTab */ - int op, /* One of TK_UPDATE, TK_INSERT, TK_DELETE */ - ExprList *pChanges, /* Changes list for any UPDATE OF triggers */ - int tr_tm, /* One of TRIGGER_BEFORE, TRIGGER_AFTER */ - Table *pTab, /* The table to code triggers from */ - int reg, /* The first in an array of registers (see above) */ - int orconf, /* ON CONFLICT policy */ - int ignoreJump /* Instruction to jump to for RAISE(IGNORE) */ -){ - Trigger *p; /* Used to iterate through pTrigger list */ - - assert( op==TK_UPDATE || op==TK_INSERT || op==TK_DELETE ); - assert( tr_tm==TRIGGER_BEFORE || tr_tm==TRIGGER_AFTER ); - assert( (op==TK_UPDATE)==(pChanges!=0) ); - - for(p=pTrigger; p; p=p->pNext){ - - /* Sanity checking: The schema for the trigger and for the table are - ** always defined. The trigger must be in the same schema as the table - ** or else it must be a TEMP trigger. 
*/ - assert( p->pSchema!=0 ); - assert( p->pTabSchema!=0 ); - assert( p->pSchema==p->pTabSchema - || p->pSchema==pParse->db->aDb[1].pSchema ); - - /* Determine whether we should code this trigger */ - if( p->op==op - && p->tr_tm==tr_tm - && checkColumnOverlap(p->pColumns, pChanges) - ){ - sqlite3CodeRowTriggerDirect(pParse, p, pTab, reg, orconf, ignoreJump); - } - } -} - -/* -** Triggers may access values stored in the old.* or new.* pseudo-table. -** This function returns a 32-bit bitmask indicating which columns of the -** old.* or new.* tables actually are used by triggers. This information -** may be used by the caller, for example, to avoid having to load the entire -** old.* record into memory when executing an UPDATE or DELETE command. -** -** Bit 0 of the returned mask is set if the left-most column of the -** table may be accessed using an [old|new].reference. Bit 1 is set if -** the second leftmost column value is required, and so on. If there -** are more than 32 columns in the table, and at least one of the columns -** with an index greater than 32 may be accessed, 0xffffffff is returned. -** -** It is not possible to determine if the old.rowid or new.rowid column is -** accessed by triggers. The caller must always assume that it is. -** -** Parameter isNew must be either 1 or 0. If it is 0, then the mask returned -** applies to the old.* table. If 1, the new.* table. -** -** Parameter tr_tm must be a mask with one or both of the TRIGGER_BEFORE -** and TRIGGER_AFTER bits set. Values accessed by BEFORE triggers are only -** included in the returned mask if the TRIGGER_BEFORE bit is set in the -** tr_tm parameter. Similarly, values accessed by AFTER triggers are only -** included in the returned mask if the TRIGGER_AFTER bit is set in tr_tm. -*/ -SQLITE_PRIVATE u32 sqlite3TriggerColmask( - Parse *pParse, /* Parse context */ - Trigger *pTrigger, /* List of triggers on table pTab */ - ExprList *pChanges, /* Changes list for any UPDATE OF triggers */ - int isNew, /* 1 for new.* ref mask, 0 for old.* ref mask */ - int tr_tm, /* Mask of TRIGGER_BEFORE|TRIGGER_AFTER */ - Table *pTab, /* The table to code triggers from */ - int orconf /* Default ON CONFLICT policy for trigger steps */ -){ - const int op = pChanges ? TK_UPDATE : TK_DELETE; - u32 mask = 0; - Trigger *p; - - assert( isNew==1 || isNew==0 ); - for(p=pTrigger; p; p=p->pNext){ - if( p->op==op && (tr_tm&p->tr_tm) - && checkColumnOverlap(p->pColumns,pChanges) - ){ - TriggerPrg *pPrg; - pPrg = getRowTrigger(pParse, p, pTab, orconf); - if( pPrg ){ - mask |= pPrg->aColmask[isNew]; - } - } - } - - return mask; -} - -#endif /* !defined(SQLITE_OMIT_TRIGGER) */ - -/************** End of trigger.c *********************************************/ -/************** Begin file update.c ******************************************/ -/* -** 2001 September 15 -** -** The author disclaims copyright to this source code. In place of -** a legal notice, here is a blessing: -** -** May you do good and not evil. -** May you find forgiveness for yourself and forgive others. -** May you share freely, never taking more than you give. -** -************************************************************************* -** This file contains C code routines that are called by the parser -** to handle UPDATE statements. 
-*/ - -#ifndef SQLITE_OMIT_VIRTUALTABLE -/* Forward declaration */ -static void updateVirtualTable( - Parse *pParse, /* The parsing context */ - SrcList *pSrc, /* The virtual table to be modified */ - Table *pTab, /* The virtual table */ - ExprList *pChanges, /* The columns to change in the UPDATE statement */ - Expr *pRowidExpr, /* Expression used to recompute the rowid */ - int *aXRef, /* Mapping from columns of pTab to entries in pChanges */ - Expr *pWhere, /* WHERE clause of the UPDATE statement */ - int onError /* ON CONFLICT strategy */ -); -#endif /* SQLITE_OMIT_VIRTUALTABLE */ - -/* -** The most recently coded instruction was an OP_Column to retrieve the -** i-th column of table pTab. This routine sets the P4 parameter of the -** OP_Column to the default value, if any. -** -** The default value of a column is specified by a DEFAULT clause in the -** column definition. This was either supplied by the user when the table -** was created, or added later to the table definition by an ALTER TABLE -** command. If the latter, then the row-records in the table btree on disk -** may not contain a value for the column and the default value, taken -** from the P4 parameter of the OP_Column instruction, is returned instead. -** If the former, then all row-records are guaranteed to include a value -** for the column and the P4 value is not required. -** -** Column definitions created by an ALTER TABLE command may only have -** literal default values specified: a number, null or a string. (If a more -** complicated default expression value was provided, it is evaluated -** when the ALTER TABLE is executed and one of the literal values written -** into the sqlite_master table.) -** -** Therefore, the P4 parameter is only required if the default value for -** the column is a literal number, string or null. The sqlite3ValueFromExpr() -** function is capable of transforming these types of expressions into -** sqlite3_value objects. -** -** If parameter iReg is not negative, code an OP_RealAffinity instruction -** on register iReg. This is used when an equivalent integer value is -** stored in place of an 8-byte floating point value in order to save -** space. -*/ -SQLITE_PRIVATE void sqlite3ColumnDefault(Vdbe *v, Table *pTab, int i, int iReg){ - assert( pTab!=0 ); - if( !pTab->pSelect ){ - sqlite3_value *pValue = 0; - u8 enc = ENC(sqlite3VdbeDb(v)); - Column *pCol = &pTab->aCol[i]; - VdbeComment((v, "%s.%s", pTab->zName, pCol->zName)); - assert( inCol ); - sqlite3ValueFromExpr(sqlite3VdbeDb(v), pCol->pDflt, enc, - pCol->affinity, &pValue); - if( pValue ){ - sqlite3VdbeChangeP4(v, -1, (const char *)pValue, P4_MEM); - } -#ifndef SQLITE_OMIT_FLOATING_POINT - if( pTab->aCol[i].affinity==SQLITE_AFF_REAL ){ - sqlite3VdbeAddOp1(v, OP_RealAffinity, iReg); - } -#endif - } -} - -/* -** Process an UPDATE statement. -** -** UPDATE OR IGNORE table_wxyz SET a=b, c=d WHERE e<5 AND f NOT NULL; -** \_______/ \________/ \______/ \________________/ -* onError pTabList pChanges pWhere -*/ -SQLITE_PRIVATE void sqlite3Update( - Parse *pParse, /* The parser context */ - SrcList *pTabList, /* The table in which we should change things */ - ExprList *pChanges, /* Things to be changed */ - Expr *pWhere, /* The WHERE clause. 
May be null */ - int onError /* How to handle constraint errors */ -){ - int i, j; /* Loop counters */ - Table *pTab; /* The table to be updated */ - int addrTop = 0; /* VDBE instruction address of the start of the loop */ - WhereInfo *pWInfo; /* Information about the WHERE clause */ - Vdbe *v; /* The virtual database engine */ - Index *pIdx; /* For looping over indices */ - Index *pPk; /* The PRIMARY KEY index for WITHOUT ROWID tables */ - int nIdx; /* Number of indices that need updating */ - int iBaseCur; /* Base cursor number */ - int iDataCur; /* Cursor for the canonical data btree */ - int iIdxCur; /* Cursor for the first index */ - sqlite3 *db; /* The database structure */ - int *aRegIdx = 0; /* One register assigned to each index to be updated */ - int *aXRef = 0; /* aXRef[i] is the index in pChanges->a[] of the - ** an expression for the i-th column of the table. - ** aXRef[i]==-1 if the i-th column is not changed. */ - u8 *aToOpen; /* 1 for tables and indices to be opened */ - u8 chngPk; /* PRIMARY KEY changed in a WITHOUT ROWID table */ - u8 chngRowid; /* Rowid changed in a normal table */ - u8 chngKey; /* Either chngPk or chngRowid */ - Expr *pRowidExpr = 0; /* Expression defining the new record number */ - AuthContext sContext; /* The authorization context */ - NameContext sNC; /* The name-context to resolve expressions in */ - int iDb; /* Database containing the table being updated */ - int okOnePass; /* True for one-pass algorithm without the FIFO */ - int hasFK; /* True if foreign key processing is required */ - int labelBreak; /* Jump here to break out of UPDATE loop */ - int labelContinue; /* Jump here to continue next step of UPDATE loop */ - -#ifndef SQLITE_OMIT_TRIGGER - int isView; /* True when updating a view (INSTEAD OF trigger) */ - Trigger *pTrigger; /* List of triggers on pTab, if required */ - int tmask; /* Mask of TRIGGER_BEFORE|TRIGGER_AFTER */ -#endif - int newmask; /* Mask of NEW.* columns accessed by BEFORE triggers */ - int iEph = 0; /* Ephemeral table holding all primary key values */ - int nKey = 0; /* Number of elements in regKey for WITHOUT ROWID */ - int aiCurOnePass[2]; /* The write cursors opened by WHERE_ONEPASS */ - - /* Register Allocations */ - int regRowCount = 0; /* A count of rows changed */ - int regOldRowid; /* The old rowid */ - int regNewRowid; /* The new rowid */ - int regNew; /* Content of the NEW.* table in triggers */ - int regOld = 0; /* Content of OLD.* table in triggers */ - int regRowSet = 0; /* Rowset of rows to be updated */ - int regKey = 0; /* composite PRIMARY KEY value */ - - memset(&sContext, 0, sizeof(sContext)); - db = pParse->db; - if( pParse->nErr || db->mallocFailed ){ - goto update_cleanup; - } - assert( pTabList->nSrc==1 ); - - /* Locate the table which we want to update. - */ - pTab = sqlite3SrcListLookup(pParse, pTabList); - if( pTab==0 ) goto update_cleanup; - iDb = sqlite3SchemaToIndex(pParse->db, pTab->pSchema); - - /* Figure out if we have any triggers and if the table being - ** updated is a view. 
- */ -#ifndef SQLITE_OMIT_TRIGGER - pTrigger = sqlite3TriggersExist(pParse, pTab, TK_UPDATE, pChanges, &tmask); - isView = pTab->pSelect!=0; - assert( pTrigger || tmask==0 ); -#else -# define pTrigger 0 -# define isView 0 -# define tmask 0 -#endif -#ifdef SQLITE_OMIT_VIEW -# undef isView -# define isView 0 -#endif - - if( sqlite3ViewGetColumnNames(pParse, pTab) ){ - goto update_cleanup; - } - if( sqlite3IsReadOnly(pParse, pTab, tmask) ){ - goto update_cleanup; - } - - /* Allocate a cursors for the main database table and for all indices. - ** The index cursors might not be used, but if they are used they - ** need to occur right after the database cursor. So go ahead and - ** allocate enough space, just in case. - */ - pTabList->a[0].iCursor = iBaseCur = iDataCur = pParse->nTab++; - iIdxCur = iDataCur+1; - pPk = HasRowid(pTab) ? 0 : sqlite3PrimaryKeyIndex(pTab); - for(nIdx=0, pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext, nIdx++){ - if( IsPrimaryKeyIndex(pIdx) && pPk!=0 ){ - iDataCur = pParse->nTab; - pTabList->a[0].iCursor = iDataCur; - } - pParse->nTab++; - } - - /* Allocate space for aXRef[], aRegIdx[], and aToOpen[]. - ** Initialize aXRef[] and aToOpen[] to their default values. - */ - aXRef = sqlite3DbMallocRaw(db, sizeof(int) * (pTab->nCol+nIdx) + nIdx+2 ); - if( aXRef==0 ) goto update_cleanup; - aRegIdx = aXRef+pTab->nCol; - aToOpen = (u8*)(aRegIdx+nIdx); - memset(aToOpen, 1, nIdx+1); - aToOpen[nIdx+1] = 0; - for(i=0; inCol; i++) aXRef[i] = -1; - - /* Initialize the name-context */ - memset(&sNC, 0, sizeof(sNC)); - sNC.pParse = pParse; - sNC.pSrcList = pTabList; - - /* Resolve the column names in all the expressions of the - ** of the UPDATE statement. Also find the column index - ** for each column to be updated in the pChanges array. For each - ** column to be updated, make sure we have authorization to change - ** that column. - */ - chngRowid = chngPk = 0; - for(i=0; inExpr; i++){ - if( sqlite3ResolveExprNames(&sNC, pChanges->a[i].pExpr) ){ - goto update_cleanup; - } - for(j=0; jnCol; j++){ - if( sqlite3StrICmp(pTab->aCol[j].zName, pChanges->a[i].zName)==0 ){ - if( j==pTab->iPKey ){ - chngRowid = 1; - pRowidExpr = pChanges->a[i].pExpr; - }else if( pPk && (pTab->aCol[j].colFlags & COLFLAG_PRIMKEY)!=0 ){ - chngPk = 1; - } - aXRef[j] = i; - break; - } - } - if( j>=pTab->nCol ){ - if( pPk==0 && sqlite3IsRowid(pChanges->a[i].zName) ){ - j = -1; - chngRowid = 1; - pRowidExpr = pChanges->a[i].pExpr; - }else{ - sqlite3ErrorMsg(pParse, "no such column: %s", pChanges->a[i].zName); - pParse->checkSchema = 1; - goto update_cleanup; - } - } -#ifndef SQLITE_OMIT_AUTHORIZATION - { - int rc; - rc = sqlite3AuthCheck(pParse, SQLITE_UPDATE, pTab->zName, - j<0 ? "ROWID" : pTab->aCol[j].zName, - db->aDb[iDb].zName); - if( rc==SQLITE_DENY ){ - goto update_cleanup; - }else if( rc==SQLITE_IGNORE ){ - aXRef[j] = -1; - } - } -#endif - } - assert( (chngRowid & chngPk)==0 ); - assert( chngRowid==0 || chngRowid==1 ); - assert( chngPk==0 || chngPk==1 ); - chngKey = chngRowid + chngPk; - - /* The SET expressions are not actually used inside the WHERE loop. - ** So reset the colUsed mask - */ - pTabList->a[0].colUsed = 0; - - hasFK = sqlite3FkRequired(pParse, pTab, aXRef, chngKey); - - /* There is one entry in the aRegIdx[] array for each index on the table - ** being updated. Fill in aRegIdx[] with a register number that will hold - ** the key for accessing each index. 
- */ - for(j=0, pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext, j++){ - int reg; - if( chngKey || hasFK || pIdx->pPartIdxWhere || pIdx==pPk ){ - reg = ++pParse->nMem; - }else{ - reg = 0; - for(i=0; inKeyCol; i++){ - if( aXRef[pIdx->aiColumn[i]]>=0 ){ - reg = ++pParse->nMem; - break; - } - } - } - if( reg==0 ) aToOpen[j+1] = 0; - aRegIdx[j] = reg; - } - - /* Begin generating code. */ - v = sqlite3GetVdbe(pParse); - if( v==0 ) goto update_cleanup; - if( pParse->nested==0 ) sqlite3VdbeCountChanges(v); - sqlite3BeginWriteOperation(pParse, 1, iDb); - -#ifndef SQLITE_OMIT_VIRTUALTABLE - /* Virtual tables must be handled separately */ - if( IsVirtual(pTab) ){ - updateVirtualTable(pParse, pTabList, pTab, pChanges, pRowidExpr, aXRef, - pWhere, onError); - pWhere = 0; - pTabList = 0; - goto update_cleanup; - } -#endif - - /* Allocate required registers. */ - regRowSet = ++pParse->nMem; - regOldRowid = regNewRowid = ++pParse->nMem; - if( chngPk || pTrigger || hasFK ){ - regOld = pParse->nMem + 1; - pParse->nMem += pTab->nCol; - } - if( chngKey || pTrigger || hasFK ){ - regNewRowid = ++pParse->nMem; - } - regNew = pParse->nMem + 1; - pParse->nMem += pTab->nCol; - - /* Start the view context. */ - if( isView ){ - sqlite3AuthContextPush(pParse, &sContext, pTab->zName); - } - - /* If we are trying to update a view, realize that view into - ** a ephemeral table. - */ -#if !defined(SQLITE_OMIT_VIEW) && !defined(SQLITE_OMIT_TRIGGER) - if( isView ){ - sqlite3MaterializeView(pParse, pTab, pWhere, iDataCur); - } -#endif - - /* Resolve the column names in all the expressions in the - ** WHERE clause. - */ - if( sqlite3ResolveExprNames(&sNC, pWhere) ){ - goto update_cleanup; - } - - /* Begin the database scan - */ - if( HasRowid(pTab) ){ - sqlite3VdbeAddOp3(v, OP_Null, 0, regRowSet, regOldRowid); - pWInfo = sqlite3WhereBegin( - pParse, pTabList, pWhere, 0, 0, WHERE_ONEPASS_DESIRED, iIdxCur - ); - if( pWInfo==0 ) goto update_cleanup; - okOnePass = sqlite3WhereOkOnePass(pWInfo, aiCurOnePass); - - /* Remember the rowid of every item to be updated. - */ - sqlite3VdbeAddOp2(v, OP_Rowid, iDataCur, regOldRowid); - if( !okOnePass ){ - sqlite3VdbeAddOp2(v, OP_RowSetAdd, regRowSet, regOldRowid); - } - - /* End the database scan loop. 
- */ - sqlite3WhereEnd(pWInfo); - }else{ - int iPk; /* First of nPk memory cells holding PRIMARY KEY value */ - i16 nPk; /* Number of components of the PRIMARY KEY */ - int addrOpen; /* Address of the OpenEphemeral instruction */ - - assert( pPk!=0 ); - nPk = pPk->nKeyCol; - iPk = pParse->nMem+1; - pParse->nMem += nPk; - regKey = ++pParse->nMem; - iEph = pParse->nTab++; - sqlite3VdbeAddOp2(v, OP_Null, 0, iPk); - addrOpen = sqlite3VdbeAddOp2(v, OP_OpenEphemeral, iEph, nPk); - sqlite3VdbeSetP4KeyInfo(pParse, pPk); - pWInfo = sqlite3WhereBegin(pParse, pTabList, pWhere, 0, 0, - WHERE_ONEPASS_DESIRED, iIdxCur); - if( pWInfo==0 ) goto update_cleanup; - okOnePass = sqlite3WhereOkOnePass(pWInfo, aiCurOnePass); - for(i=0; iaiColumn[i], - iPk+i); - } - if( okOnePass ){ - sqlite3VdbeChangeToNoop(v, addrOpen); - nKey = nPk; - regKey = iPk; - }else{ - sqlite3VdbeAddOp4(v, OP_MakeRecord, iPk, nPk, regKey, - sqlite3IndexAffinityStr(v, pPk), nPk); - sqlite3VdbeAddOp2(v, OP_IdxInsert, iEph, regKey); - } - sqlite3WhereEnd(pWInfo); - } - - /* Initialize the count of updated rows - */ - if( (db->flags & SQLITE_CountRows) && !pParse->pTriggerTab ){ - regRowCount = ++pParse->nMem; - sqlite3VdbeAddOp2(v, OP_Integer, 0, regRowCount); - } - - labelBreak = sqlite3VdbeMakeLabel(v); - if( !isView ){ - /* - ** Open every index that needs updating. Note that if any - ** index could potentially invoke a REPLACE conflict resolution - ** action, then we need to open all indices because we might need - ** to be deleting some records. - */ - if( onError==OE_Replace ){ - memset(aToOpen, 1, nIdx+1); - }else{ - for(pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext){ - if( pIdx->onError==OE_Replace ){ - memset(aToOpen, 1, nIdx+1); - break; - } - } - } - if( okOnePass ){ - if( aiCurOnePass[0]>=0 ) aToOpen[aiCurOnePass[0]-iBaseCur] = 0; - if( aiCurOnePass[1]>=0 ) aToOpen[aiCurOnePass[1]-iBaseCur] = 0; - } - sqlite3OpenTableAndIndices(pParse, pTab, OP_OpenWrite, iBaseCur, aToOpen, - 0, 0); - } - - /* Top of the update loop */ - if( okOnePass ){ - if( aToOpen[iDataCur-iBaseCur] ){ - assert( pPk!=0 ); - sqlite3VdbeAddOp4Int(v, OP_NotFound, iDataCur, labelBreak, regKey, nKey); - VdbeCoverageNeverTaken(v); - } - labelContinue = labelBreak; - sqlite3VdbeAddOp2(v, OP_IsNull, pPk ? regKey : regOldRowid, labelBreak); - VdbeCoverage(v); - }else if( pPk ){ - labelContinue = sqlite3VdbeMakeLabel(v); - sqlite3VdbeAddOp2(v, OP_Rewind, iEph, labelBreak); VdbeCoverage(v); - addrTop = sqlite3VdbeAddOp2(v, OP_RowKey, iEph, regKey); - sqlite3VdbeAddOp4Int(v, OP_NotFound, iDataCur, labelContinue, regKey, 0); - VdbeCoverage(v); - }else{ - labelContinue = sqlite3VdbeAddOp3(v, OP_RowSetRead, regRowSet, labelBreak, - regOldRowid); - VdbeCoverage(v); - sqlite3VdbeAddOp3(v, OP_NotExists, iDataCur, labelContinue, regOldRowid); - VdbeCoverage(v); - } - - /* If the record number will change, set register regNewRowid to - ** contain the new value. If the record number is not being modified, - ** then regNewRowid is the same register as regOldRowid, which is - ** already populated. */ - assert( chngKey || pTrigger || hasFK || regOldRowid==regNewRowid ); - if( chngRowid ){ - sqlite3ExprCode(pParse, pRowidExpr, regNewRowid); - sqlite3VdbeAddOp1(v, OP_MustBeInt, regNewRowid); VdbeCoverage(v); - } - - /* Compute the old pre-UPDATE content of the row being changed, if that - ** information is needed */ - if( chngPk || hasFK || pTrigger ){ - u32 oldmask = (hasFK ? 
sqlite3FkOldmask(pParse, pTab) : 0); - oldmask |= sqlite3TriggerColmask(pParse, - pTrigger, pChanges, 0, TRIGGER_BEFORE|TRIGGER_AFTER, pTab, onError - ); - for(i=0; inCol; i++){ - if( oldmask==0xffffffff - || (i<32 && (oldmask & MASKBIT32(i))!=0) - || (pTab->aCol[i].colFlags & COLFLAG_PRIMKEY)!=0 - ){ - testcase( oldmask!=0xffffffff && i==31 ); - sqlite3ExprCodeGetColumnOfTable(v, pTab, iDataCur, i, regOld+i); - }else{ - sqlite3VdbeAddOp2(v, OP_Null, 0, regOld+i); - } - } - if( chngRowid==0 && pPk==0 ){ - sqlite3VdbeAddOp2(v, OP_Copy, regOldRowid, regNewRowid); - } - } - - /* Populate the array of registers beginning at regNew with the new - ** row data. This array is used to check constaints, create the new - ** table and index records, and as the values for any new.* references - ** made by triggers. - ** - ** If there are one or more BEFORE triggers, then do not populate the - ** registers associated with columns that are (a) not modified by - ** this UPDATE statement and (b) not accessed by new.* references. The - ** values for registers not modified by the UPDATE must be reloaded from - ** the database after the BEFORE triggers are fired anyway (as the trigger - ** may have modified them). So not loading those that are not going to - ** be used eliminates some redundant opcodes. - */ - newmask = sqlite3TriggerColmask( - pParse, pTrigger, pChanges, 1, TRIGGER_BEFORE, pTab, onError - ); - /*sqlite3VdbeAddOp3(v, OP_Null, 0, regNew, regNew+pTab->nCol-1);*/ - for(i=0; inCol; i++){ - if( i==pTab->iPKey ){ - sqlite3VdbeAddOp2(v, OP_Null, 0, regNew+i); - }else{ - j = aXRef[i]; - if( j>=0 ){ - sqlite3ExprCode(pParse, pChanges->a[j].pExpr, regNew+i); - }else if( 0==(tmask&TRIGGER_BEFORE) || i>31 || (newmask & MASKBIT32(i)) ){ - /* This branch loads the value of a column that will not be changed - ** into a register. This is done if there are no BEFORE triggers, or - ** if there are one or more BEFORE triggers that use this value via - ** a new.* reference in a trigger program. - */ - testcase( i==31 ); - testcase( i==32 ); - sqlite3ExprCodeGetColumnOfTable(v, pTab, iDataCur, i, regNew+i); - }else{ - sqlite3VdbeAddOp2(v, OP_Null, 0, regNew+i); - } - } - } - - /* Fire any BEFORE UPDATE triggers. This happens before constraints are - ** verified. One could argue that this is wrong. - */ - if( tmask&TRIGGER_BEFORE ){ - sqlite3TableAffinity(v, pTab, regNew); - sqlite3CodeRowTrigger(pParse, pTrigger, TK_UPDATE, pChanges, - TRIGGER_BEFORE, pTab, regOldRowid, onError, labelContinue); - - /* The row-trigger may have deleted the row being updated. In this - ** case, jump to the next row. No updates or AFTER triggers are - ** required. This behavior - what happens when the row being updated - ** is deleted or renamed by a BEFORE trigger - is left undefined in the - ** documentation. - */ - if( pPk ){ - sqlite3VdbeAddOp4Int(v, OP_NotFound, iDataCur, labelContinue,regKey,nKey); - VdbeCoverage(v); - }else{ - sqlite3VdbeAddOp3(v, OP_NotExists, iDataCur, labelContinue, regOldRowid); - VdbeCoverage(v); - } - - /* If it did not delete it, the row-trigger may still have modified - ** some of the columns of the row being updated. Load the values for - ** all columns not modified by the update statement into their - ** registers in case this has happened. 
- */ - for(i=0; inCol; i++){ - if( aXRef[i]<0 && i!=pTab->iPKey ){ - sqlite3ExprCodeGetColumnOfTable(v, pTab, iDataCur, i, regNew+i); - } - } - } - - if( !isView ){ - int j1 = 0; /* Address of jump instruction */ - int bReplace = 0; /* True if REPLACE conflict resolution might happen */ - - /* Do constraint checks. */ - assert( regOldRowid>0 ); - sqlite3GenerateConstraintChecks(pParse, pTab, aRegIdx, iDataCur, iIdxCur, - regNewRowid, regOldRowid, chngKey, onError, labelContinue, &bReplace); - - /* Do FK constraint checks. */ - if( hasFK ){ - sqlite3FkCheck(pParse, pTab, regOldRowid, 0, aXRef, chngKey); - } - - /* Delete the index entries associated with the current record. */ - if( bReplace || chngKey ){ - if( pPk ){ - j1 = sqlite3VdbeAddOp4Int(v, OP_NotFound, iDataCur, 0, regKey, nKey); - }else{ - j1 = sqlite3VdbeAddOp3(v, OP_NotExists, iDataCur, 0, regOldRowid); - } - VdbeCoverageNeverTaken(v); - } - sqlite3GenerateRowIndexDelete(pParse, pTab, iDataCur, iIdxCur, aRegIdx); - - /* If changing the record number, delete the old record. */ - if( hasFK || chngKey || pPk!=0 ){ - sqlite3VdbeAddOp2(v, OP_Delete, iDataCur, 0); - } - if( bReplace || chngKey ){ - sqlite3VdbeJumpHere(v, j1); - } - - if( hasFK ){ - sqlite3FkCheck(pParse, pTab, 0, regNewRowid, aXRef, chngKey); - } - - /* Insert the new index entries and the new record. */ - sqlite3CompleteInsertion(pParse, pTab, iDataCur, iIdxCur, - regNewRowid, aRegIdx, 1, 0, 0); - - /* Do any ON CASCADE, SET NULL or SET DEFAULT operations required to - ** handle rows (possibly in other tables) that refer via a foreign key - ** to the row just updated. */ - if( hasFK ){ - sqlite3FkActions(pParse, pTab, pChanges, regOldRowid, aXRef, chngKey); - } - } - - /* Increment the row counter - */ - if( (db->flags & SQLITE_CountRows) && !pParse->pTriggerTab){ - sqlite3VdbeAddOp2(v, OP_AddImm, regRowCount, 1); - } - - sqlite3CodeRowTrigger(pParse, pTrigger, TK_UPDATE, pChanges, - TRIGGER_AFTER, pTab, regOldRowid, onError, labelContinue); - - /* Repeat the above with the next record to be updated, until - ** all record selected by the WHERE clause have been updated. - */ - if( okOnePass ){ - /* Nothing to do at end-of-loop for a single-pass */ - }else if( pPk ){ - sqlite3VdbeResolveLabel(v, labelContinue); - sqlite3VdbeAddOp2(v, OP_Next, iEph, addrTop); VdbeCoverage(v); - }else{ - sqlite3VdbeAddOp2(v, OP_Goto, 0, labelContinue); - } - sqlite3VdbeResolveLabel(v, labelBreak); - - /* Close all tables */ - for(i=0, pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext, i++){ - assert( aRegIdx ); - if( aToOpen[i+1] ){ - sqlite3VdbeAddOp2(v, OP_Close, iIdxCur+i, 0); - } - } - if( iDataCurnested==0 && pParse->pTriggerTab==0 ){ - sqlite3AutoincrementEnd(pParse); - } - - /* - ** Return the number of rows that were changed. If this routine is - ** generating code because of a call to sqlite3NestedParse(), do not - ** invoke the callback function. - */ - if( (db->flags&SQLITE_CountRows) && !pParse->pTriggerTab && !pParse->nested ){ - sqlite3VdbeAddOp2(v, OP_ResultRow, regRowCount, 1); - sqlite3VdbeSetNumCols(v, 1); - sqlite3VdbeSetColName(v, 0, COLNAME_NAME, "rows updated", SQLITE_STATIC); - } - -update_cleanup: - sqlite3AuthContextPop(&sContext); - sqlite3DbFree(db, aXRef); /* Also frees aRegIdx[] and aToOpen[] */ - sqlite3SrcListDelete(db, pTabList); - sqlite3ExprListDelete(db, pChanges); - sqlite3ExprDelete(db, pWhere); - return; -} -/* Make sure "isView" and other macros defined above are undefined. 
Otherwise -** thely may interfere with compilation of other functions in this file -** (or in another file, if this file becomes part of the amalgamation). */ -#ifdef isView - #undef isView -#endif -#ifdef pTrigger - #undef pTrigger -#endif - -#ifndef SQLITE_OMIT_VIRTUALTABLE -/* -** Generate code for an UPDATE of a virtual table. -** -** The strategy is that we create an ephemerial table that contains -** for each row to be changed: -** -** (A) The original rowid of that row. -** (B) The revised rowid for the row. (note1) -** (C) The content of every column in the row. -** -** Then we loop over this ephemeral table and for each row in -** the ephermeral table call VUpdate. -** -** When finished, drop the ephemeral table. -** -** (note1) Actually, if we know in advance that (A) is always the same -** as (B) we only store (A), then duplicate (A) when pulling -** it out of the ephemeral table before calling VUpdate. -*/ -static void updateVirtualTable( - Parse *pParse, /* The parsing context */ - SrcList *pSrc, /* The virtual table to be modified */ - Table *pTab, /* The virtual table */ - ExprList *pChanges, /* The columns to change in the UPDATE statement */ - Expr *pRowid, /* Expression used to recompute the rowid */ - int *aXRef, /* Mapping from columns of pTab to entries in pChanges */ - Expr *pWhere, /* WHERE clause of the UPDATE statement */ - int onError /* ON CONFLICT strategy */ -){ - Vdbe *v = pParse->pVdbe; /* Virtual machine under construction */ - ExprList *pEList = 0; /* The result set of the SELECT statement */ - Select *pSelect = 0; /* The SELECT statement */ - Expr *pExpr; /* Temporary expression */ - int ephemTab; /* Table holding the result of the SELECT */ - int i; /* Loop counter */ - int addr; /* Address of top of loop */ - int iReg; /* First register in set passed to OP_VUpdate */ - sqlite3 *db = pParse->db; /* Database connection */ - const char *pVTab = (const char*)sqlite3GetVTable(db, pTab); - SelectDest dest; - - /* Construct the SELECT statement that will find the new values for - ** all updated rows. - */ - pEList = sqlite3ExprListAppend(pParse, 0, sqlite3Expr(db, TK_ID, "_rowid_")); - if( pRowid ){ - pEList = sqlite3ExprListAppend(pParse, pEList, - sqlite3ExprDup(db, pRowid, 0)); - } - assert( pTab->iPKey<0 ); - for(i=0; inCol; i++){ - if( aXRef[i]>=0 ){ - pExpr = sqlite3ExprDup(db, pChanges->a[aXRef[i]].pExpr, 0); - }else{ - pExpr = sqlite3Expr(db, TK_ID, pTab->aCol[i].zName); - } - pEList = sqlite3ExprListAppend(pParse, pEList, pExpr); - } - pSelect = sqlite3SelectNew(pParse, pEList, pSrc, pWhere, 0, 0, 0, 0, 0, 0); - - /* Create the ephemeral table into which the update results will - ** be stored. - */ - assert( v ); - ephemTab = pParse->nTab++; - sqlite3VdbeAddOp2(v, OP_OpenEphemeral, ephemTab, pTab->nCol+1+(pRowid!=0)); - sqlite3VdbeChangeP5(v, BTREE_UNORDERED); - - /* fill the ephemeral table - */ - sqlite3SelectDestInit(&dest, SRT_Table, ephemTab); - sqlite3Select(pParse, pSelect, &dest); - - /* Generate code to scan the ephemeral table and call VUpdate. 
*/ - iReg = ++pParse->nMem; - pParse->nMem += pTab->nCol+1; - addr = sqlite3VdbeAddOp2(v, OP_Rewind, ephemTab, 0); VdbeCoverage(v); - sqlite3VdbeAddOp3(v, OP_Column, ephemTab, 0, iReg); - sqlite3VdbeAddOp3(v, OP_Column, ephemTab, (pRowid?1:0), iReg+1); - for(i=0; inCol; i++){ - sqlite3VdbeAddOp3(v, OP_Column, ephemTab, i+1+(pRowid!=0), iReg+2+i); - } - sqlite3VtabMakeWritable(pParse, pTab); - sqlite3VdbeAddOp4(v, OP_VUpdate, 0, pTab->nCol+2, iReg, pVTab, P4_VTAB); - sqlite3VdbeChangeP5(v, onError==OE_Default ? OE_Abort : onError); - sqlite3MayAbort(pParse); - sqlite3VdbeAddOp2(v, OP_Next, ephemTab, addr+1); VdbeCoverage(v); - sqlite3VdbeJumpHere(v, addr); - sqlite3VdbeAddOp2(v, OP_Close, ephemTab, 0); - - /* Cleanup */ - sqlite3SelectDelete(db, pSelect); -} -#endif /* SQLITE_OMIT_VIRTUALTABLE */ - -/************** End of update.c **********************************************/ -/************** Begin file vacuum.c ******************************************/ -/* -** 2003 April 6 -** -** The author disclaims copyright to this source code. In place of -** a legal notice, here is a blessing: -** -** May you do good and not evil. -** May you find forgiveness for yourself and forgive others. -** May you share freely, never taking more than you give. -** -************************************************************************* -** This file contains code used to implement the VACUUM command. -** -** Most of the code in this file may be omitted by defining the -** SQLITE_OMIT_VACUUM macro. -*/ - -#if !defined(SQLITE_OMIT_VACUUM) && !defined(SQLITE_OMIT_ATTACH) -/* -** Finalize a prepared statement. If there was an error, store the -** text of the error message in *pzErrMsg. Return the result code. -*/ -static int vacuumFinalize(sqlite3 *db, sqlite3_stmt *pStmt, char **pzErrMsg){ - int rc; - rc = sqlite3VdbeFinalize((Vdbe*)pStmt); - if( rc ){ - sqlite3SetString(pzErrMsg, db, sqlite3_errmsg(db)); - } - return rc; -} - -/* -** Execute zSql on database db. Return an error code. -*/ -static int execSql(sqlite3 *db, char **pzErrMsg, const char *zSql){ - sqlite3_stmt *pStmt; - VVA_ONLY( int rc; ) - if( !zSql ){ - return SQLITE_NOMEM; - } - if( SQLITE_OK!=sqlite3_prepare(db, zSql, -1, &pStmt, 0) ){ - sqlite3SetString(pzErrMsg, db, sqlite3_errmsg(db)); - return sqlite3_errcode(db); - } - VVA_ONLY( rc = ) sqlite3_step(pStmt); - assert( rc!=SQLITE_ROW || (db->flags&SQLITE_CountRows) ); - return vacuumFinalize(db, pStmt, pzErrMsg); -} - -/* -** Execute zSql on database db. The statement returns exactly -** one column. Execute this as SQL on the same database. -*/ -static int execExecSql(sqlite3 *db, char **pzErrMsg, const char *zSql){ - sqlite3_stmt *pStmt; - int rc; - - rc = sqlite3_prepare(db, zSql, -1, &pStmt, 0); - if( rc!=SQLITE_OK ) return rc; - - while( SQLITE_ROW==sqlite3_step(pStmt) ){ - rc = execSql(db, pzErrMsg, (char*)sqlite3_column_text(pStmt, 0)); - if( rc!=SQLITE_OK ){ - vacuumFinalize(db, pStmt, pzErrMsg); - return rc; - } - } - - return vacuumFinalize(db, pStmt, pzErrMsg); -} - -/* -** The VACUUM command is used to clean up the database, -** collapse free space, etc. It is modelled after the VACUUM command -** in PostgreSQL. The VACUUM command works as follows: -** -** (1) Create a new transient database file -** (2) Copy all content from the database being vacuumed into -** the new transient database file -** (3) Copy content from the transient database back into the -** original database. 
-** -** The transient database requires temporary disk space approximately -** equal to the size of the original database. The copy operation of -** step (3) requires additional temporary disk space approximately equal -** to the size of the original database for the rollback journal. -** Hence, temporary disk space that is approximately 2x the size of the -** orginal database is required. Every page of the database is written -** approximately 3 times: Once for step (2) and twice for step (3). -** Two writes per page are required in step (3) because the original -** database content must be written into the rollback journal prior to -** overwriting the database with the vacuumed content. -** -** Only 1x temporary space and only 1x writes would be required if -** the copy of step (3) were replace by deleting the original database -** and renaming the transient database as the original. But that will -** not work if other processes are attached to the original database. -** And a power loss in between deleting the original and renaming the -** transient would cause the database file to appear to be deleted -** following reboot. -*/ -SQLITE_PRIVATE void sqlite3Vacuum(Parse *pParse){ - Vdbe *v = sqlite3GetVdbe(pParse); - if( v ){ - sqlite3VdbeAddOp2(v, OP_Vacuum, 0, 0); - sqlite3VdbeUsesBtree(v, 0); - } - return; -} - -/* -** This routine implements the OP_Vacuum opcode of the VDBE. -*/ -SQLITE_PRIVATE int sqlite3RunVacuum(char **pzErrMsg, sqlite3 *db){ - int rc = SQLITE_OK; /* Return code from service routines */ - Btree *pMain; /* The database being vacuumed */ - Btree *pTemp; /* The temporary database we vacuum into */ - char *zSql = 0; /* SQL statements */ - int saved_flags; /* Saved value of the db->flags */ - int saved_nChange; /* Saved value of db->nChange */ - int saved_nTotalChange; /* Saved value of db->nTotalChange */ - void (*saved_xTrace)(void*,const char*); /* Saved db->xTrace */ - Db *pDb = 0; /* Database to detach at end of vacuum */ - int isMemDb; /* True if vacuuming a :memory: database */ - int nRes; /* Bytes of reserved space at the end of each page */ - int nDb; /* Number of attached databases */ - - if( !db->autoCommit ){ - sqlite3SetString(pzErrMsg, db, "cannot VACUUM from within a transaction"); - return SQLITE_ERROR; - } - if( db->nVdbeActive>1 ){ - sqlite3SetString(pzErrMsg, db,"cannot VACUUM - SQL statements in progress"); - return SQLITE_ERROR; - } - - /* Save the current value of the database flags so that it can be - ** restored before returning. Then set the writable-schema flag, and - ** disable CHECK and foreign key constraints. */ - saved_flags = db->flags; - saved_nChange = db->nChange; - saved_nTotalChange = db->nTotalChange; - saved_xTrace = db->xTrace; - db->flags |= SQLITE_WriteSchema | SQLITE_IgnoreChecks | SQLITE_PreferBuiltin; - db->flags &= ~(SQLITE_ForeignKeys | SQLITE_ReverseOrder); - db->xTrace = 0; - - pMain = db->aDb[0].pBt; - isMemDb = sqlite3PagerIsMemdb(sqlite3BtreePager(pMain)); - - /* Attach the temporary database as 'vacuum_db'. The synchronous pragma - ** can be set to 'off' for this file, as it is not recovered if a crash - ** occurs anyway. The integrity of the database is maintained by a - ** (possibly synchronous) transaction opened on the main database before - ** sqlite3BtreeCopyFile() is called. - ** - ** An optimisation would be to use a non-journaled pager. - ** (Later:) I tried setting "PRAGMA vacuum_db.journal_mode=OFF" but - ** that actually made the VACUUM run slower. 
Very little journalling - ** actually occurs when doing a vacuum since the vacuum_db is initially - ** empty. Only the journal header is written. Apparently it takes more - ** time to parse and run the PRAGMA to turn journalling off than it does - ** to write the journal header file. - */ - nDb = db->nDb; - if( sqlite3TempInMemory(db) ){ - zSql = "ATTACH ':memory:' AS vacuum_db;"; - }else{ - zSql = "ATTACH '' AS vacuum_db;"; - } - rc = execSql(db, pzErrMsg, zSql); - if( db->nDb>nDb ){ - pDb = &db->aDb[db->nDb-1]; - assert( strcmp(pDb->zName,"vacuum_db")==0 ); - } - if( rc!=SQLITE_OK ) goto end_of_vacuum; - pTemp = db->aDb[db->nDb-1].pBt; - - /* The call to execSql() to attach the temp database has left the file - ** locked (as there was more than one active statement when the transaction - ** to read the schema was concluded. Unlock it here so that this doesn't - ** cause problems for the call to BtreeSetPageSize() below. */ - sqlite3BtreeCommit(pTemp); - - nRes = sqlite3BtreeGetReserve(pMain); - - /* A VACUUM cannot change the pagesize of an encrypted database. */ -#ifdef SQLITE_HAS_CODEC - if( db->nextPagesize ){ - extern void sqlite3CodecGetKey(sqlite3*, int, void**, int*); - int nKey; - char *zKey; - sqlite3CodecGetKey(db, 0, (void**)&zKey, &nKey); - if( nKey ) db->nextPagesize = 0; - } -#endif - - rc = execSql(db, pzErrMsg, "PRAGMA vacuum_db.synchronous=OFF"); - if( rc!=SQLITE_OK ) goto end_of_vacuum; - - /* Begin a transaction and take an exclusive lock on the main database - ** file. This is done before the sqlite3BtreeGetPageSize(pMain) call below, - ** to ensure that we do not try to change the page-size on a WAL database. - */ - rc = execSql(db, pzErrMsg, "BEGIN;"); - if( rc!=SQLITE_OK ) goto end_of_vacuum; - rc = sqlite3BtreeBeginTrans(pMain, 2); - if( rc!=SQLITE_OK ) goto end_of_vacuum; - - /* Do not attempt to change the page size for a WAL database */ - if( sqlite3PagerGetJournalMode(sqlite3BtreePager(pMain)) - ==PAGER_JOURNALMODE_WAL ){ - db->nextPagesize = 0; - } - - if( sqlite3BtreeSetPageSize(pTemp, sqlite3BtreeGetPageSize(pMain), nRes, 0) - || (!isMemDb && sqlite3BtreeSetPageSize(pTemp, db->nextPagesize, nRes, 0)) - || NEVER(db->mallocFailed) - ){ - rc = SQLITE_NOMEM; - goto end_of_vacuum; - } - -#ifndef SQLITE_OMIT_AUTOVACUUM - sqlite3BtreeSetAutoVacuum(pTemp, db->nextAutovac>=0 ? db->nextAutovac : - sqlite3BtreeGetAutoVacuum(pMain)); -#endif - - /* Query the schema of the main database. Create a mirror schema - ** in the temporary database. - */ - rc = execExecSql(db, pzErrMsg, - "SELECT 'CREATE TABLE vacuum_db.' || substr(sql,14) " - " FROM sqlite_master WHERE type='table' AND name!='sqlite_sequence'" - " AND coalesce(rootpage,1)>0" - ); - if( rc!=SQLITE_OK ) goto end_of_vacuum; - rc = execExecSql(db, pzErrMsg, - "SELECT 'CREATE INDEX vacuum_db.' || substr(sql,14)" - " FROM sqlite_master WHERE sql LIKE 'CREATE INDEX %' "); - if( rc!=SQLITE_OK ) goto end_of_vacuum; - rc = execExecSql(db, pzErrMsg, - "SELECT 'CREATE UNIQUE INDEX vacuum_db.' || substr(sql,21) " - " FROM sqlite_master WHERE sql LIKE 'CREATE UNIQUE INDEX %'"); - if( rc!=SQLITE_OK ) goto end_of_vacuum; - - /* Loop through the tables in the main database. For each, do - ** an "INSERT INTO vacuum_db.xxx SELECT * FROM main.xxx;" to copy - ** the contents to the temporary database. - */ - rc = execExecSql(db, pzErrMsg, - "SELECT 'INSERT INTO vacuum_db.' || quote(name) " - "|| ' SELECT * FROM main.' 
|| quote(name) || ';'" - "FROM main.sqlite_master " - "WHERE type = 'table' AND name!='sqlite_sequence' " - " AND coalesce(rootpage,1)>0" - ); - if( rc!=SQLITE_OK ) goto end_of_vacuum; - - /* Copy over the sequence table - */ - rc = execExecSql(db, pzErrMsg, - "SELECT 'DELETE FROM vacuum_db.' || quote(name) || ';' " - "FROM vacuum_db.sqlite_master WHERE name='sqlite_sequence' " - ); - if( rc!=SQLITE_OK ) goto end_of_vacuum; - rc = execExecSql(db, pzErrMsg, - "SELECT 'INSERT INTO vacuum_db.' || quote(name) " - "|| ' SELECT * FROM main.' || quote(name) || ';' " - "FROM vacuum_db.sqlite_master WHERE name=='sqlite_sequence';" - ); - if( rc!=SQLITE_OK ) goto end_of_vacuum; - - - /* Copy the triggers, views, and virtual tables from the main database - ** over to the temporary database. None of these objects has any - ** associated storage, so all we have to do is copy their entries - ** from the SQLITE_MASTER table. - */ - rc = execSql(db, pzErrMsg, - "INSERT INTO vacuum_db.sqlite_master " - " SELECT type, name, tbl_name, rootpage, sql" - " FROM main.sqlite_master" - " WHERE type='view' OR type='trigger'" - " OR (type='table' AND rootpage=0)" - ); - if( rc ) goto end_of_vacuum; - - /* At this point, there is a write transaction open on both the - ** vacuum database and the main database. Assuming no error occurs, - ** both transactions are closed by this block - the main database - ** transaction by sqlite3BtreeCopyFile() and the other by an explicit - ** call to sqlite3BtreeCommit(). - */ - { - u32 meta; - int i; - - /* This array determines which meta meta values are preserved in the - ** vacuum. Even entries are the meta value number and odd entries - ** are an increment to apply to the meta value after the vacuum. - ** The increment is used to increase the schema cookie so that other - ** connections to the same database will know to reread the schema. - */ - static const unsigned char aCopy[] = { - BTREE_SCHEMA_VERSION, 1, /* Add one to the old schema cookie */ - BTREE_DEFAULT_CACHE_SIZE, 0, /* Preserve the default page cache size */ - BTREE_TEXT_ENCODING, 0, /* Preserve the text encoding */ - BTREE_USER_VERSION, 0, /* Preserve the user version */ - BTREE_APPLICATION_ID, 0, /* Preserve the application id */ - }; - - assert( 1==sqlite3BtreeIsInTrans(pTemp) ); - assert( 1==sqlite3BtreeIsInTrans(pMain) ); - - /* Copy Btree meta values */ - for(i=0; iflags */ - db->flags = saved_flags; - db->nChange = saved_nChange; - db->nTotalChange = saved_nTotalChange; - db->xTrace = saved_xTrace; - sqlite3BtreeSetPageSize(pMain, -1, -1, 1); - - /* Currently there is an SQL level transaction open on the vacuum - ** database. No locks are held on any other files (since the main file - ** was committed at the btree level). So it safe to end the transaction - ** by manually setting the autoCommit flag to true and detaching the - ** vacuum database. The vacuum_db journal file is deleted when the pager - ** is closed by the DETACH. - */ - db->autoCommit = 1; - - if( pDb ){ - sqlite3BtreeClose(pDb->pBt); - pDb->pBt = 0; - pDb->pSchema = 0; - } - - /* This both clears the schemas and reduces the size of the db->aDb[] - ** array. */ - sqlite3ResetAllSchemasOfConnection(db); - - return rc; -} - -#endif /* SQLITE_OMIT_VACUUM && SQLITE_OMIT_ATTACH */ - -/************** End of vacuum.c **********************************************/ -/************** Begin file vtab.c ********************************************/ -/* -** 2006 June 10 -** -** The author disclaims copyright to this source code. 
In place of -** a legal notice, here is a blessing: -** -** May you do good and not evil. -** May you find forgiveness for yourself and forgive others. -** May you share freely, never taking more than you give. -** -************************************************************************* -** This file contains code used to help implement virtual tables. -*/ -#ifndef SQLITE_OMIT_VIRTUALTABLE - -/* -** Before a virtual table xCreate() or xConnect() method is invoked, the -** sqlite3.pVtabCtx member variable is set to point to an instance of -** this struct allocated on the stack. It is used by the implementation of -** the sqlite3_declare_vtab() and sqlite3_vtab_config() APIs, both of which -** are invoked only from within xCreate and xConnect methods. -*/ -struct VtabCtx { - VTable *pVTable; /* The virtual table being constructed */ - Table *pTab; /* The Table object to which the virtual table belongs */ -}; - -/* -** The actual function that does the work of creating a new module. -** This function implements the sqlite3_create_module() and -** sqlite3_create_module_v2() interfaces. -*/ -static int createModule( - sqlite3 *db, /* Database in which module is registered */ - const char *zName, /* Name assigned to this module */ - const sqlite3_module *pModule, /* The definition of the module */ - void *pAux, /* Context pointer for xCreate/xConnect */ - void (*xDestroy)(void *) /* Module destructor function */ -){ - int rc = SQLITE_OK; - int nName; - - sqlite3_mutex_enter(db->mutex); - nName = sqlite3Strlen30(zName); - if( sqlite3HashFind(&db->aModule, zName, nName) ){ - rc = SQLITE_MISUSE_BKPT; - }else{ - Module *pMod; - pMod = (Module *)sqlite3DbMallocRaw(db, sizeof(Module) + nName + 1); - if( pMod ){ - Module *pDel; - char *zCopy = (char *)(&pMod[1]); - memcpy(zCopy, zName, nName+1); - pMod->zName = zCopy; - pMod->pModule = pModule; - pMod->pAux = pAux; - pMod->xDestroy = xDestroy; - pDel = (Module *)sqlite3HashInsert(&db->aModule,zCopy,nName,(void*)pMod); - assert( pDel==0 || pDel==pMod ); - if( pDel ){ - db->mallocFailed = 1; - sqlite3DbFree(db, pDel); - } - } - } - rc = sqlite3ApiExit(db, rc); - if( rc!=SQLITE_OK && xDestroy ) xDestroy(pAux); - - sqlite3_mutex_leave(db->mutex); - return rc; -} - - -/* -** External API function used to create a new virtual-table module. -*/ -SQLITE_API int sqlite3_create_module( - sqlite3 *db, /* Database in which module is registered */ - const char *zName, /* Name assigned to this module */ - const sqlite3_module *pModule, /* The definition of the module */ - void *pAux /* Context pointer for xCreate/xConnect */ -){ - return createModule(db, zName, pModule, pAux, 0); -} - -/* -** External API function used to create a new virtual-table module. -*/ -SQLITE_API int sqlite3_create_module_v2( - sqlite3 *db, /* Database in which module is registered */ - const char *zName, /* Name assigned to this module */ - const sqlite3_module *pModule, /* The definition of the module */ - void *pAux, /* Context pointer for xCreate/xConnect */ - void (*xDestroy)(void *) /* Module destructor function */ -){ - return createModule(db, zName, pModule, pAux, xDestroy); -} - -/* -** Lock the virtual table so that it cannot be disconnected. -** Locks nest. Every lock should have a corresponding unlock. -** If an unlock is omitted, resources leaks will occur. -** -** If a disconnect is attempted while a virtual table is locked, -** the disconnect is deferred until all locks have been removed. 
-*/ -SQLITE_PRIVATE void sqlite3VtabLock(VTable *pVTab){ - pVTab->nRef++; -} - - -/* -** pTab is a pointer to a Table structure representing a virtual-table. -** Return a pointer to the VTable object used by connection db to access -** this virtual-table, if one has been created, or NULL otherwise. -*/ -SQLITE_PRIVATE VTable *sqlite3GetVTable(sqlite3 *db, Table *pTab){ - VTable *pVtab; - assert( IsVirtual(pTab) ); - for(pVtab=pTab->pVTable; pVtab && pVtab->db!=db; pVtab=pVtab->pNext); - return pVtab; -} - -/* -** Decrement the ref-count on a virtual table object. When the ref-count -** reaches zero, call the xDisconnect() method to delete the object. -*/ -SQLITE_PRIVATE void sqlite3VtabUnlock(VTable *pVTab){ - sqlite3 *db = pVTab->db; - - assert( db ); - assert( pVTab->nRef>0 ); - assert( db->magic==SQLITE_MAGIC_OPEN || db->magic==SQLITE_MAGIC_ZOMBIE ); - - pVTab->nRef--; - if( pVTab->nRef==0 ){ - sqlite3_vtab *p = pVTab->pVtab; - if( p ){ - p->pModule->xDisconnect(p); - } - sqlite3DbFree(db, pVTab); - } -} - -/* -** Table p is a virtual table. This function moves all elements in the -** p->pVTable list to the sqlite3.pDisconnect lists of their associated -** database connections to be disconnected at the next opportunity. -** Except, if argument db is not NULL, then the entry associated with -** connection db is left in the p->pVTable list. -*/ -static VTable *vtabDisconnectAll(sqlite3 *db, Table *p){ - VTable *pRet = 0; - VTable *pVTable = p->pVTable; - p->pVTable = 0; - - /* Assert that the mutex (if any) associated with the BtShared database - ** that contains table p is held by the caller. See header comments - ** above function sqlite3VtabUnlockList() for an explanation of why - ** this makes it safe to access the sqlite3.pDisconnect list of any - ** database connection that may have an entry in the p->pVTable list. - */ - assert( db==0 || sqlite3SchemaMutexHeld(db, 0, p->pSchema) ); - - while( pVTable ){ - sqlite3 *db2 = pVTable->db; - VTable *pNext = pVTable->pNext; - assert( db2 ); - if( db2==db ){ - pRet = pVTable; - p->pVTable = pRet; - pRet->pNext = 0; - }else{ - pVTable->pNext = db2->pDisconnect; - db2->pDisconnect = pVTable; - } - pVTable = pNext; - } - - assert( !db || pRet ); - return pRet; -} - -/* -** Table *p is a virtual table. This function removes the VTable object -** for table *p associated with database connection db from the linked -** list in p->pVTab. It also decrements the VTable ref count. This is -** used when closing database connection db to free all of its VTable -** objects without disturbing the rest of the Schema object (which may -** be being used by other shared-cache connections). -*/ -SQLITE_PRIVATE void sqlite3VtabDisconnect(sqlite3 *db, Table *p){ - VTable **ppVTab; - - assert( IsVirtual(p) ); - assert( sqlite3BtreeHoldsAllMutexes(db) ); - assert( sqlite3_mutex_held(db->mutex) ); - - for(ppVTab=&p->pVTable; *ppVTab; ppVTab=&(*ppVTab)->pNext){ - if( (*ppVTab)->db==db ){ - VTable *pVTab = *ppVTab; - *ppVTab = pVTab->pNext; - sqlite3VtabUnlock(pVTab); - break; - } - } -} - - -/* -** Disconnect all the virtual table objects in the sqlite3.pDisconnect list. -** -** This function may only be called when the mutexes associated with all -** shared b-tree databases opened using connection db are held by the -** caller. This is done to protect the sqlite3.pDisconnect list. The -** sqlite3.pDisconnect list is accessed only as follows: -** -** 1) By this function. 
In this case, all BtShared mutexes and the mutex -** associated with the database handle itself must be held. -** -** 2) By function vtabDisconnectAll(), when it adds a VTable entry to -** the sqlite3.pDisconnect list. In this case either the BtShared mutex -** associated with the database the virtual table is stored in is held -** or, if the virtual table is stored in a non-sharable database, then -** the database handle mutex is held. -** -** As a result, a sqlite3.pDisconnect cannot be accessed simultaneously -** by multiple threads. It is thread-safe. -*/ -SQLITE_PRIVATE void sqlite3VtabUnlockList(sqlite3 *db){ - VTable *p = db->pDisconnect; - db->pDisconnect = 0; - - assert( sqlite3BtreeHoldsAllMutexes(db) ); - assert( sqlite3_mutex_held(db->mutex) ); - - if( p ){ - sqlite3ExpirePreparedStatements(db); - do { - VTable *pNext = p->pNext; - sqlite3VtabUnlock(p); - p = pNext; - }while( p ); - } -} - -/* -** Clear any and all virtual-table information from the Table record. -** This routine is called, for example, just before deleting the Table -** record. -** -** Since it is a virtual-table, the Table structure contains a pointer -** to the head of a linked list of VTable structures. Each VTable -** structure is associated with a single sqlite3* user of the schema. -** The reference count of the VTable structure associated with database -** connection db is decremented immediately (which may lead to the -** structure being xDisconnected and free). Any other VTable structures -** in the list are moved to the sqlite3.pDisconnect list of the associated -** database connection. -*/ -SQLITE_PRIVATE void sqlite3VtabClear(sqlite3 *db, Table *p){ - if( !db || db->pnBytesFreed==0 ) vtabDisconnectAll(0, p); - if( p->azModuleArg ){ - int i; - for(i=0; inModuleArg; i++){ - if( i!=1 ) sqlite3DbFree(db, p->azModuleArg[i]); - } - sqlite3DbFree(db, p->azModuleArg); - } -} - -/* -** Add a new module argument to pTable->azModuleArg[]. -** The string is not copied - the pointer is stored. The -** string will be freed automatically when the table is -** deleted. -*/ -static void addModuleArgument(sqlite3 *db, Table *pTable, char *zArg){ - int i = pTable->nModuleArg++; - int nBytes = sizeof(char *)*(1+pTable->nModuleArg); - char **azModuleArg; - azModuleArg = sqlite3DbRealloc(db, pTable->azModuleArg, nBytes); - if( azModuleArg==0 ){ - int j; - for(j=0; jazModuleArg[j]); - } - sqlite3DbFree(db, zArg); - sqlite3DbFree(db, pTable->azModuleArg); - pTable->nModuleArg = 0; - }else{ - azModuleArg[i] = zArg; - azModuleArg[i+1] = 0; - } - pTable->azModuleArg = azModuleArg; -} - -/* -** The parser calls this routine when it first sees a CREATE VIRTUAL TABLE -** statement. The module name has been parsed, but the optional list -** of parameters that follow the module name are still pending. 
-*/ -SQLITE_PRIVATE void sqlite3VtabBeginParse( - Parse *pParse, /* Parsing context */ - Token *pName1, /* Name of new table, or database name */ - Token *pName2, /* Name of new table or NULL */ - Token *pModuleName, /* Name of the module for the virtual table */ - int ifNotExists /* No error if the table already exists */ -){ - int iDb; /* The database the table is being created in */ - Table *pTable; /* The new virtual table */ - sqlite3 *db; /* Database connection */ - - sqlite3StartTable(pParse, pName1, pName2, 0, 0, 1, ifNotExists); - pTable = pParse->pNewTable; - if( pTable==0 ) return; - assert( 0==pTable->pIndex ); - - db = pParse->db; - iDb = sqlite3SchemaToIndex(db, pTable->pSchema); - assert( iDb>=0 ); - - pTable->tabFlags |= TF_Virtual; - pTable->nModuleArg = 0; - addModuleArgument(db, pTable, sqlite3NameFromToken(db, pModuleName)); - addModuleArgument(db, pTable, 0); - addModuleArgument(db, pTable, sqlite3DbStrDup(db, pTable->zName)); - pParse->sNameToken.n = (int)(&pModuleName->z[pModuleName->n] - pName1->z); - -#ifndef SQLITE_OMIT_AUTHORIZATION - /* Creating a virtual table invokes the authorization callback twice. - ** The first invocation, to obtain permission to INSERT a row into the - ** sqlite_master table, has already been made by sqlite3StartTable(). - ** The second call, to obtain permission to create the table, is made now. - */ - if( pTable->azModuleArg ){ - sqlite3AuthCheck(pParse, SQLITE_CREATE_VTABLE, pTable->zName, - pTable->azModuleArg[0], pParse->db->aDb[iDb].zName); - } -#endif -} - -/* -** This routine takes the module argument that has been accumulating -** in pParse->zArg[] and appends it to the list of arguments on the -** virtual table currently under construction in pParse->pTable. -*/ -static void addArgumentToVtab(Parse *pParse){ - if( pParse->sArg.z && pParse->pNewTable ){ - const char *z = (const char*)pParse->sArg.z; - int n = pParse->sArg.n; - sqlite3 *db = pParse->db; - addModuleArgument(db, pParse->pNewTable, sqlite3DbStrNDup(db, z, n)); - } -} - -/* -** The parser calls this routine after the CREATE VIRTUAL TABLE statement -** has been completely parsed. -*/ -SQLITE_PRIVATE void sqlite3VtabFinishParse(Parse *pParse, Token *pEnd){ - Table *pTab = pParse->pNewTable; /* The table being constructed */ - sqlite3 *db = pParse->db; /* The database connection */ - - if( pTab==0 ) return; - addArgumentToVtab(pParse); - pParse->sArg.z = 0; - if( pTab->nModuleArg<1 ) return; - - /* If the CREATE VIRTUAL TABLE statement is being entered for the - ** first time (in other words if the virtual table is actually being - ** created now instead of just being read out of sqlite_master) then - ** do additional initialization work and store the statement text - ** in the sqlite_master table. - */ - if( !db->init.busy ){ - char *zStmt; - char *zWhere; - int iDb; - Vdbe *v; - - /* Compute the complete text of the CREATE VIRTUAL TABLE statement */ - if( pEnd ){ - pParse->sNameToken.n = (int)(pEnd->z - pParse->sNameToken.z) + pEnd->n; - } - zStmt = sqlite3MPrintf(db, "CREATE VIRTUAL TABLE %T", &pParse->sNameToken); - - /* A slot for the record has already been allocated in the - ** SQLITE_MASTER table. We just need to update that slot with all - ** the information we've collected. - ** - ** The VM register number pParse->regRowid holds the rowid of an - ** entry in the sqlite_master table tht was created for this vtab - ** by sqlite3StartTable(). 
- */ - iDb = sqlite3SchemaToIndex(db, pTab->pSchema); - sqlite3NestedParse(pParse, - "UPDATE %Q.%s " - "SET type='table', name=%Q, tbl_name=%Q, rootpage=0, sql=%Q " - "WHERE rowid=#%d", - db->aDb[iDb].zName, SCHEMA_TABLE(iDb), - pTab->zName, - pTab->zName, - zStmt, - pParse->regRowid - ); - sqlite3DbFree(db, zStmt); - v = sqlite3GetVdbe(pParse); - sqlite3ChangeCookie(pParse, iDb); - - sqlite3VdbeAddOp2(v, OP_Expire, 0, 0); - zWhere = sqlite3MPrintf(db, "name='%q' AND type='table'", pTab->zName); - sqlite3VdbeAddParseSchemaOp(v, iDb, zWhere); - sqlite3VdbeAddOp4(v, OP_VCreate, iDb, 0, 0, - pTab->zName, sqlite3Strlen30(pTab->zName) + 1); - } - - /* If we are rereading the sqlite_master table create the in-memory - ** record of the table. The xConnect() method is not called until - ** the first time the virtual table is used in an SQL statement. This - ** allows a schema that contains virtual tables to be loaded before - ** the required virtual table implementations are registered. */ - else { - Table *pOld; - Schema *pSchema = pTab->pSchema; - const char *zName = pTab->zName; - int nName = sqlite3Strlen30(zName); - assert( sqlite3SchemaMutexHeld(db, 0, pSchema) ); - pOld = sqlite3HashInsert(&pSchema->tblHash, zName, nName, pTab); - if( pOld ){ - db->mallocFailed = 1; - assert( pTab==pOld ); /* Malloc must have failed inside HashInsert() */ - return; - } - pParse->pNewTable = 0; - } -} - -/* -** The parser calls this routine when it sees the first token -** of an argument to the module name in a CREATE VIRTUAL TABLE statement. -*/ -SQLITE_PRIVATE void sqlite3VtabArgInit(Parse *pParse){ - addArgumentToVtab(pParse); - pParse->sArg.z = 0; - pParse->sArg.n = 0; -} - -/* -** The parser calls this routine for each token after the first token -** in an argument to the module name in a CREATE VIRTUAL TABLE statement. -*/ -SQLITE_PRIVATE void sqlite3VtabArgExtend(Parse *pParse, Token *p){ - Token *pArg = &pParse->sArg; - if( pArg->z==0 ){ - pArg->z = p->z; - pArg->n = p->n; - }else{ - assert(pArg->z < p->z); - pArg->n = (int)(&p->z[p->n] - pArg->z); - } -} - -/* -** Invoke a virtual table constructor (either xCreate or xConnect). The -** pointer to the function to invoke is passed as the fourth parameter -** to this procedure. 
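/* Illustrative sketch (not part of this file): a minimal xConnect-style
** constructor of the kind vtabCallConstructor() below invokes.  It declares
** the table schema via sqlite3_declare_vtab(), allocates the sqlite3_vtab
** object, and reports failures through the pzErr argument (which the caller
** releases with sqlite3_free()).  The structure and column names are
** invented; the HIDDEN keyword in the declared schema is the token that the
** constructor-calling code strips out and records as a hidden-column flag.
*/
#include <sqlite3.h>
#include <string.h>

typedef struct ExampleVtab {
  sqlite3_vtab base;     /* must come first: the core only sees this part */
  /* module-specific state would follow here */
} ExampleVtab;

static int exampleConnect(
  sqlite3 *db, void *pAux,
  int argc, const char *const*argv,
  sqlite3_vtab **ppVtab, char **pzErr
){
  ExampleVtab *p;
  int rc = sqlite3_declare_vtab(db, "CREATE TABLE x(a, b, extra HIDDEN)");
  if( rc!=SQLITE_OK ){
    *pzErr = sqlite3_mprintf("example: sqlite3_declare_vtab() failed");
    return rc;
  }
  p = sqlite3_malloc(sizeof(*p));
  if( p==0 ) return SQLITE_NOMEM;
  memset(p, 0, sizeof(*p));
  *ppVtab = &p->base;
  (void)pAux; (void)argc; (void)argv;
  return SQLITE_OK;
}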
-*/ -static int vtabCallConstructor( - sqlite3 *db, - Table *pTab, - Module *pMod, - int (*xConstruct)(sqlite3*,void*,int,const char*const*,sqlite3_vtab**,char**), - char **pzErr -){ - VtabCtx sCtx, *pPriorCtx; - VTable *pVTable; - int rc; - const char *const*azArg = (const char *const*)pTab->azModuleArg; - int nArg = pTab->nModuleArg; - char *zErr = 0; - char *zModuleName = sqlite3MPrintf(db, "%s", pTab->zName); - int iDb; - - if( !zModuleName ){ - return SQLITE_NOMEM; - } - - pVTable = sqlite3DbMallocZero(db, sizeof(VTable)); - if( !pVTable ){ - sqlite3DbFree(db, zModuleName); - return SQLITE_NOMEM; - } - pVTable->db = db; - pVTable->pMod = pMod; - - iDb = sqlite3SchemaToIndex(db, pTab->pSchema); - pTab->azModuleArg[1] = db->aDb[iDb].zName; - - /* Invoke the virtual table constructor */ - assert( &db->pVtabCtx ); - assert( xConstruct ); - sCtx.pTab = pTab; - sCtx.pVTable = pVTable; - pPriorCtx = db->pVtabCtx; - db->pVtabCtx = &sCtx; - rc = xConstruct(db, pMod->pAux, nArg, azArg, &pVTable->pVtab, &zErr); - db->pVtabCtx = pPriorCtx; - if( rc==SQLITE_NOMEM ) db->mallocFailed = 1; - - if( SQLITE_OK!=rc ){ - if( zErr==0 ){ - *pzErr = sqlite3MPrintf(db, "vtable constructor failed: %s", zModuleName); - }else { - *pzErr = sqlite3MPrintf(db, "%s", zErr); - sqlite3_free(zErr); - } - sqlite3DbFree(db, pVTable); - }else if( ALWAYS(pVTable->pVtab) ){ - /* Justification of ALWAYS(): A correct vtab constructor must allocate - ** the sqlite3_vtab object if successful. */ - pVTable->pVtab->pModule = pMod->pModule; - pVTable->nRef = 1; - if( sCtx.pTab ){ - const char *zFormat = "vtable constructor did not declare schema: %s"; - *pzErr = sqlite3MPrintf(db, zFormat, pTab->zName); - sqlite3VtabUnlock(pVTable); - rc = SQLITE_ERROR; - }else{ - int iCol; - /* If everything went according to plan, link the new VTable structure - ** into the linked list headed by pTab->pVTable. Then loop through the - ** columns of the table to see if any of them contain the token "hidden". - ** If so, set the Column COLFLAG_HIDDEN flag and remove the token from - ** the type string. */ - pVTable->pNext = pTab->pVTable; - pTab->pVTable = pVTable; - - for(iCol=0; iColnCol; iCol++){ - char *zType = pTab->aCol[iCol].zType; - int nType; - int i = 0; - if( !zType ) continue; - nType = sqlite3Strlen30(zType); - if( sqlite3StrNICmp("hidden", zType, 6)||(zType[6] && zType[6]!=' ') ){ - for(i=0; i0 ){ - assert(zType[i-1]==' '); - zType[i-1] = '\0'; - } - pTab->aCol[iCol].colFlags |= COLFLAG_HIDDEN; - } - } - } - } - - sqlite3DbFree(db, zModuleName); - return rc; -} - -/* -** This function is invoked by the parser to call the xConnect() method -** of the virtual table pTab. If an error occurs, an error code is returned -** and an error left in pParse. -** -** This call is a no-op if table pTab is not a virtual table. 
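/* Illustrative sketch (not part of this file): how an application makes a
** module visible to the module lookup performed below.  The module name
** "example" and the function names are invented; most callbacks are left
** zero for brevity, which a real module cannot do.
*/
#include <sqlite3.h>

/* constructor sketched earlier in this section (invented name) */
static int exampleConnect(sqlite3*, void*, int, const char*const*,
                          sqlite3_vtab**, char**);

static sqlite3_module exampleModule = {
  0,                /* iVersion */
  exampleConnect,   /* xCreate: same as xConnect, an eponymous-style module */
  exampleConnect,   /* xConnect */
  /* xBestIndex, xDisconnect, xDestroy, xOpen, xClose, xFilter, xNext,
  ** xEof, xColumn, xRowid, xUpdate, ... must be supplied by a real module */
};

int register_example_module(sqlite3 *db){
  return sqlite3_create_module(db, "example", &exampleModule, 0);
}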
-*/ -SQLITE_PRIVATE int sqlite3VtabCallConnect(Parse *pParse, Table *pTab){ - sqlite3 *db = pParse->db; - const char *zMod; - Module *pMod; - int rc; - - assert( pTab ); - if( (pTab->tabFlags & TF_Virtual)==0 || sqlite3GetVTable(db, pTab) ){ - return SQLITE_OK; - } - - /* Locate the required virtual table module */ - zMod = pTab->azModuleArg[0]; - pMod = (Module*)sqlite3HashFind(&db->aModule, zMod, sqlite3Strlen30(zMod)); - - if( !pMod ){ - const char *zModule = pTab->azModuleArg[0]; - sqlite3ErrorMsg(pParse, "no such module: %s", zModule); - rc = SQLITE_ERROR; - }else{ - char *zErr = 0; - rc = vtabCallConstructor(db, pTab, pMod, pMod->pModule->xConnect, &zErr); - if( rc!=SQLITE_OK ){ - sqlite3ErrorMsg(pParse, "%s", zErr); - } - sqlite3DbFree(db, zErr); - } - - return rc; -} -/* -** Grow the db->aVTrans[] array so that there is room for at least one -** more v-table. Return SQLITE_NOMEM if a malloc fails, or SQLITE_OK otherwise. -*/ -static int growVTrans(sqlite3 *db){ - const int ARRAY_INCR = 5; - - /* Grow the sqlite3.aVTrans array if required */ - if( (db->nVTrans%ARRAY_INCR)==0 ){ - VTable **aVTrans; - int nBytes = sizeof(sqlite3_vtab *) * (db->nVTrans + ARRAY_INCR); - aVTrans = sqlite3DbRealloc(db, (void *)db->aVTrans, nBytes); - if( !aVTrans ){ - return SQLITE_NOMEM; - } - memset(&aVTrans[db->nVTrans], 0, sizeof(sqlite3_vtab *)*ARRAY_INCR); - db->aVTrans = aVTrans; - } - - return SQLITE_OK; -} - -/* -** Add the virtual table pVTab to the array sqlite3.aVTrans[]. Space should -** have already been reserved using growVTrans(). -*/ -static void addToVTrans(sqlite3 *db, VTable *pVTab){ - /* Add pVtab to the end of sqlite3.aVTrans */ - db->aVTrans[db->nVTrans++] = pVTab; - sqlite3VtabLock(pVTab); -} - -/* -** This function is invoked by the vdbe to call the xCreate method -** of the virtual table named zTab in database iDb. -** -** If an error occurs, *pzErr is set to point an an English language -** description of the error and an SQLITE_XXX error code is returned. -** In this case the caller must call sqlite3DbFree(db, ) on *pzErr. -*/ -SQLITE_PRIVATE int sqlite3VtabCallCreate(sqlite3 *db, int iDb, const char *zTab, char **pzErr){ - int rc = SQLITE_OK; - Table *pTab; - Module *pMod; - const char *zMod; - - pTab = sqlite3FindTable(db, zTab, db->aDb[iDb].zName); - assert( pTab && (pTab->tabFlags & TF_Virtual)!=0 && !pTab->pVTable ); - - /* Locate the required virtual table module */ - zMod = pTab->azModuleArg[0]; - pMod = (Module*)sqlite3HashFind(&db->aModule, zMod, sqlite3Strlen30(zMod)); - - /* If the module has been registered and includes a Create method, - ** invoke it now. If the module has not been registered, return an - ** error. Otherwise, do nothing. - */ - if( !pMod ){ - *pzErr = sqlite3MPrintf(db, "no such module: %s", zMod); - rc = SQLITE_ERROR; - }else{ - rc = vtabCallConstructor(db, pTab, pMod, pMod->pModule->xCreate, pzErr); - } - - /* Justification of ALWAYS(): The xConstructor method is required to - ** create a valid sqlite3_vtab if it returns SQLITE_OK. */ - if( rc==SQLITE_OK && ALWAYS(sqlite3GetVTable(db, pTab)) ){ - rc = growVTrans(db); - if( rc==SQLITE_OK ){ - addToVTrans(db, sqlite3GetVTable(db, pTab)); - } - } - - return rc; -} - -/* -** This function is used to set the schema of a virtual table. It is only -** valid to call this function from within the xCreate() or xConnect() of a -** virtual table module. 
-*/ -SQLITE_API int sqlite3_declare_vtab(sqlite3 *db, const char *zCreateTable){ - Parse *pParse; - - int rc = SQLITE_OK; - Table *pTab; - char *zErr = 0; - - sqlite3_mutex_enter(db->mutex); - if( !db->pVtabCtx || !(pTab = db->pVtabCtx->pTab) ){ - sqlite3Error(db, SQLITE_MISUSE, 0); - sqlite3_mutex_leave(db->mutex); - return SQLITE_MISUSE_BKPT; - } - assert( (pTab->tabFlags & TF_Virtual)!=0 ); - - pParse = sqlite3StackAllocZero(db, sizeof(*pParse)); - if( pParse==0 ){ - rc = SQLITE_NOMEM; - }else{ - pParse->declareVtab = 1; - pParse->db = db; - pParse->nQueryLoop = 1; - - if( SQLITE_OK==sqlite3RunParser(pParse, zCreateTable, &zErr) - && pParse->pNewTable - && !db->mallocFailed - && !pParse->pNewTable->pSelect - && (pParse->pNewTable->tabFlags & TF_Virtual)==0 - ){ - if( !pTab->aCol ){ - pTab->aCol = pParse->pNewTable->aCol; - pTab->nCol = pParse->pNewTable->nCol; - pParse->pNewTable->nCol = 0; - pParse->pNewTable->aCol = 0; - } - db->pVtabCtx->pTab = 0; - }else{ - sqlite3Error(db, SQLITE_ERROR, (zErr ? "%s" : 0), zErr); - sqlite3DbFree(db, zErr); - rc = SQLITE_ERROR; - } - pParse->declareVtab = 0; - - if( pParse->pVdbe ){ - sqlite3VdbeFinalize(pParse->pVdbe); - } - sqlite3DeleteTable(db, pParse->pNewTable); - sqlite3ParserReset(pParse); - sqlite3StackFree(db, pParse); - } - - assert( (rc&0xff)==rc ); - rc = sqlite3ApiExit(db, rc); - sqlite3_mutex_leave(db->mutex); - return rc; -} - -/* -** This function is invoked by the vdbe to call the xDestroy method -** of the virtual table named zTab in database iDb. This occurs -** when a DROP TABLE is mentioned. -** -** This call is a no-op if zTab is not a virtual table. -*/ -SQLITE_PRIVATE int sqlite3VtabCallDestroy(sqlite3 *db, int iDb, const char *zTab){ - int rc = SQLITE_OK; - Table *pTab; - - pTab = sqlite3FindTable(db, zTab, db->aDb[iDb].zName); - if( ALWAYS(pTab!=0 && pTab->pVTable!=0) ){ - VTable *p = vtabDisconnectAll(db, pTab); - - assert( rc==SQLITE_OK ); - rc = p->pMod->pModule->xDestroy(p->pVtab); - - /* Remove the sqlite3_vtab* from the aVTrans[] array, if applicable */ - if( rc==SQLITE_OK ){ - assert( pTab->pVTable==p && p->pNext==0 ); - p->pVtab = 0; - pTab->pVTable = 0; - sqlite3VtabUnlock(p); - } - } - - return rc; -} - -/* -** This function invokes either the xRollback or xCommit method -** of each of the virtual tables in the sqlite3.aVTrans array. The method -** called is identified by the second argument, "offset", which is -** the offset of the method to call in the sqlite3_module structure. -** -** The array is cleared after invoking the callbacks. -*/ -static void callFinaliser(sqlite3 *db, int offset){ - int i; - if( db->aVTrans ){ - for(i=0; inVTrans; i++){ - VTable *pVTab = db->aVTrans[i]; - sqlite3_vtab *p = pVTab->pVtab; - if( p ){ - int (*x)(sqlite3_vtab *); - x = *(int (**)(sqlite3_vtab *))((char *)p->pModule + offset); - if( x ) x(p); - } - pVTab->iSavepoint = 0; - sqlite3VtabUnlock(pVTab); - } - sqlite3DbFree(db, db->aVTrans); - db->nVTrans = 0; - db->aVTrans = 0; - } -} - -/* -** Invoke the xSync method of all virtual tables in the sqlite3.aVTrans -** array. Return the error code for the first error that occurs, or -** SQLITE_OK if all xSync operations are successful. -** -** If an error message is available, leave it in p->zErrMsg. 
-*/ -SQLITE_PRIVATE int sqlite3VtabSync(sqlite3 *db, Vdbe *p){ - int i; - int rc = SQLITE_OK; - VTable **aVTrans = db->aVTrans; - - db->aVTrans = 0; - for(i=0; rc==SQLITE_OK && inVTrans; i++){ - int (*x)(sqlite3_vtab *); - sqlite3_vtab *pVtab = aVTrans[i]->pVtab; - if( pVtab && (x = pVtab->pModule->xSync)!=0 ){ - rc = x(pVtab); - sqlite3VtabImportErrmsg(p, pVtab); - } - } - db->aVTrans = aVTrans; - return rc; -} - -/* -** Invoke the xRollback method of all virtual tables in the -** sqlite3.aVTrans array. Then clear the array itself. -*/ -SQLITE_PRIVATE int sqlite3VtabRollback(sqlite3 *db){ - callFinaliser(db, offsetof(sqlite3_module,xRollback)); - return SQLITE_OK; -} - -/* -** Invoke the xCommit method of all virtual tables in the -** sqlite3.aVTrans array. Then clear the array itself. -*/ -SQLITE_PRIVATE int sqlite3VtabCommit(sqlite3 *db){ - callFinaliser(db, offsetof(sqlite3_module,xCommit)); - return SQLITE_OK; -} - -/* -** If the virtual table pVtab supports the transaction interface -** (xBegin/xRollback/xCommit and optionally xSync) and a transaction is -** not currently open, invoke the xBegin method now. -** -** If the xBegin call is successful, place the sqlite3_vtab pointer -** in the sqlite3.aVTrans array. -*/ -SQLITE_PRIVATE int sqlite3VtabBegin(sqlite3 *db, VTable *pVTab){ - int rc = SQLITE_OK; - const sqlite3_module *pModule; - - /* Special case: If db->aVTrans is NULL and db->nVTrans is greater - ** than zero, then this function is being called from within a - ** virtual module xSync() callback. It is illegal to write to - ** virtual module tables in this case, so return SQLITE_LOCKED. - */ - if( sqlite3VtabInSync(db) ){ - return SQLITE_LOCKED; - } - if( !pVTab ){ - return SQLITE_OK; - } - pModule = pVTab->pVtab->pModule; - - if( pModule->xBegin ){ - int i; - - /* If pVtab is already in the aVTrans array, return early */ - for(i=0; inVTrans; i++){ - if( db->aVTrans[i]==pVTab ){ - return SQLITE_OK; - } - } - - /* Invoke the xBegin method. If successful, add the vtab to the - ** sqlite3.aVTrans[] array. */ - rc = growVTrans(db); - if( rc==SQLITE_OK ){ - rc = pModule->xBegin(pVTab->pVtab); - if( rc==SQLITE_OK ){ - addToVTrans(db, pVTab); - } - } - } - return rc; -} - -/* -** Invoke either the xSavepoint, xRollbackTo or xRelease method of all -** virtual tables that currently have an open transaction. Pass iSavepoint -** as the second argument to the virtual table method invoked. -** -** If op is SAVEPOINT_BEGIN, the xSavepoint method is invoked. If it is -** SAVEPOINT_ROLLBACK, the xRollbackTo method. Otherwise, if op is -** SAVEPOINT_RELEASE, then the xRelease method of each virtual table with -** an open transaction is invoked. -** -** If any virtual table method returns an error code other than SQLITE_OK, -** processing is abandoned and the error returned to the caller of this -** function immediately. If all calls to virtual table methods are successful, -** SQLITE_OK is returned. 
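/* Illustrative sketch (not part of this file): the optional transaction and
** savepoint hooks that the dispatch code around here drives.  xBegin, xSync,
** xCommit and xRollback exist at iVersion 1; xSavepoint, xRelease and
** xRollbackTo are only consulted when sqlite3_module.iVersion is 2 or more,
** which matches the iVersion>=2 test in sqlite3VtabSavepoint() below.  The
** function bodies are stubs invented for the example.
*/
#include <sqlite3.h>

static int exBegin(sqlite3_vtab *pVtab){ (void)pVtab; return SQLITE_OK; }
static int exSync(sqlite3_vtab *pVtab){ (void)pVtab; return SQLITE_OK; }
static int exCommit(sqlite3_vtab *pVtab){ (void)pVtab; return SQLITE_OK; }
static int exRollback(sqlite3_vtab *pVtab){ (void)pVtab; return SQLITE_OK; }
static int exSavepoint(sqlite3_vtab *pVtab, int iSvpt){ (void)pVtab; (void)iSvpt; return SQLITE_OK; }
static int exRelease(sqlite3_vtab *pVtab, int iSvpt){ (void)pVtab; (void)iSvpt; return SQLITE_OK; }
static int exRollbackTo(sqlite3_vtab *pVtab, int iSvpt){ (void)pVtab; (void)iSvpt; return SQLITE_OK; }

/* In the sqlite3_module initializer these would appear as:
**   .iVersion    = 2,
**   .xBegin      = exBegin,      .xSync     = exSync,
**   .xCommit     = exCommit,     .xRollback = exRollback,
**   .xSavepoint  = exSavepoint,  .xRelease  = exRelease,
**   .xRollbackTo = exRollbackTo
*/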
-*/ -SQLITE_PRIVATE int sqlite3VtabSavepoint(sqlite3 *db, int op, int iSavepoint){ - int rc = SQLITE_OK; - - assert( op==SAVEPOINT_RELEASE||op==SAVEPOINT_ROLLBACK||op==SAVEPOINT_BEGIN ); - assert( iSavepoint>=0 ); - if( db->aVTrans ){ - int i; - for(i=0; rc==SQLITE_OK && inVTrans; i++){ - VTable *pVTab = db->aVTrans[i]; - const sqlite3_module *pMod = pVTab->pMod->pModule; - if( pVTab->pVtab && pMod->iVersion>=2 ){ - int (*xMethod)(sqlite3_vtab *, int); - switch( op ){ - case SAVEPOINT_BEGIN: - xMethod = pMod->xSavepoint; - pVTab->iSavepoint = iSavepoint+1; - break; - case SAVEPOINT_ROLLBACK: - xMethod = pMod->xRollbackTo; - break; - default: - xMethod = pMod->xRelease; - break; - } - if( xMethod && pVTab->iSavepoint>iSavepoint ){ - rc = xMethod(pVTab->pVtab, iSavepoint); - } - } - } - } - return rc; -} - -/* -** The first parameter (pDef) is a function implementation. The -** second parameter (pExpr) is the first argument to this function. -** If pExpr is a column in a virtual table, then let the virtual -** table implementation have an opportunity to overload the function. -** -** This routine is used to allow virtual table implementations to -** overload MATCH, LIKE, GLOB, and REGEXP operators. -** -** Return either the pDef argument (indicating no change) or a -** new FuncDef structure that is marked as ephemeral using the -** SQLITE_FUNC_EPHEM flag. -*/ -SQLITE_PRIVATE FuncDef *sqlite3VtabOverloadFunction( - sqlite3 *db, /* Database connection for reporting malloc problems */ - FuncDef *pDef, /* Function to possibly overload */ - int nArg, /* Number of arguments to the function */ - Expr *pExpr /* First argument to the function */ -){ - Table *pTab; - sqlite3_vtab *pVtab; - sqlite3_module *pMod; - void (*xFunc)(sqlite3_context*,int,sqlite3_value**) = 0; - void *pArg = 0; - FuncDef *pNew; - int rc = 0; - char *zLowerName; - unsigned char *z; - - - /* Check to see the left operand is a column in a virtual table */ - if( NEVER(pExpr==0) ) return pDef; - if( pExpr->op!=TK_COLUMN ) return pDef; - pTab = pExpr->pTab; - if( NEVER(pTab==0) ) return pDef; - if( (pTab->tabFlags & TF_Virtual)==0 ) return pDef; - pVtab = sqlite3GetVTable(db, pTab)->pVtab; - assert( pVtab!=0 ); - assert( pVtab->pModule!=0 ); - pMod = (sqlite3_module *)pVtab->pModule; - if( pMod->xFindFunction==0 ) return pDef; - - /* Call the xFindFunction method on the virtual table implementation - ** to see if the implementation wants to overload this function - */ - zLowerName = sqlite3DbStrDup(db, pDef->zName); - if( zLowerName ){ - for(z=(unsigned char*)zLowerName; *z; z++){ - *z = sqlite3UpperToLower[*z]; - } - rc = pMod->xFindFunction(pVtab, nArg, zLowerName, &xFunc, &pArg); - sqlite3DbFree(db, zLowerName); - } - if( rc==0 ){ - return pDef; - } - - /* Create a new ephemeral function definition for the overloaded - ** function */ - pNew = sqlite3DbMallocZero(db, sizeof(*pNew) - + sqlite3Strlen30(pDef->zName) + 1); - if( pNew==0 ){ - return pDef; - } - *pNew = *pDef; - pNew->zName = (char *)&pNew[1]; - memcpy(pNew->zName, pDef->zName, sqlite3Strlen30(pDef->zName)+1); - pNew->xFunc = xFunc; - pNew->pUserData = pArg; - pNew->funcFlags |= SQLITE_FUNC_EPHEM; - return pNew; -} - -/* -** Make sure virtual table pTab is contained in the pParse->apVirtualLock[] -** array so that an OP_VBegin will get generated for it. Add pTab to the -** array if it is missing. If pTab is already in the array, this routine -** is a no-op. 
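/* Illustrative sketch (not part of this file): an xFindFunction
** implementation of the kind sqlite3VtabOverloadFunction() above probes.
** Returning non-zero and setting *pxFunc / *ppArg overloads the named SQL
** function (here MATCH) for expressions whose left operand is a column of
** this virtual table; the name arrives lower-cased.  The function names
** are invented for the example.
*/
#include <sqlite3.h>
#include <string.h>

static void exampleMatchFunc(sqlite3_context *ctx, int argc, sqlite3_value **argv){
  /* a real implementation would evaluate the MATCH expression here */
  (void)argc; (void)argv;
  sqlite3_result_int(ctx, 0);
}

static int exampleFindFunction(
  sqlite3_vtab *pVtab,                 /* the virtual table */
  int nArg,                            /* number of arguments to the function */
  const char *zName,                   /* function name, lower-cased */
  void (**pxFunc)(sqlite3_context*,int,sqlite3_value**),
  void **ppArg
){
  (void)pVtab;
  if( nArg==2 && strcmp(zName, "match")==0 ){
    *pxFunc = exampleMatchFunc;
    *ppArg = 0;
    return 1;   /* non-zero: the function is overloaded */
  }
  return 0;     /* zero: fall back to the default implementation */
}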
-*/ -SQLITE_PRIVATE void sqlite3VtabMakeWritable(Parse *pParse, Table *pTab){ - Parse *pToplevel = sqlite3ParseToplevel(pParse); - int i, n; - Table **apVtabLock; - - assert( IsVirtual(pTab) ); - for(i=0; inVtabLock; i++){ - if( pTab==pToplevel->apVtabLock[i] ) return; - } - n = (pToplevel->nVtabLock+1)*sizeof(pToplevel->apVtabLock[0]); - apVtabLock = sqlite3_realloc(pToplevel->apVtabLock, n); - if( apVtabLock ){ - pToplevel->apVtabLock = apVtabLock; - pToplevel->apVtabLock[pToplevel->nVtabLock++] = pTab; - }else{ - pToplevel->db->mallocFailed = 1; - } -} - -/* -** Return the ON CONFLICT resolution mode in effect for the virtual -** table update operation currently in progress. -** -** The results of this routine are undefined unless it is called from -** within an xUpdate method. -*/ -SQLITE_API int sqlite3_vtab_on_conflict(sqlite3 *db){ - static const unsigned char aMap[] = { - SQLITE_ROLLBACK, SQLITE_ABORT, SQLITE_FAIL, SQLITE_IGNORE, SQLITE_REPLACE - }; - assert( OE_Rollback==1 && OE_Abort==2 && OE_Fail==3 ); - assert( OE_Ignore==4 && OE_Replace==5 ); - assert( db->vtabOnConflict>=1 && db->vtabOnConflict<=5 ); - return (int)aMap[db->vtabOnConflict-1]; -} - -/* -** Call from within the xCreate() or xConnect() methods to provide -** the SQLite core with additional information about the behavior -** of the virtual table being implemented. -*/ -SQLITE_API int sqlite3_vtab_config(sqlite3 *db, int op, ...){ - va_list ap; - int rc = SQLITE_OK; - - sqlite3_mutex_enter(db->mutex); - - va_start(ap, op); - switch( op ){ - case SQLITE_VTAB_CONSTRAINT_SUPPORT: { - VtabCtx *p = db->pVtabCtx; - if( !p ){ - rc = SQLITE_MISUSE_BKPT; - }else{ - assert( p->pTab==0 || (p->pTab->tabFlags & TF_Virtual)!=0 ); - p->pVTable->bConstraint = (u8)va_arg(ap, int); - } - break; - } - default: - rc = SQLITE_MISUSE_BKPT; - break; - } - va_end(ap); - - if( rc!=SQLITE_OK ) sqlite3Error(db, rc, 0); - sqlite3_mutex_leave(db->mutex); - return rc; -} - -#endif /* SQLITE_OMIT_VIRTUALTABLE */ - -/************** End of vtab.c ************************************************/ -/************** Begin file where.c *******************************************/ -/* -** 2001 September 15 -** -** The author disclaims copyright to this source code. In place of -** a legal notice, here is a blessing: -** -** May you do good and not evil. -** May you find forgiveness for yourself and forgive others. -** May you share freely, never taking more than you give. -** -************************************************************************* -** This module contains C code that generates VDBE code used to process -** the WHERE clause of SQL statements. This module is responsible for -** generating the code that loops through a table looking for applicable -** rows. Indices are selected and used to speed the search when doing -** so is applicable. Because this module is responsible for selecting -** indices, you might also think of this module as the "query optimizer". -*/ -/************** Include whereInt.h in the middle of where.c ******************/ -/************** Begin file whereInt.h ****************************************/ -/* -** 2013-11-12 -** -** The author disclaims copyright to this source code. In place of -** a legal notice, here is a blessing: -** -** May you do good and not evil. -** May you find forgiveness for yourself and forgive others. -** May you share freely, never taking more than you give. 
-** -************************************************************************* -** -** This file contains structure and macro definitions for the query -** planner logic in "where.c". These definitions are broken out into -** a separate source file for easier editing. -*/ - -/* -** Trace output macros -*/ -#if defined(SQLITE_TEST) || defined(SQLITE_DEBUG) -/***/ int sqlite3WhereTrace = 0; -#endif -#if defined(SQLITE_DEBUG) \ - && (defined(SQLITE_TEST) || defined(SQLITE_ENABLE_WHERETRACE)) -# define WHERETRACE(K,X) if(sqlite3WhereTrace&(K)) sqlite3DebugPrintf X -# define WHERETRACE_ENABLED 1 -#else -# define WHERETRACE(K,X) -#endif - -/* Forward references -*/ -typedef struct WhereClause WhereClause; -typedef struct WhereMaskSet WhereMaskSet; -typedef struct WhereOrInfo WhereOrInfo; -typedef struct WhereAndInfo WhereAndInfo; -typedef struct WhereLevel WhereLevel; -typedef struct WhereLoop WhereLoop; -typedef struct WherePath WherePath; -typedef struct WhereTerm WhereTerm; -typedef struct WhereLoopBuilder WhereLoopBuilder; -typedef struct WhereScan WhereScan; -typedef struct WhereOrCost WhereOrCost; -typedef struct WhereOrSet WhereOrSet; - -/* -** This object contains information needed to implement a single nested -** loop in WHERE clause. -** -** Contrast this object with WhereLoop. This object describes the -** implementation of the loop. WhereLoop describes the algorithm. -** This object contains a pointer to the WhereLoop algorithm as one of -** its elements. -** -** The WhereInfo object contains a single instance of this object for -** each term in the FROM clause (which is to say, for each of the -** nested loops as implemented). The order of WhereLevel objects determines -** the loop nested order, with WhereInfo.a[0] being the outer loop and -** WhereInfo.a[WhereInfo.nLevel-1] being the inner loop. -*/ -struct WhereLevel { - int iLeftJoin; /* Memory cell used to implement LEFT OUTER JOIN */ - int iTabCur; /* The VDBE cursor used to access the table */ - int iIdxCur; /* The VDBE cursor used to access pIdx */ - int addrBrk; /* Jump here to break out of the loop */ - int addrNxt; /* Jump here to start the next IN combination */ - int addrSkip; /* Jump here for next iteration of skip-scan */ - int addrCont; /* Jump here to continue with the next loop cycle */ - int addrFirst; /* First instruction of interior of the loop */ - int addrBody; /* Beginning of the body of this loop */ - u8 iFrom; /* Which entry in the FROM clause */ - u8 op, p3, p5; /* Opcode, P3 & P5 of the opcode that ends the loop */ - int p1, p2; /* Operands of the opcode used to ends the loop */ - union { /* Information that depends on pWLoop->wsFlags */ - struct { - int nIn; /* Number of entries in aInLoop[] */ - struct InLoop { - int iCur; /* The VDBE cursor used by this IN operator */ - int addrInTop; /* Top of the IN loop */ - u8 eEndLoopOp; /* IN Loop terminator. OP_Next or OP_Prev */ - } *aInLoop; /* Information about each nested IN operator */ - } in; /* Used when pWLoop->wsFlags&WHERE_IN_ABLE */ - Index *pCovidx; /* Possible covering index for WHERE_MULTI_OR */ - } u; - struct WhereLoop *pWLoop; /* The selected WhereLoop object */ - Bitmask notReady; /* FROM entries not usable at this level */ -}; - -/* -** Each instance of this object represents an algorithm for evaluating one -** term of a join. 
Every term of the FROM clause will have at least -** one corresponding WhereLoop object (unless INDEXED BY constraints -** prevent a query solution - which is an error) and many terms of the -** FROM clause will have multiple WhereLoop objects, each describing a -** potential way of implementing that FROM-clause term, together with -** dependencies and cost estimates for using the chosen algorithm. -** -** Query planning consists of building up a collection of these WhereLoop -** objects, then computing a particular sequence of WhereLoop objects, with -** one WhereLoop object per FROM clause term, that satisfy all dependencies -** and that minimize the overall cost. -*/ -struct WhereLoop { - Bitmask prereq; /* Bitmask of other loops that must run first */ - Bitmask maskSelf; /* Bitmask identifying table iTab */ -#ifdef SQLITE_DEBUG - char cId; /* Symbolic ID of this loop for debugging use */ -#endif - u8 iTab; /* Position in FROM clause of table for this loop */ - u8 iSortIdx; /* Sorting index number. 0==None */ - LogEst rSetup; /* One-time setup cost (ex: create transient index) */ - LogEst rRun; /* Cost of running each loop */ - LogEst nOut; /* Estimated number of output rows */ - union { - struct { /* Information for internal btree tables */ - u16 nEq; /* Number of equality constraints */ - u16 nSkip; /* Number of initial index columns to skip */ - Index *pIndex; /* Index used, or NULL */ - } btree; - struct { /* Information for virtual tables */ - int idxNum; /* Index number */ - u8 needFree; /* True if sqlite3_free(idxStr) is needed */ - i8 isOrdered; /* True if satisfies ORDER BY */ - u16 omitMask; /* Terms that may be omitted */ - char *idxStr; /* Index identifier string */ - } vtab; - } u; - u32 wsFlags; /* WHERE_* flags describing the plan */ - u16 nLTerm; /* Number of entries in aLTerm[] */ - /**** whereLoopXfer() copies fields above ***********************/ -# define WHERE_LOOP_XFER_SZ offsetof(WhereLoop,nLSlot) - u16 nLSlot; /* Number of slots allocated for aLTerm[] */ - WhereTerm **aLTerm; /* WhereTerms used */ - WhereLoop *pNextLoop; /* Next WhereLoop object in the WhereClause */ - WhereTerm *aLTermSpace[4]; /* Initial aLTerm[] space */ -}; - -/* This object holds the prerequisites and the cost of running a -** subquery on one operand of an OR operator in the WHERE clause. -** See WhereOrSet for additional information -*/ -struct WhereOrCost { - Bitmask prereq; /* Prerequisites */ - LogEst rRun; /* Cost of running this subquery */ - LogEst nOut; /* Number of outputs for this subquery */ -}; - -/* The WhereOrSet object holds a set of possible WhereOrCosts that -** correspond to the subquery(s) of OR-clause processing. Only the -** best N_OR_COST elements are retained. -*/ -#define N_OR_COST 3 -struct WhereOrSet { - u16 n; /* Number of valid a[] entries */ - WhereOrCost a[N_OR_COST]; /* Set of best costs */ -}; - - -/* Forward declaration of methods */ -static int whereLoopResize(sqlite3*, WhereLoop*, int); - -/* -** Each instance of this object holds a sequence of WhereLoop objects -** that implement some or all of a query plan. -** -** Think of each WhereLoop object as a node in a graph with arcs -** showing dependencies and costs for travelling between nodes. (That is -** not a completely accurate description because WhereLoop costs are a -** vector, not a scalar, and because dependencies are many-to-one, not -** one-to-one as are graph nodes. But it is a useful visualization aid.) 
-** Then a WherePath object is a path through the graph that visits some -** or all of the WhereLoop objects once. -** -** The "solver" works by creating the N best WherePath objects of length -** 1. Then using those as a basis to compute the N best WherePath objects -** of length 2. And so forth until the length of WherePaths equals the -** number of nodes in the FROM clause. The best (lowest cost) WherePath -** at the end is the choosen query plan. -*/ -struct WherePath { - Bitmask maskLoop; /* Bitmask of all WhereLoop objects in this path */ - Bitmask revLoop; /* aLoop[]s that should be reversed for ORDER BY */ - LogEst nRow; /* Estimated number of rows generated by this path */ - LogEst rCost; /* Total cost of this path */ - i8 isOrdered; /* No. of ORDER BY terms satisfied. -1 for unknown */ - WhereLoop **aLoop; /* Array of WhereLoop objects implementing this path */ -}; - -/* -** The query generator uses an array of instances of this structure to -** help it analyze the subexpressions of the WHERE clause. Each WHERE -** clause subexpression is separated from the others by AND operators, -** usually, or sometimes subexpressions separated by OR. -** -** All WhereTerms are collected into a single WhereClause structure. -** The following identity holds: -** -** WhereTerm.pWC->a[WhereTerm.idx] == WhereTerm -** -** When a term is of the form: -** -** X -** -** where X is a column name and is one of certain operators, -** then WhereTerm.leftCursor and WhereTerm.u.leftColumn record the -** cursor number and column number for X. WhereTerm.eOperator records -** the using a bitmask encoding defined by WO_xxx below. The -** use of a bitmask encoding for the operator allows us to search -** quickly for terms that match any of several different operators. -** -** A WhereTerm might also be two or more subterms connected by OR: -** -** (t1.X ) OR (t1.Y ) OR .... -** -** In this second case, wtFlag has the TERM_ORINFO bit set and eOperator==WO_OR -** and the WhereTerm.u.pOrInfo field points to auxiliary information that -** is collected about the OR clause. -** -** If a term in the WHERE clause does not match either of the two previous -** categories, then eOperator==0. The WhereTerm.pExpr field is still set -** to the original subexpression content and wtFlags is set up appropriately -** but no other fields in the WhereTerm object are meaningful. -** -** When eOperator!=0, prereqRight and prereqAll record sets of cursor numbers, -** but they do so indirectly. A single WhereMaskSet structure translates -** cursor number into bits and the translated bit is stored in the prereq -** fields. The translation is used in order to maximize the number of -** bits that will fit in a Bitmask. The VDBE cursor numbers might be -** spread out over the non-negative integers. For example, the cursor -** numbers might be 3, 8, 9, 10, 20, 23, 41, and 45. The WhereMaskSet -** translates these sparse cursor numbers into consecutive integers -** beginning with 0 in order to make the best possible use of the available -** bits in the Bitmask. So, in the example above, the cursor numbers -** would be mapped into integers 0 through 7. -** -** The number of terms in a join is limited by the number of bits -** in prereqRight and prereqAll. The default is 64 bits, hence SQLite -** is only able to process joins with 64 or fewer tables. 
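/* Illustrative sketch (not part of this file): the "solver" strategy
** described above, reduced to a toy standalone program.  Keep only the
** N cheapest partial join orders of length k, then extend each of them by
** one more loop to form the candidates of length k+1.  The loop costs are
** invented, and "cost doubles per nesting level" is only a stand-in for the
** fact that inner loops run more often; the real planner also tracks
** prerequisites, output-row estimates and ORDER BY satisfaction, and, like
** this toy, its pruning can occasionally miss the true optimum.
*/
#include <stdio.h>

#define N_BEST 3                 /* partial paths retained per round */
#define N_LOOP 4                 /* FROM-clause terms in this toy example */

typedef struct ToyPath {
  int cost;                      /* accumulated cost of this partial order */
  unsigned used;                 /* bitmask of loops already placed */
  int order[N_LOOP];             /* loops in outermost-to-innermost order */
} ToyPath;

static const int loopCost[N_LOOP] = { 7, 3, 9, 5 };   /* invented costs */

int main(void){
  ToyPath cur[N_BEST] = { {0, 0u, {0}} };
  int nCur = 1;
  for(int len=0; len<N_LOOP; len++){
    ToyPath cand[N_BEST*N_LOOP];
    int nCand = 0;
    for(int i=0; i<nCur; i++){
      for(int j=0; j<N_LOOP; j++){
        if( cur[i].used & (1u<<j) ) continue;
        ToyPath p = cur[i];
        p.cost += loopCost[j] << len;    /* deeper nesting => higher cost */
        p.used |= 1u<<j;
        p.order[len] = j;
        cand[nCand++] = p;
      }
    }
    /* retain only the N_BEST cheapest candidates for the next round */
    nCur = nCand<N_BEST ? nCand : N_BEST;
    for(int i=0; i<nCur; i++){
      int best = i;
      for(int j=i+1; j<nCand; j++){
        if( cand[j].cost<cand[best].cost ) best = j;
      }
      ToyPath t = cand[i]; cand[i] = cand[best]; cand[best] = t;
      cur[i] = cand[i];
    }
  }
  printf("chosen order:");
  for(int i=0; i<N_LOOP; i++) printf(" %d", cur[0].order[i]);
  printf("  (cost %d)\n", cur[0].cost);
  return 0;
}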
-*/ -struct WhereTerm { - Expr *pExpr; /* Pointer to the subexpression that is this term */ - int iParent; /* Disable pWC->a[iParent] when this term disabled */ - int leftCursor; /* Cursor number of X in "X " */ - union { - int leftColumn; /* Column number of X in "X " */ - WhereOrInfo *pOrInfo; /* Extra information if (eOperator & WO_OR)!=0 */ - WhereAndInfo *pAndInfo; /* Extra information if (eOperator& WO_AND)!=0 */ - } u; - LogEst truthProb; /* Probability of truth for this expression */ - u16 eOperator; /* A WO_xx value describing */ - u8 wtFlags; /* TERM_xxx bit flags. See below */ - u8 nChild; /* Number of children that must disable us */ - WhereClause *pWC; /* The clause this term is part of */ - Bitmask prereqRight; /* Bitmask of tables used by pExpr->pRight */ - Bitmask prereqAll; /* Bitmask of tables referenced by pExpr */ -}; - -/* -** Allowed values of WhereTerm.wtFlags -*/ -#define TERM_DYNAMIC 0x01 /* Need to call sqlite3ExprDelete(db, pExpr) */ -#define TERM_VIRTUAL 0x02 /* Added by the optimizer. Do not code */ -#define TERM_CODED 0x04 /* This term is already coded */ -#define TERM_COPIED 0x08 /* Has a child */ -#define TERM_ORINFO 0x10 /* Need to free the WhereTerm.u.pOrInfo object */ -#define TERM_ANDINFO 0x20 /* Need to free the WhereTerm.u.pAndInfo obj */ -#define TERM_OR_OK 0x40 /* Used during OR-clause processing */ -#ifdef SQLITE_ENABLE_STAT3_OR_STAT4 -# define TERM_VNULL 0x80 /* Manufactured x>NULL or x<=NULL term */ -#else -# define TERM_VNULL 0x00 /* Disabled if not using stat3 */ -#endif - -/* -** An instance of the WhereScan object is used as an iterator for locating -** terms in the WHERE clause that are useful to the query planner. -*/ -struct WhereScan { - WhereClause *pOrigWC; /* Original, innermost WhereClause */ - WhereClause *pWC; /* WhereClause currently being scanned */ - char *zCollName; /* Required collating sequence, if not NULL */ - char idxaff; /* Must match this affinity, if zCollName!=NULL */ - unsigned char nEquiv; /* Number of entries in aEquiv[] */ - unsigned char iEquiv; /* Next unused slot in aEquiv[] */ - u32 opMask; /* Acceptable operators */ - int k; /* Resume scanning at this->pWC->a[this->k] */ - int aEquiv[22]; /* Cursor,Column pairs for equivalence classes */ -}; - -/* -** An instance of the following structure holds all information about a -** WHERE clause. Mostly this is a container for one or more WhereTerms. -** -** Explanation of pOuter: For a WHERE clause of the form -** -** a AND ((b AND c) OR (d AND e)) AND f -** -** There are separate WhereClause objects for the whole clause and for -** the subclauses "(b AND c)" and "(d AND e)". The pOuter field of the -** subclauses points to the WhereClause object for the whole clause. -*/ -struct WhereClause { - WhereInfo *pWInfo; /* WHERE clause processing context */ - WhereClause *pOuter; /* Outer conjunction */ - u8 op; /* Split operator. TK_AND or TK_OR */ - int nTerm; /* Number of terms */ - int nSlot; /* Number of entries in a[] */ - WhereTerm *a; /* Each a[] describes a term of the WHERE cluase */ -#if defined(SQLITE_SMALL_STACK) - WhereTerm aStatic[1]; /* Initial static space for a[] */ -#else - WhereTerm aStatic[8]; /* Initial static space for a[] */ -#endif -}; - -/* -** A WhereTerm with eOperator==WO_OR has its u.pOrInfo pointer set to -** a dynamically allocated instance of the following structure. 
-*/ -struct WhereOrInfo { - WhereClause wc; /* Decomposition into subterms */ - Bitmask indexable; /* Bitmask of all indexable tables in the clause */ -}; - -/* -** A WhereTerm with eOperator==WO_AND has its u.pAndInfo pointer set to -** a dynamically allocated instance of the following structure. -*/ -struct WhereAndInfo { - WhereClause wc; /* The subexpression broken out */ -}; - -/* -** An instance of the following structure keeps track of a mapping -** between VDBE cursor numbers and bits of the bitmasks in WhereTerm. -** -** The VDBE cursor numbers are small integers contained in -** SrcList_item.iCursor and Expr.iTable fields. For any given WHERE -** clause, the cursor numbers might not begin with 0 and they might -** contain gaps in the numbering sequence. But we want to make maximum -** use of the bits in our bitmasks. This structure provides a mapping -** from the sparse cursor numbers into consecutive integers beginning -** with 0. -** -** If WhereMaskSet.ix[A]==B it means that The A-th bit of a Bitmask -** corresponds VDBE cursor number B. The A-th bit of a bitmask is 1<3, 5->1, 8->2, 29->0, -** 57->5, 73->4. Or one of 719 other combinations might be used. It -** does not really matter. What is important is that sparse cursor -** numbers all get mapped into bit numbers that begin with 0 and contain -** no gaps. -*/ -struct WhereMaskSet { - int n; /* Number of assigned cursor values */ - int ix[BMS]; /* Cursor assigned to each bit */ -}; - -/* -** This object is a convenience wrapper holding all information needed -** to construct WhereLoop objects for a particular query. -*/ -struct WhereLoopBuilder { - WhereInfo *pWInfo; /* Information about this WHERE */ - WhereClause *pWC; /* WHERE clause terms */ - ExprList *pOrderBy; /* ORDER BY clause */ - WhereLoop *pNew; /* Template WhereLoop */ - WhereOrSet *pOrSet; /* Record best loops here, if not NULL */ -#ifdef SQLITE_ENABLE_STAT3_OR_STAT4 - UnpackedRecord *pRec; /* Probe for stat4 (if required) */ - int nRecValid; /* Number of valid fields currently in pRec */ -#endif -}; - -/* -** The WHERE clause processing routine has two halves. The -** first part does the start of the WHERE loop and the second -** half does the tail of the WHERE loop. An instance of -** this structure is returned by the first half and passed -** into the second half to give some continuity. -** -** An instance of this object holds the complete state of the query -** planner. -*/ -struct WhereInfo { - Parse *pParse; /* Parsing and code generating context */ - SrcList *pTabList; /* List of tables in the join */ - ExprList *pOrderBy; /* The ORDER BY clause or NULL */ - ExprList *pResultSet; /* Result set. 
DISTINCT operates on these */ - WhereLoop *pLoops; /* List of all WhereLoop objects */ - Bitmask revMask; /* Mask of ORDER BY terms that need reversing */ - LogEst nRowOut; /* Estimated number of output rows */ - u16 wctrlFlags; /* Flags originally passed to sqlite3WhereBegin() */ - i8 nOBSat; /* Number of ORDER BY terms satisfied by indices */ - u8 sorted; /* True if really sorted (not just grouped) */ - u8 okOnePass; /* Ok to use one-pass algorithm for UPDATE/DELETE */ - u8 untestedTerms; /* Not all WHERE terms resolved by outer loop */ - u8 eDistinct; /* One of the WHERE_DISTINCT_* values below */ - u8 nLevel; /* Number of nested loop */ - int iTop; /* The very beginning of the WHERE loop */ - int iContinue; /* Jump here to continue with next record */ - int iBreak; /* Jump here to break out of the loop */ - int savedNQueryLoop; /* pParse->nQueryLoop outside the WHERE loop */ - int aiCurOnePass[2]; /* OP_OpenWrite cursors for the ONEPASS opt */ - WhereMaskSet sMaskSet; /* Map cursor numbers to bitmasks */ - WhereClause sWC; /* Decomposition of the WHERE clause */ - WhereLevel a[1]; /* Information about each nest loop in WHERE */ -}; - -/* -** Bitmasks for the operators on WhereTerm objects. These are all -** operators that are of interest to the query planner. An -** OR-ed combination of these values can be used when searching for -** particular WhereTerms within a WhereClause. -*/ -#define WO_IN 0x001 -#define WO_EQ 0x002 -#define WO_LT (WO_EQ<<(TK_LT-TK_EQ)) -#define WO_LE (WO_EQ<<(TK_LE-TK_EQ)) -#define WO_GT (WO_EQ<<(TK_GT-TK_EQ)) -#define WO_GE (WO_EQ<<(TK_GE-TK_EQ)) -#define WO_MATCH 0x040 -#define WO_ISNULL 0x080 -#define WO_OR 0x100 /* Two or more OR-connected terms */ -#define WO_AND 0x200 /* Two or more AND-connected terms */ -#define WO_EQUIV 0x400 /* Of the form A==B, both columns */ -#define WO_NOOP 0x800 /* This term does not restrict search space */ - -#define WO_ALL 0xfff /* Mask of all possible WO_* values */ -#define WO_SINGLE 0x0ff /* Mask of all non-compound WO_* values */ - -/* -** These are definitions of bits in the WhereLoop.wsFlags field. -** The particular combination of bits in each WhereLoop help to -** determine the algorithm that WhereLoop represents. -*/ -#define WHERE_COLUMN_EQ 0x00000001 /* x=EXPR */ -#define WHERE_COLUMN_RANGE 0x00000002 /* xEXPR */ -#define WHERE_COLUMN_IN 0x00000004 /* x IN (...) */ -#define WHERE_COLUMN_NULL 0x00000008 /* x IS NULL */ -#define WHERE_CONSTRAINT 0x0000000f /* Any of the WHERE_COLUMN_xxx values */ -#define WHERE_TOP_LIMIT 0x00000010 /* xEXPR or x>=EXPR constraint */ -#define WHERE_BOTH_LIMIT 0x00000030 /* Both x>EXPR and xnRowOut); -} - -/* -** Return one of the WHERE_DISTINCT_xxxxx values to indicate how this -** WHERE clause returns outputs for DISTINCT processing. -*/ -SQLITE_PRIVATE int sqlite3WhereIsDistinct(WhereInfo *pWInfo){ - return pWInfo->eDistinct; -} - -/* -** Return TRUE if the WHERE clause returns rows in ORDER BY order. -** Return FALSE if the output needs to be sorted. -*/ -SQLITE_PRIVATE int sqlite3WhereIsOrdered(WhereInfo *pWInfo){ - return pWInfo->nOBSat; -} - -/* -** Return the VDBE address or label to jump to in order to continue -** immediately with the next row of a WHERE clause. -*/ -SQLITE_PRIVATE int sqlite3WhereContinueLabel(WhereInfo *pWInfo){ - assert( pWInfo->iContinue!=0 ); - return pWInfo->iContinue; -} - -/* -** Return the VDBE address or label to jump to in order to break -** out of a WHERE loop. 
-*/ -SQLITE_PRIVATE int sqlite3WhereBreakLabel(WhereInfo *pWInfo){ - return pWInfo->iBreak; -} - -/* -** Return TRUE if an UPDATE or DELETE statement can operate directly on -** the rowids returned by a WHERE clause. Return FALSE if doing an -** UPDATE or DELETE might change subsequent WHERE clause results. -** -** If the ONEPASS optimization is used (if this routine returns true) -** then also write the indices of open cursors used by ONEPASS -** into aiCur[0] and aiCur[1]. iaCur[0] gets the cursor of the data -** table and iaCur[1] gets the cursor used by an auxiliary index. -** Either value may be -1, indicating that cursor is not used. -** Any cursors returned will have been opened for writing. -** -** aiCur[0] and aiCur[1] both get -1 if the where-clause logic is -** unable to use the ONEPASS optimization. -*/ -SQLITE_PRIVATE int sqlite3WhereOkOnePass(WhereInfo *pWInfo, int *aiCur){ - memcpy(aiCur, pWInfo->aiCurOnePass, sizeof(int)*2); - return pWInfo->okOnePass; -} - -/* -** Move the content of pSrc into pDest -*/ -static void whereOrMove(WhereOrSet *pDest, WhereOrSet *pSrc){ - pDest->n = pSrc->n; - memcpy(pDest->a, pSrc->a, pDest->n*sizeof(pDest->a[0])); -} - -/* -** Try to insert a new prerequisite/cost entry into the WhereOrSet pSet. -** -** The new entry might overwrite an existing entry, or it might be -** appended, or it might be discarded. Do whatever is the right thing -** so that pSet keeps the N_OR_COST best entries seen so far. -*/ -static int whereOrInsert( - WhereOrSet *pSet, /* The WhereOrSet to be updated */ - Bitmask prereq, /* Prerequisites of the new entry */ - LogEst rRun, /* Run-cost of the new entry */ - LogEst nOut /* Number of outputs for the new entry */ -){ - u16 i; - WhereOrCost *p; - for(i=pSet->n, p=pSet->a; i>0; i--, p++){ - if( rRun<=p->rRun && (prereq & p->prereq)==prereq ){ - goto whereOrInsert_done; - } - if( p->rRun<=rRun && (p->prereq & prereq)==p->prereq ){ - return 0; - } - } - if( pSet->na[pSet->n++]; - p->nOut = nOut; - }else{ - p = pSet->a; - for(i=1; in; i++){ - if( p->rRun>pSet->a[i].rRun ) p = pSet->a + i; - } - if( p->rRun<=rRun ) return 0; - } -whereOrInsert_done: - p->prereq = prereq; - p->rRun = rRun; - if( p->nOut>nOut ) p->nOut = nOut; - return 1; -} - -/* -** Initialize a preallocated WhereClause structure. -*/ -static void whereClauseInit( - WhereClause *pWC, /* The WhereClause to be initialized */ - WhereInfo *pWInfo /* The WHERE processing context */ -){ - pWC->pWInfo = pWInfo; - pWC->pOuter = 0; - pWC->nTerm = 0; - pWC->nSlot = ArraySize(pWC->aStatic); - pWC->a = pWC->aStatic; -} - -/* Forward reference */ -static void whereClauseClear(WhereClause*); - -/* -** Deallocate all memory associated with a WhereOrInfo object. -*/ -static void whereOrInfoDelete(sqlite3 *db, WhereOrInfo *p){ - whereClauseClear(&p->wc); - sqlite3DbFree(db, p); -} - -/* -** Deallocate all memory associated with a WhereAndInfo object. -*/ -static void whereAndInfoDelete(sqlite3 *db, WhereAndInfo *p){ - whereClauseClear(&p->wc); - sqlite3DbFree(db, p); -} - -/* -** Deallocate a WhereClause structure. The WhereClause structure -** itself is not freed. This routine is the inverse of whereClauseInit(). 
-*/ -static void whereClauseClear(WhereClause *pWC){ - int i; - WhereTerm *a; - sqlite3 *db = pWC->pWInfo->pParse->db; - for(i=pWC->nTerm-1, a=pWC->a; i>=0; i--, a++){ - if( a->wtFlags & TERM_DYNAMIC ){ - sqlite3ExprDelete(db, a->pExpr); - } - if( a->wtFlags & TERM_ORINFO ){ - whereOrInfoDelete(db, a->u.pOrInfo); - }else if( a->wtFlags & TERM_ANDINFO ){ - whereAndInfoDelete(db, a->u.pAndInfo); - } - } - if( pWC->a!=pWC->aStatic ){ - sqlite3DbFree(db, pWC->a); - } -} - -/* -** Add a single new WhereTerm entry to the WhereClause object pWC. -** The new WhereTerm object is constructed from Expr p and with wtFlags. -** The index in pWC->a[] of the new WhereTerm is returned on success. -** 0 is returned if the new WhereTerm could not be added due to a memory -** allocation error. The memory allocation failure will be recorded in -** the db->mallocFailed flag so that higher-level functions can detect it. -** -** This routine will increase the size of the pWC->a[] array as necessary. -** -** If the wtFlags argument includes TERM_DYNAMIC, then responsibility -** for freeing the expression p is assumed by the WhereClause object pWC. -** This is true even if this routine fails to allocate a new WhereTerm. -** -** WARNING: This routine might reallocate the space used to store -** WhereTerms. All pointers to WhereTerms should be invalidated after -** calling this routine. Such pointers may be reinitialized by referencing -** the pWC->a[] array. -*/ -static int whereClauseInsert(WhereClause *pWC, Expr *p, u8 wtFlags){ - WhereTerm *pTerm; - int idx; - testcase( wtFlags & TERM_VIRTUAL ); - if( pWC->nTerm>=pWC->nSlot ){ - WhereTerm *pOld = pWC->a; - sqlite3 *db = pWC->pWInfo->pParse->db; - pWC->a = sqlite3DbMallocRaw(db, sizeof(pWC->a[0])*pWC->nSlot*2 ); - if( pWC->a==0 ){ - if( wtFlags & TERM_DYNAMIC ){ - sqlite3ExprDelete(db, p); - } - pWC->a = pOld; - return 0; - } - memcpy(pWC->a, pOld, sizeof(pWC->a[0])*pWC->nTerm); - if( pOld!=pWC->aStatic ){ - sqlite3DbFree(db, pOld); - } - pWC->nSlot = sqlite3DbMallocSize(db, pWC->a)/sizeof(pWC->a[0]); - } - pTerm = &pWC->a[idx = pWC->nTerm++]; - if( p && ExprHasProperty(p, EP_Unlikely) ){ - pTerm->truthProb = sqlite3LogEst(p->iTable) - 99; - }else{ - pTerm->truthProb = 1; - } - pTerm->pExpr = sqlite3ExprSkipCollate(p); - pTerm->wtFlags = wtFlags; - pTerm->pWC = pWC; - pTerm->iParent = -1; - return idx; -} - -/* -** This routine identifies subexpressions in the WHERE clause where -** each subexpression is separated by the AND operator or some other -** operator specified in the op parameter. The WhereClause structure -** is filled with pointers to subexpressions. For example: -** -** WHERE a=='hello' AND coalesce(b,11)<10 AND (c+12!=d OR c==22) -** \________/ \_______________/ \________________/ -** slot[0] slot[1] slot[2] -** -** The original WHERE clause in pExpr is unaltered. All this routine -** does is make slot[] entries point to substructure within pExpr. -** -** In the previous sentence and in the diagram, "slot[]" refers to -** the WhereClause.a[] array. The slot[] array grows as needed to contain -** all terms of the WHERE clause. -*/ -static void whereSplit(WhereClause *pWC, Expr *pExpr, u8 op){ - pWC->op = op; - if( pExpr==0 ) return; - if( pExpr->op!=op ){ - whereClauseInsert(pWC, pExpr, 0); - }else{ - whereSplit(pWC, pExpr->pLeft, op); - whereSplit(pWC, pExpr->pRight, op); - } -} - -/* -** Initialize a WhereMaskSet object -*/ -#define initMaskSet(P) (P)->n=0 - -/* -** Return the bitmask for the given cursor number. 
Return 0 if -** iCursor is not in the set. -*/ -static Bitmask getMask(WhereMaskSet *pMaskSet, int iCursor){ - int i; - assert( pMaskSet->n<=(int)sizeof(Bitmask)*8 ); - for(i=0; in; i++){ - if( pMaskSet->ix[i]==iCursor ){ - return MASKBIT(i); - } - } - return 0; -} - -/* -** Create a new mask for cursor iCursor. -** -** There is one cursor per table in the FROM clause. The number of -** tables in the FROM clause is limited by a test early in the -** sqlite3WhereBegin() routine. So we know that the pMaskSet->ix[] -** array will never overflow. -*/ -static void createMask(WhereMaskSet *pMaskSet, int iCursor){ - assert( pMaskSet->n < ArraySize(pMaskSet->ix) ); - pMaskSet->ix[pMaskSet->n++] = iCursor; -} - -/* -** These routines walk (recursively) an expression tree and generate -** a bitmask indicating which tables are used in that expression -** tree. -*/ -static Bitmask exprListTableUsage(WhereMaskSet*, ExprList*); -static Bitmask exprSelectTableUsage(WhereMaskSet*, Select*); -static Bitmask exprTableUsage(WhereMaskSet *pMaskSet, Expr *p){ - Bitmask mask = 0; - if( p==0 ) return 0; - if( p->op==TK_COLUMN ){ - mask = getMask(pMaskSet, p->iTable); - return mask; - } - mask = exprTableUsage(pMaskSet, p->pRight); - mask |= exprTableUsage(pMaskSet, p->pLeft); - if( ExprHasProperty(p, EP_xIsSelect) ){ - mask |= exprSelectTableUsage(pMaskSet, p->x.pSelect); - }else{ - mask |= exprListTableUsage(pMaskSet, p->x.pList); - } - return mask; -} -static Bitmask exprListTableUsage(WhereMaskSet *pMaskSet, ExprList *pList){ - int i; - Bitmask mask = 0; - if( pList ){ - for(i=0; inExpr; i++){ - mask |= exprTableUsage(pMaskSet, pList->a[i].pExpr); - } - } - return mask; -} -static Bitmask exprSelectTableUsage(WhereMaskSet *pMaskSet, Select *pS){ - Bitmask mask = 0; - while( pS ){ - SrcList *pSrc = pS->pSrc; - mask |= exprListTableUsage(pMaskSet, pS->pEList); - mask |= exprListTableUsage(pMaskSet, pS->pGroupBy); - mask |= exprListTableUsage(pMaskSet, pS->pOrderBy); - mask |= exprTableUsage(pMaskSet, pS->pWhere); - mask |= exprTableUsage(pMaskSet, pS->pHaving); - if( ALWAYS(pSrc!=0) ){ - int i; - for(i=0; inSrc; i++){ - mask |= exprSelectTableUsage(pMaskSet, pSrc->a[i].pSelect); - mask |= exprTableUsage(pMaskSet, pSrc->a[i].pOn); - } - } - pS = pS->pPrior; - } - return mask; -} - -/* -** Return TRUE if the given operator is one of the operators that is -** allowed for an indexable WHERE clause term. The allowed operators are -** "=", "<", ">", "<=", ">=", "IN", and "IS NULL" -*/ -static int allowedOp(int op){ - assert( TK_GT>TK_EQ && TK_GTTK_EQ && TK_LTTK_EQ && TK_LE=TK_EQ && op<=TK_GE) || op==TK_ISNULL; -} - -/* -** Swap two objects of type TYPE. -*/ -#define SWAP(TYPE,A,B) {TYPE t=A; A=B; B=t;} - -/* -** Commute a comparison operator. Expressions of the form "X op Y" -** are converted into "Y op X". -** -** If left/right precedence rules come into play when determining the -** collating sequence, then COLLATE operators are adjusted to ensure -** that the collating sequence does not change. For example: -** "Y collate NOCASE op X" becomes "X op Y" because any collation sequence on -** the left hand side of a comparison overrides any collation sequence -** attached to the right. For the same reason the EP_Collate flag -** is not commuted. 
-*/ -static void exprCommute(Parse *pParse, Expr *pExpr){ - u16 expRight = (pExpr->pRight->flags & EP_Collate); - u16 expLeft = (pExpr->pLeft->flags & EP_Collate); - assert( allowedOp(pExpr->op) && pExpr->op!=TK_IN ); - if( expRight==expLeft ){ - /* Either X and Y both have COLLATE operator or neither do */ - if( expRight ){ - /* Both X and Y have COLLATE operators. Make sure X is always - ** used by clearing the EP_Collate flag from Y. */ - pExpr->pRight->flags &= ~EP_Collate; - }else if( sqlite3ExprCollSeq(pParse, pExpr->pLeft)!=0 ){ - /* Neither X nor Y have COLLATE operators, but X has a non-default - ** collating sequence. So add the EP_Collate marker on X to cause - ** it to be searched first. */ - pExpr->pLeft->flags |= EP_Collate; - } - } - SWAP(Expr*,pExpr->pRight,pExpr->pLeft); - if( pExpr->op>=TK_GT ){ - assert( TK_LT==TK_GT+2 ); - assert( TK_GE==TK_LE+2 ); - assert( TK_GT>TK_EQ ); - assert( TK_GTop>=TK_GT && pExpr->op<=TK_GE ); - pExpr->op = ((pExpr->op-TK_GT)^2)+TK_GT; - } -} - -/* -** Translate from TK_xx operator to WO_xx bitmask. -*/ -static u16 operatorMask(int op){ - u16 c; - assert( allowedOp(op) ); - if( op==TK_IN ){ - c = WO_IN; - }else if( op==TK_ISNULL ){ - c = WO_ISNULL; - }else{ - assert( (WO_EQ<<(op-TK_EQ)) < 0x7fff ); - c = (u16)(WO_EQ<<(op-TK_EQ)); - } - assert( op!=TK_ISNULL || c==WO_ISNULL ); - assert( op!=TK_IN || c==WO_IN ); - assert( op!=TK_EQ || c==WO_EQ ); - assert( op!=TK_LT || c==WO_LT ); - assert( op!=TK_LE || c==WO_LE ); - assert( op!=TK_GT || c==WO_GT ); - assert( op!=TK_GE || c==WO_GE ); - return c; -} - -/* -** Advance to the next WhereTerm that matches according to the criteria -** established when the pScan object was initialized by whereScanInit(). -** Return NULL if there are no more matching WhereTerms. -*/ -static WhereTerm *whereScanNext(WhereScan *pScan){ - int iCur; /* The cursor on the LHS of the term */ - int iColumn; /* The column on the LHS of the term. 
-1 for IPK */ - Expr *pX; /* An expression being tested */ - WhereClause *pWC; /* Shorthand for pScan->pWC */ - WhereTerm *pTerm; /* The term being tested */ - int k = pScan->k; /* Where to start scanning */ - - while( pScan->iEquiv<=pScan->nEquiv ){ - iCur = pScan->aEquiv[pScan->iEquiv-2]; - iColumn = pScan->aEquiv[pScan->iEquiv-1]; - while( (pWC = pScan->pWC)!=0 ){ - for(pTerm=pWC->a+k; knTerm; k++, pTerm++){ - if( pTerm->leftCursor==iCur - && pTerm->u.leftColumn==iColumn - && (pScan->iEquiv<=2 || !ExprHasProperty(pTerm->pExpr, EP_FromJoin)) - ){ - if( (pTerm->eOperator & WO_EQUIV)!=0 - && pScan->nEquivaEquiv) - ){ - int j; - pX = sqlite3ExprSkipCollate(pTerm->pExpr->pRight); - assert( pX->op==TK_COLUMN ); - for(j=0; jnEquiv; j+=2){ - if( pScan->aEquiv[j]==pX->iTable - && pScan->aEquiv[j+1]==pX->iColumn ){ - break; - } - } - if( j==pScan->nEquiv ){ - pScan->aEquiv[j] = pX->iTable; - pScan->aEquiv[j+1] = pX->iColumn; - pScan->nEquiv += 2; - } - } - if( (pTerm->eOperator & pScan->opMask)!=0 ){ - /* Verify the affinity and collating sequence match */ - if( pScan->zCollName && (pTerm->eOperator & WO_ISNULL)==0 ){ - CollSeq *pColl; - Parse *pParse = pWC->pWInfo->pParse; - pX = pTerm->pExpr; - if( !sqlite3IndexAffinityOk(pX, pScan->idxaff) ){ - continue; - } - assert(pX->pLeft); - pColl = sqlite3BinaryCompareCollSeq(pParse, - pX->pLeft, pX->pRight); - if( pColl==0 ) pColl = pParse->db->pDfltColl; - if( sqlite3StrICmp(pColl->zName, pScan->zCollName) ){ - continue; - } - } - if( (pTerm->eOperator & WO_EQ)!=0 - && (pX = pTerm->pExpr->pRight)->op==TK_COLUMN - && pX->iTable==pScan->aEquiv[0] - && pX->iColumn==pScan->aEquiv[1] - ){ - continue; - } - pScan->k = k+1; - return pTerm; - } - } - } - pScan->pWC = pScan->pWC->pOuter; - k = 0; - } - pScan->pWC = pScan->pOrigWC; - k = 0; - pScan->iEquiv += 2; - } - return 0; -} - -/* -** Initialize a WHERE clause scanner object. Return a pointer to the -** first match. Return NULL if there are no matches. -** -** The scanner will be searching the WHERE clause pWC. It will look -** for terms of the form "X " where X is column iColumn of table -** iCur. The must be one of the operators described by opMask. -** -** If the search is for X and the WHERE clause contains terms of the -** form X=Y then this routine might also return terms of the form -** "Y ". The number of levels of transitivity is limited, -** but is enough to handle most commonly occurring SQL statements. -** -** If X is not the INTEGER PRIMARY KEY then X must be compatible with -** index pIdx. 
-*/ -static WhereTerm *whereScanInit( - WhereScan *pScan, /* The WhereScan object being initialized */ - WhereClause *pWC, /* The WHERE clause to be scanned */ - int iCur, /* Cursor to scan for */ - int iColumn, /* Column to scan for */ - u32 opMask, /* Operator(s) to scan for */ - Index *pIdx /* Must be compatible with this index */ -){ - int j; - - /* memset(pScan, 0, sizeof(*pScan)); */ - pScan->pOrigWC = pWC; - pScan->pWC = pWC; - if( pIdx && iColumn>=0 ){ - pScan->idxaff = pIdx->pTable->aCol[iColumn].affinity; - for(j=0; pIdx->aiColumn[j]!=iColumn; j++){ - if( NEVER(j>=pIdx->nKeyCol) ) return 0; - } - pScan->zCollName = pIdx->azColl[j]; - }else{ - pScan->idxaff = 0; - pScan->zCollName = 0; - } - pScan->opMask = opMask; - pScan->k = 0; - pScan->aEquiv[0] = iCur; - pScan->aEquiv[1] = iColumn; - pScan->nEquiv = 2; - pScan->iEquiv = 2; - return whereScanNext(pScan); -} - -/* -** Search for a term in the WHERE clause that is of the form "X " -** where X is a reference to the iColumn of table iCur and is one of -** the WO_xx operator codes specified by the op parameter. -** Return a pointer to the term. Return 0 if not found. -** -** The term returned might by Y= if there is another constraint in -** the WHERE clause that specifies that X=Y. Any such constraints will be -** identified by the WO_EQUIV bit in the pTerm->eOperator field. The -** aEquiv[] array holds X and all its equivalents, with each SQL variable -** taking up two slots in aEquiv[]. The first slot is for the cursor number -** and the second is for the column number. There are 22 slots in aEquiv[] -** so that means we can look for X plus up to 10 other equivalent values. -** Hence a search for X will return if X=A1 and A1=A2 and A2=A3 -** and ... and A9=A10 and A10=. -** -** If there are multiple terms in the WHERE clause of the form "X " -** then try for the one with no dependencies on - in other words where -** is a constant expression of some kind. Only return entries of -** the form "X Y" where Y is a column in another table if no terms of -** the form "X " exist. If no terms with a constant RHS -** exist, try to return a term that does not use WO_EQUIV. -*/ -static WhereTerm *findTerm( - WhereClause *pWC, /* The WHERE clause to be searched */ - int iCur, /* Cursor number of LHS */ - int iColumn, /* Column number of LHS */ - Bitmask notReady, /* RHS must not overlap with this mask */ - u32 op, /* Mask of WO_xx values describing operator */ - Index *pIdx /* Must be compatible with this index, if not NULL */ -){ - WhereTerm *pResult = 0; - WhereTerm *p; - WhereScan scan; - - p = whereScanInit(&scan, pWC, iCur, iColumn, op, pIdx); - while( p ){ - if( (p->prereqRight & notReady)==0 ){ - if( p->prereqRight==0 && (p->eOperator&WO_EQ)!=0 ){ - return p; - } - if( pResult==0 ) pResult = p; - } - p = whereScanNext(&scan); - } - return pResult; -} - -/* Forward reference */ -static void exprAnalyze(SrcList*, WhereClause*, int); - -/* -** Call exprAnalyze on all terms in a WHERE clause. -*/ -static void exprAnalyzeAll( - SrcList *pTabList, /* the FROM clause */ - WhereClause *pWC /* the WHERE clause to be analyzed */ -){ - int i; - for(i=pWC->nTerm-1; i>=0; i--){ - exprAnalyze(pTabList, pWC, i); - } -} - -#ifndef SQLITE_OMIT_LIKE_OPTIMIZATION -/* -** Check to see if the given expression is a LIKE or GLOB operator that -** can be optimized using inequality constraints. Return TRUE if it is -** so and false if not. 
-** -** In order for the operator to be optimizible, the RHS must be a string -** literal that does not begin with a wildcard. -*/ -static int isLikeOrGlob( - Parse *pParse, /* Parsing and code generating context */ - Expr *pExpr, /* Test this expression */ - Expr **ppPrefix, /* Pointer to TK_STRING expression with pattern prefix */ - int *pisComplete, /* True if the only wildcard is % in the last character */ - int *pnoCase /* True if uppercase is equivalent to lowercase */ -){ - const char *z = 0; /* String on RHS of LIKE operator */ - Expr *pRight, *pLeft; /* Right and left size of LIKE operator */ - ExprList *pList; /* List of operands to the LIKE operator */ - int c; /* One character in z[] */ - int cnt; /* Number of non-wildcard prefix characters */ - char wc[3]; /* Wildcard characters */ - sqlite3 *db = pParse->db; /* Database connection */ - sqlite3_value *pVal = 0; - int op; /* Opcode of pRight */ - - if( !sqlite3IsLikeFunction(db, pExpr, pnoCase, wc) ){ - return 0; - } -#ifdef SQLITE_EBCDIC - if( *pnoCase ) return 0; -#endif - pList = pExpr->x.pList; - pLeft = pList->a[1].pExpr; - if( pLeft->op!=TK_COLUMN - || sqlite3ExprAffinity(pLeft)!=SQLITE_AFF_TEXT - || IsVirtual(pLeft->pTab) - ){ - /* IMP: R-02065-49465 The left-hand side of the LIKE or GLOB operator must - ** be the name of an indexed column with TEXT affinity. */ - return 0; - } - assert( pLeft->iColumn!=(-1) ); /* Because IPK never has AFF_TEXT */ - - pRight = sqlite3ExprSkipCollate(pList->a[0].pExpr); - op = pRight->op; - if( op==TK_VARIABLE ){ - Vdbe *pReprepare = pParse->pReprepare; - int iCol = pRight->iColumn; - pVal = sqlite3VdbeGetBoundValue(pReprepare, iCol, SQLITE_AFF_NONE); - if( pVal && sqlite3_value_type(pVal)==SQLITE_TEXT ){ - z = (char *)sqlite3_value_text(pVal); - } - sqlite3VdbeSetVarmask(pParse->pVdbe, iCol); - assert( pRight->op==TK_VARIABLE || pRight->op==TK_REGISTER ); - }else if( op==TK_STRING ){ - z = pRight->u.zToken; - } - if( z ){ - cnt = 0; - while( (c=z[cnt])!=0 && c!=wc[0] && c!=wc[1] && c!=wc[2] ){ - cnt++; - } - if( cnt!=0 && 255!=(u8)z[cnt-1] ){ - Expr *pPrefix; - *pisComplete = c==wc[0] && z[cnt+1]==0; - pPrefix = sqlite3Expr(db, TK_STRING, z); - if( pPrefix ) pPrefix->u.zToken[cnt] = 0; - *ppPrefix = pPrefix; - if( op==TK_VARIABLE ){ - Vdbe *v = pParse->pVdbe; - sqlite3VdbeSetVarmask(v, pRight->iColumn); - if( *pisComplete && pRight->u.zToken[1] ){ - /* If the rhs of the LIKE expression is a variable, and the current - ** value of the variable means there is no need to invoke the LIKE - ** function, then no OP_Variable will be added to the program. - ** This causes problems for the sqlite3_bind_parameter_name() - ** API. To workaround them, add a dummy OP_Variable here. - */ - int r1 = sqlite3GetTempReg(pParse); - sqlite3ExprCodeTarget(pParse, pRight, r1); - sqlite3VdbeChangeP3(v, sqlite3VdbeCurrentAddr(v)-1, 0); - sqlite3ReleaseTempReg(pParse, r1); - } - } - }else{ - z = 0; - } - } - - sqlite3ValueFree(pVal); - return (z!=0); -} -#endif /* SQLITE_OMIT_LIKE_OPTIMIZATION */ - - -#ifndef SQLITE_OMIT_VIRTUALTABLE -/* -** Check to see if the given expression is of the form -** -** column MATCH expr -** -** If it is then return TRUE. If not, return FALSE. 
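
The preconditions enforced by isLikeOrGlob() can be exercised from the application side. In the sketch below (illustrative names; assumes a stock build) the pattern has a literal prefix, the column has TEXT affinity and a usable index, and case_sensitive_like is turned on so the default BINARY index satisfies the collation check; the resulting plan is the prefix range scan described above:

    #include <stdio.h>
    #include <sqlite3.h>

    static int show_plan(void *u, int n, char **v, char **c){
      (void)u; (void)c;
      printf("%s\n", n>0 && v[n-1] ? v[n-1] : "");
      return 0;
    }

    int main(void){
      sqlite3 *db; char *zErr = 0;
      sqlite3_open(":memory:", &db);
      sqlite3_exec(db,
        "CREATE TABLE names(id INTEGER PRIMARY KEY, name TEXT);"
        "CREATE INDEX names_name ON names(name);"
        /* With the default case-insensitive LIKE a BINARY index cannot be
        ** used; making LIKE case sensitive satisfies the collation check. */
        "PRAGMA case_sensitive_like=ON;",
        0, 0, &zErr);
      sqlite3_exec(db,
        "EXPLAIN QUERY PLAN SELECT id FROM names WHERE name LIKE 'ab%';",
        show_plan, 0, &zErr);
      sqlite3_close(db);
      return 0;
    }
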
-*/ -static int isMatchOfColumn( - Expr *pExpr /* Test this expression */ -){ - ExprList *pList; - - if( pExpr->op!=TK_FUNCTION ){ - return 0; - } - if( sqlite3StrICmp(pExpr->u.zToken,"match")!=0 ){ - return 0; - } - pList = pExpr->x.pList; - if( pList->nExpr!=2 ){ - return 0; - } - if( pList->a[1].pExpr->op != TK_COLUMN ){ - return 0; - } - return 1; -} -#endif /* SQLITE_OMIT_VIRTUALTABLE */ - -/* -** If the pBase expression originated in the ON or USING clause of -** a join, then transfer the appropriate markings over to derived. -*/ -static void transferJoinMarkings(Expr *pDerived, Expr *pBase){ - if( pDerived ){ - pDerived->flags |= pBase->flags & EP_FromJoin; - pDerived->iRightJoinTable = pBase->iRightJoinTable; - } -} - -#if !defined(SQLITE_OMIT_OR_OPTIMIZATION) && !defined(SQLITE_OMIT_SUBQUERY) -/* -** Analyze a term that consists of two or more OR-connected -** subterms. So in: -** -** ... WHERE (a=5) AND (b=7 OR c=9 OR d=13) AND (d=13) -** ^^^^^^^^^^^^^^^^^^^^ -** -** This routine analyzes terms such as the middle term in the above example. -** A WhereOrTerm object is computed and attached to the term under -** analysis, regardless of the outcome of the analysis. Hence: -** -** WhereTerm.wtFlags |= TERM_ORINFO -** WhereTerm.u.pOrInfo = a dynamically allocated WhereOrTerm object -** -** The term being analyzed must have two or more of OR-connected subterms. -** A single subterm might be a set of AND-connected sub-subterms. -** Examples of terms under analysis: -** -** (A) t1.x=t2.y OR t1.x=t2.z OR t1.y=15 OR t1.z=t3.a+5 -** (B) x=expr1 OR expr2=x OR x=expr3 -** (C) t1.x=t2.y OR (t1.x=t2.z AND t1.y=15) -** (D) x=expr1 OR (y>11 AND y<22 AND z LIKE '*hello*') -** (E) (p.a=1 AND q.b=2 AND r.c=3) OR (p.x=4 AND q.y=5 AND r.z=6) -** -** CASE 1: -** -** If all subterms are of the form T.C=expr for some single column of C and -** a single table T (as shown in example B above) then create a new virtual -** term that is an equivalent IN expression. In other words, if the term -** being analyzed is: -** -** x = expr1 OR expr2 = x OR x = expr3 -** -** then create a new virtual term like this: -** -** x IN (expr1,expr2,expr3) -** -** CASE 2: -** -** If all subterms are indexable by a single table T, then set -** -** WhereTerm.eOperator = WO_OR -** WhereTerm.u.pOrInfo->indexable |= the cursor number for table T -** -** A subterm is "indexable" if it is of the form -** "T.C " where C is any column of table T and -** is one of "=", "<", "<=", ">", ">=", "IS NULL", or "IN". -** A subterm is also indexable if it is an AND of two or more -** subsubterms at least one of which is indexable. Indexable AND -** subterms have their eOperator set to WO_AND and they have -** u.pAndInfo set to a dynamically allocated WhereAndTerm object. -** -** From another point of view, "indexable" means that the subterm could -** potentially be used with an index if an appropriate index exists. -** This analysis does not consider whether or not the index exists; that -** is decided elsewhere. This analysis only looks at whether subterms -** appropriate for indexing exist. -** -** All examples A through E above satisfy case 2. But if a term -** also statisfies case 1 (such as B) we know that the optimizer will -** always prefer case 1, so in that case we pretend that case 2 is not -** satisfied. -** -** It might be the case that multiple tables are indexable. For example, -** (E) above is indexable on tables P, Q, and R. 
-** -** Terms that satisfy case 2 are candidates for lookup by using -** separate indices to find rowids for each subterm and composing -** the union of all rowids using a RowSet object. This is similar -** to "bitmap indices" in other database engines. -** -** OTHERWISE: -** -** If neither case 1 nor case 2 apply, then leave the eOperator set to -** zero. This term is not useful for search. -*/ -static void exprAnalyzeOrTerm( - SrcList *pSrc, /* the FROM clause */ - WhereClause *pWC, /* the complete WHERE clause */ - int idxTerm /* Index of the OR-term to be analyzed */ -){ - WhereInfo *pWInfo = pWC->pWInfo; /* WHERE clause processing context */ - Parse *pParse = pWInfo->pParse; /* Parser context */ - sqlite3 *db = pParse->db; /* Database connection */ - WhereTerm *pTerm = &pWC->a[idxTerm]; /* The term to be analyzed */ - Expr *pExpr = pTerm->pExpr; /* The expression of the term */ - int i; /* Loop counters */ - WhereClause *pOrWc; /* Breakup of pTerm into subterms */ - WhereTerm *pOrTerm; /* A Sub-term within the pOrWc */ - WhereOrInfo *pOrInfo; /* Additional information associated with pTerm */ - Bitmask chngToIN; /* Tables that might satisfy case 1 */ - Bitmask indexable; /* Tables that are indexable, satisfying case 2 */ - - /* - ** Break the OR clause into its separate subterms. The subterms are - ** stored in a WhereClause structure containing within the WhereOrInfo - ** object that is attached to the original OR clause term. - */ - assert( (pTerm->wtFlags & (TERM_DYNAMIC|TERM_ORINFO|TERM_ANDINFO))==0 ); - assert( pExpr->op==TK_OR ); - pTerm->u.pOrInfo = pOrInfo = sqlite3DbMallocZero(db, sizeof(*pOrInfo)); - if( pOrInfo==0 ) return; - pTerm->wtFlags |= TERM_ORINFO; - pOrWc = &pOrInfo->wc; - whereClauseInit(pOrWc, pWInfo); - whereSplit(pOrWc, pExpr, TK_OR); - exprAnalyzeAll(pSrc, pOrWc); - if( db->mallocFailed ) return; - assert( pOrWc->nTerm>=2 ); - - /* - ** Compute the set of tables that might satisfy cases 1 or 2. - */ - indexable = ~(Bitmask)0; - chngToIN = ~(Bitmask)0; - for(i=pOrWc->nTerm-1, pOrTerm=pOrWc->a; i>=0 && indexable; i--, pOrTerm++){ - if( (pOrTerm->eOperator & WO_SINGLE)==0 ){ - WhereAndInfo *pAndInfo; - assert( (pOrTerm->wtFlags & (TERM_ANDINFO|TERM_ORINFO))==0 ); - chngToIN = 0; - pAndInfo = sqlite3DbMallocRaw(db, sizeof(*pAndInfo)); - if( pAndInfo ){ - WhereClause *pAndWC; - WhereTerm *pAndTerm; - int j; - Bitmask b = 0; - pOrTerm->u.pAndInfo = pAndInfo; - pOrTerm->wtFlags |= TERM_ANDINFO; - pOrTerm->eOperator = WO_AND; - pAndWC = &pAndInfo->wc; - whereClauseInit(pAndWC, pWC->pWInfo); - whereSplit(pAndWC, pOrTerm->pExpr, TK_AND); - exprAnalyzeAll(pSrc, pAndWC); - pAndWC->pOuter = pWC; - testcase( db->mallocFailed ); - if( !db->mallocFailed ){ - for(j=0, pAndTerm=pAndWC->a; jnTerm; j++, pAndTerm++){ - assert( pAndTerm->pExpr ); - if( allowedOp(pAndTerm->pExpr->op) ){ - b |= getMask(&pWInfo->sMaskSet, pAndTerm->leftCursor); - } - } - } - indexable &= b; - } - }else if( pOrTerm->wtFlags & TERM_COPIED ){ - /* Skip this term for now. We revisit it when we process the - ** corresponding TERM_VIRTUAL term */ - }else{ - Bitmask b; - b = getMask(&pWInfo->sMaskSet, pOrTerm->leftCursor); - if( pOrTerm->wtFlags & TERM_VIRTUAL ){ - WhereTerm *pOther = &pOrWc->a[pOrTerm->iParent]; - b |= getMask(&pWInfo->sMaskSet, pOther->leftCursor); - } - indexable &= b; - if( (pOrTerm->eOperator & WO_EQ)==0 ){ - chngToIN = 0; - }else{ - chngToIN &= b; - } - } - } - - /* - ** Record the set of tables that satisfy case 2. The set might be - ** empty. 
- */ - pOrInfo->indexable = indexable; - pTerm->eOperator = indexable==0 ? 0 : WO_OR; - - /* - ** chngToIN holds a set of tables that *might* satisfy case 1. But - ** we have to do some additional checking to see if case 1 really - ** is satisfied. - ** - ** chngToIN will hold either 0, 1, or 2 bits. The 0-bit case means - ** that there is no possibility of transforming the OR clause into an - ** IN operator because one or more terms in the OR clause contain - ** something other than == on a column in the single table. The 1-bit - ** case means that every term of the OR clause is of the form - ** "table.column=expr" for some single table. The one bit that is set - ** will correspond to the common table. We still need to check to make - ** sure the same column is used on all terms. The 2-bit case is when - ** the all terms are of the form "table1.column=table2.column". It - ** might be possible to form an IN operator with either table1.column - ** or table2.column as the LHS if either is common to every term of - ** the OR clause. - ** - ** Note that terms of the form "table.column1=table.column2" (the - ** same table on both sizes of the ==) cannot be optimized. - */ - if( chngToIN ){ - int okToChngToIN = 0; /* True if the conversion to IN is valid */ - int iColumn = -1; /* Column index on lhs of IN operator */ - int iCursor = -1; /* Table cursor common to all terms */ - int j = 0; /* Loop counter */ - - /* Search for a table and column that appears on one side or the - ** other of the == operator in every subterm. That table and column - ** will be recorded in iCursor and iColumn. There might not be any - ** such table and column. Set okToChngToIN if an appropriate table - ** and column is found but leave okToChngToIN false if not found. - */ - for(j=0; j<2 && !okToChngToIN; j++){ - pOrTerm = pOrWc->a; - for(i=pOrWc->nTerm-1; i>=0; i--, pOrTerm++){ - assert( pOrTerm->eOperator & WO_EQ ); - pOrTerm->wtFlags &= ~TERM_OR_OK; - if( pOrTerm->leftCursor==iCursor ){ - /* This is the 2-bit case and we are on the second iteration and - ** current term is from the first iteration. So skip this term. */ - assert( j==1 ); - continue; - } - if( (chngToIN & getMask(&pWInfo->sMaskSet, pOrTerm->leftCursor))==0 ){ - /* This term must be of the form t1.a==t2.b where t2 is in the - ** chngToIN set but t1 is not. This term will be either preceeded - ** or follwed by an inverted copy (t2.b==t1.a). Skip this term - ** and use its inversion. */ - testcase( pOrTerm->wtFlags & TERM_COPIED ); - testcase( pOrTerm->wtFlags & TERM_VIRTUAL ); - assert( pOrTerm->wtFlags & (TERM_COPIED|TERM_VIRTUAL) ); - continue; - } - iColumn = pOrTerm->u.leftColumn; - iCursor = pOrTerm->leftCursor; - break; - } - if( i<0 ){ - /* No candidate table+column was found. This can only occur - ** on the second iteration */ - assert( j==1 ); - assert( IsPowerOfTwo(chngToIN) ); - assert( chngToIN==getMask(&pWInfo->sMaskSet, iCursor) ); - break; - } - testcase( j==1 ); - - /* We have found a candidate table and column. 
Check to see if that - ** table and column is common to every term in the OR clause */ - okToChngToIN = 1; - for(; i>=0 && okToChngToIN; i--, pOrTerm++){ - assert( pOrTerm->eOperator & WO_EQ ); - if( pOrTerm->leftCursor!=iCursor ){ - pOrTerm->wtFlags &= ~TERM_OR_OK; - }else if( pOrTerm->u.leftColumn!=iColumn ){ - okToChngToIN = 0; - }else{ - int affLeft, affRight; - /* If the right-hand side is also a column, then the affinities - ** of both right and left sides must be such that no type - ** conversions are required on the right. (Ticket #2249) - */ - affRight = sqlite3ExprAffinity(pOrTerm->pExpr->pRight); - affLeft = sqlite3ExprAffinity(pOrTerm->pExpr->pLeft); - if( affRight!=0 && affRight!=affLeft ){ - okToChngToIN = 0; - }else{ - pOrTerm->wtFlags |= TERM_OR_OK; - } - } - } - } - - /* At this point, okToChngToIN is true if original pTerm satisfies - ** case 1. In that case, construct a new virtual term that is - ** pTerm converted into an IN operator. - */ - if( okToChngToIN ){ - Expr *pDup; /* A transient duplicate expression */ - ExprList *pList = 0; /* The RHS of the IN operator */ - Expr *pLeft = 0; /* The LHS of the IN operator */ - Expr *pNew; /* The complete IN operator */ - - for(i=pOrWc->nTerm-1, pOrTerm=pOrWc->a; i>=0; i--, pOrTerm++){ - if( (pOrTerm->wtFlags & TERM_OR_OK)==0 ) continue; - assert( pOrTerm->eOperator & WO_EQ ); - assert( pOrTerm->leftCursor==iCursor ); - assert( pOrTerm->u.leftColumn==iColumn ); - pDup = sqlite3ExprDup(db, pOrTerm->pExpr->pRight, 0); - pList = sqlite3ExprListAppend(pWInfo->pParse, pList, pDup); - pLeft = pOrTerm->pExpr->pLeft; - } - assert( pLeft!=0 ); - pDup = sqlite3ExprDup(db, pLeft, 0); - pNew = sqlite3PExpr(pParse, TK_IN, pDup, 0, 0); - if( pNew ){ - int idxNew; - transferJoinMarkings(pNew, pExpr); - assert( !ExprHasProperty(pNew, EP_xIsSelect) ); - pNew->x.pList = pList; - idxNew = whereClauseInsert(pWC, pNew, TERM_VIRTUAL|TERM_DYNAMIC); - testcase( idxNew==0 ); - exprAnalyze(pSrc, pWC, idxNew); - pTerm = &pWC->a[idxTerm]; - pWC->a[idxNew].iParent = idxTerm; - pTerm->nChild = 1; - }else{ - sqlite3ExprListDelete(db, pList); - } - pTerm->eOperator = WO_NOOP; /* case 1 trumps case 2 */ - } - } -} -#endif /* !SQLITE_OMIT_OR_OPTIMIZATION && !SQLITE_OMIT_SUBQUERY */ - -/* -** The input to this routine is an WhereTerm structure with only the -** "pExpr" field filled in. The job of this routine is to analyze the -** subexpression and populate all the other fields of the WhereTerm -** structure. -** -** If the expression is of the form " X" it gets commuted -** to the standard form of "X ". -** -** If the expression is of the form "X Y" where both X and Y are -** columns, then the original expression is unchanged and a new virtual -** term of the form "Y X" is added to the WHERE clause and -** analyzed separately. The original term is marked with TERM_COPIED -** and the new term is marked with TERM_DYNAMIC (because it's pExpr -** needs to be freed with the WhereClause) and TERM_VIRTUAL (because it -** is a commuted copy of a prior term.) The original term has nChild=1 -** and the copy has idxParent set to the index of the original term. 
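
Both outcomes of the OR analysis above are visible in query plans. A small sketch with an invented schema (plan text is version dependent): the first query satisfies case 1 and is treated like a IN (1,2); the second only satisfies case 2 and is answered by combining two separate index lookups:

    #include <stdio.h>
    #include <sqlite3.h>

    static int show_plan(void *u, int n, char **v, char **c){
      (void)u; (void)c;
      printf("%s\n", n>0 && v[n-1] ? v[n-1] : "");
      return 0;
    }

    int main(void){
      sqlite3 *db; char *zErr = 0;
      sqlite3_open(":memory:", &db);
      sqlite3_exec(db,
        "CREATE TABLE t(a INTEGER, b INTEGER, payload TEXT);"
        "CREATE INDEX ta ON t(a); CREATE INDEX tb ON t(b);",
        0, 0, &zErr);
      /* Case 1: same column in every disjunct -> rewritten as a IN (1,2). */
      sqlite3_exec(db,
        "EXPLAIN QUERY PLAN SELECT * FROM t WHERE a=1 OR a=2;",
        show_plan, 0, &zErr);
      /* Case 2: different indexed columns -> union of two index lookups
      ** (reported as a multi-index OR). */
      sqlite3_exec(db,
        "EXPLAIN QUERY PLAN SELECT * FROM t WHERE a=1 OR b=2;",
        show_plan, 0, &zErr);
      sqlite3_close(db);
      return 0;
    }
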
-*/ -static void exprAnalyze( - SrcList *pSrc, /* the FROM clause */ - WhereClause *pWC, /* the WHERE clause */ - int idxTerm /* Index of the term to be analyzed */ -){ - WhereInfo *pWInfo = pWC->pWInfo; /* WHERE clause processing context */ - WhereTerm *pTerm; /* The term to be analyzed */ - WhereMaskSet *pMaskSet; /* Set of table index masks */ - Expr *pExpr; /* The expression to be analyzed */ - Bitmask prereqLeft; /* Prerequesites of the pExpr->pLeft */ - Bitmask prereqAll; /* Prerequesites of pExpr */ - Bitmask extraRight = 0; /* Extra dependencies on LEFT JOIN */ - Expr *pStr1 = 0; /* RHS of LIKE/GLOB operator */ - int isComplete = 0; /* RHS of LIKE/GLOB ends with wildcard */ - int noCase = 0; /* LIKE/GLOB distinguishes case */ - int op; /* Top-level operator. pExpr->op */ - Parse *pParse = pWInfo->pParse; /* Parsing context */ - sqlite3 *db = pParse->db; /* Database connection */ - - if( db->mallocFailed ){ - return; - } - pTerm = &pWC->a[idxTerm]; - pMaskSet = &pWInfo->sMaskSet; - pExpr = pTerm->pExpr; - assert( pExpr->op!=TK_AS && pExpr->op!=TK_COLLATE ); - prereqLeft = exprTableUsage(pMaskSet, pExpr->pLeft); - op = pExpr->op; - if( op==TK_IN ){ - assert( pExpr->pRight==0 ); - if( ExprHasProperty(pExpr, EP_xIsSelect) ){ - pTerm->prereqRight = exprSelectTableUsage(pMaskSet, pExpr->x.pSelect); - }else{ - pTerm->prereqRight = exprListTableUsage(pMaskSet, pExpr->x.pList); - } - }else if( op==TK_ISNULL ){ - pTerm->prereqRight = 0; - }else{ - pTerm->prereqRight = exprTableUsage(pMaskSet, pExpr->pRight); - } - prereqAll = exprTableUsage(pMaskSet, pExpr); - if( ExprHasProperty(pExpr, EP_FromJoin) ){ - Bitmask x = getMask(pMaskSet, pExpr->iRightJoinTable); - prereqAll |= x; - extraRight = x-1; /* ON clause terms may not be used with an index - ** on left table of a LEFT JOIN. Ticket #3015 */ - } - pTerm->prereqAll = prereqAll; - pTerm->leftCursor = -1; - pTerm->iParent = -1; - pTerm->eOperator = 0; - if( allowedOp(op) ){ - Expr *pLeft = sqlite3ExprSkipCollate(pExpr->pLeft); - Expr *pRight = sqlite3ExprSkipCollate(pExpr->pRight); - u16 opMask = (pTerm->prereqRight & prereqLeft)==0 ? 
WO_ALL : WO_EQUIV; - if( pLeft->op==TK_COLUMN ){ - pTerm->leftCursor = pLeft->iTable; - pTerm->u.leftColumn = pLeft->iColumn; - pTerm->eOperator = operatorMask(op) & opMask; - } - if( pRight && pRight->op==TK_COLUMN ){ - WhereTerm *pNew; - Expr *pDup; - u16 eExtraOp = 0; /* Extra bits for pNew->eOperator */ - if( pTerm->leftCursor>=0 ){ - int idxNew; - pDup = sqlite3ExprDup(db, pExpr, 0); - if( db->mallocFailed ){ - sqlite3ExprDelete(db, pDup); - return; - } - idxNew = whereClauseInsert(pWC, pDup, TERM_VIRTUAL|TERM_DYNAMIC); - if( idxNew==0 ) return; - pNew = &pWC->a[idxNew]; - pNew->iParent = idxTerm; - pTerm = &pWC->a[idxTerm]; - pTerm->nChild = 1; - pTerm->wtFlags |= TERM_COPIED; - if( pExpr->op==TK_EQ - && !ExprHasProperty(pExpr, EP_FromJoin) - && OptimizationEnabled(db, SQLITE_Transitive) - ){ - pTerm->eOperator |= WO_EQUIV; - eExtraOp = WO_EQUIV; - } - }else{ - pDup = pExpr; - pNew = pTerm; - } - exprCommute(pParse, pDup); - pLeft = sqlite3ExprSkipCollate(pDup->pLeft); - pNew->leftCursor = pLeft->iTable; - pNew->u.leftColumn = pLeft->iColumn; - testcase( (prereqLeft | extraRight) != prereqLeft ); - pNew->prereqRight = prereqLeft | extraRight; - pNew->prereqAll = prereqAll; - pNew->eOperator = (operatorMask(pDup->op) + eExtraOp) & opMask; - } - } - -#ifndef SQLITE_OMIT_BETWEEN_OPTIMIZATION - /* If a term is the BETWEEN operator, create two new virtual terms - ** that define the range that the BETWEEN implements. For example: - ** - ** a BETWEEN b AND c - ** - ** is converted into: - ** - ** (a BETWEEN b AND c) AND (a>=b) AND (a<=c) - ** - ** The two new terms are added onto the end of the WhereClause object. - ** The new terms are "dynamic" and are children of the original BETWEEN - ** term. That means that if the BETWEEN term is coded, the children are - ** skipped. Or, if the children are satisfied by an index, the original - ** BETWEEN term is skipped. - */ - else if( pExpr->op==TK_BETWEEN && pWC->op==TK_AND ){ - ExprList *pList = pExpr->x.pList; - int i; - static const u8 ops[] = {TK_GE, TK_LE}; - assert( pList!=0 ); - assert( pList->nExpr==2 ); - for(i=0; i<2; i++){ - Expr *pNewExpr; - int idxNew; - pNewExpr = sqlite3PExpr(pParse, ops[i], - sqlite3ExprDup(db, pExpr->pLeft, 0), - sqlite3ExprDup(db, pList->a[i].pExpr, 0), 0); - transferJoinMarkings(pNewExpr, pExpr); - idxNew = whereClauseInsert(pWC, pNewExpr, TERM_VIRTUAL|TERM_DYNAMIC); - testcase( idxNew==0 ); - exprAnalyze(pSrc, pWC, idxNew); - pTerm = &pWC->a[idxTerm]; - pWC->a[idxNew].iParent = idxTerm; - } - pTerm->nChild = 2; - } -#endif /* SQLITE_OMIT_BETWEEN_OPTIMIZATION */ - -#if !defined(SQLITE_OMIT_OR_OPTIMIZATION) && !defined(SQLITE_OMIT_SUBQUERY) - /* Analyze a term that is composed of two or more subterms connected by - ** an OR operator. - */ - else if( pExpr->op==TK_OR ){ - assert( pWC->op==TK_AND ); - exprAnalyzeOrTerm(pSrc, pWC, idxTerm); - pTerm = &pWC->a[idxTerm]; - } -#endif /* SQLITE_OMIT_OR_OPTIMIZATION */ - -#ifndef SQLITE_OMIT_LIKE_OPTIMIZATION - /* Add constraints to reduce the search space on a LIKE or GLOB - ** operator. - ** - ** A like pattern of the form "x LIKE 'abc%'" is changed into constraints - ** - ** x>='abc' AND x<'abd' AND x LIKE 'abc%' - ** - ** The last character of the prefix "abc" is incremented to form the - ** termination condition "abd". 
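
The BETWEEN expansion a few lines above only adds candidate terms; whether the derived a>=? and a<=? children actually drive an index shows up in the plan. A short sketch with an invented table (assumes a stock build):

    #include <stdio.h>
    #include <sqlite3.h>

    static int show_plan(void *u, int n, char **v, char **c){
      (void)u; (void)c;
      printf("%s\n", n>0 && v[n-1] ? v[n-1] : "");
      return 0;
    }

    int main(void){
      sqlite3 *db; char *zErr = 0;
      sqlite3_open(":memory:", &db);
      sqlite3_exec(db,
        "CREATE TABLE samples(a INTEGER, v REAL);"
        "CREATE INDEX samples_a ON samples(a);",
        0, 0, &zErr);
      /* The BETWEEN term itself is not directly indexable; the two virtual
      ** children a>=10 and a<=20 are what the range scan is built from. */
      sqlite3_exec(db,
        "EXPLAIN QUERY PLAN SELECT v FROM samples WHERE a BETWEEN 10 AND 20;",
        show_plan, 0, &zErr);
      sqlite3_close(db);
      return 0;
    }
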
- */ - if( pWC->op==TK_AND - && isLikeOrGlob(pParse, pExpr, &pStr1, &isComplete, &noCase) - ){ - Expr *pLeft; /* LHS of LIKE/GLOB operator */ - Expr *pStr2; /* Copy of pStr1 - RHS of LIKE/GLOB operator */ - Expr *pNewExpr1; - Expr *pNewExpr2; - int idxNew1; - int idxNew2; - Token sCollSeqName; /* Name of collating sequence */ - - pLeft = pExpr->x.pList->a[1].pExpr; - pStr2 = sqlite3ExprDup(db, pStr1, 0); - if( !db->mallocFailed ){ - u8 c, *pC; /* Last character before the first wildcard */ - pC = (u8*)&pStr2->u.zToken[sqlite3Strlen30(pStr2->u.zToken)-1]; - c = *pC; - if( noCase ){ - /* The point is to increment the last character before the first - ** wildcard. But if we increment '@', that will push it into the - ** alphabetic range where case conversions will mess up the - ** inequality. To avoid this, make sure to also run the full - ** LIKE on all candidate expressions by clearing the isComplete flag - */ - if( c=='A'-1 ) isComplete = 0; - c = sqlite3UpperToLower[c]; - } - *pC = c + 1; - } - sCollSeqName.z = noCase ? "NOCASE" : "BINARY"; - sCollSeqName.n = 6; - pNewExpr1 = sqlite3ExprDup(db, pLeft, 0); - pNewExpr1 = sqlite3PExpr(pParse, TK_GE, - sqlite3ExprAddCollateToken(pParse,pNewExpr1,&sCollSeqName), - pStr1, 0); - transferJoinMarkings(pNewExpr1, pExpr); - idxNew1 = whereClauseInsert(pWC, pNewExpr1, TERM_VIRTUAL|TERM_DYNAMIC); - testcase( idxNew1==0 ); - exprAnalyze(pSrc, pWC, idxNew1); - pNewExpr2 = sqlite3ExprDup(db, pLeft, 0); - pNewExpr2 = sqlite3PExpr(pParse, TK_LT, - sqlite3ExprAddCollateToken(pParse,pNewExpr2,&sCollSeqName), - pStr2, 0); - transferJoinMarkings(pNewExpr2, pExpr); - idxNew2 = whereClauseInsert(pWC, pNewExpr2, TERM_VIRTUAL|TERM_DYNAMIC); - testcase( idxNew2==0 ); - exprAnalyze(pSrc, pWC, idxNew2); - pTerm = &pWC->a[idxTerm]; - if( isComplete ){ - pWC->a[idxNew1].iParent = idxTerm; - pWC->a[idxNew2].iParent = idxTerm; - pTerm->nChild = 2; - } - } -#endif /* SQLITE_OMIT_LIKE_OPTIMIZATION */ - -#ifndef SQLITE_OMIT_VIRTUALTABLE - /* Add a WO_MATCH auxiliary term to the constraint set if the - ** current expression is of the form: column MATCH expr. - ** This information is used by the xBestIndex methods of - ** virtual tables. The native query optimizer does not attempt - ** to do anything with MATCH functions. - */ - if( isMatchOfColumn(pExpr) ){ - int idxNew; - Expr *pRight, *pLeft; - WhereTerm *pNewTerm; - Bitmask prereqColumn, prereqExpr; - - pRight = pExpr->x.pList->a[0].pExpr; - pLeft = pExpr->x.pList->a[1].pExpr; - prereqExpr = exprTableUsage(pMaskSet, pRight); - prereqColumn = exprTableUsage(pMaskSet, pLeft); - if( (prereqExpr & prereqColumn)==0 ){ - Expr *pNewExpr; - pNewExpr = sqlite3PExpr(pParse, TK_MATCH, - 0, sqlite3ExprDup(db, pRight, 0), 0); - idxNew = whereClauseInsert(pWC, pNewExpr, TERM_VIRTUAL|TERM_DYNAMIC); - testcase( idxNew==0 ); - pNewTerm = &pWC->a[idxNew]; - pNewTerm->prereqRight = prereqExpr; - pNewTerm->leftCursor = pLeft->iTable; - pNewTerm->u.leftColumn = pLeft->iColumn; - pNewTerm->eOperator = WO_MATCH; - pNewTerm->iParent = idxTerm; - pTerm = &pWC->a[idxTerm]; - pTerm->nChild = 1; - pTerm->wtFlags |= TERM_COPIED; - pNewTerm->prereqAll = pTerm->prereqAll; - } - } -#endif /* SQLITE_OMIT_VIRTUALTABLE */ - -#ifdef SQLITE_ENABLE_STAT3_OR_STAT4 - /* When sqlite_stat3 histogram data is available an operator of the - ** form "x IS NOT NULL" can sometimes be evaluated more efficiently - ** as "x>NULL" if x is not an INTEGER PRIMARY KEY. So construct a - ** virtual term of that form. 
- ** - ** Note that the virtual term must be tagged with TERM_VNULL. This - ** TERM_VNULL tag will suppress the not-null check at the beginning - ** of the loop. Without the TERM_VNULL flag, the not-null check at - ** the start of the loop will prevent any results from being returned. - */ - if( pExpr->op==TK_NOTNULL - && pExpr->pLeft->op==TK_COLUMN - && pExpr->pLeft->iColumn>=0 - && OptimizationEnabled(db, SQLITE_Stat3) - ){ - Expr *pNewExpr; - Expr *pLeft = pExpr->pLeft; - int idxNew; - WhereTerm *pNewTerm; - - pNewExpr = sqlite3PExpr(pParse, TK_GT, - sqlite3ExprDup(db, pLeft, 0), - sqlite3PExpr(pParse, TK_NULL, 0, 0, 0), 0); - - idxNew = whereClauseInsert(pWC, pNewExpr, - TERM_VIRTUAL|TERM_DYNAMIC|TERM_VNULL); - if( idxNew ){ - pNewTerm = &pWC->a[idxNew]; - pNewTerm->prereqRight = 0; - pNewTerm->leftCursor = pLeft->iTable; - pNewTerm->u.leftColumn = pLeft->iColumn; - pNewTerm->eOperator = WO_GT; - pNewTerm->iParent = idxTerm; - pTerm = &pWC->a[idxTerm]; - pTerm->nChild = 1; - pTerm->wtFlags |= TERM_COPIED; - pNewTerm->prereqAll = pTerm->prereqAll; - } - } -#endif /* SQLITE_ENABLE_STAT3_OR_STAT4 */ - - /* Prevent ON clause terms of a LEFT JOIN from being used to drive - ** an index for tables to the left of the join. - */ - pTerm->prereqRight |= extraRight; -} - -/* -** This function searches pList for a entry that matches the iCol-th column -** of index pIdx. -** -** If such an expression is found, its index in pList->a[] is returned. If -** no expression is found, -1 is returned. -*/ -static int findIndexCol( - Parse *pParse, /* Parse context */ - ExprList *pList, /* Expression list to search */ - int iBase, /* Cursor for table associated with pIdx */ - Index *pIdx, /* Index to match column of */ - int iCol /* Column of index to match */ -){ - int i; - const char *zColl = pIdx->azColl[iCol]; - - for(i=0; inExpr; i++){ - Expr *p = sqlite3ExprSkipCollate(pList->a[i].pExpr); - if( p->op==TK_COLUMN - && p->iColumn==pIdx->aiColumn[iCol] - && p->iTable==iBase - ){ - CollSeq *pColl = sqlite3ExprCollSeq(pParse, pList->a[i].pExpr); - if( ALWAYS(pColl) && 0==sqlite3StrICmp(pColl->zName, zColl) ){ - return i; - } - } - } - - return -1; -} - -/* -** Return true if the DISTINCT expression-list passed as the third argument -** is redundant. -** -** A DISTINCT list is redundant if the database contains some subset of -** columns that are unique and non-null. -*/ -static int isDistinctRedundant( - Parse *pParse, /* Parsing context */ - SrcList *pTabList, /* The FROM clause */ - WhereClause *pWC, /* The WHERE clause */ - ExprList *pDistinct /* The result set that needs to be DISTINCT */ -){ - Table *pTab; - Index *pIdx; - int i; - int iBase; - - /* If there is more than one table or sub-select in the FROM clause of - ** this query, then it will not be possible to show that the DISTINCT - ** clause is redundant. */ - if( pTabList->nSrc!=1 ) return 0; - iBase = pTabList->a[0].iCursor; - pTab = pTabList->a[0].pTab; - - /* If any of the expressions is an IPK column on table iBase, then return - ** true. Note: The (p->iTable==iBase) part of this test may be false if the - ** current SELECT is a correlated sub-query. - */ - for(i=0; inExpr; i++){ - Expr *p = sqlite3ExprSkipCollate(pDistinct->a[i].pExpr); - if( p->op==TK_COLUMN && p->iTable==iBase && p->iColumn<0 ) return 1; - } - - /* Loop through all indices on the table, checking each to see if it makes - ** the DISTINCT qualifier redundant. It does so if: - ** - ** 1. The index is itself UNIQUE, and - ** - ** 2. 
All of the columns in the index are either part of the pDistinct - ** list, or else the WHERE clause contains a term of the form "col=X", - ** where X is a constant value. The collation sequences of the - ** comparison and select-list expressions must match those of the index. - ** - ** 3. All of those index columns for which the WHERE clause does not - ** contain a "col=X" term are subject to a NOT NULL constraint. - */ - for(pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext){ - if( pIdx->onError==OE_None ) continue; - for(i=0; inKeyCol; i++){ - i16 iCol = pIdx->aiColumn[i]; - if( 0==findTerm(pWC, iBase, iCol, ~(Bitmask)0, WO_EQ, pIdx) ){ - int iIdxCol = findIndexCol(pParse, pDistinct, iBase, pIdx, i); - if( iIdxCol<0 || pTab->aCol[iCol].notNull==0 ){ - break; - } - } - } - if( i==pIdx->nKeyCol ){ - /* This index implies that the DISTINCT qualifier is redundant. */ - return 1; - } - } - - return 0; -} - - -/* -** Estimate the logarithm of the input value to base 2. -*/ -static LogEst estLog(LogEst N){ - LogEst x = sqlite3LogEst(N); - return x>33 ? x - 33 : 0; -} - -/* -** Two routines for printing the content of an sqlite3_index_info -** structure. Used for testing and debugging only. If neither -** SQLITE_TEST or SQLITE_DEBUG are defined, then these routines -** are no-ops. -*/ -#if !defined(SQLITE_OMIT_VIRTUALTABLE) && defined(WHERETRACE_ENABLED) -static void TRACE_IDX_INPUTS(sqlite3_index_info *p){ - int i; - if( !sqlite3WhereTrace ) return; - for(i=0; inConstraint; i++){ - sqlite3DebugPrintf(" constraint[%d]: col=%d termid=%d op=%d usabled=%d\n", - i, - p->aConstraint[i].iColumn, - p->aConstraint[i].iTermOffset, - p->aConstraint[i].op, - p->aConstraint[i].usable); - } - for(i=0; inOrderBy; i++){ - sqlite3DebugPrintf(" orderby[%d]: col=%d desc=%d\n", - i, - p->aOrderBy[i].iColumn, - p->aOrderBy[i].desc); - } -} -static void TRACE_IDX_OUTPUTS(sqlite3_index_info *p){ - int i; - if( !sqlite3WhereTrace ) return; - for(i=0; inConstraint; i++){ - sqlite3DebugPrintf(" usage[%d]: argvIdx=%d omit=%d\n", - i, - p->aConstraintUsage[i].argvIndex, - p->aConstraintUsage[i].omit); - } - sqlite3DebugPrintf(" idxNum=%d\n", p->idxNum); - sqlite3DebugPrintf(" idxStr=%s\n", p->idxStr); - sqlite3DebugPrintf(" orderByConsumed=%d\n", p->orderByConsumed); - sqlite3DebugPrintf(" estimatedCost=%g\n", p->estimatedCost); - sqlite3DebugPrintf(" estimatedRows=%lld\n", p->estimatedRows); -} -#else -#define TRACE_IDX_INPUTS(A) -#define TRACE_IDX_OUTPUTS(A) -#endif - -#ifndef SQLITE_OMIT_AUTOMATIC_INDEX -/* -** Return TRUE if the WHERE clause term pTerm is of a form where it -** could be used with an index to access pSrc, assuming an appropriate -** index existed. -*/ -static int termCanDriveIndex( - WhereTerm *pTerm, /* WHERE clause term to check */ - struct SrcList_item *pSrc, /* Table we are trying to access */ - Bitmask notReady /* Tables in outer loops of the join */ -){ - char aff; - if( pTerm->leftCursor!=pSrc->iCursor ) return 0; - if( (pTerm->eOperator & WO_EQ)==0 ) return 0; - if( (pTerm->prereqRight & notReady)!=0 ) return 0; - if( pTerm->u.leftColumn<0 ) return 0; - aff = pSrc->pTab->aCol[pTerm->u.leftColumn].affinity; - if( !sqlite3IndexAffinityOk(pTerm->pExpr, aff) ) return 0; - return 1; -} -#endif - - -#ifndef SQLITE_OMIT_AUTOMATIC_INDEX -/* -** Generate code to construct the Index object for an automatic index -** and to set up the WhereLevel object pLevel so that the code generator -** makes use of the automatic index. 
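
The automatic-index path introduced in the comment above is easiest to observe on a join between unindexed tables. A sketch with an invented schema; whether the transient index is actually chosen, and how the plan describes it, depends on the SQLite version and its cost estimates:

    #include <stdio.h>
    #include <sqlite3.h>

    static int show_plan(void *u, int n, char **v, char **c){
      (void)u; (void)c;
      printf("%s\n", n>0 && v[n-1] ? v[n-1] : "");
      return 0;
    }

    int main(void){
      sqlite3 *db; char *zErr = 0;
      sqlite3_open(":memory:", &db);
      sqlite3_exec(db,
        "CREATE TABLE orders(customer_id INTEGER, total REAL);"
        "CREATE TABLE customers(id INTEGER, name TEXT);",  /* no indexes */
        0, 0, &zErr);
      /* With no usable index on either join column, the planner typically
      ** builds a transient (automatic covering) index on the inner table
      ** instead of running a nested full scan. */
      sqlite3_exec(db,
        "EXPLAIN QUERY PLAN "
        "SELECT name, total FROM orders "
        "JOIN customers ON customers.id = orders.customer_id;",
        show_plan, 0, &zErr);
      sqlite3_close(db);
      return 0;
    }
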
-*/ -static void constructAutomaticIndex( - Parse *pParse, /* The parsing context */ - WhereClause *pWC, /* The WHERE clause */ - struct SrcList_item *pSrc, /* The FROM clause term to get the next index */ - Bitmask notReady, /* Mask of cursors that are not available */ - WhereLevel *pLevel /* Write new index here */ -){ - int nKeyCol; /* Number of columns in the constructed index */ - WhereTerm *pTerm; /* A single term of the WHERE clause */ - WhereTerm *pWCEnd; /* End of pWC->a[] */ - Index *pIdx; /* Object describing the transient index */ - Vdbe *v; /* Prepared statement under construction */ - int addrInit; /* Address of the initialization bypass jump */ - Table *pTable; /* The table being indexed */ - int addrTop; /* Top of the index fill loop */ - int regRecord; /* Register holding an index record */ - int n; /* Column counter */ - int i; /* Loop counter */ - int mxBitCol; /* Maximum column in pSrc->colUsed */ - CollSeq *pColl; /* Collating sequence to on a column */ - WhereLoop *pLoop; /* The Loop object */ - char *zNotUsed; /* Extra space on the end of pIdx */ - Bitmask idxCols; /* Bitmap of columns used for indexing */ - Bitmask extraCols; /* Bitmap of additional columns */ - u8 sentWarning = 0; /* True if a warnning has been issued */ - - /* Generate code to skip over the creation and initialization of the - ** transient index on 2nd and subsequent iterations of the loop. */ - v = pParse->pVdbe; - assert( v!=0 ); - addrInit = sqlite3CodeOnce(pParse); VdbeCoverage(v); - - /* Count the number of columns that will be added to the index - ** and used to match WHERE clause constraints */ - nKeyCol = 0; - pTable = pSrc->pTab; - pWCEnd = &pWC->a[pWC->nTerm]; - pLoop = pLevel->pWLoop; - idxCols = 0; - for(pTerm=pWC->a; pTermu.leftColumn; - Bitmask cMask = iCol>=BMS ? MASKBIT(BMS-1) : MASKBIT(iCol); - testcase( iCol==BMS ); - testcase( iCol==BMS-1 ); - if( !sentWarning ){ - sqlite3_log(SQLITE_WARNING_AUTOINDEX, - "automatic index on %s(%s)", pTable->zName, - pTable->aCol[iCol].zName); - sentWarning = 1; - } - if( (idxCols & cMask)==0 ){ - if( whereLoopResize(pParse->db, pLoop, nKeyCol+1) ) return; - pLoop->aLTerm[nKeyCol++] = pTerm; - idxCols |= cMask; - } - } - } - assert( nKeyCol>0 ); - pLoop->u.btree.nEq = pLoop->nLTerm = nKeyCol; - pLoop->wsFlags = WHERE_COLUMN_EQ | WHERE_IDX_ONLY | WHERE_INDEXED - | WHERE_AUTO_INDEX; - - /* Count the number of additional columns needed to create a - ** covering index. A "covering index" is an index that contains all - ** columns that are needed by the query. With a covering index, the - ** original table never needs to be accessed. Automatic indices must - ** be a covering index because the index will not be updated if the - ** original table changes and the index and table cannot both be used - ** if they go out of sync. - */ - extraCols = pSrc->colUsed & (~idxCols | MASKBIT(BMS-1)); - mxBitCol = (pTable->nCol >= BMS-1) ? BMS-1 : pTable->nCol; - testcase( pTable->nCol==BMS-1 ); - testcase( pTable->nCol==BMS-2 ); - for(i=0; icolUsed & MASKBIT(BMS-1) ){ - nKeyCol += pTable->nCol - BMS + 1; - } - pLoop->wsFlags |= WHERE_COLUMN_EQ | WHERE_IDX_ONLY; - - /* Construct the Index object to describe this index */ - pIdx = sqlite3AllocateIndexObject(pParse->db, nKeyCol+1, 0, &zNotUsed); - if( pIdx==0 ) return; - pLoop->u.btree.pIndex = pIdx; - pIdx->zName = "auto-index"; - pIdx->pTable = pTable; - n = 0; - idxCols = 0; - for(pTerm=pWC->a; pTermu.leftColumn; - Bitmask cMask = iCol>=BMS ? 
MASKBIT(BMS-1) : MASKBIT(iCol); - testcase( iCol==BMS-1 ); - testcase( iCol==BMS ); - if( (idxCols & cMask)==0 ){ - Expr *pX = pTerm->pExpr; - idxCols |= cMask; - pIdx->aiColumn[n] = pTerm->u.leftColumn; - pColl = sqlite3BinaryCompareCollSeq(pParse, pX->pLeft, pX->pRight); - pIdx->azColl[n] = ALWAYS(pColl) ? pColl->zName : "BINARY"; - n++; - } - } - } - assert( (u32)n==pLoop->u.btree.nEq ); - - /* Add additional columns needed to make the automatic index into - ** a covering index */ - for(i=0; iaiColumn[n] = i; - pIdx->azColl[n] = "BINARY"; - n++; - } - } - if( pSrc->colUsed & MASKBIT(BMS-1) ){ - for(i=BMS-1; inCol; i++){ - pIdx->aiColumn[n] = i; - pIdx->azColl[n] = "BINARY"; - n++; - } - } - assert( n==nKeyCol ); - pIdx->aiColumn[n] = -1; - pIdx->azColl[n] = "BINARY"; - - /* Create the automatic index */ - assert( pLevel->iIdxCur>=0 ); - pLevel->iIdxCur = pParse->nTab++; - sqlite3VdbeAddOp2(v, OP_OpenAutoindex, pLevel->iIdxCur, nKeyCol+1); - sqlite3VdbeSetP4KeyInfo(pParse, pIdx); - VdbeComment((v, "for %s", pTable->zName)); - - /* Fill the automatic index with content */ - addrTop = sqlite3VdbeAddOp1(v, OP_Rewind, pLevel->iTabCur); VdbeCoverage(v); - regRecord = sqlite3GetTempReg(pParse); - sqlite3GenerateIndexKey(pParse, pIdx, pLevel->iTabCur, regRecord, 0, 0, 0, 0); - sqlite3VdbeAddOp2(v, OP_IdxInsert, pLevel->iIdxCur, regRecord); - sqlite3VdbeChangeP5(v, OPFLAG_USESEEKRESULT); - sqlite3VdbeAddOp2(v, OP_Next, pLevel->iTabCur, addrTop+1); VdbeCoverage(v); - sqlite3VdbeChangeP5(v, SQLITE_STMTSTATUS_AUTOINDEX); - sqlite3VdbeJumpHere(v, addrTop); - sqlite3ReleaseTempReg(pParse, regRecord); - - /* Jump here when skipping the initialization */ - sqlite3VdbeJumpHere(v, addrInit); -} -#endif /* SQLITE_OMIT_AUTOMATIC_INDEX */ - -#ifndef SQLITE_OMIT_VIRTUALTABLE -/* -** Allocate and populate an sqlite3_index_info structure. It is the -** responsibility of the caller to eventually release the structure -** by passing the pointer returned by this function to sqlite3_free(). -*/ -static sqlite3_index_info *allocateIndexInfo( - Parse *pParse, - WhereClause *pWC, - struct SrcList_item *pSrc, - ExprList *pOrderBy -){ - int i, j; - int nTerm; - struct sqlite3_index_constraint *pIdxCons; - struct sqlite3_index_orderby *pIdxOrderBy; - struct sqlite3_index_constraint_usage *pUsage; - WhereTerm *pTerm; - int nOrderBy; - sqlite3_index_info *pIdxInfo; - - /* Count the number of possible WHERE clause constraints referring - ** to this virtual table */ - for(i=nTerm=0, pTerm=pWC->a; inTerm; i++, pTerm++){ - if( pTerm->leftCursor != pSrc->iCursor ) continue; - assert( IsPowerOfTwo(pTerm->eOperator & ~WO_EQUIV) ); - testcase( pTerm->eOperator & WO_IN ); - testcase( pTerm->eOperator & WO_ISNULL ); - testcase( pTerm->eOperator & WO_ALL ); - if( (pTerm->eOperator & ~(WO_ISNULL|WO_EQUIV))==0 ) continue; - if( pTerm->wtFlags & TERM_VNULL ) continue; - nTerm++; - } - - /* If the ORDER BY clause contains only columns in the current - ** virtual table then allocate space for the aOrderBy part of - ** the sqlite3_index_info structure. 
- */ - nOrderBy = 0; - if( pOrderBy ){ - int n = pOrderBy->nExpr; - for(i=0; ia[i].pExpr; - if( pExpr->op!=TK_COLUMN || pExpr->iTable!=pSrc->iCursor ) break; - } - if( i==n){ - nOrderBy = n; - } - } - - /* Allocate the sqlite3_index_info structure - */ - pIdxInfo = sqlite3DbMallocZero(pParse->db, sizeof(*pIdxInfo) - + (sizeof(*pIdxCons) + sizeof(*pUsage))*nTerm - + sizeof(*pIdxOrderBy)*nOrderBy ); - if( pIdxInfo==0 ){ - sqlite3ErrorMsg(pParse, "out of memory"); - return 0; - } - - /* Initialize the structure. The sqlite3_index_info structure contains - ** many fields that are declared "const" to prevent xBestIndex from - ** changing them. We have to do some funky casting in order to - ** initialize those fields. - */ - pIdxCons = (struct sqlite3_index_constraint*)&pIdxInfo[1]; - pIdxOrderBy = (struct sqlite3_index_orderby*)&pIdxCons[nTerm]; - pUsage = (struct sqlite3_index_constraint_usage*)&pIdxOrderBy[nOrderBy]; - *(int*)&pIdxInfo->nConstraint = nTerm; - *(int*)&pIdxInfo->nOrderBy = nOrderBy; - *(struct sqlite3_index_constraint**)&pIdxInfo->aConstraint = pIdxCons; - *(struct sqlite3_index_orderby**)&pIdxInfo->aOrderBy = pIdxOrderBy; - *(struct sqlite3_index_constraint_usage**)&pIdxInfo->aConstraintUsage = - pUsage; - - for(i=j=0, pTerm=pWC->a; inTerm; i++, pTerm++){ - u8 op; - if( pTerm->leftCursor != pSrc->iCursor ) continue; - assert( IsPowerOfTwo(pTerm->eOperator & ~WO_EQUIV) ); - testcase( pTerm->eOperator & WO_IN ); - testcase( pTerm->eOperator & WO_ISNULL ); - testcase( pTerm->eOperator & WO_ALL ); - if( (pTerm->eOperator & ~(WO_ISNULL|WO_EQUIV))==0 ) continue; - if( pTerm->wtFlags & TERM_VNULL ) continue; - pIdxCons[j].iColumn = pTerm->u.leftColumn; - pIdxCons[j].iTermOffset = i; - op = (u8)pTerm->eOperator & WO_ALL; - if( op==WO_IN ) op = WO_EQ; - pIdxCons[j].op = op; - /* The direct assignment in the previous line is possible only because - ** the WO_ and SQLITE_INDEX_CONSTRAINT_ codes are identical. The - ** following asserts verify this fact. */ - assert( WO_EQ==SQLITE_INDEX_CONSTRAINT_EQ ); - assert( WO_LT==SQLITE_INDEX_CONSTRAINT_LT ); - assert( WO_LE==SQLITE_INDEX_CONSTRAINT_LE ); - assert( WO_GT==SQLITE_INDEX_CONSTRAINT_GT ); - assert( WO_GE==SQLITE_INDEX_CONSTRAINT_GE ); - assert( WO_MATCH==SQLITE_INDEX_CONSTRAINT_MATCH ); - assert( pTerm->eOperator & (WO_IN|WO_EQ|WO_LT|WO_LE|WO_GT|WO_GE|WO_MATCH) ); - j++; - } - for(i=0; ia[i].pExpr; - pIdxOrderBy[i].iColumn = pExpr->iColumn; - pIdxOrderBy[i].desc = pOrderBy->a[i].sortOrder; - } - - return pIdxInfo; -} - -/* -** The table object reference passed as the second argument to this function -** must represent a virtual table. This function invokes the xBestIndex() -** method of the virtual table with the sqlite3_index_info object that -** comes in as the 3rd argument to this function. -** -** If an error occurs, pParse is populated with an error message and a -** non-zero value is returned. Otherwise, 0 is returned and the output -** part of the sqlite3_index_info structure is left populated. -** -** Whether or not an error is returned, it is the responsibility of the -** caller to eventually free p->idxStr if p->needToFreeIdxStr indicates -** that this is required. 
-*/ -static int vtabBestIndex(Parse *pParse, Table *pTab, sqlite3_index_info *p){ - sqlite3_vtab *pVtab = sqlite3GetVTable(pParse->db, pTab)->pVtab; - int i; - int rc; - - TRACE_IDX_INPUTS(p); - rc = pVtab->pModule->xBestIndex(pVtab, p); - TRACE_IDX_OUTPUTS(p); - - if( rc!=SQLITE_OK ){ - if( rc==SQLITE_NOMEM ){ - pParse->db->mallocFailed = 1; - }else if( !pVtab->zErrMsg ){ - sqlite3ErrorMsg(pParse, "%s", sqlite3ErrStr(rc)); - }else{ - sqlite3ErrorMsg(pParse, "%s", pVtab->zErrMsg); - } - } - sqlite3_free(pVtab->zErrMsg); - pVtab->zErrMsg = 0; - - for(i=0; inConstraint; i++){ - if( !p->aConstraint[i].usable && p->aConstraintUsage[i].argvIndex>0 ){ - sqlite3ErrorMsg(pParse, - "table %s: xBestIndex returned an invalid plan", pTab->zName); - } - } - - return pParse->nErr; -} -#endif /* !defined(SQLITE_OMIT_VIRTUALTABLE) */ - - -#ifdef SQLITE_ENABLE_STAT3_OR_STAT4 -/* -** Estimate the location of a particular key among all keys in an -** index. Store the results in aStat as follows: -** -** aStat[0] Est. number of rows less than pVal -** aStat[1] Est. number of rows equal to pVal -** -** Return SQLITE_OK on success. -*/ -static void whereKeyStats( - Parse *pParse, /* Database connection */ - Index *pIdx, /* Index to consider domain of */ - UnpackedRecord *pRec, /* Vector of values to consider */ - int roundUp, /* Round up if true. Round down if false */ - tRowcnt *aStat /* OUT: stats written here */ -){ - IndexSample *aSample = pIdx->aSample; - int iCol; /* Index of required stats in anEq[] etc. */ - int iMin = 0; /* Smallest sample not yet tested */ - int i = pIdx->nSample; /* Smallest sample larger than or equal to pRec */ - int iTest; /* Next sample to test */ - int res; /* Result of comparison operation */ - -#ifndef SQLITE_DEBUG - UNUSED_PARAMETER( pParse ); -#endif - assert( pRec!=0 ); - iCol = pRec->nField - 1; - assert( pIdx->nSample>0 ); - assert( pRec->nField>0 && iColnSampleCol ); - do{ - iTest = (iMin+i)/2; - res = sqlite3VdbeRecordCompare(aSample[iTest].n, aSample[iTest].p, pRec, 0); - if( res<0 ){ - iMin = iTest+1; - }else{ - i = iTest; - } - }while( res && iMinnSample ); - assert( 0==sqlite3VdbeRecordCompare(aSample[i].n, aSample[i].p, pRec, 0) - || pParse->db->mallocFailed ); - }else{ - /* Otherwise, pRec must be smaller than sample $i and larger than - ** sample ($i-1). */ - assert( i==pIdx->nSample - || sqlite3VdbeRecordCompare(aSample[i].n, aSample[i].p, pRec, 0)>0 - || pParse->db->mallocFailed ); - assert( i==0 - || sqlite3VdbeRecordCompare(aSample[i-1].n, aSample[i-1].p, pRec, 0)<0 - || pParse->db->mallocFailed ); - } -#endif /* ifdef SQLITE_DEBUG */ - - /* At this point, aSample[i] is the first sample that is greater than - ** or equal to pVal. Or if i==pIdx->nSample, then all samples are less - ** than pVal. If aSample[i]==pVal, then res==0. - */ - if( res==0 ){ - aStat[0] = aSample[i].anLt[iCol]; - aStat[1] = aSample[i].anEq[iCol]; - }else{ - tRowcnt iLower, iUpper, iGap; - if( i==0 ){ - iLower = 0; - iUpper = aSample[0].anLt[iCol]; - }else{ - i64 nRow0 = sqlite3LogEstToInt(pIdx->aiRowLogEst[0]); - iUpper = i>=pIdx->nSample ? nRow0 : aSample[i].anLt[iCol]; - iLower = aSample[i-1].anEq[iCol] + aSample[i-1].anLt[iCol]; - } - aStat[1] = (pIdx->nKeyCol>iCol ? 
pIdx->aAvgEq[iCol] : 1); - if( iLower>=iUpper ){ - iGap = 0; - }else{ - iGap = iUpper - iLower; - } - if( roundUp ){ - iGap = (iGap*2)/3; - }else{ - iGap = iGap/3; - } - aStat[0] = iLower + iGap; - } -} -#endif /* SQLITE_ENABLE_STAT3_OR_STAT4 */ - -/* -** If it is not NULL, pTerm is a term that provides an upper or lower -** bound on a range scan. Without considering pTerm, it is estimated -** that the scan will visit nNew rows. This function returns the number -** estimated to be visited after taking pTerm into account. -** -** If the user explicitly specified a likelihood() value for this term, -** then the return value is the likelihood multiplied by the number of -** input rows. Otherwise, this function assumes that an "IS NOT NULL" term -** has a likelihood of 0.50, and any other term a likelihood of 0.25. -*/ -static LogEst whereRangeAdjust(WhereTerm *pTerm, LogEst nNew){ - LogEst nRet = nNew; - if( pTerm ){ - if( pTerm->truthProb<=0 ){ - nRet += pTerm->truthProb; - }else if( (pTerm->wtFlags & TERM_VNULL)==0 ){ - nRet -= 20; assert( 20==sqlite3LogEst(4) ); - } - } - return nRet; -} - -/* -** This function is used to estimate the number of rows that will be visited -** by scanning an index for a range of values. The range may have an upper -** bound, a lower bound, or both. The WHERE clause terms that set the upper -** and lower bounds are represented by pLower and pUpper respectively. For -** example, assuming that index p is on t1(a): -** -** ... FROM t1 WHERE a > ? AND a < ? ... -** |_____| |_____| -** | | -** pLower pUpper -** -** If either of the upper or lower bound is not present, then NULL is passed in -** place of the corresponding WhereTerm. -** -** The value in (pBuilder->pNew->u.btree.nEq) is the index of the index -** column subject to the range constraint. Or, equivalently, the number of -** equality constraints optimized by the proposed index scan. For example, -** assuming index p is on t1(a, b), and the SQL query is: -** -** ... FROM t1 WHERE a = ? AND b > ? AND b < ? ... -** -** then nEq is set to 1 (as the range restricted column, b, is the second -** left-most column of the index). Or, if the query is: -** -** ... FROM t1 WHERE a > ? AND a < ? ... -** -** then nEq is set to 0. -** -** When this function is called, *pnOut is set to the sqlite3LogEst() of the -** number of rows that the index scan is expected to visit without -** considering the range constraints. If nEq is 0, this is the number of -** rows in the index. Assuming no error occurs, *pnOut is adjusted (reduced) -** to account for the range contraints pLower and pUpper. -** -** In the absence of sqlite_stat4 ANALYZE data, or if such data cannot be -** used, each range inequality reduces the search space by a factor of 4. -** Hence a pair of constraints (x>? AND x123" Might be NULL */ - WhereTerm *pUpper, /* Upper bound on the range. ex: "x<455" Might be NULL */ - WhereLoop *pLoop /* Modify the .nOut and maybe .rRun fields */ -){ - int rc = SQLITE_OK; - int nOut = pLoop->nOut; - LogEst nNew; - -#ifdef SQLITE_ENABLE_STAT3_OR_STAT4 - Index *p = pLoop->u.btree.pIndex; - int nEq = pLoop->u.btree.nEq; - - if( p->nSample>0 - && nEq==pBuilder->nRecValid - && nEqnSampleCol - && OptimizationEnabled(pParse->db, SQLITE_Stat3) - ){ - UnpackedRecord *pRec = pBuilder->pRec; - tRowcnt a[2]; - u8 aff; - - /* Variable iLower will be set to the estimate of the number of rows in - ** the index that are less than the lower bound of the range query. 
The - ** lower bound being the concatenation of $P and $L, where $P is the - ** key-prefix formed by the nEq values matched against the nEq left-most - ** columns of the index, and $L is the value in pLower. - ** - ** Or, if pLower is NULL or $L cannot be extracted from it (because it - ** is not a simple variable or literal value), the lower bound of the - ** range is $P. Due to a quirk in the way whereKeyStats() works, even - ** if $L is available, whereKeyStats() is called for both ($P) and - ** ($P:$L) and the larger of the two returned values used. - ** - ** Similarly, iUpper is to be set to the estimate of the number of rows - ** less than the upper bound of the range query. Where the upper bound - ** is either ($P) or ($P:$U). Again, even if $U is available, both values - ** of iUpper are requested of whereKeyStats() and the smaller used. - */ - tRowcnt iLower; - tRowcnt iUpper; - - if( nEq==p->nKeyCol ){ - aff = SQLITE_AFF_INTEGER; - }else{ - aff = p->pTable->aCol[p->aiColumn[nEq]].affinity; - } - /* Determine iLower and iUpper using ($P) only. */ - if( nEq==0 ){ - iLower = 0; - iUpper = sqlite3LogEstToInt(p->aiRowLogEst[0]); - }else{ - /* Note: this call could be optimized away - since the same values must - ** have been requested when testing key $P in whereEqualScanEst(). */ - whereKeyStats(pParse, p, pRec, 0, a); - iLower = a[0]; - iUpper = a[0] + a[1]; - } - - /* If possible, improve on the iLower estimate using ($P:$L). */ - if( pLower ){ - int bOk; /* True if value is extracted from pExpr */ - Expr *pExpr = pLower->pExpr->pRight; - assert( (pLower->eOperator & (WO_GT|WO_GE))!=0 ); - rc = sqlite3Stat4ProbeSetValue(pParse, p, &pRec, pExpr, aff, nEq, &bOk); - if( rc==SQLITE_OK && bOk ){ - tRowcnt iNew; - whereKeyStats(pParse, p, pRec, 0, a); - iNew = a[0] + ((pLower->eOperator & WO_GT) ? a[1] : 0); - if( iNew>iLower ) iLower = iNew; - nOut--; - } - } - - /* If possible, improve on the iUpper estimate using ($P:$U). */ - if( pUpper ){ - int bOk; /* True if value is extracted from pExpr */ - Expr *pExpr = pUpper->pExpr->pRight; - assert( (pUpper->eOperator & (WO_LT|WO_LE))!=0 ); - rc = sqlite3Stat4ProbeSetValue(pParse, p, &pRec, pExpr, aff, nEq, &bOk); - if( rc==SQLITE_OK && bOk ){ - tRowcnt iNew; - whereKeyStats(pParse, p, pRec, 1, a); - iNew = a[0] + ((pUpper->eOperator & WO_LE) ? a[1] : 0); - if( iNewpRec = pRec; - if( rc==SQLITE_OK ){ - if( iUpper>iLower ){ - nNew = sqlite3LogEst(iUpper - iLower); - }else{ - nNew = 10; assert( 10==sqlite3LogEst(2) ); - } - if( nNewnOut = (LogEst)nOut; - WHERETRACE(0x10, ("range scan regions: %u..%u est=%d\n", - (u32)iLower, (u32)iUpper, nOut)); - return SQLITE_OK; - } - } -#else - UNUSED_PARAMETER(pParse); - UNUSED_PARAMETER(pBuilder); -#endif - assert( pLower || pUpper ); - assert( pUpper==0 || (pUpper->wtFlags & TERM_VNULL)==0 ); - nNew = whereRangeAdjust(pLower, nOut); - nNew = whereRangeAdjust(pUpper, nNew); - - /* TUNING: If there is both an upper and lower limit, assume the range is - ** reduced by an additional 75%. This means that, by default, an open-ended - ** range query (e.g. col > ?) is assumed to match 1/4 of the rows in the - ** index. While a closed range (e.g. col BETWEEN ? AND ?) is estimated to - ** match 1/64 of the index. 
*/ - if( pLower && pUpper ) nNew -= 20; - - nOut -= (pLower!=0) + (pUpper!=0); - if( nNew<10 ) nNew = 10; - if( nNewnOut = (LogEst)nOut; - return rc; -} - -#ifdef SQLITE_ENABLE_STAT3_OR_STAT4 -/* -** Estimate the number of rows that will be returned based on -** an equality constraint x=VALUE and where that VALUE occurs in -** the histogram data. This only works when x is the left-most -** column of an index and sqlite_stat3 histogram data is available -** for that index. When pExpr==NULL that means the constraint is -** "x IS NULL" instead of "x=VALUE". -** -** Write the estimated row count into *pnRow and return SQLITE_OK. -** If unable to make an estimate, leave *pnRow unchanged and return -** non-zero. -** -** This routine can fail if it is unable to load a collating sequence -** required for string comparison, or if unable to allocate memory -** for a UTF conversion required for comparison. The error is stored -** in the pParse structure. -*/ -static int whereEqualScanEst( - Parse *pParse, /* Parsing & code generating context */ - WhereLoopBuilder *pBuilder, - Expr *pExpr, /* Expression for VALUE in the x=VALUE constraint */ - tRowcnt *pnRow /* Write the revised row estimate here */ -){ - Index *p = pBuilder->pNew->u.btree.pIndex; - int nEq = pBuilder->pNew->u.btree.nEq; - UnpackedRecord *pRec = pBuilder->pRec; - u8 aff; /* Column affinity */ - int rc; /* Subfunction return code */ - tRowcnt a[2]; /* Statistics */ - int bOk; - - assert( nEq>=1 ); - assert( nEq<=(p->nKeyCol+1) ); - assert( p->aSample!=0 ); - assert( p->nSample>0 ); - assert( pBuilder->nRecValidnRecValid<(nEq-1) ){ - return SQLITE_NOTFOUND; - } - - /* This is an optimization only. The call to sqlite3Stat4ProbeSetValue() - ** below would return the same value. */ - if( nEq>p->nKeyCol ){ - *pnRow = 1; - return SQLITE_OK; - } - - aff = p->pTable->aCol[p->aiColumn[nEq-1]].affinity; - rc = sqlite3Stat4ProbeSetValue(pParse, p, &pRec, pExpr, aff, nEq-1, &bOk); - pBuilder->pRec = pRec; - if( rc!=SQLITE_OK ) return rc; - if( bOk==0 ) return SQLITE_NOTFOUND; - pBuilder->nRecValid = nEq; - - whereKeyStats(pParse, p, pRec, 0, a); - WHERETRACE(0x10,("equality scan regions: %d\n", (int)a[1])); - *pnRow = a[1]; - - return rc; -} -#endif /* SQLITE_ENABLE_STAT3_OR_STAT4 */ - -#ifdef SQLITE_ENABLE_STAT3_OR_STAT4 -/* -** Estimate the number of rows that will be returned based on -** an IN constraint where the right-hand side of the IN operator -** is a list of values. Example: -** -** WHERE x IN (1,2,3,4) -** -** Write the estimated row count into *pnRow and return SQLITE_OK. -** If unable to make an estimate, leave *pnRow unchanged and return -** non-zero. -** -** This routine can fail if it is unable to load a collating sequence -** required for string comparison, or if unable to allocate memory -** for a UTF conversion required for comparison. The error is stored -** in the pParse structure. 
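
The estimators in this section only have data to work with after ANALYZE has populated the stat tables (sqlite_stat1 always; the stat3/stat4 samples only in builds compiled with the corresponding option). A sketch that loads deliberately skewed data, runs ANALYZE, and prints the collected statistics together with a plan; the schema and row counts are invented, and only stat4-enabled builds will use per-value histogram estimates:

    #include <stdio.h>
    #include <sqlite3.h>

    static int show_row(void *u, int n, char **v, char **c){
      int i;
      (void)u; (void)c;
      for(i=0; i<n; i++) printf("%s%s", v[i] ? v[i] : "NULL", i<n-1 ? " | " : "\n");
      return 0;
    }

    int main(void){
      sqlite3 *db; char *zErr = 0;
      sqlite3_open(":memory:", &db);
      sqlite3_exec(db,
        "CREATE TABLE events(category TEXT, payload BLOB);"
        "CREATE INDEX events_cat ON events(category);"
        /* 99 out of every 100 rows share one category; the rest are unique. */
        "WITH RECURSIVE c(i) AS (SELECT 1 UNION ALL SELECT i+1 FROM c WHERE i<10000) "
        "INSERT INTO events(category) "
        "  SELECT CASE WHEN i%100 THEN 'common' ELSE 'rare-'||i END FROM c;"
        "ANALYZE;",
        0, 0, &zErr);
      sqlite3_exec(db, "SELECT * FROM sqlite_stat1;", show_row, 0, &zErr);
      sqlite3_exec(db,
        "EXPLAIN QUERY PLAN "
        "SELECT count(payload) FROM events WHERE category='common';",
        show_row, 0, &zErr);
      sqlite3_close(db);
      return 0;
    }
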
-*/ -static int whereInScanEst( - Parse *pParse, /* Parsing & code generating context */ - WhereLoopBuilder *pBuilder, - ExprList *pList, /* The value list on the RHS of "x IN (v1,v2,v3,...)" */ - tRowcnt *pnRow /* Write the revised row estimate here */ -){ - Index *p = pBuilder->pNew->u.btree.pIndex; - i64 nRow0 = sqlite3LogEstToInt(p->aiRowLogEst[0]); - int nRecValid = pBuilder->nRecValid; - int rc = SQLITE_OK; /* Subfunction return code */ - tRowcnt nEst; /* Number of rows for a single term */ - tRowcnt nRowEst = 0; /* New estimate of the number of rows */ - int i; /* Loop counter */ - - assert( p->aSample!=0 ); - for(i=0; rc==SQLITE_OK && inExpr; i++){ - nEst = nRow0; - rc = whereEqualScanEst(pParse, pBuilder, pList->a[i].pExpr, &nEst); - nRowEst += nEst; - pBuilder->nRecValid = nRecValid; - } - - if( rc==SQLITE_OK ){ - if( nRowEst > nRow0 ) nRowEst = nRow0; - *pnRow = nRowEst; - WHERETRACE(0x10,("IN row estimate: est=%g\n", nRowEst)); - } - assert( pBuilder->nRecValid==nRecValid ); - return rc; -} -#endif /* SQLITE_ENABLE_STAT3_OR_STAT4 */ - -/* -** Disable a term in the WHERE clause. Except, do not disable the term -** if it controls a LEFT OUTER JOIN and it did not originate in the ON -** or USING clause of that join. -** -** Consider the term t2.z='ok' in the following queries: -** -** (1) SELECT * FROM t1 LEFT JOIN t2 ON t1.a=t2.x WHERE t2.z='ok' -** (2) SELECT * FROM t1 LEFT JOIN t2 ON t1.a=t2.x AND t2.z='ok' -** (3) SELECT * FROM t1, t2 WHERE t1.a=t2.x AND t2.z='ok' -** -** The t2.z='ok' is disabled in the in (2) because it originates -** in the ON clause. The term is disabled in (3) because it is not part -** of a LEFT OUTER JOIN. In (1), the term is not disabled. -** -** Disabling a term causes that term to not be tested in the inner loop -** of the join. Disabling is an optimization. When terms are satisfied -** by indices, we disable them to prevent redundant tests in the inner -** loop. We would get the correct results if nothing were ever disabled, -** but joins might run a little slower. The trick is to disable as much -** as we can without disabling too much. If we disabled in (1), we'd get -** the wrong answer. See ticket #813. -*/ -static void disableTerm(WhereLevel *pLevel, WhereTerm *pTerm){ - if( pTerm - && (pTerm->wtFlags & TERM_CODED)==0 - && (pLevel->iLeftJoin==0 || ExprHasProperty(pTerm->pExpr, EP_FromJoin)) - && (pLevel->notReady & pTerm->prereqAll)==0 - ){ - pTerm->wtFlags |= TERM_CODED; - if( pTerm->iParent>=0 ){ - WhereTerm *pOther = &pTerm->pWC->a[pTerm->iParent]; - if( (--pOther->nChild)==0 ){ - disableTerm(pLevel, pOther); - } - } - } -} - -/* -** Code an OP_Affinity opcode to apply the column affinity string zAff -** to the n registers starting at base. -** -** As an optimization, SQLITE_AFF_NONE entries (which are no-ops) at the -** beginning and end of zAff are ignored. If all entries in zAff are -** SQLITE_AFF_NONE, then no code gets generated. -** -** This routine makes its own copy of zAff so that the caller is free -** to modify zAff after this routine returns. -*/ -static void codeApplyAffinity(Parse *pParse, int base, int n, char *zAff){ - Vdbe *v = pParse->pVdbe; - if( zAff==0 ){ - assert( pParse->db->mallocFailed ); - return; - } - assert( v!=0 ); - - /* Adjust base and n to skip over SQLITE_AFF_NONE entries at the beginning - ** and end of the affinity string. 
- */ - while( n>0 && zAff[0]==SQLITE_AFF_NONE ){ - n--; - base++; - zAff++; - } - while( n>1 && zAff[n-1]==SQLITE_AFF_NONE ){ - n--; - } - - /* Code the OP_Affinity opcode if there is anything left to do. */ - if( n>0 ){ - sqlite3VdbeAddOp2(v, OP_Affinity, base, n); - sqlite3VdbeChangeP4(v, -1, zAff, n); - sqlite3ExprCacheAffinityChange(pParse, base, n); - } -} - - -/* -** Generate code for a single equality term of the WHERE clause. An equality -** term can be either X=expr or X IN (...). pTerm is the term to be -** coded. -** -** The current value for the constraint is left in register iReg. -** -** For a constraint of the form X=expr, the expression is evaluated and its -** result is left on the stack. For constraints of the form X IN (...) -** this routine sets up a loop that will iterate over all values of X. -*/ -static int codeEqualityTerm( - Parse *pParse, /* The parsing context */ - WhereTerm *pTerm, /* The term of the WHERE clause to be coded */ - WhereLevel *pLevel, /* The level of the FROM clause we are working on */ - int iEq, /* Index of the equality term within this level */ - int bRev, /* True for reverse-order IN operations */ - int iTarget /* Attempt to leave results in this register */ -){ - Expr *pX = pTerm->pExpr; - Vdbe *v = pParse->pVdbe; - int iReg; /* Register holding results */ - - assert( iTarget>0 ); - if( pX->op==TK_EQ ){ - iReg = sqlite3ExprCodeTarget(pParse, pX->pRight, iTarget); - }else if( pX->op==TK_ISNULL ){ - iReg = iTarget; - sqlite3VdbeAddOp2(v, OP_Null, 0, iReg); -#ifndef SQLITE_OMIT_SUBQUERY - }else{ - int eType; - int iTab; - struct InLoop *pIn; - WhereLoop *pLoop = pLevel->pWLoop; - - if( (pLoop->wsFlags & WHERE_VIRTUALTABLE)==0 - && pLoop->u.btree.pIndex!=0 - && pLoop->u.btree.pIndex->aSortOrder[iEq] - ){ - testcase( iEq==0 ); - testcase( bRev ); - bRev = !bRev; - } - assert( pX->op==TK_IN ); - iReg = iTarget; - eType = sqlite3FindInIndex(pParse, pX, 0); - if( eType==IN_INDEX_INDEX_DESC ){ - testcase( bRev ); - bRev = !bRev; - } - iTab = pX->iTable; - sqlite3VdbeAddOp2(v, bRev ? OP_Last : OP_Rewind, iTab, 0); - VdbeCoverageIf(v, bRev); - VdbeCoverageIf(v, !bRev); - assert( (pLoop->wsFlags & WHERE_MULTI_OR)==0 ); - pLoop->wsFlags |= WHERE_IN_ABLE; - if( pLevel->u.in.nIn==0 ){ - pLevel->addrNxt = sqlite3VdbeMakeLabel(v); - } - pLevel->u.in.nIn++; - pLevel->u.in.aInLoop = - sqlite3DbReallocOrFree(pParse->db, pLevel->u.in.aInLoop, - sizeof(pLevel->u.in.aInLoop[0])*pLevel->u.in.nIn); - pIn = pLevel->u.in.aInLoop; - if( pIn ){ - pIn += pLevel->u.in.nIn - 1; - pIn->iCur = iTab; - if( eType==IN_INDEX_ROWID ){ - pIn->addrInTop = sqlite3VdbeAddOp2(v, OP_Rowid, iTab, iReg); - }else{ - pIn->addrInTop = sqlite3VdbeAddOp3(v, OP_Column, iTab, 0, iReg); - } - pIn->eEndLoopOp = bRev ? OP_PrevIfOpen : OP_NextIfOpen; - sqlite3VdbeAddOp1(v, OP_IsNull, iReg); VdbeCoverage(v); - }else{ - pLevel->u.in.nIn = 0; - } -#endif - } - disableTerm(pLevel, pTerm); - return iReg; -} - -/* -** Generate code that will evaluate all == and IN constraints for an -** index scan. -** -** For example, consider table t1(a,b,c,d,e,f) with index i1(a,b,c). -** Suppose the WHERE clause is this: a==5 AND b IN (1,2,3) AND c>5 AND c<10 -** The index has as many as three equality constraints, but in this -** example, the third "c" value is an inequality. So only two -** constraints are coded. This routine will generate code to evaluate -** a==5 and b IN (1,2,3). The current values for a and b will be stored -** in consecutive registers and the index of the first register is returned. 
-** -** In the example above nEq==2. But this subroutine works for any value -** of nEq including 0. If nEq==0, this routine is nearly a no-op. -** The only thing it does is allocate the pLevel->iMem memory cell and -** compute the affinity string. -** -** The nExtraReg parameter is 0 or 1. It is 0 if all WHERE clause constraints -** are == or IN and are covered by the nEq. nExtraReg is 1 if there is -** an inequality constraint (such as the "c>=5 AND c<10" in the example) that -** occurs after the nEq quality constraints. -** -** This routine allocates a range of nEq+nExtraReg memory cells and returns -** the index of the first memory cell in that range. The code that -** calls this routine will use that memory range to store keys for -** start and termination conditions of the loop. -** key value of the loop. If one or more IN operators appear, then -** this routine allocates an additional nEq memory cells for internal -** use. -** -** Before returning, *pzAff is set to point to a buffer containing a -** copy of the column affinity string of the index allocated using -** sqlite3DbMalloc(). Except, entries in the copy of the string associated -** with equality constraints that use NONE affinity are set to -** SQLITE_AFF_NONE. This is to deal with SQL such as the following: -** -** CREATE TABLE t1(a TEXT PRIMARY KEY, b); -** SELECT ... FROM t1 AS t2, t1 WHERE t1.a = t2.b; -** -** In the example above, the index on t1(a) has TEXT affinity. But since -** the right hand side of the equality constraint (t2.b) has NONE affinity, -** no conversion should be attempted before using a t2.b value as part of -** a key to search the index. Hence the first byte in the returned affinity -** string in this example would be set to SQLITE_AFF_NONE. -*/ -static int codeAllEqualityTerms( - Parse *pParse, /* Parsing context */ - WhereLevel *pLevel, /* Which nested loop of the FROM we are coding */ - int bRev, /* Reverse the order of IN operators */ - int nExtraReg, /* Number of extra registers to allocate */ - char **pzAff /* OUT: Set to point to affinity string */ -){ - u16 nEq; /* The number of == or IN constraints to code */ - u16 nSkip; /* Number of left-most columns to skip */ - Vdbe *v = pParse->pVdbe; /* The vm under construction */ - Index *pIdx; /* The index being used for this loop */ - WhereTerm *pTerm; /* A single constraint term */ - WhereLoop *pLoop; /* The WhereLoop object */ - int j; /* Loop counter */ - int regBase; /* Base register */ - int nReg; /* Number of registers to allocate */ - char *zAff; /* Affinity string to return */ - - /* This module is only called on query plans that use an index. */ - pLoop = pLevel->pWLoop; - assert( (pLoop->wsFlags & WHERE_VIRTUALTABLE)==0 ); - nEq = pLoop->u.btree.nEq; - nSkip = pLoop->u.btree.nSkip; - pIdx = pLoop->u.btree.pIndex; - assert( pIdx!=0 ); - - /* Figure out how many memory cells we will need then allocate them. 
- */ - regBase = pParse->nMem + 1; - nReg = pLoop->u.btree.nEq + nExtraReg; - pParse->nMem += nReg; - - zAff = sqlite3DbStrDup(pParse->db, sqlite3IndexAffinityStr(v, pIdx)); - if( !zAff ){ - pParse->db->mallocFailed = 1; - } - - if( nSkip ){ - int iIdxCur = pLevel->iIdxCur; - sqlite3VdbeAddOp1(v, (bRev?OP_Last:OP_Rewind), iIdxCur); - VdbeCoverageIf(v, bRev==0); - VdbeCoverageIf(v, bRev!=0); - VdbeComment((v, "begin skip-scan on %s", pIdx->zName)); - j = sqlite3VdbeAddOp0(v, OP_Goto); - pLevel->addrSkip = sqlite3VdbeAddOp4Int(v, (bRev?OP_SeekLT:OP_SeekGT), - iIdxCur, 0, regBase, nSkip); - VdbeCoverageIf(v, bRev==0); - VdbeCoverageIf(v, bRev!=0); - sqlite3VdbeJumpHere(v, j); - for(j=0; jaiColumn[j]>=0 ); - VdbeComment((v, "%s", pIdx->pTable->aCol[pIdx->aiColumn[j]].zName)); - } - } - - /* Evaluate the equality constraints - */ - assert( zAff==0 || (int)strlen(zAff)>=nEq ); - for(j=nSkip; jaLTerm[j]; - assert( pTerm!=0 ); - /* The following testcase is true for indices with redundant columns. - ** Ex: CREATE INDEX i1 ON t1(a,b,a); SELECT * FROM t1 WHERE a=0 AND b=0; */ - testcase( (pTerm->wtFlags & TERM_CODED)!=0 ); - testcase( pTerm->wtFlags & TERM_VIRTUAL ); - r1 = codeEqualityTerm(pParse, pTerm, pLevel, j, bRev, regBase+j); - if( r1!=regBase+j ){ - if( nReg==1 ){ - sqlite3ReleaseTempReg(pParse, regBase); - regBase = r1; - }else{ - sqlite3VdbeAddOp2(v, OP_SCopy, r1, regBase+j); - } - } - testcase( pTerm->eOperator & WO_ISNULL ); - testcase( pTerm->eOperator & WO_IN ); - if( (pTerm->eOperator & (WO_ISNULL|WO_IN))==0 ){ - Expr *pRight = pTerm->pExpr->pRight; - if( sqlite3ExprCanBeNull(pRight) ){ - sqlite3VdbeAddOp2(v, OP_IsNull, regBase+j, pLevel->addrBrk); - VdbeCoverage(v); - } - if( zAff ){ - if( sqlite3CompareAffinity(pRight, zAff[j])==SQLITE_AFF_NONE ){ - zAff[j] = SQLITE_AFF_NONE; - } - if( sqlite3ExprNeedsNoAffinityChange(pRight, zAff[j]) ){ - zAff[j] = SQLITE_AFF_NONE; - } - } - } - } - *pzAff = zAff; - return regBase; -} - -#ifndef SQLITE_OMIT_EXPLAIN -/* -** This routine is a helper for explainIndexRange() below -** -** pStr holds the text of an expression that we are building up one term -** at a time. This routine adds a new term to the end of the expression. -** Terms are separated by AND so add the "AND" text for second and subsequent -** terms only. -*/ -static void explainAppendTerm( - StrAccum *pStr, /* The text expression being built */ - int iTerm, /* Index of this term. First is zero */ - const char *zColumn, /* Name of the column */ - const char *zOp /* Name of the operator */ -){ - if( iTerm ) sqlite3StrAccumAppend(pStr, " AND ", 5); - sqlite3StrAccumAppendAll(pStr, zColumn); - sqlite3StrAccumAppend(pStr, zOp, 1); - sqlite3StrAccumAppend(pStr, "?", 1); -} - -/* -** Argument pLevel describes a strategy for scanning table pTab. This -** function returns a pointer to a string buffer containing a description -** of the subset of table rows scanned by the strategy in the form of an -** SQL expression. Or, if all rows are scanned, NULL is returned. -** -** For example, if the query: -** -** SELECT * FROM t1 WHERE a=1 AND b>2; -** -** is run and there is an index on (a, b), then this function returns a -** string similar to: -** -** "a=? AND b>?" -** -** The returned pointer points to memory obtained from sqlite3DbMalloc(). -** It is the responsibility of the caller to free the buffer when it is -** no longer required. 
-*/ -static char *explainIndexRange(sqlite3 *db, WhereLoop *pLoop, Table *pTab){ - Index *pIndex = pLoop->u.btree.pIndex; - u16 nEq = pLoop->u.btree.nEq; - u16 nSkip = pLoop->u.btree.nSkip; - int i, j; - Column *aCol = pTab->aCol; - i16 *aiColumn = pIndex->aiColumn; - StrAccum txt; - - if( nEq==0 && (pLoop->wsFlags & (WHERE_BTM_LIMIT|WHERE_TOP_LIMIT))==0 ){ - return 0; - } - sqlite3StrAccumInit(&txt, 0, 0, SQLITE_MAX_LENGTH); - txt.db = db; - sqlite3StrAccumAppend(&txt, " (", 2); - for(i=0; inKeyCol ) ? "rowid" : aCol[aiColumn[i]].zName; - if( i>=nSkip ){ - explainAppendTerm(&txt, i, z, "="); - }else{ - if( i ) sqlite3StrAccumAppend(&txt, " AND ", 5); - sqlite3StrAccumAppend(&txt, "ANY(", 4); - sqlite3StrAccumAppendAll(&txt, z); - sqlite3StrAccumAppend(&txt, ")", 1); - } - } - - j = i; - if( pLoop->wsFlags&WHERE_BTM_LIMIT ){ - char *z = (j==pIndex->nKeyCol ) ? "rowid" : aCol[aiColumn[j]].zName; - explainAppendTerm(&txt, i++, z, ">"); - } - if( pLoop->wsFlags&WHERE_TOP_LIMIT ){ - char *z = (j==pIndex->nKeyCol ) ? "rowid" : aCol[aiColumn[j]].zName; - explainAppendTerm(&txt, i, z, "<"); - } - sqlite3StrAccumAppend(&txt, ")", 1); - return sqlite3StrAccumFinish(&txt); -} - -/* -** This function is a no-op unless currently processing an EXPLAIN QUERY PLAN -** command. If the query being compiled is an EXPLAIN QUERY PLAN, a single -** record is added to the output to describe the table scan strategy in -** pLevel. -*/ -static void explainOneScan( - Parse *pParse, /* Parse context */ - SrcList *pTabList, /* Table list this loop refers to */ - WhereLevel *pLevel, /* Scan to write OP_Explain opcode for */ - int iLevel, /* Value for "level" column of output */ - int iFrom, /* Value for "from" column of output */ - u16 wctrlFlags /* Flags passed to sqlite3WhereBegin() */ -){ -#ifndef SQLITE_DEBUG - if( pParse->explain==2 ) -#endif - { - struct SrcList_item *pItem = &pTabList->a[pLevel->iFrom]; - Vdbe *v = pParse->pVdbe; /* VM being constructed */ - sqlite3 *db = pParse->db; /* Database handle */ - char *zMsg; /* Text to add to EQP output */ - int iId = pParse->iSelectId; /* Select id (left-most output column) */ - int isSearch; /* True for a SEARCH. False for SCAN. */ - WhereLoop *pLoop; /* The controlling WhereLoop object */ - u32 flags; /* Flags that describe this loop */ - - pLoop = pLevel->pWLoop; - flags = pLoop->wsFlags; - if( (flags&WHERE_MULTI_OR) || (wctrlFlags&WHERE_ONETABLE_ONLY) ) return; - - isSearch = (flags&(WHERE_BTM_LIMIT|WHERE_TOP_LIMIT))!=0 - || ((flags&WHERE_VIRTUALTABLE)==0 && (pLoop->u.btree.nEq>0)) - || (wctrlFlags&(WHERE_ORDERBY_MIN|WHERE_ORDERBY_MAX)); - - zMsg = sqlite3MPrintf(db, "%s", isSearch?"SEARCH":"SCAN"); - if( pItem->pSelect ){ - zMsg = sqlite3MAppendf(db, zMsg, "%s SUBQUERY %d", zMsg,pItem->iSelectId); - }else{ - zMsg = sqlite3MAppendf(db, zMsg, "%s TABLE %s", zMsg, pItem->zName); - } - - if( pItem->zAlias ){ - zMsg = sqlite3MAppendf(db, zMsg, "%s AS %s", zMsg, pItem->zAlias); - } - if( (flags & (WHERE_IPK|WHERE_VIRTUALTABLE))==0 - && ALWAYS(pLoop->u.btree.pIndex!=0) - ){ - const char *zFmt; - Index *pIdx = pLoop->u.btree.pIndex; - char *zWhere = explainIndexRange(db, pLoop, pItem->pTab); - assert( !(flags&WHERE_AUTO_INDEX) || (flags&WHERE_IDX_ONLY) ); - if( !HasRowid(pItem->pTab) && IsPrimaryKeyIndex(pIdx) ){ - zFmt = zWhere ? 
"%s USING PRIMARY KEY%.0s%s" : "%s%.0s%s"; - }else if( flags & WHERE_AUTO_INDEX ){ - zFmt = "%s USING AUTOMATIC COVERING INDEX%.0s%s"; - }else if( flags & WHERE_IDX_ONLY ){ - zFmt = "%s USING COVERING INDEX %s%s"; - }else{ - zFmt = "%s USING INDEX %s%s"; - } - zMsg = sqlite3MAppendf(db, zMsg, zFmt, zMsg, pIdx->zName, zWhere); - sqlite3DbFree(db, zWhere); - }else if( (flags & WHERE_IPK)!=0 && (flags & WHERE_CONSTRAINT)!=0 ){ - zMsg = sqlite3MAppendf(db, zMsg, "%s USING INTEGER PRIMARY KEY", zMsg); - - if( flags&(WHERE_COLUMN_EQ|WHERE_COLUMN_IN) ){ - zMsg = sqlite3MAppendf(db, zMsg, "%s (rowid=?)", zMsg); - }else if( (flags&WHERE_BOTH_LIMIT)==WHERE_BOTH_LIMIT ){ - zMsg = sqlite3MAppendf(db, zMsg, "%s (rowid>? AND rowid?)", zMsg); - }else if( ALWAYS(flags&WHERE_TOP_LIMIT) ){ - zMsg = sqlite3MAppendf(db, zMsg, "%s (rowidu.vtab.idxNum, pLoop->u.vtab.idxStr); - } -#endif - zMsg = sqlite3MAppendf(db, zMsg, "%s", zMsg); - sqlite3VdbeAddOp4(v, OP_Explain, iId, iLevel, iFrom, zMsg, P4_DYNAMIC); - } -} -#else -# define explainOneScan(u,v,w,x,y,z) -#endif /* SQLITE_OMIT_EXPLAIN */ - - -/* -** Generate code for the start of the iLevel-th loop in the WHERE clause -** implementation described by pWInfo. -*/ -static Bitmask codeOneLoopStart( - WhereInfo *pWInfo, /* Complete information about the WHERE clause */ - int iLevel, /* Which level of pWInfo->a[] should be coded */ - Bitmask notReady /* Which tables are currently available */ -){ - int j, k; /* Loop counters */ - int iCur; /* The VDBE cursor for the table */ - int addrNxt; /* Where to jump to continue with the next IN case */ - int omitTable; /* True if we use the index only */ - int bRev; /* True if we need to scan in reverse order */ - WhereLevel *pLevel; /* The where level to be coded */ - WhereLoop *pLoop; /* The WhereLoop object being coded */ - WhereClause *pWC; /* Decomposition of the entire WHERE clause */ - WhereTerm *pTerm; /* A WHERE clause term */ - Parse *pParse; /* Parsing context */ - sqlite3 *db; /* Database connection */ - Vdbe *v; /* The prepared stmt under constructions */ - struct SrcList_item *pTabItem; /* FROM clause term being coded */ - int addrBrk; /* Jump here to break out of the loop */ - int addrCont; /* Jump here to continue with next cycle */ - int iRowidReg = 0; /* Rowid is stored in this register, if not zero */ - int iReleaseReg = 0; /* Temp register to free before returning */ - - pParse = pWInfo->pParse; - v = pParse->pVdbe; - pWC = &pWInfo->sWC; - db = pParse->db; - pLevel = &pWInfo->a[iLevel]; - pLoop = pLevel->pWLoop; - pTabItem = &pWInfo->pTabList->a[pLevel->iFrom]; - iCur = pTabItem->iCursor; - pLevel->notReady = notReady & ~getMask(&pWInfo->sMaskSet, iCur); - bRev = (pWInfo->revMask>>iLevel)&1; - omitTable = (pLoop->wsFlags & WHERE_IDX_ONLY)!=0 - && (pWInfo->wctrlFlags & WHERE_FORCE_TABLE)==0; - VdbeModuleComment((v, "Begin WHERE-loop%d: %s",iLevel,pTabItem->pTab->zName)); - - /* Create labels for the "break" and "continue" instructions - ** for the current loop. Jump to addrBrk to break out of a loop. - ** Jump to cont to go immediately to the next iteration of the - ** loop. - ** - ** When there is an IN operator, we also have a "addrNxt" label that - ** means to continue with the next IN value combination. When - ** there are no IN operators in the constraints, the "addrNxt" label - ** is the same as "addrBrk". 
- */ - addrBrk = pLevel->addrBrk = pLevel->addrNxt = sqlite3VdbeMakeLabel(v); - addrCont = pLevel->addrCont = sqlite3VdbeMakeLabel(v); - - /* If this is the right table of a LEFT OUTER JOIN, allocate and - ** initialize a memory cell that records if this table matches any - ** row of the left table of the join. - */ - if( pLevel->iFrom>0 && (pTabItem[0].jointype & JT_LEFT)!=0 ){ - pLevel->iLeftJoin = ++pParse->nMem; - sqlite3VdbeAddOp2(v, OP_Integer, 0, pLevel->iLeftJoin); - VdbeComment((v, "init LEFT JOIN no-match flag")); - } - - /* Special case of a FROM clause subquery implemented as a co-routine */ - if( pTabItem->viaCoroutine ){ - int regYield = pTabItem->regReturn; - sqlite3VdbeAddOp3(v, OP_InitCoroutine, regYield, 0, pTabItem->addrFillSub); - pLevel->p2 = sqlite3VdbeAddOp2(v, OP_Yield, regYield, addrBrk); - VdbeCoverage(v); - VdbeComment((v, "next row of \"%s\"", pTabItem->pTab->zName)); - pLevel->op = OP_Goto; - }else - -#ifndef SQLITE_OMIT_VIRTUALTABLE - if( (pLoop->wsFlags & WHERE_VIRTUALTABLE)!=0 ){ - /* Case 1: The table is a virtual-table. Use the VFilter and VNext - ** to access the data. - */ - int iReg; /* P3 Value for OP_VFilter */ - int addrNotFound; - int nConstraint = pLoop->nLTerm; - - sqlite3ExprCachePush(pParse); - iReg = sqlite3GetTempRange(pParse, nConstraint+2); - addrNotFound = pLevel->addrBrk; - for(j=0; jaLTerm[j]; - if( pTerm==0 ) continue; - if( pTerm->eOperator & WO_IN ){ - codeEqualityTerm(pParse, pTerm, pLevel, j, bRev, iTarget); - addrNotFound = pLevel->addrNxt; - }else{ - sqlite3ExprCode(pParse, pTerm->pExpr->pRight, iTarget); - } - } - sqlite3VdbeAddOp2(v, OP_Integer, pLoop->u.vtab.idxNum, iReg); - sqlite3VdbeAddOp2(v, OP_Integer, nConstraint, iReg+1); - sqlite3VdbeAddOp4(v, OP_VFilter, iCur, addrNotFound, iReg, - pLoop->u.vtab.idxStr, - pLoop->u.vtab.needFree ? P4_MPRINTF : P4_STATIC); - VdbeCoverage(v); - pLoop->u.vtab.needFree = 0; - for(j=0; ju.vtab.omitMask>>j)&1 ){ - disableTerm(pLevel, pLoop->aLTerm[j]); - } - } - pLevel->op = OP_VNext; - pLevel->p1 = iCur; - pLevel->p2 = sqlite3VdbeCurrentAddr(v); - sqlite3ReleaseTempRange(pParse, iReg, nConstraint+2); - sqlite3ExprCachePop(pParse); - }else -#endif /* SQLITE_OMIT_VIRTUALTABLE */ - - if( (pLoop->wsFlags & WHERE_IPK)!=0 - && (pLoop->wsFlags & (WHERE_COLUMN_IN|WHERE_COLUMN_EQ))!=0 - ){ - /* Case 2: We can directly reference a single row using an - ** equality comparison against the ROWID field. Or - ** we reference multiple rows using a "rowid IN (...)" - ** construct. - */ - assert( pLoop->u.btree.nEq==1 ); - pTerm = pLoop->aLTerm[0]; - assert( pTerm!=0 ); - assert( pTerm->pExpr!=0 ); - assert( omitTable==0 ); - testcase( pTerm->wtFlags & TERM_VIRTUAL ); - iReleaseReg = ++pParse->nMem; - iRowidReg = codeEqualityTerm(pParse, pTerm, pLevel, 0, bRev, iReleaseReg); - if( iRowidReg!=iReleaseReg ) sqlite3ReleaseTempReg(pParse, iReleaseReg); - addrNxt = pLevel->addrNxt; - sqlite3VdbeAddOp2(v, OP_MustBeInt, iRowidReg, addrNxt); VdbeCoverage(v); - sqlite3VdbeAddOp3(v, OP_NotExists, iCur, addrNxt, iRowidReg); - VdbeCoverage(v); - sqlite3ExprCacheAffinityChange(pParse, iRowidReg, 1); - sqlite3ExprCacheStore(pParse, iCur, -1, iRowidReg); - VdbeComment((v, "pk")); - pLevel->op = OP_Noop; - }else if( (pLoop->wsFlags & WHERE_IPK)!=0 - && (pLoop->wsFlags & WHERE_COLUMN_RANGE)!=0 - ){ - /* Case 3: We have an inequality comparison against the ROWID field. 
- */ - int testOp = OP_Noop; - int start; - int memEndValue = 0; - WhereTerm *pStart, *pEnd; - - assert( omitTable==0 ); - j = 0; - pStart = pEnd = 0; - if( pLoop->wsFlags & WHERE_BTM_LIMIT ) pStart = pLoop->aLTerm[j++]; - if( pLoop->wsFlags & WHERE_TOP_LIMIT ) pEnd = pLoop->aLTerm[j++]; - assert( pStart!=0 || pEnd!=0 ); - if( bRev ){ - pTerm = pStart; - pStart = pEnd; - pEnd = pTerm; - } - if( pStart ){ - Expr *pX; /* The expression that defines the start bound */ - int r1, rTemp; /* Registers for holding the start boundary */ - - /* The following constant maps TK_xx codes into corresponding - ** seek opcodes. It depends on a particular ordering of TK_xx - */ - const u8 aMoveOp[] = { - /* TK_GT */ OP_SeekGT, - /* TK_LE */ OP_SeekLE, - /* TK_LT */ OP_SeekLT, - /* TK_GE */ OP_SeekGE - }; - assert( TK_LE==TK_GT+1 ); /* Make sure the ordering.. */ - assert( TK_LT==TK_GT+2 ); /* ... of the TK_xx values... */ - assert( TK_GE==TK_GT+3 ); /* ... is correcct. */ - - assert( (pStart->wtFlags & TERM_VNULL)==0 ); - testcase( pStart->wtFlags & TERM_VIRTUAL ); - pX = pStart->pExpr; - assert( pX!=0 ); - testcase( pStart->leftCursor!=iCur ); /* transitive constraints */ - r1 = sqlite3ExprCodeTemp(pParse, pX->pRight, &rTemp); - sqlite3VdbeAddOp3(v, aMoveOp[pX->op-TK_GT], iCur, addrBrk, r1); - VdbeComment((v, "pk")); - VdbeCoverageIf(v, pX->op==TK_GT); - VdbeCoverageIf(v, pX->op==TK_LE); - VdbeCoverageIf(v, pX->op==TK_LT); - VdbeCoverageIf(v, pX->op==TK_GE); - sqlite3ExprCacheAffinityChange(pParse, r1, 1); - sqlite3ReleaseTempReg(pParse, rTemp); - disableTerm(pLevel, pStart); - }else{ - sqlite3VdbeAddOp2(v, bRev ? OP_Last : OP_Rewind, iCur, addrBrk); - VdbeCoverageIf(v, bRev==0); - VdbeCoverageIf(v, bRev!=0); - } - if( pEnd ){ - Expr *pX; - pX = pEnd->pExpr; - assert( pX!=0 ); - assert( (pEnd->wtFlags & TERM_VNULL)==0 ); - testcase( pEnd->leftCursor!=iCur ); /* Transitive constraints */ - testcase( pEnd->wtFlags & TERM_VIRTUAL ); - memEndValue = ++pParse->nMem; - sqlite3ExprCode(pParse, pX->pRight, memEndValue); - if( pX->op==TK_LT || pX->op==TK_GT ){ - testOp = bRev ? OP_Le : OP_Ge; - }else{ - testOp = bRev ? OP_Lt : OP_Gt; - } - disableTerm(pLevel, pEnd); - } - start = sqlite3VdbeCurrentAddr(v); - pLevel->op = bRev ? OP_Prev : OP_Next; - pLevel->p1 = iCur; - pLevel->p2 = start; - assert( pLevel->p5==0 ); - if( testOp!=OP_Noop ){ - iRowidReg = ++pParse->nMem; - sqlite3VdbeAddOp2(v, OP_Rowid, iCur, iRowidReg); - sqlite3ExprCacheStore(pParse, iCur, -1, iRowidReg); - sqlite3VdbeAddOp3(v, testOp, memEndValue, addrBrk, iRowidReg); - VdbeCoverageIf(v, testOp==OP_Le); - VdbeCoverageIf(v, testOp==OP_Lt); - VdbeCoverageIf(v, testOp==OP_Ge); - VdbeCoverageIf(v, testOp==OP_Gt); - sqlite3VdbeChangeP5(v, SQLITE_AFF_NUMERIC | SQLITE_JUMPIFNULL); - } - }else if( pLoop->wsFlags & WHERE_INDEXED ){ - /* Case 4: A scan using an index. - ** - ** The WHERE clause may contain zero or more equality - ** terms ("==" or "IN" operators) that refer to the N - ** left-most columns of the index. It may also contain - ** inequality constraints (>, <, >= or <=) on the indexed - ** column that immediately follows the N equalities. Only - ** the right-most column can be an inequality - the rest must - ** use the "==" and "IN" operators. 
For example, if the - ** index is on (x,y,z), then the following clauses are all - ** optimized: - ** - ** x=5 - ** x=5 AND y=10 - ** x=5 AND y<10 - ** x=5 AND y>5 AND y<10 - ** x=5 AND y=5 AND z<=10 - ** - ** The z<10 term of the following cannot be used, only - ** the x=5 term: - ** - ** x=5 AND z<10 - ** - ** N may be zero if there are inequality constraints. - ** If there are no inequality constraints, then N is at - ** least one. - ** - ** This case is also used when there are no WHERE clause - ** constraints but an index is selected anyway, in order - ** to force the output order to conform to an ORDER BY. - */ - static const u8 aStartOp[] = { - 0, - 0, - OP_Rewind, /* 2: (!start_constraints && startEq && !bRev) */ - OP_Last, /* 3: (!start_constraints && startEq && bRev) */ - OP_SeekGT, /* 4: (start_constraints && !startEq && !bRev) */ - OP_SeekLT, /* 5: (start_constraints && !startEq && bRev) */ - OP_SeekGE, /* 6: (start_constraints && startEq && !bRev) */ - OP_SeekLE /* 7: (start_constraints && startEq && bRev) */ - }; - static const u8 aEndOp[] = { - OP_IdxGE, /* 0: (end_constraints && !bRev && !endEq) */ - OP_IdxGT, /* 1: (end_constraints && !bRev && endEq) */ - OP_IdxLE, /* 2: (end_constraints && bRev && !endEq) */ - OP_IdxLT, /* 3: (end_constraints && bRev && endEq) */ - }; - u16 nEq = pLoop->u.btree.nEq; /* Number of == or IN terms */ - int regBase; /* Base register holding constraint values */ - WhereTerm *pRangeStart = 0; /* Inequality constraint at range start */ - WhereTerm *pRangeEnd = 0; /* Inequality constraint at range end */ - int startEq; /* True if range start uses ==, >= or <= */ - int endEq; /* True if range end uses ==, >= or <= */ - int start_constraints; /* Start of range is constrained */ - int nConstraint; /* Number of constraint terms */ - Index *pIdx; /* The index we will be using */ - int iIdxCur; /* The VDBE cursor for the index */ - int nExtraReg = 0; /* Number of extra registers needed */ - int op; /* Instruction opcode */ - char *zStartAff; /* Affinity for start of range constraint */ - char cEndAff = 0; /* Affinity for end of range constraint */ - u8 bSeekPastNull = 0; /* True to seek past initial nulls */ - u8 bStopAtNull = 0; /* Add condition to terminate at NULLs */ - - pIdx = pLoop->u.btree.pIndex; - iIdxCur = pLevel->iIdxCur; - assert( nEq>=pLoop->u.btree.nSkip ); - - /* If this loop satisfies a sort order (pOrderBy) request that - ** was passed to this function to implement a "SELECT min(x) ..." - ** query, then the caller will only allow the loop to run for - ** a single iteration. This means that the first row returned - ** should not have a NULL value stored in 'x'. If column 'x' is - ** the first one after the nEq equality constraints in the index, - ** this requires some special handling. - */ - assert( pWInfo->pOrderBy==0 - || pWInfo->pOrderBy->nExpr==1 - || (pWInfo->wctrlFlags&WHERE_ORDERBY_MIN)==0 ); - if( (pWInfo->wctrlFlags&WHERE_ORDERBY_MIN)!=0 - && pWInfo->nOBSat>0 - && (pIdx->nKeyCol>nEq) - ){ - assert( pLoop->u.btree.nSkip==0 ); - bSeekPastNull = 1; - nExtraReg = 1; - } - - /* Find any inequality constraint terms for the start and end - ** of the range. 
- */ - j = nEq; - if( pLoop->wsFlags & WHERE_BTM_LIMIT ){ - pRangeStart = pLoop->aLTerm[j++]; - nExtraReg = 1; - } - if( pLoop->wsFlags & WHERE_TOP_LIMIT ){ - pRangeEnd = pLoop->aLTerm[j++]; - nExtraReg = 1; - if( pRangeStart==0 - && (j = pIdx->aiColumn[nEq])>=0 - && pIdx->pTable->aCol[j].notNull==0 - ){ - bSeekPastNull = 1; - } - } - assert( pRangeEnd==0 || (pRangeEnd->wtFlags & TERM_VNULL)==0 ); - - /* Generate code to evaluate all constraint terms using == or IN - ** and store the values of those terms in an array of registers - ** starting at regBase. - */ - regBase = codeAllEqualityTerms(pParse,pLevel,bRev,nExtraReg,&zStartAff); - assert( zStartAff==0 || sqlite3Strlen30(zStartAff)>=nEq ); - if( zStartAff ) cEndAff = zStartAff[nEq]; - addrNxt = pLevel->addrNxt; - - /* If we are doing a reverse order scan on an ascending index, or - ** a forward order scan on a descending index, interchange the - ** start and end terms (pRangeStart and pRangeEnd). - */ - if( (nEqnKeyCol && bRev==(pIdx->aSortOrder[nEq]==SQLITE_SO_ASC)) - || (bRev && pIdx->nKeyCol==nEq) - ){ - SWAP(WhereTerm *, pRangeEnd, pRangeStart); - SWAP(u8, bSeekPastNull, bStopAtNull); - } - - testcase( pRangeStart && (pRangeStart->eOperator & WO_LE)!=0 ); - testcase( pRangeStart && (pRangeStart->eOperator & WO_GE)!=0 ); - testcase( pRangeEnd && (pRangeEnd->eOperator & WO_LE)!=0 ); - testcase( pRangeEnd && (pRangeEnd->eOperator & WO_GE)!=0 ); - startEq = !pRangeStart || pRangeStart->eOperator & (WO_LE|WO_GE); - endEq = !pRangeEnd || pRangeEnd->eOperator & (WO_LE|WO_GE); - start_constraints = pRangeStart || nEq>0; - - /* Seek the index cursor to the start of the range. */ - nConstraint = nEq; - if( pRangeStart ){ - Expr *pRight = pRangeStart->pExpr->pRight; - sqlite3ExprCode(pParse, pRight, regBase+nEq); - if( (pRangeStart->wtFlags & TERM_VNULL)==0 - && sqlite3ExprCanBeNull(pRight) - ){ - sqlite3VdbeAddOp2(v, OP_IsNull, regBase+nEq, addrNxt); - VdbeCoverage(v); - } - if( zStartAff ){ - if( sqlite3CompareAffinity(pRight, zStartAff[nEq])==SQLITE_AFF_NONE){ - /* Since the comparison is to be performed with no conversions - ** applied to the operands, set the affinity to apply to pRight to - ** SQLITE_AFF_NONE. */ - zStartAff[nEq] = SQLITE_AFF_NONE; - } - if( sqlite3ExprNeedsNoAffinityChange(pRight, zStartAff[nEq]) ){ - zStartAff[nEq] = SQLITE_AFF_NONE; - } - } - nConstraint++; - testcase( pRangeStart->wtFlags & TERM_VIRTUAL ); - }else if( bSeekPastNull ){ - sqlite3VdbeAddOp2(v, OP_Null, 0, regBase+nEq); - nConstraint++; - startEq = 0; - start_constraints = 1; - } - codeApplyAffinity(pParse, regBase, nConstraint - bSeekPastNull, zStartAff); - op = aStartOp[(start_constraints<<2) + (startEq<<1) + bRev]; - assert( op!=0 ); - sqlite3VdbeAddOp4Int(v, op, iIdxCur, addrNxt, regBase, nConstraint); - VdbeCoverage(v); - VdbeCoverageIf(v, op==OP_Rewind); testcase( op==OP_Rewind ); - VdbeCoverageIf(v, op==OP_Last); testcase( op==OP_Last ); - VdbeCoverageIf(v, op==OP_SeekGT); testcase( op==OP_SeekGT ); - VdbeCoverageIf(v, op==OP_SeekGE); testcase( op==OP_SeekGE ); - VdbeCoverageIf(v, op==OP_SeekLE); testcase( op==OP_SeekLE ); - VdbeCoverageIf(v, op==OP_SeekLT); testcase( op==OP_SeekLT ); - - /* Load the value for the inequality constraint at the end of the - ** range (if any). 
- */ - nConstraint = nEq; - if( pRangeEnd ){ - Expr *pRight = pRangeEnd->pExpr->pRight; - sqlite3ExprCacheRemove(pParse, regBase+nEq, 1); - sqlite3ExprCode(pParse, pRight, regBase+nEq); - if( (pRangeEnd->wtFlags & TERM_VNULL)==0 - && sqlite3ExprCanBeNull(pRight) - ){ - sqlite3VdbeAddOp2(v, OP_IsNull, regBase+nEq, addrNxt); - VdbeCoverage(v); - } - if( sqlite3CompareAffinity(pRight, cEndAff)!=SQLITE_AFF_NONE - && !sqlite3ExprNeedsNoAffinityChange(pRight, cEndAff) - ){ - codeApplyAffinity(pParse, regBase+nEq, 1, &cEndAff); - } - nConstraint++; - testcase( pRangeEnd->wtFlags & TERM_VIRTUAL ); - }else if( bStopAtNull ){ - sqlite3VdbeAddOp2(v, OP_Null, 0, regBase+nEq); - endEq = 0; - nConstraint++; - } - sqlite3DbFree(db, zStartAff); - - /* Top of the loop body */ - pLevel->p2 = sqlite3VdbeCurrentAddr(v); - - /* Check if the index cursor is past the end of the range. */ - if( nConstraint ){ - op = aEndOp[bRev*2 + endEq]; - sqlite3VdbeAddOp4Int(v, op, iIdxCur, addrNxt, regBase, nConstraint); - testcase( op==OP_IdxGT ); VdbeCoverageIf(v, op==OP_IdxGT ); - testcase( op==OP_IdxGE ); VdbeCoverageIf(v, op==OP_IdxGE ); - testcase( op==OP_IdxLT ); VdbeCoverageIf(v, op==OP_IdxLT ); - testcase( op==OP_IdxLE ); VdbeCoverageIf(v, op==OP_IdxLE ); - } - - /* Seek the table cursor, if required */ - disableTerm(pLevel, pRangeStart); - disableTerm(pLevel, pRangeEnd); - if( omitTable ){ - /* pIdx is a covering index. No need to access the main table. */ - }else if( HasRowid(pIdx->pTable) ){ - iRowidReg = ++pParse->nMem; - sqlite3VdbeAddOp2(v, OP_IdxRowid, iIdxCur, iRowidReg); - sqlite3ExprCacheStore(pParse, iCur, -1, iRowidReg); - sqlite3VdbeAddOp2(v, OP_Seek, iCur, iRowidReg); /* Deferred seek */ - }else if( iCur!=iIdxCur ){ - Index *pPk = sqlite3PrimaryKeyIndex(pIdx->pTable); - iRowidReg = sqlite3GetTempRange(pParse, pPk->nKeyCol); - for(j=0; jnKeyCol; j++){ - k = sqlite3ColumnOfIndex(pIdx, pPk->aiColumn[j]); - sqlite3VdbeAddOp3(v, OP_Column, iIdxCur, k, iRowidReg+j); - } - sqlite3VdbeAddOp4Int(v, OP_NotFound, iCur, addrCont, - iRowidReg, pPk->nKeyCol); VdbeCoverage(v); - } - - /* Record the instruction used to terminate the loop. Disable - ** WHERE clause terms made redundant by the index range scan. - */ - if( pLoop->wsFlags & WHERE_ONEROW ){ - pLevel->op = OP_Noop; - }else if( bRev ){ - pLevel->op = OP_Prev; - }else{ - pLevel->op = OP_Next; - } - pLevel->p1 = iIdxCur; - pLevel->p3 = (pLoop->wsFlags&WHERE_UNQ_WANTED)!=0 ? 1:0; - if( (pLoop->wsFlags & WHERE_CONSTRAINT)==0 ){ - pLevel->p5 = SQLITE_STMTSTATUS_FULLSCAN_STEP; - }else{ - assert( pLevel->p5==0 ); - } - }else - -#ifndef SQLITE_OMIT_OR_OPTIMIZATION - if( pLoop->wsFlags & WHERE_MULTI_OR ){ - /* Case 5: Two or more separately indexed terms connected by OR - ** - ** Example: - ** - ** CREATE TABLE t1(a,b,c,d); - ** CREATE INDEX i1 ON t1(a); - ** CREATE INDEX i2 ON t1(b); - ** CREATE INDEX i3 ON t1(c); - ** - ** SELECT * FROM t1 WHERE a=5 OR b=7 OR (c=11 AND d=13) - ** - ** In the example, there are three indexed terms connected by OR. - ** The top of the loop looks like this: - ** - ** Null 1 # Zero the rowset in reg 1 - ** - ** Then, for each indexed term, the following. The arguments to - ** RowSetTest are such that the rowid of the current row is inserted - ** into the RowSet. If it is already present, control skips the - ** Gosub opcode and jumps straight to the code generated by WhereEnd(). 
- ** - ** sqlite3WhereBegin() - ** RowSetTest # Insert rowid into rowset - ** Gosub 2 A - ** sqlite3WhereEnd() - ** - ** Following the above, code to terminate the loop. Label A, the target - ** of the Gosub above, jumps to the instruction right after the Goto. - ** - ** Null 1 # Zero the rowset in reg 1 - ** Goto B # The loop is finished. - ** - ** A: # Return data, whatever. - ** - ** Return 2 # Jump back to the Gosub - ** - ** B: - ** - ** Added 2014-05-26: If the table is a WITHOUT ROWID table, then - ** use an ephermeral index instead of a RowSet to record the primary - ** keys of the rows we have already seen. - ** - */ - WhereClause *pOrWc; /* The OR-clause broken out into subterms */ - SrcList *pOrTab; /* Shortened table list or OR-clause generation */ - Index *pCov = 0; /* Potential covering index (or NULL) */ - int iCovCur = pParse->nTab++; /* Cursor used for index scans (if any) */ - - int regReturn = ++pParse->nMem; /* Register used with OP_Gosub */ - int regRowset = 0; /* Register for RowSet object */ - int regRowid = 0; /* Register holding rowid */ - int iLoopBody = sqlite3VdbeMakeLabel(v); /* Start of loop body */ - int iRetInit; /* Address of regReturn init */ - int untestedTerms = 0; /* Some terms not completely tested */ - int ii; /* Loop counter */ - Expr *pAndExpr = 0; /* An ".. AND (...)" expression */ - Table *pTab = pTabItem->pTab; - - pTerm = pLoop->aLTerm[0]; - assert( pTerm!=0 ); - assert( pTerm->eOperator & WO_OR ); - assert( (pTerm->wtFlags & TERM_ORINFO)!=0 ); - pOrWc = &pTerm->u.pOrInfo->wc; - pLevel->op = OP_Return; - pLevel->p1 = regReturn; - - /* Set up a new SrcList in pOrTab containing the table being scanned - ** by this loop in the a[0] slot and all notReady tables in a[1..] slots. - ** This becomes the SrcList in the recursive call to sqlite3WhereBegin(). - */ - if( pWInfo->nLevel>1 ){ - int nNotReady; /* The number of notReady tables */ - struct SrcList_item *origSrc; /* Original list of tables */ - nNotReady = pWInfo->nLevel - iLevel - 1; - pOrTab = sqlite3StackAllocRaw(db, - sizeof(*pOrTab)+ nNotReady*sizeof(pOrTab->a[0])); - if( pOrTab==0 ) return notReady; - pOrTab->nAlloc = (u8)(nNotReady + 1); - pOrTab->nSrc = pOrTab->nAlloc; - memcpy(pOrTab->a, pTabItem, sizeof(*pTabItem)); - origSrc = pWInfo->pTabList->a; - for(k=1; k<=nNotReady; k++){ - memcpy(&pOrTab->a[k], &origSrc[pLevel[k].iFrom], sizeof(pOrTab->a[k])); - } - }else{ - pOrTab = pWInfo->pTabList; - } - - /* Initialize the rowset register to contain NULL. An SQL NULL is - ** equivalent to an empty rowset. Or, create an ephermeral index - ** capable of holding primary keys in the case of a WITHOUT ROWID. - ** - ** Also initialize regReturn to contain the address of the instruction - ** immediately following the OP_Return at the bottom of the loop. This - ** is required in a few obscure LEFT JOIN cases where control jumps - ** over the top of the loop into the body of it. In this case the - ** correct response for the end-of-loop code (the OP_Return) is to - ** fall through to the next instruction, just as an OP_Next does if - ** called on an uninitialized cursor. 
- */ - if( (pWInfo->wctrlFlags & WHERE_DUPLICATES_OK)==0 ){ - if( HasRowid(pTab) ){ - regRowset = ++pParse->nMem; - sqlite3VdbeAddOp2(v, OP_Null, 0, regRowset); - }else{ - Index *pPk = sqlite3PrimaryKeyIndex(pTab); - regRowset = pParse->nTab++; - sqlite3VdbeAddOp2(v, OP_OpenEphemeral, regRowset, pPk->nKeyCol); - sqlite3VdbeSetP4KeyInfo(pParse, pPk); - } - regRowid = ++pParse->nMem; - } - iRetInit = sqlite3VdbeAddOp2(v, OP_Integer, 0, regReturn); - - /* If the original WHERE clause is z of the form: (x1 OR x2 OR ...) AND y - ** Then for every term xN, evaluate as the subexpression: xN AND z - ** That way, terms in y that are factored into the disjunction will - ** be picked up by the recursive calls to sqlite3WhereBegin() below. - ** - ** Actually, each subexpression is converted to "xN AND w" where w is - ** the "interesting" terms of z - terms that did not originate in the - ** ON or USING clause of a LEFT JOIN, and terms that are usable as - ** indices. - ** - ** This optimization also only applies if the (x1 OR x2 OR ...) term - ** is not contained in the ON clause of a LEFT JOIN. - ** See ticket http://www.sqlite.org/src/info/f2369304e4 - */ - if( pWC->nTerm>1 ){ - int iTerm; - for(iTerm=0; iTermnTerm; iTerm++){ - Expr *pExpr = pWC->a[iTerm].pExpr; - if( &pWC->a[iTerm] == pTerm ) continue; - if( ExprHasProperty(pExpr, EP_FromJoin) ) continue; - testcase( pWC->a[iTerm].wtFlags & TERM_ORINFO ); - testcase( pWC->a[iTerm].wtFlags & TERM_VIRTUAL ); - if( pWC->a[iTerm].wtFlags & (TERM_ORINFO|TERM_VIRTUAL) ) continue; - if( (pWC->a[iTerm].eOperator & WO_ALL)==0 ) continue; - pExpr = sqlite3ExprDup(db, pExpr, 0); - pAndExpr = sqlite3ExprAnd(db, pAndExpr, pExpr); - } - if( pAndExpr ){ - pAndExpr = sqlite3PExpr(pParse, TK_AND, 0, pAndExpr, 0); - } - } - - /* Run a separate WHERE clause for each term of the OR clause. After - ** eliminating duplicates from other WHERE clauses, the action for each - ** sub-WHERE clause is to to invoke the main loop body as a subroutine. - */ - for(ii=0; iinTerm; ii++){ - WhereTerm *pOrTerm = &pOrWc->a[ii]; - if( pOrTerm->leftCursor==iCur || (pOrTerm->eOperator & WO_AND)!=0 ){ - WhereInfo *pSubWInfo; /* Info for single OR-term scan */ - Expr *pOrExpr = pOrTerm->pExpr; /* Current OR clause term */ - int j1 = 0; /* Address of jump operation */ - if( pAndExpr && !ExprHasProperty(pOrExpr, EP_FromJoin) ){ - pAndExpr->pLeft = pOrExpr; - pOrExpr = pAndExpr; - } - /* Loop through table entries that match term pOrTerm. */ - pSubWInfo = sqlite3WhereBegin(pParse, pOrTab, pOrExpr, 0, 0, - WHERE_OMIT_OPEN_CLOSE | WHERE_AND_ONLY | - WHERE_FORCE_TABLE | WHERE_ONETABLE_ONLY, iCovCur); - assert( pSubWInfo || pParse->nErr || db->mallocFailed ); - if( pSubWInfo ){ - WhereLoop *pSubLoop; - explainOneScan( - pParse, pOrTab, &pSubWInfo->a[0], iLevel, pLevel->iFrom, 0 - ); - /* This is the sub-WHERE clause body. First skip over - ** duplicate rows from prior sub-WHERE clauses, and record the - ** rowid (or PRIMARY KEY) for the current row so that the same - ** row will be skipped in subsequent sub-WHERE clauses. - */ - if( (pWInfo->wctrlFlags & WHERE_DUPLICATES_OK)==0 ){ - int r; - int iSet = ((ii==pOrWc->nTerm-1)?-1:ii); - if( HasRowid(pTab) ){ - r = sqlite3ExprCodeGetColumn(pParse, pTab, -1, iCur, regRowid, 0); - j1 = sqlite3VdbeAddOp4Int(v, OP_RowSetTest, regRowset, 0, r,iSet); - VdbeCoverage(v); - }else{ - Index *pPk = sqlite3PrimaryKeyIndex(pTab); - int nPk = pPk->nKeyCol; - int iPk; - - /* Read the PK into an array of temp registers. 
*/ - r = sqlite3GetTempRange(pParse, nPk); - for(iPk=0; iPkaiColumn[iPk]; - sqlite3ExprCodeGetColumn(pParse, pTab, iCol, iCur, r+iPk, 0); - } - - /* Check if the temp table already contains this key. If so, - ** the row has already been included in the result set and - ** can be ignored (by jumping past the Gosub below). Otherwise, - ** insert the key into the temp table and proceed with processing - ** the row. - ** - ** Use some of the same optimizations as OP_RowSetTest: If iSet - ** is zero, assume that the key cannot already be present in - ** the temp table. And if iSet is -1, assume that there is no - ** need to insert the key into the temp table, as it will never - ** be tested for. */ - if( iSet ){ - j1 = sqlite3VdbeAddOp4Int(v, OP_Found, regRowset, 0, r, nPk); - VdbeCoverage(v); - } - if( iSet>=0 ){ - sqlite3VdbeAddOp3(v, OP_MakeRecord, r, nPk, regRowid); - sqlite3VdbeAddOp3(v, OP_IdxInsert, regRowset, regRowid, 0); - if( iSet ) sqlite3VdbeChangeP5(v, OPFLAG_USESEEKRESULT); - } - - /* Release the array of temp registers */ - sqlite3ReleaseTempRange(pParse, r, nPk); - } - } - - /* Invoke the main loop body as a subroutine */ - sqlite3VdbeAddOp2(v, OP_Gosub, regReturn, iLoopBody); - - /* Jump here (skipping the main loop body subroutine) if the - ** current sub-WHERE row is a duplicate from prior sub-WHEREs. */ - if( j1 ) sqlite3VdbeJumpHere(v, j1); - - /* The pSubWInfo->untestedTerms flag means that this OR term - ** contained one or more AND term from a notReady table. The - ** terms from the notReady table could not be tested and will - ** need to be tested later. - */ - if( pSubWInfo->untestedTerms ) untestedTerms = 1; - - /* If all of the OR-connected terms are optimized using the same - ** index, and the index is opened using the same cursor number - ** by each call to sqlite3WhereBegin() made by this loop, it may - ** be possible to use that index as a covering index. - ** - ** If the call to sqlite3WhereBegin() above resulted in a scan that - ** uses an index, and this is either the first OR-connected term - ** processed or the index is the same as that used by all previous - ** terms, set pCov to the candidate covering index. Otherwise, set - ** pCov to NULL to indicate that no candidate covering index will - ** be available. - */ - pSubLoop = pSubWInfo->a[0].pWLoop; - assert( (pSubLoop->wsFlags & WHERE_AUTO_INDEX)==0 ); - if( (pSubLoop->wsFlags & WHERE_INDEXED)!=0 - && (ii==0 || pSubLoop->u.btree.pIndex==pCov) - && (HasRowid(pTab) || !IsPrimaryKeyIndex(pSubLoop->u.btree.pIndex)) - ){ - assert( pSubWInfo->a[0].iIdxCur==iCovCur ); - pCov = pSubLoop->u.btree.pIndex; - }else{ - pCov = 0; - } - - /* Finish the loop through table entries that match term pOrTerm. */ - sqlite3WhereEnd(pSubWInfo); - } - } - } - pLevel->u.pCovidx = pCov; - if( pCov ) pLevel->iIdxCur = iCovCur; - if( pAndExpr ){ - pAndExpr->pLeft = 0; - sqlite3ExprDelete(db, pAndExpr); - } - sqlite3VdbeChangeP1(v, iRetInit, sqlite3VdbeCurrentAddr(v)); - sqlite3VdbeAddOp2(v, OP_Goto, 0, pLevel->addrBrk); - sqlite3VdbeResolveLabel(v, iLoopBody); - - if( pWInfo->nLevel>1 ) sqlite3StackFree(db, pOrTab); - if( !untestedTerms ) disableTerm(pLevel, pTerm); - }else -#endif /* SQLITE_OMIT_OR_OPTIMIZATION */ - - { - /* Case 6: There is no usable index. We must do a complete - ** scan of the entire table. 
- */ - static const u8 aStep[] = { OP_Next, OP_Prev }; - static const u8 aStart[] = { OP_Rewind, OP_Last }; - assert( bRev==0 || bRev==1 ); - if( pTabItem->isRecursive ){ - /* Tables marked isRecursive have only a single row that is stored in - ** a pseudo-cursor. No need to Rewind or Next such cursors. */ - pLevel->op = OP_Noop; - }else{ - pLevel->op = aStep[bRev]; - pLevel->p1 = iCur; - pLevel->p2 = 1 + sqlite3VdbeAddOp2(v, aStart[bRev], iCur, addrBrk); - VdbeCoverageIf(v, bRev==0); - VdbeCoverageIf(v, bRev!=0); - pLevel->p5 = SQLITE_STMTSTATUS_FULLSCAN_STEP; - } - } - - /* Insert code to test every subexpression that can be completely - ** computed using the current set of tables. - */ - for(pTerm=pWC->a, j=pWC->nTerm; j>0; j--, pTerm++){ - Expr *pE; - testcase( pTerm->wtFlags & TERM_VIRTUAL ); - testcase( pTerm->wtFlags & TERM_CODED ); - if( pTerm->wtFlags & (TERM_VIRTUAL|TERM_CODED) ) continue; - if( (pTerm->prereqAll & pLevel->notReady)!=0 ){ - testcase( pWInfo->untestedTerms==0 - && (pWInfo->wctrlFlags & WHERE_ONETABLE_ONLY)!=0 ); - pWInfo->untestedTerms = 1; - continue; - } - pE = pTerm->pExpr; - assert( pE!=0 ); - if( pLevel->iLeftJoin && !ExprHasProperty(pE, EP_FromJoin) ){ - continue; - } - sqlite3ExprIfFalse(pParse, pE, addrCont, SQLITE_JUMPIFNULL); - pTerm->wtFlags |= TERM_CODED; - } - - /* Insert code to test for implied constraints based on transitivity - ** of the "==" operator. - ** - ** Example: If the WHERE clause contains "t1.a=t2.b" and "t2.b=123" - ** and we are coding the t1 loop and the t2 loop has not yet coded, - ** then we cannot use the "t1.a=t2.b" constraint, but we can code - ** the implied "t1.a=123" constraint. - */ - for(pTerm=pWC->a, j=pWC->nTerm; j>0; j--, pTerm++){ - Expr *pE, *pEAlt; - WhereTerm *pAlt; - if( pTerm->wtFlags & (TERM_VIRTUAL|TERM_CODED) ) continue; - if( pTerm->eOperator!=(WO_EQUIV|WO_EQ) ) continue; - if( pTerm->leftCursor!=iCur ) continue; - if( pLevel->iLeftJoin ) continue; - pE = pTerm->pExpr; - assert( !ExprHasProperty(pE, EP_FromJoin) ); - assert( (pTerm->prereqRight & pLevel->notReady)!=0 ); - pAlt = findTerm(pWC, iCur, pTerm->u.leftColumn, notReady, WO_EQ|WO_IN, 0); - if( pAlt==0 ) continue; - if( pAlt->wtFlags & (TERM_CODED) ) continue; - testcase( pAlt->eOperator & WO_EQ ); - testcase( pAlt->eOperator & WO_IN ); - VdbeModuleComment((v, "begin transitive constraint")); - pEAlt = sqlite3StackAllocRaw(db, sizeof(*pEAlt)); - if( pEAlt ){ - *pEAlt = *pAlt->pExpr; - pEAlt->pLeft = pE->pLeft; - sqlite3ExprIfFalse(pParse, pEAlt, addrCont, SQLITE_JUMPIFNULL); - sqlite3StackFree(db, pEAlt); - } - } - - /* For a LEFT OUTER JOIN, generate code that will record the fact that - ** at least one row of the right table has matched the left table. - */ - if( pLevel->iLeftJoin ){ - pLevel->addrFirst = sqlite3VdbeCurrentAddr(v); - sqlite3VdbeAddOp2(v, OP_Integer, 1, pLevel->iLeftJoin); - VdbeComment((v, "record LEFT JOIN hit")); - sqlite3ExprCacheClear(pParse); - for(pTerm=pWC->a, j=0; jnTerm; j++, pTerm++){ - testcase( pTerm->wtFlags & TERM_VIRTUAL ); - testcase( pTerm->wtFlags & TERM_CODED ); - if( pTerm->wtFlags & (TERM_VIRTUAL|TERM_CODED) ) continue; - if( (pTerm->prereqAll & pLevel->notReady)!=0 ){ - assert( pWInfo->untestedTerms ); - continue; - } - assert( pTerm->pExpr ); - sqlite3ExprIfFalse(pParse, pTerm->pExpr, addrCont, SQLITE_JUMPIFNULL); - pTerm->wtFlags |= TERM_CODED; - } - } - - return pLevel->notReady; -} - -#if defined(WHERETRACE_ENABLED) && defined(SQLITE_ENABLE_TREE_EXPLAIN) -/* -** Generate "Explanation" text for a WhereTerm. 
-*/ -static void whereExplainTerm(Vdbe *v, WhereTerm *pTerm){ - char zType[4]; - memcpy(zType, "...", 4); - if( pTerm->wtFlags & TERM_VIRTUAL ) zType[0] = 'V'; - if( pTerm->eOperator & WO_EQUIV ) zType[1] = 'E'; - if( ExprHasProperty(pTerm->pExpr, EP_FromJoin) ) zType[2] = 'L'; - sqlite3ExplainPrintf(v, "%s ", zType); - sqlite3ExplainExpr(v, pTerm->pExpr); -} -#endif /* WHERETRACE_ENABLED && SQLITE_ENABLE_TREE_EXPLAIN */ - - -#ifdef WHERETRACE_ENABLED -/* -** Print a WhereLoop object for debugging purposes -*/ -static void whereLoopPrint(WhereLoop *p, WhereClause *pWC){ - WhereInfo *pWInfo = pWC->pWInfo; - int nb = 1+(pWInfo->pTabList->nSrc+7)/8; - struct SrcList_item *pItem = pWInfo->pTabList->a + p->iTab; - Table *pTab = pItem->pTab; - sqlite3DebugPrintf("%c%2d.%0*llx.%0*llx", p->cId, - p->iTab, nb, p->maskSelf, nb, p->prereq); - sqlite3DebugPrintf(" %12s", - pItem->zAlias ? pItem->zAlias : pTab->zName); - if( (p->wsFlags & WHERE_VIRTUALTABLE)==0 ){ - const char *zName; - if( p->u.btree.pIndex && (zName = p->u.btree.pIndex->zName)!=0 ){ - if( strncmp(zName, "sqlite_autoindex_", 17)==0 ){ - int i = sqlite3Strlen30(zName) - 1; - while( zName[i]!='_' ) i--; - zName += i; - } - sqlite3DebugPrintf(".%-16s %2d", zName, p->u.btree.nEq); - }else{ - sqlite3DebugPrintf("%20s",""); - } - }else{ - char *z; - if( p->u.vtab.idxStr ){ - z = sqlite3_mprintf("(%d,\"%s\",%x)", - p->u.vtab.idxNum, p->u.vtab.idxStr, p->u.vtab.omitMask); - }else{ - z = sqlite3_mprintf("(%d,%x)", p->u.vtab.idxNum, p->u.vtab.omitMask); - } - sqlite3DebugPrintf(" %-19s", z); - sqlite3_free(z); - } - sqlite3DebugPrintf(" f %04x N %d", p->wsFlags, p->nLTerm); - sqlite3DebugPrintf(" cost %d,%d,%d\n", p->rSetup, p->rRun, p->nOut); -#ifdef SQLITE_ENABLE_TREE_EXPLAIN - /* If the 0x100 bit of wheretracing is set, then show all of the constraint - ** expressions in the WhereLoop.aLTerm[] array. - */ - if( p->nLTerm && (sqlite3WhereTrace & 0x100)!=0 ){ /* WHERETRACE 0x100 */ - int i; - Vdbe *v = pWInfo->pParse->pVdbe; - sqlite3ExplainBegin(v); - for(i=0; inLTerm; i++){ - WhereTerm *pTerm = p->aLTerm[i]; - if( pTerm==0 ) continue; - sqlite3ExplainPrintf(v, " (%d) #%-2d ", i+1, (int)(pTerm-pWC->a)); - sqlite3ExplainPush(v); - whereExplainTerm(v, pTerm); - sqlite3ExplainPop(v); - sqlite3ExplainNL(v); - } - sqlite3ExplainFinish(v); - sqlite3DebugPrintf("%s", sqlite3VdbeExplanation(v)); - } -#endif -} -#endif - -/* -** Convert bulk memory into a valid WhereLoop that can be passed -** to whereLoopClear harmlessly. -*/ -static void whereLoopInit(WhereLoop *p){ - p->aLTerm = p->aLTermSpace; - p->nLTerm = 0; - p->nLSlot = ArraySize(p->aLTermSpace); - p->wsFlags = 0; -} - -/* -** Clear the WhereLoop.u union. Leave WhereLoop.pLTerm intact. 
-*/ -static void whereLoopClearUnion(sqlite3 *db, WhereLoop *p){ - if( p->wsFlags & (WHERE_VIRTUALTABLE|WHERE_AUTO_INDEX) ){ - if( (p->wsFlags & WHERE_VIRTUALTABLE)!=0 && p->u.vtab.needFree ){ - sqlite3_free(p->u.vtab.idxStr); - p->u.vtab.needFree = 0; - p->u.vtab.idxStr = 0; - }else if( (p->wsFlags & WHERE_AUTO_INDEX)!=0 && p->u.btree.pIndex!=0 ){ - sqlite3DbFree(db, p->u.btree.pIndex->zColAff); - sqlite3KeyInfoUnref(p->u.btree.pIndex->pKeyInfo); - sqlite3DbFree(db, p->u.btree.pIndex); - p->u.btree.pIndex = 0; - } - } -} - -/* -** Deallocate internal memory used by a WhereLoop object -*/ -static void whereLoopClear(sqlite3 *db, WhereLoop *p){ - if( p->aLTerm!=p->aLTermSpace ) sqlite3DbFree(db, p->aLTerm); - whereLoopClearUnion(db, p); - whereLoopInit(p); -} - -/* -** Increase the memory allocation for pLoop->aLTerm[] to be at least n. -*/ -static int whereLoopResize(sqlite3 *db, WhereLoop *p, int n){ - WhereTerm **paNew; - if( p->nLSlot>=n ) return SQLITE_OK; - n = (n+7)&~7; - paNew = sqlite3DbMallocRaw(db, sizeof(p->aLTerm[0])*n); - if( paNew==0 ) return SQLITE_NOMEM; - memcpy(paNew, p->aLTerm, sizeof(p->aLTerm[0])*p->nLSlot); - if( p->aLTerm!=p->aLTermSpace ) sqlite3DbFree(db, p->aLTerm); - p->aLTerm = paNew; - p->nLSlot = n; - return SQLITE_OK; -} - -/* -** Transfer content from the second pLoop into the first. -*/ -static int whereLoopXfer(sqlite3 *db, WhereLoop *pTo, WhereLoop *pFrom){ - whereLoopClearUnion(db, pTo); - if( whereLoopResize(db, pTo, pFrom->nLTerm) ){ - memset(&pTo->u, 0, sizeof(pTo->u)); - return SQLITE_NOMEM; - } - memcpy(pTo, pFrom, WHERE_LOOP_XFER_SZ); - memcpy(pTo->aLTerm, pFrom->aLTerm, pTo->nLTerm*sizeof(pTo->aLTerm[0])); - if( pFrom->wsFlags & WHERE_VIRTUALTABLE ){ - pFrom->u.vtab.needFree = 0; - }else if( (pFrom->wsFlags & WHERE_AUTO_INDEX)!=0 ){ - pFrom->u.btree.pIndex = 0; - } - return SQLITE_OK; -} - -/* -** Delete a WhereLoop object -*/ -static void whereLoopDelete(sqlite3 *db, WhereLoop *p){ - whereLoopClear(db, p); - sqlite3DbFree(db, p); -} - -/* -** Free a WhereInfo structure -*/ -static void whereInfoFree(sqlite3 *db, WhereInfo *pWInfo){ - if( ALWAYS(pWInfo) ){ - whereClauseClear(&pWInfo->sWC); - while( pWInfo->pLoops ){ - WhereLoop *p = pWInfo->pLoops; - pWInfo->pLoops = p->pNextLoop; - whereLoopDelete(db, p); - } - sqlite3DbFree(db, pWInfo); - } -} - -/* -** Return TRUE if both of the following are true: -** -** (1) X has the same or lower cost that Y -** (2) X is a proper subset of Y -** -** By "proper subset" we mean that X uses fewer WHERE clause terms -** than Y and that every WHERE clause term used by X is also used -** by Y. -** -** If X is a proper subset of Y then Y is a better choice and ought -** to have a lower cost. This routine returns TRUE when that cost -** relationship is inverted and needs to be adjusted. 
-*/ -static int whereLoopCheaperProperSubset( - const WhereLoop *pX, /* First WhereLoop to compare */ - const WhereLoop *pY /* Compare against this WhereLoop */ -){ - int i, j; - if( pX->nLTerm >= pY->nLTerm ) return 0; /* X is not a subset of Y */ - if( pX->rRun >= pY->rRun ){ - if( pX->rRun > pY->rRun ) return 0; /* X costs more than Y */ - if( pX->nOut > pY->nOut ) return 0; /* X costs more than Y */ - } - for(i=pX->nLTerm-1; i>=0; i--){ - for(j=pY->nLTerm-1; j>=0; j--){ - if( pY->aLTerm[j]==pX->aLTerm[i] ) break; - } - if( j<0 ) return 0; /* X not a subset of Y since term X[i] not used by Y */ - } - return 1; /* All conditions meet */ -} - -/* -** Try to adjust the cost of WhereLoop pTemplate upwards or downwards so -** that: -** -** (1) pTemplate costs less than any other WhereLoops that are a proper -** subset of pTemplate -** -** (2) pTemplate costs more than any other WhereLoops for which pTemplate -** is a proper subset. -** -** To say "WhereLoop X is a proper subset of Y" means that X uses fewer -** WHERE clause terms than Y and that every WHERE clause term used by X is -** also used by Y. -** -** This adjustment is omitted for SKIPSCAN loops. In a SKIPSCAN loop, the -** WhereLoop.nLTerm field is not an accurate measure of the number of WHERE -** clause terms covered, since some of the first nLTerm entries in aLTerm[] -** will be NULL (because they are skipped). That makes it more difficult -** to compare the loops. We could add extra code to do the comparison, and -** perhaps we will someday. But SKIPSCAN is sufficiently uncommon, and this -** adjustment is sufficient minor, that it is very difficult to construct -** a test case where the extra code would improve the query plan. Better -** to avoid the added complexity and just omit cost adjustments to SKIPSCAN -** loops. -*/ -static void whereLoopAdjustCost(const WhereLoop *p, WhereLoop *pTemplate){ - if( (pTemplate->wsFlags & WHERE_INDEXED)==0 ) return; - if( (pTemplate->wsFlags & WHERE_SKIPSCAN)!=0 ) return; - for(; p; p=p->pNextLoop){ - if( p->iTab!=pTemplate->iTab ) continue; - if( (p->wsFlags & WHERE_INDEXED)==0 ) continue; - if( (p->wsFlags & WHERE_SKIPSCAN)!=0 ) continue; - if( whereLoopCheaperProperSubset(p, pTemplate) ){ - /* Adjust pTemplate cost downward so that it is cheaper than its - ** subset p */ - pTemplate->rRun = p->rRun; - pTemplate->nOut = p->nOut - 1; - }else if( whereLoopCheaperProperSubset(pTemplate, p) ){ - /* Adjust pTemplate cost upward so that it is costlier than p since - ** pTemplate is a proper subset of p */ - pTemplate->rRun = p->rRun; - pTemplate->nOut = p->nOut + 1; - } - } -} - -/* -** Search the list of WhereLoops in *ppPrev looking for one that can be -** supplanted by pTemplate. -** -** Return NULL if the WhereLoop list contains an entry that can supplant -** pTemplate, in other words if pTemplate does not belong on the list. -** -** If pX is a WhereLoop that pTemplate can supplant, then return the -** link that points to pX. -** -** If pTemplate cannot supplant any existing element of the list but needs -** to be added to the list, then return a pointer to the tail of the list. -*/ -static WhereLoop **whereLoopFindLesser( - WhereLoop **ppPrev, - const WhereLoop *pTemplate -){ - WhereLoop *p; - for(p=(*ppPrev); p; ppPrev=&p->pNextLoop, p=*ppPrev){ - if( p->iTab!=pTemplate->iTab || p->iSortIdx!=pTemplate->iSortIdx ){ - /* If either the iTab or iSortIdx values for two WhereLoop are different - ** then those WhereLoops need to be considered separately. 
Neither is - ** a candidate to replace the other. */ - continue; - } - /* In the current implementation, the rSetup value is either zero - ** or the cost of building an automatic index (NlogN) and the NlogN - ** is the same for compatible WhereLoops. */ - assert( p->rSetup==0 || pTemplate->rSetup==0 - || p->rSetup==pTemplate->rSetup ); - - /* whereLoopAddBtree() always generates and inserts the automatic index - ** case first. Hence compatible candidate WhereLoops never have a larger - ** rSetup. Call this SETUP-INVARIANT */ - assert( p->rSetup>=pTemplate->rSetup ); - - /* If existing WhereLoop p is better than pTemplate, pTemplate can be - ** discarded. WhereLoop p is better if: - ** (1) p has no more dependencies than pTemplate, and - ** (2) p has an equal or lower cost than pTemplate - */ - if( (p->prereq & pTemplate->prereq)==p->prereq /* (1) */ - && p->rSetup<=pTemplate->rSetup /* (2a) */ - && p->rRun<=pTemplate->rRun /* (2b) */ - && p->nOut<=pTemplate->nOut /* (2c) */ - ){ - return 0; /* Discard pTemplate */ - } - - /* If pTemplate is always better than p, then cause p to be overwritten - ** with pTemplate. pTemplate is better than p if: - ** (1) pTemplate has no more dependences than p, and - ** (2) pTemplate has an equal or lower cost than p. - */ - if( (p->prereq & pTemplate->prereq)==pTemplate->prereq /* (1) */ - && p->rRun>=pTemplate->rRun /* (2a) */ - && p->nOut>=pTemplate->nOut /* (2b) */ - ){ - assert( p->rSetup>=pTemplate->rSetup ); /* SETUP-INVARIANT above */ - break; /* Cause p to be overwritten by pTemplate */ - } - } - return ppPrev; -} - -/* -** Insert or replace a WhereLoop entry using the template supplied. -** -** An existing WhereLoop entry might be overwritten if the new template -** is better and has fewer dependencies. Or the template will be ignored -** and no insert will occur if an existing WhereLoop is faster and has -** fewer dependencies than the template. Otherwise a new WhereLoop is -** added based on the template. -** -** If pBuilder->pOrSet is not NULL then we care about only the -** prerequisites and rRun and nOut costs of the N best loops. That -** information is gathered in the pBuilder->pOrSet object. This special -** processing mode is used only for OR clause processing. -** -** When accumulating multiple loops (when pBuilder->pOrSet is NULL) we -** still might overwrite similar loops with the new template if the -** new template is better. Loops may be overwritten if the following -** conditions are met: -** -** (1) They have the same iTab. -** (2) They have the same iSortIdx. -** (3) The template has same or fewer dependencies than the current loop -** (4) The template has the same or lower cost than the current loop -*/ -static int whereLoopInsert(WhereLoopBuilder *pBuilder, WhereLoop *pTemplate){ - WhereLoop **ppPrev, *p; - WhereInfo *pWInfo = pBuilder->pWInfo; - sqlite3 *db = pWInfo->pParse->db; - - /* If pBuilder->pOrSet is defined, then only keep track of the costs - ** and prereqs. - */ - if( pBuilder->pOrSet!=0 ){ -#if WHERETRACE_ENABLED - u16 n = pBuilder->pOrSet->n; - int x = -#endif - whereOrInsert(pBuilder->pOrSet, pTemplate->prereq, pTemplate->rRun, - pTemplate->nOut); -#if WHERETRACE_ENABLED /* 0x8 */ - if( sqlite3WhereTrace & 0x8 ){ - sqlite3DebugPrintf(x?" 
or-%d: ":" or-X: ", n); - whereLoopPrint(pTemplate, pBuilder->pWC); - } -#endif - return SQLITE_OK; - } - - /* Look for an existing WhereLoop to replace with pTemplate - */ - whereLoopAdjustCost(pWInfo->pLoops, pTemplate); - ppPrev = whereLoopFindLesser(&pWInfo->pLoops, pTemplate); - - if( ppPrev==0 ){ - /* There already exists a WhereLoop on the list that is better - ** than pTemplate, so just ignore pTemplate */ -#if WHERETRACE_ENABLED /* 0x8 */ - if( sqlite3WhereTrace & 0x8 ){ - sqlite3DebugPrintf("ins-noop: "); - whereLoopPrint(pTemplate, pBuilder->pWC); - } -#endif - return SQLITE_OK; - }else{ - p = *ppPrev; - } - - /* If we reach this point it means that either p[] should be overwritten - ** with pTemplate[] if p[] exists, or if p==NULL then allocate a new - ** WhereLoop and insert it. - */ -#if WHERETRACE_ENABLED /* 0x8 */ - if( sqlite3WhereTrace & 0x8 ){ - if( p!=0 ){ - sqlite3DebugPrintf("ins-del: "); - whereLoopPrint(p, pBuilder->pWC); - } - sqlite3DebugPrintf("ins-new: "); - whereLoopPrint(pTemplate, pBuilder->pWC); - } -#endif - if( p==0 ){ - /* Allocate a new WhereLoop to add to the end of the list */ - *ppPrev = p = sqlite3DbMallocRaw(db, sizeof(WhereLoop)); - if( p==0 ) return SQLITE_NOMEM; - whereLoopInit(p); - p->pNextLoop = 0; - }else{ - /* We will be overwriting WhereLoop p[]. But before we do, first - ** go through the rest of the list and delete any other entries besides - ** p[] that are also supplated by pTemplate */ - WhereLoop **ppTail = &p->pNextLoop; - WhereLoop *pToDel; - while( *ppTail ){ - ppTail = whereLoopFindLesser(ppTail, pTemplate); - if( NEVER(ppTail==0) ) break; - pToDel = *ppTail; - if( pToDel==0 ) break; - *ppTail = pToDel->pNextLoop; -#if WHERETRACE_ENABLED /* 0x8 */ - if( sqlite3WhereTrace & 0x8 ){ - sqlite3DebugPrintf("ins-del: "); - whereLoopPrint(pToDel, pBuilder->pWC); - } -#endif - whereLoopDelete(db, pToDel); - } - } - whereLoopXfer(db, p, pTemplate); - if( (p->wsFlags & WHERE_VIRTUALTABLE)==0 ){ - Index *pIndex = p->u.btree.pIndex; - if( pIndex && pIndex->tnum==0 ){ - p->u.btree.pIndex = 0; - } - } - return SQLITE_OK; -} - -/* -** Adjust the WhereLoop.nOut value downward to account for terms of the -** WHERE clause that reference the loop but which are not used by an -** index. -** -** In the current implementation, the first extra WHERE clause term reduces -** the number of output rows by a factor of 10 and each additional term -** reduces the number of output rows by sqrt(2). -*/ -static void whereLoopOutputAdjust(WhereClause *pWC, WhereLoop *pLoop){ - WhereTerm *pTerm, *pX; - Bitmask notAllowed = ~(pLoop->prereq|pLoop->maskSelf); - int i, j; - - if( !OptimizationEnabled(pWC->pWInfo->pParse->db, SQLITE_AdjustOutEst) ){ - return; - } - for(i=pWC->nTerm, pTerm=pWC->a; i>0; i--, pTerm++){ - if( (pTerm->wtFlags & TERM_VIRTUAL)!=0 ) break; - if( (pTerm->prereqAll & pLoop->maskSelf)==0 ) continue; - if( (pTerm->prereqAll & notAllowed)!=0 ) continue; - for(j=pLoop->nLTerm-1; j>=0; j--){ - pX = pLoop->aLTerm[j]; - if( pX==0 ) continue; - if( pX==pTerm ) break; - if( pX->iParent>=0 && (&pWC->a[pX->iParent])==pTerm ) break; - } - if( j<0 ){ - pLoop->nOut += (pTerm->truthProb<=0 ? pTerm->truthProb : -1); - } - } -} - -/* -** We have so far matched pBuilder->pNew->u.btree.nEq terms of the -** index pIndex. Try to match one more. -** -** When this function is called, pBuilder->pNew->nOut contains the -** number of rows expected to be visited by filtering using the nEq -** terms only. 
If it is modified, this value is restored before this -** function returns. -** -** If pProbe->tnum==0, that means pIndex is a fake index used for the -** INTEGER PRIMARY KEY. -*/ -static int whereLoopAddBtreeIndex( - WhereLoopBuilder *pBuilder, /* The WhereLoop factory */ - struct SrcList_item *pSrc, /* FROM clause term being analyzed */ - Index *pProbe, /* An index on pSrc */ - LogEst nInMul /* log(Number of iterations due to IN) */ -){ - WhereInfo *pWInfo = pBuilder->pWInfo; /* WHERE analyse context */ - Parse *pParse = pWInfo->pParse; /* Parsing context */ - sqlite3 *db = pParse->db; /* Database connection malloc context */ - WhereLoop *pNew; /* Template WhereLoop under construction */ - WhereTerm *pTerm; /* A WhereTerm under consideration */ - int opMask; /* Valid operators for constraints */ - WhereScan scan; /* Iterator for WHERE terms */ - Bitmask saved_prereq; /* Original value of pNew->prereq */ - u16 saved_nLTerm; /* Original value of pNew->nLTerm */ - u16 saved_nEq; /* Original value of pNew->u.btree.nEq */ - u16 saved_nSkip; /* Original value of pNew->u.btree.nSkip */ - u32 saved_wsFlags; /* Original value of pNew->wsFlags */ - LogEst saved_nOut; /* Original value of pNew->nOut */ - int iCol; /* Index of the column in the table */ - int rc = SQLITE_OK; /* Return code */ - LogEst rLogSize; /* Logarithm of table size */ - WhereTerm *pTop = 0, *pBtm = 0; /* Top and bottom range constraints */ - - pNew = pBuilder->pNew; - if( db->mallocFailed ) return SQLITE_NOMEM; - - assert( (pNew->wsFlags & WHERE_VIRTUALTABLE)==0 ); - assert( (pNew->wsFlags & WHERE_TOP_LIMIT)==0 ); - if( pNew->wsFlags & WHERE_BTM_LIMIT ){ - opMask = WO_LT|WO_LE; - }else if( pProbe->tnum<=0 || (pSrc->jointype & JT_LEFT)!=0 ){ - opMask = WO_EQ|WO_IN|WO_GT|WO_GE|WO_LT|WO_LE; - }else{ - opMask = WO_EQ|WO_IN|WO_ISNULL|WO_GT|WO_GE|WO_LT|WO_LE; - } - if( pProbe->bUnordered ) opMask &= ~(WO_GT|WO_GE|WO_LT|WO_LE); - - assert( pNew->u.btree.nEq<=pProbe->nKeyCol ); - if( pNew->u.btree.nEq < pProbe->nKeyCol ){ - iCol = pProbe->aiColumn[pNew->u.btree.nEq]; - }else{ - iCol = -1; - } - pTerm = whereScanInit(&scan, pBuilder->pWC, pSrc->iCursor, iCol, - opMask, pProbe); - saved_nEq = pNew->u.btree.nEq; - saved_nSkip = pNew->u.btree.nSkip; - saved_nLTerm = pNew->nLTerm; - saved_wsFlags = pNew->wsFlags; - saved_prereq = pNew->prereq; - saved_nOut = pNew->nOut; - pNew->rSetup = 0; - rLogSize = estLog(pProbe->aiRowLogEst[0]); - - /* Consider using a skip-scan if there are no WHERE clause constraints - ** available for the left-most terms of the index, and if the average - ** number of repeats in the left-most terms is at least 18. - ** - ** The magic number 18 is selected on the basis that scanning 17 rows - ** is almost always quicker than an index seek (even though if the index - ** contains fewer than 2^17 rows we assume otherwise in other parts of - ** the code). And, even if it is not, it should not be too much slower. - ** On the other hand, the extra seeks could end up being significantly - ** more expensive. 
*/ - assert( 42==sqlite3LogEst(18) ); - if( pTerm==0 - && saved_nEq==saved_nSkip - && saved_nEq+1nKeyCol - && pProbe->aiRowLogEst[saved_nEq+1]>=42 /* TUNING: Minimum for skip-scan */ - && (rc = whereLoopResize(db, pNew, pNew->nLTerm+1))==SQLITE_OK - ){ - LogEst nIter; - pNew->u.btree.nEq++; - pNew->u.btree.nSkip++; - pNew->aLTerm[pNew->nLTerm++] = 0; - pNew->wsFlags |= WHERE_SKIPSCAN; - nIter = pProbe->aiRowLogEst[saved_nEq] - pProbe->aiRowLogEst[saved_nEq+1]; - pNew->nOut -= nIter; - whereLoopAddBtreeIndex(pBuilder, pSrc, pProbe, nIter + nInMul); - pNew->nOut = saved_nOut; - } - for(; rc==SQLITE_OK && pTerm!=0; pTerm = whereScanNext(&scan)){ - u16 eOp = pTerm->eOperator; /* Shorthand for pTerm->eOperator */ - LogEst rCostIdx; - LogEst nOutUnadjusted; /* nOut before IN() and WHERE adjustments */ - int nIn = 0; -#ifdef SQLITE_ENABLE_STAT3_OR_STAT4 - int nRecValid = pBuilder->nRecValid; -#endif - if( (eOp==WO_ISNULL || (pTerm->wtFlags&TERM_VNULL)!=0) - && (iCol<0 || pSrc->pTab->aCol[iCol].notNull) - ){ - continue; /* ignore IS [NOT] NULL constraints on NOT NULL columns */ - } - if( pTerm->prereqRight & pNew->maskSelf ) continue; - - pNew->wsFlags = saved_wsFlags; - pNew->u.btree.nEq = saved_nEq; - pNew->nLTerm = saved_nLTerm; - if( whereLoopResize(db, pNew, pNew->nLTerm+1) ) break; /* OOM */ - pNew->aLTerm[pNew->nLTerm++] = pTerm; - pNew->prereq = (saved_prereq | pTerm->prereqRight) & ~pNew->maskSelf; - - assert( nInMul==0 - || (pNew->wsFlags & WHERE_COLUMN_NULL)!=0 - || (pNew->wsFlags & WHERE_COLUMN_IN)!=0 - || (pNew->wsFlags & WHERE_SKIPSCAN)!=0 - ); - - if( eOp & WO_IN ){ - Expr *pExpr = pTerm->pExpr; - pNew->wsFlags |= WHERE_COLUMN_IN; - if( ExprHasProperty(pExpr, EP_xIsSelect) ){ - /* "x IN (SELECT ...)": TUNING: the SELECT returns 25 rows */ - nIn = 46; assert( 46==sqlite3LogEst(25) ); - }else if( ALWAYS(pExpr->x.pList && pExpr->x.pList->nExpr) ){ - /* "x IN (value, value, ...)" */ - nIn = sqlite3LogEst(pExpr->x.pList->nExpr); - } - assert( nIn>0 ); /* RHS always has 2 or more terms... The parser - ** changes "x IN (?)" into "x=?". */ - - }else if( eOp & (WO_EQ) ){ - pNew->wsFlags |= WHERE_COLUMN_EQ; - if( iCol<0 || (nInMul==0 && pNew->u.btree.nEq==pProbe->nKeyCol-1) ){ - if( iCol>=0 && pProbe->onError==OE_None ){ - pNew->wsFlags |= WHERE_UNQ_WANTED; - }else{ - pNew->wsFlags |= WHERE_ONEROW; - } - } - }else if( eOp & WO_ISNULL ){ - pNew->wsFlags |= WHERE_COLUMN_NULL; - }else if( eOp & (WO_GT|WO_GE) ){ - testcase( eOp & WO_GT ); - testcase( eOp & WO_GE ); - pNew->wsFlags |= WHERE_COLUMN_RANGE|WHERE_BTM_LIMIT; - pBtm = pTerm; - pTop = 0; - }else{ - assert( eOp & (WO_LT|WO_LE) ); - testcase( eOp & WO_LT ); - testcase( eOp & WO_LE ); - pNew->wsFlags |= WHERE_COLUMN_RANGE|WHERE_TOP_LIMIT; - pTop = pTerm; - pBtm = (pNew->wsFlags & WHERE_BTM_LIMIT)!=0 ? - pNew->aLTerm[pNew->nLTerm-2] : 0; - } - - /* At this point pNew->nOut is set to the number of rows expected to - ** be visited by the index scan before considering term pTerm, or the - ** values of nIn and nInMul. In other words, assuming that all - ** "x IN(...)" terms are replaced with "x = ?". This block updates - ** the value of pNew->nOut to account for pTerm (but not nIn/nInMul). */ - assert( pNew->nOut==saved_nOut ); - if( pNew->wsFlags & WHERE_COLUMN_RANGE ){ - /* Adjust nOut using stat3/stat4 data. Or, if there is no stat3/stat4 - ** data, using some other estimate. 
*/ - whereRangeScanEst(pParse, pBuilder, pBtm, pTop, pNew); - }else{ - int nEq = ++pNew->u.btree.nEq; - assert( eOp & (WO_ISNULL|WO_EQ|WO_IN) ); - - assert( pNew->nOut==saved_nOut ); - if( pTerm->truthProb<=0 && iCol>=0 ){ - assert( (eOp & WO_IN) || nIn==0 ); - testcase( eOp & WO_IN ); - pNew->nOut += pTerm->truthProb; - pNew->nOut -= nIn; - }else{ -#ifdef SQLITE_ENABLE_STAT3_OR_STAT4 - tRowcnt nOut = 0; - if( nInMul==0 - && pProbe->nSample - && pNew->u.btree.nEq<=pProbe->nSampleCol - && OptimizationEnabled(db, SQLITE_Stat3) - && ((eOp & WO_IN)==0 || !ExprHasProperty(pTerm->pExpr, EP_xIsSelect)) - ){ - Expr *pExpr = pTerm->pExpr; - if( (eOp & (WO_EQ|WO_ISNULL))!=0 ){ - testcase( eOp & WO_EQ ); - testcase( eOp & WO_ISNULL ); - rc = whereEqualScanEst(pParse, pBuilder, pExpr->pRight, &nOut); - }else{ - rc = whereInScanEst(pParse, pBuilder, pExpr->x.pList, &nOut); - } - assert( rc!=SQLITE_OK || nOut>0 ); - if( rc==SQLITE_NOTFOUND ) rc = SQLITE_OK; - if( rc!=SQLITE_OK ) break; /* Jump out of the pTerm loop */ - if( nOut ){ - pNew->nOut = sqlite3LogEst(nOut); - if( pNew->nOut>saved_nOut ) pNew->nOut = saved_nOut; - pNew->nOut -= nIn; - } - } - if( nOut==0 ) -#endif - { - pNew->nOut += (pProbe->aiRowLogEst[nEq] - pProbe->aiRowLogEst[nEq-1]); - if( eOp & WO_ISNULL ){ - /* TUNING: If there is no likelihood() value, assume that a - ** "col IS NULL" expression matches twice as many rows - ** as (col=?). */ - pNew->nOut += 10; - } - } - } - } - - /* Set rCostIdx to the cost of visiting selected rows in index. Add - ** it to pNew->rRun, which is currently set to the cost of the index - ** seek only. Then, if this is a non-covering index, add the cost of - ** visiting the rows in the main table. */ - rCostIdx = pNew->nOut + 1 + (15*pProbe->szIdxRow)/pSrc->pTab->szTabRow; - pNew->rRun = sqlite3LogEstAdd(rLogSize, rCostIdx); - if( (pNew->wsFlags & (WHERE_IDX_ONLY|WHERE_IPK))==0 ){ - pNew->rRun = sqlite3LogEstAdd(pNew->rRun, pNew->nOut + 16); - } - - nOutUnadjusted = pNew->nOut; - pNew->rRun += nInMul + nIn; - pNew->nOut += nInMul + nIn; - whereLoopOutputAdjust(pBuilder->pWC, pNew); - rc = whereLoopInsert(pBuilder, pNew); - - if( pNew->wsFlags & WHERE_COLUMN_RANGE ){ - pNew->nOut = saved_nOut; - }else{ - pNew->nOut = nOutUnadjusted; - } - - if( (pNew->wsFlags & WHERE_TOP_LIMIT)==0 - && pNew->u.btree.nEq<(pProbe->nKeyCol + (pProbe->zName!=0)) - ){ - whereLoopAddBtreeIndex(pBuilder, pSrc, pProbe, nInMul+nIn); - } - pNew->nOut = saved_nOut; -#ifdef SQLITE_ENABLE_STAT3_OR_STAT4 - pBuilder->nRecValid = nRecValid; -#endif - } - pNew->prereq = saved_prereq; - pNew->u.btree.nEq = saved_nEq; - pNew->u.btree.nSkip = saved_nSkip; - pNew->wsFlags = saved_wsFlags; - pNew->nOut = saved_nOut; - pNew->nLTerm = saved_nLTerm; - return rc; -} - -/* -** Return True if it is possible that pIndex might be useful in -** implementing the ORDER BY clause in pBuilder. -** -** Return False if pBuilder does not contain an ORDER BY clause or -** if there is no way for pIndex to be useful in implementing that -** ORDER BY clause. 
-*/
-static int indexMightHelpWithOrderBy(
-  WhereLoopBuilder *pBuilder,
-  Index *pIndex,
-  int iCursor
-){
-  ExprList *pOB;
-  int ii, jj;
-
-  if( pIndex->bUnordered ) return 0;
-  if( (pOB = pBuilder->pWInfo->pOrderBy)==0 ) return 0;
-  for(ii=0; ii<pOB->nExpr; ii++){
-    Expr *pExpr = sqlite3ExprSkipCollate(pOB->a[ii].pExpr);
-    if( pExpr->op!=TK_COLUMN ) return 0;
-    if( pExpr->iTable==iCursor ){
-      for(jj=0; jj<pIndex->nKeyCol; jj++){
-        if( pExpr->iColumn==pIndex->aiColumn[jj] ) return 1;
-      }
-    }
-  }
-  return 0;
-}
-
-/*
-** Return a bitmask where 1s indicate that the corresponding column of
-** the table is used by an index.  Only the first 63 columns are considered.
-*/
-static Bitmask columnsInIndex(Index *pIdx){
-  Bitmask m = 0;
-  int j;
-  for(j=pIdx->nColumn-1; j>=0; j--){
-    int x = pIdx->aiColumn[j];
-    if( x>=0 ){
-      testcase( x==BMS-1 );
-      testcase( x==BMS-2 );
-      if( x<BMS-1 ) m |= MASKBIT(x);
-    }
-  }
-  return m;
-}
-
-/* Check to see if a partial index with pPartIndexWhere can be used
-** in the current query.  Return true if it can be and false if not.
-*/
-static int whereUsablePartialIndex(int iTab, WhereClause *pWC, Expr *pWhere){
-  int i;
-  WhereTerm *pTerm;
-  for(i=0, pTerm=pWC->a; i<pWC->nTerm; i++, pTerm++){
-    if( sqlite3ExprImpliesExpr(pTerm->pExpr, pWhere, iTab) ) return 1;
-  }
-  return 0;
-}
-
-/*
-** Add all WhereLoop objects for a single table of the join where the table
-** is identified by pBuilder->pNew->iTab.  That table is guaranteed to be
-** a b-tree table, not a virtual table.
-**
-** The costs (WhereLoop.rRun) of the b-tree loops added by this function
-** are calculated as follows:
-**
-** For a full scan, assuming the table (or index) contains nRow rows:
-**
-**     cost = nRow * 3.0                    // full-table scan
-**     cost = nRow * K                      // scan of covering index
-**     cost = nRow * (K+3.0)                // scan of non-covering index
-**
-** where K is a value between 1.1 and 3.0 set based on the relative
-** estimated average size of the index and table records.
-**
-** For an index scan, where nVisit is the number of index rows visited
-** by the scan, and nSeek is the number of seek operations required on
-** the index b-tree:
-**
-**     cost = nSeek * (log(nRow) + K * nVisit)          // covering index
-**     cost = nSeek * (log(nRow) + (K+3.0) * nVisit)    // non-covering index
-**
-** Normally, nSeek is 1.  nSeek values greater than 1 come about if the
-** WHERE clause includes "x IN (....)" terms used in place of "x=?".  Or when
-** implicit "x IN (SELECT x FROM tbl)" terms are added for skip-scans.
-*/
-static int whereLoopAddBtree(
-  WhereLoopBuilder *pBuilder, /* WHERE clause information */
-  Bitmask mExtra              /* Extra prerequisites for using this table */
-){
-  WhereInfo *pWInfo;          /* WHERE analysis context */
-  Index *pProbe;              /* An index we are evaluating */
-  Index sPk;                  /* A fake index object for the primary key */
-  LogEst aiRowEstPk[2];       /* The aiRowLogEst[] value for the sPk index */
-  i16 aiColumnPk = -1;        /* The aColumn[] value for the sPk index */
-  SrcList *pTabList;          /* The FROM clause */
-  struct SrcList_item *pSrc;  /* The FROM clause btree term to add */
-  WhereLoop *pNew;            /* Template WhereLoop object */
-  int rc = SQLITE_OK;         /* Return code */
-  int iSortIdx = 1;           /* Index number */
-  int b;                      /* A boolean value */
-  LogEst rSize;               /* number of rows in the table */
-  LogEst rLogSize;            /* Logarithm of the number of rows in the table */
-  WhereClause *pWC;           /* The parsed WHERE clause */
-  Table *pTab;                /* Table being queried */
-
-  pNew = pBuilder->pNew;
-  pWInfo = pBuilder->pWInfo;
-  pTabList = pWInfo->pTabList;
-  pSrc = pTabList->a + pNew->iTab;
-  pTab = pSrc->pTab;
-  pWC = pBuilder->pWC;
-  assert( !IsVirtual(pSrc->pTab) );
-
-  if( pSrc->pIndex ){
-    /* An INDEXED BY clause specifies a particular index to use */
-    pProbe = pSrc->pIndex;
-  }else if( !HasRowid(pTab) ){
-    pProbe = pTab->pIndex;
-  }else{
-    /* There is no INDEXED BY clause.
Create a fake Index object in local - ** variable sPk to represent the rowid primary key index. Make this - ** fake index the first in a chain of Index objects with all of the real - ** indices to follow */ - Index *pFirst; /* First of real indices on the table */ - memset(&sPk, 0, sizeof(Index)); - sPk.nKeyCol = 1; - sPk.aiColumn = &aiColumnPk; - sPk.aiRowLogEst = aiRowEstPk; - sPk.onError = OE_Replace; - sPk.pTable = pTab; - sPk.szIdxRow = pTab->szTabRow; - aiRowEstPk[0] = pTab->nRowLogEst; - aiRowEstPk[1] = 0; - pFirst = pSrc->pTab->pIndex; - if( pSrc->notIndexed==0 ){ - /* The real indices of the table are only considered if the - ** NOT INDEXED qualifier is omitted from the FROM clause */ - sPk.pNext = pFirst; - } - pProbe = &sPk; - } - rSize = pTab->nRowLogEst; - rLogSize = estLog(rSize); - -#ifndef SQLITE_OMIT_AUTOMATIC_INDEX - /* Automatic indexes */ - if( !pBuilder->pOrSet - && (pWInfo->pParse->db->flags & SQLITE_AutoIndex)!=0 - && pSrc->pIndex==0 - && !pSrc->viaCoroutine - && !pSrc->notIndexed - && HasRowid(pTab) - && !pSrc->isCorrelated - && !pSrc->isRecursive - ){ - /* Generate auto-index WhereLoops */ - WhereTerm *pTerm; - WhereTerm *pWCEnd = pWC->a + pWC->nTerm; - for(pTerm=pWC->a; rc==SQLITE_OK && pTermprereqRight & pNew->maskSelf ) continue; - if( termCanDriveIndex(pTerm, pSrc, 0) ){ - pNew->u.btree.nEq = 1; - pNew->u.btree.nSkip = 0; - pNew->u.btree.pIndex = 0; - pNew->nLTerm = 1; - pNew->aLTerm[0] = pTerm; - /* TUNING: One-time cost for computing the automatic index is - ** approximately 7*N*log2(N) where N is the number of rows in - ** the table being indexed. */ - pNew->rSetup = rLogSize + rSize + 28; assert( 28==sqlite3LogEst(7) ); - /* TUNING: Each index lookup yields 20 rows in the table. This - ** is more than the usual guess of 10 rows, since we have no way - ** of knowning how selective the index will ultimately be. It would - ** not be unreasonable to make this value much larger. */ - pNew->nOut = 43; assert( 43==sqlite3LogEst(20) ); - pNew->rRun = sqlite3LogEstAdd(rLogSize,pNew->nOut); - pNew->wsFlags = WHERE_AUTO_INDEX; - pNew->prereq = mExtra | pTerm->prereqRight; - rc = whereLoopInsert(pBuilder, pNew); - } - } - } -#endif /* SQLITE_OMIT_AUTOMATIC_INDEX */ - - /* Loop over all indices - */ - for(; rc==SQLITE_OK && pProbe; pProbe=pProbe->pNext, iSortIdx++){ - if( pProbe->pPartIdxWhere!=0 - && !whereUsablePartialIndex(pNew->iTab, pWC, pProbe->pPartIdxWhere) ){ - continue; /* Partial index inappropriate for this query */ - } - rSize = pProbe->aiRowLogEst[0]; - pNew->u.btree.nEq = 0; - pNew->u.btree.nSkip = 0; - pNew->nLTerm = 0; - pNew->iSortIdx = 0; - pNew->rSetup = 0; - pNew->prereq = mExtra; - pNew->nOut = rSize; - pNew->u.btree.pIndex = pProbe; - b = indexMightHelpWithOrderBy(pBuilder, pProbe, pSrc->iCursor); - /* The ONEPASS_DESIRED flags never occurs together with ORDER BY */ - assert( (pWInfo->wctrlFlags & WHERE_ONEPASS_DESIRED)==0 || b==0 ); - if( pProbe->tnum<=0 ){ - /* Integer primary key index */ - pNew->wsFlags = WHERE_IPK; - - /* Full table scan */ - pNew->iSortIdx = b ? iSortIdx : 0; - /* TUNING: Cost of full table scan is (N*3.0). */ - pNew->rRun = rSize + 16; - whereLoopOutputAdjust(pWC, pNew); - rc = whereLoopInsert(pBuilder, pNew); - pNew->nOut = rSize; - if( rc ) break; - }else{ - Bitmask m; - if( pProbe->isCovering ){ - pNew->wsFlags = WHERE_IDX_ONLY | WHERE_INDEXED; - m = 0; - }else{ - m = pSrc->colUsed & ~columnsInIndex(pProbe); - pNew->wsFlags = (m==0) ? 
(WHERE_IDX_ONLY|WHERE_INDEXED) : WHERE_INDEXED; - } - - /* Full scan via index */ - if( b - || !HasRowid(pTab) - || ( m==0 - && pProbe->bUnordered==0 - && (pProbe->szIdxRowszTabRow) - && (pWInfo->wctrlFlags & WHERE_ONEPASS_DESIRED)==0 - && sqlite3GlobalConfig.bUseCis - && OptimizationEnabled(pWInfo->pParse->db, SQLITE_CoverIdxScan) - ) - ){ - pNew->iSortIdx = b ? iSortIdx : 0; - - /* The cost of visiting the index rows is N*K, where K is - ** between 1.1 and 3.0, depending on the relative sizes of the - ** index and table rows. If this is a non-covering index scan, - ** also add the cost of visiting table rows (N*3.0). */ - pNew->rRun = rSize + 1 + (15*pProbe->szIdxRow)/pTab->szTabRow; - if( m!=0 ){ - pNew->rRun = sqlite3LogEstAdd(pNew->rRun, rSize+16); - } - - whereLoopOutputAdjust(pWC, pNew); - rc = whereLoopInsert(pBuilder, pNew); - pNew->nOut = rSize; - if( rc ) break; - } - } - - rc = whereLoopAddBtreeIndex(pBuilder, pSrc, pProbe, 0); -#ifdef SQLITE_ENABLE_STAT3_OR_STAT4 - sqlite3Stat4ProbeFree(pBuilder->pRec); - pBuilder->nRecValid = 0; - pBuilder->pRec = 0; -#endif - - /* If there was an INDEXED BY clause, then only that one index is - ** considered. */ - if( pSrc->pIndex ) break; - } - return rc; -} - -#ifndef SQLITE_OMIT_VIRTUALTABLE -/* -** Add all WhereLoop objects for a table of the join identified by -** pBuilder->pNew->iTab. That table is guaranteed to be a virtual table. -*/ -static int whereLoopAddVirtual( - WhereLoopBuilder *pBuilder, /* WHERE clause information */ - Bitmask mExtra -){ - WhereInfo *pWInfo; /* WHERE analysis context */ - Parse *pParse; /* The parsing context */ - WhereClause *pWC; /* The WHERE clause */ - struct SrcList_item *pSrc; /* The FROM clause term to search */ - Table *pTab; - sqlite3 *db; - sqlite3_index_info *pIdxInfo; - struct sqlite3_index_constraint *pIdxCons; - struct sqlite3_index_constraint_usage *pUsage; - WhereTerm *pTerm; - int i, j; - int iTerm, mxTerm; - int nConstraint; - int seenIn = 0; /* True if an IN operator is seen */ - int seenVar = 0; /* True if a non-constant constraint is seen */ - int iPhase; /* 0: const w/o IN, 1: const, 2: no IN, 2: IN */ - WhereLoop *pNew; - int rc = SQLITE_OK; - - pWInfo = pBuilder->pWInfo; - pParse = pWInfo->pParse; - db = pParse->db; - pWC = pBuilder->pWC; - pNew = pBuilder->pNew; - pSrc = &pWInfo->pTabList->a[pNew->iTab]; - pTab = pSrc->pTab; - assert( IsVirtual(pTab) ); - pIdxInfo = allocateIndexInfo(pParse, pWC, pSrc, pBuilder->pOrderBy); - if( pIdxInfo==0 ) return SQLITE_NOMEM; - pNew->prereq = 0; - pNew->rSetup = 0; - pNew->wsFlags = WHERE_VIRTUALTABLE; - pNew->nLTerm = 0; - pNew->u.vtab.needFree = 0; - pUsage = pIdxInfo->aConstraintUsage; - nConstraint = pIdxInfo->nConstraint; - if( whereLoopResize(db, pNew, nConstraint) ){ - sqlite3DbFree(db, pIdxInfo); - return SQLITE_NOMEM; - } - - for(iPhase=0; iPhase<=3; iPhase++){ - if( !seenIn && (iPhase&1)!=0 ){ - iPhase++; - if( iPhase>3 ) break; - } - if( !seenVar && iPhase>1 ) break; - pIdxCons = *(struct sqlite3_index_constraint**)&pIdxInfo->aConstraint; - for(i=0; inConstraint; i++, pIdxCons++){ - j = pIdxCons->iTermOffset; - pTerm = &pWC->a[j]; - switch( iPhase ){ - case 0: /* Constants without IN operator */ - pIdxCons->usable = 0; - if( (pTerm->eOperator & WO_IN)!=0 ){ - seenIn = 1; - } - if( pTerm->prereqRight!=0 ){ - seenVar = 1; - }else if( (pTerm->eOperator & WO_IN)==0 ){ - pIdxCons->usable = 1; - } - break; - case 1: /* Constants with IN operators */ - assert( seenIn ); - pIdxCons->usable = (pTerm->prereqRight==0); - break; - case 2: /* 
Variables without IN */
-          assert( seenVar );
-          pIdxCons->usable = (pTerm->eOperator & WO_IN)==0;
-          break;
-        default:    /* Variables with IN */
-          assert( seenVar && seenIn );
-          pIdxCons->usable = 1;
-          break;
-      }
-    }
-    memset(pUsage, 0, sizeof(pUsage[0])*pIdxInfo->nConstraint);
-    if( pIdxInfo->needToFreeIdxStr ) sqlite3_free(pIdxInfo->idxStr);
-    pIdxInfo->idxStr = 0;
-    pIdxInfo->idxNum = 0;
-    pIdxInfo->needToFreeIdxStr = 0;
-    pIdxInfo->orderByConsumed = 0;
-    pIdxInfo->estimatedCost = SQLITE_BIG_DBL / (double)2;
-    pIdxInfo->estimatedRows = 25;
-    rc = vtabBestIndex(pParse, pTab, pIdxInfo);
-    if( rc ) goto whereLoopAddVtab_exit;
-    pIdxCons = *(struct sqlite3_index_constraint**)&pIdxInfo->aConstraint;
-    pNew->prereq = mExtra;
-    mxTerm = -1;
-    assert( pNew->nLSlot>=nConstraint );
-    for(i=0; i<nConstraint; i++) pNew->aLTerm[i] = 0;
-    pNew->u.vtab.omitMask = 0;
-    for(i=0; i<nConstraint; i++, pIdxCons++){
-      if( (iTerm = pUsage[i].argvIndex - 1)>=0 ){
-        j = pIdxCons->iTermOffset;
-        if( iTerm>=nConstraint
-         || j<0
-         || j>=pWC->nTerm
-         || pNew->aLTerm[iTerm]!=0
-        ){
-          rc = SQLITE_ERROR;
-          sqlite3ErrorMsg(pParse, "%s.xBestIndex() malfunction", pTab->zName);
-          goto whereLoopAddVtab_exit;
-        }
-        testcase( iTerm==nConstraint-1 );
-        testcase( j==0 );
-        testcase( j==pWC->nTerm-1 );
-        pTerm = &pWC->a[j];
-        pNew->prereq |= pTerm->prereqRight;
-        assert( iTerm<pNew->nLSlot );
-        pNew->aLTerm[iTerm] = pTerm;
-        if( iTerm>mxTerm ) mxTerm = iTerm;
-        testcase( iTerm==15 );
-        testcase( iTerm==16 );
-        if( iTerm<16 && pUsage[i].omit ) pNew->u.vtab.omitMask |= 1<<iTerm;
-        if( (pTerm->eOperator & WO_IN)!=0 ){
-          if( pUsage[i].omit==0 ){
-            /* Do not attempt to use an IN constraint if the virtual table
-            ** says that the equivalent EQ constraint cannot be safely omitted.
-            ** If we do attempt to use such a constraint, some rows might be
-            ** repeated in the output. */
-            break;
-          }
-          /* A virtual table that is constrained by an IN clause may not
-          ** consume the ORDER BY clause because (1) the order of IN terms
-          ** is not necessarily related to the order of output terms and
-          ** (2) Multiple outputs from a single IN value will not merge
-          ** together.  */
-          pIdxInfo->orderByConsumed = 0;
-        }
-      }
-    }
-    if( i>=nConstraint ){
-      pNew->nLTerm = mxTerm+1;
-      assert( pNew->nLTerm<=pNew->nLSlot );
-      pNew->u.vtab.idxNum = pIdxInfo->idxNum;
-      pNew->u.vtab.needFree = pIdxInfo->needToFreeIdxStr;
-      pIdxInfo->needToFreeIdxStr = 0;
-      pNew->u.vtab.idxStr = pIdxInfo->idxStr;
-      pNew->u.vtab.isOrdered = (i8)(pIdxInfo->orderByConsumed ?
-                                     pIdxInfo->nOrderBy : 0);
-      pNew->rSetup = 0;
-      pNew->rRun = sqlite3LogEstFromDouble(pIdxInfo->estimatedCost);
-      pNew->nOut = sqlite3LogEst(pIdxInfo->estimatedRows);
-      whereLoopInsert(pBuilder, pNew);
-      if( pNew->u.vtab.needFree ){
-        sqlite3_free(pNew->u.vtab.idxStr);
-        pNew->u.vtab.needFree = 0;
-      }
-    }
-  }
-
-whereLoopAddVtab_exit:
-  if( pIdxInfo->needToFreeIdxStr ) sqlite3_free(pIdxInfo->idxStr);
-  sqlite3DbFree(db, pIdxInfo);
-  return rc;
-}
-#endif /* SQLITE_OMIT_VIRTUALTABLE */
-
-/*
-** Add WhereLoop entries to handle OR terms.  This works for either
-** btrees or virtual tables.
-*/
-static int whereLoopAddOr(WhereLoopBuilder *pBuilder, Bitmask mExtra){
-  WhereInfo *pWInfo = pBuilder->pWInfo;
-  WhereClause *pWC;
-  WhereLoop *pNew;
-  WhereTerm *pTerm, *pWCEnd;
-  int rc = SQLITE_OK;
-  int iCur;
-  WhereClause tempWC;
-  WhereLoopBuilder sSubBuild;
-  WhereOrSet sSum, sCur;
-  struct SrcList_item *pItem;
-
-  pWC = pBuilder->pWC;
-  if( pWInfo->wctrlFlags & WHERE_AND_ONLY ) return SQLITE_OK;
-  pWCEnd = pWC->a + pWC->nTerm;
-  pNew = pBuilder->pNew;
-  memset(&sSum, 0, sizeof(sSum));
-  pItem = pWInfo->pTabList->a + pNew->iTab;
-  iCur = pItem->iCursor;
-
-  for(pTerm=pWC->a; pTerm<pWCEnd && rc==SQLITE_OK; pTerm++){
-    if( (pTerm->eOperator & WO_OR)!=0
-     && (pTerm->u.pOrInfo->indexable & pNew->maskSelf)!=0
-    ){
-      WhereClause * const pOrWC = &pTerm->u.pOrInfo->wc;
-      WhereTerm * const pOrWCEnd = &pOrWC->a[pOrWC->nTerm];
-      WhereTerm *pOrTerm;
-      int once = 1;
-      int i, j;
-
-      sSubBuild = *pBuilder;
-      sSubBuild.pOrderBy = 0;
-      sSubBuild.pOrSet = &sCur;
-
-      for(pOrTerm=pOrWC->a; pOrTerm<pOrWCEnd && rc==SQLITE_OK; pOrTerm++){
-        if( (pOrTerm->eOperator & WO_AND)!=0 ){
-          sSubBuild.pWC = &pOrTerm->u.pAndInfo->wc;
-        }else if( pOrTerm->leftCursor==iCur ){
-          tempWC.pWInfo = pWC->pWInfo;
-          tempWC.pOuter = pWC;
-          tempWC.op = TK_AND;
-          tempWC.nTerm = 1;
-          tempWC.a = pOrTerm;
-          sSubBuild.pWC = &tempWC;
-        }else{
-          continue;
-        }
-        sCur.n = 0;
-#ifndef SQLITE_OMIT_VIRTUALTABLE
-        if( IsVirtual(pItem->pTab) ){
-          rc = whereLoopAddVirtual(&sSubBuild, mExtra);
-        }else
-#endif
-        {
-          rc = whereLoopAddBtree(&sSubBuild, mExtra);
-        }
-        assert( rc==SQLITE_OK || sCur.n==0 );
-        if( sCur.n==0 ){
-          sSum.n = 0;
-          break;
-        }else if( once ){
-          whereOrMove(&sSum, &sCur);
-          once = 0;
-        }else{
-          WhereOrSet sPrev;
-          whereOrMove(&sPrev, &sSum);
-          sSum.n = 0;
-          for(i=0; i<sPrev.n; i++){
-            for(j=0; j<sCur.n; j++){
-              whereOrInsert(&sSum, sPrev.a[i].prereq | sCur.a[j].prereq,
-                            sqlite3LogEstAdd(sPrev.a[i].rRun, sCur.a[j].rRun),
-                            sqlite3LogEstAdd(sPrev.a[i].nOut, sCur.a[j].nOut));
-            }
-          }
-        }
-      }
-      pNew->nLTerm = 1;
-      pNew->aLTerm[0] = pTerm;
-      pNew->wsFlags = WHERE_MULTI_OR;
-      pNew->rSetup = 0;
-      pNew->iSortIdx = 0;
-      memset(&pNew->u, 0, sizeof(pNew->u));
-      for(i=0; rc==SQLITE_OK && i<sSum.n; i++){
-        pNew->rRun = sSum.a[i].rRun + 1;
-        pNew->nOut = sSum.a[i].nOut;
-        pNew->prereq = sSum.a[i].prereq;
-        rc = whereLoopInsert(pBuilder, pNew);
-      }
-    }
-  }
-  return rc;
-}
-
-/*
-** Add all WhereLoop objects for all tables
-*/
-static int whereLoopAddAll(WhereLoopBuilder *pBuilder){
-  WhereInfo *pWInfo = pBuilder->pWInfo;
-  Bitmask mExtra = 0;
-  Bitmask mPrior = 0;
-  int iTab;
-  SrcList *pTabList = pWInfo->pTabList;
-  struct SrcList_item *pItem;
-  sqlite3 *db = pWInfo->pParse->db;
-  int nTabList = pWInfo->nLevel;
-  int rc = SQLITE_OK;
-  u8 priorJoinType = 0;
-  WhereLoop *pNew;
-
-  /* Loop over the tables in the join, from left to right */
-  pNew = pBuilder->pNew;
-  whereLoopInit(pNew);
-  for(iTab=0, pItem=pTabList->a; iTab<nTabList; iTab++, pItem++){
-    pNew->iTab = iTab;
-    pNew->maskSelf = getMask(&pWInfo->sMaskSet, pItem->iCursor);
-    if( ((pItem->jointype|priorJoinType) & (JT_LEFT|JT_CROSS))!=0 ){
-      mExtra = mPrior;
-    }
-    priorJoinType = pItem->jointype;
-    if( IsVirtual(pItem->pTab) ){
-      rc = whereLoopAddVirtual(pBuilder, mExtra);
-    }else{
-      rc = whereLoopAddBtree(pBuilder, mExtra);
-    }
-    if( rc==SQLITE_OK ){
-      rc = whereLoopAddOr(pBuilder, mExtra);
-    }
-    mPrior |= pNew->maskSelf;
-    if( rc || db->mallocFailed ) break;
-  }
-  whereLoopClear(db, pNew);
-  return rc;
-}
-
-/*
-** Examine a WherePath (with the addition of the extra WhereLoop of the 5th
-** parameters) to see if it outputs rows in the requested ORDER BY
-** (or GROUP BY) without requiring a separate sort operation.  Return N:
-**
-**   N>0:   N terms of the ORDER BY clause are satisfied
-**   N==0:  No terms of the ORDER BY clause are satisfied
-**   N<0:   Unknown yet how many terms of ORDER BY might be satisfied.
-** -** Note that processing for WHERE_GROUPBY and WHERE_DISTINCTBY is not as -** strict. With GROUP BY and DISTINCT the only requirement is that -** equivalent rows appear immediately adjacent to one another. GROUP BY -** and DISTINCT do not require rows to appear in any particular order as long -** as equivelent rows are grouped together. Thus for GROUP BY and DISTINCT -** the pOrderBy terms can be matched in any order. With ORDER BY, the -** pOrderBy terms must be matched in strict left-to-right order. -*/ -static i8 wherePathSatisfiesOrderBy( - WhereInfo *pWInfo, /* The WHERE clause */ - ExprList *pOrderBy, /* ORDER BY or GROUP BY or DISTINCT clause to check */ - WherePath *pPath, /* The WherePath to check */ - u16 wctrlFlags, /* Might contain WHERE_GROUPBY or WHERE_DISTINCTBY */ - u16 nLoop, /* Number of entries in pPath->aLoop[] */ - WhereLoop *pLast, /* Add this WhereLoop to the end of pPath->aLoop[] */ - Bitmask *pRevMask /* OUT: Mask of WhereLoops to run in reverse order */ -){ - u8 revSet; /* True if rev is known */ - u8 rev; /* Composite sort order */ - u8 revIdx; /* Index sort order */ - u8 isOrderDistinct; /* All prior WhereLoops are order-distinct */ - u8 distinctColumns; /* True if the loop has UNIQUE NOT NULL columns */ - u8 isMatch; /* iColumn matches a term of the ORDER BY clause */ - u16 nKeyCol; /* Number of key columns in pIndex */ - u16 nColumn; /* Total number of ordered columns in the index */ - u16 nOrderBy; /* Number terms in the ORDER BY clause */ - int iLoop; /* Index of WhereLoop in pPath being processed */ - int i, j; /* Loop counters */ - int iCur; /* Cursor number for current WhereLoop */ - int iColumn; /* A column number within table iCur */ - WhereLoop *pLoop = 0; /* Current WhereLoop being processed. */ - WhereTerm *pTerm; /* A single term of the WHERE clause */ - Expr *pOBExpr; /* An expression from the ORDER BY clause */ - CollSeq *pColl; /* COLLATE function from an ORDER BY clause term */ - Index *pIndex; /* The index associated with pLoop */ - sqlite3 *db = pWInfo->pParse->db; /* Database connection */ - Bitmask obSat = 0; /* Mask of ORDER BY terms satisfied so far */ - Bitmask obDone; /* Mask of all ORDER BY terms */ - Bitmask orderDistinctMask; /* Mask of all well-ordered loops */ - Bitmask ready; /* Mask of inner loops */ - - /* - ** We say the WhereLoop is "one-row" if it generates no more than one - ** row of output. A WhereLoop is one-row if all of the following are true: - ** (a) All index columns match with WHERE_COLUMN_EQ. - ** (b) The index is unique - ** Any WhereLoop with an WHERE_COLUMN_EQ constraint on the rowid is one-row. - ** Every one-row WhereLoop will have the WHERE_ONEROW bit set in wsFlags. - ** - ** We say the WhereLoop is "order-distinct" if the set of columns from - ** that WhereLoop that are in the ORDER BY clause are different for every - ** row of the WhereLoop. Every one-row WhereLoop is automatically - ** order-distinct. A WhereLoop that has no columns in the ORDER BY clause - ** is not order-distinct. To be order-distinct is not quite the same as being - ** UNIQUE since a UNIQUE column or index can have multiple rows that - ** are NULL and NULL values are equivalent for the purpose of order-distinct. - ** To be order-distinct, the columns must be UNIQUE and NOT NULL. - ** - ** The rowid for a table is always UNIQUE and NOT NULL so whenever the - ** rowid appears in the ORDER BY clause, the corresponding WhereLoop is - ** automatically order-distinct. 
-  */
-
-  assert( pOrderBy!=0 );
-  if( nLoop && OptimizationDisabled(db, SQLITE_OrderByIdxJoin) ) return 0;
-
-  nOrderBy = pOrderBy->nExpr;
-  testcase( nOrderBy==BMS-1 );
-  if( nOrderBy>BMS-1 ) return 0;  /* Cannot optimize overly large ORDER BYs */
-  isOrderDistinct = 1;
-  obDone = MASKBIT(nOrderBy)-1;
-  orderDistinctMask = 0;
-  ready = 0;
-  for(iLoop=0; isOrderDistinct && obSat<obDone && iLoop<=nLoop; iLoop++){
-    if( iLoop>0 ) ready |= pLoop->maskSelf;
-    pLoop = iLoop<nLoop ? pPath->aLoop[iLoop] : pLast;
-    if( pLoop->wsFlags & WHERE_VIRTUALTABLE ){
-      if( pLoop->u.vtab.isOrdered ) obSat = obDone;
-      break;
-    }
-    iCur = pWInfo->pTabList->a[pLoop->iTab].iCursor;
-
-    /* Mark off any ORDER BY term X that is a column in the table of
-    ** the current loop for which there is term in the WHERE
-    ** clause of the form X IS NULL or X=? that reference only outer
-    ** loops.
-    */
-    for(i=0; i<nOrderBy; i++){
-      if( MASKBIT(i) & obSat ) continue;
-      pOBExpr = sqlite3ExprSkipCollate(pOrderBy->a[i].pExpr);
-      if( pOBExpr->op!=TK_COLUMN ) continue;
-      if( pOBExpr->iTable!=iCur ) continue;
-      pTerm = findTerm(&pWInfo->sWC, iCur, pOBExpr->iColumn,
-                       ~ready, WO_EQ|WO_ISNULL, 0);
-      if( pTerm==0 ) continue;
-      if( (pTerm->eOperator&WO_EQ)!=0 && pOBExpr->iColumn>=0 ){
-        const char *z1, *z2;
-        pColl = sqlite3ExprCollSeq(pWInfo->pParse, pOrderBy->a[i].pExpr);
-        if( !pColl ) pColl = db->pDfltColl;
-        z1 = pColl->zName;
-        pColl = sqlite3ExprCollSeq(pWInfo->pParse, pTerm->pExpr);
-        if( !pColl ) pColl = db->pDfltColl;
-        z2 = pColl->zName;
-        if( sqlite3StrICmp(z1, z2)!=0 ) continue;
-      }
-      obSat |= MASKBIT(i);
-    }
-
-    if( (pLoop->wsFlags & WHERE_ONEROW)==0 ){
-      if( pLoop->wsFlags & WHERE_IPK ){
-        pIndex = 0;
-        nKeyCol = 0;
-        nColumn = 1;
-      }else if( (pIndex = pLoop->u.btree.pIndex)==0 || pIndex->bUnordered ){
-        return 0;
-      }else{
-        nKeyCol = pIndex->nKeyCol;
-        nColumn = pIndex->nColumn;
-        assert( nColumn==nKeyCol+1 || !HasRowid(pIndex->pTable) );
-        assert( pIndex->aiColumn[nColumn-1]==(-1) || !HasRowid(pIndex->pTable));
-        isOrderDistinct = pIndex->onError!=OE_None;
-      }
-
-      /* Loop through all columns of the index and deal with the ones
-      ** that are not constrained by == or IN.
-      */
-      rev = revSet = 0;
-      distinctColumns = 0;
-      for(j=0; j<nColumn; j++){
-        u8 bOnce;   /* True to run the ORDER BY search loop */
-
-        /* Skip over == and IS NULL terms */
-        if( j<pLoop->u.btree.nEq
-         && pLoop->u.btree.nSkip==0
-         && ((i = pLoop->aLTerm[j]->eOperator) & (WO_EQ|WO_ISNULL))!=0
-        ){
-          if( i & WO_ISNULL ){
-            testcase( isOrderDistinct );
-            isOrderDistinct = 0;
-          }
-          continue;
-        }
-
-        /* Get the column number in the table (iColumn) and sort order
-        ** (revIdx) for the j-th column of the index.
-        */
-        if( pIndex ){
-          iColumn = pIndex->aiColumn[j];
-          revIdx = pIndex->aSortOrder[j];
-          if( iColumn==pIndex->pTable->iPKey ) iColumn = -1;
-        }else{
-          iColumn = -1;
-          revIdx = 0;
-        }
-
-        /* An unconstrained column that might be NULL means that this
-        ** WhereLoop is not well-ordered
-        */
-        if( isOrderDistinct
-         && iColumn>=0
-         && j>=pLoop->u.btree.nEq
-         && pIndex->pTable->aCol[iColumn].notNull==0
-        ){
-          isOrderDistinct = 0;
-        }
-
-        /* Find the ORDER BY term that corresponds to the j-th column
-        ** of the index and mark that ORDER BY term off
-        */
-        bOnce = 1;
-        isMatch = 0;
-        for(i=0; bOnce && i<nOrderBy; i++){
-          pOBExpr = sqlite3ExprSkipCollate(pOrderBy->a[i].pExpr);
-          testcase( wctrlFlags & WHERE_GROUPBY );
-          testcase( wctrlFlags & WHERE_DISTINCTBY );
-          if( (wctrlFlags & (WHERE_GROUPBY|WHERE_DISTINCTBY))==0 ) bOnce = 0;
-          if( pOBExpr->op!=TK_COLUMN ) continue;
-          if( pOBExpr->iTable!=iCur ) continue;
-          if( pOBExpr->iColumn!=iColumn ) continue;
-          if( iColumn>=0 ){
-            pColl = sqlite3ExprCollSeq(pWInfo->pParse, pOrderBy->a[i].pExpr);
-            if( !pColl ) pColl = db->pDfltColl;
-            if( sqlite3StrICmp(pColl->zName, pIndex->azColl[j])!=0 ) continue;
-          }
-          isMatch = 1;
-          break;
-        }
-        if( isMatch && (pWInfo->wctrlFlags & WHERE_GROUPBY)==0 ){
-          /* Make sure the sort order is compatible in an ORDER BY clause.
-          ** Sort order is irrelevant for a GROUP BY clause. */
-          if( revSet ){
-            if( (rev ^ revIdx)!=pOrderBy->a[i].sortOrder ) isMatch = 0;
-          }else{
-            rev = revIdx ^ pOrderBy->a[i].sortOrder;
-            if( rev ) *pRevMask |= MASKBIT(iLoop);
-            revSet = 1;
-          }
-        }
-        if( isMatch ){
-          if( iColumn<0 ){
-            testcase( distinctColumns==0 );
-            distinctColumns = 1;
-          }
-          obSat |= MASKBIT(i);
-        }else{
-          /* No match found */
-          if( j==0 || j<nKeyCol ){
-            testcase( isOrderDistinct!=0 );
-            isOrderDistinct = 0;
-          }
-          break;
-        }
-      } /* end Loop over all index columns */
-      if( distinctColumns ){
-        testcase( isOrderDistinct==0 );
-        isOrderDistinct = 1;
-      }
-    } /* end-if not one-row */
-
-    /* Mark off any other ORDER BY terms that reference pLoop */
-    if( isOrderDistinct ){
-      orderDistinctMask |= pLoop->maskSelf;
-      for(i=0; i<nOrderBy; i++){
-        Expr *p;
-        Bitmask mTerm;
-        if( MASKBIT(i) & obSat ) continue;
-        p = pOrderBy->a[i].pExpr;
-        mTerm = exprTableUsage(&pWInfo->sMaskSet,p);
-        if( mTerm==0 && !sqlite3ExprIsConstant(p) ) continue;
-        if( (mTerm&~orderDistinctMask)==0 ){
-          obSat |= MASKBIT(i);
-        }
-      }
-    }
-  } /* End the loop over all WhereLoops from outer-most down to inner-most */
-  if( obSat==obDone ) return (i8)nOrderBy;
-  if( !isOrderDistinct ){
-    for(i=nOrderBy-1; i>0; i--){
-      Bitmask m = MASKBIT(i) - 1;
-      if( (obSat&m)==m ) return i;
-    }
-    return 0;
-  }
-  return -1;
-}
-
-
-/*
-** If the WHERE_GROUPBY flag is set in the mask passed to sqlite3WhereBegin(),
-** the planner assumes that the specified pOrderBy list is actually a GROUP
-** BY clause - and so any order that groups rows as required satisfies the
-** request.
-**
-** Normally, in this case it is not possible for the caller to determine
-** whether or not the rows are really being delivered in sorted order, or
-** just in some other order that provides the required grouping. However,
-** if the WHERE_SORTBYGROUP flag is also passed to sqlite3WhereBegin(), then
-** this function may be called on the returned WhereInfo object. It returns
-** true if the rows really will be sorted in the specified order, or false
-** otherwise.
-** -** For example, assuming: -** -** CREATE INDEX i1 ON t1(x, Y); -** -** then -** -** SELECT * FROM t1 GROUP BY x,y ORDER BY x,y; -- IsSorted()==1 -** SELECT * FROM t1 GROUP BY y,x ORDER BY y,x; -- IsSorted()==0 -*/ -SQLITE_PRIVATE int sqlite3WhereIsSorted(WhereInfo *pWInfo){ - assert( pWInfo->wctrlFlags & WHERE_GROUPBY ); - assert( pWInfo->wctrlFlags & WHERE_SORTBYGROUP ); - return pWInfo->sorted; -} - -#ifdef WHERETRACE_ENABLED -/* For debugging use only: */ -static const char *wherePathName(WherePath *pPath, int nLoop, WhereLoop *pLast){ - static char zName[65]; - int i; - for(i=0; iaLoop[i]->cId; } - if( pLast ) zName[i++] = pLast->cId; - zName[i] = 0; - return zName; -} -#endif - -/* -** Given the list of WhereLoop objects at pWInfo->pLoops, this routine -** attempts to find the lowest cost path that visits each WhereLoop -** once. This path is then loaded into the pWInfo->a[].pWLoop fields. -** -** Assume that the total number of output rows that will need to be sorted -** will be nRowEst (in the 10*log2 representation). Or, ignore sorting -** costs if nRowEst==0. -** -** Return SQLITE_OK on success or SQLITE_NOMEM of a memory allocation -** error occurs. -*/ -static int wherePathSolver(WhereInfo *pWInfo, LogEst nRowEst){ - int mxChoice; /* Maximum number of simultaneous paths tracked */ - int nLoop; /* Number of terms in the join */ - Parse *pParse; /* Parsing context */ - sqlite3 *db; /* The database connection */ - int iLoop; /* Loop counter over the terms of the join */ - int ii, jj; /* Loop counters */ - int mxI = 0; /* Index of next entry to replace */ - int nOrderBy; /* Number of ORDER BY clause terms */ - LogEst rCost; /* Cost of a path */ - LogEst nOut; /* Number of outputs */ - LogEst mxCost = 0; /* Maximum cost of a set of paths */ - LogEst mxOut = 0; /* Maximum nOut value on the set of paths */ - int nTo, nFrom; /* Number of valid entries in aTo[] and aFrom[] */ - WherePath *aFrom; /* All nFrom paths at the previous level */ - WherePath *aTo; /* The nTo best paths at the current level */ - WherePath *pFrom; /* An element of aFrom[] that we are working on */ - WherePath *pTo; /* An element of aTo[] that we are working on */ - WhereLoop *pWLoop; /* One of the WhereLoop objects */ - WhereLoop **pX; /* Used to divy up the pSpace memory */ - char *pSpace; /* Temporary memory used by this routine */ - - pParse = pWInfo->pParse; - db = pParse->db; - nLoop = pWInfo->nLevel; - /* TUNING: For simple queries, only the best path is tracked. - ** For 2-way joins, the 5 best paths are followed. - ** For joins of 3 or more tables, track the 10 best paths */ - mxChoice = (nLoop<=1) ? 1 : (nLoop==2 ? 5 : 10); - assert( nLoop<=pWInfo->pTabList->nSrc ); - WHERETRACE(0x002, ("---- begin solver\n")); - - /* Allocate and initialize space for aTo and aFrom */ - ii = (sizeof(WherePath)+sizeof(WhereLoop*)*nLoop)*mxChoice*2; - pSpace = sqlite3DbMallocRaw(db, ii); - if( pSpace==0 ) return SQLITE_NOMEM; - aTo = (WherePath*)pSpace; - aFrom = aTo+mxChoice; - memset(aFrom, 0, sizeof(aFrom[0])); - pX = (WhereLoop**)(aFrom+mxChoice); - for(ii=mxChoice*2, pFrom=aTo; ii>0; ii--, pFrom++, pX += nLoop){ - pFrom->aLoop = pX; - } - - /* Seed the search with a single WherePath containing zero WhereLoops. - ** - ** TUNING: Do not let the number of iterations go above 25. If the cost - ** of computing an automatic index is not paid back within the first 25 - ** rows, then do not use the automatic index. 
*/ - aFrom[0].nRow = MIN(pParse->nQueryLoop, 46); assert( 46==sqlite3LogEst(25) ); - nFrom = 1; - - /* Precompute the cost of sorting the final result set, if the caller - ** to sqlite3WhereBegin() was concerned about sorting */ - if( pWInfo->pOrderBy==0 || nRowEst==0 ){ - aFrom[0].isOrdered = 0; - nOrderBy = 0; - }else{ - aFrom[0].isOrdered = nLoop>0 ? -1 : 1; - nOrderBy = pWInfo->pOrderBy->nExpr; - } - - /* Compute successively longer WherePaths using the previous generation - ** of WherePaths as the basis for the next. Keep track of the mxChoice - ** best paths at each generation */ - for(iLoop=0; iLooppLoops; pWLoop; pWLoop=pWLoop->pNextLoop){ - Bitmask maskNew; - Bitmask revMask = 0; - i8 isOrdered = pFrom->isOrdered; - if( (pWLoop->prereq & ~pFrom->maskLoop)!=0 ) continue; - if( (pWLoop->maskSelf & pFrom->maskLoop)!=0 ) continue; - /* At this point, pWLoop is a candidate to be the next loop. - ** Compute its cost */ - rCost = sqlite3LogEstAdd(pWLoop->rSetup,pWLoop->rRun + pFrom->nRow); - rCost = sqlite3LogEstAdd(rCost, pFrom->rCost); - nOut = pFrom->nRow + pWLoop->nOut; - maskNew = pFrom->maskLoop | pWLoop->maskSelf; - if( isOrdered<0 ){ - isOrdered = wherePathSatisfiesOrderBy(pWInfo, - pWInfo->pOrderBy, pFrom, pWInfo->wctrlFlags, - iLoop, pWLoop, &revMask); - if( isOrdered>=0 && isOrdered0 && 66==sqlite3LogEst(100) ); - rScale = sqlite3LogEst((nOrderBy-isOrdered)*100/nOrderBy) - 66; - rSortCost = nRowEst + estLog(nRowEst) + rScale + 16; - - /* TUNING: The cost of implementing DISTINCT using a B-TREE is - ** similar but with a larger constant of proportionality. - ** Multiply by an additional factor of 3.0. */ - if( pWInfo->wctrlFlags & WHERE_WANT_DISTINCT ){ - rSortCost += 16; - } - WHERETRACE(0x002, - ("---- sort cost=%-3d (%d/%d) increases cost %3d to %-3d\n", - rSortCost, (nOrderBy-isOrdered), nOrderBy, rCost, - sqlite3LogEstAdd(rCost,rSortCost))); - rCost = sqlite3LogEstAdd(rCost, rSortCost); - } - }else{ - revMask = pFrom->revLoop; - } - /* Check to see if pWLoop should be added to the mxChoice best so far */ - for(jj=0, pTo=aTo; jjmaskLoop==maskNew - && ((pTo->isOrdered^isOrdered)&80)==0 - && ((pTo->rCost<=rCost && pTo->nRow<=nOut) || - (pTo->rCost>=rCost && pTo->nRow>=nOut)) - ){ - testcase( jj==nTo-1 ); - break; - } - } - if( jj>=nTo ){ - if( nTo>=mxChoice && rCost>=mxCost ){ -#ifdef WHERETRACE_ENABLED /* 0x4 */ - if( sqlite3WhereTrace&0x4 ){ - sqlite3DebugPrintf("Skip %s cost=%-3d,%3d order=%c\n", - wherePathName(pFrom, iLoop, pWLoop), rCost, nOut, - isOrdered>=0 ? isOrdered+'0' : '?'); - } -#endif - continue; - } - /* Add a new Path to the aTo[] set */ - if( nTo=0 ? isOrdered+'0' : '?'); - } -#endif - }else{ - if( pTo->rCost<=rCost && pTo->nRow<=nOut ){ -#ifdef WHERETRACE_ENABLED /* 0x4 */ - if( sqlite3WhereTrace&0x4 ){ - sqlite3DebugPrintf( - "Skip %s cost=%-3d,%3d order=%c", - wherePathName(pFrom, iLoop, pWLoop), rCost, nOut, - isOrdered>=0 ? isOrdered+'0' : '?'); - sqlite3DebugPrintf(" vs %s cost=%-3d,%d order=%c\n", - wherePathName(pTo, iLoop+1, 0), pTo->rCost, pTo->nRow, - pTo->isOrdered>=0 ? pTo->isOrdered+'0' : '?'); - } -#endif - testcase( pTo->rCost==rCost ); - continue; - } - testcase( pTo->rCost==rCost+1 ); - /* A new and better score for a previously created equivalent path */ -#ifdef WHERETRACE_ENABLED /* 0x4 */ - if( sqlite3WhereTrace&0x4 ){ - sqlite3DebugPrintf( - "Update %s cost=%-3d,%3d order=%c", - wherePathName(pFrom, iLoop, pWLoop), rCost, nOut, - isOrdered>=0 ? 
isOrdered+'0' : '?'); - sqlite3DebugPrintf(" was %s cost=%-3d,%3d order=%c\n", - wherePathName(pTo, iLoop+1, 0), pTo->rCost, pTo->nRow, - pTo->isOrdered>=0 ? pTo->isOrdered+'0' : '?'); - } -#endif - } - /* pWLoop is a winner. Add it to the set of best so far */ - pTo->maskLoop = pFrom->maskLoop | pWLoop->maskSelf; - pTo->revLoop = revMask; - pTo->nRow = nOut; - pTo->rCost = rCost; - pTo->isOrdered = isOrdered; - memcpy(pTo->aLoop, pFrom->aLoop, sizeof(WhereLoop*)*iLoop); - pTo->aLoop[iLoop] = pWLoop; - if( nTo>=mxChoice ){ - mxI = 0; - mxCost = aTo[0].rCost; - mxOut = aTo[0].nRow; - for(jj=1, pTo=&aTo[1]; jjrCost>mxCost || (pTo->rCost==mxCost && pTo->nRow>mxOut) ){ - mxCost = pTo->rCost; - mxOut = pTo->nRow; - mxI = jj; - } - } - } - } - } - -#ifdef WHERETRACE_ENABLED /* >=2 */ - if( sqlite3WhereTrace>=2 ){ - sqlite3DebugPrintf("---- after round %d ----\n", iLoop); - for(ii=0, pTo=aTo; iirCost, pTo->nRow, - pTo->isOrdered>=0 ? (pTo->isOrdered+'0') : '?'); - if( pTo->isOrdered>0 ){ - sqlite3DebugPrintf(" rev=0x%llx\n", pTo->revLoop); - }else{ - sqlite3DebugPrintf("\n"); - } - } - } -#endif - - /* Swap the roles of aFrom and aTo for the next generation */ - pFrom = aTo; - aTo = aFrom; - aFrom = pFrom; - nFrom = nTo; - } - - if( nFrom==0 ){ - sqlite3ErrorMsg(pParse, "no query solution"); - sqlite3DbFree(db, pSpace); - return SQLITE_ERROR; - } - - /* Find the lowest cost path. pFrom will be left pointing to that path */ - pFrom = aFrom; - for(ii=1; iirCost>aFrom[ii].rCost ) pFrom = &aFrom[ii]; - } - assert( pWInfo->nLevel==nLoop ); - /* Load the lowest cost path into pWInfo */ - for(iLoop=0; iLoopa + iLoop; - pLevel->pWLoop = pWLoop = pFrom->aLoop[iLoop]; - pLevel->iFrom = pWLoop->iTab; - pLevel->iTabCur = pWInfo->pTabList->a[pLevel->iFrom].iCursor; - } - if( (pWInfo->wctrlFlags & WHERE_WANT_DISTINCT)!=0 - && (pWInfo->wctrlFlags & WHERE_DISTINCTBY)==0 - && pWInfo->eDistinct==WHERE_DISTINCT_NOOP - && nRowEst - ){ - Bitmask notUsed; - int rc = wherePathSatisfiesOrderBy(pWInfo, pWInfo->pResultSet, pFrom, - WHERE_DISTINCTBY, nLoop-1, pFrom->aLoop[nLoop-1], ¬Used); - if( rc==pWInfo->pResultSet->nExpr ){ - pWInfo->eDistinct = WHERE_DISTINCT_ORDERED; - } - } - if( pWInfo->pOrderBy ){ - if( pWInfo->wctrlFlags & WHERE_DISTINCTBY ){ - if( pFrom->isOrdered==pWInfo->pOrderBy->nExpr ){ - pWInfo->eDistinct = WHERE_DISTINCT_ORDERED; - } - }else{ - pWInfo->nOBSat = pFrom->isOrdered; - if( pWInfo->nOBSat<0 ) pWInfo->nOBSat = 0; - pWInfo->revMask = pFrom->revLoop; - } - if( (pWInfo->wctrlFlags & WHERE_SORTBYGROUP) - && pWInfo->nOBSat==pWInfo->pOrderBy->nExpr - ){ - Bitmask notUsed = 0; - int nOrder = wherePathSatisfiesOrderBy(pWInfo, pWInfo->pOrderBy, - pFrom, 0, nLoop-1, pFrom->aLoop[nLoop-1], ¬Used - ); - assert( pWInfo->sorted==0 ); - pWInfo->sorted = (nOrder==pWInfo->pOrderBy->nExpr); - } - } - - - pWInfo->nRowOut = pFrom->nRow; - - /* Free temporary memory and return success */ - sqlite3DbFree(db, pSpace); - return SQLITE_OK; -} - -/* -** Most queries use only a single table (they are not joins) and have -** simple == constraints against indexed fields. This routine attempts -** to plan those simple cases using much less ceremony than the -** general-purpose query planner, and thereby yield faster sqlite3_prepare() -** times for the common case. -** -** Return non-zero on success, if this query can be handled by this -** no-frills query planner. Return zero if this query needs the -** general-purpose query planner. 
-*/ -static int whereShortCut(WhereLoopBuilder *pBuilder){ - WhereInfo *pWInfo; - struct SrcList_item *pItem; - WhereClause *pWC; - WhereTerm *pTerm; - WhereLoop *pLoop; - int iCur; - int j; - Table *pTab; - Index *pIdx; - - pWInfo = pBuilder->pWInfo; - if( pWInfo->wctrlFlags & WHERE_FORCE_TABLE ) return 0; - assert( pWInfo->pTabList->nSrc>=1 ); - pItem = pWInfo->pTabList->a; - pTab = pItem->pTab; - if( IsVirtual(pTab) ) return 0; - if( pItem->zIndex ) return 0; - iCur = pItem->iCursor; - pWC = &pWInfo->sWC; - pLoop = pBuilder->pNew; - pLoop->wsFlags = 0; - pLoop->u.btree.nSkip = 0; - pTerm = findTerm(pWC, iCur, -1, 0, WO_EQ, 0); - if( pTerm ){ - pLoop->wsFlags = WHERE_COLUMN_EQ|WHERE_IPK|WHERE_ONEROW; - pLoop->aLTerm[0] = pTerm; - pLoop->nLTerm = 1; - pLoop->u.btree.nEq = 1; - /* TUNING: Cost of a rowid lookup is 10 */ - pLoop->rRun = 33; /* 33==sqlite3LogEst(10) */ - }else{ - for(pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext){ - assert( pLoop->aLTermSpace==pLoop->aLTerm ); - assert( ArraySize(pLoop->aLTermSpace)==4 ); - if( pIdx->onError==OE_None - || pIdx->pPartIdxWhere!=0 - || pIdx->nKeyCol>ArraySize(pLoop->aLTermSpace) - ) continue; - for(j=0; jnKeyCol; j++){ - pTerm = findTerm(pWC, iCur, pIdx->aiColumn[j], 0, WO_EQ, pIdx); - if( pTerm==0 ) break; - pLoop->aLTerm[j] = pTerm; - } - if( j!=pIdx->nKeyCol ) continue; - pLoop->wsFlags = WHERE_COLUMN_EQ|WHERE_ONEROW|WHERE_INDEXED; - if( pIdx->isCovering || (pItem->colUsed & ~columnsInIndex(pIdx))==0 ){ - pLoop->wsFlags |= WHERE_IDX_ONLY; - } - pLoop->nLTerm = j; - pLoop->u.btree.nEq = j; - pLoop->u.btree.pIndex = pIdx; - /* TUNING: Cost of a unique index lookup is 15 */ - pLoop->rRun = 39; /* 39==sqlite3LogEst(15) */ - break; - } - } - if( pLoop->wsFlags ){ - pLoop->nOut = (LogEst)1; - pWInfo->a[0].pWLoop = pLoop; - pLoop->maskSelf = getMask(&pWInfo->sMaskSet, iCur); - pWInfo->a[0].iTabCur = iCur; - pWInfo->nRowOut = 1; - if( pWInfo->pOrderBy ) pWInfo->nOBSat = pWInfo->pOrderBy->nExpr; - if( pWInfo->wctrlFlags & WHERE_WANT_DISTINCT ){ - pWInfo->eDistinct = WHERE_DISTINCT_UNIQUE; - } -#ifdef SQLITE_DEBUG - pLoop->cId = '0'; -#endif - return 1; - } - return 0; -} - -/* -** Generate the beginning of the loop used for WHERE clause processing. -** The return value is a pointer to an opaque structure that contains -** information needed to terminate the loop. Later, the calling routine -** should invoke sqlite3WhereEnd() with the return value of this function -** in order to complete the WHERE clause processing. -** -** If an error occurs, this routine returns NULL. -** -** The basic idea is to do a nested loop, one loop for each table in -** the FROM clause of a select. (INSERT and UPDATE statements are the -** same as a SELECT with only a single table in the FROM clause.) For -** example, if the SQL is this: -** -** SELECT * FROM t1, t2, t3 WHERE ...; -** -** Then the code generated is conceptually like the following: -** -** foreach row1 in t1 do \ Code generated -** foreach row2 in t2 do |-- by sqlite3WhereBegin() -** foreach row3 in t3 do / -** ... -** end \ Code generated -** end |-- by sqlite3WhereEnd() -** end / -** -** Note that the loops might not be nested in the order in which they -** appear in the FROM clause if a different order is better able to make -** use of indices. Note also that when the IN operator appears in -** the WHERE clause, it might result in additional nested loops for -** scanning through all values on the right-hand side of the IN. -** -** There are Btree cursors associated with each table. 
t1 uses cursor -** number pTabList->a[0].iCursor. t2 uses the cursor pTabList->a[1].iCursor. -** And so forth. This routine generates code to open those VDBE cursors -** and sqlite3WhereEnd() generates the code to close them. -** -** The code that sqlite3WhereBegin() generates leaves the cursors named -** in pTabList pointing at their appropriate entries. The [...] code -** can use OP_Column and OP_Rowid opcodes on these cursors to extract -** data from the various tables of the loop. -** -** If the WHERE clause is empty, the foreach loops must each scan their -** entire tables. Thus a three-way join is an O(N^3) operation. But if -** the tables have indices and there are terms in the WHERE clause that -** refer to those indices, a complete table scan can be avoided and the -** code will run much faster. Most of the work of this routine is checking -** to see if there are indices that can be used to speed up the loop. -** -** Terms of the WHERE clause are also used to limit which rows actually -** make it to the "..." in the middle of the loop. After each "foreach", -** terms of the WHERE clause that use only terms in that loop and outer -** loops are evaluated and if false a jump is made around all subsequent -** inner loops (or around the "..." if the test occurs within the inner- -** most loop) -** -** OUTER JOINS -** -** An outer join of tables t1 and t2 is conceptally coded as follows: -** -** foreach row1 in t1 do -** flag = 0 -** foreach row2 in t2 do -** start: -** ... -** flag = 1 -** end -** if flag==0 then -** move the row2 cursor to a null row -** goto start -** fi -** end -** -** ORDER BY CLAUSE PROCESSING -** -** pOrderBy is a pointer to the ORDER BY clause (or the GROUP BY clause -** if the WHERE_GROUPBY flag is set in wctrlFlags) of a SELECT statement -** if there is one. If there is no ORDER BY clause or if this routine -** is called from an UPDATE or DELETE statement, then pOrderBy is NULL. -** -** The iIdxCur parameter is the cursor number of an index. If -** WHERE_ONETABLE_ONLY is set, iIdxCur is the cursor number of an index -** to use for OR clause processing. The WHERE clause should use this -** specific cursor. If WHERE_ONEPASS_DESIRED is set, then iIdxCur is -** the first cursor in an array of cursors for all indices. iIdxCur should -** be used to compute the appropriate cursor depending on which index is -** used. -*/ -SQLITE_PRIVATE WhereInfo *sqlite3WhereBegin( - Parse *pParse, /* The parser context */ - SrcList *pTabList, /* FROM clause: A list of all tables to be scanned */ - Expr *pWhere, /* The WHERE clause */ - ExprList *pOrderBy, /* An ORDER BY (or GROUP BY) clause, or NULL */ - ExprList *pResultSet, /* Result set of the query */ - u16 wctrlFlags, /* One of the WHERE_* flags defined in sqliteInt.h */ - int iIdxCur /* If WHERE_ONETABLE_ONLY is set, index cursor number */ -){ - int nByteWInfo; /* Num. 
bytes allocated for WhereInfo struct */ - int nTabList; /* Number of elements in pTabList */ - WhereInfo *pWInfo; /* Will become the return value of this function */ - Vdbe *v = pParse->pVdbe; /* The virtual database engine */ - Bitmask notReady; /* Cursors that are not yet positioned */ - WhereLoopBuilder sWLB; /* The WhereLoop builder */ - WhereMaskSet *pMaskSet; /* The expression mask set */ - WhereLevel *pLevel; /* A single level in pWInfo->a[] */ - WhereLoop *pLoop; /* Pointer to a single WhereLoop object */ - int ii; /* Loop counter */ - sqlite3 *db; /* Database connection */ - int rc; /* Return code */ - - - /* Variable initialization */ - db = pParse->db; - memset(&sWLB, 0, sizeof(sWLB)); - - /* An ORDER/GROUP BY clause of more than 63 terms cannot be optimized */ - testcase( pOrderBy && pOrderBy->nExpr==BMS-1 ); - if( pOrderBy && pOrderBy->nExpr>=BMS ) pOrderBy = 0; - sWLB.pOrderBy = pOrderBy; - - /* Disable the DISTINCT optimization if SQLITE_DistinctOpt is set via - ** sqlite3_test_ctrl(SQLITE_TESTCTRL_OPTIMIZATIONS,...) */ - if( OptimizationDisabled(db, SQLITE_DistinctOpt) ){ - wctrlFlags &= ~WHERE_WANT_DISTINCT; - } - - /* The number of tables in the FROM clause is limited by the number of - ** bits in a Bitmask - */ - testcase( pTabList->nSrc==BMS ); - if( pTabList->nSrc>BMS ){ - sqlite3ErrorMsg(pParse, "at most %d tables in a join", BMS); - return 0; - } - - /* This function normally generates a nested loop for all tables in - ** pTabList. But if the WHERE_ONETABLE_ONLY flag is set, then we should - ** only generate code for the first table in pTabList and assume that - ** any cursors associated with subsequent tables are uninitialized. - */ - nTabList = (wctrlFlags & WHERE_ONETABLE_ONLY) ? 1 : pTabList->nSrc; - - /* Allocate and initialize the WhereInfo structure that will become the - ** return value. A single allocation is used to store the WhereInfo - ** struct, the contents of WhereInfo.a[], the WhereClause structure - ** and the WhereMaskSet structure. Since WhereClause contains an 8-byte - ** field (type Bitmask) it must be aligned on an 8-byte boundary on - ** some architectures. Hence the ROUND8() below. - */ - nByteWInfo = ROUND8(sizeof(WhereInfo)+(nTabList-1)*sizeof(WhereLevel)); - pWInfo = sqlite3DbMallocZero(db, nByteWInfo + sizeof(WhereLoop)); - if( db->mallocFailed ){ - sqlite3DbFree(db, pWInfo); - pWInfo = 0; - goto whereBeginError; - } - pWInfo->aiCurOnePass[0] = pWInfo->aiCurOnePass[1] = -1; - pWInfo->nLevel = nTabList; - pWInfo->pParse = pParse; - pWInfo->pTabList = pTabList; - pWInfo->pOrderBy = pOrderBy; - pWInfo->pResultSet = pResultSet; - pWInfo->iBreak = pWInfo->iContinue = sqlite3VdbeMakeLabel(v); - pWInfo->wctrlFlags = wctrlFlags; - pWInfo->savedNQueryLoop = pParse->nQueryLoop; - pMaskSet = &pWInfo->sMaskSet; - sWLB.pWInfo = pWInfo; - sWLB.pWC = &pWInfo->sWC; - sWLB.pNew = (WhereLoop*)(((char*)pWInfo)+nByteWInfo); - assert( EIGHT_BYTE_ALIGNMENT(sWLB.pNew) ); - whereLoopInit(sWLB.pNew); -#ifdef SQLITE_DEBUG - sWLB.pNew->cId = '*'; -#endif - - /* Split the WHERE clause into separate subexpressions where each - ** subexpression is separated by an AND operator. - */ - initMaskSet(pMaskSet); - whereClauseInit(&pWInfo->sWC, pWInfo); - whereSplit(&pWInfo->sWC, pWhere, TK_AND); - - /* Special case: a WHERE clause that is constant. Evaluate the - ** expression and either jump over all of the code or fall thru. 
- */ - for(ii=0; iinTerm; ii++){ - if( nTabList==0 || sqlite3ExprIsConstantNotJoin(sWLB.pWC->a[ii].pExpr) ){ - sqlite3ExprIfFalse(pParse, sWLB.pWC->a[ii].pExpr, pWInfo->iBreak, - SQLITE_JUMPIFNULL); - sWLB.pWC->a[ii].wtFlags |= TERM_CODED; - } - } - - /* Special case: No FROM clause - */ - if( nTabList==0 ){ - if( pOrderBy ) pWInfo->nOBSat = pOrderBy->nExpr; - if( wctrlFlags & WHERE_WANT_DISTINCT ){ - pWInfo->eDistinct = WHERE_DISTINCT_UNIQUE; - } - } - - /* Assign a bit from the bitmask to every term in the FROM clause. - ** - ** When assigning bitmask values to FROM clause cursors, it must be - ** the case that if X is the bitmask for the N-th FROM clause term then - ** the bitmask for all FROM clause terms to the left of the N-th term - ** is (X-1). An expression from the ON clause of a LEFT JOIN can use - ** its Expr.iRightJoinTable value to find the bitmask of the right table - ** of the join. Subtracting one from the right table bitmask gives a - ** bitmask for all tables to the left of the join. Knowing the bitmask - ** for all tables to the left of a left join is important. Ticket #3015. - ** - ** Note that bitmasks are created for all pTabList->nSrc tables in - ** pTabList, not just the first nTabList tables. nTabList is normally - ** equal to pTabList->nSrc but might be shortened to 1 if the - ** WHERE_ONETABLE_ONLY flag is set. - */ - for(ii=0; iinSrc; ii++){ - createMask(pMaskSet, pTabList->a[ii].iCursor); - } -#ifndef NDEBUG - { - Bitmask toTheLeft = 0; - for(ii=0; iinSrc; ii++){ - Bitmask m = getMask(pMaskSet, pTabList->a[ii].iCursor); - assert( (m-1)==toTheLeft ); - toTheLeft |= m; - } - } -#endif - - /* Analyze all of the subexpressions. Note that exprAnalyze() might - ** add new virtual terms onto the end of the WHERE clause. We do not - ** want to analyze these virtual terms, so start analyzing at the end - ** and work forward so that the added virtual terms are never processed. - */ - exprAnalyzeAll(pTabList, &pWInfo->sWC); - if( db->mallocFailed ){ - goto whereBeginError; - } - - if( wctrlFlags & WHERE_WANT_DISTINCT ){ - if( isDistinctRedundant(pParse, pTabList, &pWInfo->sWC, pResultSet) ){ - /* The DISTINCT marking is pointless. Ignore it. 
*/ - pWInfo->eDistinct = WHERE_DISTINCT_UNIQUE; - }else if( pOrderBy==0 ){ - /* Try to ORDER BY the result set to make distinct processing easier */ - pWInfo->wctrlFlags |= WHERE_DISTINCTBY; - pWInfo->pOrderBy = pResultSet; - } - } - - /* Construct the WhereLoop objects */ - WHERETRACE(0xffff,("*** Optimizer Start ***\n")); - /* Display all terms of the WHERE clause */ -#if defined(WHERETRACE_ENABLED) && defined(SQLITE_ENABLE_TREE_EXPLAIN) - if( sqlite3WhereTrace & 0x100 ){ - int i; - Vdbe *v = pParse->pVdbe; - sqlite3ExplainBegin(v); - for(i=0; inTerm; i++){ - sqlite3ExplainPrintf(v, "#%-2d ", i); - sqlite3ExplainPush(v); - whereExplainTerm(v, &sWLB.pWC->a[i]); - sqlite3ExplainPop(v); - sqlite3ExplainNL(v); - } - sqlite3ExplainFinish(v); - sqlite3DebugPrintf("%s", sqlite3VdbeExplanation(v)); - } -#endif - if( nTabList!=1 || whereShortCut(&sWLB)==0 ){ - rc = whereLoopAddAll(&sWLB); - if( rc ) goto whereBeginError; - - /* Display all of the WhereLoop objects if wheretrace is enabled */ -#ifdef WHERETRACE_ENABLED /* !=0 */ - if( sqlite3WhereTrace ){ - WhereLoop *p; - int i; - static char zLabel[] = "0123456789abcdefghijklmnopqrstuvwyxz" - "ABCDEFGHIJKLMNOPQRSTUVWYXZ"; - for(p=pWInfo->pLoops, i=0; p; p=p->pNextLoop, i++){ - p->cId = zLabel[i%sizeof(zLabel)]; - whereLoopPrint(p, sWLB.pWC); - } - } -#endif - - wherePathSolver(pWInfo, 0); - if( db->mallocFailed ) goto whereBeginError; - if( pWInfo->pOrderBy ){ - wherePathSolver(pWInfo, pWInfo->nRowOut+1); - if( db->mallocFailed ) goto whereBeginError; - } - } - if( pWInfo->pOrderBy==0 && (db->flags & SQLITE_ReverseOrder)!=0 ){ - pWInfo->revMask = (Bitmask)(-1); - } - if( pParse->nErr || NEVER(db->mallocFailed) ){ - goto whereBeginError; - } -#ifdef WHERETRACE_ENABLED /* !=0 */ - if( sqlite3WhereTrace ){ - int ii; - sqlite3DebugPrintf("---- Solution nRow=%d", pWInfo->nRowOut); - if( pWInfo->nOBSat>0 ){ - sqlite3DebugPrintf(" ORDERBY=%d,0x%llx", pWInfo->nOBSat, pWInfo->revMask); - } - switch( pWInfo->eDistinct ){ - case WHERE_DISTINCT_UNIQUE: { - sqlite3DebugPrintf(" DISTINCT=unique"); - break; - } - case WHERE_DISTINCT_ORDERED: { - sqlite3DebugPrintf(" DISTINCT=ordered"); - break; - } - case WHERE_DISTINCT_UNORDERED: { - sqlite3DebugPrintf(" DISTINCT=unordered"); - break; - } - } - sqlite3DebugPrintf("\n"); - for(ii=0; iinLevel; ii++){ - whereLoopPrint(pWInfo->a[ii].pWLoop, sWLB.pWC); - } - } -#endif - /* Attempt to omit tables from the join that do not effect the result */ - if( pWInfo->nLevel>=2 - && pResultSet!=0 - && OptimizationEnabled(db, SQLITE_OmitNoopJoin) - ){ - Bitmask tabUsed = exprListTableUsage(pMaskSet, pResultSet); - if( sWLB.pOrderBy ) tabUsed |= exprListTableUsage(pMaskSet, sWLB.pOrderBy); - while( pWInfo->nLevel>=2 ){ - WhereTerm *pTerm, *pEnd; - pLoop = pWInfo->a[pWInfo->nLevel-1].pWLoop; - if( (pWInfo->pTabList->a[pLoop->iTab].jointype & JT_LEFT)==0 ) break; - if( (wctrlFlags & WHERE_WANT_DISTINCT)==0 - && (pLoop->wsFlags & WHERE_ONEROW)==0 - ){ - break; - } - if( (tabUsed & pLoop->maskSelf)!=0 ) break; - pEnd = sWLB.pWC->a + sWLB.pWC->nTerm; - for(pTerm=sWLB.pWC->a; pTermprereqAll & pLoop->maskSelf)!=0 - && !ExprHasProperty(pTerm->pExpr, EP_FromJoin) - ){ - break; - } - } - if( pTerm drop loop %c not used\n", pLoop->cId)); - pWInfo->nLevel--; - nTabList--; - } - } - WHERETRACE(0xffff,("*** Optimizer Finished ***\n")); - pWInfo->pParse->nQueryLoop += pWInfo->nRowOut; - - /* If the caller is an UPDATE or DELETE statement that is requesting - ** to use a one-pass algorithm, determine if this is appropriate. 
- ** The one-pass algorithm only works if the WHERE clause constrains - ** the statement to update a single row. - */ - assert( (wctrlFlags & WHERE_ONEPASS_DESIRED)==0 || pWInfo->nLevel==1 ); - if( (wctrlFlags & WHERE_ONEPASS_DESIRED)!=0 - && (pWInfo->a[0].pWLoop->wsFlags & WHERE_ONEROW)!=0 ){ - pWInfo->okOnePass = 1; - if( HasRowid(pTabList->a[0].pTab) ){ - pWInfo->a[0].pWLoop->wsFlags &= ~WHERE_IDX_ONLY; - } - } - - /* Open all tables in the pTabList and any indices selected for - ** searching those tables. - */ - notReady = ~(Bitmask)0; - for(ii=0, pLevel=pWInfo->a; iia[pLevel->iFrom]; - pTab = pTabItem->pTab; - iDb = sqlite3SchemaToIndex(db, pTab->pSchema); - pLoop = pLevel->pWLoop; - if( (pTab->tabFlags & TF_Ephemeral)!=0 || pTab->pSelect ){ - /* Do nothing */ - }else -#ifndef SQLITE_OMIT_VIRTUALTABLE - if( (pLoop->wsFlags & WHERE_VIRTUALTABLE)!=0 ){ - const char *pVTab = (const char *)sqlite3GetVTable(db, pTab); - int iCur = pTabItem->iCursor; - sqlite3VdbeAddOp4(v, OP_VOpen, iCur, 0, 0, pVTab, P4_VTAB); - }else if( IsVirtual(pTab) ){ - /* noop */ - }else -#endif - if( (pLoop->wsFlags & WHERE_IDX_ONLY)==0 - && (wctrlFlags & WHERE_OMIT_OPEN_CLOSE)==0 ){ - int op = OP_OpenRead; - if( pWInfo->okOnePass ){ - op = OP_OpenWrite; - pWInfo->aiCurOnePass[0] = pTabItem->iCursor; - }; - sqlite3OpenTable(pParse, pTabItem->iCursor, iDb, pTab, op); - assert( pTabItem->iCursor==pLevel->iTabCur ); - testcase( !pWInfo->okOnePass && pTab->nCol==BMS-1 ); - testcase( !pWInfo->okOnePass && pTab->nCol==BMS ); - if( !pWInfo->okOnePass && pTab->nColcolUsed; - int n = 0; - for(; b; b=b>>1, n++){} - sqlite3VdbeChangeP4(v, sqlite3VdbeCurrentAddr(v)-1, - SQLITE_INT_TO_PTR(n), P4_INT32); - assert( n<=pTab->nCol ); - } - }else{ - sqlite3TableLock(pParse, iDb, pTab->tnum, 0, pTab->zName); - } - if( pLoop->wsFlags & WHERE_INDEXED ){ - Index *pIx = pLoop->u.btree.pIndex; - int iIndexCur; - int op = OP_OpenRead; - /* iIdxCur is always set if to a positive value if ONEPASS is possible */ - assert( iIdxCur!=0 || (pWInfo->wctrlFlags & WHERE_ONEPASS_DESIRED)==0 ); - if( !HasRowid(pTab) && IsPrimaryKeyIndex(pIx) - && (wctrlFlags & WHERE_ONETABLE_ONLY)!=0 - ){ - /* This is one term of an OR-optimization using the PRIMARY KEY of a - ** WITHOUT ROWID table. No need for a separate index */ - iIndexCur = pLevel->iTabCur; - op = 0; - }else if( pWInfo->okOnePass ){ - Index *pJ = pTabItem->pTab->pIndex; - iIndexCur = iIdxCur; - assert( wctrlFlags & WHERE_ONEPASS_DESIRED ); - while( ALWAYS(pJ) && pJ!=pIx ){ - iIndexCur++; - pJ = pJ->pNext; - } - op = OP_OpenWrite; - pWInfo->aiCurOnePass[1] = iIndexCur; - }else if( iIdxCur && (wctrlFlags & WHERE_ONETABLE_ONLY)!=0 ){ - iIndexCur = iIdxCur; - }else{ - iIndexCur = pParse->nTab++; - } - pLevel->iIdxCur = iIndexCur; - assert( pIx->pSchema==pTab->pSchema ); - assert( iIndexCur>=0 ); - if( op ){ - sqlite3VdbeAddOp3(v, op, iIndexCur, pIx->tnum, iDb); - sqlite3VdbeSetP4KeyInfo(pParse, pIx); - VdbeComment((v, "%s", pIx->zName)); - } - } - if( iDb>=0 ) sqlite3CodeVerifySchema(pParse, iDb); - notReady &= ~getMask(&pWInfo->sMaskSet, pTabItem->iCursor); - } - pWInfo->iTop = sqlite3VdbeCurrentAddr(v); - if( db->mallocFailed ) goto whereBeginError; - - /* Generate the code to do the search. Each iteration of the for - ** loop below generates code for a single nested loop of the VM - ** program. 
- */ - notReady = ~(Bitmask)0; - for(ii=0; iia[ii]; -#ifndef SQLITE_OMIT_AUTOMATIC_INDEX - if( (pLevel->pWLoop->wsFlags & WHERE_AUTO_INDEX)!=0 ){ - constructAutomaticIndex(pParse, &pWInfo->sWC, - &pTabList->a[pLevel->iFrom], notReady, pLevel); - if( db->mallocFailed ) goto whereBeginError; - } -#endif - explainOneScan(pParse, pTabList, pLevel, ii, pLevel->iFrom, wctrlFlags); - pLevel->addrBody = sqlite3VdbeCurrentAddr(v); - notReady = codeOneLoopStart(pWInfo, ii, notReady); - pWInfo->iContinue = pLevel->addrCont; - } - - /* Done. */ - VdbeModuleComment((v, "Begin WHERE-core")); - return pWInfo; - - /* Jump here if malloc fails */ -whereBeginError: - if( pWInfo ){ - pParse->nQueryLoop = pWInfo->savedNQueryLoop; - whereInfoFree(db, pWInfo); - } - return 0; -} - -/* -** Generate the end of the WHERE loop. See comments on -** sqlite3WhereBegin() for additional information. -*/ -SQLITE_PRIVATE void sqlite3WhereEnd(WhereInfo *pWInfo){ - Parse *pParse = pWInfo->pParse; - Vdbe *v = pParse->pVdbe; - int i; - WhereLevel *pLevel; - WhereLoop *pLoop; - SrcList *pTabList = pWInfo->pTabList; - sqlite3 *db = pParse->db; - - /* Generate loop termination code. - */ - VdbeModuleComment((v, "End WHERE-core")); - sqlite3ExprCacheClear(pParse); - for(i=pWInfo->nLevel-1; i>=0; i--){ - int addr; - pLevel = &pWInfo->a[i]; - pLoop = pLevel->pWLoop; - sqlite3VdbeResolveLabel(v, pLevel->addrCont); - if( pLevel->op!=OP_Noop ){ - sqlite3VdbeAddOp3(v, pLevel->op, pLevel->p1, pLevel->p2, pLevel->p3); - sqlite3VdbeChangeP5(v, pLevel->p5); - VdbeCoverage(v); - VdbeCoverageIf(v, pLevel->op==OP_Next); - VdbeCoverageIf(v, pLevel->op==OP_Prev); - VdbeCoverageIf(v, pLevel->op==OP_VNext); - } - if( pLoop->wsFlags & WHERE_IN_ABLE && pLevel->u.in.nIn>0 ){ - struct InLoop *pIn; - int j; - sqlite3VdbeResolveLabel(v, pLevel->addrNxt); - for(j=pLevel->u.in.nIn, pIn=&pLevel->u.in.aInLoop[j-1]; j>0; j--, pIn--){ - sqlite3VdbeJumpHere(v, pIn->addrInTop+1); - sqlite3VdbeAddOp2(v, pIn->eEndLoopOp, pIn->iCur, pIn->addrInTop); - VdbeCoverage(v); - VdbeCoverageIf(v, pIn->eEndLoopOp==OP_PrevIfOpen); - VdbeCoverageIf(v, pIn->eEndLoopOp==OP_NextIfOpen); - sqlite3VdbeJumpHere(v, pIn->addrInTop-1); - } - sqlite3DbFree(db, pLevel->u.in.aInLoop); - } - sqlite3VdbeResolveLabel(v, pLevel->addrBrk); - if( pLevel->addrSkip ){ - sqlite3VdbeAddOp2(v, OP_Goto, 0, pLevel->addrSkip); - VdbeComment((v, "next skip-scan on %s", pLoop->u.btree.pIndex->zName)); - sqlite3VdbeJumpHere(v, pLevel->addrSkip); - sqlite3VdbeJumpHere(v, pLevel->addrSkip-2); - } - if( pLevel->iLeftJoin ){ - addr = sqlite3VdbeAddOp1(v, OP_IfPos, pLevel->iLeftJoin); VdbeCoverage(v); - assert( (pLoop->wsFlags & WHERE_IDX_ONLY)==0 - || (pLoop->wsFlags & WHERE_INDEXED)!=0 ); - if( (pLoop->wsFlags & WHERE_IDX_ONLY)==0 ){ - sqlite3VdbeAddOp1(v, OP_NullRow, pTabList->a[i].iCursor); - } - if( pLoop->wsFlags & WHERE_INDEXED ){ - sqlite3VdbeAddOp1(v, OP_NullRow, pLevel->iIdxCur); - } - if( pLevel->op==OP_Return ){ - sqlite3VdbeAddOp2(v, OP_Gosub, pLevel->p1, pLevel->addrFirst); - }else{ - sqlite3VdbeAddOp2(v, OP_Goto, 0, pLevel->addrFirst); - } - sqlite3VdbeJumpHere(v, addr); - } - VdbeModuleComment((v, "End WHERE-loop%d: %s", i, - pWInfo->pTabList->a[pLevel->iFrom].pTab->zName)); - } - - /* The "break" point is here, just past the end of the outer loop. - ** Set it. 
- */ - sqlite3VdbeResolveLabel(v, pWInfo->iBreak); - - assert( pWInfo->nLevel<=pTabList->nSrc ); - for(i=0, pLevel=pWInfo->a; inLevel; i++, pLevel++){ - int k, last; - VdbeOp *pOp; - Index *pIdx = 0; - struct SrcList_item *pTabItem = &pTabList->a[pLevel->iFrom]; - Table *pTab = pTabItem->pTab; - assert( pTab!=0 ); - pLoop = pLevel->pWLoop; - - /* For a co-routine, change all OP_Column references to the table of - ** the co-routine into OP_SCopy of result contained in a register. - ** OP_Rowid becomes OP_Null. - */ - if( pTabItem->viaCoroutine && !db->mallocFailed ){ - last = sqlite3VdbeCurrentAddr(v); - k = pLevel->addrBody; - pOp = sqlite3VdbeGetOp(v, k); - for(; kp1!=pLevel->iTabCur ) continue; - if( pOp->opcode==OP_Column ){ - pOp->opcode = OP_Copy; - pOp->p1 = pOp->p2 + pTabItem->regResult; - pOp->p2 = pOp->p3; - pOp->p3 = 0; - }else if( pOp->opcode==OP_Rowid ){ - pOp->opcode = OP_Null; - pOp->p1 = 0; - pOp->p3 = 0; - } - } - continue; - } - - /* Close all of the cursors that were opened by sqlite3WhereBegin. - ** Except, do not close cursors that will be reused by the OR optimization - ** (WHERE_OMIT_OPEN_CLOSE). And do not close the OP_OpenWrite cursors - ** created for the ONEPASS optimization. - */ - if( (pTab->tabFlags & TF_Ephemeral)==0 - && pTab->pSelect==0 - && (pWInfo->wctrlFlags & WHERE_OMIT_OPEN_CLOSE)==0 - ){ - int ws = pLoop->wsFlags; - if( !pWInfo->okOnePass && (ws & WHERE_IDX_ONLY)==0 ){ - sqlite3VdbeAddOp1(v, OP_Close, pTabItem->iCursor); - } - if( (ws & WHERE_INDEXED)!=0 - && (ws & (WHERE_IPK|WHERE_AUTO_INDEX))==0 - && pLevel->iIdxCur!=pWInfo->aiCurOnePass[1] - ){ - sqlite3VdbeAddOp1(v, OP_Close, pLevel->iIdxCur); - } - } - - /* If this scan uses an index, make VDBE code substitutions to read data - ** from the index instead of from the table where possible. In some cases - ** this optimization prevents the table from ever being read, which can - ** yield a significant performance boost. - ** - ** Calls to the code generator in between sqlite3WhereBegin and - ** sqlite3WhereEnd will have created code that references the table - ** directly. This loop scans all that code looking for opcodes - ** that reference the table and converts them into opcodes that - ** reference the index. - */ - if( pLoop->wsFlags & (WHERE_INDEXED|WHERE_IDX_ONLY) ){ - pIdx = pLoop->u.btree.pIndex; - }else if( pLoop->wsFlags & WHERE_MULTI_OR ){ - pIdx = pLevel->u.pCovidx; - } - if( pIdx && !db->mallocFailed ){ - last = sqlite3VdbeCurrentAddr(v); - k = pLevel->addrBody; - pOp = sqlite3VdbeGetOp(v, k); - for(; kp1!=pLevel->iTabCur ) continue; - if( pOp->opcode==OP_Column ){ - int x = pOp->p2; - assert( pIdx->pTable==pTab ); - if( !HasRowid(pTab) ){ - Index *pPk = sqlite3PrimaryKeyIndex(pTab); - x = pPk->aiColumn[x]; - } - x = sqlite3ColumnOfIndex(pIdx, x); - if( x>=0 ){ - pOp->p2 = x; - pOp->p1 = pLevel->iIdxCur; - } - assert( (pLoop->wsFlags & WHERE_IDX_ONLY)==0 || x>=0 ); - }else if( pOp->opcode==OP_Rowid ){ - pOp->p1 = pLevel->iIdxCur; - pOp->opcode = OP_IdxRowid; - } - } - } - } - - /* Final cleanup - */ - pParse->nQueryLoop = pWInfo->savedNQueryLoop; - whereInfoFree(db, pWInfo); - return; -} - -/************** End of where.c ***********************************************/ -/************** Begin file parse.c *******************************************/ -/* Driver template for the LEMON parser generator. -** The author disclaims copyright to this source code. -** -** This version of "lempar.c" is modified, slightly, for use by SQLite. 
-** The only modifications are the addition of a couple of NEVER() -** macros to disable tests that are needed in the case of a general -** LALR(1) grammar but which are always false in the -** specific grammar used by SQLite. -*/ -/* First off, code is included that follows the "include" declaration -** in the input grammar file. */ -/* #include */ - - -/* -** Disable all error recovery processing in the parser push-down -** automaton. -*/ -#define YYNOERRORRECOVERY 1 - -/* -** Make yytestcase() the same as testcase() -*/ -#define yytestcase(X) testcase(X) - -/* -** An instance of this structure holds information about the -** LIMIT clause of a SELECT statement. -*/ -struct LimitVal { - Expr *pLimit; /* The LIMIT expression. NULL if there is no limit */ - Expr *pOffset; /* The OFFSET expression. NULL if there is none */ -}; - -/* -** An instance of this structure is used to store the LIKE, -** GLOB, NOT LIKE, and NOT GLOB operators. -*/ -struct LikeOp { - Token eOperator; /* "like" or "glob" or "regexp" */ - int bNot; /* True if the NOT keyword is present */ -}; - -/* -** An instance of the following structure describes the event of a -** TRIGGER. "a" is the event type, one of TK_UPDATE, TK_INSERT, -** TK_DELETE, or TK_INSTEAD. If the event is of the form -** -** UPDATE ON (a,b,c) -** -** Then the "b" IdList records the list "a,b,c". -*/ -struct TrigEvent { int a; IdList * b; }; - -/* -** An instance of this structure holds the ATTACH key and the key type. -*/ -struct AttachKey { int type; Token key; }; - - - /* This is a utility routine used to set the ExprSpan.zStart and - ** ExprSpan.zEnd values of pOut so that the span covers the complete - ** range of text beginning with pStart and going to the end of pEnd. - */ - static void spanSet(ExprSpan *pOut, Token *pStart, Token *pEnd){ - pOut->zStart = pStart->z; - pOut->zEnd = &pEnd->z[pEnd->n]; - } - - /* Construct a new Expr object from a single identifier. Use the - ** new Expr to populate pOut. Set the span of pOut to be the identifier - ** that created the expression. - */ - static void spanExpr(ExprSpan *pOut, Parse *pParse, int op, Token *pValue){ - pOut->pExpr = sqlite3PExpr(pParse, op, 0, 0, pValue); - pOut->zStart = pValue->z; - pOut->zEnd = &pValue->z[pValue->n]; - } - - /* This routine constructs a binary expression node out of two ExprSpan - ** objects and uses the result to populate a new ExprSpan object. - */ - static void spanBinaryExpr( - ExprSpan *pOut, /* Write the result here */ - Parse *pParse, /* The parsing context. Errors accumulate here */ - int op, /* The binary operation */ - ExprSpan *pLeft, /* The left operand */ - ExprSpan *pRight /* The right operand */ - ){ - pOut->pExpr = sqlite3PExpr(pParse, op, pLeft->pExpr, pRight->pExpr, 0); - pOut->zStart = pLeft->zStart; - pOut->zEnd = pRight->zEnd; - } - - /* Construct an expression node for a unary postfix operator - */ - static void spanUnaryPostfix( - ExprSpan *pOut, /* Write the new expression node here */ - Parse *pParse, /* Parsing context to record errors */ - int op, /* The operator */ - ExprSpan *pOperand, /* The operand */ - Token *pPostOp /* The operand token for setting the span */ - ){ - pOut->pExpr = sqlite3PExpr(pParse, op, pOperand->pExpr, 0, 0); - pOut->zStart = pOperand->zStart; - pOut->zEnd = &pPostOp->z[pPostOp->n]; - } - - /* A routine to convert a binary TK_IS or TK_ISNOT expression into a - ** unary TK_ISNULL or TK_NOTNULL expression. 
*/ - static void binaryToUnaryIfNull(Parse *pParse, Expr *pY, Expr *pA, int op){ - sqlite3 *db = pParse->db; - if( db->mallocFailed==0 && pY->op==TK_NULL ){ - pA->op = (u8)op; - sqlite3ExprDelete(db, pA->pRight); - pA->pRight = 0; - } - } - - /* Construct an expression node for a unary prefix operator - */ - static void spanUnaryPrefix( - ExprSpan *pOut, /* Write the new expression node here */ - Parse *pParse, /* Parsing context to record errors */ - int op, /* The operator */ - ExprSpan *pOperand, /* The operand */ - Token *pPreOp /* The operand token for setting the span */ - ){ - pOut->pExpr = sqlite3PExpr(pParse, op, pOperand->pExpr, 0, 0); - pOut->zStart = pPreOp->z; - pOut->zEnd = pOperand->zEnd; - } -/* Next is all token values, in a form suitable for use by makeheaders. -** This section will be null unless lemon is run with the -m switch. -*/ -/* -** These constants (all generated automatically by the parser generator) -** specify the various kinds of tokens (terminals) that the parser -** understands. -** -** Each symbol here is a terminal symbol in the grammar. -*/ -/* Make sure the INTERFACE macro is defined. -*/ -#ifndef INTERFACE -# define INTERFACE 1 -#endif -/* The next thing included is series of defines which control -** various aspects of the generated parser. -** YYCODETYPE is the data type used for storing terminal -** and nonterminal numbers. "unsigned char" is -** used if there are fewer than 250 terminals -** and nonterminals. "int" is used otherwise. -** YYNOCODE is a number of type YYCODETYPE which corresponds -** to no legal terminal or nonterminal number. This -** number is used to fill in empty slots of the hash -** table. -** YYFALLBACK If defined, this indicates that one or more tokens -** have fall-back values which should be used if the -** original value of the token will not parse. -** YYACTIONTYPE is the data type used for storing terminal -** and nonterminal numbers. "unsigned char" is -** used if there are fewer than 250 rules and -** states combined. "int" is used otherwise. -** sqlite3ParserTOKENTYPE is the data type used for minor tokens given -** directly to the parser from the tokenizer. -** YYMINORTYPE is the data type used for all minor tokens. -** This is typically a union of many types, one of -** which is sqlite3ParserTOKENTYPE. The entry in the union -** for base tokens is called "yy0". -** YYSTACKDEPTH is the maximum depth of the parser's stack. If -** zero the stack is dynamically sized using realloc() -** sqlite3ParserARG_SDECL A static variable declaration for the %extra_argument -** sqlite3ParserARG_PDECL A parameter declaration for the %extra_argument -** sqlite3ParserARG_STORE Code to store %extra_argument into yypParser -** sqlite3ParserARG_FETCH Code to extract %extra_argument from yypParser -** YYNSTATE the combined number of states. -** YYNRULE the number of rules in the grammar -** YYERRORSYMBOL is the code number of the error symbol. If not -** defined, then do no error processing. 
-*/ -#define YYCODETYPE unsigned char -#define YYNOCODE 254 -#define YYACTIONTYPE unsigned short int -#define YYWILDCARD 70 -#define sqlite3ParserTOKENTYPE Token -typedef union { - int yyinit; - sqlite3ParserTOKENTYPE yy0; - Select* yy3; - ExprList* yy14; - With* yy59; - SrcList* yy65; - struct LikeOp yy96; - Expr* yy132; - u8 yy186; - int yy328; - ExprSpan yy346; - struct TrigEvent yy378; - u16 yy381; - IdList* yy408; - struct {int value; int mask;} yy429; - TriggerStep* yy473; - struct LimitVal yy476; -} YYMINORTYPE; -#ifndef YYSTACKDEPTH -#define YYSTACKDEPTH 100 -#endif -#define sqlite3ParserARG_SDECL Parse *pParse; -#define sqlite3ParserARG_PDECL ,Parse *pParse -#define sqlite3ParserARG_FETCH Parse *pParse = yypParser->pParse -#define sqlite3ParserARG_STORE yypParser->pParse = pParse -#define YYNSTATE 642 -#define YYNRULE 327 -#define YYFALLBACK 1 -#define YY_NO_ACTION (YYNSTATE+YYNRULE+2) -#define YY_ACCEPT_ACTION (YYNSTATE+YYNRULE+1) -#define YY_ERROR_ACTION (YYNSTATE+YYNRULE) - -/* The yyzerominor constant is used to initialize instances of -** YYMINORTYPE objects to zero. */ -static const YYMINORTYPE yyzerominor = { 0 }; - -/* Define the yytestcase() macro to be a no-op if is not already defined -** otherwise. -** -** Applications can choose to define yytestcase() in the %include section -** to a macro that can assist in verifying code coverage. For production -** code the yytestcase() macro should be turned off. But it is useful -** for testing. -*/ -#ifndef yytestcase -# define yytestcase(X) -#endif - - -/* Next are the tables used to determine what action to take based on the -** current state and lookahead token. These tables are used to implement -** functions that take a state number and lookahead value and return an -** action integer. -** -** Suppose the action integer is N. Then the action is determined as -** follows -** -** 0 <= N < YYNSTATE Shift N. That is, push the lookahead -** token onto the stack and goto state N. -** -** YYNSTATE <= N < YYNSTATE+YYNRULE Reduce by rule N-YYNSTATE. -** -** N == YYNSTATE+YYNRULE A syntax error has occurred. -** -** N == YYNSTATE+YYNRULE+1 The parser accepts its input. -** -** N == YYNSTATE+YYNRULE+2 No such action. Denotes unused -** slots in the yy_action[] table. -** -** The action table is constructed as a single large table named yy_action[]. -** Given state S and lookahead X, the action is computed as -** -** yy_action[ yy_shift_ofst[S] + X ] -** -** If the index value yy_shift_ofst[S]+X is out of range or if the value -** yy_lookahead[yy_shift_ofst[S]+X] is not equal to X or if yy_shift_ofst[S] -** is equal to YY_SHIFT_USE_DFLT, it means that the action is not in the table -** and that yy_default[S] should be used instead. -** -** The formula above is for computing the action when the lookahead is -** a terminal symbol. If the lookahead is a non-terminal (as occurs after -** a reduce action) then the yy_reduce_ofst[] array is used in place of -** the yy_shift_ofst[] array and YY_REDUCE_USE_DFLT is used in place of -** YY_SHIFT_USE_DFLT. -** -** The following are the tables generated in this section: -** -** yy_action[] A single table containing all actions. -** yy_lookahead[] A table containing the lookahead for each entry in -** yy_action. Used to detect hash collisions. -** yy_shift_ofst[] For each state, the offset into yy_action for -** shifting terminals. -** yy_reduce_ofst[] For each state, the offset into yy_action for -** shifting non-terminals after a reduce. -** yy_default[] Default action for each state. 
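Read as code, the lookup rule spelled out above is an offset-and-verify step over these tables. A minimal sketch, assuming the yy_* tables and YY_* macros defined in this file (an illustration of the documented rule, not the parser's own helper routine):

    /* Return the action for parser state S and terminal lookahead X,
    ** following the rule described in the comment above. */
    static int lookupShiftAction(int S, unsigned char X){
      int i = yy_shift_ofst[S];
      if( i==YY_SHIFT_USE_DFLT ) return yy_default[S];  /* action not in table */
      i += X;
      if( i<0 || i>=YY_ACTTAB_COUNT || yy_lookahead[i]!=X ){
        return yy_default[S];                           /* hash miss: use default */
      }
      return yy_action[i];   /* shift, reduce, accept, error or no-op code */
    }

For a non-terminal lookahead (after a reduce), the same computation uses yy_reduce_ofst[] and YY_REDUCE_USE_DFLT in place of yy_shift_ofst[] and YY_SHIFT_USE_DFLT, as the comment notes.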
-*/ -#define YY_ACTTAB_COUNT (1497) -static const YYACTIONTYPE yy_action[] = { - /* 0 */ 306, 212, 432, 955, 639, 191, 955, 295, 559, 88, - /* 10 */ 88, 88, 88, 81, 86, 86, 86, 86, 85, 85, - /* 20 */ 84, 84, 84, 83, 330, 185, 184, 183, 635, 635, - /* 30 */ 292, 606, 606, 88, 88, 88, 88, 683, 86, 86, - /* 40 */ 86, 86, 85, 85, 84, 84, 84, 83, 330, 16, - /* 50 */ 436, 597, 89, 90, 80, 600, 599, 601, 601, 87, - /* 60 */ 87, 88, 88, 88, 88, 684, 86, 86, 86, 86, - /* 70 */ 85, 85, 84, 84, 84, 83, 330, 306, 559, 84, - /* 80 */ 84, 84, 83, 330, 65, 86, 86, 86, 86, 85, - /* 90 */ 85, 84, 84, 84, 83, 330, 635, 635, 634, 633, - /* 100 */ 182, 682, 550, 379, 376, 375, 17, 322, 606, 606, - /* 110 */ 371, 198, 479, 91, 374, 82, 79, 165, 85, 85, - /* 120 */ 84, 84, 84, 83, 330, 598, 635, 635, 107, 89, - /* 130 */ 90, 80, 600, 599, 601, 601, 87, 87, 88, 88, - /* 140 */ 88, 88, 186, 86, 86, 86, 86, 85, 85, 84, - /* 150 */ 84, 84, 83, 330, 306, 594, 594, 142, 328, 327, - /* 160 */ 484, 249, 344, 238, 635, 635, 634, 633, 585, 448, - /* 170 */ 526, 525, 229, 388, 1, 394, 450, 584, 449, 635, - /* 180 */ 635, 635, 635, 319, 395, 606, 606, 199, 157, 273, - /* 190 */ 382, 268, 381, 187, 635, 635, 634, 633, 311, 555, - /* 200 */ 266, 593, 593, 266, 347, 588, 89, 90, 80, 600, - /* 210 */ 599, 601, 601, 87, 87, 88, 88, 88, 88, 478, - /* 220 */ 86, 86, 86, 86, 85, 85, 84, 84, 84, 83, - /* 230 */ 330, 306, 272, 536, 634, 633, 146, 610, 197, 310, - /* 240 */ 575, 182, 482, 271, 379, 376, 375, 506, 21, 634, - /* 250 */ 633, 634, 633, 635, 635, 374, 611, 574, 548, 440, - /* 260 */ 111, 563, 606, 606, 634, 633, 324, 479, 608, 608, - /* 270 */ 608, 300, 435, 573, 119, 407, 210, 162, 562, 883, - /* 280 */ 592, 592, 306, 89, 90, 80, 600, 599, 601, 601, - /* 290 */ 87, 87, 88, 88, 88, 88, 506, 86, 86, 86, - /* 300 */ 86, 85, 85, 84, 84, 84, 83, 330, 620, 111, - /* 310 */ 635, 635, 361, 606, 606, 358, 249, 349, 248, 433, - /* 320 */ 243, 479, 586, 634, 633, 195, 611, 93, 119, 221, - /* 330 */ 575, 497, 534, 534, 89, 90, 80, 600, 599, 601, - /* 340 */ 601, 87, 87, 88, 88, 88, 88, 574, 86, 86, - /* 350 */ 86, 86, 85, 85, 84, 84, 84, 83, 330, 306, - /* 360 */ 77, 429, 638, 573, 589, 530, 240, 230, 242, 105, - /* 370 */ 249, 349, 248, 515, 588, 208, 460, 529, 564, 173, - /* 380 */ 634, 633, 970, 144, 430, 2, 424, 228, 380, 557, - /* 390 */ 606, 606, 190, 153, 159, 158, 514, 51, 632, 631, - /* 400 */ 630, 71, 536, 432, 954, 196, 610, 954, 614, 45, - /* 410 */ 18, 89, 90, 80, 600, 599, 601, 601, 87, 87, - /* 420 */ 88, 88, 88, 88, 261, 86, 86, 86, 86, 85, - /* 430 */ 85, 84, 84, 84, 83, 330, 306, 608, 608, 608, - /* 440 */ 542, 424, 402, 385, 241, 506, 451, 320, 211, 543, - /* 450 */ 164, 436, 386, 293, 451, 587, 108, 496, 111, 334, - /* 460 */ 391, 591, 424, 614, 27, 452, 453, 606, 606, 72, - /* 470 */ 257, 70, 259, 452, 339, 342, 564, 582, 68, 415, - /* 480 */ 469, 328, 327, 62, 614, 45, 110, 393, 89, 90, - /* 490 */ 80, 600, 599, 601, 601, 87, 87, 88, 88, 88, - /* 500 */ 88, 152, 86, 86, 86, 86, 85, 85, 84, 84, - /* 510 */ 84, 83, 330, 306, 110, 499, 520, 538, 402, 389, - /* 520 */ 424, 110, 566, 500, 593, 593, 454, 82, 79, 165, - /* 530 */ 424, 591, 384, 564, 340, 615, 188, 162, 424, 350, - /* 540 */ 616, 424, 614, 44, 606, 606, 445, 582, 300, 434, - /* 550 */ 151, 19, 614, 9, 568, 580, 348, 615, 469, 567, - /* 560 */ 614, 26, 616, 614, 45, 89, 90, 80, 600, 599, - /* 570 */ 601, 601, 87, 87, 88, 88, 88, 88, 411, 86, - /* 580 */ 86, 86, 86, 85, 85, 84, 84, 84, 83, 330, - /* 590 */ 306, 579, 110, 578, 521, 282, 433, 398, 400, 
255, - /* 600 */ 486, 82, 79, 165, 487, 164, 82, 79, 165, 488, - /* 610 */ 488, 364, 387, 424, 544, 544, 509, 350, 362, 155, - /* 620 */ 191, 606, 606, 559, 642, 640, 333, 82, 79, 165, - /* 630 */ 305, 564, 507, 312, 357, 614, 45, 329, 596, 595, - /* 640 */ 194, 337, 89, 90, 80, 600, 599, 601, 601, 87, - /* 650 */ 87, 88, 88, 88, 88, 424, 86, 86, 86, 86, - /* 660 */ 85, 85, 84, 84, 84, 83, 330, 306, 20, 323, - /* 670 */ 150, 263, 211, 543, 421, 596, 595, 614, 22, 424, - /* 680 */ 193, 424, 284, 424, 391, 424, 509, 424, 577, 424, - /* 690 */ 186, 335, 424, 559, 424, 313, 120, 546, 606, 606, - /* 700 */ 67, 614, 47, 614, 50, 614, 48, 614, 100, 614, - /* 710 */ 99, 614, 101, 576, 614, 102, 614, 109, 326, 89, - /* 720 */ 90, 80, 600, 599, 601, 601, 87, 87, 88, 88, - /* 730 */ 88, 88, 424, 86, 86, 86, 86, 85, 85, 84, - /* 740 */ 84, 84, 83, 330, 306, 424, 311, 424, 585, 54, - /* 750 */ 424, 516, 517, 590, 614, 112, 424, 584, 424, 572, - /* 760 */ 424, 195, 424, 571, 424, 67, 424, 614, 94, 614, - /* 770 */ 98, 424, 614, 97, 264, 606, 606, 195, 614, 46, - /* 780 */ 614, 96, 614, 30, 614, 49, 614, 115, 614, 114, - /* 790 */ 418, 229, 388, 614, 113, 306, 89, 90, 80, 600, - /* 800 */ 599, 601, 601, 87, 87, 88, 88, 88, 88, 424, - /* 810 */ 86, 86, 86, 86, 85, 85, 84, 84, 84, 83, - /* 820 */ 330, 119, 424, 590, 110, 372, 606, 606, 195, 53, - /* 830 */ 250, 614, 29, 195, 472, 438, 729, 190, 302, 498, - /* 840 */ 14, 523, 641, 2, 614, 43, 306, 89, 90, 80, - /* 850 */ 600, 599, 601, 601, 87, 87, 88, 88, 88, 88, - /* 860 */ 424, 86, 86, 86, 86, 85, 85, 84, 84, 84, - /* 870 */ 83, 330, 424, 613, 964, 964, 354, 606, 606, 420, - /* 880 */ 312, 64, 614, 42, 391, 355, 283, 437, 301, 255, - /* 890 */ 414, 410, 495, 492, 614, 28, 471, 306, 89, 90, - /* 900 */ 80, 600, 599, 601, 601, 87, 87, 88, 88, 88, - /* 910 */ 88, 424, 86, 86, 86, 86, 85, 85, 84, 84, - /* 920 */ 84, 83, 330, 424, 110, 110, 110, 110, 606, 606, - /* 930 */ 110, 254, 13, 614, 41, 532, 531, 283, 481, 531, - /* 940 */ 457, 284, 119, 561, 356, 614, 40, 284, 306, 89, - /* 950 */ 78, 80, 600, 599, 601, 601, 87, 87, 88, 88, - /* 960 */ 88, 88, 424, 86, 86, 86, 86, 85, 85, 84, - /* 970 */ 84, 84, 83, 330, 110, 424, 341, 220, 555, 606, - /* 980 */ 606, 351, 555, 318, 614, 95, 413, 255, 83, 330, - /* 990 */ 284, 284, 255, 640, 333, 356, 255, 614, 39, 306, - /* 1000 */ 356, 90, 80, 600, 599, 601, 601, 87, 87, 88, - /* 1010 */ 88, 88, 88, 424, 86, 86, 86, 86, 85, 85, - /* 1020 */ 84, 84, 84, 83, 330, 424, 317, 316, 141, 465, - /* 1030 */ 606, 606, 219, 619, 463, 614, 10, 417, 462, 255, - /* 1040 */ 189, 510, 553, 351, 207, 363, 161, 614, 38, 315, - /* 1050 */ 218, 255, 255, 80, 600, 599, 601, 601, 87, 87, - /* 1060 */ 88, 88, 88, 88, 424, 86, 86, 86, 86, 85, - /* 1070 */ 85, 84, 84, 84, 83, 330, 76, 419, 255, 3, - /* 1080 */ 878, 461, 424, 247, 331, 331, 614, 37, 217, 76, - /* 1090 */ 419, 390, 3, 216, 215, 422, 4, 331, 331, 424, - /* 1100 */ 547, 12, 424, 545, 614, 36, 424, 541, 422, 424, - /* 1110 */ 540, 424, 214, 424, 408, 424, 539, 403, 605, 605, - /* 1120 */ 237, 614, 25, 119, 614, 24, 588, 408, 614, 45, - /* 1130 */ 118, 614, 35, 614, 34, 614, 33, 614, 23, 588, - /* 1140 */ 60, 223, 603, 602, 513, 378, 73, 74, 140, 139, - /* 1150 */ 424, 110, 265, 75, 426, 425, 59, 424, 610, 73, - /* 1160 */ 74, 549, 402, 404, 424, 373, 75, 426, 425, 604, - /* 1170 */ 138, 610, 614, 11, 392, 76, 419, 181, 3, 614, - /* 1180 */ 32, 271, 369, 331, 331, 493, 614, 31, 149, 608, - /* 1190 */ 608, 608, 607, 15, 422, 365, 614, 8, 137, 489, - /* 1200 */ 136, 190, 608, 608, 
608, 607, 15, 485, 176, 135, - /* 1210 */ 7, 252, 477, 408, 174, 133, 175, 474, 57, 56, - /* 1220 */ 132, 130, 119, 76, 419, 588, 3, 468, 245, 464, - /* 1230 */ 171, 331, 331, 125, 123, 456, 447, 122, 446, 104, - /* 1240 */ 336, 231, 422, 166, 154, 73, 74, 332, 116, 431, - /* 1250 */ 121, 309, 75, 426, 425, 222, 106, 610, 308, 637, - /* 1260 */ 204, 408, 629, 627, 628, 6, 200, 428, 427, 290, - /* 1270 */ 203, 622, 201, 588, 62, 63, 289, 66, 419, 399, - /* 1280 */ 3, 401, 288, 92, 143, 331, 331, 287, 608, 608, - /* 1290 */ 608, 607, 15, 73, 74, 227, 422, 325, 69, 416, - /* 1300 */ 75, 426, 425, 612, 412, 610, 192, 61, 569, 209, - /* 1310 */ 396, 226, 278, 225, 383, 408, 527, 558, 276, 533, - /* 1320 */ 552, 528, 321, 523, 370, 508, 180, 588, 494, 179, - /* 1330 */ 366, 117, 253, 269, 522, 503, 608, 608, 608, 607, - /* 1340 */ 15, 551, 502, 58, 274, 524, 178, 73, 74, 304, - /* 1350 */ 501, 368, 303, 206, 75, 426, 425, 491, 360, 610, - /* 1360 */ 213, 177, 483, 131, 345, 298, 297, 296, 202, 294, - /* 1370 */ 480, 490, 466, 134, 172, 129, 444, 346, 470, 128, - /* 1380 */ 314, 459, 103, 127, 126, 148, 124, 167, 443, 235, - /* 1390 */ 608, 608, 608, 607, 15, 442, 439, 623, 234, 299, - /* 1400 */ 145, 583, 291, 377, 581, 160, 119, 156, 270, 636, - /* 1410 */ 971, 169, 279, 626, 520, 625, 473, 624, 170, 621, - /* 1420 */ 618, 119, 168, 55, 409, 423, 537, 609, 286, 285, - /* 1430 */ 405, 570, 560, 556, 5, 52, 458, 554, 147, 267, - /* 1440 */ 519, 504, 518, 406, 262, 239, 260, 512, 343, 511, - /* 1450 */ 258, 353, 565, 256, 224, 251, 359, 277, 275, 476, - /* 1460 */ 475, 246, 352, 244, 467, 455, 236, 233, 232, 307, - /* 1470 */ 441, 281, 205, 163, 397, 280, 535, 505, 330, 617, - /* 1480 */ 971, 971, 971, 971, 367, 971, 971, 971, 971, 971, - /* 1490 */ 971, 971, 971, 971, 971, 971, 338, -}; -static const YYCODETYPE yy_lookahead[] = { - /* 0 */ 19, 22, 22, 23, 1, 24, 26, 15, 27, 80, - /* 10 */ 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, - /* 20 */ 91, 92, 93, 94, 95, 108, 109, 110, 27, 28, - /* 30 */ 23, 50, 51, 80, 81, 82, 83, 122, 85, 86, - /* 40 */ 87, 88, 89, 90, 91, 92, 93, 94, 95, 22, - /* 50 */ 70, 23, 71, 72, 73, 74, 75, 76, 77, 78, - /* 60 */ 79, 80, 81, 82, 83, 122, 85, 86, 87, 88, - /* 70 */ 89, 90, 91, 92, 93, 94, 95, 19, 97, 91, - /* 80 */ 92, 93, 94, 95, 26, 85, 86, 87, 88, 89, - /* 90 */ 90, 91, 92, 93, 94, 95, 27, 28, 97, 98, - /* 100 */ 99, 122, 211, 102, 103, 104, 79, 19, 50, 51, - /* 110 */ 19, 122, 59, 55, 113, 224, 225, 226, 89, 90, - /* 120 */ 91, 92, 93, 94, 95, 23, 27, 28, 26, 71, - /* 130 */ 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, - /* 140 */ 82, 83, 51, 85, 86, 87, 88, 89, 90, 91, - /* 150 */ 92, 93, 94, 95, 19, 132, 133, 58, 89, 90, - /* 160 */ 21, 108, 109, 110, 27, 28, 97, 98, 33, 100, - /* 170 */ 7, 8, 119, 120, 22, 19, 107, 42, 109, 27, - /* 180 */ 28, 27, 28, 95, 28, 50, 51, 99, 100, 101, - /* 190 */ 102, 103, 104, 105, 27, 28, 97, 98, 107, 152, - /* 200 */ 112, 132, 133, 112, 65, 69, 71, 72, 73, 74, - /* 210 */ 75, 76, 77, 78, 79, 80, 81, 82, 83, 11, - /* 220 */ 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, - /* 230 */ 95, 19, 101, 97, 97, 98, 24, 101, 122, 157, - /* 240 */ 12, 99, 103, 112, 102, 103, 104, 152, 22, 97, - /* 250 */ 98, 97, 98, 27, 28, 113, 27, 29, 91, 164, - /* 260 */ 165, 124, 50, 51, 97, 98, 219, 59, 132, 133, - /* 270 */ 134, 22, 23, 45, 66, 47, 212, 213, 124, 140, - /* 280 */ 132, 133, 19, 71, 72, 73, 74, 75, 76, 77, - /* 290 */ 78, 79, 80, 81, 82, 83, 152, 85, 86, 87, - /* 300 */ 88, 89, 90, 91, 92, 93, 94, 95, 164, 165, - /* 310 */ 27, 28, 230, 50, 51, 233, 
108, 109, 110, 70, - /* 320 */ 16, 59, 23, 97, 98, 26, 97, 22, 66, 185, - /* 330 */ 12, 187, 27, 28, 71, 72, 73, 74, 75, 76, - /* 340 */ 77, 78, 79, 80, 81, 82, 83, 29, 85, 86, - /* 350 */ 87, 88, 89, 90, 91, 92, 93, 94, 95, 19, - /* 360 */ 22, 148, 149, 45, 23, 47, 62, 154, 64, 156, - /* 370 */ 108, 109, 110, 37, 69, 23, 163, 59, 26, 26, - /* 380 */ 97, 98, 144, 145, 146, 147, 152, 200, 52, 23, - /* 390 */ 50, 51, 26, 22, 89, 90, 60, 210, 7, 8, - /* 400 */ 9, 138, 97, 22, 23, 26, 101, 26, 174, 175, - /* 410 */ 197, 71, 72, 73, 74, 75, 76, 77, 78, 79, - /* 420 */ 80, 81, 82, 83, 16, 85, 86, 87, 88, 89, - /* 430 */ 90, 91, 92, 93, 94, 95, 19, 132, 133, 134, - /* 440 */ 23, 152, 208, 209, 140, 152, 152, 111, 195, 196, - /* 450 */ 98, 70, 163, 160, 152, 23, 22, 164, 165, 246, - /* 460 */ 207, 27, 152, 174, 175, 171, 172, 50, 51, 137, - /* 470 */ 62, 139, 64, 171, 172, 222, 124, 27, 138, 24, - /* 480 */ 163, 89, 90, 130, 174, 175, 197, 163, 71, 72, - /* 490 */ 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, - /* 500 */ 83, 22, 85, 86, 87, 88, 89, 90, 91, 92, - /* 510 */ 93, 94, 95, 19, 197, 181, 182, 23, 208, 209, - /* 520 */ 152, 197, 26, 189, 132, 133, 232, 224, 225, 226, - /* 530 */ 152, 97, 91, 26, 232, 116, 212, 213, 152, 222, - /* 540 */ 121, 152, 174, 175, 50, 51, 243, 97, 22, 23, - /* 550 */ 22, 234, 174, 175, 177, 23, 239, 116, 163, 177, - /* 560 */ 174, 175, 121, 174, 175, 71, 72, 73, 74, 75, - /* 570 */ 76, 77, 78, 79, 80, 81, 82, 83, 24, 85, - /* 580 */ 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, - /* 590 */ 19, 23, 197, 11, 23, 227, 70, 208, 220, 152, - /* 600 */ 31, 224, 225, 226, 35, 98, 224, 225, 226, 108, - /* 610 */ 109, 110, 115, 152, 117, 118, 27, 222, 49, 123, - /* 620 */ 24, 50, 51, 27, 0, 1, 2, 224, 225, 226, - /* 630 */ 166, 124, 168, 169, 239, 174, 175, 170, 171, 172, - /* 640 */ 22, 194, 71, 72, 73, 74, 75, 76, 77, 78, - /* 650 */ 79, 80, 81, 82, 83, 152, 85, 86, 87, 88, - /* 660 */ 89, 90, 91, 92, 93, 94, 95, 19, 22, 208, - /* 670 */ 24, 23, 195, 196, 170, 171, 172, 174, 175, 152, - /* 680 */ 26, 152, 152, 152, 207, 152, 97, 152, 23, 152, - /* 690 */ 51, 244, 152, 97, 152, 247, 248, 23, 50, 51, - /* 700 */ 26, 174, 175, 174, 175, 174, 175, 174, 175, 174, - /* 710 */ 175, 174, 175, 23, 174, 175, 174, 175, 188, 71, - /* 720 */ 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, - /* 730 */ 82, 83, 152, 85, 86, 87, 88, 89, 90, 91, - /* 740 */ 92, 93, 94, 95, 19, 152, 107, 152, 33, 24, - /* 750 */ 152, 100, 101, 27, 174, 175, 152, 42, 152, 23, - /* 760 */ 152, 26, 152, 23, 152, 26, 152, 174, 175, 174, - /* 770 */ 175, 152, 174, 175, 23, 50, 51, 26, 174, 175, - /* 780 */ 174, 175, 174, 175, 174, 175, 174, 175, 174, 175, - /* 790 */ 163, 119, 120, 174, 175, 19, 71, 72, 73, 74, - /* 800 */ 75, 76, 77, 78, 79, 80, 81, 82, 83, 152, - /* 810 */ 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, - /* 820 */ 95, 66, 152, 97, 197, 23, 50, 51, 26, 53, - /* 830 */ 23, 174, 175, 26, 23, 23, 23, 26, 26, 26, - /* 840 */ 36, 106, 146, 147, 174, 175, 19, 71, 72, 73, - /* 850 */ 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, - /* 860 */ 152, 85, 86, 87, 88, 89, 90, 91, 92, 93, - /* 870 */ 94, 95, 152, 196, 119, 120, 19, 50, 51, 168, - /* 880 */ 169, 26, 174, 175, 207, 28, 152, 249, 250, 152, - /* 890 */ 163, 163, 163, 163, 174, 175, 163, 19, 71, 72, - /* 900 */ 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, - /* 910 */ 83, 152, 85, 86, 87, 88, 89, 90, 91, 92, - /* 920 */ 93, 94, 95, 152, 197, 197, 197, 197, 50, 51, - /* 930 */ 197, 194, 36, 174, 175, 191, 192, 152, 191, 192, - /* 940 */ 163, 152, 66, 124, 152, 174, 175, 152, 19, 71, - 
/* 950 */ 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, - /* 960 */ 82, 83, 152, 85, 86, 87, 88, 89, 90, 91, - /* 970 */ 92, 93, 94, 95, 197, 152, 100, 188, 152, 50, - /* 980 */ 51, 152, 152, 188, 174, 175, 252, 152, 94, 95, - /* 990 */ 152, 152, 152, 1, 2, 152, 152, 174, 175, 19, - /* 1000 */ 152, 72, 73, 74, 75, 76, 77, 78, 79, 80, - /* 1010 */ 81, 82, 83, 152, 85, 86, 87, 88, 89, 90, - /* 1020 */ 91, 92, 93, 94, 95, 152, 188, 188, 22, 194, - /* 1030 */ 50, 51, 240, 173, 194, 174, 175, 252, 194, 152, - /* 1040 */ 36, 181, 28, 152, 23, 219, 122, 174, 175, 219, - /* 1050 */ 221, 152, 152, 73, 74, 75, 76, 77, 78, 79, - /* 1060 */ 80, 81, 82, 83, 152, 85, 86, 87, 88, 89, - /* 1070 */ 90, 91, 92, 93, 94, 95, 19, 20, 152, 22, - /* 1080 */ 23, 194, 152, 240, 27, 28, 174, 175, 240, 19, - /* 1090 */ 20, 26, 22, 194, 194, 38, 22, 27, 28, 152, - /* 1100 */ 23, 22, 152, 116, 174, 175, 152, 23, 38, 152, - /* 1110 */ 23, 152, 221, 152, 57, 152, 23, 163, 50, 51, - /* 1120 */ 194, 174, 175, 66, 174, 175, 69, 57, 174, 175, - /* 1130 */ 40, 174, 175, 174, 175, 174, 175, 174, 175, 69, - /* 1140 */ 22, 53, 74, 75, 30, 53, 89, 90, 22, 22, - /* 1150 */ 152, 197, 23, 96, 97, 98, 22, 152, 101, 89, - /* 1160 */ 90, 91, 208, 209, 152, 53, 96, 97, 98, 101, - /* 1170 */ 22, 101, 174, 175, 152, 19, 20, 105, 22, 174, - /* 1180 */ 175, 112, 19, 27, 28, 20, 174, 175, 24, 132, - /* 1190 */ 133, 134, 135, 136, 38, 44, 174, 175, 107, 61, - /* 1200 */ 54, 26, 132, 133, 134, 135, 136, 54, 107, 22, - /* 1210 */ 5, 140, 1, 57, 36, 111, 122, 28, 79, 79, - /* 1220 */ 131, 123, 66, 19, 20, 69, 22, 1, 16, 20, - /* 1230 */ 125, 27, 28, 123, 111, 120, 23, 131, 23, 16, - /* 1240 */ 68, 142, 38, 15, 22, 89, 90, 3, 167, 4, - /* 1250 */ 248, 251, 96, 97, 98, 180, 180, 101, 251, 151, - /* 1260 */ 6, 57, 151, 13, 151, 26, 25, 151, 161, 202, - /* 1270 */ 153, 162, 153, 69, 130, 128, 203, 19, 20, 127, - /* 1280 */ 22, 126, 204, 129, 22, 27, 28, 205, 132, 133, - /* 1290 */ 134, 135, 136, 89, 90, 231, 38, 95, 137, 179, - /* 1300 */ 96, 97, 98, 206, 179, 101, 122, 107, 159, 159, - /* 1310 */ 125, 231, 216, 228, 107, 57, 184, 217, 216, 176, - /* 1320 */ 217, 176, 48, 106, 18, 184, 158, 69, 159, 158, - /* 1330 */ 46, 71, 237, 176, 176, 176, 132, 133, 134, 135, - /* 1340 */ 136, 217, 176, 137, 216, 178, 158, 89, 90, 179, - /* 1350 */ 176, 159, 179, 159, 96, 97, 98, 159, 159, 101, - /* 1360 */ 5, 158, 202, 22, 18, 10, 11, 12, 13, 14, - /* 1370 */ 190, 238, 17, 190, 158, 193, 41, 159, 202, 193, - /* 1380 */ 159, 202, 245, 193, 193, 223, 190, 32, 159, 34, - /* 1390 */ 132, 133, 134, 135, 136, 159, 39, 155, 43, 150, - /* 1400 */ 223, 177, 201, 178, 177, 186, 66, 199, 177, 152, - /* 1410 */ 253, 56, 215, 152, 182, 152, 202, 152, 63, 152, - /* 1420 */ 152, 66, 67, 242, 229, 152, 174, 152, 152, 152, - /* 1430 */ 152, 152, 152, 152, 199, 242, 202, 152, 198, 152, - /* 1440 */ 152, 152, 183, 192, 152, 215, 152, 183, 215, 183, - /* 1450 */ 152, 241, 214, 152, 211, 152, 152, 211, 211, 152, - /* 1460 */ 152, 241, 152, 152, 152, 152, 152, 152, 152, 114, - /* 1470 */ 152, 152, 235, 152, 152, 152, 174, 187, 95, 174, - /* 1480 */ 253, 253, 253, 253, 236, 253, 253, 253, 253, 253, - /* 1490 */ 253, 253, 253, 253, 253, 253, 141, -}; -#define YY_SHIFT_USE_DFLT (-86) -#define YY_SHIFT_COUNT (429) -#define YY_SHIFT_MIN (-85) -#define YY_SHIFT_MAX (1383) -static const short yy_shift_ofst[] = { - /* 0 */ 992, 1057, 1355, 1156, 1204, 1204, 1, 262, -19, 135, - /* 10 */ 135, 776, 1204, 1204, 1204, 1204, 69, 69, 53, 208, - /* 20 */ 283, 755, 58, 725, 648, 571, 494, 417, 340, 263, 
- /* 30 */ 212, 827, 827, 827, 827, 827, 827, 827, 827, 827, - /* 40 */ 827, 827, 827, 827, 827, 827, 878, 827, 929, 980, - /* 50 */ 980, 1070, 1204, 1204, 1204, 1204, 1204, 1204, 1204, 1204, - /* 60 */ 1204, 1204, 1204, 1204, 1204, 1204, 1204, 1204, 1204, 1204, - /* 70 */ 1204, 1204, 1204, 1204, 1204, 1204, 1204, 1204, 1204, 1204, - /* 80 */ 1258, 1204, 1204, 1204, 1204, 1204, 1204, 1204, 1204, 1204, - /* 90 */ 1204, 1204, 1204, 1204, -71, -47, -47, -47, -47, -47, - /* 100 */ 0, 29, -12, 283, 283, 139, 91, 392, 392, 894, - /* 110 */ 672, 726, 1383, -86, -86, -86, 88, 318, 318, 99, - /* 120 */ 381, -20, 283, 283, 283, 283, 283, 283, 283, 283, - /* 130 */ 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, - /* 140 */ 283, 283, 283, 283, 624, 876, 726, 672, 1340, 1340, - /* 150 */ 1340, 1340, 1340, 1340, -86, -86, -86, 305, 136, 136, - /* 160 */ 142, 167, 226, 154, 137, 152, 283, 283, 283, 283, - /* 170 */ 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, - /* 180 */ 283, 283, 283, 336, 336, 336, 283, 283, 352, 283, - /* 190 */ 283, 283, 283, 283, 228, 283, 283, 283, 283, 283, - /* 200 */ 283, 283, 283, 283, 283, 501, 569, 596, 596, 596, - /* 210 */ 507, 497, 441, 391, 353, 156, 156, 857, 353, 857, - /* 220 */ 735, 813, 639, 715, 156, 332, 715, 715, 496, 419, - /* 230 */ 646, 1357, 1184, 1184, 1335, 1335, 1184, 1341, 1260, 1144, - /* 240 */ 1346, 1346, 1346, 1346, 1184, 1306, 1144, 1341, 1260, 1260, - /* 250 */ 1144, 1184, 1306, 1206, 1284, 1184, 1184, 1306, 1184, 1306, - /* 260 */ 1184, 1306, 1262, 1207, 1207, 1207, 1274, 1262, 1207, 1217, - /* 270 */ 1207, 1274, 1207, 1207, 1185, 1200, 1185, 1200, 1185, 1200, - /* 280 */ 1184, 1184, 1161, 1262, 1202, 1202, 1262, 1154, 1155, 1147, - /* 290 */ 1152, 1144, 1241, 1239, 1250, 1250, 1254, 1254, 1254, 1254, - /* 300 */ -86, -86, -86, -86, -86, -86, 1068, 304, 526, 249, - /* 310 */ 408, -83, 434, 812, 27, 811, 807, 802, 751, 589, - /* 320 */ 651, 163, 131, 674, 366, 450, 299, 148, 23, 102, - /* 330 */ 229, -21, 1245, 1244, 1222, 1099, 1228, 1172, 1223, 1215, - /* 340 */ 1213, 1115, 1106, 1123, 1110, 1209, 1105, 1212, 1226, 1098, - /* 350 */ 1089, 1140, 1139, 1104, 1189, 1178, 1094, 1211, 1205, 1187, - /* 360 */ 1101, 1071, 1153, 1175, 1146, 1138, 1151, 1091, 1164, 1165, - /* 370 */ 1163, 1069, 1072, 1148, 1112, 1134, 1127, 1129, 1126, 1092, - /* 380 */ 1114, 1118, 1088, 1090, 1093, 1087, 1084, 987, 1079, 1077, - /* 390 */ 1074, 1065, 924, 1021, 1014, 1004, 1006, 819, 739, 896, - /* 400 */ 855, 804, 739, 740, 736, 690, 654, 665, 618, 582, - /* 410 */ 568, 528, 554, 379, 532, 479, 455, 379, 432, 371, - /* 420 */ 341, 28, 338, 116, -11, -57, -85, 7, -8, 3, -}; -#define YY_REDUCE_USE_DFLT (-110) -#define YY_REDUCE_COUNT (305) -#define YY_REDUCE_MIN (-109) -#define YY_REDUCE_MAX (1323) -static const short yy_reduce_ofst[] = { - /* 0 */ 238, 954, 213, 289, 310, 234, 144, 317, -109, 382, - /* 10 */ 377, 303, 461, 389, 378, 368, 302, 294, 253, 395, - /* 20 */ 293, 324, 403, 403, 403, 403, 403, 403, 403, 403, - /* 30 */ 403, 403, 403, 403, 403, 403, 403, 403, 403, 403, - /* 40 */ 403, 403, 403, 403, 403, 403, 403, 403, 403, 403, - /* 50 */ 403, 1022, 1012, 1005, 998, 963, 961, 959, 957, 950, - /* 60 */ 947, 930, 912, 873, 861, 823, 810, 771, 759, 720, - /* 70 */ 708, 670, 657, 619, 614, 612, 610, 608, 606, 604, - /* 80 */ 598, 595, 593, 580, 542, 540, 537, 535, 533, 531, - /* 90 */ 529, 527, 503, 386, 403, 403, 403, 403, 403, 403, - /* 100 */ 403, 403, 403, 95, 447, 82, 334, 504, 467, 403, - /* 110 */ 477, 464, 403, 403, 403, 403, 860, 747, 744, 785, - /* 
120 */ 638, 638, 926, 891, 900, 899, 887, 844, 840, 835, - /* 130 */ 848, 830, 843, 829, 792, 839, 826, 737, 838, 795, - /* 140 */ 789, 47, 734, 530, 696, 777, 711, 677, 733, 730, - /* 150 */ 729, 728, 727, 627, 448, 64, 187, 1305, 1302, 1252, - /* 160 */ 1290, 1273, 1323, 1322, 1321, 1319, 1318, 1316, 1315, 1314, - /* 170 */ 1313, 1312, 1311, 1310, 1308, 1307, 1304, 1303, 1301, 1298, - /* 180 */ 1294, 1292, 1289, 1266, 1264, 1259, 1288, 1287, 1238, 1285, - /* 190 */ 1281, 1280, 1279, 1278, 1251, 1277, 1276, 1275, 1273, 1268, - /* 200 */ 1267, 1265, 1263, 1261, 1257, 1248, 1237, 1247, 1246, 1243, - /* 210 */ 1238, 1240, 1235, 1249, 1234, 1233, 1230, 1220, 1214, 1210, - /* 220 */ 1225, 1219, 1232, 1231, 1197, 1195, 1227, 1224, 1201, 1208, - /* 230 */ 1242, 1137, 1236, 1229, 1193, 1181, 1221, 1177, 1196, 1179, - /* 240 */ 1191, 1190, 1186, 1182, 1218, 1216, 1176, 1162, 1183, 1180, - /* 250 */ 1160, 1199, 1203, 1133, 1095, 1198, 1194, 1188, 1192, 1171, - /* 260 */ 1169, 1168, 1173, 1174, 1166, 1159, 1141, 1170, 1158, 1167, - /* 270 */ 1157, 1132, 1145, 1143, 1124, 1128, 1103, 1102, 1100, 1096, - /* 280 */ 1150, 1149, 1085, 1125, 1080, 1064, 1120, 1097, 1082, 1078, - /* 290 */ 1073, 1067, 1109, 1107, 1119, 1117, 1116, 1113, 1111, 1108, - /* 300 */ 1007, 1000, 1002, 1076, 1075, 1081, -}; -static const YYACTIONTYPE yy_default[] = { - /* 0 */ 647, 964, 964, 964, 878, 878, 969, 964, 774, 802, - /* 10 */ 802, 938, 969, 969, 969, 876, 969, 969, 969, 964, - /* 20 */ 969, 778, 808, 969, 969, 969, 969, 969, 969, 969, - /* 30 */ 969, 937, 939, 816, 815, 918, 789, 813, 806, 810, - /* 40 */ 879, 872, 873, 871, 875, 880, 969, 809, 841, 856, - /* 50 */ 840, 969, 969, 969, 969, 969, 969, 969, 969, 969, - /* 60 */ 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, - /* 70 */ 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, - /* 80 */ 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, - /* 90 */ 969, 969, 969, 969, 850, 855, 862, 854, 851, 843, - /* 100 */ 842, 844, 845, 969, 969, 673, 739, 969, 969, 846, - /* 110 */ 969, 685, 847, 859, 858, 857, 680, 969, 969, 969, - /* 120 */ 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, - /* 130 */ 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, - /* 140 */ 969, 969, 969, 969, 647, 964, 969, 969, 964, 964, - /* 150 */ 964, 964, 964, 964, 956, 778, 768, 969, 969, 969, - /* 160 */ 969, 969, 969, 969, 969, 969, 969, 944, 942, 969, - /* 170 */ 891, 969, 969, 969, 969, 969, 969, 969, 969, 969, - /* 180 */ 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, - /* 190 */ 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, - /* 200 */ 969, 969, 969, 969, 653, 969, 911, 774, 774, 774, - /* 210 */ 776, 754, 766, 655, 812, 791, 791, 923, 812, 923, - /* 220 */ 710, 733, 707, 802, 791, 874, 802, 802, 775, 766, - /* 230 */ 969, 949, 782, 782, 941, 941, 782, 821, 743, 812, - /* 240 */ 750, 750, 750, 750, 782, 670, 812, 821, 743, 743, - /* 250 */ 812, 782, 670, 917, 915, 782, 782, 670, 782, 670, - /* 260 */ 782, 670, 884, 741, 741, 741, 725, 884, 741, 710, - /* 270 */ 741, 725, 741, 741, 795, 790, 795, 790, 795, 790, - /* 280 */ 782, 782, 969, 884, 888, 888, 884, 807, 796, 805, - /* 290 */ 803, 812, 676, 728, 663, 663, 652, 652, 652, 652, - /* 300 */ 961, 961, 956, 712, 712, 695, 969, 969, 969, 969, - /* 310 */ 969, 969, 687, 969, 893, 969, 969, 969, 969, 969, - /* 320 */ 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, - /* 330 */ 969, 828, 969, 648, 951, 969, 969, 948, 969, 969, - /* 340 */ 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, - /* 350 */ 969, 969, 969, 969, 969, 969, 921, 
969, 969, 969, - /* 360 */ 969, 969, 969, 914, 913, 969, 969, 969, 969, 969, - /* 370 */ 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, - /* 380 */ 969, 969, 969, 969, 969, 969, 969, 757, 969, 969, - /* 390 */ 969, 761, 969, 969, 969, 969, 969, 969, 804, 969, - /* 400 */ 797, 969, 877, 969, 969, 969, 969, 969, 969, 969, - /* 410 */ 969, 969, 969, 966, 969, 969, 969, 965, 969, 969, - /* 420 */ 969, 969, 969, 830, 969, 829, 833, 969, 661, 969, - /* 430 */ 644, 649, 960, 963, 962, 959, 958, 957, 952, 950, - /* 440 */ 947, 946, 945, 943, 940, 936, 897, 895, 902, 901, - /* 450 */ 900, 899, 898, 896, 894, 892, 818, 817, 814, 811, - /* 460 */ 753, 935, 890, 752, 749, 748, 669, 953, 920, 929, - /* 470 */ 928, 927, 822, 926, 925, 924, 922, 919, 906, 820, - /* 480 */ 819, 744, 882, 881, 672, 910, 909, 908, 912, 916, - /* 490 */ 907, 784, 751, 671, 668, 675, 679, 731, 732, 740, - /* 500 */ 738, 737, 736, 735, 734, 730, 681, 686, 724, 709, - /* 510 */ 708, 717, 716, 722, 721, 720, 719, 718, 715, 714, - /* 520 */ 713, 706, 705, 711, 704, 727, 726, 723, 703, 747, - /* 530 */ 746, 745, 742, 702, 701, 700, 833, 699, 698, 838, - /* 540 */ 837, 866, 826, 755, 759, 758, 762, 763, 771, 770, - /* 550 */ 769, 780, 781, 793, 792, 824, 823, 794, 779, 773, - /* 560 */ 772, 788, 787, 786, 785, 777, 767, 799, 798, 868, - /* 570 */ 783, 867, 865, 934, 933, 932, 931, 930, 870, 967, - /* 580 */ 968, 887, 889, 886, 801, 800, 885, 869, 839, 836, - /* 590 */ 690, 691, 905, 904, 903, 693, 692, 689, 688, 863, - /* 600 */ 860, 852, 864, 861, 853, 849, 848, 834, 832, 831, - /* 610 */ 827, 835, 760, 756, 825, 765, 764, 697, 696, 694, - /* 620 */ 678, 677, 674, 667, 665, 664, 666, 662, 660, 659, - /* 630 */ 658, 657, 656, 684, 683, 682, 654, 651, 650, 646, - /* 640 */ 645, 643, -}; - -/* The next table maps tokens into fallback tokens. If a construct -** like the following: -** -** %fallback ID X Y Z. -** -** appears in the grammar, then ID becomes a fallback token for X, Y, -** and Z. Whenever one of the tokens X, Y, or Z is input to the parser -** but it does not parse, the type of the token is changed to ID and -** the parse is retried before an error is thrown. 
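In code, the fallback rule described above amounts to retrying the lookup once with the substituted token before giving up. A hedged sketch: lookupShiftAction is the illustrative helper from the earlier sketch, not a routine in this file, while yyFallback[] (defined just below) and YY_ERROR_ACTION come from this file:

    /* If token "major" yields no usable action, retry it as its fallback
    ** token (e.g. EXPLAIN, QUERY, PLAN all fall back to ID) before
    ** reporting a syntax error. */
    int act = lookupShiftAction(stateno, (unsigned char)major);
    if( act==YY_ERROR_ACTION && yyFallback[major]!=0 ){
      major = yyFallback[major];            /* e.g. EXPLAIN -> ID */
      act = lookupShiftAction(stateno, (unsigned char)major);
    }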
-*/ -#ifdef YYFALLBACK -static const YYCODETYPE yyFallback[] = { - 0, /* $ => nothing */ - 0, /* SEMI => nothing */ - 27, /* EXPLAIN => ID */ - 27, /* QUERY => ID */ - 27, /* PLAN => ID */ - 27, /* BEGIN => ID */ - 0, /* TRANSACTION => nothing */ - 27, /* DEFERRED => ID */ - 27, /* IMMEDIATE => ID */ - 27, /* EXCLUSIVE => ID */ - 0, /* COMMIT => nothing */ - 27, /* END => ID */ - 27, /* ROLLBACK => ID */ - 27, /* SAVEPOINT => ID */ - 27, /* RELEASE => ID */ - 0, /* TO => nothing */ - 0, /* TABLE => nothing */ - 0, /* CREATE => nothing */ - 27, /* IF => ID */ - 0, /* NOT => nothing */ - 0, /* EXISTS => nothing */ - 27, /* TEMP => ID */ - 0, /* LP => nothing */ - 0, /* RP => nothing */ - 0, /* AS => nothing */ - 27, /* WITHOUT => ID */ - 0, /* COMMA => nothing */ - 0, /* ID => nothing */ - 0, /* INDEXED => nothing */ - 27, /* ABORT => ID */ - 27, /* ACTION => ID */ - 27, /* AFTER => ID */ - 27, /* ANALYZE => ID */ - 27, /* ASC => ID */ - 27, /* ATTACH => ID */ - 27, /* BEFORE => ID */ - 27, /* BY => ID */ - 27, /* CASCADE => ID */ - 27, /* CAST => ID */ - 27, /* COLUMNKW => ID */ - 27, /* CONFLICT => ID */ - 27, /* DATABASE => ID */ - 27, /* DESC => ID */ - 27, /* DETACH => ID */ - 27, /* EACH => ID */ - 27, /* FAIL => ID */ - 27, /* FOR => ID */ - 27, /* IGNORE => ID */ - 27, /* INITIALLY => ID */ - 27, /* INSTEAD => ID */ - 27, /* LIKE_KW => ID */ - 27, /* MATCH => ID */ - 27, /* NO => ID */ - 27, /* KEY => ID */ - 27, /* OF => ID */ - 27, /* OFFSET => ID */ - 27, /* PRAGMA => ID */ - 27, /* RAISE => ID */ - 27, /* RECURSIVE => ID */ - 27, /* REPLACE => ID */ - 27, /* RESTRICT => ID */ - 27, /* ROW => ID */ - 27, /* TRIGGER => ID */ - 27, /* VACUUM => ID */ - 27, /* VIEW => ID */ - 27, /* VIRTUAL => ID */ - 27, /* WITH => ID */ - 27, /* REINDEX => ID */ - 27, /* RENAME => ID */ - 27, /* CTIME_KW => ID */ -}; -#endif /* YYFALLBACK */ - -/* The following structure represents a single element of the -** parser's stack. Information stored includes: -** -** + The state number for the parser at this level of the stack. -** -** + The value of the token stored at this level of the stack. -** (In other words, the "major" token.) -** -** + The semantic value stored at this level of the stack. This is -** the information used by the action routines in the grammar. -** It is sometimes called the "minor" token. -*/ -struct yyStackEntry { - YYACTIONTYPE stateno; /* The state-number */ - YYCODETYPE major; /* The major token value. This is the code - ** number for the token at this stack level */ - YYMINORTYPE minor; /* The user-supplied minor token value. This - ** is the value of the token */ -}; -typedef struct yyStackEntry yyStackEntry; - -/* The state of the parser is completely contained in an instance of -** the following structure */ -struct yyParser { - int yyidx; /* Index of top element in stack */ -#ifdef YYTRACKMAXSTACKDEPTH - int yyidxMax; /* Maximum value of yyidx */ -#endif - int yyerrcnt; /* Shifts left before out of the error */ - sqlite3ParserARG_SDECL /* A place to hold %extra_argument */ -#if YYSTACKDEPTH<=0 - int yystksz; /* Current side of the stack */ - yyStackEntry *yystack; /* The parser's stack */ -#else - yyStackEntry yystack[YYSTACKDEPTH]; /* The parser's stack */ -#endif -}; -typedef struct yyParser yyParser; - -#ifndef NDEBUG -/* #include */ -static FILE *yyTraceFILE = 0; -static char *yyTracePrompt = 0; -#endif /* NDEBUG */ - -#ifndef NDEBUG -/* -** Turn parser tracing on by giving a stream to which to write the trace -** and a prompt to preface each trace message. 
Tracing is turned off -** by making either argument NULL -** -** Inputs: -**
-** <ul>
-** <li> A FILE* to which trace output should be written.
-**      If NULL, then tracing is turned off.
-** <li> A prefix string written at the beginning of every
-**      line of trace output.  If NULL, then tracing is
-**      turned off.
    -** -** Outputs: -** None. -*/ -SQLITE_PRIVATE void sqlite3ParserTrace(FILE *TraceFILE, char *zTracePrompt){ - yyTraceFILE = TraceFILE; - yyTracePrompt = zTracePrompt; - if( yyTraceFILE==0 ) yyTracePrompt = 0; - else if( yyTracePrompt==0 ) yyTraceFILE = 0; -} -#endif /* NDEBUG */ - -#ifndef NDEBUG -/* For tracing shifts, the names of all terminals and nonterminals -** are required. The following table supplies these names */ -static const char *const yyTokenName[] = { - "$", "SEMI", "EXPLAIN", "QUERY", - "PLAN", "BEGIN", "TRANSACTION", "DEFERRED", - "IMMEDIATE", "EXCLUSIVE", "COMMIT", "END", - "ROLLBACK", "SAVEPOINT", "RELEASE", "TO", - "TABLE", "CREATE", "IF", "NOT", - "EXISTS", "TEMP", "LP", "RP", - "AS", "WITHOUT", "COMMA", "ID", - "INDEXED", "ABORT", "ACTION", "AFTER", - "ANALYZE", "ASC", "ATTACH", "BEFORE", - "BY", "CASCADE", "CAST", "COLUMNKW", - "CONFLICT", "DATABASE", "DESC", "DETACH", - "EACH", "FAIL", "FOR", "IGNORE", - "INITIALLY", "INSTEAD", "LIKE_KW", "MATCH", - "NO", "KEY", "OF", "OFFSET", - "PRAGMA", "RAISE", "RECURSIVE", "REPLACE", - "RESTRICT", "ROW", "TRIGGER", "VACUUM", - "VIEW", "VIRTUAL", "WITH", "REINDEX", - "RENAME", "CTIME_KW", "ANY", "OR", - "AND", "IS", "BETWEEN", "IN", - "ISNULL", "NOTNULL", "NE", "EQ", - "GT", "LE", "LT", "GE", - "ESCAPE", "BITAND", "BITOR", "LSHIFT", - "RSHIFT", "PLUS", "MINUS", "STAR", - "SLASH", "REM", "CONCAT", "COLLATE", - "BITNOT", "STRING", "JOIN_KW", "CONSTRAINT", - "DEFAULT", "NULL", "PRIMARY", "UNIQUE", - "CHECK", "REFERENCES", "AUTOINCR", "ON", - "INSERT", "DELETE", "UPDATE", "SET", - "DEFERRABLE", "FOREIGN", "DROP", "UNION", - "ALL", "EXCEPT", "INTERSECT", "SELECT", - "VALUES", "DISTINCT", "DOT", "FROM", - "JOIN", "USING", "ORDER", "GROUP", - "HAVING", "LIMIT", "WHERE", "INTO", - "INTEGER", "FLOAT", "BLOB", "VARIABLE", - "CASE", "WHEN", "THEN", "ELSE", - "INDEX", "ALTER", "ADD", "error", - "input", "cmdlist", "ecmd", "explain", - "cmdx", "cmd", "transtype", "trans_opt", - "nm", "savepoint_opt", "create_table", "create_table_args", - "createkw", "temp", "ifnotexists", "dbnm", - "columnlist", "conslist_opt", "table_options", "select", - "column", "columnid", "type", "carglist", - "typetoken", "typename", "signed", "plus_num", - "minus_num", "ccons", "term", "expr", - "onconf", "sortorder", "autoinc", "idxlist_opt", - "refargs", "defer_subclause", "refarg", "refact", - "init_deferred_pred_opt", "conslist", "tconscomma", "tcons", - "idxlist", "defer_subclause_opt", "orconf", "resolvetype", - "raisetype", "ifexists", "fullname", "selectnowith", - "oneselect", "with", "multiselect_op", "distinct", - "selcollist", "from", "where_opt", "groupby_opt", - "having_opt", "orderby_opt", "limit_opt", "values", - "nexprlist", "exprlist", "sclp", "as", - "seltablist", "stl_prefix", "joinop", "indexed_opt", - "on_opt", "using_opt", "joinop2", "idlist", - "sortlist", "setlist", "insert_cmd", "inscollist_opt", - "likeop", "between_op", "in_op", "case_operand", - "case_exprlist", "case_else", "uniqueflag", "collate", - "nmnum", "trigger_decl", "trigger_cmd_list", "trigger_time", - "trigger_event", "foreach_clause", "when_clause", "trigger_cmd", - "trnm", "tridxby", "database_kw_opt", "key_opt", - "add_column_fullname", "kwcolumn_opt", "create_vtab", "vtabarglist", - "vtabarg", "vtabargtoken", "lp", "anylist", - "wqlist", -}; -#endif /* NDEBUG */ - -#ifndef NDEBUG -/* For tracing reduce actions, the names of all rules are required. 
-*/ -static const char *const yyRuleName[] = { - /* 0 */ "input ::= cmdlist", - /* 1 */ "cmdlist ::= cmdlist ecmd", - /* 2 */ "cmdlist ::= ecmd", - /* 3 */ "ecmd ::= SEMI", - /* 4 */ "ecmd ::= explain cmdx SEMI", - /* 5 */ "explain ::=", - /* 6 */ "explain ::= EXPLAIN", - /* 7 */ "explain ::= EXPLAIN QUERY PLAN", - /* 8 */ "cmdx ::= cmd", - /* 9 */ "cmd ::= BEGIN transtype trans_opt", - /* 10 */ "trans_opt ::=", - /* 11 */ "trans_opt ::= TRANSACTION", - /* 12 */ "trans_opt ::= TRANSACTION nm", - /* 13 */ "transtype ::=", - /* 14 */ "transtype ::= DEFERRED", - /* 15 */ "transtype ::= IMMEDIATE", - /* 16 */ "transtype ::= EXCLUSIVE", - /* 17 */ "cmd ::= COMMIT trans_opt", - /* 18 */ "cmd ::= END trans_opt", - /* 19 */ "cmd ::= ROLLBACK trans_opt", - /* 20 */ "savepoint_opt ::= SAVEPOINT", - /* 21 */ "savepoint_opt ::=", - /* 22 */ "cmd ::= SAVEPOINT nm", - /* 23 */ "cmd ::= RELEASE savepoint_opt nm", - /* 24 */ "cmd ::= ROLLBACK trans_opt TO savepoint_opt nm", - /* 25 */ "cmd ::= create_table create_table_args", - /* 26 */ "create_table ::= createkw temp TABLE ifnotexists nm dbnm", - /* 27 */ "createkw ::= CREATE", - /* 28 */ "ifnotexists ::=", - /* 29 */ "ifnotexists ::= IF NOT EXISTS", - /* 30 */ "temp ::= TEMP", - /* 31 */ "temp ::=", - /* 32 */ "create_table_args ::= LP columnlist conslist_opt RP table_options", - /* 33 */ "create_table_args ::= AS select", - /* 34 */ "table_options ::=", - /* 35 */ "table_options ::= WITHOUT nm", - /* 36 */ "columnlist ::= columnlist COMMA column", - /* 37 */ "columnlist ::= column", - /* 38 */ "column ::= columnid type carglist", - /* 39 */ "columnid ::= nm", - /* 40 */ "nm ::= ID|INDEXED", - /* 41 */ "nm ::= STRING", - /* 42 */ "nm ::= JOIN_KW", - /* 43 */ "type ::=", - /* 44 */ "type ::= typetoken", - /* 45 */ "typetoken ::= typename", - /* 46 */ "typetoken ::= typename LP signed RP", - /* 47 */ "typetoken ::= typename LP signed COMMA signed RP", - /* 48 */ "typename ::= ID|STRING", - /* 49 */ "typename ::= typename ID|STRING", - /* 50 */ "signed ::= plus_num", - /* 51 */ "signed ::= minus_num", - /* 52 */ "carglist ::= carglist ccons", - /* 53 */ "carglist ::=", - /* 54 */ "ccons ::= CONSTRAINT nm", - /* 55 */ "ccons ::= DEFAULT term", - /* 56 */ "ccons ::= DEFAULT LP expr RP", - /* 57 */ "ccons ::= DEFAULT PLUS term", - /* 58 */ "ccons ::= DEFAULT MINUS term", - /* 59 */ "ccons ::= DEFAULT ID|INDEXED", - /* 60 */ "ccons ::= NULL onconf", - /* 61 */ "ccons ::= NOT NULL onconf", - /* 62 */ "ccons ::= PRIMARY KEY sortorder onconf autoinc", - /* 63 */ "ccons ::= UNIQUE onconf", - /* 64 */ "ccons ::= CHECK LP expr RP", - /* 65 */ "ccons ::= REFERENCES nm idxlist_opt refargs", - /* 66 */ "ccons ::= defer_subclause", - /* 67 */ "ccons ::= COLLATE ID|STRING", - /* 68 */ "autoinc ::=", - /* 69 */ "autoinc ::= AUTOINCR", - /* 70 */ "refargs ::=", - /* 71 */ "refargs ::= refargs refarg", - /* 72 */ "refarg ::= MATCH nm", - /* 73 */ "refarg ::= ON INSERT refact", - /* 74 */ "refarg ::= ON DELETE refact", - /* 75 */ "refarg ::= ON UPDATE refact", - /* 76 */ "refact ::= SET NULL", - /* 77 */ "refact ::= SET DEFAULT", - /* 78 */ "refact ::= CASCADE", - /* 79 */ "refact ::= RESTRICT", - /* 80 */ "refact ::= NO ACTION", - /* 81 */ "defer_subclause ::= NOT DEFERRABLE init_deferred_pred_opt", - /* 82 */ "defer_subclause ::= DEFERRABLE init_deferred_pred_opt", - /* 83 */ "init_deferred_pred_opt ::=", - /* 84 */ "init_deferred_pred_opt ::= INITIALLY DEFERRED", - /* 85 */ "init_deferred_pred_opt ::= INITIALLY IMMEDIATE", - /* 86 */ "conslist_opt ::=", - /* 87 */ 
"conslist_opt ::= COMMA conslist", - /* 88 */ "conslist ::= conslist tconscomma tcons", - /* 89 */ "conslist ::= tcons", - /* 90 */ "tconscomma ::= COMMA", - /* 91 */ "tconscomma ::=", - /* 92 */ "tcons ::= CONSTRAINT nm", - /* 93 */ "tcons ::= PRIMARY KEY LP idxlist autoinc RP onconf", - /* 94 */ "tcons ::= UNIQUE LP idxlist RP onconf", - /* 95 */ "tcons ::= CHECK LP expr RP onconf", - /* 96 */ "tcons ::= FOREIGN KEY LP idxlist RP REFERENCES nm idxlist_opt refargs defer_subclause_opt", - /* 97 */ "defer_subclause_opt ::=", - /* 98 */ "defer_subclause_opt ::= defer_subclause", - /* 99 */ "onconf ::=", - /* 100 */ "onconf ::= ON CONFLICT resolvetype", - /* 101 */ "orconf ::=", - /* 102 */ "orconf ::= OR resolvetype", - /* 103 */ "resolvetype ::= raisetype", - /* 104 */ "resolvetype ::= IGNORE", - /* 105 */ "resolvetype ::= REPLACE", - /* 106 */ "cmd ::= DROP TABLE ifexists fullname", - /* 107 */ "ifexists ::= IF EXISTS", - /* 108 */ "ifexists ::=", - /* 109 */ "cmd ::= createkw temp VIEW ifnotexists nm dbnm AS select", - /* 110 */ "cmd ::= DROP VIEW ifexists fullname", - /* 111 */ "cmd ::= select", - /* 112 */ "select ::= with selectnowith", - /* 113 */ "selectnowith ::= oneselect", - /* 114 */ "selectnowith ::= selectnowith multiselect_op oneselect", - /* 115 */ "multiselect_op ::= UNION", - /* 116 */ "multiselect_op ::= UNION ALL", - /* 117 */ "multiselect_op ::= EXCEPT|INTERSECT", - /* 118 */ "oneselect ::= SELECT distinct selcollist from where_opt groupby_opt having_opt orderby_opt limit_opt", - /* 119 */ "oneselect ::= values", - /* 120 */ "values ::= VALUES LP nexprlist RP", - /* 121 */ "values ::= values COMMA LP exprlist RP", - /* 122 */ "distinct ::= DISTINCT", - /* 123 */ "distinct ::= ALL", - /* 124 */ "distinct ::=", - /* 125 */ "sclp ::= selcollist COMMA", - /* 126 */ "sclp ::=", - /* 127 */ "selcollist ::= sclp expr as", - /* 128 */ "selcollist ::= sclp STAR", - /* 129 */ "selcollist ::= sclp nm DOT STAR", - /* 130 */ "as ::= AS nm", - /* 131 */ "as ::= ID|STRING", - /* 132 */ "as ::=", - /* 133 */ "from ::=", - /* 134 */ "from ::= FROM seltablist", - /* 135 */ "stl_prefix ::= seltablist joinop", - /* 136 */ "stl_prefix ::=", - /* 137 */ "seltablist ::= stl_prefix nm dbnm as indexed_opt on_opt using_opt", - /* 138 */ "seltablist ::= stl_prefix LP select RP as on_opt using_opt", - /* 139 */ "seltablist ::= stl_prefix LP seltablist RP as on_opt using_opt", - /* 140 */ "dbnm ::=", - /* 141 */ "dbnm ::= DOT nm", - /* 142 */ "fullname ::= nm dbnm", - /* 143 */ "joinop ::= COMMA|JOIN", - /* 144 */ "joinop ::= JOIN_KW JOIN", - /* 145 */ "joinop ::= JOIN_KW nm JOIN", - /* 146 */ "joinop ::= JOIN_KW nm nm JOIN", - /* 147 */ "on_opt ::= ON expr", - /* 148 */ "on_opt ::=", - /* 149 */ "indexed_opt ::=", - /* 150 */ "indexed_opt ::= INDEXED BY nm", - /* 151 */ "indexed_opt ::= NOT INDEXED", - /* 152 */ "using_opt ::= USING LP idlist RP", - /* 153 */ "using_opt ::=", - /* 154 */ "orderby_opt ::=", - /* 155 */ "orderby_opt ::= ORDER BY sortlist", - /* 156 */ "sortlist ::= sortlist COMMA expr sortorder", - /* 157 */ "sortlist ::= expr sortorder", - /* 158 */ "sortorder ::= ASC", - /* 159 */ "sortorder ::= DESC", - /* 160 */ "sortorder ::=", - /* 161 */ "groupby_opt ::=", - /* 162 */ "groupby_opt ::= GROUP BY nexprlist", - /* 163 */ "having_opt ::=", - /* 164 */ "having_opt ::= HAVING expr", - /* 165 */ "limit_opt ::=", - /* 166 */ "limit_opt ::= LIMIT expr", - /* 167 */ "limit_opt ::= LIMIT expr OFFSET expr", - /* 168 */ "limit_opt ::= LIMIT expr COMMA expr", - /* 169 */ "cmd ::= with DELETE 
FROM fullname indexed_opt where_opt", - /* 170 */ "where_opt ::=", - /* 171 */ "where_opt ::= WHERE expr", - /* 172 */ "cmd ::= with UPDATE orconf fullname indexed_opt SET setlist where_opt", - /* 173 */ "setlist ::= setlist COMMA nm EQ expr", - /* 174 */ "setlist ::= nm EQ expr", - /* 175 */ "cmd ::= with insert_cmd INTO fullname inscollist_opt select", - /* 176 */ "cmd ::= with insert_cmd INTO fullname inscollist_opt DEFAULT VALUES", - /* 177 */ "insert_cmd ::= INSERT orconf", - /* 178 */ "insert_cmd ::= REPLACE", - /* 179 */ "inscollist_opt ::=", - /* 180 */ "inscollist_opt ::= LP idlist RP", - /* 181 */ "idlist ::= idlist COMMA nm", - /* 182 */ "idlist ::= nm", - /* 183 */ "expr ::= term", - /* 184 */ "expr ::= LP expr RP", - /* 185 */ "term ::= NULL", - /* 186 */ "expr ::= ID|INDEXED", - /* 187 */ "expr ::= JOIN_KW", - /* 188 */ "expr ::= nm DOT nm", - /* 189 */ "expr ::= nm DOT nm DOT nm", - /* 190 */ "term ::= INTEGER|FLOAT|BLOB", - /* 191 */ "term ::= STRING", - /* 192 */ "expr ::= VARIABLE", - /* 193 */ "expr ::= expr COLLATE ID|STRING", - /* 194 */ "expr ::= CAST LP expr AS typetoken RP", - /* 195 */ "expr ::= ID|INDEXED LP distinct exprlist RP", - /* 196 */ "expr ::= ID|INDEXED LP STAR RP", - /* 197 */ "term ::= CTIME_KW", - /* 198 */ "expr ::= expr AND expr", - /* 199 */ "expr ::= expr OR expr", - /* 200 */ "expr ::= expr LT|GT|GE|LE expr", - /* 201 */ "expr ::= expr EQ|NE expr", - /* 202 */ "expr ::= expr BITAND|BITOR|LSHIFT|RSHIFT expr", - /* 203 */ "expr ::= expr PLUS|MINUS expr", - /* 204 */ "expr ::= expr STAR|SLASH|REM expr", - /* 205 */ "expr ::= expr CONCAT expr", - /* 206 */ "likeop ::= LIKE_KW|MATCH", - /* 207 */ "likeop ::= NOT LIKE_KW|MATCH", - /* 208 */ "expr ::= expr likeop expr", - /* 209 */ "expr ::= expr likeop expr ESCAPE expr", - /* 210 */ "expr ::= expr ISNULL|NOTNULL", - /* 211 */ "expr ::= expr NOT NULL", - /* 212 */ "expr ::= expr IS expr", - /* 213 */ "expr ::= expr IS NOT expr", - /* 214 */ "expr ::= NOT expr", - /* 215 */ "expr ::= BITNOT expr", - /* 216 */ "expr ::= MINUS expr", - /* 217 */ "expr ::= PLUS expr", - /* 218 */ "between_op ::= BETWEEN", - /* 219 */ "between_op ::= NOT BETWEEN", - /* 220 */ "expr ::= expr between_op expr AND expr", - /* 221 */ "in_op ::= IN", - /* 222 */ "in_op ::= NOT IN", - /* 223 */ "expr ::= expr in_op LP exprlist RP", - /* 224 */ "expr ::= LP select RP", - /* 225 */ "expr ::= expr in_op LP select RP", - /* 226 */ "expr ::= expr in_op nm dbnm", - /* 227 */ "expr ::= EXISTS LP select RP", - /* 228 */ "expr ::= CASE case_operand case_exprlist case_else END", - /* 229 */ "case_exprlist ::= case_exprlist WHEN expr THEN expr", - /* 230 */ "case_exprlist ::= WHEN expr THEN expr", - /* 231 */ "case_else ::= ELSE expr", - /* 232 */ "case_else ::=", - /* 233 */ "case_operand ::= expr", - /* 234 */ "case_operand ::=", - /* 235 */ "exprlist ::= nexprlist", - /* 236 */ "exprlist ::=", - /* 237 */ "nexprlist ::= nexprlist COMMA expr", - /* 238 */ "nexprlist ::= expr", - /* 239 */ "cmd ::= createkw uniqueflag INDEX ifnotexists nm dbnm ON nm LP idxlist RP where_opt", - /* 240 */ "uniqueflag ::= UNIQUE", - /* 241 */ "uniqueflag ::=", - /* 242 */ "idxlist_opt ::=", - /* 243 */ "idxlist_opt ::= LP idxlist RP", - /* 244 */ "idxlist ::= idxlist COMMA nm collate sortorder", - /* 245 */ "idxlist ::= nm collate sortorder", - /* 246 */ "collate ::=", - /* 247 */ "collate ::= COLLATE ID|STRING", - /* 248 */ "cmd ::= DROP INDEX ifexists fullname", - /* 249 */ "cmd ::= VACUUM", - /* 250 */ "cmd ::= VACUUM nm", - /* 251 */ "cmd ::= PRAGMA nm 
dbnm", - /* 252 */ "cmd ::= PRAGMA nm dbnm EQ nmnum", - /* 253 */ "cmd ::= PRAGMA nm dbnm LP nmnum RP", - /* 254 */ "cmd ::= PRAGMA nm dbnm EQ minus_num", - /* 255 */ "cmd ::= PRAGMA nm dbnm LP minus_num RP", - /* 256 */ "nmnum ::= plus_num", - /* 257 */ "nmnum ::= nm", - /* 258 */ "nmnum ::= ON", - /* 259 */ "nmnum ::= DELETE", - /* 260 */ "nmnum ::= DEFAULT", - /* 261 */ "plus_num ::= PLUS INTEGER|FLOAT", - /* 262 */ "plus_num ::= INTEGER|FLOAT", - /* 263 */ "minus_num ::= MINUS INTEGER|FLOAT", - /* 264 */ "cmd ::= createkw trigger_decl BEGIN trigger_cmd_list END", - /* 265 */ "trigger_decl ::= temp TRIGGER ifnotexists nm dbnm trigger_time trigger_event ON fullname foreach_clause when_clause", - /* 266 */ "trigger_time ::= BEFORE", - /* 267 */ "trigger_time ::= AFTER", - /* 268 */ "trigger_time ::= INSTEAD OF", - /* 269 */ "trigger_time ::=", - /* 270 */ "trigger_event ::= DELETE|INSERT", - /* 271 */ "trigger_event ::= UPDATE", - /* 272 */ "trigger_event ::= UPDATE OF idlist", - /* 273 */ "foreach_clause ::=", - /* 274 */ "foreach_clause ::= FOR EACH ROW", - /* 275 */ "when_clause ::=", - /* 276 */ "when_clause ::= WHEN expr", - /* 277 */ "trigger_cmd_list ::= trigger_cmd_list trigger_cmd SEMI", - /* 278 */ "trigger_cmd_list ::= trigger_cmd SEMI", - /* 279 */ "trnm ::= nm", - /* 280 */ "trnm ::= nm DOT nm", - /* 281 */ "tridxby ::=", - /* 282 */ "tridxby ::= INDEXED BY nm", - /* 283 */ "tridxby ::= NOT INDEXED", - /* 284 */ "trigger_cmd ::= UPDATE orconf trnm tridxby SET setlist where_opt", - /* 285 */ "trigger_cmd ::= insert_cmd INTO trnm inscollist_opt select", - /* 286 */ "trigger_cmd ::= DELETE FROM trnm tridxby where_opt", - /* 287 */ "trigger_cmd ::= select", - /* 288 */ "expr ::= RAISE LP IGNORE RP", - /* 289 */ "expr ::= RAISE LP raisetype COMMA nm RP", - /* 290 */ "raisetype ::= ROLLBACK", - /* 291 */ "raisetype ::= ABORT", - /* 292 */ "raisetype ::= FAIL", - /* 293 */ "cmd ::= DROP TRIGGER ifexists fullname", - /* 294 */ "cmd ::= ATTACH database_kw_opt expr AS expr key_opt", - /* 295 */ "cmd ::= DETACH database_kw_opt expr", - /* 296 */ "key_opt ::=", - /* 297 */ "key_opt ::= KEY expr", - /* 298 */ "database_kw_opt ::= DATABASE", - /* 299 */ "database_kw_opt ::=", - /* 300 */ "cmd ::= REINDEX", - /* 301 */ "cmd ::= REINDEX nm dbnm", - /* 302 */ "cmd ::= ANALYZE", - /* 303 */ "cmd ::= ANALYZE nm dbnm", - /* 304 */ "cmd ::= ALTER TABLE fullname RENAME TO nm", - /* 305 */ "cmd ::= ALTER TABLE add_column_fullname ADD kwcolumn_opt column", - /* 306 */ "add_column_fullname ::= fullname", - /* 307 */ "kwcolumn_opt ::=", - /* 308 */ "kwcolumn_opt ::= COLUMNKW", - /* 309 */ "cmd ::= create_vtab", - /* 310 */ "cmd ::= create_vtab LP vtabarglist RP", - /* 311 */ "create_vtab ::= createkw VIRTUAL TABLE ifnotexists nm dbnm USING nm", - /* 312 */ "vtabarglist ::= vtabarg", - /* 313 */ "vtabarglist ::= vtabarglist COMMA vtabarg", - /* 314 */ "vtabarg ::=", - /* 315 */ "vtabarg ::= vtabarg vtabargtoken", - /* 316 */ "vtabargtoken ::= ANY", - /* 317 */ "vtabargtoken ::= lp anylist RP", - /* 318 */ "lp ::= LP", - /* 319 */ "anylist ::=", - /* 320 */ "anylist ::= anylist LP anylist RP", - /* 321 */ "anylist ::= anylist ANY", - /* 322 */ "with ::=", - /* 323 */ "with ::= WITH wqlist", - /* 324 */ "with ::= WITH RECURSIVE wqlist", - /* 325 */ "wqlist ::= nm idxlist_opt AS LP select RP", - /* 326 */ "wqlist ::= wqlist COMMA nm idxlist_opt AS LP select RP", -}; -#endif /* NDEBUG */ - - -#if YYSTACKDEPTH<=0 -/* -** Try to increase the size of the parser stack. 
-*/ -static void yyGrowStack(yyParser *p){ - int newSize; - yyStackEntry *pNew; - - newSize = p->yystksz*2 + 100; - pNew = realloc(p->yystack, newSize*sizeof(pNew[0])); - if( pNew ){ - p->yystack = pNew; - p->yystksz = newSize; -#ifndef NDEBUG - if( yyTraceFILE ){ - fprintf(yyTraceFILE,"%sStack grows to %d entries!\n", - yyTracePrompt, p->yystksz); - } -#endif - } -} -#endif - -/* -** This function allocates a new parser. -** The only argument is a pointer to a function which works like -** malloc. -** -** Inputs: -** A pointer to the function used to allocate memory. -** -** Outputs: -** A pointer to a parser. This pointer is used in subsequent calls -** to sqlite3Parser and sqlite3ParserFree. -*/ -SQLITE_PRIVATE void *sqlite3ParserAlloc(void *(*mallocProc)(size_t)){ - yyParser *pParser; - pParser = (yyParser*)(*mallocProc)( (size_t)sizeof(yyParser) ); - if( pParser ){ - pParser->yyidx = -1; -#ifdef YYTRACKMAXSTACKDEPTH - pParser->yyidxMax = 0; -#endif -#if YYSTACKDEPTH<=0 - pParser->yystack = NULL; - pParser->yystksz = 0; - yyGrowStack(pParser); -#endif - } - return pParser; -} - -/* The following function deletes the value associated with a -** symbol. The symbol can be either a terminal or nonterminal. -** "yymajor" is the symbol code, and "yypminor" is a pointer to -** the value. -*/ -static void yy_destructor( - yyParser *yypParser, /* The parser */ - YYCODETYPE yymajor, /* Type code for object to destroy */ - YYMINORTYPE *yypminor /* The object to be destroyed */ -){ - sqlite3ParserARG_FETCH; - switch( yymajor ){ - /* Here is inserted the actions which take place when a - ** terminal or non-terminal is destroyed. This can happen - ** when the symbol is popped from the stack during a - ** reduce or during error processing or when a parser is - ** being destroyed before it is finished parsing. - ** - ** Note: during a reduce, the only symbols destroyed are those - ** which appear on the RHS of the rule, but which are not used - ** inside the C code. 
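
The comments on `sqlite3ParserAlloc` above describe the intended life cycle: the caller supplies a `malloc`-compatible allocator, uses the returned handle for subsequent parser calls, and eventually releases it with `sqlite3ParserFree` together with a matching `free`-compatible routine. A hedged sketch of that usage; the token-feeding call is elided because its signature is not part of this hunk, and both routines are `SQLITE_PRIVATE`, so this is illustrative only:

```c
#include <stdlib.h>

/* Prototypes as documented in the deleted hunk; in the amalgamation
** these are SQLITE_PRIVATE, so this fragment illustrates the life
** cycle rather than a supported entry point. */
void *sqlite3ParserAlloc(void *(*mallocProc)(size_t));
void  sqlite3ParserFree(void *p, void (*freeProc)(void*));

static void parser_lifecycle_sketch(void){
  void *pEngine = sqlite3ParserAlloc(malloc);  /* allocator is passed in */
  if( pEngine==0 ) return;
  /* ... feed tokens to the generated parser here (call elided) ... */
  sqlite3ParserFree(pEngine, free);  /* destructors run for anything left on the stack */
}
```
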
- */ - case 163: /* select */ - case 195: /* selectnowith */ - case 196: /* oneselect */ - case 207: /* values */ -{ -sqlite3SelectDelete(pParse->db, (yypminor->yy3)); -} - break; - case 174: /* term */ - case 175: /* expr */ -{ -sqlite3ExprDelete(pParse->db, (yypminor->yy346).pExpr); -} - break; - case 179: /* idxlist_opt */ - case 188: /* idxlist */ - case 200: /* selcollist */ - case 203: /* groupby_opt */ - case 205: /* orderby_opt */ - case 208: /* nexprlist */ - case 209: /* exprlist */ - case 210: /* sclp */ - case 220: /* sortlist */ - case 221: /* setlist */ - case 228: /* case_exprlist */ -{ -sqlite3ExprListDelete(pParse->db, (yypminor->yy14)); -} - break; - case 194: /* fullname */ - case 201: /* from */ - case 212: /* seltablist */ - case 213: /* stl_prefix */ -{ -sqlite3SrcListDelete(pParse->db, (yypminor->yy65)); -} - break; - case 197: /* with */ - case 252: /* wqlist */ -{ -sqlite3WithDelete(pParse->db, (yypminor->yy59)); -} - break; - case 202: /* where_opt */ - case 204: /* having_opt */ - case 216: /* on_opt */ - case 227: /* case_operand */ - case 229: /* case_else */ - case 238: /* when_clause */ - case 243: /* key_opt */ -{ -sqlite3ExprDelete(pParse->db, (yypminor->yy132)); -} - break; - case 217: /* using_opt */ - case 219: /* idlist */ - case 223: /* inscollist_opt */ -{ -sqlite3IdListDelete(pParse->db, (yypminor->yy408)); -} - break; - case 234: /* trigger_cmd_list */ - case 239: /* trigger_cmd */ -{ -sqlite3DeleteTriggerStep(pParse->db, (yypminor->yy473)); -} - break; - case 236: /* trigger_event */ -{ -sqlite3IdListDelete(pParse->db, (yypminor->yy378).b); -} - break; - default: break; /* If no destructor action specified: do nothing */ - } -} - -/* -** Pop the parser's stack once. -** -** If there is a destructor routine associated with the token which -** is popped from the stack, then call it. -** -** Return the major token number for the symbol popped. -*/ -static int yy_pop_parser_stack(yyParser *pParser){ - YYCODETYPE yymajor; - yyStackEntry *yytos = &pParser->yystack[pParser->yyidx]; - - /* There is no mechanism by which the parser stack can be popped below - ** empty in SQLite. */ - if( NEVER(pParser->yyidx<0) ) return 0; -#ifndef NDEBUG - if( yyTraceFILE && pParser->yyidx>=0 ){ - fprintf(yyTraceFILE,"%sPopping %s\n", - yyTracePrompt, - yyTokenName[yytos->major]); - } -#endif - yymajor = yytos->major; - yy_destructor(pParser, yymajor, &yytos->minor); - pParser->yyidx--; - return yymajor; -} - -/* -** Deallocate and destroy a parser. Destructors are all called for -** all stack elements before shutting the parser down. -** -** Inputs: -**
-**
-** <li>  A pointer to the parser.  This should be a pointer
-**       obtained from sqlite3ParserAlloc.
-**
-** <li>  A pointer to a function used to reclaim memory obtained
-**       from malloc.
-**
    -*/ -SQLITE_PRIVATE void sqlite3ParserFree( - void *p, /* The parser to be deleted */ - void (*freeProc)(void*) /* Function used to reclaim memory */ -){ - yyParser *pParser = (yyParser*)p; - /* In SQLite, we never try to destroy a parser that was not successfully - ** created in the first place. */ - if( NEVER(pParser==0) ) return; - while( pParser->yyidx>=0 ) yy_pop_parser_stack(pParser); -#if YYSTACKDEPTH<=0 - free(pParser->yystack); -#endif - (*freeProc)((void*)pParser); -} - -/* -** Return the peak depth of the stack for a parser. -*/ -#ifdef YYTRACKMAXSTACKDEPTH -SQLITE_PRIVATE int sqlite3ParserStackPeak(void *p){ - yyParser *pParser = (yyParser*)p; - return pParser->yyidxMax; -} -#endif - -/* -** Find the appropriate action for a parser given the terminal -** look-ahead token iLookAhead. -** -** If the look-ahead token is YYNOCODE, then check to see if the action is -** independent of the look-ahead. If it is, return the action, otherwise -** return YY_NO_ACTION. -*/ -static int yy_find_shift_action( - yyParser *pParser, /* The parser */ - YYCODETYPE iLookAhead /* The look-ahead token */ -){ - int i; - int stateno = pParser->yystack[pParser->yyidx].stateno; - - if( stateno>YY_SHIFT_COUNT - || (i = yy_shift_ofst[stateno])==YY_SHIFT_USE_DFLT ){ - return yy_default[stateno]; - } - assert( iLookAhead!=YYNOCODE ); - i += iLookAhead; - if( i<0 || i>=YY_ACTTAB_COUNT || yy_lookahead[i]!=iLookAhead ){ - if( iLookAhead>0 ){ -#ifdef YYFALLBACK - YYCODETYPE iFallback; /* Fallback token */ - if( iLookAhead %s\n", - yyTracePrompt, yyTokenName[iLookAhead], yyTokenName[iFallback]); - } -#endif - return yy_find_shift_action(pParser, iFallback); - } -#endif -#ifdef YYWILDCARD - { - int j = i - iLookAhead + YYWILDCARD; - if( -#if YY_SHIFT_MIN+YYWILDCARD<0 - j>=0 && -#endif -#if YY_SHIFT_MAX+YYWILDCARD>=YY_ACTTAB_COUNT - j %s\n", - yyTracePrompt, yyTokenName[iLookAhead], yyTokenName[YYWILDCARD]); - } -#endif /* NDEBUG */ - return yy_action[j]; - } - } -#endif /* YYWILDCARD */ - } - return yy_default[stateno]; - }else{ - return yy_action[i]; - } -} - -/* -** Find the appropriate action for a parser given the non-terminal -** look-ahead token iLookAhead. -** -** If the look-ahead token is YYNOCODE, then check to see if the action is -** independent of the look-ahead. If it is, return the action, otherwise -** return YY_NO_ACTION. -*/ -static int yy_find_reduce_action( - int stateno, /* Current state number */ - YYCODETYPE iLookAhead /* The look-ahead token */ -){ - int i; -#ifdef YYERRORSYMBOL - if( stateno>YY_REDUCE_COUNT ){ - return yy_default[stateno]; - } -#else - assert( stateno<=YY_REDUCE_COUNT ); -#endif - i = yy_reduce_ofst[stateno]; - assert( i!=YY_REDUCE_USE_DFLT ); - assert( iLookAhead!=YYNOCODE ); - i += iLookAhead; -#ifdef YYERRORSYMBOL - if( i<0 || i>=YY_ACTTAB_COUNT || yy_lookahead[i]!=iLookAhead ){ - return yy_default[stateno]; - } -#else - assert( i>=0 && iyyidx--; -#ifndef NDEBUG - if( yyTraceFILE ){ - fprintf(yyTraceFILE,"%sStack Overflow!\n",yyTracePrompt); - } -#endif - while( yypParser->yyidx>=0 ) yy_pop_parser_stack(yypParser); - /* Here code is inserted which will execute if the parser - ** stack every overflows */ - - UNUSED_PARAMETER(yypMinor); /* Silence some compiler warnings */ - sqlite3ErrorMsg(pParse, "parser stack overflow"); - sqlite3ParserARG_STORE; /* Suppress warning about unused %extra_argument var */ -} - -/* -** Perform a shift action. 
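
The `yy_find_shift_action` routine above performs the compressed-table lookup typical of Lemon parsers: each state carries an offset into a shared action table, a parallel lookahead table confirms that the slot really belongs to the current (state, token) pair, and anything else falls back to the state's default action (the fallback-token and wildcard paths guarded by `YYFALLBACK`/`YYWILDCARD` are omitted here). A small self-contained sketch of the same lookup scheme, with made-up table contents:

```c
/* Illustrative compressed-table lookup in the style of yy_find_shift_action.
** All table contents below are invented; only the lookup scheme matches. */
enum { N_STATES = 4, N_SLOTS = 8, USE_DEFAULT = -1 };

static const int shift_ofst[N_STATES]   = { 0, 2, USE_DEFAULT, 4 };
static const int lookahead[N_SLOTS]     = { 1, 2, 3, 1, 4, 2, 5, 6 };
static const int action[N_SLOTS]        = { 10, 11, 12, 13, 14, 15, 16, 17 };
static const int dflt_action[N_STATES]  = { 90, 91, 92, 93 };

static int find_shift_action(int state, int token){
  int i = shift_ofst[state];
  if( i==USE_DEFAULT ) return dflt_action[state];  /* action independent of lookahead */
  i += token;
  if( i<0 || i>=N_SLOTS || lookahead[i]!=token ){
    return dflt_action[state];                     /* slot belongs to another state   */
  }
  return action[i];                                /* genuine shift/reduce action     */
}
```
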
-*/ -static void yy_shift( - yyParser *yypParser, /* The parser to be shifted */ - int yyNewState, /* The new state to shift in */ - int yyMajor, /* The major token to shift in */ - YYMINORTYPE *yypMinor /* Pointer to the minor token to shift in */ -){ - yyStackEntry *yytos; - yypParser->yyidx++; -#ifdef YYTRACKMAXSTACKDEPTH - if( yypParser->yyidx>yypParser->yyidxMax ){ - yypParser->yyidxMax = yypParser->yyidx; - } -#endif -#if YYSTACKDEPTH>0 - if( yypParser->yyidx>=YYSTACKDEPTH ){ - yyStackOverflow(yypParser, yypMinor); - return; - } -#else - if( yypParser->yyidx>=yypParser->yystksz ){ - yyGrowStack(yypParser); - if( yypParser->yyidx>=yypParser->yystksz ){ - yyStackOverflow(yypParser, yypMinor); - return; - } - } -#endif - yytos = &yypParser->yystack[yypParser->yyidx]; - yytos->stateno = (YYACTIONTYPE)yyNewState; - yytos->major = (YYCODETYPE)yyMajor; - yytos->minor = *yypMinor; -#ifndef NDEBUG - if( yyTraceFILE && yypParser->yyidx>0 ){ - int i; - fprintf(yyTraceFILE,"%sShift %d\n",yyTracePrompt,yyNewState); - fprintf(yyTraceFILE,"%sStack:",yyTracePrompt); - for(i=1; i<=yypParser->yyidx; i++) - fprintf(yyTraceFILE," %s",yyTokenName[yypParser->yystack[i].major]); - fprintf(yyTraceFILE,"\n"); - } -#endif -} - -/* The following table contains information about every rule that -** is used during the reduce. -*/ -static const struct { - YYCODETYPE lhs; /* Symbol on the left-hand side of the rule */ - unsigned char nrhs; /* Number of right-hand side symbols in the rule */ -} yyRuleInfo[] = { - { 144, 1 }, - { 145, 2 }, - { 145, 1 }, - { 146, 1 }, - { 146, 3 }, - { 147, 0 }, - { 147, 1 }, - { 147, 3 }, - { 148, 1 }, - { 149, 3 }, - { 151, 0 }, - { 151, 1 }, - { 151, 2 }, - { 150, 0 }, - { 150, 1 }, - { 150, 1 }, - { 150, 1 }, - { 149, 2 }, - { 149, 2 }, - { 149, 2 }, - { 153, 1 }, - { 153, 0 }, - { 149, 2 }, - { 149, 3 }, - { 149, 5 }, - { 149, 2 }, - { 154, 6 }, - { 156, 1 }, - { 158, 0 }, - { 158, 3 }, - { 157, 1 }, - { 157, 0 }, - { 155, 5 }, - { 155, 2 }, - { 162, 0 }, - { 162, 2 }, - { 160, 3 }, - { 160, 1 }, - { 164, 3 }, - { 165, 1 }, - { 152, 1 }, - { 152, 1 }, - { 152, 1 }, - { 166, 0 }, - { 166, 1 }, - { 168, 1 }, - { 168, 4 }, - { 168, 6 }, - { 169, 1 }, - { 169, 2 }, - { 170, 1 }, - { 170, 1 }, - { 167, 2 }, - { 167, 0 }, - { 173, 2 }, - { 173, 2 }, - { 173, 4 }, - { 173, 3 }, - { 173, 3 }, - { 173, 2 }, - { 173, 2 }, - { 173, 3 }, - { 173, 5 }, - { 173, 2 }, - { 173, 4 }, - { 173, 4 }, - { 173, 1 }, - { 173, 2 }, - { 178, 0 }, - { 178, 1 }, - { 180, 0 }, - { 180, 2 }, - { 182, 2 }, - { 182, 3 }, - { 182, 3 }, - { 182, 3 }, - { 183, 2 }, - { 183, 2 }, - { 183, 1 }, - { 183, 1 }, - { 183, 2 }, - { 181, 3 }, - { 181, 2 }, - { 184, 0 }, - { 184, 2 }, - { 184, 2 }, - { 161, 0 }, - { 161, 2 }, - { 185, 3 }, - { 185, 1 }, - { 186, 1 }, - { 186, 0 }, - { 187, 2 }, - { 187, 7 }, - { 187, 5 }, - { 187, 5 }, - { 187, 10 }, - { 189, 0 }, - { 189, 1 }, - { 176, 0 }, - { 176, 3 }, - { 190, 0 }, - { 190, 2 }, - { 191, 1 }, - { 191, 1 }, - { 191, 1 }, - { 149, 4 }, - { 193, 2 }, - { 193, 0 }, - { 149, 8 }, - { 149, 4 }, - { 149, 1 }, - { 163, 2 }, - { 195, 1 }, - { 195, 3 }, - { 198, 1 }, - { 198, 2 }, - { 198, 1 }, - { 196, 9 }, - { 196, 1 }, - { 207, 4 }, - { 207, 5 }, - { 199, 1 }, - { 199, 1 }, - { 199, 0 }, - { 210, 2 }, - { 210, 0 }, - { 200, 3 }, - { 200, 2 }, - { 200, 4 }, - { 211, 2 }, - { 211, 1 }, - { 211, 0 }, - { 201, 0 }, - { 201, 2 }, - { 213, 2 }, - { 213, 0 }, - { 212, 7 }, - { 212, 7 }, - { 212, 7 }, - { 159, 0 }, - { 159, 2 }, - { 194, 2 }, - { 214, 1 }, - { 214, 2 }, - { 214, 3 
}, - { 214, 4 }, - { 216, 2 }, - { 216, 0 }, - { 215, 0 }, - { 215, 3 }, - { 215, 2 }, - { 217, 4 }, - { 217, 0 }, - { 205, 0 }, - { 205, 3 }, - { 220, 4 }, - { 220, 2 }, - { 177, 1 }, - { 177, 1 }, - { 177, 0 }, - { 203, 0 }, - { 203, 3 }, - { 204, 0 }, - { 204, 2 }, - { 206, 0 }, - { 206, 2 }, - { 206, 4 }, - { 206, 4 }, - { 149, 6 }, - { 202, 0 }, - { 202, 2 }, - { 149, 8 }, - { 221, 5 }, - { 221, 3 }, - { 149, 6 }, - { 149, 7 }, - { 222, 2 }, - { 222, 1 }, - { 223, 0 }, - { 223, 3 }, - { 219, 3 }, - { 219, 1 }, - { 175, 1 }, - { 175, 3 }, - { 174, 1 }, - { 175, 1 }, - { 175, 1 }, - { 175, 3 }, - { 175, 5 }, - { 174, 1 }, - { 174, 1 }, - { 175, 1 }, - { 175, 3 }, - { 175, 6 }, - { 175, 5 }, - { 175, 4 }, - { 174, 1 }, - { 175, 3 }, - { 175, 3 }, - { 175, 3 }, - { 175, 3 }, - { 175, 3 }, - { 175, 3 }, - { 175, 3 }, - { 175, 3 }, - { 224, 1 }, - { 224, 2 }, - { 175, 3 }, - { 175, 5 }, - { 175, 2 }, - { 175, 3 }, - { 175, 3 }, - { 175, 4 }, - { 175, 2 }, - { 175, 2 }, - { 175, 2 }, - { 175, 2 }, - { 225, 1 }, - { 225, 2 }, - { 175, 5 }, - { 226, 1 }, - { 226, 2 }, - { 175, 5 }, - { 175, 3 }, - { 175, 5 }, - { 175, 4 }, - { 175, 4 }, - { 175, 5 }, - { 228, 5 }, - { 228, 4 }, - { 229, 2 }, - { 229, 0 }, - { 227, 1 }, - { 227, 0 }, - { 209, 1 }, - { 209, 0 }, - { 208, 3 }, - { 208, 1 }, - { 149, 12 }, - { 230, 1 }, - { 230, 0 }, - { 179, 0 }, - { 179, 3 }, - { 188, 5 }, - { 188, 3 }, - { 231, 0 }, - { 231, 2 }, - { 149, 4 }, - { 149, 1 }, - { 149, 2 }, - { 149, 3 }, - { 149, 5 }, - { 149, 6 }, - { 149, 5 }, - { 149, 6 }, - { 232, 1 }, - { 232, 1 }, - { 232, 1 }, - { 232, 1 }, - { 232, 1 }, - { 171, 2 }, - { 171, 1 }, - { 172, 2 }, - { 149, 5 }, - { 233, 11 }, - { 235, 1 }, - { 235, 1 }, - { 235, 2 }, - { 235, 0 }, - { 236, 1 }, - { 236, 1 }, - { 236, 3 }, - { 237, 0 }, - { 237, 3 }, - { 238, 0 }, - { 238, 2 }, - { 234, 3 }, - { 234, 2 }, - { 240, 1 }, - { 240, 3 }, - { 241, 0 }, - { 241, 3 }, - { 241, 2 }, - { 239, 7 }, - { 239, 5 }, - { 239, 5 }, - { 239, 1 }, - { 175, 4 }, - { 175, 6 }, - { 192, 1 }, - { 192, 1 }, - { 192, 1 }, - { 149, 4 }, - { 149, 6 }, - { 149, 3 }, - { 243, 0 }, - { 243, 2 }, - { 242, 1 }, - { 242, 0 }, - { 149, 1 }, - { 149, 3 }, - { 149, 1 }, - { 149, 3 }, - { 149, 6 }, - { 149, 6 }, - { 244, 1 }, - { 245, 0 }, - { 245, 1 }, - { 149, 1 }, - { 149, 4 }, - { 246, 8 }, - { 247, 1 }, - { 247, 3 }, - { 248, 0 }, - { 248, 2 }, - { 249, 1 }, - { 249, 3 }, - { 250, 1 }, - { 251, 0 }, - { 251, 4 }, - { 251, 2 }, - { 197, 0 }, - { 197, 2 }, - { 197, 3 }, - { 252, 6 }, - { 252, 8 }, -}; - -static void yy_accept(yyParser*); /* Forward Declaration */ - -/* -** Perform a reduce action and the shift that must immediately -** follow the reduce. -*/ -static void yy_reduce( - yyParser *yypParser, /* The parser */ - int yyruleno /* Number of the rule by which to reduce */ -){ - int yygoto; /* The next state */ - int yyact; /* The next action */ - YYMINORTYPE yygotominor; /* The LHS of the rule reduced */ - yyStackEntry *yymsp; /* The top of the parser's stack */ - int yysize; /* Amount to pop the stack */ - sqlite3ParserARG_FETCH; - yymsp = &yypParser->yystack[yypParser->yyidx]; -#ifndef NDEBUG - if( yyTraceFILE && yyruleno>=0 - && yyruleno<(int)(sizeof(yyRuleName)/sizeof(yyRuleName[0])) ){ - fprintf(yyTraceFILE, "%sReduce [%s].\n", yyTracePrompt, - yyRuleName[yyruleno]); - } -#endif /* NDEBUG */ - - /* Silence complaints from purify about yygotominor being uninitialized - ** in some cases when it is copied into the stack after the following - ** switch. 
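
The `yyRuleInfo` table above records, for each grammar rule, its left-hand-side symbol and the number of right-hand-side symbols; once the rule's user action has produced a value, `yy_reduce` uses those two fields to pop the RHS off the stack and obtain the goto state from `yy_find_reduce_action`. A hedged sketch of that pop-then-goto step, with a stubbed-out reduce-action lookup and invented types:

```c
/* Illustrative pop-then-goto step of a reduce, driven by a rule-info
** table shaped like yyRuleInfo.  Everything here is invented except the
** overall mechanism. */
typedef struct { int lhs; int nrhs; } RuleInfo;
typedef struct { int stateno; int major; } StackEntry;
typedef struct { StackEntry stack[64]; int top; } MiniParser;

/* Stand-in for yy_find_reduce_action: a real parser consults its
** reduce-offset tables here. */
static int find_reduce_action(int stateno, int lhsSymbol){
  return stateno + lhsSymbol;
}

static void reduce_sketch(MiniParser *p, const RuleInfo *ruleInfo, int ruleno){
  int lhs  = ruleInfo[ruleno].lhs;
  int size = ruleInfo[ruleno].nrhs;
  p->top -= size;                                  /* pop the RHS symbols       */
  int act = find_reduce_action(p->stack[p->top].stateno, lhs);
  p->top += 1;                                     /* shift the LHS nonterminal */
  p->stack[p->top].stateno = act;
  p->stack[p->top].major   = lhs;
}
```
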
yygotominor is uninitialized when a rule reduces that does - ** not set the value of its left-hand side nonterminal. Leaving the - ** value of the nonterminal uninitialized is utterly harmless as long - ** as the value is never used. So really the only thing this code - ** accomplishes is to quieten purify. - ** - ** 2007-01-16: The wireshark project (www.wireshark.org) reports that - ** without this code, their parser segfaults. I'm not sure what there - ** parser is doing to make this happen. This is the second bug report - ** from wireshark this week. Clearly they are stressing Lemon in ways - ** that it has not been previously stressed... (SQLite ticket #2172) - */ - /*memset(&yygotominor, 0, sizeof(yygotominor));*/ - yygotominor = yyzerominor; - - - switch( yyruleno ){ - /* Beginning here are the reduction cases. A typical example - ** follows: - ** case 0: - ** #line - ** { ... } // User supplied code - ** #line - ** break; - */ - case 5: /* explain ::= */ -{ sqlite3BeginParse(pParse, 0); } - break; - case 6: /* explain ::= EXPLAIN */ -{ sqlite3BeginParse(pParse, 1); } - break; - case 7: /* explain ::= EXPLAIN QUERY PLAN */ -{ sqlite3BeginParse(pParse, 2); } - break; - case 8: /* cmdx ::= cmd */ -{ sqlite3FinishCoding(pParse); } - break; - case 9: /* cmd ::= BEGIN transtype trans_opt */ -{sqlite3BeginTransaction(pParse, yymsp[-1].minor.yy328);} - break; - case 13: /* transtype ::= */ -{yygotominor.yy328 = TK_DEFERRED;} - break; - case 14: /* transtype ::= DEFERRED */ - case 15: /* transtype ::= IMMEDIATE */ yytestcase(yyruleno==15); - case 16: /* transtype ::= EXCLUSIVE */ yytestcase(yyruleno==16); - case 115: /* multiselect_op ::= UNION */ yytestcase(yyruleno==115); - case 117: /* multiselect_op ::= EXCEPT|INTERSECT */ yytestcase(yyruleno==117); -{yygotominor.yy328 = yymsp[0].major;} - break; - case 17: /* cmd ::= COMMIT trans_opt */ - case 18: /* cmd ::= END trans_opt */ yytestcase(yyruleno==18); -{sqlite3CommitTransaction(pParse);} - break; - case 19: /* cmd ::= ROLLBACK trans_opt */ -{sqlite3RollbackTransaction(pParse);} - break; - case 22: /* cmd ::= SAVEPOINT nm */ -{ - sqlite3Savepoint(pParse, SAVEPOINT_BEGIN, &yymsp[0].minor.yy0); -} - break; - case 23: /* cmd ::= RELEASE savepoint_opt nm */ -{ - sqlite3Savepoint(pParse, SAVEPOINT_RELEASE, &yymsp[0].minor.yy0); -} - break; - case 24: /* cmd ::= ROLLBACK trans_opt TO savepoint_opt nm */ -{ - sqlite3Savepoint(pParse, SAVEPOINT_ROLLBACK, &yymsp[0].minor.yy0); -} - break; - case 26: /* create_table ::= createkw temp TABLE ifnotexists nm dbnm */ -{ - sqlite3StartTable(pParse,&yymsp[-1].minor.yy0,&yymsp[0].minor.yy0,yymsp[-4].minor.yy328,0,0,yymsp[-2].minor.yy328); -} - break; - case 27: /* createkw ::= CREATE */ -{ - pParse->db->lookaside.bEnabled = 0; - yygotominor.yy0 = yymsp[0].minor.yy0; -} - break; - case 28: /* ifnotexists ::= */ - case 31: /* temp ::= */ yytestcase(yyruleno==31); - case 68: /* autoinc ::= */ yytestcase(yyruleno==68); - case 81: /* defer_subclause ::= NOT DEFERRABLE init_deferred_pred_opt */ yytestcase(yyruleno==81); - case 83: /* init_deferred_pred_opt ::= */ yytestcase(yyruleno==83); - case 85: /* init_deferred_pred_opt ::= INITIALLY IMMEDIATE */ yytestcase(yyruleno==85); - case 97: /* defer_subclause_opt ::= */ yytestcase(yyruleno==97); - case 108: /* ifexists ::= */ yytestcase(yyruleno==108); - case 218: /* between_op ::= BETWEEN */ yytestcase(yyruleno==218); - case 221: /* in_op ::= IN */ yytestcase(yyruleno==221); -{yygotominor.yy328 = 0;} - break; - case 29: /* ifnotexists ::= IF NOT EXISTS */ - case 
30: /* temp ::= TEMP */ yytestcase(yyruleno==30); - case 69: /* autoinc ::= AUTOINCR */ yytestcase(yyruleno==69); - case 84: /* init_deferred_pred_opt ::= INITIALLY DEFERRED */ yytestcase(yyruleno==84); - case 107: /* ifexists ::= IF EXISTS */ yytestcase(yyruleno==107); - case 219: /* between_op ::= NOT BETWEEN */ yytestcase(yyruleno==219); - case 222: /* in_op ::= NOT IN */ yytestcase(yyruleno==222); -{yygotominor.yy328 = 1;} - break; - case 32: /* create_table_args ::= LP columnlist conslist_opt RP table_options */ -{ - sqlite3EndTable(pParse,&yymsp[-2].minor.yy0,&yymsp[-1].minor.yy0,yymsp[0].minor.yy186,0); -} - break; - case 33: /* create_table_args ::= AS select */ -{ - sqlite3EndTable(pParse,0,0,0,yymsp[0].minor.yy3); - sqlite3SelectDelete(pParse->db, yymsp[0].minor.yy3); -} - break; - case 34: /* table_options ::= */ -{yygotominor.yy186 = 0;} - break; - case 35: /* table_options ::= WITHOUT nm */ -{ - if( yymsp[0].minor.yy0.n==5 && sqlite3_strnicmp(yymsp[0].minor.yy0.z,"rowid",5)==0 ){ - yygotominor.yy186 = TF_WithoutRowid; - }else{ - yygotominor.yy186 = 0; - sqlite3ErrorMsg(pParse, "unknown table option: %.*s", yymsp[0].minor.yy0.n, yymsp[0].minor.yy0.z); - } -} - break; - case 38: /* column ::= columnid type carglist */ -{ - yygotominor.yy0.z = yymsp[-2].minor.yy0.z; - yygotominor.yy0.n = (int)(pParse->sLastToken.z-yymsp[-2].minor.yy0.z) + pParse->sLastToken.n; -} - break; - case 39: /* columnid ::= nm */ -{ - sqlite3AddColumn(pParse,&yymsp[0].minor.yy0); - yygotominor.yy0 = yymsp[0].minor.yy0; - pParse->constraintName.n = 0; -} - break; - case 40: /* nm ::= ID|INDEXED */ - case 41: /* nm ::= STRING */ yytestcase(yyruleno==41); - case 42: /* nm ::= JOIN_KW */ yytestcase(yyruleno==42); - case 45: /* typetoken ::= typename */ yytestcase(yyruleno==45); - case 48: /* typename ::= ID|STRING */ yytestcase(yyruleno==48); - case 130: /* as ::= AS nm */ yytestcase(yyruleno==130); - case 131: /* as ::= ID|STRING */ yytestcase(yyruleno==131); - case 141: /* dbnm ::= DOT nm */ yytestcase(yyruleno==141); - case 150: /* indexed_opt ::= INDEXED BY nm */ yytestcase(yyruleno==150); - case 247: /* collate ::= COLLATE ID|STRING */ yytestcase(yyruleno==247); - case 256: /* nmnum ::= plus_num */ yytestcase(yyruleno==256); - case 257: /* nmnum ::= nm */ yytestcase(yyruleno==257); - case 258: /* nmnum ::= ON */ yytestcase(yyruleno==258); - case 259: /* nmnum ::= DELETE */ yytestcase(yyruleno==259); - case 260: /* nmnum ::= DEFAULT */ yytestcase(yyruleno==260); - case 261: /* plus_num ::= PLUS INTEGER|FLOAT */ yytestcase(yyruleno==261); - case 262: /* plus_num ::= INTEGER|FLOAT */ yytestcase(yyruleno==262); - case 263: /* minus_num ::= MINUS INTEGER|FLOAT */ yytestcase(yyruleno==263); - case 279: /* trnm ::= nm */ yytestcase(yyruleno==279); -{yygotominor.yy0 = yymsp[0].minor.yy0;} - break; - case 44: /* type ::= typetoken */ -{sqlite3AddColumnType(pParse,&yymsp[0].minor.yy0);} - break; - case 46: /* typetoken ::= typename LP signed RP */ -{ - yygotominor.yy0.z = yymsp[-3].minor.yy0.z; - yygotominor.yy0.n = (int)(&yymsp[0].minor.yy0.z[yymsp[0].minor.yy0.n] - yymsp[-3].minor.yy0.z); -} - break; - case 47: /* typetoken ::= typename LP signed COMMA signed RP */ -{ - yygotominor.yy0.z = yymsp[-5].minor.yy0.z; - yygotominor.yy0.n = (int)(&yymsp[0].minor.yy0.z[yymsp[0].minor.yy0.n] - yymsp[-5].minor.yy0.z); -} - break; - case 49: /* typename ::= typename ID|STRING */ -{yygotominor.yy0.z=yymsp[-1].minor.yy0.z; yygotominor.yy0.n=yymsp[0].minor.yy0.n+(int)(yymsp[0].minor.yy0.z-yymsp[-1].minor.yy0.z);} - break; - 
case 54: /* ccons ::= CONSTRAINT nm */ - case 92: /* tcons ::= CONSTRAINT nm */ yytestcase(yyruleno==92); -{pParse->constraintName = yymsp[0].minor.yy0;} - break; - case 55: /* ccons ::= DEFAULT term */ - case 57: /* ccons ::= DEFAULT PLUS term */ yytestcase(yyruleno==57); -{sqlite3AddDefaultValue(pParse,&yymsp[0].minor.yy346);} - break; - case 56: /* ccons ::= DEFAULT LP expr RP */ -{sqlite3AddDefaultValue(pParse,&yymsp[-1].minor.yy346);} - break; - case 58: /* ccons ::= DEFAULT MINUS term */ -{ - ExprSpan v; - v.pExpr = sqlite3PExpr(pParse, TK_UMINUS, yymsp[0].minor.yy346.pExpr, 0, 0); - v.zStart = yymsp[-1].minor.yy0.z; - v.zEnd = yymsp[0].minor.yy346.zEnd; - sqlite3AddDefaultValue(pParse,&v); -} - break; - case 59: /* ccons ::= DEFAULT ID|INDEXED */ -{ - ExprSpan v; - spanExpr(&v, pParse, TK_STRING, &yymsp[0].minor.yy0); - sqlite3AddDefaultValue(pParse,&v); -} - break; - case 61: /* ccons ::= NOT NULL onconf */ -{sqlite3AddNotNull(pParse, yymsp[0].minor.yy328);} - break; - case 62: /* ccons ::= PRIMARY KEY sortorder onconf autoinc */ -{sqlite3AddPrimaryKey(pParse,0,yymsp[-1].minor.yy328,yymsp[0].minor.yy328,yymsp[-2].minor.yy328);} - break; - case 63: /* ccons ::= UNIQUE onconf */ -{sqlite3CreateIndex(pParse,0,0,0,0,yymsp[0].minor.yy328,0,0,0,0);} - break; - case 64: /* ccons ::= CHECK LP expr RP */ -{sqlite3AddCheckConstraint(pParse,yymsp[-1].minor.yy346.pExpr);} - break; - case 65: /* ccons ::= REFERENCES nm idxlist_opt refargs */ -{sqlite3CreateForeignKey(pParse,0,&yymsp[-2].minor.yy0,yymsp[-1].minor.yy14,yymsp[0].minor.yy328);} - break; - case 66: /* ccons ::= defer_subclause */ -{sqlite3DeferForeignKey(pParse,yymsp[0].minor.yy328);} - break; - case 67: /* ccons ::= COLLATE ID|STRING */ -{sqlite3AddCollateType(pParse, &yymsp[0].minor.yy0);} - break; - case 70: /* refargs ::= */ -{ yygotominor.yy328 = OE_None*0x0101; /* EV: R-19803-45884 */} - break; - case 71: /* refargs ::= refargs refarg */ -{ yygotominor.yy328 = (yymsp[-1].minor.yy328 & ~yymsp[0].minor.yy429.mask) | yymsp[0].minor.yy429.value; } - break; - case 72: /* refarg ::= MATCH nm */ - case 73: /* refarg ::= ON INSERT refact */ yytestcase(yyruleno==73); -{ yygotominor.yy429.value = 0; yygotominor.yy429.mask = 0x000000; } - break; - case 74: /* refarg ::= ON DELETE refact */ -{ yygotominor.yy429.value = yymsp[0].minor.yy328; yygotominor.yy429.mask = 0x0000ff; } - break; - case 75: /* refarg ::= ON UPDATE refact */ -{ yygotominor.yy429.value = yymsp[0].minor.yy328<<8; yygotominor.yy429.mask = 0x00ff00; } - break; - case 76: /* refact ::= SET NULL */ -{ yygotominor.yy328 = OE_SetNull; /* EV: R-33326-45252 */} - break; - case 77: /* refact ::= SET DEFAULT */ -{ yygotominor.yy328 = OE_SetDflt; /* EV: R-33326-45252 */} - break; - case 78: /* refact ::= CASCADE */ -{ yygotominor.yy328 = OE_Cascade; /* EV: R-33326-45252 */} - break; - case 79: /* refact ::= RESTRICT */ -{ yygotominor.yy328 = OE_Restrict; /* EV: R-33326-45252 */} - break; - case 80: /* refact ::= NO ACTION */ -{ yygotominor.yy328 = OE_None; /* EV: R-33326-45252 */} - break; - case 82: /* defer_subclause ::= DEFERRABLE init_deferred_pred_opt */ - case 98: /* defer_subclause_opt ::= defer_subclause */ yytestcase(yyruleno==98); - case 100: /* onconf ::= ON CONFLICT resolvetype */ yytestcase(yyruleno==100); - case 103: /* resolvetype ::= raisetype */ yytestcase(yyruleno==103); -{yygotominor.yy328 = yymsp[0].minor.yy328;} - break; - case 86: /* conslist_opt ::= */ -{yygotominor.yy0.n = 0; yygotominor.yy0.z = 0;} - break; - case 87: /* conslist_opt ::= COMMA conslist */ 
-{yygotominor.yy0 = yymsp[-1].minor.yy0;} - break; - case 90: /* tconscomma ::= COMMA */ -{pParse->constraintName.n = 0;} - break; - case 93: /* tcons ::= PRIMARY KEY LP idxlist autoinc RP onconf */ -{sqlite3AddPrimaryKey(pParse,yymsp[-3].minor.yy14,yymsp[0].minor.yy328,yymsp[-2].minor.yy328,0);} - break; - case 94: /* tcons ::= UNIQUE LP idxlist RP onconf */ -{sqlite3CreateIndex(pParse,0,0,0,yymsp[-2].minor.yy14,yymsp[0].minor.yy328,0,0,0,0);} - break; - case 95: /* tcons ::= CHECK LP expr RP onconf */ -{sqlite3AddCheckConstraint(pParse,yymsp[-2].minor.yy346.pExpr);} - break; - case 96: /* tcons ::= FOREIGN KEY LP idxlist RP REFERENCES nm idxlist_opt refargs defer_subclause_opt */ -{ - sqlite3CreateForeignKey(pParse, yymsp[-6].minor.yy14, &yymsp[-3].minor.yy0, yymsp[-2].minor.yy14, yymsp[-1].minor.yy328); - sqlite3DeferForeignKey(pParse, yymsp[0].minor.yy328); -} - break; - case 99: /* onconf ::= */ -{yygotominor.yy328 = OE_Default;} - break; - case 101: /* orconf ::= */ -{yygotominor.yy186 = OE_Default;} - break; - case 102: /* orconf ::= OR resolvetype */ -{yygotominor.yy186 = (u8)yymsp[0].minor.yy328;} - break; - case 104: /* resolvetype ::= IGNORE */ -{yygotominor.yy328 = OE_Ignore;} - break; - case 105: /* resolvetype ::= REPLACE */ -{yygotominor.yy328 = OE_Replace;} - break; - case 106: /* cmd ::= DROP TABLE ifexists fullname */ -{ - sqlite3DropTable(pParse, yymsp[0].minor.yy65, 0, yymsp[-1].minor.yy328); -} - break; - case 109: /* cmd ::= createkw temp VIEW ifnotexists nm dbnm AS select */ -{ - sqlite3CreateView(pParse, &yymsp[-7].minor.yy0, &yymsp[-3].minor.yy0, &yymsp[-2].minor.yy0, yymsp[0].minor.yy3, yymsp[-6].minor.yy328, yymsp[-4].minor.yy328); -} - break; - case 110: /* cmd ::= DROP VIEW ifexists fullname */ -{ - sqlite3DropTable(pParse, yymsp[0].minor.yy65, 1, yymsp[-1].minor.yy328); -} - break; - case 111: /* cmd ::= select */ -{ - SelectDest dest = {SRT_Output, 0, 0, 0, 0, 0}; - sqlite3Select(pParse, yymsp[0].minor.yy3, &dest); - sqlite3ExplainBegin(pParse->pVdbe); - sqlite3ExplainSelect(pParse->pVdbe, yymsp[0].minor.yy3); - sqlite3ExplainFinish(pParse->pVdbe); - sqlite3SelectDelete(pParse->db, yymsp[0].minor.yy3); -} - break; - case 112: /* select ::= with selectnowith */ -{ - Select *p = yymsp[0].minor.yy3, *pNext, *pLoop; - if( p ){ - int cnt = 0, mxSelect; - p->pWith = yymsp[-1].minor.yy59; - if( p->pPrior ){ - pNext = 0; - for(pLoop=p; pLoop; pNext=pLoop, pLoop=pLoop->pPrior, cnt++){ - pLoop->pNext = pNext; - pLoop->selFlags |= SF_Compound; - } - mxSelect = pParse->db->aLimit[SQLITE_LIMIT_COMPOUND_SELECT]; - if( mxSelect && cnt>mxSelect ){ - sqlite3ErrorMsg(pParse, "too many terms in compound SELECT"); - } - } - }else{ - sqlite3WithDelete(pParse->db, yymsp[-1].minor.yy59); - } - yygotominor.yy3 = p; -} - break; - case 113: /* selectnowith ::= oneselect */ - case 119: /* oneselect ::= values */ yytestcase(yyruleno==119); -{yygotominor.yy3 = yymsp[0].minor.yy3;} - break; - case 114: /* selectnowith ::= selectnowith multiselect_op oneselect */ -{ - Select *pRhs = yymsp[0].minor.yy3; - if( pRhs && pRhs->pPrior ){ - SrcList *pFrom; - Token x; - x.n = 0; - pFrom = sqlite3SrcListAppendFromTerm(pParse,0,0,0,&x,pRhs,0,0); - pRhs = sqlite3SelectNew(pParse,0,pFrom,0,0,0,0,0,0,0); - } - if( pRhs ){ - pRhs->op = (u8)yymsp[-1].minor.yy328; - pRhs->pPrior = yymsp[-2].minor.yy3; - if( yymsp[-1].minor.yy328!=TK_ALL ) pParse->hasCompound = 1; - }else{ - sqlite3SelectDelete(pParse->db, yymsp[-2].minor.yy3); - } - yygotominor.yy3 = pRhs; -} - break; - case 116: /* multiselect_op ::= 
UNION ALL */ -{yygotominor.yy328 = TK_ALL;} - break; - case 118: /* oneselect ::= SELECT distinct selcollist from where_opt groupby_opt having_opt orderby_opt limit_opt */ -{ - yygotominor.yy3 = sqlite3SelectNew(pParse,yymsp[-6].minor.yy14,yymsp[-5].minor.yy65,yymsp[-4].minor.yy132,yymsp[-3].minor.yy14,yymsp[-2].minor.yy132,yymsp[-1].minor.yy14,yymsp[-7].minor.yy381,yymsp[0].minor.yy476.pLimit,yymsp[0].minor.yy476.pOffset); -} - break; - case 120: /* values ::= VALUES LP nexprlist RP */ -{ - yygotominor.yy3 = sqlite3SelectNew(pParse,yymsp[-1].minor.yy14,0,0,0,0,0,SF_Values,0,0); -} - break; - case 121: /* values ::= values COMMA LP exprlist RP */ -{ - Select *pRight = sqlite3SelectNew(pParse,yymsp[-1].minor.yy14,0,0,0,0,0,SF_Values,0,0); - if( pRight ){ - pRight->op = TK_ALL; - pRight->pPrior = yymsp[-4].minor.yy3; - yygotominor.yy3 = pRight; - }else{ - yygotominor.yy3 = yymsp[-4].minor.yy3; - } -} - break; - case 122: /* distinct ::= DISTINCT */ -{yygotominor.yy381 = SF_Distinct;} - break; - case 123: /* distinct ::= ALL */ - case 124: /* distinct ::= */ yytestcase(yyruleno==124); -{yygotominor.yy381 = 0;} - break; - case 125: /* sclp ::= selcollist COMMA */ - case 243: /* idxlist_opt ::= LP idxlist RP */ yytestcase(yyruleno==243); -{yygotominor.yy14 = yymsp[-1].minor.yy14;} - break; - case 126: /* sclp ::= */ - case 154: /* orderby_opt ::= */ yytestcase(yyruleno==154); - case 161: /* groupby_opt ::= */ yytestcase(yyruleno==161); - case 236: /* exprlist ::= */ yytestcase(yyruleno==236); - case 242: /* idxlist_opt ::= */ yytestcase(yyruleno==242); -{yygotominor.yy14 = 0;} - break; - case 127: /* selcollist ::= sclp expr as */ -{ - yygotominor.yy14 = sqlite3ExprListAppend(pParse, yymsp[-2].minor.yy14, yymsp[-1].minor.yy346.pExpr); - if( yymsp[0].minor.yy0.n>0 ) sqlite3ExprListSetName(pParse, yygotominor.yy14, &yymsp[0].minor.yy0, 1); - sqlite3ExprListSetSpan(pParse,yygotominor.yy14,&yymsp[-1].minor.yy346); -} - break; - case 128: /* selcollist ::= sclp STAR */ -{ - Expr *p = sqlite3Expr(pParse->db, TK_ALL, 0); - yygotominor.yy14 = sqlite3ExprListAppend(pParse, yymsp[-1].minor.yy14, p); -} - break; - case 129: /* selcollist ::= sclp nm DOT STAR */ -{ - Expr *pRight = sqlite3PExpr(pParse, TK_ALL, 0, 0, &yymsp[0].minor.yy0); - Expr *pLeft = sqlite3PExpr(pParse, TK_ID, 0, 0, &yymsp[-2].minor.yy0); - Expr *pDot = sqlite3PExpr(pParse, TK_DOT, pLeft, pRight, 0); - yygotominor.yy14 = sqlite3ExprListAppend(pParse,yymsp[-3].minor.yy14, pDot); -} - break; - case 132: /* as ::= */ -{yygotominor.yy0.n = 0;} - break; - case 133: /* from ::= */ -{yygotominor.yy65 = sqlite3DbMallocZero(pParse->db, sizeof(*yygotominor.yy65));} - break; - case 134: /* from ::= FROM seltablist */ -{ - yygotominor.yy65 = yymsp[0].minor.yy65; - sqlite3SrcListShiftJoinType(yygotominor.yy65); -} - break; - case 135: /* stl_prefix ::= seltablist joinop */ -{ - yygotominor.yy65 = yymsp[-1].minor.yy65; - if( ALWAYS(yygotominor.yy65 && yygotominor.yy65->nSrc>0) ) yygotominor.yy65->a[yygotominor.yy65->nSrc-1].jointype = (u8)yymsp[0].minor.yy328; -} - break; - case 136: /* stl_prefix ::= */ -{yygotominor.yy65 = 0;} - break; - case 137: /* seltablist ::= stl_prefix nm dbnm as indexed_opt on_opt using_opt */ -{ - yygotominor.yy65 = sqlite3SrcListAppendFromTerm(pParse,yymsp[-6].minor.yy65,&yymsp[-5].minor.yy0,&yymsp[-4].minor.yy0,&yymsp[-3].minor.yy0,0,yymsp[-1].minor.yy132,yymsp[0].minor.yy408); - sqlite3SrcListIndexedBy(pParse, yygotominor.yy65, &yymsp[-2].minor.yy0); -} - break; - case 138: /* seltablist ::= stl_prefix LP select RP as 
on_opt using_opt */ -{ - yygotominor.yy65 = sqlite3SrcListAppendFromTerm(pParse,yymsp[-6].minor.yy65,0,0,&yymsp[-2].minor.yy0,yymsp[-4].minor.yy3,yymsp[-1].minor.yy132,yymsp[0].minor.yy408); - } - break; - case 139: /* seltablist ::= stl_prefix LP seltablist RP as on_opt using_opt */ -{ - if( yymsp[-6].minor.yy65==0 && yymsp[-2].minor.yy0.n==0 && yymsp[-1].minor.yy132==0 && yymsp[0].minor.yy408==0 ){ - yygotominor.yy65 = yymsp[-4].minor.yy65; - }else if( yymsp[-4].minor.yy65->nSrc==1 ){ - yygotominor.yy65 = sqlite3SrcListAppendFromTerm(pParse,yymsp[-6].minor.yy65,0,0,&yymsp[-2].minor.yy0,0,yymsp[-1].minor.yy132,yymsp[0].minor.yy408); - if( yygotominor.yy65 ){ - struct SrcList_item *pNew = &yygotominor.yy65->a[yygotominor.yy65->nSrc-1]; - struct SrcList_item *pOld = yymsp[-4].minor.yy65->a; - pNew->zName = pOld->zName; - pNew->zDatabase = pOld->zDatabase; - pNew->pSelect = pOld->pSelect; - pOld->zName = pOld->zDatabase = 0; - pOld->pSelect = 0; - } - sqlite3SrcListDelete(pParse->db, yymsp[-4].minor.yy65); - }else{ - Select *pSubquery; - sqlite3SrcListShiftJoinType(yymsp[-4].minor.yy65); - pSubquery = sqlite3SelectNew(pParse,0,yymsp[-4].minor.yy65,0,0,0,0,SF_NestedFrom,0,0); - yygotominor.yy65 = sqlite3SrcListAppendFromTerm(pParse,yymsp[-6].minor.yy65,0,0,&yymsp[-2].minor.yy0,pSubquery,yymsp[-1].minor.yy132,yymsp[0].minor.yy408); - } - } - break; - case 140: /* dbnm ::= */ - case 149: /* indexed_opt ::= */ yytestcase(yyruleno==149); -{yygotominor.yy0.z=0; yygotominor.yy0.n=0;} - break; - case 142: /* fullname ::= nm dbnm */ -{yygotominor.yy65 = sqlite3SrcListAppend(pParse->db,0,&yymsp[-1].minor.yy0,&yymsp[0].minor.yy0);} - break; - case 143: /* joinop ::= COMMA|JOIN */ -{ yygotominor.yy328 = JT_INNER; } - break; - case 144: /* joinop ::= JOIN_KW JOIN */ -{ yygotominor.yy328 = sqlite3JoinType(pParse,&yymsp[-1].minor.yy0,0,0); } - break; - case 145: /* joinop ::= JOIN_KW nm JOIN */ -{ yygotominor.yy328 = sqlite3JoinType(pParse,&yymsp[-2].minor.yy0,&yymsp[-1].minor.yy0,0); } - break; - case 146: /* joinop ::= JOIN_KW nm nm JOIN */ -{ yygotominor.yy328 = sqlite3JoinType(pParse,&yymsp[-3].minor.yy0,&yymsp[-2].minor.yy0,&yymsp[-1].minor.yy0); } - break; - case 147: /* on_opt ::= ON expr */ - case 164: /* having_opt ::= HAVING expr */ yytestcase(yyruleno==164); - case 171: /* where_opt ::= WHERE expr */ yytestcase(yyruleno==171); - case 231: /* case_else ::= ELSE expr */ yytestcase(yyruleno==231); - case 233: /* case_operand ::= expr */ yytestcase(yyruleno==233); -{yygotominor.yy132 = yymsp[0].minor.yy346.pExpr;} - break; - case 148: /* on_opt ::= */ - case 163: /* having_opt ::= */ yytestcase(yyruleno==163); - case 170: /* where_opt ::= */ yytestcase(yyruleno==170); - case 232: /* case_else ::= */ yytestcase(yyruleno==232); - case 234: /* case_operand ::= */ yytestcase(yyruleno==234); -{yygotominor.yy132 = 0;} - break; - case 151: /* indexed_opt ::= NOT INDEXED */ -{yygotominor.yy0.z=0; yygotominor.yy0.n=1;} - break; - case 152: /* using_opt ::= USING LP idlist RP */ - case 180: /* inscollist_opt ::= LP idlist RP */ yytestcase(yyruleno==180); -{yygotominor.yy408 = yymsp[-1].minor.yy408;} - break; - case 153: /* using_opt ::= */ - case 179: /* inscollist_opt ::= */ yytestcase(yyruleno==179); -{yygotominor.yy408 = 0;} - break; - case 155: /* orderby_opt ::= ORDER BY sortlist */ - case 162: /* groupby_opt ::= GROUP BY nexprlist */ yytestcase(yyruleno==162); - case 235: /* exprlist ::= nexprlist */ yytestcase(yyruleno==235); -{yygotominor.yy14 = yymsp[0].minor.yy14;} - break; - case 156: /* sortlist 
::= sortlist COMMA expr sortorder */ -{ - yygotominor.yy14 = sqlite3ExprListAppend(pParse,yymsp[-3].minor.yy14,yymsp[-1].minor.yy346.pExpr); - if( yygotominor.yy14 ) yygotominor.yy14->a[yygotominor.yy14->nExpr-1].sortOrder = (u8)yymsp[0].minor.yy328; -} - break; - case 157: /* sortlist ::= expr sortorder */ -{ - yygotominor.yy14 = sqlite3ExprListAppend(pParse,0,yymsp[-1].minor.yy346.pExpr); - if( yygotominor.yy14 && ALWAYS(yygotominor.yy14->a) ) yygotominor.yy14->a[0].sortOrder = (u8)yymsp[0].minor.yy328; -} - break; - case 158: /* sortorder ::= ASC */ - case 160: /* sortorder ::= */ yytestcase(yyruleno==160); -{yygotominor.yy328 = SQLITE_SO_ASC;} - break; - case 159: /* sortorder ::= DESC */ -{yygotominor.yy328 = SQLITE_SO_DESC;} - break; - case 165: /* limit_opt ::= */ -{yygotominor.yy476.pLimit = 0; yygotominor.yy476.pOffset = 0;} - break; - case 166: /* limit_opt ::= LIMIT expr */ -{yygotominor.yy476.pLimit = yymsp[0].minor.yy346.pExpr; yygotominor.yy476.pOffset = 0;} - break; - case 167: /* limit_opt ::= LIMIT expr OFFSET expr */ -{yygotominor.yy476.pLimit = yymsp[-2].minor.yy346.pExpr; yygotominor.yy476.pOffset = yymsp[0].minor.yy346.pExpr;} - break; - case 168: /* limit_opt ::= LIMIT expr COMMA expr */ -{yygotominor.yy476.pOffset = yymsp[-2].minor.yy346.pExpr; yygotominor.yy476.pLimit = yymsp[0].minor.yy346.pExpr;} - break; - case 169: /* cmd ::= with DELETE FROM fullname indexed_opt where_opt */ -{ - sqlite3WithPush(pParse, yymsp[-5].minor.yy59, 1); - sqlite3SrcListIndexedBy(pParse, yymsp[-2].minor.yy65, &yymsp[-1].minor.yy0); - sqlite3DeleteFrom(pParse,yymsp[-2].minor.yy65,yymsp[0].minor.yy132); -} - break; - case 172: /* cmd ::= with UPDATE orconf fullname indexed_opt SET setlist where_opt */ -{ - sqlite3WithPush(pParse, yymsp[-7].minor.yy59, 1); - sqlite3SrcListIndexedBy(pParse, yymsp[-4].minor.yy65, &yymsp[-3].minor.yy0); - sqlite3ExprListCheckLength(pParse,yymsp[-1].minor.yy14,"set list"); - sqlite3Update(pParse,yymsp[-4].minor.yy65,yymsp[-1].minor.yy14,yymsp[0].minor.yy132,yymsp[-5].minor.yy186); -} - break; - case 173: /* setlist ::= setlist COMMA nm EQ expr */ -{ - yygotominor.yy14 = sqlite3ExprListAppend(pParse, yymsp[-4].minor.yy14, yymsp[0].minor.yy346.pExpr); - sqlite3ExprListSetName(pParse, yygotominor.yy14, &yymsp[-2].minor.yy0, 1); -} - break; - case 174: /* setlist ::= nm EQ expr */ -{ - yygotominor.yy14 = sqlite3ExprListAppend(pParse, 0, yymsp[0].minor.yy346.pExpr); - sqlite3ExprListSetName(pParse, yygotominor.yy14, &yymsp[-2].minor.yy0, 1); -} - break; - case 175: /* cmd ::= with insert_cmd INTO fullname inscollist_opt select */ -{ - sqlite3WithPush(pParse, yymsp[-5].minor.yy59, 1); - sqlite3Insert(pParse, yymsp[-2].minor.yy65, yymsp[0].minor.yy3, yymsp[-1].minor.yy408, yymsp[-4].minor.yy186); -} - break; - case 176: /* cmd ::= with insert_cmd INTO fullname inscollist_opt DEFAULT VALUES */ -{ - sqlite3WithPush(pParse, yymsp[-6].minor.yy59, 1); - sqlite3Insert(pParse, yymsp[-3].minor.yy65, 0, yymsp[-2].minor.yy408, yymsp[-5].minor.yy186); -} - break; - case 177: /* insert_cmd ::= INSERT orconf */ -{yygotominor.yy186 = yymsp[0].minor.yy186;} - break; - case 178: /* insert_cmd ::= REPLACE */ -{yygotominor.yy186 = OE_Replace;} - break; - case 181: /* idlist ::= idlist COMMA nm */ -{yygotominor.yy408 = sqlite3IdListAppend(pParse->db,yymsp[-2].minor.yy408,&yymsp[0].minor.yy0);} - break; - case 182: /* idlist ::= nm */ -{yygotominor.yy408 = sqlite3IdListAppend(pParse->db,0,&yymsp[0].minor.yy0);} - break; - case 183: /* expr ::= term */ -{yygotominor.yy346 = 
yymsp[0].minor.yy346;} - break; - case 184: /* expr ::= LP expr RP */ -{yygotominor.yy346.pExpr = yymsp[-1].minor.yy346.pExpr; spanSet(&yygotominor.yy346,&yymsp[-2].minor.yy0,&yymsp[0].minor.yy0);} - break; - case 185: /* term ::= NULL */ - case 190: /* term ::= INTEGER|FLOAT|BLOB */ yytestcase(yyruleno==190); - case 191: /* term ::= STRING */ yytestcase(yyruleno==191); -{spanExpr(&yygotominor.yy346, pParse, yymsp[0].major, &yymsp[0].minor.yy0);} - break; - case 186: /* expr ::= ID|INDEXED */ - case 187: /* expr ::= JOIN_KW */ yytestcase(yyruleno==187); -{spanExpr(&yygotominor.yy346, pParse, TK_ID, &yymsp[0].minor.yy0);} - break; - case 188: /* expr ::= nm DOT nm */ -{ - Expr *temp1 = sqlite3PExpr(pParse, TK_ID, 0, 0, &yymsp[-2].minor.yy0); - Expr *temp2 = sqlite3PExpr(pParse, TK_ID, 0, 0, &yymsp[0].minor.yy0); - yygotominor.yy346.pExpr = sqlite3PExpr(pParse, TK_DOT, temp1, temp2, 0); - spanSet(&yygotominor.yy346,&yymsp[-2].minor.yy0,&yymsp[0].minor.yy0); -} - break; - case 189: /* expr ::= nm DOT nm DOT nm */ -{ - Expr *temp1 = sqlite3PExpr(pParse, TK_ID, 0, 0, &yymsp[-4].minor.yy0); - Expr *temp2 = sqlite3PExpr(pParse, TK_ID, 0, 0, &yymsp[-2].minor.yy0); - Expr *temp3 = sqlite3PExpr(pParse, TK_ID, 0, 0, &yymsp[0].minor.yy0); - Expr *temp4 = sqlite3PExpr(pParse, TK_DOT, temp2, temp3, 0); - yygotominor.yy346.pExpr = sqlite3PExpr(pParse, TK_DOT, temp1, temp4, 0); - spanSet(&yygotominor.yy346,&yymsp[-4].minor.yy0,&yymsp[0].minor.yy0); -} - break; - case 192: /* expr ::= VARIABLE */ -{ - if( yymsp[0].minor.yy0.n>=2 && yymsp[0].minor.yy0.z[0]=='#' && sqlite3Isdigit(yymsp[0].minor.yy0.z[1]) ){ - /* When doing a nested parse, one can include terms in an expression - ** that look like this: #1 #2 ... These terms refer to registers - ** in the virtual machine. #N is the N-th register. 
*/ - if( pParse->nested==0 ){ - sqlite3ErrorMsg(pParse, "near \"%T\": syntax error", &yymsp[0].minor.yy0); - yygotominor.yy346.pExpr = 0; - }else{ - yygotominor.yy346.pExpr = sqlite3PExpr(pParse, TK_REGISTER, 0, 0, &yymsp[0].minor.yy0); - if( yygotominor.yy346.pExpr ) sqlite3GetInt32(&yymsp[0].minor.yy0.z[1], &yygotominor.yy346.pExpr->iTable); - } - }else{ - spanExpr(&yygotominor.yy346, pParse, TK_VARIABLE, &yymsp[0].minor.yy0); - sqlite3ExprAssignVarNumber(pParse, yygotominor.yy346.pExpr); - } - spanSet(&yygotominor.yy346, &yymsp[0].minor.yy0, &yymsp[0].minor.yy0); -} - break; - case 193: /* expr ::= expr COLLATE ID|STRING */ -{ - yygotominor.yy346.pExpr = sqlite3ExprAddCollateToken(pParse, yymsp[-2].minor.yy346.pExpr, &yymsp[0].minor.yy0); - yygotominor.yy346.zStart = yymsp[-2].minor.yy346.zStart; - yygotominor.yy346.zEnd = &yymsp[0].minor.yy0.z[yymsp[0].minor.yy0.n]; -} - break; - case 194: /* expr ::= CAST LP expr AS typetoken RP */ -{ - yygotominor.yy346.pExpr = sqlite3PExpr(pParse, TK_CAST, yymsp[-3].minor.yy346.pExpr, 0, &yymsp[-1].minor.yy0); - spanSet(&yygotominor.yy346,&yymsp[-5].minor.yy0,&yymsp[0].minor.yy0); -} - break; - case 195: /* expr ::= ID|INDEXED LP distinct exprlist RP */ -{ - if( yymsp[-1].minor.yy14 && yymsp[-1].minor.yy14->nExpr>pParse->db->aLimit[SQLITE_LIMIT_FUNCTION_ARG] ){ - sqlite3ErrorMsg(pParse, "too many arguments on function %T", &yymsp[-4].minor.yy0); - } - yygotominor.yy346.pExpr = sqlite3ExprFunction(pParse, yymsp[-1].minor.yy14, &yymsp[-4].minor.yy0); - spanSet(&yygotominor.yy346,&yymsp[-4].minor.yy0,&yymsp[0].minor.yy0); - if( yymsp[-2].minor.yy381 && yygotominor.yy346.pExpr ){ - yygotominor.yy346.pExpr->flags |= EP_Distinct; - } -} - break; - case 196: /* expr ::= ID|INDEXED LP STAR RP */ -{ - yygotominor.yy346.pExpr = sqlite3ExprFunction(pParse, 0, &yymsp[-3].minor.yy0); - spanSet(&yygotominor.yy346,&yymsp[-3].minor.yy0,&yymsp[0].minor.yy0); -} - break; - case 197: /* term ::= CTIME_KW */ -{ - yygotominor.yy346.pExpr = sqlite3ExprFunction(pParse, 0, &yymsp[0].minor.yy0); - spanSet(&yygotominor.yy346, &yymsp[0].minor.yy0, &yymsp[0].minor.yy0); -} - break; - case 198: /* expr ::= expr AND expr */ - case 199: /* expr ::= expr OR expr */ yytestcase(yyruleno==199); - case 200: /* expr ::= expr LT|GT|GE|LE expr */ yytestcase(yyruleno==200); - case 201: /* expr ::= expr EQ|NE expr */ yytestcase(yyruleno==201); - case 202: /* expr ::= expr BITAND|BITOR|LSHIFT|RSHIFT expr */ yytestcase(yyruleno==202); - case 203: /* expr ::= expr PLUS|MINUS expr */ yytestcase(yyruleno==203); - case 204: /* expr ::= expr STAR|SLASH|REM expr */ yytestcase(yyruleno==204); - case 205: /* expr ::= expr CONCAT expr */ yytestcase(yyruleno==205); -{spanBinaryExpr(&yygotominor.yy346,pParse,yymsp[-1].major,&yymsp[-2].minor.yy346,&yymsp[0].minor.yy346);} - break; - case 206: /* likeop ::= LIKE_KW|MATCH */ -{yygotominor.yy96.eOperator = yymsp[0].minor.yy0; yygotominor.yy96.bNot = 0;} - break; - case 207: /* likeop ::= NOT LIKE_KW|MATCH */ -{yygotominor.yy96.eOperator = yymsp[0].minor.yy0; yygotominor.yy96.bNot = 1;} - break; - case 208: /* expr ::= expr likeop expr */ -{ - ExprList *pList; - pList = sqlite3ExprListAppend(pParse,0, yymsp[0].minor.yy346.pExpr); - pList = sqlite3ExprListAppend(pParse,pList, yymsp[-2].minor.yy346.pExpr); - yygotominor.yy346.pExpr = sqlite3ExprFunction(pParse, pList, &yymsp[-1].minor.yy96.eOperator); - if( yymsp[-1].minor.yy96.bNot ) yygotominor.yy346.pExpr = sqlite3PExpr(pParse, TK_NOT, yygotominor.yy346.pExpr, 0, 0); - yygotominor.yy346.zStart = 
yymsp[-2].minor.yy346.zStart; - yygotominor.yy346.zEnd = yymsp[0].minor.yy346.zEnd; - if( yygotominor.yy346.pExpr ) yygotominor.yy346.pExpr->flags |= EP_InfixFunc; -} - break; - case 209: /* expr ::= expr likeop expr ESCAPE expr */ -{ - ExprList *pList; - pList = sqlite3ExprListAppend(pParse,0, yymsp[-2].minor.yy346.pExpr); - pList = sqlite3ExprListAppend(pParse,pList, yymsp[-4].minor.yy346.pExpr); - pList = sqlite3ExprListAppend(pParse,pList, yymsp[0].minor.yy346.pExpr); - yygotominor.yy346.pExpr = sqlite3ExprFunction(pParse, pList, &yymsp[-3].minor.yy96.eOperator); - if( yymsp[-3].minor.yy96.bNot ) yygotominor.yy346.pExpr = sqlite3PExpr(pParse, TK_NOT, yygotominor.yy346.pExpr, 0, 0); - yygotominor.yy346.zStart = yymsp[-4].minor.yy346.zStart; - yygotominor.yy346.zEnd = yymsp[0].minor.yy346.zEnd; - if( yygotominor.yy346.pExpr ) yygotominor.yy346.pExpr->flags |= EP_InfixFunc; -} - break; - case 210: /* expr ::= expr ISNULL|NOTNULL */ -{spanUnaryPostfix(&yygotominor.yy346,pParse,yymsp[0].major,&yymsp[-1].minor.yy346,&yymsp[0].minor.yy0);} - break; - case 211: /* expr ::= expr NOT NULL */ -{spanUnaryPostfix(&yygotominor.yy346,pParse,TK_NOTNULL,&yymsp[-2].minor.yy346,&yymsp[0].minor.yy0);} - break; - case 212: /* expr ::= expr IS expr */ -{ - spanBinaryExpr(&yygotominor.yy346,pParse,TK_IS,&yymsp[-2].minor.yy346,&yymsp[0].minor.yy346); - binaryToUnaryIfNull(pParse, yymsp[0].minor.yy346.pExpr, yygotominor.yy346.pExpr, TK_ISNULL); -} - break; - case 213: /* expr ::= expr IS NOT expr */ -{ - spanBinaryExpr(&yygotominor.yy346,pParse,TK_ISNOT,&yymsp[-3].minor.yy346,&yymsp[0].minor.yy346); - binaryToUnaryIfNull(pParse, yymsp[0].minor.yy346.pExpr, yygotominor.yy346.pExpr, TK_NOTNULL); -} - break; - case 214: /* expr ::= NOT expr */ - case 215: /* expr ::= BITNOT expr */ yytestcase(yyruleno==215); -{spanUnaryPrefix(&yygotominor.yy346,pParse,yymsp[-1].major,&yymsp[0].minor.yy346,&yymsp[-1].minor.yy0);} - break; - case 216: /* expr ::= MINUS expr */ -{spanUnaryPrefix(&yygotominor.yy346,pParse,TK_UMINUS,&yymsp[0].minor.yy346,&yymsp[-1].minor.yy0);} - break; - case 217: /* expr ::= PLUS expr */ -{spanUnaryPrefix(&yygotominor.yy346,pParse,TK_UPLUS,&yymsp[0].minor.yy346,&yymsp[-1].minor.yy0);} - break; - case 220: /* expr ::= expr between_op expr AND expr */ -{ - ExprList *pList = sqlite3ExprListAppend(pParse,0, yymsp[-2].minor.yy346.pExpr); - pList = sqlite3ExprListAppend(pParse,pList, yymsp[0].minor.yy346.pExpr); - yygotominor.yy346.pExpr = sqlite3PExpr(pParse, TK_BETWEEN, yymsp[-4].minor.yy346.pExpr, 0, 0); - if( yygotominor.yy346.pExpr ){ - yygotominor.yy346.pExpr->x.pList = pList; - }else{ - sqlite3ExprListDelete(pParse->db, pList); - } - if( yymsp[-3].minor.yy328 ) yygotominor.yy346.pExpr = sqlite3PExpr(pParse, TK_NOT, yygotominor.yy346.pExpr, 0, 0); - yygotominor.yy346.zStart = yymsp[-4].minor.yy346.zStart; - yygotominor.yy346.zEnd = yymsp[0].minor.yy346.zEnd; -} - break; - case 223: /* expr ::= expr in_op LP exprlist RP */ -{ - if( yymsp[-1].minor.yy14==0 ){ - /* Expressions of the form - ** - ** expr1 IN () - ** expr1 NOT IN () - ** - ** simplify to constants 0 (false) and 1 (true), respectively, - ** regardless of the value of expr1. 
- */ - yygotominor.yy346.pExpr = sqlite3PExpr(pParse, TK_INTEGER, 0, 0, &sqlite3IntTokens[yymsp[-3].minor.yy328]); - sqlite3ExprDelete(pParse->db, yymsp[-4].minor.yy346.pExpr); - }else if( yymsp[-1].minor.yy14->nExpr==1 ){ - /* Expressions of the form: - ** - ** expr1 IN (?1) - ** expr1 NOT IN (?2) - ** - ** with exactly one value on the RHS can be simplified to something - ** like this: - ** - ** expr1 == ?1 - ** expr1 <> ?2 - ** - ** But, the RHS of the == or <> is marked with the EP_Generic flag - ** so that it may not contribute to the computation of comparison - ** affinity or the collating sequence to use for comparison. Otherwise, - ** the semantics would be subtly different from IN or NOT IN. - */ - Expr *pRHS = yymsp[-1].minor.yy14->a[0].pExpr; - yymsp[-1].minor.yy14->a[0].pExpr = 0; - sqlite3ExprListDelete(pParse->db, yymsp[-1].minor.yy14); - /* pRHS cannot be NULL because a malloc error would have been detected - ** before now and control would have never reached this point */ - if( ALWAYS(pRHS) ){ - pRHS->flags &= ~EP_Collate; - pRHS->flags |= EP_Generic; - } - yygotominor.yy346.pExpr = sqlite3PExpr(pParse, yymsp[-3].minor.yy328 ? TK_NE : TK_EQ, yymsp[-4].minor.yy346.pExpr, pRHS, 0); - }else{ - yygotominor.yy346.pExpr = sqlite3PExpr(pParse, TK_IN, yymsp[-4].minor.yy346.pExpr, 0, 0); - if( yygotominor.yy346.pExpr ){ - yygotominor.yy346.pExpr->x.pList = yymsp[-1].minor.yy14; - sqlite3ExprSetHeight(pParse, yygotominor.yy346.pExpr); - }else{ - sqlite3ExprListDelete(pParse->db, yymsp[-1].minor.yy14); - } - if( yymsp[-3].minor.yy328 ) yygotominor.yy346.pExpr = sqlite3PExpr(pParse, TK_NOT, yygotominor.yy346.pExpr, 0, 0); - } - yygotominor.yy346.zStart = yymsp[-4].minor.yy346.zStart; - yygotominor.yy346.zEnd = &yymsp[0].minor.yy0.z[yymsp[0].minor.yy0.n]; - } - break; - case 224: /* expr ::= LP select RP */ -{ - yygotominor.yy346.pExpr = sqlite3PExpr(pParse, TK_SELECT, 0, 0, 0); - if( yygotominor.yy346.pExpr ){ - yygotominor.yy346.pExpr->x.pSelect = yymsp[-1].minor.yy3; - ExprSetProperty(yygotominor.yy346.pExpr, EP_xIsSelect); - sqlite3ExprSetHeight(pParse, yygotominor.yy346.pExpr); - }else{ - sqlite3SelectDelete(pParse->db, yymsp[-1].minor.yy3); - } - yygotominor.yy346.zStart = yymsp[-2].minor.yy0.z; - yygotominor.yy346.zEnd = &yymsp[0].minor.yy0.z[yymsp[0].minor.yy0.n]; - } - break; - case 225: /* expr ::= expr in_op LP select RP */ -{ - yygotominor.yy346.pExpr = sqlite3PExpr(pParse, TK_IN, yymsp[-4].minor.yy346.pExpr, 0, 0); - if( yygotominor.yy346.pExpr ){ - yygotominor.yy346.pExpr->x.pSelect = yymsp[-1].minor.yy3; - ExprSetProperty(yygotominor.yy346.pExpr, EP_xIsSelect); - sqlite3ExprSetHeight(pParse, yygotominor.yy346.pExpr); - }else{ - sqlite3SelectDelete(pParse->db, yymsp[-1].minor.yy3); - } - if( yymsp[-3].minor.yy328 ) yygotominor.yy346.pExpr = sqlite3PExpr(pParse, TK_NOT, yygotominor.yy346.pExpr, 0, 0); - yygotominor.yy346.zStart = yymsp[-4].minor.yy346.zStart; - yygotominor.yy346.zEnd = &yymsp[0].minor.yy0.z[yymsp[0].minor.yy0.n]; - } - break; - case 226: /* expr ::= expr in_op nm dbnm */ -{ - SrcList *pSrc = sqlite3SrcListAppend(pParse->db, 0,&yymsp[-1].minor.yy0,&yymsp[0].minor.yy0); - yygotominor.yy346.pExpr = sqlite3PExpr(pParse, TK_IN, yymsp[-3].minor.yy346.pExpr, 0, 0); - if( yygotominor.yy346.pExpr ){ - yygotominor.yy346.pExpr->x.pSelect = sqlite3SelectNew(pParse, 0,pSrc,0,0,0,0,0,0,0); - ExprSetProperty(yygotominor.yy346.pExpr, EP_xIsSelect); - sqlite3ExprSetHeight(pParse, yygotominor.yy346.pExpr); - }else{ - sqlite3SrcListDelete(pParse->db, pSrc); - } - if( 
yymsp[-2].minor.yy328 ) yygotominor.yy346.pExpr = sqlite3PExpr(pParse, TK_NOT, yygotominor.yy346.pExpr, 0, 0); - yygotominor.yy346.zStart = yymsp[-3].minor.yy346.zStart; - yygotominor.yy346.zEnd = yymsp[0].minor.yy0.z ? &yymsp[0].minor.yy0.z[yymsp[0].minor.yy0.n] : &yymsp[-1].minor.yy0.z[yymsp[-1].minor.yy0.n]; - } - break; - case 227: /* expr ::= EXISTS LP select RP */ -{ - Expr *p = yygotominor.yy346.pExpr = sqlite3PExpr(pParse, TK_EXISTS, 0, 0, 0); - if( p ){ - p->x.pSelect = yymsp[-1].minor.yy3; - ExprSetProperty(p, EP_xIsSelect); - sqlite3ExprSetHeight(pParse, p); - }else{ - sqlite3SelectDelete(pParse->db, yymsp[-1].minor.yy3); - } - yygotominor.yy346.zStart = yymsp[-3].minor.yy0.z; - yygotominor.yy346.zEnd = &yymsp[0].minor.yy0.z[yymsp[0].minor.yy0.n]; - } - break; - case 228: /* expr ::= CASE case_operand case_exprlist case_else END */ -{ - yygotominor.yy346.pExpr = sqlite3PExpr(pParse, TK_CASE, yymsp[-3].minor.yy132, 0, 0); - if( yygotominor.yy346.pExpr ){ - yygotominor.yy346.pExpr->x.pList = yymsp[-1].minor.yy132 ? sqlite3ExprListAppend(pParse,yymsp[-2].minor.yy14,yymsp[-1].minor.yy132) : yymsp[-2].minor.yy14; - sqlite3ExprSetHeight(pParse, yygotominor.yy346.pExpr); - }else{ - sqlite3ExprListDelete(pParse->db, yymsp[-2].minor.yy14); - sqlite3ExprDelete(pParse->db, yymsp[-1].minor.yy132); - } - yygotominor.yy346.zStart = yymsp[-4].minor.yy0.z; - yygotominor.yy346.zEnd = &yymsp[0].minor.yy0.z[yymsp[0].minor.yy0.n]; -} - break; - case 229: /* case_exprlist ::= case_exprlist WHEN expr THEN expr */ -{ - yygotominor.yy14 = sqlite3ExprListAppend(pParse,yymsp[-4].minor.yy14, yymsp[-2].minor.yy346.pExpr); - yygotominor.yy14 = sqlite3ExprListAppend(pParse,yygotominor.yy14, yymsp[0].minor.yy346.pExpr); -} - break; - case 230: /* case_exprlist ::= WHEN expr THEN expr */ -{ - yygotominor.yy14 = sqlite3ExprListAppend(pParse,0, yymsp[-2].minor.yy346.pExpr); - yygotominor.yy14 = sqlite3ExprListAppend(pParse,yygotominor.yy14, yymsp[0].minor.yy346.pExpr); -} - break; - case 237: /* nexprlist ::= nexprlist COMMA expr */ -{yygotominor.yy14 = sqlite3ExprListAppend(pParse,yymsp[-2].minor.yy14,yymsp[0].minor.yy346.pExpr);} - break; - case 238: /* nexprlist ::= expr */ -{yygotominor.yy14 = sqlite3ExprListAppend(pParse,0,yymsp[0].minor.yy346.pExpr);} - break; - case 239: /* cmd ::= createkw uniqueflag INDEX ifnotexists nm dbnm ON nm LP idxlist RP where_opt */ -{ - sqlite3CreateIndex(pParse, &yymsp[-7].minor.yy0, &yymsp[-6].minor.yy0, - sqlite3SrcListAppend(pParse->db,0,&yymsp[-4].minor.yy0,0), yymsp[-2].minor.yy14, yymsp[-10].minor.yy328, - &yymsp[-11].minor.yy0, yymsp[0].minor.yy132, SQLITE_SO_ASC, yymsp[-8].minor.yy328); -} - break; - case 240: /* uniqueflag ::= UNIQUE */ - case 291: /* raisetype ::= ABORT */ yytestcase(yyruleno==291); -{yygotominor.yy328 = OE_Abort;} - break; - case 241: /* uniqueflag ::= */ -{yygotominor.yy328 = OE_None;} - break; - case 244: /* idxlist ::= idxlist COMMA nm collate sortorder */ -{ - Expr *p = sqlite3ExprAddCollateToken(pParse, 0, &yymsp[-1].minor.yy0); - yygotominor.yy14 = sqlite3ExprListAppend(pParse,yymsp[-4].minor.yy14, p); - sqlite3ExprListSetName(pParse,yygotominor.yy14,&yymsp[-2].minor.yy0,1); - sqlite3ExprListCheckLength(pParse, yygotominor.yy14, "index"); - if( yygotominor.yy14 ) yygotominor.yy14->a[yygotominor.yy14->nExpr-1].sortOrder = (u8)yymsp[0].minor.yy328; -} - break; - case 245: /* idxlist ::= nm collate sortorder */ -{ - Expr *p = sqlite3ExprAddCollateToken(pParse, 0, &yymsp[-1].minor.yy0); - yygotominor.yy14 = sqlite3ExprListAppend(pParse,0, p); - 
sqlite3ExprListSetName(pParse, yygotominor.yy14, &yymsp[-2].minor.yy0, 1); - sqlite3ExprListCheckLength(pParse, yygotominor.yy14, "index"); - if( yygotominor.yy14 ) yygotominor.yy14->a[yygotominor.yy14->nExpr-1].sortOrder = (u8)yymsp[0].minor.yy328; -} - break; - case 246: /* collate ::= */ -{yygotominor.yy0.z = 0; yygotominor.yy0.n = 0;} - break; - case 248: /* cmd ::= DROP INDEX ifexists fullname */ -{sqlite3DropIndex(pParse, yymsp[0].minor.yy65, yymsp[-1].minor.yy328);} - break; - case 249: /* cmd ::= VACUUM */ - case 250: /* cmd ::= VACUUM nm */ yytestcase(yyruleno==250); -{sqlite3Vacuum(pParse);} - break; - case 251: /* cmd ::= PRAGMA nm dbnm */ -{sqlite3Pragma(pParse,&yymsp[-1].minor.yy0,&yymsp[0].minor.yy0,0,0);} - break; - case 252: /* cmd ::= PRAGMA nm dbnm EQ nmnum */ -{sqlite3Pragma(pParse,&yymsp[-3].minor.yy0,&yymsp[-2].minor.yy0,&yymsp[0].minor.yy0,0);} - break; - case 253: /* cmd ::= PRAGMA nm dbnm LP nmnum RP */ -{sqlite3Pragma(pParse,&yymsp[-4].minor.yy0,&yymsp[-3].minor.yy0,&yymsp[-1].minor.yy0,0);} - break; - case 254: /* cmd ::= PRAGMA nm dbnm EQ minus_num */ -{sqlite3Pragma(pParse,&yymsp[-3].minor.yy0,&yymsp[-2].minor.yy0,&yymsp[0].minor.yy0,1);} - break; - case 255: /* cmd ::= PRAGMA nm dbnm LP minus_num RP */ -{sqlite3Pragma(pParse,&yymsp[-4].minor.yy0,&yymsp[-3].minor.yy0,&yymsp[-1].minor.yy0,1);} - break; - case 264: /* cmd ::= createkw trigger_decl BEGIN trigger_cmd_list END */ -{ - Token all; - all.z = yymsp[-3].minor.yy0.z; - all.n = (int)(yymsp[0].minor.yy0.z - yymsp[-3].minor.yy0.z) + yymsp[0].minor.yy0.n; - sqlite3FinishTrigger(pParse, yymsp[-1].minor.yy473, &all); -} - break; - case 265: /* trigger_decl ::= temp TRIGGER ifnotexists nm dbnm trigger_time trigger_event ON fullname foreach_clause when_clause */ -{ - sqlite3BeginTrigger(pParse, &yymsp[-7].minor.yy0, &yymsp[-6].minor.yy0, yymsp[-5].minor.yy328, yymsp[-4].minor.yy378.a, yymsp[-4].minor.yy378.b, yymsp[-2].minor.yy65, yymsp[0].minor.yy132, yymsp[-10].minor.yy328, yymsp[-8].minor.yy328); - yygotominor.yy0 = (yymsp[-6].minor.yy0.n==0?yymsp[-7].minor.yy0:yymsp[-6].minor.yy0); -} - break; - case 266: /* trigger_time ::= BEFORE */ - case 269: /* trigger_time ::= */ yytestcase(yyruleno==269); -{ yygotominor.yy328 = TK_BEFORE; } - break; - case 267: /* trigger_time ::= AFTER */ -{ yygotominor.yy328 = TK_AFTER; } - break; - case 268: /* trigger_time ::= INSTEAD OF */ -{ yygotominor.yy328 = TK_INSTEAD;} - break; - case 270: /* trigger_event ::= DELETE|INSERT */ - case 271: /* trigger_event ::= UPDATE */ yytestcase(yyruleno==271); -{yygotominor.yy378.a = yymsp[0].major; yygotominor.yy378.b = 0;} - break; - case 272: /* trigger_event ::= UPDATE OF idlist */ -{yygotominor.yy378.a = TK_UPDATE; yygotominor.yy378.b = yymsp[0].minor.yy408;} - break; - case 275: /* when_clause ::= */ - case 296: /* key_opt ::= */ yytestcase(yyruleno==296); -{ yygotominor.yy132 = 0; } - break; - case 276: /* when_clause ::= WHEN expr */ - case 297: /* key_opt ::= KEY expr */ yytestcase(yyruleno==297); -{ yygotominor.yy132 = yymsp[0].minor.yy346.pExpr; } - break; - case 277: /* trigger_cmd_list ::= trigger_cmd_list trigger_cmd SEMI */ -{ - assert( yymsp[-2].minor.yy473!=0 ); - yymsp[-2].minor.yy473->pLast->pNext = yymsp[-1].minor.yy473; - yymsp[-2].minor.yy473->pLast = yymsp[-1].minor.yy473; - yygotominor.yy473 = yymsp[-2].minor.yy473; -} - break; - case 278: /* trigger_cmd_list ::= trigger_cmd SEMI */ -{ - assert( yymsp[-1].minor.yy473!=0 ); - yymsp[-1].minor.yy473->pLast = yymsp[-1].minor.yy473; - yygotominor.yy473 = 
yymsp[-1].minor.yy473; -} - break; - case 280: /* trnm ::= nm DOT nm */ -{ - yygotominor.yy0 = yymsp[0].minor.yy0; - sqlite3ErrorMsg(pParse, - "qualified table names are not allowed on INSERT, UPDATE, and DELETE " - "statements within triggers"); -} - break; - case 282: /* tridxby ::= INDEXED BY nm */ -{ - sqlite3ErrorMsg(pParse, - "the INDEXED BY clause is not allowed on UPDATE or DELETE statements " - "within triggers"); -} - break; - case 283: /* tridxby ::= NOT INDEXED */ -{ - sqlite3ErrorMsg(pParse, - "the NOT INDEXED clause is not allowed on UPDATE or DELETE statements " - "within triggers"); -} - break; - case 284: /* trigger_cmd ::= UPDATE orconf trnm tridxby SET setlist where_opt */ -{ yygotominor.yy473 = sqlite3TriggerUpdateStep(pParse->db, &yymsp[-4].minor.yy0, yymsp[-1].minor.yy14, yymsp[0].minor.yy132, yymsp[-5].minor.yy186); } - break; - case 285: /* trigger_cmd ::= insert_cmd INTO trnm inscollist_opt select */ -{yygotominor.yy473 = sqlite3TriggerInsertStep(pParse->db, &yymsp[-2].minor.yy0, yymsp[-1].minor.yy408, yymsp[0].minor.yy3, yymsp[-4].minor.yy186);} - break; - case 286: /* trigger_cmd ::= DELETE FROM trnm tridxby where_opt */ -{yygotominor.yy473 = sqlite3TriggerDeleteStep(pParse->db, &yymsp[-2].minor.yy0, yymsp[0].minor.yy132);} - break; - case 287: /* trigger_cmd ::= select */ -{yygotominor.yy473 = sqlite3TriggerSelectStep(pParse->db, yymsp[0].minor.yy3); } - break; - case 288: /* expr ::= RAISE LP IGNORE RP */ -{ - yygotominor.yy346.pExpr = sqlite3PExpr(pParse, TK_RAISE, 0, 0, 0); - if( yygotominor.yy346.pExpr ){ - yygotominor.yy346.pExpr->affinity = OE_Ignore; - } - yygotominor.yy346.zStart = yymsp[-3].minor.yy0.z; - yygotominor.yy346.zEnd = &yymsp[0].minor.yy0.z[yymsp[0].minor.yy0.n]; -} - break; - case 289: /* expr ::= RAISE LP raisetype COMMA nm RP */ -{ - yygotominor.yy346.pExpr = sqlite3PExpr(pParse, TK_RAISE, 0, 0, &yymsp[-1].minor.yy0); - if( yygotominor.yy346.pExpr ) { - yygotominor.yy346.pExpr->affinity = (char)yymsp[-3].minor.yy328; - } - yygotominor.yy346.zStart = yymsp[-5].minor.yy0.z; - yygotominor.yy346.zEnd = &yymsp[0].minor.yy0.z[yymsp[0].minor.yy0.n]; -} - break; - case 290: /* raisetype ::= ROLLBACK */ -{yygotominor.yy328 = OE_Rollback;} - break; - case 292: /* raisetype ::= FAIL */ -{yygotominor.yy328 = OE_Fail;} - break; - case 293: /* cmd ::= DROP TRIGGER ifexists fullname */ -{ - sqlite3DropTrigger(pParse,yymsp[0].minor.yy65,yymsp[-1].minor.yy328); -} - break; - case 294: /* cmd ::= ATTACH database_kw_opt expr AS expr key_opt */ -{ - sqlite3Attach(pParse, yymsp[-3].minor.yy346.pExpr, yymsp[-1].minor.yy346.pExpr, yymsp[0].minor.yy132); -} - break; - case 295: /* cmd ::= DETACH database_kw_opt expr */ -{ - sqlite3Detach(pParse, yymsp[0].minor.yy346.pExpr); -} - break; - case 300: /* cmd ::= REINDEX */ -{sqlite3Reindex(pParse, 0, 0);} - break; - case 301: /* cmd ::= REINDEX nm dbnm */ -{sqlite3Reindex(pParse, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy0);} - break; - case 302: /* cmd ::= ANALYZE */ -{sqlite3Analyze(pParse, 0, 0);} - break; - case 303: /* cmd ::= ANALYZE nm dbnm */ -{sqlite3Analyze(pParse, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy0);} - break; - case 304: /* cmd ::= ALTER TABLE fullname RENAME TO nm */ -{ - sqlite3AlterRenameTable(pParse,yymsp[-3].minor.yy65,&yymsp[0].minor.yy0); -} - break; - case 305: /* cmd ::= ALTER TABLE add_column_fullname ADD kwcolumn_opt column */ -{ - sqlite3AlterFinishAddColumn(pParse, &yymsp[0].minor.yy0); -} - break; - case 306: /* add_column_fullname ::= fullname */ -{ - pParse->db->lookaside.bEnabled = 
0; - sqlite3AlterBeginAddColumn(pParse, yymsp[0].minor.yy65); -} - break; - case 309: /* cmd ::= create_vtab */ -{sqlite3VtabFinishParse(pParse,0);} - break; - case 310: /* cmd ::= create_vtab LP vtabarglist RP */ -{sqlite3VtabFinishParse(pParse,&yymsp[0].minor.yy0);} - break; - case 311: /* create_vtab ::= createkw VIRTUAL TABLE ifnotexists nm dbnm USING nm */ -{ - sqlite3VtabBeginParse(pParse, &yymsp[-3].minor.yy0, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0, yymsp[-4].minor.yy328); -} - break; - case 314: /* vtabarg ::= */ -{sqlite3VtabArgInit(pParse);} - break; - case 316: /* vtabargtoken ::= ANY */ - case 317: /* vtabargtoken ::= lp anylist RP */ yytestcase(yyruleno==317); - case 318: /* lp ::= LP */ yytestcase(yyruleno==318); -{sqlite3VtabArgExtend(pParse,&yymsp[0].minor.yy0);} - break; - case 322: /* with ::= */ -{yygotominor.yy59 = 0;} - break; - case 323: /* with ::= WITH wqlist */ - case 324: /* with ::= WITH RECURSIVE wqlist */ yytestcase(yyruleno==324); -{ yygotominor.yy59 = yymsp[0].minor.yy59; } - break; - case 325: /* wqlist ::= nm idxlist_opt AS LP select RP */ -{ - yygotominor.yy59 = sqlite3WithAdd(pParse, 0, &yymsp[-5].minor.yy0, yymsp[-4].minor.yy14, yymsp[-1].minor.yy3); -} - break; - case 326: /* wqlist ::= wqlist COMMA nm idxlist_opt AS LP select RP */ -{ - yygotominor.yy59 = sqlite3WithAdd(pParse, yymsp[-7].minor.yy59, &yymsp[-5].minor.yy0, yymsp[-4].minor.yy14, yymsp[-1].minor.yy3); -} - break; - default: - /* (0) input ::= cmdlist */ yytestcase(yyruleno==0); - /* (1) cmdlist ::= cmdlist ecmd */ yytestcase(yyruleno==1); - /* (2) cmdlist ::= ecmd */ yytestcase(yyruleno==2); - /* (3) ecmd ::= SEMI */ yytestcase(yyruleno==3); - /* (4) ecmd ::= explain cmdx SEMI */ yytestcase(yyruleno==4); - /* (10) trans_opt ::= */ yytestcase(yyruleno==10); - /* (11) trans_opt ::= TRANSACTION */ yytestcase(yyruleno==11); - /* (12) trans_opt ::= TRANSACTION nm */ yytestcase(yyruleno==12); - /* (20) savepoint_opt ::= SAVEPOINT */ yytestcase(yyruleno==20); - /* (21) savepoint_opt ::= */ yytestcase(yyruleno==21); - /* (25) cmd ::= create_table create_table_args */ yytestcase(yyruleno==25); - /* (36) columnlist ::= columnlist COMMA column */ yytestcase(yyruleno==36); - /* (37) columnlist ::= column */ yytestcase(yyruleno==37); - /* (43) type ::= */ yytestcase(yyruleno==43); - /* (50) signed ::= plus_num */ yytestcase(yyruleno==50); - /* (51) signed ::= minus_num */ yytestcase(yyruleno==51); - /* (52) carglist ::= carglist ccons */ yytestcase(yyruleno==52); - /* (53) carglist ::= */ yytestcase(yyruleno==53); - /* (60) ccons ::= NULL onconf */ yytestcase(yyruleno==60); - /* (88) conslist ::= conslist tconscomma tcons */ yytestcase(yyruleno==88); - /* (89) conslist ::= tcons */ yytestcase(yyruleno==89); - /* (91) tconscomma ::= */ yytestcase(yyruleno==91); - /* (273) foreach_clause ::= */ yytestcase(yyruleno==273); - /* (274) foreach_clause ::= FOR EACH ROW */ yytestcase(yyruleno==274); - /* (281) tridxby ::= */ yytestcase(yyruleno==281); - /* (298) database_kw_opt ::= DATABASE */ yytestcase(yyruleno==298); - /* (299) database_kw_opt ::= */ yytestcase(yyruleno==299); - /* (307) kwcolumn_opt ::= */ yytestcase(yyruleno==307); - /* (308) kwcolumn_opt ::= COLUMNKW */ yytestcase(yyruleno==308); - /* (312) vtabarglist ::= vtabarg */ yytestcase(yyruleno==312); - /* (313) vtabarglist ::= vtabarglist COMMA vtabarg */ yytestcase(yyruleno==313); - /* (315) vtabarg ::= vtabarg vtabargtoken */ yytestcase(yyruleno==315); - /* (319) anylist ::= */ yytestcase(yyruleno==319); - /* (320) anylist ::= anylist LP 
anylist RP */ yytestcase(yyruleno==320); - /* (321) anylist ::= anylist ANY */ yytestcase(yyruleno==321); - break; - }; - assert( yyruleno>=0 && yyrulenoyyidx -= yysize; - yyact = yy_find_reduce_action(yymsp[-yysize].stateno,(YYCODETYPE)yygoto); - if( yyact < YYNSTATE ){ -#ifdef NDEBUG - /* If we are not debugging and the reduce action popped at least - ** one element off the stack, then we can push the new element back - ** onto the stack here, and skip the stack overflow test in yy_shift(). - ** That gives a significant speed improvement. */ - if( yysize ){ - yypParser->yyidx++; - yymsp -= yysize-1; - yymsp->stateno = (YYACTIONTYPE)yyact; - yymsp->major = (YYCODETYPE)yygoto; - yymsp->minor = yygotominor; - }else -#endif - { - yy_shift(yypParser,yyact,yygoto,&yygotominor); - } - }else{ - assert( yyact == YYNSTATE + YYNRULE + 1 ); - yy_accept(yypParser); - } -} - -/* -** The following code executes when the parse fails -*/ -#ifndef YYNOERRORRECOVERY -static void yy_parse_failed( - yyParser *yypParser /* The parser */ -){ - sqlite3ParserARG_FETCH; -#ifndef NDEBUG - if( yyTraceFILE ){ - fprintf(yyTraceFILE,"%sFail!\n",yyTracePrompt); - } -#endif - while( yypParser->yyidx>=0 ) yy_pop_parser_stack(yypParser); - /* Here code is inserted which will be executed whenever the - ** parser fails */ - sqlite3ParserARG_STORE; /* Suppress warning about unused %extra_argument variable */ -} -#endif /* YYNOERRORRECOVERY */ - -/* -** The following code executes when a syntax error first occurs. -*/ -static void yy_syntax_error( - yyParser *yypParser, /* The parser */ - int yymajor, /* The major type of the error token */ - YYMINORTYPE yyminor /* The minor type of the error token */ -){ - sqlite3ParserARG_FETCH; -#define TOKEN (yyminor.yy0) - - UNUSED_PARAMETER(yymajor); /* Silence some compiler warnings */ - assert( TOKEN.z[0] ); /* The tokenizer always gives us a token */ - sqlite3ErrorMsg(pParse, "near \"%T\": syntax error", &TOKEN); - sqlite3ParserARG_STORE; /* Suppress warning about unused %extra_argument variable */ -} - -/* -** The following is executed when the parser accepts -*/ -static void yy_accept( - yyParser *yypParser /* The parser */ -){ - sqlite3ParserARG_FETCH; -#ifndef NDEBUG - if( yyTraceFILE ){ - fprintf(yyTraceFILE,"%sAccept!\n",yyTracePrompt); - } -#endif - while( yypParser->yyidx>=0 ) yy_pop_parser_stack(yypParser); - /* Here code is inserted which will be executed whenever the - ** parser accepts */ - sqlite3ParserARG_STORE; /* Suppress warning about unused %extra_argument variable */ -} - -/* The main parser program. -** The first argument is a pointer to a structure obtained from -** "sqlite3ParserAlloc" which describes the current state of the parser. -** The second argument is the major token number. The third is -** the minor token. The fourth optional argument is whatever the -** user wants (and specified in the grammar) and is available for -** use by the action routines. -** -** Inputs: -**
-** <ul>
-** <li> A pointer to the parser (an opaque structure.)
-** <li> The major token number.
-** <li> The minor token number.
-** <li> An option argument of a grammar-specified type.
-** </ul>
    -** -** Outputs: -** None. -*/ -SQLITE_PRIVATE void sqlite3Parser( - void *yyp, /* The parser */ - int yymajor, /* The major token code number */ - sqlite3ParserTOKENTYPE yyminor /* The value for the token */ - sqlite3ParserARG_PDECL /* Optional %extra_argument parameter */ -){ - YYMINORTYPE yyminorunion; - int yyact; /* The parser action. */ -#if !defined(YYERRORSYMBOL) && !defined(YYNOERRORRECOVERY) - int yyendofinput; /* True if we are at the end of input */ -#endif -#ifdef YYERRORSYMBOL - int yyerrorhit = 0; /* True if yymajor has invoked an error */ -#endif - yyParser *yypParser; /* The parser */ - - /* (re)initialize the parser, if necessary */ - yypParser = (yyParser*)yyp; - if( yypParser->yyidx<0 ){ -#if YYSTACKDEPTH<=0 - if( yypParser->yystksz <=0 ){ - /*memset(&yyminorunion, 0, sizeof(yyminorunion));*/ - yyminorunion = yyzerominor; - yyStackOverflow(yypParser, &yyminorunion); - return; - } -#endif - yypParser->yyidx = 0; - yypParser->yyerrcnt = -1; - yypParser->yystack[0].stateno = 0; - yypParser->yystack[0].major = 0; - } - yyminorunion.yy0 = yyminor; -#if !defined(YYERRORSYMBOL) && !defined(YYNOERRORRECOVERY) - yyendofinput = (yymajor==0); -#endif - sqlite3ParserARG_STORE; - -#ifndef NDEBUG - if( yyTraceFILE ){ - fprintf(yyTraceFILE,"%sInput %s\n",yyTracePrompt,yyTokenName[yymajor]); - } -#endif - - do{ - yyact = yy_find_shift_action(yypParser,(YYCODETYPE)yymajor); - if( yyactyyerrcnt--; - yymajor = YYNOCODE; - }else if( yyact < YYNSTATE + YYNRULE ){ - yy_reduce(yypParser,yyact-YYNSTATE); - }else{ - assert( yyact == YY_ERROR_ACTION ); -#ifdef YYERRORSYMBOL - int yymx; -#endif -#ifndef NDEBUG - if( yyTraceFILE ){ - fprintf(yyTraceFILE,"%sSyntax Error!\n",yyTracePrompt); - } -#endif -#ifdef YYERRORSYMBOL - /* A syntax error has occurred. - ** The response to an error depends upon whether or not the - ** grammar defines an error token "ERROR". - ** - ** This is what we do if the grammar does define ERROR: - ** - ** * Call the %syntax_error function. - ** - ** * Begin popping the stack until we enter a state where - ** it is legal to shift the error symbol, then shift - ** the error symbol. - ** - ** * Set the error count to three. - ** - ** * Begin accepting and shifting new tokens. No new error - ** processing will occur until three tokens have been - ** shifted successfully. - ** - */ - if( yypParser->yyerrcnt<0 ){ - yy_syntax_error(yypParser,yymajor,yyminorunion); - } - yymx = yypParser->yystack[yypParser->yyidx].major; - if( yymx==YYERRORSYMBOL || yyerrorhit ){ -#ifndef NDEBUG - if( yyTraceFILE ){ - fprintf(yyTraceFILE,"%sDiscard input token %s\n", - yyTracePrompt,yyTokenName[yymajor]); - } -#endif - yy_destructor(yypParser, (YYCODETYPE)yymajor,&yyminorunion); - yymajor = YYNOCODE; - }else{ - while( - yypParser->yyidx >= 0 && - yymx != YYERRORSYMBOL && - (yyact = yy_find_reduce_action( - yypParser->yystack[yypParser->yyidx].stateno, - YYERRORSYMBOL)) >= YYNSTATE - ){ - yy_pop_parser_stack(yypParser); - } - if( yypParser->yyidx < 0 || yymajor==0 ){ - yy_destructor(yypParser,(YYCODETYPE)yymajor,&yyminorunion); - yy_parse_failed(yypParser); - yymajor = YYNOCODE; - }else if( yymx!=YYERRORSYMBOL ){ - YYMINORTYPE u2; - u2.YYERRSYMDT = 0; - yy_shift(yypParser,yyact,YYERRORSYMBOL,&u2); - } - } - yypParser->yyerrcnt = 3; - yyerrorhit = 1; -#elif defined(YYNOERRORRECOVERY) - /* If the YYNOERRORRECOVERY macro is defined, then do not attempt to - ** do any kind of error recovery. Instead, simply invoke the syntax - ** error routine and continue going as if nothing had happened. 
- ** - ** Applications can set this macro (for example inside %include) if - ** they intend to abandon the parse upon the first syntax error seen. - */ - yy_syntax_error(yypParser,yymajor,yyminorunion); - yy_destructor(yypParser,(YYCODETYPE)yymajor,&yyminorunion); - yymajor = YYNOCODE; - -#else /* YYERRORSYMBOL is not defined */ - /* This is what we do if the grammar does not define ERROR: - ** - ** * Report an error message, and throw away the input token. - ** - ** * If the input token is $, then fail the parse. - ** - ** As before, subsequent error messages are suppressed until - ** three input tokens have been successfully shifted. - */ - if( yypParser->yyerrcnt<=0 ){ - yy_syntax_error(yypParser,yymajor,yyminorunion); - } - yypParser->yyerrcnt = 3; - yy_destructor(yypParser,(YYCODETYPE)yymajor,&yyminorunion); - if( yyendofinput ){ - yy_parse_failed(yypParser); - } - yymajor = YYNOCODE; -#endif - } - }while( yymajor!=YYNOCODE && yypParser->yyidx>=0 ); - return; -} - -/************** End of parse.c ***********************************************/ -/************** Begin file tokenize.c ****************************************/ -/* -** 2001 September 15 -** -** The author disclaims copyright to this source code. In place of -** a legal notice, here is a blessing: -** -** May you do good and not evil. -** May you find forgiveness for yourself and forgive others. -** May you share freely, never taking more than you give. -** -************************************************************************* -** An tokenizer for SQL -** -** This file contains C code that splits an SQL input string up into -** individual tokens and sends those tokens one-by-one over to the -** parser for analysis. -*/ -/* #include */ - -/* -** The charMap() macro maps alphabetic characters into their -** lower-case ASCII equivalent. On ASCII machines, this is just -** an upper-to-lower case map. On EBCDIC machines we also need -** to adjust the encoding. Only alphabetic characters and underscores -** need to be translated. -*/ -#ifdef SQLITE_ASCII -# define charMap(X) sqlite3UpperToLower[(unsigned char)X] -#endif -#ifdef SQLITE_EBCDIC -# define charMap(X) ebcdicToAscii[(unsigned char)X] -const unsigned char ebcdicToAscii[] = { -/* 0 1 2 3 4 5 6 7 8 9 A B C D E F */ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 0x */ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 1x */ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 2x */ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 3x */ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 4x */ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 5x */ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 95, 0, 0, /* 6x */ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 7x */ - 0, 97, 98, 99,100,101,102,103,104,105, 0, 0, 0, 0, 0, 0, /* 8x */ - 0,106,107,108,109,110,111,112,113,114, 0, 0, 0, 0, 0, 0, /* 9x */ - 0, 0,115,116,117,118,119,120,121,122, 0, 0, 0, 0, 0, 0, /* Ax */ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* Bx */ - 0, 97, 98, 99,100,101,102,103,104,105, 0, 0, 0, 0, 0, 0, /* Cx */ - 0,106,107,108,109,110,111,112,113,114, 0, 0, 0, 0, 0, 0, /* Dx */ - 0, 0,115,116,117,118,119,120,121,122, 0, 0, 0, 0, 0, 0, /* Ex */ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* Fx */ -}; -#endif - -/* -** The sqlite3KeywordCode function looks up an identifier to determine if -** it is a keyword. If it is a keyword, the token code of that keyword is -** returned. If the input is not a keyword, TK_ID is returned. 
-** -** The implementation of this routine was generated by a program, -** mkkeywordhash.h, located in the tool subdirectory of the distribution. -** The output of the mkkeywordhash.c program is written into a file -** named keywordhash.h and then included into this source file by -** the #include below. -*/ -/************** Include keywordhash.h in the middle of tokenize.c ************/ -/************** Begin file keywordhash.h *************************************/ -/***** This file contains automatically generated code ****** -** -** The code in this file has been automatically generated by -** -** sqlite/tool/mkkeywordhash.c -** -** The code in this file implements a function that determines whether -** or not a given identifier is really an SQL keyword. The same thing -** might be implemented more directly using a hand-written hash table. -** But by using this automatically generated code, the size of the code -** is substantially reduced. This is important for embedded applications -** on platforms with limited memory. -*/ -/* Hash score: 182 */ -static int keywordCode(const char *z, int n){ - /* zText[] encodes 834 bytes of keywords in 554 bytes */ - /* REINDEXEDESCAPEACHECKEYBEFOREIGNOREGEXPLAINSTEADDATABASELECT */ - /* ABLEFTHENDEFERRABLELSEXCEPTRANSACTIONATURALTERAISEXCLUSIVE */ - /* XISTSAVEPOINTERSECTRIGGEREFERENCESCONSTRAINTOFFSETEMPORARY */ - /* UNIQUERYWITHOUTERELEASEATTACHAVINGROUPDATEBEGINNERECURSIVE */ - /* BETWEENOTNULLIKECASCADELETECASECOLLATECREATECURRENT_DATEDETACH */ - /* IMMEDIATEJOINSERTMATCHPLANALYZEPRAGMABORTVALUESVIRTUALIMITWHEN */ - /* WHERENAMEAFTEREPLACEANDEFAULTAUTOINCREMENTCASTCOLUMNCOMMIT */ - /* CONFLICTCROSSCURRENT_TIMESTAMPRIMARYDEFERREDISTINCTDROPFAIL */ - /* FROMFULLGLOBYIFISNULLORDERESTRICTRIGHTROLLBACKROWUNIONUSING */ - /* VACUUMVIEWINITIALLY */ - static const char zText[553] = { - 'R','E','I','N','D','E','X','E','D','E','S','C','A','P','E','A','C','H', - 'E','C','K','E','Y','B','E','F','O','R','E','I','G','N','O','R','E','G', - 'E','X','P','L','A','I','N','S','T','E','A','D','D','A','T','A','B','A', - 'S','E','L','E','C','T','A','B','L','E','F','T','H','E','N','D','E','F', - 'E','R','R','A','B','L','E','L','S','E','X','C','E','P','T','R','A','N', - 'S','A','C','T','I','O','N','A','T','U','R','A','L','T','E','R','A','I', - 'S','E','X','C','L','U','S','I','V','E','X','I','S','T','S','A','V','E', - 'P','O','I','N','T','E','R','S','E','C','T','R','I','G','G','E','R','E', - 'F','E','R','E','N','C','E','S','C','O','N','S','T','R','A','I','N','T', - 'O','F','F','S','E','T','E','M','P','O','R','A','R','Y','U','N','I','Q', - 'U','E','R','Y','W','I','T','H','O','U','T','E','R','E','L','E','A','S', - 'E','A','T','T','A','C','H','A','V','I','N','G','R','O','U','P','D','A', - 'T','E','B','E','G','I','N','N','E','R','E','C','U','R','S','I','V','E', - 'B','E','T','W','E','E','N','O','T','N','U','L','L','I','K','E','C','A', - 'S','C','A','D','E','L','E','T','E','C','A','S','E','C','O','L','L','A', - 'T','E','C','R','E','A','T','E','C','U','R','R','E','N','T','_','D','A', - 'T','E','D','E','T','A','C','H','I','M','M','E','D','I','A','T','E','J', - 'O','I','N','S','E','R','T','M','A','T','C','H','P','L','A','N','A','L', - 'Y','Z','E','P','R','A','G','M','A','B','O','R','T','V','A','L','U','E', - 'S','V','I','R','T','U','A','L','I','M','I','T','W','H','E','N','W','H', - 'E','R','E','N','A','M','E','A','F','T','E','R','E','P','L','A','C','E', - 'A','N','D','E','F','A','U','L','T','A','U','T','O','I','N','C','R','E', - 
'M','E','N','T','C','A','S','T','C','O','L','U','M','N','C','O','M','M', - 'I','T','C','O','N','F','L','I','C','T','C','R','O','S','S','C','U','R', - 'R','E','N','T','_','T','I','M','E','S','T','A','M','P','R','I','M','A', - 'R','Y','D','E','F','E','R','R','E','D','I','S','T','I','N','C','T','D', - 'R','O','P','F','A','I','L','F','R','O','M','F','U','L','L','G','L','O', - 'B','Y','I','F','I','S','N','U','L','L','O','R','D','E','R','E','S','T', - 'R','I','C','T','R','I','G','H','T','R','O','L','L','B','A','C','K','R', - 'O','W','U','N','I','O','N','U','S','I','N','G','V','A','C','U','U','M', - 'V','I','E','W','I','N','I','T','I','A','L','L','Y', - }; - static const unsigned char aHash[127] = { - 76, 105, 117, 74, 0, 45, 0, 0, 82, 0, 77, 0, 0, - 42, 12, 78, 15, 0, 116, 85, 54, 112, 0, 19, 0, 0, - 121, 0, 119, 115, 0, 22, 93, 0, 9, 0, 0, 70, 71, - 0, 69, 6, 0, 48, 90, 102, 0, 118, 101, 0, 0, 44, - 0, 103, 24, 0, 17, 0, 122, 53, 23, 0, 5, 110, 25, - 96, 0, 0, 124, 106, 60, 123, 57, 28, 55, 0, 91, 0, - 100, 26, 0, 99, 0, 0, 0, 95, 92, 97, 88, 109, 14, - 39, 108, 0, 81, 0, 18, 89, 111, 32, 0, 120, 80, 113, - 62, 46, 84, 0, 0, 94, 40, 59, 114, 0, 36, 0, 0, - 29, 0, 86, 63, 64, 0, 20, 61, 0, 56, - }; - static const unsigned char aNext[124] = { - 0, 0, 0, 0, 4, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 2, 0, 0, 0, 0, 0, 0, 13, 0, 0, 0, 0, - 0, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 33, 0, 21, 0, 0, 0, 0, 0, 50, - 0, 43, 3, 47, 0, 0, 0, 0, 30, 0, 58, 0, 38, - 0, 0, 0, 1, 66, 0, 0, 67, 0, 41, 0, 0, 0, - 0, 0, 0, 49, 65, 0, 0, 0, 0, 31, 52, 16, 34, - 10, 0, 0, 0, 0, 0, 0, 0, 11, 72, 79, 0, 8, - 0, 104, 98, 0, 107, 0, 87, 0, 75, 51, 0, 27, 37, - 73, 83, 0, 35, 68, 0, 0, - }; - static const unsigned char aLen[124] = { - 7, 7, 5, 4, 6, 4, 5, 3, 6, 7, 3, 6, 6, - 7, 7, 3, 8, 2, 6, 5, 4, 4, 3, 10, 4, 6, - 11, 6, 2, 7, 5, 5, 9, 6, 9, 9, 7, 10, 10, - 4, 6, 2, 3, 9, 4, 2, 6, 5, 7, 4, 5, 7, - 6, 6, 5, 6, 5, 5, 9, 7, 7, 3, 2, 4, 4, - 7, 3, 6, 4, 7, 6, 12, 6, 9, 4, 6, 5, 4, - 7, 6, 5, 6, 7, 5, 4, 5, 6, 5, 7, 3, 7, - 13, 2, 2, 4, 6, 6, 8, 5, 17, 12, 7, 8, 8, - 2, 4, 4, 4, 4, 4, 2, 2, 6, 5, 8, 5, 8, - 3, 5, 5, 6, 4, 9, 3, - }; - static const unsigned short int aOffset[124] = { - 0, 2, 2, 8, 9, 14, 16, 20, 23, 25, 25, 29, 33, - 36, 41, 46, 48, 53, 54, 59, 62, 65, 67, 69, 78, 81, - 86, 91, 95, 96, 101, 105, 109, 117, 122, 128, 136, 142, 152, - 159, 162, 162, 165, 167, 167, 171, 176, 179, 184, 184, 188, 192, - 199, 204, 209, 212, 218, 221, 225, 234, 240, 240, 240, 243, 246, - 250, 251, 255, 261, 265, 272, 278, 290, 296, 305, 307, 313, 318, - 320, 327, 332, 337, 343, 349, 354, 358, 361, 367, 371, 378, 380, - 387, 389, 391, 400, 404, 410, 416, 424, 429, 429, 445, 452, 459, - 460, 467, 471, 475, 479, 483, 486, 488, 490, 496, 500, 508, 513, - 521, 524, 529, 534, 540, 544, 549, - }; - static const unsigned char aCode[124] = { - TK_REINDEX, TK_INDEXED, TK_INDEX, TK_DESC, TK_ESCAPE, - TK_EACH, TK_CHECK, TK_KEY, TK_BEFORE, TK_FOREIGN, - TK_FOR, TK_IGNORE, TK_LIKE_KW, TK_EXPLAIN, TK_INSTEAD, - TK_ADD, TK_DATABASE, TK_AS, TK_SELECT, TK_TABLE, - TK_JOIN_KW, TK_THEN, TK_END, TK_DEFERRABLE, TK_ELSE, - TK_EXCEPT, TK_TRANSACTION,TK_ACTION, TK_ON, TK_JOIN_KW, - TK_ALTER, TK_RAISE, TK_EXCLUSIVE, TK_EXISTS, TK_SAVEPOINT, - TK_INTERSECT, TK_TRIGGER, TK_REFERENCES, TK_CONSTRAINT, TK_INTO, - TK_OFFSET, TK_OF, TK_SET, TK_TEMP, TK_TEMP, - TK_OR, TK_UNIQUE, TK_QUERY, TK_WITHOUT, TK_WITH, - TK_JOIN_KW, TK_RELEASE, TK_ATTACH, TK_HAVING, TK_GROUP, - TK_UPDATE, TK_BEGIN, TK_JOIN_KW, TK_RECURSIVE, TK_BETWEEN, - TK_NOTNULL, TK_NOT, TK_NO, TK_NULL, 
TK_LIKE_KW, - TK_CASCADE, TK_ASC, TK_DELETE, TK_CASE, TK_COLLATE, - TK_CREATE, TK_CTIME_KW, TK_DETACH, TK_IMMEDIATE, TK_JOIN, - TK_INSERT, TK_MATCH, TK_PLAN, TK_ANALYZE, TK_PRAGMA, - TK_ABORT, TK_VALUES, TK_VIRTUAL, TK_LIMIT, TK_WHEN, - TK_WHERE, TK_RENAME, TK_AFTER, TK_REPLACE, TK_AND, - TK_DEFAULT, TK_AUTOINCR, TK_TO, TK_IN, TK_CAST, - TK_COLUMNKW, TK_COMMIT, TK_CONFLICT, TK_JOIN_KW, TK_CTIME_KW, - TK_CTIME_KW, TK_PRIMARY, TK_DEFERRED, TK_DISTINCT, TK_IS, - TK_DROP, TK_FAIL, TK_FROM, TK_JOIN_KW, TK_LIKE_KW, - TK_BY, TK_IF, TK_ISNULL, TK_ORDER, TK_RESTRICT, - TK_JOIN_KW, TK_ROLLBACK, TK_ROW, TK_UNION, TK_USING, - TK_VACUUM, TK_VIEW, TK_INITIALLY, TK_ALL, - }; - int h, i; - if( n<2 ) return TK_ID; - h = ((charMap(z[0])*4) ^ - (charMap(z[n-1])*3) ^ - n) % 127; - for(i=((int)aHash[h])-1; i>=0; i=((int)aNext[i])-1){ - if( aLen[i]==n && sqlite3StrNICmp(&zText[aOffset[i]],z,n)==0 ){ - testcase( i==0 ); /* REINDEX */ - testcase( i==1 ); /* INDEXED */ - testcase( i==2 ); /* INDEX */ - testcase( i==3 ); /* DESC */ - testcase( i==4 ); /* ESCAPE */ - testcase( i==5 ); /* EACH */ - testcase( i==6 ); /* CHECK */ - testcase( i==7 ); /* KEY */ - testcase( i==8 ); /* BEFORE */ - testcase( i==9 ); /* FOREIGN */ - testcase( i==10 ); /* FOR */ - testcase( i==11 ); /* IGNORE */ - testcase( i==12 ); /* REGEXP */ - testcase( i==13 ); /* EXPLAIN */ - testcase( i==14 ); /* INSTEAD */ - testcase( i==15 ); /* ADD */ - testcase( i==16 ); /* DATABASE */ - testcase( i==17 ); /* AS */ - testcase( i==18 ); /* SELECT */ - testcase( i==19 ); /* TABLE */ - testcase( i==20 ); /* LEFT */ - testcase( i==21 ); /* THEN */ - testcase( i==22 ); /* END */ - testcase( i==23 ); /* DEFERRABLE */ - testcase( i==24 ); /* ELSE */ - testcase( i==25 ); /* EXCEPT */ - testcase( i==26 ); /* TRANSACTION */ - testcase( i==27 ); /* ACTION */ - testcase( i==28 ); /* ON */ - testcase( i==29 ); /* NATURAL */ - testcase( i==30 ); /* ALTER */ - testcase( i==31 ); /* RAISE */ - testcase( i==32 ); /* EXCLUSIVE */ - testcase( i==33 ); /* EXISTS */ - testcase( i==34 ); /* SAVEPOINT */ - testcase( i==35 ); /* INTERSECT */ - testcase( i==36 ); /* TRIGGER */ - testcase( i==37 ); /* REFERENCES */ - testcase( i==38 ); /* CONSTRAINT */ - testcase( i==39 ); /* INTO */ - testcase( i==40 ); /* OFFSET */ - testcase( i==41 ); /* OF */ - testcase( i==42 ); /* SET */ - testcase( i==43 ); /* TEMPORARY */ - testcase( i==44 ); /* TEMP */ - testcase( i==45 ); /* OR */ - testcase( i==46 ); /* UNIQUE */ - testcase( i==47 ); /* QUERY */ - testcase( i==48 ); /* WITHOUT */ - testcase( i==49 ); /* WITH */ - testcase( i==50 ); /* OUTER */ - testcase( i==51 ); /* RELEASE */ - testcase( i==52 ); /* ATTACH */ - testcase( i==53 ); /* HAVING */ - testcase( i==54 ); /* GROUP */ - testcase( i==55 ); /* UPDATE */ - testcase( i==56 ); /* BEGIN */ - testcase( i==57 ); /* INNER */ - testcase( i==58 ); /* RECURSIVE */ - testcase( i==59 ); /* BETWEEN */ - testcase( i==60 ); /* NOTNULL */ - testcase( i==61 ); /* NOT */ - testcase( i==62 ); /* NO */ - testcase( i==63 ); /* NULL */ - testcase( i==64 ); /* LIKE */ - testcase( i==65 ); /* CASCADE */ - testcase( i==66 ); /* ASC */ - testcase( i==67 ); /* DELETE */ - testcase( i==68 ); /* CASE */ - testcase( i==69 ); /* COLLATE */ - testcase( i==70 ); /* CREATE */ - testcase( i==71 ); /* CURRENT_DATE */ - testcase( i==72 ); /* DETACH */ - testcase( i==73 ); /* IMMEDIATE */ - testcase( i==74 ); /* JOIN */ - testcase( i==75 ); /* INSERT */ - testcase( i==76 ); /* MATCH */ - testcase( i==77 ); /* PLAN */ - testcase( i==78 ); /* ANALYZE */ - 
testcase( i==79 ); /* PRAGMA */ - testcase( i==80 ); /* ABORT */ - testcase( i==81 ); /* VALUES */ - testcase( i==82 ); /* VIRTUAL */ - testcase( i==83 ); /* LIMIT */ - testcase( i==84 ); /* WHEN */ - testcase( i==85 ); /* WHERE */ - testcase( i==86 ); /* RENAME */ - testcase( i==87 ); /* AFTER */ - testcase( i==88 ); /* REPLACE */ - testcase( i==89 ); /* AND */ - testcase( i==90 ); /* DEFAULT */ - testcase( i==91 ); /* AUTOINCREMENT */ - testcase( i==92 ); /* TO */ - testcase( i==93 ); /* IN */ - testcase( i==94 ); /* CAST */ - testcase( i==95 ); /* COLUMN */ - testcase( i==96 ); /* COMMIT */ - testcase( i==97 ); /* CONFLICT */ - testcase( i==98 ); /* CROSS */ - testcase( i==99 ); /* CURRENT_TIMESTAMP */ - testcase( i==100 ); /* CURRENT_TIME */ - testcase( i==101 ); /* PRIMARY */ - testcase( i==102 ); /* DEFERRED */ - testcase( i==103 ); /* DISTINCT */ - testcase( i==104 ); /* IS */ - testcase( i==105 ); /* DROP */ - testcase( i==106 ); /* FAIL */ - testcase( i==107 ); /* FROM */ - testcase( i==108 ); /* FULL */ - testcase( i==109 ); /* GLOB */ - testcase( i==110 ); /* BY */ - testcase( i==111 ); /* IF */ - testcase( i==112 ); /* ISNULL */ - testcase( i==113 ); /* ORDER */ - testcase( i==114 ); /* RESTRICT */ - testcase( i==115 ); /* RIGHT */ - testcase( i==116 ); /* ROLLBACK */ - testcase( i==117 ); /* ROW */ - testcase( i==118 ); /* UNION */ - testcase( i==119 ); /* USING */ - testcase( i==120 ); /* VACUUM */ - testcase( i==121 ); /* VIEW */ - testcase( i==122 ); /* INITIALLY */ - testcase( i==123 ); /* ALL */ - return aCode[i]; - } - } - return TK_ID; -} -SQLITE_PRIVATE int sqlite3KeywordCode(const unsigned char *z, int n){ - return keywordCode((char*)z, n); -} -#define SQLITE_N_KEYWORD 124 - -/************** End of keywordhash.h *****************************************/ -/************** Continuing where we left off in tokenize.c *******************/ - - -/* -** If X is a character that can be used in an identifier then -** IdChar(X) will be true. Otherwise it is false. -** -** For ASCII, any character with the high-order bit set is -** allowed in an identifier. For 7-bit characters, -** sqlite3IsIdChar[X] must be 1. -** -** For EBCDIC, the rules are more complex but have the same -** end result. -** -** Ticket #1066. the SQL standard does not allow '$' in the -** middle of identfiers. But many SQL implementations do. -** SQLite will allow '$' in identifiers for compatibility. -** But the feature is undocumented. -*/ -#ifdef SQLITE_ASCII -#define IdChar(C) ((sqlite3CtypeMap[(unsigned char)C]&0x46)!=0) -#endif -#ifdef SQLITE_EBCDIC -SQLITE_PRIVATE const char sqlite3IsEbcdicIdChar[] = { -/* x0 x1 x2 x3 x4 x5 x6 x7 x8 x9 xA xB xC xD xE xF */ - 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, /* 4x */ - 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, /* 5x */ - 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, /* 6x */ - 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, /* 7x */ - 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, /* 8x */ - 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, /* 9x */ - 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, /* Ax */ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* Bx */ - 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, /* Cx */ - 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, /* Dx */ - 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, /* Ex */ - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, /* Fx */ -}; -#define IdChar(C) (((c=C)>=0x42 && sqlite3IsEbcdicIdChar[c-0x40])) -#endif - - -/* -** Return the length of the token that begins at z[0]. 
-** Store the token type in *tokenType before returning. -*/ -SQLITE_PRIVATE int sqlite3GetToken(const unsigned char *z, int *tokenType){ - int i, c; - switch( *z ){ - case ' ': case '\t': case '\n': case '\f': case '\r': { - testcase( z[0]==' ' ); - testcase( z[0]=='\t' ); - testcase( z[0]=='\n' ); - testcase( z[0]=='\f' ); - testcase( z[0]=='\r' ); - for(i=1; sqlite3Isspace(z[i]); i++){} - *tokenType = TK_SPACE; - return i; - } - case '-': { - if( z[1]=='-' ){ - for(i=2; (c=z[i])!=0 && c!='\n'; i++){} - *tokenType = TK_SPACE; /* IMP: R-22934-25134 */ - return i; - } - *tokenType = TK_MINUS; - return 1; - } - case '(': { - *tokenType = TK_LP; - return 1; - } - case ')': { - *tokenType = TK_RP; - return 1; - } - case ';': { - *tokenType = TK_SEMI; - return 1; - } - case '+': { - *tokenType = TK_PLUS; - return 1; - } - case '*': { - *tokenType = TK_STAR; - return 1; - } - case '/': { - if( z[1]!='*' || z[2]==0 ){ - *tokenType = TK_SLASH; - return 1; - } - for(i=3, c=z[2]; (c!='*' || z[i]!='/') && (c=z[i])!=0; i++){} - if( c ) i++; - *tokenType = TK_SPACE; /* IMP: R-22934-25134 */ - return i; - } - case '%': { - *tokenType = TK_REM; - return 1; - } - case '=': { - *tokenType = TK_EQ; - return 1 + (z[1]=='='); - } - case '<': { - if( (c=z[1])=='=' ){ - *tokenType = TK_LE; - return 2; - }else if( c=='>' ){ - *tokenType = TK_NE; - return 2; - }else if( c=='<' ){ - *tokenType = TK_LSHIFT; - return 2; - }else{ - *tokenType = TK_LT; - return 1; - } - } - case '>': { - if( (c=z[1])=='=' ){ - *tokenType = TK_GE; - return 2; - }else if( c=='>' ){ - *tokenType = TK_RSHIFT; - return 2; - }else{ - *tokenType = TK_GT; - return 1; - } - } - case '!': { - if( z[1]!='=' ){ - *tokenType = TK_ILLEGAL; - return 2; - }else{ - *tokenType = TK_NE; - return 2; - } - } - case '|': { - if( z[1]!='|' ){ - *tokenType = TK_BITOR; - return 1; - }else{ - *tokenType = TK_CONCAT; - return 2; - } - } - case ',': { - *tokenType = TK_COMMA; - return 1; - } - case '&': { - *tokenType = TK_BITAND; - return 1; - } - case '~': { - *tokenType = TK_BITNOT; - return 1; - } - case '`': - case '\'': - case '"': { - int delim = z[0]; - testcase( delim=='`' ); - testcase( delim=='\'' ); - testcase( delim=='"' ); - for(i=1; (c=z[i])!=0; i++){ - if( c==delim ){ - if( z[i+1]==delim ){ - i++; - }else{ - break; - } - } - } - if( c=='\'' ){ - *tokenType = TK_STRING; - return i+1; - }else if( c!=0 ){ - *tokenType = TK_ID; - return i+1; - }else{ - *tokenType = TK_ILLEGAL; - return i; - } - } - case '.': { -#ifndef SQLITE_OMIT_FLOATING_POINT - if( !sqlite3Isdigit(z[1]) ) -#endif - { - *tokenType = TK_DOT; - return 1; - } - /* If the next character is a digit, this is a floating point - ** number that begins with ".". Fall thru into the next case */ - } - case '0': case '1': case '2': case '3': case '4': - case '5': case '6': case '7': case '8': case '9': { - testcase( z[0]=='0' ); testcase( z[0]=='1' ); testcase( z[0]=='2' ); - testcase( z[0]=='3' ); testcase( z[0]=='4' ); testcase( z[0]=='5' ); - testcase( z[0]=='6' ); testcase( z[0]=='7' ); testcase( z[0]=='8' ); - testcase( z[0]=='9' ); - *tokenType = TK_INTEGER; - for(i=0; sqlite3Isdigit(z[i]); i++){} -#ifndef SQLITE_OMIT_FLOATING_POINT - if( z[i]=='.' 
){ - i++; - while( sqlite3Isdigit(z[i]) ){ i++; } - *tokenType = TK_FLOAT; - } - if( (z[i]=='e' || z[i]=='E') && - ( sqlite3Isdigit(z[i+1]) - || ((z[i+1]=='+' || z[i+1]=='-') && sqlite3Isdigit(z[i+2])) - ) - ){ - i += 2; - while( sqlite3Isdigit(z[i]) ){ i++; } - *tokenType = TK_FLOAT; - } -#endif - while( IdChar(z[i]) ){ - *tokenType = TK_ILLEGAL; - i++; - } - return i; - } - case '[': { - for(i=1, c=z[0]; c!=']' && (c=z[i])!=0; i++){} - *tokenType = c==']' ? TK_ID : TK_ILLEGAL; - return i; - } - case '?': { - *tokenType = TK_VARIABLE; - for(i=1; sqlite3Isdigit(z[i]); i++){} - return i; - } -#ifndef SQLITE_OMIT_TCL_VARIABLE - case '$': -#endif - case '@': /* For compatibility with MS SQL Server */ - case '#': - case ':': { - int n = 0; - testcase( z[0]=='$' ); testcase( z[0]=='@' ); - testcase( z[0]==':' ); testcase( z[0]=='#' ); - *tokenType = TK_VARIABLE; - for(i=1; (c=z[i])!=0; i++){ - if( IdChar(c) ){ - n++; -#ifndef SQLITE_OMIT_TCL_VARIABLE - }else if( c=='(' && n>0 ){ - do{ - i++; - }while( (c=z[i])!=0 && !sqlite3Isspace(c) && c!=')' ); - if( c==')' ){ - i++; - }else{ - *tokenType = TK_ILLEGAL; - } - break; - }else if( c==':' && z[i+1]==':' ){ - i++; -#endif - }else{ - break; - } - } - if( n==0 ) *tokenType = TK_ILLEGAL; - return i; - } -#ifndef SQLITE_OMIT_BLOB_LITERAL - case 'x': case 'X': { - testcase( z[0]=='x' ); testcase( z[0]=='X' ); - if( z[1]=='\'' ){ - *tokenType = TK_BLOB; - for(i=2; sqlite3Isxdigit(z[i]); i++){} - if( z[i]!='\'' || i%2 ){ - *tokenType = TK_ILLEGAL; - while( z[i] && z[i]!='\'' ){ i++; } - } - if( z[i] ) i++; - return i; - } - /* Otherwise fall through to the next case */ - } -#endif - default: { - if( !IdChar(*z) ){ - break; - } - for(i=1; IdChar(z[i]); i++){} - *tokenType = keywordCode((char*)z, i); - return i; - } - } - *tokenType = TK_ILLEGAL; - return 1; -} - -/* -** Run the parser on the given SQL string. The parser structure is -** passed in. An SQLITE_ status code is returned. If an error occurs -** then an and attempt is made to write an error message into -** memory obtained from sqlite3_malloc() and to make *pzErrMsg point to that -** error message. 
-*/ -SQLITE_PRIVATE int sqlite3RunParser(Parse *pParse, const char *zSql, char **pzErrMsg){ - int nErr = 0; /* Number of errors encountered */ - int i; /* Loop counter */ - void *pEngine; /* The LEMON-generated LALR(1) parser */ - int tokenType; /* type of the next token */ - int lastTokenParsed = -1; /* type of the previous token */ - u8 enableLookaside; /* Saved value of db->lookaside.bEnabled */ - sqlite3 *db = pParse->db; /* The database connection */ - int mxSqlLen; /* Max length of an SQL string */ - - - mxSqlLen = db->aLimit[SQLITE_LIMIT_SQL_LENGTH]; - if( db->nVdbeActive==0 ){ - db->u1.isInterrupted = 0; - } - pParse->rc = SQLITE_OK; - pParse->zTail = zSql; - i = 0; - assert( pzErrMsg!=0 ); - pEngine = sqlite3ParserAlloc((void*(*)(size_t))sqlite3Malloc); - if( pEngine==0 ){ - db->mallocFailed = 1; - return SQLITE_NOMEM; - } - assert( pParse->pNewTable==0 ); - assert( pParse->pNewTrigger==0 ); - assert( pParse->nVar==0 ); - assert( pParse->nzVar==0 ); - assert( pParse->azVar==0 ); - enableLookaside = db->lookaside.bEnabled; - if( db->lookaside.pStart ) db->lookaside.bEnabled = 1; - while( !db->mallocFailed && zSql[i]!=0 ){ - assert( i>=0 ); - pParse->sLastToken.z = &zSql[i]; - pParse->sLastToken.n = sqlite3GetToken((unsigned char*)&zSql[i],&tokenType); - i += pParse->sLastToken.n; - if( i>mxSqlLen ){ - pParse->rc = SQLITE_TOOBIG; - break; - } - switch( tokenType ){ - case TK_SPACE: { - if( db->u1.isInterrupted ){ - sqlite3ErrorMsg(pParse, "interrupt"); - pParse->rc = SQLITE_INTERRUPT; - goto abort_parse; - } - break; - } - case TK_ILLEGAL: { - sqlite3DbFree(db, *pzErrMsg); - *pzErrMsg = sqlite3MPrintf(db, "unrecognized token: \"%T\"", - &pParse->sLastToken); - nErr++; - goto abort_parse; - } - case TK_SEMI: { - pParse->zTail = &zSql[i]; - /* Fall thru into the default case */ - } - default: { - sqlite3Parser(pEngine, tokenType, pParse->sLastToken, pParse); - lastTokenParsed = tokenType; - if( pParse->rc!=SQLITE_OK ){ - goto abort_parse; - } - break; - } - } - } -abort_parse: - if( zSql[i]==0 && nErr==0 && pParse->rc==SQLITE_OK ){ - if( lastTokenParsed!=TK_SEMI ){ - sqlite3Parser(pEngine, TK_SEMI, pParse->sLastToken, pParse); - pParse->zTail = &zSql[i]; - } - sqlite3Parser(pEngine, 0, pParse->sLastToken, pParse); - } -#ifdef YYTRACKMAXSTACKDEPTH - sqlite3StatusSet(SQLITE_STATUS_PARSER_STACK, - sqlite3ParserStackPeak(pEngine) - ); -#endif /* YYDEBUG */ - sqlite3ParserFree(pEngine, sqlite3_free); - db->lookaside.bEnabled = enableLookaside; - if( db->mallocFailed ){ - pParse->rc = SQLITE_NOMEM; - } - if( pParse->rc!=SQLITE_OK && pParse->rc!=SQLITE_DONE && pParse->zErrMsg==0 ){ - sqlite3SetString(&pParse->zErrMsg, db, "%s", sqlite3ErrStr(pParse->rc)); - } - assert( pzErrMsg!=0 ); - if( pParse->zErrMsg ){ - *pzErrMsg = pParse->zErrMsg; - sqlite3_log(pParse->rc, "%s", *pzErrMsg); - pParse->zErrMsg = 0; - nErr++; - } - if( pParse->pVdbe && pParse->nErr>0 && pParse->nested==0 ){ - sqlite3VdbeDelete(pParse->pVdbe); - pParse->pVdbe = 0; - } -#ifndef SQLITE_OMIT_SHARED_CACHE - if( pParse->nested==0 ){ - sqlite3DbFree(db, pParse->aTableLock); - pParse->aTableLock = 0; - pParse->nTableLock = 0; - } -#endif -#ifndef SQLITE_OMIT_VIRTUALTABLE - sqlite3_free(pParse->apVtabLock); -#endif - - if( !IN_DECLARE_VTAB ){ - /* If the pParse->declareVtab flag is set, do not delete any table - ** structure built up in pParse->pNewTable. The calling code (see vtab.c) - ** will take responsibility for freeing the Table structure. 
- */ - sqlite3DeleteTable(db, pParse->pNewTable); - } - - if( pParse->bFreeWith ) sqlite3WithDelete(db, pParse->pWith); - sqlite3DeleteTrigger(db, pParse->pNewTrigger); - for(i=pParse->nzVar-1; i>=0; i--) sqlite3DbFree(db, pParse->azVar[i]); - sqlite3DbFree(db, pParse->azVar); - while( pParse->pAinc ){ - AutoincInfo *p = pParse->pAinc; - pParse->pAinc = p->pNext; - sqlite3DbFree(db, p); - } - while( pParse->pZombieTab ){ - Table *p = pParse->pZombieTab; - pParse->pZombieTab = p->pNextZombie; - sqlite3DeleteTable(db, p); - } - if( nErr>0 && pParse->rc==SQLITE_OK ){ - pParse->rc = SQLITE_ERROR; - } - return nErr; -} - -/************** End of tokenize.c ********************************************/ -/************** Begin file complete.c ****************************************/ -/* -** 2001 September 15 -** -** The author disclaims copyright to this source code. In place of -** a legal notice, here is a blessing: -** -** May you do good and not evil. -** May you find forgiveness for yourself and forgive others. -** May you share freely, never taking more than you give. -** -************************************************************************* -** An tokenizer for SQL -** -** This file contains C code that implements the sqlite3_complete() API. -** This code used to be part of the tokenizer.c source file. But by -** separating it out, the code will be automatically omitted from -** static links that do not use it. -*/ -#ifndef SQLITE_OMIT_COMPLETE - -/* -** This is defined in tokenize.c. We just have to import the definition. -*/ -#ifndef SQLITE_AMALGAMATION -#ifdef SQLITE_ASCII -#define IdChar(C) ((sqlite3CtypeMap[(unsigned char)C]&0x46)!=0) -#endif -#ifdef SQLITE_EBCDIC -SQLITE_PRIVATE const char sqlite3IsEbcdicIdChar[]; -#define IdChar(C) (((c=C)>=0x42 && sqlite3IsEbcdicIdChar[c-0x40])) -#endif -#endif /* SQLITE_AMALGAMATION */ - - -/* -** Token types used by the sqlite3_complete() routine. See the header -** comments on that procedure for additional information. -*/ -#define tkSEMI 0 -#define tkWS 1 -#define tkOTHER 2 -#ifndef SQLITE_OMIT_TRIGGER -#define tkEXPLAIN 3 -#define tkCREATE 4 -#define tkTEMP 5 -#define tkTRIGGER 6 -#define tkEND 7 -#endif - -/* -** Return TRUE if the given SQL string ends in a semicolon. -** -** Special handling is require for CREATE TRIGGER statements. -** Whenever the CREATE TRIGGER keywords are seen, the statement -** must end with ";END;". -** -** This implementation uses a state machine with 8 states: -** -** (0) INVALID We have not yet seen a non-whitespace character. -** -** (1) START At the beginning or end of an SQL statement. This routine -** returns 1 if it ends in the START state and 0 if it ends -** in any other state. -** -** (2) NORMAL We are in the middle of statement which ends with a single -** semicolon. -** -** (3) EXPLAIN The keyword EXPLAIN has been seen at the beginning of -** a statement. -** -** (4) CREATE The keyword CREATE has been seen at the beginning of a -** statement, possibly preceeded by EXPLAIN and/or followed by -** TEMP or TEMPORARY -** -** (5) TRIGGER We are in the middle of a trigger definition that must be -** ended by a semicolon, the keyword END, and another semicolon. -** -** (6) SEMI We've seen the first semicolon in the ";END;" that occurs at -** the end of a trigger definition. -** -** (7) END We've seen the ";END" of the ";END;" that occurs at the end -** of a trigger difinition. -** -** Transitions between states above are determined by tokens extracted -** from the input. 
The following tokens are significant: -** -** (0) tkSEMI A semicolon. -** (1) tkWS Whitespace. -** (2) tkOTHER Any other SQL token. -** (3) tkEXPLAIN The "explain" keyword. -** (4) tkCREATE The "create" keyword. -** (5) tkTEMP The "temp" or "temporary" keyword. -** (6) tkTRIGGER The "trigger" keyword. -** (7) tkEND The "end" keyword. -** -** Whitespace never causes a state transition and is always ignored. -** This means that a SQL string of all whitespace is invalid. -** -** If we compile with SQLITE_OMIT_TRIGGER, all of the computation needed -** to recognize the end of a trigger can be omitted. All we have to do -** is look for a semicolon that is not part of an string or comment. -*/ -SQLITE_API int sqlite3_complete(const char *zSql){ - u8 state = 0; /* Current state, using numbers defined in header comment */ - u8 token; /* Value of the next token */ - -#ifndef SQLITE_OMIT_TRIGGER - /* A complex statement machine used to detect the end of a CREATE TRIGGER - ** statement. This is the normal case. - */ - static const u8 trans[8][8] = { - /* Token: */ - /* State: ** SEMI WS OTHER EXPLAIN CREATE TEMP TRIGGER END */ - /* 0 INVALID: */ { 1, 0, 2, 3, 4, 2, 2, 2, }, - /* 1 START: */ { 1, 1, 2, 3, 4, 2, 2, 2, }, - /* 2 NORMAL: */ { 1, 2, 2, 2, 2, 2, 2, 2, }, - /* 3 EXPLAIN: */ { 1, 3, 3, 2, 4, 2, 2, 2, }, - /* 4 CREATE: */ { 1, 4, 2, 2, 2, 4, 5, 2, }, - /* 5 TRIGGER: */ { 6, 5, 5, 5, 5, 5, 5, 5, }, - /* 6 SEMI: */ { 6, 6, 5, 5, 5, 5, 5, 7, }, - /* 7 END: */ { 1, 7, 5, 5, 5, 5, 5, 5, }, - }; -#else - /* If triggers are not supported by this compile then the statement machine - ** used to detect the end of a statement is much simplier - */ - static const u8 trans[3][3] = { - /* Token: */ - /* State: ** SEMI WS OTHER */ - /* 0 INVALID: */ { 1, 0, 2, }, - /* 1 START: */ { 1, 1, 2, }, - /* 2 NORMAL: */ { 1, 2, 2, }, - }; -#endif /* SQLITE_OMIT_TRIGGER */ - - while( *zSql ){ - switch( *zSql ){ - case ';': { /* A semicolon */ - token = tkSEMI; - break; - } - case ' ': - case '\r': - case '\t': - case '\n': - case '\f': { /* White space is ignored */ - token = tkWS; - break; - } - case '/': { /* C-style comments */ - if( zSql[1]!='*' ){ - token = tkOTHER; - break; - } - zSql += 2; - while( zSql[0] && (zSql[0]!='*' || zSql[1]!='/') ){ zSql++; } - if( zSql[0]==0 ) return 0; - zSql++; - token = tkWS; - break; - } - case '-': { /* SQL-style comments from "--" to end of line */ - if( zSql[1]!='-' ){ - token = tkOTHER; - break; - } - while( *zSql && *zSql!='\n' ){ zSql++; } - if( *zSql==0 ) return state==1; - token = tkWS; - break; - } - case '[': { /* Microsoft-style identifiers in [...] 
*/ - zSql++; - while( *zSql && *zSql!=']' ){ zSql++; } - if( *zSql==0 ) return 0; - token = tkOTHER; - break; - } - case '`': /* Grave-accent quoted symbols used by MySQL */ - case '"': /* single- and double-quoted strings */ - case '\'': { - int c = *zSql; - zSql++; - while( *zSql && *zSql!=c ){ zSql++; } - if( *zSql==0 ) return 0; - token = tkOTHER; - break; - } - default: { -#ifdef SQLITE_EBCDIC - unsigned char c; -#endif - if( IdChar((u8)*zSql) ){ - /* Keywords and unquoted identifiers */ - int nId; - for(nId=1; IdChar(zSql[nId]); nId++){} -#ifdef SQLITE_OMIT_TRIGGER - token = tkOTHER; -#else - switch( *zSql ){ - case 'c': case 'C': { - if( nId==6 && sqlite3StrNICmp(zSql, "create", 6)==0 ){ - token = tkCREATE; - }else{ - token = tkOTHER; - } - break; - } - case 't': case 'T': { - if( nId==7 && sqlite3StrNICmp(zSql, "trigger", 7)==0 ){ - token = tkTRIGGER; - }else if( nId==4 && sqlite3StrNICmp(zSql, "temp", 4)==0 ){ - token = tkTEMP; - }else if( nId==9 && sqlite3StrNICmp(zSql, "temporary", 9)==0 ){ - token = tkTEMP; - }else{ - token = tkOTHER; - } - break; - } - case 'e': case 'E': { - if( nId==3 && sqlite3StrNICmp(zSql, "end", 3)==0 ){ - token = tkEND; - }else -#ifndef SQLITE_OMIT_EXPLAIN - if( nId==7 && sqlite3StrNICmp(zSql, "explain", 7)==0 ){ - token = tkEXPLAIN; - }else -#endif - { - token = tkOTHER; - } - break; - } - default: { - token = tkOTHER; - break; - } - } -#endif /* SQLITE_OMIT_TRIGGER */ - zSql += nId-1; - }else{ - /* Operators and special symbols */ - token = tkOTHER; - } - break; - } - } - state = trans[state][token]; - zSql++; - } - return state==1; -} - -#ifndef SQLITE_OMIT_UTF16 -/* -** This routine is the same as the sqlite3_complete() routine described -** above, except that the parameter is required to be UTF-16 encoded, not -** UTF-8. -*/ -SQLITE_API int sqlite3_complete16(const void *zSql){ - sqlite3_value *pVal; - char const *zSql8; - int rc = SQLITE_NOMEM; - -#ifndef SQLITE_OMIT_AUTOINIT - rc = sqlite3_initialize(); - if( rc ) return rc; -#endif - pVal = sqlite3ValueNew(0); - sqlite3ValueSetStr(pVal, -1, zSql, SQLITE_UTF16NATIVE, SQLITE_STATIC); - zSql8 = sqlite3ValueText(pVal, SQLITE_UTF8); - if( zSql8 ){ - rc = sqlite3_complete(zSql8); - }else{ - rc = SQLITE_NOMEM; - } - sqlite3ValueFree(pVal); - return sqlite3ApiExit(0, rc); -} -#endif /* SQLITE_OMIT_UTF16 */ -#endif /* SQLITE_OMIT_COMPLETE */ - -/************** End of complete.c ********************************************/ -/************** Begin file main.c ********************************************/ -/* -** 2001 September 15 -** -** The author disclaims copyright to this source code. In place of -** a legal notice, here is a blessing: -** -** May you do good and not evil. -** May you find forgiveness for yourself and forgive others. -** May you share freely, never taking more than you give. -** -************************************************************************* -** Main file for the SQLite library. The routines in this file -** implement the programmer interface to the library. Routines in -** other files are for internal use by SQLite and should not be -** accessed by users of the library. -*/ - -#ifdef SQLITE_ENABLE_FTS3 -/************** Include fts3.h in the middle of main.c ***********************/ -/************** Begin file fts3.h ********************************************/ -/* -** 2006 Oct 10 -** -** The author disclaims copyright to this source code. In place of -** a legal notice, here is a blessing: -** -** May you do good and not evil. 
-** May you find forgiveness for yourself and forgive others. -** May you share freely, never taking more than you give. -** -****************************************************************************** -** -** This header file is used by programs that want to link against the -** FTS3 library. All it does is declare the sqlite3Fts3Init() interface. -*/ - -#if 0 -extern "C" { -#endif /* __cplusplus */ - -SQLITE_PRIVATE int sqlite3Fts3Init(sqlite3 *db); - -#if 0 -} /* extern "C" */ -#endif /* __cplusplus */ - -/************** End of fts3.h ************************************************/ -/************** Continuing where we left off in main.c ***********************/ -#endif -#ifdef SQLITE_ENABLE_RTREE -/************** Include rtree.h in the middle of main.c **********************/ -/************** Begin file rtree.h *******************************************/ -/* -** 2008 May 26 -** -** The author disclaims copyright to this source code. In place of -** a legal notice, here is a blessing: -** -** May you do good and not evil. -** May you find forgiveness for yourself and forgive others. -** May you share freely, never taking more than you give. -** -****************************************************************************** -** -** This header file is used by programs that want to link against the -** RTREE library. All it does is declare the sqlite3RtreeInit() interface. -*/ - -#if 0 -extern "C" { -#endif /* __cplusplus */ - -SQLITE_PRIVATE int sqlite3RtreeInit(sqlite3 *db); - -#if 0 -} /* extern "C" */ -#endif /* __cplusplus */ - -/************** End of rtree.h ***********************************************/ -/************** Continuing where we left off in main.c ***********************/ -#endif -#ifdef SQLITE_ENABLE_ICU -/************** Include sqliteicu.h in the middle of main.c ******************/ -/************** Begin file sqliteicu.h ***************************************/ -/* -** 2008 May 26 -** -** The author disclaims copyright to this source code. In place of -** a legal notice, here is a blessing: -** -** May you do good and not evil. -** May you find forgiveness for yourself and forgive others. -** May you share freely, never taking more than you give. -** -****************************************************************************** -** -** This header file is used by programs that want to link against the -** ICU extension. All it does is declare the sqlite3IcuInit() interface. -*/ - -#if 0 -extern "C" { -#endif /* __cplusplus */ - -SQLITE_PRIVATE int sqlite3IcuInit(sqlite3 *db); - -#if 0 -} /* extern "C" */ -#endif /* __cplusplus */ - - -/************** End of sqliteicu.h *******************************************/ -/************** Continuing where we left off in main.c ***********************/ -#endif - -#ifndef SQLITE_AMALGAMATION -/* IMPLEMENTATION-OF: R-46656-45156 The sqlite3_version[] string constant -** contains the text of SQLITE_VERSION macro. -*/ -SQLITE_API const char sqlite3_version[] = SQLITE_VERSION; -#endif - -/* IMPLEMENTATION-OF: R-53536-42575 The sqlite3_libversion() function returns -** a pointer to the to the sqlite3_version[] string constant. -*/ -SQLITE_API const char *sqlite3_libversion(void){ return sqlite3_version; } - -/* IMPLEMENTATION-OF: R-63124-39300 The sqlite3_sourceid() function returns a -** pointer to a string constant whose value is the same as the -** SQLITE_SOURCE_ID C preprocessor macro. 
-*/ -SQLITE_API const char *sqlite3_sourceid(void){ return SQLITE_SOURCE_ID; } - -/* IMPLEMENTATION-OF: R-35210-63508 The sqlite3_libversion_number() function -** returns an integer equal to SQLITE_VERSION_NUMBER. -*/ -SQLITE_API int sqlite3_libversion_number(void){ return SQLITE_VERSION_NUMBER; } - -/* IMPLEMENTATION-OF: R-20790-14025 The sqlite3_threadsafe() function returns -** zero if and only if SQLite was compiled with mutexing code omitted due to -** the SQLITE_THREADSAFE compile-time option being set to 0. -*/ -SQLITE_API int sqlite3_threadsafe(void){ return SQLITE_THREADSAFE; } - -#if !defined(SQLITE_OMIT_TRACE) && defined(SQLITE_ENABLE_IOTRACE) -/* -** If the following function pointer is not NULL and if -** SQLITE_ENABLE_IOTRACE is enabled, then messages describing -** I/O active are written using this function. These messages -** are intended for debugging activity only. -*/ -SQLITE_PRIVATE void (*sqlite3IoTrace)(const char*, ...) = 0; -#endif - -/* -** If the following global variable points to a string which is the -** name of a directory, then that directory will be used to store -** temporary files. -** -** See also the "PRAGMA temp_store_directory" SQL command. -*/ -SQLITE_API char *sqlite3_temp_directory = 0; - -/* -** If the following global variable points to a string which is the -** name of a directory, then that directory will be used to store -** all database files specified with a relative pathname. -** -** See also the "PRAGMA data_store_directory" SQL command. -*/ -SQLITE_API char *sqlite3_data_directory = 0; - -/* -** Initialize SQLite. -** -** This routine must be called to initialize the memory allocation, -** VFS, and mutex subsystems prior to doing any serious work with -** SQLite. But as long as you do not compile with SQLITE_OMIT_AUTOINIT -** this routine will be called automatically by key routines such as -** sqlite3_open(). -** -** This routine is a no-op except on its very first call for the process, -** or for the first call after a call to sqlite3_shutdown. -** -** The first thread to call this routine runs the initialization to -** completion. If subsequent threads call this routine before the first -** thread has finished the initialization process, then the subsequent -** threads must block until the first thread finishes with the initialization. -** -** The first thread might call this routine recursively. Recursive -** calls to this routine should not block, of course. Otherwise the -** initialization process would never complete. -** -** Let X be the first thread to enter this routine. Let Y be some other -** thread. Then while the initial invocation of this routine by X is -** incomplete, it is required that: -** -** * Calls to this routine from Y must block until the outer-most -** call by X completes. -** -** * Recursive calls to this routine from thread X return immediately -** without blocking. -*/ -SQLITE_API int sqlite3_initialize(void){ - MUTEX_LOGIC( sqlite3_mutex *pMaster; ) /* The main static mutex */ - int rc; /* Result code */ -#ifdef SQLITE_EXTRA_INIT - int bRunExtraInit = 0; /* Extra initialization needed */ -#endif - -#ifdef SQLITE_OMIT_WSD - rc = sqlite3_wsd_init(4096, 24); - if( rc!=SQLITE_OK ){ - return rc; - } -#endif - - /* If SQLite is already completely initialized, then this call - ** to sqlite3_initialize() should be a no-op. But the initialization - ** must be complete. So isInit must not be set until the very end - ** of this routine. 
- */ - if( sqlite3GlobalConfig.isInit ) return SQLITE_OK; - - /* Make sure the mutex subsystem is initialized. If unable to - ** initialize the mutex subsystem, return early with the error. - ** If the system is so sick that we are unable to allocate a mutex, - ** there is not much SQLite is going to be able to do. - ** - ** The mutex subsystem must take care of serializing its own - ** initialization. - */ - rc = sqlite3MutexInit(); - if( rc ) return rc; - - /* Initialize the malloc() system and the recursive pInitMutex mutex. - ** This operation is protected by the STATIC_MASTER mutex. Note that - ** MutexAlloc() is called for a static mutex prior to initializing the - ** malloc subsystem - this implies that the allocation of a static - ** mutex must not require support from the malloc subsystem. - */ - MUTEX_LOGIC( pMaster = sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_MASTER); ) - sqlite3_mutex_enter(pMaster); - sqlite3GlobalConfig.isMutexInit = 1; - if( !sqlite3GlobalConfig.isMallocInit ){ - rc = sqlite3MallocInit(); - } - if( rc==SQLITE_OK ){ - sqlite3GlobalConfig.isMallocInit = 1; - if( !sqlite3GlobalConfig.pInitMutex ){ - sqlite3GlobalConfig.pInitMutex = - sqlite3MutexAlloc(SQLITE_MUTEX_RECURSIVE); - if( sqlite3GlobalConfig.bCoreMutex && !sqlite3GlobalConfig.pInitMutex ){ - rc = SQLITE_NOMEM; - } - } - } - if( rc==SQLITE_OK ){ - sqlite3GlobalConfig.nRefInitMutex++; - } - sqlite3_mutex_leave(pMaster); - - /* If rc is not SQLITE_OK at this point, then either the malloc - ** subsystem could not be initialized or the system failed to allocate - ** the pInitMutex mutex. Return an error in either case. */ - if( rc!=SQLITE_OK ){ - return rc; - } - - /* Do the rest of the initialization under the recursive mutex so - ** that we will be able to handle recursive calls into - ** sqlite3_initialize(). The recursive calls normally come through - ** sqlite3_os_init() when it invokes sqlite3_vfs_register(), but other - ** recursive calls might also be possible. - ** - ** IMPLEMENTATION-OF: R-00140-37445 SQLite automatically serializes calls - ** to the xInit method, so the xInit method need not be threadsafe. - ** - ** The following mutex is what serializes access to the appdef pcache xInit - ** methods. The sqlite3_pcache_methods.xInit() all is embedded in the - ** call to sqlite3PcacheInitialize(). - */ - sqlite3_mutex_enter(sqlite3GlobalConfig.pInitMutex); - if( sqlite3GlobalConfig.isInit==0 && sqlite3GlobalConfig.inProgress==0 ){ - FuncDefHash *pHash = &GLOBAL(FuncDefHash, sqlite3GlobalFunctions); - sqlite3GlobalConfig.inProgress = 1; - memset(pHash, 0, sizeof(sqlite3GlobalFunctions)); - sqlite3RegisterGlobalFunctions(); - if( sqlite3GlobalConfig.isPCacheInit==0 ){ - rc = sqlite3PcacheInitialize(); - } - if( rc==SQLITE_OK ){ - sqlite3GlobalConfig.isPCacheInit = 1; - rc = sqlite3OsInit(); - } - if( rc==SQLITE_OK ){ - sqlite3PCacheBufferSetup( sqlite3GlobalConfig.pPage, - sqlite3GlobalConfig.szPage, sqlite3GlobalConfig.nPage); - sqlite3GlobalConfig.isInit = 1; -#ifdef SQLITE_EXTRA_INIT - bRunExtraInit = 1; -#endif - } - sqlite3GlobalConfig.inProgress = 0; - } - sqlite3_mutex_leave(sqlite3GlobalConfig.pInitMutex); - - /* Go back under the static mutex and clean up the recursive - ** mutex to prevent a resource leak. 
- */ - sqlite3_mutex_enter(pMaster); - sqlite3GlobalConfig.nRefInitMutex--; - if( sqlite3GlobalConfig.nRefInitMutex<=0 ){ - assert( sqlite3GlobalConfig.nRefInitMutex==0 ); - sqlite3_mutex_free(sqlite3GlobalConfig.pInitMutex); - sqlite3GlobalConfig.pInitMutex = 0; - } - sqlite3_mutex_leave(pMaster); - - /* The following is just a sanity check to make sure SQLite has - ** been compiled correctly. It is important to run this code, but - ** we don't want to run it too often and soak up CPU cycles for no - ** reason. So we run it once during initialization. - */ -#ifndef NDEBUG -#ifndef SQLITE_OMIT_FLOATING_POINT - /* This section of code's only "output" is via assert() statements. */ - if ( rc==SQLITE_OK ){ - u64 x = (((u64)1)<<63)-1; - double y; - assert(sizeof(x)==8); - assert(sizeof(x)==sizeof(y)); - memcpy(&y, &x, 8); - assert( sqlite3IsNaN(y) ); - } -#endif -#endif - - /* Do extra initialization steps requested by the SQLITE_EXTRA_INIT - ** compile-time option. - */ -#ifdef SQLITE_EXTRA_INIT - if( bRunExtraInit ){ - int SQLITE_EXTRA_INIT(const char*); - rc = SQLITE_EXTRA_INIT(0); - } -#endif - - return rc; -} - -/* -** Undo the effects of sqlite3_initialize(). Must not be called while -** there are outstanding database connections or memory allocations or -** while any part of SQLite is otherwise in use in any thread. This -** routine is not threadsafe. But it is safe to invoke this routine -** on when SQLite is already shut down. If SQLite is already shut down -** when this routine is invoked, then this routine is a harmless no-op. -*/ -SQLITE_API int sqlite3_shutdown(void){ - if( sqlite3GlobalConfig.isInit ){ -#ifdef SQLITE_EXTRA_SHUTDOWN - void SQLITE_EXTRA_SHUTDOWN(void); - SQLITE_EXTRA_SHUTDOWN(); -#endif - sqlite3_os_end(); - sqlite3_reset_auto_extension(); - sqlite3GlobalConfig.isInit = 0; - } - if( sqlite3GlobalConfig.isPCacheInit ){ - sqlite3PcacheShutdown(); - sqlite3GlobalConfig.isPCacheInit = 0; - } - if( sqlite3GlobalConfig.isMallocInit ){ - sqlite3MallocEnd(); - sqlite3GlobalConfig.isMallocInit = 0; - -#ifndef SQLITE_OMIT_SHUTDOWN_DIRECTORIES - /* The heap subsystem has now been shutdown and these values are supposed - ** to be NULL or point to memory that was obtained from sqlite3_malloc(), - ** which would rely on that heap subsystem; therefore, make sure these - ** values cannot refer to heap memory that was just invalidated when the - ** heap subsystem was shutdown. This is only done if the current call to - ** this function resulted in the heap subsystem actually being shutdown. - */ - sqlite3_data_directory = 0; - sqlite3_temp_directory = 0; -#endif - } - if( sqlite3GlobalConfig.isMutexInit ){ - sqlite3MutexEnd(); - sqlite3GlobalConfig.isMutexInit = 0; - } - - return SQLITE_OK; -} - -/* -** This API allows applications to modify the global configuration of -** the SQLite library at run-time. -** -** This routine should only be called when there are no outstanding -** database connections or memory allocations. This routine is not -** threadsafe. Failure to heed these warnings can lead to unpredictable -** behavior. -*/ -SQLITE_API int sqlite3_config(int op, ...){ - va_list ap; - int rc = SQLITE_OK; - - /* sqlite3_config() shall return SQLITE_MISUSE if it is invoked while - ** the SQLite library is in use. */ - if( sqlite3GlobalConfig.isInit ) return SQLITE_MISUSE_BKPT; - - va_start(ap, op); - switch( op ){ - - /* Mutex configuration options are only available in a threadsafe - ** compile. 
- */ -#if defined(SQLITE_THREADSAFE) && SQLITE_THREADSAFE>0 - case SQLITE_CONFIG_SINGLETHREAD: { - /* Disable all mutexing */ - sqlite3GlobalConfig.bCoreMutex = 0; - sqlite3GlobalConfig.bFullMutex = 0; - break; - } - case SQLITE_CONFIG_MULTITHREAD: { - /* Disable mutexing of database connections */ - /* Enable mutexing of core data structures */ - sqlite3GlobalConfig.bCoreMutex = 1; - sqlite3GlobalConfig.bFullMutex = 0; - break; - } - case SQLITE_CONFIG_SERIALIZED: { - /* Enable all mutexing */ - sqlite3GlobalConfig.bCoreMutex = 1; - sqlite3GlobalConfig.bFullMutex = 1; - break; - } - case SQLITE_CONFIG_MUTEX: { - /* Specify an alternative mutex implementation */ - sqlite3GlobalConfig.mutex = *va_arg(ap, sqlite3_mutex_methods*); - break; - } - case SQLITE_CONFIG_GETMUTEX: { - /* Retrieve the current mutex implementation */ - *va_arg(ap, sqlite3_mutex_methods*) = sqlite3GlobalConfig.mutex; - break; - } -#endif - - - case SQLITE_CONFIG_MALLOC: { - /* Specify an alternative malloc implementation */ - sqlite3GlobalConfig.m = *va_arg(ap, sqlite3_mem_methods*); - break; - } - case SQLITE_CONFIG_GETMALLOC: { - /* Retrieve the current malloc() implementation */ - if( sqlite3GlobalConfig.m.xMalloc==0 ) sqlite3MemSetDefault(); - *va_arg(ap, sqlite3_mem_methods*) = sqlite3GlobalConfig.m; - break; - } - case SQLITE_CONFIG_MEMSTATUS: { - /* Enable or disable the malloc status collection */ - sqlite3GlobalConfig.bMemstat = va_arg(ap, int); - break; - } - case SQLITE_CONFIG_SCRATCH: { - /* Designate a buffer for scratch memory space */ - sqlite3GlobalConfig.pScratch = va_arg(ap, void*); - sqlite3GlobalConfig.szScratch = va_arg(ap, int); - sqlite3GlobalConfig.nScratch = va_arg(ap, int); - break; - } - case SQLITE_CONFIG_PAGECACHE: { - /* Designate a buffer for page cache memory space */ - sqlite3GlobalConfig.pPage = va_arg(ap, void*); - sqlite3GlobalConfig.szPage = va_arg(ap, int); - sqlite3GlobalConfig.nPage = va_arg(ap, int); - break; - } - - case SQLITE_CONFIG_PCACHE: { - /* no-op */ - break; - } - case SQLITE_CONFIG_GETPCACHE: { - /* now an error */ - rc = SQLITE_ERROR; - break; - } - - case SQLITE_CONFIG_PCACHE2: { - /* Specify an alternative page cache implementation */ - sqlite3GlobalConfig.pcache2 = *va_arg(ap, sqlite3_pcache_methods2*); - break; - } - case SQLITE_CONFIG_GETPCACHE2: { - if( sqlite3GlobalConfig.pcache2.xInit==0 ){ - sqlite3PCacheSetDefault(); - } - *va_arg(ap, sqlite3_pcache_methods2*) = sqlite3GlobalConfig.pcache2; - break; - } - -#if defined(SQLITE_ENABLE_MEMSYS3) || defined(SQLITE_ENABLE_MEMSYS5) - case SQLITE_CONFIG_HEAP: { - /* Designate a buffer for heap memory space */ - sqlite3GlobalConfig.pHeap = va_arg(ap, void*); - sqlite3GlobalConfig.nHeap = va_arg(ap, int); - sqlite3GlobalConfig.mnReq = va_arg(ap, int); - - if( sqlite3GlobalConfig.mnReq<1 ){ - sqlite3GlobalConfig.mnReq = 1; - }else if( sqlite3GlobalConfig.mnReq>(1<<12) ){ - /* cap min request size at 2^12 */ - sqlite3GlobalConfig.mnReq = (1<<12); - } - - if( sqlite3GlobalConfig.pHeap==0 ){ - /* If the heap pointer is NULL, then restore the malloc implementation - ** back to NULL pointers too. This will cause the malloc to go - ** back to its default implementation when sqlite3_initialize() is - ** run. - */ - memset(&sqlite3GlobalConfig.m, 0, sizeof(sqlite3GlobalConfig.m)); - }else{ - /* The heap pointer is not NULL, then install one of the - ** mem5.c/mem3.c methods. The enclosing #if guarantees at - ** least one of these methods is currently enabled. 
- */ -#ifdef SQLITE_ENABLE_MEMSYS3 - sqlite3GlobalConfig.m = *sqlite3MemGetMemsys3(); -#endif -#ifdef SQLITE_ENABLE_MEMSYS5 - sqlite3GlobalConfig.m = *sqlite3MemGetMemsys5(); -#endif - } - break; - } -#endif - - case SQLITE_CONFIG_LOOKASIDE: { - sqlite3GlobalConfig.szLookaside = va_arg(ap, int); - sqlite3GlobalConfig.nLookaside = va_arg(ap, int); - break; - } - - /* Record a pointer to the logger function and its first argument. - ** The default is NULL. Logging is disabled if the function pointer is - ** NULL. - */ - case SQLITE_CONFIG_LOG: { - /* MSVC is picky about pulling func ptrs from va lists. - ** http://support.microsoft.com/kb/47961 - ** sqlite3GlobalConfig.xLog = va_arg(ap, void(*)(void*,int,const char*)); - */ - typedef void(*LOGFUNC_t)(void*,int,const char*); - sqlite3GlobalConfig.xLog = va_arg(ap, LOGFUNC_t); - sqlite3GlobalConfig.pLogArg = va_arg(ap, void*); - break; - } - - case SQLITE_CONFIG_URI: { - sqlite3GlobalConfig.bOpenUri = va_arg(ap, int); - break; - } - - case SQLITE_CONFIG_COVERING_INDEX_SCAN: { - sqlite3GlobalConfig.bUseCis = va_arg(ap, int); - break; - } - -#ifdef SQLITE_ENABLE_SQLLOG - case SQLITE_CONFIG_SQLLOG: { - typedef void(*SQLLOGFUNC_t)(void*, sqlite3*, const char*, int); - sqlite3GlobalConfig.xSqllog = va_arg(ap, SQLLOGFUNC_t); - sqlite3GlobalConfig.pSqllogArg = va_arg(ap, void *); - break; - } -#endif - - case SQLITE_CONFIG_MMAP_SIZE: { - sqlite3_int64 szMmap = va_arg(ap, sqlite3_int64); - sqlite3_int64 mxMmap = va_arg(ap, sqlite3_int64); - if( mxMmap<0 || mxMmap>SQLITE_MAX_MMAP_SIZE ){ - mxMmap = SQLITE_MAX_MMAP_SIZE; - } - sqlite3GlobalConfig.mxMmap = mxMmap; - if( szMmap<0 ) szMmap = SQLITE_DEFAULT_MMAP_SIZE; - if( szMmap>mxMmap) szMmap = mxMmap; - sqlite3GlobalConfig.szMmap = szMmap; - break; - } - -#if SQLITE_OS_WIN && defined(SQLITE_WIN32_MALLOC) - case SQLITE_CONFIG_WIN32_HEAPSIZE: { - sqlite3GlobalConfig.nHeap = va_arg(ap, int); - break; - } -#endif - - default: { - rc = SQLITE_ERROR; - break; - } - } - va_end(ap); - return rc; -} - -/* -** Set up the lookaside buffers for a database connection. -** Return SQLITE_OK on success. -** If lookaside is already active, return SQLITE_BUSY. -** -** The sz parameter is the number of bytes in each lookaside slot. -** The cnt parameter is the number of slots. If pStart is NULL the -** space for the lookaside memory is obtained from sqlite3_malloc(). -** If pStart is not NULL then it is sz*cnt bytes of memory to use for -** the lookaside memory. -*/ -static int setupLookaside(sqlite3 *db, void *pBuf, int sz, int cnt){ - void *pStart; - if( db->lookaside.nOut ){ - return SQLITE_BUSY; - } - /* Free any existing lookaside buffer for this handle before - ** allocating a new one so we don't have to have space for - ** both at the same time. - */ - if( db->lookaside.bMalloced ){ - sqlite3_free(db->lookaside.pStart); - } - /* The size of a lookaside slot after ROUNDDOWN8 needs to be larger - ** than a pointer to be useful. 
- */ - sz = ROUNDDOWN8(sz); /* IMP: R-33038-09382 */ - if( sz<=(int)sizeof(LookasideSlot*) ) sz = 0; - if( cnt<0 ) cnt = 0; - if( sz==0 || cnt==0 ){ - sz = 0; - pStart = 0; - }else if( pBuf==0 ){ - sqlite3BeginBenignMalloc(); - pStart = sqlite3Malloc( sz*cnt ); /* IMP: R-61949-35727 */ - sqlite3EndBenignMalloc(); - if( pStart ) cnt = sqlite3MallocSize(pStart)/sz; - }else{ - pStart = pBuf; - } - db->lookaside.pStart = pStart; - db->lookaside.pFree = 0; - db->lookaside.sz = (u16)sz; - if( pStart ){ - int i; - LookasideSlot *p; - assert( sz > (int)sizeof(LookasideSlot*) ); - p = (LookasideSlot*)pStart; - for(i=cnt-1; i>=0; i--){ - p->pNext = db->lookaside.pFree; - db->lookaside.pFree = p; - p = (LookasideSlot*)&((u8*)p)[sz]; - } - db->lookaside.pEnd = p; - db->lookaside.bEnabled = 1; - db->lookaside.bMalloced = pBuf==0 ?1:0; - }else{ - db->lookaside.pStart = db; - db->lookaside.pEnd = db; - db->lookaside.bEnabled = 0; - db->lookaside.bMalloced = 0; - } - return SQLITE_OK; -} - -/* -** Return the mutex associated with a database connection. -*/ -SQLITE_API sqlite3_mutex *sqlite3_db_mutex(sqlite3 *db){ - return db->mutex; -} - -/* -** Free up as much memory as we can from the given database -** connection. -*/ -SQLITE_API int sqlite3_db_release_memory(sqlite3 *db){ - int i; - sqlite3_mutex_enter(db->mutex); - sqlite3BtreeEnterAll(db); - for(i=0; inDb; i++){ - Btree *pBt = db->aDb[i].pBt; - if( pBt ){ - Pager *pPager = sqlite3BtreePager(pBt); - sqlite3PagerShrink(pPager); - } - } - sqlite3BtreeLeaveAll(db); - sqlite3_mutex_leave(db->mutex); - return SQLITE_OK; -} - -/* -** Configuration settings for an individual database connection -*/ -SQLITE_API int sqlite3_db_config(sqlite3 *db, int op, ...){ - va_list ap; - int rc; - va_start(ap, op); - switch( op ){ - case SQLITE_DBCONFIG_LOOKASIDE: { - void *pBuf = va_arg(ap, void*); /* IMP: R-26835-10964 */ - int sz = va_arg(ap, int); /* IMP: R-47871-25994 */ - int cnt = va_arg(ap, int); /* IMP: R-04460-53386 */ - rc = setupLookaside(db, pBuf, sz, cnt); - break; - } - default: { - static const struct { - int op; /* The opcode */ - u32 mask; /* Mask of the bit in sqlite3.flags to set/clear */ - } aFlagOp[] = { - { SQLITE_DBCONFIG_ENABLE_FKEY, SQLITE_ForeignKeys }, - { SQLITE_DBCONFIG_ENABLE_TRIGGER, SQLITE_EnableTrigger }, - }; - unsigned int i; - rc = SQLITE_ERROR; /* IMP: R-42790-23372 */ - for(i=0; iflags; - if( onoff>0 ){ - db->flags |= aFlagOp[i].mask; - }else if( onoff==0 ){ - db->flags &= ~aFlagOp[i].mask; - } - if( oldFlags!=db->flags ){ - sqlite3ExpirePreparedStatements(db); - } - if( pRes ){ - *pRes = (db->flags & aFlagOp[i].mask)!=0; - } - rc = SQLITE_OK; - break; - } - } - break; - } - } - va_end(ap); - return rc; -} - - -/* -** Return true if the buffer z[0..n-1] contains all spaces. -*/ -static int allSpaces(const char *z, int n){ - while( n>0 && z[n-1]==' ' ){ n--; } - return n==0; -} - -/* -** This is the default collating function named "BINARY" which is always -** available. -** -** If the padFlag argument is not NULL then space padding at the end -** of strings is ignored. This implements the RTRIM collation. -*/ -static int binCollFunc( - void *padFlag, - int nKey1, const void *pKey1, - int nKey2, const void *pKey2 -){ - int rc, n; - n = nKey1lastRowid; -} - -/* -** Return the number of changes in the most recent call to sqlite3_exec(). -*/ -SQLITE_API int sqlite3_changes(sqlite3 *db){ - return db->nChange; -} - -/* -** Return the number of changes since the database handle was opened. 
-*/ -SQLITE_API int sqlite3_total_changes(sqlite3 *db){ - return db->nTotalChange; -} - -/* -** Close all open savepoints. This function only manipulates fields of the -** database handle object, it does not close any savepoints that may be open -** at the b-tree/pager level. -*/ -SQLITE_PRIVATE void sqlite3CloseSavepoints(sqlite3 *db){ - while( db->pSavepoint ){ - Savepoint *pTmp = db->pSavepoint; - db->pSavepoint = pTmp->pNext; - sqlite3DbFree(db, pTmp); - } - db->nSavepoint = 0; - db->nStatement = 0; - db->isTransactionSavepoint = 0; -} - -/* -** Invoke the destructor function associated with FuncDef p, if any. Except, -** if this is not the last copy of the function, do not invoke it. Multiple -** copies of a single function are created when create_function() is called -** with SQLITE_ANY as the encoding. -*/ -static void functionDestroy(sqlite3 *db, FuncDef *p){ - FuncDestructor *pDestructor = p->pDestructor; - if( pDestructor ){ - pDestructor->nRef--; - if( pDestructor->nRef==0 ){ - pDestructor->xDestroy(pDestructor->pUserData); - sqlite3DbFree(db, pDestructor); - } - } -} - -/* -** Disconnect all sqlite3_vtab objects that belong to database connection -** db. This is called when db is being closed. -*/ -static void disconnectAllVtab(sqlite3 *db){ -#ifndef SQLITE_OMIT_VIRTUALTABLE - int i; - sqlite3BtreeEnterAll(db); - for(i=0; inDb; i++){ - Schema *pSchema = db->aDb[i].pSchema; - if( db->aDb[i].pSchema ){ - HashElem *p; - for(p=sqliteHashFirst(&pSchema->tblHash); p; p=sqliteHashNext(p)){ - Table *pTab = (Table *)sqliteHashData(p); - if( IsVirtual(pTab) ) sqlite3VtabDisconnect(db, pTab); - } - } - } - sqlite3VtabUnlockList(db); - sqlite3BtreeLeaveAll(db); -#else - UNUSED_PARAMETER(db); -#endif -} - -/* -** Return TRUE if database connection db has unfinalized prepared -** statements or unfinished sqlite3_backup objects. -*/ -static int connectionIsBusy(sqlite3 *db){ - int j; - assert( sqlite3_mutex_held(db->mutex) ); - if( db->pVdbe ) return 1; - for(j=0; jnDb; j++){ - Btree *pBt = db->aDb[j].pBt; - if( pBt && sqlite3BtreeIsInBackup(pBt) ) return 1; - } - return 0; -} - -/* -** Close an existing SQLite database -*/ -static int sqlite3Close(sqlite3 *db, int forceZombie){ - if( !db ){ - return SQLITE_OK; - } - if( !sqlite3SafetyCheckSickOrOk(db) ){ - return SQLITE_MISUSE_BKPT; - } - sqlite3_mutex_enter(db->mutex); - - /* Force xDisconnect calls on all virtual tables */ - disconnectAllVtab(db); - - /* If a transaction is open, the disconnectAllVtab() call above - ** will not have called the xDisconnect() method on any virtual - ** tables in the db->aVTrans[] array. The following sqlite3VtabRollback() - ** call will do so. We need to do this before the check for active - ** SQL statements below, as the v-table implementation may be storing - ** some prepared statements internally. - */ - sqlite3VtabRollback(db); - - /* Legacy behavior (sqlite3_close() behavior) is to return - ** SQLITE_BUSY if the connection can not be closed immediately. - */ - if( !forceZombie && connectionIsBusy(db) ){ - sqlite3Error(db, SQLITE_BUSY, "unable to close due to unfinalized " - "statements or unfinished backups"); - sqlite3_mutex_leave(db->mutex); - return SQLITE_BUSY; - } - -#ifdef SQLITE_ENABLE_SQLLOG - if( sqlite3GlobalConfig.xSqllog ){ - /* Closing the handle. Fourth parameter is passed the value 2. */ - sqlite3GlobalConfig.xSqllog(sqlite3GlobalConfig.pSqllogArg, db, 0, 2); - } -#endif - - /* Convert the connection into a zombie and then close it. 
- */ - db->magic = SQLITE_MAGIC_ZOMBIE; - sqlite3LeaveMutexAndCloseZombie(db); - return SQLITE_OK; -} - -/* -** Two variations on the public interface for closing a database -** connection. The sqlite3_close() version returns SQLITE_BUSY and -** leaves the connection option if there are unfinalized prepared -** statements or unfinished sqlite3_backups. The sqlite3_close_v2() -** version forces the connection to become a zombie if there are -** unclosed resources, and arranges for deallocation when the last -** prepare statement or sqlite3_backup closes. -*/ -SQLITE_API int sqlite3_close(sqlite3 *db){ return sqlite3Close(db,0); } -SQLITE_API int sqlite3_close_v2(sqlite3 *db){ return sqlite3Close(db,1); } - - -/* -** Close the mutex on database connection db. -** -** Furthermore, if database connection db is a zombie (meaning that there -** has been a prior call to sqlite3_close(db) or sqlite3_close_v2(db)) and -** every sqlite3_stmt has now been finalized and every sqlite3_backup has -** finished, then free all resources. -*/ -SQLITE_PRIVATE void sqlite3LeaveMutexAndCloseZombie(sqlite3 *db){ - HashElem *i; /* Hash table iterator */ - int j; - - /* If there are outstanding sqlite3_stmt or sqlite3_backup objects - ** or if the connection has not yet been closed by sqlite3_close_v2(), - ** then just leave the mutex and return. - */ - if( db->magic!=SQLITE_MAGIC_ZOMBIE || connectionIsBusy(db) ){ - sqlite3_mutex_leave(db->mutex); - return; - } - - /* If we reach this point, it means that the database connection has - ** closed all sqlite3_stmt and sqlite3_backup objects and has been - ** passed to sqlite3_close (meaning that it is a zombie). Therefore, - ** go ahead and free all resources. - */ - - /* If a transaction is open, roll it back. This also ensures that if - ** any database schemas have been modified by an uncommitted transaction - ** they are reset. And that the required b-tree mutex is held to make - ** the pager rollback and schema reset an atomic operation. */ - sqlite3RollbackAll(db, SQLITE_OK); - - /* Free any outstanding Savepoint structures. */ - sqlite3CloseSavepoints(db); - - /* Close all database connections */ - for(j=0; jnDb; j++){ - struct Db *pDb = &db->aDb[j]; - if( pDb->pBt ){ - sqlite3BtreeClose(pDb->pBt); - pDb->pBt = 0; - if( j!=1 ){ - pDb->pSchema = 0; - } - } - } - /* Clear the TEMP schema separately and last */ - if( db->aDb[1].pSchema ){ - sqlite3SchemaClear(db->aDb[1].pSchema); - } - sqlite3VtabUnlockList(db); - - /* Free up the array of auxiliary databases */ - sqlite3CollapseDatabaseArray(db); - assert( db->nDb<=2 ); - assert( db->aDb==db->aDbStatic ); - - /* Tell the code in notify.c that the connection no longer holds any - ** locks and does not require any further unlock-notify callbacks. - */ - sqlite3ConnectionClosed(db); - - for(j=0; jaFunc.a); j++){ - FuncDef *pNext, *pHash, *p; - for(p=db->aFunc.a[j]; p; p=pHash){ - pHash = p->pHash; - while( p ){ - functionDestroy(db, p); - pNext = p->pNext; - sqlite3DbFree(db, p); - p = pNext; - } - } - } - for(i=sqliteHashFirst(&db->aCollSeq); i; i=sqliteHashNext(i)){ - CollSeq *pColl = (CollSeq *)sqliteHashData(i); - /* Invoke any destructors registered for collation sequence user data. 
*/ - for(j=0; j<3; j++){ - if( pColl[j].xDel ){ - pColl[j].xDel(pColl[j].pUser); - } - } - sqlite3DbFree(db, pColl); - } - sqlite3HashClear(&db->aCollSeq); -#ifndef SQLITE_OMIT_VIRTUALTABLE - for(i=sqliteHashFirst(&db->aModule); i; i=sqliteHashNext(i)){ - Module *pMod = (Module *)sqliteHashData(i); - if( pMod->xDestroy ){ - pMod->xDestroy(pMod->pAux); - } - sqlite3DbFree(db, pMod); - } - sqlite3HashClear(&db->aModule); -#endif - - sqlite3Error(db, SQLITE_OK, 0); /* Deallocates any cached error strings. */ - sqlite3ValueFree(db->pErr); - sqlite3CloseExtensions(db); - - db->magic = SQLITE_MAGIC_ERROR; - - /* The temp-database schema is allocated differently from the other schema - ** objects (using sqliteMalloc() directly, instead of sqlite3BtreeSchema()). - ** So it needs to be freed here. Todo: Why not roll the temp schema into - ** the same sqliteMalloc() as the one that allocates the database - ** structure? - */ - sqlite3DbFree(db, db->aDb[1].pSchema); - sqlite3_mutex_leave(db->mutex); - db->magic = SQLITE_MAGIC_CLOSED; - sqlite3_mutex_free(db->mutex); - assert( db->lookaside.nOut==0 ); /* Fails on a lookaside memory leak */ - if( db->lookaside.bMalloced ){ - sqlite3_free(db->lookaside.pStart); - } - sqlite3_free(db); -} - -/* -** Rollback all database files. If tripCode is not SQLITE_OK, then -** any open cursors are invalidated ("tripped" - as in "tripping a circuit -** breaker") and made to return tripCode if there are any further -** attempts to use that cursor. -*/ -SQLITE_PRIVATE void sqlite3RollbackAll(sqlite3 *db, int tripCode){ - int i; - int inTrans = 0; - assert( sqlite3_mutex_held(db->mutex) ); - sqlite3BeginBenignMalloc(); - - /* Obtain all b-tree mutexes before making any calls to BtreeRollback(). - ** This is important in case the transaction being rolled back has - ** modified the database schema. If the b-tree mutexes are not taken - ** here, then another shared-cache connection might sneak in between - ** the database rollback and schema reset, which can cause false - ** corruption reports in some cases. */ - sqlite3BtreeEnterAll(db); - - for(i=0; inDb; i++){ - Btree *p = db->aDb[i].pBt; - if( p ){ - if( sqlite3BtreeIsInTrans(p) ){ - inTrans = 1; - } - sqlite3BtreeRollback(p, tripCode); - } - } - sqlite3VtabRollback(db); - sqlite3EndBenignMalloc(); - - if( (db->flags&SQLITE_InternChanges)!=0 && db->init.busy==0 ){ - sqlite3ExpirePreparedStatements(db); - sqlite3ResetAllSchemasOfConnection(db); - } - sqlite3BtreeLeaveAll(db); - - /* Any deferred constraint violations have now been resolved. */ - db->nDeferredCons = 0; - db->nDeferredImmCons = 0; - db->flags &= ~SQLITE_DeferFKs; - - /* If one has been configured, invoke the rollback-hook callback */ - if( db->xRollbackCallback && (inTrans || !db->autoCommit) ){ - db->xRollbackCallback(db->pRollbackArg); - } -} - -/* -** Return a static string containing the name corresponding to the error code -** specified in the argument. 
-*/ -#if defined(SQLITE_TEST) -SQLITE_PRIVATE const char *sqlite3ErrName(int rc){ - const char *zName = 0; - int i, origRc = rc; - for(i=0; i<2 && zName==0; i++, rc &= 0xff){ - switch( rc ){ - case SQLITE_OK: zName = "SQLITE_OK"; break; - case SQLITE_ERROR: zName = "SQLITE_ERROR"; break; - case SQLITE_INTERNAL: zName = "SQLITE_INTERNAL"; break; - case SQLITE_PERM: zName = "SQLITE_PERM"; break; - case SQLITE_ABORT: zName = "SQLITE_ABORT"; break; - case SQLITE_ABORT_ROLLBACK: zName = "SQLITE_ABORT_ROLLBACK"; break; - case SQLITE_BUSY: zName = "SQLITE_BUSY"; break; - case SQLITE_BUSY_RECOVERY: zName = "SQLITE_BUSY_RECOVERY"; break; - case SQLITE_BUSY_SNAPSHOT: zName = "SQLITE_BUSY_SNAPSHOT"; break; - case SQLITE_LOCKED: zName = "SQLITE_LOCKED"; break; - case SQLITE_LOCKED_SHAREDCACHE: zName = "SQLITE_LOCKED_SHAREDCACHE";break; - case SQLITE_NOMEM: zName = "SQLITE_NOMEM"; break; - case SQLITE_READONLY: zName = "SQLITE_READONLY"; break; - case SQLITE_READONLY_RECOVERY: zName = "SQLITE_READONLY_RECOVERY"; break; - case SQLITE_READONLY_CANTLOCK: zName = "SQLITE_READONLY_CANTLOCK"; break; - case SQLITE_READONLY_ROLLBACK: zName = "SQLITE_READONLY_ROLLBACK"; break; - case SQLITE_READONLY_DBMOVED: zName = "SQLITE_READONLY_DBMOVED"; break; - case SQLITE_INTERRUPT: zName = "SQLITE_INTERRUPT"; break; - case SQLITE_IOERR: zName = "SQLITE_IOERR"; break; - case SQLITE_IOERR_READ: zName = "SQLITE_IOERR_READ"; break; - case SQLITE_IOERR_SHORT_READ: zName = "SQLITE_IOERR_SHORT_READ"; break; - case SQLITE_IOERR_WRITE: zName = "SQLITE_IOERR_WRITE"; break; - case SQLITE_IOERR_FSYNC: zName = "SQLITE_IOERR_FSYNC"; break; - case SQLITE_IOERR_DIR_FSYNC: zName = "SQLITE_IOERR_DIR_FSYNC"; break; - case SQLITE_IOERR_TRUNCATE: zName = "SQLITE_IOERR_TRUNCATE"; break; - case SQLITE_IOERR_FSTAT: zName = "SQLITE_IOERR_FSTAT"; break; - case SQLITE_IOERR_UNLOCK: zName = "SQLITE_IOERR_UNLOCK"; break; - case SQLITE_IOERR_RDLOCK: zName = "SQLITE_IOERR_RDLOCK"; break; - case SQLITE_IOERR_DELETE: zName = "SQLITE_IOERR_DELETE"; break; - case SQLITE_IOERR_BLOCKED: zName = "SQLITE_IOERR_BLOCKED"; break; - case SQLITE_IOERR_NOMEM: zName = "SQLITE_IOERR_NOMEM"; break; - case SQLITE_IOERR_ACCESS: zName = "SQLITE_IOERR_ACCESS"; break; - case SQLITE_IOERR_CHECKRESERVEDLOCK: - zName = "SQLITE_IOERR_CHECKRESERVEDLOCK"; break; - case SQLITE_IOERR_LOCK: zName = "SQLITE_IOERR_LOCK"; break; - case SQLITE_IOERR_CLOSE: zName = "SQLITE_IOERR_CLOSE"; break; - case SQLITE_IOERR_DIR_CLOSE: zName = "SQLITE_IOERR_DIR_CLOSE"; break; - case SQLITE_IOERR_SHMOPEN: zName = "SQLITE_IOERR_SHMOPEN"; break; - case SQLITE_IOERR_SHMSIZE: zName = "SQLITE_IOERR_SHMSIZE"; break; - case SQLITE_IOERR_SHMLOCK: zName = "SQLITE_IOERR_SHMLOCK"; break; - case SQLITE_IOERR_SHMMAP: zName = "SQLITE_IOERR_SHMMAP"; break; - case SQLITE_IOERR_SEEK: zName = "SQLITE_IOERR_SEEK"; break; - case SQLITE_IOERR_DELETE_NOENT: zName = "SQLITE_IOERR_DELETE_NOENT";break; - case SQLITE_IOERR_MMAP: zName = "SQLITE_IOERR_MMAP"; break; - case SQLITE_IOERR_GETTEMPPATH: zName = "SQLITE_IOERR_GETTEMPPATH"; break; - case SQLITE_IOERR_CONVPATH: zName = "SQLITE_IOERR_CONVPATH"; break; - case SQLITE_CORRUPT: zName = "SQLITE_CORRUPT"; break; - case SQLITE_CORRUPT_VTAB: zName = "SQLITE_CORRUPT_VTAB"; break; - case SQLITE_NOTFOUND: zName = "SQLITE_NOTFOUND"; break; - case SQLITE_FULL: zName = "SQLITE_FULL"; break; - case SQLITE_CANTOPEN: zName = "SQLITE_CANTOPEN"; break; - case SQLITE_CANTOPEN_NOTEMPDIR: zName = "SQLITE_CANTOPEN_NOTEMPDIR";break; - case SQLITE_CANTOPEN_ISDIR: zName = 
"SQLITE_CANTOPEN_ISDIR"; break; - case SQLITE_CANTOPEN_FULLPATH: zName = "SQLITE_CANTOPEN_FULLPATH"; break; - case SQLITE_CANTOPEN_CONVPATH: zName = "SQLITE_CANTOPEN_CONVPATH"; break; - case SQLITE_PROTOCOL: zName = "SQLITE_PROTOCOL"; break; - case SQLITE_EMPTY: zName = "SQLITE_EMPTY"; break; - case SQLITE_SCHEMA: zName = "SQLITE_SCHEMA"; break; - case SQLITE_TOOBIG: zName = "SQLITE_TOOBIG"; break; - case SQLITE_CONSTRAINT: zName = "SQLITE_CONSTRAINT"; break; - case SQLITE_CONSTRAINT_UNIQUE: zName = "SQLITE_CONSTRAINT_UNIQUE"; break; - case SQLITE_CONSTRAINT_TRIGGER: zName = "SQLITE_CONSTRAINT_TRIGGER";break; - case SQLITE_CONSTRAINT_FOREIGNKEY: - zName = "SQLITE_CONSTRAINT_FOREIGNKEY"; break; - case SQLITE_CONSTRAINT_CHECK: zName = "SQLITE_CONSTRAINT_CHECK"; break; - case SQLITE_CONSTRAINT_PRIMARYKEY: - zName = "SQLITE_CONSTRAINT_PRIMARYKEY"; break; - case SQLITE_CONSTRAINT_NOTNULL: zName = "SQLITE_CONSTRAINT_NOTNULL";break; - case SQLITE_CONSTRAINT_COMMITHOOK: - zName = "SQLITE_CONSTRAINT_COMMITHOOK"; break; - case SQLITE_CONSTRAINT_VTAB: zName = "SQLITE_CONSTRAINT_VTAB"; break; - case SQLITE_CONSTRAINT_FUNCTION: - zName = "SQLITE_CONSTRAINT_FUNCTION"; break; - case SQLITE_CONSTRAINT_ROWID: zName = "SQLITE_CONSTRAINT_ROWID"; break; - case SQLITE_MISMATCH: zName = "SQLITE_MISMATCH"; break; - case SQLITE_MISUSE: zName = "SQLITE_MISUSE"; break; - case SQLITE_NOLFS: zName = "SQLITE_NOLFS"; break; - case SQLITE_AUTH: zName = "SQLITE_AUTH"; break; - case SQLITE_FORMAT: zName = "SQLITE_FORMAT"; break; - case SQLITE_RANGE: zName = "SQLITE_RANGE"; break; - case SQLITE_NOTADB: zName = "SQLITE_NOTADB"; break; - case SQLITE_ROW: zName = "SQLITE_ROW"; break; - case SQLITE_NOTICE: zName = "SQLITE_NOTICE"; break; - case SQLITE_NOTICE_RECOVER_WAL: zName = "SQLITE_NOTICE_RECOVER_WAL";break; - case SQLITE_NOTICE_RECOVER_ROLLBACK: - zName = "SQLITE_NOTICE_RECOVER_ROLLBACK"; break; - case SQLITE_WARNING: zName = "SQLITE_WARNING"; break; - case SQLITE_WARNING_AUTOINDEX: zName = "SQLITE_WARNING_AUTOINDEX"; break; - case SQLITE_DONE: zName = "SQLITE_DONE"; break; - } - } - if( zName==0 ){ - static char zBuf[50]; - sqlite3_snprintf(sizeof(zBuf), zBuf, "SQLITE_UNKNOWN(%d)", origRc); - zName = zBuf; - } - return zName; -} -#endif - -/* -** Return a static string that describes the kind of error specified in the -** argument. 
-*/ -SQLITE_PRIVATE const char *sqlite3ErrStr(int rc){ - static const char* const aMsg[] = { - /* SQLITE_OK */ "not an error", - /* SQLITE_ERROR */ "SQL logic error or missing database", - /* SQLITE_INTERNAL */ 0, - /* SQLITE_PERM */ "access permission denied", - /* SQLITE_ABORT */ "callback requested query abort", - /* SQLITE_BUSY */ "database is locked", - /* SQLITE_LOCKED */ "database table is locked", - /* SQLITE_NOMEM */ "out of memory", - /* SQLITE_READONLY */ "attempt to write a readonly database", - /* SQLITE_INTERRUPT */ "interrupted", - /* SQLITE_IOERR */ "disk I/O error", - /* SQLITE_CORRUPT */ "database disk image is malformed", - /* SQLITE_NOTFOUND */ "unknown operation", - /* SQLITE_FULL */ "database or disk is full", - /* SQLITE_CANTOPEN */ "unable to open database file", - /* SQLITE_PROTOCOL */ "locking protocol", - /* SQLITE_EMPTY */ "table contains no data", - /* SQLITE_SCHEMA */ "database schema has changed", - /* SQLITE_TOOBIG */ "string or blob too big", - /* SQLITE_CONSTRAINT */ "constraint failed", - /* SQLITE_MISMATCH */ "datatype mismatch", - /* SQLITE_MISUSE */ "library routine called out of sequence", - /* SQLITE_NOLFS */ "large file support is disabled", - /* SQLITE_AUTH */ "authorization denied", - /* SQLITE_FORMAT */ "auxiliary database format error", - /* SQLITE_RANGE */ "bind or column index out of range", - /* SQLITE_NOTADB */ "file is encrypted or is not a database", - }; - const char *zErr = "unknown error"; - switch( rc ){ - case SQLITE_ABORT_ROLLBACK: { - zErr = "abort due to ROLLBACK"; - break; - } - default: { - rc &= 0xff; - if( ALWAYS(rc>=0) && rcbusyTimeout; - int delay, prior; - - assert( count>=0 ); - if( count < NDELAY ){ - delay = delays[count]; - prior = totals[count]; - }else{ - delay = delays[NDELAY-1]; - prior = totals[NDELAY-1] + delay*(count-(NDELAY-1)); - } - if( prior + delay > timeout ){ - delay = timeout - prior; - if( delay<=0 ) return 0; - } - sqlite3OsSleep(db->pVfs, delay*1000); - return 1; -#else - sqlite3 *db = (sqlite3 *)ptr; - int timeout = ((sqlite3 *)ptr)->busyTimeout; - if( (count+1)*1000 > timeout ){ - return 0; - } - sqlite3OsSleep(db->pVfs, 1000000); - return 1; -#endif -} - -/* -** Invoke the given busy handler. -** -** This routine is called when an operation failed with a lock. -** If this routine returns non-zero, the lock is retried. If it -** returns 0, the operation aborts with an SQLITE_BUSY error. -*/ -SQLITE_PRIVATE int sqlite3InvokeBusyHandler(BusyHandler *p){ - int rc; - if( NEVER(p==0) || p->xFunc==0 || p->nBusy<0 ) return 0; - rc = p->xFunc(p->pArg, p->nBusy); - if( rc==0 ){ - p->nBusy = -1; - }else{ - p->nBusy++; - } - return rc; -} - -/* -** This routine sets the busy callback for an Sqlite database to the -** given callback function with the given argument. -*/ -SQLITE_API int sqlite3_busy_handler( - sqlite3 *db, - int (*xBusy)(void*,int), - void *pArg -){ - sqlite3_mutex_enter(db->mutex); - db->busyHandler.xFunc = xBusy; - db->busyHandler.pArg = pArg; - db->busyHandler.nBusy = 0; - db->busyTimeout = 0; - sqlite3_mutex_leave(db->mutex); - return SQLITE_OK; -} - -#ifndef SQLITE_OMIT_PROGRESS_CALLBACK -/* -** This routine sets the progress callback for an Sqlite database to the -** given callback function with the given argument. The progress callback will -** be invoked every nOps opcodes. 
-*/ -SQLITE_API void sqlite3_progress_handler( - sqlite3 *db, - int nOps, - int (*xProgress)(void*), - void *pArg -){ - sqlite3_mutex_enter(db->mutex); - if( nOps>0 ){ - db->xProgress = xProgress; - db->nProgressOps = (unsigned)nOps; - db->pProgressArg = pArg; - }else{ - db->xProgress = 0; - db->nProgressOps = 0; - db->pProgressArg = 0; - } - sqlite3_mutex_leave(db->mutex); -} -#endif - - -/* -** This routine installs a default busy handler that waits for the -** specified number of milliseconds before returning 0. -*/ -SQLITE_API int sqlite3_busy_timeout(sqlite3 *db, int ms){ - if( ms>0 ){ - sqlite3_busy_handler(db, sqliteDefaultBusyCallback, (void*)db); - db->busyTimeout = ms; - }else{ - sqlite3_busy_handler(db, 0, 0); - } - return SQLITE_OK; -} - -/* -** Cause any pending operation to stop at its earliest opportunity. -*/ -SQLITE_API void sqlite3_interrupt(sqlite3 *db){ - db->u1.isInterrupted = 1; -} - - -/* -** This function is exactly the same as sqlite3_create_function(), except -** that it is designed to be called by internal code. The difference is -** that if a malloc() fails in sqlite3_create_function(), an error code -** is returned and the mallocFailed flag cleared. -*/ -SQLITE_PRIVATE int sqlite3CreateFunc( - sqlite3 *db, - const char *zFunctionName, - int nArg, - int enc, - void *pUserData, - void (*xFunc)(sqlite3_context*,int,sqlite3_value **), - void (*xStep)(sqlite3_context*,int,sqlite3_value **), - void (*xFinal)(sqlite3_context*), - FuncDestructor *pDestructor -){ - FuncDef *p; - int nName; - int extraFlags; - - assert( sqlite3_mutex_held(db->mutex) ); - if( zFunctionName==0 || - (xFunc && (xFinal || xStep)) || - (!xFunc && (xFinal && !xStep)) || - (!xFunc && (!xFinal && xStep)) || - (nArg<-1 || nArg>SQLITE_MAX_FUNCTION_ARG) || - (255<(nName = sqlite3Strlen30( zFunctionName))) ){ - return SQLITE_MISUSE_BKPT; - } - - assert( SQLITE_FUNC_CONSTANT==SQLITE_DETERMINISTIC ); - extraFlags = enc & SQLITE_DETERMINISTIC; - enc &= (SQLITE_FUNC_ENCMASK|SQLITE_ANY); - -#ifndef SQLITE_OMIT_UTF16 - /* If SQLITE_UTF16 is specified as the encoding type, transform this - ** to one of SQLITE_UTF16LE or SQLITE_UTF16BE using the - ** SQLITE_UTF16NATIVE macro. SQLITE_UTF16 is not used internally. - ** - ** If SQLITE_ANY is specified, add three versions of the function - ** to the hash table. - */ - if( enc==SQLITE_UTF16 ){ - enc = SQLITE_UTF16NATIVE; - }else if( enc==SQLITE_ANY ){ - int rc; - rc = sqlite3CreateFunc(db, zFunctionName, nArg, SQLITE_UTF8|extraFlags, - pUserData, xFunc, xStep, xFinal, pDestructor); - if( rc==SQLITE_OK ){ - rc = sqlite3CreateFunc(db, zFunctionName, nArg, SQLITE_UTF16LE|extraFlags, - pUserData, xFunc, xStep, xFinal, pDestructor); - } - if( rc!=SQLITE_OK ){ - return rc; - } - enc = SQLITE_UTF16BE; - } -#else - enc = SQLITE_UTF8; -#endif - - /* Check if an existing function is being overridden or deleted. If so, - ** and there are active VMs, then return SQLITE_BUSY. If a function - ** is being overridden/deleted but there are no active VMs, allow the - ** operation to continue but invalidate all precompiled statements. 
- */ - p = sqlite3FindFunction(db, zFunctionName, nName, nArg, (u8)enc, 0); - if( p && (p->funcFlags & SQLITE_FUNC_ENCMASK)==enc && p->nArg==nArg ){ - if( db->nVdbeActive ){ - sqlite3Error(db, SQLITE_BUSY, - "unable to delete/modify user-function due to active statements"); - assert( !db->mallocFailed ); - return SQLITE_BUSY; - }else{ - sqlite3ExpirePreparedStatements(db); - } - } - - p = sqlite3FindFunction(db, zFunctionName, nName, nArg, (u8)enc, 1); - assert(p || db->mallocFailed); - if( !p ){ - return SQLITE_NOMEM; - } - - /* If an older version of the function with a configured destructor is - ** being replaced invoke the destructor function here. */ - functionDestroy(db, p); - - if( pDestructor ){ - pDestructor->nRef++; - } - p->pDestructor = pDestructor; - p->funcFlags = (p->funcFlags & SQLITE_FUNC_ENCMASK) | extraFlags; - testcase( p->funcFlags & SQLITE_DETERMINISTIC ); - p->xFunc = xFunc; - p->xStep = xStep; - p->xFinalize = xFinal; - p->pUserData = pUserData; - p->nArg = (u16)nArg; - return SQLITE_OK; -} - -/* -** Create new user functions. -*/ -SQLITE_API int sqlite3_create_function( - sqlite3 *db, - const char *zFunc, - int nArg, - int enc, - void *p, - void (*xFunc)(sqlite3_context*,int,sqlite3_value **), - void (*xStep)(sqlite3_context*,int,sqlite3_value **), - void (*xFinal)(sqlite3_context*) -){ - return sqlite3_create_function_v2(db, zFunc, nArg, enc, p, xFunc, xStep, - xFinal, 0); -} - -SQLITE_API int sqlite3_create_function_v2( - sqlite3 *db, - const char *zFunc, - int nArg, - int enc, - void *p, - void (*xFunc)(sqlite3_context*,int,sqlite3_value **), - void (*xStep)(sqlite3_context*,int,sqlite3_value **), - void (*xFinal)(sqlite3_context*), - void (*xDestroy)(void *) -){ - int rc = SQLITE_ERROR; - FuncDestructor *pArg = 0; - sqlite3_mutex_enter(db->mutex); - if( xDestroy ){ - pArg = (FuncDestructor *)sqlite3DbMallocZero(db, sizeof(FuncDestructor)); - if( !pArg ){ - xDestroy(p); - goto out; - } - pArg->xDestroy = xDestroy; - pArg->pUserData = p; - } - rc = sqlite3CreateFunc(db, zFunc, nArg, enc, p, xFunc, xStep, xFinal, pArg); - if( pArg && pArg->nRef==0 ){ - assert( rc!=SQLITE_OK ); - xDestroy(p); - sqlite3DbFree(db, pArg); - } - - out: - rc = sqlite3ApiExit(db, rc); - sqlite3_mutex_leave(db->mutex); - return rc; -} - -#ifndef SQLITE_OMIT_UTF16 -SQLITE_API int sqlite3_create_function16( - sqlite3 *db, - const void *zFunctionName, - int nArg, - int eTextRep, - void *p, - void (*xFunc)(sqlite3_context*,int,sqlite3_value**), - void (*xStep)(sqlite3_context*,int,sqlite3_value**), - void (*xFinal)(sqlite3_context*) -){ - int rc; - char *zFunc8; - sqlite3_mutex_enter(db->mutex); - assert( !db->mallocFailed ); - zFunc8 = sqlite3Utf16to8(db, zFunctionName, -1, SQLITE_UTF16NATIVE); - rc = sqlite3CreateFunc(db, zFunc8, nArg, eTextRep, p, xFunc, xStep, xFinal,0); - sqlite3DbFree(db, zFunc8); - rc = sqlite3ApiExit(db, rc); - sqlite3_mutex_leave(db->mutex); - return rc; -} -#endif - - -/* -** Declare that a function has been overloaded by a virtual table. -** -** If the function already exists as a regular global function, then -** this routine is a no-op. If the function does not exist, then create -** a new one that always throws a run-time error. -** -** When virtual tables intend to provide an overloaded function, they -** should call this routine to make sure the global function exists. -** A global function must exist in order for name resolution to work -** properly. 
-*/ -SQLITE_API int sqlite3_overload_function( - sqlite3 *db, - const char *zName, - int nArg -){ - int nName = sqlite3Strlen30(zName); - int rc = SQLITE_OK; - sqlite3_mutex_enter(db->mutex); - if( sqlite3FindFunction(db, zName, nName, nArg, SQLITE_UTF8, 0)==0 ){ - rc = sqlite3CreateFunc(db, zName, nArg, SQLITE_UTF8, - 0, sqlite3InvalidFunction, 0, 0, 0); - } - rc = sqlite3ApiExit(db, rc); - sqlite3_mutex_leave(db->mutex); - return rc; -} - -#ifndef SQLITE_OMIT_TRACE -/* -** Register a trace function. The pArg from the previously registered trace -** is returned. -** -** A NULL trace function means that no tracing is executes. A non-NULL -** trace is a pointer to a function that is invoked at the start of each -** SQL statement. -*/ -SQLITE_API void *sqlite3_trace(sqlite3 *db, void (*xTrace)(void*,const char*), void *pArg){ - void *pOld; - sqlite3_mutex_enter(db->mutex); - pOld = db->pTraceArg; - db->xTrace = xTrace; - db->pTraceArg = pArg; - sqlite3_mutex_leave(db->mutex); - return pOld; -} -/* -** Register a profile function. The pArg from the previously registered -** profile function is returned. -** -** A NULL profile function means that no profiling is executes. A non-NULL -** profile is a pointer to a function that is invoked at the conclusion of -** each SQL statement that is run. -*/ -SQLITE_API void *sqlite3_profile( - sqlite3 *db, - void (*xProfile)(void*,const char*,sqlite_uint64), - void *pArg -){ - void *pOld; - sqlite3_mutex_enter(db->mutex); - pOld = db->pProfileArg; - db->xProfile = xProfile; - db->pProfileArg = pArg; - sqlite3_mutex_leave(db->mutex); - return pOld; -} -#endif /* SQLITE_OMIT_TRACE */ - -/* -** Register a function to be invoked when a transaction commits. -** If the invoked function returns non-zero, then the commit becomes a -** rollback. -*/ -SQLITE_API void *sqlite3_commit_hook( - sqlite3 *db, /* Attach the hook to this database */ - int (*xCallback)(void*), /* Function to invoke on each commit */ - void *pArg /* Argument to the function */ -){ - void *pOld; - sqlite3_mutex_enter(db->mutex); - pOld = db->pCommitArg; - db->xCommitCallback = xCallback; - db->pCommitArg = pArg; - sqlite3_mutex_leave(db->mutex); - return pOld; -} - -/* -** Register a callback to be invoked each time a row is updated, -** inserted or deleted using this database connection. -*/ -SQLITE_API void *sqlite3_update_hook( - sqlite3 *db, /* Attach the hook to this database */ - void (*xCallback)(void*,int,char const *,char const *,sqlite_int64), - void *pArg /* Argument to the function */ -){ - void *pRet; - sqlite3_mutex_enter(db->mutex); - pRet = db->pUpdateArg; - db->xUpdateCallback = xCallback; - db->pUpdateArg = pArg; - sqlite3_mutex_leave(db->mutex); - return pRet; -} - -/* -** Register a callback to be invoked each time a transaction is rolled -** back by this database connection. -*/ -SQLITE_API void *sqlite3_rollback_hook( - sqlite3 *db, /* Attach the hook to this database */ - void (*xCallback)(void*), /* Callback function */ - void *pArg /* Argument to the function */ -){ - void *pRet; - sqlite3_mutex_enter(db->mutex); - pRet = db->pRollbackArg; - db->xRollbackCallback = xCallback; - db->pRollbackArg = pArg; - sqlite3_mutex_leave(db->mutex); - return pRet; -} - -#ifndef SQLITE_OMIT_WAL -/* -** The sqlite3_wal_hook() callback registered by sqlite3_wal_autocheckpoint(). -** Invoke sqlite3_wal_checkpoint if the number of frames in the log file -** is greater than sqlite3.pWalArg cast to an integer (the value configured by -** wal_autocheckpoint()). 
-*/ -SQLITE_PRIVATE int sqlite3WalDefaultHook( - void *pClientData, /* Argument */ - sqlite3 *db, /* Connection */ - const char *zDb, /* Database */ - int nFrame /* Size of WAL */ -){ - if( nFrame>=SQLITE_PTR_TO_INT(pClientData) ){ - sqlite3BeginBenignMalloc(); - sqlite3_wal_checkpoint(db, zDb); - sqlite3EndBenignMalloc(); - } - return SQLITE_OK; -} -#endif /* SQLITE_OMIT_WAL */ - -/* -** Configure an sqlite3_wal_hook() callback to automatically checkpoint -** a database after committing a transaction if there are nFrame or -** more frames in the log file. Passing zero or a negative value as the -** nFrame parameter disables automatic checkpoints entirely. -** -** The callback registered by this function replaces any existing callback -** registered using sqlite3_wal_hook(). Likewise, registering a callback -** using sqlite3_wal_hook() disables the automatic checkpoint mechanism -** configured by this function. -*/ -SQLITE_API int sqlite3_wal_autocheckpoint(sqlite3 *db, int nFrame){ -#ifdef SQLITE_OMIT_WAL - UNUSED_PARAMETER(db); - UNUSED_PARAMETER(nFrame); -#else - if( nFrame>0 ){ - sqlite3_wal_hook(db, sqlite3WalDefaultHook, SQLITE_INT_TO_PTR(nFrame)); - }else{ - sqlite3_wal_hook(db, 0, 0); - } -#endif - return SQLITE_OK; -} - -/* -** Register a callback to be invoked each time a transaction is written -** into the write-ahead-log by this database connection. -*/ -SQLITE_API void *sqlite3_wal_hook( - sqlite3 *db, /* Attach the hook to this db handle */ - int(*xCallback)(void *, sqlite3*, const char*, int), - void *pArg /* First argument passed to xCallback() */ -){ -#ifndef SQLITE_OMIT_WAL - void *pRet; - sqlite3_mutex_enter(db->mutex); - pRet = db->pWalArg; - db->xWalCallback = xCallback; - db->pWalArg = pArg; - sqlite3_mutex_leave(db->mutex); - return pRet; -#else - return 0; -#endif -} - -/* -** Checkpoint database zDb. -*/ -SQLITE_API int sqlite3_wal_checkpoint_v2( - sqlite3 *db, /* Database handle */ - const char *zDb, /* Name of attached database (or NULL) */ - int eMode, /* SQLITE_CHECKPOINT_* value */ - int *pnLog, /* OUT: Size of WAL log in frames */ - int *pnCkpt /* OUT: Total number of frames checkpointed */ -){ -#ifdef SQLITE_OMIT_WAL - return SQLITE_OK; -#else - int rc; /* Return code */ - int iDb = SQLITE_MAX_ATTACHED; /* sqlite3.aDb[] index of db to checkpoint */ - - /* Initialize the output variables to -1 in case an error occurs. */ - if( pnLog ) *pnLog = -1; - if( pnCkpt ) *pnCkpt = -1; - - assert( SQLITE_CHECKPOINT_FULL>SQLITE_CHECKPOINT_PASSIVE ); - assert( SQLITE_CHECKPOINT_FULLSQLITE_CHECKPOINT_RESTART ){ - return SQLITE_MISUSE; - } - - sqlite3_mutex_enter(db->mutex); - if( zDb && zDb[0] ){ - iDb = sqlite3FindDbName(db, zDb); - } - if( iDb<0 ){ - rc = SQLITE_ERROR; - sqlite3Error(db, SQLITE_ERROR, "unknown database: %s", zDb); - }else{ - rc = sqlite3Checkpoint(db, iDb, eMode, pnLog, pnCkpt); - sqlite3Error(db, rc, 0); - } - rc = sqlite3ApiExit(db, rc); - sqlite3_mutex_leave(db->mutex); - return rc; -#endif -} - - -/* -** Checkpoint database zDb. If zDb is NULL, or if the buffer zDb points -** to contains a zero-length string, all attached databases are -** checkpointed. -*/ -SQLITE_API int sqlite3_wal_checkpoint(sqlite3 *db, const char *zDb){ - return sqlite3_wal_checkpoint_v2(db, zDb, SQLITE_CHECKPOINT_PASSIVE, 0, 0); -} - -#ifndef SQLITE_OMIT_WAL -/* -** Run a checkpoint on database iDb. This is a no-op if database iDb is -** not currently open in WAL mode. 
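/*
** Illustrative sketch: switching a (hypothetical) connection to WAL mode,
** raising the autocheckpoint threshold, and then requesting a checkpoint
** through the sqlite3_wal_checkpoint_v2() interface documented above.
*/
static int checkpointExample(sqlite3 *db){
  int nLog = 0, nCkpt = 0, rc;

  rc = sqlite3_exec(db, "PRAGMA journal_mode=WAL;", 0, 0, 0);
  if( rc!=SQLITE_OK ) return rc;

  /* Checkpoint automatically once the log reaches 2000 frames. */
  sqlite3_wal_autocheckpoint(db, 2000);

  /* Checkpoint the "main" database in FULL mode; nLog and nCkpt report
  ** the WAL size and the number of frames checkpointed. */
  rc = sqlite3_wal_checkpoint_v2(db, "main", SQLITE_CHECKPOINT_FULL,
                                 &nLog, &nCkpt);
  return rc;
}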
-** -** If a transaction is open on the database being checkpointed, this -** function returns SQLITE_LOCKED and a checkpoint is not attempted. If -** an error occurs while running the checkpoint, an SQLite error code is -** returned (i.e. SQLITE_IOERR). Otherwise, SQLITE_OK. -** -** The mutex on database handle db should be held by the caller. The mutex -** associated with the specific b-tree being checkpointed is taken by -** this function while the checkpoint is running. -** -** If iDb is passed SQLITE_MAX_ATTACHED, then all attached databases are -** checkpointed. If an error is encountered it is returned immediately - -** no attempt is made to checkpoint any remaining databases. -** -** Parameter eMode is one of SQLITE_CHECKPOINT_PASSIVE, FULL or RESTART. -*/ -SQLITE_PRIVATE int sqlite3Checkpoint(sqlite3 *db, int iDb, int eMode, int *pnLog, int *pnCkpt){ - int rc = SQLITE_OK; /* Return code */ - int i; /* Used to iterate through attached dbs */ - int bBusy = 0; /* True if SQLITE_BUSY has been encountered */ - - assert( sqlite3_mutex_held(db->mutex) ); - assert( !pnLog || *pnLog==-1 ); - assert( !pnCkpt || *pnCkpt==-1 ); - - for(i=0; inDb && rc==SQLITE_OK; i++){ - if( i==iDb || iDb==SQLITE_MAX_ATTACHED ){ - rc = sqlite3BtreeCheckpoint(db->aDb[i].pBt, eMode, pnLog, pnCkpt); - pnLog = 0; - pnCkpt = 0; - if( rc==SQLITE_BUSY ){ - bBusy = 1; - rc = SQLITE_OK; - } - } - } - - return (rc==SQLITE_OK && bBusy) ? SQLITE_BUSY : rc; -} -#endif /* SQLITE_OMIT_WAL */ - -/* -** This function returns true if main-memory should be used instead of -** a temporary file for transient pager files and statement journals. -** The value returned depends on the value of db->temp_store (runtime -** parameter) and the compile time value of SQLITE_TEMP_STORE. The -** following table describes the relationship between these two values -** and this functions return value. -** -** SQLITE_TEMP_STORE db->temp_store Location of temporary database -** ----------------- -------------- ------------------------------ -** 0 any file (return 0) -** 1 1 file (return 0) -** 1 2 memory (return 1) -** 1 0 file (return 0) -** 2 1 file (return 0) -** 2 2 memory (return 1) -** 2 0 memory (return 1) -** 3 any memory (return 1) -*/ -SQLITE_PRIVATE int sqlite3TempInMemory(const sqlite3 *db){ -#if SQLITE_TEMP_STORE==1 - return ( db->temp_store==2 ); -#endif -#if SQLITE_TEMP_STORE==2 - return ( db->temp_store!=1 ); -#endif -#if SQLITE_TEMP_STORE==3 - return 1; -#endif -#if SQLITE_TEMP_STORE<1 || SQLITE_TEMP_STORE>3 - return 0; -#endif -} - -/* -** Return UTF-8 encoded English language explanation of the most recent -** error. -*/ -SQLITE_API const char *sqlite3_errmsg(sqlite3 *db){ - const char *z; - if( !db ){ - return sqlite3ErrStr(SQLITE_NOMEM); - } - if( !sqlite3SafetyCheckSickOrOk(db) ){ - return sqlite3ErrStr(SQLITE_MISUSE_BKPT); - } - sqlite3_mutex_enter(db->mutex); - if( db->mallocFailed ){ - z = sqlite3ErrStr(SQLITE_NOMEM); - }else{ - testcase( db->pErr==0 ); - z = (char*)sqlite3_value_text(db->pErr); - assert( !db->mallocFailed ); - if( z==0 ){ - z = sqlite3ErrStr(db->errCode); - } - } - sqlite3_mutex_leave(db->mutex); - return z; -} - -#ifndef SQLITE_OMIT_UTF16 -/* -** Return UTF-16 encoded English language explanation of the most recent -** error. 
-*/ -SQLITE_API const void *sqlite3_errmsg16(sqlite3 *db){ - static const u16 outOfMem[] = { - 'o', 'u', 't', ' ', 'o', 'f', ' ', 'm', 'e', 'm', 'o', 'r', 'y', 0 - }; - static const u16 misuse[] = { - 'l', 'i', 'b', 'r', 'a', 'r', 'y', ' ', - 'r', 'o', 'u', 't', 'i', 'n', 'e', ' ', - 'c', 'a', 'l', 'l', 'e', 'd', ' ', - 'o', 'u', 't', ' ', - 'o', 'f', ' ', - 's', 'e', 'q', 'u', 'e', 'n', 'c', 'e', 0 - }; - - const void *z; - if( !db ){ - return (void *)outOfMem; - } - if( !sqlite3SafetyCheckSickOrOk(db) ){ - return (void *)misuse; - } - sqlite3_mutex_enter(db->mutex); - if( db->mallocFailed ){ - z = (void *)outOfMem; - }else{ - z = sqlite3_value_text16(db->pErr); - if( z==0 ){ - sqlite3Error(db, db->errCode, sqlite3ErrStr(db->errCode)); - z = sqlite3_value_text16(db->pErr); - } - /* A malloc() may have failed within the call to sqlite3_value_text16() - ** above. If this is the case, then the db->mallocFailed flag needs to - ** be cleared before returning. Do this directly, instead of via - ** sqlite3ApiExit(), to avoid setting the database handle error message. - */ - db->mallocFailed = 0; - } - sqlite3_mutex_leave(db->mutex); - return z; -} -#endif /* SQLITE_OMIT_UTF16 */ - -/* -** Return the most recent error code generated by an SQLite routine. If NULL is -** passed to this function, we assume a malloc() failed during sqlite3_open(). -*/ -SQLITE_API int sqlite3_errcode(sqlite3 *db){ - if( db && !sqlite3SafetyCheckSickOrOk(db) ){ - return SQLITE_MISUSE_BKPT; - } - if( !db || db->mallocFailed ){ - return SQLITE_NOMEM; - } - return db->errCode & db->errMask; -} -SQLITE_API int sqlite3_extended_errcode(sqlite3 *db){ - if( db && !sqlite3SafetyCheckSickOrOk(db) ){ - return SQLITE_MISUSE_BKPT; - } - if( !db || db->mallocFailed ){ - return SQLITE_NOMEM; - } - return db->errCode; -} - -/* -** Return a string that describes the kind of error specified in the -** argument. For now, this simply calls the internal sqlite3ErrStr() -** function. -*/ -SQLITE_API const char *sqlite3_errstr(int rc){ - return sqlite3ErrStr(rc); -} - -/* -** Invalidate all cached KeyInfo objects for database connection "db" -*/ -static void invalidateCachedKeyInfo(sqlite3 *db){ - Db *pDb; /* A single database */ - int iDb; /* The database index number */ - HashElem *k; /* For looping over tables in pDb */ - Table *pTab; /* A table in the database */ - Index *pIdx; /* Each index */ - - for(iDb=0, pDb=db->aDb; iDbnDb; iDb++, pDb++){ - if( pDb->pBt==0 ) continue; - sqlite3BtreeEnter(pDb->pBt); - for(k=sqliteHashFirst(&pDb->pSchema->tblHash); k; k=sqliteHashNext(k)){ - pTab = (Table*)sqliteHashData(k); - for(pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext){ - if( pIdx->pKeyInfo && pIdx->pKeyInfo->db==db ){ - sqlite3KeyInfoUnref(pIdx->pKeyInfo); - pIdx->pKeyInfo = 0; - } - } - } - sqlite3BtreeLeave(pDb->pBt); - } -} - -/* -** Create a new collating function for database "db". The name is zName -** and the encoding is enc. -*/ -static int createCollation( - sqlite3* db, - const char *zName, - u8 enc, - void* pCtx, - int(*xCompare)(void*,int,const void*,int,const void*), - void(*xDel)(void*) -){ - CollSeq *pColl; - int enc2; - int nName = sqlite3Strlen30(zName); - - assert( sqlite3_mutex_held(db->mutex) ); - - /* If SQLITE_UTF16 is specified as the encoding type, transform this - ** to one of SQLITE_UTF16LE or SQLITE_UTF16BE using the - ** SQLITE_UTF16NATIVE macro. SQLITE_UTF16 is not used internally. 
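/*
** Illustrative sketch: the usual error-reporting pattern built on the
** sqlite3_errcode()/sqlite3_errmsg() interfaces above. The SQL text and the
** connection are hypothetical; <stdio.h> is assumed to be available.
*/
static int prepareOrReport(sqlite3 *db, const char *zSql, sqlite3_stmt **ppStmt){
  int rc = sqlite3_prepare_v2(db, zSql, -1, ppStmt, 0);
  if( rc!=SQLITE_OK ){
    /* The message reflects the most recent failure on this connection. */
    fprintf(stderr, "prepare failed (%d/%d): %s\n",
            sqlite3_errcode(db), sqlite3_extended_errcode(db),
            sqlite3_errmsg(db));
  }
  return rc;
}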
- */ - enc2 = enc; - testcase( enc2==SQLITE_UTF16 ); - testcase( enc2==SQLITE_UTF16_ALIGNED ); - if( enc2==SQLITE_UTF16 || enc2==SQLITE_UTF16_ALIGNED ){ - enc2 = SQLITE_UTF16NATIVE; - } - if( enc2SQLITE_UTF16BE ){ - return SQLITE_MISUSE_BKPT; - } - - /* Check if this call is removing or replacing an existing collation - ** sequence. If so, and there are active VMs, return busy. If there - ** are no active VMs, invalidate any pre-compiled statements. - */ - pColl = sqlite3FindCollSeq(db, (u8)enc2, zName, 0); - if( pColl && pColl->xCmp ){ - if( db->nVdbeActive ){ - sqlite3Error(db, SQLITE_BUSY, - "unable to delete/modify collation sequence due to active statements"); - return SQLITE_BUSY; - } - sqlite3ExpirePreparedStatements(db); - invalidateCachedKeyInfo(db); - - /* If collation sequence pColl was created directly by a call to - ** sqlite3_create_collation, and not generated by synthCollSeq(), - ** then any copies made by synthCollSeq() need to be invalidated. - ** Also, collation destructor - CollSeq.xDel() - function may need - ** to be called. - */ - if( (pColl->enc & ~SQLITE_UTF16_ALIGNED)==enc2 ){ - CollSeq *aColl = sqlite3HashFind(&db->aCollSeq, zName, nName); - int j; - for(j=0; j<3; j++){ - CollSeq *p = &aColl[j]; - if( p->enc==pColl->enc ){ - if( p->xDel ){ - p->xDel(p->pUser); - } - p->xCmp = 0; - } - } - } - } - - pColl = sqlite3FindCollSeq(db, (u8)enc2, zName, 1); - if( pColl==0 ) return SQLITE_NOMEM; - pColl->xCmp = xCompare; - pColl->pUser = pCtx; - pColl->xDel = xDel; - pColl->enc = (u8)(enc2 | (enc & SQLITE_UTF16_ALIGNED)); - sqlite3Error(db, SQLITE_OK, 0); - return SQLITE_OK; -} - - -/* -** This array defines hard upper bounds on limit values. The -** initializer must be kept in sync with the SQLITE_LIMIT_* -** #defines in sqlite3.h. -*/ -static const int aHardLimit[] = { - SQLITE_MAX_LENGTH, - SQLITE_MAX_SQL_LENGTH, - SQLITE_MAX_COLUMN, - SQLITE_MAX_EXPR_DEPTH, - SQLITE_MAX_COMPOUND_SELECT, - SQLITE_MAX_VDBE_OP, - SQLITE_MAX_FUNCTION_ARG, - SQLITE_MAX_ATTACHED, - SQLITE_MAX_LIKE_PATTERN_LENGTH, - SQLITE_MAX_VARIABLE_NUMBER, - SQLITE_MAX_TRIGGER_DEPTH, -}; - -/* -** Make sure the hard limits are set to reasonable values -*/ -#if SQLITE_MAX_LENGTH<100 -# error SQLITE_MAX_LENGTH must be at least 100 -#endif -#if SQLITE_MAX_SQL_LENGTH<100 -# error SQLITE_MAX_SQL_LENGTH must be at least 100 -#endif -#if SQLITE_MAX_SQL_LENGTH>SQLITE_MAX_LENGTH -# error SQLITE_MAX_SQL_LENGTH must not be greater than SQLITE_MAX_LENGTH -#endif -#if SQLITE_MAX_COMPOUND_SELECT<2 -# error SQLITE_MAX_COMPOUND_SELECT must be at least 2 -#endif -#if SQLITE_MAX_VDBE_OP<40 -# error SQLITE_MAX_VDBE_OP must be at least 40 -#endif -#if SQLITE_MAX_FUNCTION_ARG<0 || SQLITE_MAX_FUNCTION_ARG>1000 -# error SQLITE_MAX_FUNCTION_ARG must be between 0 and 1000 -#endif -#if SQLITE_MAX_ATTACHED<0 || SQLITE_MAX_ATTACHED>62 -# error SQLITE_MAX_ATTACHED must be between 0 and 62 -#endif -#if SQLITE_MAX_LIKE_PATTERN_LENGTH<1 -# error SQLITE_MAX_LIKE_PATTERN_LENGTH must be at least 1 -#endif -#if SQLITE_MAX_COLUMN>32767 -# error SQLITE_MAX_COLUMN must not exceed 32767 -#endif -#if SQLITE_MAX_TRIGGER_DEPTH<1 -# error SQLITE_MAX_TRIGGER_DEPTH must be at least 1 -#endif - - -/* -** Change the value of a limit. Report the old value. -** If an invalid limit index is supplied, report -1. -** Make no changes but still report the old value if the -** new limit is negative. -** -** A new lower limit does not shrink existing constructs. -** It merely prevents new constructs that exceed the limit -** from forming. 
-*/ -SQLITE_API int sqlite3_limit(sqlite3 *db, int limitId, int newLimit){ - int oldLimit; - - - /* EVIDENCE-OF: R-30189-54097 For each limit category SQLITE_LIMIT_NAME - ** there is a hard upper bound set at compile-time by a C preprocessor - ** macro called SQLITE_MAX_NAME. (The "_LIMIT_" in the name is changed to - ** "_MAX_".) - */ - assert( aHardLimit[SQLITE_LIMIT_LENGTH]==SQLITE_MAX_LENGTH ); - assert( aHardLimit[SQLITE_LIMIT_SQL_LENGTH]==SQLITE_MAX_SQL_LENGTH ); - assert( aHardLimit[SQLITE_LIMIT_COLUMN]==SQLITE_MAX_COLUMN ); - assert( aHardLimit[SQLITE_LIMIT_EXPR_DEPTH]==SQLITE_MAX_EXPR_DEPTH ); - assert( aHardLimit[SQLITE_LIMIT_COMPOUND_SELECT]==SQLITE_MAX_COMPOUND_SELECT); - assert( aHardLimit[SQLITE_LIMIT_VDBE_OP]==SQLITE_MAX_VDBE_OP ); - assert( aHardLimit[SQLITE_LIMIT_FUNCTION_ARG]==SQLITE_MAX_FUNCTION_ARG ); - assert( aHardLimit[SQLITE_LIMIT_ATTACHED]==SQLITE_MAX_ATTACHED ); - assert( aHardLimit[SQLITE_LIMIT_LIKE_PATTERN_LENGTH]== - SQLITE_MAX_LIKE_PATTERN_LENGTH ); - assert( aHardLimit[SQLITE_LIMIT_VARIABLE_NUMBER]==SQLITE_MAX_VARIABLE_NUMBER); - assert( aHardLimit[SQLITE_LIMIT_TRIGGER_DEPTH]==SQLITE_MAX_TRIGGER_DEPTH ); - assert( SQLITE_LIMIT_TRIGGER_DEPTH==(SQLITE_N_LIMIT-1) ); - - - if( limitId<0 || limitId>=SQLITE_N_LIMIT ){ - return -1; - } - oldLimit = db->aLimit[limitId]; - if( newLimit>=0 ){ /* IMP: R-52476-28732 */ - if( newLimit>aHardLimit[limitId] ){ - newLimit = aHardLimit[limitId]; /* IMP: R-51463-25634 */ - } - db->aLimit[limitId] = newLimit; - } - return oldLimit; /* IMP: R-53341-35419 */ -} - -/* -** This function is used to parse both URIs and non-URI filenames passed by the -** user to API functions sqlite3_open() or sqlite3_open_v2(), and for database -** URIs specified as part of ATTACH statements. -** -** The first argument to this function is the name of the VFS to use (or -** a NULL to signify the default VFS) if the URI does not contain a "vfs=xxx" -** query parameter. The second argument contains the URI (or non-URI filename) -** itself. When this function is called the *pFlags variable should contain -** the default flags to open the database handle with. The value stored in -** *pFlags may be updated before returning if the URI filename contains -** "cache=xxx" or "mode=xxx" query parameters. -** -** If successful, SQLITE_OK is returned. In this case *ppVfs is set to point to -** the VFS that should be used to open the database file. *pzFile is set to -** point to a buffer containing the name of the file to open. It is the -** responsibility of the caller to eventually call sqlite3_free() to release -** this buffer. -** -** If an error occurs, then an SQLite error code is returned and *pzErrMsg -** may be set to point to a buffer containing an English language error -** message. It is the responsibility of the caller to eventually release -** this buffer by calling sqlite3_free(). 
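/*
** Illustrative sketch: using sqlite3_limit() as documented above. A negative
** new value only queries the current limit; the 100000-byte cap chosen here
** is an arbitrary example value, not a recommendation.
*/
static void tightenSqlLength(sqlite3 *db){
  int prev = sqlite3_limit(db, SQLITE_LIMIT_SQL_LENGTH, -1);  /* query only */
  if( prev>100000 ){
    sqlite3_limit(db, SQLITE_LIMIT_SQL_LENGTH, 100000);       /* lower the cap */
  }
}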
-*/ -SQLITE_PRIVATE int sqlite3ParseUri( - const char *zDefaultVfs, /* VFS to use if no "vfs=xxx" query option */ - const char *zUri, /* Nul-terminated URI to parse */ - unsigned int *pFlags, /* IN/OUT: SQLITE_OPEN_XXX flags */ - sqlite3_vfs **ppVfs, /* OUT: VFS to use */ - char **pzFile, /* OUT: Filename component of URI */ - char **pzErrMsg /* OUT: Error message (if rc!=SQLITE_OK) */ -){ - int rc = SQLITE_OK; - unsigned int flags = *pFlags; - const char *zVfs = zDefaultVfs; - char *zFile; - char c; - int nUri = sqlite3Strlen30(zUri); - - assert( *pzErrMsg==0 ); - - if( ((flags & SQLITE_OPEN_URI) || sqlite3GlobalConfig.bOpenUri) - && nUri>=5 && memcmp(zUri, "file:", 5)==0 - ){ - char *zOpt; - int eState; /* Parser state when parsing URI */ - int iIn; /* Input character index */ - int iOut = 0; /* Output character index */ - int nByte = nUri+2; /* Bytes of space to allocate */ - - /* Make sure the SQLITE_OPEN_URI flag is set to indicate to the VFS xOpen - ** method that there may be extra parameters following the file-name. */ - flags |= SQLITE_OPEN_URI; - - for(iIn=0; iIn=0 && octet<256 ); - if( octet==0 ){ - /* This branch is taken when "%00" appears within the URI. In this - ** case we ignore all text in the remainder of the path, name or - ** value currently being parsed. So ignore the current character - ** and skip to the next "?", "=" or "&", as appropriate. */ - while( (c = zUri[iIn])!=0 && c!='#' - && (eState!=0 || c!='?') - && (eState!=1 || (c!='=' && c!='&')) - && (eState!=2 || c!='&') - ){ - iIn++; - } - continue; - } - c = octet; - }else if( eState==1 && (c=='&' || c=='=') ){ - if( zFile[iOut-1]==0 ){ - /* An empty option name. Ignore this option altogether. */ - while( zUri[iIn] && zUri[iIn]!='#' && zUri[iIn-1]!='&' ) iIn++; - continue; - } - if( c=='&' ){ - zFile[iOut++] = '\0'; - }else{ - eState = 2; - } - c = 0; - }else if( (eState==0 && c=='?') || (eState==2 && c=='&') ){ - c = 0; - eState = 1; - } - zFile[iOut++] = c; - } - if( eState==1 ) zFile[iOut++] = '\0'; - zFile[iOut++] = '\0'; - zFile[iOut++] = '\0'; - - /* Check if there were any options specified that should be interpreted - ** here. Options that are interpreted here include "vfs" and those that - ** correspond to flags that may be passed to the sqlite3_open_v2() - ** method. 
*/ - zOpt = &zFile[sqlite3Strlen30(zFile)+1]; - while( zOpt[0] ){ - int nOpt = sqlite3Strlen30(zOpt); - char *zVal = &zOpt[nOpt+1]; - int nVal = sqlite3Strlen30(zVal); - - if( nOpt==3 && memcmp("vfs", zOpt, 3)==0 ){ - zVfs = zVal; - }else{ - struct OpenMode { - const char *z; - int mode; - } *aMode = 0; - char *zModeType = 0; - int mask = 0; - int limit = 0; - - if( nOpt==5 && memcmp("cache", zOpt, 5)==0 ){ - static struct OpenMode aCacheMode[] = { - { "shared", SQLITE_OPEN_SHAREDCACHE }, - { "private", SQLITE_OPEN_PRIVATECACHE }, - { 0, 0 } - }; - - mask = SQLITE_OPEN_SHAREDCACHE|SQLITE_OPEN_PRIVATECACHE; - aMode = aCacheMode; - limit = mask; - zModeType = "cache"; - } - if( nOpt==4 && memcmp("mode", zOpt, 4)==0 ){ - static struct OpenMode aOpenMode[] = { - { "ro", SQLITE_OPEN_READONLY }, - { "rw", SQLITE_OPEN_READWRITE }, - { "rwc", SQLITE_OPEN_READWRITE | SQLITE_OPEN_CREATE }, - { "memory", SQLITE_OPEN_MEMORY }, - { 0, 0 } - }; - - mask = SQLITE_OPEN_READONLY | SQLITE_OPEN_READWRITE - | SQLITE_OPEN_CREATE | SQLITE_OPEN_MEMORY; - aMode = aOpenMode; - limit = mask & flags; - zModeType = "access"; - } - - if( aMode ){ - int i; - int mode = 0; - for(i=0; aMode[i].z; i++){ - const char *z = aMode[i].z; - if( nVal==sqlite3Strlen30(z) && 0==memcmp(zVal, z, nVal) ){ - mode = aMode[i].mode; - break; - } - } - if( mode==0 ){ - *pzErrMsg = sqlite3_mprintf("no such %s mode: %s", zModeType, zVal); - rc = SQLITE_ERROR; - goto parse_uri_out; - } - if( (mode & ~SQLITE_OPEN_MEMORY)>limit ){ - *pzErrMsg = sqlite3_mprintf("%s mode not allowed: %s", - zModeType, zVal); - rc = SQLITE_PERM; - goto parse_uri_out; - } - flags = (flags & ~mask) | mode; - } - } - - zOpt = &zVal[nVal+1]; - } - - }else{ - zFile = sqlite3_malloc(nUri+2); - if( !zFile ) return SQLITE_NOMEM; - memcpy(zFile, zUri, nUri); - zFile[nUri] = '\0'; - zFile[nUri+1] = '\0'; - flags &= ~SQLITE_OPEN_URI; - } - - *ppVfs = sqlite3_vfs_find(zVfs); - if( *ppVfs==0 ){ - *pzErrMsg = sqlite3_mprintf("no such vfs: %s", zVfs); - rc = SQLITE_ERROR; - } - parse_uri_out: - if( rc!=SQLITE_OK ){ - sqlite3_free(zFile); - zFile = 0; - } - *pFlags = flags; - *pzFile = zFile; - return rc; -} - - -/* -** This routine does the work of opening a database on behalf of -** sqlite3_open() and sqlite3_open16(). The database filename "zFilename" -** is UTF-8 encoded. -*/ -static int openDatabase( - const char *zFilename, /* Database filename UTF-8 encoded */ - sqlite3 **ppDb, /* OUT: Returned database handle */ - unsigned int flags, /* Operational flags */ - const char *zVfs /* Name of the VFS to use */ -){ - sqlite3 *db; /* Store allocated handle here */ - int rc; /* Return code */ - int isThreadsafe; /* True for threadsafe connections */ - char *zOpen = 0; /* Filename argument to pass to BtreeOpen() */ - char *zErrMsg = 0; /* Error message from sqlite3ParseUri() */ - - *ppDb = 0; -#ifndef SQLITE_OMIT_AUTOINIT - rc = sqlite3_initialize(); - if( rc ) return rc; -#endif - - /* Only allow sensible combinations of bits in the flags argument. - ** Throw an error if any non-sense combination is used. If we - ** do not block illegal combinations here, it could trigger - ** assert() statements in deeper layers. 
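/*
** Illustrative sketch: opening a database with a URI filename so that the
** "mode" and "cache" query parameters parsed above take effect. The path
** "file:data.db" is hypothetical; passing SQLITE_OPEN_URI enables URI
** handling even when the global URI option is off.
*/
static int openReadOnlyShared(sqlite3 **ppDb){
  return sqlite3_open_v2("file:data.db?mode=ro&cache=shared", ppDb,
                         SQLITE_OPEN_READONLY | SQLITE_OPEN_URI, 0);
}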
Sensible combinations - ** are: - ** - ** 1: SQLITE_OPEN_READONLY - ** 2: SQLITE_OPEN_READWRITE - ** 6: SQLITE_OPEN_READWRITE | SQLITE_OPEN_CREATE - */ - assert( SQLITE_OPEN_READONLY == 0x01 ); - assert( SQLITE_OPEN_READWRITE == 0x02 ); - assert( SQLITE_OPEN_CREATE == 0x04 ); - testcase( (1<<(flags&7))==0x02 ); /* READONLY */ - testcase( (1<<(flags&7))==0x04 ); /* READWRITE */ - testcase( (1<<(flags&7))==0x40 ); /* READWRITE | CREATE */ - if( ((1<<(flags&7)) & 0x46)==0 ) return SQLITE_MISUSE_BKPT; - - if( sqlite3GlobalConfig.bCoreMutex==0 ){ - isThreadsafe = 0; - }else if( flags & SQLITE_OPEN_NOMUTEX ){ - isThreadsafe = 0; - }else if( flags & SQLITE_OPEN_FULLMUTEX ){ - isThreadsafe = 1; - }else{ - isThreadsafe = sqlite3GlobalConfig.bFullMutex; - } - if( flags & SQLITE_OPEN_PRIVATECACHE ){ - flags &= ~SQLITE_OPEN_SHAREDCACHE; - }else if( sqlite3GlobalConfig.sharedCacheEnabled ){ - flags |= SQLITE_OPEN_SHAREDCACHE; - } - - /* Remove harmful bits from the flags parameter - ** - ** The SQLITE_OPEN_NOMUTEX and SQLITE_OPEN_FULLMUTEX flags were - ** dealt with in the previous code block. Besides these, the only - ** valid input flags for sqlite3_open_v2() are SQLITE_OPEN_READONLY, - ** SQLITE_OPEN_READWRITE, SQLITE_OPEN_CREATE, SQLITE_OPEN_SHAREDCACHE, - ** SQLITE_OPEN_PRIVATECACHE, and some reserved bits. Silently mask - ** off all other flags. - */ - flags &= ~( SQLITE_OPEN_DELETEONCLOSE | - SQLITE_OPEN_EXCLUSIVE | - SQLITE_OPEN_MAIN_DB | - SQLITE_OPEN_TEMP_DB | - SQLITE_OPEN_TRANSIENT_DB | - SQLITE_OPEN_MAIN_JOURNAL | - SQLITE_OPEN_TEMP_JOURNAL | - SQLITE_OPEN_SUBJOURNAL | - SQLITE_OPEN_MASTER_JOURNAL | - SQLITE_OPEN_NOMUTEX | - SQLITE_OPEN_FULLMUTEX | - SQLITE_OPEN_WAL - ); - - /* Allocate the sqlite data structure */ - db = sqlite3MallocZero( sizeof(sqlite3) ); - if( db==0 ) goto opendb_out; - if( isThreadsafe ){ - db->mutex = sqlite3MutexAlloc(SQLITE_MUTEX_RECURSIVE); - if( db->mutex==0 ){ - sqlite3_free(db); - db = 0; - goto opendb_out; - } - } - sqlite3_mutex_enter(db->mutex); - db->errMask = 0xff; - db->nDb = 2; - db->magic = SQLITE_MAGIC_BUSY; - db->aDb = db->aDbStatic; - - assert( sizeof(db->aLimit)==sizeof(aHardLimit) ); - memcpy(db->aLimit, aHardLimit, sizeof(db->aLimit)); - db->autoCommit = 1; - db->nextAutovac = -1; - db->szMmap = sqlite3GlobalConfig.szMmap; - db->nextPagesize = 0; - db->flags |= SQLITE_ShortColNames | SQLITE_EnableTrigger | SQLITE_CacheSpill -#if !defined(SQLITE_DEFAULT_AUTOMATIC_INDEX) || SQLITE_DEFAULT_AUTOMATIC_INDEX - | SQLITE_AutoIndex -#endif -#if SQLITE_DEFAULT_FILE_FORMAT<4 - | SQLITE_LegacyFileFmt -#endif -#ifdef SQLITE_ENABLE_LOAD_EXTENSION - | SQLITE_LoadExtension -#endif -#if SQLITE_DEFAULT_RECURSIVE_TRIGGERS - | SQLITE_RecTriggers -#endif -#if defined(SQLITE_DEFAULT_FOREIGN_KEYS) && SQLITE_DEFAULT_FOREIGN_KEYS - | SQLITE_ForeignKeys -#endif - ; - sqlite3HashInit(&db->aCollSeq); -#ifndef SQLITE_OMIT_VIRTUALTABLE - sqlite3HashInit(&db->aModule); -#endif - - /* Add the default collation sequence BINARY. BINARY works for both UTF-8 - ** and UTF-16, so add a version for each to avoid any unnecessary - ** conversions. The only error that can occur here is a malloc() failure. 
- */ - createCollation(db, "BINARY", SQLITE_UTF8, 0, binCollFunc, 0); - createCollation(db, "BINARY", SQLITE_UTF16BE, 0, binCollFunc, 0); - createCollation(db, "BINARY", SQLITE_UTF16LE, 0, binCollFunc, 0); - createCollation(db, "RTRIM", SQLITE_UTF8, (void*)1, binCollFunc, 0); - if( db->mallocFailed ){ - goto opendb_out; - } - db->pDfltColl = sqlite3FindCollSeq(db, SQLITE_UTF8, "BINARY", 0); - assert( db->pDfltColl!=0 ); - - /* Also add a UTF-8 case-insensitive collation sequence. */ - createCollation(db, "NOCASE", SQLITE_UTF8, 0, nocaseCollatingFunc, 0); - - /* Parse the filename/URI argument. */ - db->openFlags = flags; - rc = sqlite3ParseUri(zVfs, zFilename, &flags, &db->pVfs, &zOpen, &zErrMsg); - if( rc!=SQLITE_OK ){ - if( rc==SQLITE_NOMEM ) db->mallocFailed = 1; - sqlite3Error(db, rc, zErrMsg ? "%s" : 0, zErrMsg); - sqlite3_free(zErrMsg); - goto opendb_out; - } - - /* Open the backend database driver */ - rc = sqlite3BtreeOpen(db->pVfs, zOpen, db, &db->aDb[0].pBt, 0, - flags | SQLITE_OPEN_MAIN_DB); - if( rc!=SQLITE_OK ){ - if( rc==SQLITE_IOERR_NOMEM ){ - rc = SQLITE_NOMEM; - } - sqlite3Error(db, rc, 0); - goto opendb_out; - } - db->aDb[0].pSchema = sqlite3SchemaGet(db, db->aDb[0].pBt); - db->aDb[1].pSchema = sqlite3SchemaGet(db, 0); - - - /* The default safety_level for the main database is 'full'; for the temp - ** database it is 'NONE'. This matches the pager layer defaults. - */ - db->aDb[0].zName = "main"; - db->aDb[0].safety_level = 3; - db->aDb[1].zName = "temp"; - db->aDb[1].safety_level = 1; - - db->magic = SQLITE_MAGIC_OPEN; - if( db->mallocFailed ){ - goto opendb_out; - } - - /* Register all built-in functions, but do not attempt to read the - ** database schema yet. This is delayed until the first time the database - ** is accessed. - */ - sqlite3Error(db, SQLITE_OK, 0); - sqlite3RegisterBuiltinFunctions(db); - - /* Load automatic extensions - extensions that have been registered - ** using the sqlite3_automatic_extension() API. - */ - rc = sqlite3_errcode(db); - if( rc==SQLITE_OK ){ - sqlite3AutoLoadExtensions(db); - rc = sqlite3_errcode(db); - if( rc!=SQLITE_OK ){ - goto opendb_out; - } - } - -#ifdef SQLITE_ENABLE_FTS1 - if( !db->mallocFailed ){ - extern int sqlite3Fts1Init(sqlite3*); - rc = sqlite3Fts1Init(db); - } -#endif - -#ifdef SQLITE_ENABLE_FTS2 - if( !db->mallocFailed && rc==SQLITE_OK ){ - extern int sqlite3Fts2Init(sqlite3*); - rc = sqlite3Fts2Init(db); - } -#endif - -#ifdef SQLITE_ENABLE_FTS3 - if( !db->mallocFailed && rc==SQLITE_OK ){ - rc = sqlite3Fts3Init(db); - } -#endif - -#ifdef SQLITE_ENABLE_ICU - if( !db->mallocFailed && rc==SQLITE_OK ){ - rc = sqlite3IcuInit(db); - } -#endif - -#ifdef SQLITE_ENABLE_RTREE - if( !db->mallocFailed && rc==SQLITE_OK){ - rc = sqlite3RtreeInit(db); - } -#endif - - /* -DSQLITE_DEFAULT_LOCKING_MODE=1 makes EXCLUSIVE the default locking - ** mode. -DSQLITE_DEFAULT_LOCKING_MODE=0 make NORMAL the default locking - ** mode. Doing nothing at all also makes NORMAL the default. 
- */ -#ifdef SQLITE_DEFAULT_LOCKING_MODE - db->dfltLockMode = SQLITE_DEFAULT_LOCKING_MODE; - sqlite3PagerLockingMode(sqlite3BtreePager(db->aDb[0].pBt), - SQLITE_DEFAULT_LOCKING_MODE); -#endif - - if( rc ) sqlite3Error(db, rc, 0); - - /* Enable the lookaside-malloc subsystem */ - setupLookaside(db, 0, sqlite3GlobalConfig.szLookaside, - sqlite3GlobalConfig.nLookaside); - - sqlite3_wal_autocheckpoint(db, SQLITE_DEFAULT_WAL_AUTOCHECKPOINT); - -opendb_out: - sqlite3_free(zOpen); - if( db ){ - assert( db->mutex!=0 || isThreadsafe==0 || sqlite3GlobalConfig.bFullMutex==0 ); - sqlite3_mutex_leave(db->mutex); - } - rc = sqlite3_errcode(db); - assert( db!=0 || rc==SQLITE_NOMEM ); - if( rc==SQLITE_NOMEM ){ - sqlite3_close(db); - db = 0; - }else if( rc!=SQLITE_OK ){ - db->magic = SQLITE_MAGIC_SICK; - } - *ppDb = db; -#ifdef SQLITE_ENABLE_SQLLOG - if( sqlite3GlobalConfig.xSqllog ){ - /* Opening a db handle. Fourth parameter is passed 0. */ - void *pArg = sqlite3GlobalConfig.pSqllogArg; - sqlite3GlobalConfig.xSqllog(pArg, db, zFilename, 0); - } -#endif - return sqlite3ApiExit(0, rc); -} - -/* -** Open a new database handle. -*/ -SQLITE_API int sqlite3_open( - const char *zFilename, - sqlite3 **ppDb -){ - return openDatabase(zFilename, ppDb, - SQLITE_OPEN_READWRITE | SQLITE_OPEN_CREATE, 0); -} -SQLITE_API int sqlite3_open_v2( - const char *filename, /* Database filename (UTF-8) */ - sqlite3 **ppDb, /* OUT: SQLite db handle */ - int flags, /* Flags */ - const char *zVfs /* Name of VFS module to use */ -){ - return openDatabase(filename, ppDb, (unsigned int)flags, zVfs); -} - -#ifndef SQLITE_OMIT_UTF16 -/* -** Open a new database handle. -*/ -SQLITE_API int sqlite3_open16( - const void *zFilename, - sqlite3 **ppDb -){ - char const *zFilename8; /* zFilename encoded in UTF-8 instead of UTF-16 */ - sqlite3_value *pVal; - int rc; - - assert( zFilename ); - assert( ppDb ); - *ppDb = 0; -#ifndef SQLITE_OMIT_AUTOINIT - rc = sqlite3_initialize(); - if( rc ) return rc; -#endif - pVal = sqlite3ValueNew(0); - sqlite3ValueSetStr(pVal, -1, zFilename, SQLITE_UTF16NATIVE, SQLITE_STATIC); - zFilename8 = sqlite3ValueText(pVal, SQLITE_UTF8); - if( zFilename8 ){ - rc = openDatabase(zFilename8, ppDb, - SQLITE_OPEN_READWRITE | SQLITE_OPEN_CREATE, 0); - assert( *ppDb || rc==SQLITE_NOMEM ); - if( rc==SQLITE_OK && !DbHasProperty(*ppDb, 0, DB_SchemaLoaded) ){ - ENC(*ppDb) = SQLITE_UTF16NATIVE; - } - }else{ - rc = SQLITE_NOMEM; - } - sqlite3ValueFree(pVal); - - return sqlite3ApiExit(0, rc); -} -#endif /* SQLITE_OMIT_UTF16 */ - -/* -** Register a new collation sequence with the database handle db. -*/ -SQLITE_API int sqlite3_create_collation( - sqlite3* db, - const char *zName, - int enc, - void* pCtx, - int(*xCompare)(void*,int,const void*,int,const void*) -){ - int rc; - sqlite3_mutex_enter(db->mutex); - assert( !db->mallocFailed ); - rc = createCollation(db, zName, (u8)enc, pCtx, xCompare, 0); - rc = sqlite3ApiExit(db, rc); - sqlite3_mutex_leave(db->mutex); - return rc; -} - -/* -** Register a new collation sequence with the database handle db. 
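/*
** Illustrative sketch: registering a custom collation with
** sqlite3_create_collation() as documented above. The comparator sorts
** case-insensitively using the public sqlite3_strnicmp() helper; the
** collation name "NOCASE_LEN" is hypothetical.
*/
static int nocaseLenCompare(void *pCtx, int n1, const void *z1,
                            int n2, const void *z2){
  int n = n1<n2 ? n1 : n2;
  int c = sqlite3_strnicmp((const char*)z1, (const char*)z2, n);
  (void)pCtx;
  return c ? c : n1-n2;        /* on a tie, the shorter string sorts first */
}

static int registerCollation(sqlite3 *db){
  return sqlite3_create_collation(db, "NOCASE_LEN", SQLITE_UTF8,
                                  0, nocaseLenCompare);
}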
-*/ -SQLITE_API int sqlite3_create_collation_v2( - sqlite3* db, - const char *zName, - int enc, - void* pCtx, - int(*xCompare)(void*,int,const void*,int,const void*), - void(*xDel)(void*) -){ - int rc; - sqlite3_mutex_enter(db->mutex); - assert( !db->mallocFailed ); - rc = createCollation(db, zName, (u8)enc, pCtx, xCompare, xDel); - rc = sqlite3ApiExit(db, rc); - sqlite3_mutex_leave(db->mutex); - return rc; -} - -#ifndef SQLITE_OMIT_UTF16 -/* -** Register a new collation sequence with the database handle db. -*/ -SQLITE_API int sqlite3_create_collation16( - sqlite3* db, - const void *zName, - int enc, - void* pCtx, - int(*xCompare)(void*,int,const void*,int,const void*) -){ - int rc = SQLITE_OK; - char *zName8; - sqlite3_mutex_enter(db->mutex); - assert( !db->mallocFailed ); - zName8 = sqlite3Utf16to8(db, zName, -1, SQLITE_UTF16NATIVE); - if( zName8 ){ - rc = createCollation(db, zName8, (u8)enc, pCtx, xCompare, 0); - sqlite3DbFree(db, zName8); - } - rc = sqlite3ApiExit(db, rc); - sqlite3_mutex_leave(db->mutex); - return rc; -} -#endif /* SQLITE_OMIT_UTF16 */ - -/* -** Register a collation sequence factory callback with the database handle -** db. Replace any previously installed collation sequence factory. -*/ -SQLITE_API int sqlite3_collation_needed( - sqlite3 *db, - void *pCollNeededArg, - void(*xCollNeeded)(void*,sqlite3*,int eTextRep,const char*) -){ - sqlite3_mutex_enter(db->mutex); - db->xCollNeeded = xCollNeeded; - db->xCollNeeded16 = 0; - db->pCollNeededArg = pCollNeededArg; - sqlite3_mutex_leave(db->mutex); - return SQLITE_OK; -} - -#ifndef SQLITE_OMIT_UTF16 -/* -** Register a collation sequence factory callback with the database handle -** db. Replace any previously installed collation sequence factory. -*/ -SQLITE_API int sqlite3_collation_needed16( - sqlite3 *db, - void *pCollNeededArg, - void(*xCollNeeded16)(void*,sqlite3*,int eTextRep,const void*) -){ - sqlite3_mutex_enter(db->mutex); - db->xCollNeeded = 0; - db->xCollNeeded16 = xCollNeeded16; - db->pCollNeededArg = pCollNeededArg; - sqlite3_mutex_leave(db->mutex); - return SQLITE_OK; -} -#endif /* SQLITE_OMIT_UTF16 */ - -#ifndef SQLITE_OMIT_DEPRECATED -/* -** This function is now an anachronism. It used to be used to recover from a -** malloc() failure, but SQLite now does this automatically. -*/ -SQLITE_API int sqlite3_global_recover(void){ - return SQLITE_OK; -} -#endif - -/* -** Test to see whether or not the database connection is in autocommit -** mode. Return TRUE if it is and FALSE if not. Autocommit mode is on -** by default. Autocommit is disabled by a BEGIN statement and reenabled -** by the next COMMIT or ROLLBACK. -*/ -SQLITE_API int sqlite3_get_autocommit(sqlite3 *db){ - return db->autoCommit; -} - -/* -** The following routines are subtitutes for constants SQLITE_CORRUPT, -** SQLITE_MISUSE, SQLITE_CANTOPEN, SQLITE_IOERR and possibly other error -** constants. They server two purposes: -** -** 1. Serve as a convenient place to set a breakpoint in a debugger -** to detect when version error conditions occurs. -** -** 2. Invoke sqlite3_log() to provide the source code location where -** a low-level error is first detected. 
-*/ -SQLITE_PRIVATE int sqlite3CorruptError(int lineno){ - testcase( sqlite3GlobalConfig.xLog!=0 ); - sqlite3_log(SQLITE_CORRUPT, - "database corruption at line %d of [%.10s]", - lineno, 20+sqlite3_sourceid()); - return SQLITE_CORRUPT; -} -SQLITE_PRIVATE int sqlite3MisuseError(int lineno){ - testcase( sqlite3GlobalConfig.xLog!=0 ); - sqlite3_log(SQLITE_MISUSE, - "misuse at line %d of [%.10s]", - lineno, 20+sqlite3_sourceid()); - return SQLITE_MISUSE; -} -SQLITE_PRIVATE int sqlite3CantopenError(int lineno){ - testcase( sqlite3GlobalConfig.xLog!=0 ); - sqlite3_log(SQLITE_CANTOPEN, - "cannot open file at line %d of [%.10s]", - lineno, 20+sqlite3_sourceid()); - return SQLITE_CANTOPEN; -} - - -#ifndef SQLITE_OMIT_DEPRECATED -/* -** This is a convenience routine that makes sure that all thread-specific -** data for this thread has been deallocated. -** -** SQLite no longer uses thread-specific data so this routine is now a -** no-op. It is retained for historical compatibility. -*/ -SQLITE_API void sqlite3_thread_cleanup(void){ -} -#endif - -/* -** Return meta information about a specific column of a database table. -** See comment in sqlite3.h (sqlite.h.in) for details. -*/ -#ifdef SQLITE_ENABLE_COLUMN_METADATA -SQLITE_API int sqlite3_table_column_metadata( - sqlite3 *db, /* Connection handle */ - const char *zDbName, /* Database name or NULL */ - const char *zTableName, /* Table name */ - const char *zColumnName, /* Column name */ - char const **pzDataType, /* OUTPUT: Declared data type */ - char const **pzCollSeq, /* OUTPUT: Collation sequence name */ - int *pNotNull, /* OUTPUT: True if NOT NULL constraint exists */ - int *pPrimaryKey, /* OUTPUT: True if column part of PK */ - int *pAutoinc /* OUTPUT: True if column is auto-increment */ -){ - int rc; - char *zErrMsg = 0; - Table *pTab = 0; - Column *pCol = 0; - int iCol; - - char const *zDataType = 0; - char const *zCollSeq = 0; - int notnull = 0; - int primarykey = 0; - int autoinc = 0; - - /* Ensure the database schema has been loaded */ - sqlite3_mutex_enter(db->mutex); - sqlite3BtreeEnterAll(db); - rc = sqlite3Init(db, &zErrMsg); - if( SQLITE_OK!=rc ){ - goto error_out; - } - - /* Locate the table in question */ - pTab = sqlite3FindTable(db, zTableName, zDbName); - if( !pTab || pTab->pSelect ){ - pTab = 0; - goto error_out; - } - - /* Find the column for which info is requested */ - if( sqlite3IsRowid(zColumnName) ){ - iCol = pTab->iPKey; - if( iCol>=0 ){ - pCol = &pTab->aCol[iCol]; - } - }else{ - for(iCol=0; iColnCol; iCol++){ - pCol = &pTab->aCol[iCol]; - if( 0==sqlite3StrICmp(pCol->zName, zColumnName) ){ - break; - } - } - if( iCol==pTab->nCol ){ - pTab = 0; - goto error_out; - } - } - - /* The following block stores the meta information that will be returned - ** to the caller in local variables zDataType, zCollSeq, notnull, primarykey - ** and autoinc. At this point there are two possibilities: - ** - ** 1. The specified column name was rowid", "oid" or "_rowid_" - ** and there is no explicitly declared IPK column. - ** - ** 2. The table is not a view and the column name identified an - ** explicitly declared column. Copy meta information from *pCol. 
- */ - if( pCol ){ - zDataType = pCol->zType; - zCollSeq = pCol->zColl; - notnull = pCol->notNull!=0; - primarykey = (pCol->colFlags & COLFLAG_PRIMKEY)!=0; - autoinc = pTab->iPKey==iCol && (pTab->tabFlags & TF_Autoincrement)!=0; - }else{ - zDataType = "INTEGER"; - primarykey = 1; - } - if( !zCollSeq ){ - zCollSeq = "BINARY"; - } - -error_out: - sqlite3BtreeLeaveAll(db); - - /* Whether the function call succeeded or failed, set the output parameters - ** to whatever their local counterparts contain. If an error did occur, - ** this has the effect of zeroing all output parameters. - */ - if( pzDataType ) *pzDataType = zDataType; - if( pzCollSeq ) *pzCollSeq = zCollSeq; - if( pNotNull ) *pNotNull = notnull; - if( pPrimaryKey ) *pPrimaryKey = primarykey; - if( pAutoinc ) *pAutoinc = autoinc; - - if( SQLITE_OK==rc && !pTab ){ - sqlite3DbFree(db, zErrMsg); - zErrMsg = sqlite3MPrintf(db, "no such table column: %s.%s", zTableName, - zColumnName); - rc = SQLITE_ERROR; - } - sqlite3Error(db, rc, (zErrMsg?"%s":0), zErrMsg); - sqlite3DbFree(db, zErrMsg); - rc = sqlite3ApiExit(db, rc); - sqlite3_mutex_leave(db->mutex); - return rc; -} -#endif - -/* -** Sleep for a little while. Return the amount of time slept. -*/ -SQLITE_API int sqlite3_sleep(int ms){ - sqlite3_vfs *pVfs; - int rc; - pVfs = sqlite3_vfs_find(0); - if( pVfs==0 ) return 0; - - /* This function works in milliseconds, but the underlying OsSleep() - ** API uses microseconds. Hence the 1000's. - */ - rc = (sqlite3OsSleep(pVfs, 1000*ms)/1000); - return rc; -} - -/* -** Enable or disable the extended result codes. -*/ -SQLITE_API int sqlite3_extended_result_codes(sqlite3 *db, int onoff){ - sqlite3_mutex_enter(db->mutex); - db->errMask = onoff ? 0xffffffff : 0xff; - sqlite3_mutex_leave(db->mutex); - return SQLITE_OK; -} - -/* -** Invoke the xFileControl method on a particular database. -*/ -SQLITE_API int sqlite3_file_control(sqlite3 *db, const char *zDbName, int op, void *pArg){ - int rc = SQLITE_ERROR; - Btree *pBtree; - - sqlite3_mutex_enter(db->mutex); - pBtree = sqlite3DbNameToBtree(db, zDbName); - if( pBtree ){ - Pager *pPager; - sqlite3_file *fd; - sqlite3BtreeEnter(pBtree); - pPager = sqlite3BtreePager(pBtree); - assert( pPager!=0 ); - fd = sqlite3PagerFile(pPager); - assert( fd!=0 ); - if( op==SQLITE_FCNTL_FILE_POINTER ){ - *(sqlite3_file**)pArg = fd; - rc = SQLITE_OK; - }else if( fd->pMethods ){ - rc = sqlite3OsFileControl(fd, op, pArg); - }else{ - rc = SQLITE_NOTFOUND; - } - sqlite3BtreeLeave(pBtree); - } - sqlite3_mutex_leave(db->mutex); - return rc; -} - -/* -** Interface to the testing logic. -*/ -SQLITE_API int sqlite3_test_control(int op, ...){ - int rc = 0; -#ifndef SQLITE_OMIT_BUILTIN_TEST - va_list ap; - va_start(ap, op); - switch( op ){ - - /* - ** Save the current state of the PRNG. - */ - case SQLITE_TESTCTRL_PRNG_SAVE: { - sqlite3PrngSaveState(); - break; - } - - /* - ** Restore the state of the PRNG to the last state saved using - ** PRNG_SAVE. If PRNG_SAVE has never before been called, then - ** this verb acts like PRNG_RESET. - */ - case SQLITE_TESTCTRL_PRNG_RESTORE: { - sqlite3PrngRestoreState(); - break; - } - - /* - ** Reset the PRNG back to its uninitialized state. The next call - ** to sqlite3_randomness() will reseed the PRNG using a single call - ** to the xRandomness method of the default VFS. - */ - case SQLITE_TESTCTRL_PRNG_RESET: { - sqlite3_randomness(0,0); - break; - } - - /* - ** sqlite3_test_control(BITVEC_TEST, size, program) - ** - ** Run a test against a Bitvec object of size. 
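/*
** Illustrative sketch (requires a build with SQLITE_ENABLE_COLUMN_METADATA):
** querying the declared type, collation and NOT NULL flag of a column with
** sqlite3_table_column_metadata(). The "users"/"id" names are hypothetical.
*/
static int describeColumn(sqlite3 *db){
  const char *zType = 0;
  const char *zColl = 0;
  int notNull = 0, primaryKey = 0, autoInc = 0;
  return sqlite3_table_column_metadata(db, "main", "users", "id",
                                       &zType, &zColl,
                                       &notNull, &primaryKey, &autoInc);
}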
The program argument - ** is an array of integers that defines the test. Return -1 on a - ** memory allocation error, 0 on success, or non-zero for an error. - ** See the sqlite3BitvecBuiltinTest() for additional information. - */ - case SQLITE_TESTCTRL_BITVEC_TEST: { - int sz = va_arg(ap, int); - int *aProg = va_arg(ap, int*); - rc = sqlite3BitvecBuiltinTest(sz, aProg); - break; - } - - /* - ** sqlite3_test_control(FAULT_INSTALL, xCallback) - ** - ** Arrange to invoke xCallback() whenever sqlite3FaultSim() is called, - ** if xCallback is not NULL. - ** - ** As a test of the fault simulator mechanism itself, sqlite3FaultSim(0) - ** is called immediately after installing the new callback and the return - ** value from sqlite3FaultSim(0) becomes the return from - ** sqlite3_test_control(). - */ - case SQLITE_TESTCTRL_FAULT_INSTALL: { - /* MSVC is picky about pulling func ptrs from va lists. - ** http://support.microsoft.com/kb/47961 - ** sqlite3Config.xTestCallback = va_arg(ap, int(*)(int)); - */ - typedef int(*TESTCALLBACKFUNC_t)(int); - sqlite3Config.xTestCallback = va_arg(ap, TESTCALLBACKFUNC_t); - rc = sqlite3FaultSim(0); - break; - } - - /* - ** sqlite3_test_control(BENIGN_MALLOC_HOOKS, xBegin, xEnd) - ** - ** Register hooks to call to indicate which malloc() failures - ** are benign. - */ - case SQLITE_TESTCTRL_BENIGN_MALLOC_HOOKS: { - typedef void (*void_function)(void); - void_function xBenignBegin; - void_function xBenignEnd; - xBenignBegin = va_arg(ap, void_function); - xBenignEnd = va_arg(ap, void_function); - sqlite3BenignMallocHooks(xBenignBegin, xBenignEnd); - break; - } - - /* - ** sqlite3_test_control(SQLITE_TESTCTRL_PENDING_BYTE, unsigned int X) - ** - ** Set the PENDING byte to the value in the argument, if X>0. - ** Make no changes if X==0. Return the value of the pending byte - ** as it existing before this routine was called. - ** - ** IMPORTANT: Changing the PENDING byte from 0x40000000 results in - ** an incompatible database file format. Changing the PENDING byte - ** while any database connection is open results in undefined and - ** dileterious behavior. - */ - case SQLITE_TESTCTRL_PENDING_BYTE: { - rc = PENDING_BYTE; -#ifndef SQLITE_OMIT_WSD - { - unsigned int newVal = va_arg(ap, unsigned int); - if( newVal ) sqlite3PendingByte = newVal; - } -#endif - break; - } - - /* - ** sqlite3_test_control(SQLITE_TESTCTRL_ASSERT, int X) - ** - ** This action provides a run-time test to see whether or not - ** assert() was enabled at compile-time. If X is true and assert() - ** is enabled, then the return value is true. If X is true and - ** assert() is disabled, then the return value is zero. If X is - ** false and assert() is enabled, then the assertion fires and the - ** process aborts. If X is false and assert() is disabled, then the - ** return value is zero. - */ - case SQLITE_TESTCTRL_ASSERT: { - volatile int x = 0; - assert( (x = va_arg(ap,int))!=0 ); - rc = x; - break; - } - - - /* - ** sqlite3_test_control(SQLITE_TESTCTRL_ALWAYS, int X) - ** - ** This action provides a run-time test to see how the ALWAYS and - ** NEVER macros were defined at compile-time. - ** - ** The return value is ALWAYS(X). - ** - ** The recommended test is X==2. If the return value is 2, that means - ** ALWAYS() and NEVER() are both no-op pass-through macros, which is the - ** default setting. If the return value is 1, then ALWAYS() is either - ** hard-coded to true or else it asserts if its argument is false. 
- ** The first behavior (hard-coded to true) is the case if - ** SQLITE_TESTCTRL_ASSERT shows that assert() is disabled and the second - ** behavior (assert if the argument to ALWAYS() is false) is the case if - ** SQLITE_TESTCTRL_ASSERT shows that assert() is enabled. - ** - ** The run-time test procedure might look something like this: - ** - ** if( sqlite3_test_control(SQLITE_TESTCTRL_ALWAYS, 2)==2 ){ - ** // ALWAYS() and NEVER() are no-op pass-through macros - ** }else if( sqlite3_test_control(SQLITE_TESTCTRL_ASSERT, 1) ){ - ** // ALWAYS(x) asserts that x is true. NEVER(x) asserts x is false. - ** }else{ - ** // ALWAYS(x) is a constant 1. NEVER(x) is a constant 0. - ** } - */ - case SQLITE_TESTCTRL_ALWAYS: { - int x = va_arg(ap,int); - rc = ALWAYS(x); - break; - } - - /* - ** sqlite3_test_control(SQLITE_TESTCTRL_BYTEORDER); - ** - ** The integer returned reveals the byte-order of the computer on which - ** SQLite is running: - ** - ** 1 big-endian, determined at run-time - ** 10 little-endian, determined at run-time - ** 432101 big-endian, determined at compile-time - ** 123410 little-endian, determined at compile-time - */ - case SQLITE_TESTCTRL_BYTEORDER: { - rc = SQLITE_BYTEORDER*100 + SQLITE_LITTLEENDIAN*10 + SQLITE_BIGENDIAN; - break; - } - - /* sqlite3_test_control(SQLITE_TESTCTRL_RESERVE, sqlite3 *db, int N) - ** - ** Set the nReserve size to N for the main database on the database - ** connection db. - */ - case SQLITE_TESTCTRL_RESERVE: { - sqlite3 *db = va_arg(ap, sqlite3*); - int x = va_arg(ap,int); - sqlite3_mutex_enter(db->mutex); - sqlite3BtreeSetPageSize(db->aDb[0].pBt, 0, x, 0); - sqlite3_mutex_leave(db->mutex); - break; - } - - /* sqlite3_test_control(SQLITE_TESTCTRL_OPTIMIZATIONS, sqlite3 *db, int N) - ** - ** Enable or disable various optimizations for testing purposes. The - ** argument N is a bitmask of optimizations to be disabled. For normal - ** operation N should be 0. The idea is that a test program (like the - ** SQL Logic Test or SLT test module) can run the same SQL multiple times - ** with various optimizations disabled to verify that the same answer - ** is obtained in every case. - */ - case SQLITE_TESTCTRL_OPTIMIZATIONS: { - sqlite3 *db = va_arg(ap, sqlite3*); - db->dbOptFlags = (u16)(va_arg(ap, int) & 0xffff); - break; - } - -#ifdef SQLITE_N_KEYWORD - /* sqlite3_test_control(SQLITE_TESTCTRL_ISKEYWORD, const char *zWord) - ** - ** If zWord is a keyword recognized by the parser, then return the - ** number of keywords. Or if zWord is not a keyword, return 0. - ** - ** This test feature is only available in the amalgamation since - ** the SQLITE_N_KEYWORD macro is not defined in this file if SQLite - ** is built using separate source files. - */ - case SQLITE_TESTCTRL_ISKEYWORD: { - const char *zWord = va_arg(ap, const char*); - int n = sqlite3Strlen30(zWord); - rc = (sqlite3KeywordCode((u8*)zWord, n)!=TK_ID) ? SQLITE_N_KEYWORD : 0; - break; - } -#endif - - /* sqlite3_test_control(SQLITE_TESTCTRL_SCRATCHMALLOC, sz, &pNew, pFree); - ** - ** Pass pFree into sqlite3ScratchFree(). - ** If sz>0 then allocate a scratch buffer into pNew. 
- */ - case SQLITE_TESTCTRL_SCRATCHMALLOC: { - void *pFree, **ppNew; - int sz; - sz = va_arg(ap, int); - ppNew = va_arg(ap, void**); - pFree = va_arg(ap, void*); - if( sz ) *ppNew = sqlite3ScratchMalloc(sz); - sqlite3ScratchFree(pFree); - break; - } - - /* sqlite3_test_control(SQLITE_TESTCTRL_LOCALTIME_FAULT, int onoff); - ** - ** If parameter onoff is non-zero, configure the wrappers so that all - ** subsequent calls to localtime() and variants fail. If onoff is zero, - ** undo this setting. - */ - case SQLITE_TESTCTRL_LOCALTIME_FAULT: { - sqlite3GlobalConfig.bLocaltimeFault = va_arg(ap, int); - break; - } - -#if defined(SQLITE_ENABLE_TREE_EXPLAIN) - /* sqlite3_test_control(SQLITE_TESTCTRL_EXPLAIN_STMT, - ** sqlite3_stmt*,const char**); - ** - ** If compiled with SQLITE_ENABLE_TREE_EXPLAIN, each sqlite3_stmt holds - ** a string that describes the optimized parse tree. This test-control - ** returns a pointer to that string. - */ - case SQLITE_TESTCTRL_EXPLAIN_STMT: { - sqlite3_stmt *pStmt = va_arg(ap, sqlite3_stmt*); - const char **pzRet = va_arg(ap, const char**); - *pzRet = sqlite3VdbeExplanation((Vdbe*)pStmt); - break; - } -#endif - - /* sqlite3_test_control(SQLITE_TESTCTRL_NEVER_CORRUPT, int); - ** - ** Set or clear a flag that indicates that the database file is always well- - ** formed and never corrupt. This flag is clear by default, indicating that - ** database files might have arbitrary corruption. Setting the flag during - ** testing causes certain assert() statements in the code to be activated - ** that demonstrat invariants on well-formed database files. - */ - case SQLITE_TESTCTRL_NEVER_CORRUPT: { - sqlite3GlobalConfig.neverCorrupt = va_arg(ap, int); - break; - } - - - /* sqlite3_test_control(SQLITE_TESTCTRL_VDBE_COVERAGE, xCallback, ptr); - ** - ** Set the VDBE coverage callback function to xCallback with context - ** pointer ptr. - */ - case SQLITE_TESTCTRL_VDBE_COVERAGE: { -#ifdef SQLITE_VDBE_COVERAGE - typedef void (*branch_callback)(void*,int,u8,u8); - sqlite3GlobalConfig.xVdbeBranch = va_arg(ap,branch_callback); - sqlite3GlobalConfig.pVdbeBranchArg = va_arg(ap,void*); -#endif - break; - } - - } - va_end(ap); -#endif /* SQLITE_OMIT_BUILTIN_TEST */ - return rc; -} - -/* -** This is a utility routine, useful to VFS implementations, that checks -** to see if a database file was a URI that contained a specific query -** parameter, and if so obtains the value of the query parameter. -** -** The zFilename argument is the filename pointer passed into the xOpen() -** method of a VFS implementation. The zParam argument is the name of the -** query parameter we seek. This routine returns the value of the zParam -** parameter if it exists. If the parameter does not exist, this routine -** returns a NULL pointer. -*/ -SQLITE_API const char *sqlite3_uri_parameter(const char *zFilename, const char *zParam){ - if( zFilename==0 ) return 0; - zFilename += sqlite3Strlen30(zFilename) + 1; - while( zFilename[0] ){ - int x = strcmp(zFilename, zParam); - zFilename += sqlite3Strlen30(zFilename) + 1; - if( x==0 ) return zFilename; - zFilename += sqlite3Strlen30(zFilename) + 1; - } - return 0; -} - -/* -** Return a boolean value for a query parameter. -*/ -SQLITE_API int sqlite3_uri_boolean(const char *zFilename, const char *zParam, int bDflt){ - const char *z = sqlite3_uri_parameter(zFilename, zParam); - bDflt = bDflt!=0; - return z ? sqlite3GetBoolean(z, bDflt) : bDflt; -} - -/* -** Return a 64-bit integer value for a query parameter. 
-*/ -SQLITE_API sqlite3_int64 sqlite3_uri_int64( - const char *zFilename, /* Filename as passed to xOpen */ - const char *zParam, /* URI parameter sought */ - sqlite3_int64 bDflt /* return if parameter is missing */ -){ - const char *z = sqlite3_uri_parameter(zFilename, zParam); - sqlite3_int64 v; - if( z && sqlite3Atoi64(z, &v, sqlite3Strlen30(z), SQLITE_UTF8)==SQLITE_OK ){ - bDflt = v; - } - return bDflt; -} - -/* -** Return the Btree pointer identified by zDbName. Return NULL if not found. -*/ -SQLITE_PRIVATE Btree *sqlite3DbNameToBtree(sqlite3 *db, const char *zDbName){ - int i; - for(i=0; inDb; i++){ - if( db->aDb[i].pBt - && (zDbName==0 || sqlite3StrICmp(zDbName, db->aDb[i].zName)==0) - ){ - return db->aDb[i].pBt; - } - } - return 0; -} - -/* -** Return the filename of the database associated with a database -** connection. -*/ -SQLITE_API const char *sqlite3_db_filename(sqlite3 *db, const char *zDbName){ - Btree *pBt = sqlite3DbNameToBtree(db, zDbName); - return pBt ? sqlite3BtreeGetFilename(pBt) : 0; -} - -/* -** Return 1 if database is read-only or 0 if read/write. Return -1 if -** no such database exists. -*/ -SQLITE_API int sqlite3_db_readonly(sqlite3 *db, const char *zDbName){ - Btree *pBt = sqlite3DbNameToBtree(db, zDbName); - return pBt ? sqlite3BtreeIsReadonly(pBt) : -1; -} - -/************** End of main.c ************************************************/ -/************** Begin file notify.c ******************************************/ -/* -** 2009 March 3 -** -** The author disclaims copyright to this source code. In place of -** a legal notice, here is a blessing: -** -** May you do good and not evil. -** May you find forgiveness for yourself and forgive others. -** May you share freely, never taking more than you give. -** -************************************************************************* -** -** This file contains the implementation of the sqlite3_unlock_notify() -** API method and its associated functionality. -*/ - -/* Omit this entire file if SQLITE_ENABLE_UNLOCK_NOTIFY is not defined. */ -#ifdef SQLITE_ENABLE_UNLOCK_NOTIFY - -/* -** Public interfaces: -** -** sqlite3ConnectionBlocked() -** sqlite3ConnectionUnlocked() -** sqlite3ConnectionClosed() -** sqlite3_unlock_notify() -*/ - -#define assertMutexHeld() \ - assert( sqlite3_mutex_held(sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_MASTER)) ) - -/* -** Head of a linked list of all sqlite3 objects created by this process -** for which either sqlite3.pBlockingConnection or sqlite3.pUnlockConnection -** is not NULL. This variable may only accessed while the STATIC_MASTER -** mutex is held. -*/ -static sqlite3 *SQLITE_WSD sqlite3BlockedList = 0; - -#ifndef NDEBUG -/* -** This function is a complex assert() that verifies the following -** properties of the blocked connections list: -** -** 1) Each entry in the list has a non-NULL value for either -** pUnlockConnection or pBlockingConnection, or both. -** -** 2) All entries in the list that share a common value for -** xUnlockNotify are grouped together. -** -** 3) If the argument db is not NULL, then none of the entries in the -** blocked connections list have pUnlockConnection or pBlockingConnection -** set to db. This is used when closing connection db. 
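/*
** Illustrative sketch: the sqlite3_db_filename() and sqlite3_db_readonly()
** wrappers above in use on a (hypothetical) connection. An empty filename
** means the "main" database is in-memory or temporary.
*/
static int reportMainDb(sqlite3 *db, const char **pzFile){
  *pzFile = sqlite3_db_filename(db, "main");
  return sqlite3_db_readonly(db, "main");  /* 1=read-only, 0=read/write, -1=no such db */
}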
-*/ -static void checkListProperties(sqlite3 *db){ - sqlite3 *p; - for(p=sqlite3BlockedList; p; p=p->pNextBlocked){ - int seen = 0; - sqlite3 *p2; - - /* Verify property (1) */ - assert( p->pUnlockConnection || p->pBlockingConnection ); - - /* Verify property (2) */ - for(p2=sqlite3BlockedList; p2!=p; p2=p2->pNextBlocked){ - if( p2->xUnlockNotify==p->xUnlockNotify ) seen = 1; - assert( p2->xUnlockNotify==p->xUnlockNotify || !seen ); - assert( db==0 || p->pUnlockConnection!=db ); - assert( db==0 || p->pBlockingConnection!=db ); - } - } -} -#else -# define checkListProperties(x) -#endif - -/* -** Remove connection db from the blocked connections list. If connection -** db is not currently a part of the list, this function is a no-op. -*/ -static void removeFromBlockedList(sqlite3 *db){ - sqlite3 **pp; - assertMutexHeld(); - for(pp=&sqlite3BlockedList; *pp; pp = &(*pp)->pNextBlocked){ - if( *pp==db ){ - *pp = (*pp)->pNextBlocked; - break; - } - } -} - -/* -** Add connection db to the blocked connections list. It is assumed -** that it is not already a part of the list. -*/ -static void addToBlockedList(sqlite3 *db){ - sqlite3 **pp; - assertMutexHeld(); - for( - pp=&sqlite3BlockedList; - *pp && (*pp)->xUnlockNotify!=db->xUnlockNotify; - pp=&(*pp)->pNextBlocked - ); - db->pNextBlocked = *pp; - *pp = db; -} - -/* -** Obtain the STATIC_MASTER mutex. -*/ -static void enterMutex(void){ - sqlite3_mutex_enter(sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_MASTER)); - checkListProperties(0); -} - -/* -** Release the STATIC_MASTER mutex. -*/ -static void leaveMutex(void){ - assertMutexHeld(); - checkListProperties(0); - sqlite3_mutex_leave(sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_MASTER)); -} - -/* -** Register an unlock-notify callback. -** -** This is called after connection "db" has attempted some operation -** but has received an SQLITE_LOCKED error because another connection -** (call it pOther) in the same process was busy using the same shared -** cache. pOther is found by looking at db->pBlockingConnection. -** -** If there is no blocking connection, the callback is invoked immediately, -** before this routine returns. -** -** If pOther is already blocked on db, then report SQLITE_LOCKED, to indicate -** a deadlock. -** -** Otherwise, make arrangements to invoke xNotify when pOther drops -** its locks. -** -** Each call to this routine overrides any prior callbacks registered -** on the same "db". If xNotify==0 then any prior callbacks are immediately -** cancelled. -*/ -SQLITE_API int sqlite3_unlock_notify( - sqlite3 *db, - void (*xNotify)(void **, int), - void *pArg -){ - int rc = SQLITE_OK; - - sqlite3_mutex_enter(db->mutex); - enterMutex(); - - if( xNotify==0 ){ - removeFromBlockedList(db); - db->pBlockingConnection = 0; - db->pUnlockConnection = 0; - db->xUnlockNotify = 0; - db->pUnlockArg = 0; - }else if( 0==db->pBlockingConnection ){ - /* The blocking transaction has been concluded. Or there never was a - ** blocking transaction. In either case, invoke the notify callback - ** immediately. - */ - xNotify(&pArg, 1); - }else{ - sqlite3 *p; - - for(p=db->pBlockingConnection; p && p!=db; p=p->pUnlockConnection){} - if( p ){ - rc = SQLITE_LOCKED; /* Deadlock detected. 
*/ - }else{ - db->pUnlockConnection = db->pBlockingConnection; - db->xUnlockNotify = xNotify; - db->pUnlockArg = pArg; - removeFromBlockedList(db); - addToBlockedList(db); - } - } - - leaveMutex(); - assert( !db->mallocFailed ); - sqlite3Error(db, rc, (rc?"database is deadlocked":0)); - sqlite3_mutex_leave(db->mutex); - return rc; -} - -/* -** This function is called while stepping or preparing a statement -** associated with connection db. The operation will return SQLITE_LOCKED -** to the user because it requires a lock that will not be available -** until connection pBlocker concludes its current transaction. -*/ -SQLITE_PRIVATE void sqlite3ConnectionBlocked(sqlite3 *db, sqlite3 *pBlocker){ - enterMutex(); - if( db->pBlockingConnection==0 && db->pUnlockConnection==0 ){ - addToBlockedList(db); - } - db->pBlockingConnection = pBlocker; - leaveMutex(); -} - -/* -** This function is called when -** the transaction opened by database db has just finished. Locks held -** by database connection db have been released. -** -** This function loops through each entry in the blocked connections -** list and does the following: -** -** 1) If the sqlite3.pBlockingConnection member of a list entry is -** set to db, then set pBlockingConnection=0. -** -** 2) If the sqlite3.pUnlockConnection member of a list entry is -** set to db, then invoke the configured unlock-notify callback and -** set pUnlockConnection=0. -** -** 3) If the two steps above mean that pBlockingConnection==0 and -** pUnlockConnection==0, remove the entry from the blocked connections -** list. -*/ -SQLITE_PRIVATE void sqlite3ConnectionUnlocked(sqlite3 *db){ - void (*xUnlockNotify)(void **, int) = 0; /* Unlock-notify cb to invoke */ - int nArg = 0; /* Number of entries in aArg[] */ - sqlite3 **pp; /* Iterator variable */ - void **aArg; /* Arguments to the unlock callback */ - void **aDyn = 0; /* Dynamically allocated space for aArg[] */ - void *aStatic[16]; /* Starter space for aArg[]. No malloc required */ - - aArg = aStatic; - enterMutex(); /* Enter STATIC_MASTER mutex */ - - /* This loop runs once for each entry in the blocked-connections list. */ - for(pp=&sqlite3BlockedList; *pp; /* no-op */ ){ - sqlite3 *p = *pp; - - /* Step 1. */ - if( p->pBlockingConnection==db ){ - p->pBlockingConnection = 0; - } - - /* Step 2. */ - if( p->pUnlockConnection==db ){ - assert( p->xUnlockNotify ); - if( p->xUnlockNotify!=xUnlockNotify && nArg!=0 ){ - xUnlockNotify(aArg, nArg); - nArg = 0; - } - - sqlite3BeginBenignMalloc(); - assert( aArg==aDyn || (aDyn==0 && aArg==aStatic) ); - assert( nArg<=(int)ArraySize(aStatic) || aArg==aDyn ); - if( (!aDyn && nArg==(int)ArraySize(aStatic)) - || (aDyn && nArg==(int)(sqlite3MallocSize(aDyn)/sizeof(void*))) - ){ - /* The aArg[] array needs to grow. */ - void **pNew = (void **)sqlite3Malloc(nArg*sizeof(void *)*2); - if( pNew ){ - memcpy(pNew, aArg, nArg*sizeof(void *)); - sqlite3_free(aDyn); - aDyn = aArg = pNew; - }else{ - /* This occurs when the array of context pointers that need to - ** be passed to the unlock-notify callback is larger than the - ** aStatic[] array allocated on the stack and the attempt to - ** allocate a larger array from the heap has failed. - ** - ** This is a difficult situation to handle. Returning an error - ** code to the caller is insufficient, as even if an error code - ** is returned the transaction on connection db will still be - ** closed and the unlock-notify callbacks on blocked connections - ** will go unissued. 
This might cause the application to wait - ** indefinitely for an unlock-notify callback that will never - ** arrive. - ** - ** Instead, invoke the unlock-notify callback with the context - ** array already accumulated. We can then clear the array and - ** begin accumulating any further context pointers without - ** requiring any dynamic allocation. This is sub-optimal because - ** it means that instead of one callback with a large array of - ** context pointers the application will receive two or more - ** callbacks with smaller arrays of context pointers, which will - ** reduce the applications ability to prioritize multiple - ** connections. But it is the best that can be done under the - ** circumstances. - */ - xUnlockNotify(aArg, nArg); - nArg = 0; - } - } - sqlite3EndBenignMalloc(); - - aArg[nArg++] = p->pUnlockArg; - xUnlockNotify = p->xUnlockNotify; - p->pUnlockConnection = 0; - p->xUnlockNotify = 0; - p->pUnlockArg = 0; - } - - /* Step 3. */ - if( p->pBlockingConnection==0 && p->pUnlockConnection==0 ){ - /* Remove connection p from the blocked connections list. */ - *pp = p->pNextBlocked; - p->pNextBlocked = 0; - }else{ - pp = &p->pNextBlocked; - } - } - - if( nArg!=0 ){ - xUnlockNotify(aArg, nArg); - } - sqlite3_free(aDyn); - leaveMutex(); /* Leave STATIC_MASTER mutex */ -} - -/* -** This is called when the database connection passed as an argument is -** being closed. The connection is removed from the blocked list. -*/ -SQLITE_PRIVATE void sqlite3ConnectionClosed(sqlite3 *db){ - sqlite3ConnectionUnlocked(db); - enterMutex(); - removeFromBlockedList(db); - checkListProperties(db); - leaveMutex(); -} -#endif - -/************** End of notify.c **********************************************/ -/************** Begin file fts3.c ********************************************/ -/* -** 2006 Oct 10 -** -** The author disclaims copyright to this source code. In place of -** a legal notice, here is a blessing: -** -** May you do good and not evil. -** May you find forgiveness for yourself and forgive others. -** May you share freely, never taking more than you give. -** -****************************************************************************** -** -** This is an SQLite module implementing full-text search. -*/ - -/* -** The code in this file is only compiled if: -** -** * The FTS3 module is being built as an extension -** (in which case SQLITE_CORE is not defined), or -** -** * The FTS3 module is being built into the core of -** SQLite (in which case SQLITE_ENABLE_FTS3 is defined). -*/ - -/* The full-text index is stored in a series of b+tree (-like) -** structures called segments which map terms to doclists. The -** structures are like b+trees in layout, but are constructed from the -** bottom up in optimal fashion and are not updatable. Since trees -** are built from the bottom up, things will be described from the -** bottom up. -** -** -**** Varints **** -** The basic unit of encoding is a variable-length integer called a -** varint. We encode variable-length integers in little-endian order -** using seven bits * per byte as follows: -** -** KEY: -** A = 0xxxxxxx 7 bits of data and one flag bit -** B = 1xxxxxxx 7 bits of data and one flag bit -** -** 7 bits - A -** 14 bits - BA -** 21 bits - BBA -** and so on. -** -** This is similar in concept to how sqlite encodes "varints" but -** the encoding is not the same. SQLite varints are big-endian -** are are limited to 9 bytes in length whereas FTS3 varints are -** little-endian and can be up to 10 bytes in length (in theory). 
-** -** Example encodings: -** -** 1: 0x01 -** 127: 0x7f -** 128: 0x81 0x00 -** -** -**** Document lists **** -** A doclist (document list) holds a docid-sorted list of hits for a -** given term. Doclists hold docids and associated token positions. -** A docid is the unique integer identifier for a single document. -** A position is the index of a word within the document. The first -** word of the document has a position of 0. -** -** FTS3 used to optionally store character offsets using a compile-time -** option. But that functionality is no longer supported. -** -** A doclist is stored like this: -** -** array { -** varint docid; (delta from previous doclist) -** array { (position list for column 0) -** varint position; (2 more than the delta from previous position) -** } -** array { -** varint POS_COLUMN; (marks start of position list for new column) -** varint column; (index of new column) -** array { -** varint position; (2 more than the delta from previous position) -** } -** } -** varint POS_END; (marks end of positions for this document. -** } -** -** Here, array { X } means zero or more occurrences of X, adjacent in -** memory. A "position" is an index of a token in the token stream -** generated by the tokenizer. Note that POS_END and POS_COLUMN occur -** in the same logical place as the position element, and act as sentinals -** ending a position list array. POS_END is 0. POS_COLUMN is 1. -** The positions numbers are not stored literally but rather as two more -** than the difference from the prior position, or the just the position plus -** 2 for the first position. Example: -** -** label: A B C D E F G H I J K -** value: 123 5 9 1 1 14 35 0 234 72 0 -** -** The 123 value is the first docid. For column zero in this document -** there are two matches at positions 3 and 10 (5-2 and 9-2+3). The 1 -** at D signals the start of a new column; the 1 at E indicates that the -** new column is column number 1. There are two positions at 12 and 45 -** (14-2 and 35-2+12). The 0 at H indicate the end-of-document. The -** 234 at I is the delta to next docid (357). It has one position 70 -** (72-2) and then terminates with the 0 at K. -** -** A "position-list" is the list of positions for multiple columns for -** a single docid. A "column-list" is the set of positions for a single -** column. Hence, a position-list consists of one or more column-lists, -** a document record consists of a docid followed by a position-list and -** a doclist consists of one or more document records. -** -** A bare doclist omits the position information, becoming an -** array of varint-encoded docids. -** -**** Segment leaf nodes **** -** Segment leaf nodes store terms and doclists, ordered by term. Leaf -** nodes are written using LeafWriter, and read using LeafReader (to -** iterate through a single leaf node's data) and LeavesReader (to -** iterate through a segment's entire leaf layer). 
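A compact way to check the worked example above is to decode its position-list bytes by hand. The sketch below walks the values 5 9 1 1 14 35 0 (the leading docid 123 and the trailing docid delta are omitted), treating each varint as a single byte, which is valid whenever the stored values are below 128; it recovers positions 3 and 10 in column 0 and 12 and 45 in column 1, as described.

#include <stdio.h>

int main(void){
  /* Position-list bytes from the worked example above. */
  static const unsigned char aList[] = { 5, 9, 1, 1, 14, 35, 0 };
  const unsigned char *a = aList;
  int iCol = 0;                  /* Current column number */
  int iPos = 0;                  /* Previous position within this column */

  while( *a!=0 ){                /* POS_END (0) terminates the list */
    if( *a==1 ){                 /* POS_COLUMN (1): a column number follows */
      a++;
      iCol = (int)*a++;
      iPos = 0;                  /* Deltas restart for each column */
    }else{
      iPos += (int)*a++ - 2;     /* Positions are stored as delta plus 2 */
      printf("column %d, position %d\n", iCol, iPos);
    }
  }
  return 0;
}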
Leaf nodes have -** the format: -** -** varint iHeight; (height from leaf level, always 0) -** varint nTerm; (length of first term) -** char pTerm[nTerm]; (content of first term) -** varint nDoclist; (length of term's associated doclist) -** char pDoclist[nDoclist]; (content of doclist) -** array { -** (further terms are delta-encoded) -** varint nPrefix; (length of prefix shared with previous term) -** varint nSuffix; (length of unshared suffix) -** char pTermSuffix[nSuffix];(unshared suffix of next term) -** varint nDoclist; (length of term's associated doclist) -** char pDoclist[nDoclist]; (content of doclist) -** } -** -** Here, array { X } means zero or more occurrences of X, adjacent in -** memory. -** -** Leaf nodes are broken into blocks which are stored contiguously in -** the %_segments table in sorted order. This means that when the end -** of a node is reached, the next term is in the node with the next -** greater node id. -** -** New data is spilled to a new leaf node when the current node -** exceeds LEAF_MAX bytes (default 2048). New data which itself is -** larger than STANDALONE_MIN (default 1024) is placed in a standalone -** node (a leaf node with a single term and doclist). The goal of -** these settings is to pack together groups of small doclists while -** making it efficient to directly access large doclists. The -** assumption is that large doclists represent terms which are more -** likely to be query targets. -** -** TODO(shess) It may be useful for blocking decisions to be more -** dynamic. For instance, it may make more sense to have a 2.5k leaf -** node rather than splitting into 2k and .5k nodes. My intuition is -** that this might extend through 2x or 4x the pagesize. -** -** -**** Segment interior nodes **** -** Segment interior nodes store blockids for subtree nodes and terms -** to describe what data is stored by the each subtree. Interior -** nodes are written using InteriorWriter, and read using -** InteriorReader. InteriorWriters are created as needed when -** SegmentWriter creates new leaf nodes, or when an interior node -** itself grows too big and must be split. The format of interior -** nodes: -** -** varint iHeight; (height from leaf level, always >0) -** varint iBlockid; (block id of node's leftmost subtree) -** optional { -** varint nTerm; (length of first term) -** char pTerm[nTerm]; (content of first term) -** array { -** (further terms are delta-encoded) -** varint nPrefix; (length of shared prefix with previous term) -** varint nSuffix; (length of unshared suffix) -** char pTermSuffix[nSuffix]; (unshared suffix of next term) -** } -** } -** -** Here, optional { X } means an optional element, while array { X } -** means zero or more occurrences of X, adjacent in memory. -** -** An interior node encodes n terms separating n+1 subtrees. The -** subtree blocks are contiguous, so only the first subtree's blockid -** is encoded. The subtree at iBlockid will contain all terms less -** than the first term encoded (or all terms if no term is encoded). -** Otherwise, for terms greater than or equal to pTerm[i] but less -** than pTerm[i+1], the subtree for that term will be rooted at -** iBlockid+i. Interior nodes only store enough term data to -** distinguish adjacent children (if the rightmost term of the left -** child is "something", and the leftmost term of the right child is -** "wicked", only "w" is stored). -** -** New data is spilled to a new interior node at the same height when -** the current node exceeds INTERIOR_MAX bytes (default 2048). 
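The delta-encoding of terms described above (a shared-prefix length followed by the unshared suffix) is easy to reproduce in isolation. The sketch below uses the hypothetical terms "apple", "application" and "apply"; on disk nPrefix and nSuffix are varints and each term is followed by its doclist, both of which are left out here.

#include <stdio.h>
#include <string.h>

int main(void){
  char zTerm[64] = "apple";           /* First term of the node, stored whole */
  static const struct {
    int nPrefix;                      /* Bytes shared with the previous term */
    const char *zSuffix;              /* Unshared suffix of the next term */
  } aEntry[] = {
    { 4, "ication" },                 /* "appl" + "ication" -> "application" */
    { 4, "y"       },                 /* "appl" + "y"       -> "apply"       */
  };
  size_t i;

  printf("%s\n", zTerm);
  for(i=0; i<sizeof(aEntry)/sizeof(aEntry[0]); i++){
    /* Keep nPrefix bytes of the previous term and append the new suffix. */
    strcpy(&zTerm[aEntry[i].nPrefix], aEntry[i].zSuffix);
    printf("%s\n", zTerm);
  }
  return 0;
}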
-** INTERIOR_MIN_TERMS (default 7) keeps large terms from monopolizing -** interior nodes and making the tree too skinny. The interior nodes -** at a given height are naturally tracked by interior nodes at -** height+1, and so on. -** -** -**** Segment directory **** -** The segment directory in table %_segdir stores meta-information for -** merging and deleting segments, and also the root node of the -** segment's tree. -** -** The root node is the top node of the segment's tree after encoding -** the entire segment, restricted to ROOT_MAX bytes (default 1024). -** This could be either a leaf node or an interior node. If the top -** node requires more than ROOT_MAX bytes, it is flushed to %_segments -** and a new root interior node is generated (which should always fit -** within ROOT_MAX because it only needs space for 2 varints, the -** height and the blockid of the previous root). -** -** The meta-information in the segment directory is: -** level - segment level (see below) -** idx - index within level -** - (level,idx uniquely identify a segment) -** start_block - first leaf node -** leaves_end_block - last leaf node -** end_block - last block (including interior nodes) -** root - contents of root node -** -** If the root node is a leaf node, then start_block, -** leaves_end_block, and end_block are all 0. -** -** -**** Segment merging **** -** To amortize update costs, segments are grouped into levels and -** merged in batches. Each increase in level represents exponentially -** more documents. -** -** New documents (actually, document updates) are tokenized and -** written individually (using LeafWriter) to a level 0 segment, with -** incrementing idx. When idx reaches MERGE_COUNT (default 16), all -** level 0 segments are merged into a single level 1 segment. Level 1 -** is populated like level 0, and eventually MERGE_COUNT level 1 -** segments are merged to a single level 2 segment (representing -** MERGE_COUNT^2 updates), and so on. -** -** A segment merge traverses all segments at a given level in -** parallel, performing a straightforward sorted merge. Since segment -** leaf nodes are written in to the %_segments table in order, this -** merge traverses the underlying sqlite disk structures efficiently. -** After the merge, all segment blocks from the merged level are -** deleted. -** -** MERGE_COUNT controls how often we merge segments. 16 seems to be -** somewhat of a sweet spot for insertion performance. 32 and 64 show -** very similar performance numbers to 16 on insertion, though they're -** a tiny bit slower (perhaps due to more overhead in merge-time -** sorting). 8 is about 20% slower than 16, 4 about 50% slower than -** 16, 2 about 66% slower than 16. -** -** At query time, high MERGE_COUNT increases the number of segments -** which need to be scanned and merged. For instance, with 100k docs -** inserted: -** -** MERGE_COUNT segments -** 16 25 -** 8 12 -** 4 10 -** 2 6 -** -** This appears to have only a moderate impact on queries for very -** frequent terms (which are somewhat dominated by segment merge -** costs), and infrequent and non-existent terms still seem to be fast -** even with many segments. -** -** TODO(shess) That said, it would be nice to have a better query-side -** argument for MERGE_COUNT of 16. Also, it is possible/likely that -** optimizations to things like doclist merging will swing the sweet -** spot around. 
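As a quick sanity check of the level arithmetic above, the sketch below prints how many level-0 flushes a segment at each level represents when MERGE_COUNT is left at its default of 16 (a level-N segment stands for roughly MERGE_COUNT^N flushes of the pending-terms tables).

#include <stdio.h>

#define MERGE_COUNT 16               /* Default merge fan-in described above */

int main(void){
  unsigned long long nFlush = 1;     /* A level-0 segment is one flush */
  int iLevel;
  for(iLevel=0; iLevel<=4; iLevel++){
    printf("one level-%d segment ~ %llu level-0 flushes\n", iLevel, nFlush);
    nFlush *= MERGE_COUNT;
  }
  return 0;
}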
-** -** -** -**** Handling of deletions and updates **** -** Since we're using a segmented structure, with no docid-oriented -** index into the term index, we clearly cannot simply update the term -** index when a document is deleted or updated. For deletions, we -** write an empty doclist (varint(docid) varint(POS_END)), for updates -** we simply write the new doclist. Segment merges overwrite older -** data for a particular docid with newer data, so deletes or updates -** will eventually overtake the earlier data and knock it out. The -** query logic likewise merges doclists so that newer data knocks out -** older data. -*/ - -/************** Include fts3Int.h in the middle of fts3.c ********************/ -/************** Begin file fts3Int.h *****************************************/ -/* -** 2009 Nov 12 -** -** The author disclaims copyright to this source code. In place of -** a legal notice, here is a blessing: -** -** May you do good and not evil. -** May you find forgiveness for yourself and forgive others. -** May you share freely, never taking more than you give. -** -****************************************************************************** -** -*/ -#ifndef _FTSINT_H -#define _FTSINT_H - -#if !defined(NDEBUG) && !defined(SQLITE_DEBUG) -# define NDEBUG 1 -#endif - -/* -** FTS4 is really an extension for FTS3. It is enabled using the -** SQLITE_ENABLE_FTS3 macro. But to avoid confusion we also all -** the SQLITE_ENABLE_FTS4 macro to serve as an alisse for SQLITE_ENABLE_FTS3. -*/ -#if defined(SQLITE_ENABLE_FTS4) && !defined(SQLITE_ENABLE_FTS3) -# define SQLITE_ENABLE_FTS3 -#endif - -#if !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS3) - -/* If not building as part of the core, include sqlite3ext.h. */ -#ifndef SQLITE_CORE -SQLITE_EXTENSION_INIT3 -#endif - -/************** Include fts3_tokenizer.h in the middle of fts3Int.h **********/ -/************** Begin file fts3_tokenizer.h **********************************/ -/* -** 2006 July 10 -** -** The author disclaims copyright to this source code. -** -************************************************************************* -** Defines the interface to tokenizers used by fulltext-search. There -** are three basic components: -** -** sqlite3_tokenizer_module is a singleton defining the tokenizer -** interface functions. This is essentially the class structure for -** tokenizers. -** -** sqlite3_tokenizer is used to define a particular tokenizer, perhaps -** including customization information defined at creation time. -** -** sqlite3_tokenizer_cursor is generated by a tokenizer to generate -** tokens from a particular input. -*/ -#ifndef _FTS3_TOKENIZER_H_ -#define _FTS3_TOKENIZER_H_ - -/* TODO(shess) Only used for SQLITE_OK and SQLITE_DONE at this time. -** If tokenizers are to be allowed to call sqlite3_*() functions, then -** we will need a way to register the API consistently. -*/ - -/* -** Structures used by the tokenizer interface. When a new tokenizer -** implementation is registered, the caller provides a pointer to -** an sqlite3_tokenizer_module containing pointers to the callback -** functions that make up an implementation. -** -** When an fts3 table is created, it passes any arguments passed to -** the tokenizer clause of the CREATE VIRTUAL TABLE statement to the -** sqlite3_tokenizer_module.xCreate() function of the requested tokenizer -** implementation. 
The xCreate() function in turn returns an -** sqlite3_tokenizer structure representing the specific tokenizer to -** be used for the fts3 table (customized by the tokenizer clause arguments). -** -** To tokenize an input buffer, the sqlite3_tokenizer_module.xOpen() -** method is called. It returns an sqlite3_tokenizer_cursor object -** that may be used to tokenize a specific input buffer based on -** the tokenization rules supplied by a specific sqlite3_tokenizer -** object. -*/ -typedef struct sqlite3_tokenizer_module sqlite3_tokenizer_module; -typedef struct sqlite3_tokenizer sqlite3_tokenizer; -typedef struct sqlite3_tokenizer_cursor sqlite3_tokenizer_cursor; - -struct sqlite3_tokenizer_module { - - /* - ** Structure version. Should always be set to 0 or 1. - */ - int iVersion; - - /* - ** Create a new tokenizer. The values in the argv[] array are the - ** arguments passed to the "tokenizer" clause of the CREATE VIRTUAL - ** TABLE statement that created the fts3 table. For example, if - ** the following SQL is executed: - ** - ** CREATE .. USING fts3( ... , tokenizer arg1 arg2) - ** - ** then argc is set to 2, and the argv[] array contains pointers - ** to the strings "arg1" and "arg2". - ** - ** This method should return either SQLITE_OK (0), or an SQLite error - ** code. If SQLITE_OK is returned, then *ppTokenizer should be set - ** to point at the newly created tokenizer structure. The generic - ** sqlite3_tokenizer.pModule variable should not be initialized by - ** this callback. The caller will do so. - */ - int (*xCreate)( - int argc, /* Size of argv array */ - const char *const*argv, /* Tokenizer argument strings */ - sqlite3_tokenizer **ppTokenizer /* OUT: Created tokenizer */ - ); - - /* - ** Destroy an existing tokenizer. The fts3 module calls this method - ** exactly once for each successful call to xCreate(). - */ - int (*xDestroy)(sqlite3_tokenizer *pTokenizer); - - /* - ** Create a tokenizer cursor to tokenize an input buffer. The caller - ** is responsible for ensuring that the input buffer remains valid - ** until the cursor is closed (using the xClose() method). - */ - int (*xOpen)( - sqlite3_tokenizer *pTokenizer, /* Tokenizer object */ - const char *pInput, int nBytes, /* Input buffer */ - sqlite3_tokenizer_cursor **ppCursor /* OUT: Created tokenizer cursor */ - ); - - /* - ** Destroy an existing tokenizer cursor. The fts3 module calls this - ** method exactly once for each successful call to xOpen(). - */ - int (*xClose)(sqlite3_tokenizer_cursor *pCursor); - - /* - ** Retrieve the next token from the tokenizer cursor pCursor. This - ** method should either return SQLITE_OK and set the values of the - ** "OUT" variables identified below, or SQLITE_DONE to indicate that - ** the end of the buffer has been reached, or an SQLite error code. - ** - ** *ppToken should be set to point at a buffer containing the - ** normalized version of the token (i.e. after any case-folding and/or - ** stemming has been performed). *pnBytes should be set to the length - ** of this buffer in bytes. The input text that generated the token is - ** identified by the byte offsets returned in *piStartOffset and - ** *piEndOffset. *piStartOffset should be set to the index of the first - ** byte of the token in the input buffer. *piEndOffset should be set - ** to the index of the first byte just past the end of the token in - ** the input buffer. - ** - ** The buffer *ppToken is set to point at is managed by the tokenizer - ** implementation. 
It is only required to be valid until the next call - ** to xNext() or xClose(). - */ - /* TODO(shess) current implementation requires pInput to be - ** nul-terminated. This should either be fixed, or pInput/nBytes - ** should be converted to zInput. - */ - int (*xNext)( - sqlite3_tokenizer_cursor *pCursor, /* Tokenizer cursor */ - const char **ppToken, int *pnBytes, /* OUT: Normalized text for token */ - int *piStartOffset, /* OUT: Byte offset of token in input buffer */ - int *piEndOffset, /* OUT: Byte offset of end of token in input buffer */ - int *piPosition /* OUT: Number of tokens returned before this one */ - ); - - /*********************************************************************** - ** Methods below this point are only available if iVersion>=1. - */ - - /* - ** Configure the language id of a tokenizer cursor. - */ - int (*xLanguageid)(sqlite3_tokenizer_cursor *pCsr, int iLangid); -}; - -struct sqlite3_tokenizer { - const sqlite3_tokenizer_module *pModule; /* The module for this tokenizer */ - /* Tokenizer implementations will typically add additional fields */ -}; - -struct sqlite3_tokenizer_cursor { - sqlite3_tokenizer *pTokenizer; /* Tokenizer for this cursor. */ - /* Tokenizer implementations will typically add additional fields */ -}; - -int fts3_global_term_cnt(int iTerm, int iCol); -int fts3_term_cnt(int iTerm, int iCol); - - -#endif /* _FTS3_TOKENIZER_H_ */ - -/************** End of fts3_tokenizer.h **************************************/ -/************** Continuing where we left off in fts3Int.h ********************/ -/************** Include fts3_hash.h in the middle of fts3Int.h ***************/ -/************** Begin file fts3_hash.h ***************************************/ -/* -** 2001 September 22 -** -** The author disclaims copyright to this source code. In place of -** a legal notice, here is a blessing: -** -** May you do good and not evil. -** May you find forgiveness for yourself and forgive others. -** May you share freely, never taking more than you give. -** -************************************************************************* -** This is the header file for the generic hash-table implementation -** used in SQLite. We've modified it slightly to serve as a standalone -** hash table implementation for the full-text indexing module. -** -*/ -#ifndef _FTS3_HASH_H_ -#define _FTS3_HASH_H_ - -/* Forward declarations of structures. */ -typedef struct Fts3Hash Fts3Hash; -typedef struct Fts3HashElem Fts3HashElem; - -/* A complete hash table is an instance of the following structure. -** The internals of this structure are intended to be opaque -- client -** code should not attempt to access or modify the fields of this structure -** directly. Change this structure only by using the routines below. -** However, many of the "procedures" and "functions" for modifying and -** accessing this structure are really macros, so we can't really make -** this structure opaque. -*/ -struct Fts3Hash { - char keyClass; /* HASH_INT, _POINTER, _STRING, _BINARY */ - char copyKey; /* True if copy of key made on insert */ - int count; /* Number of entries in this table */ - Fts3HashElem *first; /* The first element of the array */ - int htsize; /* Number of buckets in the hash table */ - struct _fts3ht { /* the hash table */ - int count; /* Number of entries with this hash */ - Fts3HashElem *chain; /* Pointer to first entry with this hash */ - } *ht; -}; - -/* Each element in the hash table is an instance of the following -** structure. 
All elements are stored on a single doubly-linked list. -** -** Again, this structure is intended to be opaque, but it can't really -** be opaque because it is used by macros. -*/ -struct Fts3HashElem { - Fts3HashElem *next, *prev; /* Next and previous elements in the table */ - void *data; /* Data associated with this element */ - void *pKey; int nKey; /* Key associated with this element */ -}; - -/* -** There are 2 different modes of operation for a hash table: -** -** FTS3_HASH_STRING pKey points to a string that is nKey bytes long -** (including the null-terminator, if any). Case -** is respected in comparisons. -** -** FTS3_HASH_BINARY pKey points to binary data nKey bytes long. -** memcmp() is used to compare keys. -** -** A copy of the key is made if the copyKey parameter to fts3HashInit is 1. -*/ -#define FTS3_HASH_STRING 1 -#define FTS3_HASH_BINARY 2 - -/* -** Access routines. To delete, insert a NULL pointer. -*/ -SQLITE_PRIVATE void sqlite3Fts3HashInit(Fts3Hash *pNew, char keyClass, char copyKey); -SQLITE_PRIVATE void *sqlite3Fts3HashInsert(Fts3Hash*, const void *pKey, int nKey, void *pData); -SQLITE_PRIVATE void *sqlite3Fts3HashFind(const Fts3Hash*, const void *pKey, int nKey); -SQLITE_PRIVATE void sqlite3Fts3HashClear(Fts3Hash*); -SQLITE_PRIVATE Fts3HashElem *sqlite3Fts3HashFindElem(const Fts3Hash *, const void *, int); - -/* -** Shorthand for the functions above -*/ -#define fts3HashInit sqlite3Fts3HashInit -#define fts3HashInsert sqlite3Fts3HashInsert -#define fts3HashFind sqlite3Fts3HashFind -#define fts3HashClear sqlite3Fts3HashClear -#define fts3HashFindElem sqlite3Fts3HashFindElem - -/* -** Macros for looping over all elements of a hash table. The idiom is -** like this: -** -** Fts3Hash h; -** Fts3HashElem *p; -** ... -** for(p=fts3HashFirst(&h); p; p=fts3HashNext(p)){ -** SomeStructure *pData = fts3HashData(p); -** // do something with pData -** } -*/ -#define fts3HashFirst(H) ((H)->first) -#define fts3HashNext(E) ((E)->next) -#define fts3HashData(E) ((E)->data) -#define fts3HashKey(E) ((E)->pKey) -#define fts3HashKeysize(E) ((E)->nKey) - -/* -** Number of entries in a hash table -*/ -#define fts3HashCount(H) ((H)->count) - -#endif /* _FTS3_HASH_H_ */ - -/************** End of fts3_hash.h *******************************************/ -/************** Continuing where we left off in fts3Int.h ********************/ - -/* -** This constant determines the maximum depth of an FTS expression tree -** that the library will create and use. FTS uses recursion to perform -** various operations on the query tree, so the disadvantage of a large -** limit is that it may allow very large queries to use large amounts -** of stack space (perhaps causing a stack overflow). -*/ -#ifndef SQLITE_FTS3_MAX_EXPR_DEPTH -# define SQLITE_FTS3_MAX_EXPR_DEPTH 12 -#endif - - -/* -** This constant controls how often segments are merged. Once there are -** FTS3_MERGE_COUNT segments of level N, they are merged into a single -** segment of level N+1. -*/ -#define FTS3_MERGE_COUNT 16 - -/* -** This is the maximum amount of data (in bytes) to store in the -** Fts3Table.pendingTerms hash table. Normally, the hash table is -** populated as documents are inserted/updated/deleted in a transaction -** and used to create a new segment when the transaction is committed. -** However if this limit is reached midway through a transaction, a new -** segment is created and the hash table cleared immediately. 
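The iteration idiom shown above combines with the access routines roughly as in the following sketch. It assumes compilation inside the amalgamation, since the fts3Hash* names are SQLITE_PRIVATE internals, and the string keys and integer values are purely illustrative.

/* Count the entries of a small string-keyed hash, assuming the FTS3-internal
** hash symbols declared above are visible in this translation unit. */
static int hashDemo(void){
  Fts3Hash h;
  Fts3HashElem *pE;
  static int iOne = 1, iTwo = 2;
  int nSeen = 0;

  fts3HashInit(&h, FTS3_HASH_STRING, 1);      /* copyKey=1: keys are copied */
  fts3HashInsert(&h, "alpha", 6, &iOne);      /* nKey includes the nul byte */
  fts3HashInsert(&h, "beta",  5, &iTwo);

  if( fts3HashFind(&h, "alpha", 6)!=&iOne ) return -1;

  for(pE=fts3HashFirst(&h); pE; pE=fts3HashNext(pE)){
    nSeen++;                                  /* fts3HashData(pE) is the value */
  }
  fts3HashClear(&h);                          /* Releases all entries and keys */
  return nSeen;
}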
-*/ -#define FTS3_MAX_PENDING_DATA (1*1024*1024) - -/* -** Macro to return the number of elements in an array. SQLite has a -** similar macro called ArraySize(). Use a different name to avoid -** a collision when building an amalgamation with built-in FTS3. -*/ -#define SizeofArray(X) ((int)(sizeof(X)/sizeof(X[0]))) - - -#ifndef MIN -# define MIN(x,y) ((x)<(y)?(x):(y)) -#endif -#ifndef MAX -# define MAX(x,y) ((x)>(y)?(x):(y)) -#endif - -/* -** Maximum length of a varint encoded integer. The varint format is different -** from that used by SQLite, so the maximum length is 10, not 9. -*/ -#define FTS3_VARINT_MAX 10 - -/* -** FTS4 virtual tables may maintain multiple indexes - one index of all terms -** in the document set and zero or more prefix indexes. All indexes are stored -** as one or more b+-trees in the %_segments and %_segdir tables. -** -** It is possible to determine which index a b+-tree belongs to based on the -** value stored in the "%_segdir.level" column. Given this value L, the index -** that the b+-tree belongs to is (L<<10). In other words, all b+-trees with -** level values between 0 and 1023 (inclusive) belong to index 0, all levels -** between 1024 and 2047 to index 1, and so on. -** -** It is considered impossible for an index to use more than 1024 levels. In -** theory though this may happen, but only after at least -** (FTS3_MERGE_COUNT^1024) separate flushes of the pending-terms tables. -*/ -#define FTS3_SEGDIR_MAXLEVEL 1024 -#define FTS3_SEGDIR_MAXLEVEL_STR "1024" - -/* -** The testcase() macro is only used by the amalgamation. If undefined, -** make it a no-op. -*/ -#ifndef testcase -# define testcase(X) -#endif - -/* -** Terminator values for position-lists and column-lists. -*/ -#define POS_COLUMN (1) /* Column-list terminator */ -#define POS_END (0) /* Position-list terminator */ - -/* -** This section provides definitions to allow the -** FTS3 extension to be compiled outside of the -** amalgamation. -*/ -#ifndef SQLITE_AMALGAMATION -/* -** Macros indicating that conditional expressions are always true or -** false. -*/ -#ifdef SQLITE_COVERAGE_TEST -# define ALWAYS(x) (1) -# define NEVER(X) (0) -#else -# define ALWAYS(x) (x) -# define NEVER(x) (x) -#endif - -/* -** Internal types used by SQLite. -*/ -typedef unsigned char u8; /* 1-byte (or larger) unsigned integer */ -typedef short int i16; /* 2-byte (or larger) signed integer */ -typedef unsigned int u32; /* 4-byte unsigned integer */ -typedef sqlite3_uint64 u64; /* 8-byte unsigned integer */ -typedef sqlite3_int64 i64; /* 8-byte signed integer */ - -/* -** Macro used to suppress compiler warnings for unused parameters. -*/ -#define UNUSED_PARAMETER(x) (void)(x) - -/* -** Activate assert() only if SQLITE_TEST is enabled. -*/ -#if !defined(NDEBUG) && !defined(SQLITE_DEBUG) -# define NDEBUG 1 -#endif - -/* -** The TESTONLY macro is used to enclose variable declarations or -** other bits of code that are needed to support the arguments -** within testcase() and assert() macros. 
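Given the %_segdir.level ranges described above (levels 0 through 1023 belong to index 0, 1024 through 2047 to index 1, and so on), the index number is the absolute level divided by FTS3_SEGDIR_MAXLEVEL, and the remainder is the level within that index. A small standalone check:

#include <stdio.h>

#define FTS3_SEGDIR_MAXLEVEL 1024

int main(void){
  static const int aLevel[] = { 0, 1023, 1024, 2047, 2500 };
  int i;
  for(i=0; i<(int)(sizeof(aLevel)/sizeof(aLevel[0])); i++){
    int iAbs = aLevel[i];
    printf("absolute level %4d -> index %d, level within index %d\n",
           iAbs, iAbs/FTS3_SEGDIR_MAXLEVEL, iAbs%FTS3_SEGDIR_MAXLEVEL);
  }
  return 0;
}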
-*/ -#if defined(SQLITE_DEBUG) || defined(SQLITE_COVERAGE_TEST) -# define TESTONLY(X) X -#else -# define TESTONLY(X) -#endif - -#endif /* SQLITE_AMALGAMATION */ - -#ifdef SQLITE_DEBUG -SQLITE_PRIVATE int sqlite3Fts3Corrupt(void); -# define FTS_CORRUPT_VTAB sqlite3Fts3Corrupt() -#else -# define FTS_CORRUPT_VTAB SQLITE_CORRUPT_VTAB -#endif - -typedef struct Fts3Table Fts3Table; -typedef struct Fts3Cursor Fts3Cursor; -typedef struct Fts3Expr Fts3Expr; -typedef struct Fts3Phrase Fts3Phrase; -typedef struct Fts3PhraseToken Fts3PhraseToken; - -typedef struct Fts3Doclist Fts3Doclist; -typedef struct Fts3SegFilter Fts3SegFilter; -typedef struct Fts3DeferredToken Fts3DeferredToken; -typedef struct Fts3SegReader Fts3SegReader; -typedef struct Fts3MultiSegReader Fts3MultiSegReader; - -/* -** A connection to a fulltext index is an instance of the following -** structure. The xCreate and xConnect methods create an instance -** of this structure and xDestroy and xDisconnect free that instance. -** All other methods receive a pointer to the structure as one of their -** arguments. -*/ -struct Fts3Table { - sqlite3_vtab base; /* Base class used by SQLite core */ - sqlite3 *db; /* The database connection */ - const char *zDb; /* logical database name */ - const char *zName; /* virtual table name */ - int nColumn; /* number of named columns in virtual table */ - char **azColumn; /* column names. malloced */ - u8 *abNotindexed; /* True for 'notindexed' columns */ - sqlite3_tokenizer *pTokenizer; /* tokenizer for inserts and queries */ - char *zContentTbl; /* content=xxx option, or NULL */ - char *zLanguageid; /* languageid=xxx option, or NULL */ - int nAutoincrmerge; /* Value configured by 'automerge' */ - u32 nLeafAdd; /* Number of leaf blocks added this trans */ - - /* Precompiled statements used by the implementation. Each of these - ** statements is run and reset within a single virtual table API call. - */ - sqlite3_stmt *aStmt[40]; - - char *zReadExprlist; - char *zWriteExprlist; - - int nNodeSize; /* Soft limit for node size */ - u8 bFts4; /* True for FTS4, false for FTS3 */ - u8 bHasStat; /* True if %_stat table exists (2==unknown) */ - u8 bHasDocsize; /* True if %_docsize table exists */ - u8 bDescIdx; /* True if doclists are in reverse order */ - u8 bIgnoreSavepoint; /* True to ignore xSavepoint invocations */ - int nPgsz; /* Page size for host database */ - char *zSegmentsTbl; /* Name of %_segments table */ - sqlite3_blob *pSegments; /* Blob handle open on %_segments table */ - - /* - ** The following array of hash tables is used to buffer pending index - ** updates during transactions. All pending updates buffered at any one - ** time must share a common language-id (see the FTS4 langid= feature). - ** The current language id is stored in variable iPrevLangid. - ** - ** A single FTS4 table may have multiple full-text indexes. For each index - ** there is an entry in the aIndex[] array. Index 0 is an index of all the - ** terms that appear in the document set. Each subsequent index in aIndex[] - ** is an index of prefixes of a specific length. - ** - ** Variable nPendingData contains an estimate the memory consumed by the - ** pending data structures, including hash table overhead, but not including - ** malloc overhead. When nPendingData exceeds nMaxPendingData, all hash - ** tables are flushed to disk. Variable iPrevDocid is the docid of the most - ** recently inserted record. 
- */ - int nIndex; /* Size of aIndex[] */ - struct Fts3Index { - int nPrefix; /* Prefix length (0 for main terms index) */ - Fts3Hash hPending; /* Pending terms table for this index */ - } *aIndex; - int nMaxPendingData; /* Max pending data before flush to disk */ - int nPendingData; /* Current bytes of pending data */ - sqlite_int64 iPrevDocid; /* Docid of most recently inserted document */ - int iPrevLangid; /* Langid of recently inserted document */ - -#if defined(SQLITE_DEBUG) || defined(SQLITE_COVERAGE_TEST) - /* State variables used for validating that the transaction control - ** methods of the virtual table are called at appropriate times. These - ** values do not contribute to FTS functionality; they are used for - ** verifying the operation of the SQLite core. - */ - int inTransaction; /* True after xBegin but before xCommit/xRollback */ - int mxSavepoint; /* Largest valid xSavepoint integer */ -#endif - -#ifdef SQLITE_TEST - /* True to disable the incremental doclist optimization. This is controled - ** by special insert command 'test-no-incr-doclist'. */ - int bNoIncrDoclist; -#endif -}; - -/* -** When the core wants to read from the virtual table, it creates a -** virtual table cursor (an instance of the following structure) using -** the xOpen method. Cursors are destroyed using the xClose method. -*/ -struct Fts3Cursor { - sqlite3_vtab_cursor base; /* Base class used by SQLite core */ - i16 eSearch; /* Search strategy (see below) */ - u8 isEof; /* True if at End Of Results */ - u8 isRequireSeek; /* True if must seek pStmt to %_content row */ - sqlite3_stmt *pStmt; /* Prepared statement in use by the cursor */ - Fts3Expr *pExpr; /* Parsed MATCH query string */ - int iLangid; /* Language being queried for */ - int nPhrase; /* Number of matchable phrases in query */ - Fts3DeferredToken *pDeferred; /* Deferred search tokens, if any */ - sqlite3_int64 iPrevId; /* Previous id read from aDoclist */ - char *pNextId; /* Pointer into the body of aDoclist */ - char *aDoclist; /* List of docids for full-text queries */ - int nDoclist; /* Size of buffer at aDoclist */ - u8 bDesc; /* True to sort in descending order */ - int eEvalmode; /* An FTS3_EVAL_XX constant */ - int nRowAvg; /* Average size of database rows, in pages */ - sqlite3_int64 nDoc; /* Documents in table */ - i64 iMinDocid; /* Minimum docid to return */ - i64 iMaxDocid; /* Maximum docid to return */ - int isMatchinfoNeeded; /* True when aMatchinfo[] needs filling in */ - u32 *aMatchinfo; /* Information about most recent match */ - int nMatchinfo; /* Number of elements in aMatchinfo[] */ - char *zMatchinfo; /* Matchinfo specification */ -}; - -#define FTS3_EVAL_FILTER 0 -#define FTS3_EVAL_NEXT 1 -#define FTS3_EVAL_MATCHINFO 2 - -/* -** The Fts3Cursor.eSearch member is always set to one of the following. -** Actualy, Fts3Cursor.eSearch can be greater than or equal to -** FTS3_FULLTEXT_SEARCH. If so, then Fts3Cursor.eSearch - 2 is the index -** of the column to be searched. For example, in -** -** CREATE VIRTUAL TABLE ex1 USING fts3(a,b,c,d); -** SELECT docid FROM ex1 WHERE b MATCH 'one two three'; -** -** Because the LHS of the MATCH operator is 2nd column "b", -** Fts3Cursor.eSearch will be set to FTS3_FULLTEXT_SEARCH+1. (+0 for a, -** +1 for b, +2 for c, +3 for d.) If the LHS of MATCH were "ex1" -** indicating that all columns should be searched, -** then eSearch would be set to FTS3_FULLTEXT_SEARCH+4. 
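The Fts3Cursor.eSearch numbering described above can be captured in two tiny helpers. The helper names are illustrative only; FTS3_FULLTEXT_SEARCH is the constant defined immediately below.

/* eSearch value for a MATCH whose LHS is the iCol'th user column
** (+0 for the first column, +1 for the second, and so on). */
static int fts3StrategyForColumn(int iCol){
  return FTS3_FULLTEXT_SEARCH + iCol;
}

/* eSearch value when the table name itself is the LHS of MATCH, so all
** nCol user columns are searched (FTS3_FULLTEXT_SEARCH+4 in the
** four-column example above). */
static int fts3StrategyForAllColumns(int nCol){
  return FTS3_FULLTEXT_SEARCH + nCol;
}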
-*/ -#define FTS3_FULLSCAN_SEARCH 0 /* Linear scan of %_content table */ -#define FTS3_DOCID_SEARCH 1 /* Lookup by rowid on %_content table */ -#define FTS3_FULLTEXT_SEARCH 2 /* Full-text index search */ - -/* -** The lower 16-bits of the sqlite3_index_info.idxNum value set by -** the xBestIndex() method contains the Fts3Cursor.eSearch value described -** above. The upper 16-bits contain a combination of the following -** bits, used to describe extra constraints on full-text searches. -*/ -#define FTS3_HAVE_LANGID 0x00010000 /* languageid=? */ -#define FTS3_HAVE_DOCID_GE 0x00020000 /* docid>=? */ -#define FTS3_HAVE_DOCID_LE 0x00040000 /* docid<=? */ - -struct Fts3Doclist { - char *aAll; /* Array containing doclist (or NULL) */ - int nAll; /* Size of a[] in bytes */ - char *pNextDocid; /* Pointer to next docid */ - - sqlite3_int64 iDocid; /* Current docid (if pList!=0) */ - int bFreeList; /* True if pList should be sqlite3_free()d */ - char *pList; /* Pointer to position list following iDocid */ - int nList; /* Length of position list */ -}; - -/* -** A "phrase" is a sequence of one or more tokens that must match in -** sequence. A single token is the base case and the most common case. -** For a sequence of tokens contained in double-quotes (i.e. "one two three") -** nToken will be the number of tokens in the string. -*/ -struct Fts3PhraseToken { - char *z; /* Text of the token */ - int n; /* Number of bytes in buffer z */ - int isPrefix; /* True if token ends with a "*" character */ - int bFirst; /* True if token must appear at position 0 */ - - /* Variables above this point are populated when the expression is - ** parsed (by code in fts3_expr.c). Below this point the variables are - ** used when evaluating the expression. */ - Fts3DeferredToken *pDeferred; /* Deferred token object for this token */ - Fts3MultiSegReader *pSegcsr; /* Segment-reader for this token */ -}; - -struct Fts3Phrase { - /* Cache of doclist for this phrase. */ - Fts3Doclist doclist; - int bIncr; /* True if doclist is loaded incrementally */ - int iDoclistToken; - - /* Variables below this point are populated by fts3_expr.c when parsing - ** a MATCH expression. Everything above is part of the evaluation phase. - */ - int nToken; /* Number of tokens in the phrase */ - int iColumn; /* Index of column this phrase must match */ - Fts3PhraseToken aToken[1]; /* One entry for each token in the phrase */ -}; - -/* -** A tree of these objects forms the RHS of a MATCH operator. -** -** If Fts3Expr.eType is FTSQUERY_PHRASE and isLoaded is true, then aDoclist -** points to a malloced buffer, size nDoclist bytes, containing the results -** of this phrase query in FTS3 doclist format. As usual, the initial -** "Length" field found in doclists stored on disk is omitted from this -** buffer. -** -** Variable aMI is used only for FTSQUERY_NEAR nodes to store the global -** matchinfo data. If it is not NULL, it points to an array of size nCol*3, -** where nCol is the number of columns in the queried FTS table. The array -** is populated as follows: -** -** aMI[iCol*3 + 0] = Undefined -** aMI[iCol*3 + 1] = Number of occurrences -** aMI[iCol*3 + 2] = Number of rows containing at least one instance -** -** The aMI array is allocated using sqlite3_malloc(). It should be freed -** when the expression node is. 
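The idxNum layout described above, with the Fts3Cursor.eSearch value in the low 16 bits and constraint flags in the upper 16 bits, can be packed and unpacked as in the sketch below. The helper names are illustrative; the flag values are the FTS3_HAVE_* constants defined above.

/* Combine a search strategy and constraint flags into one idxNum value. */
static int fts3PackIdxNum(int eSearch, int flags){
  return (flags & 0xFFFF0000) | (eSearch & 0x0000FFFF);
}

/* Recover the eSearch value and test the individual constraint flags. */
static int fts3IdxNumSearch(int idxNum){   return idxNum & 0x0000FFFF; }
static int fts3IdxNumHasLangid(int idxNum){  return (idxNum & FTS3_HAVE_LANGID)!=0; }
static int fts3IdxNumHasDocidGe(int idxNum){ return (idxNum & FTS3_HAVE_DOCID_GE)!=0; }
static int fts3IdxNumHasDocidLe(int idxNum){ return (idxNum & FTS3_HAVE_DOCID_LE)!=0; }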
-*/ -struct Fts3Expr { - int eType; /* One of the FTSQUERY_XXX values defined below */ - int nNear; /* Valid if eType==FTSQUERY_NEAR */ - Fts3Expr *pParent; /* pParent->pLeft==this or pParent->pRight==this */ - Fts3Expr *pLeft; /* Left operand */ - Fts3Expr *pRight; /* Right operand */ - Fts3Phrase *pPhrase; /* Valid if eType==FTSQUERY_PHRASE */ - - /* The following are used by the fts3_eval.c module. */ - sqlite3_int64 iDocid; /* Current docid */ - u8 bEof; /* True this expression is at EOF already */ - u8 bStart; /* True if iDocid is valid */ - u8 bDeferred; /* True if this expression is entirely deferred */ - - u32 *aMI; -}; - -/* -** Candidate values for Fts3Query.eType. Note that the order of the first -** four values is in order of precedence when parsing expressions. For -** example, the following: -** -** "a OR b AND c NOT d NEAR e" -** -** is equivalent to: -** -** "a OR (b AND (c NOT (d NEAR e)))" -*/ -#define FTSQUERY_NEAR 1 -#define FTSQUERY_NOT 2 -#define FTSQUERY_AND 3 -#define FTSQUERY_OR 4 -#define FTSQUERY_PHRASE 5 - - -/* fts3_write.c */ -SQLITE_PRIVATE int sqlite3Fts3UpdateMethod(sqlite3_vtab*,int,sqlite3_value**,sqlite3_int64*); -SQLITE_PRIVATE int sqlite3Fts3PendingTermsFlush(Fts3Table *); -SQLITE_PRIVATE void sqlite3Fts3PendingTermsClear(Fts3Table *); -SQLITE_PRIVATE int sqlite3Fts3Optimize(Fts3Table *); -SQLITE_PRIVATE int sqlite3Fts3SegReaderNew(int, int, sqlite3_int64, - sqlite3_int64, sqlite3_int64, const char *, int, Fts3SegReader**); -SQLITE_PRIVATE int sqlite3Fts3SegReaderPending( - Fts3Table*,int,const char*,int,int,Fts3SegReader**); -SQLITE_PRIVATE void sqlite3Fts3SegReaderFree(Fts3SegReader *); -SQLITE_PRIVATE int sqlite3Fts3AllSegdirs(Fts3Table*, int, int, int, sqlite3_stmt **); -SQLITE_PRIVATE int sqlite3Fts3ReadBlock(Fts3Table*, sqlite3_int64, char **, int*, int*); - -SQLITE_PRIVATE int sqlite3Fts3SelectDoctotal(Fts3Table *, sqlite3_stmt **); -SQLITE_PRIVATE int sqlite3Fts3SelectDocsize(Fts3Table *, sqlite3_int64, sqlite3_stmt **); - -#ifndef SQLITE_DISABLE_FTS4_DEFERRED -SQLITE_PRIVATE void sqlite3Fts3FreeDeferredTokens(Fts3Cursor *); -SQLITE_PRIVATE int sqlite3Fts3DeferToken(Fts3Cursor *, Fts3PhraseToken *, int); -SQLITE_PRIVATE int sqlite3Fts3CacheDeferredDoclists(Fts3Cursor *); -SQLITE_PRIVATE void sqlite3Fts3FreeDeferredDoclists(Fts3Cursor *); -SQLITE_PRIVATE int sqlite3Fts3DeferredTokenList(Fts3DeferredToken *, char **, int *); -#else -# define sqlite3Fts3FreeDeferredTokens(x) -# define sqlite3Fts3DeferToken(x,y,z) SQLITE_OK -# define sqlite3Fts3CacheDeferredDoclists(x) SQLITE_OK -# define sqlite3Fts3FreeDeferredDoclists(x) -# define sqlite3Fts3DeferredTokenList(x,y,z) SQLITE_OK -#endif - -SQLITE_PRIVATE void sqlite3Fts3SegmentsClose(Fts3Table *); -SQLITE_PRIVATE int sqlite3Fts3MaxLevel(Fts3Table *, int *); - -/* Special values interpreted by sqlite3SegReaderCursor() */ -#define FTS3_SEGCURSOR_PENDING -1 -#define FTS3_SEGCURSOR_ALL -2 - -SQLITE_PRIVATE int sqlite3Fts3SegReaderStart(Fts3Table*, Fts3MultiSegReader*, Fts3SegFilter*); -SQLITE_PRIVATE int sqlite3Fts3SegReaderStep(Fts3Table *, Fts3MultiSegReader *); -SQLITE_PRIVATE void sqlite3Fts3SegReaderFinish(Fts3MultiSegReader *); - -SQLITE_PRIVATE int sqlite3Fts3SegReaderCursor(Fts3Table *, - int, int, int, const char *, int, int, int, Fts3MultiSegReader *); - -/* Flags allowed as part of the 4th argument to SegmentReaderIterate() */ -#define FTS3_SEGMENT_REQUIRE_POS 0x00000001 -#define FTS3_SEGMENT_IGNORE_EMPTY 0x00000002 -#define FTS3_SEGMENT_COLUMN_FILTER 0x00000004 -#define FTS3_SEGMENT_PREFIX 
0x00000008 -#define FTS3_SEGMENT_SCAN 0x00000010 -#define FTS3_SEGMENT_FIRST 0x00000020 - -/* Type passed as 4th argument to SegmentReaderIterate() */ -struct Fts3SegFilter { - const char *zTerm; - int nTerm; - int iCol; - int flags; -}; - -struct Fts3MultiSegReader { - /* Used internally by sqlite3Fts3SegReaderXXX() calls */ - Fts3SegReader **apSegment; /* Array of Fts3SegReader objects */ - int nSegment; /* Size of apSegment array */ - int nAdvance; /* How many seg-readers to advance */ - Fts3SegFilter *pFilter; /* Pointer to filter object */ - char *aBuffer; /* Buffer to merge doclists in */ - int nBuffer; /* Allocated size of aBuffer[] in bytes */ - - int iColFilter; /* If >=0, filter for this column */ - int bRestart; - - /* Used by fts3.c only. */ - int nCost; /* Cost of running iterator */ - int bLookup; /* True if a lookup of a single entry. */ - - /* Output values. Valid only after Fts3SegReaderStep() returns SQLITE_ROW. */ - char *zTerm; /* Pointer to term buffer */ - int nTerm; /* Size of zTerm in bytes */ - char *aDoclist; /* Pointer to doclist buffer */ - int nDoclist; /* Size of aDoclist[] in bytes */ -}; - -SQLITE_PRIVATE int sqlite3Fts3Incrmerge(Fts3Table*,int,int); - -#define fts3GetVarint32(p, piVal) ( \ - (*(u8*)(p)&0x80) ? sqlite3Fts3GetVarint32(p, piVal) : (*piVal=*(u8*)(p), 1) \ -) - -/* fts3.c */ -SQLITE_PRIVATE int sqlite3Fts3PutVarint(char *, sqlite3_int64); -SQLITE_PRIVATE int sqlite3Fts3GetVarint(const char *, sqlite_int64 *); -SQLITE_PRIVATE int sqlite3Fts3GetVarint32(const char *, int *); -SQLITE_PRIVATE int sqlite3Fts3VarintLen(sqlite3_uint64); -SQLITE_PRIVATE void sqlite3Fts3Dequote(char *); -SQLITE_PRIVATE void sqlite3Fts3DoclistPrev(int,char*,int,char**,sqlite3_int64*,int*,u8*); -SQLITE_PRIVATE int sqlite3Fts3EvalPhraseStats(Fts3Cursor *, Fts3Expr *, u32 *); -SQLITE_PRIVATE int sqlite3Fts3FirstFilter(sqlite3_int64, char *, int, char *); -SQLITE_PRIVATE void sqlite3Fts3CreateStatTable(int*, Fts3Table*); - -/* fts3_tokenizer.c */ -SQLITE_PRIVATE const char *sqlite3Fts3NextToken(const char *, int *); -SQLITE_PRIVATE int sqlite3Fts3InitHashTable(sqlite3 *, Fts3Hash *, const char *); -SQLITE_PRIVATE int sqlite3Fts3InitTokenizer(Fts3Hash *pHash, const char *, - sqlite3_tokenizer **, char ** -); -SQLITE_PRIVATE int sqlite3Fts3IsIdChar(char); - -/* fts3_snippet.c */ -SQLITE_PRIVATE void sqlite3Fts3Offsets(sqlite3_context*, Fts3Cursor*); -SQLITE_PRIVATE void sqlite3Fts3Snippet(sqlite3_context *, Fts3Cursor *, const char *, - const char *, const char *, int, int -); -SQLITE_PRIVATE void sqlite3Fts3Matchinfo(sqlite3_context *, Fts3Cursor *, const char *); - -/* fts3_expr.c */ -SQLITE_PRIVATE int sqlite3Fts3ExprParse(sqlite3_tokenizer *, int, - char **, int, int, int, const char *, int, Fts3Expr **, char ** -); -SQLITE_PRIVATE void sqlite3Fts3ExprFree(Fts3Expr *); -#ifdef SQLITE_TEST -SQLITE_PRIVATE int sqlite3Fts3ExprInitTestInterface(sqlite3 *db); -SQLITE_PRIVATE int sqlite3Fts3InitTerm(sqlite3 *db); -#endif - -SQLITE_PRIVATE int sqlite3Fts3OpenTokenizer(sqlite3_tokenizer *, int, const char *, int, - sqlite3_tokenizer_cursor ** -); - -/* fts3_aux.c */ -SQLITE_PRIVATE int sqlite3Fts3InitAux(sqlite3 *db); - -SQLITE_PRIVATE void sqlite3Fts3EvalPhraseCleanup(Fts3Phrase *); - -SQLITE_PRIVATE int sqlite3Fts3MsrIncrStart( - Fts3Table*, Fts3MultiSegReader*, int, const char*, int); -SQLITE_PRIVATE int sqlite3Fts3MsrIncrNext( - Fts3Table *, Fts3MultiSegReader *, sqlite3_int64 *, char **, int *); -SQLITE_PRIVATE int sqlite3Fts3EvalPhrasePoslist(Fts3Cursor *, Fts3Expr *, int 
iCol, char **); -SQLITE_PRIVATE int sqlite3Fts3MsrOvfl(Fts3Cursor *, Fts3MultiSegReader *, int *); -SQLITE_PRIVATE int sqlite3Fts3MsrIncrRestart(Fts3MultiSegReader *pCsr); - -/* fts3_tokenize_vtab.c */ -SQLITE_PRIVATE int sqlite3Fts3InitTok(sqlite3*, Fts3Hash *); - -/* fts3_unicode2.c (functions generated by parsing unicode text files) */ -#ifdef SQLITE_ENABLE_FTS4_UNICODE61 -SQLITE_PRIVATE int sqlite3FtsUnicodeFold(int, int); -SQLITE_PRIVATE int sqlite3FtsUnicodeIsalnum(int); -SQLITE_PRIVATE int sqlite3FtsUnicodeIsdiacritic(int); -#endif - -#endif /* !SQLITE_CORE || SQLITE_ENABLE_FTS3 */ -#endif /* _FTSINT_H */ - -/************** End of fts3Int.h *********************************************/ -/************** Continuing where we left off in fts3.c ***********************/ -#if !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS3) - -#if defined(SQLITE_ENABLE_FTS3) && !defined(SQLITE_CORE) -# define SQLITE_CORE 1 -#endif - -/* #include */ -/* #include */ -/* #include */ -/* #include */ -/* #include */ -/* #include */ - -#ifndef SQLITE_CORE - SQLITE_EXTENSION_INIT1 -#endif - -static int fts3EvalNext(Fts3Cursor *pCsr); -static int fts3EvalStart(Fts3Cursor *pCsr); -static int fts3TermSegReaderCursor( - Fts3Cursor *, const char *, int, int, Fts3MultiSegReader **); - -/* -** Write a 64-bit variable-length integer to memory starting at p[0]. -** The length of data written will be between 1 and FTS3_VARINT_MAX bytes. -** The number of bytes written is returned. -*/ -SQLITE_PRIVATE int sqlite3Fts3PutVarint(char *p, sqlite_int64 v){ - unsigned char *q = (unsigned char *) p; - sqlite_uint64 vu = v; - do{ - *q++ = (unsigned char) ((vu & 0x7f) | 0x80); - vu >>= 7; - }while( vu!=0 ); - q[-1] &= 0x7f; /* turn off high bit in final byte */ - assert( q - (unsigned char *)p <= FTS3_VARINT_MAX ); - return (int) (q - (unsigned char *)p); -} - -#define GETVARINT_STEP(v, ptr, shift, mask1, mask2, var, ret) \ - v = (v & mask1) | ( (*ptr++) << shift ); \ - if( (v & mask2)==0 ){ var = v; return ret; } -#define GETVARINT_INIT(v, ptr, shift, mask1, mask2, var, ret) \ - v = (*ptr++); \ - if( (v & mask2)==0 ){ var = v; return ret; } - -/* -** Read a 64-bit variable-length integer from memory starting at p[0]. -** Return the number of bytes read, or 0 on error. -** The value is stored in *v. -*/ -SQLITE_PRIVATE int sqlite3Fts3GetVarint(const char *p, sqlite_int64 *v){ - const char *pStart = p; - u32 a; - u64 b; - int shift; - - GETVARINT_INIT(a, p, 0, 0x00, 0x80, *v, 1); - GETVARINT_STEP(a, p, 7, 0x7F, 0x4000, *v, 2); - GETVARINT_STEP(a, p, 14, 0x3FFF, 0x200000, *v, 3); - GETVARINT_STEP(a, p, 21, 0x1FFFFF, 0x10000000, *v, 4); - b = (a & 0x0FFFFFFF ); - - for(shift=28; shift<=63; shift+=7){ - u64 c = *p++; - b += (c&0x7F) << shift; - if( (c & 0x80)==0 ) break; - } - *v = b; - return (int)(p - pStart); -} - -/* -** Similar to sqlite3Fts3GetVarint(), except that the output is truncated to a -** 32-bit integer before it is returned. 
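To see the little-endian, seven-bits-per-byte format produced by sqlite3Fts3PutVarint() above, the standalone sketch below repeats the same encoding loop, prints a few encodings, and checks a round trip. It deliberately re-implements the logic instead of calling the SQLITE_PRIVATE functions so that it can be compiled on its own.

#include <stdio.h>

/* Same loop as sqlite3Fts3PutVarint() above: emit seven bits at a time,
** least-significant group first, with the high bit set on every byte
** except the last. */
static int putVarint(unsigned char *p, unsigned long long v){
  unsigned char *q = p;
  do{
    *q++ = (unsigned char)((v & 0x7f) | 0x80);
    v >>= 7;
  }while( v!=0 );
  q[-1] &= 0x7f;                       /* Clear the high bit of the final byte */
  return (int)(q - p);
}

/* Reassemble the value by shifting each seven-bit group into place. */
static int getVarint(const unsigned char *p, unsigned long long *pVal){
  unsigned long long v = 0;
  int shift = 0, n = 0;
  unsigned char c;
  do{
    c = p[n++];
    v |= (unsigned long long)(c & 0x7f) << shift;
    shift += 7;
  }while( c & 0x80 );
  *pVal = v;
  return n;
}

int main(void){
  static const unsigned long long aVal[] = { 1, 127, 300, 1000000 };
  unsigned char aBuf[10];
  int i, j, n;
  for(i=0; i<4; i++){
    unsigned long long vOut = 0;
    n = putVarint(aBuf, aVal[i]);
    printf("%llu ->", aVal[i]);
    for(j=0; j<n; j++) printf(" 0x%02x", aBuf[j]);
    getVarint(aBuf, &vOut);
    printf("   (round-trip %s)\n", vOut==aVal[i] ? "ok" : "FAILED");
  }
  return 0;
}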
-*/ -SQLITE_PRIVATE int sqlite3Fts3GetVarint32(const char *p, int *pi){ - u32 a; - -#ifndef fts3GetVarint32 - GETVARINT_INIT(a, p, 0, 0x00, 0x80, *pi, 1); -#else - a = (*p++); - assert( a & 0x80 ); -#endif - - GETVARINT_STEP(a, p, 7, 0x7F, 0x4000, *pi, 2); - GETVARINT_STEP(a, p, 14, 0x3FFF, 0x200000, *pi, 3); - GETVARINT_STEP(a, p, 21, 0x1FFFFF, 0x10000000, *pi, 4); - a = (a & 0x0FFFFFFF ); - *pi = (int)(a | ((u32)(*p & 0x0F) << 28)); - return 5; -} - -/* -** Return the number of bytes required to encode v as a varint -*/ -SQLITE_PRIVATE int sqlite3Fts3VarintLen(sqlite3_uint64 v){ - int i = 0; - do{ - i++; - v >>= 7; - }while( v!=0 ); - return i; -} - -/* -** Convert an SQL-style quoted string into a normal string by removing -** the quote characters. The conversion is done in-place. If the -** input does not begin with a quote character, then this routine -** is a no-op. -** -** Examples: -** -** "abc" becomes abc -** 'xyz' becomes xyz -** [pqr] becomes pqr -** `mno` becomes mno -** -*/ -SQLITE_PRIVATE void sqlite3Fts3Dequote(char *z){ - char quote; /* Quote character (if any ) */ - - quote = z[0]; - if( quote=='[' || quote=='\'' || quote=='"' || quote=='`' ){ - int iIn = 1; /* Index of next byte to read from input */ - int iOut = 0; /* Index of next byte to write to output */ - - /* If the first byte was a '[', then the close-quote character is a ']' */ - if( quote=='[' ) quote = ']'; - - while( ALWAYS(z[iIn]) ){ - if( z[iIn]==quote ){ - if( z[iIn+1]!=quote ) break; - z[iOut++] = quote; - iIn += 2; - }else{ - z[iOut++] = z[iIn++]; - } - } - z[iOut] = '\0'; - } -} - -/* -** Read a single varint from the doclist at *pp and advance *pp to point -** to the first byte past the end of the varint. Add the value of the varint -** to *pVal. -*/ -static void fts3GetDeltaVarint(char **pp, sqlite3_int64 *pVal){ - sqlite3_int64 iVal; - *pp += sqlite3Fts3GetVarint(*pp, &iVal); - *pVal += iVal; -} - -/* -** When this function is called, *pp points to the first byte following a -** varint that is part of a doclist (or position-list, or any other list -** of varints). This function moves *pp to point to the start of that varint, -** and sets *pVal by the varint value. -** -** Argument pStart points to the first byte of the doclist that the -** varint is part of. -*/ -static void fts3GetReverseVarint( - char **pp, - char *pStart, - sqlite3_int64 *pVal -){ - sqlite3_int64 iVal; - char *p; - - /* Pointer p now points at the first byte past the varint we are - ** interested in. So, unless the doclist is corrupt, the 0x80 bit is - ** clear on character p[-1]. */ - for(p = (*pp)-2; p>=pStart && *p&0x80; p--); - p++; - *pp = p; - - sqlite3Fts3GetVarint(p, &iVal); - *pVal = iVal; -} - -/* -** The xDisconnect() virtual table method. -*/ -static int fts3DisconnectMethod(sqlite3_vtab *pVtab){ - Fts3Table *p = (Fts3Table *)pVtab; - int i; - - assert( p->nPendingData==0 ); - assert( p->pSegments==0 ); - - /* Free any prepared statements held */ - for(i=0; iaStmt); i++){ - sqlite3_finalize(p->aStmt[i]); - } - sqlite3_free(p->zSegmentsTbl); - sqlite3_free(p->zReadExprlist); - sqlite3_free(p->zWriteExprlist); - sqlite3_free(p->zContentTbl); - sqlite3_free(p->zLanguageid); - - /* Invoke the tokenizer destructor to free the tokenizer. */ - p->pTokenizer->pModule->xDestroy(p->pTokenizer); - - sqlite3_free(p); - return SQLITE_OK; -} - -/* -** Construct one or more SQL statements from the format string given -** and then evaluate those statements. The success code is written -** into *pRc. 
-** -** If *pRc is initially non-zero then this routine is a no-op. -*/ -static void fts3DbExec( - int *pRc, /* Success code */ - sqlite3 *db, /* Database in which to run SQL */ - const char *zFormat, /* Format string for SQL */ - ... /* Arguments to the format string */ -){ - va_list ap; - char *zSql; - if( *pRc ) return; - va_start(ap, zFormat); - zSql = sqlite3_vmprintf(zFormat, ap); - va_end(ap); - if( zSql==0 ){ - *pRc = SQLITE_NOMEM; - }else{ - *pRc = sqlite3_exec(db, zSql, 0, 0, 0); - sqlite3_free(zSql); - } -} - -/* -** The xDestroy() virtual table method. -*/ -static int fts3DestroyMethod(sqlite3_vtab *pVtab){ - Fts3Table *p = (Fts3Table *)pVtab; - int rc = SQLITE_OK; /* Return code */ - const char *zDb = p->zDb; /* Name of database (e.g. "main", "temp") */ - sqlite3 *db = p->db; /* Database handle */ - - /* Drop the shadow tables */ - if( p->zContentTbl==0 ){ - fts3DbExec(&rc, db, "DROP TABLE IF EXISTS %Q.'%q_content'", zDb, p->zName); - } - fts3DbExec(&rc, db, "DROP TABLE IF EXISTS %Q.'%q_segments'", zDb,p->zName); - fts3DbExec(&rc, db, "DROP TABLE IF EXISTS %Q.'%q_segdir'", zDb, p->zName); - fts3DbExec(&rc, db, "DROP TABLE IF EXISTS %Q.'%q_docsize'", zDb, p->zName); - fts3DbExec(&rc, db, "DROP TABLE IF EXISTS %Q.'%q_stat'", zDb, p->zName); - - /* If everything has worked, invoke fts3DisconnectMethod() to free the - ** memory associated with the Fts3Table structure and return SQLITE_OK. - ** Otherwise, return an SQLite error code. - */ - return (rc==SQLITE_OK ? fts3DisconnectMethod(pVtab) : rc); -} - - -/* -** Invoke sqlite3_declare_vtab() to declare the schema for the FTS3 table -** passed as the first argument. This is done as part of the xConnect() -** and xCreate() methods. -** -** If *pRc is non-zero when this function is called, it is a no-op. -** Otherwise, if an error occurs, an SQLite error code is stored in *pRc -** before returning. -*/ -static void fts3DeclareVtab(int *pRc, Fts3Table *p){ - if( *pRc==SQLITE_OK ){ - int i; /* Iterator variable */ - int rc; /* Return code */ - char *zSql; /* SQL statement passed to declare_vtab() */ - char *zCols; /* List of user defined columns */ - const char *zLanguageid; - - zLanguageid = (p->zLanguageid ? p->zLanguageid : "__langid"); - sqlite3_vtab_config(p->db, SQLITE_VTAB_CONSTRAINT_SUPPORT, 1); - - /* Create a list of user columns for the virtual table */ - zCols = sqlite3_mprintf("%Q, ", p->azColumn[0]); - for(i=1; zCols && inColumn; i++){ - zCols = sqlite3_mprintf("%z%Q, ", zCols, p->azColumn[i]); - } - - /* Create the whole "CREATE TABLE" statement to pass to SQLite */ - zSql = sqlite3_mprintf( - "CREATE TABLE x(%s %Q HIDDEN, docid HIDDEN, %Q HIDDEN)", - zCols, p->zName, zLanguageid - ); - if( !zCols || !zSql ){ - rc = SQLITE_NOMEM; - }else{ - rc = sqlite3_declare_vtab(p->db, zSql); - } - - sqlite3_free(zSql); - sqlite3_free(zCols); - *pRc = rc; - } -} - -/* -** Create the %_stat table if it does not already exist. -*/ -SQLITE_PRIVATE void sqlite3Fts3CreateStatTable(int *pRc, Fts3Table *p){ - fts3DbExec(pRc, p->db, - "CREATE TABLE IF NOT EXISTS %Q.'%q_stat'" - "(id INTEGER PRIMARY KEY, value BLOB);", - p->zDb, p->zName - ); - if( (*pRc)==SQLITE_OK ) p->bHasStat = 1; -} - -/* -** Create the backing store tables (%_content, %_segments and %_segdir) -** required by the FTS3 table passed as the only argument. This is done -** as part of the vtab xCreate() method. 
-** -** If the p->bHasDocsize boolean is true (indicating that this is an -** FTS4 table, not an FTS3 table) then also create the %_docsize and -** %_stat tables required by FTS4. -*/ -static int fts3CreateTables(Fts3Table *p){ - int rc = SQLITE_OK; /* Return code */ - int i; /* Iterator variable */ - sqlite3 *db = p->db; /* The database connection */ - - if( p->zContentTbl==0 ){ - const char *zLanguageid = p->zLanguageid; - char *zContentCols; /* Columns of %_content table */ - - /* Create a list of user columns for the content table */ - zContentCols = sqlite3_mprintf("docid INTEGER PRIMARY KEY"); - for(i=0; zContentCols && inColumn; i++){ - char *z = p->azColumn[i]; - zContentCols = sqlite3_mprintf("%z, 'c%d%q'", zContentCols, i, z); - } - if( zLanguageid && zContentCols ){ - zContentCols = sqlite3_mprintf("%z, langid", zContentCols, zLanguageid); - } - if( zContentCols==0 ) rc = SQLITE_NOMEM; - - /* Create the content table */ - fts3DbExec(&rc, db, - "CREATE TABLE %Q.'%q_content'(%s)", - p->zDb, p->zName, zContentCols - ); - sqlite3_free(zContentCols); - } - - /* Create other tables */ - fts3DbExec(&rc, db, - "CREATE TABLE %Q.'%q_segments'(blockid INTEGER PRIMARY KEY, block BLOB);", - p->zDb, p->zName - ); - fts3DbExec(&rc, db, - "CREATE TABLE %Q.'%q_segdir'(" - "level INTEGER," - "idx INTEGER," - "start_block INTEGER," - "leaves_end_block INTEGER," - "end_block INTEGER," - "root BLOB," - "PRIMARY KEY(level, idx)" - ");", - p->zDb, p->zName - ); - if( p->bHasDocsize ){ - fts3DbExec(&rc, db, - "CREATE TABLE %Q.'%q_docsize'(docid INTEGER PRIMARY KEY, size BLOB);", - p->zDb, p->zName - ); - } - assert( p->bHasStat==p->bFts4 ); - if( p->bHasStat ){ - sqlite3Fts3CreateStatTable(&rc, p); - } - return rc; -} - -/* -** Store the current database page-size in bytes in p->nPgsz. -** -** If *pRc is non-zero when this function is called, it is a no-op. -** Otherwise, if an error occurs, an SQLite error code is stored in *pRc -** before returning. -*/ -static void fts3DatabasePageSize(int *pRc, Fts3Table *p){ - if( *pRc==SQLITE_OK ){ - int rc; /* Return code */ - char *zSql; /* SQL text "PRAGMA %Q.page_size" */ - sqlite3_stmt *pStmt; /* Compiled "PRAGMA %Q.page_size" statement */ - - zSql = sqlite3_mprintf("PRAGMA %Q.page_size", p->zDb); - if( !zSql ){ - rc = SQLITE_NOMEM; - }else{ - rc = sqlite3_prepare(p->db, zSql, -1, &pStmt, 0); - if( rc==SQLITE_OK ){ - sqlite3_step(pStmt); - p->nPgsz = sqlite3_column_int(pStmt, 0); - rc = sqlite3_finalize(pStmt); - }else if( rc==SQLITE_AUTH ){ - p->nPgsz = 1024; - rc = SQLITE_OK; - } - } - assert( p->nPgsz>0 || rc!=SQLITE_OK ); - sqlite3_free(zSql); - *pRc = rc; - } -} - -/* -** "Special" FTS4 arguments are column specifications of the following form: -** -** = -** -** There may not be whitespace surrounding the "=" character. The -** term may be quoted, but the may not. -*/ -static int fts3IsSpecialColumn( - const char *z, - int *pnKey, - char **pzValue -){ - char *zValue; - const char *zCsr = z; - - while( *zCsr!='=' ){ - if( *zCsr=='\0' ) return 0; - zCsr++; - } - - *pnKey = (int)(zCsr-z); - zValue = sqlite3_mprintf("%s", &zCsr[1]); - if( zValue ){ - sqlite3Fts3Dequote(zValue); - } - *pzValue = zValue; - return 1; -} - -/* -** Append the output of a printf() style formatting to an existing string. -*/ -static void fts3Appendf( - int *pRc, /* IN/OUT: Error code */ - char **pz, /* IN/OUT: Pointer to string buffer */ - const char *zFormat, /* Printf format string to append */ - ... 
/* Arguments for printf format string */ -){ - if( *pRc==SQLITE_OK ){ - va_list ap; - char *z; - va_start(ap, zFormat); - z = sqlite3_vmprintf(zFormat, ap); - va_end(ap); - if( z && *pz ){ - char *z2 = sqlite3_mprintf("%s%s", *pz, z); - sqlite3_free(z); - z = z2; - } - if( z==0 ) *pRc = SQLITE_NOMEM; - sqlite3_free(*pz); - *pz = z; - } -} - -/* -** Return a copy of input string zInput enclosed in double-quotes (") and -** with all double quote characters escaped. For example: -** -** fts3QuoteId("un \"zip\"") -> "un \"\"zip\"\"" -** -** The pointer returned points to memory obtained from sqlite3_malloc(). It -** is the callers responsibility to call sqlite3_free() to release this -** memory. -*/ -static char *fts3QuoteId(char const *zInput){ - int nRet; - char *zRet; - nRet = 2 + (int)strlen(zInput)*2 + 1; - zRet = sqlite3_malloc(nRet); - if( zRet ){ - int i; - char *z = zRet; - *(z++) = '"'; - for(i=0; zInput[i]; i++){ - if( zInput[i]=='"' ) *(z++) = '"'; - *(z++) = zInput[i]; - } - *(z++) = '"'; - *(z++) = '\0'; - } - return zRet; -} - -/* -** Return a list of comma separated SQL expressions and a FROM clause that -** could be used in a SELECT statement such as the following: -** -** SELECT FROM %_content AS x ... -** -** to return the docid, followed by each column of text data in order -** from left to write. If parameter zFunc is not NULL, then instead of -** being returned directly each column of text data is passed to an SQL -** function named zFunc first. For example, if zFunc is "unzip" and the -** table has the three user-defined columns "a", "b", and "c", the following -** string is returned: -** -** "docid, unzip(x.'a'), unzip(x.'b'), unzip(x.'c') FROM %_content AS x" -** -** The pointer returned points to a buffer allocated by sqlite3_malloc(). It -** is the responsibility of the caller to eventually free it. -** -** If *pRc is not SQLITE_OK when this function is called, it is a no-op (and -** a NULL pointer is returned). Otherwise, if an OOM error is encountered -** by this function, NULL is returned and *pRc is set to SQLITE_NOMEM. If -** no error occurs, *pRc is left unmodified. -*/ -static char *fts3ReadExprList(Fts3Table *p, const char *zFunc, int *pRc){ - char *zRet = 0; - char *zFree = 0; - char *zFunction; - int i; - - if( p->zContentTbl==0 ){ - if( !zFunc ){ - zFunction = ""; - }else{ - zFree = zFunction = fts3QuoteId(zFunc); - } - fts3Appendf(pRc, &zRet, "docid"); - for(i=0; inColumn; i++){ - fts3Appendf(pRc, &zRet, ",%s(x.'c%d%q')", zFunction, i, p->azColumn[i]); - } - if( p->zLanguageid ){ - fts3Appendf(pRc, &zRet, ", x.%Q", "langid"); - } - sqlite3_free(zFree); - }else{ - fts3Appendf(pRc, &zRet, "rowid"); - for(i=0; inColumn; i++){ - fts3Appendf(pRc, &zRet, ", x.'%q'", p->azColumn[i]); - } - if( p->zLanguageid ){ - fts3Appendf(pRc, &zRet, ", x.%Q", p->zLanguageid); - } - } - fts3Appendf(pRc, &zRet, " FROM '%q'.'%q%s' AS x", - p->zDb, - (p->zContentTbl ? p->zContentTbl : p->zName), - (p->zContentTbl ? "" : "_content") - ); - return zRet; -} - -/* -** Return a list of N comma separated question marks, where N is the number -** of columns in the %_content table (one for the docid plus one for each -** user-defined text column). -** -** If argument zFunc is not NULL, then all but the first question mark -** is preceded by zFunc and an open bracket, and followed by a closed -** bracket. 
For example, if zFunc is "zip" and the FTS3 table has three -** user-defined text columns, the following string is returned: -** -** "?, zip(?), zip(?), zip(?)" -** -** The pointer returned points to a buffer allocated by sqlite3_malloc(). It -** is the responsibility of the caller to eventually free it. -** -** If *pRc is not SQLITE_OK when this function is called, it is a no-op (and -** a NULL pointer is returned). Otherwise, if an OOM error is encountered -** by this function, NULL is returned and *pRc is set to SQLITE_NOMEM. If -** no error occurs, *pRc is left unmodified. -*/ -static char *fts3WriteExprList(Fts3Table *p, const char *zFunc, int *pRc){ - char *zRet = 0; - char *zFree = 0; - char *zFunction; - int i; - - if( !zFunc ){ - zFunction = ""; - }else{ - zFree = zFunction = fts3QuoteId(zFunc); - } - fts3Appendf(pRc, &zRet, "?"); - for(i=0; inColumn; i++){ - fts3Appendf(pRc, &zRet, ",%s(?)", zFunction); - } - if( p->zLanguageid ){ - fts3Appendf(pRc, &zRet, ", ?"); - } - sqlite3_free(zFree); - return zRet; -} - -/* -** This function interprets the string at (*pp) as a non-negative integer -** value. It reads the integer and sets *pnOut to the value read, then -** sets *pp to point to the byte immediately following the last byte of -** the integer value. -** -** Only decimal digits ('0'..'9') may be part of an integer value. -** -** If *pp does not being with a decimal digit SQLITE_ERROR is returned and -** the output value undefined. Otherwise SQLITE_OK is returned. -** -** This function is used when parsing the "prefix=" FTS4 parameter. -*/ -static int fts3GobbleInt(const char **pp, int *pnOut){ - const char *p; /* Iterator pointer */ - int nInt = 0; /* Output value */ - - for(p=*pp; p[0]>='0' && p[0]<='9'; p++){ - nInt = nInt * 10 + (p[0] - '0'); - } - if( p==*pp ) return SQLITE_ERROR; - *pnOut = nInt; - *pp = p; - return SQLITE_OK; -} - -/* -** This function is called to allocate an array of Fts3Index structures -** representing the indexes maintained by the current FTS table. FTS tables -** always maintain the main "terms" index, but may also maintain one or -** more "prefix" indexes, depending on the value of the "prefix=" parameter -** (if any) specified as part of the CREATE VIRTUAL TABLE statement. -** -** Argument zParam is passed the value of the "prefix=" option if one was -** specified, or NULL otherwise. -** -** If no error occurs, SQLITE_OK is returned and *apIndex set to point to -** the allocated array. *pnIndex is set to the number of elements in the -** array. If an error does occur, an SQLite error code is returned. -** -** Regardless of whether or not an error is returned, it is the responsibility -** of the caller to call sqlite3_free() on the output array to free it. 
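fts3GobbleInt() and fts3PrefixParameter() amount to a two-pass parse of the comma-separated "prefix=" value: count the commas to size the array, then read one decimal integer per entry. A reduced standalone sketch of that pattern, with hypothetical names and without the reserved slot 0 that fts3PrefixParameter() keeps for the main terms index:

#include <stdio.h>
#include <stdlib.h>

/* Read a non-negative decimal integer at *pp and advance *pp past it.
** Returns 1 if at least one digit was consumed, else 0. */
static int gobbleInt(const char **pp, int *pnOut){
  const char *p = *pp;
  int n = 0;
  while( *p>='0' && *p<='9' ){ n = n*10 + (*p - '0'); p++; }
  if( p==*pp ) return 0;
  *pnOut = n;
  *pp = p;
  return 1;
}

/* Parse a comma-separated list such as "2,4" into a malloc'd int array. */
static int *parsePrefixList(const char *z, int *pnOut){
  int n = 1, i;
  const char *p;
  int *a;
  for(p=z; *p; p++){ if( *p==',' ) n++; }
  a = malloc(sizeof(int)*n);
  if( a ){
    p = z;
    for(i=0; i<n; i++){
      if( !gobbleInt(&p, &a[i]) ) a[i] = 0;
      if( *p==',' ) p++;
    }
  }
  *pnOut = n;
  return a;
}

int main(void){
  int n, i, *a = parsePrefixList("2,4", &n);
  for(i=0; a && i<n; i++) printf("prefix index of %d bytes\n", a[i]);
  free(a);
  return 0;
}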
-*/ -static int fts3PrefixParameter( - const char *zParam, /* ABC in prefix=ABC parameter to parse */ - int *pnIndex, /* OUT: size of *apIndex[] array */ - struct Fts3Index **apIndex /* OUT: Array of indexes for this table */ -){ - struct Fts3Index *aIndex; /* Allocated array */ - int nIndex = 1; /* Number of entries in array */ - - if( zParam && zParam[0] ){ - const char *p; - nIndex++; - for(p=zParam; *p; p++){ - if( *p==',' ) nIndex++; - } - } - - aIndex = sqlite3_malloc(sizeof(struct Fts3Index) * nIndex); - *apIndex = aIndex; - *pnIndex = nIndex; - if( !aIndex ){ - return SQLITE_NOMEM; - } - - memset(aIndex, 0, sizeof(struct Fts3Index) * nIndex); - if( zParam ){ - const char *p = zParam; - int i; - for(i=1; i module name ("fts3" or "fts4") -** argv[1] -> database name -** argv[2] -> table name -** argv[...] -> "column name" and other module argument fields. -*/ -static int fts3InitVtab( - int isCreate, /* True for xCreate, false for xConnect */ - sqlite3 *db, /* The SQLite database connection */ - void *pAux, /* Hash table containing tokenizers */ - int argc, /* Number of elements in argv array */ - const char * const *argv, /* xCreate/xConnect argument array */ - sqlite3_vtab **ppVTab, /* Write the resulting vtab structure here */ - char **pzErr /* Write any error message here */ -){ - Fts3Hash *pHash = (Fts3Hash *)pAux; - Fts3Table *p = 0; /* Pointer to allocated vtab */ - int rc = SQLITE_OK; /* Return code */ - int i; /* Iterator variable */ - int nByte; /* Size of allocation used for *p */ - int iCol; /* Column index */ - int nString = 0; /* Bytes required to hold all column names */ - int nCol = 0; /* Number of columns in the FTS table */ - char *zCsr; /* Space for holding column names */ - int nDb; /* Bytes required to hold database name */ - int nName; /* Bytes required to hold table name */ - int isFts4 = (argv[0][3]=='4'); /* True for FTS4, false for FTS3 */ - const char **aCol; /* Array of column names */ - sqlite3_tokenizer *pTokenizer = 0; /* Tokenizer for this table */ - - int nIndex; /* Size of aIndex[] array */ - struct Fts3Index *aIndex = 0; /* Array of indexes for this table */ - - /* The results of parsing supported FTS4 key=value options: */ - int bNoDocsize = 0; /* True to omit %_docsize table */ - int bDescIdx = 0; /* True to store descending indexes */ - char *zPrefix = 0; /* Prefix parameter value (or NULL) */ - char *zCompress = 0; /* compress=? parameter (or NULL) */ - char *zUncompress = 0; /* uncompress=? parameter (or NULL) */ - char *zContent = 0; /* content=? parameter (or NULL) */ - char *zLanguageid = 0; /* languageid=? parameter (or NULL) */ - char **azNotindexed = 0; /* The set of notindexed= columns */ - int nNotindexed = 0; /* Size of azNotindexed[] array */ - - assert( strlen(argv[0])==4 ); - assert( (sqlite3_strnicmp(argv[0], "fts4", 4)==0 && isFts4) - || (sqlite3_strnicmp(argv[0], "fts3", 4)==0 && !isFts4) - ); - - nDb = (int)strlen(argv[1]) + 1; - nName = (int)strlen(argv[2]) + 1; - - nByte = sizeof(const char *) * (argc-2); - aCol = (const char **)sqlite3_malloc(nByte); - if( aCol ){ - memset((void*)aCol, 0, nByte); - azNotindexed = (char **)sqlite3_malloc(nByte); - } - if( azNotindexed ){ - memset(azNotindexed, 0, nByte); - } - if( !aCol || !azNotindexed ){ - rc = SQLITE_NOMEM; - goto fts3_init_out; - } - - /* Loop through all of the arguments passed by the user to the FTS3/4 - ** module (i.e. all the column names and special arguments). 
This loop - ** does the following: - ** - ** + Figures out the number of columns the FTSX table will have, and - ** the number of bytes of space that must be allocated to store copies - ** of the column names. - ** - ** + If there is a tokenizer specification included in the arguments, - ** initializes the tokenizer pTokenizer. - */ - for(i=3; rc==SQLITE_OK && i8 - && 0==sqlite3_strnicmp(z, "tokenize", 8) - && 0==sqlite3Fts3IsIdChar(z[8]) - ){ - rc = sqlite3Fts3InitTokenizer(pHash, &z[9], &pTokenizer, pzErr); - } - - /* Check if it is an FTS4 special argument. */ - else if( isFts4 && fts3IsSpecialColumn(z, &nKey, &zVal) ){ - struct Fts4Option { - const char *zOpt; - int nOpt; - } aFts4Opt[] = { - { "matchinfo", 9 }, /* 0 -> MATCHINFO */ - { "prefix", 6 }, /* 1 -> PREFIX */ - { "compress", 8 }, /* 2 -> COMPRESS */ - { "uncompress", 10 }, /* 3 -> UNCOMPRESS */ - { "order", 5 }, /* 4 -> ORDER */ - { "content", 7 }, /* 5 -> CONTENT */ - { "languageid", 10 }, /* 6 -> LANGUAGEID */ - { "notindexed", 10 } /* 7 -> NOTINDEXED */ - }; - - int iOpt; - if( !zVal ){ - rc = SQLITE_NOMEM; - }else{ - for(iOpt=0; iOptnOpt && !sqlite3_strnicmp(z, pOp->zOpt, pOp->nOpt) ){ - break; - } - } - if( iOpt==SizeofArray(aFts4Opt) ){ - *pzErr = sqlite3_mprintf("unrecognized parameter: %s", z); - rc = SQLITE_ERROR; - }else{ - switch( iOpt ){ - case 0: /* MATCHINFO */ - if( strlen(zVal)!=4 || sqlite3_strnicmp(zVal, "fts3", 4) ){ - *pzErr = sqlite3_mprintf("unrecognized matchinfo: %s", zVal); - rc = SQLITE_ERROR; - } - bNoDocsize = 1; - break; - - case 1: /* PREFIX */ - sqlite3_free(zPrefix); - zPrefix = zVal; - zVal = 0; - break; - - case 2: /* COMPRESS */ - sqlite3_free(zCompress); - zCompress = zVal; - zVal = 0; - break; - - case 3: /* UNCOMPRESS */ - sqlite3_free(zUncompress); - zUncompress = zVal; - zVal = 0; - break; - - case 4: /* ORDER */ - if( (strlen(zVal)!=3 || sqlite3_strnicmp(zVal, "asc", 3)) - && (strlen(zVal)!=4 || sqlite3_strnicmp(zVal, "desc", 4)) - ){ - *pzErr = sqlite3_mprintf("unrecognized order: %s", zVal); - rc = SQLITE_ERROR; - } - bDescIdx = (zVal[0]=='d' || zVal[0]=='D'); - break; - - case 5: /* CONTENT */ - sqlite3_free(zContent); - zContent = zVal; - zVal = 0; - break; - - case 6: /* LANGUAGEID */ - assert( iOpt==6 ); - sqlite3_free(zLanguageid); - zLanguageid = zVal; - zVal = 0; - break; - - case 7: /* NOTINDEXED */ - azNotindexed[nNotindexed++] = zVal; - zVal = 0; - break; - } - } - sqlite3_free(zVal); - } - } - - /* Otherwise, the argument is a column name. */ - else { - nString += (int)(strlen(z) + 1); - aCol[nCol++] = z; - } - } - - /* If a content=xxx option was specified, the following: - ** - ** 1. Ignore any compress= and uncompress= options. - ** - ** 2. If no column names were specified as part of the CREATE VIRTUAL - ** TABLE statement, use all columns from the content table. - */ - if( rc==SQLITE_OK && zContent ){ - sqlite3_free(zCompress); - sqlite3_free(zUncompress); - zCompress = 0; - zUncompress = 0; - if( nCol==0 ){ - sqlite3_free((void*)aCol); - aCol = 0; - rc = fts3ContentColumns(db, argv[1], zContent, &aCol, &nCol, &nString); - - /* If a languageid= option was specified, remove the language id - ** column from the aCol[] array. 
*/ - if( rc==SQLITE_OK && zLanguageid ){ - int j; - for(j=0; jdb = db; - p->nColumn = nCol; - p->nPendingData = 0; - p->azColumn = (char **)&p[1]; - p->pTokenizer = pTokenizer; - p->nMaxPendingData = FTS3_MAX_PENDING_DATA; - p->bHasDocsize = (isFts4 && bNoDocsize==0); - p->bHasStat = isFts4; - p->bFts4 = isFts4; - p->bDescIdx = bDescIdx; - p->nAutoincrmerge = 0xff; /* 0xff means setting unknown */ - p->zContentTbl = zContent; - p->zLanguageid = zLanguageid; - zContent = 0; - zLanguageid = 0; - TESTONLY( p->inTransaction = -1 ); - TESTONLY( p->mxSavepoint = -1 ); - - p->aIndex = (struct Fts3Index *)&p->azColumn[nCol]; - memcpy(p->aIndex, aIndex, sizeof(struct Fts3Index) * nIndex); - p->nIndex = nIndex; - for(i=0; iaIndex[i].hPending, FTS3_HASH_STRING, 1); - } - p->abNotindexed = (u8 *)&p->aIndex[nIndex]; - - /* Fill in the zName and zDb fields of the vtab structure. */ - zCsr = (char *)&p->abNotindexed[nCol]; - p->zName = zCsr; - memcpy(zCsr, argv[2], nName); - zCsr += nName; - p->zDb = zCsr; - memcpy(zCsr, argv[1], nDb); - zCsr += nDb; - - /* Fill in the azColumn array */ - for(iCol=0; iColazColumn[iCol] = zCsr; - zCsr += n+1; - assert( zCsr <= &((char *)p)[nByte] ); - } - - /* Fill in the abNotindexed array */ - for(iCol=0; iColazColumn[iCol]); - for(i=0; iazColumn[iCol], zNot, n) - ){ - p->abNotindexed[iCol] = 1; - sqlite3_free(zNot); - azNotindexed[i] = 0; - } - } - } - for(i=0; izReadExprlist = fts3ReadExprList(p, zUncompress, &rc); - p->zWriteExprlist = fts3WriteExprList(p, zCompress, &rc); - if( rc!=SQLITE_OK ) goto fts3_init_out; - - /* If this is an xCreate call, create the underlying tables in the - ** database. TODO: For xConnect(), it could verify that said tables exist. - */ - if( isCreate ){ - rc = fts3CreateTables(p); - } - - /* Check to see if a legacy fts3 table has been "upgraded" by the - ** addition of a %_stat table so that it can use incremental merge. - */ - if( !isFts4 && !isCreate ){ - p->bHasStat = 2; - } - - /* Figure out the page-size for the database. This is required in order to - ** estimate the cost of loading large doclists from the database. */ - fts3DatabasePageSize(&rc, p); - p->nNodeSize = p->nPgsz-35; - - /* Declare the table schema to SQLite. */ - fts3DeclareVtab(&rc, p); - -fts3_init_out: - sqlite3_free(zPrefix); - sqlite3_free(aIndex); - sqlite3_free(zCompress); - sqlite3_free(zUncompress); - sqlite3_free(zContent); - sqlite3_free(zLanguageid); - for(i=0; ipModule->xDestroy(pTokenizer); - } - }else{ - assert( p->pSegments==0 ); - *ppVTab = &p->base; - } - return rc; -} - -/* -** The xConnect() and xCreate() methods for the virtual table. All the -** work is done in function fts3InitVtab(). 
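For context, the xCreate path is driven entirely by a CREATE VIRTUAL TABLE statement: its argument list supplies the column names and the key=value options that fts3InitVtab() parses above. A small usage sketch against the public C API, assuming an SQLite build with FTS4 compiled in:

#include <sqlite3.h>
#include <stdio.h>

int main(void){
  sqlite3 *db;
  char *zErr = 0;
  sqlite3_open(":memory:", &db);
  if( sqlite3_exec(db,
        "CREATE VIRTUAL TABLE docs USING fts4("
        "  title, body,"                /* user-defined text columns */
        "  prefix=\"2,4\","             /* maintain 2- and 4-byte prefix indexes */
        "  order=desc"                  /* store doclists in descending docid order */
        ");"
        "INSERT INTO docs(title, body) VALUES('greeting', 'hello fts world');"
        "SELECT rowid FROM docs WHERE docs MATCH 'hello';",
        0, 0, &zErr)!=SQLITE_OK ){
    fprintf(stderr, "error: %s\n", zErr);
    sqlite3_free(zErr);
  }
  sqlite3_close(db);
  return 0;
}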
-*/ -static int fts3ConnectMethod( - sqlite3 *db, /* Database connection */ - void *pAux, /* Pointer to tokenizer hash table */ - int argc, /* Number of elements in argv array */ - const char * const *argv, /* xCreate/xConnect argument array */ - sqlite3_vtab **ppVtab, /* OUT: New sqlite3_vtab object */ - char **pzErr /* OUT: sqlite3_malloc'd error message */ -){ - return fts3InitVtab(0, db, pAux, argc, argv, ppVtab, pzErr); -} -static int fts3CreateMethod( - sqlite3 *db, /* Database connection */ - void *pAux, /* Pointer to tokenizer hash table */ - int argc, /* Number of elements in argv array */ - const char * const *argv, /* xCreate/xConnect argument array */ - sqlite3_vtab **ppVtab, /* OUT: New sqlite3_vtab object */ - char **pzErr /* OUT: sqlite3_malloc'd error message */ -){ - return fts3InitVtab(1, db, pAux, argc, argv, ppVtab, pzErr); -} - -/* -** Set the pIdxInfo->estimatedRows variable to nRow. Unless this -** extension is currently being used by a version of SQLite too old to -** support estimatedRows. In that case this function is a no-op. -*/ -static void fts3SetEstimatedRows(sqlite3_index_info *pIdxInfo, i64 nRow){ -#if SQLITE_VERSION_NUMBER>=3008002 - if( sqlite3_libversion_number()>=3008002 ){ - pIdxInfo->estimatedRows = nRow; - } -#endif -} - -/* -** Implementation of the xBestIndex method for FTS3 tables. There -** are three possible strategies, in order of preference: -** -** 1. Direct lookup by rowid or docid. -** 2. Full-text search using a MATCH operator on a non-docid column. -** 3. Linear scan of %_content table. -*/ -static int fts3BestIndexMethod(sqlite3_vtab *pVTab, sqlite3_index_info *pInfo){ - Fts3Table *p = (Fts3Table *)pVTab; - int i; /* Iterator variable */ - int iCons = -1; /* Index of constraint to use */ - - int iLangidCons = -1; /* Index of langid=x constraint, if present */ - int iDocidGe = -1; /* Index of docid>=x constraint, if present */ - int iDocidLe = -1; /* Index of docid<=x constraint, if present */ - int iIdx; - - /* By default use a full table scan. This is an expensive option, - ** so search through the constraints to see if a more efficient - ** strategy is possible. - */ - pInfo->idxNum = FTS3_FULLSCAN_SEARCH; - pInfo->estimatedCost = 5000000; - for(i=0; inConstraint; i++){ - int bDocid; /* True if this constraint is on docid */ - struct sqlite3_index_constraint *pCons = &pInfo->aConstraint[i]; - if( pCons->usable==0 ){ - if( pCons->op==SQLITE_INDEX_CONSTRAINT_MATCH ){ - /* There exists an unusable MATCH constraint. This means that if - ** the planner does elect to use the results of this call as part - ** of the overall query plan the user will see an "unable to use - ** function MATCH in the requested context" error. To discourage - ** this, return a very high cost here. */ - pInfo->idxNum = FTS3_FULLSCAN_SEARCH; - pInfo->estimatedCost = 1e50; - fts3SetEstimatedRows(pInfo, ((sqlite3_int64)1) << 50); - return SQLITE_OK; - } - continue; - } - - bDocid = (pCons->iColumn<0 || pCons->iColumn==p->nColumn+1); - - /* A direct lookup on the rowid or docid column. Assign a cost of 1.0. */ - if( iCons<0 && pCons->op==SQLITE_INDEX_CONSTRAINT_EQ && bDocid ){ - pInfo->idxNum = FTS3_DOCID_SEARCH; - pInfo->estimatedCost = 1.0; - iCons = i; - } - - /* A MATCH constraint. Use a full-text search. - ** - ** If there is more than one MATCH constraint available, use the first - ** one encountered. If there is both a MATCH constraint and a direct - ** rowid/docid lookup, prefer the MATCH strategy. 
This is done even - ** though the rowid/docid lookup is faster than a MATCH query, selecting - ** it would lead to an "unable to use function MATCH in the requested - ** context" error. - */ - if( pCons->op==SQLITE_INDEX_CONSTRAINT_MATCH - && pCons->iColumn>=0 && pCons->iColumn<=p->nColumn - ){ - pInfo->idxNum = FTS3_FULLTEXT_SEARCH + pCons->iColumn; - pInfo->estimatedCost = 2.0; - iCons = i; - } - - /* Equality constraint on the langid column */ - if( pCons->op==SQLITE_INDEX_CONSTRAINT_EQ - && pCons->iColumn==p->nColumn + 2 - ){ - iLangidCons = i; - } - - if( bDocid ){ - switch( pCons->op ){ - case SQLITE_INDEX_CONSTRAINT_GE: - case SQLITE_INDEX_CONSTRAINT_GT: - iDocidGe = i; - break; - - case SQLITE_INDEX_CONSTRAINT_LE: - case SQLITE_INDEX_CONSTRAINT_LT: - iDocidLe = i; - break; - } - } - } - - iIdx = 1; - if( iCons>=0 ){ - pInfo->aConstraintUsage[iCons].argvIndex = iIdx++; - pInfo->aConstraintUsage[iCons].omit = 1; - } - if( iLangidCons>=0 ){ - pInfo->idxNum |= FTS3_HAVE_LANGID; - pInfo->aConstraintUsage[iLangidCons].argvIndex = iIdx++; - } - if( iDocidGe>=0 ){ - pInfo->idxNum |= FTS3_HAVE_DOCID_GE; - pInfo->aConstraintUsage[iDocidGe].argvIndex = iIdx++; - } - if( iDocidLe>=0 ){ - pInfo->idxNum |= FTS3_HAVE_DOCID_LE; - pInfo->aConstraintUsage[iDocidLe].argvIndex = iIdx++; - } - - /* Regardless of the strategy selected, FTS can deliver rows in rowid (or - ** docid) order. Both ascending and descending are possible. - */ - if( pInfo->nOrderBy==1 ){ - struct sqlite3_index_orderby *pOrder = &pInfo->aOrderBy[0]; - if( pOrder->iColumn<0 || pOrder->iColumn==p->nColumn+1 ){ - if( pOrder->desc ){ - pInfo->idxStr = "DESC"; - }else{ - pInfo->idxStr = "ASC"; - } - pInfo->orderByConsumed = 1; - } - } - - assert( p->pSegments==0 ); - return SQLITE_OK; -} - -/* -** Implementation of xOpen method. -*/ -static int fts3OpenMethod(sqlite3_vtab *pVTab, sqlite3_vtab_cursor **ppCsr){ - sqlite3_vtab_cursor *pCsr; /* Allocated cursor */ - - UNUSED_PARAMETER(pVTab); - - /* Allocate a buffer large enough for an Fts3Cursor structure. If the - ** allocation succeeds, zero it and return SQLITE_OK. Otherwise, - ** if the allocation fails, return SQLITE_NOMEM. - */ - *ppCsr = pCsr = (sqlite3_vtab_cursor *)sqlite3_malloc(sizeof(Fts3Cursor)); - if( !pCsr ){ - return SQLITE_NOMEM; - } - memset(pCsr, 0, sizeof(Fts3Cursor)); - return SQLITE_OK; -} - -/* -** Close the cursor. For additional information see the documentation -** on the xClose method of the virtual table interface. -*/ -static int fts3CloseMethod(sqlite3_vtab_cursor *pCursor){ - Fts3Cursor *pCsr = (Fts3Cursor *)pCursor; - assert( ((Fts3Table *)pCsr->base.pVtab)->pSegments==0 ); - sqlite3_finalize(pCsr->pStmt); - sqlite3Fts3ExprFree(pCsr->pExpr); - sqlite3Fts3FreeDeferredTokens(pCsr); - sqlite3_free(pCsr->aDoclist); - sqlite3_free(pCsr->aMatchinfo); - assert( ((Fts3Table *)pCsr->base.pVtab)->pSegments==0 ); - sqlite3_free(pCsr); - return SQLITE_OK; -} - -/* -** If pCsr->pStmt has not been prepared (i.e. if pCsr->pStmt==0), then -** compose and prepare an SQL statement of the form: -** -** "SELECT FROM %_content WHERE rowid = ?" -** -** (or the equivalent for a content=xxx table) and set pCsr->pStmt to -** it. If an error occurs, return an SQLite error code. -** -** Otherwise, set *ppStmt to point to pCsr->pStmt and return SQLITE_OK. 
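The seek logic described above prepares the row-lookup statement once, then rebinds and resets it for every subsequent docid. A reduced sketch of that lazy prepare-and-reuse pattern against an ordinary table (the table name t is hypothetical; this is an illustration, not the fts3 cursor code):

#include <sqlite3.h>
#include <stdio.h>

/* Look up one row by rowid, preparing the statement lazily and reusing it. */
static int fetchByRowid(sqlite3 *db, sqlite3_stmt **ppStmt, sqlite3_int64 iRow){
  int rc = SQLITE_OK;
  if( *ppStmt==0 ){
    rc = sqlite3_prepare_v2(db, "SELECT body FROM t WHERE rowid = ?", -1,
                            ppStmt, 0);
  }
  if( rc==SQLITE_OK ){
    sqlite3_bind_int64(*ppStmt, 1, iRow);
    if( sqlite3_step(*ppStmt)==SQLITE_ROW ){
      printf("row %lld: %s\n", (long long)iRow,
             (const char *)sqlite3_column_text(*ppStmt, 0));
    }
    rc = sqlite3_reset(*ppStmt);   /* leave the statement ready for the next seek */
  }
  return rc;
}

int main(void){
  sqlite3 *db;
  sqlite3_stmt *pStmt = 0;
  sqlite3_open(":memory:", &db);
  sqlite3_exec(db, "CREATE TABLE t(body); INSERT INTO t VALUES('hello');", 0, 0, 0);
  fetchByRowid(db, &pStmt, 1);
  sqlite3_finalize(pStmt);
  sqlite3_close(db);
  return 0;
}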
-*/ -static int fts3CursorSeekStmt(Fts3Cursor *pCsr, sqlite3_stmt **ppStmt){ - int rc = SQLITE_OK; - if( pCsr->pStmt==0 ){ - Fts3Table *p = (Fts3Table *)pCsr->base.pVtab; - char *zSql; - zSql = sqlite3_mprintf("SELECT %s WHERE rowid = ?", p->zReadExprlist); - if( !zSql ) return SQLITE_NOMEM; - rc = sqlite3_prepare_v2(p->db, zSql, -1, &pCsr->pStmt, 0); - sqlite3_free(zSql); - } - *ppStmt = pCsr->pStmt; - return rc; -} - -/* -** Position the pCsr->pStmt statement so that it is on the row -** of the %_content table that contains the last match. Return -** SQLITE_OK on success. -*/ -static int fts3CursorSeek(sqlite3_context *pContext, Fts3Cursor *pCsr){ - int rc = SQLITE_OK; - if( pCsr->isRequireSeek ){ - sqlite3_stmt *pStmt = 0; - - rc = fts3CursorSeekStmt(pCsr, &pStmt); - if( rc==SQLITE_OK ){ - sqlite3_bind_int64(pCsr->pStmt, 1, pCsr->iPrevId); - pCsr->isRequireSeek = 0; - if( SQLITE_ROW==sqlite3_step(pCsr->pStmt) ){ - return SQLITE_OK; - }else{ - rc = sqlite3_reset(pCsr->pStmt); - if( rc==SQLITE_OK && ((Fts3Table *)pCsr->base.pVtab)->zContentTbl==0 ){ - /* If no row was found and no error has occurred, then the %_content - ** table is missing a row that is present in the full-text index. - ** The data structures are corrupt. */ - rc = FTS_CORRUPT_VTAB; - pCsr->isEof = 1; - } - } - } - } - - if( rc!=SQLITE_OK && pContext ){ - sqlite3_result_error_code(pContext, rc); - } - return rc; -} - -/* -** This function is used to process a single interior node when searching -** a b-tree for a term or term prefix. The node data is passed to this -** function via the zNode/nNode parameters. The term to search for is -** passed in zTerm/nTerm. -** -** If piFirst is not NULL, then this function sets *piFirst to the blockid -** of the child node that heads the sub-tree that may contain the term. -** -** If piLast is not NULL, then *piLast is set to the right-most child node -** that heads a sub-tree that may contain a term for which zTerm/nTerm is -** a prefix. -** -** If an OOM error occurs, SQLITE_NOMEM is returned. Otherwise, SQLITE_OK. -*/ -static int fts3ScanInteriorNode( - const char *zTerm, /* Term to select leaves for */ - int nTerm, /* Size of term zTerm in bytes */ - const char *zNode, /* Buffer containing segment interior node */ - int nNode, /* Size of buffer at zNode */ - sqlite3_int64 *piFirst, /* OUT: Selected child node */ - sqlite3_int64 *piLast /* OUT: Selected child node */ -){ - int rc = SQLITE_OK; /* Return code */ - const char *zCsr = zNode; /* Cursor to iterate through node */ - const char *zEnd = &zCsr[nNode];/* End of interior node buffer */ - char *zBuffer = 0; /* Buffer to load terms into */ - int nAlloc = 0; /* Size of allocated buffer */ - int isFirstTerm = 1; /* True when processing first term on page */ - sqlite3_int64 iChild; /* Block id of child node to descend to */ - - /* Skip over the 'height' varint that occurs at the start of every - ** interior node. Then load the blockid of the left-child of the b-tree - ** node into variable iChild. - ** - ** Even if the data structure on disk is corrupted, this (reading two - ** varints from the buffer) does not risk an overread. If zNode is a - ** root node, then the buffer comes from a SELECT statement. SQLite does - ** not make this guarantee explicitly, but in practice there are always - ** either more than 20 bytes of allocated space following the nNode bytes of - ** contents, or two zero bytes. 
Or, if the node is read from the %_segments - ** table, then there are always 20 bytes of zeroed padding following the - ** nNode bytes of content (see sqlite3Fts3ReadBlock() for details). - */ - zCsr += sqlite3Fts3GetVarint(zCsr, &iChild); - zCsr += sqlite3Fts3GetVarint(zCsr, &iChild); - if( zCsr>zEnd ){ - return FTS_CORRUPT_VTAB; - } - - while( zCsrzEnd ){ - rc = FTS_CORRUPT_VTAB; - goto finish_scan; - } - if( nPrefix+nSuffix>nAlloc ){ - char *zNew; - nAlloc = (nPrefix+nSuffix) * 2; - zNew = (char *)sqlite3_realloc(zBuffer, nAlloc); - if( !zNew ){ - rc = SQLITE_NOMEM; - goto finish_scan; - } - zBuffer = zNew; - } - assert( zBuffer ); - memcpy(&zBuffer[nPrefix], zCsr, nSuffix); - nBuffer = nPrefix + nSuffix; - zCsr += nSuffix; - - /* Compare the term we are searching for with the term just loaded from - ** the interior node. If the specified term is greater than or equal - ** to the term from the interior node, then all terms on the sub-tree - ** headed by node iChild are smaller than zTerm. No need to search - ** iChild. - ** - ** If the interior node term is larger than the specified term, then - ** the tree headed by iChild may contain the specified term. - */ - cmp = memcmp(zTerm, zBuffer, (nBuffer>nTerm ? nTerm : nBuffer)); - if( piFirst && (cmp<0 || (cmp==0 && nBuffer>nTerm)) ){ - *piFirst = iChild; - piFirst = 0; - } - - if( piLast && cmp<0 ){ - *piLast = iChild; - piLast = 0; - } - - iChild++; - }; - - if( piFirst ) *piFirst = iChild; - if( piLast ) *piLast = iChild; - - finish_scan: - sqlite3_free(zBuffer); - return rc; -} - - -/* -** The buffer pointed to by argument zNode (size nNode bytes) contains an -** interior node of a b-tree segment. The zTerm buffer (size nTerm bytes) -** contains a term. This function searches the sub-tree headed by the zNode -** node for the range of leaf nodes that may contain the specified term -** or terms for which the specified term is a prefix. -** -** If piLeaf is not NULL, then *piLeaf is set to the blockid of the -** left-most leaf node in the tree that may contain the specified term. -** If piLeaf2 is not NULL, then *piLeaf2 is set to the blockid of the -** right-most leaf node that may contain a term for which the specified -** term is a prefix. -** -** It is possible that the range of returned leaf nodes does not contain -** the specified term or any terms for which it is a prefix. However, if the -** segment does contain any such terms, they are stored within the identified -** range. Because this function only inspects interior segment nodes (and -** never loads leaf nodes into memory), it is not possible to be sure. -** -** If an error occurs, an error code other than SQLITE_OK is returned. 
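Terms on an interior node are prefix-compressed, as fts3ScanInteriorNode() above shows: each entry records how many leading bytes it shares with the previous term, followed by the new suffix. A toy reconstruction of that scheme, with fixed-size buffers and no varints, purely as an illustration:

#include <stdio.h>
#include <string.h>

typedef struct { int nPrefix; const char *zSuffix; } Entry;

int main(void){
  /* Each entry: bytes shared with the previous term, plus the new suffix. */
  static const Entry aEntry[] = {
    { 0, "alpha" },   /* reconstructs "alpha"  */
    { 3, "ine"   },   /* reconstructs "alpine" */
    { 2, "ter"   }    /* reconstructs "alter"  */
  };
  char zTerm[32] = "";
  int i;
  for(i=0; i<3; i++){
    strcpy(&zTerm[aEntry[i].nPrefix], aEntry[i].zSuffix);
    printf("%s\n", zTerm);
  }
  return 0;
}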
-*/ -static int fts3SelectLeaf( - Fts3Table *p, /* Virtual table handle */ - const char *zTerm, /* Term to select leaves for */ - int nTerm, /* Size of term zTerm in bytes */ - const char *zNode, /* Buffer containing segment interior node */ - int nNode, /* Size of buffer at zNode */ - sqlite3_int64 *piLeaf, /* Selected leaf node */ - sqlite3_int64 *piLeaf2 /* Selected leaf node */ -){ - int rc; /* Return code */ - int iHeight; /* Height of this node in tree */ - - assert( piLeaf || piLeaf2 ); - - fts3GetVarint32(zNode, &iHeight); - rc = fts3ScanInteriorNode(zTerm, nTerm, zNode, nNode, piLeaf, piLeaf2); - assert( !piLeaf2 || !piLeaf || rc!=SQLITE_OK || (*piLeaf<=*piLeaf2) ); - - if( rc==SQLITE_OK && iHeight>1 ){ - char *zBlob = 0; /* Blob read from %_segments table */ - int nBlob; /* Size of zBlob in bytes */ - - if( piLeaf && piLeaf2 && (*piLeaf!=*piLeaf2) ){ - rc = sqlite3Fts3ReadBlock(p, *piLeaf, &zBlob, &nBlob, 0); - if( rc==SQLITE_OK ){ - rc = fts3SelectLeaf(p, zTerm, nTerm, zBlob, nBlob, piLeaf, 0); - } - sqlite3_free(zBlob); - piLeaf = 0; - zBlob = 0; - } - - if( rc==SQLITE_OK ){ - rc = sqlite3Fts3ReadBlock(p, piLeaf?*piLeaf:*piLeaf2, &zBlob, &nBlob, 0); - } - if( rc==SQLITE_OK ){ - rc = fts3SelectLeaf(p, zTerm, nTerm, zBlob, nBlob, piLeaf, piLeaf2); - } - sqlite3_free(zBlob); - } - - return rc; -} - -/* -** This function is used to create delta-encoded serialized lists of FTS3 -** varints. Each call to this function appends a single varint to a list. -*/ -static void fts3PutDeltaVarint( - char **pp, /* IN/OUT: Output pointer */ - sqlite3_int64 *piPrev, /* IN/OUT: Previous value written to list */ - sqlite3_int64 iVal /* Write this value to the list */ -){ - assert( iVal-*piPrev > 0 || (*piPrev==0 && iVal==0) ); - *pp += sqlite3Fts3PutVarint(*pp, iVal-*piPrev); - *piPrev = iVal; -} - -/* -** When this function is called, *ppPoslist is assumed to point to the -** start of a position-list. After it returns, *ppPoslist points to the -** first byte after the position-list. -** -** A position list is list of positions (delta encoded) and columns for -** a single document record of a doclist. So, in other words, this -** routine advances *ppPoslist so that it points to the next docid in -** the doclist, or to the first byte past the end of the doclist. -** -** If pp is not NULL, then the contents of the position list are copied -** to *pp. *pp is set to point to the first byte past the last byte copied -** before this function returns. -*/ -static void fts3PoslistCopy(char **pp, char **ppPoslist){ - char *pEnd = *ppPoslist; - char c = 0; - - /* The end of a position list is marked by a zero encoded as an FTS3 - ** varint. A single POS_END (0) byte. Except, if the 0 byte is preceded by - ** a byte with the 0x80 bit set, then it is not a varint 0, but the tail - ** of some other, multi-byte, value. - ** - ** The following while-loop moves pEnd to point to the first byte that is not - ** immediately preceded by a byte with the 0x80 bit set. Then increments - ** pEnd once more so that it points to the byte immediately following the - ** last byte in the position-list. - */ - while( *pEnd | c ){ - c = *pEnd++ & 0x80; - testcase( c!=0 && (*pEnd)==0 ); - } - pEnd++; /* Advance past the POS_END terminator byte */ - - if( pp ){ - int n = (int)(pEnd - *ppPoslist); - char *p = *pp; - memcpy(p, *ppPoslist, n); - p += n; - *pp = p; - } - *ppPoslist = pEnd; -} - -/* -** When this function is called, *ppPoslist is assumed to point to the -** start of a column-list. 
After it returns, *ppPoslist points to the -** to the terminator (POS_COLUMN or POS_END) byte of the column-list. -** -** A column-list is list of delta-encoded positions for a single column -** within a single document within a doclist. -** -** The column-list is terminated either by a POS_COLUMN varint (1) or -** a POS_END varint (0). This routine leaves *ppPoslist pointing to -** the POS_COLUMN or POS_END that terminates the column-list. -** -** If pp is not NULL, then the contents of the column-list are copied -** to *pp. *pp is set to point to the first byte past the last byte copied -** before this function returns. The POS_COLUMN or POS_END terminator -** is not copied into *pp. -*/ -static void fts3ColumnlistCopy(char **pp, char **ppPoslist){ - char *pEnd = *ppPoslist; - char c = 0; - - /* A column-list is terminated by either a 0x01 or 0x00 byte that is - ** not part of a multi-byte varint. - */ - while( 0xFE & (*pEnd | c) ){ - c = *pEnd++ & 0x80; - testcase( c!=0 && ((*pEnd)&0xfe)==0 ); - } - if( pp ){ - int n = (int)(pEnd - *ppPoslist); - char *p = *pp; - memcpy(p, *ppPoslist, n); - p += n; - *pp = p; - } - *ppPoslist = pEnd; -} - -/* -** Value used to signify the end of an position-list. This is safe because -** it is not possible to have a document with 2^31 terms. -*/ -#define POSITION_LIST_END 0x7fffffff - -/* -** This function is used to help parse position-lists. When this function is -** called, *pp may point to the start of the next varint in the position-list -** being parsed, or it may point to 1 byte past the end of the position-list -** (in which case **pp will be a terminator bytes POS_END (0) or -** (1)). -** -** If *pp points past the end of the current position-list, set *pi to -** POSITION_LIST_END and return. Otherwise, read the next varint from *pp, -** increment the current value of *pi by the value read, and set *pp to -** point to the next value before returning. -** -** Before calling this routine *pi must be initialized to the value of -** the previous position, or zero if we are reading the first position -** in the position-list. Because positions are delta-encoded, the value -** of the previous position is needed in order to compute the value of -** the next position. -*/ -static void fts3ReadNextPos( - char **pp, /* IN/OUT: Pointer into position-list buffer */ - sqlite3_int64 *pi /* IN/OUT: Value read from position-list */ -){ - if( (**pp)&0xFE ){ - fts3GetDeltaVarint(pp, pi); - *pi -= 2; - }else{ - *pi = POSITION_LIST_END; - } -} - -/* -** If parameter iCol is not 0, write an POS_COLUMN (1) byte followed by -** the value of iCol encoded as a varint to *pp. This will start a new -** column list. -** -** Set *pp to point to the byte just after the last byte written before -** returning (do not modify it if iCol==0). Return the total number of bytes -** written (0 if iCol==0). -*/ -static int fts3PutColNumber(char **pp, int iCol){ - int n = 0; /* Number of bytes written */ - if( iCol ){ - char *p = *pp; /* Output pointer */ - n = 1 + sqlite3Fts3PutVarint(&p[1], iCol); - *p = 0x01; - *pp = &p[n]; - } - return n; -} - -/* -** Compute the union of two position lists. The output written -** into *pp contains all positions of both *pp1 and *pp2 in sorted -** order and with any duplicates removed. All pointers are -** updated appropriately. The caller is responsible for insuring -** that there is enough space in *pp to hold the complete output. 
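Tying the markers above together: within a position list, positions are delta-encoded and stored incremented by 2, so the values 0 (POS_END) and 1 (POS_COLUMN) stay free to act as the terminator and the column separator. A worked decoding sketch (the varint reader is simplified and the byte values are hand-built for the example):

#include <stdio.h>

/* Simplified varint reader, sufficient for the small values in this example. */
static int getVarint(const unsigned char **pp){
  int v = 0, shift = 0;
  unsigned char c;
  do{ c = *(*pp)++; v |= (c & 0x7f) << shift; shift += 7; }while( c & 0x80 );
  return v;
}

int main(void){
  /* Encodes: column 0 at positions 0 and 3, column 2 at position 1. */
  static const unsigned char aList[] = {
    2,       /* first position 0: stored as 0+2 */
    5,       /* next position 3:  stored as delta 3, plus 2 */
    1, 2,    /* POS_COLUMN marker, then column number 2 */
    3,       /* first position 1: stored as 1+2 */
    0        /* POS_END terminator */
  };
  const unsigned char *p = aList;
  int iCol = 0;
  for(;;){
    int iPos = 0;
    while( *p>1 ){
      iPos += getVarint(&p) - 2;
      printf("column %d, position %d\n", iCol, iPos);
    }
    if( *p==0 ) break;   /* POS_END */
    p++;                 /* skip the POS_COLUMN marker */
    iCol = getVarint(&p);
  }
  return 0;
}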
-*/ -static void fts3PoslistMerge( - char **pp, /* Output buffer */ - char **pp1, /* Left input list */ - char **pp2 /* Right input list */ -){ - char *p = *pp; - char *p1 = *pp1; - char *p2 = *pp2; - - while( *p1 || *p2 ){ - int iCol1; /* The current column index in pp1 */ - int iCol2; /* The current column index in pp2 */ - - if( *p1==POS_COLUMN ) fts3GetVarint32(&p1[1], &iCol1); - else if( *p1==POS_END ) iCol1 = POSITION_LIST_END; - else iCol1 = 0; - - if( *p2==POS_COLUMN ) fts3GetVarint32(&p2[1], &iCol2); - else if( *p2==POS_END ) iCol2 = POSITION_LIST_END; - else iCol2 = 0; - - if( iCol1==iCol2 ){ - sqlite3_int64 i1 = 0; /* Last position from pp1 */ - sqlite3_int64 i2 = 0; /* Last position from pp2 */ - sqlite3_int64 iPrev = 0; - int n = fts3PutColNumber(&p, iCol1); - p1 += n; - p2 += n; - - /* At this point, both p1 and p2 point to the start of column-lists - ** for the same column (the column with index iCol1 and iCol2). - ** A column-list is a list of non-negative delta-encoded varints, each - ** incremented by 2 before being stored. Each list is terminated by a - ** POS_END (0) or POS_COLUMN (1). The following block merges the two lists - ** and writes the results to buffer p. p is left pointing to the byte - ** after the list written. No terminator (POS_END or POS_COLUMN) is - ** written to the output. - */ - fts3GetDeltaVarint(&p1, &i1); - fts3GetDeltaVarint(&p2, &i2); - do { - fts3PutDeltaVarint(&p, &iPrev, (i1pos(*pp1) && pos(*pp2)-pos(*pp1)<=nToken). i.e. -** when the *pp1 token appears before the *pp2 token, but not more than nToken -** slots before it. -** -** e.g. nToken==1 searches for adjacent positions. -*/ -static int fts3PoslistPhraseMerge( - char **pp, /* IN/OUT: Preallocated output buffer */ - int nToken, /* Maximum difference in token positions */ - int isSaveLeft, /* Save the left position */ - int isExact, /* If *pp1 is exactly nTokens before *pp2 */ - char **pp1, /* IN/OUT: Left input list */ - char **pp2 /* IN/OUT: Right input list */ -){ - char *p = *pp; - char *p1 = *pp1; - char *p2 = *pp2; - int iCol1 = 0; - int iCol2 = 0; - - /* Never set both isSaveLeft and isExact for the same invocation. */ - assert( isSaveLeft==0 || isExact==0 ); - - assert( p!=0 && *p1!=0 && *p2!=0 ); - if( *p1==POS_COLUMN ){ - p1++; - p1 += fts3GetVarint32(p1, &iCol1); - } - if( *p2==POS_COLUMN ){ - p2++; - p2 += fts3GetVarint32(p2, &iCol2); - } - - while( 1 ){ - if( iCol1==iCol2 ){ - char *pSave = p; - sqlite3_int64 iPrev = 0; - sqlite3_int64 iPos1 = 0; - sqlite3_int64 iPos2 = 0; - - if( iCol1 ){ - *p++ = POS_COLUMN; - p += sqlite3Fts3PutVarint(p, iCol1); - } - - assert( *p1!=POS_END && *p1!=POS_COLUMN ); - assert( *p2!=POS_END && *p2!=POS_COLUMN ); - fts3GetDeltaVarint(&p1, &iPos1); iPos1 -= 2; - fts3GetDeltaVarint(&p2, &iPos2); iPos2 -= 2; - - while( 1 ){ - if( iPos2==iPos1+nToken - || (isExact==0 && iPos2>iPos1 && iPos2<=iPos1+nToken) - ){ - sqlite3_int64 iSave; - iSave = isSaveLeft ? 
iPos1 : iPos2; - fts3PutDeltaVarint(&p, &iPrev, iSave+2); iPrev -= 2; - pSave = 0; - assert( p ); - } - if( (!isSaveLeft && iPos2<=(iPos1+nToken)) || iPos2<=iPos1 ){ - if( (*p2&0xFE)==0 ) break; - fts3GetDeltaVarint(&p2, &iPos2); iPos2 -= 2; - }else{ - if( (*p1&0xFE)==0 ) break; - fts3GetDeltaVarint(&p1, &iPos1); iPos1 -= 2; - } - } - - if( pSave ){ - assert( pp && p ); - p = pSave; - } - - fts3ColumnlistCopy(0, &p1); - fts3ColumnlistCopy(0, &p2); - assert( (*p1&0xFE)==0 && (*p2&0xFE)==0 ); - if( 0==*p1 || 0==*p2 ) break; - - p1++; - p1 += fts3GetVarint32(p1, &iCol1); - p2++; - p2 += fts3GetVarint32(p2, &iCol2); - } - - /* Advance pointer p1 or p2 (whichever corresponds to the smaller of - ** iCol1 and iCol2) so that it points to either the 0x00 that marks the - ** end of the position list, or the 0x01 that precedes the next - ** column-number in the position list. - */ - else if( iCol1=pEnd ){ - *pp = 0; - }else{ - sqlite3_int64 iVal; - *pp += sqlite3Fts3GetVarint(*pp, &iVal); - if( bDescIdx ){ - *pVal -= iVal; - }else{ - *pVal += iVal; - } - } -} - -/* -** This function is used to write a single varint to a buffer. The varint -** is written to *pp. Before returning, *pp is set to point 1 byte past the -** end of the value written. -** -** If *pbFirst is zero when this function is called, the value written to -** the buffer is that of parameter iVal. -** -** If *pbFirst is non-zero when this function is called, then the value -** written is either (iVal-*piPrev) (if bDescIdx is zero) or (*piPrev-iVal) -** (if bDescIdx is non-zero). -** -** Before returning, this function always sets *pbFirst to 1 and *piPrev -** to the value of parameter iVal. -*/ -static void fts3PutDeltaVarint3( - char **pp, /* IN/OUT: Output pointer */ - int bDescIdx, /* True for descending docids */ - sqlite3_int64 *piPrev, /* IN/OUT: Previous value written to list */ - int *pbFirst, /* IN/OUT: True after first int written */ - sqlite3_int64 iVal /* Write this value to the list */ -){ - sqlite3_int64 iWrite; - if( bDescIdx==0 || *pbFirst==0 ){ - iWrite = iVal - *piPrev; - }else{ - iWrite = *piPrev - iVal; - } - assert( *pbFirst || *piPrev==0 ); - assert( *pbFirst==0 || iWrite>0 ); - *pp += sqlite3Fts3PutVarint(*pp, iWrite); - *piPrev = iVal; - *pbFirst = 1; -} - - -/* -** This macro is used by various functions that merge doclists. The two -** arguments are 64-bit docid values. If the value of the stack variable -** bDescDoclist is 0 when this macro is invoked, then it returns (i1-i2). -** Otherwise, (i2-i1). -** -** Using this makes it easier to write code that can merge doclists that are -** sorted in either ascending or descending order. -*/ -#define DOCID_CMP(i1, i2) ((bDescDoclist?-1:1) * (i1-i2)) - -/* -** This function does an "OR" merge of two doclists (output contains all -** positions contained in either argument doclist). If the docids in the -** input doclists are sorted in ascending order, parameter bDescDoclist -** should be false. If they are sorted in ascending order, it should be -** passed a non-zero value. -** -** If no error occurs, *paOut is set to point at an sqlite3_malloc'd buffer -** containing the output doclist and SQLITE_OK is returned. In this case -** *pnOut is set to the number of bytes in the output doclist. -** -** If an error occurs, an SQLite error code is returned. The output values -** are undefined in this case. 
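The DOCID_CMP macro lets one merge loop handle doclists sorted in either direction. A stripped-down sketch of the OR-merge over plain docid arrays, ignoring position lists and delta encoding (hypothetical names, illustration only):

#include <stdio.h>

typedef long long i64;

/* Direction-aware comparison: negative if i1 sorts before i2. */
#define DOCID_CMP(bDesc, i1, i2) (((bDesc)?-1:1) * ((i1)-(i2)))

/* OR-merge two docid lists sorted in the same direction, dropping duplicates. */
static int orMerge(int bDesc, const i64 *a1, int n1,
                   const i64 *a2, int n2, i64 *aOut){
  int i1 = 0, i2 = 0, nOut = 0;
  while( i1<n1 || i2<n2 ){
    if( i1<n1 && i2<n2 && DOCID_CMP(bDesc, a1[i1], a2[i2])==0 ){
      aOut[nOut++] = a1[i1++]; i2++;
    }else if( i2==n2 || (i1<n1 && DOCID_CMP(bDesc, a1[i1], a2[i2])<0) ){
      aOut[nOut++] = a1[i1++];
    }else{
      aOut[nOut++] = a2[i2++];
    }
  }
  return nOut;
}

int main(void){
  i64 a[] = {1, 4, 9}, b[] = {4, 7}, out[5];
  int i, n = orMerge(0, a, 3, b, 2, out);
  for(i=0; i<n; i++) printf("%lld ", out[i]);   /* prints: 1 4 7 9 */
  printf("\n");
  return 0;
}

The real fts3DoclistOrMerge() performs the same walk over varint-encoded input, re-encoding the deltas and copying or merging the position list attached to each docid.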
-*/ -static int fts3DoclistOrMerge( - int bDescDoclist, /* True if arguments are desc */ - char *a1, int n1, /* First doclist */ - char *a2, int n2, /* Second doclist */ - char **paOut, int *pnOut /* OUT: Malloc'd doclist */ -){ - sqlite3_int64 i1 = 0; - sqlite3_int64 i2 = 0; - sqlite3_int64 iPrev = 0; - char *pEnd1 = &a1[n1]; - char *pEnd2 = &a2[n2]; - char *p1 = a1; - char *p2 = a2; - char *p; - char *aOut; - int bFirstOut = 0; - - *paOut = 0; - *pnOut = 0; - - /* Allocate space for the output. Both the input and output doclists - ** are delta encoded. If they are in ascending order (bDescDoclist==0), - ** then the first docid in each list is simply encoded as a varint. For - ** each subsequent docid, the varint stored is the difference between the - ** current and previous docid (a positive number - since the list is in - ** ascending order). - ** - ** The first docid written to the output is therefore encoded using the - ** same number of bytes as it is in whichever of the input lists it is - ** read from. And each subsequent docid read from the same input list - ** consumes either the same or less bytes as it did in the input (since - ** the difference between it and the previous value in the output must - ** be a positive value less than or equal to the delta value read from - ** the input list). The same argument applies to all but the first docid - ** read from the 'other' list. And to the contents of all position lists - ** that will be copied and merged from the input to the output. - ** - ** However, if the first docid copied to the output is a negative number, - ** then the encoding of the first docid from the 'other' input list may - ** be larger in the output than it was in the input (since the delta value - ** may be a larger positive integer than the actual docid). - ** - ** The space required to store the output is therefore the sum of the - ** sizes of the two inputs, plus enough space for exactly one of the input - ** docids to grow. - ** - ** A symetric argument may be made if the doclists are in descending - ** order. - */ - aOut = sqlite3_malloc(n1+n2+FTS3_VARINT_MAX-1); - if( !aOut ) return SQLITE_NOMEM; - - p = aOut; - fts3GetDeltaVarint3(&p1, pEnd1, 0, &i1); - fts3GetDeltaVarint3(&p2, pEnd2, 0, &i2); - while( p1 || p2 ){ - sqlite3_int64 iDiff = DOCID_CMP(i1, i2); - - if( p2 && p1 && iDiff==0 ){ - fts3PutDeltaVarint3(&p, bDescDoclist, &iPrev, &bFirstOut, i1); - fts3PoslistMerge(&p, &p1, &p2); - fts3GetDeltaVarint3(&p1, pEnd1, bDescDoclist, &i1); - fts3GetDeltaVarint3(&p2, pEnd2, bDescDoclist, &i2); - }else if( !p2 || (p1 && iDiff<0) ){ - fts3PutDeltaVarint3(&p, bDescDoclist, &iPrev, &bFirstOut, i1); - fts3PoslistCopy(&p, &p1); - fts3GetDeltaVarint3(&p1, pEnd1, bDescDoclist, &i1); - }else{ - fts3PutDeltaVarint3(&p, bDescDoclist, &iPrev, &bFirstOut, i2); - fts3PoslistCopy(&p, &p2); - fts3GetDeltaVarint3(&p2, pEnd2, bDescDoclist, &i2); - } - } - - *paOut = aOut; - *pnOut = (int)(p-aOut); - assert( *pnOut<=n1+n2+FTS3_VARINT_MAX-1 ); - return SQLITE_OK; -} - -/* -** This function does a "phrase" merge of two doclists. In a phrase merge, -** the output contains a copy of each position from the right-hand input -** doclist for which there is a position in the left-hand input doclist -** exactly nDist tokens before it. -** -** If the docids in the input doclists are sorted in ascending order, -** parameter bDescDoclist should be false. If they are sorted in ascending -** order, it should be passed a non-zero value. 
-** -** The right-hand input doclist is overwritten by this function. -*/ -static void fts3DoclistPhraseMerge( - int bDescDoclist, /* True if arguments are desc */ - int nDist, /* Distance from left to right (1=adjacent) */ - char *aLeft, int nLeft, /* Left doclist */ - char *aRight, int *pnRight /* IN/OUT: Right/output doclist */ -){ - sqlite3_int64 i1 = 0; - sqlite3_int64 i2 = 0; - sqlite3_int64 iPrev = 0; - char *pEnd1 = &aLeft[nLeft]; - char *pEnd2 = &aRight[*pnRight]; - char *p1 = aLeft; - char *p2 = aRight; - char *p; - int bFirstOut = 0; - char *aOut = aRight; - - assert( nDist>0 ); - - p = aOut; - fts3GetDeltaVarint3(&p1, pEnd1, 0, &i1); - fts3GetDeltaVarint3(&p2, pEnd2, 0, &i2); - - while( p1 && p2 ){ - sqlite3_int64 iDiff = DOCID_CMP(i1, i2); - if( iDiff==0 ){ - char *pSave = p; - sqlite3_int64 iPrevSave = iPrev; - int bFirstOutSave = bFirstOut; - - fts3PutDeltaVarint3(&p, bDescDoclist, &iPrev, &bFirstOut, i1); - if( 0==fts3PoslistPhraseMerge(&p, nDist, 0, 1, &p1, &p2) ){ - p = pSave; - iPrev = iPrevSave; - bFirstOut = bFirstOutSave; - } - fts3GetDeltaVarint3(&p1, pEnd1, bDescDoclist, &i1); - fts3GetDeltaVarint3(&p2, pEnd2, bDescDoclist, &i2); - }else if( iDiff<0 ){ - fts3PoslistCopy(0, &p1); - fts3GetDeltaVarint3(&p1, pEnd1, bDescDoclist, &i1); - }else{ - fts3PoslistCopy(0, &p2); - fts3GetDeltaVarint3(&p2, pEnd2, bDescDoclist, &i2); - } - } - - *pnRight = (int)(p - aOut); -} - -/* -** Argument pList points to a position list nList bytes in size. This -** function checks to see if the position list contains any entries for -** a token in position 0 (of any column). If so, it writes argument iDelta -** to the output buffer pOut, followed by a position list consisting only -** of the entries from pList at position 0, and terminated by an 0x00 byte. -** The value returned is the number of bytes written to pOut (if any). -*/ -SQLITE_PRIVATE int sqlite3Fts3FirstFilter( - sqlite3_int64 iDelta, /* Varint that may be written to pOut */ - char *pList, /* Position list (no 0x00 term) */ - int nList, /* Size of pList in bytes */ - char *pOut /* Write output here */ -){ - int nOut = 0; - int bWritten = 0; /* True once iDelta has been written */ - char *p = pList; - char *pEnd = &pList[nList]; - - if( *p!=0x01 ){ - if( *p==0x02 ){ - nOut += sqlite3Fts3PutVarint(&pOut[nOut], iDelta); - pOut[nOut++] = 0x02; - bWritten = 1; - } - fts3ColumnlistCopy(0, &p); - } - - while( paaOutput); i++){ - if( pTS->aaOutput[i] ){ - if( !aOut ){ - aOut = pTS->aaOutput[i]; - nOut = pTS->anOutput[i]; - pTS->aaOutput[i] = 0; - }else{ - int nNew; - char *aNew; - - int rc = fts3DoclistOrMerge(p->bDescIdx, - pTS->aaOutput[i], pTS->anOutput[i], aOut, nOut, &aNew, &nNew - ); - if( rc!=SQLITE_OK ){ - sqlite3_free(aOut); - return rc; - } - - sqlite3_free(pTS->aaOutput[i]); - sqlite3_free(aOut); - pTS->aaOutput[i] = 0; - aOut = aNew; - nOut = nNew; - } - } - } - - pTS->aaOutput[0] = aOut; - pTS->anOutput[0] = nOut; - return SQLITE_OK; -} - -/* -** Merge the doclist aDoclist/nDoclist into the TermSelect object passed -** as the first argument. The merge is an "OR" merge (see function -** fts3DoclistOrMerge() for details). -** -** This function is called with the doclist for each term that matches -** a queried prefix. It merges all these doclists into one, the doclist -** for the specified prefix. Since there can be a very large number of -** doclists to merge, the merging is done pair-wise using the TermSelect -** object. 
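The TermSelect accumulator behaves like a binary counter: slot i holds the merged result of roughly 2^i input doclists, so each doclist is merged O(log N) times instead of being appended to a single ever-growing buffer. A sketch of that pattern using sorted integer arrays in place of doclists (hypothetical names; error handling omitted):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define NSLOT 16

typedef struct { int *a; int n; } List;

/* Merge two sorted lists into a new one, freeing both inputs. */
static List merge(List x, List y){
  List out;
  int i = 0, j = 0;
  out.a = malloc(sizeof(int)*(x.n + y.n));
  out.n = 0;
  while( i<x.n || j<y.n ){
    if( j==y.n || (i<x.n && x.a[i]<=y.a[j]) ) out.a[out.n++] = x.a[i++];
    else out.a[out.n++] = y.a[j++];
  }
  free(x.a); free(y.a);
  return out;
}

int main(void){
  List aSlot[NSLOT];
  List out = { 0, 0 };
  int i, v;
  memset(aSlot, 0, sizeof(aSlot));
  for(v=10; v>0; v--){
    List in = { malloc(sizeof(int)), 1 };   /* one single-element "doclist" */
    in.a[0] = v;
    for(i=0; i<NSLOT; i++){
      if( aSlot[i].a==0 ){ aSlot[i] = in; break; }
      in = merge(aSlot[i], in);             /* merge and carry into the next slot */
      aSlot[i].a = 0; aSlot[i].n = 0;
    }
  }
  for(i=0; i<NSLOT; i++){                   /* final fold of the occupied slots */
    if( aSlot[i].a ) out = out.a ? merge(out, aSlot[i]) : aSlot[i];
  }
  for(i=0; i<out.n; i++) printf("%d ", out.a[i]);   /* prints 1 .. 10 in order */
  printf("\n");
  free(out.a);
  return 0;
}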
-** -** This function returns SQLITE_OK if the merge is successful, or an -** SQLite error code (SQLITE_NOMEM) if an error occurs. -*/ -static int fts3TermSelectMerge( - Fts3Table *p, /* FTS table handle */ - TermSelect *pTS, /* TermSelect object to merge into */ - char *aDoclist, /* Pointer to doclist */ - int nDoclist /* Size of aDoclist in bytes */ -){ - if( pTS->aaOutput[0]==0 ){ - /* If this is the first term selected, copy the doclist to the output - ** buffer using memcpy(). */ - pTS->aaOutput[0] = sqlite3_malloc(nDoclist); - pTS->anOutput[0] = nDoclist; - if( pTS->aaOutput[0] ){ - memcpy(pTS->aaOutput[0], aDoclist, nDoclist); - }else{ - return SQLITE_NOMEM; - } - }else{ - char *aMerge = aDoclist; - int nMerge = nDoclist; - int iOut; - - for(iOut=0; iOutaaOutput); iOut++){ - if( pTS->aaOutput[iOut]==0 ){ - assert( iOut>0 ); - pTS->aaOutput[iOut] = aMerge; - pTS->anOutput[iOut] = nMerge; - break; - }else{ - char *aNew; - int nNew; - - int rc = fts3DoclistOrMerge(p->bDescIdx, aMerge, nMerge, - pTS->aaOutput[iOut], pTS->anOutput[iOut], &aNew, &nNew - ); - if( rc!=SQLITE_OK ){ - if( aMerge!=aDoclist ) sqlite3_free(aMerge); - return rc; - } - - if( aMerge!=aDoclist ) sqlite3_free(aMerge); - sqlite3_free(pTS->aaOutput[iOut]); - pTS->aaOutput[iOut] = 0; - - aMerge = aNew; - nMerge = nNew; - if( (iOut+1)==SizeofArray(pTS->aaOutput) ){ - pTS->aaOutput[iOut] = aMerge; - pTS->anOutput[iOut] = nMerge; - } - } - } - } - return SQLITE_OK; -} - -/* -** Append SegReader object pNew to the end of the pCsr->apSegment[] array. -*/ -static int fts3SegReaderCursorAppend( - Fts3MultiSegReader *pCsr, - Fts3SegReader *pNew -){ - if( (pCsr->nSegment%16)==0 ){ - Fts3SegReader **apNew; - int nByte = (pCsr->nSegment + 16)*sizeof(Fts3SegReader*); - apNew = (Fts3SegReader **)sqlite3_realloc(pCsr->apSegment, nByte); - if( !apNew ){ - sqlite3Fts3SegReaderFree(pNew); - return SQLITE_NOMEM; - } - pCsr->apSegment = apNew; - } - pCsr->apSegment[pCsr->nSegment++] = pNew; - return SQLITE_OK; -} - -/* -** Add seg-reader objects to the Fts3MultiSegReader object passed as the -** 8th argument. -** -** This function returns SQLITE_OK if successful, or an SQLite error code -** otherwise. -*/ -static int fts3SegReaderCursor( - Fts3Table *p, /* FTS3 table handle */ - int iLangid, /* Language id */ - int iIndex, /* Index to search (from 0 to p->nIndex-1) */ - int iLevel, /* Level of segments to scan */ - const char *zTerm, /* Term to query for */ - int nTerm, /* Size of zTerm in bytes */ - int isPrefix, /* True for a prefix search */ - int isScan, /* True to scan from zTerm to EOF */ - Fts3MultiSegReader *pCsr /* Cursor object to populate */ -){ - int rc = SQLITE_OK; /* Error code */ - sqlite3_stmt *pStmt = 0; /* Statement to iterate through segments */ - int rc2; /* Result of sqlite3_reset() */ - - /* If iLevel is less than 0 and this is not a scan, include a seg-reader - ** for the pending-terms. If this is a scan, then this call must be being - ** made by an fts4aux module, not an FTS table. In this case calling - ** Fts3SegReaderPending might segfault, as the data structures used by - ** fts4aux are not completely populated. So it's easiest to filter these - ** calls out here. 
*/ - if( iLevel<0 && p->aIndex ){ - Fts3SegReader *pSeg = 0; - rc = sqlite3Fts3SegReaderPending(p, iIndex, zTerm, nTerm, isPrefix, &pSeg); - if( rc==SQLITE_OK && pSeg ){ - rc = fts3SegReaderCursorAppend(pCsr, pSeg); - } - } - - if( iLevel!=FTS3_SEGCURSOR_PENDING ){ - if( rc==SQLITE_OK ){ - rc = sqlite3Fts3AllSegdirs(p, iLangid, iIndex, iLevel, &pStmt); - } - - while( rc==SQLITE_OK && SQLITE_ROW==(rc = sqlite3_step(pStmt)) ){ - Fts3SegReader *pSeg = 0; - - /* Read the values returned by the SELECT into local variables. */ - sqlite3_int64 iStartBlock = sqlite3_column_int64(pStmt, 1); - sqlite3_int64 iLeavesEndBlock = sqlite3_column_int64(pStmt, 2); - sqlite3_int64 iEndBlock = sqlite3_column_int64(pStmt, 3); - int nRoot = sqlite3_column_bytes(pStmt, 4); - char const *zRoot = sqlite3_column_blob(pStmt, 4); - - /* If zTerm is not NULL, and this segment is not stored entirely on its - ** root node, the range of leaves scanned can be reduced. Do this. */ - if( iStartBlock && zTerm ){ - sqlite3_int64 *pi = (isPrefix ? &iLeavesEndBlock : 0); - rc = fts3SelectLeaf(p, zTerm, nTerm, zRoot, nRoot, &iStartBlock, pi); - if( rc!=SQLITE_OK ) goto finished; - if( isPrefix==0 && isScan==0 ) iLeavesEndBlock = iStartBlock; - } - - rc = sqlite3Fts3SegReaderNew(pCsr->nSegment+1, - (isPrefix==0 && isScan==0), - iStartBlock, iLeavesEndBlock, - iEndBlock, zRoot, nRoot, &pSeg - ); - if( rc!=SQLITE_OK ) goto finished; - rc = fts3SegReaderCursorAppend(pCsr, pSeg); - } - } - - finished: - rc2 = sqlite3_reset(pStmt); - if( rc==SQLITE_DONE ) rc = rc2; - - return rc; -} - -/* -** Set up a cursor object for iterating through a full-text index or a -** single level therein. -*/ -SQLITE_PRIVATE int sqlite3Fts3SegReaderCursor( - Fts3Table *p, /* FTS3 table handle */ - int iLangid, /* Language-id to search */ - int iIndex, /* Index to search (from 0 to p->nIndex-1) */ - int iLevel, /* Level of segments to scan */ - const char *zTerm, /* Term to query for */ - int nTerm, /* Size of zTerm in bytes */ - int isPrefix, /* True for a prefix search */ - int isScan, /* True to scan from zTerm to EOF */ - Fts3MultiSegReader *pCsr /* Cursor object to populate */ -){ - assert( iIndex>=0 && iIndexnIndex ); - assert( iLevel==FTS3_SEGCURSOR_ALL - || iLevel==FTS3_SEGCURSOR_PENDING - || iLevel>=0 - ); - assert( iLevelbase.pVtab; - - if( isPrefix ){ - for(i=1; bFound==0 && inIndex; i++){ - if( p->aIndex[i].nPrefix==nTerm ){ - bFound = 1; - rc = sqlite3Fts3SegReaderCursor(p, pCsr->iLangid, - i, FTS3_SEGCURSOR_ALL, zTerm, nTerm, 0, 0, pSegcsr - ); - pSegcsr->bLookup = 1; - } - } - - for(i=1; bFound==0 && inIndex; i++){ - if( p->aIndex[i].nPrefix==nTerm+1 ){ - bFound = 1; - rc = sqlite3Fts3SegReaderCursor(p, pCsr->iLangid, - i, FTS3_SEGCURSOR_ALL, zTerm, nTerm, 1, 0, pSegcsr - ); - if( rc==SQLITE_OK ){ - rc = fts3SegReaderCursorAddZero( - p, pCsr->iLangid, zTerm, nTerm, pSegcsr - ); - } - } - } - } - - if( bFound==0 ){ - rc = sqlite3Fts3SegReaderCursor(p, pCsr->iLangid, - 0, FTS3_SEGCURSOR_ALL, zTerm, nTerm, isPrefix, 0, pSegcsr - ); - pSegcsr->bLookup = !isPrefix; - } - } - - *ppSegcsr = pSegcsr; - return rc; -} - -/* -** Free an Fts3MultiSegReader allocated by fts3TermSegReaderCursor(). -*/ -static void fts3SegReaderCursorFree(Fts3MultiSegReader *pSegcsr){ - sqlite3Fts3SegReaderFinish(pSegcsr); - sqlite3_free(pSegcsr); -} - -/* -** This function retrieves the doclist for the specified term (or term -** prefix) from the database. 
-*/ -static int fts3TermSelect( - Fts3Table *p, /* Virtual table handle */ - Fts3PhraseToken *pTok, /* Token to query for */ - int iColumn, /* Column to query (or -ve for all columns) */ - int *pnOut, /* OUT: Size of buffer at *ppOut */ - char **ppOut /* OUT: Malloced result buffer */ -){ - int rc; /* Return code */ - Fts3MultiSegReader *pSegcsr; /* Seg-reader cursor for this term */ - TermSelect tsc; /* Object for pair-wise doclist merging */ - Fts3SegFilter filter; /* Segment term filter configuration */ - - pSegcsr = pTok->pSegcsr; - memset(&tsc, 0, sizeof(TermSelect)); - - filter.flags = FTS3_SEGMENT_IGNORE_EMPTY | FTS3_SEGMENT_REQUIRE_POS - | (pTok->isPrefix ? FTS3_SEGMENT_PREFIX : 0) - | (pTok->bFirst ? FTS3_SEGMENT_FIRST : 0) - | (iColumnnColumn ? FTS3_SEGMENT_COLUMN_FILTER : 0); - filter.iCol = iColumn; - filter.zTerm = pTok->z; - filter.nTerm = pTok->n; - - rc = sqlite3Fts3SegReaderStart(p, pSegcsr, &filter); - while( SQLITE_OK==rc - && SQLITE_ROW==(rc = sqlite3Fts3SegReaderStep(p, pSegcsr)) - ){ - rc = fts3TermSelectMerge(p, &tsc, pSegcsr->aDoclist, pSegcsr->nDoclist); - } - - if( rc==SQLITE_OK ){ - rc = fts3TermSelectFinishMerge(p, &tsc); - } - if( rc==SQLITE_OK ){ - *ppOut = tsc.aaOutput[0]; - *pnOut = tsc.anOutput[0]; - }else{ - int i; - for(i=0; ipSegcsr = 0; - return rc; -} - -/* -** This function counts the total number of docids in the doclist stored -** in buffer aList[], size nList bytes. -** -** If the isPoslist argument is true, then it is assumed that the doclist -** contains a position-list following each docid. Otherwise, it is assumed -** that the doclist is simply a list of docids stored as delta encoded -** varints. -*/ -static int fts3DoclistCountDocids(char *aList, int nList){ - int nDoc = 0; /* Return value */ - if( aList ){ - char *aEnd = &aList[nList]; /* Pointer to one byte after EOF */ - char *p = aList; /* Cursor */ - while( peSearch==FTS3_DOCID_SEARCH || pCsr->eSearch==FTS3_FULLSCAN_SEARCH ){ - if( SQLITE_ROW!=sqlite3_step(pCsr->pStmt) ){ - pCsr->isEof = 1; - rc = sqlite3_reset(pCsr->pStmt); - }else{ - pCsr->iPrevId = sqlite3_column_int64(pCsr->pStmt, 0); - rc = SQLITE_OK; - } - }else{ - rc = fts3EvalNext((Fts3Cursor *)pCursor); - } - assert( ((Fts3Table *)pCsr->base.pVtab)->pSegments==0 ); - return rc; -} - -/* -** The following are copied from sqliteInt.h. -** -** Constants for the largest and smallest possible 64-bit signed integers. -** These macros are designed to work correctly on both 32-bit and 64-bit -** compilers. -*/ -#ifndef SQLITE_AMALGAMATION -# define LARGEST_INT64 (0xffffffff|(((sqlite3_int64)0x7fffffff)<<32)) -# define SMALLEST_INT64 (((sqlite3_int64)-1) - LARGEST_INT64) -#endif - -/* -** If the numeric type of argument pVal is "integer", then return it -** converted to a 64-bit signed integer. Otherwise, return a copy of -** the second parameter, iDefault. -*/ -static sqlite3_int64 fts3DocidRange(sqlite3_value *pVal, i64 iDefault){ - if( pVal ){ - int eType = sqlite3_value_numeric_type(pVal); - if( eType==SQLITE_INTEGER ){ - return sqlite3_value_int64(pVal); - } - } - return iDefault; -} - -/* -** This is the xFilter interface for the virtual table. See -** the virtual table xFilter method documentation for additional -** information. -** -** If idxNum==FTS3_FULLSCAN_SEARCH then do a full table scan against -** the %_content table. -** -** If idxNum==FTS3_DOCID_SEARCH then do a docid lookup for a single entry -** in the %_content table. -** -** If idxNum>=FTS3_FULLTEXT_SEARCH then use the full text index. 
The -** column on the left-hand side of the MATCH operator is column -** number idxNum-FTS3_FULLTEXT_SEARCH, 0 indexed. argv[0] is the right-hand -** side of the MATCH operator. -*/ -static int fts3FilterMethod( - sqlite3_vtab_cursor *pCursor, /* The cursor used for this query */ - int idxNum, /* Strategy index */ - const char *idxStr, /* Unused */ - int nVal, /* Number of elements in apVal */ - sqlite3_value **apVal /* Arguments for the indexing scheme */ -){ - int rc; - char *zSql; /* SQL statement used to access %_content */ - int eSearch; - Fts3Table *p = (Fts3Table *)pCursor->pVtab; - Fts3Cursor *pCsr = (Fts3Cursor *)pCursor; - - sqlite3_value *pCons = 0; /* The MATCH or rowid constraint, if any */ - sqlite3_value *pLangid = 0; /* The "langid = ?" constraint, if any */ - sqlite3_value *pDocidGe = 0; /* The "docid >= ?" constraint, if any */ - sqlite3_value *pDocidLe = 0; /* The "docid <= ?" constraint, if any */ - int iIdx; - - UNUSED_PARAMETER(idxStr); - UNUSED_PARAMETER(nVal); - - eSearch = (idxNum & 0x0000FFFF); - assert( eSearch>=0 && eSearch<=(FTS3_FULLTEXT_SEARCH+p->nColumn) ); - assert( p->pSegments==0 ); - - /* Collect arguments into local variables */ - iIdx = 0; - if( eSearch!=FTS3_FULLSCAN_SEARCH ) pCons = apVal[iIdx++]; - if( idxNum & FTS3_HAVE_LANGID ) pLangid = apVal[iIdx++]; - if( idxNum & FTS3_HAVE_DOCID_GE ) pDocidGe = apVal[iIdx++]; - if( idxNum & FTS3_HAVE_DOCID_LE ) pDocidLe = apVal[iIdx++]; - assert( iIdx==nVal ); - - /* In case the cursor has been used before, clear it now. */ - sqlite3_finalize(pCsr->pStmt); - sqlite3_free(pCsr->aDoclist); - sqlite3Fts3ExprFree(pCsr->pExpr); - memset(&pCursor[1], 0, sizeof(Fts3Cursor)-sizeof(sqlite3_vtab_cursor)); - - /* Set the lower and upper bounds on docids to return */ - pCsr->iMinDocid = fts3DocidRange(pDocidGe, SMALLEST_INT64); - pCsr->iMaxDocid = fts3DocidRange(pDocidLe, LARGEST_INT64); - - if( idxStr ){ - pCsr->bDesc = (idxStr[0]=='D'); - }else{ - pCsr->bDesc = p->bDescIdx; - } - pCsr->eSearch = (i16)eSearch; - - if( eSearch!=FTS3_DOCID_SEARCH && eSearch!=FTS3_FULLSCAN_SEARCH ){ - int iCol = eSearch-FTS3_FULLTEXT_SEARCH; - const char *zQuery = (const char *)sqlite3_value_text(pCons); - - if( zQuery==0 && sqlite3_value_type(pCons)!=SQLITE_NULL ){ - return SQLITE_NOMEM; - } - - pCsr->iLangid = 0; - if( pLangid ) pCsr->iLangid = sqlite3_value_int(pLangid); - - assert( p->base.zErrMsg==0 ); - rc = sqlite3Fts3ExprParse(p->pTokenizer, pCsr->iLangid, - p->azColumn, p->bFts4, p->nColumn, iCol, zQuery, -1, &pCsr->pExpr, - &p->base.zErrMsg - ); - if( rc!=SQLITE_OK ){ - return rc; - } - - rc = fts3EvalStart(pCsr); - sqlite3Fts3SegmentsClose(p); - if( rc!=SQLITE_OK ) return rc; - pCsr->pNextId = pCsr->aDoclist; - pCsr->iPrevId = 0; - } - - /* Compile a SELECT statement for this cursor. For a full-table-scan, the - ** statement loops through all rows of the %_content table. For a - ** full-text query or docid lookup, the statement retrieves a single - ** row by docid. - */ - if( eSearch==FTS3_FULLSCAN_SEARCH ){ - zSql = sqlite3_mprintf( - "SELECT %s ORDER BY rowid %s", - p->zReadExprlist, (pCsr->bDesc ? 
"DESC" : "ASC") - ); - if( zSql ){ - rc = sqlite3_prepare_v2(p->db, zSql, -1, &pCsr->pStmt, 0); - sqlite3_free(zSql); - }else{ - rc = SQLITE_NOMEM; - } - }else if( eSearch==FTS3_DOCID_SEARCH ){ - rc = fts3CursorSeekStmt(pCsr, &pCsr->pStmt); - if( rc==SQLITE_OK ){ - rc = sqlite3_bind_value(pCsr->pStmt, 1, pCons); - } - } - if( rc!=SQLITE_OK ) return rc; - - return fts3NextMethod(pCursor); -} - -/* -** This is the xEof method of the virtual table. SQLite calls this -** routine to find out if it has reached the end of a result set. -*/ -static int fts3EofMethod(sqlite3_vtab_cursor *pCursor){ - return ((Fts3Cursor *)pCursor)->isEof; -} - -/* -** This is the xRowid method. The SQLite core calls this routine to -** retrieve the rowid for the current row of the result set. fts3 -** exposes %_content.docid as the rowid for the virtual table. The -** rowid should be written to *pRowid. -*/ -static int fts3RowidMethod(sqlite3_vtab_cursor *pCursor, sqlite_int64 *pRowid){ - Fts3Cursor *pCsr = (Fts3Cursor *) pCursor; - *pRowid = pCsr->iPrevId; - return SQLITE_OK; -} - -/* -** This is the xColumn method, called by SQLite to request a value from -** the row that the supplied cursor currently points to. -** -** If: -** -** (iCol < p->nColumn) -> The value of the iCol'th user column. -** (iCol == p->nColumn) -> Magic column with the same name as the table. -** (iCol == p->nColumn+1) -> Docid column -** (iCol == p->nColumn+2) -> Langid column -*/ -static int fts3ColumnMethod( - sqlite3_vtab_cursor *pCursor, /* Cursor to retrieve value from */ - sqlite3_context *pCtx, /* Context for sqlite3_result_xxx() calls */ - int iCol /* Index of column to read value from */ -){ - int rc = SQLITE_OK; /* Return Code */ - Fts3Cursor *pCsr = (Fts3Cursor *) pCursor; - Fts3Table *p = (Fts3Table *)pCursor->pVtab; - - /* The column value supplied by SQLite must be in range. */ - assert( iCol>=0 && iCol<=p->nColumn+2 ); - - if( iCol==p->nColumn+1 ){ - /* This call is a request for the "docid" column. Since "docid" is an - ** alias for "rowid", use the xRowid() method to obtain the value. - */ - sqlite3_result_int64(pCtx, pCsr->iPrevId); - }else if( iCol==p->nColumn ){ - /* The extra column whose name is the same as the table. - ** Return a blob which is a pointer to the cursor. */ - sqlite3_result_blob(pCtx, &pCsr, sizeof(pCsr), SQLITE_TRANSIENT); - }else if( iCol==p->nColumn+2 && pCsr->pExpr ){ - sqlite3_result_int64(pCtx, pCsr->iLangid); - }else{ - /* The requested column is either a user column (one that contains - ** indexed data), or the language-id column. */ - rc = fts3CursorSeek(0, pCsr); - - if( rc==SQLITE_OK ){ - if( iCol==p->nColumn+2 ){ - int iLangid = 0; - if( p->zLanguageid ){ - iLangid = sqlite3_column_int(pCsr->pStmt, p->nColumn+1); - } - sqlite3_result_int(pCtx, iLangid); - }else if( sqlite3_data_count(pCsr->pStmt)>(iCol+1) ){ - sqlite3_result_value(pCtx, sqlite3_column_value(pCsr->pStmt, iCol+1)); - } - } - } - - assert( ((Fts3Table *)pCsr->base.pVtab)->pSegments==0 ); - return rc; -} - -/* -** This function is the implementation of the xUpdate callback used by -** FTS3 virtual tables. It is invoked by SQLite each time a row is to be -** inserted, updated or deleted. 
-*/ -static int fts3UpdateMethod( - sqlite3_vtab *pVtab, /* Virtual table handle */ - int nArg, /* Size of argument array */ - sqlite3_value **apVal, /* Array of arguments */ - sqlite_int64 *pRowid /* OUT: The affected (or effected) rowid */ -){ - return sqlite3Fts3UpdateMethod(pVtab, nArg, apVal, pRowid); -} - -/* -** Implementation of xSync() method. Flush the contents of the pending-terms -** hash-table to the database. -*/ -static int fts3SyncMethod(sqlite3_vtab *pVtab){ - - /* Following an incremental-merge operation, assuming that the input - ** segments are not completely consumed (the usual case), they are updated - ** in place to remove the entries that have already been merged. This - ** involves updating the leaf block that contains the smallest unmerged - ** entry and each block (if any) between the leaf and the root node. So - ** if the height of the input segment b-trees is N, and input segments - ** are merged eight at a time, updating the input segments at the end - ** of an incremental-merge requires writing (8*(1+N)) blocks. N is usually - ** small - often between 0 and 2. So the overhead of the incremental - ** merge is somewhere between 8 and 24 blocks. To avoid this overhead - ** dwarfing the actual productive work accomplished, the incremental merge - ** is only attempted if it will write at least 64 leaf blocks. Hence - ** nMinMerge. - ** - ** Of course, updating the input segments also involves deleting a bunch - ** of blocks from the segments table. But this is not considered overhead - ** as it would also be required by a crisis-merge that used the same input - ** segments. - */ - const u32 nMinMerge = 64; /* Minimum amount of incr-merge work to do */ - - Fts3Table *p = (Fts3Table*)pVtab; - int rc = sqlite3Fts3PendingTermsFlush(p); - - if( rc==SQLITE_OK - && p->nLeafAdd>(nMinMerge/16) - && p->nAutoincrmerge && p->nAutoincrmerge!=0xff - ){ - int mxLevel = 0; /* Maximum relative level value in db */ - int A; /* Incr-merge parameter A */ - - rc = sqlite3Fts3MaxLevel(p, &mxLevel); - assert( rc==SQLITE_OK || mxLevel==0 ); - A = p->nLeafAdd * mxLevel; - A += (A/2); - if( A>(int)nMinMerge ) rc = sqlite3Fts3Incrmerge(p, A, p->nAutoincrmerge); - } - sqlite3Fts3SegmentsClose(p); - return rc; -} - -/* -** If it is currently unknown whether or not the FTS table has an %_stat -** table (if p->bHasStat==2), attempt to determine this (set p->bHasStat -** to 0 or 1). Return SQLITE_OK if successful, or an SQLite error code -** if an error occurs. -*/ -static int fts3SetHasStat(Fts3Table *p){ - int rc = SQLITE_OK; - if( p->bHasStat==2 ){ - const char *zFmt ="SELECT 1 FROM %Q.sqlite_master WHERE tbl_name='%q_stat'"; - char *zSql = sqlite3_mprintf(zFmt, p->zDb, p->zName); - if( zSql ){ - sqlite3_stmt *pStmt = 0; - rc = sqlite3_prepare_v2(p->db, zSql, -1, &pStmt, 0); - if( rc==SQLITE_OK ){ - int bHasStat = (sqlite3_step(pStmt)==SQLITE_ROW); - rc = sqlite3_finalize(pStmt); - if( rc==SQLITE_OK ) p->bHasStat = bHasStat; - } - sqlite3_free(zSql); - }else{ - rc = SQLITE_NOMEM; - } - } - return rc; -} - -/* -** Implementation of xBegin() method. -*/ -static int fts3BeginMethod(sqlite3_vtab *pVtab){ - Fts3Table *p = (Fts3Table*)pVtab; - UNUSED_PARAMETER(pVtab); - assert( p->pSegments==0 ); - assert( p->nPendingData==0 ); - assert( p->inTransaction!=1 ); - TESTONLY( p->inTransaction = 1 ); - TESTONLY( p->mxSavepoint = -1; ); - p->nLeafAdd = 0; - return fts3SetHasStat(p); -} - -/* -** Implementation of xCommit() method. This is a no-op. 
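[editor's note] The xFilter strategies above (full table scan, direct docid lookup, and MATCH against the full-text index, optionally bounded by docid constraints) correspond to ordinary SQL query shapes. A hedged usage sketch follows, assuming an SQLite build compiled with FTS3 support; the table name and rows are made up.

#include <stdio.h>
#include <sqlite3.h>

static void runQuery(sqlite3 *db, const char *zSql){
  sqlite3_stmt *pStmt = 0;
  printf("-- %s\n", zSql);
  if( sqlite3_prepare_v2(db, zSql, -1, &pStmt, 0)==SQLITE_OK ){
    while( sqlite3_step(pStmt)==SQLITE_ROW ){
      printf("docid=%lld body=%s\n",
             sqlite3_column_int64(pStmt, 0),
             (const char *)sqlite3_column_text(pStmt, 1));
    }
  }
  sqlite3_finalize(pStmt);
}

int main(void){
  sqlite3 *db = 0;
  sqlite3_open(":memory:", &db);
  sqlite3_exec(db,
    "CREATE VIRTUAL TABLE mail USING fts3(body);"
    "INSERT INTO mail(body) VALUES('the quick brown fox');"
    "INSERT INTO mail(body) VALUES('jumps over the lazy dog');"
    "INSERT INTO mail(body) VALUES('quick quick slow');", 0, 0, 0);

  /* Full-table scan (FTS3_FULLSCAN_SEARCH). */
  runQuery(db, "SELECT docid, body FROM mail");

  /* Docid lookup (FTS3_DOCID_SEARCH). */
  runQuery(db, "SELECT docid, body FROM mail WHERE docid = 2");

  /* Full-text search (FTS3_FULLTEXT_SEARCH), bounded by a docid range. */
  runQuery(db, "SELECT docid, body FROM mail WHERE mail MATCH 'quick' AND docid <= 1");

  sqlite3_close(db);
  return 0;
}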
The contents of -** the pending-terms hash-table have already been flushed into the database -** by fts3SyncMethod(). -*/ -static int fts3CommitMethod(sqlite3_vtab *pVtab){ - TESTONLY( Fts3Table *p = (Fts3Table*)pVtab ); - UNUSED_PARAMETER(pVtab); - assert( p->nPendingData==0 ); - assert( p->inTransaction!=0 ); - assert( p->pSegments==0 ); - TESTONLY( p->inTransaction = 0 ); - TESTONLY( p->mxSavepoint = -1; ); - return SQLITE_OK; -} - -/* -** Implementation of xRollback(). Discard the contents of the pending-terms -** hash-table. Any changes made to the database are reverted by SQLite. -*/ -static int fts3RollbackMethod(sqlite3_vtab *pVtab){ - Fts3Table *p = (Fts3Table*)pVtab; - sqlite3Fts3PendingTermsClear(p); - assert( p->inTransaction!=0 ); - TESTONLY( p->inTransaction = 0 ); - TESTONLY( p->mxSavepoint = -1; ); - return SQLITE_OK; -} - -/* -** When called, *ppPoslist must point to the byte immediately following the -** end of a position-list. i.e. ( (*ppPoslist)[-1]==POS_END ). This function -** moves *ppPoslist so that it instead points to the first byte of the -** same position list. -*/ -static void fts3ReversePoslist(char *pStart, char **ppPoslist){ - char *p = &(*ppPoslist)[-2]; - char c = 0; - - while( p>pStart && (c=*p--)==0 ); - while( p>pStart && (*p & 0x80) | c ){ - c = *p--; - } - if( p>pStart ){ p = &p[2]; } - while( *p++&0x80 ); - *ppPoslist = p; -} - -/* -** Helper function used by the implementation of the overloaded snippet(), -** offsets() and optimize() SQL functions. -** -** If the value passed as the third argument is a blob of size -** sizeof(Fts3Cursor*), then the blob contents are copied to the -** output variable *ppCsr and SQLITE_OK is returned. Otherwise, an error -** message is written to context pContext and SQLITE_ERROR returned. The -** string passed via zFunc is used as part of the error message. -*/ -static int fts3FunctionArg( - sqlite3_context *pContext, /* SQL function call context */ - const char *zFunc, /* Function name */ - sqlite3_value *pVal, /* argv[0] passed to function */ - Fts3Cursor **ppCsr /* OUT: Store cursor handle here */ -){ - Fts3Cursor *pRet; - if( sqlite3_value_type(pVal)!=SQLITE_BLOB - || sqlite3_value_bytes(pVal)!=sizeof(Fts3Cursor *) - ){ - char *zErr = sqlite3_mprintf("illegal first argument to %s", zFunc); - sqlite3_result_error(pContext, zErr, -1); - sqlite3_free(zErr); - return SQLITE_ERROR; - } - memcpy(&pRet, sqlite3_value_blob(pVal), sizeof(Fts3Cursor *)); - *ppCsr = pRet; - return SQLITE_OK; -} - -/* -** Implementation of the snippet() function for FTS3 -*/ -static void fts3SnippetFunc( - sqlite3_context *pContext, /* SQLite function call context */ - int nVal, /* Size of apVal[] array */ - sqlite3_value **apVal /* Array of arguments */ -){ - Fts3Cursor *pCsr; /* Cursor handle passed through apVal[0] */ - const char *zStart = ""; - const char *zEnd = ""; - const char *zEllipsis = "..."; - int iCol = -1; - int nToken = 15; /* Default number of tokens in snippet */ - - /* There must be at least one argument passed to this function (otherwise - ** the non-overloaded version would have been called instead of this one). 
- */ - assert( nVal>=1 ); - - if( nVal>6 ){ - sqlite3_result_error(pContext, - "wrong number of arguments to function snippet()", -1); - return; - } - if( fts3FunctionArg(pContext, "snippet", apVal[0], &pCsr) ) return; - - switch( nVal ){ - case 6: nToken = sqlite3_value_int(apVal[5]); - case 5: iCol = sqlite3_value_int(apVal[4]); - case 4: zEllipsis = (const char*)sqlite3_value_text(apVal[3]); - case 3: zEnd = (const char*)sqlite3_value_text(apVal[2]); - case 2: zStart = (const char*)sqlite3_value_text(apVal[1]); - } - if( !zEllipsis || !zEnd || !zStart ){ - sqlite3_result_error_nomem(pContext); - }else if( SQLITE_OK==fts3CursorSeek(pContext, pCsr) ){ - sqlite3Fts3Snippet(pContext, pCsr, zStart, zEnd, zEllipsis, iCol, nToken); - } -} - -/* -** Implementation of the offsets() function for FTS3 -*/ -static void fts3OffsetsFunc( - sqlite3_context *pContext, /* SQLite function call context */ - int nVal, /* Size of argument array */ - sqlite3_value **apVal /* Array of arguments */ -){ - Fts3Cursor *pCsr; /* Cursor handle passed through apVal[0] */ - - UNUSED_PARAMETER(nVal); - - assert( nVal==1 ); - if( fts3FunctionArg(pContext, "offsets", apVal[0], &pCsr) ) return; - assert( pCsr ); - if( SQLITE_OK==fts3CursorSeek(pContext, pCsr) ){ - sqlite3Fts3Offsets(pContext, pCsr); - } -} - -/* -** Implementation of the special optimize() function for FTS3. This -** function merges all segments in the database to a single segment. -** Example usage is: -** -** SELECT optimize(t) FROM t LIMIT 1; -** -** where 't' is the name of an FTS3 table. -*/ -static void fts3OptimizeFunc( - sqlite3_context *pContext, /* SQLite function call context */ - int nVal, /* Size of argument array */ - sqlite3_value **apVal /* Array of arguments */ -){ - int rc; /* Return code */ - Fts3Table *p; /* Virtual table handle */ - Fts3Cursor *pCursor; /* Cursor handle passed through apVal[0] */ - - UNUSED_PARAMETER(nVal); - - assert( nVal==1 ); - if( fts3FunctionArg(pContext, "optimize", apVal[0], &pCursor) ) return; - p = (Fts3Table *)pCursor->base.pVtab; - assert( p ); - - rc = sqlite3Fts3Optimize(p); - - switch( rc ){ - case SQLITE_OK: - sqlite3_result_text(pContext, "Index optimized", -1, SQLITE_STATIC); - break; - case SQLITE_DONE: - sqlite3_result_text(pContext, "Index already optimal", -1, SQLITE_STATIC); - break; - default: - sqlite3_result_error_code(pContext, rc); - break; - } -} - -/* -** Implementation of the matchinfo() function for FTS3 -*/ -static void fts3MatchinfoFunc( - sqlite3_context *pContext, /* SQLite function call context */ - int nVal, /* Size of argument array */ - sqlite3_value **apVal /* Array of arguments */ -){ - Fts3Cursor *pCsr; /* Cursor handle passed through apVal[0] */ - assert( nVal==1 || nVal==2 ); - if( SQLITE_OK==fts3FunctionArg(pContext, "matchinfo", apVal[0], &pCsr) ){ - const char *zArg = 0; - if( nVal>1 ){ - zArg = (const char *)sqlite3_value_text(apVal[1]); - } - sqlite3Fts3Matchinfo(pContext, pCsr, zArg); - } -} - -/* -** This routine implements the xFindFunction method for the FTS3 -** virtual table. 
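[editor's note] The overloaded snippet(), offsets(), optimize() and matchinfo() functions above are resolved through xFindFunction, so they take the FTS table's magic column as their first argument. A hedged usage sketch, again assuming an FTS3-enabled build; the table name and rows are illustrative.

#include <stdio.h>
#include <sqlite3.h>

int main(void){
  sqlite3 *db = 0;
  sqlite3_stmt *pStmt = 0;
  sqlite3_open(":memory:", &db);
  sqlite3_exec(db,
    "CREATE VIRTUAL TABLE doc USING fts3(body);"
    "INSERT INTO doc(body) VALUES('sqlite is a small fast sql database engine');"
    "INSERT INTO doc(body) VALUES('full text search with fts3 and fts4');", 0, 0, 0);

  /* snippet(doc, ...) highlights matched terms; offsets(doc) reports
  ** column, term and byte-offset information for each match. */
  const char *zSql =
    "SELECT snippet(doc, '[', ']'), offsets(doc) "
    "FROM doc WHERE doc MATCH 'fts3'";
  if( sqlite3_prepare_v2(db, zSql, -1, &pStmt, 0)==SQLITE_OK ){
    while( sqlite3_step(pStmt)==SQLITE_ROW ){
      printf("snippet: %s\n", (const char *)sqlite3_column_text(pStmt, 0));
      printf("offsets: %s\n", (const char *)sqlite3_column_text(pStmt, 1));
    }
  }
  sqlite3_finalize(pStmt);

  /* optimize() merges all index segments into a single segment. */
  sqlite3_exec(db, "SELECT optimize(doc) FROM doc LIMIT 1", 0, 0, 0);

  sqlite3_close(db);
  return 0;
}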
-*/ -static int fts3FindFunctionMethod( - sqlite3_vtab *pVtab, /* Virtual table handle */ - int nArg, /* Number of SQL function arguments */ - const char *zName, /* Name of SQL function */ - void (**pxFunc)(sqlite3_context*,int,sqlite3_value**), /* OUT: Result */ - void **ppArg /* Unused */ -){ - struct Overloaded { - const char *zName; - void (*xFunc)(sqlite3_context*,int,sqlite3_value**); - } aOverload[] = { - { "snippet", fts3SnippetFunc }, - { "offsets", fts3OffsetsFunc }, - { "optimize", fts3OptimizeFunc }, - { "matchinfo", fts3MatchinfoFunc }, - }; - int i; /* Iterator variable */ - - UNUSED_PARAMETER(pVtab); - UNUSED_PARAMETER(nArg); - UNUSED_PARAMETER(ppArg); - - for(i=0; idb; /* Database connection */ - int rc; /* Return Code */ - - /* At this point it must be known if the %_stat table exists or not. - ** So bHasStat may not be 2. */ - rc = fts3SetHasStat(p); - - /* As it happens, the pending terms table is always empty here. This is - ** because an "ALTER TABLE RENAME TABLE" statement inside a transaction - ** always opens a savepoint transaction. And the xSavepoint() method - ** flushes the pending terms table. But leave the (no-op) call to - ** PendingTermsFlush() in in case that changes. - */ - assert( p->nPendingData==0 ); - if( rc==SQLITE_OK ){ - rc = sqlite3Fts3PendingTermsFlush(p); - } - - if( p->zContentTbl==0 ){ - fts3DbExec(&rc, db, - "ALTER TABLE %Q.'%q_content' RENAME TO '%q_content';", - p->zDb, p->zName, zName - ); - } - - if( p->bHasDocsize ){ - fts3DbExec(&rc, db, - "ALTER TABLE %Q.'%q_docsize' RENAME TO '%q_docsize';", - p->zDb, p->zName, zName - ); - } - if( p->bHasStat ){ - fts3DbExec(&rc, db, - "ALTER TABLE %Q.'%q_stat' RENAME TO '%q_stat';", - p->zDb, p->zName, zName - ); - } - fts3DbExec(&rc, db, - "ALTER TABLE %Q.'%q_segments' RENAME TO '%q_segments';", - p->zDb, p->zName, zName - ); - fts3DbExec(&rc, db, - "ALTER TABLE %Q.'%q_segdir' RENAME TO '%q_segdir';", - p->zDb, p->zName, zName - ); - return rc; -} - -/* -** The xSavepoint() method. -** -** Flush the contents of the pending-terms table to disk. -*/ -static int fts3SavepointMethod(sqlite3_vtab *pVtab, int iSavepoint){ - int rc = SQLITE_OK; - UNUSED_PARAMETER(iSavepoint); - assert( ((Fts3Table *)pVtab)->inTransaction ); - assert( ((Fts3Table *)pVtab)->mxSavepoint < iSavepoint ); - TESTONLY( ((Fts3Table *)pVtab)->mxSavepoint = iSavepoint ); - if( ((Fts3Table *)pVtab)->bIgnoreSavepoint==0 ){ - rc = fts3SyncMethod(pVtab); - } - return rc; -} - -/* -** The xRelease() method. -** -** This is a no-op. -*/ -static int fts3ReleaseMethod(sqlite3_vtab *pVtab, int iSavepoint){ - TESTONLY( Fts3Table *p = (Fts3Table*)pVtab ); - UNUSED_PARAMETER(iSavepoint); - UNUSED_PARAMETER(pVtab); - assert( p->inTransaction ); - assert( p->mxSavepoint >= iSavepoint ); - TESTONLY( p->mxSavepoint = iSavepoint-1 ); - return SQLITE_OK; -} - -/* -** The xRollbackTo() method. -** -** Discard the contents of the pending terms table. 
-*/ -static int fts3RollbackToMethod(sqlite3_vtab *pVtab, int iSavepoint){ - Fts3Table *p = (Fts3Table*)pVtab; - UNUSED_PARAMETER(iSavepoint); - assert( p->inTransaction ); - assert( p->mxSavepoint >= iSavepoint ); - TESTONLY( p->mxSavepoint = iSavepoint ); - sqlite3Fts3PendingTermsClear(p); - return SQLITE_OK; -} - -static const sqlite3_module fts3Module = { - /* iVersion */ 2, - /* xCreate */ fts3CreateMethod, - /* xConnect */ fts3ConnectMethod, - /* xBestIndex */ fts3BestIndexMethod, - /* xDisconnect */ fts3DisconnectMethod, - /* xDestroy */ fts3DestroyMethod, - /* xOpen */ fts3OpenMethod, - /* xClose */ fts3CloseMethod, - /* xFilter */ fts3FilterMethod, - /* xNext */ fts3NextMethod, - /* xEof */ fts3EofMethod, - /* xColumn */ fts3ColumnMethod, - /* xRowid */ fts3RowidMethod, - /* xUpdate */ fts3UpdateMethod, - /* xBegin */ fts3BeginMethod, - /* xSync */ fts3SyncMethod, - /* xCommit */ fts3CommitMethod, - /* xRollback */ fts3RollbackMethod, - /* xFindFunction */ fts3FindFunctionMethod, - /* xRename */ fts3RenameMethod, - /* xSavepoint */ fts3SavepointMethod, - /* xRelease */ fts3ReleaseMethod, - /* xRollbackTo */ fts3RollbackToMethod, -}; - -/* -** This function is registered as the module destructor (called when an -** FTS3 enabled database connection is closed). It frees the memory -** allocated for the tokenizer hash table. -*/ -static void hashDestroy(void *p){ - Fts3Hash *pHash = (Fts3Hash *)p; - sqlite3Fts3HashClear(pHash); - sqlite3_free(pHash); -} - -/* -** The fts3 built-in tokenizers - "simple", "porter" and "icu"- are -** implemented in files fts3_tokenizer1.c, fts3_porter.c and fts3_icu.c -** respectively. The following three forward declarations are for functions -** declared in these files used to retrieve the respective implementations. -** -** Calling sqlite3Fts3SimpleTokenizerModule() sets the value pointed -** to by the argument to point to the "simple" tokenizer implementation. -** And so on. -*/ -SQLITE_PRIVATE void sqlite3Fts3SimpleTokenizerModule(sqlite3_tokenizer_module const**ppModule); -SQLITE_PRIVATE void sqlite3Fts3PorterTokenizerModule(sqlite3_tokenizer_module const**ppModule); -#ifdef SQLITE_ENABLE_FTS4_UNICODE61 -SQLITE_PRIVATE void sqlite3Fts3UnicodeTokenizer(sqlite3_tokenizer_module const**ppModule); -#endif -#ifdef SQLITE_ENABLE_ICU -SQLITE_PRIVATE void sqlite3Fts3IcuTokenizerModule(sqlite3_tokenizer_module const**ppModule); -#endif - -/* -** Initialize the fts3 extension. If this extension is built as part -** of the sqlite library, then this function is called directly by -** SQLite. If fts3 is built as a dynamically loadable extension, this -** function is called by the sqlite3_extension_init() entry point. -*/ -SQLITE_PRIVATE int sqlite3Fts3Init(sqlite3 *db){ - int rc = SQLITE_OK; - Fts3Hash *pHash = 0; - const sqlite3_tokenizer_module *pSimple = 0; - const sqlite3_tokenizer_module *pPorter = 0; -#ifdef SQLITE_ENABLE_FTS4_UNICODE61 - const sqlite3_tokenizer_module *pUnicode = 0; -#endif - -#ifdef SQLITE_ENABLE_ICU - const sqlite3_tokenizer_module *pIcu = 0; - sqlite3Fts3IcuTokenizerModule(&pIcu); -#endif - -#ifdef SQLITE_ENABLE_FTS4_UNICODE61 - sqlite3Fts3UnicodeTokenizer(&pUnicode); -#endif - -#ifdef SQLITE_TEST - rc = sqlite3Fts3InitTerm(db); - if( rc!=SQLITE_OK ) return rc; -#endif - - rc = sqlite3Fts3InitAux(db); - if( rc!=SQLITE_OK ) return rc; - - sqlite3Fts3SimpleTokenizerModule(&pSimple); - sqlite3Fts3PorterTokenizerModule(&pPorter); - - /* Allocate and initialize the hash-table used to store tokenizers. 
*/ - pHash = sqlite3_malloc(sizeof(Fts3Hash)); - if( !pHash ){ - rc = SQLITE_NOMEM; - }else{ - sqlite3Fts3HashInit(pHash, FTS3_HASH_STRING, 1); - } - - /* Load the built-in tokenizers into the hash table */ - if( rc==SQLITE_OK ){ - if( sqlite3Fts3HashInsert(pHash, "simple", 7, (void *)pSimple) - || sqlite3Fts3HashInsert(pHash, "porter", 7, (void *)pPorter) - -#ifdef SQLITE_ENABLE_FTS4_UNICODE61 - || sqlite3Fts3HashInsert(pHash, "unicode61", 10, (void *)pUnicode) -#endif -#ifdef SQLITE_ENABLE_ICU - || (pIcu && sqlite3Fts3HashInsert(pHash, "icu", 4, (void *)pIcu)) -#endif - ){ - rc = SQLITE_NOMEM; - } - } - -#ifdef SQLITE_TEST - if( rc==SQLITE_OK ){ - rc = sqlite3Fts3ExprInitTestInterface(db); - } -#endif - - /* Create the virtual table wrapper around the hash-table and overload - ** the two scalar functions. If this is successful, register the - ** module with sqlite. - */ - if( SQLITE_OK==rc - && SQLITE_OK==(rc = sqlite3Fts3InitHashTable(db, pHash, "fts3_tokenizer")) - && SQLITE_OK==(rc = sqlite3_overload_function(db, "snippet", -1)) - && SQLITE_OK==(rc = sqlite3_overload_function(db, "offsets", 1)) - && SQLITE_OK==(rc = sqlite3_overload_function(db, "matchinfo", 1)) - && SQLITE_OK==(rc = sqlite3_overload_function(db, "matchinfo", 2)) - && SQLITE_OK==(rc = sqlite3_overload_function(db, "optimize", 1)) - ){ - rc = sqlite3_create_module_v2( - db, "fts3", &fts3Module, (void *)pHash, hashDestroy - ); - if( rc==SQLITE_OK ){ - rc = sqlite3_create_module_v2( - db, "fts4", &fts3Module, (void *)pHash, 0 - ); - } - if( rc==SQLITE_OK ){ - rc = sqlite3Fts3InitTok(db, (void *)pHash); - } - return rc; - } - - - /* An error has occurred. Delete the hash table and return the error code. */ - assert( rc!=SQLITE_OK ); - if( pHash ){ - sqlite3Fts3HashClear(pHash); - sqlite3_free(pHash); - } - return rc; -} - -/* -** Allocate an Fts3MultiSegReader for each token in the expression headed -** by pExpr. -** -** An Fts3SegReader object is a cursor that can seek or scan a range of -** entries within a single segment b-tree. An Fts3MultiSegReader uses multiple -** Fts3SegReader objects internally to provide an interface to seek or scan -** within the union of all segments of a b-tree. Hence the name. -** -** If the allocated Fts3MultiSegReader just seeks to a single entry in a -** segment b-tree (if the term is not a prefix or it is a prefix for which -** there exists prefix b-tree of the right length) then it may be traversed -** and merged incrementally. Otherwise, it has to be merged into an in-memory -** doclist and then traversed. -*/ -static void fts3EvalAllocateReaders( - Fts3Cursor *pCsr, /* FTS cursor handle */ - Fts3Expr *pExpr, /* Allocate readers for this expression */ - int *pnToken, /* OUT: Total number of tokens in phrase. */ - int *pnOr, /* OUT: Total number of OR nodes in expr. 
*/ - int *pRc /* IN/OUT: Error code */ -){ - if( pExpr && SQLITE_OK==*pRc ){ - if( pExpr->eType==FTSQUERY_PHRASE ){ - int i; - int nToken = pExpr->pPhrase->nToken; - *pnToken += nToken; - for(i=0; ipPhrase->aToken[i]; - int rc = fts3TermSegReaderCursor(pCsr, - pToken->z, pToken->n, pToken->isPrefix, &pToken->pSegcsr - ); - if( rc!=SQLITE_OK ){ - *pRc = rc; - return; - } - } - assert( pExpr->pPhrase->iDoclistToken==0 ); - pExpr->pPhrase->iDoclistToken = -1; - }else{ - *pnOr += (pExpr->eType==FTSQUERY_OR); - fts3EvalAllocateReaders(pCsr, pExpr->pLeft, pnToken, pnOr, pRc); - fts3EvalAllocateReaders(pCsr, pExpr->pRight, pnToken, pnOr, pRc); - } - } -} - -/* -** Arguments pList/nList contain the doclist for token iToken of phrase p. -** It is merged into the main doclist stored in p->doclist.aAll/nAll. -** -** This function assumes that pList points to a buffer allocated using -** sqlite3_malloc(). This function takes responsibility for eventually -** freeing the buffer. -*/ -static void fts3EvalPhraseMergeToken( - Fts3Table *pTab, /* FTS Table pointer */ - Fts3Phrase *p, /* Phrase to merge pList/nList into */ - int iToken, /* Token pList/nList corresponds to */ - char *pList, /* Pointer to doclist */ - int nList /* Number of bytes in pList */ -){ - assert( iToken!=p->iDoclistToken ); - - if( pList==0 ){ - sqlite3_free(p->doclist.aAll); - p->doclist.aAll = 0; - p->doclist.nAll = 0; - } - - else if( p->iDoclistToken<0 ){ - p->doclist.aAll = pList; - p->doclist.nAll = nList; - } - - else if( p->doclist.aAll==0 ){ - sqlite3_free(pList); - } - - else { - char *pLeft; - char *pRight; - int nLeft; - int nRight; - int nDiff; - - if( p->iDoclistTokendoclist.aAll; - nLeft = p->doclist.nAll; - pRight = pList; - nRight = nList; - nDiff = iToken - p->iDoclistToken; - }else{ - pRight = p->doclist.aAll; - nRight = p->doclist.nAll; - pLeft = pList; - nLeft = nList; - nDiff = p->iDoclistToken - iToken; - } - - fts3DoclistPhraseMerge(pTab->bDescIdx, nDiff, pLeft, nLeft, pRight,&nRight); - sqlite3_free(pLeft); - p->doclist.aAll = pRight; - p->doclist.nAll = nRight; - } - - if( iToken>p->iDoclistToken ) p->iDoclistToken = iToken; -} - -/* -** Load the doclist for phrase p into p->doclist.aAll/nAll. The loaded doclist -** does not take deferred tokens into account. -** -** SQLITE_OK is returned if no error occurs, otherwise an SQLite error code. -*/ -static int fts3EvalPhraseLoad( - Fts3Cursor *pCsr, /* FTS Cursor handle */ - Fts3Phrase *p /* Phrase object */ -){ - Fts3Table *pTab = (Fts3Table *)pCsr->base.pVtab; - int iToken; - int rc = SQLITE_OK; - - for(iToken=0; rc==SQLITE_OK && iTokennToken; iToken++){ - Fts3PhraseToken *pToken = &p->aToken[iToken]; - assert( pToken->pDeferred==0 || pToken->pSegcsr==0 ); - - if( pToken->pSegcsr ){ - int nThis = 0; - char *pThis = 0; - rc = fts3TermSelect(pTab, pToken, p->iColumn, &nThis, &pThis); - if( rc==SQLITE_OK ){ - fts3EvalPhraseMergeToken(pTab, p, iToken, pThis, nThis); - } - } - assert( pToken->pSegcsr==0 ); - } - - return rc; -} - -/* -** This function is called on each phrase after the position lists for -** any deferred tokens have been loaded into memory. It updates the phrases -** current position list to include only those positions that are really -** instances of the phrase (after considering deferred tokens). If this -** means that the phrase does not appear in the current row, doclist.pList -** and doclist.nList are both zeroed. -** -** SQLITE_OK is returned if no error occurs, otherwise an SQLite error code. 
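[editor's note] fts3EvalPhraseMergeToken() above keeps, for each docid, only the positions at which the phrase tokens occur the required distance apart. A hedged sketch of that filtering step on plain arrays; the Entry type and the sample postings are hypothetical, and real FTS3 performs the same test directly on the encoded doclists.

#include <stdio.h>

/* One docid with the positions at which a single token occurs. */
typedef struct { int docid; int aPos[8]; int nPos; } Entry;

/* Keep only positions p in b such that (p - nDist) is a position in a.
** Returns the number of surviving positions (the phrase ends there). */
static int phraseFilter(const Entry *a, Entry *b, int nDist){
  int i, j, n = 0;
  for(j=0; j<b->nPos; j++){
    for(i=0; i<a->nPos; i++){
      if( b->aPos[j]==a->aPos[i]+nDist ){ b->aPos[n++] = b->aPos[j]; break; }
    }
  }
  b->nPos = n;
  return n;
}

int main(void){
  /* Hypothetical postings for the phrase "quick fox" in docid 7:
  ** "quick" at positions 1 and 9, "fox" at positions 2 and 5. */
  Entry quick = { 7, {1, 9}, 2 };
  Entry fox   = { 7, {2, 5}, 2 };

  if( quick.docid==fox.docid && phraseFilter(&quick, &fox, 1) ){
    printf("docid %d matches \"quick fox\" ending at position %d\n",
           fox.docid, fox.aPos[0]);
  }
  return 0;
}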
-*/ -static int fts3EvalDeferredPhrase(Fts3Cursor *pCsr, Fts3Phrase *pPhrase){ - int iToken; /* Used to iterate through phrase tokens */ - char *aPoslist = 0; /* Position list for deferred tokens */ - int nPoslist = 0; /* Number of bytes in aPoslist */ - int iPrev = -1; /* Token number of previous deferred token */ - - assert( pPhrase->doclist.bFreeList==0 ); - - for(iToken=0; iTokennToken; iToken++){ - Fts3PhraseToken *pToken = &pPhrase->aToken[iToken]; - Fts3DeferredToken *pDeferred = pToken->pDeferred; - - if( pDeferred ){ - char *pList; - int nList; - int rc = sqlite3Fts3DeferredTokenList(pDeferred, &pList, &nList); - if( rc!=SQLITE_OK ) return rc; - - if( pList==0 ){ - sqlite3_free(aPoslist); - pPhrase->doclist.pList = 0; - pPhrase->doclist.nList = 0; - return SQLITE_OK; - - }else if( aPoslist==0 ){ - aPoslist = pList; - nPoslist = nList; - - }else{ - char *aOut = pList; - char *p1 = aPoslist; - char *p2 = aOut; - - assert( iPrev>=0 ); - fts3PoslistPhraseMerge(&aOut, iToken-iPrev, 0, 1, &p1, &p2); - sqlite3_free(aPoslist); - aPoslist = pList; - nPoslist = (int)(aOut - aPoslist); - if( nPoslist==0 ){ - sqlite3_free(aPoslist); - pPhrase->doclist.pList = 0; - pPhrase->doclist.nList = 0; - return SQLITE_OK; - } - } - iPrev = iToken; - } - } - - if( iPrev>=0 ){ - int nMaxUndeferred = pPhrase->iDoclistToken; - if( nMaxUndeferred<0 ){ - pPhrase->doclist.pList = aPoslist; - pPhrase->doclist.nList = nPoslist; - pPhrase->doclist.iDocid = pCsr->iPrevId; - pPhrase->doclist.bFreeList = 1; - }else{ - int nDistance; - char *p1; - char *p2; - char *aOut; - - if( nMaxUndeferred>iPrev ){ - p1 = aPoslist; - p2 = pPhrase->doclist.pList; - nDistance = nMaxUndeferred - iPrev; - }else{ - p1 = pPhrase->doclist.pList; - p2 = aPoslist; - nDistance = iPrev - nMaxUndeferred; - } - - aOut = (char *)sqlite3_malloc(nPoslist+8); - if( !aOut ){ - sqlite3_free(aPoslist); - return SQLITE_NOMEM; - } - - pPhrase->doclist.pList = aOut; - if( fts3PoslistPhraseMerge(&aOut, nDistance, 0, 1, &p1, &p2) ){ - pPhrase->doclist.bFreeList = 1; - pPhrase->doclist.nList = (int)(aOut - pPhrase->doclist.pList); - }else{ - sqlite3_free(aOut); - pPhrase->doclist.pList = 0; - pPhrase->doclist.nList = 0; - } - sqlite3_free(aPoslist); - } - } - - return SQLITE_OK; -} - -/* -** Maximum number of tokens a phrase may have to be considered for the -** incremental doclists strategy. -*/ -#define MAX_INCR_PHRASE_TOKENS 4 - -/* -** This function is called for each Fts3Phrase in a full-text query -** expression to initialize the mechanism for returning rows. Once this -** function has been called successfully on an Fts3Phrase, it may be -** used with fts3EvalPhraseNext() to iterate through the matching docids. -** -** If parameter bOptOk is true, then the phrase may (or may not) use the -** incremental loading strategy. Otherwise, the entire doclist is loaded into -** memory within this call. -** -** SQLITE_OK is returned if no error occurs, otherwise an SQLite error code. -*/ -static int fts3EvalPhraseStart(Fts3Cursor *pCsr, int bOptOk, Fts3Phrase *p){ - Fts3Table *pTab = (Fts3Table *)pCsr->base.pVtab; - int rc = SQLITE_OK; /* Error code */ - int i; - - /* Determine if doclists may be loaded from disk incrementally. This is - ** possible if the bOptOk argument is true, the FTS doclists will be - ** scanned in forward order, and the phrase consists of - ** MAX_INCR_PHRASE_TOKENS or fewer tokens, none of which are are "^first" - ** tokens or prefix tokens that cannot use a prefix-index. 
*/ - int bHaveIncr = 0; - int bIncrOk = (bOptOk - && pCsr->bDesc==pTab->bDescIdx - && p->nToken<=MAX_INCR_PHRASE_TOKENS && p->nToken>0 - && p->nToken<=MAX_INCR_PHRASE_TOKENS && p->nToken>0 -#ifdef SQLITE_TEST - && pTab->bNoIncrDoclist==0 -#endif - ); - for(i=0; bIncrOk==1 && inToken; i++){ - Fts3PhraseToken *pToken = &p->aToken[i]; - if( pToken->bFirst || (pToken->pSegcsr!=0 && !pToken->pSegcsr->bLookup) ){ - bIncrOk = 0; - } - if( pToken->pSegcsr ) bHaveIncr = 1; - } - - if( bIncrOk && bHaveIncr ){ - /* Use the incremental approach. */ - int iCol = (p->iColumn >= pTab->nColumn ? -1 : p->iColumn); - for(i=0; rc==SQLITE_OK && inToken; i++){ - Fts3PhraseToken *pToken = &p->aToken[i]; - Fts3MultiSegReader *pSegcsr = pToken->pSegcsr; - if( pSegcsr ){ - rc = sqlite3Fts3MsrIncrStart(pTab, pSegcsr, iCol, pToken->z, pToken->n); - } - } - p->bIncr = 1; - }else{ - /* Load the full doclist for the phrase into memory. */ - rc = fts3EvalPhraseLoad(pCsr, p); - p->bIncr = 0; - } - - assert( rc!=SQLITE_OK || p->nToken<1 || p->aToken[0].pSegcsr==0 || p->bIncr ); - return rc; -} - -/* -** This function is used to iterate backwards (from the end to start) -** through doclists. It is used by this module to iterate through phrase -** doclists in reverse and by the fts3_write.c module to iterate through -** pending-terms lists when writing to databases with "order=desc". -** -** The doclist may be sorted in ascending (parameter bDescIdx==0) or -** descending (parameter bDescIdx==1) order of docid. Regardless, this -** function iterates from the end of the doclist to the beginning. -*/ -SQLITE_PRIVATE void sqlite3Fts3DoclistPrev( - int bDescIdx, /* True if the doclist is desc */ - char *aDoclist, /* Pointer to entire doclist */ - int nDoclist, /* Length of aDoclist in bytes */ - char **ppIter, /* IN/OUT: Iterator pointer */ - sqlite3_int64 *piDocid, /* IN/OUT: Docid pointer */ - int *pnList, /* OUT: List length pointer */ - u8 *pbEof /* OUT: End-of-file flag */ -){ - char *p = *ppIter; - - assert( nDoclist>0 ); - assert( *pbEof==0 ); - assert( p || *piDocid==0 ); - assert( !p || (p>aDoclist && p<&aDoclist[nDoclist]) ); - - if( p==0 ){ - sqlite3_int64 iDocid = 0; - char *pNext = 0; - char *pDocid = aDoclist; - char *pEnd = &aDoclist[nDoclist]; - int iMul = 1; - - while( pDocid0 ); - assert( *pbEof==0 ); - assert( p || *piDocid==0 ); - assert( !p || (p>=aDoclist && p<=&aDoclist[nDoclist]) ); - - if( p==0 ){ - p = aDoclist; - p += sqlite3Fts3GetVarint(p, piDocid); - }else{ - fts3PoslistCopy(0, &p); - if( p>=&aDoclist[nDoclist] ){ - *pbEof = 1; - }else{ - sqlite3_int64 iVar; - p += sqlite3Fts3GetVarint(p, &iVar); - *piDocid += ((bDescIdx ? -1 : 1) * iVar); - } - } - - *ppIter = p; -} - -/* -** Advance the iterator pDL to the next entry in pDL->aAll/nAll. Set *pbEof -** to true if EOF is reached. -*/ -static void fts3EvalDlPhraseNext( - Fts3Table *pTab, - Fts3Doclist *pDL, - u8 *pbEof -){ - char *pIter; /* Used to iterate through aAll */ - char *pEnd = &pDL->aAll[pDL->nAll]; /* 1 byte past end of aAll */ - - if( pDL->pNextDocid ){ - pIter = pDL->pNextDocid; - }else{ - pIter = pDL->aAll; - } - - if( pIter>=pEnd ){ - /* We have already reached the end of this doclist. EOF. 
*/ - *pbEof = 1; - }else{ - sqlite3_int64 iDelta; - pIter += sqlite3Fts3GetVarint(pIter, &iDelta); - if( pTab->bDescIdx==0 || pDL->pNextDocid==0 ){ - pDL->iDocid += iDelta; - }else{ - pDL->iDocid -= iDelta; - } - pDL->pList = pIter; - fts3PoslistCopy(0, &pIter); - pDL->nList = (int)(pIter - pDL->pList); - - /* pIter now points just past the 0x00 that terminates the position- - ** list for document pDL->iDocid. However, if this position-list was - ** edited in place by fts3EvalNearTrim(), then pIter may not actually - ** point to the start of the next docid value. The following line deals - ** with this case by advancing pIter past the zero-padding added by - ** fts3EvalNearTrim(). */ - while( pIterpNextDocid = pIter; - assert( pIter>=&pDL->aAll[pDL->nAll] || *pIter ); - *pbEof = 0; - } -} - -/* -** Helper type used by fts3EvalIncrPhraseNext() and incrPhraseTokenNext(). -*/ -typedef struct TokenDoclist TokenDoclist; -struct TokenDoclist { - int bIgnore; - sqlite3_int64 iDocid; - char *pList; - int nList; -}; - -/* -** Token pToken is an incrementally loaded token that is part of a -** multi-token phrase. Advance it to the next matching document in the -** database and populate output variable *p with the details of the new -** entry. Or, if the iterator has reached EOF, set *pbEof to true. -** -** If an error occurs, return an SQLite error code. Otherwise, return -** SQLITE_OK. -*/ -static int incrPhraseTokenNext( - Fts3Table *pTab, /* Virtual table handle */ - Fts3Phrase *pPhrase, /* Phrase to advance token of */ - int iToken, /* Specific token to advance */ - TokenDoclist *p, /* OUT: Docid and doclist for new entry */ - u8 *pbEof /* OUT: True if iterator is at EOF */ -){ - int rc = SQLITE_OK; - - if( pPhrase->iDoclistToken==iToken ){ - assert( p->bIgnore==0 ); - assert( pPhrase->aToken[iToken].pSegcsr==0 ); - fts3EvalDlPhraseNext(pTab, &pPhrase->doclist, pbEof); - p->pList = pPhrase->doclist.pList; - p->nList = pPhrase->doclist.nList; - p->iDocid = pPhrase->doclist.iDocid; - }else{ - Fts3PhraseToken *pToken = &pPhrase->aToken[iToken]; - assert( pToken->pDeferred==0 ); - assert( pToken->pSegcsr || pPhrase->iDoclistToken>=0 ); - if( pToken->pSegcsr ){ - assert( p->bIgnore==0 ); - rc = sqlite3Fts3MsrIncrNext( - pTab, pToken->pSegcsr, &p->iDocid, &p->pList, &p->nList - ); - if( p->pList==0 ) *pbEof = 1; - }else{ - p->bIgnore = 1; - } - } - - return rc; -} - - -/* -** The phrase iterator passed as the second argument: -** -** * features at least one token that uses an incremental doclist, and -** -** * does not contain any deferred tokens. -** -** Advance it to the next matching documnent in the database and populate -** the Fts3Doclist.pList and nList fields. -** -** If there is no "next" entry and no error occurs, then *pbEof is set to -** 1 before returning. Otherwise, if no error occurs and the iterator is -** successfully advanced, *pbEof is set to 0. -** -** If an error occurs, return an SQLite error code. Otherwise, return -** SQLITE_OK. -*/ -static int fts3EvalIncrPhraseNext( - Fts3Cursor *pCsr, /* FTS Cursor handle */ - Fts3Phrase *p, /* Phrase object to advance to next docid */ - u8 *pbEof /* OUT: Set to 1 if EOF */ -){ - int rc = SQLITE_OK; - Fts3Doclist *pDL = &p->doclist; - Fts3Table *pTab = (Fts3Table *)pCsr->base.pVtab; - u8 bEof = 0; - - /* This is only called if it is guaranteed that the phrase has at least - ** one incremental token. In which case the bIncr flag is set. 
*/ - assert( p->bIncr==1 ); - - if( p->nToken==1 && p->bIncr ){ - rc = sqlite3Fts3MsrIncrNext(pTab, p->aToken[0].pSegcsr, - &pDL->iDocid, &pDL->pList, &pDL->nList - ); - if( pDL->pList==0 ) bEof = 1; - }else{ - int bDescDoclist = pCsr->bDesc; - struct TokenDoclist a[MAX_INCR_PHRASE_TOKENS]; - - memset(a, 0, sizeof(a)); - assert( p->nToken<=MAX_INCR_PHRASE_TOKENS ); - assert( p->iDoclistTokennToken && bEof==0; i++){ - rc = incrPhraseTokenNext(pTab, p, i, &a[i], &bEof); - if( a[i].bIgnore==0 && (bMaxSet==0 || DOCID_CMP(iMax, a[i].iDocid)<0) ){ - iMax = a[i].iDocid; - bMaxSet = 1; - } - } - assert( rc!=SQLITE_OK || a[p->nToken-1].bIgnore==0 ); - assert( rc!=SQLITE_OK || bMaxSet ); - - /* Keep advancing iterators until they all point to the same document */ - for(i=0; inToken; i++){ - while( rc==SQLITE_OK && bEof==0 - && a[i].bIgnore==0 && DOCID_CMP(a[i].iDocid, iMax)<0 - ){ - rc = incrPhraseTokenNext(pTab, p, i, &a[i], &bEof); - if( DOCID_CMP(a[i].iDocid, iMax)>0 ){ - iMax = a[i].iDocid; - i = 0; - } - } - } - - /* Check if the current entries really are a phrase match */ - if( bEof==0 ){ - int nList = 0; - int nByte = a[p->nToken-1].nList; - char *aDoclist = sqlite3_malloc(nByte+1); - if( !aDoclist ) return SQLITE_NOMEM; - memcpy(aDoclist, a[p->nToken-1].pList, nByte+1); - - for(i=0; i<(p->nToken-1); i++){ - if( a[i].bIgnore==0 ){ - char *pL = a[i].pList; - char *pR = aDoclist; - char *pOut = aDoclist; - int nDist = p->nToken-1-i; - int res = fts3PoslistPhraseMerge(&pOut, nDist, 0, 1, &pL, &pR); - if( res==0 ) break; - nList = (int)(pOut - aDoclist); - } - } - if( i==(p->nToken-1) ){ - pDL->iDocid = iMax; - pDL->pList = aDoclist; - pDL->nList = nList; - pDL->bFreeList = 1; - break; - } - sqlite3_free(aDoclist); - } - } - } - - *pbEof = bEof; - return rc; -} - -/* -** Attempt to move the phrase iterator to point to the next matching docid. -** If an error occurs, return an SQLite error code. Otherwise, return -** SQLITE_OK. -** -** If there is no "next" entry and no error occurs, then *pbEof is set to -** 1 before returning. Otherwise, if no error occurs and the iterator is -** successfully advanced, *pbEof is set to 0. -*/ -static int fts3EvalPhraseNext( - Fts3Cursor *pCsr, /* FTS Cursor handle */ - Fts3Phrase *p, /* Phrase object to advance to next docid */ - u8 *pbEof /* OUT: Set to 1 if EOF */ -){ - int rc = SQLITE_OK; - Fts3Doclist *pDL = &p->doclist; - Fts3Table *pTab = (Fts3Table *)pCsr->base.pVtab; - - if( p->bIncr ){ - rc = fts3EvalIncrPhraseNext(pCsr, p, pbEof); - }else if( pCsr->bDesc!=pTab->bDescIdx && pDL->nAll ){ - sqlite3Fts3DoclistPrev(pTab->bDescIdx, pDL->aAll, pDL->nAll, - &pDL->pNextDocid, &pDL->iDocid, &pDL->nList, pbEof - ); - pDL->pList = pDL->pNextDocid; - }else{ - fts3EvalDlPhraseNext(pTab, pDL, pbEof); - } - - return rc; -} - -/* -** -** If *pRc is not SQLITE_OK when this function is called, it is a no-op. -** Otherwise, fts3EvalPhraseStart() is called on all phrases within the -** expression. Also the Fts3Expr.bDeferred variable is set to true for any -** expressions for which all descendent tokens are deferred. -** -** If parameter bOptOk is zero, then it is guaranteed that the -** Fts3Phrase.doclist.aAll/nAll variables contain the entire doclist for -** each phrase in the expression (subject to deferred token processing). -** Or, if bOptOk is non-zero, then one or more tokens within the expression -** may be loaded incrementally, meaning doclist.aAll/nAll is not available. 
-** -** If an error occurs within this function, *pRc is set to an SQLite error -** code before returning. -*/ -static void fts3EvalStartReaders( - Fts3Cursor *pCsr, /* FTS Cursor handle */ - Fts3Expr *pExpr, /* Expression to initialize phrases in */ - int *pRc /* IN/OUT: Error code */ -){ - if( pExpr && SQLITE_OK==*pRc ){ - if( pExpr->eType==FTSQUERY_PHRASE ){ - int i; - int nToken = pExpr->pPhrase->nToken; - for(i=0; ipPhrase->aToken[i].pDeferred==0 ) break; - } - pExpr->bDeferred = (i==nToken); - *pRc = fts3EvalPhraseStart(pCsr, 1, pExpr->pPhrase); - }else{ - fts3EvalStartReaders(pCsr, pExpr->pLeft, pRc); - fts3EvalStartReaders(pCsr, pExpr->pRight, pRc); - pExpr->bDeferred = (pExpr->pLeft->bDeferred && pExpr->pRight->bDeferred); - } - } -} - -/* -** An array of the following structures is assembled as part of the process -** of selecting tokens to defer before the query starts executing (as part -** of the xFilter() method). There is one element in the array for each -** token in the FTS expression. -** -** Tokens are divided into AND/NEAR clusters. All tokens in a cluster belong -** to phrases that are connected only by AND and NEAR operators (not OR or -** NOT). When determining tokens to defer, each AND/NEAR cluster is considered -** separately. The root of a tokens AND/NEAR cluster is stored in -** Fts3TokenAndCost.pRoot. -*/ -typedef struct Fts3TokenAndCost Fts3TokenAndCost; -struct Fts3TokenAndCost { - Fts3Phrase *pPhrase; /* The phrase the token belongs to */ - int iToken; /* Position of token in phrase */ - Fts3PhraseToken *pToken; /* The token itself */ - Fts3Expr *pRoot; /* Root of NEAR/AND cluster */ - int nOvfl; /* Number of overflow pages to load doclist */ - int iCol; /* The column the token must match */ -}; - -/* -** This function is used to populate an allocated Fts3TokenAndCost array. -** -** If *pRc is not SQLITE_OK when this function is called, it is a no-op. -** Otherwise, if an error occurs during execution, *pRc is set to an -** SQLite error code. -*/ -static void fts3EvalTokenCosts( - Fts3Cursor *pCsr, /* FTS Cursor handle */ - Fts3Expr *pRoot, /* Root of current AND/NEAR cluster */ - Fts3Expr *pExpr, /* Expression to consider */ - Fts3TokenAndCost **ppTC, /* Write new entries to *(*ppTC)++ */ - Fts3Expr ***ppOr, /* Write new OR root to *(*ppOr)++ */ - int *pRc /* IN/OUT: Error code */ -){ - if( *pRc==SQLITE_OK ){ - if( pExpr->eType==FTSQUERY_PHRASE ){ - Fts3Phrase *pPhrase = pExpr->pPhrase; - int i; - for(i=0; *pRc==SQLITE_OK && inToken; i++){ - Fts3TokenAndCost *pTC = (*ppTC)++; - pTC->pPhrase = pPhrase; - pTC->iToken = i; - pTC->pRoot = pRoot; - pTC->pToken = &pPhrase->aToken[i]; - pTC->iCol = pPhrase->iColumn; - *pRc = sqlite3Fts3MsrOvfl(pCsr, pTC->pToken->pSegcsr, &pTC->nOvfl); - } - }else if( pExpr->eType!=FTSQUERY_NOT ){ - assert( pExpr->eType==FTSQUERY_OR - || pExpr->eType==FTSQUERY_AND - || pExpr->eType==FTSQUERY_NEAR - ); - assert( pExpr->pLeft && pExpr->pRight ); - if( pExpr->eType==FTSQUERY_OR ){ - pRoot = pExpr->pLeft; - **ppOr = pRoot; - (*ppOr)++; - } - fts3EvalTokenCosts(pCsr, pRoot, pExpr->pLeft, ppTC, ppOr, pRc); - if( pExpr->eType==FTSQUERY_OR ){ - pRoot = pExpr->pRight; - **ppOr = pRoot; - (*ppOr)++; - } - fts3EvalTokenCosts(pCsr, pRoot, pExpr->pRight, ppTC, ppOr, pRc); - } - } -} - -/* -** Determine the average document (row) size in pages. If successful, -** write this value to *pnPage and return SQLITE_OK. Otherwise, return -** an SQLite error code. 
-** -** The average document size in pages is calculated by first calculating -** determining the average size in bytes, B. If B is less than the amount -** of data that will fit on a single leaf page of an intkey table in -** this database, then the average docsize is 1. Otherwise, it is 1 plus -** the number of overflow pages consumed by a record B bytes in size. -*/ -static int fts3EvalAverageDocsize(Fts3Cursor *pCsr, int *pnPage){ - if( pCsr->nRowAvg==0 ){ - /* The average document size, which is required to calculate the cost - ** of each doclist, has not yet been determined. Read the required - ** data from the %_stat table to calculate it. - ** - ** Entry 0 of the %_stat table is a blob containing (nCol+1) FTS3 - ** varints, where nCol is the number of columns in the FTS3 table. - ** The first varint is the number of documents currently stored in - ** the table. The following nCol varints contain the total amount of - ** data stored in all rows of each column of the table, from left - ** to right. - */ - int rc; - Fts3Table *p = (Fts3Table*)pCsr->base.pVtab; - sqlite3_stmt *pStmt; - sqlite3_int64 nDoc = 0; - sqlite3_int64 nByte = 0; - const char *pEnd; - const char *a; - - rc = sqlite3Fts3SelectDoctotal(p, &pStmt); - if( rc!=SQLITE_OK ) return rc; - a = sqlite3_column_blob(pStmt, 0); - assert( a ); - - pEnd = &a[sqlite3_column_bytes(pStmt, 0)]; - a += sqlite3Fts3GetVarint(a, &nDoc); - while( anDoc = nDoc; - pCsr->nRowAvg = (int)(((nByte / nDoc) + p->nPgsz) / p->nPgsz); - assert( pCsr->nRowAvg>0 ); - rc = sqlite3_reset(pStmt); - if( rc!=SQLITE_OK ) return rc; - } - - *pnPage = pCsr->nRowAvg; - return SQLITE_OK; -} - -/* -** This function is called to select the tokens (if any) that will be -** deferred. The array aTC[] has already been populated when this is -** called. -** -** This function is called once for each AND/NEAR cluster in the -** expression. Each invocation determines which tokens to defer within -** the cluster with root node pRoot. See comments above the definition -** of struct Fts3TokenAndCost for more details. -** -** If no error occurs, SQLITE_OK is returned and sqlite3Fts3DeferToken() -** called on each token to defer. Otherwise, an SQLite error code is -** returned. -*/ -static int fts3EvalSelectDeferred( - Fts3Cursor *pCsr, /* FTS Cursor handle */ - Fts3Expr *pRoot, /* Consider tokens with this root node */ - Fts3TokenAndCost *aTC, /* Array of expression tokens and costs */ - int nTC /* Number of entries in aTC[] */ -){ - Fts3Table *pTab = (Fts3Table *)pCsr->base.pVtab; - int nDocSize = 0; /* Number of pages per doc loaded */ - int rc = SQLITE_OK; /* Return code */ - int ii; /* Iterator variable for various purposes */ - int nOvfl = 0; /* Total overflow pages used by doclists */ - int nToken = 0; /* Total number of tokens in cluster */ - - int nMinEst = 0; /* The minimum count for any phrase so far. */ - int nLoad4 = 1; /* (Phrases that will be loaded)^4. */ - - /* Tokens are never deferred for FTS tables created using the content=xxx - ** option. The reason being that it is not guaranteed that the content - ** table actually contains the same data as the index. To prevent this from - ** causing any problems, the deferred token optimization is completely - ** disabled for content=xxx tables. */ - if( pTab->zContentTbl ){ - return SQLITE_OK; - } - - /* Count the tokens in this AND/NEAR cluster. If none of the doclists - ** associated with the tokens spill onto overflow pages, or if there is - ** only 1 token, exit early. No tokens to defer in this case. 
*/ - for(ii=0; ii0 ); - - - /* Iterate through all tokens in this AND/NEAR cluster, in ascending order - ** of the number of overflow pages that will be loaded by the pager layer - ** to retrieve the entire doclist for the token from the full-text index. - ** Load the doclists for tokens that are either: - ** - ** a. The cheapest token in the entire query (i.e. the one visited by the - ** first iteration of this loop), or - ** - ** b. Part of a multi-token phrase. - ** - ** After each token doclist is loaded, merge it with the others from the - ** same phrase and count the number of documents that the merged doclist - ** contains. Set variable "nMinEst" to the smallest number of documents in - ** any phrase doclist for which 1 or more token doclists have been loaded. - ** Let nOther be the number of other phrases for which it is certain that - ** one or more tokens will not be deferred. - ** - ** Then, for each token, defer it if loading the doclist would result in - ** loading N or more overflow pages into memory, where N is computed as: - ** - ** (nMinEst + 4^nOther - 1) / (4^nOther) - */ - for(ii=0; iinOvfl) - ){ - pTC = &aTC[iTC]; - } - } - assert( pTC ); - - if( ii && pTC->nOvfl>=((nMinEst+(nLoad4/4)-1)/(nLoad4/4))*nDocSize ){ - /* The number of overflow pages to load for this (and therefore all - ** subsequent) tokens is greater than the estimated number of pages - ** that will be loaded if all subsequent tokens are deferred. - */ - Fts3PhraseToken *pToken = pTC->pToken; - rc = sqlite3Fts3DeferToken(pCsr, pToken, pTC->iCol); - fts3SegReaderCursorFree(pToken->pSegcsr); - pToken->pSegcsr = 0; - }else{ - /* Set nLoad4 to the value of (4^nOther) for the next iteration of the - ** for-loop. Except, limit the value to 2^24 to prevent it from - ** overflowing the 32-bit integer it is stored in. */ - if( ii<12 ) nLoad4 = nLoad4*4; - - if( ii==0 || (pTC->pPhrase->nToken>1 && ii!=nToken-1) ){ - /* Either this is the cheapest token in the entire query, or it is - ** part of a multi-token phrase. Either way, the entire doclist will - ** (eventually) be loaded into memory. It may as well be now. */ - Fts3PhraseToken *pToken = pTC->pToken; - int nList = 0; - char *pList = 0; - rc = fts3TermSelect(pTab, pToken, pTC->iCol, &nList, &pList); - assert( rc==SQLITE_OK || pList==0 ); - if( rc==SQLITE_OK ){ - int nCount; - fts3EvalPhraseMergeToken(pTab, pTC->pPhrase, pTC->iToken,pList,nList); - nCount = fts3DoclistCountDocids( - pTC->pPhrase->doclist.aAll, pTC->pPhrase->doclist.nAll - ); - if( ii==0 || nCountpToken = 0; - } - - return rc; -} - -/* -** This function is called from within the xFilter method. It initializes -** the full-text query currently stored in pCsr->pExpr. To iterate through -** the results of a query, the caller does: -** -** fts3EvalStart(pCsr); -** while( 1 ){ -** fts3EvalNext(pCsr); -** if( pCsr->bEof ) break; -** ... return row pCsr->iPrevId to the caller ... -** } -*/ -static int fts3EvalStart(Fts3Cursor *pCsr){ - Fts3Table *pTab = (Fts3Table *)pCsr->base.pVtab; - int rc = SQLITE_OK; - int nToken = 0; - int nOr = 0; - - /* Allocate a MultiSegReader for each token in the expression. */ - fts3EvalAllocateReaders(pCsr, pCsr->pExpr, &nToken, &nOr, &rc); - - /* Determine which, if any, tokens in the expression should be deferred. 
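[editor's note] The deferral test above compares the cost of reading a token's doclist (its count of overflow pages) against an estimate of simply loading the remaining candidate rows from the content table. A small worked example of that arithmetic with invented numbers:

#include <stdio.h>

int main(void){
  /* Illustrative figures for one token in an AND/NEAR cluster. */
  int nDocSize = 2;      /* pages needed to load one average row */
  int nMinEst  = 1000;   /* smallest phrase doc-count seen so far */
  int nLoad4   = 4;      /* 4^(number of phrases already loaded) */
  int nOvfl    = 900;    /* overflow pages this token's doclist occupies */

  /* Same shape as the comparison in fts3EvalSelectDeferred(): defer the
  ** token if its doclist costs at least as many pages as the estimated
  ** cost of reading the candidate rows directly. */
  int threshold = ((nMinEst + (nLoad4/4) - 1) / (nLoad4/4)) * nDocSize;
  printf("threshold=%d pages, doclist=%d pages -> %s\n",
         threshold, nOvfl, nOvfl>=threshold ? "defer" : "load");
  return 0;
}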
*/ -#ifndef SQLITE_DISABLE_FTS4_DEFERRED - if( rc==SQLITE_OK && nToken>1 && pTab->bFts4 ){ - Fts3TokenAndCost *aTC; - Fts3Expr **apOr; - aTC = (Fts3TokenAndCost *)sqlite3_malloc( - sizeof(Fts3TokenAndCost) * nToken - + sizeof(Fts3Expr *) * nOr * 2 - ); - apOr = (Fts3Expr **)&aTC[nToken]; - - if( !aTC ){ - rc = SQLITE_NOMEM; - }else{ - int ii; - Fts3TokenAndCost *pTC = aTC; - Fts3Expr **ppOr = apOr; - - fts3EvalTokenCosts(pCsr, 0, pCsr->pExpr, &pTC, &ppOr, &rc); - nToken = (int)(pTC-aTC); - nOr = (int)(ppOr-apOr); - - if( rc==SQLITE_OK ){ - rc = fts3EvalSelectDeferred(pCsr, 0, aTC, nToken); - for(ii=0; rc==SQLITE_OK && iipExpr, &rc); - return rc; -} - -/* -** Invalidate the current position list for phrase pPhrase. -*/ -static void fts3EvalInvalidatePoslist(Fts3Phrase *pPhrase){ - if( pPhrase->doclist.bFreeList ){ - sqlite3_free(pPhrase->doclist.pList); - } - pPhrase->doclist.pList = 0; - pPhrase->doclist.nList = 0; - pPhrase->doclist.bFreeList = 0; -} - -/* -** This function is called to edit the position list associated with -** the phrase object passed as the fifth argument according to a NEAR -** condition. For example: -** -** abc NEAR/5 "def ghi" -** -** Parameter nNear is passed the NEAR distance of the expression (5 in -** the example above). When this function is called, *paPoslist points to -** the position list, and *pnToken is the number of phrase tokens in, the -** phrase on the other side of the NEAR operator to pPhrase. For example, -** if pPhrase refers to the "def ghi" phrase, then *paPoslist points to -** the position list associated with phrase "abc". -** -** All positions in the pPhrase position list that are not sufficiently -** close to a position in the *paPoslist position list are removed. If this -** leaves 0 positions, zero is returned. Otherwise, non-zero. -** -** Before returning, *paPoslist is set to point to the position lsit -** associated with pPhrase. And *pnToken is set to the number of tokens in -** pPhrase. -*/ -static int fts3EvalNearTrim( - int nNear, /* NEAR distance. As in "NEAR/nNear". */ - char *aTmp, /* Temporary space to use */ - char **paPoslist, /* IN/OUT: Position list */ - int *pnToken, /* IN/OUT: Tokens in phrase of *paPoslist */ - Fts3Phrase *pPhrase /* The phrase object to trim the doclist of */ -){ - int nParam1 = nNear + pPhrase->nToken; - int nParam2 = nNear + *pnToken; - int nNew; - char *p2; - char *pOut; - int res; - - assert( pPhrase->doclist.pList ); - - p2 = pOut = pPhrase->doclist.pList; - res = fts3PoslistNearMerge( - &pOut, aTmp, nParam1, nParam2, paPoslist, &p2 - ); - if( res ){ - nNew = (int)(pOut - pPhrase->doclist.pList) - 1; - assert( pPhrase->doclist.pList[nNew]=='\0' ); - assert( nNew<=pPhrase->doclist.nList && nNew>0 ); - memset(&pPhrase->doclist.pList[nNew], 0, pPhrase->doclist.nList - nNew); - pPhrase->doclist.nList = nNew; - *paPoslist = pPhrase->doclist.pList; - *pnToken = pPhrase->nToken; - } - - return res; -} - -/* -** This function is a no-op if *pRc is other than SQLITE_OK when it is called. -** Otherwise, it advances the expression passed as the second argument to -** point to the next matching row in the database. Expressions iterate through -** matching rows in docid order. Ascending order if Fts3Cursor.bDesc is zero, -** or descending if it is non-zero. -** -** If an error occurs, *pRc is set to an SQLite error code. Otherwise, if -** successful, the following variables in pExpr are set: -** -** Fts3Expr.bEof (non-zero if EOF - there is no next row) -** Fts3Expr.iDocid (valid if bEof==0. 
The docid of the next row) -** -** If the expression is of type FTSQUERY_PHRASE, and the expression is not -** at EOF, then the following variables are populated with the position list -** for the phrase for the visited row: -** -** FTs3Expr.pPhrase->doclist.nList (length of pList in bytes) -** FTs3Expr.pPhrase->doclist.pList (pointer to position list) -** -** It says above that this function advances the expression to the next -** matching row. This is usually true, but there are the following exceptions: -** -** 1. Deferred tokens are not taken into account. If a phrase consists -** entirely of deferred tokens, it is assumed to match every row in -** the db. In this case the position-list is not populated at all. -** -** Or, if a phrase contains one or more deferred tokens and one or -** more non-deferred tokens, then the expression is advanced to the -** next possible match, considering only non-deferred tokens. In other -** words, if the phrase is "A B C", and "B" is deferred, the expression -** is advanced to the next row that contains an instance of "A * C", -** where "*" may match any single token. The position list in this case -** is populated as for "A * C" before returning. -** -** 2. NEAR is treated as AND. If the expression is "x NEAR y", it is -** advanced to point to the next row that matches "x AND y". -** -** See fts3EvalTestDeferredAndNear() for details on testing if a row is -** really a match, taking into account deferred tokens and NEAR operators. -*/ -static void fts3EvalNextRow( - Fts3Cursor *pCsr, /* FTS Cursor handle */ - Fts3Expr *pExpr, /* Expr. to advance to next matching row */ - int *pRc /* IN/OUT: Error code */ -){ - if( *pRc==SQLITE_OK ){ - int bDescDoclist = pCsr->bDesc; /* Used by DOCID_CMP() macro */ - assert( pExpr->bEof==0 ); - pExpr->bStart = 1; - - switch( pExpr->eType ){ - case FTSQUERY_NEAR: - case FTSQUERY_AND: { - Fts3Expr *pLeft = pExpr->pLeft; - Fts3Expr *pRight = pExpr->pRight; - assert( !pLeft->bDeferred || !pRight->bDeferred ); - - if( pLeft->bDeferred ){ - /* LHS is entirely deferred. So we assume it matches every row. - ** Advance the RHS iterator to find the next row visited. */ - fts3EvalNextRow(pCsr, pRight, pRc); - pExpr->iDocid = pRight->iDocid; - pExpr->bEof = pRight->bEof; - }else if( pRight->bDeferred ){ - /* RHS is entirely deferred. So we assume it matches every row. - ** Advance the LHS iterator to find the next row visited. */ - fts3EvalNextRow(pCsr, pLeft, pRc); - pExpr->iDocid = pLeft->iDocid; - pExpr->bEof = pLeft->bEof; - }else{ - /* Neither the RHS or LHS are deferred. 
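** For example (hypothetical docids): with the LHS positioned on docids
** {3, 7, 9} and the RHS on {7, 10}, the loop below repeatedly advances
** whichever side currently has the smaller docid until both sit on 7,
** which becomes this AND/NEAR node's docid; EOF on either side ends the
** search.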
*/ - fts3EvalNextRow(pCsr, pLeft, pRc); - fts3EvalNextRow(pCsr, pRight, pRc); - while( !pLeft->bEof && !pRight->bEof && *pRc==SQLITE_OK ){ - sqlite3_int64 iDiff = DOCID_CMP(pLeft->iDocid, pRight->iDocid); - if( iDiff==0 ) break; - if( iDiff<0 ){ - fts3EvalNextRow(pCsr, pLeft, pRc); - }else{ - fts3EvalNextRow(pCsr, pRight, pRc); - } - } - pExpr->iDocid = pLeft->iDocid; - pExpr->bEof = (pLeft->bEof || pRight->bEof); - } - break; - } - - case FTSQUERY_OR: { - Fts3Expr *pLeft = pExpr->pLeft; - Fts3Expr *pRight = pExpr->pRight; - sqlite3_int64 iCmp = DOCID_CMP(pLeft->iDocid, pRight->iDocid); - - assert( pLeft->bStart || pLeft->iDocid==pRight->iDocid ); - assert( pRight->bStart || pLeft->iDocid==pRight->iDocid ); - - if( pRight->bEof || (pLeft->bEof==0 && iCmp<0) ){ - fts3EvalNextRow(pCsr, pLeft, pRc); - }else if( pLeft->bEof || (pRight->bEof==0 && iCmp>0) ){ - fts3EvalNextRow(pCsr, pRight, pRc); - }else{ - fts3EvalNextRow(pCsr, pLeft, pRc); - fts3EvalNextRow(pCsr, pRight, pRc); - } - - pExpr->bEof = (pLeft->bEof && pRight->bEof); - iCmp = DOCID_CMP(pLeft->iDocid, pRight->iDocid); - if( pRight->bEof || (pLeft->bEof==0 && iCmp<0) ){ - pExpr->iDocid = pLeft->iDocid; - }else{ - pExpr->iDocid = pRight->iDocid; - } - - break; - } - - case FTSQUERY_NOT: { - Fts3Expr *pLeft = pExpr->pLeft; - Fts3Expr *pRight = pExpr->pRight; - - if( pRight->bStart==0 ){ - fts3EvalNextRow(pCsr, pRight, pRc); - assert( *pRc!=SQLITE_OK || pRight->bStart ); - } - - fts3EvalNextRow(pCsr, pLeft, pRc); - if( pLeft->bEof==0 ){ - while( !*pRc - && !pRight->bEof - && DOCID_CMP(pLeft->iDocid, pRight->iDocid)>0 - ){ - fts3EvalNextRow(pCsr, pRight, pRc); - } - } - pExpr->iDocid = pLeft->iDocid; - pExpr->bEof = pLeft->bEof; - break; - } - - default: { - Fts3Phrase *pPhrase = pExpr->pPhrase; - fts3EvalInvalidatePoslist(pPhrase); - *pRc = fts3EvalPhraseNext(pCsr, pPhrase, &pExpr->bEof); - pExpr->iDocid = pPhrase->doclist.iDocid; - break; - } - } - } -} - -/* -** If *pRc is not SQLITE_OK, or if pExpr is not the root node of a NEAR -** cluster, then this function returns 1 immediately. -** -** Otherwise, it checks if the current row really does match the NEAR -** expression, using the data currently stored in the position lists -** (Fts3Expr->pPhrase.doclist.pList/nList) for each phrase in the expression. -** -** If the current row is a match, the position list associated with each -** phrase in the NEAR expression is edited in place to contain only those -** phrase instances sufficiently close to their peers to satisfy all NEAR -** constraints. In this case it returns 1. If the NEAR expression does not -** match the current row, 0 is returned. The position lists may or may not -** be edited if 0 is returned. -*/ -static int fts3EvalNearTest(Fts3Expr *pExpr, int *pRc){ - int res = 1; - - /* The following block runs if pExpr is the root of a NEAR query. - ** For example, the query: - ** - ** "w" NEAR "x" NEAR "y" NEAR "z" - ** - ** which is represented in tree form as: - ** - ** | - ** +--NEAR--+ <-- root of NEAR query - ** | | - ** +--NEAR--+ "z" - ** | | - ** +--NEAR--+ "y" - ** | | - ** "w" "x" - ** - ** The right-hand child of a NEAR node is always a phrase. The - ** left-hand child may be either a phrase or a NEAR node. There are - ** no exceptions to this - it's the way the parser in fts3_expr.c works. 
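** In rough terms, the code below then makes two trimming passes over the
** chain: first left to right, trimming each phrase against the positions
** that survive in its left-hand neighbour ("x" against "w", then "y",
** then "z"), and then right to left starting again from "z", so that only
** positions consistent with the whole NEAR chain remain.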
- */ - if( *pRc==SQLITE_OK - && pExpr->eType==FTSQUERY_NEAR - && pExpr->bEof==0 - && (pExpr->pParent==0 || pExpr->pParent->eType!=FTSQUERY_NEAR) - ){ - Fts3Expr *p; - int nTmp = 0; /* Bytes of temp space */ - char *aTmp; /* Temp space for PoslistNearMerge() */ - - /* Allocate temporary working space. */ - for(p=pExpr; p->pLeft; p=p->pLeft){ - nTmp += p->pRight->pPhrase->doclist.nList; - } - nTmp += p->pPhrase->doclist.nList; - if( nTmp==0 ){ - res = 0; - }else{ - aTmp = sqlite3_malloc(nTmp*2); - if( !aTmp ){ - *pRc = SQLITE_NOMEM; - res = 0; - }else{ - char *aPoslist = p->pPhrase->doclist.pList; - int nToken = p->pPhrase->nToken; - - for(p=p->pParent;res && p && p->eType==FTSQUERY_NEAR; p=p->pParent){ - Fts3Phrase *pPhrase = p->pRight->pPhrase; - int nNear = p->nNear; - res = fts3EvalNearTrim(nNear, aTmp, &aPoslist, &nToken, pPhrase); - } - - aPoslist = pExpr->pRight->pPhrase->doclist.pList; - nToken = pExpr->pRight->pPhrase->nToken; - for(p=pExpr->pLeft; p && res; p=p->pLeft){ - int nNear; - Fts3Phrase *pPhrase; - assert( p->pParent && p->pParent->pLeft==p ); - nNear = p->pParent->nNear; - pPhrase = ( - p->eType==FTSQUERY_NEAR ? p->pRight->pPhrase : p->pPhrase - ); - res = fts3EvalNearTrim(nNear, aTmp, &aPoslist, &nToken, pPhrase); - } - } - - sqlite3_free(aTmp); - } - } - - return res; -} - -/* -** This function is a helper function for fts3EvalTestDeferredAndNear(). -** Assuming no error occurs or has occurred, It returns non-zero if the -** expression passed as the second argument matches the row that pCsr -** currently points to, or zero if it does not. -** -** If *pRc is not SQLITE_OK when this function is called, it is a no-op. -** If an error occurs during execution of this function, *pRc is set to -** the appropriate SQLite error code. In this case the returned value is -** undefined. -*/ -static int fts3EvalTestExpr( - Fts3Cursor *pCsr, /* FTS cursor handle */ - Fts3Expr *pExpr, /* Expr to test. May or may not be root. */ - int *pRc /* IN/OUT: Error code */ -){ - int bHit = 1; /* Return value */ - if( *pRc==SQLITE_OK ){ - switch( pExpr->eType ){ - case FTSQUERY_NEAR: - case FTSQUERY_AND: - bHit = ( - fts3EvalTestExpr(pCsr, pExpr->pLeft, pRc) - && fts3EvalTestExpr(pCsr, pExpr->pRight, pRc) - && fts3EvalNearTest(pExpr, pRc) - ); - - /* If the NEAR expression does not match any rows, zero the doclist for - ** all phrases involved in the NEAR. This is because the snippet(), - ** offsets() and matchinfo() functions are not supposed to recognize - ** any instances of phrases that are part of unmatched NEAR queries. - ** For example if this expression: - ** - ** ... MATCH 'a OR (b NEAR c)' - ** - ** is matched against a row containing: - ** - ** 'a b d e' - ** - ** then any snippet() should ony highlight the "a" term, not the "b" - ** (as "b" is part of a non-matching NEAR clause). 
- */ - if( bHit==0 - && pExpr->eType==FTSQUERY_NEAR - && (pExpr->pParent==0 || pExpr->pParent->eType!=FTSQUERY_NEAR) - ){ - Fts3Expr *p; - for(p=pExpr; p->pPhrase==0; p=p->pLeft){ - if( p->pRight->iDocid==pCsr->iPrevId ){ - fts3EvalInvalidatePoslist(p->pRight->pPhrase); - } - } - if( p->iDocid==pCsr->iPrevId ){ - fts3EvalInvalidatePoslist(p->pPhrase); - } - } - - break; - - case FTSQUERY_OR: { - int bHit1 = fts3EvalTestExpr(pCsr, pExpr->pLeft, pRc); - int bHit2 = fts3EvalTestExpr(pCsr, pExpr->pRight, pRc); - bHit = bHit1 || bHit2; - break; - } - - case FTSQUERY_NOT: - bHit = ( - fts3EvalTestExpr(pCsr, pExpr->pLeft, pRc) - && !fts3EvalTestExpr(pCsr, pExpr->pRight, pRc) - ); - break; - - default: { -#ifndef SQLITE_DISABLE_FTS4_DEFERRED - if( pCsr->pDeferred - && (pExpr->iDocid==pCsr->iPrevId || pExpr->bDeferred) - ){ - Fts3Phrase *pPhrase = pExpr->pPhrase; - assert( pExpr->bDeferred || pPhrase->doclist.bFreeList==0 ); - if( pExpr->bDeferred ){ - fts3EvalInvalidatePoslist(pPhrase); - } - *pRc = fts3EvalDeferredPhrase(pCsr, pPhrase); - bHit = (pPhrase->doclist.pList!=0); - pExpr->iDocid = pCsr->iPrevId; - }else -#endif - { - bHit = (pExpr->bEof==0 && pExpr->iDocid==pCsr->iPrevId); - } - break; - } - } - } - return bHit; -} - -/* -** This function is called as the second part of each xNext operation when -** iterating through the results of a full-text query. At this point the -** cursor points to a row that matches the query expression, with the -** following caveats: -** -** * Up until this point, "NEAR" operators in the expression have been -** treated as "AND". -** -** * Deferred tokens have not yet been considered. -** -** If *pRc is not SQLITE_OK when this function is called, it immediately -** returns 0. Otherwise, it tests whether or not after considering NEAR -** operators and deferred tokens the current row is still a match for the -** expression. It returns 1 if both of the following are true: -** -** 1. *pRc is SQLITE_OK when this function returns, and -** -** 2. After scanning the current FTS table row for the deferred tokens, -** it is determined that the row does *not* match the query. -** -** Or, if no error occurs and it seems the current row does match the FTS -** query, return 0. -*/ -static int fts3EvalTestDeferredAndNear(Fts3Cursor *pCsr, int *pRc){ - int rc = *pRc; - int bMiss = 0; - if( rc==SQLITE_OK ){ - - /* If there are one or more deferred tokens, load the current row into - ** memory and scan it to determine the position list for each deferred - ** token. Then, see if this row is really a match, considering deferred - ** tokens and NEAR operators (neither of which were taken into account - ** earlier, by fts3EvalNextRow()). - */ - if( pCsr->pDeferred ){ - rc = fts3CursorSeek(0, pCsr); - if( rc==SQLITE_OK ){ - rc = sqlite3Fts3CacheDeferredDoclists(pCsr); - } - } - bMiss = (0==fts3EvalTestExpr(pCsr, pCsr->pExpr, &rc)); - - /* Free the position-lists accumulated for each deferred token above. */ - sqlite3Fts3FreeDeferredDoclists(pCsr); - *pRc = rc; - } - return (rc==SQLITE_OK && bMiss); -} - -/* -** Advance to the next document that matches the FTS expression in -** Fts3Cursor.pExpr. 
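** In outline, each step therefore has two phases: fts3EvalNextRow()
** proposes a candidate docid (treating NEAR as AND and ignoring deferred
** tokens), and fts3EvalTestDeferredAndNear() then loads the current row,
** caches position lists for any deferred tokens and re-tests the
** expression; if the candidate fails, the loop below simply advances to
** the next one.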
-*/ -static int fts3EvalNext(Fts3Cursor *pCsr){ - int rc = SQLITE_OK; /* Return Code */ - Fts3Expr *pExpr = pCsr->pExpr; - assert( pCsr->isEof==0 ); - if( pExpr==0 ){ - pCsr->isEof = 1; - }else{ - do { - if( pCsr->isRequireSeek==0 ){ - sqlite3_reset(pCsr->pStmt); - } - assert( sqlite3_data_count(pCsr->pStmt)==0 ); - fts3EvalNextRow(pCsr, pExpr, &rc); - pCsr->isEof = pExpr->bEof; - pCsr->isRequireSeek = 1; - pCsr->isMatchinfoNeeded = 1; - pCsr->iPrevId = pExpr->iDocid; - }while( pCsr->isEof==0 && fts3EvalTestDeferredAndNear(pCsr, &rc) ); - } - - /* Check if the cursor is past the end of the docid range specified - ** by Fts3Cursor.iMinDocid/iMaxDocid. If so, set the EOF flag. */ - if( rc==SQLITE_OK && ( - (pCsr->bDesc==0 && pCsr->iPrevId>pCsr->iMaxDocid) - || (pCsr->bDesc!=0 && pCsr->iPrevIdiMinDocid) - )){ - pCsr->isEof = 1; - } - - return rc; -} - -/* -** Restart interation for expression pExpr so that the next call to -** fts3EvalNext() visits the first row. Do not allow incremental -** loading or merging of phrase doclists for this iteration. -** -** If *pRc is other than SQLITE_OK when this function is called, it is -** a no-op. If an error occurs within this function, *pRc is set to an -** SQLite error code before returning. -*/ -static void fts3EvalRestart( - Fts3Cursor *pCsr, - Fts3Expr *pExpr, - int *pRc -){ - if( pExpr && *pRc==SQLITE_OK ){ - Fts3Phrase *pPhrase = pExpr->pPhrase; - - if( pPhrase ){ - fts3EvalInvalidatePoslist(pPhrase); - if( pPhrase->bIncr ){ - int i; - for(i=0; inToken; i++){ - Fts3PhraseToken *pToken = &pPhrase->aToken[i]; - assert( pToken->pDeferred==0 ); - if( pToken->pSegcsr ){ - sqlite3Fts3MsrIncrRestart(pToken->pSegcsr); - } - } - *pRc = fts3EvalPhraseStart(pCsr, 0, pPhrase); - } - pPhrase->doclist.pNextDocid = 0; - pPhrase->doclist.iDocid = 0; - } - - pExpr->iDocid = 0; - pExpr->bEof = 0; - pExpr->bStart = 0; - - fts3EvalRestart(pCsr, pExpr->pLeft, pRc); - fts3EvalRestart(pCsr, pExpr->pRight, pRc); - } -} - -/* -** After allocating the Fts3Expr.aMI[] array for each phrase in the -** expression rooted at pExpr, the cursor iterates through all rows matched -** by pExpr, calling this function for each row. This function increments -** the values in Fts3Expr.aMI[] according to the position-list currently -** found in Fts3Expr.pPhrase->doclist.pList for each of the phrase -** expression nodes. -*/ -static void fts3EvalUpdateCounts(Fts3Expr *pExpr){ - if( pExpr ){ - Fts3Phrase *pPhrase = pExpr->pPhrase; - if( pPhrase && pPhrase->doclist.pList ){ - int iCol = 0; - char *p = pPhrase->doclist.pList; - - assert( *p ); - while( 1 ){ - u8 c = 0; - int iCnt = 0; - while( 0xFE & (*p | c) ){ - if( (c&0x80)==0 ) iCnt++; - c = *p++ & 0x80; - } - - /* aMI[iCol*3 + 1] = Number of occurrences - ** aMI[iCol*3 + 2] = Number of rows containing at least one instance - */ - pExpr->aMI[iCol*3 + 1] += iCnt; - pExpr->aMI[iCol*3 + 2] += (iCnt>0); - if( *p==0x00 ) break; - p++; - p += fts3GetVarint32(p, &iCol); - } - } - - fts3EvalUpdateCounts(pExpr->pLeft); - fts3EvalUpdateCounts(pExpr->pRight); - } -} - -/* -** Expression pExpr must be of type FTSQUERY_PHRASE. -** -** If it is not already allocated and populated, this function allocates and -** populates the Fts3Expr.aMI[] array for expression pExpr. If pExpr is part -** of a NEAR expression, then it also allocates and populates the same array -** for all other phrases that are part of the NEAR expression. -** -** SQLITE_OK is returned if the aMI[] array is successfully allocated and -** populated. 
Otherwise, if an error occurs, an SQLite error code is returned. -*/ -static int fts3EvalGatherStats( - Fts3Cursor *pCsr, /* Cursor object */ - Fts3Expr *pExpr /* FTSQUERY_PHRASE expression */ -){ - int rc = SQLITE_OK; /* Return code */ - - assert( pExpr->eType==FTSQUERY_PHRASE ); - if( pExpr->aMI==0 ){ - Fts3Table *pTab = (Fts3Table *)pCsr->base.pVtab; - Fts3Expr *pRoot; /* Root of NEAR expression */ - Fts3Expr *p; /* Iterator used for several purposes */ - - sqlite3_int64 iPrevId = pCsr->iPrevId; - sqlite3_int64 iDocid; - u8 bEof; - - /* Find the root of the NEAR expression */ - pRoot = pExpr; - while( pRoot->pParent && pRoot->pParent->eType==FTSQUERY_NEAR ){ - pRoot = pRoot->pParent; - } - iDocid = pRoot->iDocid; - bEof = pRoot->bEof; - assert( pRoot->bStart ); - - /* Allocate space for the aMSI[] array of each FTSQUERY_PHRASE node */ - for(p=pRoot; p; p=p->pLeft){ - Fts3Expr *pE = (p->eType==FTSQUERY_PHRASE?p:p->pRight); - assert( pE->aMI==0 ); - pE->aMI = (u32 *)sqlite3_malloc(pTab->nColumn * 3 * sizeof(u32)); - if( !pE->aMI ) return SQLITE_NOMEM; - memset(pE->aMI, 0, pTab->nColumn * 3 * sizeof(u32)); - } - - fts3EvalRestart(pCsr, pRoot, &rc); - - while( pCsr->isEof==0 && rc==SQLITE_OK ){ - - do { - /* Ensure the %_content statement is reset. */ - if( pCsr->isRequireSeek==0 ) sqlite3_reset(pCsr->pStmt); - assert( sqlite3_data_count(pCsr->pStmt)==0 ); - - /* Advance to the next document */ - fts3EvalNextRow(pCsr, pRoot, &rc); - pCsr->isEof = pRoot->bEof; - pCsr->isRequireSeek = 1; - pCsr->isMatchinfoNeeded = 1; - pCsr->iPrevId = pRoot->iDocid; - }while( pCsr->isEof==0 - && pRoot->eType==FTSQUERY_NEAR - && fts3EvalTestDeferredAndNear(pCsr, &rc) - ); - - if( rc==SQLITE_OK && pCsr->isEof==0 ){ - fts3EvalUpdateCounts(pRoot); - } - } - - pCsr->isEof = 0; - pCsr->iPrevId = iPrevId; - - if( bEof ){ - pRoot->bEof = bEof; - }else{ - /* Caution: pRoot may iterate through docids in ascending or descending - ** order. For this reason, even though it seems more defensive, the - ** do loop can not be written: - ** - ** do {...} while( pRoot->iDocidbEof==0 ); - }while( pRoot->iDocid!=iDocid && rc==SQLITE_OK ); - fts3EvalTestDeferredAndNear(pCsr, &rc); - } - } - return rc; -} - -/* -** This function is used by the matchinfo() module to query a phrase -** expression node for the following information: -** -** 1. The total number of occurrences of the phrase in each column of -** the FTS table (considering all rows), and -** -** 2. For each column, the number of rows in the table for which the -** column contains at least one instance of the phrase. -** -** If no error occurs, SQLITE_OK is returned and the values for each column -** written into the array aiOut as follows: -** -** aiOut[iCol*3 + 1] = Number of occurrences -** aiOut[iCol*3 + 2] = Number of rows containing at least one instance -** -** Caveats: -** -** * If a phrase consists entirely of deferred tokens, then all output -** values are set to the number of documents in the table. In other -** words we assume that very common tokens occur exactly once in each -** column of each row of the table. -** -** * If a phrase contains some deferred tokens (and some non-deferred -** tokens), count the potential occurrence identified by considering -** the non-deferred tokens instead of actual phrase occurrences. -** -** * If the phrase is part of a NEAR expression, then only phrase instances -** that meet the NEAR constraint are included in the counts. 
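** For instance (hypothetical numbers): if the phrase consists entirely of
** deferred tokens and pCsr->nDoc is 100, every column reports
** aiOut[iCol*3+1]==100 and aiOut[iCol*3+2]==100, i.e. one assumed
** occurrence per row in every column.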
-*/ -SQLITE_PRIVATE int sqlite3Fts3EvalPhraseStats( - Fts3Cursor *pCsr, /* FTS cursor handle */ - Fts3Expr *pExpr, /* Phrase expression */ - u32 *aiOut /* Array to write results into (see above) */ -){ - Fts3Table *pTab = (Fts3Table *)pCsr->base.pVtab; - int rc = SQLITE_OK; - int iCol; - - if( pExpr->bDeferred && pExpr->pParent->eType!=FTSQUERY_NEAR ){ - assert( pCsr->nDoc>0 ); - for(iCol=0; iColnColumn; iCol++){ - aiOut[iCol*3 + 1] = (u32)pCsr->nDoc; - aiOut[iCol*3 + 2] = (u32)pCsr->nDoc; - } - }else{ - rc = fts3EvalGatherStats(pCsr, pExpr); - if( rc==SQLITE_OK ){ - assert( pExpr->aMI ); - for(iCol=0; iColnColumn; iCol++){ - aiOut[iCol*3 + 1] = pExpr->aMI[iCol*3 + 1]; - aiOut[iCol*3 + 2] = pExpr->aMI[iCol*3 + 2]; - } - } - } - - return rc; -} - -/* -** The expression pExpr passed as the second argument to this function -** must be of type FTSQUERY_PHRASE. -** -** The returned value is either NULL or a pointer to a buffer containing -** a position-list indicating the occurrences of the phrase in column iCol -** of the current row. -** -** More specifically, the returned buffer contains 1 varint for each -** occurrence of the phrase in the column, stored using the normal (delta+2) -** compression and is terminated by either an 0x01 or 0x00 byte. For example, -** if the requested column contains "a b X c d X X" and the position-list -** for 'X' is requested, the buffer returned may contain: -** -** 0x04 0x05 0x03 0x01 or 0x04 0x05 0x03 0x00 -** -** This function works regardless of whether or not the phrase is deferred, -** incremental, or neither. -*/ -SQLITE_PRIVATE int sqlite3Fts3EvalPhrasePoslist( - Fts3Cursor *pCsr, /* FTS3 cursor object */ - Fts3Expr *pExpr, /* Phrase to return doclist for */ - int iCol, /* Column to return position list for */ - char **ppOut /* OUT: Pointer to position list */ -){ - Fts3Phrase *pPhrase = pExpr->pPhrase; - Fts3Table *pTab = (Fts3Table *)pCsr->base.pVtab; - char *pIter; - int iThis; - sqlite3_int64 iDocid; - - /* If this phrase is applies specifically to some column other than - ** column iCol, return a NULL pointer. */ - *ppOut = 0; - assert( iCol>=0 && iColnColumn ); - if( (pPhrase->iColumnnColumn && pPhrase->iColumn!=iCol) ){ - return SQLITE_OK; - } - - iDocid = pExpr->iDocid; - pIter = pPhrase->doclist.pList; - if( iDocid!=pCsr->iPrevId || pExpr->bEof ){ - int bDescDoclist = pTab->bDescIdx; /* For DOCID_CMP macro */ - int iMul; /* +1 if csr dir matches index dir, else -1 */ - int bOr = 0; - u8 bEof = 0; - u8 bTreeEof = 0; - Fts3Expr *p; /* Used to iterate from pExpr to root */ - Fts3Expr *pNear; /* Most senior NEAR ancestor (or pExpr) */ - - /* Check if this phrase descends from an OR expression node. If not, - ** return NULL. Otherwise, the entry that corresponds to docid - ** pCsr->iPrevId may lie earlier in the doclist buffer. Or, if the - ** tree that the node is part of has been marked as EOF, but the node - ** itself is not EOF, then it may point to an earlier entry. */ - pNear = pExpr; - for(p=pExpr->pParent; p; p=p->pParent){ - if( p->eType==FTSQUERY_OR ) bOr = 1; - if( p->eType==FTSQUERY_NEAR ) pNear = p; - if( p->bEof ) bTreeEof = 1; - } - if( bOr==0 ) return SQLITE_OK; - - /* This is the descendent of an OR node. In this case we cannot use - ** an incremental phrase. Load the entire doclist for the phrase - ** into memory in this case. 
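** (Unpacking the (delta+2) example given further up: in "a b X c d X X"
** the phrase "X" occurs at token offsets 2, 5 and 6, which encode as
** 2+2=0x04, (5-2)+2=0x05 and (6-5)+2=0x03, followed by the 0x01 or 0x00
** terminator.)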
*/ - if( pPhrase->bIncr ){ - int rc = SQLITE_OK; - int bEofSave = pExpr->bEof; - fts3EvalRestart(pCsr, pExpr, &rc); - while( rc==SQLITE_OK && !pExpr->bEof ){ - fts3EvalNextRow(pCsr, pExpr, &rc); - if( bEofSave==0 && pExpr->iDocid==iDocid ) break; - } - pIter = pPhrase->doclist.pList; - assert( rc!=SQLITE_OK || pPhrase->bIncr==0 ); - if( rc!=SQLITE_OK ) return rc; - } - - iMul = ((pCsr->bDesc==bDescDoclist) ? 1 : -1); - while( bTreeEof==1 - && pNear->bEof==0 - && (DOCID_CMP(pNear->iDocid, pCsr->iPrevId) * iMul)<0 - ){ - int rc = SQLITE_OK; - fts3EvalNextRow(pCsr, pExpr, &rc); - if( rc!=SQLITE_OK ) return rc; - iDocid = pExpr->iDocid; - pIter = pPhrase->doclist.pList; - } - - bEof = (pPhrase->doclist.nAll==0); - assert( bDescDoclist==0 || bDescDoclist==1 ); - assert( pCsr->bDesc==0 || pCsr->bDesc==1 ); - - if( bEof==0 ){ - if( pCsr->bDesc==bDescDoclist ){ - int dummy; - if( pNear->bEof ){ - /* This expression is already at EOF. So position it to point to the - ** last entry in the doclist at pPhrase->doclist.aAll[]. Variable - ** iDocid is already set for this entry, so all that is required is - ** to set pIter to point to the first byte of the last position-list - ** in the doclist. - ** - ** It would also be correct to set pIter and iDocid to zero. In - ** this case, the first call to sqltie3Fts4DoclistPrev() below - ** would also move the iterator to point to the last entry in the - ** doclist. However, this is expensive, as to do so it has to - ** iterate through the entire doclist from start to finish (since - ** it does not know the docid for the last entry). */ - pIter = &pPhrase->doclist.aAll[pPhrase->doclist.nAll-1]; - fts3ReversePoslist(pPhrase->doclist.aAll, &pIter); - } - while( (pIter==0 || DOCID_CMP(iDocid, pCsr->iPrevId)>0 ) && bEof==0 ){ - sqlite3Fts3DoclistPrev( - bDescDoclist, pPhrase->doclist.aAll, pPhrase->doclist.nAll, - &pIter, &iDocid, &dummy, &bEof - ); - } - }else{ - if( pNear->bEof ){ - pIter = 0; - iDocid = 0; - } - while( (pIter==0 || DOCID_CMP(iDocid, pCsr->iPrevId)<0 ) && bEof==0 ){ - sqlite3Fts3DoclistNext( - bDescDoclist, pPhrase->doclist.aAll, pPhrase->doclist.nAll, - &pIter, &iDocid, &bEof - ); - } - } - } - - if( bEof || iDocid!=pCsr->iPrevId ) pIter = 0; - } - if( pIter==0 ) return SQLITE_OK; - - if( *pIter==0x01 ){ - pIter++; - pIter += fts3GetVarint32(pIter, &iThis); - }else{ - iThis = 0; - } - while( iThisdoclist, and -** * any Fts3MultiSegReader objects held by phrase tokens. -*/ -SQLITE_PRIVATE void sqlite3Fts3EvalPhraseCleanup(Fts3Phrase *pPhrase){ - if( pPhrase ){ - int i; - sqlite3_free(pPhrase->doclist.aAll); - fts3EvalInvalidatePoslist(pPhrase); - memset(&pPhrase->doclist, 0, sizeof(Fts3Doclist)); - for(i=0; inToken; i++){ - fts3SegReaderCursorFree(pPhrase->aToken[i].pSegcsr); - pPhrase->aToken[i].pSegcsr = 0; - } - } -} - - -/* -** Return SQLITE_CORRUPT_VTAB. -*/ -#ifdef SQLITE_DEBUG -SQLITE_PRIVATE int sqlite3Fts3Corrupt(){ - return SQLITE_CORRUPT_VTAB; -} -#endif - -#if !SQLITE_CORE -/* -** Initialize API pointer table, if required. -*/ -#ifdef _WIN32 -__declspec(dllexport) -#endif -SQLITE_API int sqlite3_fts3_init( - sqlite3 *db, - char **pzErrMsg, - const sqlite3_api_routines *pApi -){ - SQLITE_EXTENSION_INIT2(pApi) - return sqlite3Fts3Init(db); -} -#endif - -#endif - -/************** End of fts3.c ************************************************/ -/************** Begin file fts3_aux.c ****************************************/ -/* -** 2011 Jan 27 -** -** The author disclaims copyright to this source code. 
In place of -** a legal notice, here is a blessing: -** -** May you do good and not evil. -** May you find forgiveness for yourself and forgive others. -** May you share freely, never taking more than you give. -** -****************************************************************************** -** -*/ -#if !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS3) - -/* #include */ -/* #include */ - -typedef struct Fts3auxTable Fts3auxTable; -typedef struct Fts3auxCursor Fts3auxCursor; - -struct Fts3auxTable { - sqlite3_vtab base; /* Base class used by SQLite core */ - Fts3Table *pFts3Tab; -}; - -struct Fts3auxCursor { - sqlite3_vtab_cursor base; /* Base class used by SQLite core */ - Fts3MultiSegReader csr; /* Must be right after "base" */ - Fts3SegFilter filter; - char *zStop; - int nStop; /* Byte-length of string zStop */ - int iLangid; /* Language id to query */ - int isEof; /* True if cursor is at EOF */ - sqlite3_int64 iRowid; /* Current rowid */ - - int iCol; /* Current value of 'col' column */ - int nStat; /* Size of aStat[] array */ - struct Fts3auxColstats { - sqlite3_int64 nDoc; /* 'documents' values for current csr row */ - sqlite3_int64 nOcc; /* 'occurrences' values for current csr row */ - } *aStat; -}; - -/* -** Schema of the terms table. -*/ -#define FTS3_AUX_SCHEMA \ - "CREATE TABLE x(term, col, documents, occurrences, languageid HIDDEN)" - -/* -** This function does all the work for both the xConnect and xCreate methods. -** These tables have no persistent representation of their own, so xConnect -** and xCreate are identical operations. -*/ -static int fts3auxConnectMethod( - sqlite3 *db, /* Database connection */ - void *pUnused, /* Unused */ - int argc, /* Number of elements in argv array */ - const char * const *argv, /* xCreate/xConnect argument array */ - sqlite3_vtab **ppVtab, /* OUT: New sqlite3_vtab object */ - char **pzErr /* OUT: sqlite3_malloc'd error message */ -){ - char const *zDb; /* Name of database (e.g. 
"main") */ - char const *zFts3; /* Name of fts3 table */ - int nDb; /* Result of strlen(zDb) */ - int nFts3; /* Result of strlen(zFts3) */ - int nByte; /* Bytes of space to allocate here */ - int rc; /* value returned by declare_vtab() */ - Fts3auxTable *p; /* Virtual table object to return */ - - UNUSED_PARAMETER(pUnused); - - /* The user should invoke this in one of two forms: - ** - ** CREATE VIRTUAL TABLE xxx USING fts4aux(fts4-table); - ** CREATE VIRTUAL TABLE xxx USING fts4aux(fts4-table-db, fts4-table); - */ - if( argc!=4 && argc!=5 ) goto bad_args; - - zDb = argv[1]; - nDb = (int)strlen(zDb); - if( argc==5 ){ - if( nDb==4 && 0==sqlite3_strnicmp("temp", zDb, 4) ){ - zDb = argv[3]; - nDb = (int)strlen(zDb); - zFts3 = argv[4]; - }else{ - goto bad_args; - } - }else{ - zFts3 = argv[3]; - } - nFts3 = (int)strlen(zFts3); - - rc = sqlite3_declare_vtab(db, FTS3_AUX_SCHEMA); - if( rc!=SQLITE_OK ) return rc; - - nByte = sizeof(Fts3auxTable) + sizeof(Fts3Table) + nDb + nFts3 + 2; - p = (Fts3auxTable *)sqlite3_malloc(nByte); - if( !p ) return SQLITE_NOMEM; - memset(p, 0, nByte); - - p->pFts3Tab = (Fts3Table *)&p[1]; - p->pFts3Tab->zDb = (char *)&p->pFts3Tab[1]; - p->pFts3Tab->zName = &p->pFts3Tab->zDb[nDb+1]; - p->pFts3Tab->db = db; - p->pFts3Tab->nIndex = 1; - - memcpy((char *)p->pFts3Tab->zDb, zDb, nDb); - memcpy((char *)p->pFts3Tab->zName, zFts3, nFts3); - sqlite3Fts3Dequote((char *)p->pFts3Tab->zName); - - *ppVtab = (sqlite3_vtab *)p; - return SQLITE_OK; - - bad_args: - *pzErr = sqlite3_mprintf("invalid arguments to fts4aux constructor"); - return SQLITE_ERROR; -} - -/* -** This function does the work for both the xDisconnect and xDestroy methods. -** These tables have no persistent representation of their own, so xDisconnect -** and xDestroy are identical operations. -*/ -static int fts3auxDisconnectMethod(sqlite3_vtab *pVtab){ - Fts3auxTable *p = (Fts3auxTable *)pVtab; - Fts3Table *pFts3 = p->pFts3Tab; - int i; - - /* Free any prepared statements held */ - for(i=0; iaStmt); i++){ - sqlite3_finalize(pFts3->aStmt[i]); - } - sqlite3_free(pFts3->zSegmentsTbl); - sqlite3_free(p); - return SQLITE_OK; -} - -#define FTS4AUX_EQ_CONSTRAINT 1 -#define FTS4AUX_GE_CONSTRAINT 2 -#define FTS4AUX_LE_CONSTRAINT 4 - -/* -** xBestIndex - Analyze a WHERE and ORDER BY clause. -*/ -static int fts3auxBestIndexMethod( - sqlite3_vtab *pVTab, - sqlite3_index_info *pInfo -){ - int i; - int iEq = -1; - int iGe = -1; - int iLe = -1; - int iLangid = -1; - int iNext = 1; /* Next free argvIndex value */ - - UNUSED_PARAMETER(pVTab); - - /* This vtab delivers always results in "ORDER BY term ASC" order. */ - if( pInfo->nOrderBy==1 - && pInfo->aOrderBy[0].iColumn==0 - && pInfo->aOrderBy[0].desc==0 - ){ - pInfo->orderByConsumed = 1; - } - - /* Search for equality and range constraints on the "term" column. - ** And equality constraints on the hidden "languageid" column. 
*/ - for(i=0; inConstraint; i++){ - if( pInfo->aConstraint[i].usable ){ - int op = pInfo->aConstraint[i].op; - int iCol = pInfo->aConstraint[i].iColumn; - - if( iCol==0 ){ - if( op==SQLITE_INDEX_CONSTRAINT_EQ ) iEq = i; - if( op==SQLITE_INDEX_CONSTRAINT_LT ) iLe = i; - if( op==SQLITE_INDEX_CONSTRAINT_LE ) iLe = i; - if( op==SQLITE_INDEX_CONSTRAINT_GT ) iGe = i; - if( op==SQLITE_INDEX_CONSTRAINT_GE ) iGe = i; - } - if( iCol==4 ){ - if( op==SQLITE_INDEX_CONSTRAINT_EQ ) iLangid = i; - } - } - } - - if( iEq>=0 ){ - pInfo->idxNum = FTS4AUX_EQ_CONSTRAINT; - pInfo->aConstraintUsage[iEq].argvIndex = iNext++; - pInfo->estimatedCost = 5; - }else{ - pInfo->idxNum = 0; - pInfo->estimatedCost = 20000; - if( iGe>=0 ){ - pInfo->idxNum += FTS4AUX_GE_CONSTRAINT; - pInfo->aConstraintUsage[iGe].argvIndex = iNext++; - pInfo->estimatedCost /= 2; - } - if( iLe>=0 ){ - pInfo->idxNum += FTS4AUX_LE_CONSTRAINT; - pInfo->aConstraintUsage[iLe].argvIndex = iNext++; - pInfo->estimatedCost /= 2; - } - } - if( iLangid>=0 ){ - pInfo->aConstraintUsage[iLangid].argvIndex = iNext++; - pInfo->estimatedCost--; - } - - return SQLITE_OK; -} - -/* -** xOpen - Open a cursor. -*/ -static int fts3auxOpenMethod(sqlite3_vtab *pVTab, sqlite3_vtab_cursor **ppCsr){ - Fts3auxCursor *pCsr; /* Pointer to cursor object to return */ - - UNUSED_PARAMETER(pVTab); - - pCsr = (Fts3auxCursor *)sqlite3_malloc(sizeof(Fts3auxCursor)); - if( !pCsr ) return SQLITE_NOMEM; - memset(pCsr, 0, sizeof(Fts3auxCursor)); - - *ppCsr = (sqlite3_vtab_cursor *)pCsr; - return SQLITE_OK; -} - -/* -** xClose - Close a cursor. -*/ -static int fts3auxCloseMethod(sqlite3_vtab_cursor *pCursor){ - Fts3Table *pFts3 = ((Fts3auxTable *)pCursor->pVtab)->pFts3Tab; - Fts3auxCursor *pCsr = (Fts3auxCursor *)pCursor; - - sqlite3Fts3SegmentsClose(pFts3); - sqlite3Fts3SegReaderFinish(&pCsr->csr); - sqlite3_free((void *)pCsr->filter.zTerm); - sqlite3_free(pCsr->zStop); - sqlite3_free(pCsr->aStat); - sqlite3_free(pCsr); - return SQLITE_OK; -} - -static int fts3auxGrowStatArray(Fts3auxCursor *pCsr, int nSize){ - if( nSize>pCsr->nStat ){ - struct Fts3auxColstats *aNew; - aNew = (struct Fts3auxColstats *)sqlite3_realloc(pCsr->aStat, - sizeof(struct Fts3auxColstats) * nSize - ); - if( aNew==0 ) return SQLITE_NOMEM; - memset(&aNew[pCsr->nStat], 0, - sizeof(struct Fts3auxColstats) * (nSize - pCsr->nStat) - ); - pCsr->aStat = aNew; - pCsr->nStat = nSize; - } - return SQLITE_OK; -} - -/* -** xNext - Advance the cursor to the next row, if any. -*/ -static int fts3auxNextMethod(sqlite3_vtab_cursor *pCursor){ - Fts3auxCursor *pCsr = (Fts3auxCursor *)pCursor; - Fts3Table *pFts3 = ((Fts3auxTable *)pCursor->pVtab)->pFts3Tab; - int rc; - - /* Increment our pretend rowid value. */ - pCsr->iRowid++; - - for(pCsr->iCol++; pCsr->iColnStat; pCsr->iCol++){ - if( pCsr->aStat[pCsr->iCol].nDoc>0 ) return SQLITE_OK; - } - - rc = sqlite3Fts3SegReaderStep(pFts3, &pCsr->csr); - if( rc==SQLITE_ROW ){ - int i = 0; - int nDoclist = pCsr->csr.nDoclist; - char *aDoclist = pCsr->csr.aDoclist; - int iCol; - - int eState = 0; - - if( pCsr->zStop ){ - int n = (pCsr->nStopcsr.nTerm) ? pCsr->nStop : pCsr->csr.nTerm; - int mc = memcmp(pCsr->zStop, pCsr->csr.zTerm, n); - if( mc<0 || (mc==0 && pCsr->csr.nTerm>pCsr->nStop) ){ - pCsr->isEof = 1; - return SQLITE_OK; - } - } - - if( fts3auxGrowStatArray(pCsr, 2) ) return SQLITE_NOMEM; - memset(pCsr->aStat, 0, sizeof(struct Fts3auxColstats) * pCsr->nStat); - iCol = 0; - - while( iaStat[0].nDoc++; - eState = 1; - iCol = 0; - break; - - /* State 1. 
In this state we are expecting either a 1, indicating - ** that the following integer will be a column number, or the - ** start of a position list for column 0. - ** - ** The only difference between state 1 and state 2 is that if the - ** integer encountered in state 1 is not 0 or 1, then we need to - ** increment the column 0 "nDoc" count for this term. - */ - case 1: - assert( iCol==0 ); - if( v>1 ){ - pCsr->aStat[1].nDoc++; - } - eState = 2; - /* fall through */ - - case 2: - if( v==0 ){ /* 0x00. Next integer will be a docid. */ - eState = 0; - }else if( v==1 ){ /* 0x01. Next integer will be a column number. */ - eState = 3; - }else{ /* 2 or greater. A position. */ - pCsr->aStat[iCol+1].nOcc++; - pCsr->aStat[0].nOcc++; - } - break; - - /* State 3. The integer just read is a column number. */ - default: assert( eState==3 ); - iCol = (int)v; - if( fts3auxGrowStatArray(pCsr, iCol+2) ) return SQLITE_NOMEM; - pCsr->aStat[iCol+1].nDoc++; - eState = 2; - break; - } - } - - pCsr->iCol = 0; - rc = SQLITE_OK; - }else{ - pCsr->isEof = 1; - } - return rc; -} - -/* -** xFilter - Initialize a cursor to point at the start of its data. -*/ -static int fts3auxFilterMethod( - sqlite3_vtab_cursor *pCursor, /* The cursor used for this query */ - int idxNum, /* Strategy index */ - const char *idxStr, /* Unused */ - int nVal, /* Number of elements in apVal */ - sqlite3_value **apVal /* Arguments for the indexing scheme */ -){ - Fts3auxCursor *pCsr = (Fts3auxCursor *)pCursor; - Fts3Table *pFts3 = ((Fts3auxTable *)pCursor->pVtab)->pFts3Tab; - int rc; - int isScan = 0; - int iLangVal = 0; /* Language id to query */ - - int iEq = -1; /* Index of term=? value in apVal */ - int iGe = -1; /* Index of term>=? value in apVal */ - int iLe = -1; /* Index of term<=? value in apVal */ - int iLangid = -1; /* Index of languageid=? value in apVal */ - int iNext = 0; - - UNUSED_PARAMETER(nVal); - UNUSED_PARAMETER(idxStr); - - assert( idxStr==0 ); - assert( idxNum==FTS4AUX_EQ_CONSTRAINT || idxNum==0 - || idxNum==FTS4AUX_LE_CONSTRAINT || idxNum==FTS4AUX_GE_CONSTRAINT - || idxNum==(FTS4AUX_LE_CONSTRAINT|FTS4AUX_GE_CONSTRAINT) - ); - - if( idxNum==FTS4AUX_EQ_CONSTRAINT ){ - iEq = iNext++; - }else{ - isScan = 1; - if( idxNum & FTS4AUX_GE_CONSTRAINT ){ - iGe = iNext++; - } - if( idxNum & FTS4AUX_LE_CONSTRAINT ){ - iLe = iNext++; - } - } - if( iNextfilter.zTerm); - sqlite3Fts3SegReaderFinish(&pCsr->csr); - sqlite3_free((void *)pCsr->filter.zTerm); - sqlite3_free(pCsr->aStat); - memset(&pCsr->csr, 0, ((u8*)&pCsr[1]) - (u8*)&pCsr->csr); - - pCsr->filter.flags = FTS3_SEGMENT_REQUIRE_POS|FTS3_SEGMENT_IGNORE_EMPTY; - if( isScan ) pCsr->filter.flags |= FTS3_SEGMENT_SCAN; - - if( iEq>=0 || iGe>=0 ){ - const unsigned char *zStr = sqlite3_value_text(apVal[0]); - assert( (iEq==0 && iGe==-1) || (iEq==-1 && iGe==0) ); - if( zStr ){ - pCsr->filter.zTerm = sqlite3_mprintf("%s", zStr); - pCsr->filter.nTerm = sqlite3_value_bytes(apVal[0]); - if( pCsr->filter.zTerm==0 ) return SQLITE_NOMEM; - } - } - - if( iLe>=0 ){ - pCsr->zStop = sqlite3_mprintf("%s", sqlite3_value_text(apVal[iLe])); - pCsr->nStop = sqlite3_value_bytes(apVal[iLe]); - if( pCsr->zStop==0 ) return SQLITE_NOMEM; - } - - if( iLangid>=0 ){ - iLangVal = sqlite3_value_int(apVal[iLangid]); - - /* If the user specified a negative value for the languageid, use zero - ** instead. This works, as the "languageid=?" constraint will also - ** be tested by the VDBE layer. 
The test will always be false (since - ** this module will not return a row with a negative languageid), and - ** so the overall query will return zero rows. */ - if( iLangVal<0 ) iLangVal = 0; - } - pCsr->iLangid = iLangVal; - - rc = sqlite3Fts3SegReaderCursor(pFts3, iLangVal, 0, FTS3_SEGCURSOR_ALL, - pCsr->filter.zTerm, pCsr->filter.nTerm, 0, isScan, &pCsr->csr - ); - if( rc==SQLITE_OK ){ - rc = sqlite3Fts3SegReaderStart(pFts3, &pCsr->csr, &pCsr->filter); - } - - if( rc==SQLITE_OK ) rc = fts3auxNextMethod(pCursor); - return rc; -} - -/* -** xEof - Return true if the cursor is at EOF, or false otherwise. -*/ -static int fts3auxEofMethod(sqlite3_vtab_cursor *pCursor){ - Fts3auxCursor *pCsr = (Fts3auxCursor *)pCursor; - return pCsr->isEof; -} - -/* -** xColumn - Return a column value. -*/ -static int fts3auxColumnMethod( - sqlite3_vtab_cursor *pCursor, /* Cursor to retrieve value from */ - sqlite3_context *pCtx, /* Context for sqlite3_result_xxx() calls */ - int iCol /* Index of column to read value from */ -){ - Fts3auxCursor *p = (Fts3auxCursor *)pCursor; - - assert( p->isEof==0 ); - switch( iCol ){ - case 0: /* term */ - sqlite3_result_text(pCtx, p->csr.zTerm, p->csr.nTerm, SQLITE_TRANSIENT); - break; - - case 1: /* col */ - if( p->iCol ){ - sqlite3_result_int(pCtx, p->iCol-1); - }else{ - sqlite3_result_text(pCtx, "*", -1, SQLITE_STATIC); - } - break; - - case 2: /* documents */ - sqlite3_result_int64(pCtx, p->aStat[p->iCol].nDoc); - break; - - case 3: /* occurrences */ - sqlite3_result_int64(pCtx, p->aStat[p->iCol].nOcc); - break; - - default: /* languageid */ - assert( iCol==4 ); - sqlite3_result_int(pCtx, p->iLangid); - break; - } - - return SQLITE_OK; -} - -/* -** xRowid - Return the current rowid for the cursor. -*/ -static int fts3auxRowidMethod( - sqlite3_vtab_cursor *pCursor, /* Cursor to retrieve value from */ - sqlite_int64 *pRowid /* OUT: Rowid value */ -){ - Fts3auxCursor *pCsr = (Fts3auxCursor *)pCursor; - *pRowid = pCsr->iRowid; - return SQLITE_OK; -} - -/* -** Register the fts3aux module with database connection db. Return SQLITE_OK -** if successful or an error code if sqlite3_create_module() fails. -*/ -SQLITE_PRIVATE int sqlite3Fts3InitAux(sqlite3 *db){ - static const sqlite3_module fts3aux_module = { - 0, /* iVersion */ - fts3auxConnectMethod, /* xCreate */ - fts3auxConnectMethod, /* xConnect */ - fts3auxBestIndexMethod, /* xBestIndex */ - fts3auxDisconnectMethod, /* xDisconnect */ - fts3auxDisconnectMethod, /* xDestroy */ - fts3auxOpenMethod, /* xOpen */ - fts3auxCloseMethod, /* xClose */ - fts3auxFilterMethod, /* xFilter */ - fts3auxNextMethod, /* xNext */ - fts3auxEofMethod, /* xEof */ - fts3auxColumnMethod, /* xColumn */ - fts3auxRowidMethod, /* xRowid */ - 0, /* xUpdate */ - 0, /* xBegin */ - 0, /* xSync */ - 0, /* xCommit */ - 0, /* xRollback */ - 0, /* xFindFunction */ - 0, /* xRename */ - 0, /* xSavepoint */ - 0, /* xRelease */ - 0 /* xRollbackTo */ - }; - int rc; /* Return code */ - - rc = sqlite3_create_module(db, "fts4aux", &fts3aux_module, 0); - return rc; -} - -#endif /* !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS3) */ - -/************** End of fts3_aux.c ********************************************/ -/************** Begin file fts3_expr.c ***************************************/ -/* -** 2008 Nov 28 -** -** The author disclaims copyright to this source code. In place of -** a legal notice, here is a blessing: -** -** May you do good and not evil. -** May you find forgiveness for yourself and forgive others. 
-** May you share freely, never taking more than you give. -** -****************************************************************************** -** -** This module contains code that implements a parser for fts3 query strings -** (the right-hand argument to the MATCH operator). Because the supported -** syntax is relatively simple, the whole tokenizer/parser system is -** hand-coded. -*/ -#if !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS3) - -/* -** By default, this module parses the legacy syntax that has been -** traditionally used by fts3. Or, if SQLITE_ENABLE_FTS3_PARENTHESIS -** is defined, then it uses the new syntax. The differences between -** the new and the old syntaxes are: -** -** a) The new syntax supports parenthesis. The old does not. -** -** b) The new syntax supports the AND and NOT operators. The old does not. -** -** c) The old syntax supports the "-" token qualifier. This is not -** supported by the new syntax (it is replaced by the NOT operator). -** -** d) When using the old syntax, the OR operator has a greater precedence -** than an implicit AND. When using the new, both implicity and explicit -** AND operators have a higher precedence than OR. -** -** If compiled with SQLITE_TEST defined, then this module exports the -** symbol "int sqlite3_fts3_enable_parentheses". Setting this variable -** to zero causes the module to use the old syntax. If it is set to -** non-zero the new syntax is activated. This is so both syntaxes can -** be tested using a single build of testfixture. -** -** The following describes the syntax supported by the fts3 MATCH -** operator in a similar format to that used by the lemon parser -** generator. This module does not use actually lemon, it uses a -** custom parser. -** -** query ::= andexpr (OR andexpr)*. -** -** andexpr ::= notexpr (AND? notexpr)*. -** -** notexpr ::= nearexpr (NOT nearexpr|-TOKEN)*. -** notexpr ::= LP query RP. -** -** nearexpr ::= phrase (NEAR distance_opt nearexpr)*. -** -** distance_opt ::= . -** distance_opt ::= / INTEGER. -** -** phrase ::= TOKEN. -** phrase ::= COLUMN:TOKEN. -** phrase ::= "TOKEN TOKEN TOKEN...". -*/ - -#ifdef SQLITE_TEST -SQLITE_API int sqlite3_fts3_enable_parentheses = 0; -#else -# ifdef SQLITE_ENABLE_FTS3_PARENTHESIS -# define sqlite3_fts3_enable_parentheses 1 -# else -# define sqlite3_fts3_enable_parentheses 0 -# endif -#endif - -/* -** Default span for NEAR operators. -*/ -#define SQLITE_FTS3_DEFAULT_NEAR_PARAM 10 - -/* #include */ -/* #include */ - -/* -** isNot: -** This variable is used by function getNextNode(). When getNextNode() is -** called, it sets ParseContext.isNot to true if the 'next node' is a -** FTSQUERY_PHRASE with a unary "-" attached to it. i.e. "mysql" in the -** FTS3 query "sqlite -mysql". Otherwise, ParseContext.isNot is set to -** zero. -*/ -typedef struct ParseContext ParseContext; -struct ParseContext { - sqlite3_tokenizer *pTokenizer; /* Tokenizer module */ - int iLangid; /* Language id used with tokenizer */ - const char **azCol; /* Array of column names for fts3 table */ - int bFts4; /* True to allow FTS4-only syntax */ - int nCol; /* Number of entries in azCol[] */ - int iDefaultCol; /* Default column to query */ - int isNot; /* True if getNextNode() sees a unary - */ - sqlite3_context *pCtx; /* Write error message here */ - int nNest; /* Number of nested brackets */ -}; - -/* -** This function is equivalent to the standard isspace() function. 
-** -** The standard isspace() can be awkward to use safely, because although it -** is defined to accept an argument of type int, its behavior when passed -** an integer that falls outside of the range of the unsigned char type -** is undefined (and sometimes, "undefined" means segfault). This wrapper -** is defined to accept an argument of type char, and always returns 0 for -** any values that fall outside of the range of the unsigned char type (i.e. -** negative values). -*/ -static int fts3isspace(char c){ - return c==' ' || c=='\t' || c=='\n' || c=='\r' || c=='\v' || c=='\f'; -} - -/* -** Allocate nByte bytes of memory using sqlite3_malloc(). If successful, -** zero the memory before returning a pointer to it. If unsuccessful, -** return NULL. -*/ -static void *fts3MallocZero(int nByte){ - void *pRet = sqlite3_malloc(nByte); - if( pRet ) memset(pRet, 0, nByte); - return pRet; -} - -SQLITE_PRIVATE int sqlite3Fts3OpenTokenizer( - sqlite3_tokenizer *pTokenizer, - int iLangid, - const char *z, - int n, - sqlite3_tokenizer_cursor **ppCsr -){ - sqlite3_tokenizer_module const *pModule = pTokenizer->pModule; - sqlite3_tokenizer_cursor *pCsr = 0; - int rc; - - rc = pModule->xOpen(pTokenizer, z, n, &pCsr); - assert( rc==SQLITE_OK || pCsr==0 ); - if( rc==SQLITE_OK ){ - pCsr->pTokenizer = pTokenizer; - if( pModule->iVersion>=1 ){ - rc = pModule->xLanguageid(pCsr, iLangid); - if( rc!=SQLITE_OK ){ - pModule->xClose(pCsr); - pCsr = 0; - } - } - } - *ppCsr = pCsr; - return rc; -} - -/* -** Function getNextNode(), which is called by fts3ExprParse(), may itself -** call fts3ExprParse(). So this forward declaration is required. -*/ -static int fts3ExprParse(ParseContext *, const char *, int, Fts3Expr **, int *); - -/* -** Extract the next token from buffer z (length n) using the tokenizer -** and other information (column names etc.) in pParse. Create an Fts3Expr -** structure of type FTSQUERY_PHRASE containing a phrase consisting of this -** single token and set *ppExpr to point to it. If the end of the buffer is -** reached before a token is found, set *ppExpr to zero. It is the -** responsibility of the caller to eventually deallocate the allocated -** Fts3Expr structure (if any) by passing it to sqlite3_free(). -** -** Return SQLITE_OK if successful, or SQLITE_NOMEM if a memory allocation -** fails. -*/ -static int getNextToken( - ParseContext *pParse, /* fts3 query parse context */ - int iCol, /* Value for Fts3Phrase.iColumn */ - const char *z, int n, /* Input string */ - Fts3Expr **ppExpr, /* OUT: expression */ - int *pnConsumed /* OUT: Number of bytes consumed */ -){ - sqlite3_tokenizer *pTokenizer = pParse->pTokenizer; - sqlite3_tokenizer_module const *pModule = pTokenizer->pModule; - int rc; - sqlite3_tokenizer_cursor *pCursor; - Fts3Expr *pRet = 0; - int i = 0; - - /* Set variable i to the maximum number of bytes of input to tokenize. 
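** Once a token has been extracted below, a trailing '*' marks it as a
** prefix token (isPrefix), a leading '-' in the legacy syntax sets
** ParseContext.isNot, and with FTS4 a leading '^' sets bFirst. For
** example, the hypothetical input "data*" yields a single prefix token
** "data".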
*/ - for(i=0; iiLangid, z, i, &pCursor); - if( rc==SQLITE_OK ){ - const char *zToken; - int nToken = 0, iStart = 0, iEnd = 0, iPosition = 0; - int nByte; /* total space to allocate */ - - rc = pModule->xNext(pCursor, &zToken, &nToken, &iStart, &iEnd, &iPosition); - if( rc==SQLITE_OK ){ - nByte = sizeof(Fts3Expr) + sizeof(Fts3Phrase) + nToken; - pRet = (Fts3Expr *)fts3MallocZero(nByte); - if( !pRet ){ - rc = SQLITE_NOMEM; - }else{ - pRet->eType = FTSQUERY_PHRASE; - pRet->pPhrase = (Fts3Phrase *)&pRet[1]; - pRet->pPhrase->nToken = 1; - pRet->pPhrase->iColumn = iCol; - pRet->pPhrase->aToken[0].n = nToken; - pRet->pPhrase->aToken[0].z = (char *)&pRet->pPhrase[1]; - memcpy(pRet->pPhrase->aToken[0].z, zToken, nToken); - - if( iEndpPhrase->aToken[0].isPrefix = 1; - iEnd++; - } - - while( 1 ){ - if( !sqlite3_fts3_enable_parentheses - && iStart>0 && z[iStart-1]=='-' - ){ - pParse->isNot = 1; - iStart--; - }else if( pParse->bFts4 && iStart>0 && z[iStart-1]=='^' ){ - pRet->pPhrase->aToken[0].bFirst = 1; - iStart--; - }else{ - break; - } - } - - } - *pnConsumed = iEnd; - }else if( i && rc==SQLITE_DONE ){ - rc = SQLITE_OK; - } - - pModule->xClose(pCursor); - } - - *ppExpr = pRet; - return rc; -} - - -/* -** Enlarge a memory allocation. If an out-of-memory allocation occurs, -** then free the old allocation. -*/ -static void *fts3ReallocOrFree(void *pOrig, int nNew){ - void *pRet = sqlite3_realloc(pOrig, nNew); - if( !pRet ){ - sqlite3_free(pOrig); - } - return pRet; -} - -/* -** Buffer zInput, length nInput, contains the contents of a quoted string -** that appeared as part of an fts3 query expression. Neither quote character -** is included in the buffer. This function attempts to tokenize the entire -** input buffer and create an Fts3Expr structure of type FTSQUERY_PHRASE -** containing the results. -** -** If successful, SQLITE_OK is returned and *ppExpr set to point at the -** allocated Fts3Expr structure. Otherwise, either SQLITE_NOMEM (out of memory -** error) or SQLITE_ERROR (tokenization error) is returned and *ppExpr set -** to 0. -*/ -static int getNextString( - ParseContext *pParse, /* fts3 query parse context */ - const char *zInput, int nInput, /* Input string */ - Fts3Expr **ppExpr /* OUT: expression */ -){ - sqlite3_tokenizer *pTokenizer = pParse->pTokenizer; - sqlite3_tokenizer_module const *pModule = pTokenizer->pModule; - int rc; - Fts3Expr *p = 0; - sqlite3_tokenizer_cursor *pCursor = 0; - char *zTemp = 0; - int nTemp = 0; - - const int nSpace = sizeof(Fts3Expr) + sizeof(Fts3Phrase); - int nToken = 0; - - /* The final Fts3Expr data structure, including the Fts3Phrase, - ** Fts3PhraseToken structures token buffers are all stored as a single - ** allocation so that the expression can be freed with a single call to - ** sqlite3_free(). Setting this up requires a two pass approach. - ** - ** The first pass, in the block below, uses a tokenizer cursor to iterate - ** through the tokens in the expression. This pass uses fts3ReallocOrFree() - ** to assemble data in two dynamic buffers: - ** - ** Buffer p: Points to the Fts3Expr structure, followed by the Fts3Phrase - ** structure, followed by the array of Fts3PhraseToken - ** structures. This pass only populates the Fts3PhraseToken array. - ** - ** Buffer zTemp: Contains copies of all tokens. - ** - ** The second pass, in the block that begins "if( rc==SQLITE_DONE )" below, - ** appends buffer zTemp to buffer p, and fills in the Fts3Expr and Fts3Phrase - ** structures. 
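** The finished node is therefore one contiguous allocation, roughly:
**
**   [ Fts3Expr | Fts3Phrase | aToken[0..nToken-1] | copied token text ]
**
** which is why a single sqlite3_free() of the Fts3Expr releases the whole
** phrase, tokens and all.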
- */ - rc = sqlite3Fts3OpenTokenizer( - pTokenizer, pParse->iLangid, zInput, nInput, &pCursor); - if( rc==SQLITE_OK ){ - int ii; - for(ii=0; rc==SQLITE_OK; ii++){ - const char *zByte; - int nByte = 0, iBegin = 0, iEnd = 0, iPos = 0; - rc = pModule->xNext(pCursor, &zByte, &nByte, &iBegin, &iEnd, &iPos); - if( rc==SQLITE_OK ){ - Fts3PhraseToken *pToken; - - p = fts3ReallocOrFree(p, nSpace + ii*sizeof(Fts3PhraseToken)); - if( !p ) goto no_mem; - - zTemp = fts3ReallocOrFree(zTemp, nTemp + nByte); - if( !zTemp ) goto no_mem; - - assert( nToken==ii ); - pToken = &((Fts3Phrase *)(&p[1]))->aToken[ii]; - memset(pToken, 0, sizeof(Fts3PhraseToken)); - - memcpy(&zTemp[nTemp], zByte, nByte); - nTemp += nByte; - - pToken->n = nByte; - pToken->isPrefix = (iEndbFirst = (iBegin>0 && zInput[iBegin-1]=='^'); - nToken = ii+1; - } - } - - pModule->xClose(pCursor); - pCursor = 0; - } - - if( rc==SQLITE_DONE ){ - int jj; - char *zBuf = 0; - - p = fts3ReallocOrFree(p, nSpace + nToken*sizeof(Fts3PhraseToken) + nTemp); - if( !p ) goto no_mem; - memset(p, 0, (char *)&(((Fts3Phrase *)&p[1])->aToken[0])-(char *)p); - p->eType = FTSQUERY_PHRASE; - p->pPhrase = (Fts3Phrase *)&p[1]; - p->pPhrase->iColumn = pParse->iDefaultCol; - p->pPhrase->nToken = nToken; - - zBuf = (char *)&p->pPhrase->aToken[nToken]; - if( zTemp ){ - memcpy(zBuf, zTemp, nTemp); - sqlite3_free(zTemp); - }else{ - assert( nTemp==0 ); - } - - for(jj=0; jjpPhrase->nToken; jj++){ - p->pPhrase->aToken[jj].z = zBuf; - zBuf += p->pPhrase->aToken[jj].n; - } - rc = SQLITE_OK; - } - - *ppExpr = p; - return rc; -no_mem: - - if( pCursor ){ - pModule->xClose(pCursor); - } - sqlite3_free(zTemp); - sqlite3_free(p); - *ppExpr = 0; - return SQLITE_NOMEM; -} - -/* -** The output variable *ppExpr is populated with an allocated Fts3Expr -** structure, or set to 0 if the end of the input buffer is reached. -** -** Returns an SQLite error code. SQLITE_OK if everything works, SQLITE_NOMEM -** if a malloc failure occurs, or SQLITE_ERROR if a parse error is encountered. -** If SQLITE_ERROR is returned, pContext is populated with an error message. -*/ -static int getNextNode( - ParseContext *pParse, /* fts3 query parse context */ - const char *z, int n, /* Input string */ - Fts3Expr **ppExpr, /* OUT: expression */ - int *pnConsumed /* OUT: Number of bytes consumed */ -){ - static const struct Fts3Keyword { - char *z; /* Keyword text */ - unsigned char n; /* Length of the keyword */ - unsigned char parenOnly; /* Only valid in paren mode */ - unsigned char eType; /* Keyword code */ - } aKeyword[] = { - { "OR" , 2, 0, FTSQUERY_OR }, - { "AND", 3, 1, FTSQUERY_AND }, - { "NOT", 3, 1, FTSQUERY_NOT }, - { "NEAR", 4, 0, FTSQUERY_NEAR } - }; - int ii; - int iCol; - int iColLen; - int rc; - Fts3Expr *pRet = 0; - - const char *zInput = z; - int nInput = n; - - pParse->isNot = 0; - - /* Skip over any whitespace before checking for a keyword, an open or - ** close bracket, or a quoted string. - */ - while( nInput>0 && fts3isspace(*zInput) ){ - nInput--; - zInput++; - } - if( nInput==0 ){ - return SQLITE_DONE; - } - - /* See if we are dealing with a keyword. */ - for(ii=0; ii<(int)(sizeof(aKeyword)/sizeof(struct Fts3Keyword)); ii++){ - const struct Fts3Keyword *pKey = &aKeyword[ii]; - - if( (pKey->parenOnly & ~sqlite3_fts3_enable_parentheses)!=0 ){ - continue; - } - - if( nInput>=pKey->n && 0==memcmp(zInput, pKey->z, pKey->n) ){ - int nNear = SQLITE_FTS3_DEFAULT_NEAR_PARAM; - int nKey = pKey->n; - char cNext; - - /* If this is a "NEAR" keyword, check for an explicit nearness. 
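** For example, in "dog NEAR/3 cat" the characters "/3" are consumed here
** and nNear is set to 3, while a bare "dog NEAR cat" leaves nNear at the
** default SQLITE_FTS3_DEFAULT_NEAR_PARAM (10).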
*/ - if( pKey->eType==FTSQUERY_NEAR ){ - assert( nKey==4 ); - if( zInput[4]=='/' && zInput[5]>='0' && zInput[5]<='9' ){ - nNear = 0; - for(nKey=5; zInput[nKey]>='0' && zInput[nKey]<='9'; nKey++){ - nNear = nNear * 10 + (zInput[nKey] - '0'); - } - } - } - - /* At this point this is probably a keyword. But for that to be true, - ** the next byte must contain either whitespace, an open or close - ** parenthesis, a quote character, or EOF. - */ - cNext = zInput[nKey]; - if( fts3isspace(cNext) - || cNext=='"' || cNext=='(' || cNext==')' || cNext==0 - ){ - pRet = (Fts3Expr *)fts3MallocZero(sizeof(Fts3Expr)); - if( !pRet ){ - return SQLITE_NOMEM; - } - pRet->eType = pKey->eType; - pRet->nNear = nNear; - *ppExpr = pRet; - *pnConsumed = (int)((zInput - z) + nKey); - return SQLITE_OK; - } - - /* Turns out that wasn't a keyword after all. This happens if the - ** user has supplied a token such as "ORacle". Continue. - */ - } - } - - /* See if we are dealing with a quoted phrase. If this is the case, then - ** search for the closing quote and pass the whole string to getNextString() - ** for processing. This is easy to do, as fts3 has no syntax for escaping - ** a quote character embedded in a string. - */ - if( *zInput=='"' ){ - for(ii=1; iinNest++; - rc = fts3ExprParse(pParse, zInput+1, nInput-1, ppExpr, &nConsumed); - if( rc==SQLITE_OK && !*ppExpr ){ rc = SQLITE_DONE; } - *pnConsumed = (int)(zInput - z) + 1 + nConsumed; - return rc; - }else if( *zInput==')' ){ - pParse->nNest--; - *pnConsumed = (int)((zInput - z) + 1); - *ppExpr = 0; - return SQLITE_DONE; - } - } - - /* If control flows to this point, this must be a regular token, or - ** the end of the input. Read a regular token using the sqlite3_tokenizer - ** interface. Before doing so, figure out if there is an explicit - ** column specifier for the token. - ** - ** TODO: Strangely, it is not possible to associate a column specifier - ** with a quoted phrase, only with a single token. Not sure if this was - ** an implementation artifact or an intentional decision when fts3 was - ** first implemented. Whichever it was, this module duplicates the - ** limitation. - */ - iCol = pParse->iDefaultCol; - iColLen = 0; - for(ii=0; iinCol; ii++){ - const char *zStr = pParse->azCol[ii]; - int nStr = (int)strlen(zStr); - if( nInput>nStr && zInput[nStr]==':' - && sqlite3_strnicmp(zStr, zInput, nStr)==0 - ){ - iCol = ii; - iColLen = (int)((zInput - z) + nStr + 1); - break; - } - } - rc = getNextToken(pParse, iCol, &z[iColLen], n-iColLen, ppExpr, pnConsumed); - *pnConsumed += iColLen; - return rc; -} - -/* -** The argument is an Fts3Expr structure for a binary operator (any type -** except an FTSQUERY_PHRASE). Return an integer value representing the -** precedence of the operator. Lower values have a higher precedence (i.e. -** group more tightly). For example, in the C language, the == operator -** groups more tightly than ||, and would therefore have a higher precedence. -** -** When using the new fts3 query syntax (when SQLITE_ENABLE_FTS3_PARENTHESIS -** is defined), the order of the operators in precedence from highest to -** lowest is: -** -** NEAR -** NOT -** AND (including implicit ANDs) -** OR -** -** Note that when using the old query syntax, the OR operator has a higher -** precedence than the AND operator. 
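/*
** Illustrative sketch of the precedence rule described above: a smaller
** value binds more tightly.  The enum values below are chosen for this
** sketch only (the real FTSQUERY_* constants are defined elsewhere in the
** amalgamation); what matters is the ordering -- NEAR, NOT, AND, OR under
** the new syntax, and NEAR, OR, AND under the legacy syntax.
*/
#include <stdio.h>

enum { OP_NEAR, OP_NOT, OP_AND, OP_OR };

static int op_precedence(int eType, int enableParentheses){
  if( enableParentheses ){
    return eType;              /* the enum order already encodes precedence */
  }
  switch( eType ){
    case OP_NEAR: return 1;
    case OP_OR:   return 2;
    default:      return 3;    /* AND; legacy NOT is not a binary operator  */
  }
}

int main(void){
  /* Under the legacy grammar OR binds more tightly than AND, so
  ** "a AND b OR c" groups as a AND (b OR c). */
  printf("new:    AND=%d OR=%d\n", op_precedence(OP_AND,1), op_precedence(OP_OR,1));
  printf("legacy: AND=%d OR=%d\n", op_precedence(OP_AND,0), op_precedence(OP_OR,0));
  return 0;
}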
-*/ -static int opPrecedence(Fts3Expr *p){ - assert( p->eType!=FTSQUERY_PHRASE ); - if( sqlite3_fts3_enable_parentheses ){ - return p->eType; - }else if( p->eType==FTSQUERY_NEAR ){ - return 1; - }else if( p->eType==FTSQUERY_OR ){ - return 2; - } - assert( p->eType==FTSQUERY_AND ); - return 3; -} - -/* -** Argument ppHead contains a pointer to the current head of a query -** expression tree being parsed. pPrev is the expression node most recently -** inserted into the tree. This function adds pNew, which is always a binary -** operator node, into the expression tree based on the relative precedence -** of pNew and the existing nodes of the tree. This may result in the head -** of the tree changing, in which case *ppHead is set to the new root node. -*/ -static void insertBinaryOperator( - Fts3Expr **ppHead, /* Pointer to the root node of a tree */ - Fts3Expr *pPrev, /* Node most recently inserted into the tree */ - Fts3Expr *pNew /* New binary node to insert into expression tree */ -){ - Fts3Expr *pSplit = pPrev; - while( pSplit->pParent && opPrecedence(pSplit->pParent)<=opPrecedence(pNew) ){ - pSplit = pSplit->pParent; - } - - if( pSplit->pParent ){ - assert( pSplit->pParent->pRight==pSplit ); - pSplit->pParent->pRight = pNew; - pNew->pParent = pSplit->pParent; - }else{ - *ppHead = pNew; - } - pNew->pLeft = pSplit; - pSplit->pParent = pNew; -} - -/* -** Parse the fts3 query expression found in buffer z, length n. This function -** returns either when the end of the buffer is reached or an unmatched -** closing bracket - ')' - is encountered. -** -** If successful, SQLITE_OK is returned, *ppExpr is set to point to the -** parsed form of the expression and *pnConsumed is set to the number of -** bytes read from buffer z. Otherwise, *ppExpr is set to 0 and SQLITE_NOMEM -** (out of memory error) or SQLITE_ERROR (parse error) is returned. -*/ -static int fts3ExprParse( - ParseContext *pParse, /* fts3 query parse context */ - const char *z, int n, /* Text of MATCH query */ - Fts3Expr **ppExpr, /* OUT: Parsed query structure */ - int *pnConsumed /* OUT: Number of bytes consumed */ -){ - Fts3Expr *pRet = 0; - Fts3Expr *pPrev = 0; - Fts3Expr *pNotBranch = 0; /* Only used in legacy parse mode */ - int nIn = n; - const char *zIn = z; - int rc = SQLITE_OK; - int isRequirePhrase = 1; - - while( rc==SQLITE_OK ){ - Fts3Expr *p = 0; - int nByte = 0; - - rc = getNextNode(pParse, zIn, nIn, &p, &nByte); - assert( nByte>0 || (rc!=SQLITE_OK && p==0) ); - if( rc==SQLITE_OK ){ - if( p ){ - int isPhrase; - - if( !sqlite3_fts3_enable_parentheses - && p->eType==FTSQUERY_PHRASE && pParse->isNot - ){ - /* Create an implicit NOT operator. */ - Fts3Expr *pNot = fts3MallocZero(sizeof(Fts3Expr)); - if( !pNot ){ - sqlite3Fts3ExprFree(p); - rc = SQLITE_NOMEM; - goto exprparse_out; - } - pNot->eType = FTSQUERY_NOT; - pNot->pRight = p; - p->pParent = pNot; - if( pNotBranch ){ - pNot->pLeft = pNotBranch; - pNotBranch->pParent = pNot; - } - pNotBranch = pNot; - p = pPrev; - }else{ - int eType = p->eType; - isPhrase = (eType==FTSQUERY_PHRASE || p->pLeft); - - /* The isRequirePhrase variable is set to true if a phrase or - ** an expression contained in parenthesis is required. If a - ** binary operator (AND, OR, NOT or NEAR) is encounted when - ** isRequirePhrase is set, this is a syntax error. - */ - if( !isPhrase && isRequirePhrase ){ - sqlite3Fts3ExprFree(p); - rc = SQLITE_ERROR; - goto exprparse_out; - } - - if( isPhrase && !isRequirePhrase ){ - /* Insert an implicit AND operator. 
*/ - Fts3Expr *pAnd; - assert( pRet && pPrev ); - pAnd = fts3MallocZero(sizeof(Fts3Expr)); - if( !pAnd ){ - sqlite3Fts3ExprFree(p); - rc = SQLITE_NOMEM; - goto exprparse_out; - } - pAnd->eType = FTSQUERY_AND; - insertBinaryOperator(&pRet, pPrev, pAnd); - pPrev = pAnd; - } - - /* This test catches attempts to make either operand of a NEAR - ** operator something other than a phrase. For example, either of - ** the following: - ** - ** (bracketed expression) NEAR phrase - ** phrase NEAR (bracketed expression) - ** - ** Return an error in either case. - */ - if( pPrev && ( - (eType==FTSQUERY_NEAR && !isPhrase && pPrev->eType!=FTSQUERY_PHRASE) - || (eType!=FTSQUERY_PHRASE && isPhrase && pPrev->eType==FTSQUERY_NEAR) - )){ - sqlite3Fts3ExprFree(p); - rc = SQLITE_ERROR; - goto exprparse_out; - } - - if( isPhrase ){ - if( pRet ){ - assert( pPrev && pPrev->pLeft && pPrev->pRight==0 ); - pPrev->pRight = p; - p->pParent = pPrev; - }else{ - pRet = p; - } - }else{ - insertBinaryOperator(&pRet, pPrev, p); - } - isRequirePhrase = !isPhrase; - } - pPrev = p; - } - assert( nByte>0 ); - } - assert( rc!=SQLITE_OK || (nByte>0 && nByte<=nIn) ); - nIn -= nByte; - zIn += nByte; - } - - if( rc==SQLITE_DONE && pRet && isRequirePhrase ){ - rc = SQLITE_ERROR; - } - - if( rc==SQLITE_DONE ){ - rc = SQLITE_OK; - if( !sqlite3_fts3_enable_parentheses && pNotBranch ){ - if( !pRet ){ - rc = SQLITE_ERROR; - }else{ - Fts3Expr *pIter = pNotBranch; - while( pIter->pLeft ){ - pIter = pIter->pLeft; - } - pIter->pLeft = pRet; - pRet->pParent = pIter; - pRet = pNotBranch; - } - } - } - *pnConsumed = n - nIn; - -exprparse_out: - if( rc!=SQLITE_OK ){ - sqlite3Fts3ExprFree(pRet); - sqlite3Fts3ExprFree(pNotBranch); - pRet = 0; - } - *ppExpr = pRet; - return rc; -} - -/* -** Return SQLITE_ERROR if the maximum depth of the expression tree passed -** as the only argument is more than nMaxDepth. -*/ -static int fts3ExprCheckDepth(Fts3Expr *p, int nMaxDepth){ - int rc = SQLITE_OK; - if( p ){ - if( nMaxDepth<0 ){ - rc = SQLITE_TOOBIG; - }else{ - rc = fts3ExprCheckDepth(p->pLeft, nMaxDepth-1); - if( rc==SQLITE_OK ){ - rc = fts3ExprCheckDepth(p->pRight, nMaxDepth-1); - } - } - } - return rc; -} - -/* -** This function attempts to transform the expression tree at (*pp) to -** an equivalent but more balanced form. The tree is modified in place. -** If successful, SQLITE_OK is returned and (*pp) set to point to the -** new root expression node. -** -** nMaxDepth is the maximum allowable depth of the balanced sub-tree. -** -** Otherwise, if an error occurs, an SQLite error code is returned and -** expression (*pp) freed. -*/ -static int fts3ExprBalance(Fts3Expr **pp, int nMaxDepth){ - int rc = SQLITE_OK; /* Return code */ - Fts3Expr *pRoot = *pp; /* Initial root node */ - Fts3Expr *pFree = 0; /* List of free nodes. Linked by pParent. */ - int eType = pRoot->eType; /* Type of node in this tree */ - - if( nMaxDepth==0 ){ - rc = SQLITE_ERROR; - } - - if( rc==SQLITE_OK && (eType==FTSQUERY_AND || eType==FTSQUERY_OR) ){ - Fts3Expr **apLeaf; - apLeaf = (Fts3Expr **)sqlite3_malloc(sizeof(Fts3Expr *) * nMaxDepth); - if( 0==apLeaf ){ - rc = SQLITE_NOMEM; - }else{ - memset(apLeaf, 0, sizeof(Fts3Expr *) * nMaxDepth); - } - - if( rc==SQLITE_OK ){ - int i; - Fts3Expr *p; - - /* Set $p to point to the left-most leaf in the tree of eType nodes. */ - for(p=pRoot; p->eType==eType; p=p->pLeft){ - assert( p->pParent==0 || p->pParent->pLeft==p ); - assert( p->pLeft && p->pRight ); - } - - /* This loop runs once for each leaf in the tree of eType nodes. 
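/*
** Illustrative sketch of the depth check performed by fts3ExprCheckDepth()
** above: walk the tree with a shrinking depth budget and fail as soon as
** the budget goes negative.  Node and DEPTH_EXCEEDED are names invented for
** this sketch.
*/
#include <stdio.h>

typedef struct Node { struct Node *pLeft, *pRight; } Node;
#define OK             0
#define DEPTH_EXCEEDED 1

static int check_depth(const Node *p, int nMaxDepth){
  int rc = OK;
  if( p ){
    if( nMaxDepth<0 ){
      rc = DEPTH_EXCEEDED;
    }else{
      rc = check_depth(p->pLeft, nMaxDepth-1);
      if( rc==OK ) rc = check_depth(p->pRight, nMaxDepth-1);
    }
  }
  return rc;
}

int main(void){
  Node leaf1 = {0,0}, leaf2 = {0,0};
  Node root  = {&leaf1, &leaf2};
  printf("%d\n", check_depth(&root, 2));   /* 0: within the limit          */
  printf("%d\n", check_depth(&root, 0));   /* 1: the children are too deep */
  return 0;
}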
*/ - while( 1 ){ - int iLvl; - Fts3Expr *pParent = p->pParent; /* Current parent of p */ - - assert( pParent==0 || pParent->pLeft==p ); - p->pParent = 0; - if( pParent ){ - pParent->pLeft = 0; - }else{ - pRoot = 0; - } - rc = fts3ExprBalance(&p, nMaxDepth-1); - if( rc!=SQLITE_OK ) break; - - for(iLvl=0; p && iLvlpLeft = apLeaf[iLvl]; - pFree->pRight = p; - pFree->pLeft->pParent = pFree; - pFree->pRight->pParent = pFree; - - p = pFree; - pFree = pFree->pParent; - p->pParent = 0; - apLeaf[iLvl] = 0; - } - } - if( p ){ - sqlite3Fts3ExprFree(p); - rc = SQLITE_TOOBIG; - break; - } - - /* If that was the last leaf node, break out of the loop */ - if( pParent==0 ) break; - - /* Set $p to point to the next leaf in the tree of eType nodes */ - for(p=pParent->pRight; p->eType==eType; p=p->pLeft); - - /* Remove pParent from the original tree. */ - assert( pParent->pParent==0 || pParent->pParent->pLeft==pParent ); - pParent->pRight->pParent = pParent->pParent; - if( pParent->pParent ){ - pParent->pParent->pLeft = pParent->pRight; - }else{ - assert( pParent==pRoot ); - pRoot = pParent->pRight; - } - - /* Link pParent into the free node list. It will be used as an - ** internal node of the new tree. */ - pParent->pParent = pFree; - pFree = pParent; - } - - if( rc==SQLITE_OK ){ - p = 0; - for(i=0; ipParent = 0; - }else{ - assert( pFree!=0 ); - pFree->pRight = p; - pFree->pLeft = apLeaf[i]; - pFree->pLeft->pParent = pFree; - pFree->pRight->pParent = pFree; - - p = pFree; - pFree = pFree->pParent; - p->pParent = 0; - } - } - } - pRoot = p; - }else{ - /* An error occurred. Delete the contents of the apLeaf[] array - ** and pFree list. Everything else is cleaned up by the call to - ** sqlite3Fts3ExprFree(pRoot) below. */ - Fts3Expr *pDel; - for(i=0; ipParent; - sqlite3_free(pDel); - } - } - - assert( pFree==0 ); - sqlite3_free( apLeaf ); - } - } - - if( rc!=SQLITE_OK ){ - sqlite3Fts3ExprFree(pRoot); - pRoot = 0; - } - *pp = pRoot; - return rc; -} - -/* -** This function is similar to sqlite3Fts3ExprParse(), with the following -** differences: -** -** 1. It does not do expression rebalancing. -** 2. It does not check that the expression does not exceed the -** maximum allowable depth. -** 3. Even if it fails, *ppExpr may still be set to point to an -** expression tree. It should be deleted using sqlite3Fts3ExprFree() -** in this case. -*/ -static int fts3ExprParseUnbalanced( - sqlite3_tokenizer *pTokenizer, /* Tokenizer module */ - int iLangid, /* Language id for tokenizer */ - char **azCol, /* Array of column names for fts3 table */ - int bFts4, /* True to allow FTS4-only syntax */ - int nCol, /* Number of entries in azCol[] */ - int iDefaultCol, /* Default column to query */ - const char *z, int n, /* Text of MATCH query */ - Fts3Expr **ppExpr /* OUT: Parsed query structure */ -){ - int nParsed; - int rc; - ParseContext sParse; - - memset(&sParse, 0, sizeof(ParseContext)); - sParse.pTokenizer = pTokenizer; - sParse.iLangid = iLangid; - sParse.azCol = (const char **)azCol; - sParse.nCol = nCol; - sParse.iDefaultCol = iDefaultCol; - sParse.bFts4 = bFts4; - if( z==0 ){ - *ppExpr = 0; - return SQLITE_OK; - } - if( n<0 ){ - n = (int)strlen(z); - } - rc = fts3ExprParse(&sParse, z, n, ppExpr, &nParsed); - assert( rc==SQLITE_OK || *ppExpr==0 ); - - /* Check for mismatched parenthesis */ - if( rc==SQLITE_OK && sParse.nNest ){ - rc = SQLITE_ERROR; - } - - return rc; -} - -/* -** Parameters z and n contain a pointer to and length of a buffer containing -** an fts3 query expression, respectively. 
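/*
** Illustrative sketch of what fts3ExprBalance() above achieves: a chain of
** AND nodes of depth O(n) is rebuilt so that the same leaves hang from a
** tree of depth O(log n).  The real routine rebalances in place using a
** bounded leaf array; this simplified standalone version just collects the
** leaves and rebuilds recursively (allocation checks and freeing of the old
** interior nodes are omitted).
*/
#include <stdio.h>
#include <stdlib.h>

typedef struct N { int isLeaf; int val; struct N *l, *r; } N;

static N *mk(int isLeaf, int val, N *l, N *r){
  N *p = (N *)malloc(sizeof(N));
  p->isLeaf = isLeaf; p->val = val; p->l = l; p->r = r;
  return p;
}

/* Collect the leaves of an operator chain, left to right. */
static int leaves(N *p, N **ap, int n){
  if( p->isLeaf ){ ap[n++] = p; return n; }
  n = leaves(p->l, ap, n);
  return leaves(p->r, ap, n);
}

/* Rebuild a balanced tree over ap[0..n-1]. */
static N *rebuild(N **ap, int n){
  if( n==1 ) return ap[0];
  return mk(0, 0, rebuild(ap, n/2), rebuild(ap+n/2, n-n/2));
}

static int depth(const N *p){
  if( p==0 ) return 0;
  int a = depth(p->l), b = depth(p->r);
  return 1 + (a>b ? a : b);
}

int main(void){
  /* Build the left-deep chain (((leaf0 AND leaf1) AND leaf2) ... AND leaf7). */
  N *t = mk(1, 0, 0, 0);
  for(int i=1; i<8; i++) t = mk(0, 0, t, mk(1, i, 0, 0));

  N *ap[8];
  int n = leaves(t, ap, 0);
  printf("before: depth %d, after: depth %d\n", depth(t), depth(rebuild(ap, n)));
  return 0;
}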
This function attempts to parse the -** query expression and create a tree of Fts3Expr structures representing the -** parsed expression. If successful, *ppExpr is set to point to the head -** of the parsed expression tree and SQLITE_OK is returned. If an error -** occurs, either SQLITE_NOMEM (out-of-memory error) or SQLITE_ERROR (parse -** error) is returned and *ppExpr is set to 0. -** -** If parameter n is a negative number, then z is assumed to point to a -** nul-terminated string and the length is determined using strlen(). -** -** The first parameter, pTokenizer, is passed the fts3 tokenizer module to -** use to normalize query tokens while parsing the expression. The azCol[] -** array, which is assumed to contain nCol entries, should contain the names -** of each column in the target fts3 table, in order from left to right. -** Column names must be nul-terminated strings. -** -** The iDefaultCol parameter should be passed the index of the table column -** that appears on the left-hand-side of the MATCH operator (the default -** column to match against for tokens for which a column name is not explicitly -** specified as part of the query string), or -1 if tokens may by default -** match any table column. -*/ -SQLITE_PRIVATE int sqlite3Fts3ExprParse( - sqlite3_tokenizer *pTokenizer, /* Tokenizer module */ - int iLangid, /* Language id for tokenizer */ - char **azCol, /* Array of column names for fts3 table */ - int bFts4, /* True to allow FTS4-only syntax */ - int nCol, /* Number of entries in azCol[] */ - int iDefaultCol, /* Default column to query */ - const char *z, int n, /* Text of MATCH query */ - Fts3Expr **ppExpr, /* OUT: Parsed query structure */ - char **pzErr /* OUT: Error message (sqlite3_malloc) */ -){ - int rc = fts3ExprParseUnbalanced( - pTokenizer, iLangid, azCol, bFts4, nCol, iDefaultCol, z, n, ppExpr - ); - - /* Rebalance the expression. And check that its depth does not exceed - ** SQLITE_FTS3_MAX_EXPR_DEPTH. */ - if( rc==SQLITE_OK && *ppExpr ){ - rc = fts3ExprBalance(ppExpr, SQLITE_FTS3_MAX_EXPR_DEPTH); - if( rc==SQLITE_OK ){ - rc = fts3ExprCheckDepth(*ppExpr, SQLITE_FTS3_MAX_EXPR_DEPTH); - } - } - - if( rc!=SQLITE_OK ){ - sqlite3Fts3ExprFree(*ppExpr); - *ppExpr = 0; - if( rc==SQLITE_TOOBIG ){ - *pzErr = sqlite3_mprintf( - "FTS expression tree is too large (maximum depth %d)", - SQLITE_FTS3_MAX_EXPR_DEPTH - ); - rc = SQLITE_ERROR; - }else if( rc==SQLITE_ERROR ){ - *pzErr = sqlite3_mprintf("malformed MATCH expression: [%s]", z); - } - } - - return rc; -} - -/* -** Free a single node of an expression tree. -*/ -static void fts3FreeExprNode(Fts3Expr *p){ - assert( p->eType==FTSQUERY_PHRASE || p->pPhrase==0 ); - sqlite3Fts3EvalPhraseCleanup(p->pPhrase); - sqlite3_free(p->aMI); - sqlite3_free(p); -} - -/* -** Free a parsed fts3 query expression allocated by sqlite3Fts3ExprParse(). -** -** This function would be simpler if it recursively called itself. But -** that would mean passing a sufficiently large expression to ExprParse() -** could cause a stack overflow. -*/ -SQLITE_PRIVATE void sqlite3Fts3ExprFree(Fts3Expr *pDel){ - Fts3Expr *p; - assert( pDel==0 || pDel->pParent==0 ); - for(p=pDel; p && (p->pLeft||p->pRight); p=(p->pLeft ? 
p->pLeft : p->pRight)){ - assert( p->pParent==0 || p==p->pParent->pRight || p==p->pParent->pLeft ); - } - while( p ){ - Fts3Expr *pParent = p->pParent; - fts3FreeExprNode(p); - if( pParent && p==pParent->pLeft && pParent->pRight ){ - p = pParent->pRight; - while( p && (p->pLeft || p->pRight) ){ - assert( p==p->pParent->pRight || p==p->pParent->pLeft ); - p = (p->pLeft ? p->pLeft : p->pRight); - } - }else{ - p = pParent; - } - } -} - -/**************************************************************************** -***************************************************************************** -** Everything after this point is just test code. -*/ - -#ifdef SQLITE_TEST - -/* #include */ - -/* -** Function to query the hash-table of tokenizers (see README.tokenizers). -*/ -static int queryTestTokenizer( - sqlite3 *db, - const char *zName, - const sqlite3_tokenizer_module **pp -){ - int rc; - sqlite3_stmt *pStmt; - const char zSql[] = "SELECT fts3_tokenizer(?)"; - - *pp = 0; - rc = sqlite3_prepare_v2(db, zSql, -1, &pStmt, 0); - if( rc!=SQLITE_OK ){ - return rc; - } - - sqlite3_bind_text(pStmt, 1, zName, -1, SQLITE_STATIC); - if( SQLITE_ROW==sqlite3_step(pStmt) ){ - if( sqlite3_column_type(pStmt, 0)==SQLITE_BLOB ){ - memcpy((void *)pp, sqlite3_column_blob(pStmt, 0), sizeof(*pp)); - } - } - - return sqlite3_finalize(pStmt); -} - -/* -** Return a pointer to a buffer containing a text representation of the -** expression passed as the first argument. The buffer is obtained from -** sqlite3_malloc(). It is the responsibility of the caller to use -** sqlite3_free() to release the memory. If an OOM condition is encountered, -** NULL is returned. -** -** If the second argument is not NULL, then its contents are prepended to -** the returned expression text and then freed using sqlite3_free(). -*/ -static char *exprToString(Fts3Expr *pExpr, char *zBuf){ - if( pExpr==0 ){ - return sqlite3_mprintf(""); - } - switch( pExpr->eType ){ - case FTSQUERY_PHRASE: { - Fts3Phrase *pPhrase = pExpr->pPhrase; - int i; - zBuf = sqlite3_mprintf( - "%zPHRASE %d 0", zBuf, pPhrase->iColumn); - for(i=0; zBuf && inToken; i++){ - zBuf = sqlite3_mprintf("%z %.*s%s", zBuf, - pPhrase->aToken[i].n, pPhrase->aToken[i].z, - (pPhrase->aToken[i].isPrefix?"+":"") - ); - } - return zBuf; - } - - case FTSQUERY_NEAR: - zBuf = sqlite3_mprintf("%zNEAR/%d ", zBuf, pExpr->nNear); - break; - case FTSQUERY_NOT: - zBuf = sqlite3_mprintf("%zNOT ", zBuf); - break; - case FTSQUERY_AND: - zBuf = sqlite3_mprintf("%zAND ", zBuf); - break; - case FTSQUERY_OR: - zBuf = sqlite3_mprintf("%zOR ", zBuf); - break; - } - - if( zBuf ) zBuf = sqlite3_mprintf("%z{", zBuf); - if( zBuf ) zBuf = exprToString(pExpr->pLeft, zBuf); - if( zBuf ) zBuf = sqlite3_mprintf("%z} {", zBuf); - - if( zBuf ) zBuf = exprToString(pExpr->pRight, zBuf); - if( zBuf ) zBuf = sqlite3_mprintf("%z}", zBuf); - - return zBuf; -} - -/* -** This is the implementation of a scalar SQL function used to test the -** expression parser. It should be called as follows: -** -** fts3_exprtest(, , , ...); -** -** The first argument, , is the name of the fts3 tokenizer used -** to parse the query expression (see README.tokenizers). The second argument -** is the query expression to parse. Each subsequent argument is the name -** of a column of the fts3 table that the query expression may refer to. 
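/*
** Illustrative sketch of the non-recursive strategy used by
** sqlite3Fts3ExprFree() above: descend to the deepest node reachable through
** left (else right) children, free it, then either dive into an unvisited
** right-sibling subtree or climb back to the parent.  This avoids recursion,
** so a very deep tree cannot overflow the stack.  T and tree_free() are
** names invented for this simplified standalone version.
*/
#include <stdio.h>
#include <stdlib.h>

typedef struct T { struct T *pLeft, *pRight, *pParent; } T;

static void tree_free(T *pDel){
  T *p;
  /* Find a node with no children. */
  for(p=pDel; p && (p->pLeft || p->pRight); p=(p->pLeft ? p->pLeft : p->pRight)){}
  while( p ){
    T *pParent = p->pParent;
    int wasLeftChild = (pParent && p==pParent->pLeft);
    free(p);
    if( wasLeftChild && pParent->pRight ){
      /* The right sibling has not been visited: descend to its deepest node. */
      p = pParent->pRight;
      while( p->pLeft || p->pRight ) p = (p->pLeft ? p->pLeft : p->pRight);
    }else{
      p = pParent;
    }
  }
}

int main(void){
  /* Build a three-node tree with parent pointers and free it iteratively. */
  T *root = (T *)calloc(1, sizeof(T));
  T *a    = (T *)calloc(1, sizeof(T));
  T *b    = (T *)calloc(1, sizeof(T));
  root->pLeft  = a; a->pParent = root;
  root->pRight = b; b->pParent = root;
  tree_free(root);
  puts("freed without recursion");
  return 0;
}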
-** For example: -** -** SELECT fts3_exprtest('simple', 'Bill col2:Bloggs', 'col1', 'col2'); -*/ -static void fts3ExprTest( - sqlite3_context *context, - int argc, - sqlite3_value **argv -){ - sqlite3_tokenizer_module const *pModule = 0; - sqlite3_tokenizer *pTokenizer = 0; - int rc; - char **azCol = 0; - const char *zExpr; - int nExpr; - int nCol; - int ii; - Fts3Expr *pExpr; - char *zBuf = 0; - sqlite3 *db = sqlite3_context_db_handle(context); - - if( argc<3 ){ - sqlite3_result_error(context, - "Usage: fts3_exprtest(tokenizer, expr, col1, ...", -1 - ); - return; - } - - rc = queryTestTokenizer(db, - (const char *)sqlite3_value_text(argv[0]), &pModule); - if( rc==SQLITE_NOMEM ){ - sqlite3_result_error_nomem(context); - goto exprtest_out; - }else if( !pModule ){ - sqlite3_result_error(context, "No such tokenizer module", -1); - goto exprtest_out; - } - - rc = pModule->xCreate(0, 0, &pTokenizer); - assert( rc==SQLITE_NOMEM || rc==SQLITE_OK ); - if( rc==SQLITE_NOMEM ){ - sqlite3_result_error_nomem(context); - goto exprtest_out; - } - pTokenizer->pModule = pModule; - - zExpr = (const char *)sqlite3_value_text(argv[1]); - nExpr = sqlite3_value_bytes(argv[1]); - nCol = argc-2; - azCol = (char **)sqlite3_malloc(nCol*sizeof(char *)); - if( !azCol ){ - sqlite3_result_error_nomem(context); - goto exprtest_out; - } - for(ii=0; iixDestroy(pTokenizer); - } - sqlite3_free(azCol); -} - -/* -** Register the query expression parser test function fts3_exprtest() -** with database connection db. -*/ -SQLITE_PRIVATE int sqlite3Fts3ExprInitTestInterface(sqlite3* db){ - int rc = sqlite3_create_function( - db, "fts3_exprtest", -1, SQLITE_UTF8, 0, fts3ExprTest, 0, 0 - ); - if( rc==SQLITE_OK ){ - rc = sqlite3_create_function(db, "fts3_exprtest_rebalance", - -1, SQLITE_UTF8, (void *)1, fts3ExprTest, 0, 0 - ); - } - return rc; -} - -#endif -#endif /* !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS3) */ - -/************** End of fts3_expr.c *******************************************/ -/************** Begin file fts3_hash.c ***************************************/ -/* -** 2001 September 22 -** -** The author disclaims copyright to this source code. In place of -** a legal notice, here is a blessing: -** -** May you do good and not evil. -** May you find forgiveness for yourself and forgive others. -** May you share freely, never taking more than you give. -** -************************************************************************* -** This is the implementation of generic hash-tables used in SQLite. -** We've modified it slightly to serve as a standalone hash table -** implementation for the full-text indexing module. -*/ - -/* -** The code in this file is only compiled if: -** -** * The FTS3 module is being built as an extension -** (in which case SQLITE_CORE is not defined), or -** -** * The FTS3 module is being built into the core of -** SQLite (in which case SQLITE_ENABLE_FTS3 is defined). -*/ -#if !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS3) - -/* #include */ -/* #include */ -/* #include */ - - -/* -** Malloc and Free functions -*/ -static void *fts3HashMalloc(int n){ - void *p = sqlite3_malloc(n); - if( p ){ - memset(p, 0, n); - } - return p; -} -static void fts3HashFree(void *p){ - sqlite3_free(p); -} - -/* Turn bulk memory into a hash table object by initializing the -** fields of the Hash structure. -** -** "pNew" is a pointer to the hash table that is to be initialized. -** keyClass is one of the constants -** FTS3_HASH_BINARY or FTS3_HASH_STRING. 
The value of keyClass -** determines what kind of key the hash table will use. "copyKey" is -** true if the hash table should make its own private copy of keys and -** false if it should just use the supplied pointer. -*/ -SQLITE_PRIVATE void sqlite3Fts3HashInit(Fts3Hash *pNew, char keyClass, char copyKey){ - assert( pNew!=0 ); - assert( keyClass>=FTS3_HASH_STRING && keyClass<=FTS3_HASH_BINARY ); - pNew->keyClass = keyClass; - pNew->copyKey = copyKey; - pNew->first = 0; - pNew->count = 0; - pNew->htsize = 0; - pNew->ht = 0; -} - -/* Remove all entries from a hash table. Reclaim all memory. -** Call this routine to delete a hash table or to reset a hash table -** to the empty state. -*/ -SQLITE_PRIVATE void sqlite3Fts3HashClear(Fts3Hash *pH){ - Fts3HashElem *elem; /* For looping over all elements of the table */ - - assert( pH!=0 ); - elem = pH->first; - pH->first = 0; - fts3HashFree(pH->ht); - pH->ht = 0; - pH->htsize = 0; - while( elem ){ - Fts3HashElem *next_elem = elem->next; - if( pH->copyKey && elem->pKey ){ - fts3HashFree(elem->pKey); - } - fts3HashFree(elem); - elem = next_elem; - } - pH->count = 0; -} - -/* -** Hash and comparison functions when the mode is FTS3_HASH_STRING -*/ -static int fts3StrHash(const void *pKey, int nKey){ - const char *z = (const char *)pKey; - unsigned h = 0; - if( nKey<=0 ) nKey = (int) strlen(z); - while( nKey > 0 ){ - h = (h<<3) ^ h ^ *z++; - nKey--; - } - return (int)(h & 0x7fffffff); -} -static int fts3StrCompare(const void *pKey1, int n1, const void *pKey2, int n2){ - if( n1!=n2 ) return 1; - return strncmp((const char*)pKey1,(const char*)pKey2,n1); -} - -/* -** Hash and comparison functions when the mode is FTS3_HASH_BINARY -*/ -static int fts3BinHash(const void *pKey, int nKey){ - int h = 0; - const char *z = (const char *)pKey; - while( nKey-- > 0 ){ - h = (h<<3) ^ h ^ *(z++); - } - return h & 0x7fffffff; -} -static int fts3BinCompare(const void *pKey1, int n1, const void *pKey2, int n2){ - if( n1!=n2 ) return 1; - return memcmp(pKey1,pKey2,n1); -} - -/* -** Return a pointer to the appropriate hash function given the key class. -** -** The C syntax in this function definition may be unfamilar to some -** programmers, so we provide the following additional explanation: -** -** The name of the function is "ftsHashFunction". The function takes a -** single parameter "keyClass". The return value of ftsHashFunction() -** is a pointer to another function. Specifically, the return value -** of ftsHashFunction() is a pointer to a function that takes two parameters -** with types "const void*" and "int" and returns an "int". -*/ -static int (*ftsHashFunction(int keyClass))(const void*,int){ - if( keyClass==FTS3_HASH_STRING ){ - return &fts3StrHash; - }else{ - assert( keyClass==FTS3_HASH_BINARY ); - return &fts3BinHash; - } -} - -/* -** Return a pointer to the appropriate hash function given the key class. -** -** For help in interpreted the obscure C code in the function definition, -** see the header comment on the previous function. 
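/*
** Illustrative sketch of the "function returning a pointer to a function"
** declaration that ftsHashFunction() above uses, rewritten with a typedef,
** which many readers find easier to parse.  str_hash() and bin_hash() are
** stand-ins that reuse the same mixing step as fts3StrHash()/fts3BinHash().
*/
#include <stdio.h>
#include <string.h>

typedef int (*hash_fn)(const void *pKey, int nKey);

static int str_hash(const void *pKey, int nKey){
  const char *z = (const char *)pKey;
  unsigned h = 0;
  if( nKey<=0 ) nKey = (int)strlen(z);
  while( nKey-- > 0 ) h = (h<<3) ^ h ^ *z++;
  return (int)(h & 0x7fffffff);
}

static int bin_hash(const void *pKey, int nKey){
  const char *z = (const char *)pKey;
  int h = 0;
  while( nKey-- > 0 ) h = (h<<3) ^ h ^ *z++;
  return h & 0x7fffffff;
}

/* Equivalent to:  static int (*pick_hash(int isString))(const void*,int)  */
static hash_fn pick_hash(int isString){
  return isString ? str_hash : bin_hash;
}

int main(void){
  hash_fn h = pick_hash(1);
  printf("%d\n", h("porter", -1));
  return 0;
}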
-*/ -static int (*ftsCompareFunction(int keyClass))(const void*,int,const void*,int){ - if( keyClass==FTS3_HASH_STRING ){ - return &fts3StrCompare; - }else{ - assert( keyClass==FTS3_HASH_BINARY ); - return &fts3BinCompare; - } -} - -/* Link an element into the hash table -*/ -static void fts3HashInsertElement( - Fts3Hash *pH, /* The complete hash table */ - struct _fts3ht *pEntry, /* The entry into which pNew is inserted */ - Fts3HashElem *pNew /* The element to be inserted */ -){ - Fts3HashElem *pHead; /* First element already in pEntry */ - pHead = pEntry->chain; - if( pHead ){ - pNew->next = pHead; - pNew->prev = pHead->prev; - if( pHead->prev ){ pHead->prev->next = pNew; } - else { pH->first = pNew; } - pHead->prev = pNew; - }else{ - pNew->next = pH->first; - if( pH->first ){ pH->first->prev = pNew; } - pNew->prev = 0; - pH->first = pNew; - } - pEntry->count++; - pEntry->chain = pNew; -} - - -/* Resize the hash table so that it cantains "new_size" buckets. -** "new_size" must be a power of 2. The hash table might fail -** to resize if sqliteMalloc() fails. -** -** Return non-zero if a memory allocation error occurs. -*/ -static int fts3Rehash(Fts3Hash *pH, int new_size){ - struct _fts3ht *new_ht; /* The new hash table */ - Fts3HashElem *elem, *next_elem; /* For looping over existing elements */ - int (*xHash)(const void*,int); /* The hash function */ - - assert( (new_size & (new_size-1))==0 ); - new_ht = (struct _fts3ht *)fts3HashMalloc( new_size*sizeof(struct _fts3ht) ); - if( new_ht==0 ) return 1; - fts3HashFree(pH->ht); - pH->ht = new_ht; - pH->htsize = new_size; - xHash = ftsHashFunction(pH->keyClass); - for(elem=pH->first, pH->first=0; elem; elem = next_elem){ - int h = (*xHash)(elem->pKey, elem->nKey) & (new_size-1); - next_elem = elem->next; - fts3HashInsertElement(pH, &new_ht[h], elem); - } - return 0; -} - -/* This function (for internal use only) locates an element in an -** hash table that matches the given key. The hash for this key has -** already been computed and is passed as the 4th parameter. -*/ -static Fts3HashElem *fts3FindElementByHash( - const Fts3Hash *pH, /* The pH to be searched */ - const void *pKey, /* The key we are searching for */ - int nKey, - int h /* The hash for this key. */ -){ - Fts3HashElem *elem; /* Used to loop thru the element list */ - int count; /* Number of elements left to test */ - int (*xCompare)(const void*,int,const void*,int); /* comparison function */ - - if( pH->ht ){ - struct _fts3ht *pEntry = &pH->ht[h]; - elem = pEntry->chain; - count = pEntry->count; - xCompare = ftsCompareFunction(pH->keyClass); - while( count-- && elem ){ - if( (*xCompare)(elem->pKey,elem->nKey,pKey,nKey)==0 ){ - return elem; - } - elem = elem->next; - } - } - return 0; -} - -/* Remove a single entry from the hash table given a pointer to that -** element and a hash on the element's key. 
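/*
** Illustrative sketch of why fts3Rehash() above insists on a power-of-two
** bucket count: the bucket index can then be computed with a bit mask
** instead of a modulo, exactly as the "h & (pH->htsize-1)" expressions do
** elsewhere in this file, and doubling the table preserves the invariant.
*/
#include <stdio.h>
#include <assert.h>

static int bucket_of(unsigned hash, unsigned nBucket){
  assert( nBucket>0 && (nBucket & (nBucket-1))==0 );  /* power of two */
  return (int)(hash & (nBucket-1));                   /* same as hash % nBucket */
}

int main(void){
  unsigned h = 0x12345678;
  printf("%d\n", bucket_of(h, 8));    /* low 3 bits of the hash          */
  printf("%d\n", bucket_of(h, 16));   /* low 4 bits after one doubling   */
  return 0;
}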
-*/ -static void fts3RemoveElementByHash( - Fts3Hash *pH, /* The pH containing "elem" */ - Fts3HashElem* elem, /* The element to be removed from the pH */ - int h /* Hash value for the element */ -){ - struct _fts3ht *pEntry; - if( elem->prev ){ - elem->prev->next = elem->next; - }else{ - pH->first = elem->next; - } - if( elem->next ){ - elem->next->prev = elem->prev; - } - pEntry = &pH->ht[h]; - if( pEntry->chain==elem ){ - pEntry->chain = elem->next; - } - pEntry->count--; - if( pEntry->count<=0 ){ - pEntry->chain = 0; - } - if( pH->copyKey && elem->pKey ){ - fts3HashFree(elem->pKey); - } - fts3HashFree( elem ); - pH->count--; - if( pH->count<=0 ){ - assert( pH->first==0 ); - assert( pH->count==0 ); - fts3HashClear(pH); - } -} - -SQLITE_PRIVATE Fts3HashElem *sqlite3Fts3HashFindElem( - const Fts3Hash *pH, - const void *pKey, - int nKey -){ - int h; /* A hash on key */ - int (*xHash)(const void*,int); /* The hash function */ - - if( pH==0 || pH->ht==0 ) return 0; - xHash = ftsHashFunction(pH->keyClass); - assert( xHash!=0 ); - h = (*xHash)(pKey,nKey); - assert( (pH->htsize & (pH->htsize-1))==0 ); - return fts3FindElementByHash(pH,pKey,nKey, h & (pH->htsize-1)); -} - -/* -** Attempt to locate an element of the hash table pH with a key -** that matches pKey,nKey. Return the data for this element if it is -** found, or NULL if there is no match. -*/ -SQLITE_PRIVATE void *sqlite3Fts3HashFind(const Fts3Hash *pH, const void *pKey, int nKey){ - Fts3HashElem *pElem; /* The element that matches key (if any) */ - - pElem = sqlite3Fts3HashFindElem(pH, pKey, nKey); - return pElem ? pElem->data : 0; -} - -/* Insert an element into the hash table pH. The key is pKey,nKey -** and the data is "data". -** -** If no element exists with a matching key, then a new -** element is created. A copy of the key is made if the copyKey -** flag is set. NULL is returned. -** -** If another element already exists with the same key, then the -** new data replaces the old data and the old data is returned. -** The key is not copied in this instance. If a malloc fails, then -** the new data is returned and the hash table is unchanged. -** -** If the "data" parameter to this function is NULL, then the -** element corresponding to "key" is removed from the hash table. 
-*/ -SQLITE_PRIVATE void *sqlite3Fts3HashInsert( - Fts3Hash *pH, /* The hash table to insert into */ - const void *pKey, /* The key */ - int nKey, /* Number of bytes in the key */ - void *data /* The data */ -){ - int hraw; /* Raw hash value of the key */ - int h; /* the hash of the key modulo hash table size */ - Fts3HashElem *elem; /* Used to loop thru the element list */ - Fts3HashElem *new_elem; /* New element added to the pH */ - int (*xHash)(const void*,int); /* The hash function */ - - assert( pH!=0 ); - xHash = ftsHashFunction(pH->keyClass); - assert( xHash!=0 ); - hraw = (*xHash)(pKey, nKey); - assert( (pH->htsize & (pH->htsize-1))==0 ); - h = hraw & (pH->htsize-1); - elem = fts3FindElementByHash(pH,pKey,nKey,h); - if( elem ){ - void *old_data = elem->data; - if( data==0 ){ - fts3RemoveElementByHash(pH,elem,h); - }else{ - elem->data = data; - } - return old_data; - } - if( data==0 ) return 0; - if( (pH->htsize==0 && fts3Rehash(pH,8)) - || (pH->count>=pH->htsize && fts3Rehash(pH, pH->htsize*2)) - ){ - pH->count = 0; - return data; - } - assert( pH->htsize>0 ); - new_elem = (Fts3HashElem*)fts3HashMalloc( sizeof(Fts3HashElem) ); - if( new_elem==0 ) return data; - if( pH->copyKey && pKey!=0 ){ - new_elem->pKey = fts3HashMalloc( nKey ); - if( new_elem->pKey==0 ){ - fts3HashFree(new_elem); - return data; - } - memcpy((void*)new_elem->pKey, pKey, nKey); - }else{ - new_elem->pKey = (void*)pKey; - } - new_elem->nKey = nKey; - pH->count++; - assert( pH->htsize>0 ); - assert( (pH->htsize & (pH->htsize-1))==0 ); - h = hraw & (pH->htsize-1); - fts3HashInsertElement(pH, &pH->ht[h], new_elem); - new_elem->data = data; - return 0; -} - -#endif /* !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS3) */ - -/************** End of fts3_hash.c *******************************************/ -/************** Begin file fts3_porter.c *************************************/ -/* -** 2006 September 30 -** -** The author disclaims copyright to this source code. In place of -** a legal notice, here is a blessing: -** -** May you do good and not evil. -** May you find forgiveness for yourself and forgive others. -** May you share freely, never taking more than you give. -** -************************************************************************* -** Implementation of the full-text-search tokenizer that implements -** a Porter stemmer. -*/ - -/* -** The code in this file is only compiled if: -** -** * The FTS3 module is being built as an extension -** (in which case SQLITE_CORE is not defined), or -** -** * The FTS3 module is being built into the core of -** SQLite (in which case SQLITE_ENABLE_FTS3 is defined). -*/ -#if !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS3) - -/* #include */ -/* #include */ -/* #include */ -/* #include */ - - -/* -** Class derived from sqlite3_tokenizer -*/ -typedef struct porter_tokenizer { - sqlite3_tokenizer base; /* Base class */ -} porter_tokenizer; - -/* -** Class derived from sqlite3_tokenizer_cursor -*/ -typedef struct porter_tokenizer_cursor { - sqlite3_tokenizer_cursor base; - const char *zInput; /* input we are tokenizing */ - int nInput; /* size of the input */ - int iOffset; /* current position in zInput */ - int iToken; /* index of next token to be returned */ - char *zToken; /* storage for current token */ - int nAllocated; /* space allocated to zToken buffer */ -} porter_tokenizer_cursor; - - -/* -** Create a new tokenizer instance. 
-*/ -static int porterCreate( - int argc, const char * const *argv, - sqlite3_tokenizer **ppTokenizer -){ - porter_tokenizer *t; - - UNUSED_PARAMETER(argc); - UNUSED_PARAMETER(argv); - - t = (porter_tokenizer *) sqlite3_malloc(sizeof(*t)); - if( t==NULL ) return SQLITE_NOMEM; - memset(t, 0, sizeof(*t)); - *ppTokenizer = &t->base; - return SQLITE_OK; -} - -/* -** Destroy a tokenizer -*/ -static int porterDestroy(sqlite3_tokenizer *pTokenizer){ - sqlite3_free(pTokenizer); - return SQLITE_OK; -} - -/* -** Prepare to begin tokenizing a particular string. The input -** string to be tokenized is zInput[0..nInput-1]. A cursor -** used to incrementally tokenize this string is returned in -** *ppCursor. -*/ -static int porterOpen( - sqlite3_tokenizer *pTokenizer, /* The tokenizer */ - const char *zInput, int nInput, /* String to be tokenized */ - sqlite3_tokenizer_cursor **ppCursor /* OUT: Tokenization cursor */ -){ - porter_tokenizer_cursor *c; - - UNUSED_PARAMETER(pTokenizer); - - c = (porter_tokenizer_cursor *) sqlite3_malloc(sizeof(*c)); - if( c==NULL ) return SQLITE_NOMEM; - - c->zInput = zInput; - if( zInput==0 ){ - c->nInput = 0; - }else if( nInput<0 ){ - c->nInput = (int)strlen(zInput); - }else{ - c->nInput = nInput; - } - c->iOffset = 0; /* start tokenizing at the beginning */ - c->iToken = 0; - c->zToken = NULL; /* no space allocated, yet. */ - c->nAllocated = 0; - - *ppCursor = &c->base; - return SQLITE_OK; -} - -/* -** Close a tokenization cursor previously opened by a call to -** porterOpen() above. -*/ -static int porterClose(sqlite3_tokenizer_cursor *pCursor){ - porter_tokenizer_cursor *c = (porter_tokenizer_cursor *) pCursor; - sqlite3_free(c->zToken); - sqlite3_free(c); - return SQLITE_OK; -} -/* -** Vowel or consonant -*/ -static const char cType[] = { - 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, - 1, 1, 1, 2, 1 -}; - -/* -** isConsonant() and isVowel() determine if their first character in -** the string they point to is a consonant or a vowel, according -** to Porter ruls. -** -** A consonate is any letter other than 'a', 'e', 'i', 'o', or 'u'. -** 'Y' is a consonant unless it follows another consonant, -** in which case it is a vowel. -** -** In these routine, the letters are in reverse order. So the 'y' rule -** is that 'y' is a consonant unless it is followed by another -** consonent. -*/ -static int isVowel(const char*); -static int isConsonant(const char *z){ - int j; - char x = *z; - if( x==0 ) return 0; - assert( x>='a' && x<='z' ); - j = cType[x-'a']; - if( j<2 ) return j; - return z[1]==0 || isVowel(z + 1); -} -static int isVowel(const char *z){ - int j; - char x = *z; - if( x==0 ) return 0; - assert( x>='a' && x<='z' ); - j = cType[x-'a']; - if( j<2 ) return 1-j; - return isConsonant(z + 1); -} - -/* -** Let any sequence of one or more vowels be represented by V and let -** C be sequence of one or more consonants. Then every word can be -** represented as: -** -** [C] (VC){m} [V] -** -** In prose: A word is an optional consonant followed by zero or -** vowel-consonant pairs followed by an optional vowel. "m" is the -** number of vowel consonant pairs. This routine computes the value -** of m for the first i bytes of a word. -** -** Return true if the m-value for z is 1 or more. In other words, -** return true if z contains at least one vowel that is followed -** by a consonant. -** -** In this routine z[] is in reverse order. So we are really looking -** for an instance of of a consonant followed by a vowel. 
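/*
** Illustrative sketch of the "m" value from the [C](VC){m}[V] decomposition
** described above -- the number of vowel-consonant pairs in a word.  The
** m_gt_0()/m_eq_1()/m_gt_1() helpers below operate on a reversed copy of the
** word; this standalone sketch computes m on a lowercase word in normal
** order, using the same 'y' rule: 'y' is a vowel only when it follows a
** consonant.
*/
#include <stdio.h>

static int is_vowel(const char *z, int i){
  char c = z[i];
  if( c=='a' || c=='e' || c=='i' || c=='o' || c=='u' ) return 1;
  if( c=='y' ) return i>0 && !is_vowel(z, i-1);
  return 0;
}

static int m_value(const char *z){
  int m = 0, prevVowel = 0;
  for(int i=0; z[i]; i++){
    int v = is_vowel(z, i);
    if( prevVowel && !v ) m++;   /* a vowel run just ended in a consonant */
    prevVowel = v;
  }
  return m;
}

int main(void){
  printf("tree    -> m=%d\n", m_value("tree"));      /* 0 */
  printf("trouble -> m=%d\n", m_value("trouble"));   /* 1 */
  printf("private -> m=%d\n", m_value("private"));   /* 2 */
  return 0;
}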
-*/ -static int m_gt_0(const char *z){ - while( isVowel(z) ){ z++; } - if( *z==0 ) return 0; - while( isConsonant(z) ){ z++; } - return *z!=0; -} - -/* Like mgt0 above except we are looking for a value of m which is -** exactly 1 -*/ -static int m_eq_1(const char *z){ - while( isVowel(z) ){ z++; } - if( *z==0 ) return 0; - while( isConsonant(z) ){ z++; } - if( *z==0 ) return 0; - while( isVowel(z) ){ z++; } - if( *z==0 ) return 1; - while( isConsonant(z) ){ z++; } - return *z==0; -} - -/* Like mgt0 above except we are looking for a value of m>1 instead -** or m>0 -*/ -static int m_gt_1(const char *z){ - while( isVowel(z) ){ z++; } - if( *z==0 ) return 0; - while( isConsonant(z) ){ z++; } - if( *z==0 ) return 0; - while( isVowel(z) ){ z++; } - if( *z==0 ) return 0; - while( isConsonant(z) ){ z++; } - return *z!=0; -} - -/* -** Return TRUE if there is a vowel anywhere within z[0..n-1] -*/ -static int hasVowel(const char *z){ - while( isConsonant(z) ){ z++; } - return *z!=0; -} - -/* -** Return TRUE if the word ends in a double consonant. -** -** The text is reversed here. So we are really looking at -** the first two characters of z[]. -*/ -static int doubleConsonant(const char *z){ - return isConsonant(z) && z[0]==z[1]; -} - -/* -** Return TRUE if the word ends with three letters which -** are consonant-vowel-consonent and where the final consonant -** is not 'w', 'x', or 'y'. -** -** The word is reversed here. So we are really checking the -** first three letters and the first one cannot be in [wxy]. -*/ -static int star_oh(const char *z){ - return - isConsonant(z) && - z[0]!='w' && z[0]!='x' && z[0]!='y' && - isVowel(z+1) && - isConsonant(z+2); -} - -/* -** If the word ends with zFrom and xCond() is true for the stem -** of the word that preceeds the zFrom ending, then change the -** ending to zTo. -** -** The input word *pz and zFrom are both in reverse order. zTo -** is in normal order. -** -** Return TRUE if zFrom matches. Return FALSE if zFrom does not -** match. Not that TRUE is returned even if xCond() fails and -** no substitution occurs. -*/ -static int stem( - char **pz, /* The word being stemmed (Reversed) */ - const char *zFrom, /* If the ending matches this... (Reversed) */ - const char *zTo, /* ... change the ending to this (not reversed) */ - int (*xCond)(const char*) /* Condition that must be true */ -){ - char *z = *pz; - while( *zFrom && *zFrom==*z ){ z++; zFrom++; } - if( *zFrom!=0 ) return 0; - if( xCond && !xCond(z) ) return 1; - while( *zTo ){ - *(--z) = *(zTo++); - } - *pz = z; - return 1; -} - -/* -** This is the fallback stemmer used when the porter stemmer is -** inappropriate. The input word is copied into the output with -** US-ASCII case folding. If the input word is too long (more -** than 20 bytes if it contains no digits or more than 6 bytes if -** it contains digits) then word is truncated to 20 or 6 bytes -** by taking 10 or 3 bytes from the beginning and end. -*/ -static void copy_stemmer(const char *zIn, int nIn, char *zOut, int *pnOut){ - int i, mx, j; - int hasDigit = 0; - for(i=0; i='A' && c<='Z' ){ - zOut[i] = c - 'A' + 'a'; - }else{ - if( c>='0' && c<='9' ) hasDigit = 1; - zOut[i] = c; - } - } - mx = hasDigit ? 3 : 10; - if( nIn>mx*2 ){ - for(j=mx, i=nIn-mx; i=(int)sizeof(zReverse)-7 ){ - /* The word is too big or too small for the porter stemmer. 
- ** Fallback to the copy stemmer */ - copy_stemmer(zIn, nIn, zOut, pnOut); - return; - } - for(i=0, j=sizeof(zReverse)-6; i='A' && c<='Z' ){ - zReverse[j] = c + 'a' - 'A'; - }else if( c>='a' && c<='z' ){ - zReverse[j] = c; - }else{ - /* The use of a character not in [a-zA-Z] means that we fallback - ** to the copy stemmer */ - copy_stemmer(zIn, nIn, zOut, pnOut); - return; - } - } - memset(&zReverse[sizeof(zReverse)-5], 0, 5); - z = &zReverse[j+1]; - - - /* Step 1a */ - if( z[0]=='s' ){ - if( - !stem(&z, "sess", "ss", 0) && - !stem(&z, "sei", "i", 0) && - !stem(&z, "ss", "ss", 0) - ){ - z++; - } - } - - /* Step 1b */ - z2 = z; - if( stem(&z, "dee", "ee", m_gt_0) ){ - /* Do nothing. The work was all in the test */ - }else if( - (stem(&z, "gni", "", hasVowel) || stem(&z, "de", "", hasVowel)) - && z!=z2 - ){ - if( stem(&z, "ta", "ate", 0) || - stem(&z, "lb", "ble", 0) || - stem(&z, "zi", "ize", 0) ){ - /* Do nothing. The work was all in the test */ - }else if( doubleConsonant(z) && (*z!='l' && *z!='s' && *z!='z') ){ - z++; - }else if( m_eq_1(z) && star_oh(z) ){ - *(--z) = 'e'; - } - } - - /* Step 1c */ - if( z[0]=='y' && hasVowel(z+1) ){ - z[0] = 'i'; - } - - /* Step 2 */ - switch( z[1] ){ - case 'a': - if( !stem(&z, "lanoita", "ate", m_gt_0) ){ - stem(&z, "lanoit", "tion", m_gt_0); - } - break; - case 'c': - if( !stem(&z, "icne", "ence", m_gt_0) ){ - stem(&z, "icna", "ance", m_gt_0); - } - break; - case 'e': - stem(&z, "rezi", "ize", m_gt_0); - break; - case 'g': - stem(&z, "igol", "log", m_gt_0); - break; - case 'l': - if( !stem(&z, "ilb", "ble", m_gt_0) - && !stem(&z, "illa", "al", m_gt_0) - && !stem(&z, "iltne", "ent", m_gt_0) - && !stem(&z, "ile", "e", m_gt_0) - ){ - stem(&z, "ilsuo", "ous", m_gt_0); - } - break; - case 'o': - if( !stem(&z, "noitazi", "ize", m_gt_0) - && !stem(&z, "noita", "ate", m_gt_0) - ){ - stem(&z, "rota", "ate", m_gt_0); - } - break; - case 's': - if( !stem(&z, "msila", "al", m_gt_0) - && !stem(&z, "ssenevi", "ive", m_gt_0) - && !stem(&z, "ssenluf", "ful", m_gt_0) - ){ - stem(&z, "ssensuo", "ous", m_gt_0); - } - break; - case 't': - if( !stem(&z, "itila", "al", m_gt_0) - && !stem(&z, "itivi", "ive", m_gt_0) - ){ - stem(&z, "itilib", "ble", m_gt_0); - } - break; - } - - /* Step 3 */ - switch( z[0] ){ - case 'e': - if( !stem(&z, "etaci", "ic", m_gt_0) - && !stem(&z, "evita", "", m_gt_0) - ){ - stem(&z, "ezila", "al", m_gt_0); - } - break; - case 'i': - stem(&z, "itici", "ic", m_gt_0); - break; - case 'l': - if( !stem(&z, "laci", "ic", m_gt_0) ){ - stem(&z, "luf", "", m_gt_0); - } - break; - case 's': - stem(&z, "ssen", "", m_gt_0); - break; - } - - /* Step 4 */ - switch( z[1] ){ - case 'a': - if( z[0]=='l' && m_gt_1(z+2) ){ - z += 2; - } - break; - case 'c': - if( z[0]=='e' && z[2]=='n' && (z[3]=='a' || z[3]=='e') && m_gt_1(z+4) ){ - z += 4; - } - break; - case 'e': - if( z[0]=='r' && m_gt_1(z+2) ){ - z += 2; - } - break; - case 'i': - if( z[0]=='c' && m_gt_1(z+2) ){ - z += 2; - } - break; - case 'l': - if( z[0]=='e' && z[2]=='b' && (z[3]=='a' || z[3]=='i') && m_gt_1(z+4) ){ - z += 4; - } - break; - case 'n': - if( z[0]=='t' ){ - if( z[2]=='a' ){ - if( m_gt_1(z+3) ){ - z += 3; - } - }else if( z[2]=='e' ){ - if( !stem(&z, "tneme", "", m_gt_1) - && !stem(&z, "tnem", "", m_gt_1) - ){ - stem(&z, "tne", "", m_gt_1); - } - } - } - break; - case 'o': - if( z[0]=='u' ){ - if( m_gt_1(z+2) ){ - z += 2; - } - }else if( z[3]=='s' || z[3]=='t' ){ - stem(&z, "noi", "", m_gt_1); - } - break; - case 's': - if( z[0]=='m' && z[2]=='i' && m_gt_1(z+3) ){ - z += 3; - } - break; - case 
't': - if( !stem(&z, "eta", "", m_gt_1) ){ - stem(&z, "iti", "", m_gt_1); - } - break; - case 'u': - if( z[0]=='s' && z[2]=='o' && m_gt_1(z+3) ){ - z += 3; - } - break; - case 'v': - case 'z': - if( z[0]=='e' && z[2]=='i' && m_gt_1(z+3) ){ - z += 3; - } - break; - } - - /* Step 5a */ - if( z[0]=='e' ){ - if( m_gt_1(z+1) ){ - z++; - }else if( m_eq_1(z+1) && !star_oh(z+1) ){ - z++; - } - } - - /* Step 5b */ - if( m_gt_1(z) && z[0]=='l' && z[1]=='l' ){ - z++; - } - - /* z[] is now the stemmed word in reverse order. Flip it back - ** around into forward order and return. - */ - *pnOut = i = (int)strlen(z); - zOut[i] = 0; - while( *z ){ - zOut[--i] = *(z++); - } -} - -/* -** Characters that can be part of a token. We assume any character -** whose value is greater than 0x80 (any UTF character) can be -** part of a token. In other words, delimiters all must have -** values of 0x7f or lower. -*/ -static const char porterIdChar[] = { -/* x0 x1 x2 x3 x4 x5 x6 x7 x8 x9 xA xB xC xD xE xF */ - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, /* 3x */ - 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* 4x */ - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, /* 5x */ - 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* 6x */ - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, /* 7x */ -}; -#define isDelim(C) (((ch=C)&0x80)==0 && (ch<0x30 || !porterIdChar[ch-0x30])) - -/* -** Extract the next token from a tokenization cursor. The cursor must -** have been opened by a prior call to porterOpen(). -*/ -static int porterNext( - sqlite3_tokenizer_cursor *pCursor, /* Cursor returned by porterOpen */ - const char **pzToken, /* OUT: *pzToken is the token text */ - int *pnBytes, /* OUT: Number of bytes in token */ - int *piStartOffset, /* OUT: Starting offset of token */ - int *piEndOffset, /* OUT: Ending offset of token */ - int *piPosition /* OUT: Position integer of token */ -){ - porter_tokenizer_cursor *c = (porter_tokenizer_cursor *) pCursor; - const char *z = c->zInput; - - while( c->iOffsetnInput ){ - int iStartOffset, ch; - - /* Scan past delimiter characters */ - while( c->iOffsetnInput && isDelim(z[c->iOffset]) ){ - c->iOffset++; - } - - /* Count non-delimiter characters. */ - iStartOffset = c->iOffset; - while( c->iOffsetnInput && !isDelim(z[c->iOffset]) ){ - c->iOffset++; - } - - if( c->iOffset>iStartOffset ){ - int n = c->iOffset-iStartOffset; - if( n>c->nAllocated ){ - char *pNew; - c->nAllocated = n+20; - pNew = sqlite3_realloc(c->zToken, c->nAllocated); - if( !pNew ) return SQLITE_NOMEM; - c->zToken = pNew; - } - porter_stemmer(&z[iStartOffset], n, c->zToken, pnBytes); - *pzToken = c->zToken; - *piStartOffset = iStartOffset; - *piEndOffset = c->iOffset; - *piPosition = c->iToken++; - return SQLITE_OK; - } - } - return SQLITE_DONE; -} - -/* -** The set of routines that implement the porter-stemmer tokenizer -*/ -static const sqlite3_tokenizer_module porterTokenizerModule = { - 0, - porterCreate, - porterDestroy, - porterOpen, - porterClose, - porterNext, - 0 -}; - -/* -** Allocate a new porter tokenizer. 
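/*
** Illustrative sketch of the scanning loop in porterNext() above: skip
** delimiter bytes, then collect a run of token bytes and report its start
** and end offsets.  The delimiter test is simplified here to "ASCII and not
** alphanumeric"; as in the original, any byte with the high bit set is
** treated as part of a token.
*/
#include <stdio.h>
#include <string.h>

static int is_delim(unsigned char c){
  if( c>=0x80 ) return 0;   /* multi-byte UTF-8 content is never a delimiter */
  return !((c>='0' && c<='9') || (c>='A' && c<='Z') || (c>='a' && c<='z'));
}

/* Returns 1 and sets *piStart/*piEnd when a token is found at or after *piOff. */
static int next_token(const char *z, int n, int *piOff, int *piStart, int *piEnd){
  int i = *piOff;
  while( i<n && is_delim((unsigned char)z[i]) ) i++;    /* skip delimiters   */
  *piStart = i;
  while( i<n && !is_delim((unsigned char)z[i]) ) i++;   /* collect the token */
  *piEnd = *piOff = i;
  return *piEnd > *piStart;
}

int main(void){
  const char *z = "full-text search, v2.0";
  int off = 0, s, e;
  while( next_token(z, (int)strlen(z), &off, &s, &e) ){
    printf("[%.*s] %d..%d\n", e-s, &z[s], s, e);
  }
  return 0;
}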
Return a pointer to the new -** tokenizer in *ppModule -*/ -SQLITE_PRIVATE void sqlite3Fts3PorterTokenizerModule( - sqlite3_tokenizer_module const**ppModule -){ - *ppModule = &porterTokenizerModule; -} - -#endif /* !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS3) */ - -/************** End of fts3_porter.c *****************************************/ -/************** Begin file fts3_tokenizer.c **********************************/ -/* -** 2007 June 22 -** -** The author disclaims copyright to this source code. In place of -** a legal notice, here is a blessing: -** -** May you do good and not evil. -** May you find forgiveness for yourself and forgive others. -** May you share freely, never taking more than you give. -** -****************************************************************************** -** -** This is part of an SQLite module implementing full-text search. -** This particular file implements the generic tokenizer interface. -*/ - -/* -** The code in this file is only compiled if: -** -** * The FTS3 module is being built as an extension -** (in which case SQLITE_CORE is not defined), or -** -** * The FTS3 module is being built into the core of -** SQLite (in which case SQLITE_ENABLE_FTS3 is defined). -*/ -#if !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS3) - -/* #include */ -/* #include */ - -/* -** Implementation of the SQL scalar function for accessing the underlying -** hash table. This function may be called as follows: -** -** SELECT (); -** SELECT (, ); -** -** where is the name passed as the second argument -** to the sqlite3Fts3InitHashTable() function (e.g. 'fts3_tokenizer'). -** -** If the argument is specified, it must be a blob value -** containing a pointer to be stored as the hash data corresponding -** to the string . If is not specified, then -** the string must already exist in the has table. Otherwise, -** an error is returned. -** -** Whether or not the argument is specified, the value returned -** is a blob containing the pointer stored as the hash data corresponding -** to string (after the hash-table is updated, if applicable). 
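/*
** Illustrative sketch of the mechanism the fts3_tokenizer() scalar described
** above relies on: the pointer VALUE itself travels through SQL as a blob of
** sizeof(void*) bytes, written with memcpy on the way in and read back with
** memcpy on the way out (the registerTokenizer()/queryTokenizer() helpers
** later in this file do the same via sqlite3_bind_blob() and
** sqlite3_column_blob()).  This standalone sketch round-trips a pointer
** through a plain byte buffer to show the idea.
*/
#include <stdio.h>
#include <string.h>

static int some_module;                  /* stands in for a tokenizer module */

int main(void){
  const void *pIn = &some_module;
  unsigned char blob[sizeof(void*)];     /* plays the role of the blob value */
  const void *pOut = 0;

  memcpy(blob, &pIn, sizeof(pIn));       /* registration: pointer -> blob */
  memcpy(&pOut, blob, sizeof(pOut));     /* lookup:       blob -> pointer */

  printf("round trip %s\n", pOut==pIn ? "ok" : "failed");
  return 0;
}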
-*/ -static void scalarFunc( - sqlite3_context *context, - int argc, - sqlite3_value **argv -){ - Fts3Hash *pHash; - void *pPtr = 0; - const unsigned char *zName; - int nName; - - assert( argc==1 || argc==2 ); - - pHash = (Fts3Hash *)sqlite3_user_data(context); - - zName = sqlite3_value_text(argv[0]); - nName = sqlite3_value_bytes(argv[0])+1; - - if( argc==2 ){ - void *pOld; - int n = sqlite3_value_bytes(argv[1]); - if( n!=sizeof(pPtr) ){ - sqlite3_result_error(context, "argument type mismatch", -1); - return; - } - pPtr = *(void **)sqlite3_value_blob(argv[1]); - pOld = sqlite3Fts3HashInsert(pHash, (void *)zName, nName, pPtr); - if( pOld==pPtr ){ - sqlite3_result_error(context, "out of memory", -1); - return; - } - }else{ - pPtr = sqlite3Fts3HashFind(pHash, zName, nName); - if( !pPtr ){ - char *zErr = sqlite3_mprintf("unknown tokenizer: %s", zName); - sqlite3_result_error(context, zErr, -1); - sqlite3_free(zErr); - return; - } - } - - sqlite3_result_blob(context, (void *)&pPtr, sizeof(pPtr), SQLITE_TRANSIENT); -} - -SQLITE_PRIVATE int sqlite3Fts3IsIdChar(char c){ - static const char isFtsIdChar[] = { - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 0x */ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 1x */ - 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 2x */ - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, /* 3x */ - 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* 4x */ - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, /* 5x */ - 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* 6x */ - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, /* 7x */ - }; - return (c&0x80 || isFtsIdChar[(int)(c)]); -} - -SQLITE_PRIVATE const char *sqlite3Fts3NextToken(const char *zStr, int *pn){ - const char *z1; - const char *z2 = 0; - - /* Find the start of the next token. 
*/ - z1 = zStr; - while( z2==0 ){ - char c = *z1; - switch( c ){ - case '\0': return 0; /* No more tokens here */ - case '\'': - case '"': - case '`': { - z2 = z1; - while( *++z2 && (*z2!=c || *++z2==c) ); - break; - } - case '[': - z2 = &z1[1]; - while( *z2 && z2[0]!=']' ) z2++; - if( *z2 ) z2++; - break; - - default: - if( sqlite3Fts3IsIdChar(*z1) ){ - z2 = &z1[1]; - while( sqlite3Fts3IsIdChar(*z2) ) z2++; - }else{ - z1++; - } - } - } - - *pn = (int)(z2-z1); - return z1; -} - -SQLITE_PRIVATE int sqlite3Fts3InitTokenizer( - Fts3Hash *pHash, /* Tokenizer hash table */ - const char *zArg, /* Tokenizer name */ - sqlite3_tokenizer **ppTok, /* OUT: Tokenizer (if applicable) */ - char **pzErr /* OUT: Set to malloced error message */ -){ - int rc; - char *z = (char *)zArg; - int n = 0; - char *zCopy; - char *zEnd; /* Pointer to nul-term of zCopy */ - sqlite3_tokenizer_module *m; - - zCopy = sqlite3_mprintf("%s", zArg); - if( !zCopy ) return SQLITE_NOMEM; - zEnd = &zCopy[strlen(zCopy)]; - - z = (char *)sqlite3Fts3NextToken(zCopy, &n); - z[n] = '\0'; - sqlite3Fts3Dequote(z); - - m = (sqlite3_tokenizer_module *)sqlite3Fts3HashFind(pHash,z,(int)strlen(z)+1); - if( !m ){ - *pzErr = sqlite3_mprintf("unknown tokenizer: %s", z); - rc = SQLITE_ERROR; - }else{ - char const **aArg = 0; - int iArg = 0; - z = &z[n+1]; - while( zxCreate(iArg, aArg, ppTok); - assert( rc!=SQLITE_OK || *ppTok ); - if( rc!=SQLITE_OK ){ - *pzErr = sqlite3_mprintf("unknown tokenizer"); - }else{ - (*ppTok)->pModule = m; - } - sqlite3_free((void *)aArg); - } - - sqlite3_free(zCopy); - return rc; -} - - -#ifdef SQLITE_TEST - -#include -/* #include */ - -/* -** Implementation of a special SQL scalar function for testing tokenizers -** designed to be used in concert with the Tcl testing framework. This -** function must be called with two or more arguments: -** -** SELECT (, ..., ); -** -** where is the name passed as the second argument -** to the sqlite3Fts3InitHashTable() function (e.g. 'fts3_tokenizer') -** concatenated with the string '_test' (e.g. 'fts3_tokenizer_test'). -** -** The return value is a string that may be interpreted as a Tcl -** list. For each token in the , three elements are -** added to the returned list. The first is the token position, the -** second is the token text (folded, stemmed, etc.) and the third is the -** substring of associated with the token. 
For example, -** using the built-in "simple" tokenizer: -** -** SELECT fts_tokenizer_test('simple', 'I don't see how'); -** -** will return the string: -** -** "{0 i I 1 dont don't 2 see see 3 how how}" -** -*/ -static void testFunc( - sqlite3_context *context, - int argc, - sqlite3_value **argv -){ - Fts3Hash *pHash; - sqlite3_tokenizer_module *p; - sqlite3_tokenizer *pTokenizer = 0; - sqlite3_tokenizer_cursor *pCsr = 0; - - const char *zErr = 0; - - const char *zName; - int nName; - const char *zInput; - int nInput; - - const char *azArg[64]; - - const char *zToken; - int nToken = 0; - int iStart = 0; - int iEnd = 0; - int iPos = 0; - int i; - - Tcl_Obj *pRet; - - if( argc<2 ){ - sqlite3_result_error(context, "insufficient arguments", -1); - return; - } - - nName = sqlite3_value_bytes(argv[0]); - zName = (const char *)sqlite3_value_text(argv[0]); - nInput = sqlite3_value_bytes(argv[argc-1]); - zInput = (const char *)sqlite3_value_text(argv[argc-1]); - - pHash = (Fts3Hash *)sqlite3_user_data(context); - p = (sqlite3_tokenizer_module *)sqlite3Fts3HashFind(pHash, zName, nName+1); - - if( !p ){ - char *zErr = sqlite3_mprintf("unknown tokenizer: %s", zName); - sqlite3_result_error(context, zErr, -1); - sqlite3_free(zErr); - return; - } - - pRet = Tcl_NewObj(); - Tcl_IncrRefCount(pRet); - - for(i=1; ixCreate(argc-2, azArg, &pTokenizer) ){ - zErr = "error in xCreate()"; - goto finish; - } - pTokenizer->pModule = p; - if( sqlite3Fts3OpenTokenizer(pTokenizer, 0, zInput, nInput, &pCsr) ){ - zErr = "error in xOpen()"; - goto finish; - } - - while( SQLITE_OK==p->xNext(pCsr, &zToken, &nToken, &iStart, &iEnd, &iPos) ){ - Tcl_ListObjAppendElement(0, pRet, Tcl_NewIntObj(iPos)); - Tcl_ListObjAppendElement(0, pRet, Tcl_NewStringObj(zToken, nToken)); - zToken = &zInput[iStart]; - nToken = iEnd-iStart; - Tcl_ListObjAppendElement(0, pRet, Tcl_NewStringObj(zToken, nToken)); - } - - if( SQLITE_OK!=p->xClose(pCsr) ){ - zErr = "error in xClose()"; - goto finish; - } - if( SQLITE_OK!=p->xDestroy(pTokenizer) ){ - zErr = "error in xDestroy()"; - goto finish; - } - -finish: - if( zErr ){ - sqlite3_result_error(context, zErr, -1); - }else{ - sqlite3_result_text(context, Tcl_GetString(pRet), -1, SQLITE_TRANSIENT); - } - Tcl_DecrRefCount(pRet); -} - -static -int registerTokenizer( - sqlite3 *db, - char *zName, - const sqlite3_tokenizer_module *p -){ - int rc; - sqlite3_stmt *pStmt; - const char zSql[] = "SELECT fts3_tokenizer(?, ?)"; - - rc = sqlite3_prepare_v2(db, zSql, -1, &pStmt, 0); - if( rc!=SQLITE_OK ){ - return rc; - } - - sqlite3_bind_text(pStmt, 1, zName, -1, SQLITE_STATIC); - sqlite3_bind_blob(pStmt, 2, &p, sizeof(p), SQLITE_STATIC); - sqlite3_step(pStmt); - - return sqlite3_finalize(pStmt); -} - -static -int queryTokenizer( - sqlite3 *db, - char *zName, - const sqlite3_tokenizer_module **pp -){ - int rc; - sqlite3_stmt *pStmt; - const char zSql[] = "SELECT fts3_tokenizer(?)"; - - *pp = 0; - rc = sqlite3_prepare_v2(db, zSql, -1, &pStmt, 0); - if( rc!=SQLITE_OK ){ - return rc; - } - - sqlite3_bind_text(pStmt, 1, zName, -1, SQLITE_STATIC); - if( SQLITE_ROW==sqlite3_step(pStmt) ){ - if( sqlite3_column_type(pStmt, 0)==SQLITE_BLOB ){ - memcpy((void *)pp, sqlite3_column_blob(pStmt, 0), sizeof(*pp)); - } - } - - return sqlite3_finalize(pStmt); -} - -SQLITE_PRIVATE void sqlite3Fts3SimpleTokenizerModule(sqlite3_tokenizer_module const**ppModule); - -/* -** Implementation of the scalar function fts3_tokenizer_internal_test(). 
-** This function is used for testing only, it is not included in the -** build unless SQLITE_TEST is defined. -** -** The purpose of this is to test that the fts3_tokenizer() function -** can be used as designed by the C-code in the queryTokenizer and -** registerTokenizer() functions above. These two functions are repeated -** in the README.tokenizer file as an example, so it is important to -** test them. -** -** To run the tests, evaluate the fts3_tokenizer_internal_test() scalar -** function with no arguments. An assert() will fail if a problem is -** detected. i.e.: -** -** SELECT fts3_tokenizer_internal_test(); -** -*/ -static void intTestFunc( - sqlite3_context *context, - int argc, - sqlite3_value **argv -){ - int rc; - const sqlite3_tokenizer_module *p1; - const sqlite3_tokenizer_module *p2; - sqlite3 *db = (sqlite3 *)sqlite3_user_data(context); - - UNUSED_PARAMETER(argc); - UNUSED_PARAMETER(argv); - - /* Test the query function */ - sqlite3Fts3SimpleTokenizerModule(&p1); - rc = queryTokenizer(db, "simple", &p2); - assert( rc==SQLITE_OK ); - assert( p1==p2 ); - rc = queryTokenizer(db, "nosuchtokenizer", &p2); - assert( rc==SQLITE_ERROR ); - assert( p2==0 ); - assert( 0==strcmp(sqlite3_errmsg(db), "unknown tokenizer: nosuchtokenizer") ); - - /* Test the storage function */ - rc = registerTokenizer(db, "nosuchtokenizer", p1); - assert( rc==SQLITE_OK ); - rc = queryTokenizer(db, "nosuchtokenizer", &p2); - assert( rc==SQLITE_OK ); - assert( p2==p1 ); - - sqlite3_result_text(context, "ok", -1, SQLITE_STATIC); -} - -#endif - -/* -** Set up SQL objects in database db used to access the contents of -** the hash table pointed to by argument pHash. The hash table must -** been initialized to use string keys, and to take a private copy -** of the key when a value is inserted. i.e. by a call similar to: -** -** sqlite3Fts3HashInit(pHash, FTS3_HASH_STRING, 1); -** -** This function adds a scalar function (see header comment above -** scalarFunc() in this file for details) and, if ENABLE_TABLE is -** defined at compilation time, a temporary virtual table (see header -** comment above struct HashTableVtab) to the database schema. Both -** provide read/write access to the contents of *pHash. -** -** The third argument to this function, zName, is used as the name -** of both the scalar and, if created, the virtual table. 
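**
** (Editorial sketch, not part of the deleted file: the scalar function
** registered below is normally driven from application code using the same
** pattern as the registerTokenizer() helper above.  The tokenizer name
** "mytokenizer" and the module pointer pMyModule are hypothetical.)
**
**   sqlite3_stmt *pStmt;
**   sqlite3_prepare_v2(db, "SELECT fts3_tokenizer(?, ?)", -1, &pStmt, 0);
**   sqlite3_bind_text(pStmt, 1, "mytokenizer", -1, SQLITE_STATIC);
**   sqlite3_bind_blob(pStmt, 2, &pMyModule, sizeof(pMyModule), SQLITE_STATIC);
**   sqlite3_step(pStmt);
**   sqlite3_finalize(pStmt);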
-*/ -SQLITE_PRIVATE int sqlite3Fts3InitHashTable( - sqlite3 *db, - Fts3Hash *pHash, - const char *zName -){ - int rc = SQLITE_OK; - void *p = (void *)pHash; - const int any = SQLITE_ANY; - -#ifdef SQLITE_TEST - char *zTest = 0; - char *zTest2 = 0; - void *pdb = (void *)db; - zTest = sqlite3_mprintf("%s_test", zName); - zTest2 = sqlite3_mprintf("%s_internal_test", zName); - if( !zTest || !zTest2 ){ - rc = SQLITE_NOMEM; - } -#endif - - if( SQLITE_OK==rc ){ - rc = sqlite3_create_function(db, zName, 1, any, p, scalarFunc, 0, 0); - } - if( SQLITE_OK==rc ){ - rc = sqlite3_create_function(db, zName, 2, any, p, scalarFunc, 0, 0); - } -#ifdef SQLITE_TEST - if( SQLITE_OK==rc ){ - rc = sqlite3_create_function(db, zTest, -1, any, p, testFunc, 0, 0); - } - if( SQLITE_OK==rc ){ - rc = sqlite3_create_function(db, zTest2, 0, any, pdb, intTestFunc, 0, 0); - } -#endif - -#ifdef SQLITE_TEST - sqlite3_free(zTest); - sqlite3_free(zTest2); -#endif - - return rc; -} - -#endif /* !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS3) */ - -/************** End of fts3_tokenizer.c **************************************/ -/************** Begin file fts3_tokenizer1.c *********************************/ -/* -** 2006 Oct 10 -** -** The author disclaims copyright to this source code. In place of -** a legal notice, here is a blessing: -** -** May you do good and not evil. -** May you find forgiveness for yourself and forgive others. -** May you share freely, never taking more than you give. -** -****************************************************************************** -** -** Implementation of the "simple" full-text-search tokenizer. -*/ - -/* -** The code in this file is only compiled if: -** -** * The FTS3 module is being built as an extension -** (in which case SQLITE_CORE is not defined), or -** -** * The FTS3 module is being built into the core of -** SQLite (in which case SQLITE_ENABLE_FTS3 is defined). -*/ -#if !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS3) - -/* #include */ -/* #include */ -/* #include */ -/* #include */ - - -typedef struct simple_tokenizer { - sqlite3_tokenizer base; - char delim[128]; /* flag ASCII delimiters */ -} simple_tokenizer; - -typedef struct simple_tokenizer_cursor { - sqlite3_tokenizer_cursor base; - const char *pInput; /* input we are tokenizing */ - int nBytes; /* size of the input */ - int iOffset; /* current position in pInput */ - int iToken; /* index of next token to be returned */ - char *pToken; /* storage for current token */ - int nTokenAllocated; /* space allocated to zToken buffer */ -} simple_tokenizer_cursor; - - -static int simpleDelim(simple_tokenizer *t, unsigned char c){ - return c<0x80 && t->delim[c]; -} -static int fts3_isalnum(int x){ - return (x>='0' && x<='9') || (x>='A' && x<='Z') || (x>='a' && x<='z'); -} - -/* -** Create a new tokenizer instance. -*/ -static int simpleCreate( - int argc, const char * const *argv, - sqlite3_tokenizer **ppTokenizer -){ - simple_tokenizer *t; - - t = (simple_tokenizer *) sqlite3_malloc(sizeof(*t)); - if( t==NULL ) return SQLITE_NOMEM; - memset(t, 0, sizeof(*t)); - - /* TODO(shess) Delimiters need to remain the same from run to run, - ** else we need to reindex. One solution would be a meta-table to - ** track such information in the database, then we'd only want this - ** information on the initial create. 
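**
** (Editorial sketch, not from the deleted file, with hypothetical values:
** when an explicit delimiter string is supplied as argv[1], only those
** bytes split tokens.)
**
**   static const char *azArg[] = { "simple", " ,." };
**   sqlite3_tokenizer *pTok = 0;
**   simpleCreate(2, azArg, &pTok);
**
** Here only space, ',' and '.' act as delimiters; with argc==1 the
** else-branch below instead marks every non-alphanumeric ASCII byte.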
- */ - if( argc>1 ){ - int i, n = (int)strlen(argv[1]); - for(i=0; i=0x80 ){ - sqlite3_free(t); - return SQLITE_ERROR; - } - t->delim[ch] = 1; - } - } else { - /* Mark non-alphanumeric ASCII characters as delimiters */ - int i; - for(i=1; i<0x80; i++){ - t->delim[i] = !fts3_isalnum(i) ? -1 : 0; - } - } - - *ppTokenizer = &t->base; - return SQLITE_OK; -} - -/* -** Destroy a tokenizer -*/ -static int simpleDestroy(sqlite3_tokenizer *pTokenizer){ - sqlite3_free(pTokenizer); - return SQLITE_OK; -} - -/* -** Prepare to begin tokenizing a particular string. The input -** string to be tokenized is pInput[0..nBytes-1]. A cursor -** used to incrementally tokenize this string is returned in -** *ppCursor. -*/ -static int simpleOpen( - sqlite3_tokenizer *pTokenizer, /* The tokenizer */ - const char *pInput, int nBytes, /* String to be tokenized */ - sqlite3_tokenizer_cursor **ppCursor /* OUT: Tokenization cursor */ -){ - simple_tokenizer_cursor *c; - - UNUSED_PARAMETER(pTokenizer); - - c = (simple_tokenizer_cursor *) sqlite3_malloc(sizeof(*c)); - if( c==NULL ) return SQLITE_NOMEM; - - c->pInput = pInput; - if( pInput==0 ){ - c->nBytes = 0; - }else if( nBytes<0 ){ - c->nBytes = (int)strlen(pInput); - }else{ - c->nBytes = nBytes; - } - c->iOffset = 0; /* start tokenizing at the beginning */ - c->iToken = 0; - c->pToken = NULL; /* no space allocated, yet. */ - c->nTokenAllocated = 0; - - *ppCursor = &c->base; - return SQLITE_OK; -} - -/* -** Close a tokenization cursor previously opened by a call to -** simpleOpen() above. -*/ -static int simpleClose(sqlite3_tokenizer_cursor *pCursor){ - simple_tokenizer_cursor *c = (simple_tokenizer_cursor *) pCursor; - sqlite3_free(c->pToken); - sqlite3_free(c); - return SQLITE_OK; -} - -/* -** Extract the next token from a tokenization cursor. The cursor must -** have been opened by a prior call to simpleOpen(). -*/ -static int simpleNext( - sqlite3_tokenizer_cursor *pCursor, /* Cursor returned by simpleOpen */ - const char **ppToken, /* OUT: *ppToken is the token text */ - int *pnBytes, /* OUT: Number of bytes in token */ - int *piStartOffset, /* OUT: Starting offset of token */ - int *piEndOffset, /* OUT: Ending offset of token */ - int *piPosition /* OUT: Position integer of token */ -){ - simple_tokenizer_cursor *c = (simple_tokenizer_cursor *) pCursor; - simple_tokenizer *t = (simple_tokenizer *) pCursor->pTokenizer; - unsigned char *p = (unsigned char *)c->pInput; - - while( c->iOffsetnBytes ){ - int iStartOffset; - - /* Scan past delimiter characters */ - while( c->iOffsetnBytes && simpleDelim(t, p[c->iOffset]) ){ - c->iOffset++; - } - - /* Count non-delimiter characters. */ - iStartOffset = c->iOffset; - while( c->iOffsetnBytes && !simpleDelim(t, p[c->iOffset]) ){ - c->iOffset++; - } - - if( c->iOffset>iStartOffset ){ - int i, n = c->iOffset-iStartOffset; - if( n>c->nTokenAllocated ){ - char *pNew; - c->nTokenAllocated = n+20; - pNew = sqlite3_realloc(c->pToken, c->nTokenAllocated); - if( !pNew ) return SQLITE_NOMEM; - c->pToken = pNew; - } - for(i=0; ipToken[i] = (char)((ch>='A' && ch<='Z') ? 
ch-'A'+'a' : ch); - } - *ppToken = c->pToken; - *pnBytes = n; - *piStartOffset = iStartOffset; - *piEndOffset = c->iOffset; - *piPosition = c->iToken++; - - return SQLITE_OK; - } - } - return SQLITE_DONE; -} - -/* -** The set of routines that implement the simple tokenizer -*/ -static const sqlite3_tokenizer_module simpleTokenizerModule = { - 0, - simpleCreate, - simpleDestroy, - simpleOpen, - simpleClose, - simpleNext, - 0, -}; - -/* -** Allocate a new simple tokenizer. Return a pointer to the new -** tokenizer in *ppModule -*/ -SQLITE_PRIVATE void sqlite3Fts3SimpleTokenizerModule( - sqlite3_tokenizer_module const**ppModule -){ - *ppModule = &simpleTokenizerModule; -} - -#endif /* !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS3) */ - -/************** End of fts3_tokenizer1.c *************************************/ -/************** Begin file fts3_tokenize_vtab.c ******************************/ -/* -** 2013 Apr 22 -** -** The author disclaims copyright to this source code. In place of -** a legal notice, here is a blessing: -** -** May you do good and not evil. -** May you find forgiveness for yourself and forgive others. -** May you share freely, never taking more than you give. -** -****************************************************************************** -** -** This file contains code for the "fts3tokenize" virtual table module. -** An fts3tokenize virtual table is created as follows: -** -** CREATE VIRTUAL TABLE USING fts3tokenize( -** , , ... -** ); -** -** The table created has the following schema: -** -** CREATE TABLE (input, token, start, end, position) -** -** When queried, the query must include a WHERE clause of type: -** -** input = -** -** The virtual table module tokenizes this , using the FTS3 -** tokenizer specified by the arguments to the CREATE VIRTUAL TABLE -** statement and returns one row for each token in the result. With -** fields set as follows: -** -** input: Always set to a copy of -** token: A token from the input. -** start: Byte offset of the token within the input . -** end: Byte offset of the byte immediately following the end of the -** token within the input string. -** pos: Token offset of token within input. -** -*/ -#if !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS3) - -/* #include */ -/* #include */ - -typedef struct Fts3tokTable Fts3tokTable; -typedef struct Fts3tokCursor Fts3tokCursor; - -/* -** Virtual table structure. -*/ -struct Fts3tokTable { - sqlite3_vtab base; /* Base class used by SQLite core */ - const sqlite3_tokenizer_module *pMod; - sqlite3_tokenizer *pTok; -}; - -/* -** Virtual table cursor structure. -*/ -struct Fts3tokCursor { - sqlite3_vtab_cursor base; /* Base class used by SQLite core */ - char *zInput; /* Input string */ - sqlite3_tokenizer_cursor *pCsr; /* Cursor to iterate through zInput */ - int iRowid; /* Current 'rowid' value */ - const char *zToken; /* Current 'token' value */ - int nToken; /* Size of zToken in bytes */ - int iStart; /* Current 'start' value */ - int iEnd; /* Current 'end' value */ - int iPos; /* Current 'pos' value */ -}; - -/* -** Query FTS for the tokenizer implementation named zName. 
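**
** (Editorial note: the CREATE VIRTUAL TABLE and SELECT examples in the
** fts3tokenize module header above lost their placeholder names in this
** diff.  A concrete reconstruction, with the table name "tok1" chosen
** arbitrarily:)
**
**   CREATE VIRTUAL TABLE tok1 USING fts3tokenize(simple);
**   SELECT token, start, end, position FROM tok1 WHERE input = 'This is a test';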
-*/ -static int fts3tokQueryTokenizer( - Fts3Hash *pHash, - const char *zName, - const sqlite3_tokenizer_module **pp, - char **pzErr -){ - sqlite3_tokenizer_module *p; - int nName = (int)strlen(zName); - - p = (sqlite3_tokenizer_module *)sqlite3Fts3HashFind(pHash, zName, nName+1); - if( !p ){ - *pzErr = sqlite3_mprintf("unknown tokenizer: %s", zName); - return SQLITE_ERROR; - } - - *pp = p; - return SQLITE_OK; -} - -/* -** The second argument, argv[], is an array of pointers to nul-terminated -** strings. This function makes a copy of the array and strings into a -** single block of memory. It then dequotes any of the strings that appear -** to be quoted. -** -** If successful, output parameter *pazDequote is set to point at the -** array of dequoted strings and SQLITE_OK is returned. The caller is -** responsible for eventually calling sqlite3_free() to free the array -** in this case. Or, if an error occurs, an SQLite error code is returned. -** The final value of *pazDequote is undefined in this case. -*/ -static int fts3tokDequoteArray( - int argc, /* Number of elements in argv[] */ - const char * const *argv, /* Input array */ - char ***pazDequote /* Output array */ -){ - int rc = SQLITE_OK; /* Return code */ - if( argc==0 ){ - *pazDequote = 0; - }else{ - int i; - int nByte = 0; - char **azDequote; - - for(i=0; ixCreate((nDequote>1 ? nDequote-1 : 0), azArg, &pTok); - } - - if( rc==SQLITE_OK ){ - pTab = (Fts3tokTable *)sqlite3_malloc(sizeof(Fts3tokTable)); - if( pTab==0 ){ - rc = SQLITE_NOMEM; - } - } - - if( rc==SQLITE_OK ){ - memset(pTab, 0, sizeof(Fts3tokTable)); - pTab->pMod = pMod; - pTab->pTok = pTok; - *ppVtab = &pTab->base; - }else{ - if( pTok ){ - pMod->xDestroy(pTok); - } - } - - sqlite3_free(azDequote); - return rc; -} - -/* -** This function does the work for both the xDisconnect and xDestroy methods. -** These tables have no persistent representation of their own, so xDisconnect -** and xDestroy are identical operations. -*/ -static int fts3tokDisconnectMethod(sqlite3_vtab *pVtab){ - Fts3tokTable *pTab = (Fts3tokTable *)pVtab; - - pTab->pMod->xDestroy(pTab->pTok); - sqlite3_free(pTab); - return SQLITE_OK; -} - -/* -** xBestIndex - Analyze a WHERE and ORDER BY clause. -*/ -static int fts3tokBestIndexMethod( - sqlite3_vtab *pVTab, - sqlite3_index_info *pInfo -){ - int i; - UNUSED_PARAMETER(pVTab); - - for(i=0; inConstraint; i++){ - if( pInfo->aConstraint[i].usable - && pInfo->aConstraint[i].iColumn==0 - && pInfo->aConstraint[i].op==SQLITE_INDEX_CONSTRAINT_EQ - ){ - pInfo->idxNum = 1; - pInfo->aConstraintUsage[i].argvIndex = 1; - pInfo->aConstraintUsage[i].omit = 1; - pInfo->estimatedCost = 1; - return SQLITE_OK; - } - } - - pInfo->idxNum = 0; - assert( pInfo->estimatedCost>1000000.0 ); - - return SQLITE_OK; -} - -/* -** xOpen - Open a cursor. -*/ -static int fts3tokOpenMethod(sqlite3_vtab *pVTab, sqlite3_vtab_cursor **ppCsr){ - Fts3tokCursor *pCsr; - UNUSED_PARAMETER(pVTab); - - pCsr = (Fts3tokCursor *)sqlite3_malloc(sizeof(Fts3tokCursor)); - if( pCsr==0 ){ - return SQLITE_NOMEM; - } - memset(pCsr, 0, sizeof(Fts3tokCursor)); - - *ppCsr = (sqlite3_vtab_cursor *)pCsr; - return SQLITE_OK; -} - -/* -** Reset the tokenizer cursor passed as the only argument. As if it had -** just been returned by fts3tokOpenMethod(). 
-*/ -static void fts3tokResetCursor(Fts3tokCursor *pCsr){ - if( pCsr->pCsr ){ - Fts3tokTable *pTab = (Fts3tokTable *)(pCsr->base.pVtab); - pTab->pMod->xClose(pCsr->pCsr); - pCsr->pCsr = 0; - } - sqlite3_free(pCsr->zInput); - pCsr->zInput = 0; - pCsr->zToken = 0; - pCsr->nToken = 0; - pCsr->iStart = 0; - pCsr->iEnd = 0; - pCsr->iPos = 0; - pCsr->iRowid = 0; -} - -/* -** xClose - Close a cursor. -*/ -static int fts3tokCloseMethod(sqlite3_vtab_cursor *pCursor){ - Fts3tokCursor *pCsr = (Fts3tokCursor *)pCursor; - - fts3tokResetCursor(pCsr); - sqlite3_free(pCsr); - return SQLITE_OK; -} - -/* -** xNext - Advance the cursor to the next row, if any. -*/ -static int fts3tokNextMethod(sqlite3_vtab_cursor *pCursor){ - Fts3tokCursor *pCsr = (Fts3tokCursor *)pCursor; - Fts3tokTable *pTab = (Fts3tokTable *)(pCursor->pVtab); - int rc; /* Return code */ - - pCsr->iRowid++; - rc = pTab->pMod->xNext(pCsr->pCsr, - &pCsr->zToken, &pCsr->nToken, - &pCsr->iStart, &pCsr->iEnd, &pCsr->iPos - ); - - if( rc!=SQLITE_OK ){ - fts3tokResetCursor(pCsr); - if( rc==SQLITE_DONE ) rc = SQLITE_OK; - } - - return rc; -} - -/* -** xFilter - Initialize a cursor to point at the start of its data. -*/ -static int fts3tokFilterMethod( - sqlite3_vtab_cursor *pCursor, /* The cursor used for this query */ - int idxNum, /* Strategy index */ - const char *idxStr, /* Unused */ - int nVal, /* Number of elements in apVal */ - sqlite3_value **apVal /* Arguments for the indexing scheme */ -){ - int rc = SQLITE_ERROR; - Fts3tokCursor *pCsr = (Fts3tokCursor *)pCursor; - Fts3tokTable *pTab = (Fts3tokTable *)(pCursor->pVtab); - UNUSED_PARAMETER(idxStr); - UNUSED_PARAMETER(nVal); - - fts3tokResetCursor(pCsr); - if( idxNum==1 ){ - const char *zByte = (const char *)sqlite3_value_text(apVal[0]); - int nByte = sqlite3_value_bytes(apVal[0]); - pCsr->zInput = sqlite3_malloc(nByte+1); - if( pCsr->zInput==0 ){ - rc = SQLITE_NOMEM; - }else{ - memcpy(pCsr->zInput, zByte, nByte); - pCsr->zInput[nByte] = 0; - rc = pTab->pMod->xOpen(pTab->pTok, pCsr->zInput, nByte, &pCsr->pCsr); - if( rc==SQLITE_OK ){ - pCsr->pCsr->pTokenizer = pTab->pTok; - } - } - } - - if( rc!=SQLITE_OK ) return rc; - return fts3tokNextMethod(pCursor); -} - -/* -** xEof - Return true if the cursor is at EOF, or false otherwise. -*/ -static int fts3tokEofMethod(sqlite3_vtab_cursor *pCursor){ - Fts3tokCursor *pCsr = (Fts3tokCursor *)pCursor; - return (pCsr->zToken==0); -} - -/* -** xColumn - Return a column value. -*/ -static int fts3tokColumnMethod( - sqlite3_vtab_cursor *pCursor, /* Cursor to retrieve value from */ - sqlite3_context *pCtx, /* Context for sqlite3_result_xxx() calls */ - int iCol /* Index of column to read value from */ -){ - Fts3tokCursor *pCsr = (Fts3tokCursor *)pCursor; - - /* CREATE TABLE x(input, token, start, end, position) */ - switch( iCol ){ - case 0: - sqlite3_result_text(pCtx, pCsr->zInput, -1, SQLITE_TRANSIENT); - break; - case 1: - sqlite3_result_text(pCtx, pCsr->zToken, pCsr->nToken, SQLITE_TRANSIENT); - break; - case 2: - sqlite3_result_int(pCtx, pCsr->iStart); - break; - case 3: - sqlite3_result_int(pCtx, pCsr->iEnd); - break; - default: - assert( iCol==4 ); - sqlite3_result_int(pCtx, pCsr->iPos); - break; - } - return SQLITE_OK; -} - -/* -** xRowid - Return the current rowid for the cursor. 
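**
** (Editorial sketch, not part of the deleted file: given the column layout
** implemented by fts3tokColumnMethod() above and the hypothetical "tok1"
** table from the earlier example, the query
**
**   SELECT rowid, token, start, end, position
**   FROM tok1 WHERE input = 'Hello, World';
**
** would be expected to return the rows (1,'hello',0,5,0) and
** (2,'world',7,12,1): byte offsets into the input, token text case-folded
** by the "simple" tokenizer.)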
-*/ -static int fts3tokRowidMethod( - sqlite3_vtab_cursor *pCursor, /* Cursor to retrieve value from */ - sqlite_int64 *pRowid /* OUT: Rowid value */ -){ - Fts3tokCursor *pCsr = (Fts3tokCursor *)pCursor; - *pRowid = (sqlite3_int64)pCsr->iRowid; - return SQLITE_OK; -} - -/* -** Register the fts3tok module with database connection db. Return SQLITE_OK -** if successful or an error code if sqlite3_create_module() fails. -*/ -SQLITE_PRIVATE int sqlite3Fts3InitTok(sqlite3 *db, Fts3Hash *pHash){ - static const sqlite3_module fts3tok_module = { - 0, /* iVersion */ - fts3tokConnectMethod, /* xCreate */ - fts3tokConnectMethod, /* xConnect */ - fts3tokBestIndexMethod, /* xBestIndex */ - fts3tokDisconnectMethod, /* xDisconnect */ - fts3tokDisconnectMethod, /* xDestroy */ - fts3tokOpenMethod, /* xOpen */ - fts3tokCloseMethod, /* xClose */ - fts3tokFilterMethod, /* xFilter */ - fts3tokNextMethod, /* xNext */ - fts3tokEofMethod, /* xEof */ - fts3tokColumnMethod, /* xColumn */ - fts3tokRowidMethod, /* xRowid */ - 0, /* xUpdate */ - 0, /* xBegin */ - 0, /* xSync */ - 0, /* xCommit */ - 0, /* xRollback */ - 0, /* xFindFunction */ - 0, /* xRename */ - 0, /* xSavepoint */ - 0, /* xRelease */ - 0 /* xRollbackTo */ - }; - int rc; /* Return code */ - - rc = sqlite3_create_module(db, "fts3tokenize", &fts3tok_module, (void*)pHash); - return rc; -} - -#endif /* !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS3) */ - -/************** End of fts3_tokenize_vtab.c **********************************/ -/************** Begin file fts3_write.c **************************************/ -/* -** 2009 Oct 23 -** -** The author disclaims copyright to this source code. In place of -** a legal notice, here is a blessing: -** -** May you do good and not evil. -** May you find forgiveness for yourself and forgive others. -** May you share freely, never taking more than you give. -** -****************************************************************************** -** -** This file is part of the SQLite FTS3 extension module. Specifically, -** this file contains code to insert, update and delete rows from FTS3 -** tables. It also contains code to merge FTS3 b-tree segments. Some -** of the sub-routines used to merge segments are also used by the query -** code in fts3.c. -*/ - -#if !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS3) - -/* #include */ -/* #include */ -/* #include */ - - -#define FTS_MAX_APPENDABLE_HEIGHT 16 - -/* -** When full-text index nodes are loaded from disk, the buffer that they -** are loaded into has the following number of bytes of padding at the end -** of it. i.e. if a full-text index node is 900 bytes in size, then a buffer -** of 920 bytes is allocated for it. -** -** This means that if we have a pointer into a buffer containing node data, -** it is always safe to read up to two varints from it without risking an -** overread, even if the node data is corrupted. -*/ -#define FTS3_NODE_PADDING (FTS3_VARINT_MAX*2) - -/* -** Under certain circumstances, b-tree nodes (doclists) can be loaded into -** memory incrementally instead of all at once. This can be a big performance -** win (reduced IO and CPU) if SQLite stops calling the virtual table xNext() -** method before retrieving all query results (as may happen, for example, -** if a query has a LIMIT clause). -** -** Incremental loading is used for b-tree nodes FTS3_NODE_CHUNK_THRESHOLD -** bytes and larger. Nodes are loaded in chunks of FTS3_NODE_CHUNKSIZE bytes. -** The code is written so that the hard lower-limit for each of these values -** is 1. 
Clearly such small values would be inefficient, but can be useful -** for testing purposes. -** -** If this module is built with SQLITE_TEST defined, these constants may -** be overridden at runtime for testing purposes. File fts3_test.c contains -** a Tcl interface to read and write the values. -*/ -#ifdef SQLITE_TEST -int test_fts3_node_chunksize = (4*1024); -int test_fts3_node_chunk_threshold = (4*1024)*4; -# define FTS3_NODE_CHUNKSIZE test_fts3_node_chunksize -# define FTS3_NODE_CHUNK_THRESHOLD test_fts3_node_chunk_threshold -#else -# define FTS3_NODE_CHUNKSIZE (4*1024) -# define FTS3_NODE_CHUNK_THRESHOLD (FTS3_NODE_CHUNKSIZE*4) -#endif - -/* -** The two values that may be meaningfully bound to the :1 parameter in -** statements SQL_REPLACE_STAT and SQL_SELECT_STAT. -*/ -#define FTS_STAT_DOCTOTAL 0 -#define FTS_STAT_INCRMERGEHINT 1 -#define FTS_STAT_AUTOINCRMERGE 2 - -/* -** If FTS_LOG_MERGES is defined, call sqlite3_log() to report each automatic -** and incremental merge operation that takes place. This is used for -** debugging FTS only, it should not usually be turned on in production -** systems. -*/ -#ifdef FTS3_LOG_MERGES -static void fts3LogMerge(int nMerge, sqlite3_int64 iAbsLevel){ - sqlite3_log(SQLITE_OK, "%d-way merge from level %d", nMerge, (int)iAbsLevel); -} -#else -#define fts3LogMerge(x, y) -#endif - - -typedef struct PendingList PendingList; -typedef struct SegmentNode SegmentNode; -typedef struct SegmentWriter SegmentWriter; - -/* -** An instance of the following data structure is used to build doclists -** incrementally. See function fts3PendingListAppend() for details. -*/ -struct PendingList { - int nData; - char *aData; - int nSpace; - sqlite3_int64 iLastDocid; - sqlite3_int64 iLastCol; - sqlite3_int64 iLastPos; -}; - - -/* -** Each cursor has a (possibly empty) linked list of the following objects. -*/ -struct Fts3DeferredToken { - Fts3PhraseToken *pToken; /* Pointer to corresponding expr token */ - int iCol; /* Column token must occur in */ - Fts3DeferredToken *pNext; /* Next in list of deferred tokens */ - PendingList *pList; /* Doclist is assembled here */ -}; - -/* -** An instance of this structure is used to iterate through the terms on -** a contiguous set of segment b-tree leaf nodes. Although the details of -** this structure are only manipulated by code in this file, opaque handles -** of type Fts3SegReader* are also used by code in fts3.c to iterate through -** terms when querying the full-text index. See functions: -** -** sqlite3Fts3SegReaderNew() -** sqlite3Fts3SegReaderFree() -** sqlite3Fts3SegReaderIterate() -** -** Methods used to manipulate Fts3SegReader structures: -** -** fts3SegReaderNext() -** fts3SegReaderFirstDocid() -** fts3SegReaderNextDocid() -*/ -struct Fts3SegReader { - int iIdx; /* Index within level, or 0x7FFFFFFF for PT */ - u8 bLookup; /* True for a lookup only */ - u8 rootOnly; /* True for a root-only reader */ - - sqlite3_int64 iStartBlock; /* Rowid of first leaf block to traverse */ - sqlite3_int64 iLeafEndBlock; /* Rowid of final leaf block to traverse */ - sqlite3_int64 iEndBlock; /* Rowid of final block in segment (or 0) */ - sqlite3_int64 iCurrentBlock; /* Current leaf block (or 0) */ - - char *aNode; /* Pointer to node data (or NULL) */ - int nNode; /* Size of buffer at aNode (or 0) */ - int nPopulate; /* If >0, bytes of buffer aNode[] loaded */ - sqlite3_blob *pBlob; /* If not NULL, blob handle to read node */ - - Fts3HashElem **ppNextElem; - - /* Variables set by fts3SegReaderNext(). 
These may be read directly - ** by the caller. They are valid from the time SegmentReaderNew() returns - ** until SegmentReaderNext() returns something other than SQLITE_OK - ** (i.e. SQLITE_DONE). - */ - int nTerm; /* Number of bytes in current term */ - char *zTerm; /* Pointer to current term */ - int nTermAlloc; /* Allocated size of zTerm buffer */ - char *aDoclist; /* Pointer to doclist of current entry */ - int nDoclist; /* Size of doclist in current entry */ - - /* The following variables are used by fts3SegReaderNextDocid() to iterate - ** through the current doclist (aDoclist/nDoclist). - */ - char *pOffsetList; - int nOffsetList; /* For descending pending seg-readers only */ - sqlite3_int64 iDocid; -}; - -#define fts3SegReaderIsPending(p) ((p)->ppNextElem!=0) -#define fts3SegReaderIsRootOnly(p) ((p)->rootOnly!=0) - -/* -** An instance of this structure is used to create a segment b-tree in the -** database. The internal details of this type are only accessed by the -** following functions: -** -** fts3SegWriterAdd() -** fts3SegWriterFlush() -** fts3SegWriterFree() -*/ -struct SegmentWriter { - SegmentNode *pTree; /* Pointer to interior tree structure */ - sqlite3_int64 iFirst; /* First slot in %_segments written */ - sqlite3_int64 iFree; /* Next free slot in %_segments */ - char *zTerm; /* Pointer to previous term buffer */ - int nTerm; /* Number of bytes in zTerm */ - int nMalloc; /* Size of malloc'd buffer at zMalloc */ - char *zMalloc; /* Malloc'd space (possibly) used for zTerm */ - int nSize; /* Size of allocation at aData */ - int nData; /* Bytes of data in aData */ - char *aData; /* Pointer to block from malloc() */ - i64 nLeafData; /* Number of bytes of leaf data written */ -}; - -/* -** Type SegmentNode is used by the following three functions to create -** the interior part of the segment b+-tree structures (everything except -** the leaf nodes). These functions and type are only ever used by code -** within the fts3SegWriterXXX() family of functions described above. -** -** fts3NodeAddTerm() -** fts3NodeWrite() -** fts3NodeFree() -** -** When a b+tree is written to the database (either as a result of a merge -** or the pending-terms table being flushed), leaves are written into the -** database file as soon as they are completely populated. The interior of -** the tree is assembled in memory and written out only once all leaves have -** been populated and stored. This is Ok, as the b+-tree fanout is usually -** very large, meaning that the interior of the tree consumes relatively -** little memory. -*/ -struct SegmentNode { - SegmentNode *pParent; /* Parent node (or NULL for root node) */ - SegmentNode *pRight; /* Pointer to right-sibling */ - SegmentNode *pLeftmost; /* Pointer to left-most node of this depth */ - int nEntry; /* Number of terms written to node so far */ - char *zTerm; /* Pointer to previous term buffer */ - int nTerm; /* Number of bytes in zTerm */ - int nMalloc; /* Size of malloc'd buffer at zMalloc */ - char *zMalloc; /* Malloc'd space (possibly) used for zTerm */ - int nData; /* Bytes of valid data so far */ - char *aData; /* Node data */ -}; - -/* -** Valid values for the second argument to fts3SqlStmt(). 
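**
** (Editorial sketch, not from the deleted file: a prepared statement for one
** of these constants is obtained, stepped and then reset, not finalized,
** so that it can be reused, e.g.:)
**
**   sqlite3_stmt *pStmt = 0;
**   rc = fts3SqlStmt(p, SQL_DELETE_ALL_SEGMENTS, &pStmt, 0);
**   if( rc==SQLITE_OK ){
**     sqlite3_step(pStmt);
**     rc = sqlite3_reset(pStmt);
**   }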
-*/ -#define SQL_DELETE_CONTENT 0 -#define SQL_IS_EMPTY 1 -#define SQL_DELETE_ALL_CONTENT 2 -#define SQL_DELETE_ALL_SEGMENTS 3 -#define SQL_DELETE_ALL_SEGDIR 4 -#define SQL_DELETE_ALL_DOCSIZE 5 -#define SQL_DELETE_ALL_STAT 6 -#define SQL_SELECT_CONTENT_BY_ROWID 7 -#define SQL_NEXT_SEGMENT_INDEX 8 -#define SQL_INSERT_SEGMENTS 9 -#define SQL_NEXT_SEGMENTS_ID 10 -#define SQL_INSERT_SEGDIR 11 -#define SQL_SELECT_LEVEL 12 -#define SQL_SELECT_LEVEL_RANGE 13 -#define SQL_SELECT_LEVEL_COUNT 14 -#define SQL_SELECT_SEGDIR_MAX_LEVEL 15 -#define SQL_DELETE_SEGDIR_LEVEL 16 -#define SQL_DELETE_SEGMENTS_RANGE 17 -#define SQL_CONTENT_INSERT 18 -#define SQL_DELETE_DOCSIZE 19 -#define SQL_REPLACE_DOCSIZE 20 -#define SQL_SELECT_DOCSIZE 21 -#define SQL_SELECT_STAT 22 -#define SQL_REPLACE_STAT 23 - -#define SQL_SELECT_ALL_PREFIX_LEVEL 24 -#define SQL_DELETE_ALL_TERMS_SEGDIR 25 -#define SQL_DELETE_SEGDIR_RANGE 26 -#define SQL_SELECT_ALL_LANGID 27 -#define SQL_FIND_MERGE_LEVEL 28 -#define SQL_MAX_LEAF_NODE_ESTIMATE 29 -#define SQL_DELETE_SEGDIR_ENTRY 30 -#define SQL_SHIFT_SEGDIR_ENTRY 31 -#define SQL_SELECT_SEGDIR 32 -#define SQL_CHOMP_SEGDIR 33 -#define SQL_SEGMENT_IS_APPENDABLE 34 -#define SQL_SELECT_INDEXES 35 -#define SQL_SELECT_MXLEVEL 36 - -#define SQL_SELECT_LEVEL_RANGE2 37 -#define SQL_UPDATE_LEVEL_IDX 38 -#define SQL_UPDATE_LEVEL 39 - -/* -** This function is used to obtain an SQLite prepared statement handle -** for the statement identified by the second argument. If successful, -** *pp is set to the requested statement handle and SQLITE_OK returned. -** Otherwise, an SQLite error code is returned and *pp is set to 0. -** -** If argument apVal is not NULL, then it must point to an array with -** at least as many entries as the requested statement has bound -** parameters. The values are bound to the statements parameters before -** returning. -*/ -static int fts3SqlStmt( - Fts3Table *p, /* Virtual table handle */ - int eStmt, /* One of the SQL_XXX constants above */ - sqlite3_stmt **pp, /* OUT: Statement handle */ - sqlite3_value **apVal /* Values to bind to statement */ -){ - const char *azSql[] = { -/* 0 */ "DELETE FROM %Q.'%q_content' WHERE rowid = ?", -/* 1 */ "SELECT NOT EXISTS(SELECT docid FROM %Q.'%q_content' WHERE rowid!=?)", -/* 2 */ "DELETE FROM %Q.'%q_content'", -/* 3 */ "DELETE FROM %Q.'%q_segments'", -/* 4 */ "DELETE FROM %Q.'%q_segdir'", -/* 5 */ "DELETE FROM %Q.'%q_docsize'", -/* 6 */ "DELETE FROM %Q.'%q_stat'", -/* 7 */ "SELECT %s WHERE rowid=?", -/* 8 */ "SELECT (SELECT max(idx) FROM %Q.'%q_segdir' WHERE level = ?) + 1", -/* 9 */ "REPLACE INTO %Q.'%q_segments'(blockid, block) VALUES(?, ?)", -/* 10 */ "SELECT coalesce((SELECT max(blockid) FROM %Q.'%q_segments') + 1, 1)", -/* 11 */ "REPLACE INTO %Q.'%q_segdir' VALUES(?,?,?,?,?,?)", - - /* Return segments in order from oldest to newest.*/ -/* 12 */ "SELECT idx, start_block, leaves_end_block, end_block, root " - "FROM %Q.'%q_segdir' WHERE level = ? ORDER BY idx ASC", -/* 13 */ "SELECT idx, start_block, leaves_end_block, end_block, root " - "FROM %Q.'%q_segdir' WHERE level BETWEEN ? AND ?" - "ORDER BY level DESC, idx ASC", - -/* 14 */ "SELECT count(*) FROM %Q.'%q_segdir' WHERE level = ?", -/* 15 */ "SELECT max(level) FROM %Q.'%q_segdir' WHERE level BETWEEN ? AND ?", - -/* 16 */ "DELETE FROM %Q.'%q_segdir' WHERE level = ?", -/* 17 */ "DELETE FROM %Q.'%q_segments' WHERE blockid BETWEEN ? 
AND ?", -/* 18 */ "INSERT INTO %Q.'%q_content' VALUES(%s)", -/* 19 */ "DELETE FROM %Q.'%q_docsize' WHERE docid = ?", -/* 20 */ "REPLACE INTO %Q.'%q_docsize' VALUES(?,?)", -/* 21 */ "SELECT size FROM %Q.'%q_docsize' WHERE docid=?", -/* 22 */ "SELECT value FROM %Q.'%q_stat' WHERE id=?", -/* 23 */ "REPLACE INTO %Q.'%q_stat' VALUES(?,?)", -/* 24 */ "", -/* 25 */ "", - -/* 26 */ "DELETE FROM %Q.'%q_segdir' WHERE level BETWEEN ? AND ?", -/* 27 */ "SELECT DISTINCT level / (1024 * ?) FROM %Q.'%q_segdir'", - -/* This statement is used to determine which level to read the input from -** when performing an incremental merge. It returns the absolute level number -** of the oldest level in the db that contains at least ? segments. Or, -** if no level in the FTS index contains more than ? segments, the statement -** returns zero rows. */ -/* 28 */ "SELECT level FROM %Q.'%q_segdir' GROUP BY level HAVING count(*)>=?" - " ORDER BY (level %% 1024) ASC LIMIT 1", - -/* Estimate the upper limit on the number of leaf nodes in a new segment -** created by merging the oldest :2 segments from absolute level :1. See -** function sqlite3Fts3Incrmerge() for details. */ -/* 29 */ "SELECT 2 * total(1 + leaves_end_block - start_block) " - " FROM %Q.'%q_segdir' WHERE level = ? AND idx < ?", - -/* SQL_DELETE_SEGDIR_ENTRY -** Delete the %_segdir entry on absolute level :1 with index :2. */ -/* 30 */ "DELETE FROM %Q.'%q_segdir' WHERE level = ? AND idx = ?", - -/* SQL_SHIFT_SEGDIR_ENTRY -** Modify the idx value for the segment with idx=:3 on absolute level :2 -** to :1. */ -/* 31 */ "UPDATE %Q.'%q_segdir' SET idx = ? WHERE level=? AND idx=?", - -/* SQL_SELECT_SEGDIR -** Read a single entry from the %_segdir table. The entry from absolute -** level :1 with index value :2. */ -/* 32 */ "SELECT idx, start_block, leaves_end_block, end_block, root " - "FROM %Q.'%q_segdir' WHERE level = ? AND idx = ?", - -/* SQL_CHOMP_SEGDIR -** Update the start_block (:1) and root (:2) fields of the %_segdir -** entry located on absolute level :3 with index :4. */ -/* 33 */ "UPDATE %Q.'%q_segdir' SET start_block = ?, root = ?" - "WHERE level = ? AND idx = ?", - -/* SQL_SEGMENT_IS_APPENDABLE -** Return a single row if the segment with end_block=? is appendable. Or -** no rows otherwise. */ -/* 34 */ "SELECT 1 FROM %Q.'%q_segments' WHERE blockid=? AND block IS NULL", - -/* SQL_SELECT_INDEXES -** Return the list of valid segment indexes for absolute level ? */ -/* 35 */ "SELECT idx FROM %Q.'%q_segdir' WHERE level=? ORDER BY 1 ASC", - -/* SQL_SELECT_MXLEVEL -** Return the largest relative level in the FTS index or indexes. */ -/* 36 */ "SELECT max( level %% 1024 ) FROM %Q.'%q_segdir'", - - /* Return segments in order from oldest to newest.*/ -/* 37 */ "SELECT level, idx, end_block " - "FROM %Q.'%q_segdir' WHERE level BETWEEN ? AND ? " - "ORDER BY level DESC, idx ASC", - - /* Update statements used while promoting segments */ -/* 38 */ "UPDATE OR FAIL %Q.'%q_segdir' SET level=-1,idx=? " - "WHERE level=? AND idx=?", -/* 39 */ "UPDATE OR FAIL %Q.'%q_segdir' SET level=? 
WHERE level=-1" - - }; - int rc = SQLITE_OK; - sqlite3_stmt *pStmt; - - assert( SizeofArray(azSql)==SizeofArray(p->aStmt) ); - assert( eStmt=0 ); - - pStmt = p->aStmt[eStmt]; - if( !pStmt ){ - char *zSql; - if( eStmt==SQL_CONTENT_INSERT ){ - zSql = sqlite3_mprintf(azSql[eStmt], p->zDb, p->zName, p->zWriteExprlist); - }else if( eStmt==SQL_SELECT_CONTENT_BY_ROWID ){ - zSql = sqlite3_mprintf(azSql[eStmt], p->zReadExprlist); - }else{ - zSql = sqlite3_mprintf(azSql[eStmt], p->zDb, p->zName); - } - if( !zSql ){ - rc = SQLITE_NOMEM; - }else{ - rc = sqlite3_prepare_v2(p->db, zSql, -1, &pStmt, NULL); - sqlite3_free(zSql); - assert( rc==SQLITE_OK || pStmt==0 ); - p->aStmt[eStmt] = pStmt; - } - } - if( apVal ){ - int i; - int nParam = sqlite3_bind_parameter_count(pStmt); - for(i=0; rc==SQLITE_OK && inPendingData==0 ){ - sqlite3_stmt *pStmt; - rc = fts3SqlStmt(p, SQL_DELETE_SEGDIR_LEVEL, &pStmt, 0); - if( rc==SQLITE_OK ){ - sqlite3_bind_null(pStmt, 1); - sqlite3_step(pStmt); - rc = sqlite3_reset(pStmt); - } - } - - return rc; -} - -/* -** FTS maintains a separate indexes for each language-id (a 32-bit integer). -** Within each language id, a separate index is maintained to store the -** document terms, and each configured prefix size (configured the FTS -** "prefix=" option). And each index consists of multiple levels ("relative -** levels"). -** -** All three of these values (the language id, the specific index and the -** level within the index) are encoded in 64-bit integer values stored -** in the %_segdir table on disk. This function is used to convert three -** separate component values into the single 64-bit integer value that -** can be used to query the %_segdir table. -** -** Specifically, each language-id/index combination is allocated 1024 -** 64-bit integer level values ("absolute levels"). The main terms index -** for language-id 0 is allocate values 0-1023. The first prefix index -** (if any) for language-id 0 is allocated values 1024-2047. And so on. -** Language 1 indexes are allocated immediately following language 0. -** -** So, for a system with nPrefix prefix indexes configured, the block of -** absolute levels that corresponds to language-id iLangid and index -** iIndex starts at absolute level ((iLangid * (nPrefix+1) + iIndex) * 1024). -*/ -static sqlite3_int64 getAbsoluteLevel( - Fts3Table *p, /* FTS3 table handle */ - int iLangid, /* Language id */ - int iIndex, /* Index in p->aIndex[] */ - int iLevel /* Level of segments */ -){ - sqlite3_int64 iBase; /* First absolute level for iLangid/iIndex */ - assert( iLangid>=0 ); - assert( p->nIndex>0 ); - assert( iIndex>=0 && iIndexnIndex ); - - iBase = ((sqlite3_int64)iLangid * p->nIndex + iIndex) * FTS3_SEGDIR_MAXLEVEL; - return iBase + iLevel; -} - -/* -** Set *ppStmt to a statement handle that may be used to iterate through -** all rows in the %_segdir table, from oldest to newest. If successful, -** return SQLITE_OK. If an error occurs while preparing the statement, -** return an SQLite error code. -** -** There is only ever one instance of this SQL statement compiled for -** each FTS3 table. 
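**
** (Editorial note, a worked example for getAbsoluteLevel() above with
** hypothetical values: for a table with one prefix index configured,
** p->nIndex==2, so language-id 1, main terms index iIndex==0, relative
** level 3 maps to absolute level (1*2 + 0)*1024 + 3 == 2051.  An
** FTS3_SEGCURSOR_ALL scan of that language/index pair therefore binds the
** range 2048 to 3071 below.)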
-** -** The statement returns the following columns from the %_segdir table: -** -** 0: idx -** 1: start_block -** 2: leaves_end_block -** 3: end_block -** 4: root -*/ -SQLITE_PRIVATE int sqlite3Fts3AllSegdirs( - Fts3Table *p, /* FTS3 table */ - int iLangid, /* Language being queried */ - int iIndex, /* Index for p->aIndex[] */ - int iLevel, /* Level to select (relative level) */ - sqlite3_stmt **ppStmt /* OUT: Compiled statement */ -){ - int rc; - sqlite3_stmt *pStmt = 0; - - assert( iLevel==FTS3_SEGCURSOR_ALL || iLevel>=0 ); - assert( iLevel=0 && iIndexnIndex ); - - if( iLevel<0 ){ - /* "SELECT * FROM %_segdir WHERE level BETWEEN ? AND ? ORDER BY ..." */ - rc = fts3SqlStmt(p, SQL_SELECT_LEVEL_RANGE, &pStmt, 0); - if( rc==SQLITE_OK ){ - sqlite3_bind_int64(pStmt, 1, getAbsoluteLevel(p, iLangid, iIndex, 0)); - sqlite3_bind_int64(pStmt, 2, - getAbsoluteLevel(p, iLangid, iIndex, FTS3_SEGDIR_MAXLEVEL-1) - ); - } - }else{ - /* "SELECT * FROM %_segdir WHERE level = ? ORDER BY ..." */ - rc = fts3SqlStmt(p, SQL_SELECT_LEVEL, &pStmt, 0); - if( rc==SQLITE_OK ){ - sqlite3_bind_int64(pStmt, 1, getAbsoluteLevel(p, iLangid, iIndex,iLevel)); - } - } - *ppStmt = pStmt; - return rc; -} - - -/* -** Append a single varint to a PendingList buffer. SQLITE_OK is returned -** if successful, or an SQLite error code otherwise. -** -** This function also serves to allocate the PendingList structure itself. -** For example, to create a new PendingList structure containing two -** varints: -** -** PendingList *p = 0; -** fts3PendingListAppendVarint(&p, 1); -** fts3PendingListAppendVarint(&p, 2); -*/ -static int fts3PendingListAppendVarint( - PendingList **pp, /* IN/OUT: Pointer to PendingList struct */ - sqlite3_int64 i /* Value to append to data */ -){ - PendingList *p = *pp; - - /* Allocate or grow the PendingList as required. */ - if( !p ){ - p = sqlite3_malloc(sizeof(*p) + 100); - if( !p ){ - return SQLITE_NOMEM; - } - p->nSpace = 100; - p->aData = (char *)&p[1]; - p->nData = 0; - } - else if( p->nData+FTS3_VARINT_MAX+1>p->nSpace ){ - int nNew = p->nSpace * 2; - p = sqlite3_realloc(p, sizeof(*p) + nNew); - if( !p ){ - sqlite3_free(*pp); - *pp = 0; - return SQLITE_NOMEM; - } - p->nSpace = nNew; - p->aData = (char *)&p[1]; - } - - /* Append the new serialized varint to the end of the list. */ - p->nData += sqlite3Fts3PutVarint(&p->aData[p->nData], i); - p->aData[p->nData] = '\0'; - *pp = p; - return SQLITE_OK; -} - -/* -** Add a docid/column/position entry to a PendingList structure. Non-zero -** is returned if the structure is sqlite3_realloced as part of adding -** the entry. Otherwise, zero. -** -** If an OOM error occurs, *pRc is set to SQLITE_NOMEM before returning. -** Zero is always returned in this case. Otherwise, if no OOM error occurs, -** it is set to SQLITE_OK. -*/ -static int fts3PendingListAppend( - PendingList **pp, /* IN/OUT: PendingList structure */ - sqlite3_int64 iDocid, /* Docid for entry to add */ - sqlite3_int64 iCol, /* Column for entry to add */ - sqlite3_int64 iPos, /* Position of term for entry to add */ - int *pRc /* OUT: Return code */ -){ - PendingList *p = *pp; - int rc = SQLITE_OK; - - assert( !p || p->iLastDocid<=iDocid ); - - if( !p || p->iLastDocid!=iDocid ){ - sqlite3_int64 iDelta = iDocid - (p ? 
p->iLastDocid : 0); - if( p ){ - assert( p->nDatanSpace ); - assert( p->aData[p->nData]==0 ); - p->nData++; - } - if( SQLITE_OK!=(rc = fts3PendingListAppendVarint(&p, iDelta)) ){ - goto pendinglistappend_out; - } - p->iLastCol = -1; - p->iLastPos = 0; - p->iLastDocid = iDocid; - } - if( iCol>0 && p->iLastCol!=iCol ){ - if( SQLITE_OK!=(rc = fts3PendingListAppendVarint(&p, 1)) - || SQLITE_OK!=(rc = fts3PendingListAppendVarint(&p, iCol)) - ){ - goto pendinglistappend_out; - } - p->iLastCol = iCol; - p->iLastPos = 0; - } - if( iCol>=0 ){ - assert( iPos>p->iLastPos || (iPos==0 && p->iLastPos==0) ); - rc = fts3PendingListAppendVarint(&p, 2+iPos-p->iLastPos); - if( rc==SQLITE_OK ){ - p->iLastPos = iPos; - } - } - - pendinglistappend_out: - *pRc = rc; - if( p!=*pp ){ - *pp = p; - return 1; - } - return 0; -} - -/* -** Free a PendingList object allocated by fts3PendingListAppend(). -*/ -static void fts3PendingListDelete(PendingList *pList){ - sqlite3_free(pList); -} - -/* -** Add an entry to one of the pending-terms hash tables. -*/ -static int fts3PendingTermsAddOne( - Fts3Table *p, - int iCol, - int iPos, - Fts3Hash *pHash, /* Pending terms hash table to add entry to */ - const char *zToken, - int nToken -){ - PendingList *pList; - int rc = SQLITE_OK; - - pList = (PendingList *)fts3HashFind(pHash, zToken, nToken); - if( pList ){ - p->nPendingData -= (pList->nData + nToken + sizeof(Fts3HashElem)); - } - if( fts3PendingListAppend(&pList, p->iPrevDocid, iCol, iPos, &rc) ){ - if( pList==fts3HashInsert(pHash, zToken, nToken, pList) ){ - /* Malloc failed while inserting the new entry. This can only - ** happen if there was no previous entry for this token. - */ - assert( 0==fts3HashFind(pHash, zToken, nToken) ); - sqlite3_free(pList); - rc = SQLITE_NOMEM; - } - } - if( rc==SQLITE_OK ){ - p->nPendingData += (pList->nData + nToken + sizeof(Fts3HashElem)); - } - return rc; -} - -/* -** Tokenize the nul-terminated string zText and add all tokens to the -** pending-terms hash-table. The docid used is that currently stored in -** p->iPrevDocid, and the column is specified by argument iCol. -** -** If successful, SQLITE_OK is returned. Otherwise, an SQLite error code. -*/ -static int fts3PendingTermsAdd( - Fts3Table *p, /* Table into which text will be inserted */ - int iLangid, /* Language id to use */ - const char *zText, /* Text of document to be inserted */ - int iCol, /* Column into which text is being inserted */ - u32 *pnWord /* IN/OUT: Incr. by number tokens inserted */ -){ - int rc; - int iStart = 0; - int iEnd = 0; - int iPos = 0; - int nWord = 0; - - char const *zToken; - int nToken = 0; - - sqlite3_tokenizer *pTokenizer = p->pTokenizer; - sqlite3_tokenizer_module const *pModule = pTokenizer->pModule; - sqlite3_tokenizer_cursor *pCsr; - int (*xNext)(sqlite3_tokenizer_cursor *pCursor, - const char**,int*,int*,int*,int*); - - assert( pTokenizer && pModule ); - - /* If the user has inserted a NULL value, this function may be called with - ** zText==0. In this case, add zero token entries to the hash table and - ** return early. */ - if( zText==0 ){ - *pnWord = 0; - return SQLITE_OK; - } - - rc = sqlite3Fts3OpenTokenizer(pTokenizer, iLangid, zText, -1, &pCsr); - if( rc!=SQLITE_OK ){ - return rc; - } - - xNext = pModule->xNext; - while( SQLITE_OK==rc - && SQLITE_OK==(rc = xNext(pCsr, &zToken, &nToken, &iStart, &iEnd, &iPos)) - ){ - int i; - if( iPos>=nWord ) nWord = iPos+1; - - /* Positions cannot be negative; we use -1 as a terminator internally. - ** Tokens must have a non-zero length. 
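**
** (Editorial note, a worked example for fts3PendingListAppend() above with
** hypothetical values: the first hit for docid 43 in column 1 at position 5
** appends the varints 43 (docid delta), 1 and 1 (column marker and column
** number), then 7 (2+5-0); a second hit in the same column at position 9
** appends only the varint 6 (2+9-5).)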
- */ - if( iPos<0 || !zToken || nToken<=0 ){ - rc = SQLITE_ERROR; - break; - } - - /* Add the term to the terms index */ - rc = fts3PendingTermsAddOne( - p, iCol, iPos, &p->aIndex[0].hPending, zToken, nToken - ); - - /* Add the term to each of the prefix indexes that it is not too - ** short for. */ - for(i=1; rc==SQLITE_OK && inIndex; i++){ - struct Fts3Index *pIndex = &p->aIndex[i]; - if( nTokennPrefix ) continue; - rc = fts3PendingTermsAddOne( - p, iCol, iPos, &pIndex->hPending, zToken, pIndex->nPrefix - ); - } - } - - pModule->xClose(pCsr); - *pnWord += nWord; - return (rc==SQLITE_DONE ? SQLITE_OK : rc); -} - -/* -** Calling this function indicates that subsequent calls to -** fts3PendingTermsAdd() are to add term/position-list pairs for the -** contents of the document with docid iDocid. -*/ -static int fts3PendingTermsDocid( - Fts3Table *p, /* Full-text table handle */ - int iLangid, /* Language id of row being written */ - sqlite_int64 iDocid /* Docid of row being written */ -){ - assert( iLangid>=0 ); - - /* TODO(shess) Explore whether partially flushing the buffer on - ** forced-flush would provide better performance. I suspect that if - ** we ordered the doclists by size and flushed the largest until the - ** buffer was half empty, that would let the less frequent terms - ** generate longer doclists. - */ - if( iDocid<=p->iPrevDocid - || p->iPrevLangid!=iLangid - || p->nPendingData>p->nMaxPendingData - ){ - int rc = sqlite3Fts3PendingTermsFlush(p); - if( rc!=SQLITE_OK ) return rc; - } - p->iPrevDocid = iDocid; - p->iPrevLangid = iLangid; - return SQLITE_OK; -} - -/* -** Discard the contents of the pending-terms hash tables. -*/ -SQLITE_PRIVATE void sqlite3Fts3PendingTermsClear(Fts3Table *p){ - int i; - for(i=0; inIndex; i++){ - Fts3HashElem *pElem; - Fts3Hash *pHash = &p->aIndex[i].hPending; - for(pElem=fts3HashFirst(pHash); pElem; pElem=fts3HashNext(pElem)){ - PendingList *pList = (PendingList *)fts3HashData(pElem); - fts3PendingListDelete(pList); - } - fts3HashClear(pHash); - } - p->nPendingData = 0; -} - -/* -** This function is called by the xUpdate() method as part of an INSERT -** operation. It adds entries for each term in the new record to the -** pendingTerms hash table. -** -** Argument apVal is the same as the similarly named argument passed to -** fts3InsertData(). Parameter iDocid is the docid of the new row. -*/ -static int fts3InsertTerms( - Fts3Table *p, - int iLangid, - sqlite3_value **apVal, - u32 *aSz -){ - int i; /* Iterator variable */ - for(i=2; inColumn+2; i++){ - int iCol = i-2; - if( p->abNotindexed[iCol]==0 ){ - const char *zText = (const char *)sqlite3_value_text(apVal[i]); - int rc = fts3PendingTermsAdd(p, iLangid, zText, iCol, &aSz[iCol]); - if( rc!=SQLITE_OK ){ - return rc; - } - aSz[p->nColumn] += sqlite3_value_bytes(apVal[i]); - } - } - return SQLITE_OK; -} - -/* -** This function is called by the xUpdate() method for an INSERT operation. -** The apVal parameter is passed a copy of the apVal argument passed by -** SQLite to the xUpdate() method. i.e: -** -** apVal[0] Not used for INSERT. -** apVal[1] rowid -** apVal[2] Left-most user-defined column -** ... 
-** apVal[p->nColumn+1] Right-most user-defined column -** apVal[p->nColumn+2] Hidden column with same name as table -** apVal[p->nColumn+3] Hidden "docid" column (alias for rowid) -** apVal[p->nColumn+4] Hidden languageid column -*/ -static int fts3InsertData( - Fts3Table *p, /* Full-text table */ - sqlite3_value **apVal, /* Array of values to insert */ - sqlite3_int64 *piDocid /* OUT: Docid for row just inserted */ -){ - int rc; /* Return code */ - sqlite3_stmt *pContentInsert; /* INSERT INTO %_content VALUES(...) */ - - if( p->zContentTbl ){ - sqlite3_value *pRowid = apVal[p->nColumn+3]; - if( sqlite3_value_type(pRowid)==SQLITE_NULL ){ - pRowid = apVal[1]; - } - if( sqlite3_value_type(pRowid)!=SQLITE_INTEGER ){ - return SQLITE_CONSTRAINT; - } - *piDocid = sqlite3_value_int64(pRowid); - return SQLITE_OK; - } - - /* Locate the statement handle used to insert data into the %_content - ** table. The SQL for this statement is: - ** - ** INSERT INTO %_content VALUES(?, ?, ?, ...) - ** - ** The statement features N '?' variables, where N is the number of user - ** defined columns in the FTS3 table, plus one for the docid field. - */ - rc = fts3SqlStmt(p, SQL_CONTENT_INSERT, &pContentInsert, &apVal[1]); - if( rc==SQLITE_OK && p->zLanguageid ){ - rc = sqlite3_bind_int( - pContentInsert, p->nColumn+2, - sqlite3_value_int(apVal[p->nColumn+4]) - ); - } - if( rc!=SQLITE_OK ) return rc; - - /* There is a quirk here. The users INSERT statement may have specified - ** a value for the "rowid" field, for the "docid" field, or for both. - ** Which is a problem, since "rowid" and "docid" are aliases for the - ** same value. For example: - ** - ** INSERT INTO fts3tbl(rowid, docid) VALUES(1, 2); - ** - ** In FTS3, this is an error. It is an error to specify non-NULL values - ** for both docid and some other rowid alias. - */ - if( SQLITE_NULL!=sqlite3_value_type(apVal[3+p->nColumn]) ){ - if( SQLITE_NULL==sqlite3_value_type(apVal[0]) - && SQLITE_NULL!=sqlite3_value_type(apVal[1]) - ){ - /* A rowid/docid conflict. */ - return SQLITE_ERROR; - } - rc = sqlite3_bind_value(pContentInsert, 1, apVal[3+p->nColumn]); - if( rc!=SQLITE_OK ) return rc; - } - - /* Execute the statement to insert the record. Set *piDocid to the - ** new docid value. - */ - sqlite3_step(pContentInsert); - rc = sqlite3_reset(pContentInsert); - - *piDocid = sqlite3_last_insert_rowid(p->db); - return rc; -} - - - -/* -** Remove all data from the FTS3 table. Clear the hash table containing -** pending terms. -*/ -static int fts3DeleteAll(Fts3Table *p, int bContent){ - int rc = SQLITE_OK; /* Return code */ - - /* Discard the contents of the pending-terms hash table. */ - sqlite3Fts3PendingTermsClear(p); - - /* Delete everything from the shadow tables. Except, leave %_content as - ** is if bContent is false. */ - assert( p->zContentTbl==0 || bContent==0 ); - if( bContent ) fts3SqlExec(&rc, p, SQL_DELETE_ALL_CONTENT, 0); - fts3SqlExec(&rc, p, SQL_DELETE_ALL_SEGMENTS, 0); - fts3SqlExec(&rc, p, SQL_DELETE_ALL_SEGDIR, 0); - if( p->bHasDocsize ){ - fts3SqlExec(&rc, p, SQL_DELETE_ALL_DOCSIZE, 0); - } - if( p->bHasStat ){ - fts3SqlExec(&rc, p, SQL_DELETE_ALL_STAT, 0); - } - return rc; -} - -/* -** -*/ -static int langidFromSelect(Fts3Table *p, sqlite3_stmt *pSelect){ - int iLangid = 0; - if( p->zLanguageid ) iLangid = sqlite3_column_int(pSelect, p->nColumn+1); - return iLangid; -} - -/* -** The first element in the apVal[] array is assumed to contain the docid -** (an integer) of a row about to be deleted. Remove all terms from the -** full-text index. 
-*/ -static void fts3DeleteTerms( - int *pRC, /* Result code */ - Fts3Table *p, /* The FTS table to delete from */ - sqlite3_value *pRowid, /* The docid to be deleted */ - u32 *aSz, /* Sizes of deleted document written here */ - int *pbFound /* OUT: Set to true if row really does exist */ -){ - int rc; - sqlite3_stmt *pSelect; - - assert( *pbFound==0 ); - if( *pRC ) return; - rc = fts3SqlStmt(p, SQL_SELECT_CONTENT_BY_ROWID, &pSelect, &pRowid); - if( rc==SQLITE_OK ){ - if( SQLITE_ROW==sqlite3_step(pSelect) ){ - int i; - int iLangid = langidFromSelect(p, pSelect); - rc = fts3PendingTermsDocid(p, iLangid, sqlite3_column_int64(pSelect, 0)); - for(i=1; rc==SQLITE_OK && i<=p->nColumn; i++){ - int iCol = i-1; - if( p->abNotindexed[iCol]==0 ){ - const char *zText = (const char *)sqlite3_column_text(pSelect, i); - rc = fts3PendingTermsAdd(p, iLangid, zText, -1, &aSz[iCol]); - aSz[p->nColumn] += sqlite3_column_bytes(pSelect, i); - } - } - if( rc!=SQLITE_OK ){ - sqlite3_reset(pSelect); - *pRC = rc; - return; - } - *pbFound = 1; - } - rc = sqlite3_reset(pSelect); - }else{ - sqlite3_reset(pSelect); - } - *pRC = rc; -} - -/* -** Forward declaration to account for the circular dependency between -** functions fts3SegmentMerge() and fts3AllocateSegdirIdx(). -*/ -static int fts3SegmentMerge(Fts3Table *, int, int, int); - -/* -** This function allocates a new level iLevel index in the segdir table. -** Usually, indexes are allocated within a level sequentially starting -** with 0, so the allocated index is one greater than the value returned -** by: -** -** SELECT max(idx) FROM %_segdir WHERE level = :iLevel -** -** However, if there are already FTS3_MERGE_COUNT indexes at the requested -** level, they are merged into a single level (iLevel+1) segment and the -** allocated index is 0. -** -** If successful, *piIdx is set to the allocated index slot and SQLITE_OK -** returned. Otherwise, an SQLite error code is returned. -*/ -static int fts3AllocateSegdirIdx( - Fts3Table *p, - int iLangid, /* Language id */ - int iIndex, /* Index for p->aIndex */ - int iLevel, - int *piIdx -){ - int rc; /* Return Code */ - sqlite3_stmt *pNextIdx; /* Query for next idx at level iLevel */ - int iNext = 0; /* Result of query pNextIdx */ - - assert( iLangid>=0 ); - assert( p->nIndex>=1 ); - - /* Set variable iNext to the next available segdir index at level iLevel. */ - rc = fts3SqlStmt(p, SQL_NEXT_SEGMENT_INDEX, &pNextIdx, 0); - if( rc==SQLITE_OK ){ - sqlite3_bind_int64( - pNextIdx, 1, getAbsoluteLevel(p, iLangid, iIndex, iLevel) - ); - if( SQLITE_ROW==sqlite3_step(pNextIdx) ){ - iNext = sqlite3_column_int(pNextIdx, 0); - } - rc = sqlite3_reset(pNextIdx); - } - - if( rc==SQLITE_OK ){ - /* If iNext is FTS3_MERGE_COUNT, indicating that level iLevel is already - ** full, merge all segments in level iLevel into a single iLevel+1 - ** segment and allocate (newly freed) index 0 at level iLevel. Otherwise, - ** if iNext is less than FTS3_MERGE_COUNT, allocate index iNext. - */ - if( iNext>=FTS3_MERGE_COUNT ){ - fts3LogMerge(16, getAbsoluteLevel(p, iLangid, iIndex, iLevel)); - rc = fts3SegmentMerge(p, iLangid, iIndex, iLevel); - *piIdx = 0; - }else{ - *piIdx = iNext; - } - } - - return rc; -} - -/* -** The %_segments table is declared as follows: -** -** CREATE TABLE %_segments(blockid INTEGER PRIMARY KEY, block BLOB) -** -** This function reads data from a single row of the %_segments table. The -** specific row is identified by the iBlockid parameter. 
If paBlob is not -** NULL, then a buffer is allocated using sqlite3_malloc() and populated -** with the contents of the blob stored in the "block" column of the -** identified table row is. Whether or not paBlob is NULL, *pnBlob is set -** to the size of the blob in bytes before returning. -** -** If an error occurs, or the table does not contain the specified row, -** an SQLite error code is returned. Otherwise, SQLITE_OK is returned. If -** paBlob is non-NULL, then it is the responsibility of the caller to -** eventually free the returned buffer. -** -** This function may leave an open sqlite3_blob* handle in the -** Fts3Table.pSegments variable. This handle is reused by subsequent calls -** to this function. The handle may be closed by calling the -** sqlite3Fts3SegmentsClose() function. Reusing a blob handle is a handy -** performance improvement, but the blob handle should always be closed -** before control is returned to the user (to prevent a lock being held -** on the database file for longer than necessary). Thus, any virtual table -** method (xFilter etc.) that may directly or indirectly call this function -** must call sqlite3Fts3SegmentsClose() before returning. -*/ -SQLITE_PRIVATE int sqlite3Fts3ReadBlock( - Fts3Table *p, /* FTS3 table handle */ - sqlite3_int64 iBlockid, /* Access the row with blockid=$iBlockid */ - char **paBlob, /* OUT: Blob data in malloc'd buffer */ - int *pnBlob, /* OUT: Size of blob data */ - int *pnLoad /* OUT: Bytes actually loaded */ -){ - int rc; /* Return code */ - - /* pnBlob must be non-NULL. paBlob may be NULL or non-NULL. */ - assert( pnBlob ); - - if( p->pSegments ){ - rc = sqlite3_blob_reopen(p->pSegments, iBlockid); - }else{ - if( 0==p->zSegmentsTbl ){ - p->zSegmentsTbl = sqlite3_mprintf("%s_segments", p->zName); - if( 0==p->zSegmentsTbl ) return SQLITE_NOMEM; - } - rc = sqlite3_blob_open( - p->db, p->zDb, p->zSegmentsTbl, "block", iBlockid, 0, &p->pSegments - ); - } - - if( rc==SQLITE_OK ){ - int nByte = sqlite3_blob_bytes(p->pSegments); - *pnBlob = nByte; - if( paBlob ){ - char *aByte = sqlite3_malloc(nByte + FTS3_NODE_PADDING); - if( !aByte ){ - rc = SQLITE_NOMEM; - }else{ - if( pnLoad && nByte>(FTS3_NODE_CHUNK_THRESHOLD) ){ - nByte = FTS3_NODE_CHUNKSIZE; - *pnLoad = nByte; - } - rc = sqlite3_blob_read(p->pSegments, aByte, nByte, 0); - memset(&aByte[nByte], 0, FTS3_NODE_PADDING); - if( rc!=SQLITE_OK ){ - sqlite3_free(aByte); - aByte = 0; - } - } - *paBlob = aByte; - } - } - - return rc; -} - -/* -** Close the blob handle at p->pSegments, if it is open. See comments above -** the sqlite3Fts3ReadBlock() function for details. 
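**
** (Editorial sketch, not part of the deleted file: per the contract
** described above, a caller that loads a node through sqlite3Fts3ReadBlock()
** frees the returned buffer and then releases the shared blob handle before
** returning control to the user.  The consume() call is hypothetical.)
**
**   char *aBlob = 0;
**   int nBlob = 0;
**   rc = sqlite3Fts3ReadBlock(p, iBlockid, &aBlob, &nBlob, 0);
**   if( rc==SQLITE_OK ){
**     consume(aBlob, nBlob);
**     sqlite3_free(aBlob);
**   }
**   sqlite3Fts3SegmentsClose(p);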
-*/ -SQLITE_PRIVATE void sqlite3Fts3SegmentsClose(Fts3Table *p){ - sqlite3_blob_close(p->pSegments); - p->pSegments = 0; -} - -static int fts3SegReaderIncrRead(Fts3SegReader *pReader){ - int nRead; /* Number of bytes to read */ - int rc; /* Return code */ - - nRead = MIN(pReader->nNode - pReader->nPopulate, FTS3_NODE_CHUNKSIZE); - rc = sqlite3_blob_read( - pReader->pBlob, - &pReader->aNode[pReader->nPopulate], - nRead, - pReader->nPopulate - ); - - if( rc==SQLITE_OK ){ - pReader->nPopulate += nRead; - memset(&pReader->aNode[pReader->nPopulate], 0, FTS3_NODE_PADDING); - if( pReader->nPopulate==pReader->nNode ){ - sqlite3_blob_close(pReader->pBlob); - pReader->pBlob = 0; - pReader->nPopulate = 0; - } - } - return rc; -} - -static int fts3SegReaderRequire(Fts3SegReader *pReader, char *pFrom, int nByte){ - int rc = SQLITE_OK; - assert( !pReader->pBlob - || (pFrom>=pReader->aNode && pFrom<&pReader->aNode[pReader->nNode]) - ); - while( pReader->pBlob && rc==SQLITE_OK - && (pFrom - pReader->aNode + nByte)>pReader->nPopulate - ){ - rc = fts3SegReaderIncrRead(pReader); - } - return rc; -} - -/* -** Set an Fts3SegReader cursor to point at EOF. -*/ -static void fts3SegReaderSetEof(Fts3SegReader *pSeg){ - if( !fts3SegReaderIsRootOnly(pSeg) ){ - sqlite3_free(pSeg->aNode); - sqlite3_blob_close(pSeg->pBlob); - pSeg->pBlob = 0; - } - pSeg->aNode = 0; -} - -/* -** Move the iterator passed as the first argument to the next term in the -** segment. If successful, SQLITE_OK is returned. If there is no next term, -** SQLITE_DONE. Otherwise, an SQLite error code. -*/ -static int fts3SegReaderNext( - Fts3Table *p, - Fts3SegReader *pReader, - int bIncr -){ - int rc; /* Return code of various sub-routines */ - char *pNext; /* Cursor variable */ - int nPrefix; /* Number of bytes in term prefix */ - int nSuffix; /* Number of bytes in term suffix */ - - if( !pReader->aDoclist ){ - pNext = pReader->aNode; - }else{ - pNext = &pReader->aDoclist[pReader->nDoclist]; - } - - if( !pNext || pNext>=&pReader->aNode[pReader->nNode] ){ - - if( fts3SegReaderIsPending(pReader) ){ - Fts3HashElem *pElem = *(pReader->ppNextElem); - if( pElem==0 ){ - pReader->aNode = 0; - }else{ - PendingList *pList = (PendingList *)fts3HashData(pElem); - pReader->zTerm = (char *)fts3HashKey(pElem); - pReader->nTerm = fts3HashKeysize(pElem); - pReader->nNode = pReader->nDoclist = pList->nData + 1; - pReader->aNode = pReader->aDoclist = pList->aData; - pReader->ppNextElem++; - assert( pReader->aNode ); - } - return SQLITE_OK; - } - - fts3SegReaderSetEof(pReader); - - /* If iCurrentBlock>=iLeafEndBlock, this is an EOF condition. All leaf - ** blocks have already been traversed. */ - assert( pReader->iCurrentBlock<=pReader->iLeafEndBlock ); - if( pReader->iCurrentBlock>=pReader->iLeafEndBlock ){ - return SQLITE_OK; - } - - rc = sqlite3Fts3ReadBlock( - p, ++pReader->iCurrentBlock, &pReader->aNode, &pReader->nNode, - (bIncr ? &pReader->nPopulate : 0) - ); - if( rc!=SQLITE_OK ) return rc; - assert( pReader->pBlob==0 ); - if( bIncr && pReader->nPopulatenNode ){ - pReader->pBlob = p->pSegments; - p->pSegments = 0; - } - pNext = pReader->aNode; - } - - assert( !fts3SegReaderIsPending(pReader) ); - - rc = fts3SegReaderRequire(pReader, pNext, FTS3_VARINT_MAX*2); - if( rc!=SQLITE_OK ) return rc; - - /* Because of the FTS3_NODE_PADDING bytes of padding, the following is - ** safe (no risk of overread) even if the node data is corrupted. 
*/ - pNext += fts3GetVarint32(pNext, &nPrefix); - pNext += fts3GetVarint32(pNext, &nSuffix); - if( nPrefix<0 || nSuffix<=0 - || &pNext[nSuffix]>&pReader->aNode[pReader->nNode] - ){ - return FTS_CORRUPT_VTAB; - } - - if( nPrefix+nSuffix>pReader->nTermAlloc ){ - int nNew = (nPrefix+nSuffix)*2; - char *zNew = sqlite3_realloc(pReader->zTerm, nNew); - if( !zNew ){ - return SQLITE_NOMEM; - } - pReader->zTerm = zNew; - pReader->nTermAlloc = nNew; - } - - rc = fts3SegReaderRequire(pReader, pNext, nSuffix+FTS3_VARINT_MAX); - if( rc!=SQLITE_OK ) return rc; - - memcpy(&pReader->zTerm[nPrefix], pNext, nSuffix); - pReader->nTerm = nPrefix+nSuffix; - pNext += nSuffix; - pNext += fts3GetVarint32(pNext, &pReader->nDoclist); - pReader->aDoclist = pNext; - pReader->pOffsetList = 0; - - /* Check that the doclist does not appear to extend past the end of the - ** b-tree node. And that the final byte of the doclist is 0x00. If either - ** of these statements is untrue, then the data structure is corrupt. - */ - if( &pReader->aDoclist[pReader->nDoclist]>&pReader->aNode[pReader->nNode] - || (pReader->nPopulate==0 && pReader->aDoclist[pReader->nDoclist-1]) - ){ - return FTS_CORRUPT_VTAB; - } - return SQLITE_OK; -} - -/* -** Set the SegReader to point to the first docid in the doclist associated -** with the current term. -*/ -static int fts3SegReaderFirstDocid(Fts3Table *pTab, Fts3SegReader *pReader){ - int rc = SQLITE_OK; - assert( pReader->aDoclist ); - assert( !pReader->pOffsetList ); - if( pTab->bDescIdx && fts3SegReaderIsPending(pReader) ){ - u8 bEof = 0; - pReader->iDocid = 0; - pReader->nOffsetList = 0; - sqlite3Fts3DoclistPrev(0, - pReader->aDoclist, pReader->nDoclist, &pReader->pOffsetList, - &pReader->iDocid, &pReader->nOffsetList, &bEof - ); - }else{ - rc = fts3SegReaderRequire(pReader, pReader->aDoclist, FTS3_VARINT_MAX); - if( rc==SQLITE_OK ){ - int n = sqlite3Fts3GetVarint(pReader->aDoclist, &pReader->iDocid); - pReader->pOffsetList = &pReader->aDoclist[n]; - } - } - return rc; -} - -/* -** Advance the SegReader to point to the next docid in the doclist -** associated with the current term. -** -** If arguments ppOffsetList and pnOffsetList are not NULL, then -** *ppOffsetList is set to point to the first column-offset list -** in the doclist entry (i.e. immediately past the docid varint). -** *pnOffsetList is set to the length of the set of column-offset -** lists, not including the nul-terminator byte. For example: -*/ -static int fts3SegReaderNextDocid( - Fts3Table *pTab, - Fts3SegReader *pReader, /* Reader to advance to next docid */ - char **ppOffsetList, /* OUT: Pointer to current position-list */ - int *pnOffsetList /* OUT: Length of *ppOffsetList in bytes */ -){ - int rc = SQLITE_OK; - char *p = pReader->pOffsetList; - char c = 0; - - assert( p ); - - if( pTab->bDescIdx && fts3SegReaderIsPending(pReader) ){ - /* A pending-terms seg-reader for an FTS4 table that uses order=desc. - ** Pending-terms doclists are always built up in ascending order, so - ** we have to iterate through them backwards here. */ - u8 bEof = 0; - if( ppOffsetList ){ - *ppOffsetList = pReader->pOffsetList; - *pnOffsetList = pReader->nOffsetList - 1; - } - sqlite3Fts3DoclistPrev(0, - pReader->aDoclist, pReader->nDoclist, &p, &pReader->iDocid, - &pReader->nOffsetList, &bEof - ); - if( bEof ){ - pReader->pOffsetList = 0; - }else{ - pReader->pOffsetList = p; - } - }else{ - char *pEnd = &pReader->aDoclist[pReader->nDoclist]; - - /* Pointer p currently points at the first byte of an offset list. 
The - ** following block advances it to point one byte past the end of - ** the same offset list. */ - while( 1 ){ - - /* The following line of code (and the "p++" below the while() loop) is - ** normally all that is required to move pointer p to the desired - ** position. The exception is if this node is being loaded from disk - ** incrementally and pointer "p" now points to the first byte past - ** the populated part of pReader->aNode[]. - */ - while( *p | c ) c = *p++ & 0x80; - assert( *p==0 ); - - if( pReader->pBlob==0 || p<&pReader->aNode[pReader->nPopulate] ) break; - rc = fts3SegReaderIncrRead(pReader); - if( rc!=SQLITE_OK ) return rc; - } - p++; - - /* If required, populate the output variables with a pointer to and the - ** size of the previous offset-list. - */ - if( ppOffsetList ){ - *ppOffsetList = pReader->pOffsetList; - *pnOffsetList = (int)(p - pReader->pOffsetList - 1); - } - - /* List may have been edited in place by fts3EvalNearTrim() */ - while( p=pEnd ){ - pReader->pOffsetList = 0; - }else{ - rc = fts3SegReaderRequire(pReader, p, FTS3_VARINT_MAX); - if( rc==SQLITE_OK ){ - sqlite3_int64 iDelta; - pReader->pOffsetList = p + sqlite3Fts3GetVarint(p, &iDelta); - if( pTab->bDescIdx ){ - pReader->iDocid -= iDelta; - }else{ - pReader->iDocid += iDelta; - } - } - } - } - - return SQLITE_OK; -} - - -SQLITE_PRIVATE int sqlite3Fts3MsrOvfl( - Fts3Cursor *pCsr, - Fts3MultiSegReader *pMsr, - int *pnOvfl -){ - Fts3Table *p = (Fts3Table*)pCsr->base.pVtab; - int nOvfl = 0; - int ii; - int rc = SQLITE_OK; - int pgsz = p->nPgsz; - - assert( p->bFts4 ); - assert( pgsz>0 ); - - for(ii=0; rc==SQLITE_OK && iinSegment; ii++){ - Fts3SegReader *pReader = pMsr->apSegment[ii]; - if( !fts3SegReaderIsPending(pReader) - && !fts3SegReaderIsRootOnly(pReader) - ){ - sqlite3_int64 jj; - for(jj=pReader->iStartBlock; jj<=pReader->iLeafEndBlock; jj++){ - int nBlob; - rc = sqlite3Fts3ReadBlock(p, jj, 0, &nBlob, 0); - if( rc!=SQLITE_OK ) break; - if( (nBlob+35)>pgsz ){ - nOvfl += (nBlob + 34)/pgsz; - } - } - } - } - *pnOvfl = nOvfl; - return rc; -} - -/* -** Free all allocations associated with the iterator passed as the -** second argument. -*/ -SQLITE_PRIVATE void sqlite3Fts3SegReaderFree(Fts3SegReader *pReader){ - if( pReader && !fts3SegReaderIsPending(pReader) ){ - sqlite3_free(pReader->zTerm); - if( !fts3SegReaderIsRootOnly(pReader) ){ - sqlite3_free(pReader->aNode); - sqlite3_blob_close(pReader->pBlob); - } - } - sqlite3_free(pReader); -} - -/* -** Allocate a new SegReader object. -*/ -SQLITE_PRIVATE int sqlite3Fts3SegReaderNew( - int iAge, /* Segment "age". 
*/ - int bLookup, /* True for a lookup only */ - sqlite3_int64 iStartLeaf, /* First leaf to traverse */ - sqlite3_int64 iEndLeaf, /* Final leaf to traverse */ - sqlite3_int64 iEndBlock, /* Final block of segment */ - const char *zRoot, /* Buffer containing root node */ - int nRoot, /* Size of buffer containing root node */ - Fts3SegReader **ppReader /* OUT: Allocated Fts3SegReader */ -){ - Fts3SegReader *pReader; /* Newly allocated SegReader object */ - int nExtra = 0; /* Bytes to allocate segment root node */ - - assert( iStartLeaf<=iEndLeaf ); - if( iStartLeaf==0 ){ - nExtra = nRoot + FTS3_NODE_PADDING; - } - - pReader = (Fts3SegReader *)sqlite3_malloc(sizeof(Fts3SegReader) + nExtra); - if( !pReader ){ - return SQLITE_NOMEM; - } - memset(pReader, 0, sizeof(Fts3SegReader)); - pReader->iIdx = iAge; - pReader->bLookup = bLookup!=0; - pReader->iStartBlock = iStartLeaf; - pReader->iLeafEndBlock = iEndLeaf; - pReader->iEndBlock = iEndBlock; - - if( nExtra ){ - /* The entire segment is stored in the root node. */ - pReader->aNode = (char *)&pReader[1]; - pReader->rootOnly = 1; - pReader->nNode = nRoot; - memcpy(pReader->aNode, zRoot, nRoot); - memset(&pReader->aNode[nRoot], 0, FTS3_NODE_PADDING); - }else{ - pReader->iCurrentBlock = iStartLeaf-1; - } - *ppReader = pReader; - return SQLITE_OK; -} - -/* -** This is a comparison function used as a qsort() callback when sorting -** an array of pending terms by term. This occurs as part of flushing -** the contents of the pending-terms hash table to the database. -*/ -static int fts3CompareElemByTerm(const void *lhs, const void *rhs){ - char *z1 = fts3HashKey(*(Fts3HashElem **)lhs); - char *z2 = fts3HashKey(*(Fts3HashElem **)rhs); - int n1 = fts3HashKeysize(*(Fts3HashElem **)lhs); - int n2 = fts3HashKeysize(*(Fts3HashElem **)rhs); - - int n = (n1aIndex */ - const char *zTerm, /* Term to search for */ - int nTerm, /* Size of buffer zTerm */ - int bPrefix, /* True for a prefix iterator */ - Fts3SegReader **ppReader /* OUT: SegReader for pending-terms */ -){ - Fts3SegReader *pReader = 0; /* Fts3SegReader object to return */ - Fts3HashElem *pE; /* Iterator variable */ - Fts3HashElem **aElem = 0; /* Array of term hash entries to scan */ - int nElem = 0; /* Size of array at aElem */ - int rc = SQLITE_OK; /* Return Code */ - Fts3Hash *pHash; - - pHash = &p->aIndex[iIndex].hPending; - if( bPrefix ){ - int nAlloc = 0; /* Size of allocated array at aElem */ - - for(pE=fts3HashFirst(pHash); pE; pE=fts3HashNext(pE)){ - char *zKey = (char *)fts3HashKey(pE); - int nKey = fts3HashKeysize(pE); - if( nTerm==0 || (nKey>=nTerm && 0==memcmp(zKey, zTerm, nTerm)) ){ - if( nElem==nAlloc ){ - Fts3HashElem **aElem2; - nAlloc += 16; - aElem2 = (Fts3HashElem **)sqlite3_realloc( - aElem, nAlloc*sizeof(Fts3HashElem *) - ); - if( !aElem2 ){ - rc = SQLITE_NOMEM; - nElem = 0; - break; - } - aElem = aElem2; - } - - aElem[nElem++] = pE; - } - } - - /* If more than one term matches the prefix, sort the Fts3HashElem - ** objects in term order using qsort(). This uses the same comparison - ** callback as is used when flushing terms to disk. - */ - if( nElem>1 ){ - qsort(aElem, nElem, sizeof(Fts3HashElem *), fts3CompareElemByTerm); - } - - }else{ - /* The query is a simple term lookup that matches at most one term in - ** the index. All that is required is a straight hash-lookup. 
- ** - ** Because the stack address of pE may be accessed via the aElem pointer - ** below, the "Fts3HashElem *pE" must be declared so that it is valid - ** within this entire function, not just this "else{...}" block. - */ - pE = fts3HashFindElem(pHash, zTerm, nTerm); - if( pE ){ - aElem = &pE; - nElem = 1; - } - } - - if( nElem>0 ){ - int nByte = sizeof(Fts3SegReader) + (nElem+1)*sizeof(Fts3HashElem *); - pReader = (Fts3SegReader *)sqlite3_malloc(nByte); - if( !pReader ){ - rc = SQLITE_NOMEM; - }else{ - memset(pReader, 0, nByte); - pReader->iIdx = 0x7FFFFFFF; - pReader->ppNextElem = (Fts3HashElem **)&pReader[1]; - memcpy(pReader->ppNextElem, aElem, nElem*sizeof(Fts3HashElem *)); - } - } - - if( bPrefix ){ - sqlite3_free(aElem); - } - *ppReader = pReader; - return rc; -} - -/* -** Compare the entries pointed to by two Fts3SegReader structures. -** Comparison is as follows: -** -** 1) EOF is greater than not EOF. -** -** 2) The current terms (if any) are compared using memcmp(). If one -** term is a prefix of another, the longer term is considered the -** larger. -** -** 3) By segment age. An older segment is considered larger. -*/ -static int fts3SegReaderCmp(Fts3SegReader *pLhs, Fts3SegReader *pRhs){ - int rc; - if( pLhs->aNode && pRhs->aNode ){ - int rc2 = pLhs->nTerm - pRhs->nTerm; - if( rc2<0 ){ - rc = memcmp(pLhs->zTerm, pRhs->zTerm, pLhs->nTerm); - }else{ - rc = memcmp(pLhs->zTerm, pRhs->zTerm, pRhs->nTerm); - } - if( rc==0 ){ - rc = rc2; - } - }else{ - rc = (pLhs->aNode==0) - (pRhs->aNode==0); - } - if( rc==0 ){ - rc = pRhs->iIdx - pLhs->iIdx; - } - assert( rc!=0 ); - return rc; -} - -/* -** A different comparison function for SegReader structures. In this -** version, it is assumed that each SegReader points to an entry in -** a doclist for identical terms. Comparison is made as follows: -** -** 1) EOF (end of doclist in this case) is greater than not EOF. -** -** 2) By current docid. -** -** 3) By segment age. An older segment is considered larger. -*/ -static int fts3SegReaderDoclistCmp(Fts3SegReader *pLhs, Fts3SegReader *pRhs){ - int rc = (pLhs->pOffsetList==0)-(pRhs->pOffsetList==0); - if( rc==0 ){ - if( pLhs->iDocid==pRhs->iDocid ){ - rc = pRhs->iIdx - pLhs->iIdx; - }else{ - rc = (pLhs->iDocid > pRhs->iDocid) ? 1 : -1; - } - } - assert( pLhs->aNode && pRhs->aNode ); - return rc; -} -static int fts3SegReaderDoclistCmpRev(Fts3SegReader *pLhs, Fts3SegReader *pRhs){ - int rc = (pLhs->pOffsetList==0)-(pRhs->pOffsetList==0); - if( rc==0 ){ - if( pLhs->iDocid==pRhs->iDocid ){ - rc = pRhs->iIdx - pLhs->iIdx; - }else{ - rc = (pLhs->iDocid < pRhs->iDocid) ? 1 : -1; - } - } - assert( pLhs->aNode && pRhs->aNode ); - return rc; -} - -/* -** Compare the term that the Fts3SegReader object passed as the first argument -** points to with the term specified by arguments zTerm and nTerm. -** -** If the pSeg iterator is already at EOF, return 0. Otherwise, return -** -ve if the pSeg term is less than zTerm/nTerm, 0 if the two terms are -** equal, or +ve if the pSeg term is greater than zTerm/nTerm. -*/ -static int fts3SegReaderTermCmp( - Fts3SegReader *pSeg, /* Segment reader object */ - const char *zTerm, /* Term to compare to */ - int nTerm /* Size of term zTerm in bytes */ -){ - int res = 0; - if( pSeg->aNode ){ - if( pSeg->nTerm>nTerm ){ - res = memcmp(pSeg->zTerm, zTerm, nTerm); - }else{ - res = memcmp(pSeg->zTerm, zTerm, pSeg->nTerm); - } - if( res==0 ){ - res = pSeg->nTerm-nTerm; - } - } - return res; -} - -/* -** Argument apSegment is an array of nSegment elements. 
It is known that -** the final (nSegment-nSuspect) members are already in sorted order -** (according to the comparison function provided). This function shuffles -** the array around until all entries are in sorted order. -*/ -static void fts3SegReaderSort( - Fts3SegReader **apSegment, /* Array to sort entries of */ - int nSegment, /* Size of apSegment array */ - int nSuspect, /* Unsorted entry count */ - int (*xCmp)(Fts3SegReader *, Fts3SegReader *) /* Comparison function */ -){ - int i; /* Iterator variable */ - - assert( nSuspect<=nSegment ); - - if( nSuspect==nSegment ) nSuspect--; - for(i=nSuspect-1; i>=0; i--){ - int j; - for(j=i; j<(nSegment-1); j++){ - Fts3SegReader *pTmp; - if( xCmp(apSegment[j], apSegment[j+1])<0 ) break; - pTmp = apSegment[j+1]; - apSegment[j+1] = apSegment[j]; - apSegment[j] = pTmp; - } - } - -#ifndef NDEBUG - /* Check that the list really is sorted now. */ - for(i=0; i<(nSuspect-1); i++){ - assert( xCmp(apSegment[i], apSegment[i+1])<0 ); - } -#endif -} - -/* -** Insert a record into the %_segments table. -*/ -static int fts3WriteSegment( - Fts3Table *p, /* Virtual table handle */ - sqlite3_int64 iBlock, /* Block id for new block */ - char *z, /* Pointer to buffer containing block data */ - int n /* Size of buffer z in bytes */ -){ - sqlite3_stmt *pStmt; - int rc = fts3SqlStmt(p, SQL_INSERT_SEGMENTS, &pStmt, 0); - if( rc==SQLITE_OK ){ - sqlite3_bind_int64(pStmt, 1, iBlock); - sqlite3_bind_blob(pStmt, 2, z, n, SQLITE_STATIC); - sqlite3_step(pStmt); - rc = sqlite3_reset(pStmt); - } - return rc; -} - -/* -** Find the largest relative level number in the table. If successful, set -** *pnMax to this value and return SQLITE_OK. Otherwise, if an error occurs, -** set *pnMax to zero and return an SQLite error code. -*/ -SQLITE_PRIVATE int sqlite3Fts3MaxLevel(Fts3Table *p, int *pnMax){ - int rc; - int mxLevel = 0; - sqlite3_stmt *pStmt = 0; - - rc = fts3SqlStmt(p, SQL_SELECT_MXLEVEL, &pStmt, 0); - if( rc==SQLITE_OK ){ - if( SQLITE_ROW==sqlite3_step(pStmt) ){ - mxLevel = sqlite3_column_int(pStmt, 0); - } - rc = sqlite3_reset(pStmt); - } - *pnMax = mxLevel; - return rc; -} - -/* -** Insert a record into the %_segdir table. -*/ -static int fts3WriteSegdir( - Fts3Table *p, /* Virtual table handle */ - sqlite3_int64 iLevel, /* Value for "level" field (absolute level) */ - int iIdx, /* Value for "idx" field */ - sqlite3_int64 iStartBlock, /* Value for "start_block" field */ - sqlite3_int64 iLeafEndBlock, /* Value for "leaves_end_block" field */ - sqlite3_int64 iEndBlock, /* Value for "end_block" field */ - sqlite3_int64 nLeafData, /* Bytes of leaf data in segment */ - char *zRoot, /* Blob value for "root" field */ - int nRoot /* Number of bytes in buffer zRoot */ -){ - sqlite3_stmt *pStmt; - int rc = fts3SqlStmt(p, SQL_INSERT_SEGDIR, &pStmt, 0); - if( rc==SQLITE_OK ){ - sqlite3_bind_int64(pStmt, 1, iLevel); - sqlite3_bind_int(pStmt, 2, iIdx); - sqlite3_bind_int64(pStmt, 3, iStartBlock); - sqlite3_bind_int64(pStmt, 4, iLeafEndBlock); - if( nLeafData==0 ){ - sqlite3_bind_int64(pStmt, 5, iEndBlock); - }else{ - char *zEnd = sqlite3_mprintf("%lld %lld", iEndBlock, nLeafData); - if( !zEnd ) return SQLITE_NOMEM; - sqlite3_bind_text(pStmt, 5, zEnd, -1, sqlite3_free); - } - sqlite3_bind_blob(pStmt, 6, zRoot, nRoot, SQLITE_STATIC); - sqlite3_step(pStmt); - rc = sqlite3_reset(pStmt); - } - return rc; -} - -/* -** Return the size of the common prefix (if any) shared by zPrev and -** zNext, in bytes. 
For example, -** -** fts3PrefixCompress("abc", 3, "abcdef", 6) // returns 3 -** fts3PrefixCompress("abX", 3, "abcdef", 6) // returns 2 -** fts3PrefixCompress("abX", 3, "Xbcdef", 6) // returns 0 -*/ -static int fts3PrefixCompress( - const char *zPrev, /* Buffer containing previous term */ - int nPrev, /* Size of buffer zPrev in bytes */ - const char *zNext, /* Buffer containing next term */ - int nNext /* Size of buffer zNext in bytes */ -){ - int n; - UNUSED_PARAMETER(nNext); - for(n=0; nnData; /* Current size of node in bytes */ - int nReq = nData; /* Required space after adding zTerm */ - int nPrefix; /* Number of bytes of prefix compression */ - int nSuffix; /* Suffix length */ - - nPrefix = fts3PrefixCompress(pTree->zTerm, pTree->nTerm, zTerm, nTerm); - nSuffix = nTerm-nPrefix; - - nReq += sqlite3Fts3VarintLen(nPrefix)+sqlite3Fts3VarintLen(nSuffix)+nSuffix; - if( nReq<=p->nNodeSize || !pTree->zTerm ){ - - if( nReq>p->nNodeSize ){ - /* An unusual case: this is the first term to be added to the node - ** and the static node buffer (p->nNodeSize bytes) is not large - ** enough. Use a separately malloced buffer instead This wastes - ** p->nNodeSize bytes, but since this scenario only comes about when - ** the database contain two terms that share a prefix of almost 2KB, - ** this is not expected to be a serious problem. - */ - assert( pTree->aData==(char *)&pTree[1] ); - pTree->aData = (char *)sqlite3_malloc(nReq); - if( !pTree->aData ){ - return SQLITE_NOMEM; - } - } - - if( pTree->zTerm ){ - /* There is no prefix-length field for first term in a node */ - nData += sqlite3Fts3PutVarint(&pTree->aData[nData], nPrefix); - } - - nData += sqlite3Fts3PutVarint(&pTree->aData[nData], nSuffix); - memcpy(&pTree->aData[nData], &zTerm[nPrefix], nSuffix); - pTree->nData = nData + nSuffix; - pTree->nEntry++; - - if( isCopyTerm ){ - if( pTree->nMalloczMalloc, nTerm*2); - if( !zNew ){ - return SQLITE_NOMEM; - } - pTree->nMalloc = nTerm*2; - pTree->zMalloc = zNew; - } - pTree->zTerm = pTree->zMalloc; - memcpy(pTree->zTerm, zTerm, nTerm); - pTree->nTerm = nTerm; - }else{ - pTree->zTerm = (char *)zTerm; - pTree->nTerm = nTerm; - } - return SQLITE_OK; - } - } - - /* If control flows to here, it was not possible to append zTerm to the - ** current node. Create a new node (a right-sibling of the current node). - ** If this is the first node in the tree, the term is added to it. - ** - ** Otherwise, the term is not added to the new node, it is left empty for - ** now. Instead, the term is inserted into the parent of pTree. If pTree - ** has no parent, one is created here. - */ - pNew = (SegmentNode *)sqlite3_malloc(sizeof(SegmentNode) + p->nNodeSize); - if( !pNew ){ - return SQLITE_NOMEM; - } - memset(pNew, 0, sizeof(SegmentNode)); - pNew->nData = 1 + FTS3_VARINT_MAX; - pNew->aData = (char *)&pNew[1]; - - if( pTree ){ - SegmentNode *pParent = pTree->pParent; - rc = fts3NodeAddTerm(p, &pParent, isCopyTerm, zTerm, nTerm); - if( pTree->pParent==0 ){ - pTree->pParent = pParent; - } - pTree->pRight = pNew; - pNew->pLeftmost = pTree->pLeftmost; - pNew->pParent = pParent; - pNew->zMalloc = pTree->zMalloc; - pNew->nMalloc = pTree->nMalloc; - pTree->zMalloc = 0; - }else{ - pNew->pLeftmost = pNew; - rc = fts3NodeAddTerm(p, &pNew, isCopyTerm, zTerm, nTerm); - } - - *ppTree = pNew; - return rc; -} - -/* -** Helper function for fts3NodeWrite(). 
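Illustrative only, and not part of the file removed by this diff: a minimal standalone sketch of the prefix-compression rule described above, in which each stored term records only the byte count it shares with the previous term followed by its own suffix (the helper name prefixShared is hypothetical).

/* Number of leading bytes shared by the previous and next term.
** e.g. prefixShared("abc", 3, "abcdef", 6) == 3
**      prefixShared("abX", 3, "abcdef", 6) == 2
**      prefixShared("abX", 3, "Xbcdef", 6) == 0                          */
static int prefixShared(const char *zPrev, int nPrev, const char *zNext, int nNext){
  int n;
  for(n=0; n<nPrev && n<nNext && zPrev[n]==zNext[n]; n++);
  return n;
}

/* Writing "abcdef" after "abc" then stores (prefix=3, suffix="def"),
** which is what keeps leaf nodes small when terms sort close together. */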
-*/ -static int fts3TreeFinishNode( - SegmentNode *pTree, - int iHeight, - sqlite3_int64 iLeftChild -){ - int nStart; - assert( iHeight>=1 && iHeight<128 ); - nStart = FTS3_VARINT_MAX - sqlite3Fts3VarintLen(iLeftChild); - pTree->aData[nStart] = (char)iHeight; - sqlite3Fts3PutVarint(&pTree->aData[nStart+1], iLeftChild); - return nStart; -} - -/* -** Write the buffer for the segment node pTree and all of its peers to the -** database. Then call this function recursively to write the parent of -** pTree and its peers to the database. -** -** Except, if pTree is a root node, do not write it to the database. Instead, -** set output variables *paRoot and *pnRoot to contain the root node. -** -** If successful, SQLITE_OK is returned and output variable *piLast is -** set to the largest blockid written to the database (or zero if no -** blocks were written to the db). Otherwise, an SQLite error code is -** returned. -*/ -static int fts3NodeWrite( - Fts3Table *p, /* Virtual table handle */ - SegmentNode *pTree, /* SegmentNode handle */ - int iHeight, /* Height of this node in tree */ - sqlite3_int64 iLeaf, /* Block id of first leaf node */ - sqlite3_int64 iFree, /* Block id of next free slot in %_segments */ - sqlite3_int64 *piLast, /* OUT: Block id of last entry written */ - char **paRoot, /* OUT: Data for root node */ - int *pnRoot /* OUT: Size of root node in bytes */ -){ - int rc = SQLITE_OK; - - if( !pTree->pParent ){ - /* Root node of the tree. */ - int nStart = fts3TreeFinishNode(pTree, iHeight, iLeaf); - *piLast = iFree-1; - *pnRoot = pTree->nData - nStart; - *paRoot = &pTree->aData[nStart]; - }else{ - SegmentNode *pIter; - sqlite3_int64 iNextFree = iFree; - sqlite3_int64 iNextLeaf = iLeaf; - for(pIter=pTree->pLeftmost; pIter && rc==SQLITE_OK; pIter=pIter->pRight){ - int nStart = fts3TreeFinishNode(pIter, iHeight, iNextLeaf); - int nWrite = pIter->nData - nStart; - - rc = fts3WriteSegment(p, iNextFree, &pIter->aData[nStart], nWrite); - iNextFree++; - iNextLeaf += (pIter->nEntry+1); - } - if( rc==SQLITE_OK ){ - assert( iNextLeaf==iFree ); - rc = fts3NodeWrite( - p, pTree->pParent, iHeight+1, iFree, iNextFree, piLast, paRoot, pnRoot - ); - } - } - - return rc; -} - -/* -** Free all memory allocations associated with the tree pTree. -*/ -static void fts3NodeFree(SegmentNode *pTree){ - if( pTree ){ - SegmentNode *p = pTree->pLeftmost; - fts3NodeFree(p->pParent); - while( p ){ - SegmentNode *pRight = p->pRight; - if( p->aData!=(char *)&p[1] ){ - sqlite3_free(p->aData); - } - assert( pRight==0 || p->zMalloc==0 ); - sqlite3_free(p->zMalloc); - sqlite3_free(p); - p = pRight; - } - } -} - -/* -** Add a term to the segment being constructed by the SegmentWriter object -** *ppWriter. When adding the first term to a segment, *ppWriter should -** be passed NULL. This function will allocate a new SegmentWriter object -** and return it via the input/output variable *ppWriter in this case. -** -** If successful, SQLITE_OK is returned. Otherwise, an SQLite error code. 
-*/ -static int fts3SegWriterAdd( - Fts3Table *p, /* Virtual table handle */ - SegmentWriter **ppWriter, /* IN/OUT: SegmentWriter handle */ - int isCopyTerm, /* True if buffer zTerm must be copied */ - const char *zTerm, /* Pointer to buffer containing term */ - int nTerm, /* Size of term in bytes */ - const char *aDoclist, /* Pointer to buffer containing doclist */ - int nDoclist /* Size of doclist in bytes */ -){ - int nPrefix; /* Size of term prefix in bytes */ - int nSuffix; /* Size of term suffix in bytes */ - int nReq; /* Number of bytes required on leaf page */ - int nData; - SegmentWriter *pWriter = *ppWriter; - - if( !pWriter ){ - int rc; - sqlite3_stmt *pStmt; - - /* Allocate the SegmentWriter structure */ - pWriter = (SegmentWriter *)sqlite3_malloc(sizeof(SegmentWriter)); - if( !pWriter ) return SQLITE_NOMEM; - memset(pWriter, 0, sizeof(SegmentWriter)); - *ppWriter = pWriter; - - /* Allocate a buffer in which to accumulate data */ - pWriter->aData = (char *)sqlite3_malloc(p->nNodeSize); - if( !pWriter->aData ) return SQLITE_NOMEM; - pWriter->nSize = p->nNodeSize; - - /* Find the next free blockid in the %_segments table */ - rc = fts3SqlStmt(p, SQL_NEXT_SEGMENTS_ID, &pStmt, 0); - if( rc!=SQLITE_OK ) return rc; - if( SQLITE_ROW==sqlite3_step(pStmt) ){ - pWriter->iFree = sqlite3_column_int64(pStmt, 0); - pWriter->iFirst = pWriter->iFree; - } - rc = sqlite3_reset(pStmt); - if( rc!=SQLITE_OK ) return rc; - } - nData = pWriter->nData; - - nPrefix = fts3PrefixCompress(pWriter->zTerm, pWriter->nTerm, zTerm, nTerm); - nSuffix = nTerm-nPrefix; - - /* Figure out how many bytes are required by this new entry */ - nReq = sqlite3Fts3VarintLen(nPrefix) + /* varint containing prefix size */ - sqlite3Fts3VarintLen(nSuffix) + /* varint containing suffix size */ - nSuffix + /* Term suffix */ - sqlite3Fts3VarintLen(nDoclist) + /* Size of doclist */ - nDoclist; /* Doclist data */ - - if( nData>0 && nData+nReq>p->nNodeSize ){ - int rc; - - /* The current leaf node is full. Write it out to the database. */ - rc = fts3WriteSegment(p, pWriter->iFree++, pWriter->aData, nData); - if( rc!=SQLITE_OK ) return rc; - p->nLeafAdd++; - - /* Add the current term to the interior node tree. The term added to - ** the interior tree must: - ** - ** a) be greater than the largest term on the leaf node just written - ** to the database (still available in pWriter->zTerm), and - ** - ** b) be less than or equal to the term about to be added to the new - ** leaf node (zTerm/nTerm). - ** - ** In other words, it must be the prefix of zTerm 1 byte longer than - ** the common prefix (if any) of zTerm and pWriter->zTerm. - */ - assert( nPrefixpTree, isCopyTerm, zTerm, nPrefix+1); - if( rc!=SQLITE_OK ) return rc; - - nData = 0; - pWriter->nTerm = 0; - - nPrefix = 0; - nSuffix = nTerm; - nReq = 1 + /* varint containing prefix size */ - sqlite3Fts3VarintLen(nTerm) + /* varint containing suffix size */ - nTerm + /* Term suffix */ - sqlite3Fts3VarintLen(nDoclist) + /* Size of doclist */ - nDoclist; /* Doclist data */ - } - - /* Increase the total number of bytes written to account for the new entry. */ - pWriter->nLeafData += nReq; - - /* If the buffer currently allocated is too small for this entry, realloc - ** the buffer to make it large enough. - */ - if( nReq>pWriter->nSize ){ - char *aNew = sqlite3_realloc(pWriter->aData, nReq); - if( !aNew ) return SQLITE_NOMEM; - pWriter->aData = aNew; - pWriter->nSize = nReq; - } - assert( nData+nReq<=pWriter->nSize ); - - /* Append the prefix-compressed term and doclist to the buffer. 
*/ - nData += sqlite3Fts3PutVarint(&pWriter->aData[nData], nPrefix); - nData += sqlite3Fts3PutVarint(&pWriter->aData[nData], nSuffix); - memcpy(&pWriter->aData[nData], &zTerm[nPrefix], nSuffix); - nData += nSuffix; - nData += sqlite3Fts3PutVarint(&pWriter->aData[nData], nDoclist); - memcpy(&pWriter->aData[nData], aDoclist, nDoclist); - pWriter->nData = nData + nDoclist; - - /* Save the current term so that it can be used to prefix-compress the next. - ** If the isCopyTerm parameter is true, then the buffer pointed to by - ** zTerm is transient, so take a copy of the term data. Otherwise, just - ** store a copy of the pointer. - */ - if( isCopyTerm ){ - if( nTerm>pWriter->nMalloc ){ - char *zNew = sqlite3_realloc(pWriter->zMalloc, nTerm*2); - if( !zNew ){ - return SQLITE_NOMEM; - } - pWriter->nMalloc = nTerm*2; - pWriter->zMalloc = zNew; - pWriter->zTerm = zNew; - } - assert( pWriter->zTerm==pWriter->zMalloc ); - memcpy(pWriter->zTerm, zTerm, nTerm); - }else{ - pWriter->zTerm = (char *)zTerm; - } - pWriter->nTerm = nTerm; - - return SQLITE_OK; -} - -/* -** Flush all data associated with the SegmentWriter object pWriter to the -** database. This function must be called after all terms have been added -** to the segment using fts3SegWriterAdd(). If successful, SQLITE_OK is -** returned. Otherwise, an SQLite error code. -*/ -static int fts3SegWriterFlush( - Fts3Table *p, /* Virtual table handle */ - SegmentWriter *pWriter, /* SegmentWriter to flush to the db */ - sqlite3_int64 iLevel, /* Value for 'level' column of %_segdir */ - int iIdx /* Value for 'idx' column of %_segdir */ -){ - int rc; /* Return code */ - if( pWriter->pTree ){ - sqlite3_int64 iLast = 0; /* Largest block id written to database */ - sqlite3_int64 iLastLeaf; /* Largest leaf block id written to db */ - char *zRoot = NULL; /* Pointer to buffer containing root node */ - int nRoot = 0; /* Size of buffer zRoot */ - - iLastLeaf = pWriter->iFree; - rc = fts3WriteSegment(p, pWriter->iFree++, pWriter->aData, pWriter->nData); - if( rc==SQLITE_OK ){ - rc = fts3NodeWrite(p, pWriter->pTree, 1, - pWriter->iFirst, pWriter->iFree, &iLast, &zRoot, &nRoot); - } - if( rc==SQLITE_OK ){ - rc = fts3WriteSegdir(p, iLevel, iIdx, - pWriter->iFirst, iLastLeaf, iLast, pWriter->nLeafData, zRoot, nRoot); - } - }else{ - /* The entire tree fits on the root node. Write it to the segdir table. */ - rc = fts3WriteSegdir(p, iLevel, iIdx, - 0, 0, 0, pWriter->nLeafData, pWriter->aData, pWriter->nData); - } - p->nLeafAdd++; - return rc; -} - -/* -** Release all memory held by the SegmentWriter object passed as the -** first argument. -*/ -static void fts3SegWriterFree(SegmentWriter *pWriter){ - if( pWriter ){ - sqlite3_free(pWriter->aData); - sqlite3_free(pWriter->zMalloc); - fts3NodeFree(pWriter->pTree); - sqlite3_free(pWriter); - } -} - -/* -** The first value in the apVal[] array is assumed to contain an integer. -** This function tests if there exist any documents with docid values that -** are different from that integer. i.e. if deleting the document with docid -** pRowid would mean the FTS3 table were empty. -** -** If successful, *pisEmpty is set to true if the table is empty except for -** document pRowid, or false otherwise, and SQLITE_OK is returned. If an -** error occurs, an SQLite error code is returned. 
-*/ -static int fts3IsEmpty(Fts3Table *p, sqlite3_value *pRowid, int *pisEmpty){ - sqlite3_stmt *pStmt; - int rc; - if( p->zContentTbl ){ - /* If using the content=xxx option, assume the table is never empty */ - *pisEmpty = 0; - rc = SQLITE_OK; - }else{ - rc = fts3SqlStmt(p, SQL_IS_EMPTY, &pStmt, &pRowid); - if( rc==SQLITE_OK ){ - if( SQLITE_ROW==sqlite3_step(pStmt) ){ - *pisEmpty = sqlite3_column_int(pStmt, 0); - } - rc = sqlite3_reset(pStmt); - } - } - return rc; -} - -/* -** Set *pnMax to the largest segment level in the database for the index -** iIndex. -** -** Segment levels are stored in the 'level' column of the %_segdir table. -** -** Return SQLITE_OK if successful, or an SQLite error code if not. -*/ -static int fts3SegmentMaxLevel( - Fts3Table *p, - int iLangid, - int iIndex, - sqlite3_int64 *pnMax -){ - sqlite3_stmt *pStmt; - int rc; - assert( iIndex>=0 && iIndexnIndex ); - - /* Set pStmt to the compiled version of: - ** - ** SELECT max(level) FROM %Q.'%q_segdir' WHERE level BETWEEN ? AND ? - ** - ** (1024 is actually the value of macro FTS3_SEGDIR_PREFIXLEVEL_STR). - */ - rc = fts3SqlStmt(p, SQL_SELECT_SEGDIR_MAX_LEVEL, &pStmt, 0); - if( rc!=SQLITE_OK ) return rc; - sqlite3_bind_int64(pStmt, 1, getAbsoluteLevel(p, iLangid, iIndex, 0)); - sqlite3_bind_int64(pStmt, 2, - getAbsoluteLevel(p, iLangid, iIndex, FTS3_SEGDIR_MAXLEVEL-1) - ); - if( SQLITE_ROW==sqlite3_step(pStmt) ){ - *pnMax = sqlite3_column_int64(pStmt, 0); - } - return sqlite3_reset(pStmt); -} - -/* -** iAbsLevel is an absolute level that may be assumed to exist within -** the database. This function checks if it is the largest level number -** within its index. Assuming no error occurs, *pbMax is set to 1 if -** iAbsLevel is indeed the largest level, or 0 otherwise, and SQLITE_OK -** is returned. If an error occurs, an error code is returned and the -** final value of *pbMax is undefined. -*/ -static int fts3SegmentIsMaxLevel(Fts3Table *p, i64 iAbsLevel, int *pbMax){ - - /* Set pStmt to the compiled version of: - ** - ** SELECT max(level) FROM %Q.'%q_segdir' WHERE level BETWEEN ? AND ? - ** - ** (1024 is actually the value of macro FTS3_SEGDIR_PREFIXLEVEL_STR). - */ - sqlite3_stmt *pStmt; - int rc = fts3SqlStmt(p, SQL_SELECT_SEGDIR_MAX_LEVEL, &pStmt, 0); - if( rc!=SQLITE_OK ) return rc; - sqlite3_bind_int64(pStmt, 1, iAbsLevel+1); - sqlite3_bind_int64(pStmt, 2, - ((iAbsLevel/FTS3_SEGDIR_MAXLEVEL)+1) * FTS3_SEGDIR_MAXLEVEL - ); - - *pbMax = 0; - if( SQLITE_ROW==sqlite3_step(pStmt) ){ - *pbMax = sqlite3_column_type(pStmt, 0)==SQLITE_NULL; - } - return sqlite3_reset(pStmt); -} - -/* -** Delete all entries in the %_segments table associated with the segment -** opened with seg-reader pSeg. This function does not affect the contents -** of the %_segdir table. -*/ -static int fts3DeleteSegment( - Fts3Table *p, /* FTS table handle */ - Fts3SegReader *pSeg /* Segment to delete */ -){ - int rc = SQLITE_OK; /* Return code */ - if( pSeg->iStartBlock ){ - sqlite3_stmt *pDelete; /* SQL statement to delete rows */ - rc = fts3SqlStmt(p, SQL_DELETE_SEGMENTS_RANGE, &pDelete, 0); - if( rc==SQLITE_OK ){ - sqlite3_bind_int64(pDelete, 1, pSeg->iStartBlock); - sqlite3_bind_int64(pDelete, 2, pSeg->iEndBlock); - sqlite3_step(pDelete); - rc = sqlite3_reset(pDelete); - } - } - return rc; -} - -/* -** This function is used after merging multiple segments into a single large -** segment to delete the old, now redundant, segment b-trees. 
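Illustrative only, and not part of the file removed by this diff: the getAbsoluteLevel() calls used throughout this code map a (language id, prefix-index, relative level) triple onto one integer key in the %_segdir table. A minimal sketch of that mapping, assuming the usual FTS3 layout of FTS3_SEGDIR_MAXLEVEL relative levels per (langid, index) pair:

typedef long long i64;

#define SEGDIR_MAXLEVEL 16   /* assumed to match FTS3_SEGDIR_MAXLEVEL */

/* Each (langid, index) pair owns a contiguous band of SEGDIR_MAXLEVEL
** levels, so a range such as "BETWEEN abs(level 0) AND abs(level MAX-1)"
** selects exactly one band, as in fts3SegmentMaxLevel() above.          */
static i64 absoluteLevel(int iLangid, int nIndex, int iIndex, int iLevel){
  return ((i64)iLangid * nIndex + iIndex) * SEGDIR_MAXLEVEL + iLevel;
}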
Specifically, -** it: -** -** 1) Deletes all %_segments entries for the segments associated with -** each of the SegReader objects in the array passed as the third -** argument, and -** -** 2) deletes all %_segdir entries with level iLevel, or all %_segdir -** entries regardless of level if (iLevel<0). -** -** SQLITE_OK is returned if successful, otherwise an SQLite error code. -*/ -static int fts3DeleteSegdir( - Fts3Table *p, /* Virtual table handle */ - int iLangid, /* Language id */ - int iIndex, /* Index for p->aIndex */ - int iLevel, /* Level of %_segdir entries to delete */ - Fts3SegReader **apSegment, /* Array of SegReader objects */ - int nReader /* Size of array apSegment */ -){ - int rc = SQLITE_OK; /* Return Code */ - int i; /* Iterator variable */ - sqlite3_stmt *pDelete = 0; /* SQL statement to delete rows */ - - for(i=0; rc==SQLITE_OK && i=0 || iLevel==FTS3_SEGCURSOR_ALL ); - if( iLevel==FTS3_SEGCURSOR_ALL ){ - rc = fts3SqlStmt(p, SQL_DELETE_SEGDIR_RANGE, &pDelete, 0); - if( rc==SQLITE_OK ){ - sqlite3_bind_int64(pDelete, 1, getAbsoluteLevel(p, iLangid, iIndex, 0)); - sqlite3_bind_int64(pDelete, 2, - getAbsoluteLevel(p, iLangid, iIndex, FTS3_SEGDIR_MAXLEVEL-1) - ); - } - }else{ - rc = fts3SqlStmt(p, SQL_DELETE_SEGDIR_LEVEL, &pDelete, 0); - if( rc==SQLITE_OK ){ - sqlite3_bind_int64( - pDelete, 1, getAbsoluteLevel(p, iLangid, iIndex, iLevel) - ); - } - } - - if( rc==SQLITE_OK ){ - sqlite3_step(pDelete); - rc = sqlite3_reset(pDelete); - } - - return rc; -} - -/* -** When this function is called, buffer *ppList (size *pnList bytes) contains -** a position list that may (or may not) feature multiple columns. This -** function adjusts the pointer *ppList and the length *pnList so that they -** identify the subset of the position list that corresponds to column iCol. -** -** If there are no entries in the input position list for column iCol, then -** *pnList is set to zero before returning. -** -** If parameter bZero is non-zero, then any part of the input list following -** the end of the output list is zeroed before returning. -*/ -static void fts3ColumnFilter( - int iCol, /* Column to filter on */ - int bZero, /* Zero out anything following *ppList */ - char **ppList, /* IN/OUT: Pointer to position list */ - int *pnList /* IN/OUT: Size of buffer *ppList in bytes */ -){ - char *pList = *ppList; - int nList = *pnList; - char *pEnd = &pList[nList]; - int iCurrent = 0; - char *p = pList; - - assert( iCol>=0 ); - while( 1 ){ - char c = 0; - while( ppMsr->nBuffer ){ - char *pNew; - pMsr->nBuffer = nList*2; - pNew = (char *)sqlite3_realloc(pMsr->aBuffer, pMsr->nBuffer); - if( !pNew ) return SQLITE_NOMEM; - pMsr->aBuffer = pNew; - } - - memcpy(pMsr->aBuffer, pList, nList); - return SQLITE_OK; -} - -SQLITE_PRIVATE int sqlite3Fts3MsrIncrNext( - Fts3Table *p, /* Virtual table handle */ - Fts3MultiSegReader *pMsr, /* Multi-segment-reader handle */ - sqlite3_int64 *piDocid, /* OUT: Docid value */ - char **paPoslist, /* OUT: Pointer to position list */ - int *pnPoslist /* OUT: Size of position list in bytes */ -){ - int nMerge = pMsr->nAdvance; - Fts3SegReader **apSegment = pMsr->apSegment; - int (*xCmp)(Fts3SegReader *, Fts3SegReader *) = ( - p->bDescIdx ? 
fts3SegReaderDoclistCmpRev : fts3SegReaderDoclistCmp - ); - - if( nMerge==0 ){ - *paPoslist = 0; - return SQLITE_OK; - } - - while( 1 ){ - Fts3SegReader *pSeg; - pSeg = pMsr->apSegment[0]; - - if( pSeg->pOffsetList==0 ){ - *paPoslist = 0; - break; - }else{ - int rc; - char *pList; - int nList; - int j; - sqlite3_int64 iDocid = apSegment[0]->iDocid; - - rc = fts3SegReaderNextDocid(p, apSegment[0], &pList, &nList); - j = 1; - while( rc==SQLITE_OK - && jpOffsetList - && apSegment[j]->iDocid==iDocid - ){ - rc = fts3SegReaderNextDocid(p, apSegment[j], 0, 0); - j++; - } - if( rc!=SQLITE_OK ) return rc; - fts3SegReaderSort(pMsr->apSegment, nMerge, j, xCmp); - - if( nList>0 && fts3SegReaderIsPending(apSegment[0]) ){ - rc = fts3MsrBufferData(pMsr, pList, nList+1); - if( rc!=SQLITE_OK ) return rc; - assert( (pMsr->aBuffer[nList] & 0xFE)==0x00 ); - pList = pMsr->aBuffer; - } - - if( pMsr->iColFilter>=0 ){ - fts3ColumnFilter(pMsr->iColFilter, 1, &pList, &nList); - } - - if( nList>0 ){ - *paPoslist = pList; - *piDocid = iDocid; - *pnPoslist = nList; - break; - } - } - } - - return SQLITE_OK; -} - -static int fts3SegReaderStart( - Fts3Table *p, /* Virtual table handle */ - Fts3MultiSegReader *pCsr, /* Cursor object */ - const char *zTerm, /* Term searched for (or NULL) */ - int nTerm /* Length of zTerm in bytes */ -){ - int i; - int nSeg = pCsr->nSegment; - - /* If the Fts3SegFilter defines a specific term (or term prefix) to search - ** for, then advance each segment iterator until it points to a term of - ** equal or greater value than the specified term. This prevents many - ** unnecessary merge/sort operations for the case where single segment - ** b-tree leaf nodes contain more than one term. - */ - for(i=0; pCsr->bRestart==0 && inSegment; i++){ - int res = 0; - Fts3SegReader *pSeg = pCsr->apSegment[i]; - do { - int rc = fts3SegReaderNext(p, pSeg, 0); - if( rc!=SQLITE_OK ) return rc; - }while( zTerm && (res = fts3SegReaderTermCmp(pSeg, zTerm, nTerm))<0 ); - - if( pSeg->bLookup && res!=0 ){ - fts3SegReaderSetEof(pSeg); - } - } - fts3SegReaderSort(pCsr->apSegment, nSeg, nSeg, fts3SegReaderCmp); - - return SQLITE_OK; -} - -SQLITE_PRIVATE int sqlite3Fts3SegReaderStart( - Fts3Table *p, /* Virtual table handle */ - Fts3MultiSegReader *pCsr, /* Cursor object */ - Fts3SegFilter *pFilter /* Restrictions on range of iteration */ -){ - pCsr->pFilter = pFilter; - return fts3SegReaderStart(p, pCsr, pFilter->zTerm, pFilter->nTerm); -} - -SQLITE_PRIVATE int sqlite3Fts3MsrIncrStart( - Fts3Table *p, /* Virtual table handle */ - Fts3MultiSegReader *pCsr, /* Cursor object */ - int iCol, /* Column to match on. */ - const char *zTerm, /* Term to iterate through a doclist for */ - int nTerm /* Number of bytes in zTerm */ -){ - int i; - int rc; - int nSegment = pCsr->nSegment; - int (*xCmp)(Fts3SegReader *, Fts3SegReader *) = ( - p->bDescIdx ? fts3SegReaderDoclistCmpRev : fts3SegReaderDoclistCmp - ); - - assert( pCsr->pFilter==0 ); - assert( zTerm && nTerm>0 ); - - /* Advance each segment iterator until it points to the term zTerm/nTerm. */ - rc = fts3SegReaderStart(p, pCsr, zTerm, nTerm); - if( rc!=SQLITE_OK ) return rc; - - /* Determine how many of the segments actually point to zTerm/nTerm. */ - for(i=0; iapSegment[i]; - if( !pSeg->aNode || fts3SegReaderTermCmp(pSeg, zTerm, nTerm) ){ - break; - } - } - pCsr->nAdvance = i; - - /* Advance each of the segments to point to the first docid. 
*/ - for(i=0; inAdvance; i++){ - rc = fts3SegReaderFirstDocid(p, pCsr->apSegment[i]); - if( rc!=SQLITE_OK ) return rc; - } - fts3SegReaderSort(pCsr->apSegment, i, i, xCmp); - - assert( iCol<0 || iColnColumn ); - pCsr->iColFilter = iCol; - - return SQLITE_OK; -} - -/* -** This function is called on a MultiSegReader that has been started using -** sqlite3Fts3MsrIncrStart(). One or more calls to MsrIncrNext() may also -** have been made. Calling this function puts the MultiSegReader in such -** a state that if the next two calls are: -** -** sqlite3Fts3SegReaderStart() -** sqlite3Fts3SegReaderStep() -** -** then the entire doclist for the term is available in -** MultiSegReader.aDoclist/nDoclist. -*/ -SQLITE_PRIVATE int sqlite3Fts3MsrIncrRestart(Fts3MultiSegReader *pCsr){ - int i; /* Used to iterate through segment-readers */ - - assert( pCsr->zTerm==0 ); - assert( pCsr->nTerm==0 ); - assert( pCsr->aDoclist==0 ); - assert( pCsr->nDoclist==0 ); - - pCsr->nAdvance = 0; - pCsr->bRestart = 1; - for(i=0; inSegment; i++){ - pCsr->apSegment[i]->pOffsetList = 0; - pCsr->apSegment[i]->nOffsetList = 0; - pCsr->apSegment[i]->iDocid = 0; - } - - return SQLITE_OK; -} - - -SQLITE_PRIVATE int sqlite3Fts3SegReaderStep( - Fts3Table *p, /* Virtual table handle */ - Fts3MultiSegReader *pCsr /* Cursor object */ -){ - int rc = SQLITE_OK; - - int isIgnoreEmpty = (pCsr->pFilter->flags & FTS3_SEGMENT_IGNORE_EMPTY); - int isRequirePos = (pCsr->pFilter->flags & FTS3_SEGMENT_REQUIRE_POS); - int isColFilter = (pCsr->pFilter->flags & FTS3_SEGMENT_COLUMN_FILTER); - int isPrefix = (pCsr->pFilter->flags & FTS3_SEGMENT_PREFIX); - int isScan = (pCsr->pFilter->flags & FTS3_SEGMENT_SCAN); - int isFirst = (pCsr->pFilter->flags & FTS3_SEGMENT_FIRST); - - Fts3SegReader **apSegment = pCsr->apSegment; - int nSegment = pCsr->nSegment; - Fts3SegFilter *pFilter = pCsr->pFilter; - int (*xCmp)(Fts3SegReader *, Fts3SegReader *) = ( - p->bDescIdx ? fts3SegReaderDoclistCmpRev : fts3SegReaderDoclistCmp - ); - - if( pCsr->nSegment==0 ) return SQLITE_OK; - - do { - int nMerge; - int i; - - /* Advance the first pCsr->nAdvance entries in the apSegment[] array - ** forward. Then sort the list in order of current term again. - */ - for(i=0; inAdvance; i++){ - Fts3SegReader *pSeg = apSegment[i]; - if( pSeg->bLookup ){ - fts3SegReaderSetEof(pSeg); - }else{ - rc = fts3SegReaderNext(p, pSeg, 0); - } - if( rc!=SQLITE_OK ) return rc; - } - fts3SegReaderSort(apSegment, nSegment, pCsr->nAdvance, fts3SegReaderCmp); - pCsr->nAdvance = 0; - - /* If all the seg-readers are at EOF, we're finished. return SQLITE_OK. */ - assert( rc==SQLITE_OK ); - if( apSegment[0]->aNode==0 ) break; - - pCsr->nTerm = apSegment[0]->nTerm; - pCsr->zTerm = apSegment[0]->zTerm; - - /* If this is a prefix-search, and if the term that apSegment[0] points - ** to does not share a suffix with pFilter->zTerm/nTerm, then all - ** required callbacks have been made. In this case exit early. - ** - ** Similarly, if this is a search for an exact match, and the first term - ** of segment apSegment[0] is not a match, exit early. 
- */ - if( pFilter->zTerm && !isScan ){ - if( pCsr->nTermnTerm - || (!isPrefix && pCsr->nTerm>pFilter->nTerm) - || memcmp(pCsr->zTerm, pFilter->zTerm, pFilter->nTerm) - ){ - break; - } - } - - nMerge = 1; - while( nMergeaNode - && apSegment[nMerge]->nTerm==pCsr->nTerm - && 0==memcmp(pCsr->zTerm, apSegment[nMerge]->zTerm, pCsr->nTerm) - ){ - nMerge++; - } - - assert( isIgnoreEmpty || (isRequirePos && !isColFilter) ); - if( nMerge==1 - && !isIgnoreEmpty - && !isFirst - && (p->bDescIdx==0 || fts3SegReaderIsPending(apSegment[0])==0) - ){ - pCsr->nDoclist = apSegment[0]->nDoclist; - if( fts3SegReaderIsPending(apSegment[0]) ){ - rc = fts3MsrBufferData(pCsr, apSegment[0]->aDoclist, pCsr->nDoclist); - pCsr->aDoclist = pCsr->aBuffer; - }else{ - pCsr->aDoclist = apSegment[0]->aDoclist; - } - if( rc==SQLITE_OK ) rc = SQLITE_ROW; - }else{ - int nDoclist = 0; /* Size of doclist */ - sqlite3_int64 iPrev = 0; /* Previous docid stored in doclist */ - - /* The current term of the first nMerge entries in the array - ** of Fts3SegReader objects is the same. The doclists must be merged - ** and a single term returned with the merged doclist. - */ - for(i=0; ipOffsetList ){ - int j; /* Number of segments that share a docid */ - char *pList = 0; - int nList = 0; - int nByte; - sqlite3_int64 iDocid = apSegment[0]->iDocid; - fts3SegReaderNextDocid(p, apSegment[0], &pList, &nList); - j = 1; - while( jpOffsetList - && apSegment[j]->iDocid==iDocid - ){ - fts3SegReaderNextDocid(p, apSegment[j], 0, 0); - j++; - } - - if( isColFilter ){ - fts3ColumnFilter(pFilter->iCol, 0, &pList, &nList); - } - - if( !isIgnoreEmpty || nList>0 ){ - - /* Calculate the 'docid' delta value to write into the merged - ** doclist. */ - sqlite3_int64 iDelta; - if( p->bDescIdx && nDoclist>0 ){ - iDelta = iPrev - iDocid; - }else{ - iDelta = iDocid - iPrev; - } - assert( iDelta>0 || (nDoclist==0 && iDelta==iDocid) ); - assert( nDoclist>0 || iDelta==iDocid ); - - nByte = sqlite3Fts3VarintLen(iDelta) + (isRequirePos?nList+1:0); - if( nDoclist+nByte>pCsr->nBuffer ){ - char *aNew; - pCsr->nBuffer = (nDoclist+nByte)*2; - aNew = sqlite3_realloc(pCsr->aBuffer, pCsr->nBuffer); - if( !aNew ){ - return SQLITE_NOMEM; - } - pCsr->aBuffer = aNew; - } - - if( isFirst ){ - char *a = &pCsr->aBuffer[nDoclist]; - int nWrite; - - nWrite = sqlite3Fts3FirstFilter(iDelta, pList, nList, a); - if( nWrite ){ - iPrev = iDocid; - nDoclist += nWrite; - } - }else{ - nDoclist += sqlite3Fts3PutVarint(&pCsr->aBuffer[nDoclist], iDelta); - iPrev = iDocid; - if( isRequirePos ){ - memcpy(&pCsr->aBuffer[nDoclist], pList, nList); - nDoclist += nList; - pCsr->aBuffer[nDoclist++] = '\0'; - } - } - } - - fts3SegReaderSort(apSegment, nMerge, j, xCmp); - } - if( nDoclist>0 ){ - pCsr->aDoclist = pCsr->aBuffer; - pCsr->nDoclist = nDoclist; - rc = SQLITE_ROW; - } - } - pCsr->nAdvance = nMerge; - }while( rc==SQLITE_OK ); - - return rc; -} - - -SQLITE_PRIVATE void sqlite3Fts3SegReaderFinish( - Fts3MultiSegReader *pCsr /* Cursor object */ -){ - if( pCsr ){ - int i; - for(i=0; inSegment; i++){ - sqlite3Fts3SegReaderFree(pCsr->apSegment[i]); - } - sqlite3_free(pCsr->apSegment); - sqlite3_free(pCsr->aBuffer); - - pCsr->nSegment = 0; - pCsr->apSegment = 0; - pCsr->aBuffer = 0; - } -} - -/* -** Decode the "end_block" field, selected by column iCol of the SELECT -** statement passed as the first argument. 
-** -** The "end_block" field may contain either an integer, or a text field -** containing the text representation of two non-negative integers separated -** by one or more space (0x20) characters. In the first case, set *piEndBlock -** to the integer value and *pnByte to zero before returning. In the second, -** set *piEndBlock to the first value and *pnByte to the second. -*/ -static void fts3ReadEndBlockField( - sqlite3_stmt *pStmt, - int iCol, - i64 *piEndBlock, - i64 *pnByte -){ - const unsigned char *zText = sqlite3_column_text(pStmt, iCol); - if( zText ){ - int i; - int iMul = 1; - i64 iVal = 0; - for(i=0; zText[i]>='0' && zText[i]<='9'; i++){ - iVal = iVal*10 + (zText[i] - '0'); - } - *piEndBlock = iVal; - while( zText[i]==' ' ) i++; - iVal = 0; - if( zText[i]=='-' ){ - i++; - iMul = -1; - } - for(/* no-op */; zText[i]>='0' && zText[i]<='9'; i++){ - iVal = iVal*10 + (zText[i] - '0'); - } - *pnByte = (iVal * (i64)iMul); - } -} - - -/* -** A segment of size nByte bytes has just been written to absolute level -** iAbsLevel. Promote any segments that should be promoted as a result. -*/ -static int fts3PromoteSegments( - Fts3Table *p, /* FTS table handle */ - sqlite3_int64 iAbsLevel, /* Absolute level just updated */ - sqlite3_int64 nByte /* Size of new segment at iAbsLevel */ -){ - int rc = SQLITE_OK; - sqlite3_stmt *pRange; - - rc = fts3SqlStmt(p, SQL_SELECT_LEVEL_RANGE2, &pRange, 0); - - if( rc==SQLITE_OK ){ - int bOk = 0; - i64 iLast = (iAbsLevel/FTS3_SEGDIR_MAXLEVEL + 1) * FTS3_SEGDIR_MAXLEVEL - 1; - i64 nLimit = (nByte*3)/2; - - /* Loop through all entries in the %_segdir table corresponding to - ** segments in this index on levels greater than iAbsLevel. If there is - ** at least one such segment, and it is possible to determine that all - ** such segments are smaller than nLimit bytes in size, they will be - ** promoted to level iAbsLevel. */ - sqlite3_bind_int64(pRange, 1, iAbsLevel+1); - sqlite3_bind_int64(pRange, 2, iLast); - while( SQLITE_ROW==sqlite3_step(pRange) ){ - i64 nSize = 0, dummy; - fts3ReadEndBlockField(pRange, 2, &dummy, &nSize); - if( nSize<=0 || nSize>nLimit ){ - /* If nSize==0, then the %_segdir.end_block field does not not - ** contain a size value. This happens if it was written by an - ** old version of FTS. In this case it is not possible to determine - ** the size of the segment, and so segment promotion does not - ** take place. */ - bOk = 0; - break; - } - bOk = 1; - } - rc = sqlite3_reset(pRange); - - if( bOk ){ - int iIdx = 0; - sqlite3_stmt *pUpdate1; - sqlite3_stmt *pUpdate2; - - if( rc==SQLITE_OK ){ - rc = fts3SqlStmt(p, SQL_UPDATE_LEVEL_IDX, &pUpdate1, 0); - } - if( rc==SQLITE_OK ){ - rc = fts3SqlStmt(p, SQL_UPDATE_LEVEL, &pUpdate2, 0); - } - - if( rc==SQLITE_OK ){ - - /* Loop through all %_segdir entries for segments in this index with - ** levels equal to or greater than iAbsLevel. As each entry is visited, - ** updated it to set (level = -1) and (idx = N), where N is 0 for the - ** oldest segment in the range, 1 for the next oldest, and so on. - ** - ** In other words, move all segments being promoted to level -1, - ** setting the "idx" fields as appropriate to keep them in the same - ** order. The contents of level -1 (which is never used, except - ** transiently here), will be moved back to level iAbsLevel below. 
*/ - sqlite3_bind_int64(pRange, 1, iAbsLevel); - while( SQLITE_ROW==sqlite3_step(pRange) ){ - sqlite3_bind_int(pUpdate1, 1, iIdx++); - sqlite3_bind_int(pUpdate1, 2, sqlite3_column_int(pRange, 0)); - sqlite3_bind_int(pUpdate1, 3, sqlite3_column_int(pRange, 1)); - sqlite3_step(pUpdate1); - rc = sqlite3_reset(pUpdate1); - if( rc!=SQLITE_OK ){ - sqlite3_reset(pRange); - break; - } - } - } - if( rc==SQLITE_OK ){ - rc = sqlite3_reset(pRange); - } - - /* Move level -1 to level iAbsLevel */ - if( rc==SQLITE_OK ){ - sqlite3_bind_int64(pUpdate2, 1, iAbsLevel); - sqlite3_step(pUpdate2); - rc = sqlite3_reset(pUpdate2); - } - } - } - - - return rc; -} - -/* -** Merge all level iLevel segments in the database into a single -** iLevel+1 segment. Or, if iLevel<0, merge all segments into a -** single segment with a level equal to the numerically largest level -** currently present in the database. -** -** If this function is called with iLevel<0, but there is only one -** segment in the database, SQLITE_DONE is returned immediately. -** Otherwise, if successful, SQLITE_OK is returned. If an error occurs, -** an SQLite error code is returned. -*/ -static int fts3SegmentMerge( - Fts3Table *p, - int iLangid, /* Language id to merge */ - int iIndex, /* Index in p->aIndex[] to merge */ - int iLevel /* Level to merge */ -){ - int rc; /* Return code */ - int iIdx = 0; /* Index of new segment */ - sqlite3_int64 iNewLevel = 0; /* Level/index to create new segment at */ - SegmentWriter *pWriter = 0; /* Used to write the new, merged, segment */ - Fts3SegFilter filter; /* Segment term filter condition */ - Fts3MultiSegReader csr; /* Cursor to iterate through level(s) */ - int bIgnoreEmpty = 0; /* True to ignore empty segments */ - i64 iMaxLevel = 0; /* Max level number for this index/langid */ - - assert( iLevel==FTS3_SEGCURSOR_ALL - || iLevel==FTS3_SEGCURSOR_PENDING - || iLevel>=0 - ); - assert( iLevel=0 && iIndexnIndex ); - - rc = sqlite3Fts3SegReaderCursor(p, iLangid, iIndex, iLevel, 0, 0, 1, 0, &csr); - if( rc!=SQLITE_OK || csr.nSegment==0 ) goto finished; - - if( iLevel!=FTS3_SEGCURSOR_PENDING ){ - rc = fts3SegmentMaxLevel(p, iLangid, iIndex, &iMaxLevel); - if( rc!=SQLITE_OK ) goto finished; - } - - if( iLevel==FTS3_SEGCURSOR_ALL ){ - /* This call is to merge all segments in the database to a single - ** segment. The level of the new segment is equal to the numerically - ** greatest segment level currently present in the database for this - ** index. The idx of the new segment is always 0. */ - if( csr.nSegment==1 ){ - rc = SQLITE_DONE; - goto finished; - } - iNewLevel = iMaxLevel; - bIgnoreEmpty = 1; - - }else{ - /* This call is to merge all segments at level iLevel. find the next - ** available segment index at level iLevel+1. The call to - ** fts3AllocateSegdirIdx() will merge the segments at level iLevel+1 to - ** a single iLevel+2 segment if necessary. */ - assert( FTS3_SEGCURSOR_PENDING==-1 ); - iNewLevel = getAbsoluteLevel(p, iLangid, iIndex, iLevel+1); - rc = fts3AllocateSegdirIdx(p, iLangid, iIndex, iLevel+1, &iIdx); - bIgnoreEmpty = (iLevel!=FTS3_SEGCURSOR_PENDING) && (iNewLevel>iMaxLevel); - } - if( rc!=SQLITE_OK ) goto finished; - - assert( csr.nSegment>0 ); - assert( iNewLevel>=getAbsoluteLevel(p, iLangid, iIndex, 0) ); - assert( iNewLevelnLeafData); - } - } - } - - finished: - fts3SegWriterFree(pWriter); - sqlite3Fts3SegReaderFinish(&csr); - return rc; -} - - -/* -** Flush the contents of pendingTerms to level 0 segments. 
-*/ -SQLITE_PRIVATE int sqlite3Fts3PendingTermsFlush(Fts3Table *p){ - int rc = SQLITE_OK; - int i; - - for(i=0; rc==SQLITE_OK && inIndex; i++){ - rc = fts3SegmentMerge(p, p->iPrevLangid, i, FTS3_SEGCURSOR_PENDING); - if( rc==SQLITE_DONE ) rc = SQLITE_OK; - } - sqlite3Fts3PendingTermsClear(p); - - /* Determine the auto-incr-merge setting if unknown. If enabled, - ** estimate the number of leaf blocks of content to be written - */ - if( rc==SQLITE_OK && p->bHasStat - && p->nAutoincrmerge==0xff && p->nLeafAdd>0 - ){ - sqlite3_stmt *pStmt = 0; - rc = fts3SqlStmt(p, SQL_SELECT_STAT, &pStmt, 0); - if( rc==SQLITE_OK ){ - sqlite3_bind_int(pStmt, 1, FTS_STAT_AUTOINCRMERGE); - rc = sqlite3_step(pStmt); - if( rc==SQLITE_ROW ){ - p->nAutoincrmerge = sqlite3_column_int(pStmt, 0); - if( p->nAutoincrmerge==1 ) p->nAutoincrmerge = 8; - }else if( rc==SQLITE_DONE ){ - p->nAutoincrmerge = 0; - } - rc = sqlite3_reset(pStmt); - } - } - return rc; -} - -/* -** Encode N integers as varints into a blob. -*/ -static void fts3EncodeIntArray( - int N, /* The number of integers to encode */ - u32 *a, /* The integer values */ - char *zBuf, /* Write the BLOB here */ - int *pNBuf /* Write number of bytes if zBuf[] used here */ -){ - int i, j; - for(i=j=0; iiPrevDocid. The sizes are encoded as -** a blob of varints. -*/ -static void fts3InsertDocsize( - int *pRC, /* Result code */ - Fts3Table *p, /* Table into which to insert */ - u32 *aSz /* Sizes of each column, in tokens */ -){ - char *pBlob; /* The BLOB encoding of the document size */ - int nBlob; /* Number of bytes in the BLOB */ - sqlite3_stmt *pStmt; /* Statement used to insert the encoding */ - int rc; /* Result code from subfunctions */ - - if( *pRC ) return; - pBlob = sqlite3_malloc( 10*p->nColumn ); - if( pBlob==0 ){ - *pRC = SQLITE_NOMEM; - return; - } - fts3EncodeIntArray(p->nColumn, aSz, pBlob, &nBlob); - rc = fts3SqlStmt(p, SQL_REPLACE_DOCSIZE, &pStmt, 0); - if( rc ){ - sqlite3_free(pBlob); - *pRC = rc; - return; - } - sqlite3_bind_int64(pStmt, 1, p->iPrevDocid); - sqlite3_bind_blob(pStmt, 2, pBlob, nBlob, sqlite3_free); - sqlite3_step(pStmt); - *pRC = sqlite3_reset(pStmt); -} - -/* -** Record 0 of the %_stat table contains a blob consisting of N varints, -** where N is the number of user defined columns in the fts3 table plus -** two. If nCol is the number of user defined columns, then values of the -** varints are set as follows: -** -** Varint 0: Total number of rows in the table. -** -** Varint 1..nCol: For each column, the total number of tokens stored in -** the column for all rows of the table. -** -** Varint 1+nCol: The total size, in bytes, of all text values in all -** columns of all rows of the table. 
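Illustrative only, and not part of the file removed by this diff: the docsize and stat records described above are blobs of varints. Assuming the usual FTS3 varint layout (seven payload bits per byte, high bit set on all but the final byte), a minimal sketch of the encoder (the helper name putVarint is hypothetical; the removed code uses sqlite3Fts3PutVarint):

typedef unsigned long long u64;

/* Append v to buf as a little-endian varint and return the byte count.
** Every byte carries 7 payload bits; the high bit is set on all bytes
** except the last.                                                      */
static int putVarint(unsigned char *buf, u64 v){
  int n = 0;
  do{
    buf[n++] = (unsigned char)((v & 0x7f) | 0x80);
    v >>= 7;
  }while( v );
  buf[n-1] &= 0x7f;   /* clear the continuation bit on the final byte */
  return n;
}

/* A row whose columns hold 3, 0 and 300 tokens would be stored as the
** varints 03, 00, AC 02 concatenated into a single %_docsize blob.     */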
-** -*/ -static void fts3UpdateDocTotals( - int *pRC, /* The result code */ - Fts3Table *p, /* Table being updated */ - u32 *aSzIns, /* Size increases */ - u32 *aSzDel, /* Size decreases */ - int nChng /* Change in the number of documents */ -){ - char *pBlob; /* Storage for BLOB written into %_stat */ - int nBlob; /* Size of BLOB written into %_stat */ - u32 *a; /* Array of integers that becomes the BLOB */ - sqlite3_stmt *pStmt; /* Statement for reading and writing */ - int i; /* Loop counter */ - int rc; /* Result code from subfunctions */ - - const int nStat = p->nColumn+2; - - if( *pRC ) return; - a = sqlite3_malloc( (sizeof(u32)+10)*nStat ); - if( a==0 ){ - *pRC = SQLITE_NOMEM; - return; - } - pBlob = (char*)&a[nStat]; - rc = fts3SqlStmt(p, SQL_SELECT_STAT, &pStmt, 0); - if( rc ){ - sqlite3_free(a); - *pRC = rc; - return; - } - sqlite3_bind_int(pStmt, 1, FTS_STAT_DOCTOTAL); - if( sqlite3_step(pStmt)==SQLITE_ROW ){ - fts3DecodeIntArray(nStat, a, - sqlite3_column_blob(pStmt, 0), - sqlite3_column_bytes(pStmt, 0)); - }else{ - memset(a, 0, sizeof(u32)*(nStat) ); - } - rc = sqlite3_reset(pStmt); - if( rc!=SQLITE_OK ){ - sqlite3_free(a); - *pRC = rc; - return; - } - if( nChng<0 && a[0]<(u32)(-nChng) ){ - a[0] = 0; - }else{ - a[0] += nChng; - } - for(i=0; inColumn+1; i++){ - u32 x = a[i+1]; - if( x+aSzIns[i] < aSzDel[i] ){ - x = 0; - }else{ - x = x + aSzIns[i] - aSzDel[i]; - } - a[i+1] = x; - } - fts3EncodeIntArray(nStat, a, pBlob, &nBlob); - rc = fts3SqlStmt(p, SQL_REPLACE_STAT, &pStmt, 0); - if( rc ){ - sqlite3_free(a); - *pRC = rc; - return; - } - sqlite3_bind_int(pStmt, 1, FTS_STAT_DOCTOTAL); - sqlite3_bind_blob(pStmt, 2, pBlob, nBlob, SQLITE_STATIC); - sqlite3_step(pStmt); - *pRC = sqlite3_reset(pStmt); - sqlite3_free(a); -} - -/* -** Merge the entire database so that there is one segment for each -** iIndex/iLangid combination. -*/ -static int fts3DoOptimize(Fts3Table *p, int bReturnDone){ - int bSeenDone = 0; - int rc; - sqlite3_stmt *pAllLangid = 0; - - rc = fts3SqlStmt(p, SQL_SELECT_ALL_LANGID, &pAllLangid, 0); - if( rc==SQLITE_OK ){ - int rc2; - sqlite3_bind_int(pAllLangid, 1, p->nIndex); - while( sqlite3_step(pAllLangid)==SQLITE_ROW ){ - int i; - int iLangid = sqlite3_column_int(pAllLangid, 0); - for(i=0; rc==SQLITE_OK && inIndex; i++){ - rc = fts3SegmentMerge(p, iLangid, i, FTS3_SEGCURSOR_ALL); - if( rc==SQLITE_DONE ){ - bSeenDone = 1; - rc = SQLITE_OK; - } - } - } - rc2 = sqlite3_reset(pAllLangid); - if( rc==SQLITE_OK ) rc = rc2; - } - - sqlite3Fts3SegmentsClose(p); - sqlite3Fts3PendingTermsClear(p); - - return (rc==SQLITE_OK && bReturnDone && bSeenDone) ? SQLITE_DONE : rc; -} - -/* -** This function is called when the user executes the following statement: -** -** INSERT INTO () VALUES('rebuild'); -** -** The entire FTS index is discarded and rebuilt. If the table is one -** created using the content=xxx option, then the new index is based on -** the current contents of the xxx table. Otherwise, it is rebuilt based -** on the contents of the %_content table. 
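/*
** The 'rebuild' command (and the 'optimize' command handled earlier) is
** issued through the ordinary SQLite C API as the special INSERT described
** above. A short usage sketch follows; the table name "pages" and its column
** list are hypothetical.
*/
#include <stdio.h>
#include <sqlite3.h>

/* Ask an FTS table to discard and rebuild its full-text index. */
static int fts_rebuild(sqlite3 *db){
  char *zErr = 0;
  int rc = sqlite3_exec(db, "INSERT INTO pages(pages) VALUES('rebuild');",
                        0, 0, &zErr);
  if( rc!=SQLITE_OK ){
    fprintf(stderr, "rebuild failed: %s\n", zErr ? zErr : "out of memory");
    sqlite3_free(zErr);
  }
  return rc;
}

int main(void){
  sqlite3 *db = 0;
  int rc = sqlite3_open(":memory:", &db);
  if( rc==SQLITE_OK ){
    rc = sqlite3_exec(db, "CREATE VIRTUAL TABLE pages USING fts4(title, body);",
                      0, 0, 0);
  }
  if( rc==SQLITE_OK ) rc = fts_rebuild(db);
  sqlite3_close(db);
  return rc;
}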
-*/ -static int fts3DoRebuild(Fts3Table *p){ - int rc; /* Return Code */ - - rc = fts3DeleteAll(p, 0); - if( rc==SQLITE_OK ){ - u32 *aSz = 0; - u32 *aSzIns = 0; - u32 *aSzDel = 0; - sqlite3_stmt *pStmt = 0; - int nEntry = 0; - - /* Compose and prepare an SQL statement to loop through the content table */ - char *zSql = sqlite3_mprintf("SELECT %s" , p->zReadExprlist); - if( !zSql ){ - rc = SQLITE_NOMEM; - }else{ - rc = sqlite3_prepare_v2(p->db, zSql, -1, &pStmt, 0); - sqlite3_free(zSql); - } - - if( rc==SQLITE_OK ){ - int nByte = sizeof(u32) * (p->nColumn+1)*3; - aSz = (u32 *)sqlite3_malloc(nByte); - if( aSz==0 ){ - rc = SQLITE_NOMEM; - }else{ - memset(aSz, 0, nByte); - aSzIns = &aSz[p->nColumn+1]; - aSzDel = &aSzIns[p->nColumn+1]; - } - } - - while( rc==SQLITE_OK && SQLITE_ROW==sqlite3_step(pStmt) ){ - int iCol; - int iLangid = langidFromSelect(p, pStmt); - rc = fts3PendingTermsDocid(p, iLangid, sqlite3_column_int64(pStmt, 0)); - memset(aSz, 0, sizeof(aSz[0]) * (p->nColumn+1)); - for(iCol=0; rc==SQLITE_OK && iColnColumn; iCol++){ - if( p->abNotindexed[iCol]==0 ){ - const char *z = (const char *) sqlite3_column_text(pStmt, iCol+1); - rc = fts3PendingTermsAdd(p, iLangid, z, iCol, &aSz[iCol]); - aSz[p->nColumn] += sqlite3_column_bytes(pStmt, iCol+1); - } - } - if( p->bHasDocsize ){ - fts3InsertDocsize(&rc, p, aSz); - } - if( rc!=SQLITE_OK ){ - sqlite3_finalize(pStmt); - pStmt = 0; - }else{ - nEntry++; - for(iCol=0; iCol<=p->nColumn; iCol++){ - aSzIns[iCol] += aSz[iCol]; - } - } - } - if( p->bFts4 ){ - fts3UpdateDocTotals(&rc, p, aSzIns, aSzDel, nEntry); - } - sqlite3_free(aSz); - - if( pStmt ){ - int rc2 = sqlite3_finalize(pStmt); - if( rc==SQLITE_OK ){ - rc = rc2; - } - } - } - - return rc; -} - - -/* -** This function opens a cursor used to read the input data for an -** incremental merge operation. Specifically, it opens a cursor to scan -** the oldest nSeg segments (idx=0 through idx=(nSeg-1)) in absolute -** level iAbsLevel. -*/ -static int fts3IncrmergeCsr( - Fts3Table *p, /* FTS3 table handle */ - sqlite3_int64 iAbsLevel, /* Absolute level to open */ - int nSeg, /* Number of segments to merge */ - Fts3MultiSegReader *pCsr /* Cursor object to populate */ -){ - int rc; /* Return Code */ - sqlite3_stmt *pStmt = 0; /* Statement used to read %_segdir entry */ - int nByte; /* Bytes allocated at pCsr->apSegment[] */ - - /* Allocate space for the Fts3MultiSegReader.aCsr[] array */ - memset(pCsr, 0, sizeof(*pCsr)); - nByte = sizeof(Fts3SegReader *) * nSeg; - pCsr->apSegment = (Fts3SegReader **)sqlite3_malloc(nByte); - - if( pCsr->apSegment==0 ){ - rc = SQLITE_NOMEM; - }else{ - memset(pCsr->apSegment, 0, nByte); - rc = fts3SqlStmt(p, SQL_SELECT_LEVEL, &pStmt, 0); - } - if( rc==SQLITE_OK ){ - int i; - int rc2; - sqlite3_bind_int64(pStmt, 1, iAbsLevel); - assert( pCsr->nSegment==0 ); - for(i=0; rc==SQLITE_OK && sqlite3_step(pStmt)==SQLITE_ROW && iapSegment[i] - ); - pCsr->nSegment++; - } - rc2 = sqlite3_reset(pStmt); - if( rc==SQLITE_OK ) rc = rc2; - } - - return rc; -} - -typedef struct IncrmergeWriter IncrmergeWriter; -typedef struct NodeWriter NodeWriter; -typedef struct Blob Blob; -typedef struct NodeReader NodeReader; - -/* -** An instance of the following structure is used as a dynamic buffer -** to build up nodes or other blobs of data in. -** -** The function blobGrowBuffer() is used to extend the allocation. 
-*/ -struct Blob { - char *a; /* Pointer to allocation */ - int n; /* Number of valid bytes of data in a[] */ - int nAlloc; /* Allocated size of a[] (nAlloc>=n) */ -}; - -/* -** This structure is used to build up buffers containing segment b-tree -** nodes (blocks). -*/ -struct NodeWriter { - sqlite3_int64 iBlock; /* Current block id */ - Blob key; /* Last key written to the current block */ - Blob block; /* Current block image */ -}; - -/* -** An object of this type contains the state required to create or append -** to an appendable b-tree segment. -*/ -struct IncrmergeWriter { - int nLeafEst; /* Space allocated for leaf blocks */ - int nWork; /* Number of leaf pages flushed */ - sqlite3_int64 iAbsLevel; /* Absolute level of input segments */ - int iIdx; /* Index of *output* segment in iAbsLevel+1 */ - sqlite3_int64 iStart; /* Block number of first allocated block */ - sqlite3_int64 iEnd; /* Block number of last allocated block */ - sqlite3_int64 nLeafData; /* Bytes of leaf page data so far */ - u8 bNoLeafData; /* If true, store 0 for segment size */ - NodeWriter aNodeWriter[FTS_MAX_APPENDABLE_HEIGHT]; -}; - -/* -** An object of the following type is used to read data from a single -** FTS segment node. See the following functions: -** -** nodeReaderInit() -** nodeReaderNext() -** nodeReaderRelease() -*/ -struct NodeReader { - const char *aNode; - int nNode; - int iOff; /* Current offset within aNode[] */ - - /* Output variables. Containing the current node entry. */ - sqlite3_int64 iChild; /* Pointer to child node */ - Blob term; /* Current term */ - const char *aDoclist; /* Pointer to doclist */ - int nDoclist; /* Size of doclist in bytes */ -}; - -/* -** If *pRc is not SQLITE_OK when this function is called, it is a no-op. -** Otherwise, if the allocation at pBlob->a is not already at least nMin -** bytes in size, extend (realloc) it to be so. -** -** If an OOM error occurs, set *pRc to SQLITE_NOMEM and leave pBlob->a -** unmodified. Otherwise, if the allocation succeeds, update pBlob->nAlloc -** to reflect the new size of the pBlob->a[] buffer. -*/ -static void blobGrowBuffer(Blob *pBlob, int nMin, int *pRc){ - if( *pRc==SQLITE_OK && nMin>pBlob->nAlloc ){ - int nAlloc = nMin; - char *a = (char *)sqlite3_realloc(pBlob->a, nAlloc); - if( a ){ - pBlob->nAlloc = nAlloc; - pBlob->a = a; - }else{ - *pRc = SQLITE_NOMEM; - } - } -} - -/* -** Attempt to advance the node-reader object passed as the first argument to -** the next entry on the node. -** -** Return an error code if an error occurs (SQLITE_NOMEM is possible). -** Otherwise return SQLITE_OK. If there is no next entry on the node -** (e.g. because the current entry is the last) set NodeReader->aNode to -** NULL to indicate EOF. Otherwise, populate the NodeReader structure output -** variables for the new entry. 
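/*
** A toy illustration of the leaf-node image that nodeReaderNext() below
** decodes and that fts3AppendToNode() later in this file produces: a single
** 0x00 header byte, then the first entry as <term-len><term><doclist-len>
** <doclist>, and every later entry as <shared-prefix-len><suffix-len><suffix>
** <doclist-len><doclist>. Every length in this example fits in one varint
** byte, so plain bytes stand in for varints. The two terms and their one-byte
** doclists are made up purely for illustration.
*/
#include <stdio.h>
#include <string.h>

static int shared_prefix(const char *a, const char *b){
  int n = 0;
  while( a[n] && a[n]==b[n] ) n++;
  return n;
}

int main(void){
  unsigned char node[64];
  const char *t0 = "alpha", *t1 = "alter";
  const unsigned char d0[] = {0x02}, d1[] = {0x05};     /* fake doclists */
  int nPrefix = shared_prefix(t0, t1);                  /* "al", so 2 */
  int n = 0;

  node[n++] = 0x00;                                     /* leaf-node header */

  node[n++] = (unsigned char)strlen(t0);                /* first term: full length */
  memcpy(&node[n], t0, strlen(t0)); n += (int)strlen(t0);
  node[n++] = sizeof(d0);
  memcpy(&node[n], d0, sizeof(d0)); n += (int)sizeof(d0);

  node[n++] = (unsigned char)nPrefix;                   /* later term: prefix length */
  node[n++] = (unsigned char)(strlen(t1) - nPrefix);    /* then suffix length */
  memcpy(&node[n], t1 + nPrefix, strlen(t1) - nPrefix);
  n += (int)(strlen(t1) - nPrefix);
  node[n++] = sizeof(d1);
  memcpy(&node[n], d1, sizeof(d1)); n += (int)sizeof(d1);

  printf("leaf image is %d bytes\n", n);                /* 16 */
  return 0;
}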
-*/ -static int nodeReaderNext(NodeReader *p){ - int bFirst = (p->term.n==0); /* True for first term on the node */ - int nPrefix = 0; /* Bytes to copy from previous term */ - int nSuffix = 0; /* Bytes to append to the prefix */ - int rc = SQLITE_OK; /* Return code */ - - assert( p->aNode ); - if( p->iChild && bFirst==0 ) p->iChild++; - if( p->iOff>=p->nNode ){ - /* EOF */ - p->aNode = 0; - }else{ - if( bFirst==0 ){ - p->iOff += fts3GetVarint32(&p->aNode[p->iOff], &nPrefix); - } - p->iOff += fts3GetVarint32(&p->aNode[p->iOff], &nSuffix); - - blobGrowBuffer(&p->term, nPrefix+nSuffix, &rc); - if( rc==SQLITE_OK ){ - memcpy(&p->term.a[nPrefix], &p->aNode[p->iOff], nSuffix); - p->term.n = nPrefix+nSuffix; - p->iOff += nSuffix; - if( p->iChild==0 ){ - p->iOff += fts3GetVarint32(&p->aNode[p->iOff], &p->nDoclist); - p->aDoclist = &p->aNode[p->iOff]; - p->iOff += p->nDoclist; - } - } - } - - assert( p->iOff<=p->nNode ); - - return rc; -} - -/* -** Release all dynamic resources held by node-reader object *p. -*/ -static void nodeReaderRelease(NodeReader *p){ - sqlite3_free(p->term.a); -} - -/* -** Initialize a node-reader object to read the node in buffer aNode/nNode. -** -** If successful, SQLITE_OK is returned and the NodeReader object set to -** point to the first entry on the node (if any). Otherwise, an SQLite -** error code is returned. -*/ -static int nodeReaderInit(NodeReader *p, const char *aNode, int nNode){ - memset(p, 0, sizeof(NodeReader)); - p->aNode = aNode; - p->nNode = nNode; - - /* Figure out if this is a leaf or an internal node. */ - if( p->aNode[0] ){ - /* An internal node. */ - p->iOff = 1 + sqlite3Fts3GetVarint(&p->aNode[1], &p->iChild); - }else{ - p->iOff = 1; - } - - return nodeReaderNext(p); -} - -/* -** This function is called while writing an FTS segment each time a leaf o -** node is finished and written to disk. The key (zTerm/nTerm) is guaranteed -** to be greater than the largest key on the node just written, but smaller -** than or equal to the first key that will be written to the next leaf -** node. -** -** The block id of the leaf node just written to disk may be found in -** (pWriter->aNodeWriter[0].iBlock) when this function is called. -*/ -static int fts3IncrmergePush( - Fts3Table *p, /* Fts3 table handle */ - IncrmergeWriter *pWriter, /* Writer object */ - const char *zTerm, /* Term to write to internal node */ - int nTerm /* Bytes at zTerm */ -){ - sqlite3_int64 iPtr = pWriter->aNodeWriter[0].iBlock; - int iLayer; - - assert( nTerm>0 ); - for(iLayer=1; ALWAYS(iLayeraNodeWriter[iLayer]; - int rc = SQLITE_OK; - int nPrefix; - int nSuffix; - int nSpace; - - /* Figure out how much space the key will consume if it is written to - ** the current node of layer iLayer. Due to the prefix compression, - ** the space required changes depending on which node the key is to - ** be added to. */ - nPrefix = fts3PrefixCompress(pNode->key.a, pNode->key.n, zTerm, nTerm); - nSuffix = nTerm - nPrefix; - nSpace = sqlite3Fts3VarintLen(nPrefix); - nSpace += sqlite3Fts3VarintLen(nSuffix) + nSuffix; - - if( pNode->key.n==0 || (pNode->block.n + nSpace)<=p->nNodeSize ){ - /* If the current node of layer iLayer contains zero keys, or if adding - ** the key to it will not cause it to grow to larger than nNodeSize - ** bytes in size, write the key here. 
*/ - - Blob *pBlk = &pNode->block; - if( pBlk->n==0 ){ - blobGrowBuffer(pBlk, p->nNodeSize, &rc); - if( rc==SQLITE_OK ){ - pBlk->a[0] = (char)iLayer; - pBlk->n = 1 + sqlite3Fts3PutVarint(&pBlk->a[1], iPtr); - } - } - blobGrowBuffer(pBlk, pBlk->n + nSpace, &rc); - blobGrowBuffer(&pNode->key, nTerm, &rc); - - if( rc==SQLITE_OK ){ - if( pNode->key.n ){ - pBlk->n += sqlite3Fts3PutVarint(&pBlk->a[pBlk->n], nPrefix); - } - pBlk->n += sqlite3Fts3PutVarint(&pBlk->a[pBlk->n], nSuffix); - memcpy(&pBlk->a[pBlk->n], &zTerm[nPrefix], nSuffix); - pBlk->n += nSuffix; - - memcpy(pNode->key.a, zTerm, nTerm); - pNode->key.n = nTerm; - } - }else{ - /* Otherwise, flush the current node of layer iLayer to disk. - ** Then allocate a new, empty sibling node. The key will be written - ** into the parent of this node. */ - rc = fts3WriteSegment(p, pNode->iBlock, pNode->block.a, pNode->block.n); - - assert( pNode->block.nAlloc>=p->nNodeSize ); - pNode->block.a[0] = (char)iLayer; - pNode->block.n = 1 + sqlite3Fts3PutVarint(&pNode->block.a[1], iPtr+1); - - iNextPtr = pNode->iBlock; - pNode->iBlock++; - pNode->key.n = 0; - } - - if( rc!=SQLITE_OK || iNextPtr==0 ) return rc; - iPtr = iNextPtr; - } - - assert( 0 ); - return 0; -} - -/* -** Append a term and (optionally) doclist to the FTS segment node currently -** stored in blob *pNode. The node need not contain any terms, but the -** header must be written before this function is called. -** -** A node header is a single 0x00 byte for a leaf node, or a height varint -** followed by the left-hand-child varint for an internal node. -** -** The term to be appended is passed via arguments zTerm/nTerm. For a -** leaf node, the doclist is passed as aDoclist/nDoclist. For an internal -** node, both aDoclist and nDoclist must be passed 0. -** -** If the size of the value in blob pPrev is zero, then this is the first -** term written to the node. Otherwise, pPrev contains a copy of the -** previous term. Before this function returns, it is updated to contain a -** copy of zTerm/nTerm. -** -** It is assumed that the buffer associated with pNode is already large -** enough to accommodate the new entry. The buffer associated with pPrev -** is extended by this function if requrired. -** -** If an error (i.e. OOM condition) occurs, an SQLite error code is -** returned. Otherwise, SQLITE_OK. -*/ -static int fts3AppendToNode( - Blob *pNode, /* Current node image to append to */ - Blob *pPrev, /* Buffer containing previous term written */ - const char *zTerm, /* New term to write */ - int nTerm, /* Size of zTerm in bytes */ - const char *aDoclist, /* Doclist (or NULL) to write */ - int nDoclist /* Size of aDoclist in bytes */ -){ - int rc = SQLITE_OK; /* Return code */ - int bFirst = (pPrev->n==0); /* True if this is the first term written */ - int nPrefix; /* Size of term prefix in bytes */ - int nSuffix; /* Size of term suffix in bytes */ - - /* Node must have already been started. There must be a doclist for a - ** leaf node, and there must not be a doclist for an internal node. 
*/ - assert( pNode->n>0 ); - assert( (pNode->a[0]=='\0')==(aDoclist!=0) ); - - blobGrowBuffer(pPrev, nTerm, &rc); - if( rc!=SQLITE_OK ) return rc; - - nPrefix = fts3PrefixCompress(pPrev->a, pPrev->n, zTerm, nTerm); - nSuffix = nTerm - nPrefix; - memcpy(pPrev->a, zTerm, nTerm); - pPrev->n = nTerm; - - if( bFirst==0 ){ - pNode->n += sqlite3Fts3PutVarint(&pNode->a[pNode->n], nPrefix); - } - pNode->n += sqlite3Fts3PutVarint(&pNode->a[pNode->n], nSuffix); - memcpy(&pNode->a[pNode->n], &zTerm[nPrefix], nSuffix); - pNode->n += nSuffix; - - if( aDoclist ){ - pNode->n += sqlite3Fts3PutVarint(&pNode->a[pNode->n], nDoclist); - memcpy(&pNode->a[pNode->n], aDoclist, nDoclist); - pNode->n += nDoclist; - } - - assert( pNode->n<=pNode->nAlloc ); - - return SQLITE_OK; -} - -/* -** Append the current term and doclist pointed to by cursor pCsr to the -** appendable b-tree segment opened for writing by pWriter. -** -** Return SQLITE_OK if successful, or an SQLite error code otherwise. -*/ -static int fts3IncrmergeAppend( - Fts3Table *p, /* Fts3 table handle */ - IncrmergeWriter *pWriter, /* Writer object */ - Fts3MultiSegReader *pCsr /* Cursor containing term and doclist */ -){ - const char *zTerm = pCsr->zTerm; - int nTerm = pCsr->nTerm; - const char *aDoclist = pCsr->aDoclist; - int nDoclist = pCsr->nDoclist; - int rc = SQLITE_OK; /* Return code */ - int nSpace; /* Total space in bytes required on leaf */ - int nPrefix; /* Size of prefix shared with previous term */ - int nSuffix; /* Size of suffix (nTerm - nPrefix) */ - NodeWriter *pLeaf; /* Object used to write leaf nodes */ - - pLeaf = &pWriter->aNodeWriter[0]; - nPrefix = fts3PrefixCompress(pLeaf->key.a, pLeaf->key.n, zTerm, nTerm); - nSuffix = nTerm - nPrefix; - - nSpace = sqlite3Fts3VarintLen(nPrefix); - nSpace += sqlite3Fts3VarintLen(nSuffix) + nSuffix; - nSpace += sqlite3Fts3VarintLen(nDoclist) + nDoclist; - - /* If the current block is not empty, and if adding this term/doclist - ** to the current block would make it larger than Fts3Table.nNodeSize - ** bytes, write this block out to the database. */ - if( pLeaf->block.n>0 && (pLeaf->block.n + nSpace)>p->nNodeSize ){ - rc = fts3WriteSegment(p, pLeaf->iBlock, pLeaf->block.a, pLeaf->block.n); - pWriter->nWork++; - - /* Add the current term to the parent node. The term added to the - ** parent must: - ** - ** a) be greater than the largest term on the leaf node just written - ** to the database (still available in pLeaf->key), and - ** - ** b) be less than or equal to the term about to be added to the new - ** leaf node (zTerm/nTerm). - ** - ** In other words, it must be the prefix of zTerm 1 byte longer than - ** the common prefix (if any) of zTerm and pWriter->zTerm. 
- */ - if( rc==SQLITE_OK ){ - rc = fts3IncrmergePush(p, pWriter, zTerm, nPrefix+1); - } - - /* Advance to the next output block */ - pLeaf->iBlock++; - pLeaf->key.n = 0; - pLeaf->block.n = 0; - - nSuffix = nTerm; - nSpace = 1; - nSpace += sqlite3Fts3VarintLen(nSuffix) + nSuffix; - nSpace += sqlite3Fts3VarintLen(nDoclist) + nDoclist; - } - - pWriter->nLeafData += nSpace; - blobGrowBuffer(&pLeaf->block, pLeaf->block.n + nSpace, &rc); - if( rc==SQLITE_OK ){ - if( pLeaf->block.n==0 ){ - pLeaf->block.n = 1; - pLeaf->block.a[0] = '\0'; - } - rc = fts3AppendToNode( - &pLeaf->block, &pLeaf->key, zTerm, nTerm, aDoclist, nDoclist - ); - } - - return rc; -} - -/* -** This function is called to release all dynamic resources held by the -** merge-writer object pWriter, and if no error has occurred, to flush -** all outstanding node buffers held by pWriter to disk. -** -** If *pRc is not SQLITE_OK when this function is called, then no attempt -** is made to write any data to disk. Instead, this function serves only -** to release outstanding resources. -** -** Otherwise, if *pRc is initially SQLITE_OK and an error occurs while -** flushing buffers to disk, *pRc is set to an SQLite error code before -** returning. -*/ -static void fts3IncrmergeRelease( - Fts3Table *p, /* FTS3 table handle */ - IncrmergeWriter *pWriter, /* Merge-writer object */ - int *pRc /* IN/OUT: Error code */ -){ - int i; /* Used to iterate through non-root layers */ - int iRoot; /* Index of root in pWriter->aNodeWriter */ - NodeWriter *pRoot; /* NodeWriter for root node */ - int rc = *pRc; /* Error code */ - - /* Set iRoot to the index in pWriter->aNodeWriter[] of the output segment - ** root node. If the segment fits entirely on a single leaf node, iRoot - ** will be set to 0. If the root node is the parent of the leaves, iRoot - ** will be 1. And so on. */ - for(iRoot=FTS_MAX_APPENDABLE_HEIGHT-1; iRoot>=0; iRoot--){ - NodeWriter *pNode = &pWriter->aNodeWriter[iRoot]; - if( pNode->block.n>0 ) break; - assert( *pRc || pNode->block.nAlloc==0 ); - assert( *pRc || pNode->key.nAlloc==0 ); - sqlite3_free(pNode->block.a); - sqlite3_free(pNode->key.a); - } - - /* Empty output segment. This is a no-op. */ - if( iRoot<0 ) return; - - /* The entire output segment fits on a single node. Normally, this means - ** the node would be stored as a blob in the "root" column of the %_segdir - ** table. However, this is not permitted in this case. The problem is that - ** space has already been reserved in the %_segments table, and so the - ** start_block and end_block fields of the %_segdir table must be populated. - ** And, by design or by accident, released versions of FTS cannot handle - ** segments that fit entirely on the root node with start_block!=0. - ** - ** Instead, create a synthetic root node that contains nothing but a - ** pointer to the single content node. So that the segment consists of a - ** single leaf and a single interior (root) node. - ** - ** Todo: Better might be to defer allocating space in the %_segments - ** table until we are sure it is needed. - */ - if( iRoot==0 ){ - Blob *pBlock = &pWriter->aNodeWriter[1].block; - blobGrowBuffer(pBlock, 1 + FTS3_VARINT_MAX, &rc); - if( rc==SQLITE_OK ){ - pBlock->a[0] = 0x01; - pBlock->n = 1 + sqlite3Fts3PutVarint( - &pBlock->a[1], pWriter->aNodeWriter[0].iBlock - ); - } - iRoot = 1; - } - pRoot = &pWriter->aNodeWriter[iRoot]; - - /* Flush all currently outstanding nodes to disk. 
*/ - for(i=0; iaNodeWriter[i]; - if( pNode->block.n>0 && rc==SQLITE_OK ){ - rc = fts3WriteSegment(p, pNode->iBlock, pNode->block.a, pNode->block.n); - } - sqlite3_free(pNode->block.a); - sqlite3_free(pNode->key.a); - } - - /* Write the %_segdir record. */ - if( rc==SQLITE_OK ){ - rc = fts3WriteSegdir(p, - pWriter->iAbsLevel+1, /* level */ - pWriter->iIdx, /* idx */ - pWriter->iStart, /* start_block */ - pWriter->aNodeWriter[0].iBlock, /* leaves_end_block */ - pWriter->iEnd, /* end_block */ - (pWriter->bNoLeafData==0 ? pWriter->nLeafData : 0), /* end_block */ - pRoot->block.a, pRoot->block.n /* root */ - ); - } - sqlite3_free(pRoot->block.a); - sqlite3_free(pRoot->key.a); - - *pRc = rc; -} - -/* -** Compare the term in buffer zLhs (size in bytes nLhs) with that in -** zRhs (size in bytes nRhs) using memcmp. If one term is a prefix of -** the other, it is considered to be smaller than the other. -** -** Return -ve if zLhs is smaller than zRhs, 0 if it is equal, or +ve -** if it is greater. -*/ -static int fts3TermCmp( - const char *zLhs, int nLhs, /* LHS of comparison */ - const char *zRhs, int nRhs /* RHS of comparison */ -){ - int nCmp = MIN(nLhs, nRhs); - int res; - - res = memcmp(zLhs, zRhs, nCmp); - if( res==0 ) res = nLhs - nRhs; - - return res; -} - - -/* -** Query to see if the entry in the %_segments table with blockid iEnd is -** NULL. If no error occurs and the entry is NULL, set *pbRes 1 before -** returning. Otherwise, set *pbRes to 0. -** -** Or, if an error occurs while querying the database, return an SQLite -** error code. The final value of *pbRes is undefined in this case. -** -** This is used to test if a segment is an "appendable" segment. If it -** is, then a NULL entry has been inserted into the %_segments table -** with blockid %_segdir.end_block. -*/ -static int fts3IsAppendable(Fts3Table *p, sqlite3_int64 iEnd, int *pbRes){ - int bRes = 0; /* Result to set *pbRes to */ - sqlite3_stmt *pCheck = 0; /* Statement to query database with */ - int rc; /* Return code */ - - rc = fts3SqlStmt(p, SQL_SEGMENT_IS_APPENDABLE, &pCheck, 0); - if( rc==SQLITE_OK ){ - sqlite3_bind_int64(pCheck, 1, iEnd); - if( SQLITE_ROW==sqlite3_step(pCheck) ) bRes = 1; - rc = sqlite3_reset(pCheck); - } - - *pbRes = bRes; - return rc; -} - -/* -** This function is called when initializing an incremental-merge operation. -** It checks if the existing segment with index value iIdx at absolute level -** (iAbsLevel+1) can be appended to by the incremental merge. If it can, the -** merge-writer object *pWriter is initialized to write to it. -** -** An existing segment can be appended to by an incremental merge if: -** -** * It was initially created as an appendable segment (with all required -** space pre-allocated), and -** -** * The first key read from the input (arguments zKey and nKey) is -** greater than the largest key currently stored in the potential -** output segment. 
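/*
** A condensed sketch of the appendability rule described above: an existing
** output segment may only be extended if the first incoming key sorts
** strictly after the largest key it already holds. term_cmp() mirrors the
** memcmp-plus-length ordering of fts3TermCmp(); the remaining names and the
** sample keys are illustrative only.
*/
#include <stdio.h>
#include <string.h>

static int term_cmp(const char *a, int na, const char *b, int nb){
  int nMin = na<nb ? na : nb;
  int res = memcmp(a, b, nMin);
  return res ? res : na-nb;             /* a prefix sorts before its extensions */
}

/* True if a segment whose largest key is zLast/nLast can accept zNew/nNew. */
static int can_append(const char *zLast, int nLast, const char *zNew, int nNew){
  return term_cmp(zNew, nNew, zLast, nLast) > 0;
}

int main(void){
  printf("%d %d\n", can_append("alpha", 5, "beta", 4),   /* 1: "beta" > "alpha" */
                    can_append("beta", 4, "bet", 3));    /* 0: "bet" is a prefix */
  return 0;
}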
-*/ -static int fts3IncrmergeLoad( - Fts3Table *p, /* Fts3 table handle */ - sqlite3_int64 iAbsLevel, /* Absolute level of input segments */ - int iIdx, /* Index of candidate output segment */ - const char *zKey, /* First key to write */ - int nKey, /* Number of bytes in nKey */ - IncrmergeWriter *pWriter /* Populate this object */ -){ - int rc; /* Return code */ - sqlite3_stmt *pSelect = 0; /* SELECT to read %_segdir entry */ - - rc = fts3SqlStmt(p, SQL_SELECT_SEGDIR, &pSelect, 0); - if( rc==SQLITE_OK ){ - sqlite3_int64 iStart = 0; /* Value of %_segdir.start_block */ - sqlite3_int64 iLeafEnd = 0; /* Value of %_segdir.leaves_end_block */ - sqlite3_int64 iEnd = 0; /* Value of %_segdir.end_block */ - const char *aRoot = 0; /* Pointer to %_segdir.root buffer */ - int nRoot = 0; /* Size of aRoot[] in bytes */ - int rc2; /* Return code from sqlite3_reset() */ - int bAppendable = 0; /* Set to true if segment is appendable */ - - /* Read the %_segdir entry for index iIdx absolute level (iAbsLevel+1) */ - sqlite3_bind_int64(pSelect, 1, iAbsLevel+1); - sqlite3_bind_int(pSelect, 2, iIdx); - if( sqlite3_step(pSelect)==SQLITE_ROW ){ - iStart = sqlite3_column_int64(pSelect, 1); - iLeafEnd = sqlite3_column_int64(pSelect, 2); - fts3ReadEndBlockField(pSelect, 3, &iEnd, &pWriter->nLeafData); - if( pWriter->nLeafData<0 ){ - pWriter->nLeafData = pWriter->nLeafData * -1; - } - pWriter->bNoLeafData = (pWriter->nLeafData==0); - nRoot = sqlite3_column_bytes(pSelect, 4); - aRoot = sqlite3_column_blob(pSelect, 4); - }else{ - return sqlite3_reset(pSelect); - } - - /* Check for the zero-length marker in the %_segments table */ - rc = fts3IsAppendable(p, iEnd, &bAppendable); - - /* Check that zKey/nKey is larger than the largest key the candidate */ - if( rc==SQLITE_OK && bAppendable ){ - char *aLeaf = 0; - int nLeaf = 0; - - rc = sqlite3Fts3ReadBlock(p, iLeafEnd, &aLeaf, &nLeaf, 0); - if( rc==SQLITE_OK ){ - NodeReader reader; - for(rc = nodeReaderInit(&reader, aLeaf, nLeaf); - rc==SQLITE_OK && reader.aNode; - rc = nodeReaderNext(&reader) - ){ - assert( reader.aNode ); - } - if( fts3TermCmp(zKey, nKey, reader.term.a, reader.term.n)<=0 ){ - bAppendable = 0; - } - nodeReaderRelease(&reader); - } - sqlite3_free(aLeaf); - } - - if( rc==SQLITE_OK && bAppendable ){ - /* It is possible to append to this segment. Set up the IncrmergeWriter - ** object to do so. 
*/ - int i; - int nHeight = (int)aRoot[0]; - NodeWriter *pNode; - - pWriter->nLeafEst = (int)((iEnd - iStart) + 1)/FTS_MAX_APPENDABLE_HEIGHT; - pWriter->iStart = iStart; - pWriter->iEnd = iEnd; - pWriter->iAbsLevel = iAbsLevel; - pWriter->iIdx = iIdx; - - for(i=nHeight+1; iaNodeWriter[i].iBlock = pWriter->iStart + i*pWriter->nLeafEst; - } - - pNode = &pWriter->aNodeWriter[nHeight]; - pNode->iBlock = pWriter->iStart + pWriter->nLeafEst*nHeight; - blobGrowBuffer(&pNode->block, MAX(nRoot, p->nNodeSize), &rc); - if( rc==SQLITE_OK ){ - memcpy(pNode->block.a, aRoot, nRoot); - pNode->block.n = nRoot; - } - - for(i=nHeight; i>=0 && rc==SQLITE_OK; i--){ - NodeReader reader; - pNode = &pWriter->aNodeWriter[i]; - - rc = nodeReaderInit(&reader, pNode->block.a, pNode->block.n); - while( reader.aNode && rc==SQLITE_OK ) rc = nodeReaderNext(&reader); - blobGrowBuffer(&pNode->key, reader.term.n, &rc); - if( rc==SQLITE_OK ){ - memcpy(pNode->key.a, reader.term.a, reader.term.n); - pNode->key.n = reader.term.n; - if( i>0 ){ - char *aBlock = 0; - int nBlock = 0; - pNode = &pWriter->aNodeWriter[i-1]; - pNode->iBlock = reader.iChild; - rc = sqlite3Fts3ReadBlock(p, reader.iChild, &aBlock, &nBlock, 0); - blobGrowBuffer(&pNode->block, MAX(nBlock, p->nNodeSize), &rc); - if( rc==SQLITE_OK ){ - memcpy(pNode->block.a, aBlock, nBlock); - pNode->block.n = nBlock; - } - sqlite3_free(aBlock); - } - } - nodeReaderRelease(&reader); - } - } - - rc2 = sqlite3_reset(pSelect); - if( rc==SQLITE_OK ) rc = rc2; - } - - return rc; -} - -/* -** Determine the largest segment index value that exists within absolute -** level iAbsLevel+1. If no error occurs, set *piIdx to this value plus -** one before returning SQLITE_OK. Or, if there are no segments at all -** within level iAbsLevel, set *piIdx to zero. -** -** If an error occurs, return an SQLite error code. The final value of -** *piIdx is undefined in this case. -*/ -static int fts3IncrmergeOutputIdx( - Fts3Table *p, /* FTS Table handle */ - sqlite3_int64 iAbsLevel, /* Absolute index of input segments */ - int *piIdx /* OUT: Next free index at iAbsLevel+1 */ -){ - int rc; - sqlite3_stmt *pOutputIdx = 0; /* SQL used to find output index */ - - rc = fts3SqlStmt(p, SQL_NEXT_SEGMENT_INDEX, &pOutputIdx, 0); - if( rc==SQLITE_OK ){ - sqlite3_bind_int64(pOutputIdx, 1, iAbsLevel+1); - sqlite3_step(pOutputIdx); - *piIdx = sqlite3_column_int(pOutputIdx, 0); - rc = sqlite3_reset(pOutputIdx); - } - - return rc; -} - -/* -** Allocate an appendable output segment on absolute level iAbsLevel+1 -** with idx value iIdx. -** -** In the %_segdir table, a segment is defined by the values in three -** columns: -** -** start_block -** leaves_end_block -** end_block -** -** When an appendable segment is allocated, it is estimated that the -** maximum number of leaf blocks that may be required is the sum of the -** number of leaf blocks consumed by the input segments, plus the number -** of input segments, multiplied by two. This value is stored in stack -** variable nLeafEst. -** -** A total of 16*nLeafEst blocks are allocated when an appendable segment -** is created ((1 + end_block - start_block)==16*nLeafEst). The contiguous -** array of leaf nodes starts at the first block allocated. The array -** of interior nodes that are parents of the leaf nodes start at block -** (start_block + (1 + end_block - start_block) / 16). And so on. -** -** In the actual code below, the value "16" is replaced with the -** pre-processor macro FTS_MAX_APPENDABLE_HEIGHT. 
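/*
** Worked arithmetic for the allocation described above, using the
** FTS_MAX_APPENDABLE_HEIGHT==16 figure quoted in the comment. Layer i of the
** appendable segment begins at start_block + i*nLeafEst and the reserved
** range ends at start_block + 16*nLeafEst - 1. The start block and the leaf
** estimate below are hypothetical values chosen only for illustration.
*/
#include <stdio.h>

#define APPENDABLE_HEIGHT 16

int main(void){
  long long iStart = 1000;        /* hypothetical first allocated block */
  int nLeafEst = 24;              /* hypothetical per-layer block estimate */
  long long iEnd = iStart + (long long)APPENDABLE_HEIGHT*nLeafEst - 1;
  int i;

  for(i=0; i<APPENDABLE_HEIGHT; i++){
    printf("layer %2d starts at block %lld\n", i, iStart + (long long)i*nLeafEst);
  }
  printf("end_block = %lld\n", iEnd);   /* 1000 + 16*24 - 1 = 1383 */
  return 0;
}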
-*/ -static int fts3IncrmergeWriter( - Fts3Table *p, /* Fts3 table handle */ - sqlite3_int64 iAbsLevel, /* Absolute level of input segments */ - int iIdx, /* Index of new output segment */ - Fts3MultiSegReader *pCsr, /* Cursor that data will be read from */ - IncrmergeWriter *pWriter /* Populate this object */ -){ - int rc; /* Return Code */ - int i; /* Iterator variable */ - int nLeafEst = 0; /* Blocks allocated for leaf nodes */ - sqlite3_stmt *pLeafEst = 0; /* SQL used to determine nLeafEst */ - sqlite3_stmt *pFirstBlock = 0; /* SQL used to determine first block */ - - /* Calculate nLeafEst. */ - rc = fts3SqlStmt(p, SQL_MAX_LEAF_NODE_ESTIMATE, &pLeafEst, 0); - if( rc==SQLITE_OK ){ - sqlite3_bind_int64(pLeafEst, 1, iAbsLevel); - sqlite3_bind_int64(pLeafEst, 2, pCsr->nSegment); - if( SQLITE_ROW==sqlite3_step(pLeafEst) ){ - nLeafEst = sqlite3_column_int(pLeafEst, 0); - } - rc = sqlite3_reset(pLeafEst); - } - if( rc!=SQLITE_OK ) return rc; - - /* Calculate the first block to use in the output segment */ - rc = fts3SqlStmt(p, SQL_NEXT_SEGMENTS_ID, &pFirstBlock, 0); - if( rc==SQLITE_OK ){ - if( SQLITE_ROW==sqlite3_step(pFirstBlock) ){ - pWriter->iStart = sqlite3_column_int64(pFirstBlock, 0); - pWriter->iEnd = pWriter->iStart - 1; - pWriter->iEnd += nLeafEst * FTS_MAX_APPENDABLE_HEIGHT; - } - rc = sqlite3_reset(pFirstBlock); - } - if( rc!=SQLITE_OK ) return rc; - - /* Insert the marker in the %_segments table to make sure nobody tries - ** to steal the space just allocated. This is also used to identify - ** appendable segments. */ - rc = fts3WriteSegment(p, pWriter->iEnd, 0, 0); - if( rc!=SQLITE_OK ) return rc; - - pWriter->iAbsLevel = iAbsLevel; - pWriter->nLeafEst = nLeafEst; - pWriter->iIdx = iIdx; - - /* Set up the array of NodeWriter objects */ - for(i=0; iaNodeWriter[i].iBlock = pWriter->iStart + i*pWriter->nLeafEst; - } - return SQLITE_OK; -} - -/* -** Remove an entry from the %_segdir table. This involves running the -** following two statements: -** -** DELETE FROM %_segdir WHERE level = :iAbsLevel AND idx = :iIdx -** UPDATE %_segdir SET idx = idx - 1 WHERE level = :iAbsLevel AND idx > :iIdx -** -** The DELETE statement removes the specific %_segdir level. The UPDATE -** statement ensures that the remaining segments have contiguously allocated -** idx values. -*/ -static int fts3RemoveSegdirEntry( - Fts3Table *p, /* FTS3 table handle */ - sqlite3_int64 iAbsLevel, /* Absolute level to delete from */ - int iIdx /* Index of %_segdir entry to delete */ -){ - int rc; /* Return code */ - sqlite3_stmt *pDelete = 0; /* DELETE statement */ - - rc = fts3SqlStmt(p, SQL_DELETE_SEGDIR_ENTRY, &pDelete, 0); - if( rc==SQLITE_OK ){ - sqlite3_bind_int64(pDelete, 1, iAbsLevel); - sqlite3_bind_int(pDelete, 2, iIdx); - sqlite3_step(pDelete); - rc = sqlite3_reset(pDelete); - } - - return rc; -} - -/* -** One or more segments have just been removed from absolute level iAbsLevel. -** Update the 'idx' values of the remaining segments in the level so that -** the idx values are a contiguous sequence starting from 0. 
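/*
** A small model of the repacking step described above: whatever idx values
** remain after segments are deleted, they are renumbered in place so that
** they run 0, 1, 2, ... in their existing order. The array below stands in
** for the idx column of the surviving %_segdir rows; the values are made up.
*/
#include <stdio.h>

int main(void){
  int aIdx[] = {1, 3, 4};          /* idx values of the segments that survived */
  int n = sizeof(aIdx)/sizeof(aIdx[0]);
  int i;
  for(i=0; i<n; i++){
    if( aIdx[i]!=i ) aIdx[i] = i;  /* the same net effect as the UPDATE below */
  }
  for(i=0; i<n; i++) printf("%d ", aIdx[i]);   /* prints: 0 1 2 */
  printf("\n");
  return 0;
}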
-*/ -static int fts3RepackSegdirLevel( - Fts3Table *p, /* FTS3 table handle */ - sqlite3_int64 iAbsLevel /* Absolute level to repack */ -){ - int rc; /* Return code */ - int *aIdx = 0; /* Array of remaining idx values */ - int nIdx = 0; /* Valid entries in aIdx[] */ - int nAlloc = 0; /* Allocated size of aIdx[] */ - int i; /* Iterator variable */ - sqlite3_stmt *pSelect = 0; /* Select statement to read idx values */ - sqlite3_stmt *pUpdate = 0; /* Update statement to modify idx values */ - - rc = fts3SqlStmt(p, SQL_SELECT_INDEXES, &pSelect, 0); - if( rc==SQLITE_OK ){ - int rc2; - sqlite3_bind_int64(pSelect, 1, iAbsLevel); - while( SQLITE_ROW==sqlite3_step(pSelect) ){ - if( nIdx>=nAlloc ){ - int *aNew; - nAlloc += 16; - aNew = sqlite3_realloc(aIdx, nAlloc*sizeof(int)); - if( !aNew ){ - rc = SQLITE_NOMEM; - break; - } - aIdx = aNew; - } - aIdx[nIdx++] = sqlite3_column_int(pSelect, 0); - } - rc2 = sqlite3_reset(pSelect); - if( rc==SQLITE_OK ) rc = rc2; - } - - if( rc==SQLITE_OK ){ - rc = fts3SqlStmt(p, SQL_SHIFT_SEGDIR_ENTRY, &pUpdate, 0); - } - if( rc==SQLITE_OK ){ - sqlite3_bind_int64(pUpdate, 2, iAbsLevel); - } - - assert( p->bIgnoreSavepoint==0 ); - p->bIgnoreSavepoint = 1; - for(i=0; rc==SQLITE_OK && ibIgnoreSavepoint = 0; - - sqlite3_free(aIdx); - return rc; -} - -static void fts3StartNode(Blob *pNode, int iHeight, sqlite3_int64 iChild){ - pNode->a[0] = (char)iHeight; - if( iChild ){ - assert( pNode->nAlloc>=1+sqlite3Fts3VarintLen(iChild) ); - pNode->n = 1 + sqlite3Fts3PutVarint(&pNode->a[1], iChild); - }else{ - assert( pNode->nAlloc>=1 ); - pNode->n = 1; - } -} - -/* -** The first two arguments are a pointer to and the size of a segment b-tree -** node. The node may be a leaf or an internal node. -** -** This function creates a new node image in blob object *pNew by copying -** all terms that are greater than or equal to zTerm/nTerm (for leaf nodes) -** or greater than zTerm/nTerm (for internal nodes) from aNode/nNode. -*/ -static int fts3TruncateNode( - const char *aNode, /* Current node image */ - int nNode, /* Size of aNode in bytes */ - Blob *pNew, /* OUT: Write new node image here */ - const char *zTerm, /* Omit all terms smaller than this */ - int nTerm, /* Size of zTerm in bytes */ - sqlite3_int64 *piBlock /* OUT: Block number in next layer down */ -){ - NodeReader reader; /* Reader object */ - Blob prev = {0, 0, 0}; /* Previous term written to new node */ - int rc = SQLITE_OK; /* Return code */ - int bLeaf = aNode[0]=='\0'; /* True for a leaf node */ - - /* Allocate required output space */ - blobGrowBuffer(pNew, nNode, &rc); - if( rc!=SQLITE_OK ) return rc; - pNew->n = 0; - - /* Populate new node buffer */ - for(rc = nodeReaderInit(&reader, aNode, nNode); - rc==SQLITE_OK && reader.aNode; - rc = nodeReaderNext(&reader) - ){ - if( pNew->n==0 ){ - int res = fts3TermCmp(reader.term.a, reader.term.n, zTerm, nTerm); - if( res<0 || (bLeaf==0 && res==0) ) continue; - fts3StartNode(pNew, (int)aNode[0], reader.iChild); - *piBlock = reader.iChild; - } - rc = fts3AppendToNode( - pNew, &prev, reader.term.a, reader.term.n, - reader.aDoclist, reader.nDoclist - ); - if( rc!=SQLITE_OK ) break; - } - if( pNew->n==0 ){ - fts3StartNode(pNew, (int)aNode[0], reader.iChild); - *piBlock = reader.iChild; - } - assert( pNew->n<=pNew->nAlloc ); - - nodeReaderRelease(&reader); - sqlite3_free(prev.a); - return rc; -} - -/* -** Remove all terms smaller than zTerm/nTerm from segment iIdx in absolute -** level iAbsLevel. 
This may involve deleting entries from the %_segments -** table, and modifying existing entries in both the %_segments and %_segdir -** tables. -** -** SQLITE_OK is returned if the segment is updated successfully. Or an -** SQLite error code otherwise. -*/ -static int fts3TruncateSegment( - Fts3Table *p, /* FTS3 table handle */ - sqlite3_int64 iAbsLevel, /* Absolute level of segment to modify */ - int iIdx, /* Index within level of segment to modify */ - const char *zTerm, /* Remove terms smaller than this */ - int nTerm /* Number of bytes in buffer zTerm */ -){ - int rc = SQLITE_OK; /* Return code */ - Blob root = {0,0,0}; /* New root page image */ - Blob block = {0,0,0}; /* Buffer used for any other block */ - sqlite3_int64 iBlock = 0; /* Block id */ - sqlite3_int64 iNewStart = 0; /* New value for iStartBlock */ - sqlite3_int64 iOldStart = 0; /* Old value for iStartBlock */ - sqlite3_stmt *pFetch = 0; /* Statement used to fetch segdir */ - - rc = fts3SqlStmt(p, SQL_SELECT_SEGDIR, &pFetch, 0); - if( rc==SQLITE_OK ){ - int rc2; /* sqlite3_reset() return code */ - sqlite3_bind_int64(pFetch, 1, iAbsLevel); - sqlite3_bind_int(pFetch, 2, iIdx); - if( SQLITE_ROW==sqlite3_step(pFetch) ){ - const char *aRoot = sqlite3_column_blob(pFetch, 4); - int nRoot = sqlite3_column_bytes(pFetch, 4); - iOldStart = sqlite3_column_int64(pFetch, 1); - rc = fts3TruncateNode(aRoot, nRoot, &root, zTerm, nTerm, &iBlock); - } - rc2 = sqlite3_reset(pFetch); - if( rc==SQLITE_OK ) rc = rc2; - } - - while( rc==SQLITE_OK && iBlock ){ - char *aBlock = 0; - int nBlock = 0; - iNewStart = iBlock; - - rc = sqlite3Fts3ReadBlock(p, iBlock, &aBlock, &nBlock, 0); - if( rc==SQLITE_OK ){ - rc = fts3TruncateNode(aBlock, nBlock, &block, zTerm, nTerm, &iBlock); - } - if( rc==SQLITE_OK ){ - rc = fts3WriteSegment(p, iNewStart, block.a, block.n); - } - sqlite3_free(aBlock); - } - - /* Variable iNewStart now contains the first valid leaf node. */ - if( rc==SQLITE_OK && iNewStart ){ - sqlite3_stmt *pDel = 0; - rc = fts3SqlStmt(p, SQL_DELETE_SEGMENTS_RANGE, &pDel, 0); - if( rc==SQLITE_OK ){ - sqlite3_bind_int64(pDel, 1, iOldStart); - sqlite3_bind_int64(pDel, 2, iNewStart-1); - sqlite3_step(pDel); - rc = sqlite3_reset(pDel); - } - } - - if( rc==SQLITE_OK ){ - sqlite3_stmt *pChomp = 0; - rc = fts3SqlStmt(p, SQL_CHOMP_SEGDIR, &pChomp, 0); - if( rc==SQLITE_OK ){ - sqlite3_bind_int64(pChomp, 1, iNewStart); - sqlite3_bind_blob(pChomp, 2, root.a, root.n, SQLITE_STATIC); - sqlite3_bind_int64(pChomp, 3, iAbsLevel); - sqlite3_bind_int(pChomp, 4, iIdx); - sqlite3_step(pChomp); - rc = sqlite3_reset(pChomp); - } - } - - sqlite3_free(root.a); - sqlite3_free(block.a); - return rc; -} - -/* -** This function is called after an incrmental-merge operation has run to -** merge (or partially merge) two or more segments from absolute level -** iAbsLevel. -** -** Each input segment is either removed from the db completely (if all of -** its data was copied to the output segment by the incrmerge operation) -** or modified in place so that it no longer contains those entries that -** have been duplicated in the output segment. 
-*/ -static int fts3IncrmergeChomp( - Fts3Table *p, /* FTS table handle */ - sqlite3_int64 iAbsLevel, /* Absolute level containing segments */ - Fts3MultiSegReader *pCsr, /* Chomp all segments opened by this cursor */ - int *pnRem /* Number of segments not deleted */ -){ - int i; - int nRem = 0; - int rc = SQLITE_OK; - - for(i=pCsr->nSegment-1; i>=0 && rc==SQLITE_OK; i--){ - Fts3SegReader *pSeg = 0; - int j; - - /* Find the Fts3SegReader object with Fts3SegReader.iIdx==i. It is hiding - ** somewhere in the pCsr->apSegment[] array. */ - for(j=0; ALWAYS(jnSegment); j++){ - pSeg = pCsr->apSegment[j]; - if( pSeg->iIdx==i ) break; - } - assert( jnSegment && pSeg->iIdx==i ); - - if( pSeg->aNode==0 ){ - /* Seg-reader is at EOF. Remove the entire input segment. */ - rc = fts3DeleteSegment(p, pSeg); - if( rc==SQLITE_OK ){ - rc = fts3RemoveSegdirEntry(p, iAbsLevel, pSeg->iIdx); - } - *pnRem = 0; - }else{ - /* The incremental merge did not copy all the data from this - ** segment to the upper level. The segment is modified in place - ** so that it contains no keys smaller than zTerm/nTerm. */ - const char *zTerm = pSeg->zTerm; - int nTerm = pSeg->nTerm; - rc = fts3TruncateSegment(p, iAbsLevel, pSeg->iIdx, zTerm, nTerm); - nRem++; - } - } - - if( rc==SQLITE_OK && nRem!=pCsr->nSegment ){ - rc = fts3RepackSegdirLevel(p, iAbsLevel); - } - - *pnRem = nRem; - return rc; -} - -/* -** Store an incr-merge hint in the database. -*/ -static int fts3IncrmergeHintStore(Fts3Table *p, Blob *pHint){ - sqlite3_stmt *pReplace = 0; - int rc; /* Return code */ - - rc = fts3SqlStmt(p, SQL_REPLACE_STAT, &pReplace, 0); - if( rc==SQLITE_OK ){ - sqlite3_bind_int(pReplace, 1, FTS_STAT_INCRMERGEHINT); - sqlite3_bind_blob(pReplace, 2, pHint->a, pHint->n, SQLITE_STATIC); - sqlite3_step(pReplace); - rc = sqlite3_reset(pReplace); - } - - return rc; -} - -/* -** Load an incr-merge hint from the database. The incr-merge hint, if one -** exists, is stored in the rowid==1 row of the %_stat table. -** -** If successful, populate blob *pHint with the value read from the %_stat -** table and return SQLITE_OK. Otherwise, if an error occurs, return an -** SQLite error code. -*/ -static int fts3IncrmergeHintLoad(Fts3Table *p, Blob *pHint){ - sqlite3_stmt *pSelect = 0; - int rc; - - pHint->n = 0; - rc = fts3SqlStmt(p, SQL_SELECT_STAT, &pSelect, 0); - if( rc==SQLITE_OK ){ - int rc2; - sqlite3_bind_int(pSelect, 1, FTS_STAT_INCRMERGEHINT); - if( SQLITE_ROW==sqlite3_step(pSelect) ){ - const char *aHint = sqlite3_column_blob(pSelect, 0); - int nHint = sqlite3_column_bytes(pSelect, 0); - if( aHint ){ - blobGrowBuffer(pHint, nHint, &rc); - if( rc==SQLITE_OK ){ - memcpy(pHint->a, aHint, nHint); - pHint->n = nHint; - } - } - } - rc2 = sqlite3_reset(pSelect); - if( rc==SQLITE_OK ) rc = rc2; - } - - return rc; -} - -/* -** If *pRc is not SQLITE_OK when this function is called, it is a no-op. -** Otherwise, append an entry to the hint stored in blob *pHint. Each entry -** consists of two varints, the absolute level number of the input segments -** and the number of input segments. -** -** If successful, leave *pRc set to SQLITE_OK and return. If an error occurs, -** set *pRc to an SQLite error code before returning. 
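/*
** A self-contained model of the hint blob described above: each push appends
** two varints (the absolute level, then the number of input segments), and a
** pop removes the most recent pair by scanning backwards for varint
** boundaries (a byte with its 0x80 bit clear ends a varint). The helper names
** and the values in main() are illustrative, not the FTS3 ones.
*/
#include <stdio.h>

static int put_varint(unsigned char *p, long long v){
  int n = 0;
  do{
    p[n++] = (unsigned char)((v & 0x7f) | 0x80);
    v >>= 7;
  }while( v );
  p[n-1] &= 0x7f;
  return n;
}

static int get_varint(const unsigned char *p, long long *pv){
  long long v = 0;
  int i = 0;
  do{
    v |= (long long)(p[i] & 0x7f) << (7*i);
  }while( p[i++] & 0x80 );
  *pv = v;
  return i;
}

/* Append one (level, nInput) pair; return the new size of the blob. */
static int hint_push(unsigned char *a, int n, long long iLevel, long long nInput){
  n += put_varint(&a[n], iLevel);
  n += put_varint(&a[n], nInput);
  return n;
}

/* Read and remove the most recently pushed pair; return the new size. */
static int hint_pop(unsigned char *a, int n, long long *piLevel, long long *pnInput){
  int i = n - 2;
  int k;
  while( i>0 && (a[i-1] & 0x80) ) i--;   /* back up to a varint boundary */
  while( i>0 && (a[i-1] & 0x80) ) i--;   /* ...and once more, past the level */
  k = i;
  k += get_varint(&a[k], piLevel);
  get_varint(&a[k], pnInput);
  return i;
}

int main(void){
  unsigned char hint[64];
  long long lvl = 0, seg = 0;
  int n = 0;
  n = hint_push(hint, n, 1027, 4);       /* hypothetical level / segment counts */
  n = hint_push(hint, n, 2, 16);
  n = hint_pop(hint, n, &lvl, &seg);     /* removes the (2, 16) entry */
  printf("popped level %lld, %lld segments; %d bytes remain\n", lvl, seg, n);
  return 0;
}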
-*/ -static void fts3IncrmergeHintPush( - Blob *pHint, /* Hint blob to append to */ - i64 iAbsLevel, /* First varint to store in hint */ - int nInput, /* Second varint to store in hint */ - int *pRc /* IN/OUT: Error code */ -){ - blobGrowBuffer(pHint, pHint->n + 2*FTS3_VARINT_MAX, pRc); - if( *pRc==SQLITE_OK ){ - pHint->n += sqlite3Fts3PutVarint(&pHint->a[pHint->n], iAbsLevel); - pHint->n += sqlite3Fts3PutVarint(&pHint->a[pHint->n], (i64)nInput); - } -} - -/* -** Read the last entry (most recently pushed) from the hint blob *pHint -** and then remove the entry. Write the two values read to *piAbsLevel and -** *pnInput before returning. -** -** If no error occurs, return SQLITE_OK. If the hint blob in *pHint does -** not contain at least two valid varints, return SQLITE_CORRUPT_VTAB. -*/ -static int fts3IncrmergeHintPop(Blob *pHint, i64 *piAbsLevel, int *pnInput){ - const int nHint = pHint->n; - int i; - - i = pHint->n-2; - while( i>0 && (pHint->a[i-1] & 0x80) ) i--; - while( i>0 && (pHint->a[i-1] & 0x80) ) i--; - - pHint->n = i; - i += sqlite3Fts3GetVarint(&pHint->a[i], piAbsLevel); - i += fts3GetVarint32(&pHint->a[i], pnInput); - if( i!=nHint ) return SQLITE_CORRUPT_VTAB; - - return SQLITE_OK; -} - - -/* -** Attempt an incremental merge that writes nMerge leaf blocks. -** -** Incremental merges happen nMin segments at a time. The segments -** to be merged are the nMin oldest segments (the ones with the smallest -** values for the _segdir.idx field) in the highest level that contains -** at least nMin segments. Multiple merges might occur in an attempt to -** write the quota of nMerge leaf blocks. -*/ -SQLITE_PRIVATE int sqlite3Fts3Incrmerge(Fts3Table *p, int nMerge, int nMin){ - int rc; /* Return code */ - int nRem = nMerge; /* Number of leaf pages yet to be written */ - Fts3MultiSegReader *pCsr; /* Cursor used to read input data */ - Fts3SegFilter *pFilter; /* Filter used with cursor pCsr */ - IncrmergeWriter *pWriter; /* Writer object */ - int nSeg = 0; /* Number of input segments */ - sqlite3_int64 iAbsLevel = 0; /* Absolute level number to work on */ - Blob hint = {0, 0, 0}; /* Hint read from %_stat table */ - int bDirtyHint = 0; /* True if blob 'hint' has been modified */ - - /* Allocate space for the cursor, filter and writer objects */ - const int nAlloc = sizeof(*pCsr) + sizeof(*pFilter) + sizeof(*pWriter); - pWriter = (IncrmergeWriter *)sqlite3_malloc(nAlloc); - if( !pWriter ) return SQLITE_NOMEM; - pFilter = (Fts3SegFilter *)&pWriter[1]; - pCsr = (Fts3MultiSegReader *)&pFilter[1]; - - rc = fts3IncrmergeHintLoad(p, &hint); - while( rc==SQLITE_OK && nRem>0 ){ - const i64 nMod = FTS3_SEGDIR_MAXLEVEL * p->nIndex; - sqlite3_stmt *pFindLevel = 0; /* SQL used to determine iAbsLevel */ - int bUseHint = 0; /* True if attempting to append */ - int iIdx = 0; /* Largest idx in level (iAbsLevel+1) */ - - /* Search the %_segdir table for the absolute level with the smallest - ** relative level number that contains at least nMin segments, if any. - ** If one is found, set iAbsLevel to the absolute level number and - ** nSeg to nMin. If no level with at least nMin segments can be found, - ** set nSeg to -1. 
- */ - rc = fts3SqlStmt(p, SQL_FIND_MERGE_LEVEL, &pFindLevel, 0); - sqlite3_bind_int(pFindLevel, 1, nMin); - if( sqlite3_step(pFindLevel)==SQLITE_ROW ){ - iAbsLevel = sqlite3_column_int64(pFindLevel, 0); - nSeg = nMin; - }else{ - nSeg = -1; - } - rc = sqlite3_reset(pFindLevel); - - /* If the hint read from the %_stat table is not empty, check if the - ** last entry in it specifies a relative level smaller than or equal - ** to the level identified by the block above (if any). If so, this - ** iteration of the loop will work on merging at the hinted level. - */ - if( rc==SQLITE_OK && hint.n ){ - int nHint = hint.n; - sqlite3_int64 iHintAbsLevel = 0; /* Hint level */ - int nHintSeg = 0; /* Hint number of segments */ - - rc = fts3IncrmergeHintPop(&hint, &iHintAbsLevel, &nHintSeg); - if( nSeg<0 || (iAbsLevel % nMod) >= (iHintAbsLevel % nMod) ){ - iAbsLevel = iHintAbsLevel; - nSeg = nHintSeg; - bUseHint = 1; - bDirtyHint = 1; - }else{ - /* This undoes the effect of the HintPop() above - so that no entry - ** is removed from the hint blob. */ - hint.n = nHint; - } - } - - /* If nSeg is less that zero, then there is no level with at least - ** nMin segments and no hint in the %_stat table. No work to do. - ** Exit early in this case. */ - if( nSeg<0 ) break; - - /* Open a cursor to iterate through the contents of the oldest nSeg - ** indexes of absolute level iAbsLevel. If this cursor is opened using - ** the 'hint' parameters, it is possible that there are less than nSeg - ** segments available in level iAbsLevel. In this case, no work is - ** done on iAbsLevel - fall through to the next iteration of the loop - ** to start work on some other level. */ - memset(pWriter, 0, nAlloc); - pFilter->flags = FTS3_SEGMENT_REQUIRE_POS; - - if( rc==SQLITE_OK ){ - rc = fts3IncrmergeOutputIdx(p, iAbsLevel, &iIdx); - assert( bUseHint==1 || bUseHint==0 ); - if( iIdx==0 || (bUseHint && iIdx==1) ){ - int bIgnore = 0; - rc = fts3SegmentIsMaxLevel(p, iAbsLevel+1, &bIgnore); - if( bIgnore ){ - pFilter->flags |= FTS3_SEGMENT_IGNORE_EMPTY; - } - } - } - - if( rc==SQLITE_OK ){ - rc = fts3IncrmergeCsr(p, iAbsLevel, nSeg, pCsr); - } - if( SQLITE_OK==rc && pCsr->nSegment==nSeg - && SQLITE_OK==(rc = sqlite3Fts3SegReaderStart(p, pCsr, pFilter)) - && SQLITE_ROW==(rc = sqlite3Fts3SegReaderStep(p, pCsr)) - ){ - if( bUseHint && iIdx>0 ){ - const char *zKey = pCsr->zTerm; - int nKey = pCsr->nTerm; - rc = fts3IncrmergeLoad(p, iAbsLevel, iIdx-1, zKey, nKey, pWriter); - }else{ - rc = fts3IncrmergeWriter(p, iAbsLevel, iIdx, pCsr, pWriter); - } - - if( rc==SQLITE_OK && pWriter->nLeafEst ){ - fts3LogMerge(nSeg, iAbsLevel); - do { - rc = fts3IncrmergeAppend(p, pWriter, pCsr); - if( rc==SQLITE_OK ) rc = sqlite3Fts3SegReaderStep(p, pCsr); - if( pWriter->nWork>=nRem && rc==SQLITE_ROW ) rc = SQLITE_OK; - }while( rc==SQLITE_ROW ); - - /* Update or delete the input segments */ - if( rc==SQLITE_OK ){ - nRem -= (1 + pWriter->nWork); - rc = fts3IncrmergeChomp(p, iAbsLevel, pCsr, &nSeg); - if( nSeg!=0 ){ - bDirtyHint = 1; - fts3IncrmergeHintPush(&hint, iAbsLevel, nSeg, &rc); - } - } - } - - if( nSeg!=0 ){ - pWriter->nLeafData = pWriter->nLeafData * -1; - } - fts3IncrmergeRelease(p, pWriter, &rc); - if( nSeg==0 && pWriter->bNoLeafData==0 ){ - fts3PromoteSegments(p, iAbsLevel+1, pWriter->nLeafData); - } - } - - sqlite3Fts3SegReaderFinish(pCsr); - } - - /* Write the hint values into the %_stat table for the next incr-merger */ - if( bDirtyHint && rc==SQLITE_OK ){ - rc = fts3IncrmergeHintStore(p, &hint); - } - - sqlite3_free(pWriter); - 
sqlite3_free(hint.a); - return rc; -} - -/* -** Convert the text beginning at *pz into an integer and return -** its value. Advance *pz to point to the first character past -** the integer. -*/ -static int fts3Getint(const char **pz){ - const char *z = *pz; - int i = 0; - while( (*z)>='0' && (*z)<='9' ) i = 10*i + *(z++) - '0'; - *pz = z; - return i; -} - -/* -** Process statements of the form: -** -** INSERT INTO table(table) VALUES('merge=A,B'); -** -** A and B are integers that decode to be the number of leaf pages -** written for the merge, and the minimum number of segments on a level -** before it will be selected for a merge, respectively. -*/ -static int fts3DoIncrmerge( - Fts3Table *p, /* FTS3 table handle */ - const char *zParam /* Nul-terminated string containing "A,B" */ -){ - int rc; - int nMin = (FTS3_MERGE_COUNT / 2); - int nMerge = 0; - const char *z = zParam; - - /* Read the first integer value */ - nMerge = fts3Getint(&z); - - /* If the first integer value is followed by a ',', read the second - ** integer value. */ - if( z[0]==',' && z[1]!='\0' ){ - z++; - nMin = fts3Getint(&z); - } - - if( z[0]!='\0' || nMin<2 ){ - rc = SQLITE_ERROR; - }else{ - rc = SQLITE_OK; - if( !p->bHasStat ){ - assert( p->bFts4==0 ); - sqlite3Fts3CreateStatTable(&rc, p); - } - if( rc==SQLITE_OK ){ - rc = sqlite3Fts3Incrmerge(p, nMerge, nMin); - } - sqlite3Fts3SegmentsClose(p); - } - return rc; -} - -/* -** Process statements of the form: -** -** INSERT INTO table(table) VALUES('automerge=X'); -** -** where X is an integer. X==0 means to turn automerge off. X!=0 means -** turn it on. The setting is persistent. -*/ -static int fts3DoAutoincrmerge( - Fts3Table *p, /* FTS3 table handle */ - const char *zParam /* Nul-terminated string containing boolean */ -){ - int rc = SQLITE_OK; - sqlite3_stmt *pStmt = 0; - p->nAutoincrmerge = fts3Getint(&zParam); - if( p->nAutoincrmerge==1 || p->nAutoincrmerge>FTS3_MERGE_COUNT ){ - p->nAutoincrmerge = 8; - } - if( !p->bHasStat ){ - assert( p->bFts4==0 ); - sqlite3Fts3CreateStatTable(&rc, p); - if( rc ) return rc; - } - rc = fts3SqlStmt(p, SQL_REPLACE_STAT, &pStmt, 0); - if( rc ) return rc; - sqlite3_bind_int(pStmt, 1, FTS_STAT_AUTOINCRMERGE); - sqlite3_bind_int(pStmt, 2, p->nAutoincrmerge); - sqlite3_step(pStmt); - rc = sqlite3_reset(pStmt); - return rc; -} - -/* -** Return a 64-bit checksum for the FTS index entry specified by the -** arguments to this function. -*/ -static u64 fts3ChecksumEntry( - const char *zTerm, /* Pointer to buffer containing term */ - int nTerm, /* Size of zTerm in bytes */ - int iLangid, /* Language id for current row */ - int iIndex, /* Index (0..Fts3Table.nIndex-1) */ - i64 iDocid, /* Docid for current row. 
*/ - int iCol, /* Column number */ - int iPos /* Position */ -){ - int i; - u64 ret = (u64)iDocid; - - ret += (ret<<3) + iLangid; - ret += (ret<<3) + iIndex; - ret += (ret<<3) + iCol; - ret += (ret<<3) + iPos; - for(i=0; inIndex-1) */ - int *pRc /* OUT: Return code */ -){ - Fts3SegFilter filter; - Fts3MultiSegReader csr; - int rc; - u64 cksum = 0; - - assert( *pRc==SQLITE_OK ); - - memset(&filter, 0, sizeof(filter)); - memset(&csr, 0, sizeof(csr)); - filter.flags = FTS3_SEGMENT_REQUIRE_POS|FTS3_SEGMENT_IGNORE_EMPTY; - filter.flags |= FTS3_SEGMENT_SCAN; - - rc = sqlite3Fts3SegReaderCursor( - p, iLangid, iIndex, FTS3_SEGCURSOR_ALL, 0, 0, 0, 1,&csr - ); - if( rc==SQLITE_OK ){ - rc = sqlite3Fts3SegReaderStart(p, &csr, &filter); - } - - if( rc==SQLITE_OK ){ - while( SQLITE_ROW==(rc = sqlite3Fts3SegReaderStep(p, &csr)) ){ - char *pCsr = csr.aDoclist; - char *pEnd = &pCsr[csr.nDoclist]; - - i64 iDocid = 0; - i64 iCol = 0; - i64 iPos = 0; - - pCsr += sqlite3Fts3GetVarint(pCsr, &iDocid); - while( pCsrnIndex); - while( rc==SQLITE_OK && sqlite3_step(pAllLangid)==SQLITE_ROW ){ - int iLangid = sqlite3_column_int(pAllLangid, 0); - int i; - for(i=0; inIndex; i++){ - cksum1 = cksum1 ^ fts3ChecksumIndex(p, iLangid, i, &rc); - } - } - rc2 = sqlite3_reset(pAllLangid); - if( rc==SQLITE_OK ) rc = rc2; - } - - /* This block calculates the checksum according to the %_content table */ - rc = fts3SqlStmt(p, SQL_SELECT_ALL_LANGID, &pAllLangid, 0); - if( rc==SQLITE_OK ){ - sqlite3_tokenizer_module const *pModule = p->pTokenizer->pModule; - sqlite3_stmt *pStmt = 0; - char *zSql; - - zSql = sqlite3_mprintf("SELECT %s" , p->zReadExprlist); - if( !zSql ){ - rc = SQLITE_NOMEM; - }else{ - rc = sqlite3_prepare_v2(p->db, zSql, -1, &pStmt, 0); - sqlite3_free(zSql); - } - - while( rc==SQLITE_OK && SQLITE_ROW==sqlite3_step(pStmt) ){ - i64 iDocid = sqlite3_column_int64(pStmt, 0); - int iLang = langidFromSelect(p, pStmt); - int iCol; - - for(iCol=0; rc==SQLITE_OK && iColnColumn; iCol++){ - const char *zText = (const char *)sqlite3_column_text(pStmt, iCol+1); - int nText = sqlite3_column_bytes(pStmt, iCol+1); - sqlite3_tokenizer_cursor *pT = 0; - - rc = sqlite3Fts3OpenTokenizer(p->pTokenizer, iLang, zText, nText, &pT); - while( rc==SQLITE_OK ){ - char const *zToken; /* Buffer containing token */ - int nToken = 0; /* Number of bytes in token */ - int iDum1 = 0, iDum2 = 0; /* Dummy variables */ - int iPos = 0; /* Position of token in zText */ - - rc = pModule->xNext(pT, &zToken, &nToken, &iDum1, &iDum2, &iPos); - if( rc==SQLITE_OK ){ - int i; - cksum2 = cksum2 ^ fts3ChecksumEntry( - zToken, nToken, iLang, 0, iDocid, iCol, iPos - ); - for(i=1; inIndex; i++){ - if( p->aIndex[i].nPrefix<=nToken ){ - cksum2 = cksum2 ^ fts3ChecksumEntry( - zToken, p->aIndex[i].nPrefix, iLang, i, iDocid, iCol, iPos - ); - } - } - } - } - if( pT ) pModule->xClose(pT); - if( rc==SQLITE_DONE ) rc = SQLITE_OK; - } - } - - sqlite3_finalize(pStmt); - } - - *pbOk = (cksum1==cksum2); - return rc; -} - -/* -** Run the integrity-check. If no error occurs and the current contents of -** the FTS index are correct, return SQLITE_OK. Or, if the contents of the -** FTS index are incorrect, return SQLITE_CORRUPT_VTAB. -** -** Or, if an error (e.g. an OOM or IO error) occurs, return an SQLite -** error code. -** -** The integrity-check works as follows. 
For each token and indexed token -** prefix in the document set, a 64-bit checksum is calculated (by code -** in fts3ChecksumEntry()) based on the following: -** -** + The index number (0 for the main index, 1 for the first prefix -** index etc.), -** + The token (or token prefix) text itself, -** + The language-id of the row it appears in, -** + The docid of the row it appears in, -** + The column it appears in, and -** + The tokens position within that column. -** -** The checksums for all entries in the index are XORed together to create -** a single checksum for the entire index. -** -** The integrity-check code calculates the same checksum in two ways: -** -** 1. By scanning the contents of the FTS index, and -** 2. By scanning and tokenizing the content table. -** -** If the two checksums are identical, the integrity-check is deemed to have -** passed. -*/ -static int fts3DoIntegrityCheck( - Fts3Table *p /* FTS3 table handle */ -){ - int rc; - int bOk = 0; - rc = fts3IntegrityCheck(p, &bOk); - if( rc==SQLITE_OK && bOk==0 ) rc = SQLITE_CORRUPT_VTAB; - return rc; -} - -/* -** Handle a 'special' INSERT of the form: -** -** "INSERT INTO tbl(tbl) VALUES()" -** -** Argument pVal contains the result of . Currently the only -** meaningful value to insert is the text 'optimize'. -*/ -static int fts3SpecialInsert(Fts3Table *p, sqlite3_value *pVal){ - int rc; /* Return Code */ - const char *zVal = (const char *)sqlite3_value_text(pVal); - int nVal = sqlite3_value_bytes(pVal); - - if( !zVal ){ - return SQLITE_NOMEM; - }else if( nVal==8 && 0==sqlite3_strnicmp(zVal, "optimize", 8) ){ - rc = fts3DoOptimize(p, 0); - }else if( nVal==7 && 0==sqlite3_strnicmp(zVal, "rebuild", 7) ){ - rc = fts3DoRebuild(p); - }else if( nVal==15 && 0==sqlite3_strnicmp(zVal, "integrity-check", 15) ){ - rc = fts3DoIntegrityCheck(p); - }else if( nVal>6 && 0==sqlite3_strnicmp(zVal, "merge=", 6) ){ - rc = fts3DoIncrmerge(p, &zVal[6]); - }else if( nVal>10 && 0==sqlite3_strnicmp(zVal, "automerge=", 10) ){ - rc = fts3DoAutoincrmerge(p, &zVal[10]); -#ifdef SQLITE_TEST - }else if( nVal>9 && 0==sqlite3_strnicmp(zVal, "nodesize=", 9) ){ - p->nNodeSize = atoi(&zVal[9]); - rc = SQLITE_OK; - }else if( nVal>11 && 0==sqlite3_strnicmp(zVal, "maxpending=", 9) ){ - p->nMaxPendingData = atoi(&zVal[11]); - rc = SQLITE_OK; - }else if( nVal>21 && 0==sqlite3_strnicmp(zVal, "test-no-incr-doclist=", 21) ){ - p->bNoIncrDoclist = atoi(&zVal[21]); - rc = SQLITE_OK; -#endif - }else{ - rc = SQLITE_ERROR; - } - - return rc; -} - -#ifndef SQLITE_DISABLE_FTS4_DEFERRED -/* -** Delete all cached deferred doclists. Deferred doclists are cached -** (allocated) by the sqlite3Fts3CacheDeferredDoclists() function. -*/ -SQLITE_PRIVATE void sqlite3Fts3FreeDeferredDoclists(Fts3Cursor *pCsr){ - Fts3DeferredToken *pDef; - for(pDef=pCsr->pDeferred; pDef; pDef=pDef->pNext){ - fts3PendingListDelete(pDef->pList); - pDef->pList = 0; - } -} - -/* -** Free all entries in the pCsr->pDeffered list. Entries are added to -** this list using sqlite3Fts3DeferToken(). -*/ -SQLITE_PRIVATE void sqlite3Fts3FreeDeferredTokens(Fts3Cursor *pCsr){ - Fts3DeferredToken *pDef; - Fts3DeferredToken *pNext; - for(pDef=pCsr->pDeferred; pDef; pDef=pNext){ - pNext = pDef->pNext; - fts3PendingListDelete(pDef->pList); - sqlite3_free(pDef); - } - pCsr->pDeferred = 0; -} - -/* -** Generate deferred-doclists for all tokens in the pCsr->pDeferred list -** based on the row that pCsr currently points to. 
-** -** A deferred-doclist is like any other doclist with position information -** included, except that it only contains entries for a single row of the -** table, not for all rows. -*/ -SQLITE_PRIVATE int sqlite3Fts3CacheDeferredDoclists(Fts3Cursor *pCsr){ - int rc = SQLITE_OK; /* Return code */ - if( pCsr->pDeferred ){ - int i; /* Used to iterate through table columns */ - sqlite3_int64 iDocid; /* Docid of the row pCsr points to */ - Fts3DeferredToken *pDef; /* Used to iterate through deferred tokens */ - - Fts3Table *p = (Fts3Table *)pCsr->base.pVtab; - sqlite3_tokenizer *pT = p->pTokenizer; - sqlite3_tokenizer_module const *pModule = pT->pModule; - - assert( pCsr->isRequireSeek==0 ); - iDocid = sqlite3_column_int64(pCsr->pStmt, 0); - - for(i=0; inColumn && rc==SQLITE_OK; i++){ - if( p->abNotindexed[i]==0 ){ - const char *zText = (const char *)sqlite3_column_text(pCsr->pStmt, i+1); - sqlite3_tokenizer_cursor *pTC = 0; - - rc = sqlite3Fts3OpenTokenizer(pT, pCsr->iLangid, zText, -1, &pTC); - while( rc==SQLITE_OK ){ - char const *zToken; /* Buffer containing token */ - int nToken = 0; /* Number of bytes in token */ - int iDum1 = 0, iDum2 = 0; /* Dummy variables */ - int iPos = 0; /* Position of token in zText */ - - rc = pModule->xNext(pTC, &zToken, &nToken, &iDum1, &iDum2, &iPos); - for(pDef=pCsr->pDeferred; pDef && rc==SQLITE_OK; pDef=pDef->pNext){ - Fts3PhraseToken *pPT = pDef->pToken; - if( (pDef->iCol>=p->nColumn || pDef->iCol==i) - && (pPT->bFirst==0 || iPos==0) - && (pPT->n==nToken || (pPT->isPrefix && pPT->nz, pPT->n)) - ){ - fts3PendingListAppend(&pDef->pList, iDocid, i, iPos, &rc); - } - } - } - if( pTC ) pModule->xClose(pTC); - if( rc==SQLITE_DONE ) rc = SQLITE_OK; - } - } - - for(pDef=pCsr->pDeferred; pDef && rc==SQLITE_OK; pDef=pDef->pNext){ - if( pDef->pList ){ - rc = fts3PendingListAppendVarint(&pDef->pList, 0); - } - } - } - - return rc; -} - -SQLITE_PRIVATE int sqlite3Fts3DeferredTokenList( - Fts3DeferredToken *p, - char **ppData, - int *pnData -){ - char *pRet; - int nSkip; - sqlite3_int64 dummy; - - *ppData = 0; - *pnData = 0; - - if( p->pList==0 ){ - return SQLITE_OK; - } - - pRet = (char *)sqlite3_malloc(p->pList->nData); - if( !pRet ) return SQLITE_NOMEM; - - nSkip = sqlite3Fts3GetVarint(p->pList->aData, &dummy); - *pnData = p->pList->nData - nSkip; - *ppData = pRet; - - memcpy(pRet, &p->pList->aData[nSkip], *pnData); - return SQLITE_OK; -} - -/* -** Add an entry for token pToken to the pCsr->pDeferred list. -*/ -SQLITE_PRIVATE int sqlite3Fts3DeferToken( - Fts3Cursor *pCsr, /* Fts3 table cursor */ - Fts3PhraseToken *pToken, /* Token to defer */ - int iCol /* Column that token must appear in (or -1) */ -){ - Fts3DeferredToken *pDeferred; - pDeferred = sqlite3_malloc(sizeof(*pDeferred)); - if( !pDeferred ){ - return SQLITE_NOMEM; - } - memset(pDeferred, 0, sizeof(*pDeferred)); - pDeferred->pToken = pToken; - pDeferred->pNext = pCsr->pDeferred; - pDeferred->iCol = iCol; - pCsr->pDeferred = pDeferred; - - assert( pToken->pDeferred==0 ); - pToken->pDeferred = pDeferred; - - return SQLITE_OK; -} -#endif - -/* -** SQLite value pRowid contains the rowid of a row that may or may not be -** present in the FTS3 table. If it is, delete it and adjust the contents -** of subsiduary data structures accordingly. 
-*/ -static int fts3DeleteByRowid( - Fts3Table *p, - sqlite3_value *pRowid, - int *pnChng, /* IN/OUT: Decrement if row is deleted */ - u32 *aSzDel -){ - int rc = SQLITE_OK; /* Return code */ - int bFound = 0; /* True if *pRowid really is in the table */ - - fts3DeleteTerms(&rc, p, pRowid, aSzDel, &bFound); - if( bFound && rc==SQLITE_OK ){ - int isEmpty = 0; /* Deleting *pRowid leaves the table empty */ - rc = fts3IsEmpty(p, pRowid, &isEmpty); - if( rc==SQLITE_OK ){ - if( isEmpty ){ - /* Deleting this row means the whole table is empty. In this case - ** delete the contents of all three tables and throw away any - ** data in the pendingTerms hash table. */ - rc = fts3DeleteAll(p, 1); - *pnChng = 0; - memset(aSzDel, 0, sizeof(u32) * (p->nColumn+1) * 2); - }else{ - *pnChng = *pnChng - 1; - if( p->zContentTbl==0 ){ - fts3SqlExec(&rc, p, SQL_DELETE_CONTENT, &pRowid); - } - if( p->bHasDocsize ){ - fts3SqlExec(&rc, p, SQL_DELETE_DOCSIZE, &pRowid); - } - } - } - } - - return rc; -} - -/* -** This function does the work for the xUpdate method of FTS3 virtual -** tables. The schema of the virtual table being: -** -** CREATE TABLE
    ( -** , -**
    HIDDEN, -** docid HIDDEN, -** HIDDEN -** ); -** -** -*/ -SQLITE_PRIVATE int sqlite3Fts3UpdateMethod( - sqlite3_vtab *pVtab, /* FTS3 vtab object */ - int nArg, /* Size of argument array */ - sqlite3_value **apVal, /* Array of arguments */ - sqlite_int64 *pRowid /* OUT: The affected (or effected) rowid */ -){ - Fts3Table *p = (Fts3Table *)pVtab; - int rc = SQLITE_OK; /* Return Code */ - int isRemove = 0; /* True for an UPDATE or DELETE */ - u32 *aSzIns = 0; /* Sizes of inserted documents */ - u32 *aSzDel = 0; /* Sizes of deleted documents */ - int nChng = 0; /* Net change in number of documents */ - int bInsertDone = 0; - - /* At this point it must be known if the %_stat table exists or not. - ** So bHasStat may not be 2. */ - assert( p->bHasStat==0 || p->bHasStat==1 ); - - assert( p->pSegments==0 ); - assert( - nArg==1 /* DELETE operations */ - || nArg==(2 + p->nColumn + 3) /* INSERT or UPDATE operations */ - ); - - /* Check for a "special" INSERT operation. One of the form: - ** - ** INSERT INTO xyz(xyz) VALUES('command'); - */ - if( nArg>1 - && sqlite3_value_type(apVal[0])==SQLITE_NULL - && sqlite3_value_type(apVal[p->nColumn+2])!=SQLITE_NULL - ){ - rc = fts3SpecialInsert(p, apVal[p->nColumn+2]); - goto update_out; - } - - if( nArg>1 && sqlite3_value_int(apVal[2 + p->nColumn + 2])<0 ){ - rc = SQLITE_CONSTRAINT; - goto update_out; - } - - /* Allocate space to hold the change in document sizes */ - aSzDel = sqlite3_malloc( sizeof(aSzDel[0])*(p->nColumn+1)*2 ); - if( aSzDel==0 ){ - rc = SQLITE_NOMEM; - goto update_out; - } - aSzIns = &aSzDel[p->nColumn+1]; - memset(aSzDel, 0, sizeof(aSzDel[0])*(p->nColumn+1)*2); - - rc = fts3Writelock(p); - if( rc!=SQLITE_OK ) goto update_out; - - /* If this is an INSERT operation, or an UPDATE that modifies the rowid - ** value, then this operation requires constraint handling. - ** - ** If the on-conflict mode is REPLACE, this means that the existing row - ** should be deleted from the database before inserting the new row. Or, - ** if the on-conflict mode is other than REPLACE, then this method must - ** detect the conflict and return SQLITE_CONSTRAINT before beginning to - ** modify the database file. - */ - if( nArg>1 && p->zContentTbl==0 ){ - /* Find the value object that holds the new rowid value. */ - sqlite3_value *pNewRowid = apVal[3+p->nColumn]; - if( sqlite3_value_type(pNewRowid)==SQLITE_NULL ){ - pNewRowid = apVal[1]; - } - - if( sqlite3_value_type(pNewRowid)!=SQLITE_NULL && ( - sqlite3_value_type(apVal[0])==SQLITE_NULL - || sqlite3_value_int64(apVal[0])!=sqlite3_value_int64(pNewRowid) - )){ - /* The new rowid is not NULL (in this case the rowid will be - ** automatically assigned and there is no chance of a conflict), and - ** the statement is either an INSERT or an UPDATE that modifies the - ** rowid column. So if the conflict mode is REPLACE, then delete any - ** existing row with rowid=pNewRowid. - ** - ** Or, if the conflict mode is not REPLACE, insert the new record into - ** the %_content table. If we hit the duplicate rowid constraint (or any - ** other error) while doing so, return immediately. - ** - ** This branch may also run if pNewRowid contains a value that cannot - ** be losslessly converted to an integer. In this case, the eventual - ** call to fts3InsertData() (either just below or further on in this - ** function) will return SQLITE_MISMATCH. If fts3DeleteByRowid is - ** invoked, it will delete zero rows (since no row will have - ** docid=$pNewRowid if $pNewRowid is not an integer value). 
- */ - if( sqlite3_vtab_on_conflict(p->db)==SQLITE_REPLACE ){ - rc = fts3DeleteByRowid(p, pNewRowid, &nChng, aSzDel); - }else{ - rc = fts3InsertData(p, apVal, pRowid); - bInsertDone = 1; - } - } - } - if( rc!=SQLITE_OK ){ - goto update_out; - } - - /* If this is a DELETE or UPDATE operation, remove the old record. */ - if( sqlite3_value_type(apVal[0])!=SQLITE_NULL ){ - assert( sqlite3_value_type(apVal[0])==SQLITE_INTEGER ); - rc = fts3DeleteByRowid(p, apVal[0], &nChng, aSzDel); - isRemove = 1; - } - - /* If this is an INSERT or UPDATE operation, insert the new record. */ - if( nArg>1 && rc==SQLITE_OK ){ - int iLangid = sqlite3_value_int(apVal[2 + p->nColumn + 2]); - if( bInsertDone==0 ){ - rc = fts3InsertData(p, apVal, pRowid); - if( rc==SQLITE_CONSTRAINT && p->zContentTbl==0 ){ - rc = FTS_CORRUPT_VTAB; - } - } - if( rc==SQLITE_OK && (!isRemove || *pRowid!=p->iPrevDocid ) ){ - rc = fts3PendingTermsDocid(p, iLangid, *pRowid); - } - if( rc==SQLITE_OK ){ - assert( p->iPrevDocid==*pRowid ); - rc = fts3InsertTerms(p, iLangid, apVal, aSzIns); - } - if( p->bHasDocsize ){ - fts3InsertDocsize(&rc, p, aSzIns); - } - nChng++; - } - - if( p->bFts4 ){ - fts3UpdateDocTotals(&rc, p, aSzIns, aSzDel, nChng); - } - - update_out: - sqlite3_free(aSzDel); - sqlite3Fts3SegmentsClose(p); - return rc; -} - -/* -** Flush any data in the pending-terms hash table to disk. If successful, -** merge all segments in the database (including the new segment, if -** there was any data to flush) into a single segment. -*/ -SQLITE_PRIVATE int sqlite3Fts3Optimize(Fts3Table *p){ - int rc; - rc = sqlite3_exec(p->db, "SAVEPOINT fts3", 0, 0, 0); - if( rc==SQLITE_OK ){ - rc = fts3DoOptimize(p, 1); - if( rc==SQLITE_OK || rc==SQLITE_DONE ){ - int rc2 = sqlite3_exec(p->db, "RELEASE fts3", 0, 0, 0); - if( rc2!=SQLITE_OK ) rc = rc2; - }else{ - sqlite3_exec(p->db, "ROLLBACK TO fts3", 0, 0, 0); - sqlite3_exec(p->db, "RELEASE fts3", 0, 0, 0); - } - } - sqlite3Fts3SegmentsClose(p); - return rc; -} - -#endif - -/************** End of fts3_write.c ******************************************/ -/************** Begin file fts3_snippet.c ************************************/ -/* -** 2009 Oct 23 -** -** The author disclaims copyright to this source code. In place of -** a legal notice, here is a blessing: -** -** May you do good and not evil. -** May you find forgiveness for yourself and forgive others. -** May you share freely, never taking more than you give. -** -****************************************************************************** -*/ - -#if !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS3) - -/* #include */ -/* #include */ - -/* -** Characters that may appear in the second argument to matchinfo(). -*/ -#define FTS3_MATCHINFO_NPHRASE 'p' /* 1 value */ -#define FTS3_MATCHINFO_NCOL 'c' /* 1 value */ -#define FTS3_MATCHINFO_NDOC 'n' /* 1 value */ -#define FTS3_MATCHINFO_AVGLENGTH 'a' /* nCol values */ -#define FTS3_MATCHINFO_LENGTH 'l' /* nCol values */ -#define FTS3_MATCHINFO_LCS 's' /* nCol values */ -#define FTS3_MATCHINFO_HITS 'x' /* 3*nCol*nPhrase values */ - -/* -** The default value for the second argument to matchinfo(). -*/ -#define FTS3_MATCHINFO_DEFAULT "pcx" - - -/* -** Used as an fts3ExprIterate() context when loading phrase doclists to -** Fts3Expr.aDoclist[]/nDoclist. 
-*/ -typedef struct LoadDoclistCtx LoadDoclistCtx; -struct LoadDoclistCtx { - Fts3Cursor *pCsr; /* FTS3 Cursor */ - int nPhrase; /* Number of phrases seen so far */ - int nToken; /* Number of tokens seen so far */ -}; - -/* -** The following types are used as part of the implementation of the -** fts3BestSnippet() routine. -*/ -typedef struct SnippetIter SnippetIter; -typedef struct SnippetPhrase SnippetPhrase; -typedef struct SnippetFragment SnippetFragment; - -struct SnippetIter { - Fts3Cursor *pCsr; /* Cursor snippet is being generated from */ - int iCol; /* Extract snippet from this column */ - int nSnippet; /* Requested snippet length (in tokens) */ - int nPhrase; /* Number of phrases in query */ - SnippetPhrase *aPhrase; /* Array of size nPhrase */ - int iCurrent; /* First token of current snippet */ -}; - -struct SnippetPhrase { - int nToken; /* Number of tokens in phrase */ - char *pList; /* Pointer to start of phrase position list */ - int iHead; /* Next value in position list */ - char *pHead; /* Position list data following iHead */ - int iTail; /* Next value in trailing position list */ - char *pTail; /* Position list data following iTail */ -}; - -struct SnippetFragment { - int iCol; /* Column snippet is extracted from */ - int iPos; /* Index of first token in snippet */ - u64 covered; /* Mask of query phrases covered */ - u64 hlmask; /* Mask of snippet terms to highlight */ -}; - -/* -** This type is used as an fts3ExprIterate() context object while -** accumulating the data returned by the matchinfo() function. -*/ -typedef struct MatchInfo MatchInfo; -struct MatchInfo { - Fts3Cursor *pCursor; /* FTS3 Cursor */ - int nCol; /* Number of columns in table */ - int nPhrase; /* Number of matchable phrases in query */ - sqlite3_int64 nDoc; /* Number of docs in database */ - u32 *aMatchinfo; /* Pre-allocated buffer */ -}; - - - -/* -** The snippet() and offsets() functions both return text values. An instance -** of the following structure is used to accumulate those values while the -** functions are running. See fts3StringAppend() for details. -*/ -typedef struct StrBuffer StrBuffer; -struct StrBuffer { - char *z; /* Pointer to buffer containing string */ - int n; /* Length of z in bytes (excl. nul-term) */ - int nAlloc; /* Allocated size of buffer z in bytes */ -}; - - -/* -** This function is used to help iterate through a position-list. A position -** list is a list of unique integers, sorted from smallest to largest. Each -** element of the list is represented by an FTS3 varint that takes the value -** of the difference between the current element and the previous one plus -** two. For example, to store the position-list: -** -** 4 9 113 -** -** the three varints: -** -** 6 7 106 -** -** are encoded. -** -** When this function is called, *pp points to the start of an element of -** the list. *piPos contains the value of the previous entry in the list. -** After it returns, *piPos contains the value of the next element of the -** list and *pp is advanced to the following varint. -*/ -static void fts3GetDeltaPosition(char **pp, int *piPos){ - int iVal; - *pp += fts3GetVarint32(*pp, &iVal); - *piPos += (iVal-2); -} - -/* -** Helper function for fts3ExprIterate() (see below). 
-*/ -static int fts3ExprIterate2( - Fts3Expr *pExpr, /* Expression to iterate phrases of */ - int *piPhrase, /* Pointer to phrase counter */ - int (*x)(Fts3Expr*,int,void*), /* Callback function to invoke for phrases */ - void *pCtx /* Second argument to pass to callback */ -){ - int rc; /* Return code */ - int eType = pExpr->eType; /* Type of expression node pExpr */ - - if( eType!=FTSQUERY_PHRASE ){ - assert( pExpr->pLeft && pExpr->pRight ); - rc = fts3ExprIterate2(pExpr->pLeft, piPhrase, x, pCtx); - if( rc==SQLITE_OK && eType!=FTSQUERY_NOT ){ - rc = fts3ExprIterate2(pExpr->pRight, piPhrase, x, pCtx); - } - }else{ - rc = x(pExpr, *piPhrase, pCtx); - (*piPhrase)++; - } - return rc; -} - -/* -** Iterate through all phrase nodes in an FTS3 query, except those that -** are part of a sub-tree that is the right-hand-side of a NOT operator. -** For each phrase node found, the supplied callback function is invoked. -** -** If the callback function returns anything other than SQLITE_OK, -** the iteration is abandoned and the error code returned immediately. -** Otherwise, SQLITE_OK is returned after a callback has been made for -** all eligible phrase nodes. -*/ -static int fts3ExprIterate( - Fts3Expr *pExpr, /* Expression to iterate phrases of */ - int (*x)(Fts3Expr*,int,void*), /* Callback function to invoke for phrases */ - void *pCtx /* Second argument to pass to callback */ -){ - int iPhrase = 0; /* Variable used as the phrase counter */ - return fts3ExprIterate2(pExpr, &iPhrase, x, pCtx); -} - -/* -** This is an fts3ExprIterate() callback used while loading the doclists -** for each phrase into Fts3Expr.aDoclist[]/nDoclist. See also -** fts3ExprLoadDoclists(). -*/ -static int fts3ExprLoadDoclistsCb(Fts3Expr *pExpr, int iPhrase, void *ctx){ - int rc = SQLITE_OK; - Fts3Phrase *pPhrase = pExpr->pPhrase; - LoadDoclistCtx *p = (LoadDoclistCtx *)ctx; - - UNUSED_PARAMETER(iPhrase); - - p->nPhrase++; - p->nToken += pPhrase->nToken; - - return rc; -} - -/* -** Load the doclists for each phrase in the query associated with FTS3 cursor -** pCsr. -** -** If pnPhrase is not NULL, then *pnPhrase is set to the number of matchable -** phrases in the expression (all phrases except those directly or -** indirectly descended from the right-hand-side of a NOT operator). If -** pnToken is not NULL, then it is set to the number of tokens in all -** matchable phrases of the expression. -*/ -static int fts3ExprLoadDoclists( - Fts3Cursor *pCsr, /* Fts3 cursor for current query */ - int *pnPhrase, /* OUT: Number of phrases in query */ - int *pnToken /* OUT: Number of tokens in query */ -){ - int rc; /* Return Code */ - LoadDoclistCtx sCtx = {0,0,0}; /* Context for fts3ExprIterate() */ - sCtx.pCsr = pCsr; - rc = fts3ExprIterate(pCsr->pExpr, fts3ExprLoadDoclistsCb, (void *)&sCtx); - if( pnPhrase ) *pnPhrase = sCtx.nPhrase; - if( pnToken ) *pnToken = sCtx.nToken; - return rc; -} - -static int fts3ExprPhraseCountCb(Fts3Expr *pExpr, int iPhrase, void *ctx){ - (*(int *)ctx)++; - UNUSED_PARAMETER(pExpr); - UNUSED_PARAMETER(iPhrase); - return SQLITE_OK; -} -static int fts3ExprPhraseCount(Fts3Expr *pExpr){ - int nPhrase = 0; - (void)fts3ExprIterate(pExpr, fts3ExprPhraseCountCb, (void *)&nPhrase); - return nPhrase; -} - -/* -** Advance the position list iterator specified by the first two -** arguments so that it points to the first element with a value greater -** than or equal to parameter iNext. 
-*/ -static void fts3SnippetAdvance(char **ppIter, int *piIter, int iNext){ - char *pIter = *ppIter; - if( pIter ){ - int iIter = *piIter; - - while( iIteriCurrent<0 ){ - /* The SnippetIter object has just been initialized. The first snippet - ** candidate always starts at offset 0 (even if this candidate has a - ** score of 0.0). - */ - pIter->iCurrent = 0; - - /* Advance the 'head' iterator of each phrase to the first offset that - ** is greater than or equal to (iNext+nSnippet). - */ - for(i=0; inPhrase; i++){ - SnippetPhrase *pPhrase = &pIter->aPhrase[i]; - fts3SnippetAdvance(&pPhrase->pHead, &pPhrase->iHead, pIter->nSnippet); - } - }else{ - int iStart; - int iEnd = 0x7FFFFFFF; - - for(i=0; inPhrase; i++){ - SnippetPhrase *pPhrase = &pIter->aPhrase[i]; - if( pPhrase->pHead && pPhrase->iHeadiHead; - } - } - if( iEnd==0x7FFFFFFF ){ - return 1; - } - - pIter->iCurrent = iStart = iEnd - pIter->nSnippet + 1; - for(i=0; inPhrase; i++){ - SnippetPhrase *pPhrase = &pIter->aPhrase[i]; - fts3SnippetAdvance(&pPhrase->pHead, &pPhrase->iHead, iEnd+1); - fts3SnippetAdvance(&pPhrase->pTail, &pPhrase->iTail, iStart); - } - } - - return 0; -} - -/* -** Retrieve information about the current candidate snippet of snippet -** iterator pIter. -*/ -static void fts3SnippetDetails( - SnippetIter *pIter, /* Snippet iterator */ - u64 mCovered, /* Bitmask of phrases already covered */ - int *piToken, /* OUT: First token of proposed snippet */ - int *piScore, /* OUT: "Score" for this snippet */ - u64 *pmCover, /* OUT: Bitmask of phrases covered */ - u64 *pmHighlight /* OUT: Bitmask of terms to highlight */ -){ - int iStart = pIter->iCurrent; /* First token of snippet */ - int iScore = 0; /* Score of this snippet */ - int i; /* Loop counter */ - u64 mCover = 0; /* Mask of phrases covered by this snippet */ - u64 mHighlight = 0; /* Mask of tokens to highlight in snippet */ - - for(i=0; inPhrase; i++){ - SnippetPhrase *pPhrase = &pIter->aPhrase[i]; - if( pPhrase->pTail ){ - char *pCsr = pPhrase->pTail; - int iCsr = pPhrase->iTail; - - while( iCsr<(iStart+pIter->nSnippet) ){ - int j; - u64 mPhrase = (u64)1 << i; - u64 mPos = (u64)1 << (iCsr - iStart); - assert( iCsr>=iStart ); - if( (mCover|mCovered)&mPhrase ){ - iScore++; - }else{ - iScore += 1000; - } - mCover |= mPhrase; - - for(j=0; jnToken; j++){ - mHighlight |= (mPos>>j); - } - - if( 0==(*pCsr & 0x0FE) ) break; - fts3GetDeltaPosition(&pCsr, &iCsr); - } - } - } - - /* Set the output variables before returning. */ - *piToken = iStart; - *piScore = iScore; - *pmCover = mCover; - *pmHighlight = mHighlight; -} - -/* -** This function is an fts3ExprIterate() callback used by fts3BestSnippet(). -** Each invocation populates an element of the SnippetIter.aPhrase[] array. 
-*/ -static int fts3SnippetFindPositions(Fts3Expr *pExpr, int iPhrase, void *ctx){ - SnippetIter *p = (SnippetIter *)ctx; - SnippetPhrase *pPhrase = &p->aPhrase[iPhrase]; - char *pCsr; - int rc; - - pPhrase->nToken = pExpr->pPhrase->nToken; - rc = sqlite3Fts3EvalPhrasePoslist(p->pCsr, pExpr, p->iCol, &pCsr); - assert( rc==SQLITE_OK || pCsr==0 ); - if( pCsr ){ - int iFirst = 0; - pPhrase->pList = pCsr; - fts3GetDeltaPosition(&pCsr, &iFirst); - assert( iFirst>=0 ); - pPhrase->pHead = pCsr; - pPhrase->pTail = pCsr; - pPhrase->iHead = iFirst; - pPhrase->iTail = iFirst; - }else{ - assert( rc!=SQLITE_OK || ( - pPhrase->pList==0 && pPhrase->pHead==0 && pPhrase->pTail==0 - )); - } - - return rc; -} - -/* -** Select the fragment of text consisting of nFragment contiguous tokens -** from column iCol that represent the "best" snippet. The best snippet -** is the snippet with the highest score, where scores are calculated -** by adding: -** -** (a) +1 point for each occurrence of a matchable phrase in the snippet. -** -** (b) +1000 points for the first occurrence of each matchable phrase in -** the snippet for which the corresponding mCovered bit is not set. -** -** The selected snippet parameters are stored in structure *pFragment before -** returning. The score of the selected snippet is stored in *piScore -** before returning. -*/ -static int fts3BestSnippet( - int nSnippet, /* Desired snippet length */ - Fts3Cursor *pCsr, /* Cursor to create snippet for */ - int iCol, /* Index of column to create snippet from */ - u64 mCovered, /* Mask of phrases already covered */ - u64 *pmSeen, /* IN/OUT: Mask of phrases seen */ - SnippetFragment *pFragment, /* OUT: Best snippet found */ - int *piScore /* OUT: Score of snippet pFragment */ -){ - int rc; /* Return Code */ - int nList; /* Number of phrases in expression */ - SnippetIter sIter; /* Iterates through snippet candidates */ - int nByte; /* Number of bytes of space to allocate */ - int iBestScore = -1; /* Best snippet score found so far */ - int i; /* Loop counter */ - - memset(&sIter, 0, sizeof(sIter)); - - /* Iterate through the phrases in the expression to count them. The same - ** callback makes sure the doclists are loaded for each phrase. - */ - rc = fts3ExprLoadDoclists(pCsr, &nList, 0); - if( rc!=SQLITE_OK ){ - return rc; - } - - /* Now that it is known how many phrases there are, allocate and zero - ** the required space using malloc(). - */ - nByte = sizeof(SnippetPhrase) * nList; - sIter.aPhrase = (SnippetPhrase *)sqlite3_malloc(nByte); - if( !sIter.aPhrase ){ - return SQLITE_NOMEM; - } - memset(sIter.aPhrase, 0, nByte); - - /* Initialize the contents of the SnippetIter object. Then iterate through - ** the set of phrases in the expression to populate the aPhrase[] array. - */ - sIter.pCsr = pCsr; - sIter.iCol = iCol; - sIter.nSnippet = nSnippet; - sIter.nPhrase = nList; - sIter.iCurrent = -1; - (void)fts3ExprIterate(pCsr->pExpr, fts3SnippetFindPositions, (void *)&sIter); - - /* Set the *pmSeen output variable. */ - for(i=0; iiCol = iCol; - while( !fts3SnippetNextCandidate(&sIter) ){ - int iPos; - int iScore; - u64 mCover; - u64 mHighlight; - fts3SnippetDetails(&sIter, mCovered, &iPos, &iScore, &mCover, &mHighlight); - assert( iScore>=0 ); - if( iScore>iBestScore ){ - pFragment->iPos = iPos; - pFragment->hlmask = mHighlight; - pFragment->covered = mCover; - iBestScore = iScore; - } - } - - sqlite3_free(sIter.aPhrase); - *piScore = iBestScore; - return SQLITE_OK; -} - - -/* -** Append a string to the string-buffer passed as the first argument. 
-** -** If nAppend is negative, then the length of the string zAppend is -** determined using strlen(). -*/ -static int fts3StringAppend( - StrBuffer *pStr, /* Buffer to append to */ - const char *zAppend, /* Pointer to data to append to buffer */ - int nAppend /* Size of zAppend in bytes (or -1) */ -){ - if( nAppend<0 ){ - nAppend = (int)strlen(zAppend); - } - - /* If there is insufficient space allocated at StrBuffer.z, use realloc() - ** to grow the buffer until so that it is big enough to accomadate the - ** appended data. - */ - if( pStr->n+nAppend+1>=pStr->nAlloc ){ - int nAlloc = pStr->nAlloc+nAppend+100; - char *zNew = sqlite3_realloc(pStr->z, nAlloc); - if( !zNew ){ - return SQLITE_NOMEM; - } - pStr->z = zNew; - pStr->nAlloc = nAlloc; - } - assert( pStr->z!=0 && (pStr->nAlloc >= pStr->n+nAppend+1) ); - - /* Append the data to the string buffer. */ - memcpy(&pStr->z[pStr->n], zAppend, nAppend); - pStr->n += nAppend; - pStr->z[pStr->n] = '\0'; - - return SQLITE_OK; -} - -/* -** The fts3BestSnippet() function often selects snippets that end with a -** query term. That is, the final term of the snippet is always a term -** that requires highlighting. For example, if 'X' is a highlighted term -** and '.' is a non-highlighted term, BestSnippet() may select: -** -** ........X.....X -** -** This function "shifts" the beginning of the snippet forward in the -** document so that there are approximately the same number of -** non-highlighted terms to the right of the final highlighted term as there -** are to the left of the first highlighted term. For example, to this: -** -** ....X.....X.... -** -** This is done as part of extracting the snippet text, not when selecting -** the snippet. Snippet selection is done based on doclists only, so there -** is no way for fts3BestSnippet() to know whether or not the document -** actually contains terms that follow the final highlighted term. -*/ -static int fts3SnippetShift( - Fts3Table *pTab, /* FTS3 table snippet comes from */ - int iLangid, /* Language id to use in tokenizing */ - int nSnippet, /* Number of tokens desired for snippet */ - const char *zDoc, /* Document text to extract snippet from */ - int nDoc, /* Size of buffer zDoc in bytes */ - int *piPos, /* IN/OUT: First token of snippet */ - u64 *pHlmask /* IN/OUT: Mask of tokens to highlight */ -){ - u64 hlmask = *pHlmask; /* Local copy of initial highlight-mask */ - - if( hlmask ){ - int nLeft; /* Tokens to the left of first highlight */ - int nRight; /* Tokens to the right of last highlight */ - int nDesired; /* Ideal number of tokens to shift forward */ - - for(nLeft=0; !(hlmask & ((u64)1 << nLeft)); nLeft++); - for(nRight=0; !(hlmask & ((u64)1 << (nSnippet-1-nRight))); nRight++); - nDesired = (nLeft-nRight)/2; - - /* Ideally, the start of the snippet should be pushed forward in the - ** document nDesired tokens. This block checks if there are actually - ** nDesired tokens to the right of the snippet. If so, *piPos and - ** *pHlMask are updated to shift the snippet nDesired tokens to the - ** right. Otherwise, the snippet is shifted by the number of tokens - ** available. - */ - if( nDesired>0 ){ - int nShift; /* Number of tokens to shift snippet by */ - int iCurrent = 0; /* Token counter */ - int rc; /* Return Code */ - sqlite3_tokenizer_module *pMod; - sqlite3_tokenizer_cursor *pC; - pMod = (sqlite3_tokenizer_module *)pTab->pTokenizer->pModule; - - /* Open a cursor on zDoc/nDoc. Check if there are (nSnippet+nDesired) - ** or more tokens in zDoc/nDoc. 
- */ - rc = sqlite3Fts3OpenTokenizer(pTab->pTokenizer, iLangid, zDoc, nDoc, &pC); - if( rc!=SQLITE_OK ){ - return rc; - } - while( rc==SQLITE_OK && iCurrent<(nSnippet+nDesired) ){ - const char *ZDUMMY; int DUMMY1 = 0, DUMMY2 = 0, DUMMY3 = 0; - rc = pMod->xNext(pC, &ZDUMMY, &DUMMY1, &DUMMY2, &DUMMY3, &iCurrent); - } - pMod->xClose(pC); - if( rc!=SQLITE_OK && rc!=SQLITE_DONE ){ return rc; } - - nShift = (rc==SQLITE_DONE)+iCurrent-nSnippet; - assert( nShift<=nDesired ); - if( nShift>0 ){ - *piPos += nShift; - *pHlmask = hlmask >> nShift; - } - } - } - return SQLITE_OK; -} - -/* -** Extract the snippet text for fragment pFragment from cursor pCsr and -** append it to string buffer pOut. -*/ -static int fts3SnippetText( - Fts3Cursor *pCsr, /* FTS3 Cursor */ - SnippetFragment *pFragment, /* Snippet to extract */ - int iFragment, /* Fragment number */ - int isLast, /* True for final fragment in snippet */ - int nSnippet, /* Number of tokens in extracted snippet */ - const char *zOpen, /* String inserted before highlighted term */ - const char *zClose, /* String inserted after highlighted term */ - const char *zEllipsis, /* String inserted between snippets */ - StrBuffer *pOut /* Write output here */ -){ - Fts3Table *pTab = (Fts3Table *)pCsr->base.pVtab; - int rc; /* Return code */ - const char *zDoc; /* Document text to extract snippet from */ - int nDoc; /* Size of zDoc in bytes */ - int iCurrent = 0; /* Current token number of document */ - int iEnd = 0; /* Byte offset of end of current token */ - int isShiftDone = 0; /* True after snippet is shifted */ - int iPos = pFragment->iPos; /* First token of snippet */ - u64 hlmask = pFragment->hlmask; /* Highlight-mask for snippet */ - int iCol = pFragment->iCol+1; /* Query column to extract text from */ - sqlite3_tokenizer_module *pMod; /* Tokenizer module methods object */ - sqlite3_tokenizer_cursor *pC; /* Tokenizer cursor open on zDoc/nDoc */ - - zDoc = (const char *)sqlite3_column_text(pCsr->pStmt, iCol); - if( zDoc==0 ){ - if( sqlite3_column_type(pCsr->pStmt, iCol)!=SQLITE_NULL ){ - return SQLITE_NOMEM; - } - return SQLITE_OK; - } - nDoc = sqlite3_column_bytes(pCsr->pStmt, iCol); - - /* Open a token cursor on the document. */ - pMod = (sqlite3_tokenizer_module *)pTab->pTokenizer->pModule; - rc = sqlite3Fts3OpenTokenizer(pTab->pTokenizer, pCsr->iLangid, zDoc,nDoc,&pC); - if( rc!=SQLITE_OK ){ - return rc; - } - - while( rc==SQLITE_OK ){ - const char *ZDUMMY; /* Dummy argument used with tokenizer */ - int DUMMY1 = -1; /* Dummy argument used with tokenizer */ - int iBegin = 0; /* Offset in zDoc of start of token */ - int iFin = 0; /* Offset in zDoc of end of token */ - int isHighlight = 0; /* True for highlighted terms */ - - /* Variable DUMMY1 is initialized to a negative value above. Elsewhere - ** in the FTS code the variable that the third argument to xNext points to - ** is initialized to zero before the first (*but not necessarily - ** subsequent*) call to xNext(). This is done for a particular application - ** that needs to know whether or not the tokenizer is being used for - ** snippet generation or for some other purpose. - ** - ** Extreme care is required when writing code to depend on this - ** initialization. It is not a documented part of the tokenizer interface. - ** If a tokenizer is used directly by any code outside of FTS, this - ** convention might not be respected. 
*/ - rc = pMod->xNext(pC, &ZDUMMY, &DUMMY1, &iBegin, &iFin, &iCurrent); - if( rc!=SQLITE_OK ){ - if( rc==SQLITE_DONE ){ - /* Special case - the last token of the snippet is also the last token - ** of the column. Append any punctuation that occurred between the end - ** of the previous token and the end of the document to the output. - ** Then break out of the loop. */ - rc = fts3StringAppend(pOut, &zDoc[iEnd], -1); - } - break; - } - if( iCurrentiLangid, nSnippet, &zDoc[iBegin], n, &iPos, &hlmask - ); - isShiftDone = 1; - - /* Now that the shift has been done, check if the initial "..." are - ** required. They are required if (a) this is not the first fragment, - ** or (b) this fragment does not begin at position 0 of its column. - */ - if( rc==SQLITE_OK && (iPos>0 || iFragment>0) ){ - rc = fts3StringAppend(pOut, zEllipsis, -1); - } - if( rc!=SQLITE_OK || iCurrent=(iPos+nSnippet) ){ - if( isLast ){ - rc = fts3StringAppend(pOut, zEllipsis, -1); - } - break; - } - - /* Set isHighlight to true if this term should be highlighted. */ - isHighlight = (hlmask & ((u64)1 << (iCurrent-iPos)))!=0; - - if( iCurrent>iPos ) rc = fts3StringAppend(pOut, &zDoc[iEnd], iBegin-iEnd); - if( rc==SQLITE_OK && isHighlight ) rc = fts3StringAppend(pOut, zOpen, -1); - if( rc==SQLITE_OK ) rc = fts3StringAppend(pOut, &zDoc[iBegin], iFin-iBegin); - if( rc==SQLITE_OK && isHighlight ) rc = fts3StringAppend(pOut, zClose, -1); - - iEnd = iFin; - } - - pMod->xClose(pC); - return rc; -} - - -/* -** This function is used to count the entries in a column-list (a -** delta-encoded list of term offsets within a single column of a single -** row). When this function is called, *ppCollist should point to the -** beginning of the first varint in the column-list (the varint that -** contains the position of the first matching term in the column data). -** Before returning, *ppCollist is set to point to the first byte after -** the last varint in the column-list (either the 0x00 signifying the end -** of the position-list, or the 0x01 that precedes the column number of -** the next column in the position-list). -** -** The number of elements in the column-list is returned. -*/ -static int fts3ColumnlistCount(char **ppCollist){ - char *pEnd = *ppCollist; - char c = 0; - int nEntry = 0; - - /* A column-list is terminated by either a 0x01 or 0x00. */ - while( 0xFE & (*pEnd | c) ){ - c = *pEnd++ & 0x80; - if( !c ) nEntry++; - } - - *ppCollist = pEnd; - return nEntry; -} - -/* -** fts3ExprIterate() callback used to collect the "global" matchinfo stats -** for a single query. -** -** fts3ExprIterate() callback to load the 'global' elements of a -** FTS3_MATCHINFO_HITS matchinfo array. The global stats are those elements -** of the matchinfo array that are constant for all rows returned by the -** current query. -** -** Argument pCtx is actually a pointer to a struct of type MatchInfo. This -** function populates Matchinfo.aMatchinfo[] as follows: -** -** for(iCol=0; iColpCursor, pExpr, &p->aMatchinfo[3*iPhrase*p->nCol] - ); -} - -/* -** fts3ExprIterate() callback used to collect the "local" part of the -** FTS3_MATCHINFO_HITS array. The local stats are those elements of the -** array that are different for each row returned by the query. 
-*/ -static int fts3ExprLocalHitsCb( - Fts3Expr *pExpr, /* Phrase expression node */ - int iPhrase, /* Phrase number */ - void *pCtx /* Pointer to MatchInfo structure */ -){ - int rc = SQLITE_OK; - MatchInfo *p = (MatchInfo *)pCtx; - int iStart = iPhrase * p->nCol * 3; - int i; - - for(i=0; inCol && rc==SQLITE_OK; i++){ - char *pCsr; - rc = sqlite3Fts3EvalPhrasePoslist(p->pCursor, pExpr, i, &pCsr); - if( pCsr ){ - p->aMatchinfo[iStart+i*3] = fts3ColumnlistCount(&pCsr); - }else{ - p->aMatchinfo[iStart+i*3] = 0; - } - } - - return rc; -} - -static int fts3MatchinfoCheck( - Fts3Table *pTab, - char cArg, - char **pzErr -){ - if( (cArg==FTS3_MATCHINFO_NPHRASE) - || (cArg==FTS3_MATCHINFO_NCOL) - || (cArg==FTS3_MATCHINFO_NDOC && pTab->bFts4) - || (cArg==FTS3_MATCHINFO_AVGLENGTH && pTab->bFts4) - || (cArg==FTS3_MATCHINFO_LENGTH && pTab->bHasDocsize) - || (cArg==FTS3_MATCHINFO_LCS) - || (cArg==FTS3_MATCHINFO_HITS) - ){ - return SQLITE_OK; - } - *pzErr = sqlite3_mprintf("unrecognized matchinfo request: %c", cArg); - return SQLITE_ERROR; -} - -static int fts3MatchinfoSize(MatchInfo *pInfo, char cArg){ - int nVal; /* Number of integers output by cArg */ - - switch( cArg ){ - case FTS3_MATCHINFO_NDOC: - case FTS3_MATCHINFO_NPHRASE: - case FTS3_MATCHINFO_NCOL: - nVal = 1; - break; - - case FTS3_MATCHINFO_AVGLENGTH: - case FTS3_MATCHINFO_LENGTH: - case FTS3_MATCHINFO_LCS: - nVal = pInfo->nCol; - break; - - default: - assert( cArg==FTS3_MATCHINFO_HITS ); - nVal = pInfo->nCol * pInfo->nPhrase * 3; - break; - } - - return nVal; -} - -static int fts3MatchinfoSelectDoctotal( - Fts3Table *pTab, - sqlite3_stmt **ppStmt, - sqlite3_int64 *pnDoc, - const char **paLen -){ - sqlite3_stmt *pStmt; - const char *a; - sqlite3_int64 nDoc; - - if( !*ppStmt ){ - int rc = sqlite3Fts3SelectDoctotal(pTab, ppStmt); - if( rc!=SQLITE_OK ) return rc; - } - pStmt = *ppStmt; - assert( sqlite3_data_count(pStmt)==1 ); - - a = sqlite3_column_blob(pStmt, 0); - a += sqlite3Fts3GetVarint(a, &nDoc); - if( nDoc==0 ) return FTS_CORRUPT_VTAB; - *pnDoc = (u32)nDoc; - - if( paLen ) *paLen = a; - return SQLITE_OK; -} - -/* -** An instance of the following structure is used to store state while -** iterating through a multi-column position-list corresponding to the -** hits for a single phrase on a single row in order to calculate the -** values for a matchinfo() FTS3_MATCHINFO_LCS request. -*/ -typedef struct LcsIterator LcsIterator; -struct LcsIterator { - Fts3Expr *pExpr; /* Pointer to phrase expression */ - int iPosOffset; /* Tokens count up to end of this phrase */ - char *pRead; /* Cursor used to iterate through aDoclist */ - int iPos; /* Current position */ -}; - -/* -** If LcsIterator.iCol is set to the following value, the iterator has -** finished iterating through all offsets for all columns. -*/ -#define LCS_ITERATOR_FINISHED 0x7FFFFFFF; - -static int fts3MatchinfoLcsCb( - Fts3Expr *pExpr, /* Phrase expression node */ - int iPhrase, /* Phrase number (numbered from zero) */ - void *pCtx /* Pointer to MatchInfo structure */ -){ - LcsIterator *aIter = (LcsIterator *)pCtx; - aIter[iPhrase].pExpr = pExpr; - return SQLITE_OK; -} - -/* -** Advance the iterator passed as an argument to the next position. Return -** 1 if the iterator is at EOF or if it now points to the start of the -** position list for the next column. 
-*/ -static int fts3LcsIteratorAdvance(LcsIterator *pIter){ - char *pRead = pIter->pRead; - sqlite3_int64 iRead; - int rc = 0; - - pRead += sqlite3Fts3GetVarint(pRead, &iRead); - if( iRead==0 || iRead==1 ){ - pRead = 0; - rc = 1; - }else{ - pIter->iPos += (int)(iRead-2); - } - - pIter->pRead = pRead; - return rc; -} - -/* -** This function implements the FTS3_MATCHINFO_LCS matchinfo() flag. -** -** If the call is successful, the longest-common-substring lengths for each -** column are written into the first nCol elements of the pInfo->aMatchinfo[] -** array before returning. SQLITE_OK is returned in this case. -** -** Otherwise, if an error occurs, an SQLite error code is returned and the -** data written to the first nCol elements of pInfo->aMatchinfo[] is -** undefined. -*/ -static int fts3MatchinfoLcs(Fts3Cursor *pCsr, MatchInfo *pInfo){ - LcsIterator *aIter; - int i; - int iCol; - int nToken = 0; - - /* Allocate and populate the array of LcsIterator objects. The array - ** contains one element for each matchable phrase in the query. - **/ - aIter = sqlite3_malloc(sizeof(LcsIterator) * pCsr->nPhrase); - if( !aIter ) return SQLITE_NOMEM; - memset(aIter, 0, sizeof(LcsIterator) * pCsr->nPhrase); - (void)fts3ExprIterate(pCsr->pExpr, fts3MatchinfoLcsCb, (void*)aIter); - - for(i=0; inPhrase; i++){ - LcsIterator *pIter = &aIter[i]; - nToken -= pIter->pExpr->pPhrase->nToken; - pIter->iPosOffset = nToken; - } - - for(iCol=0; iColnCol; iCol++){ - int nLcs = 0; /* LCS value for this column */ - int nLive = 0; /* Number of iterators in aIter not at EOF */ - - for(i=0; inPhrase; i++){ - int rc; - LcsIterator *pIt = &aIter[i]; - rc = sqlite3Fts3EvalPhrasePoslist(pCsr, pIt->pExpr, iCol, &pIt->pRead); - if( rc!=SQLITE_OK ) return rc; - if( pIt->pRead ){ - pIt->iPos = pIt->iPosOffset; - fts3LcsIteratorAdvance(&aIter[i]); - nLive++; - } - } - - while( nLive>0 ){ - LcsIterator *pAdv = 0; /* The iterator to advance by one position */ - int nThisLcs = 0; /* LCS for the current iterator positions */ - - for(i=0; inPhrase; i++){ - LcsIterator *pIter = &aIter[i]; - if( pIter->pRead==0 ){ - /* This iterator is already at EOF for this column. */ - nThisLcs = 0; - }else{ - if( pAdv==0 || pIter->iPosiPos ){ - pAdv = pIter; - } - if( nThisLcs==0 || pIter->iPos==pIter[-1].iPos ){ - nThisLcs++; - }else{ - nThisLcs = 1; - } - if( nThisLcs>nLcs ) nLcs = nThisLcs; - } - } - if( fts3LcsIteratorAdvance(pAdv) ) nLive--; - } - - pInfo->aMatchinfo[iCol] = nLcs; - } - - sqlite3_free(aIter); - return SQLITE_OK; -} - -/* -** Populate the buffer pInfo->aMatchinfo[] with an array of integers to -** be returned by the matchinfo() function. Argument zArg contains the -** format string passed as the second argument to matchinfo (or the -** default value "pcx" if no second argument was specified). The format -** string has already been validated and the pInfo->aMatchinfo[] array -** is guaranteed to be large enough for the output. -** -** If bGlobal is true, then populate all fields of the matchinfo() output. -** If it is false, then assume that those fields that do not change between -** rows (i.e. FTS3_MATCHINFO_NPHRASE, NCOL, NDOC, AVGLENGTH and part of HITS) -** have already been populated. -** -** Return SQLITE_OK if successful, or an SQLite error code if an error -** occurs. If a value other than SQLITE_OK is returned, the state the -** pInfo->aMatchinfo[] buffer is left in is undefined. 
-*/ -static int fts3MatchinfoValues( - Fts3Cursor *pCsr, /* FTS3 cursor object */ - int bGlobal, /* True to grab the global stats */ - MatchInfo *pInfo, /* Matchinfo context object */ - const char *zArg /* Matchinfo format string */ -){ - int rc = SQLITE_OK; - int i; - Fts3Table *pTab = (Fts3Table *)pCsr->base.pVtab; - sqlite3_stmt *pSelect = 0; - - for(i=0; rc==SQLITE_OK && zArg[i]; i++){ - - switch( zArg[i] ){ - case FTS3_MATCHINFO_NPHRASE: - if( bGlobal ) pInfo->aMatchinfo[0] = pInfo->nPhrase; - break; - - case FTS3_MATCHINFO_NCOL: - if( bGlobal ) pInfo->aMatchinfo[0] = pInfo->nCol; - break; - - case FTS3_MATCHINFO_NDOC: - if( bGlobal ){ - sqlite3_int64 nDoc = 0; - rc = fts3MatchinfoSelectDoctotal(pTab, &pSelect, &nDoc, 0); - pInfo->aMatchinfo[0] = (u32)nDoc; - } - break; - - case FTS3_MATCHINFO_AVGLENGTH: - if( bGlobal ){ - sqlite3_int64 nDoc; /* Number of rows in table */ - const char *a; /* Aggregate column length array */ - - rc = fts3MatchinfoSelectDoctotal(pTab, &pSelect, &nDoc, &a); - if( rc==SQLITE_OK ){ - int iCol; - for(iCol=0; iColnCol; iCol++){ - u32 iVal; - sqlite3_int64 nToken; - a += sqlite3Fts3GetVarint(a, &nToken); - iVal = (u32)(((u32)(nToken&0xffffffff)+nDoc/2)/nDoc); - pInfo->aMatchinfo[iCol] = iVal; - } - } - } - break; - - case FTS3_MATCHINFO_LENGTH: { - sqlite3_stmt *pSelectDocsize = 0; - rc = sqlite3Fts3SelectDocsize(pTab, pCsr->iPrevId, &pSelectDocsize); - if( rc==SQLITE_OK ){ - int iCol; - const char *a = sqlite3_column_blob(pSelectDocsize, 0); - for(iCol=0; iColnCol; iCol++){ - sqlite3_int64 nToken; - a += sqlite3Fts3GetVarint(a, &nToken); - pInfo->aMatchinfo[iCol] = (u32)nToken; - } - } - sqlite3_reset(pSelectDocsize); - break; - } - - case FTS3_MATCHINFO_LCS: - rc = fts3ExprLoadDoclists(pCsr, 0, 0); - if( rc==SQLITE_OK ){ - rc = fts3MatchinfoLcs(pCsr, pInfo); - } - break; - - default: { - Fts3Expr *pExpr; - assert( zArg[i]==FTS3_MATCHINFO_HITS ); - pExpr = pCsr->pExpr; - rc = fts3ExprLoadDoclists(pCsr, 0, 0); - if( rc!=SQLITE_OK ) break; - if( bGlobal ){ - if( pCsr->pDeferred ){ - rc = fts3MatchinfoSelectDoctotal(pTab, &pSelect, &pInfo->nDoc, 0); - if( rc!=SQLITE_OK ) break; - } - rc = fts3ExprIterate(pExpr, fts3ExprGlobalHitsCb,(void*)pInfo); - if( rc!=SQLITE_OK ) break; - } - (void)fts3ExprIterate(pExpr, fts3ExprLocalHitsCb,(void*)pInfo); - break; - } - } - - pInfo->aMatchinfo += fts3MatchinfoSize(pInfo, zArg[i]); - } - - sqlite3_reset(pSelect); - return rc; -} - - -/* -** Populate pCsr->aMatchinfo[] with data for the current row. The -** 'matchinfo' data is an array of 32-bit unsigned integers (C type u32). -*/ -static int fts3GetMatchinfo( - Fts3Cursor *pCsr, /* FTS3 Cursor object */ - const char *zArg /* Second argument to matchinfo() function */ -){ - MatchInfo sInfo; - Fts3Table *pTab = (Fts3Table *)pCsr->base.pVtab; - int rc = SQLITE_OK; - int bGlobal = 0; /* Collect 'global' stats as well as local */ - - memset(&sInfo, 0, sizeof(MatchInfo)); - sInfo.pCursor = pCsr; - sInfo.nCol = pTab->nColumn; - - /* If there is cached matchinfo() data, but the format string for the - ** cache does not match the format string for this request, discard - ** the cached data. */ - if( pCsr->zMatchinfo && strcmp(pCsr->zMatchinfo, zArg) ){ - assert( pCsr->aMatchinfo ); - sqlite3_free(pCsr->aMatchinfo); - pCsr->zMatchinfo = 0; - pCsr->aMatchinfo = 0; - } - - /* If Fts3Cursor.aMatchinfo[] is NULL, then this is the first time the - ** matchinfo function has been called for this query. 
In this case - ** allocate the array used to accumulate the matchinfo data and - ** initialize those elements that are constant for every row. - */ - if( pCsr->aMatchinfo==0 ){ - int nMatchinfo = 0; /* Number of u32 elements in match-info */ - int nArg; /* Bytes in zArg */ - int i; /* Used to iterate through zArg */ - - /* Determine the number of phrases in the query */ - pCsr->nPhrase = fts3ExprPhraseCount(pCsr->pExpr); - sInfo.nPhrase = pCsr->nPhrase; - - /* Determine the number of integers in the buffer returned by this call. */ - for(i=0; zArg[i]; i++){ - nMatchinfo += fts3MatchinfoSize(&sInfo, zArg[i]); - } - - /* Allocate space for Fts3Cursor.aMatchinfo[] and Fts3Cursor.zMatchinfo. */ - nArg = (int)strlen(zArg); - pCsr->aMatchinfo = (u32 *)sqlite3_malloc(sizeof(u32)*nMatchinfo + nArg + 1); - if( !pCsr->aMatchinfo ) return SQLITE_NOMEM; - - pCsr->zMatchinfo = (char *)&pCsr->aMatchinfo[nMatchinfo]; - pCsr->nMatchinfo = nMatchinfo; - memcpy(pCsr->zMatchinfo, zArg, nArg+1); - memset(pCsr->aMatchinfo, 0, sizeof(u32)*nMatchinfo); - pCsr->isMatchinfoNeeded = 1; - bGlobal = 1; - } - - sInfo.aMatchinfo = pCsr->aMatchinfo; - sInfo.nPhrase = pCsr->nPhrase; - if( pCsr->isMatchinfoNeeded ){ - rc = fts3MatchinfoValues(pCsr, bGlobal, &sInfo, zArg); - pCsr->isMatchinfoNeeded = 0; - } - - return rc; -} - -/* -** Implementation of snippet() function. -*/ -SQLITE_PRIVATE void sqlite3Fts3Snippet( - sqlite3_context *pCtx, /* SQLite function call context */ - Fts3Cursor *pCsr, /* Cursor object */ - const char *zStart, /* Snippet start text - "" */ - const char *zEnd, /* Snippet end text - "" */ - const char *zEllipsis, /* Snippet ellipsis text - "..." */ - int iCol, /* Extract snippet from this column */ - int nToken /* Approximate number of tokens in snippet */ -){ - Fts3Table *pTab = (Fts3Table *)pCsr->base.pVtab; - int rc = SQLITE_OK; - int i; - StrBuffer res = {0, 0, 0}; - - /* The returned text includes up to four fragments of text extracted from - ** the data in the current row. The first iteration of the for(...) loop - ** below attempts to locate a single fragment of text nToken tokens in - ** size that contains at least one instance of all phrases in the query - ** expression that appear in the current row. If such a fragment of text - ** cannot be found, the second iteration of the loop attempts to locate - ** a pair of fragments, and so on. - */ - int nSnippet = 0; /* Number of fragments in this snippet */ - SnippetFragment aSnippet[4]; /* Maximum of 4 fragments per snippet */ - int nFToken = -1; /* Number of tokens in each fragment */ - - if( !pCsr->pExpr ){ - sqlite3_result_text(pCtx, "", 0, SQLITE_STATIC); - return; - } - - for(nSnippet=1; 1; nSnippet++){ - - int iSnip; /* Loop counter 0..nSnippet-1 */ - u64 mCovered = 0; /* Bitmask of phrases covered by snippet */ - u64 mSeen = 0; /* Bitmask of phrases seen by BestSnippet() */ - - if( nToken>=0 ){ - nFToken = (nToken+nSnippet-1) / nSnippet; - }else{ - nFToken = -1 * nToken; - } - - for(iSnip=0; iSnipnColumn; iRead++){ - SnippetFragment sF = {0, 0, 0, 0}; - int iS; - if( iCol>=0 && iRead!=iCol ) continue; - - /* Find the best snippet of nFToken tokens in column iRead. */ - rc = fts3BestSnippet(nFToken, pCsr, iRead, mCovered, &mSeen, &sF, &iS); - if( rc!=SQLITE_OK ){ - goto snippet_out; - } - if( iS>iBestScore ){ - *pFragment = sF; - iBestScore = iS; - } - } - - mCovered |= pFragment->covered; - } - - /* If all query phrases seen by fts3BestSnippet() are present in at least - ** one of the nSnippet snippet fragments, break out of the loop. 
- */ - assert( (mCovered&mSeen)==mCovered ); - if( mSeen==mCovered || nSnippet==SizeofArray(aSnippet) ) break; - } - - assert( nFToken>0 ); - - for(i=0; ipCsr, pExpr, p->iCol, &pList); - nTerm = pExpr->pPhrase->nToken; - if( pList ){ - fts3GetDeltaPosition(&pList, &iPos); - assert( iPos>=0 ); - } - - for(iTerm=0; iTermaTerm[p->iTerm++]; - pT->iOff = nTerm-iTerm-1; - pT->pList = pList; - pT->iPos = iPos; - } - - return rc; -} - -/* -** Implementation of offsets() function. -*/ -SQLITE_PRIVATE void sqlite3Fts3Offsets( - sqlite3_context *pCtx, /* SQLite function call context */ - Fts3Cursor *pCsr /* Cursor object */ -){ - Fts3Table *pTab = (Fts3Table *)pCsr->base.pVtab; - sqlite3_tokenizer_module const *pMod = pTab->pTokenizer->pModule; - int rc; /* Return Code */ - int nToken; /* Number of tokens in query */ - int iCol; /* Column currently being processed */ - StrBuffer res = {0, 0, 0}; /* Result string */ - TermOffsetCtx sCtx; /* Context for fts3ExprTermOffsetInit() */ - - if( !pCsr->pExpr ){ - sqlite3_result_text(pCtx, "", 0, SQLITE_STATIC); - return; - } - - memset(&sCtx, 0, sizeof(sCtx)); - assert( pCsr->isRequireSeek==0 ); - - /* Count the number of terms in the query */ - rc = fts3ExprLoadDoclists(pCsr, 0, &nToken); - if( rc!=SQLITE_OK ) goto offsets_out; - - /* Allocate the array of TermOffset iterators. */ - sCtx.aTerm = (TermOffset *)sqlite3_malloc(sizeof(TermOffset)*nToken); - if( 0==sCtx.aTerm ){ - rc = SQLITE_NOMEM; - goto offsets_out; - } - sCtx.iDocid = pCsr->iPrevId; - sCtx.pCsr = pCsr; - - /* Loop through the table columns, appending offset information to - ** string-buffer res for each column. - */ - for(iCol=0; iColnColumn; iCol++){ - sqlite3_tokenizer_cursor *pC; /* Tokenizer cursor */ - const char *ZDUMMY; /* Dummy argument used with xNext() */ - int NDUMMY = 0; /* Dummy argument used with xNext() */ - int iStart = 0; - int iEnd = 0; - int iCurrent = 0; - const char *zDoc; - int nDoc; - - /* Initialize the contents of sCtx.aTerm[] for column iCol. There is - ** no way that this operation can fail, so the return code from - ** fts3ExprIterate() can be discarded. - */ - sCtx.iCol = iCol; - sCtx.iTerm = 0; - (void)fts3ExprIterate(pCsr->pExpr, fts3ExprTermOffsetInit, (void *)&sCtx); - - /* Retreive the text stored in column iCol. If an SQL NULL is stored - ** in column iCol, jump immediately to the next iteration of the loop. - ** If an OOM occurs while retrieving the data (this can happen if SQLite - ** needs to transform the data from utf-16 to utf-8), return SQLITE_NOMEM - ** to the caller. - */ - zDoc = (const char *)sqlite3_column_text(pCsr->pStmt, iCol+1); - nDoc = sqlite3_column_bytes(pCsr->pStmt, iCol+1); - if( zDoc==0 ){ - if( sqlite3_column_type(pCsr->pStmt, iCol+1)==SQLITE_NULL ){ - continue; - } - rc = SQLITE_NOMEM; - goto offsets_out; - } - - /* Initialize a tokenizer iterator to iterate through column iCol. */ - rc = sqlite3Fts3OpenTokenizer(pTab->pTokenizer, pCsr->iLangid, - zDoc, nDoc, &pC - ); - if( rc!=SQLITE_OK ) goto offsets_out; - - rc = pMod->xNext(pC, &ZDUMMY, &NDUMMY, &iStart, &iEnd, &iCurrent); - while( rc==SQLITE_OK ){ - int i; /* Used to loop through terms */ - int iMinPos = 0x7FFFFFFF; /* Position of next token */ - TermOffset *pTerm = 0; /* TermOffset associated with next token */ - - for(i=0; ipList && (pT->iPos-pT->iOff)iPos-pT->iOff; - pTerm = pT; - } - } - - if( !pTerm ){ - /* All offsets for this column have been gathered. 
*/ - rc = SQLITE_DONE; - }else{ - assert( iCurrent<=iMinPos ); - if( 0==(0xFE&*pTerm->pList) ){ - pTerm->pList = 0; - }else{ - fts3GetDeltaPosition(&pTerm->pList, &pTerm->iPos); - } - while( rc==SQLITE_OK && iCurrentxNext(pC, &ZDUMMY, &NDUMMY, &iStart, &iEnd, &iCurrent); - } - if( rc==SQLITE_OK ){ - char aBuffer[64]; - sqlite3_snprintf(sizeof(aBuffer), aBuffer, - "%d %d %d %d ", iCol, pTerm-sCtx.aTerm, iStart, iEnd-iStart - ); - rc = fts3StringAppend(&res, aBuffer, -1); - }else if( rc==SQLITE_DONE && pTab->zContentTbl==0 ){ - rc = FTS_CORRUPT_VTAB; - } - } - } - if( rc==SQLITE_DONE ){ - rc = SQLITE_OK; - } - - pMod->xClose(pC); - if( rc!=SQLITE_OK ) goto offsets_out; - } - - offsets_out: - sqlite3_free(sCtx.aTerm); - assert( rc!=SQLITE_DONE ); - sqlite3Fts3SegmentsClose(pTab); - if( rc!=SQLITE_OK ){ - sqlite3_result_error_code(pCtx, rc); - sqlite3_free(res.z); - }else{ - sqlite3_result_text(pCtx, res.z, res.n-1, sqlite3_free); - } - return; -} - -/* -** Implementation of matchinfo() function. -*/ -SQLITE_PRIVATE void sqlite3Fts3Matchinfo( - sqlite3_context *pContext, /* Function call context */ - Fts3Cursor *pCsr, /* FTS3 table cursor */ - const char *zArg /* Second arg to matchinfo() function */ -){ - Fts3Table *pTab = (Fts3Table *)pCsr->base.pVtab; - int rc; - int i; - const char *zFormat; - - if( zArg ){ - for(i=0; zArg[i]; i++){ - char *zErr = 0; - if( fts3MatchinfoCheck(pTab, zArg[i], &zErr) ){ - sqlite3_result_error(pContext, zErr, -1); - sqlite3_free(zErr); - return; - } - } - zFormat = zArg; - }else{ - zFormat = FTS3_MATCHINFO_DEFAULT; - } - - if( !pCsr->pExpr ){ - sqlite3_result_blob(pContext, "", 0, SQLITE_STATIC); - return; - } - - /* Retrieve matchinfo() data. */ - rc = fts3GetMatchinfo(pCsr, zFormat); - sqlite3Fts3SegmentsClose(pTab); - - if( rc!=SQLITE_OK ){ - sqlite3_result_error_code(pContext, rc); - }else{ - int n = pCsr->nMatchinfo * sizeof(u32); - sqlite3_result_blob(pContext, pCsr->aMatchinfo, n, SQLITE_TRANSIENT); - } -} - -#endif - -/************** End of fts3_snippet.c ****************************************/ -/************** Begin file fts3_unicode.c ************************************/ -/* -** 2012 May 24 -** -** The author disclaims copyright to this source code. In place of -** a legal notice, here is a blessing: -** -** May you do good and not evil. -** May you find forgiveness for yourself and forgive others. -** May you share freely, never taking more than you give. -** -****************************************************************************** -** -** Implementation of the "unicode" full-text-search tokenizer. -*/ - -#ifdef SQLITE_ENABLE_FTS4_UNICODE61 - -#if !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS3) - -/* #include */ -/* #include */ -/* #include */ -/* #include */ - - -/* -** The following two macros - READ_UTF8 and WRITE_UTF8 - have been copied -** from the sqlite3 source file utf.c. If this file is compiled as part -** of the amalgamation, they are not required. 
-*/ -#ifndef SQLITE_AMALGAMATION - -static const unsigned char sqlite3Utf8Trans1[] = { - 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, - 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, - 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, - 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, - 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, - 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, - 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, - 0x00, 0x01, 0x02, 0x03, 0x00, 0x01, 0x00, 0x00, -}; - -#define READ_UTF8(zIn, zTerm, c) \ - c = *(zIn++); \ - if( c>=0xc0 ){ \ - c = sqlite3Utf8Trans1[c-0xc0]; \ - while( zIn!=zTerm && (*zIn & 0xc0)==0x80 ){ \ - c = (c<<6) + (0x3f & *(zIn++)); \ - } \ - if( c<0x80 \ - || (c&0xFFFFF800)==0xD800 \ - || (c&0xFFFFFFFE)==0xFFFE ){ c = 0xFFFD; } \ - } - -#define WRITE_UTF8(zOut, c) { \ - if( c<0x00080 ){ \ - *zOut++ = (u8)(c&0xFF); \ - } \ - else if( c<0x00800 ){ \ - *zOut++ = 0xC0 + (u8)((c>>6)&0x1F); \ - *zOut++ = 0x80 + (u8)(c & 0x3F); \ - } \ - else if( c<0x10000 ){ \ - *zOut++ = 0xE0 + (u8)((c>>12)&0x0F); \ - *zOut++ = 0x80 + (u8)((c>>6) & 0x3F); \ - *zOut++ = 0x80 + (u8)(c & 0x3F); \ - }else{ \ - *zOut++ = 0xF0 + (u8)((c>>18) & 0x07); \ - *zOut++ = 0x80 + (u8)((c>>12) & 0x3F); \ - *zOut++ = 0x80 + (u8)((c>>6) & 0x3F); \ - *zOut++ = 0x80 + (u8)(c & 0x3F); \ - } \ -} - -#endif /* ifndef SQLITE_AMALGAMATION */ - -typedef struct unicode_tokenizer unicode_tokenizer; -typedef struct unicode_cursor unicode_cursor; - -struct unicode_tokenizer { - sqlite3_tokenizer base; - int bRemoveDiacritic; - int nException; - int *aiException; -}; - -struct unicode_cursor { - sqlite3_tokenizer_cursor base; - const unsigned char *aInput; /* Input text being tokenized */ - int nInput; /* Size of aInput[] in bytes */ - int iOff; /* Current offset within aInput[] */ - int iToken; /* Index of next token to be returned */ - char *zToken; /* storage for current token */ - int nAlloc; /* space allocated at zToken */ -}; - - -/* -** Destroy a tokenizer allocated by unicodeCreate(). -*/ -static int unicodeDestroy(sqlite3_tokenizer *pTokenizer){ - if( pTokenizer ){ - unicode_tokenizer *p = (unicode_tokenizer *)pTokenizer; - sqlite3_free(p->aiException); - sqlite3_free(p); - } - return SQLITE_OK; -} - -/* -** As part of a tokenchars= or separators= option, the CREATE VIRTUAL TABLE -** statement has specified that the tokenizer for this table shall consider -** all characters in string zIn/nIn to be separators (if bAlnum==0) or -** token characters (if bAlnum==1). -** -** For each codepoint in the zIn/nIn string, this function checks if the -** sqlite3FtsUnicodeIsalnum() function already returns the desired result. -** If so, no action is taken. Otherwise, the codepoint is added to the -** unicode_tokenizer.aiException[] array. For the purposes of tokenization, -** the return value of sqlite3FtsUnicodeIsalnum() is inverted for all -** codepoints in the aiException[] array. -** -** If a standalone diacritic mark (one that sqlite3FtsUnicodeIsdiacritic() -** identifies as a diacritic) occurs in the zIn/nIn string it is ignored. -** It is not possible to change the behavior of the tokenizer with respect -** to these codepoints. 
-*/ -static int unicodeAddExceptions( - unicode_tokenizer *p, /* Tokenizer to add exceptions to */ - int bAlnum, /* Replace Isalnum() return value with this */ - const char *zIn, /* Array of characters to make exceptions */ - int nIn /* Length of z in bytes */ -){ - const unsigned char *z = (const unsigned char *)zIn; - const unsigned char *zTerm = &z[nIn]; - int iCode; - int nEntry = 0; - - assert( bAlnum==0 || bAlnum==1 ); - - while( zaiException, (p->nException+nEntry)*sizeof(int)); - if( aNew==0 ) return SQLITE_NOMEM; - nNew = p->nException; - - z = (const unsigned char *)zIn; - while( zi; j--) aNew[j] = aNew[j-1]; - aNew[i] = iCode; - nNew++; - } - } - p->aiException = aNew; - p->nException = nNew; - } - - return SQLITE_OK; -} - -/* -** Return true if the p->aiException[] array contains the value iCode. -*/ -static int unicodeIsException(unicode_tokenizer *p, int iCode){ - if( p->nException>0 ){ - int *a = p->aiException; - int iLo = 0; - int iHi = p->nException-1; - - while( iHi>=iLo ){ - int iTest = (iHi + iLo) / 2; - if( iCode==a[iTest] ){ - return 1; - }else if( iCode>a[iTest] ){ - iLo = iTest+1; - }else{ - iHi = iTest-1; - } - } - } - - return 0; -} - -/* -** Return true if, for the purposes of tokenization, codepoint iCode is -** considered a token character (not a separator). -*/ -static int unicodeIsAlnum(unicode_tokenizer *p, int iCode){ - assert( (sqlite3FtsUnicodeIsalnum(iCode) & 0xFFFFFFFE)==0 ); - return sqlite3FtsUnicodeIsalnum(iCode) ^ unicodeIsException(p, iCode); -} - -/* -** Create a new tokenizer instance. -*/ -static int unicodeCreate( - int nArg, /* Size of array argv[] */ - const char * const *azArg, /* Tokenizer creation arguments */ - sqlite3_tokenizer **pp /* OUT: New tokenizer handle */ -){ - unicode_tokenizer *pNew; /* New tokenizer object */ - int i; - int rc = SQLITE_OK; - - pNew = (unicode_tokenizer *) sqlite3_malloc(sizeof(unicode_tokenizer)); - if( pNew==NULL ) return SQLITE_NOMEM; - memset(pNew, 0, sizeof(unicode_tokenizer)); - pNew->bRemoveDiacritic = 1; - - for(i=0; rc==SQLITE_OK && ibRemoveDiacritic = 1; - } - else if( n==19 && memcmp("remove_diacritics=0", z, 19)==0 ){ - pNew->bRemoveDiacritic = 0; - } - else if( n>=11 && memcmp("tokenchars=", z, 11)==0 ){ - rc = unicodeAddExceptions(pNew, 1, &z[11], n-11); - } - else if( n>=11 && memcmp("separators=", z, 11)==0 ){ - rc = unicodeAddExceptions(pNew, 0, &z[11], n-11); - } - else{ - /* Unrecognized argument */ - rc = SQLITE_ERROR; - } - } - - if( rc!=SQLITE_OK ){ - unicodeDestroy((sqlite3_tokenizer *)pNew); - pNew = 0; - } - *pp = (sqlite3_tokenizer *)pNew; - return rc; -} - -/* -** Prepare to begin tokenizing a particular string. The input -** string to be tokenized is pInput[0..nBytes-1]. A cursor -** used to incrementally tokenize this string is returned in -** *ppCursor. 
-*/ -static int unicodeOpen( - sqlite3_tokenizer *p, /* The tokenizer */ - const char *aInput, /* Input string */ - int nInput, /* Size of string aInput in bytes */ - sqlite3_tokenizer_cursor **pp /* OUT: New cursor object */ -){ - unicode_cursor *pCsr; - - pCsr = (unicode_cursor *)sqlite3_malloc(sizeof(unicode_cursor)); - if( pCsr==0 ){ - return SQLITE_NOMEM; - } - memset(pCsr, 0, sizeof(unicode_cursor)); - - pCsr->aInput = (const unsigned char *)aInput; - if( aInput==0 ){ - pCsr->nInput = 0; - }else if( nInput<0 ){ - pCsr->nInput = (int)strlen(aInput); - }else{ - pCsr->nInput = nInput; - } - - *pp = &pCsr->base; - UNUSED_PARAMETER(p); - return SQLITE_OK; -} - -/* -** Close a tokenization cursor previously opened by a call to -** simpleOpen() above. -*/ -static int unicodeClose(sqlite3_tokenizer_cursor *pCursor){ - unicode_cursor *pCsr = (unicode_cursor *) pCursor; - sqlite3_free(pCsr->zToken); - sqlite3_free(pCsr); - return SQLITE_OK; -} - -/* -** Extract the next token from a tokenization cursor. The cursor must -** have been opened by a prior call to simpleOpen(). -*/ -static int unicodeNext( - sqlite3_tokenizer_cursor *pC, /* Cursor returned by simpleOpen */ - const char **paToken, /* OUT: Token text */ - int *pnToken, /* OUT: Number of bytes at *paToken */ - int *piStart, /* OUT: Starting offset of token */ - int *piEnd, /* OUT: Ending offset of token */ - int *piPos /* OUT: Position integer of token */ -){ - unicode_cursor *pCsr = (unicode_cursor *)pC; - unicode_tokenizer *p = ((unicode_tokenizer *)pCsr->base.pTokenizer); - int iCode; - char *zOut; - const unsigned char *z = &pCsr->aInput[pCsr->iOff]; - const unsigned char *zStart = z; - const unsigned char *zEnd; - const unsigned char *zTerm = &pCsr->aInput[pCsr->nInput]; - - /* Scan past any delimiter characters before the start of the next token. - ** Return SQLITE_DONE early if this takes us all the way to the end of - ** the input. */ - while( z=zTerm ) return SQLITE_DONE; - - zOut = pCsr->zToken; - do { - int iOut; - - /* Grow the output buffer if required. */ - if( (zOut-pCsr->zToken)>=(pCsr->nAlloc-4) ){ - char *zNew = sqlite3_realloc(pCsr->zToken, pCsr->nAlloc+64); - if( !zNew ) return SQLITE_NOMEM; - zOut = &zNew[zOut - pCsr->zToken]; - pCsr->zToken = zNew; - pCsr->nAlloc += 64; - } - - /* Write the folded case of the last character read to the output */ - zEnd = z; - iOut = sqlite3FtsUnicodeFold(iCode, p->bRemoveDiacritic); - if( iOut ){ - WRITE_UTF8(zOut, iOut); - } - - /* If the cursor is not at EOF, read the next character */ - if( z>=zTerm ) break; - READ_UTF8(z, zTerm, iCode); - }while( unicodeIsAlnum(p, iCode) - || sqlite3FtsUnicodeIsdiacritic(iCode) - ); - - /* Set the output variables and return. */ - pCsr->iOff = (z - pCsr->aInput); - *paToken = pCsr->zToken; - *pnToken = zOut - pCsr->zToken; - *piStart = (zStart - pCsr->aInput); - *piEnd = (zEnd - pCsr->aInput); - *piPos = pCsr->iToken++; - return SQLITE_OK; -} - -/* -** Set *ppModule to a pointer to the sqlite3_tokenizer_module -** structure for the unicode tokenizer. 
-*/ -SQLITE_PRIVATE void sqlite3Fts3UnicodeTokenizer(sqlite3_tokenizer_module const **ppModule){ - static const sqlite3_tokenizer_module module = { - 0, - unicodeCreate, - unicodeDestroy, - unicodeOpen, - unicodeClose, - unicodeNext, - 0, - }; - *ppModule = &module; -} - -#endif /* !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS3) */ -#endif /* ifndef SQLITE_ENABLE_FTS4_UNICODE61 */ - -/************** End of fts3_unicode.c ****************************************/ -/************** Begin file fts3_unicode2.c ***********************************/ -/* -** 2012 May 25 -** -** The author disclaims copyright to this source code. In place of -** a legal notice, here is a blessing: -** -** May you do good and not evil. -** May you find forgiveness for yourself and forgive others. -** May you share freely, never taking more than you give. -** -****************************************************************************** -*/ - -/* -** DO NOT EDIT THIS MACHINE GENERATED FILE. -*/ - -#if defined(SQLITE_ENABLE_FTS4_UNICODE61) -#if defined(SQLITE_ENABLE_FTS3) || defined(SQLITE_ENABLE_FTS4) - -/* #include */ - -/* -** Return true if the argument corresponds to a unicode codepoint -** classified as either a letter or a number. Otherwise false. -** -** The results are undefined if the value passed to this function -** is less than zero. -*/ -SQLITE_PRIVATE int sqlite3FtsUnicodeIsalnum(int c){ - /* Each unsigned integer in the following array corresponds to a contiguous - ** range of unicode codepoints that are not either letters or numbers (i.e. - ** codepoints for which this function should return 0). - ** - ** The most significant 22 bits in each 32-bit value contain the first - ** codepoint in the range. The least significant 10 bits are used to store - ** the size of the range (always at least 1). In other words, the value - ** ((C<<22) + N) represents a range of N codepoints starting with codepoint - ** C. It is not possible to represent a range larger than 1023 codepoints - ** using this format. 
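Illustrative sketch, not part of the deleted SQLite sources: the table described above packs each excluded range into a single 32-bit word (the start codepoint shifted left by 10 bits, plus the range length), and membership is decided by a binary search for the largest packed value not exceeding the probe key ((c<<10) | 0x3FF) used in the deleted code below. The toy ranges here are made up purely to show the encoding and the search.

/* Pack a codepoint range into one 32-bit word: start in the upper bits,
** length (at most 1023) in the low 10 bits. */
#define TOY_PACK_RANGE(start, len)  ((((unsigned int)(start)) << 10) | (unsigned int)(len))

/* Two made-up ranges: codepoints 0x20..0x2F and 0x3A..0x40. */
static const unsigned int aToyRange[] = {
  TOY_PACK_RANGE(0x20, 16),
  TOY_PACK_RANGE(0x3A, 7),
};

/* Return 1 if codepoint c falls inside one of the packed ranges. */
static int toyInRange(int c){
  unsigned int key = (((unsigned int)c) << 10) | 0x3FF; /* sorts after any entry starting at c */
  int iLo = 0;
  int iHi = (int)(sizeof(aToyRange)/sizeof(aToyRange[0])) - 1;
  int iRes = -1;
  while( iHi>=iLo ){
    int iTest = (iHi + iLo) / 2;
    if( key>=aToyRange[iTest] ){ iRes = iTest; iLo = iTest+1; }
    else{ iHi = iTest-1; }
  }
  if( iRes<0 ) return 0;                      /* c precedes every range */
  {
    unsigned int start = aToyRange[iRes] >> 10;
    unsigned int len = aToyRange[iRes] & 0x3FF;
    return (unsigned int)c < start + len;     /* inside [start, start+len) */
  }
}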
- */ - const static unsigned int aEntry[] = { - 0x00000030, 0x0000E807, 0x00016C06, 0x0001EC2F, 0x0002AC07, - 0x0002D001, 0x0002D803, 0x0002EC01, 0x0002FC01, 0x00035C01, - 0x0003DC01, 0x000B0804, 0x000B480E, 0x000B9407, 0x000BB401, - 0x000BBC81, 0x000DD401, 0x000DF801, 0x000E1002, 0x000E1C01, - 0x000FD801, 0x00120808, 0x00156806, 0x00162402, 0x00163C01, - 0x00164437, 0x0017CC02, 0x00180005, 0x00181816, 0x00187802, - 0x00192C15, 0x0019A804, 0x0019C001, 0x001B5001, 0x001B580F, - 0x001B9C07, 0x001BF402, 0x001C000E, 0x001C3C01, 0x001C4401, - 0x001CC01B, 0x001E980B, 0x001FAC09, 0x001FD804, 0x00205804, - 0x00206C09, 0x00209403, 0x0020A405, 0x0020C00F, 0x00216403, - 0x00217801, 0x0023901B, 0x00240004, 0x0024E803, 0x0024F812, - 0x00254407, 0x00258804, 0x0025C001, 0x00260403, 0x0026F001, - 0x0026F807, 0x00271C02, 0x00272C03, 0x00275C01, 0x00278802, - 0x0027C802, 0x0027E802, 0x00280403, 0x0028F001, 0x0028F805, - 0x00291C02, 0x00292C03, 0x00294401, 0x0029C002, 0x0029D401, - 0x002A0403, 0x002AF001, 0x002AF808, 0x002B1C03, 0x002B2C03, - 0x002B8802, 0x002BC002, 0x002C0403, 0x002CF001, 0x002CF807, - 0x002D1C02, 0x002D2C03, 0x002D5802, 0x002D8802, 0x002DC001, - 0x002E0801, 0x002EF805, 0x002F1803, 0x002F2804, 0x002F5C01, - 0x002FCC08, 0x00300403, 0x0030F807, 0x00311803, 0x00312804, - 0x00315402, 0x00318802, 0x0031FC01, 0x00320802, 0x0032F001, - 0x0032F807, 0x00331803, 0x00332804, 0x00335402, 0x00338802, - 0x00340802, 0x0034F807, 0x00351803, 0x00352804, 0x00355C01, - 0x00358802, 0x0035E401, 0x00360802, 0x00372801, 0x00373C06, - 0x00375801, 0x00376008, 0x0037C803, 0x0038C401, 0x0038D007, - 0x0038FC01, 0x00391C09, 0x00396802, 0x003AC401, 0x003AD006, - 0x003AEC02, 0x003B2006, 0x003C041F, 0x003CD00C, 0x003DC417, - 0x003E340B, 0x003E6424, 0x003EF80F, 0x003F380D, 0x0040AC14, - 0x00412806, 0x00415804, 0x00417803, 0x00418803, 0x00419C07, - 0x0041C404, 0x0042080C, 0x00423C01, 0x00426806, 0x0043EC01, - 0x004D740C, 0x004E400A, 0x00500001, 0x0059B402, 0x005A0001, - 0x005A6C02, 0x005BAC03, 0x005C4803, 0x005CC805, 0x005D4802, - 0x005DC802, 0x005ED023, 0x005F6004, 0x005F7401, 0x0060000F, - 0x0062A401, 0x0064800C, 0x0064C00C, 0x00650001, 0x00651002, - 0x0066C011, 0x00672002, 0x00677822, 0x00685C05, 0x00687802, - 0x0069540A, 0x0069801D, 0x0069FC01, 0x006A8007, 0x006AA006, - 0x006C0005, 0x006CD011, 0x006D6823, 0x006E0003, 0x006E840D, - 0x006F980E, 0x006FF004, 0x00709014, 0x0070EC05, 0x0071F802, - 0x00730008, 0x00734019, 0x0073B401, 0x0073C803, 0x00770027, - 0x0077F004, 0x007EF401, 0x007EFC03, 0x007F3403, 0x007F7403, - 0x007FB403, 0x007FF402, 0x00800065, 0x0081A806, 0x0081E805, - 0x00822805, 0x0082801A, 0x00834021, 0x00840002, 0x00840C04, - 0x00842002, 0x00845001, 0x00845803, 0x00847806, 0x00849401, - 0x00849C01, 0x0084A401, 0x0084B801, 0x0084E802, 0x00850005, - 0x00852804, 0x00853C01, 0x00864264, 0x00900027, 0x0091000B, - 0x0092704E, 0x00940200, 0x009C0475, 0x009E53B9, 0x00AD400A, - 0x00B39406, 0x00B3BC03, 0x00B3E404, 0x00B3F802, 0x00B5C001, - 0x00B5FC01, 0x00B7804F, 0x00B8C00C, 0x00BA001A, 0x00BA6C59, - 0x00BC00D6, 0x00BFC00C, 0x00C00005, 0x00C02019, 0x00C0A807, - 0x00C0D802, 0x00C0F403, 0x00C26404, 0x00C28001, 0x00C3EC01, - 0x00C64002, 0x00C6580A, 0x00C70024, 0x00C8001F, 0x00C8A81E, - 0x00C94001, 0x00C98020, 0x00CA2827, 0x00CB003F, 0x00CC0100, - 0x01370040, 0x02924037, 0x0293F802, 0x02983403, 0x0299BC10, - 0x029A7C01, 0x029BC008, 0x029C0017, 0x029C8002, 0x029E2402, - 0x02A00801, 0x02A01801, 0x02A02C01, 0x02A08C09, 0x02A0D804, - 0x02A1D004, 0x02A20002, 0x02A2D011, 0x02A33802, 0x02A38012, - 0x02A3E003, 0x02A4980A, 
0x02A51C0D, 0x02A57C01, 0x02A60004, - 0x02A6CC1B, 0x02A77802, 0x02A8A40E, 0x02A90C01, 0x02A93002, - 0x02A97004, 0x02A9DC03, 0x02A9EC01, 0x02AAC001, 0x02AAC803, - 0x02AADC02, 0x02AAF802, 0x02AB0401, 0x02AB7802, 0x02ABAC07, - 0x02ABD402, 0x02AF8C0B, 0x03600001, 0x036DFC02, 0x036FFC02, - 0x037FFC01, 0x03EC7801, 0x03ECA401, 0x03EEC810, 0x03F4F802, - 0x03F7F002, 0x03F8001A, 0x03F88007, 0x03F8C023, 0x03F95013, - 0x03F9A004, 0x03FBFC01, 0x03FC040F, 0x03FC6807, 0x03FCEC06, - 0x03FD6C0B, 0x03FF8007, 0x03FFA007, 0x03FFE405, 0x04040003, - 0x0404DC09, 0x0405E411, 0x0406400C, 0x0407402E, 0x040E7C01, - 0x040F4001, 0x04215C01, 0x04247C01, 0x0424FC01, 0x04280403, - 0x04281402, 0x04283004, 0x0428E003, 0x0428FC01, 0x04294009, - 0x0429FC01, 0x042CE407, 0x04400003, 0x0440E016, 0x04420003, - 0x0442C012, 0x04440003, 0x04449C0E, 0x04450004, 0x04460003, - 0x0446CC0E, 0x04471404, 0x045AAC0D, 0x0491C004, 0x05BD442E, - 0x05BE3C04, 0x074000F6, 0x07440027, 0x0744A4B5, 0x07480046, - 0x074C0057, 0x075B0401, 0x075B6C01, 0x075BEC01, 0x075C5401, - 0x075CD401, 0x075D3C01, 0x075DBC01, 0x075E2401, 0x075EA401, - 0x075F0C01, 0x07BBC002, 0x07C0002C, 0x07C0C064, 0x07C2800F, - 0x07C2C40E, 0x07C3040F, 0x07C3440F, 0x07C4401F, 0x07C4C03C, - 0x07C5C02B, 0x07C7981D, 0x07C8402B, 0x07C90009, 0x07C94002, - 0x07CC0021, 0x07CCC006, 0x07CCDC46, 0x07CE0014, 0x07CE8025, - 0x07CF1805, 0x07CF8011, 0x07D0003F, 0x07D10001, 0x07D108B6, - 0x07D3E404, 0x07D4003E, 0x07D50004, 0x07D54018, 0x07D7EC46, - 0x07D9140B, 0x07DA0046, 0x07DC0074, 0x38000401, 0x38008060, - 0x380400F0, - }; - static const unsigned int aAscii[4] = { - 0xFFFFFFFF, 0xFC00FFFF, 0xF8000001, 0xF8000001, - }; - - if( c<128 ){ - return ( (aAscii[c >> 5] & (1 << (c & 0x001F)))==0 ); - }else if( c<(1<<22) ){ - unsigned int key = (((unsigned int)c)<<10) | 0x000003FF; - int iRes; - int iHi = sizeof(aEntry)/sizeof(aEntry[0]) - 1; - int iLo = 0; - while( iHi>=iLo ){ - int iTest = (iHi + iLo) / 2; - if( key >= aEntry[iTest] ){ - iRes = iTest; - iLo = iTest+1; - }else{ - iHi = iTest-1; - } - } - assert( aEntry[0]=aEntry[iRes] ); - return (((unsigned int)c) >= ((aEntry[iRes]>>10) + (aEntry[iRes]&0x3FF))); - } - return 1; -} - - -/* -** If the argument is a codepoint corresponding to a lowercase letter -** in the ASCII range with a diacritic added, return the codepoint -** of the ASCII letter only. For example, if passed 235 - "LATIN -** SMALL LETTER E WITH DIAERESIS" - return 65 ("LATIN SMALL LETTER -** E"). The resuls of passing a codepoint that corresponds to an -** uppercase letter are undefined. 
-*/ -static int remove_diacritic(int c){ - unsigned short aDia[] = { - 0, 1797, 1848, 1859, 1891, 1928, 1940, 1995, - 2024, 2040, 2060, 2110, 2168, 2206, 2264, 2286, - 2344, 2383, 2472, 2488, 2516, 2596, 2668, 2732, - 2782, 2842, 2894, 2954, 2984, 3000, 3028, 3336, - 3456, 3696, 3712, 3728, 3744, 3896, 3912, 3928, - 3968, 4008, 4040, 4106, 4138, 4170, 4202, 4234, - 4266, 4296, 4312, 4344, 4408, 4424, 4472, 4504, - 6148, 6198, 6264, 6280, 6360, 6429, 6505, 6529, - 61448, 61468, 61534, 61592, 61642, 61688, 61704, 61726, - 61784, 61800, 61836, 61880, 61914, 61948, 61998, 62122, - 62154, 62200, 62218, 62302, 62364, 62442, 62478, 62536, - 62554, 62584, 62604, 62640, 62648, 62656, 62664, 62730, - 62924, 63050, 63082, 63274, 63390, - }; - char aChar[] = { - '\0', 'a', 'c', 'e', 'i', 'n', 'o', 'u', 'y', 'y', 'a', 'c', - 'd', 'e', 'e', 'g', 'h', 'i', 'j', 'k', 'l', 'n', 'o', 'r', - 's', 't', 'u', 'u', 'w', 'y', 'z', 'o', 'u', 'a', 'i', 'o', - 'u', 'g', 'k', 'o', 'j', 'g', 'n', 'a', 'e', 'i', 'o', 'r', - 'u', 's', 't', 'h', 'a', 'e', 'o', 'y', '\0', '\0', '\0', '\0', - '\0', '\0', '\0', '\0', 'a', 'b', 'd', 'd', 'e', 'f', 'g', 'h', - 'h', 'i', 'k', 'l', 'l', 'm', 'n', 'p', 'r', 'r', 's', 't', - 'u', 'v', 'w', 'w', 'x', 'y', 'z', 'h', 't', 'w', 'y', 'a', - 'e', 'i', 'o', 'u', 'y', - }; - - unsigned int key = (((unsigned int)c)<<3) | 0x00000007; - int iRes = 0; - int iHi = sizeof(aDia)/sizeof(aDia[0]) - 1; - int iLo = 0; - while( iHi>=iLo ){ - int iTest = (iHi + iLo) / 2; - if( key >= aDia[iTest] ){ - iRes = iTest; - iLo = iTest+1; - }else{ - iHi = iTest-1; - } - } - assert( key>=aDia[iRes] ); - return ((c > (aDia[iRes]>>3) + (aDia[iRes]&0x07)) ? c : (int)aChar[iRes]); -}; - - -/* -** Return true if the argument interpreted as a unicode codepoint -** is a diacritical modifier character. -*/ -SQLITE_PRIVATE int sqlite3FtsUnicodeIsdiacritic(int c){ - unsigned int mask0 = 0x08029FDF; - unsigned int mask1 = 0x000361F8; - if( c<768 || c>817 ) return 0; - return (c < 768+32) ? - (mask0 & (1 << (c-768))) : - (mask1 & (1 << (c-768-32))); -} - - -/* -** Interpret the argument as a unicode codepoint. If the codepoint -** is an upper case character that has a lower case equivalent, -** return the codepoint corresponding to the lower case version. -** Otherwise, return a copy of the argument. -** -** The results are undefined if the value passed to this function -** is less than zero. -*/ -SQLITE_PRIVATE int sqlite3FtsUnicodeFold(int c, int bRemoveDiacritic){ - /* Each entry in the following array defines a rule for folding a range - ** of codepoints to lower case. The rule applies to a range of nRange - ** codepoints starting at codepoint iCode. - ** - ** If the least significant bit in flags is clear, then the rule applies - ** to all nRange codepoints (i.e. all nRange codepoints are upper case and - ** need to be folded). Or, if it is set, then the rule only applies to - ** every second codepoint in the range, starting with codepoint C. - ** - ** The 7 most significant bits in flags are an index into the aiOff[] - ** array. If a specific codepoint C does require folding, then its lower - ** case equivalent is ((C + aiOff[flags>>1]) & 0xFFFF). - ** - ** The contents of this array are generated by parsing the CaseFolding.txt - ** file distributed as part of the "Unicode Character Database". See - ** http://www.unicode.org for details. 
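Illustrative sketch, standalone and not the deleted SQLite routine: applying one folding rule of the shape described above. Taking the first entry of the table that follows, {65, 14, 26}, bit 0 of flags is clear so all 26 codepoints 'A'..'Z' fold, and flags>>1 == 7 selects aiOff[7] == 32, so each codepoint maps to (c + 32) & 0xFFFF, e.g. 'A' becomes 'a'.

struct ToyFoldEntry {
  unsigned short iCode;  /* First codepoint covered by this rule */
  unsigned char flags;   /* Bit 0: only every 2nd codepoint folds; bits 1..7: aiOff index */
  unsigned char nRange;  /* Number of codepoints covered */
};

/* Fold codepoint c with a single rule, or return it unchanged. */
static int toyFoldOne(const struct ToyFoldEntry *p, const unsigned short *aiOff, int c){
  if( c<p->iCode || c>=p->iCode+p->nRange ) return c;           /* outside the rule's range */
  if( (p->flags & 0x01) && ((c ^ p->iCode) & 0x01) ) return c;  /* wrong parity for an every-2nd rule */
  return (c + aiOff[p->flags>>1]) & 0xFFFF;
}

/* Example with the entry discussed above:
**   struct ToyFoldEntry e = {65, 14, 26};
**   unsigned short off[] = {1, 2, 8, 15, 16, 26, 28, 32};
**   toyFoldOne(&e, off, 'F') returns 'f'.
*/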
- */ - static const struct TableEntry { - unsigned short iCode; - unsigned char flags; - unsigned char nRange; - } aEntry[] = { - {65, 14, 26}, {181, 64, 1}, {192, 14, 23}, - {216, 14, 7}, {256, 1, 48}, {306, 1, 6}, - {313, 1, 16}, {330, 1, 46}, {376, 116, 1}, - {377, 1, 6}, {383, 104, 1}, {385, 50, 1}, - {386, 1, 4}, {390, 44, 1}, {391, 0, 1}, - {393, 42, 2}, {395, 0, 1}, {398, 32, 1}, - {399, 38, 1}, {400, 40, 1}, {401, 0, 1}, - {403, 42, 1}, {404, 46, 1}, {406, 52, 1}, - {407, 48, 1}, {408, 0, 1}, {412, 52, 1}, - {413, 54, 1}, {415, 56, 1}, {416, 1, 6}, - {422, 60, 1}, {423, 0, 1}, {425, 60, 1}, - {428, 0, 1}, {430, 60, 1}, {431, 0, 1}, - {433, 58, 2}, {435, 1, 4}, {439, 62, 1}, - {440, 0, 1}, {444, 0, 1}, {452, 2, 1}, - {453, 0, 1}, {455, 2, 1}, {456, 0, 1}, - {458, 2, 1}, {459, 1, 18}, {478, 1, 18}, - {497, 2, 1}, {498, 1, 4}, {502, 122, 1}, - {503, 134, 1}, {504, 1, 40}, {544, 110, 1}, - {546, 1, 18}, {570, 70, 1}, {571, 0, 1}, - {573, 108, 1}, {574, 68, 1}, {577, 0, 1}, - {579, 106, 1}, {580, 28, 1}, {581, 30, 1}, - {582, 1, 10}, {837, 36, 1}, {880, 1, 4}, - {886, 0, 1}, {902, 18, 1}, {904, 16, 3}, - {908, 26, 1}, {910, 24, 2}, {913, 14, 17}, - {931, 14, 9}, {962, 0, 1}, {975, 4, 1}, - {976, 140, 1}, {977, 142, 1}, {981, 146, 1}, - {982, 144, 1}, {984, 1, 24}, {1008, 136, 1}, - {1009, 138, 1}, {1012, 130, 1}, {1013, 128, 1}, - {1015, 0, 1}, {1017, 152, 1}, {1018, 0, 1}, - {1021, 110, 3}, {1024, 34, 16}, {1040, 14, 32}, - {1120, 1, 34}, {1162, 1, 54}, {1216, 6, 1}, - {1217, 1, 14}, {1232, 1, 88}, {1329, 22, 38}, - {4256, 66, 38}, {4295, 66, 1}, {4301, 66, 1}, - {7680, 1, 150}, {7835, 132, 1}, {7838, 96, 1}, - {7840, 1, 96}, {7944, 150, 8}, {7960, 150, 6}, - {7976, 150, 8}, {7992, 150, 8}, {8008, 150, 6}, - {8025, 151, 8}, {8040, 150, 8}, {8072, 150, 8}, - {8088, 150, 8}, {8104, 150, 8}, {8120, 150, 2}, - {8122, 126, 2}, {8124, 148, 1}, {8126, 100, 1}, - {8136, 124, 4}, {8140, 148, 1}, {8152, 150, 2}, - {8154, 120, 2}, {8168, 150, 2}, {8170, 118, 2}, - {8172, 152, 1}, {8184, 112, 2}, {8186, 114, 2}, - {8188, 148, 1}, {8486, 98, 1}, {8490, 92, 1}, - {8491, 94, 1}, {8498, 12, 1}, {8544, 8, 16}, - {8579, 0, 1}, {9398, 10, 26}, {11264, 22, 47}, - {11360, 0, 1}, {11362, 88, 1}, {11363, 102, 1}, - {11364, 90, 1}, {11367, 1, 6}, {11373, 84, 1}, - {11374, 86, 1}, {11375, 80, 1}, {11376, 82, 1}, - {11378, 0, 1}, {11381, 0, 1}, {11390, 78, 2}, - {11392, 1, 100}, {11499, 1, 4}, {11506, 0, 1}, - {42560, 1, 46}, {42624, 1, 24}, {42786, 1, 14}, - {42802, 1, 62}, {42873, 1, 4}, {42877, 76, 1}, - {42878, 1, 10}, {42891, 0, 1}, {42893, 74, 1}, - {42896, 1, 4}, {42912, 1, 10}, {42922, 72, 1}, - {65313, 14, 26}, - }; - static const unsigned short aiOff[] = { - 1, 2, 8, 15, 16, 26, 28, 32, - 37, 38, 40, 48, 63, 64, 69, 71, - 79, 80, 116, 202, 203, 205, 206, 207, - 209, 210, 211, 213, 214, 217, 218, 219, - 775, 7264, 10792, 10795, 23228, 23256, 30204, 54721, - 54753, 54754, 54756, 54787, 54793, 54809, 57153, 57274, - 57921, 58019, 58363, 61722, 65268, 65341, 65373, 65406, - 65408, 65410, 65415, 65424, 65436, 65439, 65450, 65462, - 65472, 65476, 65478, 65480, 65482, 65488, 65506, 65511, - 65514, 65521, 65527, 65528, 65529, - }; - - int ret = c; - - assert( c>=0 ); - assert( sizeof(unsigned short)==2 && sizeof(unsigned char)==1 ); - - if( c<128 ){ - if( c>='A' && c<='Z' ) ret = c + ('a' - 'A'); - }else if( c<65536 ){ - int iHi = sizeof(aEntry)/sizeof(aEntry[0]) - 1; - int iLo = 0; - int iRes = -1; - - while( iHi>=iLo ){ - int iTest = (iHi + iLo) / 2; - int cmp = (c - aEntry[iTest].iCode); - if( cmp>=0 ){ - 
iRes = iTest; - iLo = iTest+1; - }else{ - iHi = iTest-1; - } - } - assert( iRes<0 || c>=aEntry[iRes].iCode ); - - if( iRes>=0 ){ - const struct TableEntry *p = &aEntry[iRes]; - if( c<(p->iCode + p->nRange) && 0==(0x01 & p->flags & (p->iCode ^ c)) ){ - ret = (c + (aiOff[p->flags>>1])) & 0x0000FFFF; - assert( ret>0 ); - } - } - - if( bRemoveDiacritic ) ret = remove_diacritic(ret); - } - - else if( c>=66560 && c<66600 ){ - ret = c + 40; - } - - return ret; -} -#endif /* defined(SQLITE_ENABLE_FTS3) || defined(SQLITE_ENABLE_FTS4) */ -#endif /* !defined(SQLITE_ENABLE_FTS4_UNICODE61) */ - -/************** End of fts3_unicode2.c ***************************************/ -/************** Begin file rtree.c *******************************************/ -/* -** 2001 September 15 -** -** The author disclaims copyright to this source code. In place of -** a legal notice, here is a blessing: -** -** May you do good and not evil. -** May you find forgiveness for yourself and forgive others. -** May you share freely, never taking more than you give. -** -************************************************************************* -** This file contains code for implementations of the r-tree and r*-tree -** algorithms packaged as an SQLite virtual table module. -*/ - -/* -** Database Format of R-Tree Tables -** -------------------------------- -** -** The data structure for a single virtual r-tree table is stored in three -** native SQLite tables declared as follows. In each case, the '%' character -** in the table name is replaced with the user-supplied name of the r-tree -** table. -** -** CREATE TABLE %_node(nodeno INTEGER PRIMARY KEY, data BLOB) -** CREATE TABLE %_parent(nodeno INTEGER PRIMARY KEY, parentnode INTEGER) -** CREATE TABLE %_rowid(rowid INTEGER PRIMARY KEY, nodeno INTEGER) -** -** The data for each node of the r-tree structure is stored in the %_node -** table. For each node that is not the root node of the r-tree, there is -** an entry in the %_parent table associating the node with its parent. -** And for each row of data in the table, there is an entry in the %_rowid -** table that maps from the entries rowid to the id of the node that it -** is stored on. -** -** The root node of an r-tree always exists, even if the r-tree table is -** empty. The nodeno of the root node is always 1. All other nodes in the -** table must be the same size as the root node. The content of each node -** is formatted as follows: -** -** 1. If the node is the root node (node 1), then the first 2 bytes -** of the node contain the tree depth as a big-endian integer. -** For non-root nodes, the first 2 bytes are left unused. -** -** 2. The next 2 bytes contain the number of entries currently -** stored in the node. -** -** 3. The remainder of the node contains the node entries. Each entry -** consists of a single 8-byte integer followed by an even number -** of 4-byte coordinates. For leaf nodes the integer is the rowid -** of a record. For internal nodes it is the node number of a -** child page. -*/ - -#if !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_RTREE) - -#ifndef SQLITE_CORE - SQLITE_EXTENSION_INIT1 -#else -#endif - -/* #include */ -/* #include */ -/* #include */ - -#ifndef SQLITE_AMALGAMATION -#include "sqlite3rtree.h" -typedef sqlite3_int64 i64; -typedef unsigned char u8; -typedef unsigned short u16; -typedef unsigned int u32; -#endif - -/* The following macro is used to suppress compiler warnings. 
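Illustrative sketch with hypothetical helper names, not the deleted readInt16()/readInt64() routines: decoding the big-endian node header laid out in the format notes above, where bytes 0..1 of the root node hold the tree depth, bytes 2..3 hold the cell count, and the first cell begins at byte 4 with an 8-byte id.

/* Read a big-endian 16-bit value. */
static int toyReadU16(const unsigned char *p){
  return (p[0]<<8) | p[1];
}

/* Read a big-endian 64-bit value. */
static long long toyReadU64(const unsigned char *p){
  unsigned long long v = 0;
  int i;
  for(i=0; i<8; i++) v = (v<<8) | p[i];
  return (long long)v;
}

/* Given a root-node blob, extract its depth, cell count and first cell id. */
static void toyDumpRootHeader(const unsigned char *zNode, int *pDepth, int *pnCell, long long *pFirstId){
  *pDepth = toyReadU16(&zNode[0]);                       /* tree depth (root node only) */
  *pnCell = toyReadU16(&zNode[2]);                       /* number of cells in the node */
  *pFirstId = (*pnCell>0) ? toyReadU64(&zNode[4]) : 0;   /* id of the first cell, if any */
}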
-*/ -#ifndef UNUSED_PARAMETER -# define UNUSED_PARAMETER(x) (void)(x) -#endif - -typedef struct Rtree Rtree; -typedef struct RtreeCursor RtreeCursor; -typedef struct RtreeNode RtreeNode; -typedef struct RtreeCell RtreeCell; -typedef struct RtreeConstraint RtreeConstraint; -typedef struct RtreeMatchArg RtreeMatchArg; -typedef struct RtreeGeomCallback RtreeGeomCallback; -typedef union RtreeCoord RtreeCoord; -typedef struct RtreeSearchPoint RtreeSearchPoint; - -/* The rtree may have between 1 and RTREE_MAX_DIMENSIONS dimensions. */ -#define RTREE_MAX_DIMENSIONS 5 - -/* Size of hash table Rtree.aHash. This hash table is not expected to -** ever contain very many entries, so a fixed number of buckets is -** used. -*/ -#define HASHSIZE 97 - -/* The xBestIndex method of this virtual table requires an estimate of -** the number of rows in the virtual table to calculate the costs of -** various strategies. If possible, this estimate is loaded from the -** sqlite_stat1 table (with RTREE_MIN_ROWEST as a hard-coded minimum). -** Otherwise, if no sqlite_stat1 entry is available, use -** RTREE_DEFAULT_ROWEST. -*/ -#define RTREE_DEFAULT_ROWEST 1048576 -#define RTREE_MIN_ROWEST 100 - -/* -** An rtree virtual-table object. -*/ -struct Rtree { - sqlite3_vtab base; /* Base class. Must be first */ - sqlite3 *db; /* Host database connection */ - int iNodeSize; /* Size in bytes of each node in the node table */ - u8 nDim; /* Number of dimensions */ - u8 eCoordType; /* RTREE_COORD_REAL32 or RTREE_COORD_INT32 */ - u8 nBytesPerCell; /* Bytes consumed per cell */ - int iDepth; /* Current depth of the r-tree structure */ - char *zDb; /* Name of database containing r-tree table */ - char *zName; /* Name of r-tree table */ - int nBusy; /* Current number of users of this structure */ - i64 nRowEst; /* Estimated number of rows in this table */ - - /* List of nodes removed during a CondenseTree operation. List is - ** linked together via the pointer normally used for hash chains - - ** RtreeNode.pNext. RtreeNode.iNode stores the depth of the sub-tree - ** headed by the node (leaf nodes have RtreeNode.iNode==0). - */ - RtreeNode *pDeleted; - int iReinsertHeight; /* Height of sub-trees Reinsert() has run on */ - - /* Statements to read/write/delete a record from xxx_node */ - sqlite3_stmt *pReadNode; - sqlite3_stmt *pWriteNode; - sqlite3_stmt *pDeleteNode; - - /* Statements to read/write/delete a record from xxx_rowid */ - sqlite3_stmt *pReadRowid; - sqlite3_stmt *pWriteRowid; - sqlite3_stmt *pDeleteRowid; - - /* Statements to read/write/delete a record from xxx_parent */ - sqlite3_stmt *pReadParent; - sqlite3_stmt *pWriteParent; - sqlite3_stmt *pDeleteParent; - - RtreeNode *aHash[HASHSIZE]; /* Hash table of in-memory nodes. */ -}; - -/* Possible values for Rtree.eCoordType: */ -#define RTREE_COORD_REAL32 0 -#define RTREE_COORD_INT32 1 - -/* -** If SQLITE_RTREE_INT_ONLY is defined, then this virtual table will -** only deal with integer coordinates. No floating point operations -** will be done. -*/ -#ifdef SQLITE_RTREE_INT_ONLY - typedef sqlite3_int64 RtreeDValue; /* High accuracy coordinate */ - typedef int RtreeValue; /* Low accuracy coordinate */ -# define RTREE_ZERO 0 -#else - typedef double RtreeDValue; /* High accuracy coordinate */ - typedef float RtreeValue; /* Low accuracy coordinate */ -# define RTREE_ZERO 0.0 -#endif - -/* -** When doing a search of an r-tree, instances of the following structure -** record intermediate results from the tree walk. -** -** The id is always a node-id. 
For iLevel>=1 the id is the node-id of -** the node that the RtreeSearchPoint represents. When iLevel==0, however, -** the id is of the parent node and the cell that RtreeSearchPoint -** represents is the iCell-th entry in the parent node. -*/ -struct RtreeSearchPoint { - RtreeDValue rScore; /* The score for this node. Smallest goes first. */ - sqlite3_int64 id; /* Node ID */ - u8 iLevel; /* 0=entries. 1=leaf node. 2+ for higher */ - u8 eWithin; /* PARTLY_WITHIN or FULLY_WITHIN */ - u8 iCell; /* Cell index within the node */ -}; - -/* -** The minimum number of cells allowed for a node is a third of the -** maximum. In Gutman's notation: -** -** m = M/3 -** -** If an R*-tree "Reinsert" operation is required, the same number of -** cells are removed from the overfull node and reinserted into the tree. -*/ -#define RTREE_MINCELLS(p) ((((p)->iNodeSize-4)/(p)->nBytesPerCell)/3) -#define RTREE_REINSERT(p) RTREE_MINCELLS(p) -#define RTREE_MAXCELLS 51 - -/* -** The smallest possible node-size is (512-64)==448 bytes. And the largest -** supported cell size is 48 bytes (8 byte rowid + ten 4 byte coordinates). -** Therefore all non-root nodes must contain at least 3 entries. Since -** 2^40 is greater than 2^64, an r-tree structure always has a depth of -** 40 or less. -*/ -#define RTREE_MAX_DEPTH 40 - - -/* -** Number of entries in the cursor RtreeNode cache. The first entry is -** used to cache the RtreeNode for RtreeCursor.sPoint. The remaining -** entries cache the RtreeNode for the first elements of the priority queue. -*/ -#define RTREE_CACHE_SZ 5 - -/* -** An rtree cursor object. -*/ -struct RtreeCursor { - sqlite3_vtab_cursor base; /* Base class. Must be first */ - u8 atEOF; /* True if at end of search */ - u8 bPoint; /* True if sPoint is valid */ - int iStrategy; /* Copy of idxNum search parameter */ - int nConstraint; /* Number of entries in aConstraint */ - RtreeConstraint *aConstraint; /* Search constraints. */ - int nPointAlloc; /* Number of slots allocated for aPoint[] */ - int nPoint; /* Number of slots used in aPoint[] */ - int mxLevel; /* iLevel value for root of the tree */ - RtreeSearchPoint *aPoint; /* Priority queue for search points */ - RtreeSearchPoint sPoint; /* Cached next search point */ - RtreeNode *aNode[RTREE_CACHE_SZ]; /* Rtree node cache */ - u32 anQueue[RTREE_MAX_DEPTH+1]; /* Number of queued entries by iLevel */ -}; - -/* Return the Rtree of a RtreeCursor */ -#define RTREE_OF_CURSOR(X) ((Rtree*)((X)->base.pVtab)) - -/* -** A coordinate can be either a floating point number or a integer. All -** coordinates within a single R-Tree are always of the same time. -*/ -union RtreeCoord { - RtreeValue f; /* Floating point value */ - int i; /* Integer value */ - u32 u; /* Unsigned for byte-order conversions */ -}; - -/* -** The argument is an RtreeCoord. Return the value stored within the RtreeCoord -** formatted as a RtreeDValue (double or int64). This macro assumes that local -** variable pRtree points to the Rtree structure associated with the -** RtreeCoord. -*/ -#ifdef SQLITE_RTREE_INT_ONLY -# define DCOORD(coord) ((RtreeDValue)coord.i) -#else -# define DCOORD(coord) ( \ - (pRtree->eCoordType==RTREE_COORD_REAL32) ? \ - ((double)coord.f) : \ - ((double)coord.i) \ - ) -#endif - -/* -** A search constraint. -*/ -struct RtreeConstraint { - int iCoord; /* Index of constrained coordinate */ - int op; /* Constraining operation */ - union { - RtreeDValue rValue; /* Constraint value. 
*/ - int (*xGeom)(sqlite3_rtree_geometry*,int,RtreeDValue*,int*); - int (*xQueryFunc)(sqlite3_rtree_query_info*); - } u; - sqlite3_rtree_query_info *pInfo; /* xGeom and xQueryFunc argument */ -}; - -/* Possible values for RtreeConstraint.op */ -#define RTREE_EQ 0x41 /* A */ -#define RTREE_LE 0x42 /* B */ -#define RTREE_LT 0x43 /* C */ -#define RTREE_GE 0x44 /* D */ -#define RTREE_GT 0x45 /* E */ -#define RTREE_MATCH 0x46 /* F: Old-style sqlite3_rtree_geometry_callback() */ -#define RTREE_QUERY 0x47 /* G: New-style sqlite3_rtree_query_callback() */ - - -/* -** An rtree structure node. -*/ -struct RtreeNode { - RtreeNode *pParent; /* Parent node */ - i64 iNode; /* The node number */ - int nRef; /* Number of references to this node */ - int isDirty; /* True if the node needs to be written to disk */ - u8 *zData; /* Content of the node, as should be on disk */ - RtreeNode *pNext; /* Next node in this hash collision chain */ -}; - -/* Return the number of cells in a node */ -#define NCELL(pNode) readInt16(&(pNode)->zData[2]) - -/* -** A single cell from a node, deserialized -*/ -struct RtreeCell { - i64 iRowid; /* Node or entry ID */ - RtreeCoord aCoord[RTREE_MAX_DIMENSIONS*2]; /* Bounding box coordinates */ -}; - - -/* -** This object becomes the sqlite3_user_data() for the SQL functions -** that are created by sqlite3_rtree_geometry_callback() and -** sqlite3_rtree_query_callback() and which appear on the right of MATCH -** operators in order to constrain a search. -** -** xGeom and xQueryFunc are the callback functions. Exactly one of -** xGeom and xQueryFunc fields is non-NULL, depending on whether the -** SQL function was created using sqlite3_rtree_geometry_callback() or -** sqlite3_rtree_query_callback(). -** -** This object is deleted automatically by the destructor mechanism in -** sqlite3_create_function_v2(). -*/ -struct RtreeGeomCallback { - int (*xGeom)(sqlite3_rtree_geometry*, int, RtreeDValue*, int*); - int (*xQueryFunc)(sqlite3_rtree_query_info*); - void (*xDestructor)(void*); - void *pContext; -}; - - -/* -** Value for the first field of every RtreeMatchArg object. The MATCH -** operator tests that the first field of a blob operand matches this -** value to avoid operating on invalid blobs (which could cause a segfault). -*/ -#define RTREE_GEOMETRY_MAGIC 0x891245AB - -/* -** An instance of this structure (in the form of a BLOB) is returned by -** the SQL functions that sqlite3_rtree_geometry_callback() and -** sqlite3_rtree_query_callback() create, and is read as the right-hand -** operand to the MATCH operator of an R-Tree. -*/ -struct RtreeMatchArg { - u32 magic; /* Always RTREE_GEOMETRY_MAGIC */ - RtreeGeomCallback cb; /* Info about the callback functions */ - int nParam; /* Number of parameters to the SQL function */ - RtreeDValue aParam[1]; /* Values for parameters to the SQL function */ -}; - -#ifndef MAX -# define MAX(x,y) ((x) < (y) ? (y) : (x)) -#endif -#ifndef MIN -# define MIN(x,y) ((x) > (y) ? (y) : (x)) -#endif - -/* -** Functions to deserialize a 16 bit integer, 32 bit real number and -** 64 bit integer. The deserialized value is returned. 
-*/ -static int readInt16(u8 *p){ - return (p[0]<<8) + p[1]; -} -static void readCoord(u8 *p, RtreeCoord *pCoord){ - u32 i = ( - (((u32)p[0]) << 24) + - (((u32)p[1]) << 16) + - (((u32)p[2]) << 8) + - (((u32)p[3]) << 0) - ); - *(u32 *)pCoord = i; -} -static i64 readInt64(u8 *p){ - return ( - (((i64)p[0]) << 56) + - (((i64)p[1]) << 48) + - (((i64)p[2]) << 40) + - (((i64)p[3]) << 32) + - (((i64)p[4]) << 24) + - (((i64)p[5]) << 16) + - (((i64)p[6]) << 8) + - (((i64)p[7]) << 0) - ); -} - -/* -** Functions to serialize a 16 bit integer, 32 bit real number and -** 64 bit integer. The value returned is the number of bytes written -** to the argument buffer (always 2, 4 and 8 respectively). -*/ -static int writeInt16(u8 *p, int i){ - p[0] = (i>> 8)&0xFF; - p[1] = (i>> 0)&0xFF; - return 2; -} -static int writeCoord(u8 *p, RtreeCoord *pCoord){ - u32 i; - assert( sizeof(RtreeCoord)==4 ); - assert( sizeof(u32)==4 ); - i = *(u32 *)pCoord; - p[0] = (i>>24)&0xFF; - p[1] = (i>>16)&0xFF; - p[2] = (i>> 8)&0xFF; - p[3] = (i>> 0)&0xFF; - return 4; -} -static int writeInt64(u8 *p, i64 i){ - p[0] = (i>>56)&0xFF; - p[1] = (i>>48)&0xFF; - p[2] = (i>>40)&0xFF; - p[3] = (i>>32)&0xFF; - p[4] = (i>>24)&0xFF; - p[5] = (i>>16)&0xFF; - p[6] = (i>> 8)&0xFF; - p[7] = (i>> 0)&0xFF; - return 8; -} - -/* -** Increment the reference count of node p. -*/ -static void nodeReference(RtreeNode *p){ - if( p ){ - p->nRef++; - } -} - -/* -** Clear the content of node p (set all bytes to 0x00). -*/ -static void nodeZero(Rtree *pRtree, RtreeNode *p){ - memset(&p->zData[2], 0, pRtree->iNodeSize-2); - p->isDirty = 1; -} - -/* -** Given a node number iNode, return the corresponding key to use -** in the Rtree.aHash table. -*/ -static int nodeHash(i64 iNode){ - return iNode % HASHSIZE; -} - -/* -** Search the node hash table for node iNode. If found, return a pointer -** to it. Otherwise, return 0. -*/ -static RtreeNode *nodeHashLookup(Rtree *pRtree, i64 iNode){ - RtreeNode *p; - for(p=pRtree->aHash[nodeHash(iNode)]; p && p->iNode!=iNode; p=p->pNext); - return p; -} - -/* -** Add node pNode to the node hash table. -*/ -static void nodeHashInsert(Rtree *pRtree, RtreeNode *pNode){ - int iHash; - assert( pNode->pNext==0 ); - iHash = nodeHash(pNode->iNode); - pNode->pNext = pRtree->aHash[iHash]; - pRtree->aHash[iHash] = pNode; -} - -/* -** Remove node pNode from the node hash table. -*/ -static void nodeHashDelete(Rtree *pRtree, RtreeNode *pNode){ - RtreeNode **pp; - if( pNode->iNode!=0 ){ - pp = &pRtree->aHash[nodeHash(pNode->iNode)]; - for( ; (*pp)!=pNode; pp = &(*pp)->pNext){ assert(*pp); } - *pp = pNode->pNext; - pNode->pNext = 0; - } -} - -/* -** Allocate and return new r-tree node. Initially, (RtreeNode.iNode==0), -** indicating that node has not yet been assigned a node number. It is -** assigned a node number when nodeWrite() is called to write the -** node contents out to the database. -*/ -static RtreeNode *nodeNew(Rtree *pRtree, RtreeNode *pParent){ - RtreeNode *pNode; - pNode = (RtreeNode *)sqlite3_malloc(sizeof(RtreeNode) + pRtree->iNodeSize); - if( pNode ){ - memset(pNode, 0, sizeof(RtreeNode) + pRtree->iNodeSize); - pNode->zData = (u8 *)&pNode[1]; - pNode->nRef = 1; - pNode->pParent = pParent; - pNode->isDirty = 1; - nodeReference(pParent); - } - return pNode; -} - -/* -** Obtain a reference to an r-tree node. 
-*/ -static int nodeAcquire( - Rtree *pRtree, /* R-tree structure */ - i64 iNode, /* Node number to load */ - RtreeNode *pParent, /* Either the parent node or NULL */ - RtreeNode **ppNode /* OUT: Acquired node */ -){ - int rc; - int rc2 = SQLITE_OK; - RtreeNode *pNode; - - /* Check if the requested node is already in the hash table. If so, - ** increase its reference count and return it. - */ - if( (pNode = nodeHashLookup(pRtree, iNode)) ){ - assert( !pParent || !pNode->pParent || pNode->pParent==pParent ); - if( pParent && !pNode->pParent ){ - nodeReference(pParent); - pNode->pParent = pParent; - } - pNode->nRef++; - *ppNode = pNode; - return SQLITE_OK; - } - - sqlite3_bind_int64(pRtree->pReadNode, 1, iNode); - rc = sqlite3_step(pRtree->pReadNode); - if( rc==SQLITE_ROW ){ - const u8 *zBlob = sqlite3_column_blob(pRtree->pReadNode, 0); - if( pRtree->iNodeSize==sqlite3_column_bytes(pRtree->pReadNode, 0) ){ - pNode = (RtreeNode *)sqlite3_malloc(sizeof(RtreeNode)+pRtree->iNodeSize); - if( !pNode ){ - rc2 = SQLITE_NOMEM; - }else{ - pNode->pParent = pParent; - pNode->zData = (u8 *)&pNode[1]; - pNode->nRef = 1; - pNode->iNode = iNode; - pNode->isDirty = 0; - pNode->pNext = 0; - memcpy(pNode->zData, zBlob, pRtree->iNodeSize); - nodeReference(pParent); - } - } - } - rc = sqlite3_reset(pRtree->pReadNode); - if( rc==SQLITE_OK ) rc = rc2; - - /* If the root node was just loaded, set pRtree->iDepth to the height - ** of the r-tree structure. A height of zero means all data is stored on - ** the root node. A height of one means the children of the root node - ** are the leaves, and so on. If the depth as specified on the root node - ** is greater than RTREE_MAX_DEPTH, the r-tree structure must be corrupt. - */ - if( pNode && iNode==1 ){ - pRtree->iDepth = readInt16(pNode->zData); - if( pRtree->iDepth>RTREE_MAX_DEPTH ){ - rc = SQLITE_CORRUPT_VTAB; - } - } - - /* If no error has occurred so far, check if the "number of entries" - ** field on the node is too large. If so, set the return code to - ** SQLITE_CORRUPT_VTAB. - */ - if( pNode && rc==SQLITE_OK ){ - if( NCELL(pNode)>((pRtree->iNodeSize-4)/pRtree->nBytesPerCell) ){ - rc = SQLITE_CORRUPT_VTAB; - } - } - - if( rc==SQLITE_OK ){ - if( pNode!=0 ){ - nodeHashInsert(pRtree, pNode); - }else{ - rc = SQLITE_CORRUPT_VTAB; - } - *ppNode = pNode; - }else{ - sqlite3_free(pNode); - *ppNode = 0; - } - - return rc; -} - -/* -** Overwrite cell iCell of node pNode with the contents of pCell. -*/ -static void nodeOverwriteCell( - Rtree *pRtree, /* The overall R-Tree */ - RtreeNode *pNode, /* The node into which the cell is to be written */ - RtreeCell *pCell, /* The cell to write */ - int iCell /* Index into pNode into which pCell is written */ -){ - int ii; - u8 *p = &pNode->zData[4 + pRtree->nBytesPerCell*iCell]; - p += writeInt64(p, pCell->iRowid); - for(ii=0; ii<(pRtree->nDim*2); ii++){ - p += writeCoord(p, &pCell->aCoord[ii]); - } - pNode->isDirty = 1; -} - -/* -** Remove the cell with index iCell from node pNode. -*/ -static void nodeDeleteCell(Rtree *pRtree, RtreeNode *pNode, int iCell){ - u8 *pDst = &pNode->zData[4 + pRtree->nBytesPerCell*iCell]; - u8 *pSrc = &pDst[pRtree->nBytesPerCell]; - int nByte = (NCELL(pNode) - iCell - 1) * pRtree->nBytesPerCell; - memmove(pDst, pSrc, nByte); - writeInt16(&pNode->zData[2], NCELL(pNode)-1); - pNode->isDirty = 1; -} - -/* -** Insert the contents of cell pCell into node pNode. If the insert -** is successful, return SQLITE_OK. -** -** If there is not enough free space in pNode, return SQLITE_FULL. 
-*/ -static int nodeInsertCell( - Rtree *pRtree, /* The overall R-Tree */ - RtreeNode *pNode, /* Write new cell into this node */ - RtreeCell *pCell /* The cell to be inserted */ -){ - int nCell; /* Current number of cells in pNode */ - int nMaxCell; /* Maximum number of cells for pNode */ - - nMaxCell = (pRtree->iNodeSize-4)/pRtree->nBytesPerCell; - nCell = NCELL(pNode); - - assert( nCell<=nMaxCell ); - if( nCellzData[2], nCell+1); - pNode->isDirty = 1; - } - - return (nCell==nMaxCell); -} - -/* -** If the node is dirty, write it out to the database. -*/ -static int nodeWrite(Rtree *pRtree, RtreeNode *pNode){ - int rc = SQLITE_OK; - if( pNode->isDirty ){ - sqlite3_stmt *p = pRtree->pWriteNode; - if( pNode->iNode ){ - sqlite3_bind_int64(p, 1, pNode->iNode); - }else{ - sqlite3_bind_null(p, 1); - } - sqlite3_bind_blob(p, 2, pNode->zData, pRtree->iNodeSize, SQLITE_STATIC); - sqlite3_step(p); - pNode->isDirty = 0; - rc = sqlite3_reset(p); - if( pNode->iNode==0 && rc==SQLITE_OK ){ - pNode->iNode = sqlite3_last_insert_rowid(pRtree->db); - nodeHashInsert(pRtree, pNode); - } - } - return rc; -} - -/* -** Release a reference to a node. If the node is dirty and the reference -** count drops to zero, the node data is written to the database. -*/ -static int nodeRelease(Rtree *pRtree, RtreeNode *pNode){ - int rc = SQLITE_OK; - if( pNode ){ - assert( pNode->nRef>0 ); - pNode->nRef--; - if( pNode->nRef==0 ){ - if( pNode->iNode==1 ){ - pRtree->iDepth = -1; - } - if( pNode->pParent ){ - rc = nodeRelease(pRtree, pNode->pParent); - } - if( rc==SQLITE_OK ){ - rc = nodeWrite(pRtree, pNode); - } - nodeHashDelete(pRtree, pNode); - sqlite3_free(pNode); - } - } - return rc; -} - -/* -** Return the 64-bit integer value associated with cell iCell of -** node pNode. If pNode is a leaf node, this is a rowid. If it is -** an internal node, then the 64-bit integer is a child page number. -*/ -static i64 nodeGetRowid( - Rtree *pRtree, /* The overall R-Tree */ - RtreeNode *pNode, /* The node from which to extract the ID */ - int iCell /* The cell index from which to extract the ID */ -){ - assert( iCellzData[4 + pRtree->nBytesPerCell*iCell]); -} - -/* -** Return coordinate iCoord from cell iCell in node pNode. -*/ -static void nodeGetCoord( - Rtree *pRtree, /* The overall R-Tree */ - RtreeNode *pNode, /* The node from which to extract a coordinate */ - int iCell, /* The index of the cell within the node */ - int iCoord, /* Which coordinate to extract */ - RtreeCoord *pCoord /* OUT: Space to write result to */ -){ - readCoord(&pNode->zData[12 + pRtree->nBytesPerCell*iCell + 4*iCoord], pCoord); -} - -/* -** Deserialize cell iCell of node pNode. Populate the structure pointed -** to by pCell with the results. -*/ -static void nodeGetCell( - Rtree *pRtree, /* The overall R-Tree */ - RtreeNode *pNode, /* The node containing the cell to be read */ - int iCell, /* Index of the cell within the node */ - RtreeCell *pCell /* OUT: Write the cell contents here */ -){ - u8 *pData; - u8 *pEnd; - RtreeCoord *pCoord; - pCell->iRowid = nodeGetRowid(pRtree, pNode, iCell); - pData = pNode->zData + (12 + pRtree->nBytesPerCell*iCell); - pEnd = pData + pRtree->nDim*8; - pCoord = pCell->aCoord; - for(; pDatanBusy++; -} - -/* -** Decrement the r-tree reference count. When the reference count reaches -** zero the structure is deleted. 
-*/ -static void rtreeRelease(Rtree *pRtree){ - pRtree->nBusy--; - if( pRtree->nBusy==0 ){ - sqlite3_finalize(pRtree->pReadNode); - sqlite3_finalize(pRtree->pWriteNode); - sqlite3_finalize(pRtree->pDeleteNode); - sqlite3_finalize(pRtree->pReadRowid); - sqlite3_finalize(pRtree->pWriteRowid); - sqlite3_finalize(pRtree->pDeleteRowid); - sqlite3_finalize(pRtree->pReadParent); - sqlite3_finalize(pRtree->pWriteParent); - sqlite3_finalize(pRtree->pDeleteParent); - sqlite3_free(pRtree); - } -} - -/* -** Rtree virtual table module xDisconnect method. -*/ -static int rtreeDisconnect(sqlite3_vtab *pVtab){ - rtreeRelease((Rtree *)pVtab); - return SQLITE_OK; -} - -/* -** Rtree virtual table module xDestroy method. -*/ -static int rtreeDestroy(sqlite3_vtab *pVtab){ - Rtree *pRtree = (Rtree *)pVtab; - int rc; - char *zCreate = sqlite3_mprintf( - "DROP TABLE '%q'.'%q_node';" - "DROP TABLE '%q'.'%q_rowid';" - "DROP TABLE '%q'.'%q_parent';", - pRtree->zDb, pRtree->zName, - pRtree->zDb, pRtree->zName, - pRtree->zDb, pRtree->zName - ); - if( !zCreate ){ - rc = SQLITE_NOMEM; - }else{ - rc = sqlite3_exec(pRtree->db, zCreate, 0, 0, 0); - sqlite3_free(zCreate); - } - if( rc==SQLITE_OK ){ - rtreeRelease(pRtree); - } - - return rc; -} - -/* -** Rtree virtual table module xOpen method. -*/ -static int rtreeOpen(sqlite3_vtab *pVTab, sqlite3_vtab_cursor **ppCursor){ - int rc = SQLITE_NOMEM; - RtreeCursor *pCsr; - - pCsr = (RtreeCursor *)sqlite3_malloc(sizeof(RtreeCursor)); - if( pCsr ){ - memset(pCsr, 0, sizeof(RtreeCursor)); - pCsr->base.pVtab = pVTab; - rc = SQLITE_OK; - } - *ppCursor = (sqlite3_vtab_cursor *)pCsr; - - return rc; -} - - -/* -** Free the RtreeCursor.aConstraint[] array and its contents. -*/ -static void freeCursorConstraints(RtreeCursor *pCsr){ - if( pCsr->aConstraint ){ - int i; /* Used to iterate through constraint array */ - for(i=0; inConstraint; i++){ - sqlite3_rtree_query_info *pInfo = pCsr->aConstraint[i].pInfo; - if( pInfo ){ - if( pInfo->xDelUser ) pInfo->xDelUser(pInfo->pUser); - sqlite3_free(pInfo); - } - } - sqlite3_free(pCsr->aConstraint); - pCsr->aConstraint = 0; - } -} - -/* -** Rtree virtual table module xClose method. -*/ -static int rtreeClose(sqlite3_vtab_cursor *cur){ - Rtree *pRtree = (Rtree *)(cur->pVtab); - int ii; - RtreeCursor *pCsr = (RtreeCursor *)cur; - freeCursorConstraints(pCsr); - sqlite3_free(pCsr->aPoint); - for(ii=0; iiaNode[ii]); - sqlite3_free(pCsr); - return SQLITE_OK; -} - -/* -** Rtree virtual table module xEof method. -** -** Return non-zero if the cursor does not currently point to a valid -** record (i.e if the scan has finished), or zero otherwise. -*/ -static int rtreeEof(sqlite3_vtab_cursor *cur){ - RtreeCursor *pCsr = (RtreeCursor *)cur; - return pCsr->atEOF; -} - -/* -** Convert raw bits from the on-disk RTree record into a coordinate value. -** The on-disk format is big-endian and needs to be converted for little- -** endian platforms. The on-disk record stores integer coordinates if -** eInt is true and it stores 32-bit floating point records if eInt is -** false. a[] is the four bytes of the on-disk record to be decoded. -** Store the results in "r". -** -** There are three versions of this macro, one each for little-endian and -** big-endian processors and a third generic implementation. The endian- -** specific implementations are much faster and are preferred if the -** processor endianness is known at compile-time. 
The SQLITE_BYTEORDER -** macro is part of sqliteInt.h and hence the endian-specific -** implementation will only be used if this module is compiled as part -** of the amalgamation. -*/ -#if defined(SQLITE_BYTEORDER) && SQLITE_BYTEORDER==1234 -#define RTREE_DECODE_COORD(eInt, a, r) { \ - RtreeCoord c; /* Coordinate decoded */ \ - memcpy(&c.u,a,4); \ - c.u = ((c.u>>24)&0xff)|((c.u>>8)&0xff00)| \ - ((c.u&0xff)<<24)|((c.u&0xff00)<<8); \ - r = eInt ? (sqlite3_rtree_dbl)c.i : (sqlite3_rtree_dbl)c.f; \ -} -#elif defined(SQLITE_BYTEORDER) && SQLITE_BYTEORDER==4321 -#define RTREE_DECODE_COORD(eInt, a, r) { \ - RtreeCoord c; /* Coordinate decoded */ \ - memcpy(&c.u,a,4); \ - r = eInt ? (sqlite3_rtree_dbl)c.i : (sqlite3_rtree_dbl)c.f; \ -} -#else -#define RTREE_DECODE_COORD(eInt, a, r) { \ - RtreeCoord c; /* Coordinate decoded */ \ - c.u = ((u32)a[0]<<24) + ((u32)a[1]<<16) \ - +((u32)a[2]<<8) + a[3]; \ - r = eInt ? (sqlite3_rtree_dbl)c.i : (sqlite3_rtree_dbl)c.f; \ -} -#endif - -/* -** Check the RTree node or entry given by pCellData and p against the MATCH -** constraint pConstraint. -*/ -static int rtreeCallbackConstraint( - RtreeConstraint *pConstraint, /* The constraint to test */ - int eInt, /* True if RTree holding integer coordinates */ - u8 *pCellData, /* Raw cell content */ - RtreeSearchPoint *pSearch, /* Container of this cell */ - sqlite3_rtree_dbl *prScore, /* OUT: score for the cell */ - int *peWithin /* OUT: visibility of the cell */ -){ - int i; /* Loop counter */ - sqlite3_rtree_query_info *pInfo = pConstraint->pInfo; /* Callback info */ - int nCoord = pInfo->nCoord; /* No. of coordinates */ - int rc; /* Callback return code */ - sqlite3_rtree_dbl aCoord[RTREE_MAX_DIMENSIONS*2]; /* Decoded coordinates */ - - assert( pConstraint->op==RTREE_MATCH || pConstraint->op==RTREE_QUERY ); - assert( nCoord==2 || nCoord==4 || nCoord==6 || nCoord==8 || nCoord==10 ); - - if( pConstraint->op==RTREE_QUERY && pSearch->iLevel==1 ){ - pInfo->iRowid = readInt64(pCellData); - } - pCellData += 8; - for(i=0; iop==RTREE_MATCH ){ - rc = pConstraint->u.xGeom((sqlite3_rtree_geometry*)pInfo, - nCoord, aCoord, &i); - if( i==0 ) *peWithin = NOT_WITHIN; - *prScore = RTREE_ZERO; - }else{ - pInfo->aCoord = aCoord; - pInfo->iLevel = pSearch->iLevel - 1; - pInfo->rScore = pInfo->rParentScore = pSearch->rScore; - pInfo->eWithin = pInfo->eParentWithin = pSearch->eWithin; - rc = pConstraint->u.xQueryFunc(pInfo); - if( pInfo->eWithin<*peWithin ) *peWithin = pInfo->eWithin; - if( pInfo->rScore<*prScore || *prScorerScore; - } - } - return rc; -} - -/* -** Check the internal RTree node given by pCellData against constraint p. -** If this constraint cannot be satisfied by any child within the node, -** set *peWithin to NOT_WITHIN. -*/ -static void rtreeNonleafConstraint( - RtreeConstraint *p, /* The constraint to test */ - int eInt, /* True if RTree holds integer coordinates */ - u8 *pCellData, /* Raw cell content as appears on disk */ - int *peWithin /* Adjust downward, as appropriate */ -){ - sqlite3_rtree_dbl val; /* Coordinate value convert to a double */ - - /* p->iCoord might point to either a lower or upper bound coordinate - ** in a coordinate pair. But make pCellData point to the lower bound. 
- */ - pCellData += 8 + 4*(p->iCoord&0xfe); - - assert(p->op==RTREE_LE || p->op==RTREE_LT || p->op==RTREE_GE - || p->op==RTREE_GT || p->op==RTREE_EQ ); - switch( p->op ){ - case RTREE_LE: - case RTREE_LT: - case RTREE_EQ: - RTREE_DECODE_COORD(eInt, pCellData, val); - /* val now holds the lower bound of the coordinate pair */ - if( p->u.rValue>=val ) return; - if( p->op!=RTREE_EQ ) break; /* RTREE_LE and RTREE_LT end here */ - /* Fall through for the RTREE_EQ case */ - - default: /* RTREE_GT or RTREE_GE, or fallthrough of RTREE_EQ */ - pCellData += 4; - RTREE_DECODE_COORD(eInt, pCellData, val); - /* val now holds the upper bound of the coordinate pair */ - if( p->u.rValue<=val ) return; - } - *peWithin = NOT_WITHIN; -} - -/* -** Check the leaf RTree cell given by pCellData against constraint p. -** If this constraint is not satisfied, set *peWithin to NOT_WITHIN. -** If the constraint is satisfied, leave *peWithin unchanged. -** -** The constraint is of the form: xN op $val -** -** The op is given by p->op. The xN is p->iCoord-th coordinate in -** pCellData. $val is given by p->u.rValue. -*/ -static void rtreeLeafConstraint( - RtreeConstraint *p, /* The constraint to test */ - int eInt, /* True if RTree holds integer coordinates */ - u8 *pCellData, /* Raw cell content as appears on disk */ - int *peWithin /* Adjust downward, as appropriate */ -){ - RtreeDValue xN; /* Coordinate value converted to a double */ - - assert(p->op==RTREE_LE || p->op==RTREE_LT || p->op==RTREE_GE - || p->op==RTREE_GT || p->op==RTREE_EQ ); - pCellData += 8 + p->iCoord*4; - RTREE_DECODE_COORD(eInt, pCellData, xN); - switch( p->op ){ - case RTREE_LE: if( xN <= p->u.rValue ) return; break; - case RTREE_LT: if( xN < p->u.rValue ) return; break; - case RTREE_GE: if( xN >= p->u.rValue ) return; break; - case RTREE_GT: if( xN > p->u.rValue ) return; break; - default: if( xN == p->u.rValue ) return; break; - } - *peWithin = NOT_WITHIN; -} - -/* -** One of the cells in node pNode is guaranteed to have a 64-bit -** integer value equal to iRowid. Return the index of this cell. -*/ -static int nodeRowidIndex( - Rtree *pRtree, - RtreeNode *pNode, - i64 iRowid, - int *piIndex -){ - int ii; - int nCell = NCELL(pNode); - assert( nCell<200 ); - for(ii=0; iipParent; - if( pParent ){ - return nodeRowidIndex(pRtree, pParent, pNode->iNode, piIndex); - } - *piIndex = -1; - return SQLITE_OK; -} - -/* -** Compare two search points. Return negative, zero, or positive if the first -** is less than, equal to, or greater than the second. -** -** The rScore is the primary key. Smaller rScore values come first. -** If the rScore is a tie, then use iLevel as the tie breaker with smaller -** iLevel values coming first. In this way, if rScore is the same for all -** SearchPoints, then iLevel becomes the deciding factor and the result -** is a depth-first search, which is the desired default behavior. -*/ -static int rtreeSearchPointCompare( - const RtreeSearchPoint *pA, - const RtreeSearchPoint *pB -){ - if( pA->rScorerScore ) return -1; - if( pA->rScore>pB->rScore ) return +1; - if( pA->iLeveliLevel ) return -1; - if( pA->iLevel>pB->iLevel ) return +1; - return 0; -} - -/* -** Interchange to search points in a cursor. 
-/* ... SQLite amalgamation, rtree.c (continued), removed with the rest of this bundled file:
-** RtreeSearchPoint priority-queue helpers (rtreeSearchPointSwap, rtreeSearchPointFirst,
-** rtreeNodeOfFirstSearchPoint, rtreeEnqueue, rtreeSearchPointNew, rtreeSearchPointPop),
-** queue tracing, rtreeStepToLeaf, and the cursor xNext, xRowid and xColumn methods ... */
-/* ... findLeafNode, deserializeGeometry (MATCH constraint setup), the xFilter and xBestIndex
-** methods, setEstimatedRows, the cell geometry helpers (cellArea, cellMargin, cellUnion,
-** cellContains, cellGrowth, cellOverlap), ChooseLeaf, AdjustTree, and the rowidWrite and
-** parentWrite mapping writers ... */
-/* ... node split and rebalance code (SortByDistance, SortByDimension, splitNodeStartree,
-** SplitNode), parent fix-up and deletion (fixLeafParent, removeNode, fixBoundingBox,
-** deleteCell, Reinsert), rtreeInsertCell, reinsertNodeContent, newRowid, rtreeDeleteRowid,
-** the float rounding helpers rtreeValueDown/rtreeValueUp, and the xUpdate and xRename
-** methods ... */
-/* ... rtreeQueryStat1, the rtreeModule sqlite3_module definition, rtreeSqlInit,
-** getIntFromStmt, getNodeSize, rtreeInit (xCreate/xConnect), the rtreenode() debugging
-** function, geomCallback, and the sqlite3_rtree_geometry_callback(),
-** sqlite3_rtree_query_callback() and sqlite3_rtree_init() entry points; end of rtree.c
-** and start of the ICU extension (icu.c) ... */
-/* ... the ICU extension (icu.c): icuLikeCompare, icuLikeFunc, icuFunctionError,
-** icuRegexpDelete, icuRegexpFunc, icuCaseFunc16, the icuCollationColl/icuCollationDel
-** collation callbacks, icuLoadCollation, sqlite3IcuInit and sqlite3_icu_init; followed by
-** the start of the FTS3 ICU tokenizer (fts3_icu.c): the IcuTokenizer and IcuCursor
-** structures, icuCreate, icuDestroy, icuOpen and icuClose ... */
-*/ -static int icuNext( - sqlite3_tokenizer_cursor *pCursor, /* Cursor returned by simpleOpen */ - const char **ppToken, /* OUT: *ppToken is the token text */ - int *pnBytes, /* OUT: Number of bytes in token */ - int *piStartOffset, /* OUT: Starting offset of token */ - int *piEndOffset, /* OUT: Ending offset of token */ - int *piPosition /* OUT: Position integer of token */ -){ - IcuCursor *pCsr = (IcuCursor *)pCursor; - - int iStart = 0; - int iEnd = 0; - int nByte = 0; - - while( iStart==iEnd ){ - UChar32 c; - - iStart = ubrk_current(pCsr->pIter); - iEnd = ubrk_next(pCsr->pIter); - if( iEnd==UBRK_DONE ){ - return SQLITE_DONE; - } - - while( iStartaChar, iWhite, pCsr->nChar, c); - if( u_isspace(c) ){ - iStart = iWhite; - }else{ - break; - } - } - assert(iStart<=iEnd); - } - - do { - UErrorCode status = U_ZERO_ERROR; - if( nByte ){ - char *zNew = sqlite3_realloc(pCsr->zBuffer, nByte); - if( !zNew ){ - return SQLITE_NOMEM; - } - pCsr->zBuffer = zNew; - pCsr->nBuffer = nByte; - } - - u_strToUTF8( - pCsr->zBuffer, pCsr->nBuffer, &nByte, /* Output vars */ - &pCsr->aChar[iStart], iEnd-iStart, /* Input vars */ - &status /* Output success/failure */ - ); - } while( nByte>pCsr->nBuffer ); - - *ppToken = pCsr->zBuffer; - *pnBytes = nByte; - *piStartOffset = pCsr->aOffset[iStart]; - *piEndOffset = pCsr->aOffset[iEnd]; - *piPosition = pCsr->iToken++; - - return SQLITE_OK; -} - -/* -** The set of routines that implement the simple tokenizer -*/ -static const sqlite3_tokenizer_module icuTokenizerModule = { - 0, /* iVersion */ - icuCreate, /* xCreate */ - icuDestroy, /* xCreate */ - icuOpen, /* xOpen */ - icuClose, /* xClose */ - icuNext, /* xNext */ -}; - -/* -** Set *ppModule to point at the implementation of the ICU tokenizer. -*/ -SQLITE_PRIVATE void sqlite3Fts3IcuTokenizerModule( - sqlite3_tokenizer_module const**ppModule -){ - *ppModule = &icuTokenizerModule; -} - -#endif /* defined(SQLITE_ENABLE_ICU) */ -#endif /* !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS3) */ - -/************** End of fts3_icu.c ********************************************/ diff --git a/vendor/github.com/mattn/go-sqlite3/sqlite3-binding.h b/vendor/github.com/mattn/go-sqlite3/sqlite3-binding.h deleted file mode 100644 index e86d83f..0000000 --- a/vendor/github.com/mattn/go-sqlite3/sqlite3-binding.h +++ /dev/null @@ -1,7478 +0,0 @@ -/* -** 2001 September 15 -** -** The author disclaims copyright to this source code. In place of -** a legal notice, here is a blessing: -** -** May you do good and not evil. -** May you find forgiveness for yourself and forgive others. -** May you share freely, never taking more than you give. -** -************************************************************************* -** This header file defines the interface that the SQLite library -** presents to client programs. If a C-function, structure, datatype, -** or constant definition does not appear in this file, then it is -** not a published API of SQLite, is subject to change without -** notice, and should not be referenced by programs that use SQLite. -** -** Some of the definitions that are in this file are marked as -** "experimental". Experimental interfaces are normally new -** features recently added to SQLite. We do not anticipate changes -** to experimental interfaces but reserve the right to make minor changes -** if experience from use "in the wild" suggest such changes are prudent. -** -** The official C-language API documentation for SQLite is derived -** from comments in this file. 
This file is the authoritative source -** on how SQLite interfaces are suppose to operate. -** -** The name of this file under configuration management is "sqlite.h.in". -** The makefile makes some minor changes to this file (such as inserting -** the version number) and changes its name to "sqlite3.h" as -** part of the build process. -*/ -#ifndef _SQLITE3_H_ -#define _SQLITE3_H_ -#include /* Needed for the definition of va_list */ - -/* -** Make sure we can call this stuff from C++. -*/ -#ifdef __cplusplus -extern "C" { -#endif - - -/* -** Add the ability to override 'extern' -*/ -#ifndef SQLITE_EXTERN -# define SQLITE_EXTERN extern -#endif - -#ifndef SQLITE_API -# define SQLITE_API -#endif - - -/* -** These no-op macros are used in front of interfaces to mark those -** interfaces as either deprecated or experimental. New applications -** should not use deprecated interfaces - they are support for backwards -** compatibility only. Application writers should be aware that -** experimental interfaces are subject to change in point releases. -** -** These macros used to resolve to various kinds of compiler magic that -** would generate warning messages when they were used. But that -** compiler magic ended up generating such a flurry of bug reports -** that we have taken it all out and gone back to using simple -** noop macros. -*/ -#define SQLITE_DEPRECATED -#define SQLITE_EXPERIMENTAL - -/* -** Ensure these symbols were not defined by some previous header file. -*/ -#ifdef SQLITE_VERSION -# undef SQLITE_VERSION -#endif -#ifdef SQLITE_VERSION_NUMBER -# undef SQLITE_VERSION_NUMBER -#endif - -/* -** CAPI3REF: Compile-Time Library Version Numbers -** -** ^(The [SQLITE_VERSION] C preprocessor macro in the sqlite3.h header -** evaluates to a string literal that is the SQLite version in the -** format "X.Y.Z" where X is the major version number (always 3 for -** SQLite3) and Y is the minor version number and Z is the release number.)^ -** ^(The [SQLITE_VERSION_NUMBER] C preprocessor macro resolves to an integer -** with the value (X*1000000 + Y*1000 + Z) where X, Y, and Z are the same -** numbers used in [SQLITE_VERSION].)^ -** The SQLITE_VERSION_NUMBER for any given release of SQLite will also -** be larger than the release from which it is derived. Either Y will -** be held constant and Z will be incremented or else Y will be incremented -** and Z will be reset to zero. -** -** Since version 3.6.18, SQLite source code has been stored in the -** Fossil configuration management -** system. ^The SQLITE_SOURCE_ID macro evaluates to -** a string which identifies a particular check-in of SQLite -** within its configuration management system. ^The SQLITE_SOURCE_ID -** string contains the date and time of the check-in (UTC) and an SHA1 -** hash of the entire source tree. -** -** See also: [sqlite3_libversion()], -** [sqlite3_libversion_number()], [sqlite3_sourceid()], -** [sqlite_version()] and [sqlite_source_id()]. -*/ -#define SQLITE_VERSION "3.8.5" -#define SQLITE_VERSION_NUMBER 3008005 -#define SQLITE_SOURCE_ID "2014-06-04 14:06:34 b1ed4f2a34ba66c29b130f8d13e9092758019212" - -/* -** CAPI3REF: Run-Time Library Version Numbers -** KEYWORDS: sqlite3_version, sqlite3_sourceid -** -** These interfaces provide the same information as the [SQLITE_VERSION], -** [SQLITE_VERSION_NUMBER], and [SQLITE_SOURCE_ID] C preprocessor macros -** but are associated with the library instead of the header file. 
^(Cautious -** programmers might include assert() statements in their application to -** verify that values returned by these interfaces match the macros in -** the header, and thus insure that the application is -** compiled with matching library and header files. -** -**
    -** assert( sqlite3_libversion_number()==SQLITE_VERSION_NUMBER );
    -** assert( strcmp(sqlite3_sourceid(),SQLITE_SOURCE_ID)==0 );
    -** assert( strcmp(sqlite3_libversion(),SQLITE_VERSION)==0 );
    -** 
    )^ -** -** ^The sqlite3_version[] string constant contains the text of [SQLITE_VERSION] -** macro. ^The sqlite3_libversion() function returns a pointer to the -** to the sqlite3_version[] string constant. The sqlite3_libversion() -** function is provided for use in DLLs since DLL users usually do not have -** direct access to string constants within the DLL. ^The -** sqlite3_libversion_number() function returns an integer equal to -** [SQLITE_VERSION_NUMBER]. ^The sqlite3_sourceid() function returns -** a pointer to a string constant whose value is the same as the -** [SQLITE_SOURCE_ID] C preprocessor macro. -** -** See also: [sqlite_version()] and [sqlite_source_id()]. -*/ -SQLITE_API SQLITE_EXTERN const char sqlite3_version[]; -SQLITE_API const char *sqlite3_libversion(void); -SQLITE_API const char *sqlite3_sourceid(void); -SQLITE_API int sqlite3_libversion_number(void); - -/* -** CAPI3REF: Run-Time Library Compilation Options Diagnostics -** -** ^The sqlite3_compileoption_used() function returns 0 or 1 -** indicating whether the specified option was defined at -** compile time. ^The SQLITE_ prefix may be omitted from the -** option name passed to sqlite3_compileoption_used(). -** -** ^The sqlite3_compileoption_get() function allows iterating -** over the list of options that were defined at compile time by -** returning the N-th compile time option string. ^If N is out of range, -** sqlite3_compileoption_get() returns a NULL pointer. ^The SQLITE_ -** prefix is omitted from any strings returned by -** sqlite3_compileoption_get(). -** -** ^Support for the diagnostic functions sqlite3_compileoption_used() -** and sqlite3_compileoption_get() may be omitted by specifying the -** [SQLITE_OMIT_COMPILEOPTION_DIAGS] option at compile time. -** -** See also: SQL functions [sqlite_compileoption_used()] and -** [sqlite_compileoption_get()] and the [compile_options pragma]. -*/ -#ifndef SQLITE_OMIT_COMPILEOPTION_DIAGS -SQLITE_API int sqlite3_compileoption_used(const char *zOptName); -SQLITE_API const char *sqlite3_compileoption_get(int N); -#endif - -/* -** CAPI3REF: Test To See If The Library Is Threadsafe -** -** ^The sqlite3_threadsafe() function returns zero if and only if -** SQLite was compiled with mutexing code omitted due to the -** [SQLITE_THREADSAFE] compile-time option being set to 0. -** -** SQLite can be compiled with or without mutexes. When -** the [SQLITE_THREADSAFE] C preprocessor macro is 1 or 2, mutexes -** are enabled and SQLite is threadsafe. When the -** [SQLITE_THREADSAFE] macro is 0, -** the mutexes are omitted. Without the mutexes, it is not safe -** to use SQLite concurrently from more than one thread. -** -** Enabling mutexes incurs a measurable performance penalty. -** So if speed is of utmost importance, it makes sense to disable -** the mutexes. But for maximum safety, mutexes should be enabled. -** ^The default behavior is for mutexes to be enabled. -** -** This interface can be used by an application to make sure that the -** version of SQLite that it is linking against was compiled with -** the desired setting of the [SQLITE_THREADSAFE] macro. -** -** This interface only reports on the compile-time mutex setting -** of the [SQLITE_THREADSAFE] flag. If SQLite is compiled with -** SQLITE_THREADSAFE=1 or =2 then mutexes are enabled by default but -** can be fully or partially disabled using a call to [sqlite3_config()] -** with the verbs [SQLITE_CONFIG_SINGLETHREAD], [SQLITE_CONFIG_MULTITHREAD], -** or [SQLITE_CONFIG_MUTEX]. 
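A minimal, purely illustrative sketch of the version and compile-option queries declared above (header macros versus library functions):

    /* Illustrative only: compile-time vs. run-time version checks. */
    #include <stdio.h>
    #include <sqlite3.h>

    int main(void){
      int i;
      const char *zOpt;

      printf("header  %s (%d)\n", SQLITE_VERSION, SQLITE_VERSION_NUMBER);
      printf("library %s (%d)\n", sqlite3_libversion(), sqlite3_libversion_number());
      printf("source  %s\n", sqlite3_sourceid());
      printf("threadsafe=%d\n", sqlite3_threadsafe());

      /* sqlite3_compileoption_get() returns NULL when N runs off the end;
      ** the SQLITE_ prefix is omitted from the returned strings. */
      for(i=0; (zOpt = sqlite3_compileoption_get(i))!=0; i++){
        printf("  -DSQLITE_%s\n", zOpt);
      }
      return 0;
    }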
^(The return value of the -** sqlite3_threadsafe() function shows only the compile-time setting of -** thread safety, not any run-time changes to that setting made by -** sqlite3_config(). In other words, the return value from sqlite3_threadsafe() -** is unchanged by calls to sqlite3_config().)^ -** -** See the [threading mode] documentation for additional information. -*/ -SQLITE_API int sqlite3_threadsafe(void); - -/* -** CAPI3REF: Database Connection Handle -** KEYWORDS: {database connection} {database connections} -** -** Each open SQLite database is represented by a pointer to an instance of -** the opaque structure named "sqlite3". It is useful to think of an sqlite3 -** pointer as an object. The [sqlite3_open()], [sqlite3_open16()], and -** [sqlite3_open_v2()] interfaces are its constructors, and [sqlite3_close()] -** and [sqlite3_close_v2()] are its destructors. There are many other -** interfaces (such as -** [sqlite3_prepare_v2()], [sqlite3_create_function()], and -** [sqlite3_busy_timeout()] to name but three) that are methods on an -** sqlite3 object. -*/ -typedef struct sqlite3 sqlite3; - -/* -** CAPI3REF: 64-Bit Integer Types -** KEYWORDS: sqlite_int64 sqlite_uint64 -** -** Because there is no cross-platform way to specify 64-bit integer types -** SQLite includes typedefs for 64-bit signed and unsigned integers. -** -** The sqlite3_int64 and sqlite3_uint64 are the preferred type definitions. -** The sqlite_int64 and sqlite_uint64 types are supported for backwards -** compatibility only. -** -** ^The sqlite3_int64 and sqlite_int64 types can store integer values -** between -9223372036854775808 and +9223372036854775807 inclusive. ^The -** sqlite3_uint64 and sqlite_uint64 types can store integer values -** between 0 and +18446744073709551615 inclusive. -*/ -#ifdef SQLITE_INT64_TYPE - typedef SQLITE_INT64_TYPE sqlite_int64; - typedef unsigned SQLITE_INT64_TYPE sqlite_uint64; -#elif defined(_MSC_VER) || defined(__BORLANDC__) - typedef __int64 sqlite_int64; - typedef unsigned __int64 sqlite_uint64; -#else - typedef long long int sqlite_int64; - typedef unsigned long long int sqlite_uint64; -#endif -typedef sqlite_int64 sqlite3_int64; -typedef sqlite_uint64 sqlite3_uint64; - -/* -** If compiling for a processor that lacks floating point support, -** substitute integer for floating-point. -*/ -#ifdef SQLITE_OMIT_FLOATING_POINT -# define double sqlite3_int64 -#endif - -/* -** CAPI3REF: Closing A Database Connection -** -** ^The sqlite3_close() and sqlite3_close_v2() routines are destructors -** for the [sqlite3] object. -** ^Calls to sqlite3_close() and sqlite3_close_v2() return SQLITE_OK if -** the [sqlite3] object is successfully destroyed and all associated -** resources are deallocated. -** -** ^If the database connection is associated with unfinalized prepared -** statements or unfinished sqlite3_backup objects then sqlite3_close() -** will leave the database connection open and return [SQLITE_BUSY]. -** ^If sqlite3_close_v2() is called with unfinalized prepared statements -** and unfinished sqlite3_backups, then the database connection becomes -** an unusable "zombie" which will automatically be deallocated when the -** last prepared statement is finalized or the last sqlite3_backup is -** finished. The sqlite3_close_v2() interface is intended for use with -** host languages that are garbage collected, and where the order in which -** destructors are called is arbitrary. 
-** -** Applications should [sqlite3_finalize | finalize] all [prepared statements], -** [sqlite3_blob_close | close] all [BLOB handles], and -** [sqlite3_backup_finish | finish] all [sqlite3_backup] objects associated -** with the [sqlite3] object prior to attempting to close the object. ^If -** sqlite3_close_v2() is called on a [database connection] that still has -** outstanding [prepared statements], [BLOB handles], and/or -** [sqlite3_backup] objects then it returns SQLITE_OK but the deallocation -** of resources is deferred until all [prepared statements], [BLOB handles], -** and [sqlite3_backup] objects are also destroyed. -** -** ^If an [sqlite3] object is destroyed while a transaction is open, -** the transaction is automatically rolled back. -** -** The C parameter to [sqlite3_close(C)] and [sqlite3_close_v2(C)] -** must be either a NULL -** pointer or an [sqlite3] object pointer obtained -** from [sqlite3_open()], [sqlite3_open16()], or -** [sqlite3_open_v2()], and not previously closed. -** ^Calling sqlite3_close() or sqlite3_close_v2() with a NULL pointer -** argument is a harmless no-op. -*/ -SQLITE_API int sqlite3_close(sqlite3*); -SQLITE_API int sqlite3_close_v2(sqlite3*); - -/* -** The type for a callback function. -** This is legacy and deprecated. It is included for historical -** compatibility and is not documented. -*/ -typedef int (*sqlite3_callback)(void*,int,char**, char**); - -/* -** CAPI3REF: One-Step Query Execution Interface -** -** The sqlite3_exec() interface is a convenience wrapper around -** [sqlite3_prepare_v2()], [sqlite3_step()], and [sqlite3_finalize()], -** that allows an application to run multiple statements of SQL -** without having to use a lot of C code. -** -** ^The sqlite3_exec() interface runs zero or more UTF-8 encoded, -** semicolon-separate SQL statements passed into its 2nd argument, -** in the context of the [database connection] passed in as its 1st -** argument. ^If the callback function of the 3rd argument to -** sqlite3_exec() is not NULL, then it is invoked for each result row -** coming out of the evaluated SQL statements. ^The 4th argument to -** sqlite3_exec() is relayed through to the 1st argument of each -** callback invocation. ^If the callback pointer to sqlite3_exec() -** is NULL, then no callback is ever invoked and result rows are -** ignored. -** -** ^If an error occurs while evaluating the SQL statements passed into -** sqlite3_exec(), then execution of the current statement stops and -** subsequent statements are skipped. ^If the 5th parameter to sqlite3_exec() -** is not NULL then any error message is written into memory obtained -** from [sqlite3_malloc()] and passed back through the 5th parameter. -** To avoid memory leaks, the application should invoke [sqlite3_free()] -** on error message strings returned through the 5th parameter of -** of sqlite3_exec() after the error message string is no longer needed. -** ^If the 5th parameter to sqlite3_exec() is not NULL and no errors -** occur, then sqlite3_exec() sets the pointer in its 5th parameter to -** NULL before returning. -** -** ^If an sqlite3_exec() callback returns non-zero, the sqlite3_exec() -** routine returns SQLITE_ABORT without invoking the callback again and -** without running any subsequent SQL statements. -** -** ^The 2nd argument to the sqlite3_exec() callback function is the -** number of columns in the result. 
^The 3rd argument to the sqlite3_exec() -** callback is an array of pointers to strings obtained as if from -** [sqlite3_column_text()], one for each column. ^If an element of a -** result row is NULL then the corresponding string pointer for the -** sqlite3_exec() callback is a NULL pointer. ^The 4th argument to the -** sqlite3_exec() callback is an array of pointers to strings where each -** entry represents the name of corresponding result column as obtained -** from [sqlite3_column_name()]. -** -** ^If the 2nd parameter to sqlite3_exec() is a NULL pointer, a pointer -** to an empty string, or a pointer that contains only whitespace and/or -** SQL comments, then no SQL statements are evaluated and the database -** is not changed. -** -** Restrictions: -** -**
      -**
    • The application must insure that the 1st parameter to sqlite3_exec() -** is a valid and open [database connection]. -**
    • The application must not close the [database connection] specified by -** the 1st parameter to sqlite3_exec() while sqlite3_exec() is running. -**
    • The application must not modify the SQL statement text passed into -** the 2nd parameter of sqlite3_exec() while sqlite3_exec() is running. -**
    -*/ -SQLITE_API int sqlite3_exec( - sqlite3*, /* An open database */ - const char *sql, /* SQL to be evaluated */ - int (*callback)(void*,int,char**,char**), /* Callback function */ - void *, /* 1st argument to callback */ - char **errmsg /* Error msg written here */ -); - -/* -** CAPI3REF: Result Codes -** KEYWORDS: SQLITE_OK {error code} {error codes} -** KEYWORDS: {result code} {result codes} -** -** Many SQLite functions return an integer result code from the set shown -** here in order to indicate success or failure. -** -** New error codes may be added in future versions of SQLite. -** -** See also: [SQLITE_IOERR_READ | extended result codes], -** [sqlite3_vtab_on_conflict()] [SQLITE_ROLLBACK | result codes]. -*/ -#define SQLITE_OK 0 /* Successful result */ -/* beginning-of-error-codes */ -#define SQLITE_ERROR 1 /* SQL error or missing database */ -#define SQLITE_INTERNAL 2 /* Internal logic error in SQLite */ -#define SQLITE_PERM 3 /* Access permission denied */ -#define SQLITE_ABORT 4 /* Callback routine requested an abort */ -#define SQLITE_BUSY 5 /* The database file is locked */ -#define SQLITE_LOCKED 6 /* A table in the database is locked */ -#define SQLITE_NOMEM 7 /* A malloc() failed */ -#define SQLITE_READONLY 8 /* Attempt to write a readonly database */ -#define SQLITE_INTERRUPT 9 /* Operation terminated by sqlite3_interrupt()*/ -#define SQLITE_IOERR 10 /* Some kind of disk I/O error occurred */ -#define SQLITE_CORRUPT 11 /* The database disk image is malformed */ -#define SQLITE_NOTFOUND 12 /* Unknown opcode in sqlite3_file_control() */ -#define SQLITE_FULL 13 /* Insertion failed because database is full */ -#define SQLITE_CANTOPEN 14 /* Unable to open the database file */ -#define SQLITE_PROTOCOL 15 /* Database lock protocol error */ -#define SQLITE_EMPTY 16 /* Database is empty */ -#define SQLITE_SCHEMA 17 /* The database schema changed */ -#define SQLITE_TOOBIG 18 /* String or BLOB exceeds size limit */ -#define SQLITE_CONSTRAINT 19 /* Abort due to constraint violation */ -#define SQLITE_MISMATCH 20 /* Data type mismatch */ -#define SQLITE_MISUSE 21 /* Library used incorrectly */ -#define SQLITE_NOLFS 22 /* Uses OS features not supported on host */ -#define SQLITE_AUTH 23 /* Authorization denied */ -#define SQLITE_FORMAT 24 /* Auxiliary database format error */ -#define SQLITE_RANGE 25 /* 2nd parameter to sqlite3_bind out of range */ -#define SQLITE_NOTADB 26 /* File opened that is not a database file */ -#define SQLITE_NOTICE 27 /* Notifications from sqlite3_log() */ -#define SQLITE_WARNING 28 /* Warnings from sqlite3_log() */ -#define SQLITE_ROW 100 /* sqlite3_step() has another row ready */ -#define SQLITE_DONE 101 /* sqlite3_step() has finished executing */ -/* end-of-error-codes */ - -/* -** CAPI3REF: Extended Result Codes -** KEYWORDS: {extended error code} {extended error codes} -** KEYWORDS: {extended result code} {extended result codes} -** -** In its default configuration, SQLite API routines return one of 26 integer -** [SQLITE_OK | result codes]. However, experience has shown that many of -** these result codes are too coarse-grained. They do not provide as -** much information about problems as programmers might like. In an effort to -** address this, newer versions of SQLite (version 3.3.8 and later) include -** support for additional result codes that provide more detailed information -** about errors. The extended result codes are enabled or disabled -** on a per database connection basis using the -** [sqlite3_extended_result_codes()] API. 
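A minimal sketch of the sqlite3_exec() callback contract described above; the table, column names, and SQL are hypothetical:

    /* Illustrative only: one-step query execution with a row callback. */
    #include <stdio.h>
    #include <sqlite3.h>

    static int print_row(void *pUnused, int nCol, char **azVal, char **azName){
      int i;
      (void)pUnused;
      for(i=0; i<nCol; i++){
        printf("%s=%s%s", azName[i], azVal[i] ? azVal[i] : "NULL",
               i==nCol-1 ? "\n" : " ");
      }
      return 0;  /* non-zero would make sqlite3_exec() return SQLITE_ABORT */
    }

    int main(void){
      sqlite3 *db;
      char *zErr = 0;
      if( sqlite3_open(":memory:", &db)!=SQLITE_OK ) return 1;
      sqlite3_exec(db, "CREATE TABLE t(a,b); INSERT INTO t VALUES(1,'x');", 0, 0, 0);
      if( sqlite3_exec(db, "SELECT a, b FROM t", print_row, 0, &zErr)!=SQLITE_OK ){
        fprintf(stderr, "error: %s\n", zErr);
        sqlite3_free(zErr);  /* error strings come from sqlite3_malloc() */
      }
      sqlite3_close(db);
      return 0;
    }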
-** -** Some of the available extended result codes are listed here. -** One may expect the number of extended result codes will increase -** over time. Software that uses extended result codes should expect -** to see new result codes in future releases of SQLite. -** -** The SQLITE_OK result code will never be extended. It will always -** be exactly zero. -*/ -#define SQLITE_IOERR_READ (SQLITE_IOERR | (1<<8)) -#define SQLITE_IOERR_SHORT_READ (SQLITE_IOERR | (2<<8)) -#define SQLITE_IOERR_WRITE (SQLITE_IOERR | (3<<8)) -#define SQLITE_IOERR_FSYNC (SQLITE_IOERR | (4<<8)) -#define SQLITE_IOERR_DIR_FSYNC (SQLITE_IOERR | (5<<8)) -#define SQLITE_IOERR_TRUNCATE (SQLITE_IOERR | (6<<8)) -#define SQLITE_IOERR_FSTAT (SQLITE_IOERR | (7<<8)) -#define SQLITE_IOERR_UNLOCK (SQLITE_IOERR | (8<<8)) -#define SQLITE_IOERR_RDLOCK (SQLITE_IOERR | (9<<8)) -#define SQLITE_IOERR_DELETE (SQLITE_IOERR | (10<<8)) -#define SQLITE_IOERR_BLOCKED (SQLITE_IOERR | (11<<8)) -#define SQLITE_IOERR_NOMEM (SQLITE_IOERR | (12<<8)) -#define SQLITE_IOERR_ACCESS (SQLITE_IOERR | (13<<8)) -#define SQLITE_IOERR_CHECKRESERVEDLOCK (SQLITE_IOERR | (14<<8)) -#define SQLITE_IOERR_LOCK (SQLITE_IOERR | (15<<8)) -#define SQLITE_IOERR_CLOSE (SQLITE_IOERR | (16<<8)) -#define SQLITE_IOERR_DIR_CLOSE (SQLITE_IOERR | (17<<8)) -#define SQLITE_IOERR_SHMOPEN (SQLITE_IOERR | (18<<8)) -#define SQLITE_IOERR_SHMSIZE (SQLITE_IOERR | (19<<8)) -#define SQLITE_IOERR_SHMLOCK (SQLITE_IOERR | (20<<8)) -#define SQLITE_IOERR_SHMMAP (SQLITE_IOERR | (21<<8)) -#define SQLITE_IOERR_SEEK (SQLITE_IOERR | (22<<8)) -#define SQLITE_IOERR_DELETE_NOENT (SQLITE_IOERR | (23<<8)) -#define SQLITE_IOERR_MMAP (SQLITE_IOERR | (24<<8)) -#define SQLITE_IOERR_GETTEMPPATH (SQLITE_IOERR | (25<<8)) -#define SQLITE_IOERR_CONVPATH (SQLITE_IOERR | (26<<8)) -#define SQLITE_LOCKED_SHAREDCACHE (SQLITE_LOCKED | (1<<8)) -#define SQLITE_BUSY_RECOVERY (SQLITE_BUSY | (1<<8)) -#define SQLITE_BUSY_SNAPSHOT (SQLITE_BUSY | (2<<8)) -#define SQLITE_CANTOPEN_NOTEMPDIR (SQLITE_CANTOPEN | (1<<8)) -#define SQLITE_CANTOPEN_ISDIR (SQLITE_CANTOPEN | (2<<8)) -#define SQLITE_CANTOPEN_FULLPATH (SQLITE_CANTOPEN | (3<<8)) -#define SQLITE_CANTOPEN_CONVPATH (SQLITE_CANTOPEN | (4<<8)) -#define SQLITE_CORRUPT_VTAB (SQLITE_CORRUPT | (1<<8)) -#define SQLITE_READONLY_RECOVERY (SQLITE_READONLY | (1<<8)) -#define SQLITE_READONLY_CANTLOCK (SQLITE_READONLY | (2<<8)) -#define SQLITE_READONLY_ROLLBACK (SQLITE_READONLY | (3<<8)) -#define SQLITE_READONLY_DBMOVED (SQLITE_READONLY | (4<<8)) -#define SQLITE_ABORT_ROLLBACK (SQLITE_ABORT | (2<<8)) -#define SQLITE_CONSTRAINT_CHECK (SQLITE_CONSTRAINT | (1<<8)) -#define SQLITE_CONSTRAINT_COMMITHOOK (SQLITE_CONSTRAINT | (2<<8)) -#define SQLITE_CONSTRAINT_FOREIGNKEY (SQLITE_CONSTRAINT | (3<<8)) -#define SQLITE_CONSTRAINT_FUNCTION (SQLITE_CONSTRAINT | (4<<8)) -#define SQLITE_CONSTRAINT_NOTNULL (SQLITE_CONSTRAINT | (5<<8)) -#define SQLITE_CONSTRAINT_PRIMARYKEY (SQLITE_CONSTRAINT | (6<<8)) -#define SQLITE_CONSTRAINT_TRIGGER (SQLITE_CONSTRAINT | (7<<8)) -#define SQLITE_CONSTRAINT_UNIQUE (SQLITE_CONSTRAINT | (8<<8)) -#define SQLITE_CONSTRAINT_VTAB (SQLITE_CONSTRAINT | (9<<8)) -#define SQLITE_CONSTRAINT_ROWID (SQLITE_CONSTRAINT |(10<<8)) -#define SQLITE_NOTICE_RECOVER_WAL (SQLITE_NOTICE | (1<<8)) -#define SQLITE_NOTICE_RECOVER_ROLLBACK (SQLITE_NOTICE | (2<<8)) -#define SQLITE_WARNING_AUTOINDEX (SQLITE_WARNING | (1<<8)) - -/* -** CAPI3REF: Flags For File Open Operations -** -** These bit values are intended for use in the -** 3rd parameter to the [sqlite3_open_v2()] interface and -** in the 4th 
parameter to the [sqlite3_vfs.xOpen] method. -*/ -#define SQLITE_OPEN_READONLY 0x00000001 /* Ok for sqlite3_open_v2() */ -#define SQLITE_OPEN_READWRITE 0x00000002 /* Ok for sqlite3_open_v2() */ -#define SQLITE_OPEN_CREATE 0x00000004 /* Ok for sqlite3_open_v2() */ -#define SQLITE_OPEN_DELETEONCLOSE 0x00000008 /* VFS only */ -#define SQLITE_OPEN_EXCLUSIVE 0x00000010 /* VFS only */ -#define SQLITE_OPEN_AUTOPROXY 0x00000020 /* VFS only */ -#define SQLITE_OPEN_URI 0x00000040 /* Ok for sqlite3_open_v2() */ -#define SQLITE_OPEN_MEMORY 0x00000080 /* Ok for sqlite3_open_v2() */ -#define SQLITE_OPEN_MAIN_DB 0x00000100 /* VFS only */ -#define SQLITE_OPEN_TEMP_DB 0x00000200 /* VFS only */ -#define SQLITE_OPEN_TRANSIENT_DB 0x00000400 /* VFS only */ -#define SQLITE_OPEN_MAIN_JOURNAL 0x00000800 /* VFS only */ -#define SQLITE_OPEN_TEMP_JOURNAL 0x00001000 /* VFS only */ -#define SQLITE_OPEN_SUBJOURNAL 0x00002000 /* VFS only */ -#define SQLITE_OPEN_MASTER_JOURNAL 0x00004000 /* VFS only */ -#define SQLITE_OPEN_NOMUTEX 0x00008000 /* Ok for sqlite3_open_v2() */ -#define SQLITE_OPEN_FULLMUTEX 0x00010000 /* Ok for sqlite3_open_v2() */ -#define SQLITE_OPEN_SHAREDCACHE 0x00020000 /* Ok for sqlite3_open_v2() */ -#define SQLITE_OPEN_PRIVATECACHE 0x00040000 /* Ok for sqlite3_open_v2() */ -#define SQLITE_OPEN_WAL 0x00080000 /* VFS only */ - -/* Reserved: 0x00F00000 */ - -/* -** CAPI3REF: Device Characteristics -** -** The xDeviceCharacteristics method of the [sqlite3_io_methods] -** object returns an integer which is a vector of these -** bit values expressing I/O characteristics of the mass storage -** device that holds the file that the [sqlite3_io_methods] -** refers to. -** -** The SQLITE_IOCAP_ATOMIC property means that all writes of -** any size are atomic. The SQLITE_IOCAP_ATOMICnnn values -** mean that writes of blocks that are nnn bytes in size and -** are aligned to an address which is an integer multiple of -** nnn are atomic. The SQLITE_IOCAP_SAFE_APPEND value means -** that when data is appended to a file, the data is appended -** first then the size of the file is extended, never the other -** way around. The SQLITE_IOCAP_SEQUENTIAL property means that -** information is written to disk in the same order as calls -** to xWrite(). The SQLITE_IOCAP_POWERSAFE_OVERWRITE property means that -** after reboot following a crash or power loss, the only bytes in a -** file that were written at the application level might have changed -** and that adjacent bytes, even bytes within the same sector are -** guaranteed to be unchanged. The SQLITE_IOCAP_UNDELETABLE_WHEN_OPEN -** flag indicate that a file cannot be deleted when open. The -** SQLITE_IOCAP_IMMUTABLE flag indicates that the file is on -** read-only media and cannot be changed even by processes with -** elevated privileges. 
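A short sketch of how the SQLITE_OPEN_* flags defined above are typically combined in a call to sqlite3_open_v2(); the filename is an arbitrary example:

    /* Illustrative only: opening a database with explicit open flags. */
    #include <stdio.h>
    #include <sqlite3.h>

    int main(void){
      sqlite3 *db = 0;
      int rc = sqlite3_open_v2(
          "example.db", &db,
          SQLITE_OPEN_READWRITE | SQLITE_OPEN_CREATE | SQLITE_OPEN_FULLMUTEX,
          0 /* 0 selects the default VFS */);
      if( rc!=SQLITE_OK ){
        fprintf(stderr, "open failed: %s\n",
                db ? sqlite3_errmsg(db) : "out of memory");
      }
      sqlite3_close(db);  /* a NULL pointer is a harmless no-op */
      return rc==SQLITE_OK ? 0 : 1;
    }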
-*/ -#define SQLITE_IOCAP_ATOMIC 0x00000001 -#define SQLITE_IOCAP_ATOMIC512 0x00000002 -#define SQLITE_IOCAP_ATOMIC1K 0x00000004 -#define SQLITE_IOCAP_ATOMIC2K 0x00000008 -#define SQLITE_IOCAP_ATOMIC4K 0x00000010 -#define SQLITE_IOCAP_ATOMIC8K 0x00000020 -#define SQLITE_IOCAP_ATOMIC16K 0x00000040 -#define SQLITE_IOCAP_ATOMIC32K 0x00000080 -#define SQLITE_IOCAP_ATOMIC64K 0x00000100 -#define SQLITE_IOCAP_SAFE_APPEND 0x00000200 -#define SQLITE_IOCAP_SEQUENTIAL 0x00000400 -#define SQLITE_IOCAP_UNDELETABLE_WHEN_OPEN 0x00000800 -#define SQLITE_IOCAP_POWERSAFE_OVERWRITE 0x00001000 -#define SQLITE_IOCAP_IMMUTABLE 0x00002000 - -/* -** CAPI3REF: File Locking Levels -** -** SQLite uses one of these integer values as the second -** argument to calls it makes to the xLock() and xUnlock() methods -** of an [sqlite3_io_methods] object. -*/ -#define SQLITE_LOCK_NONE 0 -#define SQLITE_LOCK_SHARED 1 -#define SQLITE_LOCK_RESERVED 2 -#define SQLITE_LOCK_PENDING 3 -#define SQLITE_LOCK_EXCLUSIVE 4 - -/* -** CAPI3REF: Synchronization Type Flags -** -** When SQLite invokes the xSync() method of an -** [sqlite3_io_methods] object it uses a combination of -** these integer values as the second argument. -** -** When the SQLITE_SYNC_DATAONLY flag is used, it means that the -** sync operation only needs to flush data to mass storage. Inode -** information need not be flushed. If the lower four bits of the flag -** equal SQLITE_SYNC_NORMAL, that means to use normal fsync() semantics. -** If the lower four bits equal SQLITE_SYNC_FULL, that means -** to use Mac OS X style fullsync instead of fsync(). -** -** Do not confuse the SQLITE_SYNC_NORMAL and SQLITE_SYNC_FULL flags -** with the [PRAGMA synchronous]=NORMAL and [PRAGMA synchronous]=FULL -** settings. The [synchronous pragma] determines when calls to the -** xSync VFS method occur and applies uniformly across all platforms. -** The SQLITE_SYNC_NORMAL and SQLITE_SYNC_FULL flags determine how -** energetic or rigorous or forceful the sync operations are and -** only make a difference on Mac OSX for the default SQLite code. -** (Third-party VFS implementations might also make the distinction -** between SQLITE_SYNC_NORMAL and SQLITE_SYNC_FULL, but among the -** operating systems natively supported by SQLite, only Mac OSX -** cares about the difference.) -*/ -#define SQLITE_SYNC_NORMAL 0x00002 -#define SQLITE_SYNC_FULL 0x00003 -#define SQLITE_SYNC_DATAONLY 0x00010 - -/* -** CAPI3REF: OS Interface Open File Handle -** -** An [sqlite3_file] object represents an open file in the -** [sqlite3_vfs | OS interface layer]. Individual OS interface -** implementations will -** want to subclass this object by appending additional fields -** for their own use. The pMethods entry is a pointer to an -** [sqlite3_io_methods] object that defines methods for performing -** I/O operations on the open file. -*/ -typedef struct sqlite3_file sqlite3_file; -struct sqlite3_file { - const struct sqlite3_io_methods *pMethods; /* Methods for an open file */ -}; - -/* -** CAPI3REF: OS Interface File Virtual Methods Object -** -** Every file opened by the [sqlite3_vfs.xOpen] method populates an -** [sqlite3_file] object (or, more commonly, a subclass of the -** [sqlite3_file] object) with a pointer to an instance of this object. -** This object defines the methods used to perform various operations -** against the open file represented by the [sqlite3_file] object. 
-** -** If the [sqlite3_vfs.xOpen] method sets the sqlite3_file.pMethods element -** to a non-NULL pointer, then the sqlite3_io_methods.xClose method -** may be invoked even if the [sqlite3_vfs.xOpen] reported that it failed. The -** only way to prevent a call to xClose following a failed [sqlite3_vfs.xOpen] -** is for the [sqlite3_vfs.xOpen] to set the sqlite3_file.pMethods element -** to NULL. -** -** The flags argument to xSync may be one of [SQLITE_SYNC_NORMAL] or -** [SQLITE_SYNC_FULL]. The first choice is the normal fsync(). -** The second choice is a Mac OS X style fullsync. The [SQLITE_SYNC_DATAONLY] -** flag may be ORed in to indicate that only the data of the file -** and not its inode needs to be synced. -** -** The integer values to xLock() and xUnlock() are one of -**
      -**
    • [SQLITE_LOCK_NONE], -**
    • [SQLITE_LOCK_SHARED], -**
    • [SQLITE_LOCK_RESERVED], -**
    • [SQLITE_LOCK_PENDING], or -**
    • [SQLITE_LOCK_EXCLUSIVE]. -**
    -** xLock() increases the lock. xUnlock() decreases the lock. -** The xCheckReservedLock() method checks whether any database connection, -** either in this process or in some other process, is holding a RESERVED, -** PENDING, or EXCLUSIVE lock on the file. It returns true -** if such a lock exists and false otherwise. -** -** The xFileControl() method is a generic interface that allows custom -** VFS implementations to directly control an open file using the -** [sqlite3_file_control()] interface. The second "op" argument is an -** integer opcode. The third argument is a generic pointer intended to -** point to a structure that may contain arguments or space in which to -** write return values. Potential uses for xFileControl() might be -** functions to enable blocking locks with timeouts, to change the -** locking strategy (for example to use dot-file locks), to inquire -** about the status of a lock, or to break stale locks. The SQLite -** core reserves all opcodes less than 100 for its own use. -** A [SQLITE_FCNTL_LOCKSTATE | list of opcodes] less than 100 is available. -** Applications that define a custom xFileControl method should use opcodes -** greater than 100 to avoid conflicts. VFS implementations should -** return [SQLITE_NOTFOUND] for file control opcodes that they do not -** recognize. -** -** The xSectorSize() method returns the sector size of the -** device that underlies the file. The sector size is the -** minimum write that can be performed without disturbing -** other bytes in the file. The xDeviceCharacteristics() -** method returns a bit vector describing behaviors of the -** underlying device: -** -**
      -**
    • [SQLITE_IOCAP_ATOMIC] -**
    • [SQLITE_IOCAP_ATOMIC512] -**
    • [SQLITE_IOCAP_ATOMIC1K] -**
    • [SQLITE_IOCAP_ATOMIC2K] -**
    • [SQLITE_IOCAP_ATOMIC4K] -**
    • [SQLITE_IOCAP_ATOMIC8K] -**
    • [SQLITE_IOCAP_ATOMIC16K] -**
    • [SQLITE_IOCAP_ATOMIC32K] -**
    • [SQLITE_IOCAP_ATOMIC64K] -**
    • [SQLITE_IOCAP_SAFE_APPEND] -**
    • [SQLITE_IOCAP_SEQUENTIAL] -**
    -** -** The SQLITE_IOCAP_ATOMIC property means that all writes of -** any size are atomic. The SQLITE_IOCAP_ATOMICnnn values -** mean that writes of blocks that are nnn bytes in size and -** are aligned to an address which is an integer multiple of -** nnn are atomic. The SQLITE_IOCAP_SAFE_APPEND value means -** that when data is appended to a file, the data is appended -** first then the size of the file is extended, never the other -** way around. The SQLITE_IOCAP_SEQUENTIAL property means that -** information is written to disk in the same order as calls -** to xWrite(). -** -** If xRead() returns SQLITE_IOERR_SHORT_READ it must also fill -** in the unread portions of the buffer with zeros. A VFS that -** fails to zero-fill short reads might seem to work. However, -** failure to zero-fill short reads will eventually lead to -** database corruption. -*/ -typedef struct sqlite3_io_methods sqlite3_io_methods; -struct sqlite3_io_methods { - int iVersion; - int (*xClose)(sqlite3_file*); - int (*xRead)(sqlite3_file*, void*, int iAmt, sqlite3_int64 iOfst); - int (*xWrite)(sqlite3_file*, const void*, int iAmt, sqlite3_int64 iOfst); - int (*xTruncate)(sqlite3_file*, sqlite3_int64 size); - int (*xSync)(sqlite3_file*, int flags); - int (*xFileSize)(sqlite3_file*, sqlite3_int64 *pSize); - int (*xLock)(sqlite3_file*, int); - int (*xUnlock)(sqlite3_file*, int); - int (*xCheckReservedLock)(sqlite3_file*, int *pResOut); - int (*xFileControl)(sqlite3_file*, int op, void *pArg); - int (*xSectorSize)(sqlite3_file*); - int (*xDeviceCharacteristics)(sqlite3_file*); - /* Methods above are valid for version 1 */ - int (*xShmMap)(sqlite3_file*, int iPg, int pgsz, int, void volatile**); - int (*xShmLock)(sqlite3_file*, int offset, int n, int flags); - void (*xShmBarrier)(sqlite3_file*); - int (*xShmUnmap)(sqlite3_file*, int deleteFlag); - /* Methods above are valid for version 2 */ - int (*xFetch)(sqlite3_file*, sqlite3_int64 iOfst, int iAmt, void **pp); - int (*xUnfetch)(sqlite3_file*, sqlite3_int64 iOfst, void *p); - /* Methods above are valid for version 3 */ - /* Additional methods may be added in future releases */ -}; - -/* -** CAPI3REF: Standard File Control Opcodes -** -** These integer constants are opcodes for the xFileControl method -** of the [sqlite3_io_methods] object and for the [sqlite3_file_control()] -** interface. -** -** The [SQLITE_FCNTL_LOCKSTATE] opcode is used for debugging. This -** opcode causes the xFileControl method to write the current state of -** the lock (one of [SQLITE_LOCK_NONE], [SQLITE_LOCK_SHARED], -** [SQLITE_LOCK_RESERVED], [SQLITE_LOCK_PENDING], or [SQLITE_LOCK_EXCLUSIVE]) -** into an integer that the pArg argument points to. This capability -** is used during testing and only needs to be supported when SQLITE_TEST -** is defined. -**
      -**
    • [[SQLITE_FCNTL_SIZE_HINT]] -** The [SQLITE_FCNTL_SIZE_HINT] opcode is used by SQLite to give the VFS -** layer a hint of how large the database file will grow to be during the -** current transaction. This hint is not guaranteed to be accurate but it -** is often close. The underlying VFS might choose to preallocate database -** file space based on this hint in order to help writes to the database -** file run faster. -** -**
    • [[SQLITE_FCNTL_CHUNK_SIZE]] -** The [SQLITE_FCNTL_CHUNK_SIZE] opcode is used to request that the VFS -** extends and truncates the database file in chunks of a size specified -** by the user. The fourth argument to [sqlite3_file_control()] should -** point to an integer (type int) containing the new chunk-size to use -** for the nominated database. Allocating database file space in large -** chunks (say 1MB at a time), may reduce file-system fragmentation and -** improve performance on some systems. -** -**
    • [[SQLITE_FCNTL_FILE_POINTER]] -** The [SQLITE_FCNTL_FILE_POINTER] opcode is used to obtain a pointer -** to the [sqlite3_file] object associated with a particular database -** connection. See the [sqlite3_file_control()] documentation for -** additional information. -** -**
    • [[SQLITE_FCNTL_SYNC_OMITTED]] -** No longer in use. -** -**
    • [[SQLITE_FCNTL_SYNC]] -** The [SQLITE_FCNTL_SYNC] opcode is generated internally by SQLite and -** sent to the VFS immediately before the xSync method is invoked on a -** database file descriptor. Or, if the xSync method is not invoked -** because the user has configured SQLite with -** [PRAGMA synchronous | PRAGMA synchronous=OFF] it is invoked in place -** of the xSync method. In most cases, the pointer argument passed with -** this file-control is NULL. However, if the database file is being synced -** as part of a multi-database commit, the argument points to a nul-terminated -** string containing the transactions master-journal file name. VFSes that -** do not need this signal should silently ignore this opcode. Applications -** should not call [sqlite3_file_control()] with this opcode as doing so may -** disrupt the operation of the specialized VFSes that do require it. -** -**
    • [[SQLITE_FCNTL_COMMIT_PHASETWO]] -** The [SQLITE_FCNTL_COMMIT_PHASETWO] opcode is generated internally by SQLite -** and sent to the VFS after a transaction has been committed immediately -** but before the database is unlocked. VFSes that do not need this signal -** should silently ignore this opcode. Applications should not call -** [sqlite3_file_control()] with this opcode as doing so may disrupt the -** operation of the specialized VFSes that do require it. -** -**
    • [[SQLITE_FCNTL_WIN32_AV_RETRY]] -** ^The [SQLITE_FCNTL_WIN32_AV_RETRY] opcode is used to configure automatic -** retry counts and intervals for certain disk I/O operations for the -** windows [VFS] in order to provide robustness in the presence of -** anti-virus programs. By default, the windows VFS will retry file read, -** file write, and file delete operations up to 10 times, with a delay -** of 25 milliseconds before the first retry and with the delay increasing -** by an additional 25 milliseconds with each subsequent retry. This -** opcode allows these two values (10 retries and 25 milliseconds of delay) -** to be adjusted. The values are changed for all database connections -** within the same process. The argument is a pointer to an array of two -** integers where the first integer i the new retry count and the second -** integer is the delay. If either integer is negative, then the setting -** is not changed but instead the prior value of that setting is written -** into the array entry, allowing the current retry settings to be -** interrogated. The zDbName parameter is ignored. -** -**
    • [[SQLITE_FCNTL_PERSIST_WAL]] -** ^The [SQLITE_FCNTL_PERSIST_WAL] opcode is used to set or query the -** persistent [WAL | Write Ahead Log] setting. By default, the auxiliary -** write ahead log and shared memory files used for transaction control -** are automatically deleted when the latest connection to the database -** closes. Setting persistent WAL mode causes those files to persist after -** close. Persisting the files is useful when other processes that do not -** have write permission on the directory containing the database file want -** to read the database file, as the WAL and shared memory files must exist -** in order for the database to be readable. The fourth parameter to -** [sqlite3_file_control()] for this opcode should be a pointer to an integer. -** That integer is 0 to disable persistent WAL mode or 1 to enable persistent -** WAL mode. If the integer is -1, then it is overwritten with the current -** WAL persistence setting. -** -**
    • [[SQLITE_FCNTL_POWERSAFE_OVERWRITE]] -** ^The [SQLITE_FCNTL_POWERSAFE_OVERWRITE] opcode is used to set or query the -** persistent "powersafe-overwrite" or "PSOW" setting. The PSOW setting -** determines the [SQLITE_IOCAP_POWERSAFE_OVERWRITE] bit of the -** xDeviceCharacteristics methods. The fourth parameter to -** [sqlite3_file_control()] for this opcode should be a pointer to an integer. -** That integer is 0 to disable zero-damage mode or 1 to enable zero-damage -** mode. If the integer is -1, then it is overwritten with the current -** zero-damage mode setting. -** -**
    • [[SQLITE_FCNTL_OVERWRITE]] -** ^The [SQLITE_FCNTL_OVERWRITE] opcode is invoked by SQLite after opening -** a write transaction to indicate that, unless it is rolled back for some -** reason, the entire database file will be overwritten by the current -** transaction. This is used by VACUUM operations. -** -**
    • [[SQLITE_FCNTL_VFSNAME]] -** ^The [SQLITE_FCNTL_VFSNAME] opcode can be used to obtain the names of -** all [VFSes] in the VFS stack. The names are of all VFS shims and the -** final bottom-level VFS are written into memory obtained from -** [sqlite3_malloc()] and the result is stored in the char* variable -** that the fourth parameter of [sqlite3_file_control()] points to. -** The caller is responsible for freeing the memory when done. As with -** all file-control actions, there is no guarantee that this will actually -** do anything. Callers should initialize the char* variable to a NULL -** pointer in case this file-control is not implemented. This file-control -** is intended for diagnostic use only. -** -**
    • [[SQLITE_FCNTL_PRAGMA]] -** ^Whenever a [PRAGMA] statement is parsed, an [SQLITE_FCNTL_PRAGMA] -** file control is sent to the open [sqlite3_file] object corresponding -** to the database file to which the pragma statement refers. ^The argument -** to the [SQLITE_FCNTL_PRAGMA] file control is an array of -** pointers to strings (char**) in which the second element of the array -** is the name of the pragma and the third element is the argument to the -** pragma or NULL if the pragma has no argument. ^The handler for an -** [SQLITE_FCNTL_PRAGMA] file control can optionally make the first element -** of the char** argument point to a string obtained from [sqlite3_mprintf()] -** or the equivalent and that string will become the result of the pragma or -** the error message if the pragma fails. ^If the -** [SQLITE_FCNTL_PRAGMA] file control returns [SQLITE_NOTFOUND], then normal -** [PRAGMA] processing continues. ^If the [SQLITE_FCNTL_PRAGMA] -** file control returns [SQLITE_OK], then the parser assumes that the -** VFS has handled the PRAGMA itself and the parser generates a no-op -** prepared statement. ^If the [SQLITE_FCNTL_PRAGMA] file control returns -** any result code other than [SQLITE_OK] or [SQLITE_NOTFOUND], that means -** that the VFS encountered an error while handling the [PRAGMA] and the -** compilation of the PRAGMA fails with an error. ^The [SQLITE_FCNTL_PRAGMA] -** file control occurs at the beginning of pragma statement analysis and so -** it is able to override built-in [PRAGMA] statements. -** -**
    • [[SQLITE_FCNTL_BUSYHANDLER]] -** ^The [SQLITE_FCNTL_BUSYHANDLER] -** file-control may be invoked by SQLite on the database file handle -** shortly after it is opened in order to provide a custom VFS with access -** to the connections busy-handler callback. The argument is of type (void **) -** - an array of two (void *) values. The first (void *) actually points -** to a function of type (int (*)(void *)). In order to invoke the connections -** busy-handler, this function should be invoked with the second (void *) in -** the array as the only argument. If it returns non-zero, then the operation -** should be retried. If it returns zero, the custom VFS should abandon the -** current operation. -** -**
    • [[SQLITE_FCNTL_TEMPFILENAME]] -** ^Application can invoke the [SQLITE_FCNTL_TEMPFILENAME] file-control -** to have SQLite generate a -** temporary filename using the same algorithm that is followed to generate -** temporary filenames for TEMP tables and other internal uses. The -** argument should be a char** which will be filled with the filename -** written into memory obtained from [sqlite3_malloc()]. The caller should -** invoke [sqlite3_free()] on the result to avoid a memory leak. -** -**
    • [[SQLITE_FCNTL_MMAP_SIZE]] -** The [SQLITE_FCNTL_MMAP_SIZE] file control is used to query or set the -** maximum number of bytes that will be used for memory-mapped I/O. -** The argument is a pointer to a value of type sqlite3_int64 that -** is an advisory maximum number of bytes in the file to memory map. The -** pointer is overwritten with the old value. The limit is not changed if -** the value originally pointed to is negative, and so the current limit -** can be queried by passing in a pointer to a negative number. This -** file-control is used internally to implement [PRAGMA mmap_size]. -** -**
    • [[SQLITE_FCNTL_TRACE]] -** The [SQLITE_FCNTL_TRACE] file control provides advisory information -** to the VFS about what the higher layers of the SQLite stack are doing. -** This file control is used by some VFS activity tracing [shims]. -** The argument is a zero-terminated string. Higher layers in the -** SQLite stack may generate instances of this file control if -** the [SQLITE_USE_FCNTL_TRACE] compile-time option is enabled. -** -**
    • [[SQLITE_FCNTL_HAS_MOVED]] -** The [SQLITE_FCNTL_HAS_MOVED] file control interprets its argument as a -** pointer to an integer and it writes a boolean into that integer depending -** on whether or not the file has been renamed, moved, or deleted since it -** was first opened. -** -**
    • [[SQLITE_FCNTL_WIN32_SET_HANDLE]] -** The [SQLITE_FCNTL_WIN32_SET_HANDLE] opcode is used for debugging. This -** opcode causes the xFileControl method to swap the file handle with the one -** pointed to by the pArg argument. This capability is used during testing -** and only needs to be supported when SQLITE_TEST is defined. -** -**
    -*/ -#define SQLITE_FCNTL_LOCKSTATE 1 -#define SQLITE_GET_LOCKPROXYFILE 2 -#define SQLITE_SET_LOCKPROXYFILE 3 -#define SQLITE_LAST_ERRNO 4 -#define SQLITE_FCNTL_SIZE_HINT 5 -#define SQLITE_FCNTL_CHUNK_SIZE 6 -#define SQLITE_FCNTL_FILE_POINTER 7 -#define SQLITE_FCNTL_SYNC_OMITTED 8 -#define SQLITE_FCNTL_WIN32_AV_RETRY 9 -#define SQLITE_FCNTL_PERSIST_WAL 10 -#define SQLITE_FCNTL_OVERWRITE 11 -#define SQLITE_FCNTL_VFSNAME 12 -#define SQLITE_FCNTL_POWERSAFE_OVERWRITE 13 -#define SQLITE_FCNTL_PRAGMA 14 -#define SQLITE_FCNTL_BUSYHANDLER 15 -#define SQLITE_FCNTL_TEMPFILENAME 16 -#define SQLITE_FCNTL_MMAP_SIZE 18 -#define SQLITE_FCNTL_TRACE 19 -#define SQLITE_FCNTL_HAS_MOVED 20 -#define SQLITE_FCNTL_SYNC 21 -#define SQLITE_FCNTL_COMMIT_PHASETWO 22 -#define SQLITE_FCNTL_WIN32_SET_HANDLE 23 - -/* -** CAPI3REF: Mutex Handle -** -** The mutex module within SQLite defines [sqlite3_mutex] to be an -** abstract type for a mutex object. The SQLite core never looks -** at the internal representation of an [sqlite3_mutex]. It only -** deals with pointers to the [sqlite3_mutex] object. -** -** Mutexes are created using [sqlite3_mutex_alloc()]. -*/ -typedef struct sqlite3_mutex sqlite3_mutex; - -/* -** CAPI3REF: OS Interface Object -** -** An instance of the sqlite3_vfs object defines the interface between -** the SQLite core and the underlying operating system. The "vfs" -** in the name of the object stands for "virtual file system". See -** the [VFS | VFS documentation] for further information. -** -** The value of the iVersion field is initially 1 but may be larger in -** future versions of SQLite. Additional fields may be appended to this -** object when the iVersion value is increased. Note that the structure -** of the sqlite3_vfs object changes in the transaction between -** SQLite version 3.5.9 and 3.6.0 and yet the iVersion field was not -** modified. -** -** The szOsFile field is the size of the subclassed [sqlite3_file] -** structure used by this VFS. mxPathname is the maximum length of -** a pathname in this VFS. -** -** Registered sqlite3_vfs objects are kept on a linked list formed by -** the pNext pointer. The [sqlite3_vfs_register()] -** and [sqlite3_vfs_unregister()] interfaces manage this list -** in a thread-safe way. The [sqlite3_vfs_find()] interface -** searches the list. Neither the application code nor the VFS -** implementation should use the pNext pointer. -** -** The pNext field is the only field in the sqlite3_vfs -** structure that SQLite will ever modify. SQLite will only access -** or modify this field while holding a particular static mutex. -** The application should never modify anything within the sqlite3_vfs -** object once the object has been registered. -** -** The zName field holds the name of the VFS module. The name must -** be unique across all VFS modules. -** -** [[sqlite3_vfs.xOpen]] -** ^SQLite guarantees that the zFilename parameter to xOpen -** is either a NULL pointer or string obtained -** from xFullPathname() with an optional suffix added. -** ^If a suffix is added to the zFilename parameter, it will -** consist of a single "-" character followed by no more than -** 11 alphanumeric and/or "-" characters. -** ^SQLite further guarantees that -** the string will be valid and unchanged until xClose() is -** called. Because of the previous sentence, -** the [sqlite3_file] can safely store a pointer to the -** filename if it needs to remember the filename for some reason. 
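A short sketch of sqlite3_file_control() with a few of the opcodes defined above, assuming a file-backed database; "main" names the primary database:

    /* Illustrative only: querying file-control settings on the "main" database. */
    #include <stdio.h>
    #include <sqlite3.h>

    int main(void){
      sqlite3 *db;
      int iPersist = -1;           /* -1 asks PERSIST_WAL to report, not set */
      sqlite3_int64 szMmap = -1;   /* negative asks MMAP_SIZE to report, not set */
      char *zVfs = 0;              /* filled in by VFSNAME; caller must free */

      if( sqlite3_open("example.db", &db)!=SQLITE_OK ) return 1;

      sqlite3_file_control(db, "main", SQLITE_FCNTL_PERSIST_WAL, &iPersist);
      sqlite3_file_control(db, "main", SQLITE_FCNTL_MMAP_SIZE, &szMmap);
      if( sqlite3_file_control(db, "main", SQLITE_FCNTL_VFSNAME, &zVfs)==SQLITE_OK
       && zVfs ){
        printf("vfs stack: %s\n", zVfs);
        sqlite3_free(zVfs);
      }
      printf("persist-wal=%d mmap-limit=%lld\n", iPersist, (long long)szMmap);

      sqlite3_close(db);
      return 0;
    }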
-** If the zFilename parameter to xOpen is a NULL pointer then xOpen -** must invent its own temporary name for the file. ^Whenever the -** xFilename parameter is NULL it will also be the case that the -** flags parameter will include [SQLITE_OPEN_DELETEONCLOSE]. -** -** The flags argument to xOpen() includes all bits set in -** the flags argument to [sqlite3_open_v2()]. Or if [sqlite3_open()] -** or [sqlite3_open16()] is used, then flags includes at least -** [SQLITE_OPEN_READWRITE] | [SQLITE_OPEN_CREATE]. -** If xOpen() opens a file read-only then it sets *pOutFlags to -** include [SQLITE_OPEN_READONLY]. Other bits in *pOutFlags may be set. -** -** ^(SQLite will also add one of the following flags to the xOpen() -** call, depending on the object being opened: -** -**
      -**
    • [SQLITE_OPEN_MAIN_DB] -**
    • [SQLITE_OPEN_MAIN_JOURNAL] -**
    • [SQLITE_OPEN_TEMP_DB] -**
    • [SQLITE_OPEN_TEMP_JOURNAL] -**
    • [SQLITE_OPEN_TRANSIENT_DB] -**
    • [SQLITE_OPEN_SUBJOURNAL] -**
    • [SQLITE_OPEN_MASTER_JOURNAL] -**
    • [SQLITE_OPEN_WAL] -**
    )^ -** -** The file I/O implementation can use the object type flags to -** change the way it deals with files. For example, an application -** that does not care about crash recovery or rollback might make -** the open of a journal file a no-op. Writes to this journal would -** also be no-ops, and any attempt to read the journal would return -** SQLITE_IOERR. Or the implementation might recognize that a database -** file will be doing page-aligned sector reads and writes in a random -** order and set up its I/O subsystem accordingly. -** -** SQLite might also add one of the following flags to the xOpen method: -** -**
      -**
    • [SQLITE_OPEN_DELETEONCLOSE] -**
    • [SQLITE_OPEN_EXCLUSIVE] -**
    -** -** The [SQLITE_OPEN_DELETEONCLOSE] flag means the file should be -** deleted when it is closed. ^The [SQLITE_OPEN_DELETEONCLOSE] -** will be set for TEMP databases and their journals, transient -** databases, and subjournals. -** -** ^The [SQLITE_OPEN_EXCLUSIVE] flag is always used in conjunction -** with the [SQLITE_OPEN_CREATE] flag, which are both directly -** analogous to the O_EXCL and O_CREAT flags of the POSIX open() -** API. The SQLITE_OPEN_EXCLUSIVE flag, when paired with the -** SQLITE_OPEN_CREATE, is used to indicate that file should always -** be created, and that it is an error if it already exists. -** It is not used to indicate the file should be opened -** for exclusive access. -** -** ^At least szOsFile bytes of memory are allocated by SQLite -** to hold the [sqlite3_file] structure passed as the third -** argument to xOpen. The xOpen method does not have to -** allocate the structure; it should just fill it in. Note that -** the xOpen method must set the sqlite3_file.pMethods to either -** a valid [sqlite3_io_methods] object or to NULL. xOpen must do -** this even if the open fails. SQLite expects that the sqlite3_file.pMethods -** element will be valid after xOpen returns regardless of the success -** or failure of the xOpen call. -** -** [[sqlite3_vfs.xAccess]] -** ^The flags argument to xAccess() may be [SQLITE_ACCESS_EXISTS] -** to test for the existence of a file, or [SQLITE_ACCESS_READWRITE] to -** test whether a file is readable and writable, or [SQLITE_ACCESS_READ] -** to test whether a file is at least readable. The file can be a -** directory. -** -** ^SQLite will always allocate at least mxPathname+1 bytes for the -** output buffer xFullPathname. The exact size of the output buffer -** is also passed as a parameter to both methods. If the output buffer -** is not large enough, [SQLITE_CANTOPEN] should be returned. Since this is -** handled as a fatal error by SQLite, vfs implementations should endeavor -** to prevent this by setting mxPathname to a sufficiently large value. -** -** The xRandomness(), xSleep(), xCurrentTime(), and xCurrentTimeInt64() -** interfaces are not strictly a part of the filesystem, but they are -** included in the VFS structure for completeness. -** The xRandomness() function attempts to return nBytes bytes -** of good-quality randomness into zOut. The return value is -** the actual number of bytes of randomness obtained. -** The xSleep() method causes the calling thread to sleep for at -** least the number of microseconds given. ^The xCurrentTime() -** method returns a Julian Day Number for the current date and time as -** a floating point value. -** ^The xCurrentTimeInt64() method returns, as an integer, the Julian -** Day Number multiplied by 86400000 (the number of milliseconds in -** a 24-hour day). -** ^SQLite will use the xCurrentTimeInt64() method to get the current -** date and time if that method is available (if iVersion is 2 or -** greater and the function pointer is not NULL) and will fall back -** to xCurrentTime() if xCurrentTimeInt64() is unavailable. -** -** ^The xSetSystemCall(), xGetSystemCall(), and xNestSystemCall() interfaces -** are not used by the SQLite core. These optional interfaces are provided -** by some VFSes to facilitate testing of the VFS code. By overriding -** system calls with functions under its control, a test program can -** simulate faults and error conditions that would otherwise be difficult -** or impossible to induce. 
The set of system calls that can be overridden -** varies from one VFS to another, and from one version of the same VFS to the -** next. Applications that use these interfaces must be prepared for any -** or all of these interfaces to be NULL or for their behavior to change -** from one release to the next. Applications must not attempt to access -** any of these methods if the iVersion of the VFS is less than 3. -*/ -typedef struct sqlite3_vfs sqlite3_vfs; -typedef void (*sqlite3_syscall_ptr)(void); -struct sqlite3_vfs { - int iVersion; /* Structure version number (currently 3) */ - int szOsFile; /* Size of subclassed sqlite3_file */ - int mxPathname; /* Maximum file pathname length */ - sqlite3_vfs *pNext; /* Next registered VFS */ - const char *zName; /* Name of this virtual file system */ - void *pAppData; /* Pointer to application-specific data */ - int (*xOpen)(sqlite3_vfs*, const char *zName, sqlite3_file*, - int flags, int *pOutFlags); - int (*xDelete)(sqlite3_vfs*, const char *zName, int syncDir); - int (*xAccess)(sqlite3_vfs*, const char *zName, int flags, int *pResOut); - int (*xFullPathname)(sqlite3_vfs*, const char *zName, int nOut, char *zOut); - void *(*xDlOpen)(sqlite3_vfs*, const char *zFilename); - void (*xDlError)(sqlite3_vfs*, int nByte, char *zErrMsg); - void (*(*xDlSym)(sqlite3_vfs*,void*, const char *zSymbol))(void); - void (*xDlClose)(sqlite3_vfs*, void*); - int (*xRandomness)(sqlite3_vfs*, int nByte, char *zOut); - int (*xSleep)(sqlite3_vfs*, int microseconds); - int (*xCurrentTime)(sqlite3_vfs*, double*); - int (*xGetLastError)(sqlite3_vfs*, int, char *); - /* - ** The methods above are in version 1 of the sqlite_vfs object - ** definition. Those that follow are added in version 2 or later - */ - int (*xCurrentTimeInt64)(sqlite3_vfs*, sqlite3_int64*); - /* - ** The methods above are in versions 1 and 2 of the sqlite_vfs object. - ** Those below are for version 3 and greater. - */ - int (*xSetSystemCall)(sqlite3_vfs*, const char *zName, sqlite3_syscall_ptr); - sqlite3_syscall_ptr (*xGetSystemCall)(sqlite3_vfs*, const char *zName); - const char *(*xNextSystemCall)(sqlite3_vfs*, const char *zName); - /* - ** The methods above are in versions 1 through 3 of the sqlite_vfs object. - ** New fields may be appended in figure versions. The iVersion - ** value will increment whenever this happens. - */ -}; - -/* -** CAPI3REF: Flags for the xAccess VFS method -** -** These integer constants can be used as the third parameter to -** the xAccess method of an [sqlite3_vfs] object. They determine -** what kind of permissions the xAccess method is looking for. -** With SQLITE_ACCESS_EXISTS, the xAccess method -** simply checks whether the file exists. -** With SQLITE_ACCESS_READWRITE, the xAccess method -** checks whether the named directory is both readable and writable -** (in other words, if files can be added, removed, and renamed within -** the directory). -** The SQLITE_ACCESS_READWRITE constant is currently used only by the -** [temp_store_directory pragma], though this could change in a future -** release of SQLite. -** With SQLITE_ACCESS_READ, the xAccess method -** checks whether the file is readable. The SQLITE_ACCESS_READ constant is -** currently unused, though it might be used in a future release of -** SQLite. 
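A minimal sketch, assuming only the sqlite3_vfs structure and registration interfaces described above, of cloning the default VFS under a new name and registering the clone; the "demo-vfs" name and the helper function are illustrative, not part of the interface:

#include "sqlite3.h"

static sqlite3_vfs demo_vfs;                      /* storage for the cloned VFS */

int demo_register_vfs(void){
  sqlite3_vfs *pDefault = sqlite3_vfs_find(0);    /* 0 selects the default VFS */
  if( pDefault==0 ) return SQLITE_ERROR;
  demo_vfs = *pDefault;                           /* inherit all methods from the default */
  demo_vfs.zName = "demo-vfs";                    /* zName must be unique across VFS modules */
  demo_vfs.pNext = 0;                             /* the registration list is managed by SQLite */
  return sqlite3_vfs_register(&demo_vfs, 0);      /* 0 = do not make it the default */
}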
-*/ -#define SQLITE_ACCESS_EXISTS 0 -#define SQLITE_ACCESS_READWRITE 1 /* Used by PRAGMA temp_store_directory */ -#define SQLITE_ACCESS_READ 2 /* Unused */ - -/* -** CAPI3REF: Flags for the xShmLock VFS method -** -** These integer constants define the various locking operations -** allowed by the xShmLock method of [sqlite3_io_methods]. The -** following are the only legal combinations of flags to the -** xShmLock method: -** -**
      -**
    • SQLITE_SHM_LOCK | SQLITE_SHM_SHARED -**
    • SQLITE_SHM_LOCK | SQLITE_SHM_EXCLUSIVE -**
    • SQLITE_SHM_UNLOCK | SQLITE_SHM_SHARED -**
    • SQLITE_SHM_UNLOCK | SQLITE_SHM_EXCLUSIVE -**
    -** -** When unlocking, the same SHARED or EXCLUSIVE flag must be supplied as -** was given no the corresponding lock. -** -** The xShmLock method can transition between unlocked and SHARED or -** between unlocked and EXCLUSIVE. It cannot transition between SHARED -** and EXCLUSIVE. -*/ -#define SQLITE_SHM_UNLOCK 1 -#define SQLITE_SHM_LOCK 2 -#define SQLITE_SHM_SHARED 4 -#define SQLITE_SHM_EXCLUSIVE 8 - -/* -** CAPI3REF: Maximum xShmLock index -** -** The xShmLock method on [sqlite3_io_methods] may use values -** between 0 and this upper bound as its "offset" argument. -** The SQLite core will never attempt to acquire or release a -** lock outside of this range -*/ -#define SQLITE_SHM_NLOCK 8 - - -/* -** CAPI3REF: Initialize The SQLite Library -** -** ^The sqlite3_initialize() routine initializes the -** SQLite library. ^The sqlite3_shutdown() routine -** deallocates any resources that were allocated by sqlite3_initialize(). -** These routines are designed to aid in process initialization and -** shutdown on embedded systems. Workstation applications using -** SQLite normally do not need to invoke either of these routines. -** -** A call to sqlite3_initialize() is an "effective" call if it is -** the first time sqlite3_initialize() is invoked during the lifetime of -** the process, or if it is the first time sqlite3_initialize() is invoked -** following a call to sqlite3_shutdown(). ^(Only an effective call -** of sqlite3_initialize() does any initialization. All other calls -** are harmless no-ops.)^ -** -** A call to sqlite3_shutdown() is an "effective" call if it is the first -** call to sqlite3_shutdown() since the last sqlite3_initialize(). ^(Only -** an effective call to sqlite3_shutdown() does any deinitialization. -** All other valid calls to sqlite3_shutdown() are harmless no-ops.)^ -** -** The sqlite3_initialize() interface is threadsafe, but sqlite3_shutdown() -** is not. The sqlite3_shutdown() interface must only be called from a -** single thread. All open [database connections] must be closed and all -** other SQLite resources must be deallocated prior to invoking -** sqlite3_shutdown(). -** -** Among other things, ^sqlite3_initialize() will invoke -** sqlite3_os_init(). Similarly, ^sqlite3_shutdown() -** will invoke sqlite3_os_end(). -** -** ^The sqlite3_initialize() routine returns [SQLITE_OK] on success. -** ^If for some reason, sqlite3_initialize() is unable to initialize -** the library (perhaps it is unable to allocate a needed resource such -** as a mutex) it returns an [error code] other than [SQLITE_OK]. -** -** ^The sqlite3_initialize() routine is called internally by many other -** SQLite interfaces so that an application usually does not need to -** invoke sqlite3_initialize() directly. For example, [sqlite3_open()] -** calls sqlite3_initialize() so the SQLite library will be automatically -** initialized when [sqlite3_open()] is called if it has not be initialized -** already. ^However, if SQLite is compiled with the [SQLITE_OMIT_AUTOINIT] -** compile-time option, then the automatic calls to sqlite3_initialize() -** are omitted and the application must call sqlite3_initialize() directly -** prior to using any other SQLite interface. For maximum portability, -** it is recommended that applications always invoke sqlite3_initialize() -** directly prior to using any other SQLite interface. Future releases -** of SQLite may require this. 
In other words, the behavior exhibited -** when SQLite is compiled with [SQLITE_OMIT_AUTOINIT] might become the -** default behavior in some future release of SQLite. -** -** The sqlite3_os_init() routine does operating-system specific -** initialization of the SQLite library. The sqlite3_os_end() -** routine undoes the effect of sqlite3_os_init(). Typical tasks -** performed by these routines include allocation or deallocation -** of static resources, initialization of global variables, -** setting up a default [sqlite3_vfs] module, or setting up -** a default configuration using [sqlite3_config()]. -** -** The application should never invoke either sqlite3_os_init() -** or sqlite3_os_end() directly. The application should only invoke -** sqlite3_initialize() and sqlite3_shutdown(). The sqlite3_os_init() -** interface is called automatically by sqlite3_initialize() and -** sqlite3_os_end() is called by sqlite3_shutdown(). Appropriate -** implementations for sqlite3_os_init() and sqlite3_os_end() -** are built into SQLite when it is compiled for Unix, Windows, or OS/2. -** When [custom builds | built for other platforms] -** (using the [SQLITE_OS_OTHER=1] compile-time -** option) the application must supply a suitable implementation for -** sqlite3_os_init() and sqlite3_os_end(). An application-supplied -** implementation of sqlite3_os_init() or sqlite3_os_end() -** must return [SQLITE_OK] on success and some other [error code] upon -** failure. -*/ -SQLITE_API int sqlite3_initialize(void); -SQLITE_API int sqlite3_shutdown(void); -SQLITE_API int sqlite3_os_init(void); -SQLITE_API int sqlite3_os_end(void); - -/* -** CAPI3REF: Configuring The SQLite Library -** -** The sqlite3_config() interface is used to make global configuration -** changes to SQLite in order to tune SQLite to the specific needs of -** the application. The default configuration is recommended for most -** applications and so this routine is usually not necessary. It is -** provided to support rare applications with unusual needs. -** -** The sqlite3_config() interface is not threadsafe. The application -** must insure that no other SQLite interfaces are invoked by other -** threads while sqlite3_config() is running. Furthermore, sqlite3_config() -** may only be invoked prior to library initialization using -** [sqlite3_initialize()] or after shutdown by [sqlite3_shutdown()]. -** ^If sqlite3_config() is called after [sqlite3_initialize()] and before -** [sqlite3_shutdown()] then it will return SQLITE_MISUSE. -** Note, however, that ^sqlite3_config() can be called as part of the -** implementation of an application-defined [sqlite3_os_init()]. -** -** The first argument to sqlite3_config() is an integer -** [configuration option] that determines -** what property of SQLite is to be configured. Subsequent arguments -** vary depending on the [configuration option] -** in the first argument. -** -** ^When a configuration option is set, sqlite3_config() returns [SQLITE_OK]. -** ^If the option is unknown or SQLite is unable to set the option -** then this routine returns a non-zero [error code]. -*/ -SQLITE_API int sqlite3_config(int, ...); - -/* -** CAPI3REF: Configure database connections -** -** The sqlite3_db_config() interface is used to make configuration -** changes to a [database connection]. The interface is similar to -** [sqlite3_config()] except that the changes apply to a single -** [database connection] (specified in the first argument). -** -** The second argument to sqlite3_db_config(D,V,...) 
is the -** [SQLITE_DBCONFIG_LOOKASIDE | configuration verb] - an integer code -** that indicates what aspect of the [database connection] is being configured. -** Subsequent arguments vary depending on the configuration verb. -** -** ^Calls to sqlite3_db_config() return SQLITE_OK if and only if -** the call is considered successful. -*/ -SQLITE_API int sqlite3_db_config(sqlite3*, int op, ...); - -/* -** CAPI3REF: Memory Allocation Routines -** -** An instance of this object defines the interface between SQLite -** and low-level memory allocation routines. -** -** This object is used in only one place in the SQLite interface. -** A pointer to an instance of this object is the argument to -** [sqlite3_config()] when the configuration option is -** [SQLITE_CONFIG_MALLOC] or [SQLITE_CONFIG_GETMALLOC]. -** By creating an instance of this object -** and passing it to [sqlite3_config]([SQLITE_CONFIG_MALLOC]) -** during configuration, an application can specify an alternative -** memory allocation subsystem for SQLite to use for all of its -** dynamic memory needs. -** -** Note that SQLite comes with several [built-in memory allocators] -** that are perfectly adequate for the overwhelming majority of applications -** and that this object is only useful to a tiny minority of applications -** with specialized memory allocation requirements. This object is -** also used during testing of SQLite in order to specify an alternative -** memory allocator that simulates memory out-of-memory conditions in -** order to verify that SQLite recovers gracefully from such -** conditions. -** -** The xMalloc, xRealloc, and xFree methods must work like the -** malloc(), realloc() and free() functions from the standard C library. -** ^SQLite guarantees that the second argument to -** xRealloc is always a value returned by a prior call to xRoundup. -** -** xSize should return the allocated size of a memory allocation -** previously obtained from xMalloc or xRealloc. The allocated size -** is always at least as big as the requested size but may be larger. -** -** The xRoundup method returns what would be the allocated size of -** a memory allocation given a particular requested size. Most memory -** allocators round up memory allocations at least to the next multiple -** of 8. Some allocators round up to a larger multiple or to a power of 2. -** Every memory allocation request coming in through [sqlite3_malloc()] -** or [sqlite3_realloc()] first calls xRoundup. If xRoundup returns 0, -** that causes the corresponding memory allocation to fail. -** -** The xInit method initializes the memory allocator. For example, -** it might allocate any require mutexes or initialize internal data -** structures. The xShutdown method is invoked (indirectly) by -** [sqlite3_shutdown()] and should deallocate any resources acquired -** by xInit. The pAppData pointer is used as the only parameter to -** xInit and xShutdown. -** -** SQLite holds the [SQLITE_MUTEX_STATIC_MASTER] mutex when it invokes -** the xInit method, so the xInit method need not be threadsafe. The -** xShutdown method is only called from [sqlite3_shutdown()] so it does -** not need to be threadsafe either. For all other methods, SQLite -** holds the [SQLITE_MUTEX_STATIC_MEM] mutex as long as the -** [SQLITE_CONFIG_MEMSTATUS] configuration option is turned on (which -** it is by default) and so the methods are automatically serialized. 
-** However, if [SQLITE_CONFIG_MEMSTATUS] is disabled, then the other -** methods must be threadsafe or else make their own arrangements for -** serialization. -** -** SQLite will never invoke xInit() more than once without an intervening -** call to xShutdown(). -*/ -typedef struct sqlite3_mem_methods sqlite3_mem_methods; -struct sqlite3_mem_methods { - void *(*xMalloc)(int); /* Memory allocation function */ - void (*xFree)(void*); /* Free a prior allocation */ - void *(*xRealloc)(void*,int); /* Resize an allocation */ - int (*xSize)(void*); /* Return the size of an allocation */ - int (*xRoundup)(int); /* Round up request size to allocation size */ - int (*xInit)(void*); /* Initialize the memory allocator */ - void (*xShutdown)(void*); /* Deinitialize the memory allocator */ - void *pAppData; /* Argument to xInit() and xShutdown() */ -}; - -/* -** CAPI3REF: Configuration Options -** KEYWORDS: {configuration option} -** -** These constants are the available integer configuration options that -** can be passed as the first argument to the [sqlite3_config()] interface. -** -** New configuration options may be added in future releases of SQLite. -** Existing configuration options might be discontinued. Applications -** should check the return code from [sqlite3_config()] to make sure that -** the call worked. The [sqlite3_config()] interface will return a -** non-zero [error code] if a discontinued or unsupported configuration option -** is invoked. -** -**
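A minimal sketch of the calling pattern described above: sqlite3_config() may only run before sqlite3_initialize() (or after sqlite3_shutdown()) and every return code should be checked. It uses the SQLITE_CONFIG_SERIALIZED and SQLITE_CONFIG_LOG options documented below; the logger name is illustrative only:

#include <stdio.h>
#include "sqlite3.h"

static void demo_log(void *pArg, int iErrCode, const char *zMsg){
  (void)pArg;                                      /* context pointer supplied to SQLITE_CONFIG_LOG */
  fprintf(stderr, "sqlite3 (%d): %s\n", iErrCode, zMsg);
}

int demo_global_setup(void){
  int rc = sqlite3_config(SQLITE_CONFIG_SERIALIZED);      /* set the threading mode */
  if( rc!=SQLITE_OK ) return rc;                          /* e.g. fails in a SQLITE_THREADSAFE=0 build */
  rc = sqlite3_config(SQLITE_CONFIG_LOG, demo_log, (void*)0);
  if( rc!=SQLITE_OK ) return rc;
  return sqlite3_initialize();                            /* no further sqlite3_config() calls after this */
}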
    -** [[SQLITE_CONFIG_SINGLETHREAD]]
    SQLITE_CONFIG_SINGLETHREAD
    -**
    There are no arguments to this option. ^This option sets the -** [threading mode] to Single-thread. In other words, it disables -** all mutexing and puts SQLite into a mode where it can only be used -** by a single thread. ^If SQLite is compiled with -** the [SQLITE_THREADSAFE | SQLITE_THREADSAFE=0] compile-time option then -** it is not possible to change the [threading mode] from its default -** value of Single-thread and so [sqlite3_config()] will return -** [SQLITE_ERROR] if called with the SQLITE_CONFIG_SINGLETHREAD -** configuration option.
    -** -** [[SQLITE_CONFIG_MULTITHREAD]]
    SQLITE_CONFIG_MULTITHREAD
    -**
    There are no arguments to this option. ^This option sets the -** [threading mode] to Multi-thread. In other words, it disables -** mutexing on [database connection] and [prepared statement] objects. -** The application is responsible for serializing access to -** [database connections] and [prepared statements]. But other mutexes -** are enabled so that SQLite will be safe to use in a multi-threaded -** environment as long as no two threads attempt to use the same -** [database connection] at the same time. ^If SQLite is compiled with -** the [SQLITE_THREADSAFE | SQLITE_THREADSAFE=0] compile-time option then -** it is not possible to set the Multi-thread [threading mode] and -** [sqlite3_config()] will return [SQLITE_ERROR] if called with the -** SQLITE_CONFIG_MULTITHREAD configuration option.
    -** -** [[SQLITE_CONFIG_SERIALIZED]]
    SQLITE_CONFIG_SERIALIZED
    -**
    There are no arguments to this option. ^This option sets the -** [threading mode] to Serialized. In other words, this option enables -** all mutexes including the recursive -** mutexes on [database connection] and [prepared statement] objects. -** In this mode (which is the default when SQLite is compiled with -** [SQLITE_THREADSAFE=1]) the SQLite library will itself serialize access -** to [database connections] and [prepared statements] so that the -** application is free to use the same [database connection] or the -** same [prepared statement] in different threads at the same time. -** ^If SQLite is compiled with -** the [SQLITE_THREADSAFE | SQLITE_THREADSAFE=0] compile-time option then -** it is not possible to set the Serialized [threading mode] and -** [sqlite3_config()] will return [SQLITE_ERROR] if called with the -** SQLITE_CONFIG_SERIALIZED configuration option.
    -** -** [[SQLITE_CONFIG_MALLOC]]
    SQLITE_CONFIG_MALLOC
    -**
    ^(This option takes a single argument which is a pointer to an -** instance of the [sqlite3_mem_methods] structure. The argument specifies -** alternative low-level memory allocation routines to be used in place of -** the memory allocation routines built into SQLite.)^ ^SQLite makes -** its own private copy of the content of the [sqlite3_mem_methods] structure -** before the [sqlite3_config()] call returns.
    -** -** [[SQLITE_CONFIG_GETMALLOC]]
    SQLITE_CONFIG_GETMALLOC
    -**
^(This option takes a single argument which is a pointer to an -** instance of the [sqlite3_mem_methods] structure. The [sqlite3_mem_methods] -** structure is filled with the currently defined memory allocation routines.)^ -** This option can be used to overload the default memory allocation -** routines with a wrapper that simulates memory allocation failure or -** tracks memory usage, for example.
    -** -** [[SQLITE_CONFIG_MEMSTATUS]]
    SQLITE_CONFIG_MEMSTATUS
    -**
^This option takes a single argument of type int, interpreted as a -** boolean, which enables or disables the collection of memory allocation -** statistics. ^(When memory allocation statistics are disabled, the -** following SQLite interfaces become non-operational: -**
      -**
    • [sqlite3_memory_used()] -**
    • [sqlite3_memory_highwater()] -**
    • [sqlite3_soft_heap_limit64()] -**
    • [sqlite3_status()] -**
    )^ -** ^Memory allocation statistics are enabled by default unless SQLite is -** compiled with [SQLITE_DEFAULT_MEMSTATUS]=0 in which case memory -** allocation statistics are disabled by default. -**
    -** -** [[SQLITE_CONFIG_SCRATCH]]
    SQLITE_CONFIG_SCRATCH
    -**
^This option specifies a static memory buffer that SQLite can use for -** scratch memory. There are three arguments: A pointer to an 8-byte -** aligned memory buffer from which the scratch allocations will be -** drawn, the size of each scratch allocation (sz), -** and the maximum number of scratch allocations (N). The sz -** argument must be a multiple of 16. -** The first argument must be a pointer to an 8-byte aligned buffer -** of at least sz*N bytes of memory. -** ^SQLite will use no more than two scratch buffers per thread. So -** N should be set to twice the expected maximum number of threads. -** ^SQLite will never require a scratch buffer that is more than 6 -** times the database page size. ^If SQLite needs additional -** scratch memory beyond what is provided by this configuration option, then -** [sqlite3_malloc()] will be used to obtain the memory needed.
    -** -** [[SQLITE_CONFIG_PAGECACHE]]
    SQLITE_CONFIG_PAGECACHE
    -**
^This option specifies a static memory buffer that SQLite can use for -** the database page cache with the default page cache implementation. -** This configuration should not be used if an application-defined page -** cache implementation is loaded using the SQLITE_CONFIG_PCACHE2 option. -** There are three arguments to this option: A pointer to 8-byte aligned -** memory, the size of each page buffer (sz), and the number of pages (N). -** The sz argument should be the size of the largest database page -** (a power of two between 512 and 32768) plus a little extra for each -** page header. ^The page header size is 20 to 40 bytes depending on -** the host architecture. ^It is harmless, apart from the wasted memory, -** to make sz a little too large. The first -** argument should point to an allocation of at least sz*N bytes of memory. -** ^SQLite will use the memory provided by the first argument to satisfy its -** memory needs for the first N pages that it adds to cache. ^If additional -** page cache memory is needed beyond what is provided by this option, then -** SQLite goes to [sqlite3_malloc()] for the additional storage space. -** The pointer in the first argument must -** be aligned to an 8-byte boundary or subsequent behavior of SQLite -** will be undefined.
    -** -** [[SQLITE_CONFIG_HEAP]]
    SQLITE_CONFIG_HEAP
    -**
^This option specifies a static memory buffer that SQLite will use -** for all of its dynamic memory allocation needs beyond those provided -** for by [SQLITE_CONFIG_SCRATCH] and [SQLITE_CONFIG_PAGECACHE]. -** There are three arguments: An 8-byte aligned pointer to the memory, -** the number of bytes in the memory buffer, and the minimum allocation size. -** ^If the first pointer (the memory pointer) is NULL, then SQLite reverts -** to using its default memory allocator (the system malloc() implementation), -** undoing any prior invocation of [SQLITE_CONFIG_MALLOC]. ^If the -** memory pointer is not NULL and either [SQLITE_ENABLE_MEMSYS3] or -** [SQLITE_ENABLE_MEMSYS5] are defined, then the alternative memory -** allocator is engaged to handle all of SQLite's memory allocation needs. -** The first pointer (the memory pointer) must be aligned to an 8-byte -** boundary or subsequent behavior of SQLite will be undefined. -** The minimum allocation size is capped at 2**12. Reasonable values -** for the minimum allocation size are 2**5 through 2**8.
    -** -** [[SQLITE_CONFIG_MUTEX]]
    SQLITE_CONFIG_MUTEX
    -**
^(This option takes a single argument which is a pointer to an -** instance of the [sqlite3_mutex_methods] structure. The argument specifies -** alternative low-level mutex routines to be used in place of -** the mutex routines built into SQLite.)^ ^SQLite makes a copy of the -** content of the [sqlite3_mutex_methods] structure before the call to -** [sqlite3_config()] returns. ^If SQLite is compiled with -** the [SQLITE_THREADSAFE | SQLITE_THREADSAFE=0] compile-time option then -** the entire mutexing subsystem is omitted from the build and hence calls to -** [sqlite3_config()] with the SQLITE_CONFIG_MUTEX configuration option will -** return [SQLITE_ERROR].
    -** -** [[SQLITE_CONFIG_GETMUTEX]]
    SQLITE_CONFIG_GETMUTEX
    -**
    ^(This option takes a single argument which is a pointer to an -** instance of the [sqlite3_mutex_methods] structure. The -** [sqlite3_mutex_methods] -** structure is filled with the currently defined mutex routines.)^ -** This option can be used to overload the default mutex allocation -** routines with a wrapper used to track mutex usage for performance -** profiling or testing, for example. ^If SQLite is compiled with -** the [SQLITE_THREADSAFE | SQLITE_THREADSAFE=0] compile-time option then -** the entire mutexing subsystem is omitted from the build and hence calls to -** [sqlite3_config()] with the SQLITE_CONFIG_GETMUTEX configuration option will -** return [SQLITE_ERROR].
    -** -** [[SQLITE_CONFIG_LOOKASIDE]]
    SQLITE_CONFIG_LOOKASIDE
    -**
    ^(This option takes two arguments that determine the default -** memory allocation for the lookaside memory allocator on each -** [database connection]. The first argument is the -** size of each lookaside buffer slot and the second is the number of -** slots allocated to each database connection.)^ ^(This option sets the -** default lookaside size. The [SQLITE_DBCONFIG_LOOKASIDE] -** verb to [sqlite3_db_config()] can be used to change the lookaside -** configuration on individual connections.)^
    -** -** [[SQLITE_CONFIG_PCACHE2]]
    SQLITE_CONFIG_PCACHE2
    -**
    ^(This option takes a single argument which is a pointer to -** an [sqlite3_pcache_methods2] object. This object specifies the interface -** to a custom page cache implementation.)^ ^SQLite makes a copy of the -** object and uses it for page cache memory allocations.
    -** -** [[SQLITE_CONFIG_GETPCACHE2]]
    SQLITE_CONFIG_GETPCACHE2
    -**
^(This option takes a single argument which is a pointer to an -** [sqlite3_pcache_methods2] object. SQLite copies the current -** page cache implementation into that object.)^
    -** -** [[SQLITE_CONFIG_LOG]]
    SQLITE_CONFIG_LOG
    -**
The SQLITE_CONFIG_LOG option is used to configure the SQLite -** global [error log]. -** (^The SQLITE_CONFIG_LOG option takes two arguments: a pointer to a -** function with a call signature of void(*)(void*,int,const char*), -** and a pointer to void. ^If the function pointer is not NULL, it is -** invoked by [sqlite3_log()] to process each logging event. ^If the -** function pointer is NULL, the [sqlite3_log()] interface becomes a no-op. -** ^The void pointer that is the second argument to SQLITE_CONFIG_LOG is -** passed through as the first parameter to the application-defined logger -** function whenever that function is invoked. ^The second parameter to -** the logger function is a copy of the first parameter to the corresponding -** [sqlite3_log()] call and is intended to be a [result code] or an -** [extended result code]. ^The third parameter passed to the logger is -** the log message after formatting via [sqlite3_snprintf()]. -** The SQLite logging interface is not reentrant; the logger function -** supplied by the application must not invoke any SQLite interface. -** In a multi-threaded application, the application-defined logger -** function must be threadsafe.
    -** -** [[SQLITE_CONFIG_URI]]
    SQLITE_CONFIG_URI -**
    ^(This option takes a single argument of type int. If non-zero, then -** URI handling is globally enabled. If the parameter is zero, then URI handling -** is globally disabled.)^ ^If URI handling is globally enabled, all filenames -** passed to [sqlite3_open()], [sqlite3_open_v2()], [sqlite3_open16()] or -** specified as part of [ATTACH] commands are interpreted as URIs, regardless -** of whether or not the [SQLITE_OPEN_URI] flag is set when the database -** connection is opened. ^If it is globally disabled, filenames are -** only interpreted as URIs if the SQLITE_OPEN_URI flag is set when the -** database connection is opened. ^(By default, URI handling is globally -** disabled. The default value may be changed by compiling with the -** [SQLITE_USE_URI] symbol defined.)^ -** -** [[SQLITE_CONFIG_COVERING_INDEX_SCAN]]
    SQLITE_CONFIG_COVERING_INDEX_SCAN -**
^This option takes a single integer argument which is interpreted as -** a boolean in order to enable or disable the use of covering indices for -** full table scans in the query optimizer. ^The default setting is determined -** by the [SQLITE_ALLOW_COVERING_INDEX_SCAN] compile-time option, or is "on" -** if that compile-time option is omitted. -** The ability to disable the use of covering indices for full table scans -** is provided because some incorrectly coded legacy applications might malfunction -** when the optimization is enabled. Providing the ability to -** disable the optimization allows the older, buggy application code to work -** without change even with newer versions of SQLite. -** -** [[SQLITE_CONFIG_PCACHE]] [[SQLITE_CONFIG_GETPCACHE]] -**
    SQLITE_CONFIG_PCACHE and SQLITE_CONFIG_GETPCACHE -**
    These options are obsolete and should not be used by new code. -** They are retained for backwards compatibility but are now no-ops. -**
    -** -** [[SQLITE_CONFIG_SQLLOG]] -**
    SQLITE_CONFIG_SQLLOG -**
This option is only available if SQLite is compiled with the -** [SQLITE_ENABLE_SQLLOG] pre-processor macro defined. The first argument should -** be a pointer to a function of type void(*)(void*,sqlite3*,const char*, int). -** The second should be of type (void*). The callback is invoked by the library -** in three separate circumstances, identified by the value passed as the -** fourth parameter. If the fourth parameter is 0, then the database connection -** passed as the second argument has just been opened. The third argument -** points to a buffer containing the name of the main database file. If the -** fourth parameter is 1, then the SQL statement that the third parameter -** points to has just been executed. Or, if the fourth parameter is 2, then -** the connection being passed as the second parameter is being closed. The -** third parameter is passed NULL in this case. An example of using this -** configuration option can be seen in the "test_sqllog.c" source file in -** the canonical SQLite source tree.
    -** -** [[SQLITE_CONFIG_MMAP_SIZE]] -**
    SQLITE_CONFIG_MMAP_SIZE -**
    ^SQLITE_CONFIG_MMAP_SIZE takes two 64-bit integer (sqlite3_int64) values -** that are the default mmap size limit (the default setting for -** [PRAGMA mmap_size]) and the maximum allowed mmap size limit. -** ^The default setting can be overridden by each database connection using -** either the [PRAGMA mmap_size] command, or by using the -** [SQLITE_FCNTL_MMAP_SIZE] file control. ^(The maximum allowed mmap size -** cannot be changed at run-time. Nor may the maximum allowed mmap size -** exceed the compile-time maximum mmap size set by the -** [SQLITE_MAX_MMAP_SIZE] compile-time option.)^ -** ^If either argument to this option is negative, then that argument is -** changed to its compile-time default. -** -** [[SQLITE_CONFIG_WIN32_HEAPSIZE]] -**
    SQLITE_CONFIG_WIN32_HEAPSIZE -**
    ^This option is only available if SQLite is compiled for Windows -** with the [SQLITE_WIN32_MALLOC] pre-processor macro defined. -** SQLITE_CONFIG_WIN32_HEAPSIZE takes a 32-bit unsigned integer value -** that specifies the maximum size of the created heap. -**
    -*/ -#define SQLITE_CONFIG_SINGLETHREAD 1 /* nil */ -#define SQLITE_CONFIG_MULTITHREAD 2 /* nil */ -#define SQLITE_CONFIG_SERIALIZED 3 /* nil */ -#define SQLITE_CONFIG_MALLOC 4 /* sqlite3_mem_methods* */ -#define SQLITE_CONFIG_GETMALLOC 5 /* sqlite3_mem_methods* */ -#define SQLITE_CONFIG_SCRATCH 6 /* void*, int sz, int N */ -#define SQLITE_CONFIG_PAGECACHE 7 /* void*, int sz, int N */ -#define SQLITE_CONFIG_HEAP 8 /* void*, int nByte, int min */ -#define SQLITE_CONFIG_MEMSTATUS 9 /* boolean */ -#define SQLITE_CONFIG_MUTEX 10 /* sqlite3_mutex_methods* */ -#define SQLITE_CONFIG_GETMUTEX 11 /* sqlite3_mutex_methods* */ -/* previously SQLITE_CONFIG_CHUNKALLOC 12 which is now unused. */ -#define SQLITE_CONFIG_LOOKASIDE 13 /* int int */ -#define SQLITE_CONFIG_PCACHE 14 /* no-op */ -#define SQLITE_CONFIG_GETPCACHE 15 /* no-op */ -#define SQLITE_CONFIG_LOG 16 /* xFunc, void* */ -#define SQLITE_CONFIG_URI 17 /* int */ -#define SQLITE_CONFIG_PCACHE2 18 /* sqlite3_pcache_methods2* */ -#define SQLITE_CONFIG_GETPCACHE2 19 /* sqlite3_pcache_methods2* */ -#define SQLITE_CONFIG_COVERING_INDEX_SCAN 20 /* int */ -#define SQLITE_CONFIG_SQLLOG 21 /* xSqllog, void* */ -#define SQLITE_CONFIG_MMAP_SIZE 22 /* sqlite3_int64, sqlite3_int64 */ -#define SQLITE_CONFIG_WIN32_HEAPSIZE 23 /* int nByte */ - -/* -** CAPI3REF: Database Connection Configuration Options -** -** These constants are the available integer configuration options that -** can be passed as the second argument to the [sqlite3_db_config()] interface. -** -** New configuration options may be added in future releases of SQLite. -** Existing configuration options might be discontinued. Applications -** should check the return code from [sqlite3_db_config()] to make sure that -** the call worked. ^The [sqlite3_db_config()] interface will return a -** non-zero [error code] if a discontinued or unsupported configuration option -** is invoked. -** -**
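A small sketch of the sqlite3_db_config() calling convention, using the SQLITE_DBCONFIG_ENABLE_FKEY verb described below; the second int* argument reports the resulting state and could also be passed as NULL if the caller does not need it:

#include "sqlite3.h"

int demo_enable_fkey(sqlite3 *db){
  int isOn = -1;
  int rc = sqlite3_db_config(db, SQLITE_DBCONFIG_ENABLE_FKEY, 1, &isOn);
  if( rc==SQLITE_OK && isOn==1 ){
    /* foreign key enforcement is now active on this connection only */
  }
  return rc;
}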
    -**
    SQLITE_DBCONFIG_LOOKASIDE
    -**
^This option takes three additional arguments that determine the -** [lookaside memory allocator] configuration for the [database connection]. -** ^The first argument (the third parameter to [sqlite3_db_config()]) is a -** pointer to a memory buffer to use for lookaside memory. -** ^The first argument after the SQLITE_DBCONFIG_LOOKASIDE verb -** may be NULL in which case SQLite will allocate the -** lookaside buffer itself using [sqlite3_malloc()]. ^The second argument is the -** size of each lookaside buffer slot. ^The third argument is the number of -** slots. The size of the buffer in the first argument must be greater than -** or equal to the product of the second and third arguments. The buffer -** must be aligned to an 8-byte boundary. ^If the second argument to -** SQLITE_DBCONFIG_LOOKASIDE is not a multiple of 8, it is internally -** rounded down to the next smaller multiple of 8. ^(The lookaside memory -** configuration for a database connection can only be changed when that -** connection is not currently using lookaside memory, or in other words -** when the "current value" returned by -** [sqlite3_db_status](D,[SQLITE_CONFIG_LOOKASIDE],...) is zero. -** Any attempt to change the lookaside memory configuration when lookaside -** memory is in use leaves the configuration unchanged and returns -** [SQLITE_BUSY].)^
    -** -**
    SQLITE_DBCONFIG_ENABLE_FKEY
    -**
    ^This option is used to enable or disable the enforcement of -** [foreign key constraints]. There should be two additional arguments. -** The first argument is an integer which is 0 to disable FK enforcement, -** positive to enable FK enforcement or negative to leave FK enforcement -** unchanged. The second parameter is a pointer to an integer into which -** is written 0 or 1 to indicate whether FK enforcement is off or on -** following this call. The second parameter may be a NULL pointer, in -** which case the FK enforcement setting is not reported back.
    -** -**
    SQLITE_DBCONFIG_ENABLE_TRIGGER
    -**
    ^This option is used to enable or disable [CREATE TRIGGER | triggers]. -** There should be two additional arguments. -** The first argument is an integer which is 0 to disable triggers, -** positive to enable triggers or negative to leave the setting unchanged. -** The second parameter is a pointer to an integer into which -** is written 0 or 1 to indicate whether triggers are disabled or enabled -** following this call. The second parameter may be a NULL pointer, in -** which case the trigger setting is not reported back.
    -** -**
    -*/ -#define SQLITE_DBCONFIG_LOOKASIDE 1001 /* void* int int */ -#define SQLITE_DBCONFIG_ENABLE_FKEY 1002 /* int int* */ -#define SQLITE_DBCONFIG_ENABLE_TRIGGER 1003 /* int int* */ - - -/* -** CAPI3REF: Enable Or Disable Extended Result Codes -** -** ^The sqlite3_extended_result_codes() routine enables or disables the -** [extended result codes] feature of SQLite. ^The extended result -** codes are disabled by default for historical compatibility. -*/ -SQLITE_API int sqlite3_extended_result_codes(sqlite3*, int onoff); - -/* -** CAPI3REF: Last Insert Rowid -** -** ^Each entry in most SQLite tables (except for [WITHOUT ROWID] tables) -** has a unique 64-bit signed -** integer key called the [ROWID | "rowid"]. ^The rowid is always available -** as an undeclared column named ROWID, OID, or _ROWID_ as long as those -** names are not also used by explicitly declared columns. ^If -** the table has a column of type [INTEGER PRIMARY KEY] then that column -** is another alias for the rowid. -** -** ^The sqlite3_last_insert_rowid(D) interface returns the [rowid] of the -** most recent successful [INSERT] into a rowid table or [virtual table] -** on database connection D. -** ^Inserts into [WITHOUT ROWID] tables are not recorded. -** ^If no successful [INSERT]s into rowid tables -** have ever occurred on the database connection D, -** then sqlite3_last_insert_rowid(D) returns zero. -** -** ^(If an [INSERT] occurs within a trigger or within a [virtual table] -** method, then this routine will return the [rowid] of the inserted -** row as long as the trigger or virtual table method is running. -** But once the trigger or virtual table method ends, the value returned -** by this routine reverts to what it was before the trigger or virtual -** table method began.)^ -** -** ^An [INSERT] that fails due to a constraint violation is not a -** successful [INSERT] and does not change the value returned by this -** routine. ^Thus INSERT OR FAIL, INSERT OR IGNORE, INSERT OR ROLLBACK, -** and INSERT OR ABORT make no changes to the return value of this -** routine when their insertion fails. ^(When INSERT OR REPLACE -** encounters a constraint violation, it does not fail. The -** INSERT continues to completion after deleting rows that caused -** the constraint problem so INSERT OR REPLACE will always change -** the return value of this interface.)^ -** -** ^For the purposes of this routine, an [INSERT] is considered to -** be successful even if it is subsequently rolled back. -** -** This function is accessible to SQL statements via the -** [last_insert_rowid() SQL function]. -** -** If a separate thread performs a new [INSERT] on the same -** database connection while the [sqlite3_last_insert_rowid()] -** function is running and thus changes the last insert [rowid], -** then the value returned by [sqlite3_last_insert_rowid()] is -** unpredictable and might not equal either the old or the new -** last insert [rowid]. -*/ -SQLITE_API sqlite3_int64 sqlite3_last_insert_rowid(sqlite3*); - -/* -** CAPI3REF: Count The Number Of Rows Modified -** -** ^This function returns the number of database rows that were changed -** or inserted or deleted by the most recently completed SQL statement -** on the [database connection] specified by the first parameter. -** ^(Only changes that are directly specified by the [INSERT], [UPDATE], -** or [DELETE] statement are counted. 
Auxiliary changes caused by -** triggers or [foreign key actions] are not counted.)^ Use the -** [sqlite3_total_changes()] function to find the total number of changes -** including changes caused by triggers and foreign key actions. -** -** ^Changes to a view that are simulated by an [INSTEAD OF trigger] -** are not counted. Only real table changes are counted. -** -** ^(A "row change" is a change to a single row of a single table -** caused by an INSERT, DELETE, or UPDATE statement. Rows that -** are changed as side effects of [REPLACE] constraint resolution, -** rollback, ABORT processing, [DROP TABLE], or by any other -** mechanisms do not count as direct row changes.)^ -** -** A "trigger context" is a scope of execution that begins and -** ends with the script of a [CREATE TRIGGER | trigger]. -** Most SQL statements are -** evaluated outside of any trigger. This is the "top level" -** trigger context. If a trigger fires from the top level, a -** new trigger context is entered for the duration of that one -** trigger. Subtriggers create subcontexts for their duration. -** -** ^Calling [sqlite3_exec()] or [sqlite3_step()] recursively does -** not create a new trigger context. -** -** ^This function returns the number of direct row changes in the -** most recent INSERT, UPDATE, or DELETE statement within the same -** trigger context. -** -** ^Thus, when called from the top level, this function returns the -** number of changes in the most recent INSERT, UPDATE, or DELETE -** that also occurred at the top level. ^(Within the body of a trigger, -** the sqlite3_changes() interface can be called to find the number of -** changes in the most recently completed INSERT, UPDATE, or DELETE -** statement within the body of the same trigger. -** However, the number returned does not include changes -** caused by subtriggers since those have their own context.)^ -** -** See also the [sqlite3_total_changes()] interface, the -** [count_changes pragma], and the [changes() SQL function]. -** -** If a separate thread makes changes on the same database connection -** while [sqlite3_changes()] is running then the value returned -** is unpredictable and not meaningful. -*/ -SQLITE_API int sqlite3_changes(sqlite3*); - -/* -** CAPI3REF: Total Number Of Rows Modified -** -** ^This function returns the number of row changes caused by [INSERT], -** [UPDATE] or [DELETE] statements since the [database connection] was opened. -** ^(The count returned by sqlite3_total_changes() includes all changes -** from all [CREATE TRIGGER | trigger] contexts and changes made by -** [foreign key actions]. However, -** the count does not include changes used to implement [REPLACE] constraints, -** do rollbacks or ABORT processing, or [DROP TABLE] processing. The -** count does not include rows of views that fire an [INSTEAD OF trigger], -** though if the INSTEAD OF trigger makes changes of its own, those changes -** are counted.)^ -** ^The sqlite3_total_changes() function counts the changes as soon as -** the statement that makes them is completed (when the statement handle -** is passed to [sqlite3_reset()] or [sqlite3_finalize()]). -** -** See also the [sqlite3_changes()] interface, the -** [count_changes pragma], and the [total_changes() SQL function]. -** -** If a separate thread makes changes on the same database connection -** while [sqlite3_total_changes()] is running then the value -** returned is unpredictable and not meaningful. 
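A brief sketch of reading back the counters documented above; it assumes a table t(x) already exists and omits error handling for clarity:

#include "sqlite3.h"

void demo_change_counters(sqlite3 *db){
  sqlite3_exec(db, "INSERT INTO t(x) VALUES(1)", 0, 0, 0);
  sqlite3_int64 rowid = sqlite3_last_insert_rowid(db);   /* rowid of that INSERT */

  sqlite3_exec(db, "UPDATE t SET x=x+1", 0, 0, 0);
  int nDirect = sqlite3_changes(db);         /* rows changed by the UPDATE only */
  int nTotal  = sqlite3_total_changes(db);   /* all changes since the connection was opened */

  (void)rowid; (void)nDirect; (void)nTotal;
}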
-*/ -SQLITE_API int sqlite3_total_changes(sqlite3*); - -/* -** CAPI3REF: Interrupt A Long-Running Query -** -** ^This function causes any pending database operation to abort and -** return at its earliest opportunity. This routine is typically -** called in response to a user action such as pressing "Cancel" -** or Ctrl-C where the user wants a long query operation to halt -** immediately. -** -** ^It is safe to call this routine from a thread different from the -** thread that is currently running the database operation. But it -** is not safe to call this routine with a [database connection] that -** is closed or might close before sqlite3_interrupt() returns. -** -** ^If an SQL operation is very nearly finished at the time when -** sqlite3_interrupt() is called, then it might not have an opportunity -** to be interrupted and might continue to completion. -** -** ^An SQL operation that is interrupted will return [SQLITE_INTERRUPT]. -** ^If the interrupted SQL operation is an INSERT, UPDATE, or DELETE -** that is inside an explicit transaction, then the entire transaction -** will be rolled back automatically. -** -** ^The sqlite3_interrupt(D) call is in effect until all currently running -** SQL statements on [database connection] D complete. ^Any new SQL statements -** that are started after the sqlite3_interrupt() call and before the -** running statements reaches zero are interrupted as if they had been -** running prior to the sqlite3_interrupt() call. ^New SQL statements -** that are started after the running statement count reaches zero are -** not effected by the sqlite3_interrupt(). -** ^A call to sqlite3_interrupt(D) that occurs when there are no running -** SQL statements is a no-op and has no effect on SQL statements -** that are started after the sqlite3_interrupt() call returns. -** -** If the database connection closes while [sqlite3_interrupt()] -** is running then bad things will likely happen. -*/ -SQLITE_API void sqlite3_interrupt(sqlite3*); - -/* -** CAPI3REF: Determine If An SQL Statement Is Complete -** -** These routines are useful during command-line input to determine if the -** currently entered text seems to form a complete SQL statement or -** if additional input is needed before sending the text into -** SQLite for parsing. ^These routines return 1 if the input string -** appears to be a complete SQL statement. ^A statement is judged to be -** complete if it ends with a semicolon token and is not a prefix of a -** well-formed CREATE TRIGGER statement. ^Semicolons that are embedded within -** string literals or quoted identifier names or comments are not -** independent tokens (they are part of the token in which they are -** embedded) and thus do not count as a statement terminator. ^Whitespace -** and comments that follow the final semicolon are ignored. -** -** ^These routines return 0 if the statement is incomplete. ^If a -** memory allocation fails, then SQLITE_NOMEM is returned. -** -** ^These routines do not parse the SQL statements thus -** will not detect syntactically incorrect SQL. -** -** ^(If SQLite has not been initialized using [sqlite3_initialize()] prior -** to invoking sqlite3_complete16() then sqlite3_initialize() is invoked -** automatically by sqlite3_complete16(). If that initialization fails, -** then the return value from sqlite3_complete16() will be non-zero -** regardless of whether or not the input SQL is complete.)^ -** -** The input to [sqlite3_complete()] must be a zero-terminated -** UTF-8 string. 
-** -** The input to [sqlite3_complete16()] must be a zero-terminated -** UTF-16 string in native byte order. -*/ -SQLITE_API int sqlite3_complete(const char *sql); -SQLITE_API int sqlite3_complete16(const void *sql); - -/* -** CAPI3REF: Register A Callback To Handle SQLITE_BUSY Errors -** -** ^This routine sets a callback function that might be invoked whenever -** an attempt is made to open a database table that another thread -** or process has locked. -** -** ^If the busy callback is NULL, then [SQLITE_BUSY] or [SQLITE_IOERR_BLOCKED] -** is returned immediately upon encountering the lock. ^If the busy callback -** is not NULL, then the callback might be invoked with two arguments. -** -** ^The first argument to the busy handler is a copy of the void* pointer which -** is the third argument to sqlite3_busy_handler(). ^The second argument to -** the busy handler callback is the number of times that the busy handler has -** been invoked for this locking event. ^If the -** busy callback returns 0, then no additional attempts are made to -** access the database and [SQLITE_BUSY] or [SQLITE_IOERR_BLOCKED] is returned. -** ^If the callback returns non-zero, then another attempt -** is made to open the database for reading and the cycle repeats. -** -** The presence of a busy handler does not guarantee that it will be invoked -** when there is lock contention. ^If SQLite determines that invoking the busy -** handler could result in a deadlock, it will go ahead and return [SQLITE_BUSY] -** or [SQLITE_IOERR_BLOCKED] instead of invoking the busy handler. -** Consider a scenario where one process is holding a read lock that -** it is trying to promote to a reserved lock and -** a second process is holding a reserved lock that it is trying -** to promote to an exclusive lock. The first process cannot proceed -** because it is blocked by the second and the second process cannot -** proceed because it is blocked by the first. If both processes -** invoke the busy handlers, neither will make any progress. Therefore, -** SQLite returns [SQLITE_BUSY] for the first process, hoping that this -** will induce the first process to release its read lock and allow -** the second process to proceed. -** -** ^The default busy callback is NULL. -** -** ^The [SQLITE_BUSY] error is converted to [SQLITE_IOERR_BLOCKED] -** when SQLite is in the middle of a large transaction where all the -** changes will not fit into the in-memory cache. SQLite will -** already hold a RESERVED lock on the database file, but it needs -** to promote this lock to EXCLUSIVE so that it can spill cache -** pages into the database file without harm to concurrent -** readers. ^If it is unable to promote the lock, then the in-memory -** cache will be left in an inconsistent state and so the error -** code is promoted from the relatively benign [SQLITE_BUSY] to -** the more severe [SQLITE_IOERR_BLOCKED]. ^This error code promotion -** forces an automatic rollback of the changes. See the -** -** CorruptionFollowingBusyError wiki page for a discussion of why -** this is important. -** -** ^(There can only be a single busy handler defined for each -** [database connection]. Setting a new busy handler clears any -** previously set handler.)^ ^Note that calling [sqlite3_busy_timeout()] -** will also set or clear the busy handler. -** -** The busy callback should not take any actions which modify the -** database connection that invoked the busy handler. Any such actions -** result in undefined behavior. 
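A minimal busy handler of the shape described above; the retry limit and delay are arbitrary choices for illustration, and sqlite3_busy_timeout(), covered next, installs an equivalent sleeping handler:

#include "sqlite3.h"

static int demo_busy(void *pArg, int nPrior){
  (void)pArg;
  if( nPrior>=10 ) return 0;      /* give up; the blocked statement returns SQLITE_BUSY */
  sqlite3_sleep(50);              /* back off for roughly 50 ms, then retry */
  return 1;
}

/* Installed with: sqlite3_busy_handler(db, demo_busy, 0); */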
-** -** A busy handler must not close the database connection -** or [prepared statement] that invoked the busy handler. -*/ -SQLITE_API int sqlite3_busy_handler(sqlite3*, int(*)(void*,int), void*); - -/* -** CAPI3REF: Set A Busy Timeout -** -** ^This routine sets a [sqlite3_busy_handler | busy handler] that sleeps -** for a specified amount of time when a table is locked. ^The handler -** will sleep multiple times until at least "ms" milliseconds of sleeping -** have accumulated. ^After at least "ms" milliseconds of sleeping, -** the handler returns 0 which causes [sqlite3_step()] to return -** [SQLITE_BUSY] or [SQLITE_IOERR_BLOCKED]. -** -** ^Calling this routine with an argument less than or equal to zero -** turns off all busy handlers. -** -** ^(There can only be a single busy handler for a particular -** [database connection] at any given moment. If another busy handler -** was defined (using [sqlite3_busy_handler()]) prior to calling -** this routine, that other busy handler is cleared.)^ -*/ -SQLITE_API int sqlite3_busy_timeout(sqlite3*, int ms); - -/* -** CAPI3REF: Convenience Routines For Running Queries -** -** This is a legacy interface that is preserved for backwards compatibility. -** Use of this interface is not recommended. -** -** Definition: A result table is a memory data structure created by the -** [sqlite3_get_table()] interface. A result table records the -** complete query results from one or more queries. -** -** The table conceptually has a number of rows and columns. But -** these numbers are not part of the result table itself. These -** numbers are obtained separately. Let N be the number of rows -** and M be the number of columns. -** -** A result table is an array of pointers to zero-terminated UTF-8 strings. -** There are (N+1)*M elements in the array. The first M pointers point -** to zero-terminated strings that contain the names of the columns. -** The remaining entries all point to query results. NULL values result -** in NULL pointers. All other values are in their UTF-8 zero-terminated -** string representation as returned by [sqlite3_column_text()]. -** -** A result table might consist of one or more memory allocations. -** It is not safe to pass a result table directly to [sqlite3_free()]. -** A result table should be deallocated using [sqlite3_free_table()]. -** -** ^(As an example of the result table format, suppose a query result -** is as follows: -** -**
    -**        Name        | Age
    -**        -----------------------
    -**        Alice       | 43
    -**        Bob         | 28
    -**        Cindy       | 21
    -** 
-** -** There are two columns (M==2) and three rows (N==3). Thus the -** result table has 8 entries. Suppose the result table is stored -** in an array named azResult. Then azResult holds this content: -** -**
    -**        azResult[0] = "Name";
    -**        azResult[1] = "Age";
    -**        azResult[2] = "Alice";
    -**        azResult[3] = "43";
    -**        azResult[4] = "Bob";
    -**        azResult[5] = "28";
    -**        azResult[6] = "Cindy";
    -**        azResult[7] = "21";
    -** 
    )^ -** -** ^The sqlite3_get_table() function evaluates one or more -** semicolon-separated SQL statements in the zero-terminated UTF-8 -** string of its 2nd parameter and returns a result table to the -** pointer given in its 3rd parameter. -** -** After the application has finished with the result from sqlite3_get_table(), -** it must pass the result table pointer to sqlite3_free_table() in order to -** release the memory that was malloced. Because of the way the -** [sqlite3_malloc()] happens within sqlite3_get_table(), the calling -** function must not try to call [sqlite3_free()] directly. Only -** [sqlite3_free_table()] is able to release the memory properly and safely. -** -** The sqlite3_get_table() interface is implemented as a wrapper around -** [sqlite3_exec()]. The sqlite3_get_table() routine does not have access -** to any internal data structures of SQLite. It uses only the public -** interface defined here. As a consequence, errors that occur in the -** wrapper layer outside of the internal [sqlite3_exec()] call are not -** reflected in subsequent calls to [sqlite3_errcode()] or -** [sqlite3_errmsg()]. -*/ -SQLITE_API int sqlite3_get_table( - sqlite3 *db, /* An open database */ - const char *zSql, /* SQL to be evaluated */ - char ***pazResult, /* Results of the query */ - int *pnRow, /* Number of result rows written here */ - int *pnColumn, /* Number of result columns written here */ - char **pzErrmsg /* Error msg written here */ -); -SQLITE_API void sqlite3_free_table(char **result); - -/* -** CAPI3REF: Formatted String Printing Functions -** -** These routines are work-alikes of the "printf()" family of functions -** from the standard C library. -** -** ^The sqlite3_mprintf() and sqlite3_vmprintf() routines write their -** results into memory obtained from [sqlite3_malloc()]. -** The strings returned by these two routines should be -** released by [sqlite3_free()]. ^Both routines return a -** NULL pointer if [sqlite3_malloc()] is unable to allocate enough -** memory to hold the resulting string. -** -** ^(The sqlite3_snprintf() routine is similar to "snprintf()" from -** the standard C library. The result is written into the -** buffer supplied as the second parameter whose size is given by -** the first parameter. Note that the order of the -** first two parameters is reversed from snprintf().)^ This is an -** historical accident that cannot be fixed without breaking -** backwards compatibility. ^(Note also that sqlite3_snprintf() -** returns a pointer to its buffer instead of the number of -** characters actually written into the buffer.)^ We admit that -** the number of characters written would be a more useful return -** value but we cannot change the implementation of sqlite3_snprintf() -** now without breaking compatibility. -** -** ^As long as the buffer size is greater than zero, sqlite3_snprintf() -** guarantees that the buffer is always zero-terminated. ^The first -** parameter "n" is the total size of the buffer, including space for -** the zero terminator. So the longest string that can be completely -** written will be n-1 characters. -** -** ^The sqlite3_vsnprintf() routine is a varargs version of sqlite3_snprintf(). -** -** These routines all implement some additional formatting -** options that are useful for constructing SQL statements. -** All of the usual printf() formatting options apply. In addition, there -** is are "%q", "%Q", and "%z" options. 
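/*
** Illustrative sketch, not from the SQLite sources: the result-table layout
** documented above for sqlite3_get_table().  The table "users", its columns,
** and the open connection "db" are hypothetical; <stdio.h> is assumed to be
** included.
*/
static void exampleGetTable(sqlite3 *db){
  char **azResult = 0;
  char *zErr = 0;
  int nRow = 0, nCol = 0, i, j;
  int rc = sqlite3_get_table(db, "SELECT name, age FROM users",
                             &azResult, &nRow, &nCol, &zErr);
  if( rc==SQLITE_OK ){
    for(i=1; i<=nRow; i++){               /* row 0 holds the column names */
      for(j=0; j<nCol; j++){
        const char *zVal = azResult[i*nCol + j];
        printf("%s=%s  ", azResult[j], zVal ? zVal : "NULL");
      }
      printf("\n");
    }
  }else{
    fprintf(stderr, "query failed: %s\n", zErr ? zErr : "unknown error");
  }
  sqlite3_free(zErr);             /* error messages come from sqlite3_malloc() */
  sqlite3_free_table(azResult);   /* never pass the table to plain sqlite3_free() */
}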
-** -** ^(The %q option works like %s in that it substitutes a nul-terminated -** string from the argument list. But %q also doubles every '\'' character. -** %q is designed for use inside a string literal.)^ By doubling each '\'' -** character it escapes that character and allows it to be inserted into -** the string. -** -** For example, assume the string variable zText contains text as follows: -** -**
    -**  char *zText = "It's a happy day!";
    -** 
    -** -** One can use this text in an SQL statement as follows: -** -**
-**  char *zSQL = sqlite3_mprintf("INSERT INTO table1 VALUES('%q')", zText);
    -**  sqlite3_exec(db, zSQL, 0, 0, 0);
    -**  sqlite3_free(zSQL);
    -** 
    -** -** Because the %q format string is used, the '\'' character in zText -** is escaped and the SQL generated is as follows: -** -**
    -**  INSERT INTO table1 VALUES('It''s a happy day!')
    -** 
    -** -** This is correct. Had we used %s instead of %q, the generated SQL -** would have looked like this: -** -**
    -**  INSERT INTO table1 VALUES('It's a happy day!');
    -** 
    -** -** This second example is an SQL syntax error. As a general rule you should -** always use %q instead of %s when inserting text into a string literal. -** -** ^(The %Q option works like %q except it also adds single quotes around -** the outside of the total string. Additionally, if the parameter in the -** argument list is a NULL pointer, %Q substitutes the text "NULL" (without -** single quotes).)^ So, for example, one could say: -** -**
-**  char *zSQL = sqlite3_mprintf("INSERT INTO table1 VALUES(%Q)", zText);
    -**  sqlite3_exec(db, zSQL, 0, 0, 0);
    -**  sqlite3_free(zSQL);
    -** 
    -** -** The code above will render a correct SQL statement in the zSQL -** variable even if the zText variable is a NULL pointer. -** -** ^(The "%z" formatting option works like "%s" but with the -** addition that after the string has been read and copied into -** the result, [sqlite3_free()] is called on the input string.)^ -*/ -SQLITE_API char *sqlite3_mprintf(const char*,...); -SQLITE_API char *sqlite3_vmprintf(const char*, va_list); -SQLITE_API char *sqlite3_snprintf(int,char*,const char*, ...); -SQLITE_API char *sqlite3_vsnprintf(int,char*,const char*, va_list); - -/* -** CAPI3REF: Memory Allocation Subsystem -** -** The SQLite core uses these three routines for all of its own -** internal memory allocation needs. "Core" in the previous sentence -** does not include operating-system specific VFS implementation. The -** Windows VFS uses native malloc() and free() for some operations. -** -** ^The sqlite3_malloc() routine returns a pointer to a block -** of memory at least N bytes in length, where N is the parameter. -** ^If sqlite3_malloc() is unable to obtain sufficient free -** memory, it returns a NULL pointer. ^If the parameter N to -** sqlite3_malloc() is zero or negative then sqlite3_malloc() returns -** a NULL pointer. -** -** ^Calling sqlite3_free() with a pointer previously returned -** by sqlite3_malloc() or sqlite3_realloc() releases that memory so -** that it might be reused. ^The sqlite3_free() routine is -** a no-op if is called with a NULL pointer. Passing a NULL pointer -** to sqlite3_free() is harmless. After being freed, memory -** should neither be read nor written. Even reading previously freed -** memory might result in a segmentation fault or other severe error. -** Memory corruption, a segmentation fault, or other severe error -** might result if sqlite3_free() is called with a non-NULL pointer that -** was not obtained from sqlite3_malloc() or sqlite3_realloc(). -** -** ^(The sqlite3_realloc() interface attempts to resize a -** prior memory allocation to be at least N bytes, where N is the -** second parameter. The memory allocation to be resized is the first -** parameter.)^ ^ If the first parameter to sqlite3_realloc() -** is a NULL pointer then its behavior is identical to calling -** sqlite3_malloc(N) where N is the second parameter to sqlite3_realloc(). -** ^If the second parameter to sqlite3_realloc() is zero or -** negative then the behavior is exactly the same as calling -** sqlite3_free(P) where P is the first parameter to sqlite3_realloc(). -** ^sqlite3_realloc() returns a pointer to a memory allocation -** of at least N bytes in size or NULL if sufficient memory is unavailable. -** ^If M is the size of the prior allocation, then min(N,M) bytes -** of the prior allocation are copied into the beginning of buffer returned -** by sqlite3_realloc() and the prior allocation is freed. -** ^If sqlite3_realloc() returns NULL, then the prior allocation -** is not freed. -** -** ^The memory returned by sqlite3_malloc() and sqlite3_realloc() -** is always aligned to at least an 8 byte boundary, or to a -** 4 byte boundary if the [SQLITE_4_BYTE_ALIGNED_MALLOC] compile-time -** option is used. -** -** In SQLite version 3.5.0 and 3.5.1, it was possible to define -** the SQLITE_OMIT_MEMORY_ALLOCATION which would cause the built-in -** implementation of these routines to be omitted. That capability -** is no longer provided. Only built-in memory allocators can be used. 
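/*
** Illustrative sketch, not from the SQLite sources: the %Q option described
** above combined with the allocator routines, since sqlite3_mprintf() draws
** its memory from sqlite3_malloc() and the result must go back to
** sqlite3_free().  The table "contacts" and the connection "db" are
** hypothetical.
*/
static int exampleInsertName(sqlite3 *db, const char *zName){
  char *zSql = sqlite3_mprintf("INSERT INTO contacts(name) VALUES(%Q)", zName);
  int rc;
  if( zSql==0 ) return SQLITE_NOMEM;     /* formatting could not allocate */
  rc = sqlite3_exec(db, zSql, 0, 0, 0);  /* %Q quoted and escaped zName, or
                                         ** produced NULL when zName==0 */
  sqlite3_free(zSql);                    /* release the formatted SQL text */
  return rc;
}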
-** -** Prior to SQLite version 3.7.10, the Windows OS interface layer called -** the system malloc() and free() directly when converting -** filenames between the UTF-8 encoding used by SQLite -** and whatever filename encoding is used by the particular Windows -** installation. Memory allocation errors were detected, but -** they were reported back as [SQLITE_CANTOPEN] or -** [SQLITE_IOERR] rather than [SQLITE_NOMEM]. -** -** The pointer arguments to [sqlite3_free()] and [sqlite3_realloc()] -** must be either NULL or else pointers obtained from a prior -** invocation of [sqlite3_malloc()] or [sqlite3_realloc()] that have -** not yet been released. -** -** The application must not read or write any part of -** a block of memory after it has been released using -** [sqlite3_free()] or [sqlite3_realloc()]. -*/ -SQLITE_API void *sqlite3_malloc(int); -SQLITE_API void *sqlite3_realloc(void*, int); -SQLITE_API void sqlite3_free(void*); - -/* -** CAPI3REF: Memory Allocator Statistics -** -** SQLite provides these two interfaces for reporting on the status -** of the [sqlite3_malloc()], [sqlite3_free()], and [sqlite3_realloc()] -** routines, which form the built-in memory allocation subsystem. -** -** ^The [sqlite3_memory_used()] routine returns the number of bytes -** of memory currently outstanding (malloced but not freed). -** ^The [sqlite3_memory_highwater()] routine returns the maximum -** value of [sqlite3_memory_used()] since the high-water mark -** was last reset. ^The values returned by [sqlite3_memory_used()] and -** [sqlite3_memory_highwater()] include any overhead -** added by SQLite in its implementation of [sqlite3_malloc()], -** but not overhead added by the any underlying system library -** routines that [sqlite3_malloc()] may call. -** -** ^The memory high-water mark is reset to the current value of -** [sqlite3_memory_used()] if and only if the parameter to -** [sqlite3_memory_highwater()] is true. ^The value returned -** by [sqlite3_memory_highwater(1)] is the high-water mark -** prior to the reset. -*/ -SQLITE_API sqlite3_int64 sqlite3_memory_used(void); -SQLITE_API sqlite3_int64 sqlite3_memory_highwater(int resetFlag); - -/* -** CAPI3REF: Pseudo-Random Number Generator -** -** SQLite contains a high-quality pseudo-random number generator (PRNG) used to -** select random [ROWID | ROWIDs] when inserting new records into a table that -** already uses the largest possible [ROWID]. The PRNG is also used for -** the build-in random() and randomblob() SQL functions. This interface allows -** applications to access the same PRNG for other purposes. -** -** ^A call to this routine stores N bytes of randomness into buffer P. -** ^If N is less than one, then P can be a NULL pointer. -** -** ^If this routine has not been previously called or if the previous -** call had N less than one, then the PRNG is seeded using randomness -** obtained from the xRandomness method of the default [sqlite3_vfs] object. -** ^If the previous call to this routine had an N of 1 or more then -** the pseudo-randomness is generated -** internally and without recourse to the [sqlite3_vfs] xRandomness -** method. -*/ -SQLITE_API void sqlite3_randomness(int N, void *P); - -/* -** CAPI3REF: Compile-Time Authorization Callbacks -** -** ^This routine registers an authorizer callback with a particular -** [database connection], supplied in the first argument. 
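/*
** Illustrative sketch, not from the SQLite sources: the memory-statistics and
** sqlite3_randomness() interfaces described above.  The 16-byte token size is
** an arbitrary example and <stdio.h> is assumed to be included.
*/
static void exampleStatsAndRandom(void){
  unsigned char aToken[16];
  sqlite3_randomness((int)sizeof(aToken), aToken);  /* fill with random bytes */
  printf("heap in use: %lld bytes, high-water: %lld bytes\n",
         (long long)sqlite3_memory_used(),
         (long long)sqlite3_memory_highwater(0));   /* 0 = do not reset mark */
}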
-** ^The authorizer callback is invoked as SQL statements are being compiled -** by [sqlite3_prepare()] or its variants [sqlite3_prepare_v2()], -** [sqlite3_prepare16()] and [sqlite3_prepare16_v2()]. ^At various -** points during the compilation process, as logic is being created -** to perform various actions, the authorizer callback is invoked to -** see if those actions are allowed. ^The authorizer callback should -** return [SQLITE_OK] to allow the action, [SQLITE_IGNORE] to disallow the -** specific action but allow the SQL statement to continue to be -** compiled, or [SQLITE_DENY] to cause the entire SQL statement to be -** rejected with an error. ^If the authorizer callback returns -** any value other than [SQLITE_IGNORE], [SQLITE_OK], or [SQLITE_DENY] -** then the [sqlite3_prepare_v2()] or equivalent call that triggered -** the authorizer will fail with an error message. -** -** When the callback returns [SQLITE_OK], that means the operation -** requested is ok. ^When the callback returns [SQLITE_DENY], the -** [sqlite3_prepare_v2()] or equivalent call that triggered the -** authorizer will fail with an error message explaining that -** access is denied. -** -** ^The first parameter to the authorizer callback is a copy of the third -** parameter to the sqlite3_set_authorizer() interface. ^The second parameter -** to the callback is an integer [SQLITE_COPY | action code] that specifies -** the particular action to be authorized. ^The third through sixth parameters -** to the callback are zero-terminated strings that contain additional -** details about the action to be authorized. -** -** ^If the action code is [SQLITE_READ] -** and the callback returns [SQLITE_IGNORE] then the -** [prepared statement] statement is constructed to substitute -** a NULL value in place of the table column that would have -** been read if [SQLITE_OK] had been returned. The [SQLITE_IGNORE] -** return can be used to deny an untrusted user access to individual -** columns of a table. -** ^If the action code is [SQLITE_DELETE] and the callback returns -** [SQLITE_IGNORE] then the [DELETE] operation proceeds but the -** [truncate optimization] is disabled and all rows are deleted individually. -** -** An authorizer is used when [sqlite3_prepare | preparing] -** SQL statements from an untrusted source, to ensure that the SQL statements -** do not try to access data they are not allowed to see, or that they do not -** try to execute malicious statements that damage the database. For -** example, an application may allow a user to enter arbitrary -** SQL queries for evaluation by a database. But the application does -** not want the user to be able to make arbitrary changes to the -** database. An authorizer could then be put in place while the -** user-entered SQL is being [sqlite3_prepare | prepared] that -** disallows everything except [SELECT] statements. -** -** Applications that need to process SQL from untrusted sources -** might also consider lowering resource limits using [sqlite3_limit()] -** and limiting database size using the [max_page_count] [PRAGMA] -** in addition to using an authorizer. -** -** ^(Only a single authorizer can be in place on a database connection -** at a time. Each call to sqlite3_set_authorizer overrides the -** previous call.)^ ^Disable the authorizer by installing a NULL callback. -** The authorizer is disabled by default. -** -** The authorizer callback must not do anything that will modify -** the database connection that invoked the authorizer callback. 
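/*
** Illustrative sketch, not from the SQLite sources: an authorizer for the
** scenario described above, where untrusted SQL may only read data.  The
** action codes used here are defined a little further down in this header.
*/
static int exampleReadOnlyAuth(void *pArg, int op, const char *z1,
                               const char *z2, const char *z3, const char *z4){
  (void)pArg; (void)z1; (void)z2; (void)z3; (void)z4;
  switch( op ){
    case SQLITE_SELECT:
    case SQLITE_READ:
    case SQLITE_FUNCTION:
      return SQLITE_OK;     /* allow SELECT, column reads, and function calls */
    default:
      return SQLITE_DENY;   /* reject anything that could modify the database */
  }
}
/* Installed before preparing untrusted SQL, and removed afterwards with a
** NULL callback:
**   sqlite3_set_authorizer(db, exampleReadOnlyAuth, 0);
*/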
-** Note that [sqlite3_prepare_v2()] and [sqlite3_step()] both modify their -** database connections for the meaning of "modify" in this paragraph. -** -** ^When [sqlite3_prepare_v2()] is used to prepare a statement, the -** statement might be re-prepared during [sqlite3_step()] due to a -** schema change. Hence, the application should ensure that the -** correct authorizer callback remains in place during the [sqlite3_step()]. -** -** ^Note that the authorizer callback is invoked only during -** [sqlite3_prepare()] or its variants. Authorization is not -** performed during statement evaluation in [sqlite3_step()], unless -** as stated in the previous paragraph, sqlite3_step() invokes -** sqlite3_prepare_v2() to reprepare a statement after a schema change. -*/ -SQLITE_API int sqlite3_set_authorizer( - sqlite3*, - int (*xAuth)(void*,int,const char*,const char*,const char*,const char*), - void *pUserData -); - -/* -** CAPI3REF: Authorizer Return Codes -** -** The [sqlite3_set_authorizer | authorizer callback function] must -** return either [SQLITE_OK] or one of these two constants in order -** to signal SQLite whether or not the action is permitted. See the -** [sqlite3_set_authorizer | authorizer documentation] for additional -** information. -** -** Note that SQLITE_IGNORE is also used as a [SQLITE_ROLLBACK | return code] -** from the [sqlite3_vtab_on_conflict()] interface. -*/ -#define SQLITE_DENY 1 /* Abort the SQL statement with an error */ -#define SQLITE_IGNORE 2 /* Don't allow access, but don't generate an error */ - -/* -** CAPI3REF: Authorizer Action Codes -** -** The [sqlite3_set_authorizer()] interface registers a callback function -** that is invoked to authorize certain SQL statement actions. The -** second parameter to the callback is an integer code that specifies -** what action is being authorized. These are the integer action codes that -** the authorizer callback may be passed. -** -** These action code values signify what kind of operation is to be -** authorized. The 3rd and 4th parameters to the authorization -** callback function will be parameters or NULL depending on which of these -** codes is used as the second parameter. ^(The 5th parameter to the -** authorizer callback is the name of the database ("main", "temp", -** etc.) if applicable.)^ ^The 6th parameter to the authorizer callback -** is the name of the inner-most trigger or view that is responsible for -** the access attempt or NULL if this access attempt is directly from -** top-level SQL code. 
-*/ -/******************************************* 3rd ************ 4th ***********/ -#define SQLITE_CREATE_INDEX 1 /* Index Name Table Name */ -#define SQLITE_CREATE_TABLE 2 /* Table Name NULL */ -#define SQLITE_CREATE_TEMP_INDEX 3 /* Index Name Table Name */ -#define SQLITE_CREATE_TEMP_TABLE 4 /* Table Name NULL */ -#define SQLITE_CREATE_TEMP_TRIGGER 5 /* Trigger Name Table Name */ -#define SQLITE_CREATE_TEMP_VIEW 6 /* View Name NULL */ -#define SQLITE_CREATE_TRIGGER 7 /* Trigger Name Table Name */ -#define SQLITE_CREATE_VIEW 8 /* View Name NULL */ -#define SQLITE_DELETE 9 /* Table Name NULL */ -#define SQLITE_DROP_INDEX 10 /* Index Name Table Name */ -#define SQLITE_DROP_TABLE 11 /* Table Name NULL */ -#define SQLITE_DROP_TEMP_INDEX 12 /* Index Name Table Name */ -#define SQLITE_DROP_TEMP_TABLE 13 /* Table Name NULL */ -#define SQLITE_DROP_TEMP_TRIGGER 14 /* Trigger Name Table Name */ -#define SQLITE_DROP_TEMP_VIEW 15 /* View Name NULL */ -#define SQLITE_DROP_TRIGGER 16 /* Trigger Name Table Name */ -#define SQLITE_DROP_VIEW 17 /* View Name NULL */ -#define SQLITE_INSERT 18 /* Table Name NULL */ -#define SQLITE_PRAGMA 19 /* Pragma Name 1st arg or NULL */ -#define SQLITE_READ 20 /* Table Name Column Name */ -#define SQLITE_SELECT 21 /* NULL NULL */ -#define SQLITE_TRANSACTION 22 /* Operation NULL */ -#define SQLITE_UPDATE 23 /* Table Name Column Name */ -#define SQLITE_ATTACH 24 /* Filename NULL */ -#define SQLITE_DETACH 25 /* Database Name NULL */ -#define SQLITE_ALTER_TABLE 26 /* Database Name Table Name */ -#define SQLITE_REINDEX 27 /* Index Name NULL */ -#define SQLITE_ANALYZE 28 /* Table Name NULL */ -#define SQLITE_CREATE_VTABLE 29 /* Table Name Module Name */ -#define SQLITE_DROP_VTABLE 30 /* Table Name Module Name */ -#define SQLITE_FUNCTION 31 /* NULL Function Name */ -#define SQLITE_SAVEPOINT 32 /* Operation Savepoint Name */ -#define SQLITE_COPY 0 /* No longer used */ -#define SQLITE_RECURSIVE 33 /* NULL NULL */ - -/* -** CAPI3REF: Tracing And Profiling Functions -** -** These routines register callback functions that can be used for -** tracing and profiling the execution of SQL statements. -** -** ^The callback function registered by sqlite3_trace() is invoked at -** various times when an SQL statement is being run by [sqlite3_step()]. -** ^The sqlite3_trace() callback is invoked with a UTF-8 rendering of the -** SQL statement text as the statement first begins executing. -** ^(Additional sqlite3_trace() callbacks might occur -** as each triggered subprogram is entered. The callbacks for triggers -** contain a UTF-8 SQL comment that identifies the trigger.)^ -** -** The [SQLITE_TRACE_SIZE_LIMIT] compile-time option can be used to limit -** the length of [bound parameter] expansion in the output of sqlite3_trace(). -** -** ^The callback function registered by sqlite3_profile() is invoked -** as each SQL statement finishes. ^The profile callback contains -** the original statement text and an estimate of wall-clock time -** of how long that statement took to run. ^The profile callback -** time is in units of nanoseconds, however the current implementation -** is only capable of millisecond resolution so the six least significant -** digits in the time are meaningless. Future versions of SQLite -** might provide greater resolution on the profiler callback. The -** sqlite3_profile() function is considered experimental and is -** subject to change in future versions of SQLite. 
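/*
** Illustrative sketch, not from the SQLite sources: a trace callback matching
** the sqlite3_trace() interface described above, logging each statement as it
** begins executing.  Writing to stderr is an arbitrary choice and <stdio.h>
** is assumed to be included.
*/
static void exampleTrace(void *pArg, const char *zSql){
  (void)pArg;                          /* context pointer from sqlite3_trace() */
  fprintf(stderr, "SQL: %s\n", zSql);
}
/* Registered once per connection:
**   sqlite3_trace(db, exampleTrace, 0);
*/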
-*/ -SQLITE_API void *sqlite3_trace(sqlite3*, void(*xTrace)(void*,const char*), void*); -SQLITE_API SQLITE_EXPERIMENTAL void *sqlite3_profile(sqlite3*, - void(*xProfile)(void*,const char*,sqlite3_uint64), void*); - -/* -** CAPI3REF: Query Progress Callbacks -** -** ^The sqlite3_progress_handler(D,N,X,P) interface causes the callback -** function X to be invoked periodically during long running calls to -** [sqlite3_exec()], [sqlite3_step()] and [sqlite3_get_table()] for -** database connection D. An example use for this -** interface is to keep a GUI updated during a large query. -** -** ^The parameter P is passed through as the only parameter to the -** callback function X. ^The parameter N is the approximate number of -** [virtual machine instructions] that are evaluated between successive -** invocations of the callback X. ^If N is less than one then the progress -** handler is disabled. -** -** ^Only a single progress handler may be defined at one time per -** [database connection]; setting a new progress handler cancels the -** old one. ^Setting parameter X to NULL disables the progress handler. -** ^The progress handler is also disabled by setting N to a value less -** than 1. -** -** ^If the progress callback returns non-zero, the operation is -** interrupted. This feature can be used to implement a -** "Cancel" button on a GUI progress dialog box. -** -** The progress handler callback must not do anything that will modify -** the database connection that invoked the progress handler. -** Note that [sqlite3_prepare_v2()] and [sqlite3_step()] both modify their -** database connections for the meaning of "modify" in this paragraph. -** -*/ -SQLITE_API void sqlite3_progress_handler(sqlite3*, int, int(*)(void*), void*); - -/* -** CAPI3REF: Opening A New Database Connection -** -** ^These routines open an SQLite database file as specified by the -** filename argument. ^The filename argument is interpreted as UTF-8 for -** sqlite3_open() and sqlite3_open_v2() and as UTF-16 in the native byte -** order for sqlite3_open16(). ^(A [database connection] handle is usually -** returned in *ppDb, even if an error occurs. The only exception is that -** if SQLite is unable to allocate memory to hold the [sqlite3] object, -** a NULL will be written into *ppDb instead of a pointer to the [sqlite3] -** object.)^ ^(If the database is opened (and/or created) successfully, then -** [SQLITE_OK] is returned. Otherwise an [error code] is returned.)^ ^The -** [sqlite3_errmsg()] or [sqlite3_errmsg16()] routines can be used to obtain -** an English language description of the error following a failure of any -** of the sqlite3_open() routines. -** -** ^The default encoding for the database will be UTF-8 if -** sqlite3_open() or sqlite3_open_v2() is called and -** UTF-16 in the native byte order if sqlite3_open16() is used. -** -** Whether or not an error occurs when it is opened, resources -** associated with the [database connection] handle should be released by -** passing it to [sqlite3_close()] when it is no longer required. -** -** The sqlite3_open_v2() interface works like sqlite3_open() -** except that it accepts two additional parameters for additional control -** over the new database connection. ^(The flags parameter to -** sqlite3_open_v2() can take one of -** the following three values, optionally combined with the -** [SQLITE_OPEN_NOMUTEX], [SQLITE_OPEN_FULLMUTEX], [SQLITE_OPEN_SHAREDCACHE], -** [SQLITE_OPEN_PRIVATECACHE], and/or [SQLITE_OPEN_URI] flags:)^ -** -**
    -** ^(
    [SQLITE_OPEN_READONLY]
    -**
    The database is opened in read-only mode. If the database does not -** already exist, an error is returned.
    )^ -** -** ^(
    [SQLITE_OPEN_READWRITE]
    -**
    The database is opened for reading and writing if possible, or reading -** only if the file is write protected by the operating system. In either -** case the database must already exist, otherwise an error is returned.
    )^ -** -** ^(
    [SQLITE_OPEN_READWRITE] | [SQLITE_OPEN_CREATE]
    -**
    The database is opened for reading and writing, and is created if -** it does not already exist. This is the behavior that is always used for -** sqlite3_open() and sqlite3_open16().
    )^ -**
    -** -** If the 3rd parameter to sqlite3_open_v2() is not one of the -** combinations shown above optionally combined with other -** [SQLITE_OPEN_READONLY | SQLITE_OPEN_* bits] -** then the behavior is undefined. -** -** ^If the [SQLITE_OPEN_NOMUTEX] flag is set, then the database connection -** opens in the multi-thread [threading mode] as long as the single-thread -** mode has not been set at compile-time or start-time. ^If the -** [SQLITE_OPEN_FULLMUTEX] flag is set then the database connection opens -** in the serialized [threading mode] unless single-thread was -** previously selected at compile-time or start-time. -** ^The [SQLITE_OPEN_SHAREDCACHE] flag causes the database connection to be -** eligible to use [shared cache mode], regardless of whether or not shared -** cache is enabled using [sqlite3_enable_shared_cache()]. ^The -** [SQLITE_OPEN_PRIVATECACHE] flag causes the database connection to not -** participate in [shared cache mode] even if it is enabled. -** -** ^The fourth parameter to sqlite3_open_v2() is the name of the -** [sqlite3_vfs] object that defines the operating system interface that -** the new database connection should use. ^If the fourth parameter is -** a NULL pointer then the default [sqlite3_vfs] object is used. -** -** ^If the filename is ":memory:", then a private, temporary in-memory database -** is created for the connection. ^This in-memory database will vanish when -** the database connection is closed. Future versions of SQLite might -** make use of additional special filenames that begin with the ":" character. -** It is recommended that when a database filename actually does begin with -** a ":" character you should prefix the filename with a pathname such as -** "./" to avoid ambiguity. -** -** ^If the filename is an empty string, then a private, temporary -** on-disk database will be created. ^This private database will be -** automatically deleted as soon as the database connection is closed. -** -** [[URI filenames in sqlite3_open()]]

    URI Filenames

    -** -** ^If [URI filename] interpretation is enabled, and the filename argument -** begins with "file:", then the filename is interpreted as a URI. ^URI -** filename interpretation is enabled if the [SQLITE_OPEN_URI] flag is -** set in the fourth argument to sqlite3_open_v2(), or if it has -** been enabled globally using the [SQLITE_CONFIG_URI] option with the -** [sqlite3_config()] method or by the [SQLITE_USE_URI] compile-time option. -** As of SQLite version 3.7.7, URI filename interpretation is turned off -** by default, but future releases of SQLite might enable URI filename -** interpretation by default. See "[URI filenames]" for additional -** information. -** -** URI filenames are parsed according to RFC 3986. ^If the URI contains an -** authority, then it must be either an empty string or the string -** "localhost". ^If the authority is not an empty string or "localhost", an -** error is returned to the caller. ^The fragment component of a URI, if -** present, is ignored. -** -** ^SQLite uses the path component of the URI as the name of the disk file -** which contains the database. ^If the path begins with a '/' character, -** then it is interpreted as an absolute path. ^If the path does not begin -** with a '/' (meaning that the authority section is omitted from the URI) -** then the path is interpreted as a relative path. -** ^On windows, the first component of an absolute path -** is a drive specification (e.g. "C:"). -** -** [[core URI query parameters]] -** The query component of a URI may contain parameters that are interpreted -** either by SQLite itself, or by a [VFS | custom VFS implementation]. -** SQLite interprets the following three query parameters: -** -**
      -**
    • vfs: ^The "vfs" parameter may be used to specify the name of -** a VFS object that provides the operating system interface that should -** be used to access the database file on disk. ^If this option is set to -** an empty string the default VFS object is used. ^Specifying an unknown -** VFS is an error. ^If sqlite3_open_v2() is used and the vfs option is -** present, then the VFS specified by the option takes precedence over -** the value passed as the fourth parameter to sqlite3_open_v2(). -** -**
    • mode: ^(The mode parameter may be set to either "ro", "rw", -** "rwc", or "memory". Attempting to set it to any other value is -** an error)^. -** ^If "ro" is specified, then the database is opened for read-only -** access, just as if the [SQLITE_OPEN_READONLY] flag had been set in the -** third argument to sqlite3_open_v2(). ^If the mode option is set to -** "rw", then the database is opened for read-write (but not create) -** access, as if SQLITE_OPEN_READWRITE (but not SQLITE_OPEN_CREATE) had -** been set. ^Value "rwc" is equivalent to setting both -** SQLITE_OPEN_READWRITE and SQLITE_OPEN_CREATE. ^If the mode option is -** set to "memory" then a pure [in-memory database] that never reads -** or writes from disk is used. ^It is an error to specify a value for -** the mode parameter that is less restrictive than that specified by -** the flags passed in the third parameter to sqlite3_open_v2(). -** -**
    • cache: ^The cache parameter may be set to either "shared" or -** "private". ^Setting it to "shared" is equivalent to setting the -** SQLITE_OPEN_SHAREDCACHE bit in the flags argument passed to -** sqlite3_open_v2(). ^Setting the cache parameter to "private" is -** equivalent to setting the SQLITE_OPEN_PRIVATECACHE bit. -** ^If sqlite3_open_v2() is used and the "cache" parameter is present in -** a URI filename, its value overrides any behavior requested by setting -** SQLITE_OPEN_PRIVATECACHE or SQLITE_OPEN_SHAREDCACHE flag. -** -**
    • psow: ^The psow parameter may be "true" (or "on" or "yes" or -** "1") or "false" (or "off" or "no" or "0") to indicate that the -** [powersafe overwrite] property does or does not apply to the -** storage media on which the database file resides. ^The psow query -** parameter only works for the built-in unix and Windows VFSes. -** -**
    • nolock: ^The nolock parameter is a boolean query parameter -** which if set disables file locking in rollback journal modes. This -** is useful for accessing a database on a filesystem that does not -** support locking. Caution: Database corruption might result if two -** or more processes write to the same database and any one of those -** processes uses nolock=1. -** -**
    • immutable: ^The immutable parameter is a boolean query -** parameter that indicates that the database file is stored on -** read-only media. ^When immutable is set, SQLite assumes that the -** database file cannot be changed, even by a process with higher -** privilege, and so the database is opened read-only and all locking -** and change detection is disabled. Caution: Setting the immutable -** property on a database file that does in fact change can result -** in incorrect query results and/or [SQLITE_CORRUPT] errors. -** See also: [SQLITE_IOCAP_IMMUTABLE]. -** -**
    -** -** ^Specifying an unknown parameter in the query component of a URI is not an -** error. Future versions of SQLite might understand additional query -** parameters. See "[query parameters with special meaning to SQLite]" for -** additional information. -** -** [[URI filename examples]]

    URI filename examples

    -** -**
    -**
    URI filenames Results -**
    file:data.db -** Open the file "data.db" in the current directory. -**
    file:/home/fred/data.db
    -** file:///home/fred/data.db
    -** file://localhost/home/fred/data.db
    -** Open the database file "/home/fred/data.db". -**
    file://darkstar/home/fred/data.db -** An error. "darkstar" is not a recognized authority. -**
    -** file:///C:/Documents%20and%20Settings/fred/Desktop/data.db -** Windows only: Open the file "data.db" on fred's desktop on drive -** C:. Note that the %20 escaping in this example is not strictly -** necessary - space characters can be used literally -** in URI filenames. -**
    file:data.db?mode=ro&cache=private -** Open file "data.db" in the current directory for read-only access. -** Regardless of whether or not shared-cache mode is enabled by -** default, use a private cache. -**
    file:/home/fred/data.db?vfs=unix-dotfile -** Open file "/home/fred/data.db". Use the special VFS "unix-dotfile" -** that uses dot-files in place of posix advisory locking. -**
    file:data.db?mode=readonly -** An error. "readonly" is not a valid option for the "mode" parameter. -**
    -** -** ^URI hexadecimal escape sequences (%HH) are supported within the path and -** query components of a URI. A hexadecimal escape sequence consists of a -** percent sign - "%" - followed by exactly two hexadecimal digits -** specifying an octet value. ^Before the path or query components of a -** URI filename are interpreted, they are encoded using UTF-8 and all -** hexadecimal escape sequences replaced by a single byte containing the -** corresponding octet. If this process generates an invalid UTF-8 encoding, -** the results are undefined. -** -** Note to Windows users: The encoding used for the filename argument -** of sqlite3_open() and sqlite3_open_v2() must be UTF-8, not whatever -** codepage is currently defined. Filenames containing international -** characters must be converted to UTF-8 prior to passing them into -** sqlite3_open() or sqlite3_open_v2(). -** -** Note to Windows Runtime users: The temporary directory must be set -** prior to calling sqlite3_open() or sqlite3_open_v2(). Otherwise, various -** features that require the use of temporary files may fail. -** -** See also: [sqlite3_temp_directory] -*/ -SQLITE_API int sqlite3_open( - const char *filename, /* Database filename (UTF-8) */ - sqlite3 **ppDb /* OUT: SQLite db handle */ -); -SQLITE_API int sqlite3_open16( - const void *filename, /* Database filename (UTF-16) */ - sqlite3 **ppDb /* OUT: SQLite db handle */ -); -SQLITE_API int sqlite3_open_v2( - const char *filename, /* Database filename (UTF-8) */ - sqlite3 **ppDb, /* OUT: SQLite db handle */ - int flags, /* Flags */ - const char *zVfs /* Name of VFS module to use */ -); - -/* -** CAPI3REF: Obtain Values For URI Parameters -** -** These are utility routines, useful to VFS implementations, that check -** to see if a database file was a URI that contained a specific query -** parameter, and if so obtains the value of that query parameter. -** -** If F is the database filename pointer passed into the xOpen() method of -** a VFS implementation when the flags parameter to xOpen() has one or -** more of the [SQLITE_OPEN_URI] or [SQLITE_OPEN_MAIN_DB] bits set and -** P is the name of the query parameter, then -** sqlite3_uri_parameter(F,P) returns the value of the P -** parameter if it exists or a NULL pointer if P does not appear as a -** query parameter on F. If P is a query parameter of F -** has no explicit value, then sqlite3_uri_parameter(F,P) returns -** a pointer to an empty string. -** -** The sqlite3_uri_boolean(F,P,B) routine assumes that P is a boolean -** parameter and returns true (1) or false (0) according to the value -** of P. The sqlite3_uri_boolean(F,P,B) routine returns true (1) if the -** value of query parameter P is one of "yes", "true", or "on" in any -** case or if the value begins with a non-zero number. The -** sqlite3_uri_boolean(F,P,B) routines returns false (0) if the value of -** query parameter P is one of "no", "false", or "off" in any case or -** if the value begins with a numeric zero. If P is not a query -** parameter on F or if the value of P is does not match any of the -** above, then sqlite3_uri_boolean(F,P,B) returns (B!=0). -** -** The sqlite3_uri_int64(F,P,D) routine converts the value of P into a -** 64-bit signed integer and returns that integer, or D if P does not -** exist. If the value of P is something other than an integer, then -** zero is returned. -** -** If F is a NULL pointer, then sqlite3_uri_parameter(F,P) returns NULL and -** sqlite3_uri_boolean(F,P,B) returns B. 
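/*
** Illustrative sketch, not from the SQLite sources: opening a database with
** sqlite3_open_v2() using a URI filename, combining the flags and query
** parameters described above.  The path and parameter values are examples
** only; <stdio.h> is assumed to be included.
*/
static int exampleOpen(sqlite3 **ppDb){
  int rc = sqlite3_open_v2(
      "file:/home/fred/data.db?cache=private&mode=rwc",
      ppDb,
      SQLITE_OPEN_READWRITE | SQLITE_OPEN_CREATE | SQLITE_OPEN_URI,
      0);                              /* 0 selects the default VFS */
  if( rc!=SQLITE_OK ){
    fprintf(stderr, "open failed: %s\n", sqlite3_errmsg(*ppDb));
    sqlite3_close(*ppDb);              /* the handle must be closed even on failure */
    *ppDb = 0;
  }
  return rc;
}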
If F is not a NULL pointer and -** is not a database file pathname pointer that SQLite passed into the xOpen -** VFS method, then the behavior of this routine is undefined and probably -** undesirable. -*/ -SQLITE_API const char *sqlite3_uri_parameter(const char *zFilename, const char *zParam); -SQLITE_API int sqlite3_uri_boolean(const char *zFile, const char *zParam, int bDefault); -SQLITE_API sqlite3_int64 sqlite3_uri_int64(const char*, const char*, sqlite3_int64); - - -/* -** CAPI3REF: Error Codes And Messages -** -** ^The sqlite3_errcode() interface returns the numeric [result code] or -** [extended result code] for the most recent failed sqlite3_* API call -** associated with a [database connection]. If a prior API call failed -** but the most recent API call succeeded, the return value from -** sqlite3_errcode() is undefined. ^The sqlite3_extended_errcode() -** interface is the same except that it always returns the -** [extended result code] even when extended result codes are -** disabled. -** -** ^The sqlite3_errmsg() and sqlite3_errmsg16() return English-language -** text that describes the error, as either UTF-8 or UTF-16 respectively. -** ^(Memory to hold the error message string is managed internally. -** The application does not need to worry about freeing the result. -** However, the error string might be overwritten or deallocated by -** subsequent calls to other SQLite interface functions.)^ -** -** ^The sqlite3_errstr() interface returns the English-language text -** that describes the [result code], as UTF-8. -** ^(Memory to hold the error message string is managed internally -** and must not be freed by the application)^. -** -** When the serialized [threading mode] is in use, it might be the -** case that a second error occurs on a separate thread in between -** the time of the first error and the call to these interfaces. -** When that happens, the second error will be reported since these -** interfaces always report the most recent result. To avoid -** this, each thread can obtain exclusive use of the [database connection] D -** by invoking [sqlite3_mutex_enter]([sqlite3_db_mutex](D)) before beginning -** to use D and invoking [sqlite3_mutex_leave]([sqlite3_db_mutex](D)) after -** all calls to the interfaces listed here are completed. -** -** If an interface fails with SQLITE_MISUSE, that means the interface -** was invoked incorrectly by the application. In that case, the -** error code and message may or may not be set. -*/ -SQLITE_API int sqlite3_errcode(sqlite3 *db); -SQLITE_API int sqlite3_extended_errcode(sqlite3 *db); -SQLITE_API const char *sqlite3_errmsg(sqlite3*); -SQLITE_API const void *sqlite3_errmsg16(sqlite3*); -SQLITE_API const char *sqlite3_errstr(int); - -/* -** CAPI3REF: SQL Statement Object -** KEYWORDS: {prepared statement} {prepared statements} -** -** An instance of this object represents a single SQL statement. -** This object is variously known as a "prepared statement" or a -** "compiled SQL statement" or simply as a "statement". -** -** The life of a statement object goes something like this: -** -**
      -**
    1. Create the object using [sqlite3_prepare_v2()] or a related -** function. -**
    2. Bind values to [host parameters] using the sqlite3_bind_*() -** interfaces. -**
    3. Run the SQL by calling [sqlite3_step()] one or more times. -**
    4. Reset the statement using [sqlite3_reset()] then go back -** to step 2. Do this zero or more times. -**
    5. Destroy the object using [sqlite3_finalize()]. -**
    -** -** Refer to documentation on individual methods above for additional -** information. -*/ -typedef struct sqlite3_stmt sqlite3_stmt; - -/* -** CAPI3REF: Run-time Limits -** -** ^(This interface allows the size of various constructs to be limited -** on a connection by connection basis. The first parameter is the -** [database connection] whose limit is to be set or queried. The -** second parameter is one of the [limit categories] that define a -** class of constructs to be size limited. The third parameter is the -** new limit for that construct.)^ -** -** ^If the new limit is a negative number, the limit is unchanged. -** ^(For each limit category SQLITE_LIMIT_NAME there is a -** [limits | hard upper bound] -** set at compile-time by a C preprocessor macro called -** [limits | SQLITE_MAX_NAME]. -** (The "_LIMIT_" in the name is changed to "_MAX_".))^ -** ^Attempts to increase a limit above its hard upper bound are -** silently truncated to the hard upper bound. -** -** ^Regardless of whether or not the limit was changed, the -** [sqlite3_limit()] interface returns the prior value of the limit. -** ^Hence, to find the current value of a limit without changing it, -** simply invoke this interface with the third parameter set to -1. -** -** Run-time limits are intended for use in applications that manage -** both their own internal database and also databases that are controlled -** by untrusted external sources. An example application might be a -** web browser that has its own databases for storing history and -** separate databases controlled by JavaScript applications downloaded -** off the Internet. The internal databases can be given the -** large, default limits. Databases managed by external sources can -** be given much smaller limits designed to prevent a denial of service -** attack. Developers might also want to use the [sqlite3_set_authorizer()] -** interface to further control untrusted SQL. The size of the database -** created by an untrusted script can be contained using the -** [max_page_count] [PRAGMA]. -** -** New run-time limit categories may be added in future releases. -*/ -SQLITE_API int sqlite3_limit(sqlite3*, int id, int newVal); - -/* -** CAPI3REF: Run-Time Limit Categories -** KEYWORDS: {limit category} {*limit categories} -** -** These constants define various performance limits -** that can be lowered at run-time using [sqlite3_limit()]. -** The synopsis of the meanings of the various limits is shown below. -** Additional information is available at [limits | Limits in SQLite]. -** -**
    -** [[SQLITE_LIMIT_LENGTH]] ^(
    SQLITE_LIMIT_LENGTH
    -**
    The maximum size of any string or BLOB or table row, in bytes.
    )^ -** -** [[SQLITE_LIMIT_SQL_LENGTH]] ^(
    SQLITE_LIMIT_SQL_LENGTH
    -**
    The maximum length of an SQL statement, in bytes.
    )^ -** -** [[SQLITE_LIMIT_COLUMN]] ^(
    SQLITE_LIMIT_COLUMN
    -**
    The maximum number of columns in a table definition or in the -** result set of a [SELECT] or the maximum number of columns in an index -** or in an ORDER BY or GROUP BY clause.
    )^ -** -** [[SQLITE_LIMIT_EXPR_DEPTH]] ^(
    SQLITE_LIMIT_EXPR_DEPTH
    -**
    The maximum depth of the parse tree on any expression.
    )^ -** -** [[SQLITE_LIMIT_COMPOUND_SELECT]] ^(
    SQLITE_LIMIT_COMPOUND_SELECT
    -**
    The maximum number of terms in a compound SELECT statement.
    )^ -** -** [[SQLITE_LIMIT_VDBE_OP]] ^(
    SQLITE_LIMIT_VDBE_OP
    -**
    The maximum number of instructions in a virtual machine program -** used to implement an SQL statement. This limit is not currently -** enforced, though that might be added in some future release of -** SQLite.
    )^ -** -** [[SQLITE_LIMIT_FUNCTION_ARG]] ^(
    SQLITE_LIMIT_FUNCTION_ARG
    -**
    The maximum number of arguments on a function.
    )^ -** -** [[SQLITE_LIMIT_ATTACHED]] ^(
    SQLITE_LIMIT_ATTACHED
    -**
    The maximum number of [ATTACH | attached databases].)^
    -** -** [[SQLITE_LIMIT_LIKE_PATTERN_LENGTH]] -** ^(
    SQLITE_LIMIT_LIKE_PATTERN_LENGTH
    -**
    The maximum length of the pattern argument to the [LIKE] or -** [GLOB] operators.
    )^ -** -** [[SQLITE_LIMIT_VARIABLE_NUMBER]] -** ^(
    SQLITE_LIMIT_VARIABLE_NUMBER
    -**
    The maximum index number of any [parameter] in an SQL statement.)^ -** -** [[SQLITE_LIMIT_TRIGGER_DEPTH]] ^(
    SQLITE_LIMIT_TRIGGER_DEPTH
    -**
    The maximum depth of recursion for triggers.
    )^ -**
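/*
** Illustrative sketch, not from the SQLite sources: lowering a few run-time
** limits on a connection that will evaluate untrusted SQL, as suggested
** above.  The limit names are defined immediately below; the numeric values
** are arbitrary examples, not recommendations.
*/
static void exampleRestrict(sqlite3 *db){
  sqlite3_limit(db, SQLITE_LIMIT_LENGTH,     1000000);  /* strings/BLOBs <= 1 MB */
  sqlite3_limit(db, SQLITE_LIMIT_SQL_LENGTH, 10000);    /* short SQL statements  */
  sqlite3_limit(db, SQLITE_LIMIT_ATTACHED,   0);        /* disallow ATTACH       */
  /* Passing -1 reads a limit back without changing it:
  **   int cur = sqlite3_limit(db, SQLITE_LIMIT_LENGTH, -1);
  */
}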
    -*/ -#define SQLITE_LIMIT_LENGTH 0 -#define SQLITE_LIMIT_SQL_LENGTH 1 -#define SQLITE_LIMIT_COLUMN 2 -#define SQLITE_LIMIT_EXPR_DEPTH 3 -#define SQLITE_LIMIT_COMPOUND_SELECT 4 -#define SQLITE_LIMIT_VDBE_OP 5 -#define SQLITE_LIMIT_FUNCTION_ARG 6 -#define SQLITE_LIMIT_ATTACHED 7 -#define SQLITE_LIMIT_LIKE_PATTERN_LENGTH 8 -#define SQLITE_LIMIT_VARIABLE_NUMBER 9 -#define SQLITE_LIMIT_TRIGGER_DEPTH 10 - -/* -** CAPI3REF: Compiling An SQL Statement -** KEYWORDS: {SQL statement compiler} -** -** To execute an SQL query, it must first be compiled into a byte-code -** program using one of these routines. -** -** The first argument, "db", is a [database connection] obtained from a -** prior successful call to [sqlite3_open()], [sqlite3_open_v2()] or -** [sqlite3_open16()]. The database connection must not have been closed. -** -** The second argument, "zSql", is the statement to be compiled, encoded -** as either UTF-8 or UTF-16. The sqlite3_prepare() and sqlite3_prepare_v2() -** interfaces use UTF-8, and sqlite3_prepare16() and sqlite3_prepare16_v2() -** use UTF-16. -** -** ^If the nByte argument is less than zero, then zSql is read up to the -** first zero terminator. ^If nByte is non-negative, then it is the maximum -** number of bytes read from zSql. ^When nByte is non-negative, the -** zSql string ends at either the first '\000' or '\u0000' character or -** the nByte-th byte, whichever comes first. If the caller knows -** that the supplied string is nul-terminated, then there is a small -** performance advantage to be gained by passing an nByte parameter that -** is equal to the number of bytes in the input string including -** the nul-terminator bytes as this saves SQLite from having to -** make a copy of the input string. -** -** ^If pzTail is not NULL then *pzTail is made to point to the first byte -** past the end of the first SQL statement in zSql. These routines only -** compile the first statement in zSql, so *pzTail is left pointing to -** what remains uncompiled. -** -** ^*ppStmt is left pointing to a compiled [prepared statement] that can be -** executed using [sqlite3_step()]. ^If there is an error, *ppStmt is set -** to NULL. ^If the input text contains no SQL (if the input is an empty -** string or a comment) then *ppStmt is set to NULL. -** The calling procedure is responsible for deleting the compiled -** SQL statement using [sqlite3_finalize()] after it has finished with it. -** ppStmt may not be NULL. -** -** ^On success, the sqlite3_prepare() family of routines return [SQLITE_OK]; -** otherwise an [error code] is returned. -** -** The sqlite3_prepare_v2() and sqlite3_prepare16_v2() interfaces are -** recommended for all new programs. The two older interfaces are retained -** for backwards compatibility, but their use is discouraged. -** ^In the "v2" interfaces, the prepared statement -** that is returned (the [sqlite3_stmt] object) contains a copy of the -** original SQL text. This causes the [sqlite3_step()] interface to -** behave differently in three ways: -** -**
      -**
1. -** ^If the database schema changes, instead of returning [SQLITE_SCHEMA] as it -** always used to do, [sqlite3_step()] will automatically recompile the SQL -** statement and try to run it again. As many as [SQLITE_MAX_SCHEMA_RETRY] -** retries will occur before sqlite3_step() gives up and returns an error. -**
-** -**
2. -** ^When an error occurs, [sqlite3_step()] will return one of the detailed -** [error codes] or [extended error codes]. ^The legacy behavior was that -** [sqlite3_step()] would only return a generic [SQLITE_ERROR] result code -** and the application would have to make a second call to [sqlite3_reset()] -** in order to find the underlying cause of the problem. With the "v2" prepare -** interfaces, the underlying reason for the error is returned immediately. -**
-** -**
3. -** ^If the specific value bound to [parameter | host parameter] in the -** WHERE clause might influence the choice of query plan for a statement, -** then the statement will be automatically recompiled, as if there had been -** a schema change, on the first [sqlite3_step()] call following any change -** to the [sqlite3_bind_text | bindings] of that [parameter]. -** ^The specific value of WHERE-clause [parameter] might influence the -** choice of query plan if the parameter is the left-hand side of a [LIKE] -** or [GLOB] operator or if the parameter is compared to an indexed column -** and the [SQLITE_ENABLE_STAT3] compile-time option is enabled. -**
-**
    -*/ -SQLITE_API int sqlite3_prepare( - sqlite3 *db, /* Database handle */ - const char *zSql, /* SQL statement, UTF-8 encoded */ - int nByte, /* Maximum length of zSql in bytes. */ - sqlite3_stmt **ppStmt, /* OUT: Statement handle */ - const char **pzTail /* OUT: Pointer to unused portion of zSql */ -); -SQLITE_API int sqlite3_prepare_v2( - sqlite3 *db, /* Database handle */ - const char *zSql, /* SQL statement, UTF-8 encoded */ - int nByte, /* Maximum length of zSql in bytes. */ - sqlite3_stmt **ppStmt, /* OUT: Statement handle */ - const char **pzTail /* OUT: Pointer to unused portion of zSql */ -); -SQLITE_API int sqlite3_prepare16( - sqlite3 *db, /* Database handle */ - const void *zSql, /* SQL statement, UTF-16 encoded */ - int nByte, /* Maximum length of zSql in bytes. */ - sqlite3_stmt **ppStmt, /* OUT: Statement handle */ - const void **pzTail /* OUT: Pointer to unused portion of zSql */ -); -SQLITE_API int sqlite3_prepare16_v2( - sqlite3 *db, /* Database handle */ - const void *zSql, /* SQL statement, UTF-16 encoded */ - int nByte, /* Maximum length of zSql in bytes. */ - sqlite3_stmt **ppStmt, /* OUT: Statement handle */ - const void **pzTail /* OUT: Pointer to unused portion of zSql */ -); - -/* -** CAPI3REF: Retrieving Statement SQL -** -** ^This interface can be used to retrieve a saved copy of the original -** SQL text used to create a [prepared statement] if that statement was -** compiled using either [sqlite3_prepare_v2()] or [sqlite3_prepare16_v2()]. -*/ -SQLITE_API const char *sqlite3_sql(sqlite3_stmt *pStmt); - -/* -** CAPI3REF: Determine If An SQL Statement Writes The Database -** -** ^The sqlite3_stmt_readonly(X) interface returns true (non-zero) if -** and only if the [prepared statement] X makes no direct changes to -** the content of the database file. -** -** Note that [application-defined SQL functions] or -** [virtual tables] might change the database indirectly as a side effect. -** ^(For example, if an application defines a function "eval()" that -** calls [sqlite3_exec()], then the following SQL statement would -** change the database file through side-effects: -** -**
    -**    SELECT eval('DELETE FROM t1') FROM t2;
    -** 
    -** -** But because the [SELECT] statement does not change the database file -** directly, sqlite3_stmt_readonly() would still return true.)^ -** -** ^Transaction control statements such as [BEGIN], [COMMIT], [ROLLBACK], -** [SAVEPOINT], and [RELEASE] cause sqlite3_stmt_readonly() to return true, -** since the statements themselves do not actually modify the database but -** rather they control the timing of when other statements modify the -** database. ^The [ATTACH] and [DETACH] statements also cause -** sqlite3_stmt_readonly() to return true since, while those statements -** change the configuration of a database connection, they do not make -** changes to the content of the database files on disk. -*/ -SQLITE_API int sqlite3_stmt_readonly(sqlite3_stmt *pStmt); - -/* -** CAPI3REF: Determine If A Prepared Statement Has Been Reset -** -** ^The sqlite3_stmt_busy(S) interface returns true (non-zero) if the -** [prepared statement] S has been stepped at least once using -** [sqlite3_step(S)] but has not run to completion and/or has not -** been reset using [sqlite3_reset(S)]. ^The sqlite3_stmt_busy(S) -** interface returns false if S is a NULL pointer. If S is not a -** NULL pointer and is not a pointer to a valid [prepared statement] -** object, then the behavior is undefined and probably undesirable. -** -** This interface can be used in combination [sqlite3_next_stmt()] -** to locate all prepared statements associated with a database -** connection that are in need of being reset. This can be used, -** for example, in diagnostic routines to search for prepared -** statements that are holding a transaction open. -*/ -SQLITE_API int sqlite3_stmt_busy(sqlite3_stmt*); - -/* -** CAPI3REF: Dynamically Typed Value Object -** KEYWORDS: {protected sqlite3_value} {unprotected sqlite3_value} -** -** SQLite uses the sqlite3_value object to represent all values -** that can be stored in a database table. SQLite uses dynamic typing -** for the values it stores. ^Values stored in sqlite3_value objects -** can be integers, floating point values, strings, BLOBs, or NULL. -** -** An sqlite3_value object may be either "protected" or "unprotected". -** Some interfaces require a protected sqlite3_value. Other interfaces -** will accept either a protected or an unprotected sqlite3_value. -** Every interface that accepts sqlite3_value arguments specifies -** whether or not it requires a protected sqlite3_value. -** -** The terms "protected" and "unprotected" refer to whether or not -** a mutex is held. An internal mutex is held for a protected -** sqlite3_value object but no mutex is held for an unprotected -** sqlite3_value object. If SQLite is compiled to be single-threaded -** (with [SQLITE_THREADSAFE=0] and with [sqlite3_threadsafe()] returning 0) -** or if SQLite is run in one of reduced mutex modes -** [SQLITE_CONFIG_SINGLETHREAD] or [SQLITE_CONFIG_MULTITHREAD] -** then there is no distinction between protected and unprotected -** sqlite3_value objects and they can be used interchangeably. However, -** for maximum code portability it is recommended that applications -** still make the distinction between protected and unprotected -** sqlite3_value objects even when not strictly required. -** -** ^The sqlite3_value objects that are passed as parameters into the -** implementation of [application-defined SQL functions] are protected. -** ^The sqlite3_value object returned by -** [sqlite3_column_value()] is unprotected. 
-** Unprotected sqlite3_value objects may only be used with -** [sqlite3_result_value()] and [sqlite3_bind_value()]. -** The [sqlite3_value_blob | sqlite3_value_type()] family of -** interfaces require protected sqlite3_value objects. -*/ -typedef struct Mem sqlite3_value; - -/* -** CAPI3REF: SQL Function Context Object -** -** The context in which an SQL function executes is stored in an -** sqlite3_context object. ^A pointer to an sqlite3_context object -** is always first parameter to [application-defined SQL functions]. -** The application-defined SQL function implementation will pass this -** pointer through into calls to [sqlite3_result_int | sqlite3_result()], -** [sqlite3_aggregate_context()], [sqlite3_user_data()], -** [sqlite3_context_db_handle()], [sqlite3_get_auxdata()], -** and/or [sqlite3_set_auxdata()]. -*/ -typedef struct sqlite3_context sqlite3_context; - -/* -** CAPI3REF: Binding Values To Prepared Statements -** KEYWORDS: {host parameter} {host parameters} {host parameter name} -** KEYWORDS: {SQL parameter} {SQL parameters} {parameter binding} -** -** ^(In the SQL statement text input to [sqlite3_prepare_v2()] and its variants, -** literals may be replaced by a [parameter] that matches one of following -** templates: -** -**
-**
-**   •  ?
-**   •  ?NNN
-**   •  :VVV
-**   •  @VVV
-**   •  $VVV
    -** -** In the templates above, NNN represents an integer literal, -** and VVV represents an alphanumeric identifier.)^ ^The values of these -** parameters (also called "host parameter names" or "SQL parameters") -** can be set using the sqlite3_bind_*() routines defined here. -** -** ^The first argument to the sqlite3_bind_*() routines is always -** a pointer to the [sqlite3_stmt] object returned from -** [sqlite3_prepare_v2()] or its variants. -** -** ^The second argument is the index of the SQL parameter to be set. -** ^The leftmost SQL parameter has an index of 1. ^When the same named -** SQL parameter is used more than once, second and subsequent -** occurrences have the same index as the first occurrence. -** ^The index for named parameters can be looked up using the -** [sqlite3_bind_parameter_index()] API if desired. ^The index -** for "?NNN" parameters is the value of NNN. -** ^The NNN value must be between 1 and the [sqlite3_limit()] -** parameter [SQLITE_LIMIT_VARIABLE_NUMBER] (default value: 999). -** -** ^The third argument is the value to bind to the parameter. -** ^If the third parameter to sqlite3_bind_text() or sqlite3_bind_text16() -** or sqlite3_bind_blob() is a NULL pointer then the fourth parameter -** is ignored and the end result is the same as sqlite3_bind_null(). -** -** ^(In those routines that have a fourth argument, its value is the -** number of bytes in the parameter. To be clear: the value is the -** number of bytes in the value, not the number of characters.)^ -** ^If the fourth parameter to sqlite3_bind_text() or sqlite3_bind_text16() -** is negative, then the length of the string is -** the number of bytes up to the first zero terminator. -** If the fourth parameter to sqlite3_bind_blob() is negative, then -** the behavior is undefined. -** If a non-negative fourth parameter is provided to sqlite3_bind_text() -** or sqlite3_bind_text16() then that parameter must be the byte offset -** where the NUL terminator would occur assuming the string were NUL -** terminated. If any NUL characters occur at byte offsets less than -** the value of the fourth parameter then the resulting string value will -** contain embedded NULs. The result of expressions involving strings -** with embedded NULs is undefined. -** -** ^The fifth argument to sqlite3_bind_blob(), sqlite3_bind_text(), and -** sqlite3_bind_text16() is a destructor used to dispose of the BLOB or -** string after SQLite has finished with it. ^The destructor is called -** to dispose of the BLOB or string even if the call to sqlite3_bind_blob(), -** sqlite3_bind_text(), or sqlite3_bind_text16() fails. -** ^If the fifth argument is -** the special value [SQLITE_STATIC], then SQLite assumes that the -** information is in static, unmanaged space and does not need to be freed. -** ^If the fifth argument has the value [SQLITE_TRANSIENT], then -** SQLite makes its own private copy of the data immediately, before -** the sqlite3_bind_*() routine returns. -** -** ^The sqlite3_bind_zeroblob() routine binds a BLOB of length N that -** is filled with zeroes. ^A zeroblob uses a fixed amount of memory -** (just an integer to hold its size) while it is being processed. -** Zeroblobs are intended to serve as placeholders for BLOBs whose -** content is later written using -** [sqlite3_blob_open | incremental BLOB I/O] routines. -** ^A negative value for the zeroblob results in a zero-length BLOB. 
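As a rough illustration of the binding interfaces described above, the sketch below prepares an INSERT statement, resolves its named parameters with sqlite3_bind_parameter_index(), and binds a text value and an integer before stepping the statement once. The table name "people" and its columns are invented for the example and are not defined anywhere in this header.

#include <sqlite3.h>

/* Sketch: bind values to the named parameters of an INSERT statement.
** The table "people(name, age)" is assumed to exist. */
static int insert_person(sqlite3 *db, const char *zName, int age){
  sqlite3_stmt *pStmt = 0;
  int rc = sqlite3_prepare_v2(db,
      "INSERT INTO people(name, age) VALUES(:name, :age)", -1, &pStmt, 0);
  if( rc!=SQLITE_OK ) return rc;

  /* Named parameters resolve to 1-based indexes. */
  int iName = sqlite3_bind_parameter_index(pStmt, ":name");
  int iAge  = sqlite3_bind_parameter_index(pStmt, ":age");

  /* SQLITE_TRANSIENT tells SQLite to make its own private copy of the text. */
  sqlite3_bind_text(pStmt, iName, zName, -1, SQLITE_TRANSIENT);
  sqlite3_bind_int(pStmt, iAge, age);

  rc = sqlite3_step(pStmt);          /* SQLITE_DONE is expected for an INSERT */
  sqlite3_finalize(pStmt);
  return rc==SQLITE_DONE ? SQLITE_OK : rc;
}

Passing -1 as the length argument lets SQLite measure the zero-terminated string itself, as described in the paragraphs above.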
-** -** ^If any of the sqlite3_bind_*() routines are called with a NULL pointer -** for the [prepared statement] or with a prepared statement for which -** [sqlite3_step()] has been called more recently than [sqlite3_reset()], -** then the call will return [SQLITE_MISUSE]. If any sqlite3_bind_() -** routine is passed a [prepared statement] that has been finalized, the -** result is undefined and probably harmful. -** -** ^Bindings are not cleared by the [sqlite3_reset()] routine. -** ^Unbound parameters are interpreted as NULL. -** -** ^The sqlite3_bind_* routines return [SQLITE_OK] on success or an -** [error code] if anything goes wrong. -** ^[SQLITE_RANGE] is returned if the parameter -** index is out of range. ^[SQLITE_NOMEM] is returned if malloc() fails. -** -** See also: [sqlite3_bind_parameter_count()], -** [sqlite3_bind_parameter_name()], and [sqlite3_bind_parameter_index()]. -*/ -SQLITE_API int sqlite3_bind_blob(sqlite3_stmt*, int, const void*, int n, void(*)(void*)); -SQLITE_API int sqlite3_bind_double(sqlite3_stmt*, int, double); -SQLITE_API int sqlite3_bind_int(sqlite3_stmt*, int, int); -SQLITE_API int sqlite3_bind_int64(sqlite3_stmt*, int, sqlite3_int64); -SQLITE_API int sqlite3_bind_null(sqlite3_stmt*, int); -SQLITE_API int sqlite3_bind_text(sqlite3_stmt*, int, const char*, int n, void(*)(void*)); -SQLITE_API int sqlite3_bind_text16(sqlite3_stmt*, int, const void*, int, void(*)(void*)); -SQLITE_API int sqlite3_bind_value(sqlite3_stmt*, int, const sqlite3_value*); -SQLITE_API int sqlite3_bind_zeroblob(sqlite3_stmt*, int, int n); - -/* -** CAPI3REF: Number Of SQL Parameters -** -** ^This routine can be used to find the number of [SQL parameters] -** in a [prepared statement]. SQL parameters are tokens of the -** form "?", "?NNN", ":AAA", "$AAA", or "@AAA" that serve as -** placeholders for values that are [sqlite3_bind_blob | bound] -** to the parameters at a later time. -** -** ^(This routine actually returns the index of the largest (rightmost) -** parameter. For all forms except ?NNN, this will correspond to the -** number of unique parameters. If parameters of the ?NNN form are used, -** there may be gaps in the list.)^ -** -** See also: [sqlite3_bind_blob|sqlite3_bind()], -** [sqlite3_bind_parameter_name()], and -** [sqlite3_bind_parameter_index()]. -*/ -SQLITE_API int sqlite3_bind_parameter_count(sqlite3_stmt*); - -/* -** CAPI3REF: Name Of A Host Parameter -** -** ^The sqlite3_bind_parameter_name(P,N) interface returns -** the name of the N-th [SQL parameter] in the [prepared statement] P. -** ^(SQL parameters of the form "?NNN" or ":AAA" or "@AAA" or "$AAA" -** have a name which is the string "?NNN" or ":AAA" or "@AAA" or "$AAA" -** respectively. -** In other words, the initial ":" or "$" or "@" or "?" -** is included as part of the name.)^ -** ^Parameters of the form "?" without a following integer have no name -** and are referred to as "nameless" or "anonymous parameters". -** -** ^The first host parameter has an index of 1, not 0. -** -** ^If the value N is out of range or if the N-th parameter is -** nameless, then NULL is returned. ^The returned string is -** always in UTF-8 encoding even if the named parameter was -** originally specified as UTF-16 in [sqlite3_prepare16()] or -** [sqlite3_prepare16_v2()]. -** -** See also: [sqlite3_bind_blob|sqlite3_bind()], -** [sqlite3_bind_parameter_count()], and -** [sqlite3_bind_parameter_index()]. 
-*/ -SQLITE_API const char *sqlite3_bind_parameter_name(sqlite3_stmt*, int); - -/* -** CAPI3REF: Index Of A Parameter With A Given Name -** -** ^Return the index of an SQL parameter given its name. ^The -** index value returned is suitable for use as the second -** parameter to [sqlite3_bind_blob|sqlite3_bind()]. ^A zero -** is returned if no matching parameter is found. ^The parameter -** name must be given in UTF-8 even if the original statement -** was prepared from UTF-16 text using [sqlite3_prepare16_v2()]. -** -** See also: [sqlite3_bind_blob|sqlite3_bind()], -** [sqlite3_bind_parameter_count()], and -** [sqlite3_bind_parameter_index()]. -*/ -SQLITE_API int sqlite3_bind_parameter_index(sqlite3_stmt*, const char *zName); - -/* -** CAPI3REF: Reset All Bindings On A Prepared Statement -** -** ^Contrary to the intuition of many, [sqlite3_reset()] does not reset -** the [sqlite3_bind_blob | bindings] on a [prepared statement]. -** ^Use this routine to reset all host parameters to NULL. -*/ -SQLITE_API int sqlite3_clear_bindings(sqlite3_stmt*); - -/* -** CAPI3REF: Number Of Columns In A Result Set -** -** ^Return the number of columns in the result set returned by the -** [prepared statement]. ^This routine returns 0 if pStmt is an SQL -** statement that does not return data (for example an [UPDATE]). -** -** See also: [sqlite3_data_count()] -*/ -SQLITE_API int sqlite3_column_count(sqlite3_stmt *pStmt); - -/* -** CAPI3REF: Column Names In A Result Set -** -** ^These routines return the name assigned to a particular column -** in the result set of a [SELECT] statement. ^The sqlite3_column_name() -** interface returns a pointer to a zero-terminated UTF-8 string -** and sqlite3_column_name16() returns a pointer to a zero-terminated -** UTF-16 string. ^The first parameter is the [prepared statement] -** that implements the [SELECT] statement. ^The second parameter is the -** column number. ^The leftmost column is number 0. -** -** ^The returned string pointer is valid until either the [prepared statement] -** is destroyed by [sqlite3_finalize()] or until the statement is automatically -** reprepared by the first call to [sqlite3_step()] for a particular run -** or until the next call to -** sqlite3_column_name() or sqlite3_column_name16() on the same column. -** -** ^If sqlite3_malloc() fails during the processing of either routine -** (for example during a conversion from UTF-8 to UTF-16) then a -** NULL pointer is returned. -** -** ^The name of a result column is the value of the "AS" clause for -** that column, if there is an AS clause. If there is no AS clause -** then the name of the column is unspecified and may change from -** one release of SQLite to the next. -*/ -SQLITE_API const char *sqlite3_column_name(sqlite3_stmt*, int N); -SQLITE_API const void *sqlite3_column_name16(sqlite3_stmt*, int N); - -/* -** CAPI3REF: Source Of Data In A Query Result -** -** ^These routines provide a means to determine the database, table, and -** table column that is the origin of a particular result column in -** [SELECT] statement. -** ^The name of the database or table or column can be returned as -** either a UTF-8 or UTF-16 string. ^The _database_ routines return -** the database name, the _table_ routines return the table name, and -** the origin_ routines return the column name. 
-** ^The returned string is valid until the [prepared statement] is destroyed -** using [sqlite3_finalize()] or until the statement is automatically -** reprepared by the first call to [sqlite3_step()] for a particular run -** or until the same information is requested -** again in a different encoding. -** -** ^The names returned are the original un-aliased names of the -** database, table, and column. -** -** ^The first argument to these interfaces is a [prepared statement]. -** ^These functions return information about the Nth result column returned by -** the statement, where N is the second function argument. -** ^The left-most column is column 0 for these routines. -** -** ^If the Nth column returned by the statement is an expression or -** subquery and is not a column value, then all of these functions return -** NULL. ^These routine might also return NULL if a memory allocation error -** occurs. ^Otherwise, they return the name of the attached database, table, -** or column that query result column was extracted from. -** -** ^As with all other SQLite APIs, those whose names end with "16" return -** UTF-16 encoded strings and the other functions return UTF-8. -** -** ^These APIs are only available if the library was compiled with the -** [SQLITE_ENABLE_COLUMN_METADATA] C-preprocessor symbol. -** -** If two or more threads call one or more of these routines against the same -** prepared statement and column at the same time then the results are -** undefined. -** -** If two or more threads call one or more -** [sqlite3_column_database_name | column metadata interfaces] -** for the same [prepared statement] and result column -** at the same time then the results are undefined. -*/ -SQLITE_API const char *sqlite3_column_database_name(sqlite3_stmt*,int); -SQLITE_API const void *sqlite3_column_database_name16(sqlite3_stmt*,int); -SQLITE_API const char *sqlite3_column_table_name(sqlite3_stmt*,int); -SQLITE_API const void *sqlite3_column_table_name16(sqlite3_stmt*,int); -SQLITE_API const char *sqlite3_column_origin_name(sqlite3_stmt*,int); -SQLITE_API const void *sqlite3_column_origin_name16(sqlite3_stmt*,int); - -/* -** CAPI3REF: Declared Datatype Of A Query Result -** -** ^(The first parameter is a [prepared statement]. -** If this statement is a [SELECT] statement and the Nth column of the -** returned result set of that [SELECT] is a table column (not an -** expression or subquery) then the declared type of the table -** column is returned.)^ ^If the Nth column of the result set is an -** expression or subquery, then a NULL pointer is returned. -** ^The returned string is always UTF-8 encoded. -** -** ^(For example, given the database schema: -** -** CREATE TABLE t1(c1 VARIANT); -** -** and the following statement to be compiled: -** -** SELECT c1 + 1, c1 FROM t1; -** -** this routine would return the string "VARIANT" for the second result -** column (i==1), and a NULL pointer for the first result column (i==0).)^ -** -** ^SQLite uses dynamic run-time typing. ^So just because a column -** is declared to contain a particular type does not mean that the -** data stored in that column is of the declared type. SQLite is -** strongly typed, but the typing is dynamic not static. ^Type -** is associated with individual values, not with the containers -** used to hold those values. 
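To make the declared-type discussion above concrete, the following sketch mirrors the t1(c1 VARIANT) example from the text: it prints the declared type reported by sqlite3_column_decltype() next to the runtime datatype code returned by sqlite3_column_type() for each result column. The query and schema come from the example above; everything else is illustrative only.

#include <stdio.h>
#include <sqlite3.h>

/* Sketch: contrast declared column types with runtime value types for
** the statement "SELECT c1 + 1, c1 FROM t1" used in the example above. */
static void show_types(sqlite3 *db){
  sqlite3_stmt *pStmt = 0;
  if( sqlite3_prepare_v2(db, "SELECT c1 + 1, c1 FROM t1", -1, &pStmt, 0)!=SQLITE_OK ){
    return;
  }
  while( sqlite3_step(pStmt)==SQLITE_ROW ){
    int i;
    for(i=0; i<sqlite3_column_count(pStmt); i++){
      const char *zDecl = sqlite3_column_decltype(pStmt, i); /* NULL for expressions */
      printf("%s: declared=%s runtime-code=%d\n",
             sqlite3_column_name(pStmt, i),
             zDecl ? zDecl : "(none)",
             sqlite3_column_type(pStmt, i));
    }
  }
  sqlite3_finalize(pStmt);
}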
-*/ -SQLITE_API const char *sqlite3_column_decltype(sqlite3_stmt*,int); -SQLITE_API const void *sqlite3_column_decltype16(sqlite3_stmt*,int); - -/* -** CAPI3REF: Evaluate An SQL Statement -** -** After a [prepared statement] has been prepared using either -** [sqlite3_prepare_v2()] or [sqlite3_prepare16_v2()] or one of the legacy -** interfaces [sqlite3_prepare()] or [sqlite3_prepare16()], this function -** must be called one or more times to evaluate the statement. -** -** The details of the behavior of the sqlite3_step() interface depend -** on whether the statement was prepared using the newer "v2" interface -** [sqlite3_prepare_v2()] and [sqlite3_prepare16_v2()] or the older legacy -** interface [sqlite3_prepare()] and [sqlite3_prepare16()]. The use of the -** new "v2" interface is recommended for new applications but the legacy -** interface will continue to be supported. -** -** ^In the legacy interface, the return value will be either [SQLITE_BUSY], -** [SQLITE_DONE], [SQLITE_ROW], [SQLITE_ERROR], or [SQLITE_MISUSE]. -** ^With the "v2" interface, any of the other [result codes] or -** [extended result codes] might be returned as well. -** -** ^[SQLITE_BUSY] means that the database engine was unable to acquire the -** database locks it needs to do its job. ^If the statement is a [COMMIT] -** or occurs outside of an explicit transaction, then you can retry the -** statement. If the statement is not a [COMMIT] and occurs within an -** explicit transaction then you should rollback the transaction before -** continuing. -** -** ^[SQLITE_DONE] means that the statement has finished executing -** successfully. sqlite3_step() should not be called again on this virtual -** machine without first calling [sqlite3_reset()] to reset the virtual -** machine back to its initial state. -** -** ^If the SQL statement being executed returns any data, then [SQLITE_ROW] -** is returned each time a new row of data is ready for processing by the -** caller. The values may be accessed using the [column access functions]. -** sqlite3_step() is called again to retrieve the next row of data. -** -** ^[SQLITE_ERROR] means that a run-time error (such as a constraint -** violation) has occurred. sqlite3_step() should not be called again on -** the VM. More information may be found by calling [sqlite3_errmsg()]. -** ^With the legacy interface, a more specific error code (for example, -** [SQLITE_INTERRUPT], [SQLITE_SCHEMA], [SQLITE_CORRUPT], and so forth) -** can be obtained by calling [sqlite3_reset()] on the -** [prepared statement]. ^In the "v2" interface, -** the more specific error code is returned directly by sqlite3_step(). -** -** [SQLITE_MISUSE] means that the this routine was called inappropriately. -** Perhaps it was called on a [prepared statement] that has -** already been [sqlite3_finalize | finalized] or on one that had -** previously returned [SQLITE_ERROR] or [SQLITE_DONE]. Or it could -** be the case that the same database connection is being used by two or -** more threads at the same moment in time. -** -** For all versions of SQLite up to and including 3.6.23.1, a call to -** [sqlite3_reset()] was required after sqlite3_step() returned anything -** other than [SQLITE_ROW] before any subsequent invocation of -** sqlite3_step(). Failure to reset the prepared statement using -** [sqlite3_reset()] would result in an [SQLITE_MISUSE] return from -** sqlite3_step(). 
But after version 3.6.23.1, sqlite3_step() began -** calling [sqlite3_reset()] automatically in this circumstance rather -** than returning [SQLITE_MISUSE]. This is not considered a compatibility -** break because any application that ever receives an SQLITE_MISUSE error -** is broken by definition. The [SQLITE_OMIT_AUTORESET] compile-time option -** can be used to restore the legacy behavior. -** -** Goofy Interface Alert: In the legacy interface, the sqlite3_step() -** API always returns a generic error code, [SQLITE_ERROR], following any -** error other than [SQLITE_BUSY] and [SQLITE_MISUSE]. You must call -** [sqlite3_reset()] or [sqlite3_finalize()] in order to find one of the -** specific [error codes] that better describes the error. -** We admit that this is a goofy design. The problem has been fixed -** with the "v2" interface. If you prepare all of your SQL statements -** using either [sqlite3_prepare_v2()] or [sqlite3_prepare16_v2()] instead -** of the legacy [sqlite3_prepare()] and [sqlite3_prepare16()] interfaces, -** then the more specific [error codes] are returned directly -** by sqlite3_step(). The use of the "v2" interface is recommended. -*/ -SQLITE_API int sqlite3_step(sqlite3_stmt*); - -/* -** CAPI3REF: Number of columns in a result set -** -** ^The sqlite3_data_count(P) interface returns the number of columns in the -** current row of the result set of [prepared statement] P. -** ^If prepared statement P does not have results ready to return -** (via calls to the [sqlite3_column_int | sqlite3_column_*()] of -** interfaces) then sqlite3_data_count(P) returns 0. -** ^The sqlite3_data_count(P) routine also returns 0 if P is a NULL pointer. -** ^The sqlite3_data_count(P) routine returns 0 if the previous call to -** [sqlite3_step](P) returned [SQLITE_DONE]. ^The sqlite3_data_count(P) -** will return non-zero if previous call to [sqlite3_step](P) returned -** [SQLITE_ROW], except in the case of the [PRAGMA incremental_vacuum] -** where it always returns zero since each step of that multi-step -** pragma returns 0 columns of data. -** -** See also: [sqlite3_column_count()] -*/ -SQLITE_API int sqlite3_data_count(sqlite3_stmt *pStmt); - -/* -** CAPI3REF: Fundamental Datatypes -** KEYWORDS: SQLITE_TEXT -** -** ^(Every value in SQLite has one of five fundamental datatypes: -** -**
-**
-**   • 64-bit signed integer
-**   • 64-bit IEEE floating point number
-**   • string
-**   • BLOB
-**   • NULL
    )^ -** -** These constants are codes for each of those types. -** -** Note that the SQLITE_TEXT constant was also used in SQLite version 2 -** for a completely different meaning. Software that links against both -** SQLite version 2 and SQLite version 3 should use SQLITE3_TEXT, not -** SQLITE_TEXT. -*/ -#define SQLITE_INTEGER 1 -#define SQLITE_FLOAT 2 -#define SQLITE_BLOB 4 -#define SQLITE_NULL 5 -#ifdef SQLITE_TEXT -# undef SQLITE_TEXT -#else -# define SQLITE_TEXT 3 -#endif -#define SQLITE3_TEXT 3 - -/* -** CAPI3REF: Result Values From A Query -** KEYWORDS: {column access functions} -** -** These routines form the "result set" interface. -** -** ^These routines return information about a single column of the current -** result row of a query. ^In every case the first argument is a pointer -** to the [prepared statement] that is being evaluated (the [sqlite3_stmt*] -** that was returned from [sqlite3_prepare_v2()] or one of its variants) -** and the second argument is the index of the column for which information -** should be returned. ^The leftmost column of the result set has the index 0. -** ^The number of columns in the result can be determined using -** [sqlite3_column_count()]. -** -** If the SQL statement does not currently point to a valid row, or if the -** column index is out of range, the result is undefined. -** These routines may only be called when the most recent call to -** [sqlite3_step()] has returned [SQLITE_ROW] and neither -** [sqlite3_reset()] nor [sqlite3_finalize()] have been called subsequently. -** If any of these routines are called after [sqlite3_reset()] or -** [sqlite3_finalize()] or after [sqlite3_step()] has returned -** something other than [SQLITE_ROW], the results are undefined. -** If [sqlite3_step()] or [sqlite3_reset()] or [sqlite3_finalize()] -** are called from a different thread while any of these routines -** are pending, then the results are undefined. -** -** ^The sqlite3_column_type() routine returns the -** [SQLITE_INTEGER | datatype code] for the initial data type -** of the result column. ^The returned value is one of [SQLITE_INTEGER], -** [SQLITE_FLOAT], [SQLITE_TEXT], [SQLITE_BLOB], or [SQLITE_NULL]. The value -** returned by sqlite3_column_type() is only meaningful if no type -** conversions have occurred as described below. After a type conversion, -** the value returned by sqlite3_column_type() is undefined. Future -** versions of SQLite may change the behavior of sqlite3_column_type() -** following a type conversion. -** -** ^If the result is a BLOB or UTF-8 string then the sqlite3_column_bytes() -** routine returns the number of bytes in that BLOB or string. -** ^If the result is a UTF-16 string, then sqlite3_column_bytes() converts -** the string to UTF-8 and then returns the number of bytes. -** ^If the result is a numeric value then sqlite3_column_bytes() uses -** [sqlite3_snprintf()] to convert that value to a UTF-8 string and returns -** the number of bytes in that string. -** ^If the result is NULL, then sqlite3_column_bytes() returns zero. -** -** ^If the result is a BLOB or UTF-16 string then the sqlite3_column_bytes16() -** routine returns the number of bytes in that BLOB or string. -** ^If the result is a UTF-8 string, then sqlite3_column_bytes16() converts -** the string to UTF-16 and then returns the number of bytes. -** ^If the result is a numeric value then sqlite3_column_bytes16() uses -** [sqlite3_snprintf()] to convert that value to a UTF-16 string and returns -** the number of bytes in that string. 
-** ^If the result is NULL, then sqlite3_column_bytes16() returns zero. -** -** ^The values returned by [sqlite3_column_bytes()] and -** [sqlite3_column_bytes16()] do not include the zero terminators at the end -** of the string. ^For clarity: the values returned by -** [sqlite3_column_bytes()] and [sqlite3_column_bytes16()] are the number of -** bytes in the string, not the number of characters. -** -** ^Strings returned by sqlite3_column_text() and sqlite3_column_text16(), -** even empty strings, are always zero-terminated. ^The return -** value from sqlite3_column_blob() for a zero-length BLOB is a NULL pointer. -** -** ^The object returned by [sqlite3_column_value()] is an -** [unprotected sqlite3_value] object. An unprotected sqlite3_value object -** may only be used with [sqlite3_bind_value()] and [sqlite3_result_value()]. -** If the [unprotected sqlite3_value] object returned by -** [sqlite3_column_value()] is used in any other way, including calls -** to routines like [sqlite3_value_int()], [sqlite3_value_text()], -** or [sqlite3_value_bytes()], then the behavior is undefined. -** -** These routines attempt to convert the value where appropriate. ^For -** example, if the internal representation is FLOAT and a text result -** is requested, [sqlite3_snprintf()] is used internally to perform the -** conversion automatically. ^(The following table details the conversions -** that are applied: -** -**
-**
-**    Internal Type    Requested Type    Conversion
-**    -------------    --------------    --------------------------------
-**    NULL             INTEGER           Result is 0
-**    NULL             FLOAT             Result is 0.0
-**    NULL             TEXT              Result is a NULL pointer
-**    NULL             BLOB              Result is a NULL pointer
-**    INTEGER          FLOAT             Convert from integer to float
-**    INTEGER          TEXT              ASCII rendering of the integer
-**    INTEGER          BLOB              Same as INTEGER->TEXT
-**    FLOAT            INTEGER           [CAST] to INTEGER
-**    FLOAT            TEXT              ASCII rendering of the float
-**    FLOAT            BLOB              [CAST] to BLOB
-**    TEXT             INTEGER           [CAST] to INTEGER
-**    TEXT             FLOAT             [CAST] to REAL
-**    TEXT             BLOB              No change
-**    BLOB             INTEGER           [CAST] to INTEGER
-**    BLOB             FLOAT             [CAST] to REAL
-**    BLOB             TEXT              Add a zero terminator if needed
-**
    )^ -** -** The table above makes reference to standard C library functions atoi() -** and atof(). SQLite does not really use these functions. It has its -** own equivalent internal routines. The atoi() and atof() names are -** used in the table for brevity and because they are familiar to most -** C programmers. -** -** Note that when type conversions occur, pointers returned by prior -** calls to sqlite3_column_blob(), sqlite3_column_text(), and/or -** sqlite3_column_text16() may be invalidated. -** Type conversions and pointer invalidations might occur -** in the following cases: -** -**
-**
-**   • The initial content is a BLOB and sqlite3_column_text() or
-**     sqlite3_column_text16() is called.  A zero-terminator might
-**     need to be added to the string.
-**   • The initial content is UTF-8 text and sqlite3_column_bytes16() or
-**     sqlite3_column_text16() is called.  The content must be converted
-**     to UTF-16.
-**   • The initial content is UTF-16 text and sqlite3_column_bytes() or
-**     sqlite3_column_text() is called.  The content must be converted
-**     to UTF-8.
    -** -** ^Conversions between UTF-16be and UTF-16le are always done in place and do -** not invalidate a prior pointer, though of course the content of the buffer -** that the prior pointer references will have been modified. Other kinds -** of conversion are done in place when it is possible, but sometimes they -** are not possible and in those cases prior pointers are invalidated. -** -** The safest and easiest to remember policy is to invoke these routines -** in one of the following ways: -** -**
-**
-**   • sqlite3_column_text() followed by sqlite3_column_bytes()
-**   • sqlite3_column_blob() followed by sqlite3_column_bytes()
-**   • sqlite3_column_text16() followed by sqlite3_column_bytes16()
    -** -** In other words, you should call sqlite3_column_text(), -** sqlite3_column_blob(), or sqlite3_column_text16() first to force the result -** into the desired format, then invoke sqlite3_column_bytes() or -** sqlite3_column_bytes16() to find the size of the result. Do not mix calls -** to sqlite3_column_text() or sqlite3_column_blob() with calls to -** sqlite3_column_bytes16(), and do not mix calls to sqlite3_column_text16() -** with calls to sqlite3_column_bytes(). -** -** ^The pointers returned are valid until a type conversion occurs as -** described above, or until [sqlite3_step()] or [sqlite3_reset()] or -** [sqlite3_finalize()] is called. ^The memory space used to hold strings -** and BLOBs is freed automatically. Do not pass the pointers returned -** from [sqlite3_column_blob()], [sqlite3_column_text()], etc. into -** [sqlite3_free()]. -** -** ^(If a memory allocation error occurs during the evaluation of any -** of these routines, a default value is returned. The default value -** is either the integer 0, the floating point number 0.0, or a NULL -** pointer. Subsequent calls to [sqlite3_errcode()] will return -** [SQLITE_NOMEM].)^ -*/ -SQLITE_API const void *sqlite3_column_blob(sqlite3_stmt*, int iCol); -SQLITE_API int sqlite3_column_bytes(sqlite3_stmt*, int iCol); -SQLITE_API int sqlite3_column_bytes16(sqlite3_stmt*, int iCol); -SQLITE_API double sqlite3_column_double(sqlite3_stmt*, int iCol); -SQLITE_API int sqlite3_column_int(sqlite3_stmt*, int iCol); -SQLITE_API sqlite3_int64 sqlite3_column_int64(sqlite3_stmt*, int iCol); -SQLITE_API const unsigned char *sqlite3_column_text(sqlite3_stmt*, int iCol); -SQLITE_API const void *sqlite3_column_text16(sqlite3_stmt*, int iCol); -SQLITE_API int sqlite3_column_type(sqlite3_stmt*, int iCol); -SQLITE_API sqlite3_value *sqlite3_column_value(sqlite3_stmt*, int iCol); - -/* -** CAPI3REF: Destroy A Prepared Statement Object -** -** ^The sqlite3_finalize() function is called to delete a [prepared statement]. -** ^If the most recent evaluation of the statement encountered no errors -** or if the statement is never been evaluated, then sqlite3_finalize() returns -** SQLITE_OK. ^If the most recent evaluation of statement S failed, then -** sqlite3_finalize(S) returns the appropriate [error code] or -** [extended error code]. -** -** ^The sqlite3_finalize(S) routine can be called at any point during -** the life cycle of [prepared statement] S: -** before statement S is ever evaluated, after -** one or more calls to [sqlite3_reset()], or after any call -** to [sqlite3_step()] regardless of whether or not the statement has -** completed execution. -** -** ^Invoking sqlite3_finalize() on a NULL pointer is a harmless no-op. -** -** The application must finalize every [prepared statement] in order to avoid -** resource leaks. It is a grievous error for the application to try to use -** a prepared statement after it has been finalized. Any use of a prepared -** statement after it has been finalized can result in undefined and -** undesirable behavior such as segfaults and heap corruption. -*/ -SQLITE_API int sqlite3_finalize(sqlite3_stmt *pStmt); - -/* -** CAPI3REF: Reset A Prepared Statement Object -** -** The sqlite3_reset() function is called to reset a [prepared statement] -** object back to its initial state, ready to be re-executed. -** ^Any SQL statement variables that had values bound to them using -** the [sqlite3_bind_blob | sqlite3_bind_*() API] retain their values. -** Use [sqlite3_clear_bindings()] to reset the bindings. 
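The reset and binding rules described here are easiest to see in a loop that reuses one prepared statement for many rows: sqlite3_reset() rewinds the statement but deliberately keeps the old bindings, so the sketch below clears them explicitly before binding the next value. The table "names" is invented for the example.

#include <sqlite3.h>

/* Sketch: reuse a single prepared statement for many INSERTs.
** The table "names(name)" is assumed to exist. */
static int insert_many(sqlite3 *db, const char * const *azNames, int n){
  sqlite3_stmt *pStmt = 0;
  int i;
  int rc = sqlite3_prepare_v2(db, "INSERT INTO names(name) VALUES(?1)", -1, &pStmt, 0);
  for(i=0; rc==SQLITE_OK && i<n; i++){
    sqlite3_bind_text(pStmt, 1, azNames[i], -1, SQLITE_STATIC);
    if( sqlite3_step(pStmt)!=SQLITE_DONE ) rc = SQLITE_ERROR;
    sqlite3_reset(pStmt);           /* rewind; the binding survives the reset */
    sqlite3_clear_bindings(pStmt);  /* drop the old value before re-binding   */
  }
  sqlite3_finalize(pStmt);
  return rc;
}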
-** -** ^The [sqlite3_reset(S)] interface resets the [prepared statement] S -** back to the beginning of its program. -** -** ^If the most recent call to [sqlite3_step(S)] for the -** [prepared statement] S returned [SQLITE_ROW] or [SQLITE_DONE], -** or if [sqlite3_step(S)] has never before been called on S, -** then [sqlite3_reset(S)] returns [SQLITE_OK]. -** -** ^If the most recent call to [sqlite3_step(S)] for the -** [prepared statement] S indicated an error, then -** [sqlite3_reset(S)] returns an appropriate [error code]. -** -** ^The [sqlite3_reset(S)] interface does not change the values -** of any [sqlite3_bind_blob|bindings] on the [prepared statement] S. -*/ -SQLITE_API int sqlite3_reset(sqlite3_stmt *pStmt); - -/* -** CAPI3REF: Create Or Redefine SQL Functions -** KEYWORDS: {function creation routines} -** KEYWORDS: {application-defined SQL function} -** KEYWORDS: {application-defined SQL functions} -** -** ^These functions (collectively known as "function creation routines") -** are used to add SQL functions or aggregates or to redefine the behavior -** of existing SQL functions or aggregates. The only differences between -** these routines are the text encoding expected for -** the second parameter (the name of the function being created) -** and the presence or absence of a destructor callback for -** the application data pointer. -** -** ^The first parameter is the [database connection] to which the SQL -** function is to be added. ^If an application uses more than one database -** connection then application-defined SQL functions must be added -** to each database connection separately. -** -** ^The second parameter is the name of the SQL function to be created or -** redefined. ^The length of the name is limited to 255 bytes in a UTF-8 -** representation, exclusive of the zero-terminator. ^Note that the name -** length limit is in UTF-8 bytes, not characters nor UTF-16 bytes. -** ^Any attempt to create a function with a longer name -** will result in [SQLITE_MISUSE] being returned. -** -** ^The third parameter (nArg) -** is the number of arguments that the SQL function or -** aggregate takes. ^If this parameter is -1, then the SQL function or -** aggregate may take any number of arguments between 0 and the limit -** set by [sqlite3_limit]([SQLITE_LIMIT_FUNCTION_ARG]). If the third -** parameter is less than -1 or greater than 127 then the behavior is -** undefined. -** -** ^The fourth parameter, eTextRep, specifies what -** [SQLITE_UTF8 | text encoding] this SQL function prefers for -** its parameters. The application should set this parameter to -** [SQLITE_UTF16LE] if the function implementation invokes -** [sqlite3_value_text16le()] on an input, or [SQLITE_UTF16BE] if the -** implementation invokes [sqlite3_value_text16be()] on an input, or -** [SQLITE_UTF16] if [sqlite3_value_text16()] is used, or [SQLITE_UTF8] -** otherwise. ^The same SQL function may be registered multiple times using -** different preferred text encodings, with different implementations for -** each encoding. -** ^When multiple implementations of the same function are available, SQLite -** will pick the one that involves the least amount of data conversion. -** -** ^The fourth parameter may optionally be ORed with [SQLITE_DETERMINISTIC] -** to signal that the function will always return the same result given -** the same inputs within a single SQL statement. Most SQL functions are -** deterministic. The built-in [random()] SQL function is an example of a -** function that is not deterministic. 
The SQLite query planner is able to -** perform additional optimizations on deterministic functions, so use -** of the [SQLITE_DETERMINISTIC] flag is recommended where possible. -** -** ^(The fifth parameter is an arbitrary pointer. The implementation of the -** function can gain access to this pointer using [sqlite3_user_data()].)^ -** -** ^The sixth, seventh and eighth parameters, xFunc, xStep and xFinal, are -** pointers to C-language functions that implement the SQL function or -** aggregate. ^A scalar SQL function requires an implementation of the xFunc -** callback only; NULL pointers must be passed as the xStep and xFinal -** parameters. ^An aggregate SQL function requires an implementation of xStep -** and xFinal and NULL pointer must be passed for xFunc. ^To delete an existing -** SQL function or aggregate, pass NULL pointers for all three function -** callbacks. -** -** ^(If the ninth parameter to sqlite3_create_function_v2() is not NULL, -** then it is destructor for the application data pointer. -** The destructor is invoked when the function is deleted, either by being -** overloaded or when the database connection closes.)^ -** ^The destructor is also invoked if the call to -** sqlite3_create_function_v2() fails. -** ^When the destructor callback of the tenth parameter is invoked, it -** is passed a single argument which is a copy of the application data -** pointer which was the fifth parameter to sqlite3_create_function_v2(). -** -** ^It is permitted to register multiple implementations of the same -** functions with the same name but with either differing numbers of -** arguments or differing preferred text encodings. ^SQLite will use -** the implementation that most closely matches the way in which the -** SQL function is used. ^A function implementation with a non-negative -** nArg parameter is a better match than a function implementation with -** a negative nArg. ^A function where the preferred text encoding -** matches the database encoding is a better -** match than a function where the encoding is different. -** ^A function where the encoding difference is between UTF16le and UTF16be -** is a closer match than a function where the encoding difference is -** between UTF8 and UTF16. -** -** ^Built-in functions may be overloaded by new application-defined functions. -** -** ^An application-defined function is permitted to call other -** SQLite interfaces. However, such calls must not -** close the database connection nor finalize or reset the prepared -** statement in which the function is running. 
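A minimal sketch of a scalar application-defined function may help tie the parameters described above together. It registers half(X), which returns X divided by two; the function name and behavior are invented for illustration, and SQLITE_DETERMINISTIC is ORed into the encoding because the result depends only on the input.

#include <sqlite3.h>

/* Sketch: implementation callback for the scalar SQL function half(X). */
static void halfFunc(sqlite3_context *context, int argc, sqlite3_value **argv){
  (void)argc;                              /* nArg is fixed at 1 below */
  if( sqlite3_value_type(argv[0])==SQLITE_NULL ){
    sqlite3_result_null(context);          /* half(NULL) is NULL */
  }else{
    sqlite3_result_double(context, sqlite3_value_double(argv[0])/2.0);
  }
}

/* Sketch: register the function on one database connection. */
static int register_half(sqlite3 *db){
  /* Scalar function: xFunc only; xStep and xFinal must be NULL. */
  return sqlite3_create_function(db, "half", 1,
                                 SQLITE_UTF8 | SQLITE_DETERMINISTIC,
                                 0, halfFunc, 0, 0);
}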
-*/ -SQLITE_API int sqlite3_create_function( - sqlite3 *db, - const char *zFunctionName, - int nArg, - int eTextRep, - void *pApp, - void (*xFunc)(sqlite3_context*,int,sqlite3_value**), - void (*xStep)(sqlite3_context*,int,sqlite3_value**), - void (*xFinal)(sqlite3_context*) -); -SQLITE_API int sqlite3_create_function16( - sqlite3 *db, - const void *zFunctionName, - int nArg, - int eTextRep, - void *pApp, - void (*xFunc)(sqlite3_context*,int,sqlite3_value**), - void (*xStep)(sqlite3_context*,int,sqlite3_value**), - void (*xFinal)(sqlite3_context*) -); -SQLITE_API int sqlite3_create_function_v2( - sqlite3 *db, - const char *zFunctionName, - int nArg, - int eTextRep, - void *pApp, - void (*xFunc)(sqlite3_context*,int,sqlite3_value**), - void (*xStep)(sqlite3_context*,int,sqlite3_value**), - void (*xFinal)(sqlite3_context*), - void(*xDestroy)(void*) -); - -/* -** CAPI3REF: Text Encodings -** -** These constant define integer codes that represent the various -** text encodings supported by SQLite. -*/ -#define SQLITE_UTF8 1 -#define SQLITE_UTF16LE 2 -#define SQLITE_UTF16BE 3 -#define SQLITE_UTF16 4 /* Use native byte order */ -#define SQLITE_ANY 5 /* Deprecated */ -#define SQLITE_UTF16_ALIGNED 8 /* sqlite3_create_collation only */ - -/* -** CAPI3REF: Function Flags -** -** These constants may be ORed together with the -** [SQLITE_UTF8 | preferred text encoding] as the fourth argument -** to [sqlite3_create_function()], [sqlite3_create_function16()], or -** [sqlite3_create_function_v2()]. -*/ -#define SQLITE_DETERMINISTIC 0x800 - -/* -** CAPI3REF: Deprecated Functions -** DEPRECATED -** -** These functions are [deprecated]. In order to maintain -** backwards compatibility with older code, these functions continue -** to be supported. However, new applications should avoid -** the use of these functions. To help encourage people to avoid -** using these functions, we are not going to tell you what they do. -*/ -#ifndef SQLITE_OMIT_DEPRECATED -SQLITE_API SQLITE_DEPRECATED int sqlite3_aggregate_count(sqlite3_context*); -SQLITE_API SQLITE_DEPRECATED int sqlite3_expired(sqlite3_stmt*); -SQLITE_API SQLITE_DEPRECATED int sqlite3_transfer_bindings(sqlite3_stmt*, sqlite3_stmt*); -SQLITE_API SQLITE_DEPRECATED int sqlite3_global_recover(void); -SQLITE_API SQLITE_DEPRECATED void sqlite3_thread_cleanup(void); -SQLITE_API SQLITE_DEPRECATED int sqlite3_memory_alarm(void(*)(void*,sqlite3_int64,int), - void*,sqlite3_int64); -#endif - -/* -** CAPI3REF: Obtaining SQL Function Parameter Values -** -** The C-language implementation of SQL functions and aggregates uses -** this set of interface routines to access the parameter values on -** the function or aggregate. -** -** The xFunc (for scalar functions) or xStep (for aggregates) parameters -** to [sqlite3_create_function()] and [sqlite3_create_function16()] -** define callbacks that implement the SQL functions and aggregates. -** The 3rd parameter to these callbacks is an array of pointers to -** [protected sqlite3_value] objects. There is one [sqlite3_value] object for -** each parameter to the SQL function. These routines are used to -** extract values from the [sqlite3_value] objects. -** -** These routines work only with [protected sqlite3_value] objects. -** Any attempt to use these routines on an [unprotected sqlite3_value] -** object results in undefined behavior. 
-** -** ^These routines work just like the corresponding [column access functions] -** except that these routines take a single [protected sqlite3_value] object -** pointer instead of a [sqlite3_stmt*] pointer and an integer column number. -** -** ^The sqlite3_value_text16() interface extracts a UTF-16 string -** in the native byte-order of the host machine. ^The -** sqlite3_value_text16be() and sqlite3_value_text16le() interfaces -** extract UTF-16 strings as big-endian and little-endian respectively. -** -** ^(The sqlite3_value_numeric_type() interface attempts to apply -** numeric affinity to the value. This means that an attempt is -** made to convert the value to an integer or floating point. If -** such a conversion is possible without loss of information (in other -** words, if the value is a string that looks like a number) -** then the conversion is performed. Otherwise no conversion occurs. -** The [SQLITE_INTEGER | datatype] after conversion is returned.)^ -** -** Please pay particular attention to the fact that the pointer returned -** from [sqlite3_value_blob()], [sqlite3_value_text()], or -** [sqlite3_value_text16()] can be invalidated by a subsequent call to -** [sqlite3_value_bytes()], [sqlite3_value_bytes16()], [sqlite3_value_text()], -** or [sqlite3_value_text16()]. -** -** These routines must be called from the same thread as -** the SQL function that supplied the [sqlite3_value*] parameters. -*/ -SQLITE_API const void *sqlite3_value_blob(sqlite3_value*); -SQLITE_API int sqlite3_value_bytes(sqlite3_value*); -SQLITE_API int sqlite3_value_bytes16(sqlite3_value*); -SQLITE_API double sqlite3_value_double(sqlite3_value*); -SQLITE_API int sqlite3_value_int(sqlite3_value*); -SQLITE_API sqlite3_int64 sqlite3_value_int64(sqlite3_value*); -SQLITE_API const unsigned char *sqlite3_value_text(sqlite3_value*); -SQLITE_API const void *sqlite3_value_text16(sqlite3_value*); -SQLITE_API const void *sqlite3_value_text16le(sqlite3_value*); -SQLITE_API const void *sqlite3_value_text16be(sqlite3_value*); -SQLITE_API int sqlite3_value_type(sqlite3_value*); -SQLITE_API int sqlite3_value_numeric_type(sqlite3_value*); - -/* -** CAPI3REF: Obtain Aggregate Function Context -** -** Implementations of aggregate SQL functions use this -** routine to allocate memory for storing their state. -** -** ^The first time the sqlite3_aggregate_context(C,N) routine is called -** for a particular aggregate function, SQLite -** allocates N of memory, zeroes out that memory, and returns a pointer -** to the new memory. ^On second and subsequent calls to -** sqlite3_aggregate_context() for the same aggregate function instance, -** the same buffer is returned. Sqlite3_aggregate_context() is normally -** called once for each invocation of the xStep callback and then one -** last time when the xFinal callback is invoked. ^(When no rows match -** an aggregate query, the xStep() callback of the aggregate function -** implementation is never called and xFinal() is called exactly once. -** In those cases, sqlite3_aggregate_context() might be called for the -** first time from within xFinal().)^ -** -** ^The sqlite3_aggregate_context(C,N) routine returns a NULL pointer -** when first called if N is less than or equal to zero or if a memory -** allocate error occurs. -** -** ^(The amount of space allocated by sqlite3_aggregate_context(C,N) is -** determined by the N parameter on first successful call. 
Changing the -** value of N in subsequent call to sqlite3_aggregate_context() within -** the same aggregate function instance will not resize the memory -** allocation.)^ Within the xFinal callback, it is customary to set -** N=0 in calls to sqlite3_aggregate_context(C,N) so that no -** pointless memory allocations occur. -** -** ^SQLite automatically frees the memory allocated by -** sqlite3_aggregate_context() when the aggregate query concludes. -** -** The first parameter must be a copy of the -** [sqlite3_context | SQL function context] that is the first parameter -** to the xStep or xFinal callback routine that implements the aggregate -** function. -** -** This routine must be called from the same thread in which -** the aggregate SQL function is running. -*/ -SQLITE_API void *sqlite3_aggregate_context(sqlite3_context*, int nBytes); - -/* -** CAPI3REF: User Data For Functions -** -** ^The sqlite3_user_data() interface returns a copy of -** the pointer that was the pUserData parameter (the 5th parameter) -** of the [sqlite3_create_function()] -** and [sqlite3_create_function16()] routines that originally -** registered the application defined function. -** -** This routine must be called from the same thread in which -** the application-defined function is running. -*/ -SQLITE_API void *sqlite3_user_data(sqlite3_context*); - -/* -** CAPI3REF: Database Connection For Functions -** -** ^The sqlite3_context_db_handle() interface returns a copy of -** the pointer to the [database connection] (the 1st parameter) -** of the [sqlite3_create_function()] -** and [sqlite3_create_function16()] routines that originally -** registered the application defined function. -*/ -SQLITE_API sqlite3 *sqlite3_context_db_handle(sqlite3_context*); - -/* -** CAPI3REF: Function Auxiliary Data -** -** These functions may be used by (non-aggregate) SQL functions to -** associate metadata with argument values. If the same value is passed to -** multiple invocations of the same SQL function during query execution, under -** some circumstances the associated metadata may be preserved. An example -** of where this might be useful is in a regular-expression matching -** function. The compiled version of the regular expression can be stored as -** metadata associated with the pattern string. -** Then as long as the pattern string remains the same, -** the compiled regular expression can be reused on multiple -** invocations of the same function. -** -** ^The sqlite3_get_auxdata() interface returns a pointer to the metadata -** associated by the sqlite3_set_auxdata() function with the Nth argument -** value to the application-defined function. ^If there is no metadata -** associated with the function argument, this sqlite3_get_auxdata() interface -** returns a NULL pointer. -** -** ^The sqlite3_set_auxdata(C,N,P,X) interface saves P as metadata for the N-th -** argument of the application-defined function. ^Subsequent -** calls to sqlite3_get_auxdata(C,N) return P from the most recent -** sqlite3_set_auxdata(C,N,P,X) call if the metadata is still valid or -** NULL if the metadata has been discarded. -** ^After each call to sqlite3_set_auxdata(C,N,P,X) where X is not NULL, -** SQLite will invoke the destructor function X with parameter P exactly -** once, when the metadata is discarded. -** SQLite is free to discard the metadata at any time, including:
-**
-**   • when the corresponding function parameter changes, or
-**   • when [sqlite3_reset()] or [sqlite3_finalize()] is called for the
-**     SQL statement, or
-**   • when sqlite3_set_auxdata() is invoked again on the same parameter, or
-**   • during the original sqlite3_set_auxdata() call when a memory
-**     allocation error occurs.
    )^ -** -** Note the last bullet in particular. The destructor X in -** sqlite3_set_auxdata(C,N,P,X) might be called immediately, before the -** sqlite3_set_auxdata() interface even returns. Hence sqlite3_set_auxdata() -** should be called near the end of the function implementation and the -** function implementation should not make any use of P after -** sqlite3_set_auxdata() has been called. -** -** ^(In practice, metadata is preserved between function calls for -** function parameters that are compile-time constants, including literal -** values and [parameters] and expressions composed from the same.)^ -** -** These routines must be called from the same thread in which -** the SQL function is running. -*/ -SQLITE_API void *sqlite3_get_auxdata(sqlite3_context*, int N); -SQLITE_API void sqlite3_set_auxdata(sqlite3_context*, int N, void*, void (*)(void*)); - - -/* -** CAPI3REF: Constants Defining Special Destructor Behavior -** -** These are special values for the destructor that is passed in as the -** final argument to routines like [sqlite3_result_blob()]. ^If the destructor -** argument is SQLITE_STATIC, it means that the content pointer is constant -** and will never change. It does not need to be destroyed. ^The -** SQLITE_TRANSIENT value means that the content will likely change in -** the near future and that SQLite should make its own private copy of -** the content before returning. -** -** The typedef is necessary to work around problems in certain -** C++ compilers. -*/ -typedef void (*sqlite3_destructor_type)(void*); -#define SQLITE_STATIC ((sqlite3_destructor_type)0) -#define SQLITE_TRANSIENT ((sqlite3_destructor_type)-1) - -/* -** CAPI3REF: Setting The Result Of An SQL Function -** -** These routines are used by the xFunc or xFinal callbacks that -** implement SQL functions and aggregates. See -** [sqlite3_create_function()] and [sqlite3_create_function16()] -** for additional information. -** -** These functions work very much like the [parameter binding] family of -** functions used to bind values to host parameters in prepared statements. -** Refer to the [SQL parameter] documentation for additional information. -** -** ^The sqlite3_result_blob() interface sets the result from -** an application-defined function to be the BLOB whose content is pointed -** to by the second parameter and which is N bytes long where N is the -** third parameter. -** -** ^The sqlite3_result_zeroblob() interfaces set the result of -** the application-defined function to be a BLOB containing all zero -** bytes and N bytes in size, where N is the value of the 2nd parameter. -** -** ^The sqlite3_result_double() interface sets the result from -** an application-defined function to be a floating point value specified -** by its 2nd argument. -** -** ^The sqlite3_result_error() and sqlite3_result_error16() functions -** cause the implemented SQL function to throw an exception. -** ^SQLite uses the string pointed to by the -** 2nd parameter of sqlite3_result_error() or sqlite3_result_error16() -** as the text of an error message. ^SQLite interprets the error -** message string from sqlite3_result_error() as UTF-8. ^SQLite -** interprets the string from sqlite3_result_error16() as UTF-16 in native -** byte order. ^If the third parameter to sqlite3_result_error() -** or sqlite3_result_error16() is negative then SQLite takes as the error -** message all text up through the first zero character. 
-** ^If the third parameter to sqlite3_result_error() or -** sqlite3_result_error16() is non-negative then SQLite takes that many -** bytes (not characters) from the 2nd parameter as the error message. -** ^The sqlite3_result_error() and sqlite3_result_error16() -** routines make a private copy of the error message text before -** they return. Hence, the calling function can deallocate or -** modify the text after they return without harm. -** ^The sqlite3_result_error_code() function changes the error code -** returned by SQLite as a result of an error in a function. ^By default, -** the error code is SQLITE_ERROR. ^A subsequent call to sqlite3_result_error() -** or sqlite3_result_error16() resets the error code to SQLITE_ERROR. -** -** ^The sqlite3_result_error_toobig() interface causes SQLite to throw an -** error indicating that a string or BLOB is too long to represent. -** -** ^The sqlite3_result_error_nomem() interface causes SQLite to throw an -** error indicating that a memory allocation failed. -** -** ^The sqlite3_result_int() interface sets the return value -** of the application-defined function to be the 32-bit signed integer -** value given in the 2nd argument. -** ^The sqlite3_result_int64() interface sets the return value -** of the application-defined function to be the 64-bit signed integer -** value given in the 2nd argument. -** -** ^The sqlite3_result_null() interface sets the return value -** of the application-defined function to be NULL. -** -** ^The sqlite3_result_text(), sqlite3_result_text16(), -** sqlite3_result_text16le(), and sqlite3_result_text16be() interfaces -** set the return value of the application-defined function to be -** a text string which is represented as UTF-8, UTF-16 native byte order, -** UTF-16 little endian, or UTF-16 big endian, respectively. -** ^SQLite takes the text result from the application from -** the 2nd parameter of the sqlite3_result_text* interfaces. -** ^If the 3rd parameter to the sqlite3_result_text* interfaces -** is negative, then SQLite takes result text from the 2nd parameter -** through the first zero character. -** ^If the 3rd parameter to the sqlite3_result_text* interfaces -** is non-negative, then as many bytes (not characters) of the text -** pointed to by the 2nd parameter are taken as the application-defined -** function result. If the 3rd parameter is non-negative, then it -** must be the byte offset into the string where the NUL terminator would -** appear if the string where NUL terminated. If any NUL characters occur -** in the string at a byte offset that is less than the value of the 3rd -** parameter, then the resulting string will contain embedded NULs and the -** result of expressions operating on strings with embedded NULs is undefined. -** ^If the 4th parameter to the sqlite3_result_text* interfaces -** or sqlite3_result_blob is a non-NULL pointer, then SQLite calls that -** function as the destructor on the text or BLOB result when it has -** finished using that result. -** ^If the 4th parameter to the sqlite3_result_text* interfaces or to -** sqlite3_result_blob is the special constant SQLITE_STATIC, then SQLite -** assumes that the text or BLOB result is in constant space and does not -** copy the content of the parameter nor call a destructor on the content -** when it has finished using that result. 
-** ^If the 4th parameter to the sqlite3_result_text* interfaces -** or sqlite3_result_blob is the special constant SQLITE_TRANSIENT -** then SQLite makes a copy of the result into space obtained from -** from [sqlite3_malloc()] before it returns. -** -** ^The sqlite3_result_value() interface sets the result of -** the application-defined function to be a copy the -** [unprotected sqlite3_value] object specified by the 2nd parameter. ^The -** sqlite3_result_value() interface makes a copy of the [sqlite3_value] -** so that the [sqlite3_value] specified in the parameter may change or -** be deallocated after sqlite3_result_value() returns without harm. -** ^A [protected sqlite3_value] object may always be used where an -** [unprotected sqlite3_value] object is required, so either -** kind of [sqlite3_value] object can be used with this interface. -** -** If these routines are called from within the different thread -** than the one containing the application-defined function that received -** the [sqlite3_context] pointer, the results are undefined. -*/ -SQLITE_API void sqlite3_result_blob(sqlite3_context*, const void*, int, void(*)(void*)); -SQLITE_API void sqlite3_result_double(sqlite3_context*, double); -SQLITE_API void sqlite3_result_error(sqlite3_context*, const char*, int); -SQLITE_API void sqlite3_result_error16(sqlite3_context*, const void*, int); -SQLITE_API void sqlite3_result_error_toobig(sqlite3_context*); -SQLITE_API void sqlite3_result_error_nomem(sqlite3_context*); -SQLITE_API void sqlite3_result_error_code(sqlite3_context*, int); -SQLITE_API void sqlite3_result_int(sqlite3_context*, int); -SQLITE_API void sqlite3_result_int64(sqlite3_context*, sqlite3_int64); -SQLITE_API void sqlite3_result_null(sqlite3_context*); -SQLITE_API void sqlite3_result_text(sqlite3_context*, const char*, int, void(*)(void*)); -SQLITE_API void sqlite3_result_text16(sqlite3_context*, const void*, int, void(*)(void*)); -SQLITE_API void sqlite3_result_text16le(sqlite3_context*, const void*, int,void(*)(void*)); -SQLITE_API void sqlite3_result_text16be(sqlite3_context*, const void*, int,void(*)(void*)); -SQLITE_API void sqlite3_result_value(sqlite3_context*, sqlite3_value*); -SQLITE_API void sqlite3_result_zeroblob(sqlite3_context*, int n); - -/* -** CAPI3REF: Define New Collating Sequences -** -** ^These functions add, remove, or modify a [collation] associated -** with the [database connection] specified as the first argument. -** -** ^The name of the collation is a UTF-8 string -** for sqlite3_create_collation() and sqlite3_create_collation_v2() -** and a UTF-16 string in native byte order for sqlite3_create_collation16(). -** ^Collation names that compare equal according to [sqlite3_strnicmp()] are -** considered to be the same name. -** -** ^(The third argument (eTextRep) must be one of the constants: -**
-**
-**      • [SQLITE_UTF8],
-**      • [SQLITE_UTF16LE],
-**      • [SQLITE_UTF16BE],
-**      • [SQLITE_UTF16], or
-**      • [SQLITE_UTF16_ALIGNED].
    )^ -** ^The eTextRep argument determines the encoding of strings passed -** to the collating function callback, xCallback. -** ^The [SQLITE_UTF16] and [SQLITE_UTF16_ALIGNED] values for eTextRep -** force strings to be UTF16 with native byte order. -** ^The [SQLITE_UTF16_ALIGNED] value for eTextRep forces strings to begin -** on an even byte address. -** -** ^The fourth argument, pArg, is an application data pointer that is passed -** through as the first argument to the collating function callback. -** -** ^The fifth argument, xCallback, is a pointer to the collating function. -** ^Multiple collating functions can be registered using the same name but -** with different eTextRep parameters and SQLite will use whichever -** function requires the least amount of data transformation. -** ^If the xCallback argument is NULL then the collating function is -** deleted. ^When all collating functions having the same name are deleted, -** that collation is no longer usable. -** -** ^The collating function callback is invoked with a copy of the pArg -** application data pointer and with two strings in the encoding specified -** by the eTextRep argument. The collating function must return an -** integer that is negative, zero, or positive -** if the first string is less than, equal to, or greater than the second, -** respectively. A collating function must always return the same answer -** given the same inputs. If two or more collating functions are registered -** to the same collation name (using different eTextRep values) then all -** must give an equivalent answer when invoked with equivalent strings. -** The collating function must obey the following properties for all -** strings A, B, and C: -** -**
-**
-**      1. If A==B then B==A.
-**      2. If A==B and B==C then A==C.
-**      3. If A<B THEN B>A.
-**      4. If A<B and B<C then A<C.
    -** -** If a collating function fails any of the above constraints and that -** collating function is registered and used, then the behavior of SQLite -** is undefined. -** -** ^The sqlite3_create_collation_v2() works like sqlite3_create_collation() -** with the addition that the xDestroy callback is invoked on pArg when -** the collating function is deleted. -** ^Collating functions are deleted when they are overridden by later -** calls to the collation creation functions or when the -** [database connection] is closed using [sqlite3_close()]. -** -** ^The xDestroy callback is not called if the -** sqlite3_create_collation_v2() function fails. Applications that invoke -** sqlite3_create_collation_v2() with a non-NULL xDestroy argument should -** check the return code and dispose of the application data pointer -** themselves rather than expecting SQLite to deal with it for them. -** This is different from every other SQLite interface. The inconsistency -** is unfortunate but cannot be changed without breaking backwards -** compatibility. -** -** See also: [sqlite3_collation_needed()] and [sqlite3_collation_needed16()]. -*/ -SQLITE_API int sqlite3_create_collation( - sqlite3*, - const char *zName, - int eTextRep, - void *pArg, - int(*xCompare)(void*,int,const void*,int,const void*) -); -SQLITE_API int sqlite3_create_collation_v2( - sqlite3*, - const char *zName, - int eTextRep, - void *pArg, - int(*xCompare)(void*,int,const void*,int,const void*), - void(*xDestroy)(void*) -); -SQLITE_API int sqlite3_create_collation16( - sqlite3*, - const void *zName, - int eTextRep, - void *pArg, - int(*xCompare)(void*,int,const void*,int,const void*) -); - -/* -** CAPI3REF: Collation Needed Callbacks -** -** ^To avoid having to register all collation sequences before a database -** can be used, a single callback function may be registered with the -** [database connection] to be invoked whenever an undefined collation -** sequence is required. -** -** ^If the function is registered using the sqlite3_collation_needed() API, -** then it is passed the names of undefined collation sequences as strings -** encoded in UTF-8. ^If sqlite3_collation_needed16() is used, -** the names are passed as UTF-16 in machine native byte order. -** ^A call to either function replaces the existing collation-needed callback. -** -** ^(When the callback is invoked, the first argument passed is a copy -** of the second argument to sqlite3_collation_needed() or -** sqlite3_collation_needed16(). The second argument is the database -** connection. The third argument is one of [SQLITE_UTF8], [SQLITE_UTF16BE], -** or [SQLITE_UTF16LE], indicating the most desirable form of the collation -** sequence function required. The fourth parameter is the name of the -** required collation sequence.)^ -** -** The callback function should register the desired collation using -** [sqlite3_create_collation()], [sqlite3_create_collation16()], or -** [sqlite3_create_collation_v2()]. -*/ -SQLITE_API int sqlite3_collation_needed( - sqlite3*, - void*, - void(*)(void*,sqlite3*,int eTextRep,const char*) -); -SQLITE_API int sqlite3_collation_needed16( - sqlite3*, - void*, - void(*)(void*,sqlite3*,int eTextRep,const void*) -); - -#ifdef SQLITE_HAS_CODEC -/* -** Specify the key for an encrypted database. This routine should be -** called right after sqlite3_open(). -** -** The code to implement this API is not available in the public release -** of SQLite. 
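A hedged sketch of registering a collating function through sqlite3_create_collation(), as described above; the collation name NOCASE_ASCII and the comparison helper are illustrative assumptions, not part of SQLite.

#include <ctype.h>
#include "sqlite3.h"

/* Compare two strings case-insensitively over ASCII, honoring the explicit
** byte lengths that SQLite passes to collation callbacks. */
static int nocase_ascii_cmp(void *unused, int nA, const void *pA,
                                          int nB, const void *pB){
  const unsigned char *a = pA, *b = pB;
  int n = nA<nB ? nA : nB;
  int i;
  (void)unused;
  for(i=0; i<n; i++){
    int c = tolower(a[i]) - tolower(b[i]);
    if( c ) return c;
  }
  return nA - nB;
}

/* Registration on an open connection db (error handling omitted):
**   sqlite3_create_collation(db, "NOCASE_ASCII", SQLITE_UTF8, 0,
**                            nocase_ascii_cmp);
*/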
-*/ -SQLITE_API int sqlite3_key( - sqlite3 *db, /* Database to be rekeyed */ - const void *pKey, int nKey /* The key */ -); -SQLITE_API int sqlite3_key_v2( - sqlite3 *db, /* Database to be rekeyed */ - const char *zDbName, /* Name of the database */ - const void *pKey, int nKey /* The key */ -); - -/* -** Change the key on an open database. If the current database is not -** encrypted, this routine will encrypt it. If pNew==0 or nNew==0, the -** database is decrypted. -** -** The code to implement this API is not available in the public release -** of SQLite. -*/ -SQLITE_API int sqlite3_rekey( - sqlite3 *db, /* Database to be rekeyed */ - const void *pKey, int nKey /* The new key */ -); -SQLITE_API int sqlite3_rekey_v2( - sqlite3 *db, /* Database to be rekeyed */ - const char *zDbName, /* Name of the database */ - const void *pKey, int nKey /* The new key */ -); - -/* -** Specify the activation key for a SEE database. Unless -** activated, none of the SEE routines will work. -*/ -SQLITE_API void sqlite3_activate_see( - const char *zPassPhrase /* Activation phrase */ -); -#endif - -#ifdef SQLITE_ENABLE_CEROD -/* -** Specify the activation key for a CEROD database. Unless -** activated, none of the CEROD routines will work. -*/ -SQLITE_API void sqlite3_activate_cerod( - const char *zPassPhrase /* Activation phrase */ -); -#endif - -/* -** CAPI3REF: Suspend Execution For A Short Time -** -** The sqlite3_sleep() function causes the current thread to suspend execution -** for at least a number of milliseconds specified in its parameter. -** -** If the operating system does not support sleep requests with -** millisecond time resolution, then the time will be rounded up to -** the nearest second. The number of milliseconds of sleep actually -** requested from the operating system is returned. -** -** ^SQLite implements this interface by calling the xSleep() -** method of the default [sqlite3_vfs] object. If the xSleep() method -** of the default VFS is not implemented correctly, or not implemented at -** all, then the behavior of sqlite3_sleep() may deviate from the description -** in the previous paragraphs. -*/ -SQLITE_API int sqlite3_sleep(int); - -/* -** CAPI3REF: Name Of The Folder Holding Temporary Files -** -** ^(If this global variable is made to point to a string which is -** the name of a folder (a.k.a. directory), then all temporary files -** created by SQLite when using a built-in [sqlite3_vfs | VFS] -** will be placed in that directory.)^ ^If this variable -** is a NULL pointer, then SQLite performs a search for an appropriate -** temporary file directory. -** -** It is not safe to read or modify this variable in more than one -** thread at a time. It is not safe to read or modify this variable -** if a [database connection] is being used at the same time in a separate -** thread. -** It is intended that this variable be set once -** as part of process initialization and before any SQLite interface -** routines have been called and that this variable remain unchanged -** thereafter. -** -** ^The [temp_store_directory pragma] may modify this variable and cause -** it to point to memory obtained from [sqlite3_malloc]. ^Furthermore, -** the [temp_store_directory pragma] always assumes that any string -** that this variable points to is held in memory obtained from -** [sqlite3_malloc] and the pragma may attempt to free that memory -** using [sqlite3_free]. 
-** Hence, if this variable is modified directly, either it should be -** made NULL or made to point to memory obtained from [sqlite3_malloc] -** or else the use of the [temp_store_directory pragma] should be avoided. -** -** Note to Windows Runtime users: The temporary directory must be set -** prior to calling [sqlite3_open] or [sqlite3_open_v2]. Otherwise, various -** features that require the use of temporary files may fail. Here is an -** example of how to do this using C++ with the Windows Runtime: -** -**
    -** LPCWSTR zPath = Windows::Storage::ApplicationData::Current->
    -**       TemporaryFolder->Path->Data();
    -** char zPathBuf[MAX_PATH + 1];
    -** memset(zPathBuf, 0, sizeof(zPathBuf));
    -** WideCharToMultiByte(CP_UTF8, 0, zPath, -1, zPathBuf, sizeof(zPathBuf),
    -**       NULL, NULL);
    -** sqlite3_temp_directory = sqlite3_mprintf("%s", zPathBuf);
    -** 
    -*/ -SQLITE_API SQLITE_EXTERN char *sqlite3_temp_directory; - -/* -** CAPI3REF: Name Of The Folder Holding Database Files -** -** ^(If this global variable is made to point to a string which is -** the name of a folder (a.k.a. directory), then all database files -** specified with a relative pathname and created or accessed by -** SQLite when using a built-in windows [sqlite3_vfs | VFS] will be assumed -** to be relative to that directory.)^ ^If this variable is a NULL -** pointer, then SQLite assumes that all database files specified -** with a relative pathname are relative to the current directory -** for the process. Only the windows VFS makes use of this global -** variable; it is ignored by the unix VFS. -** -** Changing the value of this variable while a database connection is -** open can result in a corrupt database. -** -** It is not safe to read or modify this variable in more than one -** thread at a time. It is not safe to read or modify this variable -** if a [database connection] is being used at the same time in a separate -** thread. -** It is intended that this variable be set once -** as part of process initialization and before any SQLite interface -** routines have been called and that this variable remain unchanged -** thereafter. -** -** ^The [data_store_directory pragma] may modify this variable and cause -** it to point to memory obtained from [sqlite3_malloc]. ^Furthermore, -** the [data_store_directory pragma] always assumes that any string -** that this variable points to is held in memory obtained from -** [sqlite3_malloc] and the pragma may attempt to free that memory -** using [sqlite3_free]. -** Hence, if this variable is modified directly, either it should be -** made NULL or made to point to memory obtained from [sqlite3_malloc] -** or else the use of the [data_store_directory pragma] should be avoided. -*/ -SQLITE_API SQLITE_EXTERN char *sqlite3_data_directory; - -/* -** CAPI3REF: Test For Auto-Commit Mode -** KEYWORDS: {autocommit mode} -** -** ^The sqlite3_get_autocommit() interface returns non-zero or -** zero if the given database connection is or is not in autocommit mode, -** respectively. ^Autocommit mode is on by default. -** ^Autocommit mode is disabled by a [BEGIN] statement. -** ^Autocommit mode is re-enabled by a [COMMIT] or [ROLLBACK]. -** -** If certain kinds of errors occur on a statement within a multi-statement -** transaction (errors including [SQLITE_FULL], [SQLITE_IOERR], -** [SQLITE_NOMEM], [SQLITE_BUSY], and [SQLITE_INTERRUPT]) then the -** transaction might be rolled back automatically. The only way to -** find out whether SQLite automatically rolled back the transaction after -** an error is to use this function. -** -** If another thread changes the autocommit status of the database -** connection while this routine is running, then the return value -** is undefined. -*/ -SQLITE_API int sqlite3_get_autocommit(sqlite3*); - -/* -** CAPI3REF: Find The Database Handle Of A Prepared Statement -** -** ^The sqlite3_db_handle interface returns the [database connection] handle -** to which a [prepared statement] belongs. ^The [database connection] -** returned by sqlite3_db_handle is the same [database connection] -** that was the first argument -** to the [sqlite3_prepare_v2()] call (or its variants) that was used to -** create the statement in the first place. 
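A hedged sketch showing sqlite3_get_autocommit() and sqlite3_db_handle() together; the helper name autocommit_demo() and the trivial SQL are assumptions for illustration.

#include <stdio.h>
#include "sqlite3.h"

/* Illustrative only: shows how autocommit state changes across BEGIN/COMMIT
** and how a prepared statement can be traced back to its connection. */
static void autocommit_demo(sqlite3 *db){
  sqlite3_stmt *stmt = 0;
  printf("autocommit before BEGIN: %d\n", sqlite3_get_autocommit(db));
  sqlite3_exec(db, "BEGIN", 0, 0, 0);
  printf("autocommit inside txn:   %d\n", sqlite3_get_autocommit(db));
  sqlite3_prepare_v2(db, "SELECT 1", -1, &stmt, 0);
  /* sqlite3_db_handle() returns the connection the statement belongs to. */
  if( sqlite3_db_handle(stmt)==db ) printf("statement belongs to db\n");
  sqlite3_finalize(stmt);
  sqlite3_exec(db, "COMMIT", 0, 0, 0);
}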
-*/ -SQLITE_API sqlite3 *sqlite3_db_handle(sqlite3_stmt*); - -/* -** CAPI3REF: Return The Filename For A Database Connection -** -** ^The sqlite3_db_filename(D,N) interface returns a pointer to a filename -** associated with database N of connection D. ^The main database file -** has the name "main". If there is no attached database N on the database -** connection D, or if database N is a temporary or in-memory database, then -** a NULL pointer is returned. -** -** ^The filename returned by this function is the output of the -** xFullPathname method of the [VFS]. ^In other words, the filename -** will be an absolute pathname, even if the filename used -** to open the database originally was a URI or relative pathname. -*/ -SQLITE_API const char *sqlite3_db_filename(sqlite3 *db, const char *zDbName); - -/* -** CAPI3REF: Determine if a database is read-only -** -** ^The sqlite3_db_readonly(D,N) interface returns 1 if the database N -** of connection D is read-only, 0 if it is read/write, or -1 if N is not -** the name of a database on connection D. -*/ -SQLITE_API int sqlite3_db_readonly(sqlite3 *db, const char *zDbName); - -/* -** CAPI3REF: Find the next prepared statement -** -** ^This interface returns a pointer to the next [prepared statement] after -** pStmt associated with the [database connection] pDb. ^If pStmt is NULL -** then this interface returns a pointer to the first prepared statement -** associated with the database connection pDb. ^If no prepared statement -** satisfies the conditions of this routine, it returns NULL. -** -** The [database connection] pointer D in a call to -** [sqlite3_next_stmt(D,S)] must refer to an open database -** connection and in particular must not be a NULL pointer. -*/ -SQLITE_API sqlite3_stmt *sqlite3_next_stmt(sqlite3 *pDb, sqlite3_stmt *pStmt); - -/* -** CAPI3REF: Commit And Rollback Notification Callbacks -** -** ^The sqlite3_commit_hook() interface registers a callback -** function to be invoked whenever a transaction is [COMMIT | committed]. -** ^Any callback set by a previous call to sqlite3_commit_hook() -** for the same database connection is overridden. -** ^The sqlite3_rollback_hook() interface registers a callback -** function to be invoked whenever a transaction is [ROLLBACK | rolled back]. -** ^Any callback set by a previous call to sqlite3_rollback_hook() -** for the same database connection is overridden. -** ^The pArg argument is passed through to the callback. -** ^If the callback on a commit hook function returns non-zero, -** then the commit is converted into a rollback. -** -** ^The sqlite3_commit_hook(D,C,P) and sqlite3_rollback_hook(D,C,P) functions -** return the P argument from the previous call of the same function -** on the same [database connection] D, or NULL for -** the first call for each function on D. -** -** The commit and rollback hook callbacks are not reentrant. -** The callback implementation must not do anything that will modify -** the database connection that invoked the callback. Any actions -** to modify the database connection must be deferred until after the -** completion of the [sqlite3_step()] call that triggered the commit -** or rollback hook in the first place. -** Note that running any other SQL statements, including SELECT statements, -** or merely calling [sqlite3_prepare_v2()] and [sqlite3_step()] will modify -** the database connections for the meaning of "modify" in this paragraph. -** -** ^Registering a NULL function disables the callback. 
-** -** ^When the commit hook callback routine returns zero, the [COMMIT] -** operation is allowed to continue normally. ^If the commit hook -** returns non-zero, then the [COMMIT] is converted into a [ROLLBACK]. -** ^The rollback hook is invoked on a rollback that results from a commit -** hook returning non-zero, just as it would be with any other rollback. -** -** ^For the purposes of this API, a transaction is said to have been -** rolled back if an explicit "ROLLBACK" statement is executed, or -** an error or constraint causes an implicit rollback to occur. -** ^The rollback callback is not invoked if a transaction is -** automatically rolled back because the database connection is closed. -** -** See also the [sqlite3_update_hook()] interface. -*/ -SQLITE_API void *sqlite3_commit_hook(sqlite3*, int(*)(void*), void*); -SQLITE_API void *sqlite3_rollback_hook(sqlite3*, void(*)(void *), void*); - -/* -** CAPI3REF: Data Change Notification Callbacks -** -** ^The sqlite3_update_hook() interface registers a callback function -** with the [database connection] identified by the first argument -** to be invoked whenever a row is updated, inserted or deleted in -** a rowid table. -** ^Any callback set by a previous call to this function -** for the same database connection is overridden. -** -** ^The second argument is a pointer to the function to invoke when a -** row is updated, inserted or deleted in a rowid table. -** ^The first argument to the callback is a copy of the third argument -** to sqlite3_update_hook(). -** ^The second callback argument is one of [SQLITE_INSERT], [SQLITE_DELETE], -** or [SQLITE_UPDATE], depending on the operation that caused the callback -** to be invoked. -** ^The third and fourth arguments to the callback contain pointers to the -** database and table name containing the affected row. -** ^The final callback parameter is the [rowid] of the row. -** ^In the case of an update, this is the [rowid] after the update takes place. -** -** ^(The update hook is not invoked when internal system tables are -** modified (i.e. sqlite_master and sqlite_sequence).)^ -** ^The update hook is not invoked when [WITHOUT ROWID] tables are modified. -** -** ^In the current implementation, the update hook -** is not invoked when duplication rows are deleted because of an -** [ON CONFLICT | ON CONFLICT REPLACE] clause. ^Nor is the update hook -** invoked when rows are deleted using the [truncate optimization]. -** The exceptions defined in this paragraph might change in a future -** release of SQLite. -** -** The update hook implementation must not do anything that will modify -** the database connection that invoked the update hook. Any actions -** to modify the database connection must be deferred until after the -** completion of the [sqlite3_step()] call that triggered the update hook. -** Note that [sqlite3_prepare_v2()] and [sqlite3_step()] both modify their -** database connections for the meaning of "modify" in this paragraph. -** -** ^The sqlite3_update_hook(D,C,P) function -** returns the P argument from the previous call -** on the same [database connection] D, or NULL for -** the first call on D. -** -** See also the [sqlite3_commit_hook()] and [sqlite3_rollback_hook()] -** interfaces. 
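A hedged sketch of installing the hooks described above; the callback names on_change() and on_commit() are invented for illustration, and a real application would do its own bookkeeping.

#include <stdio.h>
#include "sqlite3.h"

/* Invoked after every INSERT, UPDATE, or DELETE on a rowid table. */
static void on_change(void *arg, int op, const char *zDb,
                      const char *zTable, sqlite3_int64 rowid){
  (void)arg;
  printf("op=%d db=%s table=%s rowid=%lld\n",
         op, zDb, zTable, (long long)rowid);
}

/* Returning non-zero from a commit hook converts the COMMIT into a ROLLBACK. */
static int on_commit(void *arg){
  (void)arg;
  return 0;  /* allow the commit */
}

static void install_hooks(sqlite3 *db){
  sqlite3_update_hook(db, on_change, 0);
  sqlite3_commit_hook(db, on_commit, 0);
}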
-*/ -SQLITE_API void *sqlite3_update_hook( - sqlite3*, - void(*)(void *,int ,char const *,char const *,sqlite3_int64), - void* -); - -/* -** CAPI3REF: Enable Or Disable Shared Pager Cache -** -** ^(This routine enables or disables the sharing of the database cache -** and schema data structures between [database connection | connections] -** to the same database. Sharing is enabled if the argument is true -** and disabled if the argument is false.)^ -** -** ^Cache sharing is enabled and disabled for an entire process. -** This is a change as of SQLite version 3.5.0. In prior versions of SQLite, -** sharing was enabled or disabled for each thread separately. -** -** ^(The cache sharing mode set by this interface effects all subsequent -** calls to [sqlite3_open()], [sqlite3_open_v2()], and [sqlite3_open16()]. -** Existing database connections continue use the sharing mode -** that was in effect at the time they were opened.)^ -** -** ^(This routine returns [SQLITE_OK] if shared cache was enabled or disabled -** successfully. An [error code] is returned otherwise.)^ -** -** ^Shared cache is disabled by default. But this might change in -** future releases of SQLite. Applications that care about shared -** cache setting should set it explicitly. -** -** This interface is threadsafe on processors where writing a -** 32-bit integer is atomic. -** -** See Also: [SQLite Shared-Cache Mode] -*/ -SQLITE_API int sqlite3_enable_shared_cache(int); - -/* -** CAPI3REF: Attempt To Free Heap Memory -** -** ^The sqlite3_release_memory() interface attempts to free N bytes -** of heap memory by deallocating non-essential memory allocations -** held by the database library. Memory used to cache database -** pages to improve performance is an example of non-essential memory. -** ^sqlite3_release_memory() returns the number of bytes actually freed, -** which might be more or less than the amount requested. -** ^The sqlite3_release_memory() routine is a no-op returning zero -** if SQLite is not compiled with [SQLITE_ENABLE_MEMORY_MANAGEMENT]. -** -** See also: [sqlite3_db_release_memory()] -*/ -SQLITE_API int sqlite3_release_memory(int); - -/* -** CAPI3REF: Free Memory Used By A Database Connection -** -** ^The sqlite3_db_release_memory(D) interface attempts to free as much heap -** memory as possible from database connection D. Unlike the -** [sqlite3_release_memory()] interface, this interface is in effect even -** when the [SQLITE_ENABLE_MEMORY_MANAGEMENT] compile-time option is -** omitted. -** -** See also: [sqlite3_release_memory()] -*/ -SQLITE_API int sqlite3_db_release_memory(sqlite3*); - -/* -** CAPI3REF: Impose A Limit On Heap Size -** -** ^The sqlite3_soft_heap_limit64() interface sets and/or queries the -** soft limit on the amount of heap memory that may be allocated by SQLite. -** ^SQLite strives to keep heap memory utilization below the soft heap -** limit by reducing the number of pages held in the page cache -** as heap memory usages approaches the limit. -** ^The soft heap limit is "soft" because even though SQLite strives to stay -** below the limit, it will exceed the limit rather than generate -** an [SQLITE_NOMEM] error. In other words, the soft heap limit -** is advisory only. -** -** ^The return value from sqlite3_soft_heap_limit64() is the size of -** the soft heap limit prior to the call, or negative in the case of an -** error. ^If the argument N is negative -** then no change is made to the soft heap limit. 
Hence, the current
-** size of the soft heap limit can be determined by invoking
-** sqlite3_soft_heap_limit64() with a negative argument.
-**
-** ^If the argument N is zero then the soft heap limit is disabled.
-**
-** ^(The soft heap limit is not enforced in the current implementation
-** if one or more of the following conditions are true:
-**
-**      • The soft heap limit is set to zero.
-**      • Memory accounting is disabled using a combination of the
-** [sqlite3_config]([SQLITE_CONFIG_MEMSTATUS],...) start-time option and
-** the [SQLITE_DEFAULT_MEMSTATUS] compile-time option.
-**      • An alternative page cache implementation is specified using
-** [sqlite3_config]([SQLITE_CONFIG_PCACHE2],...).
-**      • The page cache allocates from its own memory pool supplied
-** by [sqlite3_config]([SQLITE_CONFIG_PAGECACHE],...) rather than
-** from the heap.
    )^ -** -** Beginning with SQLite version 3.7.3, the soft heap limit is enforced -** regardless of whether or not the [SQLITE_ENABLE_MEMORY_MANAGEMENT] -** compile-time option is invoked. With [SQLITE_ENABLE_MEMORY_MANAGEMENT], -** the soft heap limit is enforced on every memory allocation. Without -** [SQLITE_ENABLE_MEMORY_MANAGEMENT], the soft heap limit is only enforced -** when memory is allocated by the page cache. Testing suggests that because -** the page cache is the predominate memory user in SQLite, most -** applications will achieve adequate soft heap limit enforcement without -** the use of [SQLITE_ENABLE_MEMORY_MANAGEMENT]. -** -** The circumstances under which SQLite will enforce the soft heap limit may -** changes in future releases of SQLite. -*/ -SQLITE_API sqlite3_int64 sqlite3_soft_heap_limit64(sqlite3_int64 N); - -/* -** CAPI3REF: Deprecated Soft Heap Limit Interface -** DEPRECATED -** -** This is a deprecated version of the [sqlite3_soft_heap_limit64()] -** interface. This routine is provided for historical compatibility -** only. All new applications should use the -** [sqlite3_soft_heap_limit64()] interface rather than this one. -*/ -SQLITE_API SQLITE_DEPRECATED void sqlite3_soft_heap_limit(int N); - - -/* -** CAPI3REF: Extract Metadata About A Column Of A Table -** -** ^This routine returns metadata about a specific column of a specific -** database table accessible using the [database connection] handle -** passed as the first function argument. -** -** ^The column is identified by the second, third and fourth parameters to -** this function. ^The second parameter is either the name of the database -** (i.e. "main", "temp", or an attached database) containing the specified -** table or NULL. ^If it is NULL, then all attached databases are searched -** for the table using the same algorithm used by the database engine to -** resolve unqualified table references. -** -** ^The third and fourth parameters to this function are the table and column -** name of the desired column, respectively. Neither of these parameters -** may be NULL. -** -** ^Metadata is returned by writing to the memory locations passed as the 5th -** and subsequent parameters to this function. ^Any of these arguments may be -** NULL, in which case the corresponding element of metadata is omitted. -** -** ^(
-**
-**     Parameter   Output Type    Description
-**     ---------   -----------    -----------------------------------------
-**     5th         const char*    Data type
-**     6th         const char*    Name of default collation sequence
-**     7th         int            True if column has a NOT NULL constraint
-**     8th         int            True if column is part of the PRIMARY KEY
-**     9th         int            True if column is [AUTOINCREMENT]
-**
    )^ -** -** ^The memory pointed to by the character pointers returned for the -** declaration type and collation sequence is valid only until the next -** call to any SQLite API function. -** -** ^If the specified table is actually a view, an [error code] is returned. -** -** ^If the specified column is "rowid", "oid" or "_rowid_" and an -** [INTEGER PRIMARY KEY] column has been explicitly declared, then the output -** parameters are set for the explicitly declared column. ^(If there is no -** explicitly declared [INTEGER PRIMARY KEY] column, then the output -** parameters are set as follows: -** -**
    -**     data type: "INTEGER"
    -**     collation sequence: "BINARY"
    -**     not null: 0
    -**     primary key: 1
    -**     auto increment: 0
    -** 
    )^ -** -** ^(This function may load one or more schemas from database files. If an -** error occurs during this process, or if the requested table or column -** cannot be found, an [error code] is returned and an error message left -** in the [database connection] (to be retrieved using sqlite3_errmsg()).)^ -** -** ^This API is only available if the library was compiled with the -** [SQLITE_ENABLE_COLUMN_METADATA] C-preprocessor symbol defined. -*/ -SQLITE_API int sqlite3_table_column_metadata( - sqlite3 *db, /* Connection handle */ - const char *zDbName, /* Database name or NULL */ - const char *zTableName, /* Table name */ - const char *zColumnName, /* Column name */ - char const **pzDataType, /* OUTPUT: Declared data type */ - char const **pzCollSeq, /* OUTPUT: Collation sequence name */ - int *pNotNull, /* OUTPUT: True if NOT NULL constraint exists */ - int *pPrimaryKey, /* OUTPUT: True if column part of PK */ - int *pAutoinc /* OUTPUT: True if column is auto-increment */ -); - -/* -** CAPI3REF: Load An Extension -** -** ^This interface loads an SQLite extension library from the named file. -** -** ^The sqlite3_load_extension() interface attempts to load an -** [SQLite extension] library contained in the file zFile. If -** the file cannot be loaded directly, attempts are made to load -** with various operating-system specific extensions added. -** So for example, if "samplelib" cannot be loaded, then names like -** "samplelib.so" or "samplelib.dylib" or "samplelib.dll" might -** be tried also. -** -** ^The entry point is zProc. -** ^(zProc may be 0, in which case SQLite will try to come up with an -** entry point name on its own. It first tries "sqlite3_extension_init". -** If that does not work, it constructs a name "sqlite3_X_init" where the -** X is consists of the lower-case equivalent of all ASCII alphabetic -** characters in the filename from the last "/" to the first following -** "." and omitting any initial "lib".)^ -** ^The sqlite3_load_extension() interface returns -** [SQLITE_OK] on success and [SQLITE_ERROR] if something goes wrong. -** ^If an error occurs and pzErrMsg is not 0, then the -** [sqlite3_load_extension()] interface shall attempt to -** fill *pzErrMsg with error message text stored in memory -** obtained from [sqlite3_malloc()]. The calling function -** should free this memory by calling [sqlite3_free()]. -** -** ^Extension loading must be enabled using -** [sqlite3_enable_load_extension()] prior to calling this API, -** otherwise an error will be returned. -** -** See also the [load_extension() SQL function]. -*/ -SQLITE_API int sqlite3_load_extension( - sqlite3 *db, /* Load the extension into this database connection */ - const char *zFile, /* Name of the shared library containing extension */ - const char *zProc, /* Entry point. Derived from zFile if 0 */ - char **pzErrMsg /* Put error message here if not 0 */ -); - -/* -** CAPI3REF: Enable Or Disable Extension Loading -** -** ^So as not to open security holes in older applications that are -** unprepared to deal with [extension loading], and as a means of disabling -** [extension loading] while evaluating user-entered SQL, the following API -** is provided to turn the [sqlite3_load_extension()] mechanism on and off. -** -** ^Extension loading is off by default. -** ^Call the sqlite3_enable_load_extension() routine with onoff==1 -** to turn extension loading on and call it with onoff==0 to turn -** it back off again. 
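A hedged sketch of the extension-loading flow described above; the library name "my_extension" is a placeholder and the entry point is left as 0 so SQLite derives it itself.

#include <stdio.h>
#include "sqlite3.h"

/* Illustrative: enable extension loading, load a shared library, then
** disable loading again. */
static int load_my_extension(sqlite3 *db){
  char *zErr = 0;
  int rc;
  sqlite3_enable_load_extension(db, 1);
  rc = sqlite3_load_extension(db, "my_extension", 0, &zErr);
  if( rc!=SQLITE_OK ){
    fprintf(stderr, "load failed: %s\n", zErr ? zErr : "unknown error");
    sqlite3_free(zErr);
  }
  sqlite3_enable_load_extension(db, 0);
  return rc;
}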
-*/ -SQLITE_API int sqlite3_enable_load_extension(sqlite3 *db, int onoff); - -/* -** CAPI3REF: Automatically Load Statically Linked Extensions -** -** ^This interface causes the xEntryPoint() function to be invoked for -** each new [database connection] that is created. The idea here is that -** xEntryPoint() is the entry point for a statically linked [SQLite extension] -** that is to be automatically loaded into all new database connections. -** -** ^(Even though the function prototype shows that xEntryPoint() takes -** no arguments and returns void, SQLite invokes xEntryPoint() with three -** arguments and expects and integer result as if the signature of the -** entry point where as follows: -** -**
    -**    int xEntryPoint(
    -**      sqlite3 *db,
    -**      const char **pzErrMsg,
    -**      const struct sqlite3_api_routines *pThunk
    -**    );
    -** 
    )^ -** -** If the xEntryPoint routine encounters an error, it should make *pzErrMsg -** point to an appropriate error message (obtained from [sqlite3_mprintf()]) -** and return an appropriate [error code]. ^SQLite ensures that *pzErrMsg -** is NULL before calling the xEntryPoint(). ^SQLite will invoke -** [sqlite3_free()] on *pzErrMsg after xEntryPoint() returns. ^If any -** xEntryPoint() returns an error, the [sqlite3_open()], [sqlite3_open16()], -** or [sqlite3_open_v2()] call that provoked the xEntryPoint() will fail. -** -** ^Calling sqlite3_auto_extension(X) with an entry point X that is already -** on the list of automatic extensions is a harmless no-op. ^No entry point -** will be called more than once for each database connection that is opened. -** -** See also: [sqlite3_reset_auto_extension()] -** and [sqlite3_cancel_auto_extension()] -*/ -SQLITE_API int sqlite3_auto_extension(void (*xEntryPoint)(void)); - -/* -** CAPI3REF: Cancel Automatic Extension Loading -** -** ^The [sqlite3_cancel_auto_extension(X)] interface unregisters the -** initialization routine X that was registered using a prior call to -** [sqlite3_auto_extension(X)]. ^The [sqlite3_cancel_auto_extension(X)] -** routine returns 1 if initialization routine X was successfully -** unregistered and it returns 0 if X was not on the list of initialization -** routines. -*/ -SQLITE_API int sqlite3_cancel_auto_extension(void (*xEntryPoint)(void)); - -/* -** CAPI3REF: Reset Automatic Extension Loading -** -** ^This interface disables all automatic extensions previously -** registered using [sqlite3_auto_extension()]. -*/ -SQLITE_API void sqlite3_reset_auto_extension(void); - -/* -** The interface to the virtual-table mechanism is currently considered -** to be experimental. The interface might change in incompatible ways. -** If this is a problem for you, do not use the interface at this time. -** -** When the virtual-table mechanism stabilizes, we will declare the -** interface fixed, support it indefinitely, and remove this comment. -*/ - -/* -** Structures used by the virtual table interface -*/ -typedef struct sqlite3_vtab sqlite3_vtab; -typedef struct sqlite3_index_info sqlite3_index_info; -typedef struct sqlite3_vtab_cursor sqlite3_vtab_cursor; -typedef struct sqlite3_module sqlite3_module; - -/* -** CAPI3REF: Virtual Table Object -** KEYWORDS: sqlite3_module {virtual table module} -** -** This structure, sometimes called a "virtual table module", -** defines the implementation of a [virtual tables]. -** This structure consists mostly of methods for the module. -** -** ^A virtual table module is created by filling in a persistent -** instance of this structure and passing a pointer to that instance -** to [sqlite3_create_module()] or [sqlite3_create_module_v2()]. -** ^The registration remains valid until it is replaced by a different -** module or until the [database connection] closes. The content -** of this structure must not change while it is registered with -** any database connection. 
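A hedged sketch of the automatic-extension mechanism described above; my_ext_init() is a hypothetical entry point that does nothing.

#include "sqlite3.h"

struct sqlite3_api_routines;   /* opaque; declared for the prototype below */

/* Hypothetical statically linked extension entry point.  As described above,
** SQLite invokes it with three arguments even though it is registered
** through a void (*)(void) pointer. */
static int my_ext_init(sqlite3 *db, const char **pzErrMsg,
                       const struct sqlite3_api_routines *pThunk){
  (void)db; (void)pzErrMsg; (void)pThunk;
  return SQLITE_OK;   /* nothing to initialize in this sketch */
}

/* Arrange for my_ext_init() to run against every new database connection. */
static void register_my_auto_extension(void){
  sqlite3_auto_extension((void(*)(void))my_ext_init);
}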
-*/ -struct sqlite3_module { - int iVersion; - int (*xCreate)(sqlite3*, void *pAux, - int argc, const char *const*argv, - sqlite3_vtab **ppVTab, char**); - int (*xConnect)(sqlite3*, void *pAux, - int argc, const char *const*argv, - sqlite3_vtab **ppVTab, char**); - int (*xBestIndex)(sqlite3_vtab *pVTab, sqlite3_index_info*); - int (*xDisconnect)(sqlite3_vtab *pVTab); - int (*xDestroy)(sqlite3_vtab *pVTab); - int (*xOpen)(sqlite3_vtab *pVTab, sqlite3_vtab_cursor **ppCursor); - int (*xClose)(sqlite3_vtab_cursor*); - int (*xFilter)(sqlite3_vtab_cursor*, int idxNum, const char *idxStr, - int argc, sqlite3_value **argv); - int (*xNext)(sqlite3_vtab_cursor*); - int (*xEof)(sqlite3_vtab_cursor*); - int (*xColumn)(sqlite3_vtab_cursor*, sqlite3_context*, int); - int (*xRowid)(sqlite3_vtab_cursor*, sqlite3_int64 *pRowid); - int (*xUpdate)(sqlite3_vtab *, int, sqlite3_value **, sqlite3_int64 *); - int (*xBegin)(sqlite3_vtab *pVTab); - int (*xSync)(sqlite3_vtab *pVTab); - int (*xCommit)(sqlite3_vtab *pVTab); - int (*xRollback)(sqlite3_vtab *pVTab); - int (*xFindFunction)(sqlite3_vtab *pVtab, int nArg, const char *zName, - void (**pxFunc)(sqlite3_context*,int,sqlite3_value**), - void **ppArg); - int (*xRename)(sqlite3_vtab *pVtab, const char *zNew); - /* The methods above are in version 1 of the sqlite_module object. Those - ** below are for version 2 and greater. */ - int (*xSavepoint)(sqlite3_vtab *pVTab, int); - int (*xRelease)(sqlite3_vtab *pVTab, int); - int (*xRollbackTo)(sqlite3_vtab *pVTab, int); -}; - -/* -** CAPI3REF: Virtual Table Indexing Information -** KEYWORDS: sqlite3_index_info -** -** The sqlite3_index_info structure and its substructures is used as part -** of the [virtual table] interface to -** pass information into and receive the reply from the [xBestIndex] -** method of a [virtual table module]. The fields under **Inputs** are the -** inputs to xBestIndex and are read-only. xBestIndex inserts its -** results into the **Outputs** fields. -** -** ^(The aConstraint[] array records WHERE clause constraints of the form: -** -**
-**     column OP expr
    -** -** where OP is =, <, <=, >, or >=.)^ ^(The particular operator is -** stored in aConstraint[].op using one of the -** [SQLITE_INDEX_CONSTRAINT_EQ | SQLITE_INDEX_CONSTRAINT_ values].)^ -** ^(The index of the column is stored in -** aConstraint[].iColumn.)^ ^(aConstraint[].usable is TRUE if the -** expr on the right-hand side can be evaluated (and thus the constraint -** is usable) and false if it cannot.)^ -** -** ^The optimizer automatically inverts terms of the form "expr OP column" -** and makes other simplifications to the WHERE clause in an attempt to -** get as many WHERE clause terms into the form shown above as possible. -** ^The aConstraint[] array only reports WHERE clause terms that are -** relevant to the particular virtual table being queried. -** -** ^Information about the ORDER BY clause is stored in aOrderBy[]. -** ^Each term of aOrderBy records a column of the ORDER BY clause. -** -** The [xBestIndex] method must fill aConstraintUsage[] with information -** about what parameters to pass to xFilter. ^If argvIndex>0 then -** the right-hand side of the corresponding aConstraint[] is evaluated -** and becomes the argvIndex-th entry in argv. ^(If aConstraintUsage[].omit -** is true, then the constraint is assumed to be fully handled by the -** virtual table and is not checked again by SQLite.)^ -** -** ^The idxNum and idxPtr values are recorded and passed into the -** [xFilter] method. -** ^[sqlite3_free()] is used to free idxPtr if and only if -** needToFreeIdxPtr is true. -** -** ^The orderByConsumed means that output from [xFilter]/[xNext] will occur in -** the correct order to satisfy the ORDER BY clause so that no separate -** sorting step is required. -** -** ^The estimatedCost value is an estimate of the cost of a particular -** strategy. A cost of N indicates that the cost of the strategy is similar -** to a linear scan of an SQLite table with N rows. A cost of log(N) -** indicates that the expense of the operation is similar to that of a -** binary search on a unique indexed field of an SQLite table with N rows. -** -** ^The estimatedRows value is an estimate of the number of rows that -** will be returned by the strategy. -** -** IMPORTANT: The estimatedRows field was added to the sqlite3_index_info -** structure for SQLite version 3.8.2. If a virtual table extension is -** used with an SQLite version earlier than 3.8.2, the results of attempting -** to read or write the estimatedRows field are undefined (but are likely -** to included crashing the application). The estimatedRows field should -** therefore only be used if [sqlite3_libversion_number()] returns a -** value greater than or equal to 3008002. -*/ -struct sqlite3_index_info { - /* Inputs */ - int nConstraint; /* Number of entries in aConstraint */ - struct sqlite3_index_constraint { - int iColumn; /* Column on left-hand side of constraint */ - unsigned char op; /* Constraint operator */ - unsigned char usable; /* True if this constraint is usable */ - int iTermOffset; /* Used internally - xBestIndex should ignore */ - } *aConstraint; /* Table of WHERE clause constraints */ - int nOrderBy; /* Number of terms in the ORDER BY clause */ - struct sqlite3_index_orderby { - int iColumn; /* Column number */ - unsigned char desc; /* True for DESC. False for ASC. 
*/ - } *aOrderBy; /* The ORDER BY clause */ - /* Outputs */ - struct sqlite3_index_constraint_usage { - int argvIndex; /* if >0, constraint is part of argv to xFilter */ - unsigned char omit; /* Do not code a test for this constraint */ - } *aConstraintUsage; - int idxNum; /* Number used to identify the index */ - char *idxStr; /* String, possibly obtained from sqlite3_malloc */ - int needToFreeIdxStr; /* Free idxStr using sqlite3_free() if true */ - int orderByConsumed; /* True if output is already ordered */ - double estimatedCost; /* Estimated cost of using this index */ - /* Fields below are only available in SQLite 3.8.2 and later */ - sqlite3_int64 estimatedRows; /* Estimated number of rows returned */ -}; - -/* -** CAPI3REF: Virtual Table Constraint Operator Codes -** -** These macros defined the allowed values for the -** [sqlite3_index_info].aConstraint[].op field. Each value represents -** an operator that is part of a constraint term in the wHERE clause of -** a query that uses a [virtual table]. -*/ -#define SQLITE_INDEX_CONSTRAINT_EQ 2 -#define SQLITE_INDEX_CONSTRAINT_GT 4 -#define SQLITE_INDEX_CONSTRAINT_LE 8 -#define SQLITE_INDEX_CONSTRAINT_LT 16 -#define SQLITE_INDEX_CONSTRAINT_GE 32 -#define SQLITE_INDEX_CONSTRAINT_MATCH 64 - -/* -** CAPI3REF: Register A Virtual Table Implementation -** -** ^These routines are used to register a new [virtual table module] name. -** ^Module names must be registered before -** creating a new [virtual table] using the module and before using a -** preexisting [virtual table] for the module. -** -** ^The module name is registered on the [database connection] specified -** by the first parameter. ^The name of the module is given by the -** second parameter. ^The third parameter is a pointer to -** the implementation of the [virtual table module]. ^The fourth -** parameter is an arbitrary client data pointer that is passed through -** into the [xCreate] and [xConnect] methods of the virtual table module -** when a new virtual table is be being created or reinitialized. -** -** ^The sqlite3_create_module_v2() interface has a fifth parameter which -** is a pointer to a destructor for the pClientData. ^SQLite will -** invoke the destructor function (if it is not NULL) when SQLite -** no longer needs the pClientData pointer. ^The destructor will also -** be invoked if the call to sqlite3_create_module_v2() fails. -** ^The sqlite3_create_module() -** interface is equivalent to sqlite3_create_module_v2() with a NULL -** destructor. -*/ -SQLITE_API int sqlite3_create_module( - sqlite3 *db, /* SQLite connection to register module with */ - const char *zName, /* Name of the module */ - const sqlite3_module *p, /* Methods for the module */ - void *pClientData /* Client data for xCreate/xConnect */ -); -SQLITE_API int sqlite3_create_module_v2( - sqlite3 *db, /* SQLite connection to register module with */ - const char *zName, /* Name of the module */ - const sqlite3_module *p, /* Methods for the module */ - void *pClientData, /* Client data for xCreate/xConnect */ - void(*xDestroy)(void*) /* Module destructor function */ -); - -/* -** CAPI3REF: Virtual Table Instance Object -** KEYWORDS: sqlite3_vtab -** -** Every [virtual table module] implementation uses a subclass -** of this object to describe a particular instance -** of the [virtual table]. Each subclass will -** be tailored to the specific needs of the module implementation. -** The purpose of this superclass is to define certain fields that are -** common to all module implementations. 
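A hedged sketch of describing and registering a virtual table module with sqlite3_create_module_v2(); the my_vtab_* callbacks are assumed to be implemented elsewhere, so only the wiring is shown.

#include "sqlite3.h"

/* Assumed to be implemented elsewhere; this sketch only shows how a
** read-only module is described and registered. */
extern int my_vtab_connect(sqlite3*, void*, int, const char *const*,
                           sqlite3_vtab**, char**);
extern int my_vtab_disconnect(sqlite3_vtab*);
extern int my_vtab_best_index(sqlite3_vtab*, sqlite3_index_info*);
extern int my_vtab_open(sqlite3_vtab*, sqlite3_vtab_cursor**);
extern int my_vtab_close(sqlite3_vtab_cursor*);
extern int my_vtab_filter(sqlite3_vtab_cursor*, int, const char*,
                          int, sqlite3_value**);
extern int my_vtab_next(sqlite3_vtab_cursor*);
extern int my_vtab_eof(sqlite3_vtab_cursor*);
extern int my_vtab_column(sqlite3_vtab_cursor*, sqlite3_context*, int);
extern int my_vtab_rowid(sqlite3_vtab_cursor*, sqlite3_int64*);

static sqlite3_module my_module = {
  0,                   /* iVersion */
  my_vtab_connect,     /* xCreate (same as xConnect for this sketch) */
  my_vtab_connect,     /* xConnect */
  my_vtab_best_index,  /* xBestIndex */
  my_vtab_disconnect,  /* xDisconnect */
  my_vtab_disconnect,  /* xDestroy */
  my_vtab_open,        /* xOpen */
  my_vtab_close,       /* xClose */
  my_vtab_filter,      /* xFilter */
  my_vtab_next,        /* xNext */
  my_vtab_eof,         /* xEof */
  my_vtab_column,      /* xColumn */
  my_vtab_rowid,       /* xRowid */
  0, 0, 0, 0, 0, 0, 0  /* xUpdate .. xRename (unused for a read-only table) */
};

/* Registration (error handling omitted): */
static int register_my_module(sqlite3 *db){
  return sqlite3_create_module_v2(db, "my_vtab", &my_module, 0, 0);
}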
-** -** ^Virtual tables methods can set an error message by assigning a -** string obtained from [sqlite3_mprintf()] to zErrMsg. The method should -** take care that any prior string is freed by a call to [sqlite3_free()] -** prior to assigning a new string to zErrMsg. ^After the error message -** is delivered up to the client application, the string will be automatically -** freed by sqlite3_free() and the zErrMsg field will be zeroed. -*/ -struct sqlite3_vtab { - const sqlite3_module *pModule; /* The module for this virtual table */ - int nRef; /* NO LONGER USED */ - char *zErrMsg; /* Error message from sqlite3_mprintf() */ - /* Virtual table implementations will typically add additional fields */ -}; - -/* -** CAPI3REF: Virtual Table Cursor Object -** KEYWORDS: sqlite3_vtab_cursor {virtual table cursor} -** -** Every [virtual table module] implementation uses a subclass of the -** following structure to describe cursors that point into the -** [virtual table] and are used -** to loop through the virtual table. Cursors are created using the -** [sqlite3_module.xOpen | xOpen] method of the module and are destroyed -** by the [sqlite3_module.xClose | xClose] method. Cursors are used -** by the [xFilter], [xNext], [xEof], [xColumn], and [xRowid] methods -** of the module. Each module implementation will define -** the content of a cursor structure to suit its own needs. -** -** This superclass exists in order to define fields of the cursor that -** are common to all implementations. -*/ -struct sqlite3_vtab_cursor { - sqlite3_vtab *pVtab; /* Virtual table of this cursor */ - /* Virtual table implementations will typically add additional fields */ -}; - -/* -** CAPI3REF: Declare The Schema Of A Virtual Table -** -** ^The [xCreate] and [xConnect] methods of a -** [virtual table module] call this interface -** to declare the format (the names and datatypes of the columns) of -** the virtual tables they implement. -*/ -SQLITE_API int sqlite3_declare_vtab(sqlite3*, const char *zSQL); - -/* -** CAPI3REF: Overload A Function For A Virtual Table -** -** ^(Virtual tables can provide alternative implementations of functions -** using the [xFindFunction] method of the [virtual table module]. -** But global versions of those functions -** must exist in order to be overloaded.)^ -** -** ^(This API makes sure a global version of a function with a particular -** name and number of parameters exists. If no such function exists -** before this API is called, a new function is created.)^ ^The implementation -** of the new function always causes an exception to be thrown. So -** the new function is not good for anything by itself. Its only -** purpose is to be a placeholder function that can be overloaded -** by a [virtual table]. -*/ -SQLITE_API int sqlite3_overload_function(sqlite3*, const char *zFuncName, int nArg); - -/* -** The interface to the virtual-table mechanism defined above (back up -** to a comment remarkably similar to this one) is currently considered -** to be experimental. The interface might change in incompatible ways. -** If this is a problem for you, do not use the interface at this time. -** -** When the virtual-table mechanism stabilizes, we will declare the -** interface fixed, support it indefinitely, and remove this comment. -*/ - -/* -** CAPI3REF: A Handle To An Open BLOB -** KEYWORDS: {BLOB handle} {BLOB handles} -** -** An instance of this object represents an open BLOB on which -** [sqlite3_blob_open | incremental BLOB I/O] can be performed. 
-** ^Objects of this type are created by [sqlite3_blob_open()] -** and destroyed by [sqlite3_blob_close()]. -** ^The [sqlite3_blob_read()] and [sqlite3_blob_write()] interfaces -** can be used to read or write small subsections of the BLOB. -** ^The [sqlite3_blob_bytes()] interface returns the size of the BLOB in bytes. -*/ -typedef struct sqlite3_blob sqlite3_blob; - -/* -** CAPI3REF: Open A BLOB For Incremental I/O -** -** ^(This interfaces opens a [BLOB handle | handle] to the BLOB located -** in row iRow, column zColumn, table zTable in database zDb; -** in other words, the same BLOB that would be selected by: -** -**
    -**     SELECT zColumn FROM zDb.zTable WHERE [rowid] = iRow;
    -** 
    )^ -** -** ^If the flags parameter is non-zero, then the BLOB is opened for read -** and write access. ^If it is zero, the BLOB is opened for read access. -** ^It is not possible to open a column that is part of an index or primary -** key for writing. ^If [foreign key constraints] are enabled, it is -** not possible to open a column that is part of a [child key] for writing. -** -** ^Note that the database name is not the filename that contains -** the database but rather the symbolic name of the database that -** appears after the AS keyword when the database is connected using [ATTACH]. -** ^For the main database file, the database name is "main". -** ^For TEMP tables, the database name is "temp". -** -** ^(On success, [SQLITE_OK] is returned and the new [BLOB handle] is written -** to *ppBlob. Otherwise an [error code] is returned and *ppBlob is set -** to be a null pointer.)^ -** ^This function sets the [database connection] error code and message -** accessible via [sqlite3_errcode()] and [sqlite3_errmsg()] and related -** functions. ^Note that the *ppBlob variable is always initialized in a -** way that makes it safe to invoke [sqlite3_blob_close()] on *ppBlob -** regardless of the success or failure of this routine. -** -** ^(If the row that a BLOB handle points to is modified by an -** [UPDATE], [DELETE], or by [ON CONFLICT] side-effects -** then the BLOB handle is marked as "expired". -** This is true if any column of the row is changed, even a column -** other than the one the BLOB handle is open on.)^ -** ^Calls to [sqlite3_blob_read()] and [sqlite3_blob_write()] for -** an expired BLOB handle fail with a return code of [SQLITE_ABORT]. -** ^(Changes written into a BLOB prior to the BLOB expiring are not -** rolled back by the expiration of the BLOB. Such changes will eventually -** commit if the transaction continues to completion.)^ -** -** ^Use the [sqlite3_blob_bytes()] interface to determine the size of -** the opened blob. ^The size of a blob may not be changed by this -** interface. Use the [UPDATE] SQL command to change the size of a -** blob. -** -** ^The [sqlite3_blob_open()] interface will fail for a [WITHOUT ROWID] -** table. Incremental BLOB I/O is not possible on [WITHOUT ROWID] tables. -** -** ^The [sqlite3_bind_zeroblob()] and [sqlite3_result_zeroblob()] interfaces -** and the built-in [zeroblob] SQL function can be used, if desired, -** to create an empty, zero-filled blob in which to read or write using -** this interface. -** -** To avoid a resource leak, every open [BLOB handle] should eventually -** be released by a call to [sqlite3_blob_close()]. -*/ -SQLITE_API int sqlite3_blob_open( - sqlite3*, - const char *zDb, - const char *zTable, - const char *zColumn, - sqlite3_int64 iRow, - int flags, - sqlite3_blob **ppBlob -); - -/* -** CAPI3REF: Move a BLOB Handle to a New Row -** -** ^This function is used to move an existing blob handle so that it points -** to a different row of the same database table. ^The new row is identified -** by the rowid value passed as the second argument. Only the row can be -** changed. ^The database, table and column on which the blob handle is open -** remain the same. Moving an existing blob handle to a new row can be -** faster than closing the existing handle and opening a new one. 
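A hedged sketch of the incremental BLOB I/O interfaces in this section; the database "main", table "docs", column "body", and rowid 1 are placeholders.

#include <stdio.h>
#include <stdlib.h>
#include "sqlite3.h"

/* Illustrative only: open a BLOB read-only, read its full content, close it. */
static int read_blob(sqlite3 *db){
  sqlite3_blob *pBlob = 0;
  int rc = sqlite3_blob_open(db, "main", "docs", "body", 1, 0, &pBlob);
  if( rc==SQLITE_OK ){
    int n = sqlite3_blob_bytes(pBlob);
    void *buf = malloc(n>0 ? n : 1);
    if( buf ){
      rc = sqlite3_blob_read(pBlob, buf, n, 0);
      if( rc==SQLITE_OK ) printf("read %d bytes\n", n);
      free(buf);
    }
  }
  /* Safe even if sqlite3_blob_open() failed and pBlob is NULL. */
  sqlite3_blob_close(pBlob);
  return rc;
}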
-** -** ^(The new row must meet the same criteria as for [sqlite3_blob_open()] - -** it must exist and there must be either a blob or text value stored in -** the nominated column.)^ ^If the new row is not present in the table, or if -** it does not contain a blob or text value, or if another error occurs, an -** SQLite error code is returned and the blob handle is considered aborted. -** ^All subsequent calls to [sqlite3_blob_read()], [sqlite3_blob_write()] or -** [sqlite3_blob_reopen()] on an aborted blob handle immediately return -** SQLITE_ABORT. ^Calling [sqlite3_blob_bytes()] on an aborted blob handle -** always returns zero. -** -** ^This function sets the database handle error code and message. -*/ -SQLITE_API SQLITE_EXPERIMENTAL int sqlite3_blob_reopen(sqlite3_blob *, sqlite3_int64); - -/* -** CAPI3REF: Close A BLOB Handle -** -** ^Closes an open [BLOB handle]. -** -** ^Closing a BLOB shall cause the current transaction to commit -** if there are no other BLOBs, no pending prepared statements, and the -** database connection is in [autocommit mode]. -** ^If any writes were made to the BLOB, they might be held in cache -** until the close operation if they will fit. -** -** ^(Closing the BLOB often forces the changes -** out to disk and so if any I/O errors occur, they will likely occur -** at the time when the BLOB is closed. Any errors that occur during -** closing are reported as a non-zero return value.)^ -** -** ^(The BLOB is closed unconditionally. Even if this routine returns -** an error code, the BLOB is still closed.)^ -** -** ^Calling this routine with a null pointer (such as would be returned -** by a failed call to [sqlite3_blob_open()]) is a harmless no-op. -*/ -SQLITE_API int sqlite3_blob_close(sqlite3_blob *); - -/* -** CAPI3REF: Return The Size Of An Open BLOB -** -** ^Returns the size in bytes of the BLOB accessible via the -** successfully opened [BLOB handle] in its only argument. ^The -** incremental blob I/O routines can only read or overwriting existing -** blob content; they cannot change the size of a blob. -** -** This routine only works on a [BLOB handle] which has been created -** by a prior successful call to [sqlite3_blob_open()] and which has not -** been closed by [sqlite3_blob_close()]. Passing any other pointer in -** to this routine results in undefined and probably undesirable behavior. -*/ -SQLITE_API int sqlite3_blob_bytes(sqlite3_blob *); - -/* -** CAPI3REF: Read Data From A BLOB Incrementally -** -** ^(This function is used to read data from an open [BLOB handle] into a -** caller-supplied buffer. N bytes of data are copied into buffer Z -** from the open BLOB, starting at offset iOffset.)^ -** -** ^If offset iOffset is less than N bytes from the end of the BLOB, -** [SQLITE_ERROR] is returned and no data is read. ^If N or iOffset is -** less than zero, [SQLITE_ERROR] is returned and no data is read. -** ^The size of the blob (and hence the maximum value of N+iOffset) -** can be determined using the [sqlite3_blob_bytes()] interface. -** -** ^An attempt to read from an expired [BLOB handle] fails with an -** error code of [SQLITE_ABORT]. -** -** ^(On success, sqlite3_blob_read() returns SQLITE_OK. -** Otherwise, an [error code] or an [extended error code] is returned.)^ -** -** This routine only works on a [BLOB handle] which has been created -** by a prior successful call to [sqlite3_blob_open()] and which has not -** been closed by [sqlite3_blob_close()]. 
Passing any other pointer in -** to this routine results in undefined and probably undesirable behavior. -** -** See also: [sqlite3_blob_write()]. -*/ -SQLITE_API int sqlite3_blob_read(sqlite3_blob *, void *Z, int N, int iOffset); - -/* -** CAPI3REF: Write Data Into A BLOB Incrementally -** -** ^This function is used to write data into an open [BLOB handle] from a -** caller-supplied buffer. ^N bytes of data are copied from the buffer Z -** into the open BLOB, starting at offset iOffset. -** -** ^If the [BLOB handle] passed as the first argument was not opened for -** writing (the flags parameter to [sqlite3_blob_open()] was zero), -** this function returns [SQLITE_READONLY]. -** -** ^This function may only modify the contents of the BLOB; it is -** not possible to increase the size of a BLOB using this API. -** ^If offset iOffset is less than N bytes from the end of the BLOB, -** [SQLITE_ERROR] is returned and no data is written. ^If N is -** less than zero [SQLITE_ERROR] is returned and no data is written. -** The size of the BLOB (and hence the maximum value of N+iOffset) -** can be determined using the [sqlite3_blob_bytes()] interface. -** -** ^An attempt to write to an expired [BLOB handle] fails with an -** error code of [SQLITE_ABORT]. ^Writes to the BLOB that occurred -** before the [BLOB handle] expired are not rolled back by the -** expiration of the handle, though of course those changes might -** have been overwritten by the statement that expired the BLOB handle -** or by other independent statements. -** -** ^(On success, sqlite3_blob_write() returns SQLITE_OK. -** Otherwise, an [error code] or an [extended error code] is returned.)^ -** -** This routine only works on a [BLOB handle] which has been created -** by a prior successful call to [sqlite3_blob_open()] and which has not -** been closed by [sqlite3_blob_close()]. Passing any other pointer in -** to this routine results in undefined and probably undesirable behavior. -** -** See also: [sqlite3_blob_read()]. -*/ -SQLITE_API int sqlite3_blob_write(sqlite3_blob *, const void *z, int n, int iOffset); - -/* -** CAPI3REF: Virtual File System Objects -** -** A virtual filesystem (VFS) is an [sqlite3_vfs] object -** that SQLite uses to interact -** with the underlying operating system. Most SQLite builds come with a -** single default VFS that is appropriate for the host computer. -** New VFSes can be registered and existing VFSes can be unregistered. -** The following interfaces are provided. -** -** ^The sqlite3_vfs_find() interface returns a pointer to a VFS given its name. -** ^Names are case sensitive. -** ^Names are zero-terminated UTF-8 strings. -** ^If there is no match, a NULL pointer is returned. -** ^If zVfsName is NULL then the default VFS is returned. -** -** ^New VFSes are registered with sqlite3_vfs_register(). -** ^Each new VFS becomes the default VFS if the makeDflt flag is set. -** ^The same VFS can be registered multiple times without injury. -** ^To make an existing VFS into the default VFS, register it again -** with the makeDflt flag set. If two different VFSes with the -** same name are registered, the behavior is undefined. If a -** VFS is registered with a name that is NULL or an empty string, -** then the behavior is undefined. -** -** ^Unregister a VFS with the sqlite3_vfs_unregister() interface. -** ^(If the default VFS is unregistered, another VFS is chosen as -** the default. 
The choice for the new VFS is arbitrary.)^ -*/ -SQLITE_API sqlite3_vfs *sqlite3_vfs_find(const char *zVfsName); -SQLITE_API int sqlite3_vfs_register(sqlite3_vfs*, int makeDflt); -SQLITE_API int sqlite3_vfs_unregister(sqlite3_vfs*); - -/* -** CAPI3REF: Mutexes -** -** The SQLite core uses these routines for thread -** synchronization. Though they are intended for internal -** use by SQLite, code that links against SQLite is -** permitted to use any of these routines. -** -** The SQLite source code contains multiple implementations -** of these mutex routines. An appropriate implementation -** is selected automatically at compile-time. ^(The following -** implementations are available in the SQLite core: -** -**
-**   • SQLITE_MUTEX_PTHREADS
-**   • SQLITE_MUTEX_W32
-**   • SQLITE_MUTEX_NOOP
    )^ -** -** ^The SQLITE_MUTEX_NOOP implementation is a set of routines -** that does no real locking and is appropriate for use in -** a single-threaded application. ^The SQLITE_MUTEX_PTHREADS and -** SQLITE_MUTEX_W32 implementations are appropriate for use on Unix -** and Windows. -** -** ^(If SQLite is compiled with the SQLITE_MUTEX_APPDEF preprocessor -** macro defined (with "-DSQLITE_MUTEX_APPDEF=1"), then no mutex -** implementation is included with the library. In this case the -** application must supply a custom mutex implementation using the -** [SQLITE_CONFIG_MUTEX] option of the sqlite3_config() function -** before calling sqlite3_initialize() or any other public sqlite3_ -** function that calls sqlite3_initialize().)^ -** -** ^The sqlite3_mutex_alloc() routine allocates a new -** mutex and returns a pointer to it. ^If it returns NULL -** that means that a mutex could not be allocated. ^SQLite -** will unwind its stack and return an error. ^(The argument -** to sqlite3_mutex_alloc() is one of these integer constants: -** -**
-**   • SQLITE_MUTEX_FAST
-**   • SQLITE_MUTEX_RECURSIVE
-**   • SQLITE_MUTEX_STATIC_MASTER
-**   • SQLITE_MUTEX_STATIC_MEM
-**   • SQLITE_MUTEX_STATIC_MEM2
-**   • SQLITE_MUTEX_STATIC_PRNG
-**   • SQLITE_MUTEX_STATIC_LRU
-**   • SQLITE_MUTEX_STATIC_LRU2
    )^ -** -** ^The first two constants (SQLITE_MUTEX_FAST and SQLITE_MUTEX_RECURSIVE) -** cause sqlite3_mutex_alloc() to create -** a new mutex. ^The new mutex is recursive when SQLITE_MUTEX_RECURSIVE -** is used but not necessarily so when SQLITE_MUTEX_FAST is used. -** The mutex implementation does not need to make a distinction -** between SQLITE_MUTEX_RECURSIVE and SQLITE_MUTEX_FAST if it does -** not want to. ^SQLite will only request a recursive mutex in -** cases where it really needs one. ^If a faster non-recursive mutex -** implementation is available on the host platform, the mutex subsystem -** might return such a mutex in response to SQLITE_MUTEX_FAST. -** -** ^The other allowed parameters to sqlite3_mutex_alloc() (anything other -** than SQLITE_MUTEX_FAST and SQLITE_MUTEX_RECURSIVE) each return -** a pointer to a static preexisting mutex. ^Six static mutexes are -** used by the current version of SQLite. Future versions of SQLite -** may add additional static mutexes. Static mutexes are for internal -** use by SQLite only. Applications that use SQLite mutexes should -** use only the dynamic mutexes returned by SQLITE_MUTEX_FAST or -** SQLITE_MUTEX_RECURSIVE. -** -** ^Note that if one of the dynamic mutex parameters (SQLITE_MUTEX_FAST -** or SQLITE_MUTEX_RECURSIVE) is used then sqlite3_mutex_alloc() -** returns a different mutex on every call. ^But for the static -** mutex types, the same mutex is returned on every call that has -** the same type number. -** -** ^The sqlite3_mutex_free() routine deallocates a previously -** allocated dynamic mutex. ^SQLite is careful to deallocate every -** dynamic mutex that it allocates. The dynamic mutexes must not be in -** use when they are deallocated. Attempting to deallocate a static -** mutex results in undefined behavior. ^SQLite never deallocates -** a static mutex. -** -** ^The sqlite3_mutex_enter() and sqlite3_mutex_try() routines attempt -** to enter a mutex. ^If another thread is already within the mutex, -** sqlite3_mutex_enter() will block and sqlite3_mutex_try() will return -** SQLITE_BUSY. ^The sqlite3_mutex_try() interface returns [SQLITE_OK] -** upon successful entry. ^(Mutexes created using -** SQLITE_MUTEX_RECURSIVE can be entered multiple times by the same thread. -** In such cases the, -** mutex must be exited an equal number of times before another thread -** can enter.)^ ^(If the same thread tries to enter any other -** kind of mutex more than once, the behavior is undefined. -** SQLite will never exhibit -** such behavior in its own use of mutexes.)^ -** -** ^(Some systems (for example, Windows 95) do not support the operation -** implemented by sqlite3_mutex_try(). On those systems, sqlite3_mutex_try() -** will always return SQLITE_BUSY. The SQLite core only ever uses -** sqlite3_mutex_try() as an optimization so this is acceptable behavior.)^ -** -** ^The sqlite3_mutex_leave() routine exits a mutex that was -** previously entered by the same thread. ^(The behavior -** is undefined if the mutex is not currently entered by the -** calling thread or is not currently allocated. SQLite will -** never do either.)^ -** -** ^If the argument to sqlite3_mutex_enter(), sqlite3_mutex_try(), or -** sqlite3_mutex_leave() is a NULL pointer, then all three routines -** behave as no-ops. -** -** See also: [sqlite3_mutex_held()] and [sqlite3_mutex_notheld()]. 
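The allocate/enter/leave/free life cycle described above can be shown with a small illustrative sketch; it is not part of the vendored header being removed, and the helper name is hypothetical:

```c
#include <sqlite3.h>

/* Illustrative sketch only: guard a critical section with a dynamic
** recursive mutex, following the life cycle documented above. */
static void with_recursive_mutex(void){
  sqlite3_mutex *m = sqlite3_mutex_alloc(SQLITE_MUTEX_RECURSIVE);
  if( m==0 ) return;          /* allocation may fail */
  sqlite3_mutex_enter(m);     /* blocks until the mutex is held */
  /* ... protected work ... */
  sqlite3_mutex_leave(m);     /* every enter needs a matching leave */
  sqlite3_mutex_free(m);      /* only dynamic mutexes may be freed */
}
```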
-*/ -SQLITE_API sqlite3_mutex *sqlite3_mutex_alloc(int); -SQLITE_API void sqlite3_mutex_free(sqlite3_mutex*); -SQLITE_API void sqlite3_mutex_enter(sqlite3_mutex*); -SQLITE_API int sqlite3_mutex_try(sqlite3_mutex*); -SQLITE_API void sqlite3_mutex_leave(sqlite3_mutex*); - -/* -** CAPI3REF: Mutex Methods Object -** -** An instance of this structure defines the low-level routines -** used to allocate and use mutexes. -** -** Usually, the default mutex implementations provided by SQLite are -** sufficient, however the user has the option of substituting a custom -** implementation for specialized deployments or systems for which SQLite -** does not provide a suitable implementation. In this case, the user -** creates and populates an instance of this structure to pass -** to sqlite3_config() along with the [SQLITE_CONFIG_MUTEX] option. -** Additionally, an instance of this structure can be used as an -** output variable when querying the system for the current mutex -** implementation, using the [SQLITE_CONFIG_GETMUTEX] option. -** -** ^The xMutexInit method defined by this structure is invoked as -** part of system initialization by the sqlite3_initialize() function. -** ^The xMutexInit routine is called by SQLite exactly once for each -** effective call to [sqlite3_initialize()]. -** -** ^The xMutexEnd method defined by this structure is invoked as -** part of system shutdown by the sqlite3_shutdown() function. The -** implementation of this method is expected to release all outstanding -** resources obtained by the mutex methods implementation, especially -** those obtained by the xMutexInit method. ^The xMutexEnd() -** interface is invoked exactly once for each call to [sqlite3_shutdown()]. -** -** ^(The remaining seven methods defined by this structure (xMutexAlloc, -** xMutexFree, xMutexEnter, xMutexTry, xMutexLeave, xMutexHeld and -** xMutexNotheld) implement the following interfaces (respectively): -** -**
-**   • [sqlite3_mutex_alloc()]
-**   • [sqlite3_mutex_free()]
-**   • [sqlite3_mutex_enter()]
-**   • [sqlite3_mutex_try()]
-**   • [sqlite3_mutex_leave()]
-**   • [sqlite3_mutex_held()]
-**   • [sqlite3_mutex_notheld()]
    )^ -** -** The only difference is that the public sqlite3_XXX functions enumerated -** above silently ignore any invocations that pass a NULL pointer instead -** of a valid mutex handle. The implementations of the methods defined -** by this structure are not required to handle this case, the results -** of passing a NULL pointer instead of a valid mutex handle are undefined -** (i.e. it is acceptable to provide an implementation that segfaults if -** it is passed a NULL pointer). -** -** The xMutexInit() method must be threadsafe. ^It must be harmless to -** invoke xMutexInit() multiple times within the same process and without -** intervening calls to xMutexEnd(). Second and subsequent calls to -** xMutexInit() must be no-ops. -** -** ^xMutexInit() must not use SQLite memory allocation ([sqlite3_malloc()] -** and its associates). ^Similarly, xMutexAlloc() must not use SQLite memory -** allocation for a static mutex. ^However xMutexAlloc() may use SQLite -** memory allocation for a fast or recursive mutex. -** -** ^SQLite will invoke the xMutexEnd() method when [sqlite3_shutdown()] is -** called, but only if the prior call to xMutexInit returned SQLITE_OK. -** If xMutexInit fails in any way, it is expected to clean up after itself -** prior to returning. -*/ -typedef struct sqlite3_mutex_methods sqlite3_mutex_methods; -struct sqlite3_mutex_methods { - int (*xMutexInit)(void); - int (*xMutexEnd)(void); - sqlite3_mutex *(*xMutexAlloc)(int); - void (*xMutexFree)(sqlite3_mutex *); - void (*xMutexEnter)(sqlite3_mutex *); - int (*xMutexTry)(sqlite3_mutex *); - void (*xMutexLeave)(sqlite3_mutex *); - int (*xMutexHeld)(sqlite3_mutex *); - int (*xMutexNotheld)(sqlite3_mutex *); -}; - -/* -** CAPI3REF: Mutex Verification Routines -** -** The sqlite3_mutex_held() and sqlite3_mutex_notheld() routines -** are intended for use inside assert() statements. ^The SQLite core -** never uses these routines except inside an assert() and applications -** are advised to follow the lead of the core. ^The SQLite core only -** provides implementations for these routines when it is compiled -** with the SQLITE_DEBUG flag. ^External mutex implementations -** are only required to provide these routines if SQLITE_DEBUG is -** defined and if NDEBUG is not defined. -** -** ^These routines should return true if the mutex in their argument -** is held or not held, respectively, by the calling thread. -** -** ^The implementation is not required to provide versions of these -** routines that actually work. If the implementation does not provide working -** versions of these routines, it should at least provide stubs that always -** return true so that one does not get spurious assertion failures. -** -** ^If the argument to sqlite3_mutex_held() is a NULL pointer then -** the routine should return 1. This seems counter-intuitive since -** clearly the mutex cannot be held if it does not exist. But -** the reason the mutex does not exist is because the build is not -** using mutexes. And we do not want the assert() containing the -** call to sqlite3_mutex_held() to fail, so a non-zero return is -** the appropriate thing to do. ^The sqlite3_mutex_notheld() -** interface should also return 1 when given a NULL pointer. -*/ -#ifndef NDEBUG -SQLITE_API int sqlite3_mutex_held(sqlite3_mutex*); -SQLITE_API int sqlite3_mutex_notheld(sqlite3_mutex*); -#endif - -/* -** CAPI3REF: Mutex Types -** -** The [sqlite3_mutex_alloc()] interface takes a single argument -** which is one of these integer constants. 
-** -** The set of static mutexes may change from one SQLite release to the -** next. Applications that override the built-in mutex logic must be -** prepared to accommodate additional static mutexes. -*/ -#define SQLITE_MUTEX_FAST 0 -#define SQLITE_MUTEX_RECURSIVE 1 -#define SQLITE_MUTEX_STATIC_MASTER 2 -#define SQLITE_MUTEX_STATIC_MEM 3 /* sqlite3_malloc() */ -#define SQLITE_MUTEX_STATIC_MEM2 4 /* NOT USED */ -#define SQLITE_MUTEX_STATIC_OPEN 4 /* sqlite3BtreeOpen() */ -#define SQLITE_MUTEX_STATIC_PRNG 5 /* sqlite3_random() */ -#define SQLITE_MUTEX_STATIC_LRU 6 /* lru page list */ -#define SQLITE_MUTEX_STATIC_LRU2 7 /* NOT USED */ -#define SQLITE_MUTEX_STATIC_PMEM 7 /* sqlite3PageMalloc() */ - -/* -** CAPI3REF: Retrieve the mutex for a database connection -** -** ^This interface returns a pointer the [sqlite3_mutex] object that -** serializes access to the [database connection] given in the argument -** when the [threading mode] is Serialized. -** ^If the [threading mode] is Single-thread or Multi-thread then this -** routine returns a NULL pointer. -*/ -SQLITE_API sqlite3_mutex *sqlite3_db_mutex(sqlite3*); - -/* -** CAPI3REF: Low-Level Control Of Database Files -** -** ^The [sqlite3_file_control()] interface makes a direct call to the -** xFileControl method for the [sqlite3_io_methods] object associated -** with a particular database identified by the second argument. ^The -** name of the database is "main" for the main database or "temp" for the -** TEMP database, or the name that appears after the AS keyword for -** databases that are added using the [ATTACH] SQL command. -** ^A NULL pointer can be used in place of "main" to refer to the -** main database file. -** ^The third and fourth parameters to this routine -** are passed directly through to the second and third parameters of -** the xFileControl method. ^The return value of the xFileControl -** method becomes the return value of this routine. -** -** ^The SQLITE_FCNTL_FILE_POINTER value for the op parameter causes -** a pointer to the underlying [sqlite3_file] object to be written into -** the space pointed to by the 4th parameter. ^The SQLITE_FCNTL_FILE_POINTER -** case is a short-circuit path which does not actually invoke the -** underlying sqlite3_io_methods.xFileControl method. -** -** ^If the second parameter (zDbName) does not match the name of any -** open database file, then SQLITE_ERROR is returned. ^This error -** code is not remembered and will not be recalled by [sqlite3_errcode()] -** or [sqlite3_errmsg()]. The underlying xFileControl method might -** also return SQLITE_ERROR. There is no way to distinguish between -** an incorrect zDbName and an SQLITE_ERROR return from the underlying -** xFileControl method. -** -** See also: [SQLITE_FCNTL_LOCKSTATE] -*/ -SQLITE_API int sqlite3_file_control(sqlite3*, const char *zDbName, int op, void*); - -/* -** CAPI3REF: Testing Interface -** -** ^The sqlite3_test_control() interface is used to read out internal -** state of SQLite and to inject faults into SQLite for testing -** purposes. ^The first parameter is an operation code that determines -** the number, meaning, and operation of all subsequent parameters. -** -** This interface is not for use by applications. It exists solely -** for verifying the correct operation of the SQLite library. Depending -** on how the SQLite library is compiled, this interface might not exist. -** -** The details of the operation codes, their meanings, the parameters -** they take, and what they do are all subject to change without notice. 
-** Unlike most of the SQLite API, this function is not guaranteed to -** operate consistently from one release to the next. -*/ -SQLITE_API int sqlite3_test_control(int op, ...); - -/* -** CAPI3REF: Testing Interface Operation Codes -** -** These constants are the valid operation code parameters used -** as the first argument to [sqlite3_test_control()]. -** -** These parameters and their meanings are subject to change -** without notice. These values are for testing purposes only. -** Applications should not use any of these parameters or the -** [sqlite3_test_control()] interface. -*/ -#define SQLITE_TESTCTRL_FIRST 5 -#define SQLITE_TESTCTRL_PRNG_SAVE 5 -#define SQLITE_TESTCTRL_PRNG_RESTORE 6 -#define SQLITE_TESTCTRL_PRNG_RESET 7 -#define SQLITE_TESTCTRL_BITVEC_TEST 8 -#define SQLITE_TESTCTRL_FAULT_INSTALL 9 -#define SQLITE_TESTCTRL_BENIGN_MALLOC_HOOKS 10 -#define SQLITE_TESTCTRL_PENDING_BYTE 11 -#define SQLITE_TESTCTRL_ASSERT 12 -#define SQLITE_TESTCTRL_ALWAYS 13 -#define SQLITE_TESTCTRL_RESERVE 14 -#define SQLITE_TESTCTRL_OPTIMIZATIONS 15 -#define SQLITE_TESTCTRL_ISKEYWORD 16 -#define SQLITE_TESTCTRL_SCRATCHMALLOC 17 -#define SQLITE_TESTCTRL_LOCALTIME_FAULT 18 -#define SQLITE_TESTCTRL_EXPLAIN_STMT 19 -#define SQLITE_TESTCTRL_NEVER_CORRUPT 20 -#define SQLITE_TESTCTRL_VDBE_COVERAGE 21 -#define SQLITE_TESTCTRL_BYTEORDER 22 -#define SQLITE_TESTCTRL_LAST 22 - -/* -** CAPI3REF: SQLite Runtime Status -** -** ^This interface is used to retrieve runtime status information -** about the performance of SQLite, and optionally to reset various -** highwater marks. ^The first argument is an integer code for -** the specific parameter to measure. ^(Recognized integer codes -** are of the form [status parameters | SQLITE_STATUS_...].)^ -** ^The current value of the parameter is returned into *pCurrent. -** ^The highest recorded value is returned in *pHighwater. ^If the -** resetFlag is true, then the highest record value is reset after -** *pHighwater is written. ^(Some parameters do not record the highest -** value. For those parameters -** nothing is written into *pHighwater and the resetFlag is ignored.)^ -** ^(Other parameters record only the highwater mark and not the current -** value. For these latter parameters nothing is written into *pCurrent.)^ -** -** ^The sqlite3_status() routine returns SQLITE_OK on success and a -** non-zero [error code] on failure. -** -** This routine is threadsafe but is not atomic. This routine can be -** called while other threads are running the same or different SQLite -** interfaces. However the values returned in *pCurrent and -** *pHighwater reflect the status of SQLite at different points in time -** and it is possible that another thread might change the parameter -** in between the times when *pCurrent and *pHighwater are written. -** -** See also: [sqlite3_db_status()] -*/ -SQLITE_API int sqlite3_status(int op, int *pCurrent, int *pHighwater, int resetFlag); - - -/* -** CAPI3REF: Status Parameters -** KEYWORDS: {status parameters} -** -** These integer constants designate various run-time status parameters -** that can be returned by [sqlite3_status()]. -** -**
-** [[SQLITE_STATUS_MEMORY_USED]] ^(SQLITE_STATUS_MEMORY_USED
    This parameter is the current amount of memory checked out -** using [sqlite3_malloc()], either directly or indirectly. The -** figure includes calls made to [sqlite3_malloc()] by the application -** and internal memory usage by the SQLite library. Scratch memory -** controlled by [SQLITE_CONFIG_SCRATCH] and auxiliary page-cache -** memory controlled by [SQLITE_CONFIG_PAGECACHE] is not included in -** this parameter. The amount returned is the sum of the allocation -** sizes as reported by the xSize method in [sqlite3_mem_methods].
-** )^
-** [[SQLITE_STATUS_MALLOC_SIZE]] ^(SQLITE_STATUS_MALLOC_SIZE
    This parameter records the largest memory allocation request -** handed to [sqlite3_malloc()] or [sqlite3_realloc()] (or their -** internal equivalents). Only the value returned in the -** *pHighwater parameter to [sqlite3_status()] is of interest. -** The value written into the *pCurrent parameter is undefined.
-** )^
-** [[SQLITE_STATUS_MALLOC_COUNT]] ^(SQLITE_STATUS_MALLOC_COUNT
    This parameter records the number of separate memory allocations -** currently checked out.
-** )^
-** [[SQLITE_STATUS_PAGECACHE_USED]] ^(SQLITE_STATUS_PAGECACHE_USED
    This parameter returns the number of pages used out of the -** [pagecache memory allocator] that was configured using -** [SQLITE_CONFIG_PAGECACHE]. The -** value returned is in pages, not in bytes.
-** )^
-** [[SQLITE_STATUS_PAGECACHE_OVERFLOW]] ^(SQLITE_STATUS_PAGECACHE_OVERFLOW
    This parameter returns the number of bytes of page cache -** allocation which could not be satisfied by the [SQLITE_CONFIG_PAGECACHE] -** buffer and where forced to overflow to [sqlite3_malloc()]. The -** returned value includes allocations that overflowed because they -** where too large (they were larger than the "sz" parameter to -** [SQLITE_CONFIG_PAGECACHE]) and allocations that overflowed because -** no space was left in the page cache.
-** )^
-** [[SQLITE_STATUS_PAGECACHE_SIZE]] ^(SQLITE_STATUS_PAGECACHE_SIZE
    This parameter records the largest memory allocation request -** handed to [pagecache memory allocator]. Only the value returned in the -** *pHighwater parameter to [sqlite3_status()] is of interest. -** The value written into the *pCurrent parameter is undefined.
-** )^
-** [[SQLITE_STATUS_SCRATCH_USED]] ^(SQLITE_STATUS_SCRATCH_USED
    This parameter returns the number of allocations used out of the -** [scratch memory allocator] configured using -** [SQLITE_CONFIG_SCRATCH]. The value returned is in allocations, not -** in bytes. Since a single thread may only have one scratch allocation -** outstanding at time, this parameter also reports the number of threads -** using scratch memory at the same time.
-** )^
-** [[SQLITE_STATUS_SCRATCH_OVERFLOW]] ^(SQLITE_STATUS_SCRATCH_OVERFLOW
    This parameter returns the number of bytes of scratch memory -** allocation which could not be satisfied by the [SQLITE_CONFIG_SCRATCH] -** buffer and where forced to overflow to [sqlite3_malloc()]. The values -** returned include overflows because the requested allocation was too -** larger (that is, because the requested allocation was larger than the -** "sz" parameter to [SQLITE_CONFIG_SCRATCH]) and because no scratch buffer -** slots were available. -**
-** )^
-** [[SQLITE_STATUS_SCRATCH_SIZE]] ^(SQLITE_STATUS_SCRATCH_SIZE
    This parameter records the largest memory allocation request -** handed to [scratch memory allocator]. Only the value returned in the -** *pHighwater parameter to [sqlite3_status()] is of interest. -** The value written into the *pCurrent parameter is undefined.
-** )^
-** [[SQLITE_STATUS_PARSER_STACK]] ^(SQLITE_STATUS_PARSER_STACK
    This parameter records the deepest parser stack. It is only -** meaningful if SQLite is compiled with [YYTRACKMAXSTACKDEPTH].
    )^ -**
    -** -** New status parameters may be added from time to time. -*/ -#define SQLITE_STATUS_MEMORY_USED 0 -#define SQLITE_STATUS_PAGECACHE_USED 1 -#define SQLITE_STATUS_PAGECACHE_OVERFLOW 2 -#define SQLITE_STATUS_SCRATCH_USED 3 -#define SQLITE_STATUS_SCRATCH_OVERFLOW 4 -#define SQLITE_STATUS_MALLOC_SIZE 5 -#define SQLITE_STATUS_PARSER_STACK 6 -#define SQLITE_STATUS_PAGECACHE_SIZE 7 -#define SQLITE_STATUS_SCRATCH_SIZE 8 -#define SQLITE_STATUS_MALLOC_COUNT 9 - -/* -** CAPI3REF: Database Connection Status -** -** ^This interface is used to retrieve runtime status information -** about a single [database connection]. ^The first argument is the -** database connection object to be interrogated. ^The second argument -** is an integer constant, taken from the set of -** [SQLITE_DBSTATUS options], that -** determines the parameter to interrogate. The set of -** [SQLITE_DBSTATUS options] is likely -** to grow in future releases of SQLite. -** -** ^The current value of the requested parameter is written into *pCur -** and the highest instantaneous value is written into *pHiwtr. ^If -** the resetFlg is true, then the highest instantaneous value is -** reset back down to the current value. -** -** ^The sqlite3_db_status() routine returns SQLITE_OK on success and a -** non-zero [error code] on failure. -** -** See also: [sqlite3_status()] and [sqlite3_stmt_status()]. -*/ -SQLITE_API int sqlite3_db_status(sqlite3*, int op, int *pCur, int *pHiwtr, int resetFlg); - -/* -** CAPI3REF: Status Parameters for database connections -** KEYWORDS: {SQLITE_DBSTATUS options} -** -** These constants are the available integer "verbs" that can be passed as -** the second argument to the [sqlite3_db_status()] interface. -** -** New verbs may be added in future releases of SQLite. Existing verbs -** might be discontinued. Applications should check the return code from -** [sqlite3_db_status()] to make sure that the call worked. -** The [sqlite3_db_status()] interface will return a non-zero error code -** if a discontinued or unsupported verb is invoked. -** -**
-** [[SQLITE_DBSTATUS_LOOKASIDE_USED]] ^(SQLITE_DBSTATUS_LOOKASIDE_USED
    This parameter returns the number of lookaside memory slots currently -** checked out.
-** )^
-** [[SQLITE_DBSTATUS_LOOKASIDE_HIT]] ^(SQLITE_DBSTATUS_LOOKASIDE_HIT
    This parameter returns the number malloc attempts that were -** satisfied using lookaside memory. Only the high-water value is meaningful; -** the current value is always zero.)^ -** -** [[SQLITE_DBSTATUS_LOOKASIDE_MISS_SIZE]] -** ^(
    SQLITE_DBSTATUS_LOOKASIDE_MISS_SIZE
    -**
    This parameter returns the number malloc attempts that might have -** been satisfied using lookaside memory but failed due to the amount of -** memory requested being larger than the lookaside slot size. -** Only the high-water value is meaningful; -** the current value is always zero.)^ -** -** [[SQLITE_DBSTATUS_LOOKASIDE_MISS_FULL]] -** ^(
    SQLITE_DBSTATUS_LOOKASIDE_MISS_FULL
    -**
    This parameter returns the number malloc attempts that might have -** been satisfied using lookaside memory but failed due to all lookaside -** memory already being in use. -** Only the high-water value is meaningful; -** the current value is always zero.)^ -** -** [[SQLITE_DBSTATUS_CACHE_USED]] ^(
    SQLITE_DBSTATUS_CACHE_USED
    -**
    This parameter returns the approximate number of of bytes of heap -** memory used by all pager caches associated with the database connection.)^ -** ^The highwater mark associated with SQLITE_DBSTATUS_CACHE_USED is always 0. -** -** [[SQLITE_DBSTATUS_SCHEMA_USED]] ^(
    SQLITE_DBSTATUS_SCHEMA_USED
    -**
    This parameter returns the approximate number of of bytes of heap -** memory used to store the schema for all databases associated -** with the connection - main, temp, and any [ATTACH]-ed databases.)^ -** ^The full amount of memory used by the schemas is reported, even if the -** schema memory is shared with other database connections due to -** [shared cache mode] being enabled. -** ^The highwater mark associated with SQLITE_DBSTATUS_SCHEMA_USED is always 0. -** -** [[SQLITE_DBSTATUS_STMT_USED]] ^(
    SQLITE_DBSTATUS_STMT_USED
    -**
    This parameter returns the approximate number of of bytes of heap -** and lookaside memory used by all prepared statements associated with -** the database connection.)^ -** ^The highwater mark associated with SQLITE_DBSTATUS_STMT_USED is always 0. -**
-** [[SQLITE_DBSTATUS_CACHE_HIT]] ^(SQLITE_DBSTATUS_CACHE_HIT
    This parameter returns the number of pager cache hits that have -** occurred.)^ ^The highwater mark associated with SQLITE_DBSTATUS_CACHE_HIT -** is always 0. -**
-** [[SQLITE_DBSTATUS_CACHE_MISS]] ^(SQLITE_DBSTATUS_CACHE_MISS
    This parameter returns the number of pager cache misses that have -** occurred.)^ ^The highwater mark associated with SQLITE_DBSTATUS_CACHE_MISS -** is always 0. -**
-** [[SQLITE_DBSTATUS_CACHE_WRITE]] ^(SQLITE_DBSTATUS_CACHE_WRITE
    This parameter returns the number of dirty cache entries that have -** been written to disk. Specifically, the number of pages written to the -** wal file in wal mode databases, or the number of pages written to the -** database file in rollback mode databases. Any pages written as part of -** transaction rollback or database recovery operations are not included. -** If an IO or other error occurs while writing a page to disk, the effect -** on subsequent SQLITE_DBSTATUS_CACHE_WRITE requests is undefined.)^ ^The -** highwater mark associated with SQLITE_DBSTATUS_CACHE_WRITE is always 0. -**
-** [[SQLITE_DBSTATUS_DEFERRED_FKS]] ^(SQLITE_DBSTATUS_DEFERRED_FKS
    This parameter returns zero for the current value if and only if -** all foreign key constraints (deferred or immediate) have been -** resolved.)^ ^The highwater mark is always 0. -**
    -**
    -*/ -#define SQLITE_DBSTATUS_LOOKASIDE_USED 0 -#define SQLITE_DBSTATUS_CACHE_USED 1 -#define SQLITE_DBSTATUS_SCHEMA_USED 2 -#define SQLITE_DBSTATUS_STMT_USED 3 -#define SQLITE_DBSTATUS_LOOKASIDE_HIT 4 -#define SQLITE_DBSTATUS_LOOKASIDE_MISS_SIZE 5 -#define SQLITE_DBSTATUS_LOOKASIDE_MISS_FULL 6 -#define SQLITE_DBSTATUS_CACHE_HIT 7 -#define SQLITE_DBSTATUS_CACHE_MISS 8 -#define SQLITE_DBSTATUS_CACHE_WRITE 9 -#define SQLITE_DBSTATUS_DEFERRED_FKS 10 -#define SQLITE_DBSTATUS_MAX 10 /* Largest defined DBSTATUS */ - - -/* -** CAPI3REF: Prepared Statement Status -** -** ^(Each prepared statement maintains various -** [SQLITE_STMTSTATUS counters] that measure the number -** of times it has performed specific operations.)^ These counters can -** be used to monitor the performance characteristics of the prepared -** statements. For example, if the number of table steps greatly exceeds -** the number of table searches or result rows, that would tend to indicate -** that the prepared statement is using a full table scan rather than -** an index. -** -** ^(This interface is used to retrieve and reset counter values from -** a [prepared statement]. The first argument is the prepared statement -** object to be interrogated. The second argument -** is an integer code for a specific [SQLITE_STMTSTATUS counter] -** to be interrogated.)^ -** ^The current value of the requested counter is returned. -** ^If the resetFlg is true, then the counter is reset to zero after this -** interface call returns. -** -** See also: [sqlite3_status()] and [sqlite3_db_status()]. -*/ -SQLITE_API int sqlite3_stmt_status(sqlite3_stmt*, int op,int resetFlg); - -/* -** CAPI3REF: Status Parameters for prepared statements -** KEYWORDS: {SQLITE_STMTSTATUS counter} {SQLITE_STMTSTATUS counters} -** -** These preprocessor macros define integer codes that name counter -** values associated with the [sqlite3_stmt_status()] interface. -** The meanings of the various counters are as follows: -** -**
-** [[SQLITE_STMTSTATUS_FULLSCAN_STEP]] SQLITE_STMTSTATUS_FULLSCAN_STEP
    ^This is the number of times that SQLite has stepped forward in -** a table as part of a full table scan. Large numbers for this counter -** may indicate opportunities for performance improvement through -** careful use of indices.
-**
-** [[SQLITE_STMTSTATUS_SORT]] SQLITE_STMTSTATUS_SORT
    ^This is the number of sort operations that have occurred. -** A non-zero value in this counter may indicate an opportunity to -** improvement performance through careful use of indices.
-**
-** [[SQLITE_STMTSTATUS_AUTOINDEX]] SQLITE_STMTSTATUS_AUTOINDEX
    ^This is the number of rows inserted into transient indices that -** were created automatically in order to help joins run faster. -** A non-zero value in this counter may indicate an opportunity to -** improvement performance by adding permanent indices that do not -** need to be reinitialized each time the statement is run.
-**
-** [[SQLITE_STMTSTATUS_VM_STEP]] SQLITE_STMTSTATUS_VM_STEP
    ^This is the number of virtual machine operations executed -** by the prepared statement if that number is less than or equal -** to 2147483647. The number of virtual machine operations can be -** used as a proxy for the total work done by the prepared statement. -** If the number of virtual machine operations exceeds 2147483647 -** then the value returned by this statement status code is undefined. -**
    -**
    -*/ -#define SQLITE_STMTSTATUS_FULLSCAN_STEP 1 -#define SQLITE_STMTSTATUS_SORT 2 -#define SQLITE_STMTSTATUS_AUTOINDEX 3 -#define SQLITE_STMTSTATUS_VM_STEP 4 - -/* -** CAPI3REF: Custom Page Cache Object -** -** The sqlite3_pcache type is opaque. It is implemented by -** the pluggable module. The SQLite core has no knowledge of -** its size or internal structure and never deals with the -** sqlite3_pcache object except by holding and passing pointers -** to the object. -** -** See [sqlite3_pcache_methods2] for additional information. -*/ -typedef struct sqlite3_pcache sqlite3_pcache; - -/* -** CAPI3REF: Custom Page Cache Object -** -** The sqlite3_pcache_page object represents a single page in the -** page cache. The page cache will allocate instances of this -** object. Various methods of the page cache use pointers to instances -** of this object as parameters or as their return value. -** -** See [sqlite3_pcache_methods2] for additional information. -*/ -typedef struct sqlite3_pcache_page sqlite3_pcache_page; -struct sqlite3_pcache_page { - void *pBuf; /* The content of the page */ - void *pExtra; /* Extra information associated with the page */ -}; - -/* -** CAPI3REF: Application Defined Page Cache. -** KEYWORDS: {page cache} -** -** ^(The [sqlite3_config]([SQLITE_CONFIG_PCACHE2], ...) interface can -** register an alternative page cache implementation by passing in an -** instance of the sqlite3_pcache_methods2 structure.)^ -** In many applications, most of the heap memory allocated by -** SQLite is used for the page cache. -** By implementing a -** custom page cache using this API, an application can better control -** the amount of memory consumed by SQLite, the way in which -** that memory is allocated and released, and the policies used to -** determine exactly which parts of a database file are cached and for -** how long. -** -** The alternative page cache mechanism is an -** extreme measure that is only needed by the most demanding applications. -** The built-in page cache is recommended for most uses. -** -** ^(The contents of the sqlite3_pcache_methods2 structure are copied to an -** internal buffer by SQLite within the call to [sqlite3_config]. Hence -** the application may discard the parameter after the call to -** [sqlite3_config()] returns.)^ -** -** [[the xInit() page cache method]] -** ^(The xInit() method is called once for each effective -** call to [sqlite3_initialize()])^ -** (usually only once during the lifetime of the process). ^(The xInit() -** method is passed a copy of the sqlite3_pcache_methods2.pArg value.)^ -** The intent of the xInit() method is to set up global data structures -** required by the custom page cache implementation. -** ^(If the xInit() method is NULL, then the -** built-in default page cache is used instead of the application defined -** page cache.)^ -** -** [[the xShutdown() page cache method]] -** ^The xShutdown() method is called by [sqlite3_shutdown()]. -** It can be used to clean up -** any outstanding resources before process shutdown, if required. -** ^The xShutdown() method may be NULL. -** -** ^SQLite automatically serializes calls to the xInit method, -** so the xInit method need not be threadsafe. ^The -** xShutdown method is only called from [sqlite3_shutdown()] so it does -** not need to be threadsafe either. All other methods must be threadsafe -** in multithreaded applications. -** -** ^SQLite will never invoke xInit() more than once without an intervening -** call to xShutdown(). 
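Referring back to the [SQLITE_STMTSTATUS counters] defined just above, a minimal, hypothetical sketch of reading one counter with sqlite3_stmt_status() (assuming an already-prepared statement) could look like this; it is illustrative only and not part of the removed header:

```c
#include <stdio.h>
#include <sqlite3.h>

/* Illustrative sketch only: after running a statement, report how many
** full-table-scan steps it performed and reset the counter. */
static void report_fullscan_steps(sqlite3_stmt *pStmt){
  int nStep = sqlite3_stmt_status(pStmt, SQLITE_STMTSTATUS_FULLSCAN_STEP, 1);
  if( nStep>0 ){
    printf("full-scan steps: %d (an index might help)\n", nStep);
  }
}
```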
-** -** [[the xCreate() page cache methods]] -** ^SQLite invokes the xCreate() method to construct a new cache instance. -** SQLite will typically create one cache instance for each open database file, -** though this is not guaranteed. ^The -** first parameter, szPage, is the size in bytes of the pages that must -** be allocated by the cache. ^szPage will always a power of two. ^The -** second parameter szExtra is a number of bytes of extra storage -** associated with each page cache entry. ^The szExtra parameter will -** a number less than 250. SQLite will use the -** extra szExtra bytes on each page to store metadata about the underlying -** database page on disk. The value passed into szExtra depends -** on the SQLite version, the target platform, and how SQLite was compiled. -** ^The third argument to xCreate(), bPurgeable, is true if the cache being -** created will be used to cache database pages of a file stored on disk, or -** false if it is used for an in-memory database. The cache implementation -** does not have to do anything special based with the value of bPurgeable; -** it is purely advisory. ^On a cache where bPurgeable is false, SQLite will -** never invoke xUnpin() except to deliberately delete a page. -** ^In other words, calls to xUnpin() on a cache with bPurgeable set to -** false will always have the "discard" flag set to true. -** ^Hence, a cache created with bPurgeable false will -** never contain any unpinned pages. -** -** [[the xCachesize() page cache method]] -** ^(The xCachesize() method may be called at any time by SQLite to set the -** suggested maximum cache-size (number of pages stored by) the cache -** instance passed as the first argument. This is the value configured using -** the SQLite "[PRAGMA cache_size]" command.)^ As with the bPurgeable -** parameter, the implementation is not required to do anything with this -** value; it is advisory only. -** -** [[the xPagecount() page cache methods]] -** The xPagecount() method must return the number of pages currently -** stored in the cache, both pinned and unpinned. -** -** [[the xFetch() page cache methods]] -** The xFetch() method locates a page in the cache and returns a pointer to -** an sqlite3_pcache_page object associated with that page, or a NULL pointer. -** The pBuf element of the returned sqlite3_pcache_page object will be a -** pointer to a buffer of szPage bytes used to store the content of a -** single database page. The pExtra element of sqlite3_pcache_page will be -** a pointer to the szExtra bytes of extra storage that SQLite has requested -** for each entry in the page cache. -** -** The page to be fetched is determined by the key. ^The minimum key value -** is 1. After it has been retrieved using xFetch, the page is considered -** to be "pinned". -** -** If the requested page is already in the page cache, then the page cache -** implementation must return a pointer to the page buffer with its content -** intact. If the requested page is not already in the cache, then the -** cache implementation should use the value of the createFlag -** parameter to help it determined what action to take: -** -** -**
-**   createFlag   Behavior when page is not already in cache
-**   ----------   -------------------------------------------------
-**        0       Do not allocate a new page. Return NULL.
-**        1       Allocate a new page if it easy and convenient to do so.
-**                Otherwise return NULL.
-**        2       Make every effort to allocate a new page. Only return
-**                NULL if allocating a new page is effectively impossible.
    -** -** ^(SQLite will normally invoke xFetch() with a createFlag of 0 or 1. SQLite -** will only use a createFlag of 2 after a prior call with a createFlag of 1 -** failed.)^ In between the to xFetch() calls, SQLite may -** attempt to unpin one or more cache pages by spilling the content of -** pinned pages to disk and synching the operating system disk cache. -** -** [[the xUnpin() page cache method]] -** ^xUnpin() is called by SQLite with a pointer to a currently pinned page -** as its second argument. If the third parameter, discard, is non-zero, -** then the page must be evicted from the cache. -** ^If the discard parameter is -** zero, then the page may be discarded or retained at the discretion of -** page cache implementation. ^The page cache implementation -** may choose to evict unpinned pages at any time. -** -** The cache must not perform any reference counting. A single -** call to xUnpin() unpins the page regardless of the number of prior calls -** to xFetch(). -** -** [[the xRekey() page cache methods]] -** The xRekey() method is used to change the key value associated with the -** page passed as the second argument. If the cache -** previously contains an entry associated with newKey, it must be -** discarded. ^Any prior cache entry associated with newKey is guaranteed not -** to be pinned. -** -** When SQLite calls the xTruncate() method, the cache must discard all -** existing cache entries with page numbers (keys) greater than or equal -** to the value of the iLimit parameter passed to xTruncate(). If any -** of these pages are pinned, they are implicitly unpinned, meaning that -** they can be safely discarded. -** -** [[the xDestroy() page cache method]] -** ^The xDestroy() method is used to delete a cache allocated by xCreate(). -** All resources associated with the specified cache should be freed. ^After -** calling the xDestroy() method, SQLite considers the [sqlite3_pcache*] -** handle invalid, and will not use it with any other sqlite3_pcache_methods2 -** functions. -** -** [[the xShrink() page cache method]] -** ^SQLite invokes the xShrink() method when it wants the page cache to -** free up as much of heap memory as possible. The page cache implementation -** is not obligated to free any memory, but well-behaved implementations should -** do their best. -*/ -typedef struct sqlite3_pcache_methods2 sqlite3_pcache_methods2; -struct sqlite3_pcache_methods2 { - int iVersion; - void *pArg; - int (*xInit)(void*); - void (*xShutdown)(void*); - sqlite3_pcache *(*xCreate)(int szPage, int szExtra, int bPurgeable); - void (*xCachesize)(sqlite3_pcache*, int nCachesize); - int (*xPagecount)(sqlite3_pcache*); - sqlite3_pcache_page *(*xFetch)(sqlite3_pcache*, unsigned key, int createFlag); - void (*xUnpin)(sqlite3_pcache*, sqlite3_pcache_page*, int discard); - void (*xRekey)(sqlite3_pcache*, sqlite3_pcache_page*, - unsigned oldKey, unsigned newKey); - void (*xTruncate)(sqlite3_pcache*, unsigned iLimit); - void (*xDestroy)(sqlite3_pcache*); - void (*xShrink)(sqlite3_pcache*); -}; - -/* -** This is the obsolete pcache_methods object that has now been replaced -** by sqlite3_pcache_methods2. This object is not used by SQLite. It is -** retained in the header file for backwards compatibility only. 
-*/ -typedef struct sqlite3_pcache_methods sqlite3_pcache_methods; -struct sqlite3_pcache_methods { - void *pArg; - int (*xInit)(void*); - void (*xShutdown)(void*); - sqlite3_pcache *(*xCreate)(int szPage, int bPurgeable); - void (*xCachesize)(sqlite3_pcache*, int nCachesize); - int (*xPagecount)(sqlite3_pcache*); - void *(*xFetch)(sqlite3_pcache*, unsigned key, int createFlag); - void (*xUnpin)(sqlite3_pcache*, void*, int discard); - void (*xRekey)(sqlite3_pcache*, void*, unsigned oldKey, unsigned newKey); - void (*xTruncate)(sqlite3_pcache*, unsigned iLimit); - void (*xDestroy)(sqlite3_pcache*); -}; - - -/* -** CAPI3REF: Online Backup Object -** -** The sqlite3_backup object records state information about an ongoing -** online backup operation. ^The sqlite3_backup object is created by -** a call to [sqlite3_backup_init()] and is destroyed by a call to -** [sqlite3_backup_finish()]. -** -** See Also: [Using the SQLite Online Backup API] -*/ -typedef struct sqlite3_backup sqlite3_backup; - -/* -** CAPI3REF: Online Backup API. -** -** The backup API copies the content of one database into another. -** It is useful either for creating backups of databases or -** for copying in-memory databases to or from persistent files. -** -** See Also: [Using the SQLite Online Backup API] -** -** ^SQLite holds a write transaction open on the destination database file -** for the duration of the backup operation. -** ^The source database is read-locked only while it is being read; -** it is not locked continuously for the entire backup operation. -** ^Thus, the backup may be performed on a live source database without -** preventing other database connections from -** reading or writing to the source database while the backup is underway. -** -** ^(To perform a backup operation: -**
-**   1. sqlite3_backup_init() is called once to initialize the
-**      backup,
-**   2. sqlite3_backup_step() is called one or more times to transfer
-**      the data between the two databases, and finally
-**   3. sqlite3_backup_finish() is called to release all resources
-**      associated with the backup operation.
    )^ -** There should be exactly one call to sqlite3_backup_finish() for each -** successful call to sqlite3_backup_init(). -** -** [[sqlite3_backup_init()]] sqlite3_backup_init() -** -** ^The D and N arguments to sqlite3_backup_init(D,N,S,M) are the -** [database connection] associated with the destination database -** and the database name, respectively. -** ^The database name is "main" for the main database, "temp" for the -** temporary database, or the name specified after the AS keyword in -** an [ATTACH] statement for an attached database. -** ^The S and M arguments passed to -** sqlite3_backup_init(D,N,S,M) identify the [database connection] -** and database name of the source database, respectively. -** ^The source and destination [database connections] (parameters S and D) -** must be different or else sqlite3_backup_init(D,N,S,M) will fail with -** an error. -** -** ^If an error occurs within sqlite3_backup_init(D,N,S,M), then NULL is -** returned and an error code and error message are stored in the -** destination [database connection] D. -** ^The error code and message for the failed call to sqlite3_backup_init() -** can be retrieved using the [sqlite3_errcode()], [sqlite3_errmsg()], and/or -** [sqlite3_errmsg16()] functions. -** ^A successful call to sqlite3_backup_init() returns a pointer to an -** [sqlite3_backup] object. -** ^The [sqlite3_backup] object may be used with the sqlite3_backup_step() and -** sqlite3_backup_finish() functions to perform the specified backup -** operation. -** -** [[sqlite3_backup_step()]] sqlite3_backup_step() -** -** ^Function sqlite3_backup_step(B,N) will copy up to N pages between -** the source and destination databases specified by [sqlite3_backup] object B. -** ^If N is negative, all remaining source pages are copied. -** ^If sqlite3_backup_step(B,N) successfully copies N pages and there -** are still more pages to be copied, then the function returns [SQLITE_OK]. -** ^If sqlite3_backup_step(B,N) successfully finishes copying all pages -** from source to destination, then it returns [SQLITE_DONE]. -** ^If an error occurs while running sqlite3_backup_step(B,N), -** then an [error code] is returned. ^As well as [SQLITE_OK] and -** [SQLITE_DONE], a call to sqlite3_backup_step() may return [SQLITE_READONLY], -** [SQLITE_NOMEM], [SQLITE_BUSY], [SQLITE_LOCKED], or an -** [SQLITE_IOERR_ACCESS | SQLITE_IOERR_XXX] extended error code. -** -** ^(The sqlite3_backup_step() might return [SQLITE_READONLY] if -**
-**   1. the destination database was opened read-only, or
-**   2. the destination database is using write-ahead-log journaling
-**      and the destination and source page sizes differ, or
-**   3. the destination database is an in-memory database and the
-**      destination and source page sizes differ.
    )^ -** -** ^If sqlite3_backup_step() cannot obtain a required file-system lock, then -** the [sqlite3_busy_handler | busy-handler function] -** is invoked (if one is specified). ^If the -** busy-handler returns non-zero before the lock is available, then -** [SQLITE_BUSY] is returned to the caller. ^In this case the call to -** sqlite3_backup_step() can be retried later. ^If the source -** [database connection] -** is being used to write to the source database when sqlite3_backup_step() -** is called, then [SQLITE_LOCKED] is returned immediately. ^Again, in this -** case the call to sqlite3_backup_step() can be retried later on. ^(If -** [SQLITE_IOERR_ACCESS | SQLITE_IOERR_XXX], [SQLITE_NOMEM], or -** [SQLITE_READONLY] is returned, then -** there is no point in retrying the call to sqlite3_backup_step(). These -** errors are considered fatal.)^ The application must accept -** that the backup operation has failed and pass the backup operation handle -** to the sqlite3_backup_finish() to release associated resources. -** -** ^The first call to sqlite3_backup_step() obtains an exclusive lock -** on the destination file. ^The exclusive lock is not released until either -** sqlite3_backup_finish() is called or the backup operation is complete -** and sqlite3_backup_step() returns [SQLITE_DONE]. ^Every call to -** sqlite3_backup_step() obtains a [shared lock] on the source database that -** lasts for the duration of the sqlite3_backup_step() call. -** ^Because the source database is not locked between calls to -** sqlite3_backup_step(), the source database may be modified mid-way -** through the backup process. ^If the source database is modified by an -** external process or via a database connection other than the one being -** used by the backup operation, then the backup will be automatically -** restarted by the next call to sqlite3_backup_step(). ^If the source -** database is modified by the using the same database connection as is used -** by the backup operation, then the backup database is automatically -** updated at the same time. -** -** [[sqlite3_backup_finish()]] sqlite3_backup_finish() -** -** When sqlite3_backup_step() has returned [SQLITE_DONE], or when the -** application wishes to abandon the backup operation, the application -** should destroy the [sqlite3_backup] by passing it to sqlite3_backup_finish(). -** ^The sqlite3_backup_finish() interfaces releases all -** resources associated with the [sqlite3_backup] object. -** ^If sqlite3_backup_step() has not yet returned [SQLITE_DONE], then any -** active write-transaction on the destination database is rolled back. -** The [sqlite3_backup] object is invalid -** and may not be used following a call to sqlite3_backup_finish(). -** -** ^The value returned by sqlite3_backup_finish is [SQLITE_OK] if no -** sqlite3_backup_step() errors occurred, regardless or whether or not -** sqlite3_backup_step() completed. -** ^If an out-of-memory condition or IO error occurred during any prior -** sqlite3_backup_step() call on the same [sqlite3_backup] object, then -** sqlite3_backup_finish() returns the corresponding [error code]. -** -** ^A return of [SQLITE_BUSY] or [SQLITE_LOCKED] from sqlite3_backup_step() -** is not a permanent error and does not affect the return value of -** sqlite3_backup_finish(). 
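The init/step/finish cycle documented above is commonly driven by a small loop; the following is an illustrative sketch only (the helper name and page count are arbitrary, not part of the vendored header), copying the "main" database of pSrc into pDst:

```c
#include <sqlite3.h>

/* Illustrative sketch only: back up the "main" database of pSrc into pDst
** using the three-step cycle described above. */
static int backup_main_db(sqlite3 *pDst, sqlite3 *pSrc){
  int rc;
  sqlite3_backup *pBackup = sqlite3_backup_init(pDst, "main", pSrc, "main");
  if( pBackup==0 ) return sqlite3_errcode(pDst);
  do{
    rc = sqlite3_backup_step(pBackup, 100);     /* copy up to 100 pages */
    if( rc==SQLITE_BUSY || rc==SQLITE_LOCKED ){
      sqlite3_sleep(250);                       /* back off, then retry */
    }
  }while( rc==SQLITE_OK || rc==SQLITE_BUSY || rc==SQLITE_LOCKED );
  (void)sqlite3_backup_finish(pBackup);         /* always release the handle */
  return sqlite3_errcode(pDst);
}
```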
-** -** [[sqlite3_backup__remaining()]] [[sqlite3_backup_pagecount()]] -** sqlite3_backup_remaining() and sqlite3_backup_pagecount() -** -** ^Each call to sqlite3_backup_step() sets two values inside -** the [sqlite3_backup] object: the number of pages still to be backed -** up and the total number of pages in the source database file. -** The sqlite3_backup_remaining() and sqlite3_backup_pagecount() interfaces -** retrieve these two values, respectively. -** -** ^The values returned by these functions are only updated by -** sqlite3_backup_step(). ^If the source database is modified during a backup -** operation, then the values are not updated to account for any extra -** pages that need to be updated or the size of the source database file -** changing. -** -** Concurrent Usage of Database Handles -** -** ^The source [database connection] may be used by the application for other -** purposes while a backup operation is underway or being initialized. -** ^If SQLite is compiled and configured to support threadsafe database -** connections, then the source database connection may be used concurrently -** from within other threads. -** -** However, the application must guarantee that the destination -** [database connection] is not passed to any other API (by any thread) after -** sqlite3_backup_init() is called and before the corresponding call to -** sqlite3_backup_finish(). SQLite does not currently check to see -** if the application incorrectly accesses the destination [database connection] -** and so no error code is reported, but the operations may malfunction -** nevertheless. Use of the destination database connection while a -** backup is in progress might also also cause a mutex deadlock. -** -** If running in [shared cache mode], the application must -** guarantee that the shared cache used by the destination database -** is not accessed while the backup is running. In practice this means -** that the application must guarantee that the disk file being -** backed up to is not accessed by any connection within the process, -** not just the specific connection that was passed to sqlite3_backup_init(). -** -** The [sqlite3_backup] object itself is partially threadsafe. Multiple -** threads may safely make multiple concurrent calls to sqlite3_backup_step(). -** However, the sqlite3_backup_remaining() and sqlite3_backup_pagecount() -** APIs are not strictly speaking threadsafe. If they are invoked at the -** same time as another thread is invoking sqlite3_backup_step() it is -** possible that they return invalid values. -*/ -SQLITE_API sqlite3_backup *sqlite3_backup_init( - sqlite3 *pDest, /* Destination database handle */ - const char *zDestName, /* Destination database name */ - sqlite3 *pSource, /* Source database handle */ - const char *zSourceName /* Source database name */ -); -SQLITE_API int sqlite3_backup_step(sqlite3_backup *p, int nPage); -SQLITE_API int sqlite3_backup_finish(sqlite3_backup *p); -SQLITE_API int sqlite3_backup_remaining(sqlite3_backup *p); -SQLITE_API int sqlite3_backup_pagecount(sqlite3_backup *p); - -/* -** CAPI3REF: Unlock Notification -** -** ^When running in shared-cache mode, a database operation may fail with -** an [SQLITE_LOCKED] error if the required locks on the shared-cache or -** individual tables within the shared-cache cannot be obtained. See -** [SQLite Shared-Cache Mode] for a description of shared-cache locking. 
-** ^This API may be used to register a callback that SQLite will invoke -** when the connection currently holding the required lock relinquishes it. -** ^This API is only available if the library was compiled with the -** [SQLITE_ENABLE_UNLOCK_NOTIFY] C-preprocessor symbol defined. -** -** See Also: [Using the SQLite Unlock Notification Feature]. -** -** ^Shared-cache locks are released when a database connection concludes -** its current transaction, either by committing it or rolling it back. -** -** ^When a connection (known as the blocked connection) fails to obtain a -** shared-cache lock and SQLITE_LOCKED is returned to the caller, the -** identity of the database connection (the blocking connection) that -** has locked the required resource is stored internally. ^After an -** application receives an SQLITE_LOCKED error, it may call the -** sqlite3_unlock_notify() method with the blocked connection handle as -** the first argument to register for a callback that will be invoked -** when the blocking connections current transaction is concluded. ^The -** callback is invoked from within the [sqlite3_step] or [sqlite3_close] -** call that concludes the blocking connections transaction. -** -** ^(If sqlite3_unlock_notify() is called in a multi-threaded application, -** there is a chance that the blocking connection will have already -** concluded its transaction by the time sqlite3_unlock_notify() is invoked. -** If this happens, then the specified callback is invoked immediately, -** from within the call to sqlite3_unlock_notify().)^ -** -** ^If the blocked connection is attempting to obtain a write-lock on a -** shared-cache table, and more than one other connection currently holds -** a read-lock on the same table, then SQLite arbitrarily selects one of -** the other connections to use as the blocking connection. -** -** ^(There may be at most one unlock-notify callback registered by a -** blocked connection. If sqlite3_unlock_notify() is called when the -** blocked connection already has a registered unlock-notify callback, -** then the new callback replaces the old.)^ ^If sqlite3_unlock_notify() is -** called with a NULL pointer as its second argument, then any existing -** unlock-notify callback is canceled. ^The blocked connections -** unlock-notify callback may also be canceled by closing the blocked -** connection using [sqlite3_close()]. -** -** The unlock-notify callback is not reentrant. If an application invokes -** any sqlite3_xxx API functions from within an unlock-notify callback, a -** crash or deadlock may be the result. -** -** ^Unless deadlock is detected (see below), sqlite3_unlock_notify() always -** returns SQLITE_OK. -** -** Callback Invocation Details -** -** When an unlock-notify callback is registered, the application provides a -** single void* pointer that is passed to the callback when it is invoked. -** However, the signature of the callback function allows SQLite to pass -** it an array of void* context pointers. The first argument passed to -** an unlock-notify callback is a pointer to an array of void* pointers, -** and the second is the number of entries in the array. -** -** When a blocking connections transaction is concluded, there may be -** more than one blocked connection that has registered for an unlock-notify -** callback. 
^If two or more such blocked connections have specified the -** same callback function, then instead of invoking the callback function -** multiple times, it is invoked once with the set of void* context pointers -** specified by the blocked connections bundled together into an array. -** This gives the application an opportunity to prioritize any actions -** related to the set of unblocked database connections. -** -** Deadlock Detection -** -** Assuming that after registering for an unlock-notify callback a -** database waits for the callback to be issued before taking any further -** action (a reasonable assumption), then using this API may cause the -** application to deadlock. For example, if connection X is waiting for -** connection Y's transaction to be concluded, and similarly connection -** Y is waiting on connection X's transaction, then neither connection -** will proceed and the system may remain deadlocked indefinitely. -** -** To avoid this scenario, the sqlite3_unlock_notify() performs deadlock -** detection. ^If a given call to sqlite3_unlock_notify() would put the -** system in a deadlocked state, then SQLITE_LOCKED is returned and no -** unlock-notify callback is registered. The system is said to be in -** a deadlocked state if connection A has registered for an unlock-notify -** callback on the conclusion of connection B's transaction, and connection -** B has itself registered for an unlock-notify callback when connection -** A's transaction is concluded. ^Indirect deadlock is also detected, so -** the system is also considered to be deadlocked if connection B has -** registered for an unlock-notify callback on the conclusion of connection -** C's transaction, where connection C is waiting on connection A. ^Any -** number of levels of indirection are allowed. -** -** The "DROP TABLE" Exception -** -** When a call to [sqlite3_step()] returns SQLITE_LOCKED, it is almost -** always appropriate to call sqlite3_unlock_notify(). There is however, -** one exception. When executing a "DROP TABLE" or "DROP INDEX" statement, -** SQLite checks if there are any currently executing SELECT statements -** that belong to the same connection. If there are, SQLITE_LOCKED is -** returned. In this case there is no "blocking connection", so invoking -** sqlite3_unlock_notify() results in the unlock-notify callback being -** invoked immediately. If the application then re-attempts the "DROP TABLE" -** or "DROP INDEX" query, an infinite loop might be the result. -** -** One way around this problem is to check the extended error code returned -** by an sqlite3_step() call. ^(If there is a blocking connection, then the -** extended error code is set to SQLITE_LOCKED_SHAREDCACHE. Otherwise, in -** the special "DROP TABLE/INDEX" case, the extended error code is just -** SQLITE_LOCKED.)^ -*/ -SQLITE_API int sqlite3_unlock_notify( - sqlite3 *pBlocked, /* Waiting connection */ - void (*xNotify)(void **apArg, int nArg), /* Callback function to invoke */ - void *pNotifyArg /* Argument to pass to xNotify */ -); - - -/* -** CAPI3REF: String Comparison -** -** ^The [sqlite3_stricmp()] and [sqlite3_strnicmp()] APIs allow applications -** and extensions to compare the contents of two buffers containing UTF-8 -** strings in a case-independent fashion, using the same definition of "case -** independence" that SQLite uses internally when comparing identifiers. 
-*/ -SQLITE_API int sqlite3_stricmp(const char *, const char *); -SQLITE_API int sqlite3_strnicmp(const char *, const char *, int); - -/* -** CAPI3REF: String Globbing -* -** ^The [sqlite3_strglob(P,X)] interface returns zero if string X matches -** the glob pattern P, and it returns non-zero if string X does not match -** the glob pattern P. ^The definition of glob pattern matching used in -** [sqlite3_strglob(P,X)] is the same as for the "X GLOB P" operator in the -** SQL dialect used by SQLite. ^The sqlite3_strglob(P,X) function is case -** sensitive. -** -** Note that this routine returns zero on a match and non-zero if the strings -** do not match, the same as [sqlite3_stricmp()] and [sqlite3_strnicmp()]. -*/ -SQLITE_API int sqlite3_strglob(const char *zGlob, const char *zStr); - -/* -** CAPI3REF: Error Logging Interface -** -** ^The [sqlite3_log()] interface writes a message into the [error log] -** established by the [SQLITE_CONFIG_LOG] option to [sqlite3_config()]. -** ^If logging is enabled, the zFormat string and subsequent arguments are -** used with [sqlite3_snprintf()] to generate the final output string. -** -** The sqlite3_log() interface is intended for use by extensions such as -** virtual tables, collating functions, and SQL functions. While there is -** nothing to prevent an application from calling sqlite3_log(), doing so -** is considered bad form. -** -** The zFormat string must not be NULL. -** -** To avoid deadlocks and other threading problems, the sqlite3_log() routine -** will not use dynamically allocated memory. The log message is stored in -** a fixed-length buffer on the stack. If the log message is longer than -** a few hundred characters, it will be truncated to the length of the -** buffer. -*/ -SQLITE_API void sqlite3_log(int iErrCode, const char *zFormat, ...); - -/* -** CAPI3REF: Write-Ahead Log Commit Hook -** -** ^The [sqlite3_wal_hook()] function is used to register a callback that -** will be invoked each time a database connection commits data to a -** [write-ahead log] (i.e. whenever a transaction is committed in -** [journal_mode | journal_mode=WAL mode]). -** -** ^The callback is invoked by SQLite after the commit has taken place and -** the associated write-lock on the database released, so the implementation -** may read, write or [checkpoint] the database as required. -** -** ^The first parameter passed to the callback function when it is invoked -** is a copy of the third parameter passed to sqlite3_wal_hook() when -** registering the callback. ^The second is a copy of the database handle. -** ^The third parameter is the name of the database that was written to - -** either "main" or the name of an [ATTACH]-ed database. ^The fourth parameter -** is the number of pages currently in the write-ahead log file, -** including those that were just committed. -** -** The callback function should normally return [SQLITE_OK]. ^If an error -** code is returned, that error will propagate back up through the -** SQLite code base to cause the statement that provoked the callback -** to report an error, though the commit will have still occurred. If the -** callback returns [SQLITE_ROW] or [SQLITE_DONE], or if it returns a value -** that does not correspond to any valid SQLite error code, the results -** are undefined. -** -** A single database handle may have at most a single write-ahead log callback -** registered at one time. ^Calling [sqlite3_wal_hook()] replaces any -** previously registered write-ahead log callback. 
^Note that the -** [sqlite3_wal_autocheckpoint()] interface and the -** [wal_autocheckpoint pragma] both invoke [sqlite3_wal_hook()] and will -** those overwrite any prior [sqlite3_wal_hook()] settings. -*/ -SQLITE_API void *sqlite3_wal_hook( - sqlite3*, - int(*)(void *,sqlite3*,const char*,int), - void* -); - -/* -** CAPI3REF: Configure an auto-checkpoint -** -** ^The [sqlite3_wal_autocheckpoint(D,N)] is a wrapper around -** [sqlite3_wal_hook()] that causes any database on [database connection] D -** to automatically [checkpoint] -** after committing a transaction if there are N or -** more frames in the [write-ahead log] file. ^Passing zero or -** a negative value as the nFrame parameter disables automatic -** checkpoints entirely. -** -** ^The callback registered by this function replaces any existing callback -** registered using [sqlite3_wal_hook()]. ^Likewise, registering a callback -** using [sqlite3_wal_hook()] disables the automatic checkpoint mechanism -** configured by this function. -** -** ^The [wal_autocheckpoint pragma] can be used to invoke this interface -** from SQL. -** -** ^Every new [database connection] defaults to having the auto-checkpoint -** enabled with a threshold of 1000 or [SQLITE_DEFAULT_WAL_AUTOCHECKPOINT] -** pages. The use of this interface -** is only necessary if the default setting is found to be suboptimal -** for a particular application. -*/ -SQLITE_API int sqlite3_wal_autocheckpoint(sqlite3 *db, int N); - -/* -** CAPI3REF: Checkpoint a database -** -** ^The [sqlite3_wal_checkpoint(D,X)] interface causes database named X -** on [database connection] D to be [checkpointed]. ^If X is NULL or an -** empty string, then a checkpoint is run on all databases of -** connection D. ^If the database connection D is not in -** [WAL | write-ahead log mode] then this interface is a harmless no-op. -** -** ^The [wal_checkpoint pragma] can be used to invoke this interface -** from SQL. ^The [sqlite3_wal_autocheckpoint()] interface and the -** [wal_autocheckpoint pragma] can be used to cause this interface to be -** run whenever the WAL reaches a certain size threshold. -** -** See also: [sqlite3_wal_checkpoint_v2()] -*/ -SQLITE_API int sqlite3_wal_checkpoint(sqlite3 *db, const char *zDb); - -/* -** CAPI3REF: Checkpoint a database -** -** Run a checkpoint operation on WAL database zDb attached to database -** handle db. The specific operation is determined by the value of the -** eMode parameter: -** -**
    -**
-** SQLITE_CHECKPOINT_PASSIVE
    -** Checkpoint as many frames as possible without waiting for any database -** readers or writers to finish. Sync the db file if all frames in the log -** are checkpointed. This mode is the same as calling -** sqlite3_wal_checkpoint(). The busy-handler callback is never invoked. -** -**
-** SQLITE_CHECKPOINT_FULL
    -** This mode blocks (calls the busy-handler callback) until there is no -** database writer and all readers are reading from the most recent database -** snapshot. It then checkpoints all frames in the log file and syncs the -** database file. This call blocks database writers while it is running, -** but not database readers. -** -**
-** SQLITE_CHECKPOINT_RESTART
    -** This mode works the same way as SQLITE_CHECKPOINT_FULL, except after -** checkpointing the log file it blocks (calls the busy-handler callback) -** until all readers are reading from the database file only. This ensures -** that the next client to write to the database file restarts the log file -** from the beginning. This call blocks database writers while it is running, -** but not database readers. -**
    -** -** If pnLog is not NULL, then *pnLog is set to the total number of frames in -** the log file before returning. If pnCkpt is not NULL, then *pnCkpt is set to -** the total number of checkpointed frames (including any that were already -** checkpointed when this function is called). *pnLog and *pnCkpt may be -** populated even if sqlite3_wal_checkpoint_v2() returns other than SQLITE_OK. -** If no values are available because of an error, they are both set to -1 -** before returning to communicate this to the caller. -** -** All calls obtain an exclusive "checkpoint" lock on the database file. If -** any other process is running a checkpoint operation at the same time, the -** lock cannot be obtained and SQLITE_BUSY is returned. Even if there is a -** busy-handler configured, it will not be invoked in this case. -** -** The SQLITE_CHECKPOINT_FULL and RESTART modes also obtain the exclusive -** "writer" lock on the database file. If the writer lock cannot be obtained -** immediately, and a busy-handler is configured, it is invoked and the writer -** lock retried until either the busy-handler returns 0 or the lock is -** successfully obtained. The busy-handler is also invoked while waiting for -** database readers as described above. If the busy-handler returns 0 before -** the writer lock is obtained or while waiting for database readers, the -** checkpoint operation proceeds from that point in the same way as -** SQLITE_CHECKPOINT_PASSIVE - checkpointing as many frames as possible -** without blocking any further. SQLITE_BUSY is returned in this case. -** -** If parameter zDb is NULL or points to a zero length string, then the -** specified operation is attempted on all WAL databases. In this case the -** values written to output parameters *pnLog and *pnCkpt are undefined. If -** an SQLITE_BUSY error is encountered when processing one or more of the -** attached WAL databases, the operation is still attempted on any remaining -** attached databases and SQLITE_BUSY is returned to the caller. If any other -** error occurs while processing an attached database, processing is abandoned -** and the error code returned to the caller immediately. If no error -** (SQLITE_BUSY or otherwise) is encountered while processing the attached -** databases, SQLITE_OK is returned. -** -** If database zDb is the name of an attached database that is not in WAL -** mode, SQLITE_OK is returned and both *pnLog and *pnCkpt set to -1. If -** zDb is not NULL (or a zero length string) and is not the name of any -** attached database, SQLITE_ERROR is returned to the caller. -*/ -SQLITE_API int sqlite3_wal_checkpoint_v2( - sqlite3 *db, /* Database handle */ - const char *zDb, /* Name of attached database (or NULL) */ - int eMode, /* SQLITE_CHECKPOINT_* value */ - int *pnLog, /* OUT: Size of WAL log in frames */ - int *pnCkpt /* OUT: Total number of frames checkpointed */ -); - -/* -** CAPI3REF: Checkpoint operation parameters -** -** These constants can be used as the 3rd parameter to -** [sqlite3_wal_checkpoint_v2()]. See the [sqlite3_wal_checkpoint_v2()] -** documentation for additional information about the meaning and use of -** each of these values. -*/ -#define SQLITE_CHECKPOINT_PASSIVE 0 -#define SQLITE_CHECKPOINT_FULL 1 -#define SQLITE_CHECKPOINT_RESTART 2 - -/* -** CAPI3REF: Virtual Table Interface Configuration -** -** This function may be called by either the [xConnect] or [xCreate] method -** of a [virtual table] implementation to configure -** various facets of the virtual table interface. 
-** -** If this interface is invoked outside the context of an xConnect or -** xCreate virtual table method then the behavior is undefined. -** -** At present, there is only one option that may be configured using -** this function. (See [SQLITE_VTAB_CONSTRAINT_SUPPORT].) Further options -** may be added in the future. -*/ -SQLITE_API int sqlite3_vtab_config(sqlite3*, int op, ...); - -/* -** CAPI3REF: Virtual Table Configuration Options -** -** These macros define the various options to the -** [sqlite3_vtab_config()] interface that [virtual table] implementations -** can use to customize and optimize their behavior. -** -**
    -**
-** SQLITE_VTAB_CONSTRAINT_SUPPORT -**
    Calls of the form -** [sqlite3_vtab_config](db,SQLITE_VTAB_CONSTRAINT_SUPPORT,X) are supported, -** where X is an integer. If X is zero, then the [virtual table] whose -** [xCreate] or [xConnect] method invoked [sqlite3_vtab_config()] does not -** support constraints. In this configuration (which is the default) if -** a call to the [xUpdate] method returns [SQLITE_CONSTRAINT], then the entire -** statement is rolled back as if [ON CONFLICT | OR ABORT] had been -** specified as part of the users SQL statement, regardless of the actual -** ON CONFLICT mode specified. -** -** If X is non-zero, then the virtual table implementation guarantees -** that if [xUpdate] returns [SQLITE_CONSTRAINT], it will do so before -** any modifications to internal or persistent data structures have been made. -** If the [ON CONFLICT] mode is ABORT, FAIL, IGNORE or ROLLBACK, SQLite -** is able to roll back a statement or database transaction, and abandon -** or continue processing the current SQL statement as appropriate. -** If the ON CONFLICT mode is REPLACE and the [xUpdate] method returns -** [SQLITE_CONSTRAINT], SQLite handles this as if the ON CONFLICT mode -** had been ABORT. -** -** Virtual table implementations that are required to handle OR REPLACE -** must do so within the [xUpdate] method. If a call to the -** [sqlite3_vtab_on_conflict()] function indicates that the current ON -** CONFLICT policy is REPLACE, the virtual table implementation should -** silently replace the appropriate rows within the xUpdate callback and -** return SQLITE_OK. Or, if this is not possible, it may return -** SQLITE_CONSTRAINT, in which case SQLite falls back to OR ABORT -** constraint handling. -**
    -*/ -#define SQLITE_VTAB_CONSTRAINT_SUPPORT 1 - -/* -** CAPI3REF: Determine The Virtual Table Conflict Policy -** -** This function may only be called from within a call to the [xUpdate] method -** of a [virtual table] implementation for an INSERT or UPDATE operation. ^The -** value returned is one of [SQLITE_ROLLBACK], [SQLITE_IGNORE], [SQLITE_FAIL], -** [SQLITE_ABORT], or [SQLITE_REPLACE], according to the [ON CONFLICT] mode -** of the SQL statement that triggered the call to the [xUpdate] method of the -** [virtual table]. -*/ -SQLITE_API int sqlite3_vtab_on_conflict(sqlite3 *); - -/* -** CAPI3REF: Conflict resolution modes -** -** These constants are returned by [sqlite3_vtab_on_conflict()] to -** inform a [virtual table] implementation what the [ON CONFLICT] mode -** is for the SQL statement being evaluated. -** -** Note that the [SQLITE_IGNORE] constant is also used as a potential -** return value from the [sqlite3_set_authorizer()] callback and that -** [SQLITE_ABORT] is also a [result code]. -*/ -#define SQLITE_ROLLBACK 1 -/* #define SQLITE_IGNORE 2 // Also used by sqlite3_authorizer() callback */ -#define SQLITE_FAIL 3 -/* #define SQLITE_ABORT 4 // Also an error code */ -#define SQLITE_REPLACE 5 - - - -/* -** Undo the hack that converts floating point types to integer for -** builds on processors without floating point support. -*/ -#ifdef SQLITE_OMIT_FLOATING_POINT -# undef double -#endif - -#ifdef __cplusplus -} /* End of the 'extern "C"' block */ -#endif -#endif /* _SQLITE3_H_ */ - -/* -** 2010 August 30 -** -** The author disclaims copyright to this source code. In place of -** a legal notice, here is a blessing: -** -** May you do good and not evil. -** May you find forgiveness for yourself and forgive others. -** May you share freely, never taking more than you give. -** -************************************************************************* -*/ - -#ifndef _SQLITE3RTREE_H_ -#define _SQLITE3RTREE_H_ - - -#ifdef __cplusplus -extern "C" { -#endif - -typedef struct sqlite3_rtree_geometry sqlite3_rtree_geometry; -typedef struct sqlite3_rtree_query_info sqlite3_rtree_query_info; - -/* The double-precision datatype used by RTree depends on the -** SQLITE_RTREE_INT_ONLY compile-time option. -*/ -#ifdef SQLITE_RTREE_INT_ONLY - typedef sqlite3_int64 sqlite3_rtree_dbl; -#else - typedef double sqlite3_rtree_dbl; -#endif - -/* -** Register a geometry callback named zGeom that can be used as part of an -** R-Tree geometry query as follows: -** -** SELECT ... FROM WHERE MATCH $zGeom(... params ...) -*/ -SQLITE_API int sqlite3_rtree_geometry_callback( - sqlite3 *db, - const char *zGeom, - int (*xGeom)(sqlite3_rtree_geometry*, int, sqlite3_rtree_dbl*,int*), - void *pContext -); - - -/* -** A pointer to a structure of the following type is passed as the first -** argument to callbacks registered using rtree_geometry_callback(). -*/ -struct sqlite3_rtree_geometry { - void *pContext; /* Copy of pContext passed to s_r_g_c() */ - int nParam; /* Size of array aParam[] */ - sqlite3_rtree_dbl *aParam; /* Parameters passed to SQL geom function */ - void *pUser; /* Callback implementation user data */ - void (*xDelUser)(void *); /* Called by SQLite to clean up pUser */ -}; - -/* -** Register a 2nd-generation geometry callback named zScore that can be -** used as part of an R-Tree geometry query as follows: -** -** SELECT ... FROM WHERE MATCH $zQueryFunc(... params ...) 
-*/ -SQLITE_API int sqlite3_rtree_query_callback( - sqlite3 *db, - const char *zQueryFunc, - int (*xQueryFunc)(sqlite3_rtree_query_info*), - void *pContext, - void (*xDestructor)(void*) -); - - -/* -** A pointer to a structure of the following type is passed as the -** argument to scored geometry callback registered using -** sqlite3_rtree_query_callback(). -** -** Note that the first 5 fields of this structure are identical to -** sqlite3_rtree_geometry. This structure is a subclass of -** sqlite3_rtree_geometry. -*/ -struct sqlite3_rtree_query_info { - void *pContext; /* pContext from when function registered */ - int nParam; /* Number of function parameters */ - sqlite3_rtree_dbl *aParam; /* value of function parameters */ - void *pUser; /* callback can use this, if desired */ - void (*xDelUser)(void*); /* function to free pUser */ - sqlite3_rtree_dbl *aCoord; /* Coordinates of node or entry to check */ - unsigned int *anQueue; /* Number of pending entries in the queue */ - int nCoord; /* Number of coordinates */ - int iLevel; /* Level of current node or entry */ - int mxLevel; /* The largest iLevel value in the tree */ - sqlite3_int64 iRowid; /* Rowid for current entry */ - sqlite3_rtree_dbl rParentScore; /* Score of parent node */ - int eParentWithin; /* Visibility of parent node */ - int eWithin; /* OUT: Visiblity */ - sqlite3_rtree_dbl rScore; /* OUT: Write the score here */ -}; - -/* -** Allowed values for sqlite3_rtree_query.eWithin and .eParentWithin. -*/ -#define NOT_WITHIN 0 /* Object completely outside of query region */ -#define PARTLY_WITHIN 1 /* Object partially overlaps query region */ -#define FULLY_WITHIN 2 /* Object fully contained within query region */ - - -#ifdef __cplusplus -} /* end of the 'extern "C"' block */ -#endif - -#endif /* ifndef _SQLITE3RTREE_H_ */ - diff --git a/vendor/github.com/mattn/go-sqlite3/sqlite3.go b/vendor/github.com/mattn/go-sqlite3/sqlite3.go deleted file mode 100644 index 233e7e9..0000000 --- a/vendor/github.com/mattn/go-sqlite3/sqlite3.go +++ /dev/null @@ -1,675 +0,0 @@ -// Copyright (C) 2014 Yasuhiro Matsumoto . -// -// Use of this source code is governed by an MIT-style -// license that can be found in the LICENSE file. 
- -package sqlite3 - -/* -#cgo CFLAGS: -std=gnu99 -#cgo CFLAGS: -DSQLITE_ENABLE_RTREE -DSQLITE_THREADSAFE -#cgo CFLAGS: -DSQLITE_ENABLE_FTS3 -DSQLITE_ENABLE_FTS3_PARENTHESIS -#include -#include -#include - -#ifdef __CYGWIN__ -# include -#endif - -#ifndef SQLITE_OPEN_READWRITE -# define SQLITE_OPEN_READWRITE 0 -#endif - -#ifndef SQLITE_OPEN_FULLMUTEX -# define SQLITE_OPEN_FULLMUTEX 0 -#endif - -static int -_sqlite3_open_v2(const char *filename, sqlite3 **ppDb, int flags, const char *zVfs) { -#ifdef SQLITE_OPEN_URI - return sqlite3_open_v2(filename, ppDb, flags | SQLITE_OPEN_URI, zVfs); -#else - return sqlite3_open_v2(filename, ppDb, flags, zVfs); -#endif -} - -static int -_sqlite3_bind_text(sqlite3_stmt *stmt, int n, char *p, int np) { - return sqlite3_bind_text(stmt, n, p, np, SQLITE_TRANSIENT); -} - -static int -_sqlite3_bind_blob(sqlite3_stmt *stmt, int n, void *p, int np) { - return sqlite3_bind_blob(stmt, n, p, np, SQLITE_TRANSIENT); -} - -#include -#include - -static int -_sqlite3_exec(sqlite3* db, const char* pcmd, long* rowid, long* changes) -{ - int rv = sqlite3_exec(db, pcmd, 0, 0, 0); - *rowid = (long) sqlite3_last_insert_rowid(db); - *changes = (long) sqlite3_changes(db); - return rv; -} - -static int -_sqlite3_step(sqlite3_stmt* stmt, long* rowid, long* changes) -{ - int rv = sqlite3_step(stmt); - sqlite3* db = sqlite3_db_handle(stmt); - *rowid = (long) sqlite3_last_insert_rowid(db); - *changes = (long) sqlite3_changes(db); - return rv; -} - -*/ -import "C" -import ( - "database/sql" - "database/sql/driver" - "errors" - "fmt" - "io" - "net/url" - "runtime" - "strconv" - "strings" - "time" - "unsafe" -) - -// Timestamp formats understood by both this module and SQLite. -// The first format in the slice will be used when saving time values -// into the database. When parsing a string from a timestamp or -// datetime column, the formats are tried in order. -var SQLiteTimestampFormats = []string{ - "2006-01-02 15:04:05.999999999", - "2006-01-02T15:04:05.999999999", - "2006-01-02 15:04:05", - "2006-01-02T15:04:05", - "2006-01-02 15:04", - "2006-01-02T15:04", - "2006-01-02", - "2006-01-02 15:04:05-07:00", -} - -func init() { - sql.Register("sqlite3", &SQLiteDriver{}) -} - -// Return SQLite library Version information. -func Version() (libVersion string, libVersionNumber int, sourceId string) { - libVersion = C.GoString(C.sqlite3_libversion()) - libVersionNumber = int(C.sqlite3_libversion_number()) - sourceId = C.GoString(C.sqlite3_sourceid()) - return libVersion, libVersionNumber, sourceId -} - -// Driver struct. -type SQLiteDriver struct { - Extensions []string - ConnectHook func(*SQLiteConn) error -} - -// Conn struct. -type SQLiteConn struct { - db *C.sqlite3 - loc *time.Location - txlock string -} - -// Tx struct. -type SQLiteTx struct { - c *SQLiteConn -} - -// Stmt struct. -type SQLiteStmt struct { - c *SQLiteConn - s *C.sqlite3_stmt - nv int - nn []string - t string - closed bool - cls bool -} - -// Result struct. -type SQLiteResult struct { - id int64 - changes int64 -} - -// Rows struct. -type SQLiteRows struct { - s *SQLiteStmt - nc int - cols []string - decltype []string - cls bool -} - -// Commit transaction. -func (tx *SQLiteTx) Commit() error { - _, err := tx.c.exec("COMMIT") - return err -} - -// Rollback transaction. -func (tx *SQLiteTx) Rollback() error { - _, err := tx.c.exec("ROLLBACK") - return err -} - -// AutoCommit return which currently auto commit or not. 
-func (c *SQLiteConn) AutoCommit() bool { - return int(C.sqlite3_get_autocommit(c.db)) != 0 -} - -func (c *SQLiteConn) lastError() Error { - return Error{ - Code: ErrNo(C.sqlite3_errcode(c.db)), - ExtendedCode: ErrNoExtended(C.sqlite3_extended_errcode(c.db)), - err: C.GoString(C.sqlite3_errmsg(c.db)), - } -} - -// Implements Execer -func (c *SQLiteConn) Exec(query string, args []driver.Value) (driver.Result, error) { - if len(args) == 0 { - return c.exec(query) - } - - for { - s, err := c.Prepare(query) - if err != nil { - return nil, err - } - var res driver.Result - if s.(*SQLiteStmt).s != nil { - na := s.NumInput() - if len(args) < na { - return nil, fmt.Errorf("Not enough args to execute query. Expected %d, got %d.", na, len(args)) - } - res, err = s.Exec(args[:na]) - if err != nil && err != driver.ErrSkip { - s.Close() - return nil, err - } - args = args[na:] - } - tail := s.(*SQLiteStmt).t - s.Close() - if tail == "" { - return res, nil - } - query = tail - } -} - -// Implements Queryer -func (c *SQLiteConn) Query(query string, args []driver.Value) (driver.Rows, error) { - for { - s, err := c.Prepare(query) - if err != nil { - return nil, err - } - s.(*SQLiteStmt).cls = true - na := s.NumInput() - if len(args) < na { - return nil, fmt.Errorf("Not enough args to execute query. Expected %d, got %d.", na, len(args)) - } - rows, err := s.Query(args[:na]) - if err != nil && err != driver.ErrSkip { - s.Close() - return nil, err - } - args = args[na:] - tail := s.(*SQLiteStmt).t - if tail == "" { - return rows, nil - } - rows.Close() - s.Close() - query = tail - } -} - -func (c *SQLiteConn) exec(cmd string) (driver.Result, error) { - pcmd := C.CString(cmd) - defer C.free(unsafe.Pointer(pcmd)) - - var rowid, changes C.long - rv := C._sqlite3_exec(c.db, pcmd, &rowid, &changes) - if rv != C.SQLITE_OK { - return nil, c.lastError() - } - return &SQLiteResult{int64(rowid), int64(changes)}, nil -} - -// Begin transaction. -func (c *SQLiteConn) Begin() (driver.Tx, error) { - if _, err := c.exec(c.txlock); err != nil { - return nil, err - } - return &SQLiteTx{c}, nil -} - -func errorString(err Error) string { - return C.GoString(C.sqlite3_errstr(C.int(err.Code))) -} - -// Open database and return a new connection. -// You can specify DSN string with URI filename. -// test.db -// file:test.db?cache=shared&mode=memory -// :memory: -// file::memory: -// go-sqlite handle especially query parameters. -// _loc=XXX -// Specify location of time format. It's possible to specify "auto". -// _busy_timeout=XXX -// Specify value for sqlite3_busy_timeout. -// _txlock=XXX -// Specify locking behavior for transactions. XXX can be "immediate", -// "deferred", "exclusive". 
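The DSN query parameters documented in the comment above (`_loc`, `_busy_timeout`, `_txlock`) are easiest to see with a concrete call. The sketch below is purely illustrative and not part of this repository: the file name and parameter values are assumptions, and it relies only on `database/sql` plus the driver registration performed by this package's `init()`.

```go
package main

import (
	"database/sql"
	"log"

	_ "github.com/mattn/go-sqlite3" // registers the "sqlite3" driver via init()
)

func main() {
	// _busy_timeout, _txlock and _loc are the query parameters described in
	// the comment above; the file name is purely illustrative.
	dsn := "file:test.db?_busy_timeout=5000&_txlock=immediate&_loc=auto"

	db, err := sql.Open("sqlite3", dsn)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// sql.Open is lazy; Ping is what actually drives the driver's Open
	// (and therefore the DSN parsing shown below).
	if err := db.Ping(); err != nil {
		log.Fatal(err)
	}
	log.Println("connection established")
}
```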
-func (d *SQLiteDriver) Open(dsn string) (driver.Conn, error) { - if C.sqlite3_threadsafe() == 0 { - return nil, errors.New("sqlite library was not compiled for thread-safe operation") - } - - var loc *time.Location - txlock := "BEGIN" - busy_timeout := 5000 - pos := strings.IndexRune(dsn, '?') - if pos >= 1 { - params, err := url.ParseQuery(dsn[pos+1:]) - if err != nil { - return nil, err - } - - // _loc - if val := params.Get("_loc"); val != "" { - if val == "auto" { - loc = time.Local - } else { - loc, err = time.LoadLocation(val) - if err != nil { - return nil, fmt.Errorf("Invalid _loc: %v: %v", val, err) - } - } - } - - // _busy_timeout - if val := params.Get("_busy_timeout"); val != "" { - iv, err := strconv.ParseInt(val, 10, 64) - if err != nil { - return nil, fmt.Errorf("Invalid _busy_timeout: %v: %v", val, err) - } - busy_timeout = int(iv) - } - - // _txlock - if val := params.Get("_txlock"); val != "" { - switch val { - case "immediate": - txlock = "BEGIN IMMEDIATE" - case "exclusive": - txlock = "BEGIN EXCLUSIVE" - case "deferred": - txlock = "BEGIN" - default: - return nil, fmt.Errorf("Invalid _txlock: %v", val) - } - } - - if !strings.HasPrefix(dsn, "file:") { - dsn = dsn[:pos] - } - } - - var db *C.sqlite3 - name := C.CString(dsn) - defer C.free(unsafe.Pointer(name)) - rv := C._sqlite3_open_v2(name, &db, - C.SQLITE_OPEN_FULLMUTEX| - C.SQLITE_OPEN_READWRITE| - C.SQLITE_OPEN_CREATE, - nil) - if rv != 0 { - return nil, Error{Code: ErrNo(rv)} - } - if db == nil { - return nil, errors.New("sqlite succeeded without returning a database") - } - - rv = C.sqlite3_busy_timeout(db, C.int(busy_timeout)) - if rv != C.SQLITE_OK { - return nil, Error{Code: ErrNo(rv)} - } - - conn := &SQLiteConn{db: db, loc: loc, txlock: txlock} - - if len(d.Extensions) > 0 { - rv = C.sqlite3_enable_load_extension(db, 1) - if rv != C.SQLITE_OK { - return nil, errors.New(C.GoString(C.sqlite3_errmsg(db))) - } - - for _, extension := range d.Extensions { - cext := C.CString(extension) - defer C.free(unsafe.Pointer(cext)) - rv = C.sqlite3_load_extension(db, cext, nil, nil) - if rv != C.SQLITE_OK { - return nil, errors.New(C.GoString(C.sqlite3_errmsg(db))) - } - } - - rv = C.sqlite3_enable_load_extension(db, 0) - if rv != C.SQLITE_OK { - return nil, errors.New(C.GoString(C.sqlite3_errmsg(db))) - } - } - - if d.ConnectHook != nil { - if err := d.ConnectHook(conn); err != nil { - return nil, err - } - } - runtime.SetFinalizer(conn, (*SQLiteConn).Close) - return conn, nil -} - -// Close the connection. -func (c *SQLiteConn) Close() error { - rv := C.sqlite3_close_v2(c.db) - if rv != C.SQLITE_OK { - return c.lastError() - } - c.db = nil - runtime.SetFinalizer(c, nil) - return nil -} - -// Prepare query string. Return a new statement. 
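As a hedged sketch of the query path (the `people` table, the package name, and the helper function are hypothetical, not part of this repository): `database/sql` routes `db.Query` through the driver's `Prepare` and `Query` methods in this file, and each `rows.Next`/`Scan` pair is served by the driver's row cursor.

```go
package sqlitedemo

import "database/sql"

// listNames is an illustrative sketch: the query is prepared by the driver's
// Prepare method, the single argument is bound before execution, and every
// rows.Next/Scan round trip is answered by the driver's row implementation.
func listNames(db *sql.DB, minID int64) ([]string, error) {
	rows, err := db.Query("SELECT name FROM people WHERE id >= ?", minID)
	if err != nil {
		return nil, err
	}
	defer rows.Close()

	var names []string
	for rows.Next() {
		var name string
		if err := rows.Scan(&name); err != nil {
			return nil, err
		}
		names = append(names, name)
	}
	return names, rows.Err()
}
```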
-func (c *SQLiteConn) Prepare(query string) (driver.Stmt, error) { - pquery := C.CString(query) - defer C.free(unsafe.Pointer(pquery)) - var s *C.sqlite3_stmt - var tail *C.char - rv := C.sqlite3_prepare_v2(c.db, pquery, -1, &s, &tail) - if rv != C.SQLITE_OK { - return nil, c.lastError() - } - var t string - if tail != nil && *tail != '\000' { - t = strings.TrimSpace(C.GoString(tail)) - } - nv := int(C.sqlite3_bind_parameter_count(s)) - var nn []string - for i := 0; i < nv; i++ { - pn := C.GoString(C.sqlite3_bind_parameter_name(s, C.int(i+1))) - if len(pn) > 1 && pn[0] == '$' && 48 <= pn[1] && pn[1] <= 57 { - nn = append(nn, C.GoString(C.sqlite3_bind_parameter_name(s, C.int(i+1)))) - } - } - ss := &SQLiteStmt{c: c, s: s, nv: nv, nn: nn, t: t} - runtime.SetFinalizer(ss, (*SQLiteStmt).Close) - return ss, nil -} - -// Close the statement. -func (s *SQLiteStmt) Close() error { - if s.closed { - return nil - } - s.closed = true - if s.c == nil || s.c.db == nil { - return errors.New("sqlite statement with already closed database connection") - } - rv := C.sqlite3_finalize(s.s) - if rv != C.SQLITE_OK { - return s.c.lastError() - } - runtime.SetFinalizer(s, nil) - return nil -} - -// Return a number of parameters. -func (s *SQLiteStmt) NumInput() int { - return s.nv -} - -type bindArg struct { - n int - v driver.Value -} - -func (s *SQLiteStmt) bind(args []driver.Value) error { - rv := C.sqlite3_reset(s.s) - if rv != C.SQLITE_ROW && rv != C.SQLITE_OK && rv != C.SQLITE_DONE { - return s.c.lastError() - } - - var vargs []bindArg - narg := len(args) - vargs = make([]bindArg, narg) - if len(s.nn) > 0 { - for i, v := range s.nn { - if pi, err := strconv.Atoi(v[1:]); err == nil { - vargs[i] = bindArg{pi, args[i]} - } - } - } else { - for i, v := range args { - vargs[i] = bindArg{i + 1, v} - } - } - - for _, varg := range vargs { - n := C.int(varg.n) - v := varg.v - switch v := v.(type) { - case nil: - rv = C.sqlite3_bind_null(s.s, n) - case string: - if len(v) == 0 { - b := []byte{0} - rv = C._sqlite3_bind_text(s.s, n, (*C.char)(unsafe.Pointer(&b[0])), C.int(0)) - } else { - b := []byte(v) - rv = C._sqlite3_bind_text(s.s, n, (*C.char)(unsafe.Pointer(&b[0])), C.int(len(b))) - } - case int64: - rv = C.sqlite3_bind_int64(s.s, n, C.sqlite3_int64(v)) - case bool: - if bool(v) { - rv = C.sqlite3_bind_int(s.s, n, 1) - } else { - rv = C.sqlite3_bind_int(s.s, n, 0) - } - case float64: - rv = C.sqlite3_bind_double(s.s, n, C.double(v)) - case []byte: - var p *byte - if len(v) > 0 { - p = &v[0] - } - rv = C._sqlite3_bind_blob(s.s, n, unsafe.Pointer(p), C.int(len(v))) - case time.Time: - b := []byte(v.UTC().Format(SQLiteTimestampFormats[0])) - rv = C._sqlite3_bind_text(s.s, n, (*C.char)(unsafe.Pointer(&b[0])), C.int(len(b))) - } - if rv != C.SQLITE_OK { - return s.c.lastError() - } - } - return nil -} - -// Query the statement with arguments. Return records. -func (s *SQLiteStmt) Query(args []driver.Value) (driver.Rows, error) { - if err := s.bind(args); err != nil { - return nil, err - } - return &SQLiteRows{s, int(C.sqlite3_column_count(s.s)), nil, nil, s.cls}, nil -} - -// Return last inserted ID. -func (r *SQLiteResult) LastInsertId() (int64, error) { - return r.id, nil -} - -// Return how many rows affected. -func (r *SQLiteResult) RowsAffected() (int64, error) { - return r.changes, nil -} - -// Execute the statement with arguments. Return result object. 
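Similarly, a hedged sketch of the write path (the `events` table and the helper are hypothetical): the int64, string and `time.Time` arguments all pass through the `bind()` loop above, with `time.Time` values formatted using `SQLiteTimestampFormats[0]`, and the returned `driver.Result` is the `SQLiteResult` defined above.

```go
package sqlitedemo

import (
	"database/sql"
	"time"
)

// insertEvent is an illustrative sketch. Each argument type (int64, string,
// time.Time) is handled by its own case in the bind() loop above; the
// result's RowsAffected comes from the SQLiteResult methods above.
func insertEvent(db *sql.DB, id int64, name string, at time.Time) (int64, error) {
	res, err := db.Exec(
		"INSERT INTO events (id, name, created_at) VALUES (?, ?, ?)",
		id, name, at,
	)
	if err != nil {
		return 0, err
	}
	return res.RowsAffected()
}
```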
-func (s *SQLiteStmt) Exec(args []driver.Value) (driver.Result, error) { - if err := s.bind(args); err != nil { - C.sqlite3_reset(s.s) - C.sqlite3_clear_bindings(s.s) - return nil, err - } - var rowid, changes C.long - rv := C._sqlite3_step(s.s, &rowid, &changes) - if rv != C.SQLITE_ROW && rv != C.SQLITE_OK && rv != C.SQLITE_DONE { - err := s.c.lastError() - C.sqlite3_reset(s.s) - C.sqlite3_clear_bindings(s.s) - return nil, err - } - return &SQLiteResult{int64(rowid), int64(changes)}, nil -} - -// Close the rows. -func (rc *SQLiteRows) Close() error { - if rc.s.closed { - return nil - } - if rc.cls { - return rc.s.Close() - } - rv := C.sqlite3_reset(rc.s.s) - if rv != C.SQLITE_OK { - return rc.s.c.lastError() - } - return nil -} - -// Return column names. -func (rc *SQLiteRows) Columns() []string { - if rc.nc != len(rc.cols) { - rc.cols = make([]string, rc.nc) - for i := 0; i < rc.nc; i++ { - rc.cols[i] = C.GoString(C.sqlite3_column_name(rc.s.s, C.int(i))) - } - } - return rc.cols -} - -// Move cursor to next. -func (rc *SQLiteRows) Next(dest []driver.Value) error { - rv := C.sqlite3_step(rc.s.s) - if rv == C.SQLITE_DONE { - return io.EOF - } - if rv != C.SQLITE_ROW { - rv = C.sqlite3_reset(rc.s.s) - if rv != C.SQLITE_OK { - return rc.s.c.lastError() - } - return nil - } - - if rc.decltype == nil { - rc.decltype = make([]string, rc.nc) - for i := 0; i < rc.nc; i++ { - rc.decltype[i] = strings.ToLower(C.GoString(C.sqlite3_column_decltype(rc.s.s, C.int(i)))) - } - } - - for i := range dest { - switch C.sqlite3_column_type(rc.s.s, C.int(i)) { - case C.SQLITE_INTEGER: - val := int64(C.sqlite3_column_int64(rc.s.s, C.int(i))) - switch rc.decltype[i] { - case "timestamp", "datetime", "date": - unixTimestamp := strconv.FormatInt(val, 10) - var t time.Time - if len(unixTimestamp) == 13 { - duration, err := time.ParseDuration(unixTimestamp + "ms") - if err != nil { - return fmt.Errorf("error parsing %s value %d, %s", rc.decltype[i], val, err) - } - epoch := time.Date(1970, 1, 1, 0, 0, 0, 0, time.UTC) - t = epoch.Add(duration) - } else { - t = time.Unix(val, 0) - } - if rc.s.c.loc != nil { - t = t.In(rc.s.c.loc) - } - dest[i] = t - case "boolean": - dest[i] = val > 0 - default: - dest[i] = val - } - case C.SQLITE_FLOAT: - dest[i] = float64(C.sqlite3_column_double(rc.s.s, C.int(i))) - case C.SQLITE_BLOB: - p := C.sqlite3_column_blob(rc.s.s, C.int(i)) - if p == nil { - dest[i] = nil - continue - } - n := int(C.sqlite3_column_bytes(rc.s.s, C.int(i))) - switch dest[i].(type) { - case sql.RawBytes: - dest[i] = (*[1 << 30]byte)(unsafe.Pointer(p))[0:n] - default: - slice := make([]byte, n) - copy(slice[:], (*[1 << 30]byte)(unsafe.Pointer(p))[0:n]) - dest[i] = slice - } - case C.SQLITE_NULL: - dest[i] = nil - case C.SQLITE_TEXT: - var err error - var timeVal time.Time - - n := int(C.sqlite3_column_bytes(rc.s.s, C.int(i))) - s := C.GoStringN((*C.char)(unsafe.Pointer(C.sqlite3_column_text(rc.s.s, C.int(i)))), C.int(n)) - - switch rc.decltype[i] { - case "timestamp", "datetime", "date": - var t time.Time - s = strings.TrimSuffix(s, "Z") - for _, format := range SQLiteTimestampFormats { - if timeVal, err = time.ParseInLocation(format, s, time.UTC); err == nil { - t = timeVal - break - } - } - if err != nil { - // The column is a time value, so return the zero time on parse failure. 
- t = time.Time{} - } - if rc.s.c.loc != nil { - t = t.In(rc.s.c.loc) - } - dest[i] = t - default: - dest[i] = []byte(s) - } - - } - } - return nil -} diff --git a/vendor/github.com/mattn/go-sqlite3/sqlite3_other.go b/vendor/github.com/mattn/go-sqlite3/sqlite3_other.go deleted file mode 100644 index a20d02c..0000000 --- a/vendor/github.com/mattn/go-sqlite3/sqlite3_other.go +++ /dev/null @@ -1,13 +0,0 @@ -// Copyright (C) 2014 Yasuhiro Matsumoto . -// -// Use of this source code is governed by an MIT-style -// license that can be found in the LICENSE file. -// +build !windows - -package sqlite3 - -/* -#cgo CFLAGS: -I. -#cgo linux LDFLAGS: -ldl -*/ -import "C" diff --git a/vendor/github.com/mattn/go-sqlite3/sqlite3_windows.go b/vendor/github.com/mattn/go-sqlite3/sqlite3_windows.go deleted file mode 100644 index abc8384..0000000 --- a/vendor/github.com/mattn/go-sqlite3/sqlite3_windows.go +++ /dev/null @@ -1,14 +0,0 @@ -// Copyright (C) 2014 Yasuhiro Matsumoto . -// -// Use of this source code is governed by an MIT-style -// license that can be found in the LICENSE file. -// +build windows - -package sqlite3 - -/* -#cgo CFLAGS: -I. -fno-stack-check -fno-stack-protector -mno-stack-arg-probe -#cgo windows,386 CFLAGS: -D_localtime32=localtime -#cgo LDFLAGS: -lmingwex -lmingw32 -*/ -import "C" diff --git a/vendor/github.com/mattn/go-sqlite3/sqlite3ext.h b/vendor/github.com/mattn/go-sqlite3/sqlite3ext.h deleted file mode 100644 index 7cc58b6..0000000 --- a/vendor/github.com/mattn/go-sqlite3/sqlite3ext.h +++ /dev/null @@ -1,487 +0,0 @@ -/* -** 2006 June 7 -** -** The author disclaims copyright to this source code. In place of -** a legal notice, here is a blessing: -** -** May you do good and not evil. -** May you find forgiveness for yourself and forgive others. -** May you share freely, never taking more than you give. -** -************************************************************************* -** This header file defines the SQLite interface for use by -** shared libraries that want to be imported as extensions into -** an SQLite instance. Shared libraries that intend to be loaded -** as extensions by SQLite should #include this file instead of -** sqlite3.h. -*/ -#ifndef _SQLITE3EXT_H_ -#define _SQLITE3EXT_H_ -#include "sqlite3-binding.h" - -typedef struct sqlite3_api_routines sqlite3_api_routines; - -/* -** The following structure holds pointers to all of the SQLite API -** routines. -** -** WARNING: In order to maintain backwards compatibility, add new -** interfaces to the end of this structure only. If you insert new -** interfaces in the middle of this structure, then older different -** versions of SQLite will not be able to load each others' shared -** libraries! 
-*/ -struct sqlite3_api_routines { - void * (*aggregate_context)(sqlite3_context*,int nBytes); - int (*aggregate_count)(sqlite3_context*); - int (*bind_blob)(sqlite3_stmt*,int,const void*,int n,void(*)(void*)); - int (*bind_double)(sqlite3_stmt*,int,double); - int (*bind_int)(sqlite3_stmt*,int,int); - int (*bind_int64)(sqlite3_stmt*,int,sqlite_int64); - int (*bind_null)(sqlite3_stmt*,int); - int (*bind_parameter_count)(sqlite3_stmt*); - int (*bind_parameter_index)(sqlite3_stmt*,const char*zName); - const char * (*bind_parameter_name)(sqlite3_stmt*,int); - int (*bind_text)(sqlite3_stmt*,int,const char*,int n,void(*)(void*)); - int (*bind_text16)(sqlite3_stmt*,int,const void*,int,void(*)(void*)); - int (*bind_value)(sqlite3_stmt*,int,const sqlite3_value*); - int (*busy_handler)(sqlite3*,int(*)(void*,int),void*); - int (*busy_timeout)(sqlite3*,int ms); - int (*changes)(sqlite3*); - int (*close)(sqlite3*); - int (*collation_needed)(sqlite3*,void*,void(*)(void*,sqlite3*, - int eTextRep,const char*)); - int (*collation_needed16)(sqlite3*,void*,void(*)(void*,sqlite3*, - int eTextRep,const void*)); - const void * (*column_blob)(sqlite3_stmt*,int iCol); - int (*column_bytes)(sqlite3_stmt*,int iCol); - int (*column_bytes16)(sqlite3_stmt*,int iCol); - int (*column_count)(sqlite3_stmt*pStmt); - const char * (*column_database_name)(sqlite3_stmt*,int); - const void * (*column_database_name16)(sqlite3_stmt*,int); - const char * (*column_decltype)(sqlite3_stmt*,int i); - const void * (*column_decltype16)(sqlite3_stmt*,int); - double (*column_double)(sqlite3_stmt*,int iCol); - int (*column_int)(sqlite3_stmt*,int iCol); - sqlite_int64 (*column_int64)(sqlite3_stmt*,int iCol); - const char * (*column_name)(sqlite3_stmt*,int); - const void * (*column_name16)(sqlite3_stmt*,int); - const char * (*column_origin_name)(sqlite3_stmt*,int); - const void * (*column_origin_name16)(sqlite3_stmt*,int); - const char * (*column_table_name)(sqlite3_stmt*,int); - const void * (*column_table_name16)(sqlite3_stmt*,int); - const unsigned char * (*column_text)(sqlite3_stmt*,int iCol); - const void * (*column_text16)(sqlite3_stmt*,int iCol); - int (*column_type)(sqlite3_stmt*,int iCol); - sqlite3_value* (*column_value)(sqlite3_stmt*,int iCol); - void * (*commit_hook)(sqlite3*,int(*)(void*),void*); - int (*complete)(const char*sql); - int (*complete16)(const void*sql); - int (*create_collation)(sqlite3*,const char*,int,void*, - int(*)(void*,int,const void*,int,const void*)); - int (*create_collation16)(sqlite3*,const void*,int,void*, - int(*)(void*,int,const void*,int,const void*)); - int (*create_function)(sqlite3*,const char*,int,int,void*, - void (*xFunc)(sqlite3_context*,int,sqlite3_value**), - void (*xStep)(sqlite3_context*,int,sqlite3_value**), - void (*xFinal)(sqlite3_context*)); - int (*create_function16)(sqlite3*,const void*,int,int,void*, - void (*xFunc)(sqlite3_context*,int,sqlite3_value**), - void (*xStep)(sqlite3_context*,int,sqlite3_value**), - void (*xFinal)(sqlite3_context*)); - int (*create_module)(sqlite3*,const char*,const sqlite3_module*,void*); - int (*data_count)(sqlite3_stmt*pStmt); - sqlite3 * (*db_handle)(sqlite3_stmt*); - int (*declare_vtab)(sqlite3*,const char*); - int (*enable_shared_cache)(int); - int (*errcode)(sqlite3*db); - const char * (*errmsg)(sqlite3*); - const void * (*errmsg16)(sqlite3*); - int (*exec)(sqlite3*,const char*,sqlite3_callback,void*,char**); - int (*expired)(sqlite3_stmt*); - int (*finalize)(sqlite3_stmt*pStmt); - void (*free)(void*); - void (*free_table)(char**result); - int 
(*get_autocommit)(sqlite3*); - void * (*get_auxdata)(sqlite3_context*,int); - int (*get_table)(sqlite3*,const char*,char***,int*,int*,char**); - int (*global_recover)(void); - void (*interruptx)(sqlite3*); - sqlite_int64 (*last_insert_rowid)(sqlite3*); - const char * (*libversion)(void); - int (*libversion_number)(void); - void *(*malloc)(int); - char * (*mprintf)(const char*,...); - int (*open)(const char*,sqlite3**); - int (*open16)(const void*,sqlite3**); - int (*prepare)(sqlite3*,const char*,int,sqlite3_stmt**,const char**); - int (*prepare16)(sqlite3*,const void*,int,sqlite3_stmt**,const void**); - void * (*profile)(sqlite3*,void(*)(void*,const char*,sqlite_uint64),void*); - void (*progress_handler)(sqlite3*,int,int(*)(void*),void*); - void *(*realloc)(void*,int); - int (*reset)(sqlite3_stmt*pStmt); - void (*result_blob)(sqlite3_context*,const void*,int,void(*)(void*)); - void (*result_double)(sqlite3_context*,double); - void (*result_error)(sqlite3_context*,const char*,int); - void (*result_error16)(sqlite3_context*,const void*,int); - void (*result_int)(sqlite3_context*,int); - void (*result_int64)(sqlite3_context*,sqlite_int64); - void (*result_null)(sqlite3_context*); - void (*result_text)(sqlite3_context*,const char*,int,void(*)(void*)); - void (*result_text16)(sqlite3_context*,const void*,int,void(*)(void*)); - void (*result_text16be)(sqlite3_context*,const void*,int,void(*)(void*)); - void (*result_text16le)(sqlite3_context*,const void*,int,void(*)(void*)); - void (*result_value)(sqlite3_context*,sqlite3_value*); - void * (*rollback_hook)(sqlite3*,void(*)(void*),void*); - int (*set_authorizer)(sqlite3*,int(*)(void*,int,const char*,const char*, - const char*,const char*),void*); - void (*set_auxdata)(sqlite3_context*,int,void*,void (*)(void*)); - char * (*snprintf)(int,char*,const char*,...); - int (*step)(sqlite3_stmt*); - int (*table_column_metadata)(sqlite3*,const char*,const char*,const char*, - char const**,char const**,int*,int*,int*); - void (*thread_cleanup)(void); - int (*total_changes)(sqlite3*); - void * (*trace)(sqlite3*,void(*xTrace)(void*,const char*),void*); - int (*transfer_bindings)(sqlite3_stmt*,sqlite3_stmt*); - void * (*update_hook)(sqlite3*,void(*)(void*,int ,char const*,char const*, - sqlite_int64),void*); - void * (*user_data)(sqlite3_context*); - const void * (*value_blob)(sqlite3_value*); - int (*value_bytes)(sqlite3_value*); - int (*value_bytes16)(sqlite3_value*); - double (*value_double)(sqlite3_value*); - int (*value_int)(sqlite3_value*); - sqlite_int64 (*value_int64)(sqlite3_value*); - int (*value_numeric_type)(sqlite3_value*); - const unsigned char * (*value_text)(sqlite3_value*); - const void * (*value_text16)(sqlite3_value*); - const void * (*value_text16be)(sqlite3_value*); - const void * (*value_text16le)(sqlite3_value*); - int (*value_type)(sqlite3_value*); - char *(*vmprintf)(const char*,va_list); - /* Added ??? 
*/ - int (*overload_function)(sqlite3*, const char *zFuncName, int nArg); - /* Added by 3.3.13 */ - int (*prepare_v2)(sqlite3*,const char*,int,sqlite3_stmt**,const char**); - int (*prepare16_v2)(sqlite3*,const void*,int,sqlite3_stmt**,const void**); - int (*clear_bindings)(sqlite3_stmt*); - /* Added by 3.4.1 */ - int (*create_module_v2)(sqlite3*,const char*,const sqlite3_module*,void*, - void (*xDestroy)(void *)); - /* Added by 3.5.0 */ - int (*bind_zeroblob)(sqlite3_stmt*,int,int); - int (*blob_bytes)(sqlite3_blob*); - int (*blob_close)(sqlite3_blob*); - int (*blob_open)(sqlite3*,const char*,const char*,const char*,sqlite3_int64, - int,sqlite3_blob**); - int (*blob_read)(sqlite3_blob*,void*,int,int); - int (*blob_write)(sqlite3_blob*,const void*,int,int); - int (*create_collation_v2)(sqlite3*,const char*,int,void*, - int(*)(void*,int,const void*,int,const void*), - void(*)(void*)); - int (*file_control)(sqlite3*,const char*,int,void*); - sqlite3_int64 (*memory_highwater)(int); - sqlite3_int64 (*memory_used)(void); - sqlite3_mutex *(*mutex_alloc)(int); - void (*mutex_enter)(sqlite3_mutex*); - void (*mutex_free)(sqlite3_mutex*); - void (*mutex_leave)(sqlite3_mutex*); - int (*mutex_try)(sqlite3_mutex*); - int (*open_v2)(const char*,sqlite3**,int,const char*); - int (*release_memory)(int); - void (*result_error_nomem)(sqlite3_context*); - void (*result_error_toobig)(sqlite3_context*); - int (*sleep)(int); - void (*soft_heap_limit)(int); - sqlite3_vfs *(*vfs_find)(const char*); - int (*vfs_register)(sqlite3_vfs*,int); - int (*vfs_unregister)(sqlite3_vfs*); - int (*xthreadsafe)(void); - void (*result_zeroblob)(sqlite3_context*,int); - void (*result_error_code)(sqlite3_context*,int); - int (*test_control)(int, ...); - void (*randomness)(int,void*); - sqlite3 *(*context_db_handle)(sqlite3_context*); - int (*extended_result_codes)(sqlite3*,int); - int (*limit)(sqlite3*,int,int); - sqlite3_stmt *(*next_stmt)(sqlite3*,sqlite3_stmt*); - const char *(*sql)(sqlite3_stmt*); - int (*status)(int,int*,int*,int); - int (*backup_finish)(sqlite3_backup*); - sqlite3_backup *(*backup_init)(sqlite3*,const char*,sqlite3*,const char*); - int (*backup_pagecount)(sqlite3_backup*); - int (*backup_remaining)(sqlite3_backup*); - int (*backup_step)(sqlite3_backup*,int); - const char *(*compileoption_get)(int); - int (*compileoption_used)(const char*); - int (*create_function_v2)(sqlite3*,const char*,int,int,void*, - void (*xFunc)(sqlite3_context*,int,sqlite3_value**), - void (*xStep)(sqlite3_context*,int,sqlite3_value**), - void (*xFinal)(sqlite3_context*), - void(*xDestroy)(void*)); - int (*db_config)(sqlite3*,int,...); - sqlite3_mutex *(*db_mutex)(sqlite3*); - int (*db_status)(sqlite3*,int,int*,int*,int); - int (*extended_errcode)(sqlite3*); - void (*log)(int,const char*,...); - sqlite3_int64 (*soft_heap_limit64)(sqlite3_int64); - const char *(*sourceid)(void); - int (*stmt_status)(sqlite3_stmt*,int,int); - int (*strnicmp)(const char*,const char*,int); - int (*unlock_notify)(sqlite3*,void(*)(void**,int),void*); - int (*wal_autocheckpoint)(sqlite3*,int); - int (*wal_checkpoint)(sqlite3*,const char*); - void *(*wal_hook)(sqlite3*,int(*)(void*,sqlite3*,const char*,int),void*); - int (*blob_reopen)(sqlite3_blob*,sqlite3_int64); - int (*vtab_config)(sqlite3*,int op,...); - int (*vtab_on_conflict)(sqlite3*); - /* Version 3.7.16 and later */ - int (*close_v2)(sqlite3*); - const char *(*db_filename)(sqlite3*,const char*); - int (*db_readonly)(sqlite3*,const char*); - int (*db_release_memory)(sqlite3*); - const char 
*(*errstr)(int); - int (*stmt_busy)(sqlite3_stmt*); - int (*stmt_readonly)(sqlite3_stmt*); - int (*stricmp)(const char*,const char*); - int (*uri_boolean)(const char*,const char*,int); - sqlite3_int64 (*uri_int64)(const char*,const char*,sqlite3_int64); - const char *(*uri_parameter)(const char*,const char*); - char *(*vsnprintf)(int,char*,const char*,va_list); - int (*wal_checkpoint_v2)(sqlite3*,const char*,int,int*,int*); -}; - -/* -** The following macros redefine the API routines so that they are -** redirected throught the global sqlite3_api structure. -** -** This header file is also used by the loadext.c source file -** (part of the main SQLite library - not an extension) so that -** it can get access to the sqlite3_api_routines structure -** definition. But the main library does not want to redefine -** the API. So the redefinition macros are only valid if the -** SQLITE_CORE macros is undefined. -*/ -#ifndef SQLITE_CORE -#define sqlite3_aggregate_context sqlite3_api->aggregate_context -#ifndef SQLITE_OMIT_DEPRECATED -#define sqlite3_aggregate_count sqlite3_api->aggregate_count -#endif -#define sqlite3_bind_blob sqlite3_api->bind_blob -#define sqlite3_bind_double sqlite3_api->bind_double -#define sqlite3_bind_int sqlite3_api->bind_int -#define sqlite3_bind_int64 sqlite3_api->bind_int64 -#define sqlite3_bind_null sqlite3_api->bind_null -#define sqlite3_bind_parameter_count sqlite3_api->bind_parameter_count -#define sqlite3_bind_parameter_index sqlite3_api->bind_parameter_index -#define sqlite3_bind_parameter_name sqlite3_api->bind_parameter_name -#define sqlite3_bind_text sqlite3_api->bind_text -#define sqlite3_bind_text16 sqlite3_api->bind_text16 -#define sqlite3_bind_value sqlite3_api->bind_value -#define sqlite3_busy_handler sqlite3_api->busy_handler -#define sqlite3_busy_timeout sqlite3_api->busy_timeout -#define sqlite3_changes sqlite3_api->changes -#define sqlite3_close sqlite3_api->close -#define sqlite3_collation_needed sqlite3_api->collation_needed -#define sqlite3_collation_needed16 sqlite3_api->collation_needed16 -#define sqlite3_column_blob sqlite3_api->column_blob -#define sqlite3_column_bytes sqlite3_api->column_bytes -#define sqlite3_column_bytes16 sqlite3_api->column_bytes16 -#define sqlite3_column_count sqlite3_api->column_count -#define sqlite3_column_database_name sqlite3_api->column_database_name -#define sqlite3_column_database_name16 sqlite3_api->column_database_name16 -#define sqlite3_column_decltype sqlite3_api->column_decltype -#define sqlite3_column_decltype16 sqlite3_api->column_decltype16 -#define sqlite3_column_double sqlite3_api->column_double -#define sqlite3_column_int sqlite3_api->column_int -#define sqlite3_column_int64 sqlite3_api->column_int64 -#define sqlite3_column_name sqlite3_api->column_name -#define sqlite3_column_name16 sqlite3_api->column_name16 -#define sqlite3_column_origin_name sqlite3_api->column_origin_name -#define sqlite3_column_origin_name16 sqlite3_api->column_origin_name16 -#define sqlite3_column_table_name sqlite3_api->column_table_name -#define sqlite3_column_table_name16 sqlite3_api->column_table_name16 -#define sqlite3_column_text sqlite3_api->column_text -#define sqlite3_column_text16 sqlite3_api->column_text16 -#define sqlite3_column_type sqlite3_api->column_type -#define sqlite3_column_value sqlite3_api->column_value -#define sqlite3_commit_hook sqlite3_api->commit_hook -#define sqlite3_complete sqlite3_api->complete -#define sqlite3_complete16 sqlite3_api->complete16 -#define sqlite3_create_collation 
sqlite3_api->create_collation -#define sqlite3_create_collation16 sqlite3_api->create_collation16 -#define sqlite3_create_function sqlite3_api->create_function -#define sqlite3_create_function16 sqlite3_api->create_function16 -#define sqlite3_create_module sqlite3_api->create_module -#define sqlite3_create_module_v2 sqlite3_api->create_module_v2 -#define sqlite3_data_count sqlite3_api->data_count -#define sqlite3_db_handle sqlite3_api->db_handle -#define sqlite3_declare_vtab sqlite3_api->declare_vtab -#define sqlite3_enable_shared_cache sqlite3_api->enable_shared_cache -#define sqlite3_errcode sqlite3_api->errcode -#define sqlite3_errmsg sqlite3_api->errmsg -#define sqlite3_errmsg16 sqlite3_api->errmsg16 -#define sqlite3_exec sqlite3_api->exec -#ifndef SQLITE_OMIT_DEPRECATED -#define sqlite3_expired sqlite3_api->expired -#endif -#define sqlite3_finalize sqlite3_api->finalize -#define sqlite3_free sqlite3_api->free -#define sqlite3_free_table sqlite3_api->free_table -#define sqlite3_get_autocommit sqlite3_api->get_autocommit -#define sqlite3_get_auxdata sqlite3_api->get_auxdata -#define sqlite3_get_table sqlite3_api->get_table -#ifndef SQLITE_OMIT_DEPRECATED -#define sqlite3_global_recover sqlite3_api->global_recover -#endif -#define sqlite3_interrupt sqlite3_api->interruptx -#define sqlite3_last_insert_rowid sqlite3_api->last_insert_rowid -#define sqlite3_libversion sqlite3_api->libversion -#define sqlite3_libversion_number sqlite3_api->libversion_number -#define sqlite3_malloc sqlite3_api->malloc -#define sqlite3_mprintf sqlite3_api->mprintf -#define sqlite3_open sqlite3_api->open -#define sqlite3_open16 sqlite3_api->open16 -#define sqlite3_prepare sqlite3_api->prepare -#define sqlite3_prepare16 sqlite3_api->prepare16 -#define sqlite3_prepare_v2 sqlite3_api->prepare_v2 -#define sqlite3_prepare16_v2 sqlite3_api->prepare16_v2 -#define sqlite3_profile sqlite3_api->profile -#define sqlite3_progress_handler sqlite3_api->progress_handler -#define sqlite3_realloc sqlite3_api->realloc -#define sqlite3_reset sqlite3_api->reset -#define sqlite3_result_blob sqlite3_api->result_blob -#define sqlite3_result_double sqlite3_api->result_double -#define sqlite3_result_error sqlite3_api->result_error -#define sqlite3_result_error16 sqlite3_api->result_error16 -#define sqlite3_result_int sqlite3_api->result_int -#define sqlite3_result_int64 sqlite3_api->result_int64 -#define sqlite3_result_null sqlite3_api->result_null -#define sqlite3_result_text sqlite3_api->result_text -#define sqlite3_result_text16 sqlite3_api->result_text16 -#define sqlite3_result_text16be sqlite3_api->result_text16be -#define sqlite3_result_text16le sqlite3_api->result_text16le -#define sqlite3_result_value sqlite3_api->result_value -#define sqlite3_rollback_hook sqlite3_api->rollback_hook -#define sqlite3_set_authorizer sqlite3_api->set_authorizer -#define sqlite3_set_auxdata sqlite3_api->set_auxdata -#define sqlite3_snprintf sqlite3_api->snprintf -#define sqlite3_step sqlite3_api->step -#define sqlite3_table_column_metadata sqlite3_api->table_column_metadata -#define sqlite3_thread_cleanup sqlite3_api->thread_cleanup -#define sqlite3_total_changes sqlite3_api->total_changes -#define sqlite3_trace sqlite3_api->trace -#ifndef SQLITE_OMIT_DEPRECATED -#define sqlite3_transfer_bindings sqlite3_api->transfer_bindings -#endif -#define sqlite3_update_hook sqlite3_api->update_hook -#define sqlite3_user_data sqlite3_api->user_data -#define sqlite3_value_blob sqlite3_api->value_blob -#define sqlite3_value_bytes sqlite3_api->value_bytes 
-#define sqlite3_value_bytes16 sqlite3_api->value_bytes16 -#define sqlite3_value_double sqlite3_api->value_double -#define sqlite3_value_int sqlite3_api->value_int -#define sqlite3_value_int64 sqlite3_api->value_int64 -#define sqlite3_value_numeric_type sqlite3_api->value_numeric_type -#define sqlite3_value_text sqlite3_api->value_text -#define sqlite3_value_text16 sqlite3_api->value_text16 -#define sqlite3_value_text16be sqlite3_api->value_text16be -#define sqlite3_value_text16le sqlite3_api->value_text16le -#define sqlite3_value_type sqlite3_api->value_type -#define sqlite3_vmprintf sqlite3_api->vmprintf -#define sqlite3_overload_function sqlite3_api->overload_function -#define sqlite3_prepare_v2 sqlite3_api->prepare_v2 -#define sqlite3_prepare16_v2 sqlite3_api->prepare16_v2 -#define sqlite3_clear_bindings sqlite3_api->clear_bindings -#define sqlite3_bind_zeroblob sqlite3_api->bind_zeroblob -#define sqlite3_blob_bytes sqlite3_api->blob_bytes -#define sqlite3_blob_close sqlite3_api->blob_close -#define sqlite3_blob_open sqlite3_api->blob_open -#define sqlite3_blob_read sqlite3_api->blob_read -#define sqlite3_blob_write sqlite3_api->blob_write -#define sqlite3_create_collation_v2 sqlite3_api->create_collation_v2 -#define sqlite3_file_control sqlite3_api->file_control -#define sqlite3_memory_highwater sqlite3_api->memory_highwater -#define sqlite3_memory_used sqlite3_api->memory_used -#define sqlite3_mutex_alloc sqlite3_api->mutex_alloc -#define sqlite3_mutex_enter sqlite3_api->mutex_enter -#define sqlite3_mutex_free sqlite3_api->mutex_free -#define sqlite3_mutex_leave sqlite3_api->mutex_leave -#define sqlite3_mutex_try sqlite3_api->mutex_try -#define sqlite3_open_v2 sqlite3_api->open_v2 -#define sqlite3_release_memory sqlite3_api->release_memory -#define sqlite3_result_error_nomem sqlite3_api->result_error_nomem -#define sqlite3_result_error_toobig sqlite3_api->result_error_toobig -#define sqlite3_sleep sqlite3_api->sleep -#define sqlite3_soft_heap_limit sqlite3_api->soft_heap_limit -#define sqlite3_vfs_find sqlite3_api->vfs_find -#define sqlite3_vfs_register sqlite3_api->vfs_register -#define sqlite3_vfs_unregister sqlite3_api->vfs_unregister -#define sqlite3_threadsafe sqlite3_api->xthreadsafe -#define sqlite3_result_zeroblob sqlite3_api->result_zeroblob -#define sqlite3_result_error_code sqlite3_api->result_error_code -#define sqlite3_test_control sqlite3_api->test_control -#define sqlite3_randomness sqlite3_api->randomness -#define sqlite3_context_db_handle sqlite3_api->context_db_handle -#define sqlite3_extended_result_codes sqlite3_api->extended_result_codes -#define sqlite3_limit sqlite3_api->limit -#define sqlite3_next_stmt sqlite3_api->next_stmt -#define sqlite3_sql sqlite3_api->sql -#define sqlite3_status sqlite3_api->status -#define sqlite3_backup_finish sqlite3_api->backup_finish -#define sqlite3_backup_init sqlite3_api->backup_init -#define sqlite3_backup_pagecount sqlite3_api->backup_pagecount -#define sqlite3_backup_remaining sqlite3_api->backup_remaining -#define sqlite3_backup_step sqlite3_api->backup_step -#define sqlite3_compileoption_get sqlite3_api->compileoption_get -#define sqlite3_compileoption_used sqlite3_api->compileoption_used -#define sqlite3_create_function_v2 sqlite3_api->create_function_v2 -#define sqlite3_db_config sqlite3_api->db_config -#define sqlite3_db_mutex sqlite3_api->db_mutex -#define sqlite3_db_status sqlite3_api->db_status -#define sqlite3_extended_errcode sqlite3_api->extended_errcode -#define sqlite3_log sqlite3_api->log -#define 
sqlite3_soft_heap_limit64 sqlite3_api->soft_heap_limit64 -#define sqlite3_sourceid sqlite3_api->sourceid -#define sqlite3_stmt_status sqlite3_api->stmt_status -#define sqlite3_strnicmp sqlite3_api->strnicmp -#define sqlite3_unlock_notify sqlite3_api->unlock_notify -#define sqlite3_wal_autocheckpoint sqlite3_api->wal_autocheckpoint -#define sqlite3_wal_checkpoint sqlite3_api->wal_checkpoint -#define sqlite3_wal_hook sqlite3_api->wal_hook -#define sqlite3_blob_reopen sqlite3_api->blob_reopen -#define sqlite3_vtab_config sqlite3_api->vtab_config -#define sqlite3_vtab_on_conflict sqlite3_api->vtab_on_conflict -/* Version 3.7.16 and later */ -#define sqlite3_close_v2 sqlite3_api->close_v2 -#define sqlite3_db_filename sqlite3_api->db_filename -#define sqlite3_db_readonly sqlite3_api->db_readonly -#define sqlite3_db_release_memory sqlite3_api->db_release_memory -#define sqlite3_errstr sqlite3_api->errstr -#define sqlite3_stmt_busy sqlite3_api->stmt_busy -#define sqlite3_stmt_readonly sqlite3_api->stmt_readonly -#define sqlite3_stricmp sqlite3_api->stricmp -#define sqlite3_uri_boolean sqlite3_api->uri_boolean -#define sqlite3_uri_int64 sqlite3_api->uri_int64 -#define sqlite3_uri_parameter sqlite3_api->uri_parameter -#define sqlite3_uri_vsnprintf sqlite3_api->vsnprintf -#define sqlite3_wal_checkpoint_v2 sqlite3_api->wal_checkpoint_v2 -#endif /* SQLITE_CORE */ - -#ifndef SQLITE_CORE - /* This case when the file really is being compiled as a loadable - ** extension */ -# define SQLITE_EXTENSION_INIT1 const sqlite3_api_routines *sqlite3_api=0; -# define SQLITE_EXTENSION_INIT2(v) sqlite3_api=v; -# define SQLITE_EXTENSION_INIT3 \ - extern const sqlite3_api_routines *sqlite3_api; -#else - /* This case when the file is being statically linked into the - ** application */ -# define SQLITE_EXTENSION_INIT1 /*no-op*/ -# define SQLITE_EXTENSION_INIT2(v) (void)v; /* unused parameter */ -# define SQLITE_EXTENSION_INIT3 /*no-op*/ -#endif - -#endif /* _SQLITE3EXT_H_ */ diff --git a/vendor/github.com/rubenv/sql-migrate/README.md b/vendor/github.com/rubenv/sql-migrate/README.md deleted file mode 100644 index 897a5f6..0000000 --- a/vendor/github.com/rubenv/sql-migrate/README.md +++ /dev/null @@ -1,245 +0,0 @@ -# sql-migrate - -> SQL Schema migration tool for [Go](http://golang.org/). Based on [gorp](https://github.com/go-gorp/gorp) and [goose](https://bitbucket.org/liamstask/goose). - -[![Build Status](https://travis-ci.org/rubenv/sql-migrate.svg?branch=master)](https://travis-ci.org/rubenv/sql-migrate) [![GoDoc](https://godoc.org/github.com/rubenv/sql-migrate?status.png)](https://godoc.org/github.com/rubenv/sql-migrate) - -Using [modl](https://github.com/jmoiron/modl)? Check out [modl-migrate](https://github.com/rubenv/modl-migrate). - -## Features - -* Usable as a CLI tool or as a library -* Supports SQLite, PostgreSQL, MySQL, MSSQL and Oracle databases (through [gorp](https://github.com/go-gorp/gorp)) -* Can embed migrations into your application -* Migrations are defined with SQL for full flexibility -* Atomic migrations -* Up/down migrations to allow rollback -* Supports multiple database types in one project - -## Installation - -To install the library and command line program, use the following: - -```bash -go get github.com/rubenv/sql-migrate/... 
-``` - -## Usage -### As a standalone tool -``` -$ sql-migrate --help -usage: sql-migrate [--version] [--help] [] - -Available commands are: - down Undo a database migration - redo Reapply the last migration - status Show migration status - up Migrates the database to the most recent version available -``` - -Each command requires a configuration file (which defaults to `dbconfig.yml`, but can be specified with the `-config` flag). This config file should specify one or more environments: - -```yml -development: - dialect: sqlite3 - datasource: test.db - dir: migrations/sqlite3 - -production: - dialect: postgres - datasource: dbname=myapp sslmode=disable - dir: migrations/postgres - table: migrations -``` - -The `table` setting is optional and will default to `gorp_migrations`. - -The environment that will be used can be specified with the `-env` flag (defaults to `development`). - -Use the `--help` flag in combination with any of the commands to get an overview of its usage: - -``` -$ sql-migrate up --help -Usage: sql-migrate up [options] ... - - Migrates the database to the most recent version available. - -Options: - - -config=config.yml Configuration file to use. - -env="development" Environment. - -limit=0 Limit the number of migrations (0 = unlimited). - -dryrun Don't apply migrations, just print them. -``` - -The `up` command applies all available migrations. By contrast, `down` will only apply one migration by default. This behavior can be changed for both by using the `-limit` parameter. - -The `redo` command will unapply the last migration and reapply it. This is useful during development, when you're writing migrations. - -Use the `status` command to see the state of the applied migrations: - -```bash -$ sql-migrate status -+---------------+-----------------------------------------+ -| MIGRATION | APPLIED | -+---------------+-----------------------------------------+ -| 1_initial.sql | 2014-09-13 08:19:06.788354925 +0000 UTC | -| 2_record.sql | no | -+---------------+-----------------------------------------+ -``` - -### As a library -Import sql-migrate into your application: - -```go -import "github.com/rubenv/sql-migrate" -``` - -Set up a source of migrations, this can be from memory, from a set of files or from bindata (more on that later): - -```go -// Hardcoded strings in memory: -migrations := &migrate.MemoryMigrationSource{ - Migrations: []*migrate.Migration{ - &migrate.Migration{ - Id: "123", - Up: []string{"CREATE TABLE people (id int)"}, - Down: []string{"DROP TABLE people"}, - }, - }, -} - -// OR: Read migrations from a folder: -migrations := &migrate.FileMigrationSource{ - Dir: "db/migrations", -} - -// OR: Use migrations from bindata: -migrations := &migrate.AssetMigrationSource{ - Asset: Asset, - AssetDir: AssetDir, - Dir: "migrations", -} -``` - -Then use the `Exec` function to upgrade your database: - -```go -db, err := sql.Open("sqlite3", filename) -if err != nil { - // Handle errors! -} - -n, err := migrate.Exec(db, "sqlite3", migrations, migrate.Up) -if err != nil { - // Handle errors! -} -fmt.Printf("Applied %d migrations!\n", n) -``` - -Note that `n` can be greater than `0` even if there is an error: any migration that succeeded will remain applied even if a later one fails. - -Check [the GoDoc reference](https://godoc.org/github.com/rubenv/sql-migrate) for the full documentation. - -## Writing migrations -Migrations are defined in SQL files, which contain a set of SQL statements. Special comments are used to distinguish up and down migrations. 
- -```sql --- +migrate Up --- SQL in section 'Up' is executed when this migration is applied -CREATE TABLE people (id int); - - --- +migrate Down --- SQL section 'Down' is executed when this migration is rolled back -DROP TABLE people; -``` - -You can put multiple statements in each block, as long as you end them with a semicolon (`;`). - -If you have complex statements which contain semicolons, use `StatementBegin` and `StatementEnd` to indicate boundaries: - -```sql --- +migrate Up -CREATE TABLE people (id int); - --- +migrate StatementBegin -CREATE OR REPLACE FUNCTION do_something() -returns void AS $$ -DECLARE - create_query text; -BEGIN - -- Do something here -END; -$$ -language plpgsql; --- +migrate StatementEnd - --- +migrate Down -DROP FUNCTION do_something(); -DROP TABLE people; -``` - -The order in which migrations are applied is defined through the filename: sql-migrate will sort migrations based on their name. It's recommended to use an increasing version number or a timestamp as the first part of the filename. - -## Embedding migrations with [bindata](https://github.com/jteeuwen/go-bindata) -If you like your Go applications self-contained (that is: a single binary): use [bindata](https://github.com/jteeuwen/go-bindata) to embed the migration files. - -Just write your migration files as usual, as a set of SQL files in a folder. - -Then use bindata to generate a `.go` file with the migrations embedded: - -```bash -go-bindata -pkg myapp -o bindata.go db/migrations/ -``` - -The resulting `bindata.go` file will contain your migrations. Remember to regenerate your `bindata.go` file whenever you add/modify a migration (`go generate` will help here, once it arrives). - -Use the `AssetMigrationSource` in your application to find the migrations: - -```go -migrations := &migrate.AssetMigrationSource{ - Asset: Asset, - AssetDir: AssetDir, - Dir: "db/migrations", -} -``` - -Both `Asset` and `AssetDir` are functions provided by bindata. - -Then proceed as usual. - -## Extending -Adding a new migration source means implementing `MigrationSource`. - -```go -type MigrationSource interface { - FindMigrations() ([]*Migration, error) -} -``` - -The resulting slice of migrations will be executed in the given order, so it should usually be sorted by the `Id` field. - -## License - - (The MIT License) - - Copyright (C) 2014-2015 by Ruben Vermeersch - - Permission is hereby granted, free of charge, to any person obtaining a copy - of this software and associated documentation files (the "Software"), to deal - in the Software without restriction, including without limitation the rights - to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - copies of the Software, and to permit persons to whom the Software is - furnished to do so, subject to the following conditions: - - The above copyright notice and this permission notice shall be included in - all copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN - THE SOFTWARE. 
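
The README removed above shows the basic `Exec` call; the `-limit` behaviour it describes is exposed in the library as `ExecMax`, and the migration table can be renamed with `SetTable`. The following is a minimal, illustrative sketch of wiring those pieces together, based only on the API being deleted in this diff; the SQLite driver import, database filename, and migrations directory are assumptions, not part of the vendored code.

```go
package main

import (
	"database/sql"
	"fmt"
	"log"

	_ "github.com/mattn/go-sqlite3" // driver choice is illustrative
	migrate "github.com/rubenv/sql-migrate"
)

func main() {
	// Read *.sql migration files from a folder; the path is illustrative.
	migrations := &migrate.FileMigrationSource{Dir: "db/migrations"}

	db, err := sql.Open("sqlite3", "app.sqlite")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Record applied migrations in "migrations" instead of the
	// default "gorp_migrations" table.
	migrate.SetTable("migrations")

	// Apply at most one pending migration, mirroring `-limit=1`.
	n, err := migrate.ExecMax(db, "sqlite3", migrations, migrate.Up, 1)
	if err != nil {
		log.Fatalf("applied %d migrations before failing: %v", n, err)
	}
	fmt.Printf("Applied %d migrations\n", n)
}
```

Passing `0` as the limit (or calling `Exec`) applies every pending migration, matching the default behaviour of the `up` command described above.
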
diff --git a/vendor/github.com/rubenv/sql-migrate/doc.go b/vendor/github.com/rubenv/sql-migrate/doc.go deleted file mode 100644 index 63db39f..0000000 --- a/vendor/github.com/rubenv/sql-migrate/doc.go +++ /dev/null @@ -1,199 +0,0 @@ -/* - -SQL Schema migration tool for Go. - -Key features: - - * Usable as a CLI tool or as a library - * Supports SQLite, PostgreSQL, MySQL, MSSQL and Oracle databases (through gorp) - * Can embed migrations into your application - * Migrations are defined with SQL for full flexibility - * Atomic migrations - * Up/down migrations to allow rollback - * Supports multiple database types in one project - -Installation - -To install the library and command line program, use the following: - - go get github.com/rubenv/sql-migrate/... - -Command-line tool - -The main command is called sql-migrate. - - $ sql-migrate --help - usage: sql-migrate [--version] [--help] [] - - Available commands are: - down Undo a database migration - redo Reapply the last migration - status Show migration status - up Migrates the database to the most recent version available - -Each command requires a configuration file (which defaults to dbconfig.yml, but can be specified with the -config flag). This config file should specify one or more environments: - - development: - dialect: sqlite3 - datasource: test.db - dir: migrations/sqlite3 - - production: - dialect: postgres - datasource: dbname=myapp sslmode=disable - dir: migrations/postgres - table: migrations - -The `table` setting is optional and will default to `gorp_migrations`. - -The environment that will be used can be specified with the -env flag (defaults to development). - -Use the --help flag in combination with any of the commands to get an overview of its usage: - - $ sql-migrate up --help - Usage: sql-migrate up [options] ... - - Migrates the database to the most recent version available. - - Options: - - -config=config.yml Configuration file to use. - -env="development" Environment. - -limit=0 Limit the number of migrations (0 = unlimited). - -dryrun Don't apply migrations, just print them. - -The up command applies all available migrations. By contrast, down will only apply one migration by default. This behavior can be changed for both by using the -limit parameter. - -The redo command will unapply the last migration and reapply it. This is useful during development, when you're writing migrations. 
- -Use the status command to see the state of the applied migrations: - - $ sql-migrate status - +---------------+-----------------------------------------+ - | MIGRATION | APPLIED | - +---------------+-----------------------------------------+ - | 1_initial.sql | 2014-09-13 08:19:06.788354925 +0000 UTC | - | 2_record.sql | no | - +---------------+-----------------------------------------+ - -Library - -Import sql-migrate into your application: - - import "github.com/rubenv/sql-migrate" - -Set up a source of migrations, this can be from memory, from a set of files or from bindata (more on that later): - - // Hardcoded strings in memory: - migrations := &migrate.MemoryMigrationSource{ - Migrations: []*migrate.Migration{ - &migrate.Migration{ - Id: "123", - Up: []string{"CREATE TABLE people (id int)"}, - Down: []string{"DROP TABLE people"}, - }, - }, - } - - // OR: Read migrations from a folder: - migrations := &migrate.FileMigrationSource{ - Dir: "db/migrations", - } - - // OR: Use migrations from bindata: - migrations := &migrate.AssetMigrationSource{ - Asset: Asset, - AssetDir: AssetDir, - Dir: "migrations", - } - -Then use the Exec function to upgrade your database: - - db, err := sql.Open("sqlite3", filename) - if err != nil { - // Handle errors! - } - - n, err := migrate.Exec(db, "sqlite3", migrations, migrate.Up) - if err != nil { - // Handle errors! - } - fmt.Printf("Applied %d migrations!\n", n) - -Note that n can be greater than 0 even if there is an error: any migration that succeeded will remain applied even if a later one fails. - -The full set of capabilities can be found in the API docs below. - -Writing migrations - -Migrations are defined in SQL files, which contain a set of SQL statements. Special comments are used to distinguish up and down migrations. - - -- +migrate Up - -- SQL in section 'Up' is executed when this migration is applied - CREATE TABLE people (id int); - - - -- +migrate Down - -- SQL section 'Down' is executed when this migration is rolled back - DROP TABLE people; - -You can put multiple statements in each block, as long as you end them with a semicolon (;). - -If you have complex statements which contain semicolons, use StatementBegin and StatementEnd to indicate boundaries: - - -- +migrate Up - CREATE TABLE people (id int); - - -- +migrate StatementBegin - CREATE OR REPLACE FUNCTION do_something() - returns void AS $$ - DECLARE - create_query text; - BEGIN - -- Do something here - END; - $$ - language plpgsql; - -- +migrate StatementEnd - - -- +migrate Down - DROP FUNCTION do_something(); - DROP TABLE people; - -The order in which migrations are applied is defined through the filename: sql-migrate will sort migrations based on their name. It's recommended to use an increasing version number or a timestamp as the first part of the filename. - -Embedding migrations with bindata - -If you like your Go applications self-contained (that is: a single binary): use bindata (https://github.com/jteeuwen/go-bindata) to embed the migration files. - -Just write your migration files as usual, as a set of SQL files in a folder. - -Then use bindata to generate a .go file with the migrations embedded: - - go-bindata -pkg myapp -o bindata.go db/migrations/ - -The resulting bindata.go file will contain your migrations. Remember to regenerate your bindata.go file whenever you add/modify a migration (go generate will help here, once it arrives). 
- -Use the AssetMigrationSource in your application to find the migrations: - - migrations := &migrate.AssetMigrationSource{ - Asset: Asset, - AssetDir: AssetDir, - Dir: "db/migrations", - } - -Both Asset and AssetDir are functions provided by bindata. - -Then proceed as usual. - -Extending - -Adding a new migration source means implementing MigrationSource. - - type MigrationSource interface { - FindMigrations() ([]*Migration, error) - } - -The resulting slice of migrations will be executed in the given order, so it should usually be sorted by the Id field. -*/ -package migrate diff --git a/vendor/github.com/rubenv/sql-migrate/migrate.go b/vendor/github.com/rubenv/sql-migrate/migrate.go deleted file mode 100644 index 4f36982..0000000 --- a/vendor/github.com/rubenv/sql-migrate/migrate.go +++ /dev/null @@ -1,475 +0,0 @@ -package migrate - -import ( - "bytes" - "database/sql" - "errors" - "fmt" - "io" - "os" - "path" - "regexp" - "sort" - "strconv" - "strings" - "time" - - "github.com/rubenv/sql-migrate/sqlparse" - "gopkg.in/gorp.v1" -) - -type MigrationDirection int - -const ( - Up MigrationDirection = iota - Down -) - -var tableName = "gorp_migrations" -var schemaName = "" -var numberPrefixRegex = regexp.MustCompile(`^(\d+).*$`) - -// Set the name of the table used to store migration info. -// -// Should be called before any other call such as (Exec, ExecMax, ...). -func SetTable(name string) { - if name != "" { - tableName = name - } -} - -// SetSchema sets the name of a schema that the migration table be referenced. -func SetSchema(name string) { - if name != "" { - schemaName = name - } -} - -func getTableName() string { - t := tableName - if schemaName != "" { - t = fmt.Sprintf("%s.%s", schemaName, t) - } - - return t -} - -type Migration struct { - Id string - Up []string - Down []string -} - -func (m Migration) Less(other *Migration) bool { - switch { - case m.isNumeric() && other.isNumeric(): - return m.VersionInt() < other.VersionInt() - case m.isNumeric() && !other.isNumeric(): - return true - case !m.isNumeric() && other.isNumeric(): - return false - default: - return m.Id < other.Id - } -} - -func (m Migration) isNumeric() bool { - return len(m.NumberPrefixMatches()) > 0 -} - -func (m Migration) NumberPrefixMatches() []string { - return numberPrefixRegex.FindStringSubmatch(m.Id) -} - -func (m Migration) VersionInt() int64 { - v := m.NumberPrefixMatches()[1] - value, err := strconv.ParseInt(v, 10, 64) - if err != nil { - panic(fmt.Sprintf("Could not parse %q into int64: %s", v, err)) - } - return value -} - -type PlannedMigration struct { - *Migration - Queries []string -} - -type byId []*Migration - -func (b byId) Len() int { return len(b) } -func (b byId) Swap(i, j int) { b[i], b[j] = b[j], b[i] } -func (b byId) Less(i, j int) bool { return b[i].Less(b[j]) } - -type MigrationRecord struct { - Id string `db:"id"` - AppliedAt time.Time `db:"applied_at"` -} - -var MigrationDialects = map[string]gorp.Dialect{ - "sqlite3": gorp.SqliteDialect{}, - "postgres": gorp.PostgresDialect{}, - "mysql": gorp.MySQLDialect{"InnoDB", "UTF8"}, - "mssql": gorp.SqlServerDialect{}, - "oci8": gorp.OracleDialect{}, -} - -type MigrationSource interface { - // Finds the migrations. - // - // The resulting slice of migrations should be sorted by Id. - FindMigrations() ([]*Migration, error) -} - -// A hardcoded set of migrations, in-memory. 
-type MemoryMigrationSource struct { - Migrations []*Migration -} - -var _ MigrationSource = (*MemoryMigrationSource)(nil) - -func (m MemoryMigrationSource) FindMigrations() ([]*Migration, error) { - // Make sure migrations are sorted - sort.Sort(byId(m.Migrations)) - - return m.Migrations, nil -} - -// A set of migrations loaded from a directory. -type FileMigrationSource struct { - Dir string -} - -var _ MigrationSource = (*FileMigrationSource)(nil) - -func (f FileMigrationSource) FindMigrations() ([]*Migration, error) { - migrations := make([]*Migration, 0) - - file, err := os.Open(f.Dir) - if err != nil { - return nil, err - } - - files, err := file.Readdir(0) - if err != nil { - return nil, err - } - - for _, info := range files { - if strings.HasSuffix(info.Name(), ".sql") { - file, err := os.Open(path.Join(f.Dir, info.Name())) - if err != nil { - return nil, err - } - - migration, err := ParseMigration(info.Name(), file) - if err != nil { - return nil, err - } - - migrations = append(migrations, migration) - } - } - - // Make sure migrations are sorted - sort.Sort(byId(migrations)) - - return migrations, nil -} - -// Migrations from a bindata asset set. -type AssetMigrationSource struct { - // Asset should return content of file in path if exists - Asset func(path string) ([]byte, error) - - // AssetDir should return list of files in the path - AssetDir func(path string) ([]string, error) - - // Path in the bindata to use. - Dir string -} - -var _ MigrationSource = (*AssetMigrationSource)(nil) - -func (a AssetMigrationSource) FindMigrations() ([]*Migration, error) { - migrations := make([]*Migration, 0) - - files, err := a.AssetDir(a.Dir) - if err != nil { - return nil, err - } - - for _, name := range files { - if strings.HasSuffix(name, ".sql") { - file, err := a.Asset(path.Join(a.Dir, name)) - if err != nil { - return nil, err - } - - migration, err := ParseMigration(name, bytes.NewReader(file)) - if err != nil { - return nil, err - } - - migrations = append(migrations, migration) - } - } - - // Make sure migrations are sorted - sort.Sort(byId(migrations)) - - return migrations, nil -} - -// Migration parsing -func ParseMigration(id string, r io.ReadSeeker) (*Migration, error) { - m := &Migration{ - Id: id, - } - - up, err := sqlparse.SplitSQLStatements(r, true) - if err != nil { - return nil, err - } - - down, err := sqlparse.SplitSQLStatements(r, false) - if err != nil { - return nil, err - } - - m.Up = up - m.Down = down - - return m, nil -} - -// Execute a set of migrations -// -// Returns the number of applied migrations. -func Exec(db *sql.DB, dialect string, m MigrationSource, dir MigrationDirection) (int, error) { - return ExecMax(db, dialect, m, dir, 0) -} - -// Execute a set of migrations -// -// Will apply at most `max` migrations. Pass 0 for no limit (or use Exec). -// -// Returns the number of applied migrations. 
-func ExecMax(db *sql.DB, dialect string, m MigrationSource, dir MigrationDirection, max int) (int, error) { - migrations, dbMap, err := PlanMigration(db, dialect, m, dir, max) - if err != nil { - return 0, err - } - - // Apply migrations - applied := 0 - for _, migration := range migrations { - trans, err := dbMap.Begin() - if err != nil { - return applied, err - } - - for _, stmt := range migration.Queries { - _, err := trans.Exec(stmt) - if err != nil { - trans.Rollback() - return applied, err - } - } - - if dir == Up { - err = trans.Insert(&MigrationRecord{ - Id: migration.Id, - AppliedAt: time.Now(), - }) - if err != nil { - return applied, err - } - } else if dir == Down { - _, err := trans.Delete(&MigrationRecord{ - Id: migration.Id, - }) - if err != nil { - return applied, err - } - } else { - panic("Not possible") - } - - err = trans.Commit() - if err != nil { - return applied, err - } - - applied++ - } - - return applied, nil -} - -// Plan a migration. -func PlanMigration(db *sql.DB, dialect string, m MigrationSource, dir MigrationDirection, max int) ([]*PlannedMigration, *gorp.DbMap, error) { - dbMap, err := getMigrationDbMap(db, dialect) - if err != nil { - return nil, nil, err - } - - migrations, err := m.FindMigrations() - if err != nil { - return nil, nil, err - } - - var migrationRecords []MigrationRecord - _, err = dbMap.Select(&migrationRecords, fmt.Sprintf("SELECT * FROM %s", getTableName())) - if err != nil { - return nil, nil, err - } - - // Sort migrations that have been run by Id. - var existingMigrations []*Migration - for _, migrationRecord := range migrationRecords { - existingMigrations = append(existingMigrations, &Migration{ - Id: migrationRecord.Id, - }) - } - sort.Sort(byId(existingMigrations)) - - // Get last migration that was run - record := &Migration{} - if len(existingMigrations) > 0 { - record = existingMigrations[len(existingMigrations)-1] - } - - result := make([]*PlannedMigration, 0) - - // Add missing migrations up to the last run migration. - // This can happen for example when merges happened. - if len(existingMigrations) > 0 { - result = append(result, ToCatchup(migrations, existingMigrations, record)...) - } - - // Figure out which migrations to apply - toApply := ToApply(migrations, record.Id, dir) - toApplyCount := len(toApply) - if max > 0 && max < toApplyCount { - toApplyCount = max - } - for _, v := range toApply[0:toApplyCount] { - - if dir == Up { - result = append(result, &PlannedMigration{ - Migration: v, - Queries: v.Up, - }) - } else if dir == Down { - result = append(result, &PlannedMigration{ - Migration: v, - Queries: v.Down, - }) - } - } - - return result, dbMap, nil -} - -// Filter a slice of migrations into ones that should be applied. 
-func ToApply(migrations []*Migration, current string, direction MigrationDirection) []*Migration { - var index = -1 - if current != "" { - for index < len(migrations)-1 { - index++ - if migrations[index].Id == current { - break - } - } - } - - if direction == Up { - return migrations[index+1:] - } else if direction == Down { - if index == -1 { - return []*Migration{} - } - - // Add in reverse order - toApply := make([]*Migration, index+1) - for i := 0; i < index+1; i++ { - toApply[index-i] = migrations[i] - } - return toApply - } - - panic("Not possible") -} - -func ToCatchup(migrations, existingMigrations []*Migration, lastRun *Migration) []*PlannedMigration { - missing := make([]*PlannedMigration, 0) - for _, migration := range migrations { - found := false - for _, existing := range existingMigrations { - if existing.Id == migration.Id { - found = true - break - } - } - if !found && migration.Less(lastRun) { - missing = append(missing, &PlannedMigration{Migration: migration, Queries: migration.Up}) - } - } - return missing -} - -func GetMigrationRecords(db *sql.DB, dialect string) ([]*MigrationRecord, error) { - dbMap, err := getMigrationDbMap(db, dialect) - if err != nil { - return nil, err - } - - var records []*MigrationRecord - query := fmt.Sprintf("SELECT * FROM %s ORDER BY id ASC", getTableName()) - _, err = dbMap.Select(&records, query) - if err != nil { - return nil, err - } - - return records, nil -} - -func getMigrationDbMap(db *sql.DB, dialect string) (*gorp.DbMap, error) { - d, ok := MigrationDialects[dialect] - if !ok { - return nil, fmt.Errorf("Unknown dialect: %s", dialect) - } - - // When using the mysql driver, make sure that the parseTime option is - // configured, otherwise it won't map time columns to time.Time. See - // https://github.com/rubenv/sql-migrate/issues/2 - if dialect == "mysql" { - var out *time.Time - err := db.QueryRow("SELECT NOW()").Scan(&out) - if err != nil { - if err.Error() == "sql: Scan error on column index 0: unsupported driver -> Scan pair: []uint8 -> *time.Time" { - return nil, errors.New(`Cannot parse dates. - -Make sure that the parseTime option is supplied to your database connection. -Check https://github.com/go-sql-driver/mysql#parsetime for more info.`) - } else { - return nil, err - } - } - } - - // Create migration database map - dbMap := &gorp.DbMap{Db: db, Dialect: d} - dbMap.AddTableWithNameAndSchema(MigrationRecord{}, schemaName, tableName).SetKeys(false, "Id") - //dbMap.TraceOn("", log.New(os.Stdout, "migrate: ", log.Lmicroseconds)) - - err := dbMap.CreateTablesIfNotExists() - if err != nil { - return nil, err - } - - return dbMap, nil -} - -// TODO: Run migration + record insert in transaction. diff --git a/vendor/github.com/rubenv/sql-migrate/sqlparse/README.md b/vendor/github.com/rubenv/sql-migrate/sqlparse/README.md deleted file mode 100644 index a41706a..0000000 --- a/vendor/github.com/rubenv/sql-migrate/sqlparse/README.md +++ /dev/null @@ -1,28 +0,0 @@ -# SQL migration parser - -Based on the [goose](https://bitbucket.org/liamstask/goose) migration parser. 
- -## License - - (The MIT License) - - Copyright (C) 2014 by Ruben Vermeersch - Copyright (C) 2012-2014 by Liam Staskawicz - - Permission is hereby granted, free of charge, to any person obtaining a copy - of this software and associated documentation files (the "Software"), to deal - in the Software without restriction, including without limitation the rights - to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - copies of the Software, and to permit persons to whom the Software is - furnished to do so, subject to the following conditions: - - The above copyright notice and this permission notice shall be included in - all copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN - THE SOFTWARE. diff --git a/vendor/github.com/rubenv/sql-migrate/sqlparse/sqlparse.go b/vendor/github.com/rubenv/sql-migrate/sqlparse/sqlparse.go deleted file mode 100644 index df95011..0000000 --- a/vendor/github.com/rubenv/sql-migrate/sqlparse/sqlparse.go +++ /dev/null @@ -1,128 +0,0 @@ -package sqlparse - -import ( - "bufio" - "bytes" - "errors" - "io" - - "strings" -) - -const sqlCmdPrefix = "-- +migrate " - -// Checks the line to see if the line has a statement-ending semicolon -// or if the line contains a double-dash comment. -func endsWithSemicolon(line string) bool { - - prev := "" - scanner := bufio.NewScanner(strings.NewReader(line)) - scanner.Split(bufio.ScanWords) - - for scanner.Scan() { - word := scanner.Text() - if strings.HasPrefix(word, "--") { - break - } - prev = word - } - - return strings.HasSuffix(prev, ";") -} - -// Split the given sql script into individual statements. -// -// The base case is to simply split on semicolons, as these -// naturally terminate a statement. -// -// However, more complex cases like pl/pgsql can have semicolons -// within a statement. For these cases, we provide the explicit annotations -// 'StatementBegin' and 'StatementEnd' to allow the script to -// tell us to ignore semicolons. 
-func SplitSQLStatements(r io.ReadSeeker, direction bool) ([]string, error) { - _, err := r.Seek(0, 0) - if err != nil { - return nil, err - } - - var buf bytes.Buffer - scanner := bufio.NewScanner(r) - - // track the count of each section - // so we can diagnose scripts with no annotations - upSections := 0 - downSections := 0 - - statementEnded := false - ignoreSemicolons := false - directionIsActive := false - - stmts := make([]string, 0) - - for scanner.Scan() { - - line := scanner.Text() - - // handle any migrate-specific commands - if strings.HasPrefix(line, sqlCmdPrefix) { - cmd := strings.TrimSpace(line[len(sqlCmdPrefix):]) - switch cmd { - case "Up": - directionIsActive = (direction == true) - upSections++ - break - - case "Down": - directionIsActive = (direction == false) - downSections++ - break - - case "StatementBegin": - if directionIsActive { - ignoreSemicolons = true - } - break - - case "StatementEnd": - if directionIsActive { - statementEnded = (ignoreSemicolons == true) - ignoreSemicolons = false - } - break - } - } - - if !directionIsActive { - continue - } - - if _, err := buf.WriteString(line + "\n"); err != nil { - return nil, err - } - - // Wrap up the two supported cases: 1) basic with semicolon; 2) psql statement - // Lines that end with semicolon that are in a statement block - // do not conclude statement. - if (!ignoreSemicolons && endsWithSemicolon(line)) || statementEnded { - statementEnded = false - stmts = append(stmts, buf.String()) - buf.Reset() - } - } - - if err := scanner.Err(); err != nil { - return nil, err - } - - // diagnose likely migration script errors - if ignoreSemicolons { - return nil, errors.New("ERROR: saw '-- +migrate StatementBegin' with no matching '-- +migrate StatementEnd'") - } - - if upSections == 0 && downSections == 0 { - return nil, errors.New(`ERROR: no Up/Down annotations found, so no statements were executed. - See https://github.com/rubenv/sql-migrate for details.`) - } - - return stmts, nil -} diff --git a/vendor/github.com/russross/meddler/README.md b/vendor/github.com/russross/meddler/README.md deleted file mode 100644 index a873045..0000000 --- a/vendor/github.com/russross/meddler/README.md +++ /dev/null @@ -1,352 +0,0 @@ -Meddler -======= - -Meddler is a small toolkit to take some of the tedium out of moving data -back and forth between sql queries and structs. - -It is not a complete ORM. It is intended to be lightweight way to add some -of the convenience of an ORM while leaving more control in the hands of the -programmer. - -Package docs are available at: - -* http://godoc.org/github.com/russross/meddler - -The package is housed on github, and the README there has more info: - -* http://github.com/russross/meddler - -This is currently configured for SQLite, MySQL, and PostgreSQL, but it -can be configured for use with other databases. If you use it -successfully with a different database, please contact me and I will -add it to the list of pre-configured databases. - -### DANGER - -Meddler is still a work in progress, and additional -backward-incompatible changes to the API are likely. The most recent -change added support for multiple database types and made it easier -to switch between them. This is most likely to affect the way you -initialize the library to work with your database (see the install -section below). - -Another recent update is the change to int64 for primary keys. This -matches the convention used in database/sql, and is more portable, -but it may require some minor changes to existing code. 
- - -Install -------- - -The usual `go get` command will put it in your `$GOPATH`: - - go get github.com/russross/meddler - -If you are only using one type of database, you should set Default -to match your database type, e.g.: - - meddler.Default = meddler.PostgreSQL - -The default database is MySQL, so you should change it for anything -else. To use multiple databases within a single project, or to use a -database other than MySQL, PostgreSQL, or SQLite, see below. - -Note: If you are using MySQL with the `github.com/go-sql-driver/mysql` -driver, you must set "parseTime=true" in the sql.Open call or the -time conversion meddlers will not work. - - -Why? ----- - -These are the features that set meddler apart from similar -libraries: - -* It uses standard database/sql types, and does not require - special fields in your structs. This lets you use meddler - selectively, without having to alter other database code already - in your project. After creating meddler, I incorporated it into - an existing project, and I was able to convert the code one - struct and one query at a time. -* It leaves query writing to you. It has convenience functions for - simple INSERT/UPDATE/SELECT queries by integer primary key, but - beyond that it stays out of query writing. -* It supports on-the-fly data transformations. If you have a map - or a slice in your struct, you can instruct meddler to - encode/decode using JSON or Gob automatically. If you have time - fields, you can have meddler automatically write them into the - database as UTC, and convert them to the local time zone on - reads. These processors are called “meddlers”, because they - meddle with the data instead of passing it through directly. -* NULL fields in the database can be read as zero values in the - struct, and zero values in the struct can be written as NULL - values. This is not always the right thing to do, but it is - often good enough and is much simpler than most alternatives. -* It exposes low-level hooks for more complex situations. If you - are writing a query that does not map well to the main helper - functions, you can still get some help by using the lower-level - functions to build your own helpers. - - -High-level functions --------------------- - -Meddler does not create or alter tables. It just provides a little -glue to make it easier to read and write structs as SQL rows. Start -by annotating a struct: - -``` go -type Person struct { - ID int `meddler:"id,pk"` - Name string `meddler:"name"` - Age int - salary int - Created time.Time `meddler:"created,localtime"` - Closed time.Time `meddler:",localtimez"` -} -``` - -Notes about this example: - -* If the optional tag is provided, the first field is the database - column name. Note that "Closed" does not provide a column name, - so it will default to "Closed". Likewise, if there is no tag, - the field name will be used. -* ID is marked as the primary key. Currently only integer primary - keys are supported. This is only relevant to Load, Save, Insert, - and Update, a few of the higher-level functions that need to - understand primary keys. Meddler assumes that pk fields have an - autoincrement mechanism set in the database. -* Age has a column name of "Age". A tag is only necessary when the - column name is not the same as the field name, or when you need - to select other options. -* salary is not an exported field, so meddler does not see it. It - will be ignored. -* Created is marked with "localtime". 
This means that it will be - converted to UTC when being saved, and back to the local time - zone when being loaded. -* Closed has a column name of "Closed", since the tag did not - specify anything different. Closed is marked as "localtimez". - This has the same properties as "localtime", except that the - zero time will be saved in the database as a null column (and - null values will be loaded as the zero time value). - -Meddler provides a few high-level functions (note: DB is an -interface that works with a *sql.DB or a *sql.Tx): - -* Load(db DB, table string, dst interface{}, pk int64) error - - This loads a single record by its primary key. For example: - - elt := new(Person) - err := meddler.Load(db, "person", elt, 15) - - db can be a *sql.DB or a *sql.Tx. The table is the name of the - table, pk is the primary key value, and dst is a pointer to the - struct where it should be stored. - - Note: this call requires that the struct have an integer primary - key field marked. - -* Insert(db DB, table string, src interface{}) error - - This inserts a new row into the database. If the struct value - has a primary key field, it must be zero (and will be omitted - from the insert statement, prompting a default autoincrement - value). - - elt := &Person{ - Name: "Alice", - Age: 22, - // ... - } - err := meddler.Insert(db, "person", elt) - // elt.ID is updated to the value assigned by the database - -* Update(db DB, table string, src interface{}) error - - This updates an existing row. It must have a primary key, which - must be non-zero. - - Note: this call requires that the struct have an integer primary - key field marked. - -* Save(db DB, table string, src interface{}) error - - Pick Insert or Update automatically. If there is a non-zero - primary key present, it uses Update, otherwise it uses Insert. - - Note: this call requires that the struct have an integer primary - key field marked. - -* QueryRow(db DB, dst interface{}, query string, args ...interface) error - - Perform the given query, and scan the single-row result into - dst, which must be a pointer to a struct. - - For example: - - elt := new(Person) - err := meddler.QueryRow(db, elt, "select * from person where name = ?", "bob") - -* QueryAll(db DB, dst interface{}, query string, args ...interface) error - - Perform the given query, and scan the results into dst, which - must be a pointer to a slice of struct pointers. - - For example: - - var people []*Person - err := meddler.QueryAll(db, &people, "select * from person") - -* Scan(rows *sql.Rows, dst interface{}) error - - Scans a single row of data into a struct, complete with - meddling. Can be called repeatedly to walk through all of the - rows in a result set. Returns sql.ErrNoRows when there is no - more data. - -* ScanRow(rows *sql.Rows, dst interface{}) error - - Similar to Scan, but guarantees that the rows object - is closed when it returns. Also returns sql.ErrNoRows if there - was no row. - -* ScanAll(rows *sql.Rows, dst interface{}) error - - Expects a pointer to a slice of structs/pointers to structs, and - appends as many elements as it finds in the row set. Closes the - row set when it is finished. Does not return sql.ErrNoRows on an - empty set; instead it just does not add anything to the slice. - -Note: all of these functions can also be used as methods on Database -objects. When used as package functions, they use the Default -Database object, which is MySQL unless you change it. 
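
Putting the helpers above together, here is a small, self-contained sketch of the round trip this section describes. It reuses the annotated `Person` struct from earlier in the README and relies on the pre-defined SQLite `Database` struct mentioned above; the driver import, database filename, table schema, and `person` table name are illustrative assumptions rather than part of the vendored code.

```go
package main

import (
	"database/sql"
	"fmt"
	"log"
	"time"

	_ "github.com/mattn/go-sqlite3" // illustrative driver choice
	"github.com/russross/meddler"
)

// Person mirrors the annotated struct from the example above.
type Person struct {
	ID      int       `meddler:"id,pk"`
	Name    string    `meddler:"name"`
	Age     int
	Created time.Time `meddler:"created,localtime"`
}

func main() {
	meddler.Default = meddler.SQLite // match the database in use

	db, err := sql.Open("sqlite3", "app.sqlite")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Create the illustrative table (normally handled by migrations).
	if _, err := db.Exec(`CREATE TABLE IF NOT EXISTS person (
		id INTEGER PRIMARY KEY AUTOINCREMENT,
		name TEXT,
		Age INTEGER,
		created DATETIME
	)`); err != nil {
		log.Fatal(err)
	}

	// Insert writes the row and copies the autoincrement id back onto the struct.
	alice := &Person{Name: "Alice", Age: 22, Created: time.Now()}
	if err := meddler.Insert(db, "person", alice); err != nil {
		log.Fatal(err)
	}

	// Load fetches a single row by primary key.
	p := new(Person)
	if err := meddler.Load(db, "person", p, int64(alice.ID)); err != nil {
		log.Fatal(err)
	}

	// QueryAll scans an arbitrary query into a slice of struct pointers.
	var people []*Person
	if err := meddler.QueryAll(db, &people, "select * from person where Age > ?", 18); err != nil {
		log.Fatal(err)
	}
	fmt.Println(p.Name, len(people))
}
```

Note that `Load` takes the primary key as an `int64`, matching the convention called out in the DANGER section earlier in this README.
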
- - -Meddlers --------- - -A meddler is a handler that gets to meddle with a field before it is -saved, or when it is loaded. "localtime" and "localtimez" are -examples of built-in meddlers. The full list of built-in meddlers -includes: - -* identity: the default meddler, which does not do anything - -* localtime: for time.Time and *time.Time fields. Converts the - value to UTC on save, and back to the local time zone on loads. - To set your local time zone, make sure the TZ environment - variable is set when your program is launched, or use something - like: - - os.Setenv("TZ", "America/Denver") - - in your initial setup, before you start using time functions. - -* localtimez: same, but only for time.Time, and treats the zero - time as a null field (converts both ways) - -* utctime: similar to localtime, but keeps the value in UTC on - loads. This ensures that the time is always coverted to UTC on - save, which is the sane way to save time values in a database. - -* utctimez: same, but with zero time means null. - -* zeroisnull: for other types where a zero value should be - inserted as null, and null values should be read as zero values. - Works for integer, unsigned integer, float, complex number, and - string types. Note: not for pointer types. - -* json: marshals the field value into JSON when saving, and - unmarshals on load. - -* jsongzip: same, but compresses using gzip on save, and - uncompresses on load - -* gob: encodes the field value using Gob when saving, and - decodes on load. - -* gobgzip: same, but compresses using gzip on save, and - uncompresses on load - -You can implement custom meddlers as well by implementing the -Meddler interface. See the existing implementations in medder.go for -examples. - - -Working with different database types -------------------------------------- - -Meddler can work with multiple database types simultaneously. -Database-specific parameters are stored in a Database struct, and -structs are pre-defined for MySQL, PostgreSQL, and SQLite. - -Instead of relying on the package-level functions, use the method -form on the appropriate database type, e.g.: - - err = meddler.PostgreSQL.Load(...) - -instead of - - err = meddler.Load(...) - -Or to save typing, define your own abbreviated name for each -database: - - ms := meddler.MySQL - pg := meddler.PostgreSQL - err = ms.Load(...) - err = pg.QueryAll(...) - -If you need a different database, create your own Database instance -with the appropriate parameters set. If everything works okay, -please contact me with the parameters you used so I can add the new -database to the pre-defined list. - - -Lower-level functions ---------------------- - -If you are using more complex queries and just want to reduce the -tedium of reading and writing values, there are some lower-level -helper functions as well. See the package docs for details, and -see the implementations of the higher-level functions to see how -they are used. - - -License -------- - -Meddler is distributed under the BSD 2-Clause License. If this -license prevents you from using Meddler in your project, please -contact me and I will consider adding an additional license that is -better suited to your needs. - -> Copyright © 2013 Russ Ross. -> All rights reserved. -> -> Redistribution and use in source and binary forms, with or without -> modification, are permitted provided that the following conditions -> are met: -> -> 1. 
Redistributions of source code must retain the above copyright -> notice, this list of conditions and the following disclaimer. -> -> 2. Redistributions in binary form must reproduce the above -> copyright notice, this list of conditions and the following -> disclaimer in the documentation and/or other materials provided with -> the distribution. -> -> THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -> "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -> LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS -> FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE -> COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, -> INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, -> BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -> LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -> CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT -> LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN -> ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -> POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/russross/meddler/doc.go b/vendor/github.com/russross/meddler/doc.go deleted file mode 100644 index eb2738c..0000000 --- a/vendor/github.com/russross/meddler/doc.go +++ /dev/null @@ -1,18 +0,0 @@ -/* -Meddler is a small toolkit to take some of the tedium out of moving data -back and forth between sql queries and structs. - -It is not a complete ORM. It is intended to be lightweight way to add some -of the convenience of an ORM while leaving more control in the hands of the -programmer. - -Package docs are available at: - - http://godoc.org/github.com/russross/meddler - -The package is housed on github, and the README there has more info: - - http://github.com/russross/meddler - -*/ -package meddler diff --git a/vendor/github.com/russross/meddler/loadsave.go b/vendor/github.com/russross/meddler/loadsave.go deleted file mode 100644 index 27ed1a5..0000000 --- a/vendor/github.com/russross/meddler/loadsave.go +++ /dev/null @@ -1,228 +0,0 @@ -package meddler - -import ( - "database/sql" - "fmt" - "strings" -) - -// DB is a generic database interface, matching both *sql.Db and *sql.Tx -type DB interface { - Exec(query string, args ...interface{}) (sql.Result, error) - Query(query string, args ...interface{}) (*sql.Rows, error) - QueryRow(query string, args ...interface{}) *sql.Row -} - -// Load loads a record using a query for the primary key field. -// Returns sql.ErrNoRows if not found. -func (d *Database) Load(db DB, table string, dst interface{}, pk int64) error { - columns, err := d.ColumnsQuoted(dst, true) - if err != nil { - return err - } - - // make sure we have a primary key field - pkName, _, err := d.PrimaryKey(dst) - if err != nil { - return err - } - if pkName == "" { - return fmt.Errorf("meddler.Load: no primary key field found") - } - - // run the query - q := fmt.Sprintf("SELECT %s FROM %s WHERE %s = %s", columns, d.quoted(table), d.quoted(pkName), d.Placeholder) - - rows, err := db.Query(q, pk) - if err != nil { - return fmt.Errorf("meddler.Load: DB error in Query: %v", err) - } - - // scan the row - return d.ScanRow(rows, dst) -} - -// Load using the Default Database type -func Load(db DB, table string, dst interface{}, pk int64) error { - return Default.Load(db, table, dst, pk) -} - -// Insert performs an INSERT query for the given record. 
-// If the record has a primary key flagged, it must be zero, and it -// will be set to the newly-allocated primary key value from the database -// as returned by LastInsertId. -func (d *Database) Insert(db DB, table string, src interface{}) error { - pkName, pkValue, err := d.PrimaryKey(src) - if err != nil { - return err - } - if pkName != "" && pkValue != 0 { - return fmt.Errorf("meddler.Insert: primary key must be zero") - } - - // gather the query parts - namesPart, err := d.ColumnsQuoted(src, false) - if err != nil { - return err - } - valuesPart, err := d.PlaceholdersString(src, false) - if err != nil { - return err - } - values, err := d.Values(src, false) - if err != nil { - return err - } - - // run the query - q := fmt.Sprintf("INSERT INTO %s (%s) VALUES (%s)", d.quoted(table), namesPart, valuesPart) - if d.UseReturningToGetID && pkName != "" { - q += " RETURNING " + d.quoted(pkName) - var newPk int64 - err := db.QueryRow(q, values...).Scan(&newPk) - if err != nil { - return fmt.Errorf("meddler.Insert: DB error in QueryRow: %v", err) - } - if err = d.SetPrimaryKey(src, newPk); err != nil { - return fmt.Errorf("meddler.Insert: Error saving updated pk: %v", err) - } - } else if pkName != "" { - result, err := db.Exec(q, values...) - if err != nil { - return fmt.Errorf("meddler.Insert: DB error in Exec: %v", err) - } - - // save the new primary key - newPk, err := result.LastInsertId() - if err != nil { - return fmt.Errorf("meddler.Insert: DB error getting new primary key value: %v", err) - } - if err = d.SetPrimaryKey(src, newPk); err != nil { - return fmt.Errorf("meddler.Insert: Error saving updated pk: %v", err) - } - } else { - // no primary key, so no need to lookup new value - _, err := db.Exec(q, values...) - if err != nil { - return fmt.Errorf("meddler.Insert: DB error in Exec: %v", err) - } - } - - return nil -} - -// Insert using the Default Database type -func Insert(db DB, table string, src interface{}) error { - return Default.Insert(db, table, src) -} - -// Update performs and UPDATE query for the given record. -// The record must have an integer primary key field that is non-zero, -// and it will be used to select the database row that gets updated. 
-func (d *Database) Update(db DB, table string, src interface{}) error { - // gather the query parts - names, err := d.Columns(src, false) - if err != nil { - return err - } - placeholders, err := d.Placeholders(src, false) - if err != nil { - return err - } - values, err := d.Values(src, false) - if err != nil { - return err - } - - // form the column=placeholder pairs - var pairs []string - for i := 0; i < len(names) && i < len(placeholders); i++ { - pair := fmt.Sprintf("%s=%s", d.quoted(names[i]), placeholders[i]) - pairs = append(pairs, pair) - } - - pkName, pkValue, err := d.PrimaryKey(src) - if err != nil { - return err - } - if pkName == "" { - return fmt.Errorf("meddler.Update: no primary key field") - } - if pkValue < 1 { - return fmt.Errorf("meddler.Update: primary key must be an integer > 0") - } - ph := d.placeholder(len(placeholders) + 1) - - // run the query - q := fmt.Sprintf("UPDATE %s SET %s WHERE %s=%s", d.quoted(table), - strings.Join(pairs, ","), - d.quoted(pkName), ph) - values = append(values, pkValue) - - if _, err := db.Exec(q, values...); err != nil { - return fmt.Errorf("meddler.Update: DB error in Exec: %v", err) - } - - return nil -} - -// Update using the Default Database type -func Update(db DB, table string, src interface{}) error { - return Default.Update(db, table, src) -} - -// Save performs an INSERT or an UPDATE, depending on whether or not -// a primary keys exists and is non-zero. -func (d *Database) Save(db DB, table string, src interface{}) error { - pkName, pkValue, err := d.PrimaryKey(src) - if err != nil { - return err - } - if pkName != "" && pkValue != 0 { - return d.Update(db, table, src) - } else { - return d.Insert(db, table, src) - } -} - -// Save using the Default Database type -func Save(db DB, table string, src interface{}) error { - return Default.Save(db, table, src) -} - -// QueryOne performs the given query with the given arguments, scanning a -// single row of results into dst. Returns sql.ErrNoRows if there was no -// result row. -func (d *Database) QueryRow(db DB, dst interface{}, query string, args ...interface{}) error { - // perform the query - rows, err := db.Query(query, args...) - if err != nil { - return err - } - - // gather the result - return d.ScanRow(rows, dst) -} - -// QueryRow using the Default Database type -func QueryRow(db DB, dst interface{}, query string, args ...interface{}) error { - return Default.QueryRow(db, dst, query, args...) -} - -// QueryAll performs the given query with the given arguments, scanning -// all results rows into dst. -func (d *Database) QueryAll(db DB, dst interface{}, query string, args ...interface{}) error { - // perform the query - rows, err := db.Query(query, args...) - if err != nil { - return err - } - - // gather the results - return d.ScanAll(rows, dst) -} - -// QueryAll using the Default Database type -func QueryAll(db DB, dst interface{}, query string, args ...interface{}) error { - return Default.QueryAll(db, dst, query, args...) -} diff --git a/vendor/github.com/russross/meddler/meddler.go b/vendor/github.com/russross/meddler/meddler.go deleted file mode 100644 index ad41572..0000000 --- a/vendor/github.com/russross/meddler/meddler.go +++ /dev/null @@ -1,351 +0,0 @@ -package meddler - -import ( - "bytes" - "compress/gzip" - "encoding/gob" - "encoding/json" - "fmt" - "reflect" - "time" -) - -// Meddler is the interface for a field meddler. Implementations can be -// registered to convert struct fields being loaded and saved in the database. 
-type Meddler interface { - // PreRead is called before a Scan operation. It is given a pointer to - // the raw struct field, and returns the value that will be given to - // the database driver. - PreRead(fieldAddr interface{}) (scanTarget interface{}, err error) - - // PostRead is called after a Scan operation. It is given the value returned - // by PreRead and a pointer to the raw struct field. It is expected to fill - // in the struct field if the two are different. - PostRead(fieldAddr interface{}, scanTarget interface{}) error - - // PreWrite is called before an Insert or Update operation. It is given - // a pointer to the raw struct field, and returns the value that will be - // given to the database driver. - PreWrite(field interface{}) (saveValue interface{}, err error) -} - -// Register sets up a meddler type. Meddlers get a chance to meddle with the -// data being loaded or saved when a field is annotated with the name of the meddler. -// The registry is global. -func Register(name string, m Meddler) { - if name == "pk" { - panic("meddler.Register: pk cannot be used as a meddler name") - } - registry[name] = m -} - -var registry = make(map[string]Meddler) - -func init() { - Register("identity", IdentityMeddler(false)) - Register("localtime", TimeMeddler{ZeroIsNull: false, Local: true}) - Register("localtimez", TimeMeddler{ZeroIsNull: true, Local: true}) - Register("utctime", TimeMeddler{ZeroIsNull: false, Local: false}) - Register("utctimez", TimeMeddler{ZeroIsNull: true, Local: false}) - Register("zeroisnull", ZeroIsNullMeddler(false)) - Register("json", JSONMeddler(false)) - Register("jsongzip", JSONMeddler(true)) - Register("gob", GobMeddler(false)) - Register("gobgzip", GobMeddler(true)) -} - -// IdentityMeddler is the default meddler, and it passes the original value through with -// no changes. -type IdentityMeddler bool - -func (elt IdentityMeddler) PreRead(fieldAddr interface{}) (scanTarget interface{}, err error) { - return fieldAddr, nil -} - -func (elt IdentityMeddler) PostRead(fieldAddr, scanTarget interface{}) error { - return nil -} - -func (elt IdentityMeddler) PreWrite(field interface{}) (saveValue interface{}, err error) { - return field, nil -} - -// TimeMeddler provides useful operations on time.Time fields. It can convert the zero time -// to and from a null column, and it can convert the time zone to UTC on save and to Local on load. 
-type TimeMeddler struct { - ZeroIsNull bool - Local bool -} - -func (elt TimeMeddler) PreRead(fieldAddr interface{}) (scanTarget interface{}, err error) { - switch tgt := fieldAddr.(type) { - case *time.Time: - if elt.ZeroIsNull { - return &tgt, nil - } - return fieldAddr, nil - case **time.Time: - if elt.ZeroIsNull { - return nil, fmt.Errorf("meddler.TimeMeddler cannot be used on a *time.Time field, only time.Time") - } - return fieldAddr, nil - default: - return nil, fmt.Errorf("meddler.TimeMeddler.PreRead: unknown struct field type: %T", fieldAddr) - } -} - -func (elt TimeMeddler) PostRead(fieldAddr, scanTarget interface{}) error { - switch tgt := fieldAddr.(type) { - case *time.Time: - if elt.ZeroIsNull { - src := scanTarget.(**time.Time) - if *src == nil { - *tgt = time.Time{} - } else if elt.Local { - *tgt = (*src).Local() - } else { - *tgt = (*src).UTC() - } - return nil - } - - src := scanTarget.(*time.Time) - if elt.Local { - *tgt = src.Local() - } else { - *tgt = src.UTC() - } - - return nil - - case **time.Time: - if elt.ZeroIsNull { - return fmt.Errorf("meddler TimeMeddler cannot be used on a *time.Time field, only time.Time") - } - src := scanTarget.(**time.Time) - if *src == nil { - *tgt = nil - } else if elt.Local { - **src = (*src).Local() - *tgt = *src - } else { - **src = (*src).UTC() - *tgt = *src - } - - return nil - - default: - return fmt.Errorf("meddler.TimeMeddler.PostRead: unknown struct field type: %T", fieldAddr) - } -} - -func (elt TimeMeddler) PreWrite(field interface{}) (saveValue interface{}, err error) { - switch tgt := field.(type) { - case time.Time: - if elt.ZeroIsNull && tgt.IsZero() { - return nil, nil - } - return tgt.UTC(), nil - - case *time.Time: - if tgt == nil || elt.ZeroIsNull && tgt.IsZero() { - return nil, nil - } - return tgt.UTC(), nil - - default: - return nil, fmt.Errorf("meddler.TimeMeddler.PreWrite: unknown struct field type: %T", field) - } -} - -// ZeroIsNullMeddler converts zero value fields (integers both signed and unsigned, floats, complex numbers, -// and strings) to and from null database columns. 
-type ZeroIsNullMeddler bool - -func (elt ZeroIsNullMeddler) PreRead(fieldAddr interface{}) (scanTarget interface{}, err error) { - // create a pointer to this element - // the database driver will set it to nil if the column value is null - return reflect.New(reflect.TypeOf(fieldAddr)).Interface(), nil -} - -func (elt ZeroIsNullMeddler) PostRead(fieldAddr, scanTarget interface{}) error { - sv := reflect.ValueOf(scanTarget) - fv := reflect.ValueOf(fieldAddr) - if sv.Elem().IsNil() { - // null column, so set target to be zero value - fv.Elem().Set(reflect.Zero(fv.Elem().Type())) - } else { - // copy the value that scan found - fv.Elem().Set(sv.Elem().Elem()) - } - return nil -} - -func (elt ZeroIsNullMeddler) PreWrite(field interface{}) (saveValue interface{}, err error) { - val := reflect.ValueOf(field) - switch val.Kind() { - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - if val.Int() == 0 { - return nil, nil - } - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: - if val.Uint() == 0 { - return nil, nil - } - case reflect.Float32, reflect.Float64: - if val.Float() == 0 { - return nil, nil - } - case reflect.Complex64, reflect.Complex128: - if val.Complex() == 0 { - return nil, nil - } - case reflect.String: - if val.String() == "" { - return nil, nil - } - case reflect.Bool: - if !val.Bool() { - return nil, nil - } - default: - return nil, fmt.Errorf("ZeroIsNullMeddler.PreWrite: unknown struct field type: %T", field) - } - - return field, nil -} - -type JSONMeddler bool - -func (zip JSONMeddler) PreRead(fieldAddr interface{}) (scanTarget interface{}, err error) { - // give a pointer to a byte buffer to grab the raw data - return new([]byte), nil -} - -func (zip JSONMeddler) PostRead(fieldAddr, scanTarget interface{}) error { - ptr := scanTarget.(*[]byte) - if ptr == nil { - return fmt.Errorf("JSONMeddler.PostRead: nil pointer") - } - raw := *ptr - - if zip { - // un-gzip and decode json - gzipReader, err := gzip.NewReader(bytes.NewReader(raw)) - if err != nil { - return fmt.Errorf("Error creating gzip Reader: %v", err) - } - defer gzipReader.Close() - jsonDecoder := json.NewDecoder(gzipReader) - if err := jsonDecoder.Decode(fieldAddr); err != nil { - return fmt.Errorf("JSON decoder/gzip error: %v", err) - } - if err := gzipReader.Close(); err != nil { - return fmt.Errorf("Closing gzip reader: %v", err) - } - - return nil - } - - // decode json - jsonDecoder := json.NewDecoder(bytes.NewReader(raw)) - if err := jsonDecoder.Decode(fieldAddr); err != nil { - return fmt.Errorf("JSON decode error: %v", err) - } - - return nil -} - -func (zip JSONMeddler) PreWrite(field interface{}) (saveValue interface{}, err error) { - buffer := new(bytes.Buffer) - - if zip { - // json encode and gzip - gzipWriter := gzip.NewWriter(buffer) - defer gzipWriter.Close() - jsonEncoder := json.NewEncoder(gzipWriter) - if err := jsonEncoder.Encode(field); err != nil { - return nil, fmt.Errorf("JSON encoding/gzip error: %v", err) - } - if err := gzipWriter.Close(); err != nil { - return nil, fmt.Errorf("Closing gzip writer: %v", err) - } - - return buffer.Bytes(), nil - } - - // json encode - jsonEncoder := json.NewEncoder(buffer) - if err := jsonEncoder.Encode(field); err != nil { - return nil, fmt.Errorf("JSON encoding error: %v", err) - } - return buffer.Bytes(), nil -} - -type GobMeddler bool - -func (zip GobMeddler) PreRead(fieldAddr interface{}) (scanTarget interface{}, err error) { - // give a pointer to a byte buffer to grab the raw data - return 
new([]byte), nil -} - -func (zip GobMeddler) PostRead(fieldAddr, scanTarget interface{}) error { - ptr := scanTarget.(*[]byte) - if ptr == nil { - return fmt.Errorf("GobMeddler.PostRead: nil pointer") - } - raw := *ptr - - if zip { - // un-gzip and decode gob - gzipReader, err := gzip.NewReader(bytes.NewReader(raw)) - if err != nil { - return fmt.Errorf("Error creating gzip Reader: %v", err) - } - defer gzipReader.Close() - gobDecoder := gob.NewDecoder(gzipReader) - if err := gobDecoder.Decode(fieldAddr); err != nil { - return fmt.Errorf("Gob decoder/gzip error: %v", err) - } - if err := gzipReader.Close(); err != nil { - return fmt.Errorf("Closing gzip reader: %v", err) - } - - return nil - } - - // decode gob - gobDecoder := gob.NewDecoder(bytes.NewReader(raw)) - if err := gobDecoder.Decode(fieldAddr); err != nil { - return fmt.Errorf("Gob decode error: %v", err) - } - - return nil -} - -func (zip GobMeddler) PreWrite(field interface{}) (saveValue interface{}, err error) { - buffer := new(bytes.Buffer) - - if zip { - // gob encode and gzip - gzipWriter := gzip.NewWriter(buffer) - defer gzipWriter.Close() - gobEncoder := gob.NewEncoder(gzipWriter) - if err := gobEncoder.Encode(field); err != nil { - return nil, fmt.Errorf("Gob encoding/gzip error: %v", err) - } - if err := gzipWriter.Close(); err != nil { - return nil, fmt.Errorf("Closing gzip writer: %v", err) - } - - return buffer.Bytes(), nil - } - - // gob encode - gobEncoder := gob.NewEncoder(buffer) - if err := gobEncoder.Encode(field); err != nil { - return nil, fmt.Errorf("Gob encoding error: %v", err) - } - return buffer.Bytes(), nil -} diff --git a/vendor/github.com/russross/meddler/scan.go b/vendor/github.com/russross/meddler/scan.go deleted file mode 100644 index 5e1242e..0000000 --- a/vendor/github.com/russross/meddler/scan.go +++ /dev/null @@ -1,576 +0,0 @@ -package meddler - -import ( - "database/sql" - "fmt" - "log" - "reflect" - "strconv" - "strings" - "sync" -) - -// the name of our struct tag -const tagName = "meddler" - -// Database contains database-specific options. -// MySQL, PostgreSQL, and SQLite are provided for convenience. -// Setting Default to any of these lets you use the package-level convenience functions. -type Database struct { - Quote string // the quote character for table and column names - Placeholder string // the placeholder style to use in generated queries - UseReturningToGetID bool // use PostgreSQL-style RETURNING "ID" instead of calling sql.Result.LastInsertID -} - -var MySQL = &Database{ - Quote: "`", - Placeholder: "?", - UseReturningToGetID: false, -} - -var PostgreSQL = &Database{ - Quote: `"`, - Placeholder: "$1", - UseReturningToGetID: true, -} - -var SQLite = &Database{ - Quote: `"`, - Placeholder: "?", - UseReturningToGetID: false, -} - -var Default = MySQL - -func (d *Database) quoted(s string) string { - return d.Quote + s + d.Quote -} - -func (d *Database) placeholder(n int) string { - return strings.Replace(d.Placeholder, "1", strconv.FormatInt(int64(n), 10), 1) -} - -// Debug enables debug mode, where unused columns and struct fields will be logged -var Debug = true - -type structField struct { - column string - index int - primaryKey bool - meddler Meddler -} - -type structData struct { - columns []string - fields map[string]*structField - pk string -} - -// cache reflection data -var fieldsCache = make(map[reflect.Type]*structData) -var fieldsCacheMutex sync.Mutex - -// getFields gathers the list of columns from a struct using reflection. 
-func getFields(dstType reflect.Type) (*structData, error) { - fieldsCacheMutex.Lock() - defer fieldsCacheMutex.Unlock() - - if result, present := fieldsCache[dstType]; present { - return result, nil - } - - // make sure dst is a non-nil pointer to a struct - if dstType.Kind() != reflect.Ptr { - return nil, fmt.Errorf("meddler called with non-pointer destination %v", dstType) - } - structType := dstType.Elem() - if structType.Kind() != reflect.Struct { - return nil, fmt.Errorf("meddler called with pointer to non-struct %v", dstType) - } - - // gather the list of fields in the struct - data := new(structData) - data.fields = make(map[string]*structField) - - for i := 0; i < structType.NumField(); i++ { - f := structType.Field(i) - - // skip non-exported fields - if f.PkgPath != "" { - continue - } - - // examine the tag for metadata - tag := strings.Split(f.Tag.Get(tagName), ",") - - // was this field marked for skipping? - if len(tag) > 0 && tag[0] == "-" { - continue - } - - // default to the field name - name := f.Name - - // the tag can override the field name - if len(tag) > 0 && tag[0] != "" { - name = tag[0] - } - - // check for a meddler - var meddler Meddler = registry["identity"] - for j := 1; j < len(tag); j++ { - if tag[j] == "pk" { - if f.Type.Kind() == reflect.Ptr { - return nil, fmt.Errorf("meddler found field %s which is marked as the primary key but is a pointer", f.Name) - } - - // make sure it is an int of some kind - switch f.Type.Kind() { - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: - default: - return nil, fmt.Errorf("meddler found field %s which is marked as the primary key, but is not an integer type", f.Name) - } - - if data.pk != "" { - return nil, fmt.Errorf("meddler found field %s which is marked as the primary key, but a primary key field was already found", f.Name) - } - data.pk = name - } else if m, present := registry[tag[j]]; present { - meddler = m - } else { - return nil, fmt.Errorf("meddler found field %s with meddler %s, but that meddler is not registered", f.Name, tag[j]) - } - } - - if _, present := data.fields[name]; present { - return nil, fmt.Errorf("meddler found multiple fields for column %s", name) - } - data.fields[name] = &structField{ - column: name, - primaryKey: name == data.pk, - index: i, - meddler: meddler, - } - data.columns = append(data.columns, name) - } - - fieldsCache[dstType] = data - return data, nil -} - -// Columns returns a list of column names for its input struct. -func (d *Database) Columns(src interface{}, includePk bool) ([]string, error) { - data, err := getFields(reflect.TypeOf(src)) - if err != nil { - return nil, err - } - - var names []string - for _, elt := range data.columns { - if !includePk && elt == data.pk { - continue - } - names = append(names, elt) - } - - return names, nil -} - -// Columns using the Default Database type -func Columns(src interface{}, includePk bool) ([]string, error) { - return Default.Columns(src, includePk) -} - -// ColumnsQuoted is similar to Columns, but it return the list of columns in the form: -// `column1`,`column2`,... -// using Quote as the quote character. 
-func (d *Database) ColumnsQuoted(src interface{}, includePk bool) (string, error) { - unquoted, err := Columns(src, includePk) - if err != nil { - return "", err - } - - var parts []string - for _, elt := range unquoted { - parts = append(parts, d.quoted(elt)) - } - - return strings.Join(parts, ","), nil -} - -// ColumnsQuoted using the Default Database type -func ColumnsQuoted(src interface{}, includePk bool) (string, error) { - return Default.ColumnsQuoted(src, includePk) -} - -// PrimaryKey returns the name and value of the primary key field. The name -// is the empty string if there is not primary key field marked. -func (d *Database) PrimaryKey(src interface{}) (name string, pk int64, err error) { - data, err := getFields(reflect.TypeOf(src)) - if err != nil { - return "", 0, err - } - - if data.pk == "" { - return "", 0, nil - } - - name = data.pk - field := reflect.ValueOf(src).Elem().Field(data.fields[name].index) - switch field.Type().Kind() { - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - pk = field.Int() - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: - pk = int64(field.Uint()) - default: - return "", 0, fmt.Errorf("meddler found field %s which is marked as the primary key, but is not an integer type", name) - } - - return name, pk, nil -} - -// PrimaryKey using the Default Database type -func PrimaryKey(src interface{}) (name string, pk int64, err error) { - return Default.PrimaryKey(src) -} - -// SetPrimaryKey sets the primary key field to the given int value. -func (d *Database) SetPrimaryKey(src interface{}, pk int64) error { - data, err := getFields(reflect.TypeOf(src)) - if err != nil { - return err - } - - if data.pk == "" { - return fmt.Errorf("meddler.SetPrimaryKey: no primary key field found") - } - - field := reflect.ValueOf(src).Elem().Field(data.fields[data.pk].index) - switch field.Type().Kind() { - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - field.SetInt(pk) - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: - field.SetUint(uint64(pk)) - default: - return fmt.Errorf("meddler found field %s which is marked as the primary key, but is not an integer type", data.pk) - } - - return nil -} - -// SetPrimaryKey using the Default Database type -func SetPrimaryKey(src interface{}, pk int64) error { - return Default.SetPrimaryKey(src, pk) -} - -// Values returns a list of PreWrite processed values suitable for -// use in an INSERT or UPDATE query. If includePk is false, the primary -// key field is omitted. The columns used are the same ones (in the same -// order) as returned by Columns. -func (d *Database) Values(src interface{}, includePk bool) ([]interface{}, error) { - columns, err := d.Columns(src, includePk) - if err != nil { - return nil, err - } - return d.SomeValues(src, columns) -} - -// Values using the Default Database type -func Values(src interface{}, includePk bool) ([]interface{}, error) { - return Default.Values(src, includePk) -} - -// SomeValues returns a list of PreWrite processed values suitable for -// use in an INSERT or UPDATE query. The columns used are the same ones (in -// the same order) as specified in the columns argument. 
-func (d *Database) SomeValues(src interface{}, columns []string) ([]interface{}, error) { - data, err := getFields(reflect.TypeOf(src)) - if err != nil { - return nil, err - } - structVal := reflect.ValueOf(src).Elem() - - var values []interface{} - for _, name := range columns { - field, present := data.fields[name] - if !present { - // write null to the database - values = append(values, nil) - - if Debug { - log.Printf("meddler.SomeValues: column [%s] not found in struct", name) - } - continue - } - - saveVal, err := field.meddler.PreWrite(structVal.Field(field.index).Interface()) - if err != nil { - return nil, fmt.Errorf("meddler.SomeValues: PreWrite error on column [%s]: %v", name, err) - } - values = append(values, saveVal) - } - - return values, nil -} - -// SomeValues using the Default Database type -func SomeValues(src interface{}, columns []string) ([]interface{}, error) { - return Default.SomeValues(src, columns) -} - -// Placeholders returns a list of placeholders suitable for an INSERT or UPDATE query. -// If includePk is false, the primary key field is omitted. -func (d *Database) Placeholders(src interface{}, includePk bool) ([]string, error) { - data, err := getFields(reflect.TypeOf(src)) - if err != nil { - return nil, err - } - - var placeholders []string - for _, name := range data.columns { - if !includePk && name == data.pk { - continue - } - ph := d.placeholder(len(placeholders) + 1) - placeholders = append(placeholders, ph) - } - - return placeholders, nil -} - -// Placeholders using the Default Database type -func Placeholders(src interface{}, includePk bool) ([]string, error) { - return Default.Placeholders(src, includePk) -} - -// PlaceholdersString returns a list of placeholders suitable for an INSERT -// or UPDATE query in string form, e.g.: -// ?,?,?,? -// if includePk is false, the primary key field is omitted. -func (d *Database) PlaceholdersString(src interface{}, includePk bool) (string, error) { - lst, err := d.Placeholders(src, includePk) - if err != nil { - return "", err - } - return strings.Join(lst, ","), nil -} - -// PlaceholdersString using the Default Database type -func PlaceholdersString(src interface{}, includePk bool) (string, error) { - return Default.PlaceholdersString(src, includePk) -} - -// scan a single row of data into a struct. -func (d *Database) scanRow(data *structData, rows *sql.Rows, dst interface{}, columns []string) error { - // check if there is data waiting - if !rows.Next() { - if err := rows.Err(); err != nil { - return err - } - return sql.ErrNoRows - } - - // get a list of targets - targets, err := d.Targets(dst, columns) - if err != nil { - return err - } - - // perform the scan - if err := rows.Scan(targets...); err != nil { - return err - } - - // post-process and copy the target values into the struct - if err := d.WriteTargets(dst, columns, targets); err != nil { - return err - } - - return rows.Err() -} - -// Targets returns a list of values suitable for handing to a -// Scan function in the sql package, complete with meddling. After -// the Scan is performed, the same values should be handed to -// WriteTargets to finalize the values and record them in the struct. 
-func (d *Database) Targets(dst interface{}, columns []string) ([]interface{}, error) { - data, err := getFields(reflect.TypeOf(dst)) - if err != nil { - return nil, err - } - - structVal := reflect.ValueOf(dst).Elem() - - var targets []interface{} - for _, name := range columns { - if field, present := data.fields[name]; present { - fieldAddr := structVal.Field(field.index).Addr().Interface() - scanTarget, err := field.meddler.PreRead(fieldAddr) - if err != nil { - return nil, fmt.Errorf("meddler.Targets: PreRead error on column %s: %v", name, err) - } - targets = append(targets, scanTarget) - } else { - // no destination, so throw this away - targets = append(targets, new(interface{})) - - if Debug { - log.Printf("meddler.Targets: column [%s] not found in struct", name) - } - } - } - - return targets, nil -} - -// Targets using the Default Database type -func Targets(dst interface{}, columns []string) ([]interface{}, error) { - return Default.Targets(dst, columns) -} - -// WriteTargets post-processes values with meddlers after a Scan from the -// sql package has been performed. The list of targets is normally produced -// by Targets. -func (d *Database) WriteTargets(dst interface{}, columns []string, targets []interface{}) error { - if len(columns) != len(targets) { - return fmt.Errorf("meddler.WriteTargets: mismatch in number of columns (%d) and targets (%s)", - len(columns), len(targets)) - } - - data, err := getFields(reflect.TypeOf(dst)) - if err != nil { - return err - } - structVal := reflect.ValueOf(dst).Elem() - - for i, name := range columns { - if field, present := data.fields[name]; present { - fieldAddr := structVal.Field(field.index).Addr().Interface() - err := field.meddler.PostRead(fieldAddr, targets[i]) - if err != nil { - return fmt.Errorf("meddler.WriteTargets: PostRead error on column [%s]: %v", name, err) - } - } else { - // not destination, so throw this away - if Debug { - log.Printf("meddler.WriteTargets: column [%s] not found in struct", name) - } - } - } - - return nil -} - -// WriteTargets using the Default Database type -func WriteTargets(dst interface{}, columns []string, targets []interface{}) error { - return Default.WriteTargets(dst, columns, targets) -} - -// Scan scans a single sql result row into a struct. -// It leaves rows ready to be scanned again for the next row. -// Returns sql.ErrNoRows if there is no data to read. -func (d *Database) Scan(rows *sql.Rows, dst interface{}) error { - // get the list of struct fields - data, err := getFields(reflect.TypeOf(dst)) - if err != nil { - return err - } - - // get the sql columns - columns, err := rows.Columns() - if err != nil { - return err - } - - return d.scanRow(data, rows, dst, columns) -} - -// Scan using the Default Database type -func Scan(rows *sql.Rows, dst interface{}) error { - return Default.Scan(rows, dst) -} - -// ScanRow scans a single sql result row into a struct. -// It reads exactly one result row and closes rows when finished. -// Returns sql.ErrNoRows if there is no result row. -func (d *Database) ScanRow(rows *sql.Rows, dst interface{}) error { - // make sure we always close rows - defer rows.Close() - - if err := d.Scan(rows, dst); err != nil { - return err - } - if err := rows.Close(); err != nil { - return err - } - - return nil -} - -// ScanRow using the Default Database type -func ScanRow(rows *sql.Rows, dst interface{}) error { - return Default.ScanRow(rows, dst) -} - -// ScanAll scans all sql result rows into a slice of structs. 
-// It reads all rows and closes rows when finished. -// dst should be a pointer to a slice of the appropriate type. -// The new results will be appended to any existing data in dst. -func (d *Database) ScanAll(rows *sql.Rows, dst interface{}) error { - // make sure we always close rows - defer rows.Close() - - // make sure dst is an appropriate type - dstVal := reflect.ValueOf(dst) - if dstVal.Kind() != reflect.Ptr || dstVal.IsNil() { - return fmt.Errorf("ScanAll called with non-pointer destination: %T", dst) - } - sliceVal := dstVal.Elem() - if sliceVal.Kind() != reflect.Slice { - return fmt.Errorf("ScanAll called with pointer to non-slice: %T", dst) - } - ptrType := sliceVal.Type().Elem() - if ptrType.Kind() != reflect.Ptr { - return fmt.Errorf("ScanAll expects element to be pointers, found %T", dst) - } - eltType := ptrType.Elem() - if eltType.Kind() != reflect.Struct { - return fmt.Errorf("ScanAll expects element to be pointers to structs, found %T", dst) - } - - // get the list of struct fields - data, err := getFields(ptrType) - if err != nil { - return err - } - - // get the sql columns - columns, err := rows.Columns() - if err != nil { - return err - } - - // gather the results - for { - // create a new element - eltVal := reflect.New(eltType) - elt := eltVal.Interface() - - // scan it - if err := d.scanRow(data, rows, elt, columns); err != nil { - if err == sql.ErrNoRows { - return nil - } - return err - } - - // add to the result slice - sliceVal.Set(reflect.Append(sliceVal, eltVal)) - } -} - -// ScanAll using the Default Database type -func ScanAll(rows *sql.Rows, dst interface{}) error { - return Default.ScanAll(rows, dst) -} diff --git a/vendor/github.com/stretchr/objx/LICENSE.md b/vendor/github.com/stretchr/objx/LICENSE.md deleted file mode 100644 index 2199945..0000000 --- a/vendor/github.com/stretchr/objx/LICENSE.md +++ /dev/null @@ -1,23 +0,0 @@ -objx - by Mat Ryer and Tyler Bunnell - -The MIT License (MIT) - -Copyright (c) 2014 Stretchr, Inc. - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. 
diff --git a/vendor/github.com/stretchr/objx/README.md b/vendor/github.com/stretchr/objx/README.md deleted file mode 100644 index 4aa1806..0000000 --- a/vendor/github.com/stretchr/objx/README.md +++ /dev/null @@ -1,3 +0,0 @@ -# objx - - * Jump into the [API Documentation](http://godoc.org/github.com/stretchr/objx) diff --git a/vendor/github.com/stretchr/objx/accessors.go b/vendor/github.com/stretchr/objx/accessors.go deleted file mode 100644 index 721bcac..0000000 --- a/vendor/github.com/stretchr/objx/accessors.go +++ /dev/null @@ -1,179 +0,0 @@ -package objx - -import ( - "fmt" - "regexp" - "strconv" - "strings" -) - -// arrayAccesRegexString is the regex used to extract the array number -// from the access path -const arrayAccesRegexString = `^(.+)\[([0-9]+)\]$` - -// arrayAccesRegex is the compiled arrayAccesRegexString -var arrayAccesRegex = regexp.MustCompile(arrayAccesRegexString) - -// Get gets the value using the specified selector and -// returns it inside a new Obj object. -// -// If it cannot find the value, Get will return a nil -// value inside an instance of Obj. -// -// Get can only operate directly on map[string]interface{} and []interface. -// -// Example -// -// To access the title of the third chapter of the second book, do: -// -// o.Get("books[1].chapters[2].title") -func (m Map) Get(selector string) *Value { - rawObj := access(m, selector, nil, false, false) - return &Value{data: rawObj} -} - -// Set sets the value using the specified selector and -// returns the object on which Set was called. -// -// Set can only operate directly on map[string]interface{} and []interface -// -// Example -// -// To set the title of the third chapter of the second book, do: -// -// o.Set("books[1].chapters[2].title","Time to Go") -func (m Map) Set(selector string, value interface{}) Map { - access(m, selector, value, true, false) - return m -} - -// access accesses the object using the selector and performs the -// appropriate action. -func access(current, selector, value interface{}, isSet, panics bool) interface{} { - - switch selector.(type) { - case int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64: - - if array, ok := current.([]interface{}); ok { - index := intFromInterface(selector) - - if index >= len(array) { - if panics { - panic(fmt.Sprintf("objx: Index %d is out of range. Slice only contains %d items.", index, len(array))) - } - return nil - } - - return array[index] - } - - return nil - - case string: - - selStr := selector.(string) - selSegs := strings.SplitN(selStr, PathSeparator, 2) - thisSel := selSegs[0] - index := -1 - var err error - - // https://github.com/stretchr/objx/issues/12 - if strings.Contains(thisSel, "[") { - - arrayMatches := arrayAccesRegex.FindStringSubmatch(thisSel) - - if len(arrayMatches) > 0 { - - // Get the key into the map - thisSel = arrayMatches[1] - - // Get the index into the array at the key - index, err = strconv.Atoi(arrayMatches[2]) - - if err != nil { - // This should never happen. If it does, something has gone - // seriously wrong. Panic. - panic("objx: Array index is not an integer. 
Must use array[int].") - } - - } - } - - if curMap, ok := current.(Map); ok { - current = map[string]interface{}(curMap) - } - - // get the object in question - switch current.(type) { - case map[string]interface{}: - curMSI := current.(map[string]interface{}) - if len(selSegs) <= 1 && isSet { - curMSI[thisSel] = value - return nil - } else { - current = curMSI[thisSel] - } - default: - current = nil - } - - if current == nil && panics { - panic(fmt.Sprintf("objx: '%v' invalid on object.", selector)) - } - - // do we need to access the item of an array? - if index > -1 { - if array, ok := current.([]interface{}); ok { - if index < len(array) { - current = array[index] - } else { - if panics { - panic(fmt.Sprintf("objx: Index %d is out of range. Slice only contains %d items.", index, len(array))) - } - current = nil - } - } - } - - if len(selSegs) > 1 { - current = access(current, selSegs[1], value, isSet, panics) - } - - } - - return current - -} - -// intFromInterface converts an interface object to the largest -// representation of an unsigned integer using a type switch and -// assertions -func intFromInterface(selector interface{}) int { - var value int - switch selector.(type) { - case int: - value = selector.(int) - case int8: - value = int(selector.(int8)) - case int16: - value = int(selector.(int16)) - case int32: - value = int(selector.(int32)) - case int64: - value = int(selector.(int64)) - case uint: - value = int(selector.(uint)) - case uint8: - value = int(selector.(uint8)) - case uint16: - value = int(selector.(uint16)) - case uint32: - value = int(selector.(uint32)) - case uint64: - value = int(selector.(uint64)) - default: - panic("objx: array access argument is not an integer type (this should never happen)") - } - - return value -} diff --git a/vendor/github.com/stretchr/objx/constants.go b/vendor/github.com/stretchr/objx/constants.go deleted file mode 100644 index f9eb42a..0000000 --- a/vendor/github.com/stretchr/objx/constants.go +++ /dev/null @@ -1,13 +0,0 @@ -package objx - -const ( - // PathSeparator is the character used to separate the elements - // of the keypath. - // - // For example, `location.address.city` - PathSeparator string = "." - - // SignatureSeparator is the character that is used to - // separate the Base64 string from the security signature. 
- SignatureSeparator = "_" -) diff --git a/vendor/github.com/stretchr/objx/conversions.go b/vendor/github.com/stretchr/objx/conversions.go deleted file mode 100644 index 9cdfa9f..0000000 --- a/vendor/github.com/stretchr/objx/conversions.go +++ /dev/null @@ -1,117 +0,0 @@ -package objx - -import ( - "bytes" - "encoding/base64" - "encoding/json" - "errors" - "fmt" - "net/url" -) - -// JSON converts the contained object to a JSON string -// representation -func (m Map) JSON() (string, error) { - - result, err := json.Marshal(m) - - if err != nil { - err = errors.New("objx: JSON encode failed with: " + err.Error()) - } - - return string(result), err - -} - -// MustJSON converts the contained object to a JSON string -// representation and panics if there is an error -func (m Map) MustJSON() string { - result, err := m.JSON() - if err != nil { - panic(err.Error()) - } - return result -} - -// Base64 converts the contained object to a Base64 string -// representation of the JSON string representation -func (m Map) Base64() (string, error) { - - var buf bytes.Buffer - - jsonData, err := m.JSON() - if err != nil { - return "", err - } - - encoder := base64.NewEncoder(base64.StdEncoding, &buf) - encoder.Write([]byte(jsonData)) - encoder.Close() - - return buf.String(), nil - -} - -// MustBase64 converts the contained object to a Base64 string -// representation of the JSON string representation and panics -// if there is an error -func (m Map) MustBase64() string { - result, err := m.Base64() - if err != nil { - panic(err.Error()) - } - return result -} - -// SignedBase64 converts the contained object to a Base64 string -// representation of the JSON string representation and signs it -// using the provided key. -func (m Map) SignedBase64(key string) (string, error) { - - base64, err := m.Base64() - if err != nil { - return "", err - } - - sig := HashWithKey(base64, key) - - return base64 + SignatureSeparator + sig, nil - -} - -// MustSignedBase64 converts the contained object to a Base64 string -// representation of the JSON string representation and signs it -// using the provided key and panics if there is an error -func (m Map) MustSignedBase64(key string) string { - result, err := m.SignedBase64(key) - if err != nil { - panic(err.Error()) - } - return result -} - -/* - URL Query - ------------------------------------------------ -*/ - -// URLValues creates a url.Values object from an Obj. This -// function requires that the wrapped object be a map[string]interface{} -func (m Map) URLValues() url.Values { - - vals := make(url.Values) - - for k, v := range m { - //TODO: can this be done without sprintf? - vals.Set(k, fmt.Sprintf("%v", v)) - } - - return vals -} - -// URLQuery gets an encoded URL query representing the given -// Obj. This function requires that the wrapped object be a -// map[string]interface{} -func (m Map) URLQuery() (string, error) { - return m.URLValues().Encode(), nil -} diff --git a/vendor/github.com/stretchr/objx/doc.go b/vendor/github.com/stretchr/objx/doc.go deleted file mode 100644 index 47bf85e..0000000 --- a/vendor/github.com/stretchr/objx/doc.go +++ /dev/null @@ -1,72 +0,0 @@ -// objx - Go package for dealing with maps, slices, JSON and other data. -// -// Overview -// -// Objx provides the `objx.Map` type, which is a `map[string]interface{}` that exposes -// a powerful `Get` method (among others) that allows you to easily and quickly get -// access to data within the map, without having to worry too much about type assertions, -// missing data, default values etc. 
-// -// Pattern -// -// Objx uses a preditable pattern to make access data from within `map[string]interface{}'s -// easy. -// -// Call one of the `objx.` functions to create your `objx.Map` to get going: -// -// m, err := objx.FromJSON(json) -// -// NOTE: Any methods or functions with the `Must` prefix will panic if something goes wrong, -// the rest will be optimistic and try to figure things out without panicking. -// -// Use `Get` to access the value you're interested in. You can use dot and array -// notation too: -// -// m.Get("places[0].latlng") -// -// Once you have saught the `Value` you're interested in, you can use the `Is*` methods -// to determine its type. -// -// if m.Get("code").IsStr() { /* ... */ } -// -// Or you can just assume the type, and use one of the strong type methods to -// extract the real value: -// -// m.Get("code").Int() -// -// If there's no value there (or if it's the wrong type) then a default value -// will be returned, or you can be explicit about the default value. -// -// Get("code").Int(-1) -// -// If you're dealing with a slice of data as a value, Objx provides many useful -// methods for iterating, manipulating and selecting that data. You can find out more -// by exploring the index below. -// -// Reading data -// -// A simple example of how to use Objx: -// -// // use MustFromJSON to make an objx.Map from some JSON -// m := objx.MustFromJSON(`{"name": "Mat", "age": 30}`) -// -// // get the details -// name := m.Get("name").Str() -// age := m.Get("age").Int() -// -// // get their nickname (or use their name if they -// // don't have one) -// nickname := m.Get("nickname").Str(name) -// -// Ranging -// -// Since `objx.Map` is a `map[string]interface{}` you can treat it as such. For -// example, to `range` the data, do what you would expect: -// -// m := objx.MustFromJSON(json) -// for key, value := range m { -// -// /* ... do your magic ... */ -// -// } -package objx diff --git a/vendor/github.com/stretchr/objx/map.go b/vendor/github.com/stretchr/objx/map.go deleted file mode 100644 index eb6ed8e..0000000 --- a/vendor/github.com/stretchr/objx/map.go +++ /dev/null @@ -1,222 +0,0 @@ -package objx - -import ( - "encoding/base64" - "encoding/json" - "errors" - "io/ioutil" - "net/url" - "strings" -) - -// MSIConvertable is an interface that defines methods for converting your -// custom types to a map[string]interface{} representation. -type MSIConvertable interface { - // MSI gets a map[string]interface{} (msi) representing the - // object. - MSI() map[string]interface{} -} - -// Map provides extended functionality for working with -// untyped data, in particular map[string]interface (msi). -type Map map[string]interface{} - -// Value returns the internal value instance -func (m Map) Value() *Value { - return &Value{data: m} -} - -// Nil represents a nil Map. -var Nil Map = New(nil) - -// New creates a new Map containing the map[string]interface{} in the data argument. -// If the data argument is not a map[string]interface, New attempts to call the -// MSI() method on the MSIConvertable interface to create one. -func New(data interface{}) Map { - if _, ok := data.(map[string]interface{}); !ok { - if converter, ok := data.(MSIConvertable); ok { - data = converter.MSI() - } else { - return nil - } - } - return Map(data.(map[string]interface{})) -} - -// MSI creates a map[string]interface{} and puts it inside a new Map. -// -// The arguments follow a key, value pattern. 
-// -// Panics -// -// Panics if any key arugment is non-string or if there are an odd number of arguments. -// -// Example -// -// To easily create Maps: -// -// m := objx.MSI("name", "Mat", "age", 29, "subobj", objx.MSI("active", true)) -// -// // creates an Map equivalent to -// m := objx.New(map[string]interface{}{"name": "Mat", "age": 29, "subobj": map[string]interface{}{"active": true}}) -func MSI(keyAndValuePairs ...interface{}) Map { - - newMap := make(map[string]interface{}) - keyAndValuePairsLen := len(keyAndValuePairs) - - if keyAndValuePairsLen%2 != 0 { - panic("objx: MSI must have an even number of arguments following the 'key, value' pattern.") - } - - for i := 0; i < keyAndValuePairsLen; i = i + 2 { - - key := keyAndValuePairs[i] - value := keyAndValuePairs[i+1] - - // make sure the key is a string - keyString, keyStringOK := key.(string) - if !keyStringOK { - panic("objx: MSI must follow 'string, interface{}' pattern. " + keyString + " is not a valid key.") - } - - newMap[keyString] = value - - } - - return New(newMap) -} - -// ****** Conversion Constructors - -// MustFromJSON creates a new Map containing the data specified in the -// jsonString. -// -// Panics if the JSON is invalid. -func MustFromJSON(jsonString string) Map { - o, err := FromJSON(jsonString) - - if err != nil { - panic("objx: MustFromJSON failed with error: " + err.Error()) - } - - return o -} - -// FromJSON creates a new Map containing the data specified in the -// jsonString. -// -// Returns an error if the JSON is invalid. -func FromJSON(jsonString string) (Map, error) { - - var data interface{} - err := json.Unmarshal([]byte(jsonString), &data) - - if err != nil { - return Nil, err - } - - return New(data), nil - -} - -// FromBase64 creates a new Obj containing the data specified -// in the Base64 string. -// -// The string is an encoded JSON string returned by Base64 -func FromBase64(base64String string) (Map, error) { - - decoder := base64.NewDecoder(base64.StdEncoding, strings.NewReader(base64String)) - - decoded, err := ioutil.ReadAll(decoder) - if err != nil { - return nil, err - } - - return FromJSON(string(decoded)) -} - -// MustFromBase64 creates a new Obj containing the data specified -// in the Base64 string and panics if there is an error. -// -// The string is an encoded JSON string returned by Base64 -func MustFromBase64(base64String string) Map { - - result, err := FromBase64(base64String) - - if err != nil { - panic("objx: MustFromBase64 failed with error: " + err.Error()) - } - - return result -} - -// FromSignedBase64 creates a new Obj containing the data specified -// in the Base64 string. -// -// The string is an encoded JSON string returned by SignedBase64 -func FromSignedBase64(base64String, key string) (Map, error) { - parts := strings.Split(base64String, SignatureSeparator) - if len(parts) != 2 { - return nil, errors.New("objx: Signed base64 string is malformed.") - } - - sig := HashWithKey(parts[0], key) - if parts[1] != sig { - return nil, errors.New("objx: Signature for base64 data does not match.") - } - - return FromBase64(parts[0]) -} - -// MustFromSignedBase64 creates a new Obj containing the data specified -// in the Base64 string and panics if there is an error. 
-// -// The string is an encoded JSON string returned by Base64 -func MustFromSignedBase64(base64String, key string) Map { - - result, err := FromSignedBase64(base64String, key) - - if err != nil { - panic("objx: MustFromSignedBase64 failed with error: " + err.Error()) - } - - return result -} - -// FromURLQuery generates a new Obj by parsing the specified -// query. -// -// For queries with multiple values, the first value is selected. -func FromURLQuery(query string) (Map, error) { - - vals, err := url.ParseQuery(query) - - if err != nil { - return nil, err - } - - m := make(map[string]interface{}) - for k, vals := range vals { - m[k] = vals[0] - } - - return New(m), nil -} - -// MustFromURLQuery generates a new Obj by parsing the specified -// query. -// -// For queries with multiple values, the first value is selected. -// -// Panics if it encounters an error -func MustFromURLQuery(query string) Map { - - o, err := FromURLQuery(query) - - if err != nil { - panic("objx: MustFromURLQuery failed with error: " + err.Error()) - } - - return o - -} diff --git a/vendor/github.com/stretchr/objx/mutations.go b/vendor/github.com/stretchr/objx/mutations.go deleted file mode 100644 index b35c863..0000000 --- a/vendor/github.com/stretchr/objx/mutations.go +++ /dev/null @@ -1,81 +0,0 @@ -package objx - -// Exclude returns a new Map with the keys in the specified []string -// excluded. -func (d Map) Exclude(exclude []string) Map { - - excluded := make(Map) - for k, v := range d { - var shouldInclude bool = true - for _, toExclude := range exclude { - if k == toExclude { - shouldInclude = false - break - } - } - if shouldInclude { - excluded[k] = v - } - } - - return excluded -} - -// Copy creates a shallow copy of the Obj. -func (m Map) Copy() Map { - copied := make(map[string]interface{}) - for k, v := range m { - copied[k] = v - } - return New(copied) -} - -// Merge blends the specified map with a copy of this map and returns the result. -// -// Keys that appear in both will be selected from the specified map. -// This method requires that the wrapped object be a map[string]interface{} -func (m Map) Merge(merge Map) Map { - return m.Copy().MergeHere(merge) -} - -// Merge blends the specified map with this map and returns the current map. -// -// Keys that appear in both will be selected from the specified map. The original map -// will be modified. This method requires that -// the wrapped object be a map[string]interface{} -func (m Map) MergeHere(merge Map) Map { - - for k, v := range merge { - m[k] = v - } - - return m - -} - -// Transform builds a new Obj giving the transformer a chance -// to change the keys and values as it goes. This method requires that -// the wrapped object be a map[string]interface{} -func (m Map) Transform(transformer func(key string, value interface{}) (string, interface{})) Map { - newMap := make(map[string]interface{}) - for k, v := range m { - modifiedKey, modifiedVal := transformer(k, v) - newMap[modifiedKey] = modifiedVal - } - return New(newMap) -} - -// TransformKeys builds a new map using the specified key mapping. -// -// Unspecified keys will be unaltered. 
-// This method requires that the wrapped object be a map[string]interface{} -func (m Map) TransformKeys(mapping map[string]string) Map { - return m.Transform(func(key string, value interface{}) (string, interface{}) { - - if newKey, ok := mapping[key]; ok { - return newKey, value - } - - return key, value - }) -} diff --git a/vendor/github.com/stretchr/objx/security.go b/vendor/github.com/stretchr/objx/security.go deleted file mode 100644 index fdd6be9..0000000 --- a/vendor/github.com/stretchr/objx/security.go +++ /dev/null @@ -1,14 +0,0 @@ -package objx - -import ( - "crypto/sha1" - "encoding/hex" -) - -// HashWithKey hashes the specified string using the security -// key. -func HashWithKey(data, key string) string { - hash := sha1.New() - hash.Write([]byte(data + ":" + key)) - return hex.EncodeToString(hash.Sum(nil)) -} diff --git a/vendor/github.com/stretchr/objx/tests.go b/vendor/github.com/stretchr/objx/tests.go deleted file mode 100644 index d9e0b47..0000000 --- a/vendor/github.com/stretchr/objx/tests.go +++ /dev/null @@ -1,17 +0,0 @@ -package objx - -// Has gets whether there is something at the specified selector -// or not. -// -// If m is nil, Has will always return false. -func (m Map) Has(selector string) bool { - if m == nil { - return false - } - return !m.Get(selector).IsNil() -} - -// IsNil gets whether the data is nil or not. -func (v *Value) IsNil() bool { - return v == nil || v.data == nil -} diff --git a/vendor/github.com/stretchr/objx/type_specific_codegen.go b/vendor/github.com/stretchr/objx/type_specific_codegen.go deleted file mode 100644 index f3ecb29..0000000 --- a/vendor/github.com/stretchr/objx/type_specific_codegen.go +++ /dev/null @@ -1,2881 +0,0 @@ -package objx - -/* - Inter (interface{} and []interface{}) - -------------------------------------------------- -*/ - -// Inter gets the value as a interface{}, returns the optionalDefault -// value or a system default object if the value is the wrong type. -func (v *Value) Inter(optionalDefault ...interface{}) interface{} { - if s, ok := v.data.(interface{}); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return nil -} - -// MustInter gets the value as a interface{}. -// -// Panics if the object is not a interface{}. -func (v *Value) MustInter() interface{} { - return v.data.(interface{}) -} - -// InterSlice gets the value as a []interface{}, returns the optionalDefault -// value or nil if the value is not a []interface{}. -func (v *Value) InterSlice(optionalDefault ...[]interface{}) []interface{} { - if s, ok := v.data.([]interface{}); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return nil -} - -// MustInterSlice gets the value as a []interface{}. -// -// Panics if the object is not a []interface{}. -func (v *Value) MustInterSlice() []interface{} { - return v.data.([]interface{}) -} - -// IsInter gets whether the object contained is a interface{} or not. -func (v *Value) IsInter() bool { - _, ok := v.data.(interface{}) - return ok -} - -// IsInterSlice gets whether the object contained is a []interface{} or not. -func (v *Value) IsInterSlice() bool { - _, ok := v.data.([]interface{}) - return ok -} - -// EachInter calls the specified callback for each object -// in the []interface{}. -// -// Panics if the object is the wrong type. 
-func (v *Value) EachInter(callback func(int, interface{}) bool) *Value { - - for index, val := range v.MustInterSlice() { - carryon := callback(index, val) - if carryon == false { - break - } - } - - return v - -} - -// WhereInter uses the specified decider function to select items -// from the []interface{}. The object contained in the result will contain -// only the selected items. -func (v *Value) WhereInter(decider func(int, interface{}) bool) *Value { - - var selected []interface{} - - v.EachInter(func(index int, val interface{}) bool { - shouldSelect := decider(index, val) - if shouldSelect == false { - selected = append(selected, val) - } - return true - }) - - return &Value{data: selected} - -} - -// GroupInter uses the specified grouper function to group the items -// keyed by the return of the grouper. The object contained in the -// result will contain a map[string][]interface{}. -func (v *Value) GroupInter(grouper func(int, interface{}) string) *Value { - - groups := make(map[string][]interface{}) - - v.EachInter(func(index int, val interface{}) bool { - group := grouper(index, val) - if _, ok := groups[group]; !ok { - groups[group] = make([]interface{}, 0) - } - groups[group] = append(groups[group], val) - return true - }) - - return &Value{data: groups} - -} - -// ReplaceInter uses the specified function to replace each interface{}s -// by iterating each item. The data in the returned result will be a -// []interface{} containing the replaced items. -func (v *Value) ReplaceInter(replacer func(int, interface{}) interface{}) *Value { - - arr := v.MustInterSlice() - replaced := make([]interface{}, len(arr)) - - v.EachInter(func(index int, val interface{}) bool { - replaced[index] = replacer(index, val) - return true - }) - - return &Value{data: replaced} - -} - -// CollectInter uses the specified collector function to collect a value -// for each of the interface{}s in the slice. The data returned will be a -// []interface{}. -func (v *Value) CollectInter(collector func(int, interface{}) interface{}) *Value { - - arr := v.MustInterSlice() - collected := make([]interface{}, len(arr)) - - v.EachInter(func(index int, val interface{}) bool { - collected[index] = collector(index, val) - return true - }) - - return &Value{data: collected} -} - -/* - MSI (map[string]interface{} and []map[string]interface{}) - -------------------------------------------------- -*/ - -// MSI gets the value as a map[string]interface{}, returns the optionalDefault -// value or a system default object if the value is the wrong type. -func (v *Value) MSI(optionalDefault ...map[string]interface{}) map[string]interface{} { - if s, ok := v.data.(map[string]interface{}); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return nil -} - -// MustMSI gets the value as a map[string]interface{}. -// -// Panics if the object is not a map[string]interface{}. -func (v *Value) MustMSI() map[string]interface{} { - return v.data.(map[string]interface{}) -} - -// MSISlice gets the value as a []map[string]interface{}, returns the optionalDefault -// value or nil if the value is not a []map[string]interface{}. -func (v *Value) MSISlice(optionalDefault ...[]map[string]interface{}) []map[string]interface{} { - if s, ok := v.data.([]map[string]interface{}); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return nil -} - -// MustMSISlice gets the value as a []map[string]interface{}. -// -// Panics if the object is not a []map[string]interface{}. 
-func (v *Value) MustMSISlice() []map[string]interface{} { - return v.data.([]map[string]interface{}) -} - -// IsMSI gets whether the object contained is a map[string]interface{} or not. -func (v *Value) IsMSI() bool { - _, ok := v.data.(map[string]interface{}) - return ok -} - -// IsMSISlice gets whether the object contained is a []map[string]interface{} or not. -func (v *Value) IsMSISlice() bool { - _, ok := v.data.([]map[string]interface{}) - return ok -} - -// EachMSI calls the specified callback for each object -// in the []map[string]interface{}. -// -// Panics if the object is the wrong type. -func (v *Value) EachMSI(callback func(int, map[string]interface{}) bool) *Value { - - for index, val := range v.MustMSISlice() { - carryon := callback(index, val) - if carryon == false { - break - } - } - - return v - -} - -// WhereMSI uses the specified decider function to select items -// from the []map[string]interface{}. The object contained in the result will contain -// only the selected items. -func (v *Value) WhereMSI(decider func(int, map[string]interface{}) bool) *Value { - - var selected []map[string]interface{} - - v.EachMSI(func(index int, val map[string]interface{}) bool { - shouldSelect := decider(index, val) - if shouldSelect == false { - selected = append(selected, val) - } - return true - }) - - return &Value{data: selected} - -} - -// GroupMSI uses the specified grouper function to group the items -// keyed by the return of the grouper. The object contained in the -// result will contain a map[string][]map[string]interface{}. -func (v *Value) GroupMSI(grouper func(int, map[string]interface{}) string) *Value { - - groups := make(map[string][]map[string]interface{}) - - v.EachMSI(func(index int, val map[string]interface{}) bool { - group := grouper(index, val) - if _, ok := groups[group]; !ok { - groups[group] = make([]map[string]interface{}, 0) - } - groups[group] = append(groups[group], val) - return true - }) - - return &Value{data: groups} - -} - -// ReplaceMSI uses the specified function to replace each map[string]interface{}s -// by iterating each item. The data in the returned result will be a -// []map[string]interface{} containing the replaced items. -func (v *Value) ReplaceMSI(replacer func(int, map[string]interface{}) map[string]interface{}) *Value { - - arr := v.MustMSISlice() - replaced := make([]map[string]interface{}, len(arr)) - - v.EachMSI(func(index int, val map[string]interface{}) bool { - replaced[index] = replacer(index, val) - return true - }) - - return &Value{data: replaced} - -} - -// CollectMSI uses the specified collector function to collect a value -// for each of the map[string]interface{}s in the slice. The data returned will be a -// []interface{}. -func (v *Value) CollectMSI(collector func(int, map[string]interface{}) interface{}) *Value { - - arr := v.MustMSISlice() - collected := make([]interface{}, len(arr)) - - v.EachMSI(func(index int, val map[string]interface{}) bool { - collected[index] = collector(index, val) - return true - }) - - return &Value{data: collected} -} - -/* - ObjxMap ((Map) and [](Map)) - -------------------------------------------------- -*/ - -// ObjxMap gets the value as a (Map), returns the optionalDefault -// value or a system default object if the value is the wrong type. -func (v *Value) ObjxMap(optionalDefault ...(Map)) Map { - if s, ok := v.data.((Map)); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return New(nil) -} - -// MustObjxMap gets the value as a (Map). 
-// -// Panics if the object is not a (Map). -func (v *Value) MustObjxMap() Map { - return v.data.((Map)) -} - -// ObjxMapSlice gets the value as a [](Map), returns the optionalDefault -// value or nil if the value is not a [](Map). -func (v *Value) ObjxMapSlice(optionalDefault ...[](Map)) [](Map) { - if s, ok := v.data.([](Map)); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return nil -} - -// MustObjxMapSlice gets the value as a [](Map). -// -// Panics if the object is not a [](Map). -func (v *Value) MustObjxMapSlice() [](Map) { - return v.data.([](Map)) -} - -// IsObjxMap gets whether the object contained is a (Map) or not. -func (v *Value) IsObjxMap() bool { - _, ok := v.data.((Map)) - return ok -} - -// IsObjxMapSlice gets whether the object contained is a [](Map) or not. -func (v *Value) IsObjxMapSlice() bool { - _, ok := v.data.([](Map)) - return ok -} - -// EachObjxMap calls the specified callback for each object -// in the [](Map). -// -// Panics if the object is the wrong type. -func (v *Value) EachObjxMap(callback func(int, Map) bool) *Value { - - for index, val := range v.MustObjxMapSlice() { - carryon := callback(index, val) - if carryon == false { - break - } - } - - return v - -} - -// WhereObjxMap uses the specified decider function to select items -// from the [](Map). The object contained in the result will contain -// only the selected items. -func (v *Value) WhereObjxMap(decider func(int, Map) bool) *Value { - - var selected [](Map) - - v.EachObjxMap(func(index int, val Map) bool { - shouldSelect := decider(index, val) - if shouldSelect == false { - selected = append(selected, val) - } - return true - }) - - return &Value{data: selected} - -} - -// GroupObjxMap uses the specified grouper function to group the items -// keyed by the return of the grouper. The object contained in the -// result will contain a map[string][](Map). -func (v *Value) GroupObjxMap(grouper func(int, Map) string) *Value { - - groups := make(map[string][](Map)) - - v.EachObjxMap(func(index int, val Map) bool { - group := grouper(index, val) - if _, ok := groups[group]; !ok { - groups[group] = make([](Map), 0) - } - groups[group] = append(groups[group], val) - return true - }) - - return &Value{data: groups} - -} - -// ReplaceObjxMap uses the specified function to replace each (Map)s -// by iterating each item. The data in the returned result will be a -// [](Map) containing the replaced items. -func (v *Value) ReplaceObjxMap(replacer func(int, Map) Map) *Value { - - arr := v.MustObjxMapSlice() - replaced := make([](Map), len(arr)) - - v.EachObjxMap(func(index int, val Map) bool { - replaced[index] = replacer(index, val) - return true - }) - - return &Value{data: replaced} - -} - -// CollectObjxMap uses the specified collector function to collect a value -// for each of the (Map)s in the slice. The data returned will be a -// []interface{}. -func (v *Value) CollectObjxMap(collector func(int, Map) interface{}) *Value { - - arr := v.MustObjxMapSlice() - collected := make([]interface{}, len(arr)) - - v.EachObjxMap(func(index int, val Map) bool { - collected[index] = collector(index, val) - return true - }) - - return &Value{data: collected} -} - -/* - Bool (bool and []bool) - -------------------------------------------------- -*/ - -// Bool gets the value as a bool, returns the optionalDefault -// value or a system default object if the value is the wrong type. 
-func (v *Value) Bool(optionalDefault ...bool) bool { - if s, ok := v.data.(bool); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return false -} - -// MustBool gets the value as a bool. -// -// Panics if the object is not a bool. -func (v *Value) MustBool() bool { - return v.data.(bool) -} - -// BoolSlice gets the value as a []bool, returns the optionalDefault -// value or nil if the value is not a []bool. -func (v *Value) BoolSlice(optionalDefault ...[]bool) []bool { - if s, ok := v.data.([]bool); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return nil -} - -// MustBoolSlice gets the value as a []bool. -// -// Panics if the object is not a []bool. -func (v *Value) MustBoolSlice() []bool { - return v.data.([]bool) -} - -// IsBool gets whether the object contained is a bool or not. -func (v *Value) IsBool() bool { - _, ok := v.data.(bool) - return ok -} - -// IsBoolSlice gets whether the object contained is a []bool or not. -func (v *Value) IsBoolSlice() bool { - _, ok := v.data.([]bool) - return ok -} - -// EachBool calls the specified callback for each object -// in the []bool. -// -// Panics if the object is the wrong type. -func (v *Value) EachBool(callback func(int, bool) bool) *Value { - - for index, val := range v.MustBoolSlice() { - carryon := callback(index, val) - if carryon == false { - break - } - } - - return v - -} - -// WhereBool uses the specified decider function to select items -// from the []bool. The object contained in the result will contain -// only the selected items. -func (v *Value) WhereBool(decider func(int, bool) bool) *Value { - - var selected []bool - - v.EachBool(func(index int, val bool) bool { - shouldSelect := decider(index, val) - if shouldSelect == false { - selected = append(selected, val) - } - return true - }) - - return &Value{data: selected} - -} - -// GroupBool uses the specified grouper function to group the items -// keyed by the return of the grouper. The object contained in the -// result will contain a map[string][]bool. -func (v *Value) GroupBool(grouper func(int, bool) string) *Value { - - groups := make(map[string][]bool) - - v.EachBool(func(index int, val bool) bool { - group := grouper(index, val) - if _, ok := groups[group]; !ok { - groups[group] = make([]bool, 0) - } - groups[group] = append(groups[group], val) - return true - }) - - return &Value{data: groups} - -} - -// ReplaceBool uses the specified function to replace each bools -// by iterating each item. The data in the returned result will be a -// []bool containing the replaced items. -func (v *Value) ReplaceBool(replacer func(int, bool) bool) *Value { - - arr := v.MustBoolSlice() - replaced := make([]bool, len(arr)) - - v.EachBool(func(index int, val bool) bool { - replaced[index] = replacer(index, val) - return true - }) - - return &Value{data: replaced} - -} - -// CollectBool uses the specified collector function to collect a value -// for each of the bools in the slice. The data returned will be a -// []interface{}. 
-func (v *Value) CollectBool(collector func(int, bool) interface{}) *Value { - - arr := v.MustBoolSlice() - collected := make([]interface{}, len(arr)) - - v.EachBool(func(index int, val bool) bool { - collected[index] = collector(index, val) - return true - }) - - return &Value{data: collected} -} - -/* - Str (string and []string) - -------------------------------------------------- -*/ - -// Str gets the value as a string, returns the optionalDefault -// value or a system default object if the value is the wrong type. -func (v *Value) Str(optionalDefault ...string) string { - if s, ok := v.data.(string); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return "" -} - -// MustStr gets the value as a string. -// -// Panics if the object is not a string. -func (v *Value) MustStr() string { - return v.data.(string) -} - -// StrSlice gets the value as a []string, returns the optionalDefault -// value or nil if the value is not a []string. -func (v *Value) StrSlice(optionalDefault ...[]string) []string { - if s, ok := v.data.([]string); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return nil -} - -// MustStrSlice gets the value as a []string. -// -// Panics if the object is not a []string. -func (v *Value) MustStrSlice() []string { - return v.data.([]string) -} - -// IsStr gets whether the object contained is a string or not. -func (v *Value) IsStr() bool { - _, ok := v.data.(string) - return ok -} - -// IsStrSlice gets whether the object contained is a []string or not. -func (v *Value) IsStrSlice() bool { - _, ok := v.data.([]string) - return ok -} - -// EachStr calls the specified callback for each object -// in the []string. -// -// Panics if the object is the wrong type. -func (v *Value) EachStr(callback func(int, string) bool) *Value { - - for index, val := range v.MustStrSlice() { - carryon := callback(index, val) - if carryon == false { - break - } - } - - return v - -} - -// WhereStr uses the specified decider function to select items -// from the []string. The object contained in the result will contain -// only the selected items. -func (v *Value) WhereStr(decider func(int, string) bool) *Value { - - var selected []string - - v.EachStr(func(index int, val string) bool { - shouldSelect := decider(index, val) - if shouldSelect == false { - selected = append(selected, val) - } - return true - }) - - return &Value{data: selected} - -} - -// GroupStr uses the specified grouper function to group the items -// keyed by the return of the grouper. The object contained in the -// result will contain a map[string][]string. -func (v *Value) GroupStr(grouper func(int, string) string) *Value { - - groups := make(map[string][]string) - - v.EachStr(func(index int, val string) bool { - group := grouper(index, val) - if _, ok := groups[group]; !ok { - groups[group] = make([]string, 0) - } - groups[group] = append(groups[group], val) - return true - }) - - return &Value{data: groups} - -} - -// ReplaceStr uses the specified function to replace each strings -// by iterating each item. The data in the returned result will be a -// []string containing the replaced items. 
-func (v *Value) ReplaceStr(replacer func(int, string) string) *Value { - - arr := v.MustStrSlice() - replaced := make([]string, len(arr)) - - v.EachStr(func(index int, val string) bool { - replaced[index] = replacer(index, val) - return true - }) - - return &Value{data: replaced} - -} - -// CollectStr uses the specified collector function to collect a value -// for each of the strings in the slice. The data returned will be a -// []interface{}. -func (v *Value) CollectStr(collector func(int, string) interface{}) *Value { - - arr := v.MustStrSlice() - collected := make([]interface{}, len(arr)) - - v.EachStr(func(index int, val string) bool { - collected[index] = collector(index, val) - return true - }) - - return &Value{data: collected} -} - -/* - Int (int and []int) - -------------------------------------------------- -*/ - -// Int gets the value as a int, returns the optionalDefault -// value or a system default object if the value is the wrong type. -func (v *Value) Int(optionalDefault ...int) int { - if s, ok := v.data.(int); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return 0 -} - -// MustInt gets the value as a int. -// -// Panics if the object is not a int. -func (v *Value) MustInt() int { - return v.data.(int) -} - -// IntSlice gets the value as a []int, returns the optionalDefault -// value or nil if the value is not a []int. -func (v *Value) IntSlice(optionalDefault ...[]int) []int { - if s, ok := v.data.([]int); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return nil -} - -// MustIntSlice gets the value as a []int. -// -// Panics if the object is not a []int. -func (v *Value) MustIntSlice() []int { - return v.data.([]int) -} - -// IsInt gets whether the object contained is a int or not. -func (v *Value) IsInt() bool { - _, ok := v.data.(int) - return ok -} - -// IsIntSlice gets whether the object contained is a []int or not. -func (v *Value) IsIntSlice() bool { - _, ok := v.data.([]int) - return ok -} - -// EachInt calls the specified callback for each object -// in the []int. -// -// Panics if the object is the wrong type. -func (v *Value) EachInt(callback func(int, int) bool) *Value { - - for index, val := range v.MustIntSlice() { - carryon := callback(index, val) - if carryon == false { - break - } - } - - return v - -} - -// WhereInt uses the specified decider function to select items -// from the []int. The object contained in the result will contain -// only the selected items. -func (v *Value) WhereInt(decider func(int, int) bool) *Value { - - var selected []int - - v.EachInt(func(index int, val int) bool { - shouldSelect := decider(index, val) - if shouldSelect == false { - selected = append(selected, val) - } - return true - }) - - return &Value{data: selected} - -} - -// GroupInt uses the specified grouper function to group the items -// keyed by the return of the grouper. The object contained in the -// result will contain a map[string][]int. -func (v *Value) GroupInt(grouper func(int, int) string) *Value { - - groups := make(map[string][]int) - - v.EachInt(func(index int, val int) bool { - group := grouper(index, val) - if _, ok := groups[group]; !ok { - groups[group] = make([]int, 0) - } - groups[group] = append(groups[group], val) - return true - }) - - return &Value{data: groups} - -} - -// ReplaceInt uses the specified function to replace each ints -// by iterating each item. The data in the returned result will be a -// []int containing the replaced items. 
-func (v *Value) ReplaceInt(replacer func(int, int) int) *Value { - - arr := v.MustIntSlice() - replaced := make([]int, len(arr)) - - v.EachInt(func(index int, val int) bool { - replaced[index] = replacer(index, val) - return true - }) - - return &Value{data: replaced} - -} - -// CollectInt uses the specified collector function to collect a value -// for each of the ints in the slice. The data returned will be a -// []interface{}. -func (v *Value) CollectInt(collector func(int, int) interface{}) *Value { - - arr := v.MustIntSlice() - collected := make([]interface{}, len(arr)) - - v.EachInt(func(index int, val int) bool { - collected[index] = collector(index, val) - return true - }) - - return &Value{data: collected} -} - -/* - Int8 (int8 and []int8) - -------------------------------------------------- -*/ - -// Int8 gets the value as a int8, returns the optionalDefault -// value or a system default object if the value is the wrong type. -func (v *Value) Int8(optionalDefault ...int8) int8 { - if s, ok := v.data.(int8); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return 0 -} - -// MustInt8 gets the value as a int8. -// -// Panics if the object is not a int8. -func (v *Value) MustInt8() int8 { - return v.data.(int8) -} - -// Int8Slice gets the value as a []int8, returns the optionalDefault -// value or nil if the value is not a []int8. -func (v *Value) Int8Slice(optionalDefault ...[]int8) []int8 { - if s, ok := v.data.([]int8); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return nil -} - -// MustInt8Slice gets the value as a []int8. -// -// Panics if the object is not a []int8. -func (v *Value) MustInt8Slice() []int8 { - return v.data.([]int8) -} - -// IsInt8 gets whether the object contained is a int8 or not. -func (v *Value) IsInt8() bool { - _, ok := v.data.(int8) - return ok -} - -// IsInt8Slice gets whether the object contained is a []int8 or not. -func (v *Value) IsInt8Slice() bool { - _, ok := v.data.([]int8) - return ok -} - -// EachInt8 calls the specified callback for each object -// in the []int8. -// -// Panics if the object is the wrong type. -func (v *Value) EachInt8(callback func(int, int8) bool) *Value { - - for index, val := range v.MustInt8Slice() { - carryon := callback(index, val) - if carryon == false { - break - } - } - - return v - -} - -// WhereInt8 uses the specified decider function to select items -// from the []int8. The object contained in the result will contain -// only the selected items. -func (v *Value) WhereInt8(decider func(int, int8) bool) *Value { - - var selected []int8 - - v.EachInt8(func(index int, val int8) bool { - shouldSelect := decider(index, val) - if shouldSelect == false { - selected = append(selected, val) - } - return true - }) - - return &Value{data: selected} - -} - -// GroupInt8 uses the specified grouper function to group the items -// keyed by the return of the grouper. The object contained in the -// result will contain a map[string][]int8. -func (v *Value) GroupInt8(grouper func(int, int8) string) *Value { - - groups := make(map[string][]int8) - - v.EachInt8(func(index int, val int8) bool { - group := grouper(index, val) - if _, ok := groups[group]; !ok { - groups[group] = make([]int8, 0) - } - groups[group] = append(groups[group], val) - return true - }) - - return &Value{data: groups} - -} - -// ReplaceInt8 uses the specified function to replace each int8s -// by iterating each item. 
The data in the returned result will be a -// []int8 containing the replaced items. -func (v *Value) ReplaceInt8(replacer func(int, int8) int8) *Value { - - arr := v.MustInt8Slice() - replaced := make([]int8, len(arr)) - - v.EachInt8(func(index int, val int8) bool { - replaced[index] = replacer(index, val) - return true - }) - - return &Value{data: replaced} - -} - -// CollectInt8 uses the specified collector function to collect a value -// for each of the int8s in the slice. The data returned will be a -// []interface{}. -func (v *Value) CollectInt8(collector func(int, int8) interface{}) *Value { - - arr := v.MustInt8Slice() - collected := make([]interface{}, len(arr)) - - v.EachInt8(func(index int, val int8) bool { - collected[index] = collector(index, val) - return true - }) - - return &Value{data: collected} -} - -/* - Int16 (int16 and []int16) - -------------------------------------------------- -*/ - -// Int16 gets the value as a int16, returns the optionalDefault -// value or a system default object if the value is the wrong type. -func (v *Value) Int16(optionalDefault ...int16) int16 { - if s, ok := v.data.(int16); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return 0 -} - -// MustInt16 gets the value as a int16. -// -// Panics if the object is not a int16. -func (v *Value) MustInt16() int16 { - return v.data.(int16) -} - -// Int16Slice gets the value as a []int16, returns the optionalDefault -// value or nil if the value is not a []int16. -func (v *Value) Int16Slice(optionalDefault ...[]int16) []int16 { - if s, ok := v.data.([]int16); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return nil -} - -// MustInt16Slice gets the value as a []int16. -// -// Panics if the object is not a []int16. -func (v *Value) MustInt16Slice() []int16 { - return v.data.([]int16) -} - -// IsInt16 gets whether the object contained is a int16 or not. -func (v *Value) IsInt16() bool { - _, ok := v.data.(int16) - return ok -} - -// IsInt16Slice gets whether the object contained is a []int16 or not. -func (v *Value) IsInt16Slice() bool { - _, ok := v.data.([]int16) - return ok -} - -// EachInt16 calls the specified callback for each object -// in the []int16. -// -// Panics if the object is the wrong type. -func (v *Value) EachInt16(callback func(int, int16) bool) *Value { - - for index, val := range v.MustInt16Slice() { - carryon := callback(index, val) - if carryon == false { - break - } - } - - return v - -} - -// WhereInt16 uses the specified decider function to select items -// from the []int16. The object contained in the result will contain -// only the selected items. -func (v *Value) WhereInt16(decider func(int, int16) bool) *Value { - - var selected []int16 - - v.EachInt16(func(index int, val int16) bool { - shouldSelect := decider(index, val) - if shouldSelect == false { - selected = append(selected, val) - } - return true - }) - - return &Value{data: selected} - -} - -// GroupInt16 uses the specified grouper function to group the items -// keyed by the return of the grouper. The object contained in the -// result will contain a map[string][]int16. 
-func (v *Value) GroupInt16(grouper func(int, int16) string) *Value { - - groups := make(map[string][]int16) - - v.EachInt16(func(index int, val int16) bool { - group := grouper(index, val) - if _, ok := groups[group]; !ok { - groups[group] = make([]int16, 0) - } - groups[group] = append(groups[group], val) - return true - }) - - return &Value{data: groups} - -} - -// ReplaceInt16 uses the specified function to replace each int16s -// by iterating each item. The data in the returned result will be a -// []int16 containing the replaced items. -func (v *Value) ReplaceInt16(replacer func(int, int16) int16) *Value { - - arr := v.MustInt16Slice() - replaced := make([]int16, len(arr)) - - v.EachInt16(func(index int, val int16) bool { - replaced[index] = replacer(index, val) - return true - }) - - return &Value{data: replaced} - -} - -// CollectInt16 uses the specified collector function to collect a value -// for each of the int16s in the slice. The data returned will be a -// []interface{}. -func (v *Value) CollectInt16(collector func(int, int16) interface{}) *Value { - - arr := v.MustInt16Slice() - collected := make([]interface{}, len(arr)) - - v.EachInt16(func(index int, val int16) bool { - collected[index] = collector(index, val) - return true - }) - - return &Value{data: collected} -} - -/* - Int32 (int32 and []int32) - -------------------------------------------------- -*/ - -// Int32 gets the value as a int32, returns the optionalDefault -// value or a system default object if the value is the wrong type. -func (v *Value) Int32(optionalDefault ...int32) int32 { - if s, ok := v.data.(int32); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return 0 -} - -// MustInt32 gets the value as a int32. -// -// Panics if the object is not a int32. -func (v *Value) MustInt32() int32 { - return v.data.(int32) -} - -// Int32Slice gets the value as a []int32, returns the optionalDefault -// value or nil if the value is not a []int32. -func (v *Value) Int32Slice(optionalDefault ...[]int32) []int32 { - if s, ok := v.data.([]int32); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return nil -} - -// MustInt32Slice gets the value as a []int32. -// -// Panics if the object is not a []int32. -func (v *Value) MustInt32Slice() []int32 { - return v.data.([]int32) -} - -// IsInt32 gets whether the object contained is a int32 or not. -func (v *Value) IsInt32() bool { - _, ok := v.data.(int32) - return ok -} - -// IsInt32Slice gets whether the object contained is a []int32 or not. -func (v *Value) IsInt32Slice() bool { - _, ok := v.data.([]int32) - return ok -} - -// EachInt32 calls the specified callback for each object -// in the []int32. -// -// Panics if the object is the wrong type. -func (v *Value) EachInt32(callback func(int, int32) bool) *Value { - - for index, val := range v.MustInt32Slice() { - carryon := callback(index, val) - if carryon == false { - break - } - } - - return v - -} - -// WhereInt32 uses the specified decider function to select items -// from the []int32. The object contained in the result will contain -// only the selected items. 
-func (v *Value) WhereInt32(decider func(int, int32) bool) *Value { - - var selected []int32 - - v.EachInt32(func(index int, val int32) bool { - shouldSelect := decider(index, val) - if shouldSelect == false { - selected = append(selected, val) - } - return true - }) - - return &Value{data: selected} - -} - -// GroupInt32 uses the specified grouper function to group the items -// keyed by the return of the grouper. The object contained in the -// result will contain a map[string][]int32. -func (v *Value) GroupInt32(grouper func(int, int32) string) *Value { - - groups := make(map[string][]int32) - - v.EachInt32(func(index int, val int32) bool { - group := grouper(index, val) - if _, ok := groups[group]; !ok { - groups[group] = make([]int32, 0) - } - groups[group] = append(groups[group], val) - return true - }) - - return &Value{data: groups} - -} - -// ReplaceInt32 uses the specified function to replace each int32s -// by iterating each item. The data in the returned result will be a -// []int32 containing the replaced items. -func (v *Value) ReplaceInt32(replacer func(int, int32) int32) *Value { - - arr := v.MustInt32Slice() - replaced := make([]int32, len(arr)) - - v.EachInt32(func(index int, val int32) bool { - replaced[index] = replacer(index, val) - return true - }) - - return &Value{data: replaced} - -} - -// CollectInt32 uses the specified collector function to collect a value -// for each of the int32s in the slice. The data returned will be a -// []interface{}. -func (v *Value) CollectInt32(collector func(int, int32) interface{}) *Value { - - arr := v.MustInt32Slice() - collected := make([]interface{}, len(arr)) - - v.EachInt32(func(index int, val int32) bool { - collected[index] = collector(index, val) - return true - }) - - return &Value{data: collected} -} - -/* - Int64 (int64 and []int64) - -------------------------------------------------- -*/ - -// Int64 gets the value as a int64, returns the optionalDefault -// value or a system default object if the value is the wrong type. -func (v *Value) Int64(optionalDefault ...int64) int64 { - if s, ok := v.data.(int64); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return 0 -} - -// MustInt64 gets the value as a int64. -// -// Panics if the object is not a int64. -func (v *Value) MustInt64() int64 { - return v.data.(int64) -} - -// Int64Slice gets the value as a []int64, returns the optionalDefault -// value or nil if the value is not a []int64. -func (v *Value) Int64Slice(optionalDefault ...[]int64) []int64 { - if s, ok := v.data.([]int64); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return nil -} - -// MustInt64Slice gets the value as a []int64. -// -// Panics if the object is not a []int64. -func (v *Value) MustInt64Slice() []int64 { - return v.data.([]int64) -} - -// IsInt64 gets whether the object contained is a int64 or not. -func (v *Value) IsInt64() bool { - _, ok := v.data.(int64) - return ok -} - -// IsInt64Slice gets whether the object contained is a []int64 or not. -func (v *Value) IsInt64Slice() bool { - _, ok := v.data.([]int64) - return ok -} - -// EachInt64 calls the specified callback for each object -// in the []int64. -// -// Panics if the object is the wrong type. 
-func (v *Value) EachInt64(callback func(int, int64) bool) *Value { - - for index, val := range v.MustInt64Slice() { - carryon := callback(index, val) - if carryon == false { - break - } - } - - return v - -} - -// WhereInt64 uses the specified decider function to select items -// from the []int64. The object contained in the result will contain -// only the selected items. -func (v *Value) WhereInt64(decider func(int, int64) bool) *Value { - - var selected []int64 - - v.EachInt64(func(index int, val int64) bool { - shouldSelect := decider(index, val) - if shouldSelect == false { - selected = append(selected, val) - } - return true - }) - - return &Value{data: selected} - -} - -// GroupInt64 uses the specified grouper function to group the items -// keyed by the return of the grouper. The object contained in the -// result will contain a map[string][]int64. -func (v *Value) GroupInt64(grouper func(int, int64) string) *Value { - - groups := make(map[string][]int64) - - v.EachInt64(func(index int, val int64) bool { - group := grouper(index, val) - if _, ok := groups[group]; !ok { - groups[group] = make([]int64, 0) - } - groups[group] = append(groups[group], val) - return true - }) - - return &Value{data: groups} - -} - -// ReplaceInt64 uses the specified function to replace each int64s -// by iterating each item. The data in the returned result will be a -// []int64 containing the replaced items. -func (v *Value) ReplaceInt64(replacer func(int, int64) int64) *Value { - - arr := v.MustInt64Slice() - replaced := make([]int64, len(arr)) - - v.EachInt64(func(index int, val int64) bool { - replaced[index] = replacer(index, val) - return true - }) - - return &Value{data: replaced} - -} - -// CollectInt64 uses the specified collector function to collect a value -// for each of the int64s in the slice. The data returned will be a -// []interface{}. -func (v *Value) CollectInt64(collector func(int, int64) interface{}) *Value { - - arr := v.MustInt64Slice() - collected := make([]interface{}, len(arr)) - - v.EachInt64(func(index int, val int64) bool { - collected[index] = collector(index, val) - return true - }) - - return &Value{data: collected} -} - -/* - Uint (uint and []uint) - -------------------------------------------------- -*/ - -// Uint gets the value as a uint, returns the optionalDefault -// value or a system default object if the value is the wrong type. -func (v *Value) Uint(optionalDefault ...uint) uint { - if s, ok := v.data.(uint); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return 0 -} - -// MustUint gets the value as a uint. -// -// Panics if the object is not a uint. -func (v *Value) MustUint() uint { - return v.data.(uint) -} - -// UintSlice gets the value as a []uint, returns the optionalDefault -// value or nil if the value is not a []uint. -func (v *Value) UintSlice(optionalDefault ...[]uint) []uint { - if s, ok := v.data.([]uint); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return nil -} - -// MustUintSlice gets the value as a []uint. -// -// Panics if the object is not a []uint. -func (v *Value) MustUintSlice() []uint { - return v.data.([]uint) -} - -// IsUint gets whether the object contained is a uint or not. -func (v *Value) IsUint() bool { - _, ok := v.data.(uint) - return ok -} - -// IsUintSlice gets whether the object contained is a []uint or not. 
-func (v *Value) IsUintSlice() bool { - _, ok := v.data.([]uint) - return ok -} - -// EachUint calls the specified callback for each object -// in the []uint. -// -// Panics if the object is the wrong type. -func (v *Value) EachUint(callback func(int, uint) bool) *Value { - - for index, val := range v.MustUintSlice() { - carryon := callback(index, val) - if carryon == false { - break - } - } - - return v - -} - -// WhereUint uses the specified decider function to select items -// from the []uint. The object contained in the result will contain -// only the selected items. -func (v *Value) WhereUint(decider func(int, uint) bool) *Value { - - var selected []uint - - v.EachUint(func(index int, val uint) bool { - shouldSelect := decider(index, val) - if shouldSelect == false { - selected = append(selected, val) - } - return true - }) - - return &Value{data: selected} - -} - -// GroupUint uses the specified grouper function to group the items -// keyed by the return of the grouper. The object contained in the -// result will contain a map[string][]uint. -func (v *Value) GroupUint(grouper func(int, uint) string) *Value { - - groups := make(map[string][]uint) - - v.EachUint(func(index int, val uint) bool { - group := grouper(index, val) - if _, ok := groups[group]; !ok { - groups[group] = make([]uint, 0) - } - groups[group] = append(groups[group], val) - return true - }) - - return &Value{data: groups} - -} - -// ReplaceUint uses the specified function to replace each uints -// by iterating each item. The data in the returned result will be a -// []uint containing the replaced items. -func (v *Value) ReplaceUint(replacer func(int, uint) uint) *Value { - - arr := v.MustUintSlice() - replaced := make([]uint, len(arr)) - - v.EachUint(func(index int, val uint) bool { - replaced[index] = replacer(index, val) - return true - }) - - return &Value{data: replaced} - -} - -// CollectUint uses the specified collector function to collect a value -// for each of the uints in the slice. The data returned will be a -// []interface{}. -func (v *Value) CollectUint(collector func(int, uint) interface{}) *Value { - - arr := v.MustUintSlice() - collected := make([]interface{}, len(arr)) - - v.EachUint(func(index int, val uint) bool { - collected[index] = collector(index, val) - return true - }) - - return &Value{data: collected} -} - -/* - Uint8 (uint8 and []uint8) - -------------------------------------------------- -*/ - -// Uint8 gets the value as a uint8, returns the optionalDefault -// value or a system default object if the value is the wrong type. -func (v *Value) Uint8(optionalDefault ...uint8) uint8 { - if s, ok := v.data.(uint8); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return 0 -} - -// MustUint8 gets the value as a uint8. -// -// Panics if the object is not a uint8. -func (v *Value) MustUint8() uint8 { - return v.data.(uint8) -} - -// Uint8Slice gets the value as a []uint8, returns the optionalDefault -// value or nil if the value is not a []uint8. -func (v *Value) Uint8Slice(optionalDefault ...[]uint8) []uint8 { - if s, ok := v.data.([]uint8); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return nil -} - -// MustUint8Slice gets the value as a []uint8. -// -// Panics if the object is not a []uint8. -func (v *Value) MustUint8Slice() []uint8 { - return v.data.([]uint8) -} - -// IsUint8 gets whether the object contained is a uint8 or not. 
-func (v *Value) IsUint8() bool { - _, ok := v.data.(uint8) - return ok -} - -// IsUint8Slice gets whether the object contained is a []uint8 or not. -func (v *Value) IsUint8Slice() bool { - _, ok := v.data.([]uint8) - return ok -} - -// EachUint8 calls the specified callback for each object -// in the []uint8. -// -// Panics if the object is the wrong type. -func (v *Value) EachUint8(callback func(int, uint8) bool) *Value { - - for index, val := range v.MustUint8Slice() { - carryon := callback(index, val) - if carryon == false { - break - } - } - - return v - -} - -// WhereUint8 uses the specified decider function to select items -// from the []uint8. The object contained in the result will contain -// only the selected items. -func (v *Value) WhereUint8(decider func(int, uint8) bool) *Value { - - var selected []uint8 - - v.EachUint8(func(index int, val uint8) bool { - shouldSelect := decider(index, val) - if shouldSelect == false { - selected = append(selected, val) - } - return true - }) - - return &Value{data: selected} - -} - -// GroupUint8 uses the specified grouper function to group the items -// keyed by the return of the grouper. The object contained in the -// result will contain a map[string][]uint8. -func (v *Value) GroupUint8(grouper func(int, uint8) string) *Value { - - groups := make(map[string][]uint8) - - v.EachUint8(func(index int, val uint8) bool { - group := grouper(index, val) - if _, ok := groups[group]; !ok { - groups[group] = make([]uint8, 0) - } - groups[group] = append(groups[group], val) - return true - }) - - return &Value{data: groups} - -} - -// ReplaceUint8 uses the specified function to replace each uint8s -// by iterating each item. The data in the returned result will be a -// []uint8 containing the replaced items. -func (v *Value) ReplaceUint8(replacer func(int, uint8) uint8) *Value { - - arr := v.MustUint8Slice() - replaced := make([]uint8, len(arr)) - - v.EachUint8(func(index int, val uint8) bool { - replaced[index] = replacer(index, val) - return true - }) - - return &Value{data: replaced} - -} - -// CollectUint8 uses the specified collector function to collect a value -// for each of the uint8s in the slice. The data returned will be a -// []interface{}. -func (v *Value) CollectUint8(collector func(int, uint8) interface{}) *Value { - - arr := v.MustUint8Slice() - collected := make([]interface{}, len(arr)) - - v.EachUint8(func(index int, val uint8) bool { - collected[index] = collector(index, val) - return true - }) - - return &Value{data: collected} -} - -/* - Uint16 (uint16 and []uint16) - -------------------------------------------------- -*/ - -// Uint16 gets the value as a uint16, returns the optionalDefault -// value or a system default object if the value is the wrong type. -func (v *Value) Uint16(optionalDefault ...uint16) uint16 { - if s, ok := v.data.(uint16); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return 0 -} - -// MustUint16 gets the value as a uint16. -// -// Panics if the object is not a uint16. -func (v *Value) MustUint16() uint16 { - return v.data.(uint16) -} - -// Uint16Slice gets the value as a []uint16, returns the optionalDefault -// value or nil if the value is not a []uint16. -func (v *Value) Uint16Slice(optionalDefault ...[]uint16) []uint16 { - if s, ok := v.data.([]uint16); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return nil -} - -// MustUint16Slice gets the value as a []uint16. -// -// Panics if the object is not a []uint16. 
-func (v *Value) MustUint16Slice() []uint16 { - return v.data.([]uint16) -} - -// IsUint16 gets whether the object contained is a uint16 or not. -func (v *Value) IsUint16() bool { - _, ok := v.data.(uint16) - return ok -} - -// IsUint16Slice gets whether the object contained is a []uint16 or not. -func (v *Value) IsUint16Slice() bool { - _, ok := v.data.([]uint16) - return ok -} - -// EachUint16 calls the specified callback for each object -// in the []uint16. -// -// Panics if the object is the wrong type. -func (v *Value) EachUint16(callback func(int, uint16) bool) *Value { - - for index, val := range v.MustUint16Slice() { - carryon := callback(index, val) - if carryon == false { - break - } - } - - return v - -} - -// WhereUint16 uses the specified decider function to select items -// from the []uint16. The object contained in the result will contain -// only the selected items. -func (v *Value) WhereUint16(decider func(int, uint16) bool) *Value { - - var selected []uint16 - - v.EachUint16(func(index int, val uint16) bool { - shouldSelect := decider(index, val) - if shouldSelect == false { - selected = append(selected, val) - } - return true - }) - - return &Value{data: selected} - -} - -// GroupUint16 uses the specified grouper function to group the items -// keyed by the return of the grouper. The object contained in the -// result will contain a map[string][]uint16. -func (v *Value) GroupUint16(grouper func(int, uint16) string) *Value { - - groups := make(map[string][]uint16) - - v.EachUint16(func(index int, val uint16) bool { - group := grouper(index, val) - if _, ok := groups[group]; !ok { - groups[group] = make([]uint16, 0) - } - groups[group] = append(groups[group], val) - return true - }) - - return &Value{data: groups} - -} - -// ReplaceUint16 uses the specified function to replace each uint16s -// by iterating each item. The data in the returned result will be a -// []uint16 containing the replaced items. -func (v *Value) ReplaceUint16(replacer func(int, uint16) uint16) *Value { - - arr := v.MustUint16Slice() - replaced := make([]uint16, len(arr)) - - v.EachUint16(func(index int, val uint16) bool { - replaced[index] = replacer(index, val) - return true - }) - - return &Value{data: replaced} - -} - -// CollectUint16 uses the specified collector function to collect a value -// for each of the uint16s in the slice. The data returned will be a -// []interface{}. -func (v *Value) CollectUint16(collector func(int, uint16) interface{}) *Value { - - arr := v.MustUint16Slice() - collected := make([]interface{}, len(arr)) - - v.EachUint16(func(index int, val uint16) bool { - collected[index] = collector(index, val) - return true - }) - - return &Value{data: collected} -} - -/* - Uint32 (uint32 and []uint32) - -------------------------------------------------- -*/ - -// Uint32 gets the value as a uint32, returns the optionalDefault -// value or a system default object if the value is the wrong type. -func (v *Value) Uint32(optionalDefault ...uint32) uint32 { - if s, ok := v.data.(uint32); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return 0 -} - -// MustUint32 gets the value as a uint32. -// -// Panics if the object is not a uint32. -func (v *Value) MustUint32() uint32 { - return v.data.(uint32) -} - -// Uint32Slice gets the value as a []uint32, returns the optionalDefault -// value or nil if the value is not a []uint32. 
-func (v *Value) Uint32Slice(optionalDefault ...[]uint32) []uint32 { - if s, ok := v.data.([]uint32); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return nil -} - -// MustUint32Slice gets the value as a []uint32. -// -// Panics if the object is not a []uint32. -func (v *Value) MustUint32Slice() []uint32 { - return v.data.([]uint32) -} - -// IsUint32 gets whether the object contained is a uint32 or not. -func (v *Value) IsUint32() bool { - _, ok := v.data.(uint32) - return ok -} - -// IsUint32Slice gets whether the object contained is a []uint32 or not. -func (v *Value) IsUint32Slice() bool { - _, ok := v.data.([]uint32) - return ok -} - -// EachUint32 calls the specified callback for each object -// in the []uint32. -// -// Panics if the object is the wrong type. -func (v *Value) EachUint32(callback func(int, uint32) bool) *Value { - - for index, val := range v.MustUint32Slice() { - carryon := callback(index, val) - if carryon == false { - break - } - } - - return v - -} - -// WhereUint32 uses the specified decider function to select items -// from the []uint32. The object contained in the result will contain -// only the selected items. -func (v *Value) WhereUint32(decider func(int, uint32) bool) *Value { - - var selected []uint32 - - v.EachUint32(func(index int, val uint32) bool { - shouldSelect := decider(index, val) - if shouldSelect == false { - selected = append(selected, val) - } - return true - }) - - return &Value{data: selected} - -} - -// GroupUint32 uses the specified grouper function to group the items -// keyed by the return of the grouper. The object contained in the -// result will contain a map[string][]uint32. -func (v *Value) GroupUint32(grouper func(int, uint32) string) *Value { - - groups := make(map[string][]uint32) - - v.EachUint32(func(index int, val uint32) bool { - group := grouper(index, val) - if _, ok := groups[group]; !ok { - groups[group] = make([]uint32, 0) - } - groups[group] = append(groups[group], val) - return true - }) - - return &Value{data: groups} - -} - -// ReplaceUint32 uses the specified function to replace each uint32s -// by iterating each item. The data in the returned result will be a -// []uint32 containing the replaced items. -func (v *Value) ReplaceUint32(replacer func(int, uint32) uint32) *Value { - - arr := v.MustUint32Slice() - replaced := make([]uint32, len(arr)) - - v.EachUint32(func(index int, val uint32) bool { - replaced[index] = replacer(index, val) - return true - }) - - return &Value{data: replaced} - -} - -// CollectUint32 uses the specified collector function to collect a value -// for each of the uint32s in the slice. The data returned will be a -// []interface{}. -func (v *Value) CollectUint32(collector func(int, uint32) interface{}) *Value { - - arr := v.MustUint32Slice() - collected := make([]interface{}, len(arr)) - - v.EachUint32(func(index int, val uint32) bool { - collected[index] = collector(index, val) - return true - }) - - return &Value{data: collected} -} - -/* - Uint64 (uint64 and []uint64) - -------------------------------------------------- -*/ - -// Uint64 gets the value as a uint64, returns the optionalDefault -// value or a system default object if the value is the wrong type. -func (v *Value) Uint64(optionalDefault ...uint64) uint64 { - if s, ok := v.data.(uint64); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return 0 -} - -// MustUint64 gets the value as a uint64. -// -// Panics if the object is not a uint64. 
-func (v *Value) MustUint64() uint64 { - return v.data.(uint64) -} - -// Uint64Slice gets the value as a []uint64, returns the optionalDefault -// value or nil if the value is not a []uint64. -func (v *Value) Uint64Slice(optionalDefault ...[]uint64) []uint64 { - if s, ok := v.data.([]uint64); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return nil -} - -// MustUint64Slice gets the value as a []uint64. -// -// Panics if the object is not a []uint64. -func (v *Value) MustUint64Slice() []uint64 { - return v.data.([]uint64) -} - -// IsUint64 gets whether the object contained is a uint64 or not. -func (v *Value) IsUint64() bool { - _, ok := v.data.(uint64) - return ok -} - -// IsUint64Slice gets whether the object contained is a []uint64 or not. -func (v *Value) IsUint64Slice() bool { - _, ok := v.data.([]uint64) - return ok -} - -// EachUint64 calls the specified callback for each object -// in the []uint64. -// -// Panics if the object is the wrong type. -func (v *Value) EachUint64(callback func(int, uint64) bool) *Value { - - for index, val := range v.MustUint64Slice() { - carryon := callback(index, val) - if carryon == false { - break - } - } - - return v - -} - -// WhereUint64 uses the specified decider function to select items -// from the []uint64. The object contained in the result will contain -// only the selected items. -func (v *Value) WhereUint64(decider func(int, uint64) bool) *Value { - - var selected []uint64 - - v.EachUint64(func(index int, val uint64) bool { - shouldSelect := decider(index, val) - if shouldSelect == false { - selected = append(selected, val) - } - return true - }) - - return &Value{data: selected} - -} - -// GroupUint64 uses the specified grouper function to group the items -// keyed by the return of the grouper. The object contained in the -// result will contain a map[string][]uint64. -func (v *Value) GroupUint64(grouper func(int, uint64) string) *Value { - - groups := make(map[string][]uint64) - - v.EachUint64(func(index int, val uint64) bool { - group := grouper(index, val) - if _, ok := groups[group]; !ok { - groups[group] = make([]uint64, 0) - } - groups[group] = append(groups[group], val) - return true - }) - - return &Value{data: groups} - -} - -// ReplaceUint64 uses the specified function to replace each uint64s -// by iterating each item. The data in the returned result will be a -// []uint64 containing the replaced items. -func (v *Value) ReplaceUint64(replacer func(int, uint64) uint64) *Value { - - arr := v.MustUint64Slice() - replaced := make([]uint64, len(arr)) - - v.EachUint64(func(index int, val uint64) bool { - replaced[index] = replacer(index, val) - return true - }) - - return &Value{data: replaced} - -} - -// CollectUint64 uses the specified collector function to collect a value -// for each of the uint64s in the slice. The data returned will be a -// []interface{}. -func (v *Value) CollectUint64(collector func(int, uint64) interface{}) *Value { - - arr := v.MustUint64Slice() - collected := make([]interface{}, len(arr)) - - v.EachUint64(func(index int, val uint64) bool { - collected[index] = collector(index, val) - return true - }) - - return &Value{data: collected} -} - -/* - Uintptr (uintptr and []uintptr) - -------------------------------------------------- -*/ - -// Uintptr gets the value as a uintptr, returns the optionalDefault -// value or a system default object if the value is the wrong type. 
-func (v *Value) Uintptr(optionalDefault ...uintptr) uintptr { - if s, ok := v.data.(uintptr); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return 0 -} - -// MustUintptr gets the value as a uintptr. -// -// Panics if the object is not a uintptr. -func (v *Value) MustUintptr() uintptr { - return v.data.(uintptr) -} - -// UintptrSlice gets the value as a []uintptr, returns the optionalDefault -// value or nil if the value is not a []uintptr. -func (v *Value) UintptrSlice(optionalDefault ...[]uintptr) []uintptr { - if s, ok := v.data.([]uintptr); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return nil -} - -// MustUintptrSlice gets the value as a []uintptr. -// -// Panics if the object is not a []uintptr. -func (v *Value) MustUintptrSlice() []uintptr { - return v.data.([]uintptr) -} - -// IsUintptr gets whether the object contained is a uintptr or not. -func (v *Value) IsUintptr() bool { - _, ok := v.data.(uintptr) - return ok -} - -// IsUintptrSlice gets whether the object contained is a []uintptr or not. -func (v *Value) IsUintptrSlice() bool { - _, ok := v.data.([]uintptr) - return ok -} - -// EachUintptr calls the specified callback for each object -// in the []uintptr. -// -// Panics if the object is the wrong type. -func (v *Value) EachUintptr(callback func(int, uintptr) bool) *Value { - - for index, val := range v.MustUintptrSlice() { - carryon := callback(index, val) - if carryon == false { - break - } - } - - return v - -} - -// WhereUintptr uses the specified decider function to select items -// from the []uintptr. The object contained in the result will contain -// only the selected items. -func (v *Value) WhereUintptr(decider func(int, uintptr) bool) *Value { - - var selected []uintptr - - v.EachUintptr(func(index int, val uintptr) bool { - shouldSelect := decider(index, val) - if shouldSelect == false { - selected = append(selected, val) - } - return true - }) - - return &Value{data: selected} - -} - -// GroupUintptr uses the specified grouper function to group the items -// keyed by the return of the grouper. The object contained in the -// result will contain a map[string][]uintptr. -func (v *Value) GroupUintptr(grouper func(int, uintptr) string) *Value { - - groups := make(map[string][]uintptr) - - v.EachUintptr(func(index int, val uintptr) bool { - group := grouper(index, val) - if _, ok := groups[group]; !ok { - groups[group] = make([]uintptr, 0) - } - groups[group] = append(groups[group], val) - return true - }) - - return &Value{data: groups} - -} - -// ReplaceUintptr uses the specified function to replace each uintptrs -// by iterating each item. The data in the returned result will be a -// []uintptr containing the replaced items. -func (v *Value) ReplaceUintptr(replacer func(int, uintptr) uintptr) *Value { - - arr := v.MustUintptrSlice() - replaced := make([]uintptr, len(arr)) - - v.EachUintptr(func(index int, val uintptr) bool { - replaced[index] = replacer(index, val) - return true - }) - - return &Value{data: replaced} - -} - -// CollectUintptr uses the specified collector function to collect a value -// for each of the uintptrs in the slice. The data returned will be a -// []interface{}. 
-func (v *Value) CollectUintptr(collector func(int, uintptr) interface{}) *Value { - - arr := v.MustUintptrSlice() - collected := make([]interface{}, len(arr)) - - v.EachUintptr(func(index int, val uintptr) bool { - collected[index] = collector(index, val) - return true - }) - - return &Value{data: collected} -} - -/* - Float32 (float32 and []float32) - -------------------------------------------------- -*/ - -// Float32 gets the value as a float32, returns the optionalDefault -// value or a system default object if the value is the wrong type. -func (v *Value) Float32(optionalDefault ...float32) float32 { - if s, ok := v.data.(float32); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return 0 -} - -// MustFloat32 gets the value as a float32. -// -// Panics if the object is not a float32. -func (v *Value) MustFloat32() float32 { - return v.data.(float32) -} - -// Float32Slice gets the value as a []float32, returns the optionalDefault -// value or nil if the value is not a []float32. -func (v *Value) Float32Slice(optionalDefault ...[]float32) []float32 { - if s, ok := v.data.([]float32); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return nil -} - -// MustFloat32Slice gets the value as a []float32. -// -// Panics if the object is not a []float32. -func (v *Value) MustFloat32Slice() []float32 { - return v.data.([]float32) -} - -// IsFloat32 gets whether the object contained is a float32 or not. -func (v *Value) IsFloat32() bool { - _, ok := v.data.(float32) - return ok -} - -// IsFloat32Slice gets whether the object contained is a []float32 or not. -func (v *Value) IsFloat32Slice() bool { - _, ok := v.data.([]float32) - return ok -} - -// EachFloat32 calls the specified callback for each object -// in the []float32. -// -// Panics if the object is the wrong type. -func (v *Value) EachFloat32(callback func(int, float32) bool) *Value { - - for index, val := range v.MustFloat32Slice() { - carryon := callback(index, val) - if carryon == false { - break - } - } - - return v - -} - -// WhereFloat32 uses the specified decider function to select items -// from the []float32. The object contained in the result will contain -// only the selected items. -func (v *Value) WhereFloat32(decider func(int, float32) bool) *Value { - - var selected []float32 - - v.EachFloat32(func(index int, val float32) bool { - shouldSelect := decider(index, val) - if shouldSelect == false { - selected = append(selected, val) - } - return true - }) - - return &Value{data: selected} - -} - -// GroupFloat32 uses the specified grouper function to group the items -// keyed by the return of the grouper. The object contained in the -// result will contain a map[string][]float32. -func (v *Value) GroupFloat32(grouper func(int, float32) string) *Value { - - groups := make(map[string][]float32) - - v.EachFloat32(func(index int, val float32) bool { - group := grouper(index, val) - if _, ok := groups[group]; !ok { - groups[group] = make([]float32, 0) - } - groups[group] = append(groups[group], val) - return true - }) - - return &Value{data: groups} - -} - -// ReplaceFloat32 uses the specified function to replace each float32s -// by iterating each item. The data in the returned result will be a -// []float32 containing the replaced items. 
-func (v *Value) ReplaceFloat32(replacer func(int, float32) float32) *Value { - - arr := v.MustFloat32Slice() - replaced := make([]float32, len(arr)) - - v.EachFloat32(func(index int, val float32) bool { - replaced[index] = replacer(index, val) - return true - }) - - return &Value{data: replaced} - -} - -// CollectFloat32 uses the specified collector function to collect a value -// for each of the float32s in the slice. The data returned will be a -// []interface{}. -func (v *Value) CollectFloat32(collector func(int, float32) interface{}) *Value { - - arr := v.MustFloat32Slice() - collected := make([]interface{}, len(arr)) - - v.EachFloat32(func(index int, val float32) bool { - collected[index] = collector(index, val) - return true - }) - - return &Value{data: collected} -} - -/* - Float64 (float64 and []float64) - -------------------------------------------------- -*/ - -// Float64 gets the value as a float64, returns the optionalDefault -// value or a system default object if the value is the wrong type. -func (v *Value) Float64(optionalDefault ...float64) float64 { - if s, ok := v.data.(float64); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return 0 -} - -// MustFloat64 gets the value as a float64. -// -// Panics if the object is not a float64. -func (v *Value) MustFloat64() float64 { - return v.data.(float64) -} - -// Float64Slice gets the value as a []float64, returns the optionalDefault -// value or nil if the value is not a []float64. -func (v *Value) Float64Slice(optionalDefault ...[]float64) []float64 { - if s, ok := v.data.([]float64); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return nil -} - -// MustFloat64Slice gets the value as a []float64. -// -// Panics if the object is not a []float64. -func (v *Value) MustFloat64Slice() []float64 { - return v.data.([]float64) -} - -// IsFloat64 gets whether the object contained is a float64 or not. -func (v *Value) IsFloat64() bool { - _, ok := v.data.(float64) - return ok -} - -// IsFloat64Slice gets whether the object contained is a []float64 or not. -func (v *Value) IsFloat64Slice() bool { - _, ok := v.data.([]float64) - return ok -} - -// EachFloat64 calls the specified callback for each object -// in the []float64. -// -// Panics if the object is the wrong type. -func (v *Value) EachFloat64(callback func(int, float64) bool) *Value { - - for index, val := range v.MustFloat64Slice() { - carryon := callback(index, val) - if carryon == false { - break - } - } - - return v - -} - -// WhereFloat64 uses the specified decider function to select items -// from the []float64. The object contained in the result will contain -// only the selected items. -func (v *Value) WhereFloat64(decider func(int, float64) bool) *Value { - - var selected []float64 - - v.EachFloat64(func(index int, val float64) bool { - shouldSelect := decider(index, val) - if shouldSelect == false { - selected = append(selected, val) - } - return true - }) - - return &Value{data: selected} - -} - -// GroupFloat64 uses the specified grouper function to group the items -// keyed by the return of the grouper. The object contained in the -// result will contain a map[string][]float64. 
-func (v *Value) GroupFloat64(grouper func(int, float64) string) *Value { - - groups := make(map[string][]float64) - - v.EachFloat64(func(index int, val float64) bool { - group := grouper(index, val) - if _, ok := groups[group]; !ok { - groups[group] = make([]float64, 0) - } - groups[group] = append(groups[group], val) - return true - }) - - return &Value{data: groups} - -} - -// ReplaceFloat64 uses the specified function to replace each float64s -// by iterating each item. The data in the returned result will be a -// []float64 containing the replaced items. -func (v *Value) ReplaceFloat64(replacer func(int, float64) float64) *Value { - - arr := v.MustFloat64Slice() - replaced := make([]float64, len(arr)) - - v.EachFloat64(func(index int, val float64) bool { - replaced[index] = replacer(index, val) - return true - }) - - return &Value{data: replaced} - -} - -// CollectFloat64 uses the specified collector function to collect a value -// for each of the float64s in the slice. The data returned will be a -// []interface{}. -func (v *Value) CollectFloat64(collector func(int, float64) interface{}) *Value { - - arr := v.MustFloat64Slice() - collected := make([]interface{}, len(arr)) - - v.EachFloat64(func(index int, val float64) bool { - collected[index] = collector(index, val) - return true - }) - - return &Value{data: collected} -} - -/* - Complex64 (complex64 and []complex64) - -------------------------------------------------- -*/ - -// Complex64 gets the value as a complex64, returns the optionalDefault -// value or a system default object if the value is the wrong type. -func (v *Value) Complex64(optionalDefault ...complex64) complex64 { - if s, ok := v.data.(complex64); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return 0 -} - -// MustComplex64 gets the value as a complex64. -// -// Panics if the object is not a complex64. -func (v *Value) MustComplex64() complex64 { - return v.data.(complex64) -} - -// Complex64Slice gets the value as a []complex64, returns the optionalDefault -// value or nil if the value is not a []complex64. -func (v *Value) Complex64Slice(optionalDefault ...[]complex64) []complex64 { - if s, ok := v.data.([]complex64); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return nil -} - -// MustComplex64Slice gets the value as a []complex64. -// -// Panics if the object is not a []complex64. -func (v *Value) MustComplex64Slice() []complex64 { - return v.data.([]complex64) -} - -// IsComplex64 gets whether the object contained is a complex64 or not. -func (v *Value) IsComplex64() bool { - _, ok := v.data.(complex64) - return ok -} - -// IsComplex64Slice gets whether the object contained is a []complex64 or not. -func (v *Value) IsComplex64Slice() bool { - _, ok := v.data.([]complex64) - return ok -} - -// EachComplex64 calls the specified callback for each object -// in the []complex64. -// -// Panics if the object is the wrong type. -func (v *Value) EachComplex64(callback func(int, complex64) bool) *Value { - - for index, val := range v.MustComplex64Slice() { - carryon := callback(index, val) - if carryon == false { - break - } - } - - return v - -} - -// WhereComplex64 uses the specified decider function to select items -// from the []complex64. The object contained in the result will contain -// only the selected items. 
-func (v *Value) WhereComplex64(decider func(int, complex64) bool) *Value { - - var selected []complex64 - - v.EachComplex64(func(index int, val complex64) bool { - shouldSelect := decider(index, val) - if shouldSelect == false { - selected = append(selected, val) - } - return true - }) - - return &Value{data: selected} - -} - -// GroupComplex64 uses the specified grouper function to group the items -// keyed by the return of the grouper. The object contained in the -// result will contain a map[string][]complex64. -func (v *Value) GroupComplex64(grouper func(int, complex64) string) *Value { - - groups := make(map[string][]complex64) - - v.EachComplex64(func(index int, val complex64) bool { - group := grouper(index, val) - if _, ok := groups[group]; !ok { - groups[group] = make([]complex64, 0) - } - groups[group] = append(groups[group], val) - return true - }) - - return &Value{data: groups} - -} - -// ReplaceComplex64 uses the specified function to replace each complex64s -// by iterating each item. The data in the returned result will be a -// []complex64 containing the replaced items. -func (v *Value) ReplaceComplex64(replacer func(int, complex64) complex64) *Value { - - arr := v.MustComplex64Slice() - replaced := make([]complex64, len(arr)) - - v.EachComplex64(func(index int, val complex64) bool { - replaced[index] = replacer(index, val) - return true - }) - - return &Value{data: replaced} - -} - -// CollectComplex64 uses the specified collector function to collect a value -// for each of the complex64s in the slice. The data returned will be a -// []interface{}. -func (v *Value) CollectComplex64(collector func(int, complex64) interface{}) *Value { - - arr := v.MustComplex64Slice() - collected := make([]interface{}, len(arr)) - - v.EachComplex64(func(index int, val complex64) bool { - collected[index] = collector(index, val) - return true - }) - - return &Value{data: collected} -} - -/* - Complex128 (complex128 and []complex128) - -------------------------------------------------- -*/ - -// Complex128 gets the value as a complex128, returns the optionalDefault -// value or a system default object if the value is the wrong type. -func (v *Value) Complex128(optionalDefault ...complex128) complex128 { - if s, ok := v.data.(complex128); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return 0 -} - -// MustComplex128 gets the value as a complex128. -// -// Panics if the object is not a complex128. -func (v *Value) MustComplex128() complex128 { - return v.data.(complex128) -} - -// Complex128Slice gets the value as a []complex128, returns the optionalDefault -// value or nil if the value is not a []complex128. -func (v *Value) Complex128Slice(optionalDefault ...[]complex128) []complex128 { - if s, ok := v.data.([]complex128); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return nil -} - -// MustComplex128Slice gets the value as a []complex128. -// -// Panics if the object is not a []complex128. -func (v *Value) MustComplex128Slice() []complex128 { - return v.data.([]complex128) -} - -// IsComplex128 gets whether the object contained is a complex128 or not. -func (v *Value) IsComplex128() bool { - _, ok := v.data.(complex128) - return ok -} - -// IsComplex128Slice gets whether the object contained is a []complex128 or not. -func (v *Value) IsComplex128Slice() bool { - _, ok := v.data.([]complex128) - return ok -} - -// EachComplex128 calls the specified callback for each object -// in the []complex128. 
-// -// Panics if the object is the wrong type. -func (v *Value) EachComplex128(callback func(int, complex128) bool) *Value { - - for index, val := range v.MustComplex128Slice() { - carryon := callback(index, val) - if carryon == false { - break - } - } - - return v - -} - -// WhereComplex128 uses the specified decider function to select items -// from the []complex128. The object contained in the result will contain -// only the selected items. -func (v *Value) WhereComplex128(decider func(int, complex128) bool) *Value { - - var selected []complex128 - - v.EachComplex128(func(index int, val complex128) bool { - shouldSelect := decider(index, val) - if shouldSelect == false { - selected = append(selected, val) - } - return true - }) - - return &Value{data: selected} - -} - -// GroupComplex128 uses the specified grouper function to group the items -// keyed by the return of the grouper. The object contained in the -// result will contain a map[string][]complex128. -func (v *Value) GroupComplex128(grouper func(int, complex128) string) *Value { - - groups := make(map[string][]complex128) - - v.EachComplex128(func(index int, val complex128) bool { - group := grouper(index, val) - if _, ok := groups[group]; !ok { - groups[group] = make([]complex128, 0) - } - groups[group] = append(groups[group], val) - return true - }) - - return &Value{data: groups} - -} - -// ReplaceComplex128 uses the specified function to replace each complex128s -// by iterating each item. The data in the returned result will be a -// []complex128 containing the replaced items. -func (v *Value) ReplaceComplex128(replacer func(int, complex128) complex128) *Value { - - arr := v.MustComplex128Slice() - replaced := make([]complex128, len(arr)) - - v.EachComplex128(func(index int, val complex128) bool { - replaced[index] = replacer(index, val) - return true - }) - - return &Value{data: replaced} - -} - -// CollectComplex128 uses the specified collector function to collect a value -// for each of the complex128s in the slice. The data returned will be a -// []interface{}. -func (v *Value) CollectComplex128(collector func(int, complex128) interface{}) *Value { - - arr := v.MustComplex128Slice() - collected := make([]interface{}, len(arr)) - - v.EachComplex128(func(index int, val complex128) bool { - collected[index] = collector(index, val) - return true - }) - - return &Value{data: collected} -} diff --git a/vendor/github.com/stretchr/objx/value.go b/vendor/github.com/stretchr/objx/value.go deleted file mode 100644 index 7aaef06..0000000 --- a/vendor/github.com/stretchr/objx/value.go +++ /dev/null @@ -1,13 +0,0 @@ -package objx - -// Value provides methods for extracting interface{} data in various -// types. 
-type Value struct { - // data contains the raw data being managed by this Value - data interface{} -} - -// Data returns the raw data contained by this Value -func (v *Value) Data() interface{} { - return v.data -} diff --git a/vendor/github.com/stretchr/testify/assert/assertions.go b/vendor/github.com/stretchr/testify/assert/assertions.go deleted file mode 100644 index e4a920f..0000000 --- a/vendor/github.com/stretchr/testify/assert/assertions.go +++ /dev/null @@ -1,844 +0,0 @@ -package assert - -import ( - "bufio" - "bytes" - "fmt" - "reflect" - "regexp" - "runtime" - "strings" - "time" -) - -// TestingT is an interface wrapper around *testing.T -type TestingT interface { - Errorf(format string, args ...interface{}) -} - -// Comparison a custom function that returns true on success and false on failure -type Comparison func() (success bool) - -/* - Helper functions -*/ - -// ObjectsAreEqual determines if two objects are considered equal. -// -// This function does no assertion of any kind. -func ObjectsAreEqual(expected, actual interface{}) bool { - - if expected == nil || actual == nil { - return expected == actual - } - - if reflect.DeepEqual(expected, actual) { - return true - } - - return false - -} - -// ObjectsAreEqualValues gets whether two objects are equal, or if their -// values are equal. -func ObjectsAreEqualValues(expected, actual interface{}) bool { - if ObjectsAreEqual(expected, actual) { - return true - } - - actualType := reflect.TypeOf(actual) - expectedValue := reflect.ValueOf(expected) - if expectedValue.Type().ConvertibleTo(actualType) { - // Attempt comparison after type conversion - if reflect.DeepEqual(actual, expectedValue.Convert(actualType).Interface()) { - return true - } - } - - return false -} - -/* CallerInfo is necessary because the assert functions use the testing object -internally, causing it to print the file:line of the assert method, rather than where -the problem actually occured in calling code.*/ - -// CallerInfo returns a string containing the file and line number of the assert call -// that failed. -func CallerInfo() string { - - file := "" - line := 0 - ok := false - - for i := 0; ; i++ { - _, file, line, ok = runtime.Caller(i) - if !ok { - return "" - } - parts := strings.Split(file, "/") - dir := parts[len(parts)-2] - file = parts[len(parts)-1] - if (dir != "assert" && dir != "mock" && dir != "require") || file == "mock_test.go" { - break - } - } - - return fmt.Sprintf("%s:%d", file, line) -} - -// getWhitespaceString returns a string that is long enough to overwrite the default -// output from the go testing framework. -func getWhitespaceString() string { - - _, file, line, ok := runtime.Caller(1) - if !ok { - return "" - } - parts := strings.Split(file, "/") - file = parts[len(parts)-1] - - return strings.Repeat(" ", len(fmt.Sprintf("%s:%d: ", file, line))) - -} - -func messageFromMsgAndArgs(msgAndArgs ...interface{}) string { - if len(msgAndArgs) == 0 || msgAndArgs == nil { - return "" - } - if len(msgAndArgs) == 1 { - return msgAndArgs[0].(string) - } - if len(msgAndArgs) > 1 { - return fmt.Sprintf(msgAndArgs[0].(string), msgAndArgs[1:]...) 
- } - return "" -} - -// Indents all lines of the message by appending a number of tabs to each line, in an output format compatible with Go's -// test printing (see inner comment for specifics) -func indentMessageLines(message string, tabs int) string { - outBuf := new(bytes.Buffer) - - for i, scanner := 0, bufio.NewScanner(strings.NewReader(message)); scanner.Scan(); i++ { - if i != 0 { - outBuf.WriteRune('\n') - } - for ii := 0; ii < tabs; ii++ { - outBuf.WriteRune('\t') - // Bizarrely, all lines except the first need one fewer tabs prepended, so deliberately advance the counter - // by 1 prematurely. - if ii == 0 && i > 0 { - ii++ - } - } - outBuf.WriteString(scanner.Text()) - } - - return outBuf.String() -} - -// Fail reports a failure through -func Fail(t TestingT, failureMessage string, msgAndArgs ...interface{}) bool { - - message := messageFromMsgAndArgs(msgAndArgs...) - - if len(message) > 0 { - t.Errorf("\r%s\r\tLocation:\t%s\n"+ - "\r\tError:%s\n"+ - "\r\tMessages:\t%s\n\r", - getWhitespaceString(), - CallerInfo(), - indentMessageLines(failureMessage, 2), - message) - } else { - t.Errorf("\r%s\r\tLocation:\t%s\n"+ - "\r\tError:%s\n\r", - getWhitespaceString(), - CallerInfo(), - indentMessageLines(failureMessage, 2)) - } - - return false -} - -// Implements asserts that an object is implemented by the specified interface. -// -// assert.Implements(t, (*MyInterface)(nil), new(MyObject), "MyObject") -func Implements(t TestingT, interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) bool { - - interfaceType := reflect.TypeOf(interfaceObject).Elem() - - if !reflect.TypeOf(object).Implements(interfaceType) { - return Fail(t, fmt.Sprintf("Object must implement %v", interfaceType), msgAndArgs...) - } - - return true - -} - -// IsType asserts that the specified objects are of the same type. -func IsType(t TestingT, expectedType interface{}, object interface{}, msgAndArgs ...interface{}) bool { - - if !ObjectsAreEqual(reflect.TypeOf(object), reflect.TypeOf(expectedType)) { - return Fail(t, fmt.Sprintf("Object expected to be of type %v, but was %v", reflect.TypeOf(expectedType), reflect.TypeOf(object)), msgAndArgs...) - } - - return true -} - -// Equal asserts that two objects are equal. -// -// assert.Equal(t, 123, 123, "123 and 123 should be equal") -// -// Returns whether the assertion was successful (true) or not (false). -func Equal(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool { - - if !ObjectsAreEqual(expected, actual) { - return Fail(t, fmt.Sprintf("Not equal: %#v (expected)\n"+ - " != %#v (actual)", expected, actual), msgAndArgs...) - } - - return true - -} - -// EqualValues asserts that two objects are equal or convertable to the same types -// and equal. -// -// assert.EqualValues(t, uint32(123), int32(123), "123 and 123 should be equal") -// -// Returns whether the assertion was successful (true) or not (false). -func EqualValues(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool { - - if !ObjectsAreEqualValues(expected, actual) { - return Fail(t, fmt.Sprintf("Not equal: %#v (expected)\n"+ - " != %#v (actual)", expected, actual), msgAndArgs...) - } - - return true - -} - -// Exactly asserts that two objects are equal is value and type. -// -// assert.Exactly(t, int32(123), int64(123), "123 and 123 should NOT be equal") -// -// Returns whether the assertion was successful (true) or not (false). 
-func Exactly(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool { - - aType := reflect.TypeOf(expected) - bType := reflect.TypeOf(actual) - - if aType != bType { - return Fail(t, "Types expected to match exactly", "%v != %v", aType, bType) - } - - return Equal(t, expected, actual, msgAndArgs...) - -} - -// NotNil asserts that the specified object is not nil. -// -// assert.NotNil(t, err, "err should be something") -// -// Returns whether the assertion was successful (true) or not (false). -func NotNil(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { - - success := true - - if object == nil { - success = false - } else { - value := reflect.ValueOf(object) - kind := value.Kind() - if kind >= reflect.Chan && kind <= reflect.Slice && value.IsNil() { - success = false - } - } - - if !success { - Fail(t, "Expected not to be nil.", msgAndArgs...) - } - - return success -} - -// isNil checks if a specified object is nil or not, without Failing. -func isNil(object interface{}) bool { - if object == nil { - return true - } - - value := reflect.ValueOf(object) - kind := value.Kind() - if kind >= reflect.Chan && kind <= reflect.Slice && value.IsNil() { - return true - } - - return false -} - -// Nil asserts that the specified object is nil. -// -// assert.Nil(t, err, "err should be nothing") -// -// Returns whether the assertion was successful (true) or not (false). -func Nil(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { - if isNil(object) { - return true - } - return Fail(t, fmt.Sprintf("Expected nil, but got: %#v", object), msgAndArgs...) -} - -var zeros = []interface{}{ - int(0), - int8(0), - int16(0), - int32(0), - int64(0), - uint(0), - uint8(0), - uint16(0), - uint32(0), - uint64(0), - float32(0), - float64(0), -} - -// isEmpty gets whether the specified object is considered empty or not. -func isEmpty(object interface{}) bool { - - if object == nil { - return true - } else if object == "" { - return true - } else if object == false { - return true - } - - for _, v := range zeros { - if object == v { - return true - } - } - - objValue := reflect.ValueOf(object) - - switch objValue.Kind() { - case reflect.Map: - fallthrough - case reflect.Slice, reflect.Chan: - { - return (objValue.Len() == 0) - } - case reflect.Ptr: - { - switch object.(type) { - case *time.Time: - return object.(*time.Time).IsZero() - default: - return false - } - } - } - return false -} - -// Empty asserts that the specified object is empty. I.e. nil, "", false, 0 or either -// a slice or a channel with len == 0. -// -// assert.Empty(t, obj) -// -// Returns whether the assertion was successful (true) or not (false). -func Empty(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { - - pass := isEmpty(object) - if !pass { - Fail(t, fmt.Sprintf("Should be empty, but was %v", object), msgAndArgs...) - } - - return pass - -} - -// NotEmpty asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either -// a slice or a channel with len == 0. -// -// if assert.NotEmpty(t, obj) { -// assert.Equal(t, "two", obj[1]) -// } -// -// Returns whether the assertion was successful (true) or not (false). -func NotEmpty(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { - - pass := !isEmpty(object) - if !pass { - Fail(t, fmt.Sprintf("Should NOT be empty, but was %v", object), msgAndArgs...) - } - - return pass - -} - -// getLen try to get length of object. -// return (false, 0) if impossible. 
-func getLen(x interface{}) (ok bool, length int) { - v := reflect.ValueOf(x) - defer func() { - if e := recover(); e != nil { - ok = false - } - }() - return true, v.Len() -} - -// Len asserts that the specified object has specific length. -// Len also fails if the object has a type that len() not accept. -// -// assert.Len(t, mySlice, 3, "The size of slice is not 3") -// -// Returns whether the assertion was successful (true) or not (false). -func Len(t TestingT, object interface{}, length int, msgAndArgs ...interface{}) bool { - ok, l := getLen(object) - if !ok { - return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", object), msgAndArgs...) - } - - if l != length { - return Fail(t, fmt.Sprintf("\"%s\" should have %d item(s), but has %d", object, length, l), msgAndArgs...) - } - return true -} - -// True asserts that the specified value is true. -// -// assert.True(t, myBool, "myBool should be true") -// -// Returns whether the assertion was successful (true) or not (false). -func True(t TestingT, value bool, msgAndArgs ...interface{}) bool { - - if value != true { - return Fail(t, "Should be true", msgAndArgs...) - } - - return true - -} - -// False asserts that the specified value is true. -// -// assert.False(t, myBool, "myBool should be false") -// -// Returns whether the assertion was successful (true) or not (false). -func False(t TestingT, value bool, msgAndArgs ...interface{}) bool { - - if value != false { - return Fail(t, "Should be false", msgAndArgs...) - } - - return true - -} - -// NotEqual asserts that the specified values are NOT equal. -// -// assert.NotEqual(t, obj1, obj2, "two objects shouldn't be equal") -// -// Returns whether the assertion was successful (true) or not (false). -func NotEqual(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool { - - if ObjectsAreEqual(expected, actual) { - return Fail(t, "Should not be equal", msgAndArgs...) - } - - return true - -} - -// containsElement try loop over the list check if the list includes the element. -// return (false, false) if impossible. -// return (true, false) if element was not found. -// return (true, true) if element was found. -func includeElement(list interface{}, element interface{}) (ok, found bool) { - - listValue := reflect.ValueOf(list) - elementValue := reflect.ValueOf(element) - defer func() { - if e := recover(); e != nil { - ok = false - found = false - } - }() - - if reflect.TypeOf(list).Kind() == reflect.String { - return true, strings.Contains(listValue.String(), elementValue.String()) - } - - for i := 0; i < listValue.Len(); i++ { - if ObjectsAreEqual(listValue.Index(i).Interface(), element) { - return true, true - } - } - return true, false - -} - -// Contains asserts that the specified string or list(array, slice...) contains the -// specified substring or element. -// -// assert.Contains(t, "Hello World", "World", "But 'Hello World' does contain 'World'") -// assert.Contains(t, ["Hello", "World"], "World", "But ["Hello", "World"] does contain 'World'") -// -// Returns whether the assertion was successful (true) or not (false). -func Contains(t TestingT, s, contains interface{}, msgAndArgs ...interface{}) bool { - - ok, found := includeElement(s, contains) - if !ok { - return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", s), msgAndArgs...) - } - if !found { - return Fail(t, fmt.Sprintf("\"%s\" does not contain \"%s\"", s, contains), msgAndArgs...) 
- } - - return true - -} - -// NotContains asserts that the specified string or list(array, slice...) does NOT contain the -// specified substring or element. -// -// assert.NotContains(t, "Hello World", "Earth", "But 'Hello World' does NOT contain 'Earth'") -// assert.NotContains(t, ["Hello", "World"], "Earth", "But ['Hello', 'World'] does NOT contain 'Earth'") -// -// Returns whether the assertion was successful (true) or not (false). -func NotContains(t TestingT, s, contains interface{}, msgAndArgs ...interface{}) bool { - - ok, found := includeElement(s, contains) - if !ok { - return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", s), msgAndArgs...) - } - if found { - return Fail(t, fmt.Sprintf("\"%s\" should not contain \"%s\"", s, contains), msgAndArgs...) - } - - return true - -} - -// Condition uses a Comparison to assert a complex condition. -func Condition(t TestingT, comp Comparison, msgAndArgs ...interface{}) bool { - result := comp() - if !result { - Fail(t, "Condition failed!", msgAndArgs...) - } - return result -} - -// PanicTestFunc defines a func that should be passed to the assert.Panics and assert.NotPanics -// methods, and represents a simple func that takes no arguments, and returns nothing. -type PanicTestFunc func() - -// didPanic returns true if the function passed to it panics. Otherwise, it returns false. -func didPanic(f PanicTestFunc) (bool, interface{}) { - - didPanic := false - var message interface{} - func() { - - defer func() { - if message = recover(); message != nil { - didPanic = true - } - }() - - // call the target function - f() - - }() - - return didPanic, message - -} - -// Panics asserts that the code inside the specified PanicTestFunc panics. -// -// assert.Panics(t, func(){ -// GoCrazy() -// }, "Calling GoCrazy() should panic") -// -// Returns whether the assertion was successful (true) or not (false). -func Panics(t TestingT, f PanicTestFunc, msgAndArgs ...interface{}) bool { - - if funcDidPanic, panicValue := didPanic(f); !funcDidPanic { - return Fail(t, fmt.Sprintf("func %#v should panic\n\r\tPanic value:\t%v", f, panicValue), msgAndArgs...) - } - - return true -} - -// NotPanics asserts that the code inside the specified PanicTestFunc does NOT panic. -// -// assert.NotPanics(t, func(){ -// RemainCalm() -// }, "Calling RemainCalm() should NOT panic") -// -// Returns whether the assertion was successful (true) or not (false). -func NotPanics(t TestingT, f PanicTestFunc, msgAndArgs ...interface{}) bool { - - if funcDidPanic, panicValue := didPanic(f); funcDidPanic { - return Fail(t, fmt.Sprintf("func %#v should not panic\n\r\tPanic value:\t%v", f, panicValue), msgAndArgs...) - } - - return true -} - -// WithinDuration asserts that the two times are within duration delta of each other. -// -// assert.WithinDuration(t, time.Now(), time.Now(), 10*time.Second, "The difference should not be more than 10s") -// -// Returns whether the assertion was successful (true) or not (false). -func WithinDuration(t TestingT, expected, actual time.Time, delta time.Duration, msgAndArgs ...interface{}) bool { - - dt := expected.Sub(actual) - if dt < -delta || dt > delta { - return Fail(t, fmt.Sprintf("Max difference between %v and %v allowed is %v, but difference was %v", expected, actual, delta, dt), msgAndArgs...) 
- } - - return true -} - -func toFloat(x interface{}) (float64, bool) { - var xf float64 - xok := true - - switch xn := x.(type) { - case uint8: - xf = float64(xn) - case uint16: - xf = float64(xn) - case uint32: - xf = float64(xn) - case uint64: - xf = float64(xn) - case int: - xf = float64(xn) - case int8: - xf = float64(xn) - case int16: - xf = float64(xn) - case int32: - xf = float64(xn) - case int64: - xf = float64(xn) - case float32: - xf = float64(xn) - case float64: - xf = float64(xn) - default: - xok = false - } - - return xf, xok -} - -// InDelta asserts that the two numerals are within delta of each other. -// -// assert.InDelta(t, math.Pi, (22 / 7.0), 0.01) -// -// Returns whether the assertion was successful (true) or not (false). -func InDelta(t TestingT, expected, actual interface{}, delta float64, msgAndArgs ...interface{}) bool { - - af, aok := toFloat(expected) - bf, bok := toFloat(actual) - - if !aok || !bok { - return Fail(t, fmt.Sprintf("Parameters must be numerical"), msgAndArgs...) - } - - dt := af - bf - if dt < -delta || dt > delta { - return Fail(t, fmt.Sprintf("Max difference between %v and %v allowed is %v, but difference was %v", expected, actual, delta, dt), msgAndArgs...) - } - - return true -} - -// InDeltaSlice is the same as InDelta, except it compares two slices. -func InDeltaSlice(t TestingT, expected, actual interface{}, delta float64, msgAndArgs ...interface{}) bool { - if expected == nil || actual == nil || - reflect.TypeOf(actual).Kind() != reflect.Slice || - reflect.TypeOf(expected).Kind() != reflect.Slice { - return Fail(t, fmt.Sprintf("Parameters must be slice"), msgAndArgs...) - } - - actualSlice := reflect.ValueOf(actual) - expectedSlice := reflect.ValueOf(expected) - - for i := 0; i < actualSlice.Len(); i++ { - result := InDelta(t, actualSlice.Index(i).Interface(), expectedSlice.Index(i).Interface(), delta) - if !result { - return result - } - } - - return true -} - -// min(|expected|, |actual|) * epsilon -func calcEpsilonDelta(expected, actual interface{}, epsilon float64) float64 { - af, aok := toFloat(expected) - bf, bok := toFloat(actual) - - if !aok || !bok { - // invalid input - return 0 - } - - if af < 0 { - af = -af - } - if bf < 0 { - bf = -bf - } - var delta float64 - if af < bf { - delta = af * epsilon - } else { - delta = bf * epsilon - } - return delta -} - -// InEpsilon asserts that expected and actual have a relative error less than epsilon -// -// Returns whether the assertion was successful (true) or not (false). -func InEpsilon(t TestingT, expected, actual interface{}, epsilon float64, msgAndArgs ...interface{}) bool { - delta := calcEpsilonDelta(expected, actual, epsilon) - - return InDelta(t, expected, actual, delta, msgAndArgs...) -} - -// InEpsilonSlice is the same as InEpsilon, except it compares two slices. -func InEpsilonSlice(t TestingT, expected, actual interface{}, delta float64, msgAndArgs ...interface{}) bool { - if expected == nil || actual == nil || - reflect.TypeOf(actual).Kind() != reflect.Slice || - reflect.TypeOf(expected).Kind() != reflect.Slice { - return Fail(t, fmt.Sprintf("Parameters must be slice"), msgAndArgs...) - } - - actualSlice := reflect.ValueOf(actual) - expectedSlice := reflect.ValueOf(expected) - - for i := 0; i < actualSlice.Len(); i++ { - result := InEpsilon(t, actualSlice.Index(i).Interface(), expectedSlice.Index(i).Interface(), delta) - if !result { - return result - } - } - - return true -} - -/* - Errors -*/ - -// NoError asserts that a function returned no error (i.e. `nil`). 
-// -// actualObj, err := SomeFunction() -// if assert.NoError(t, err) { -// assert.Equal(t, actualObj, expectedObj) -// } -// -// Returns whether the assertion was successful (true) or not (false). -func NoError(t TestingT, err error, msgAndArgs ...interface{}) bool { - if isNil(err) { - return true - } - - return Fail(t, fmt.Sprintf("No error is expected but got %v", err), msgAndArgs...) -} - -// Error asserts that a function returned an error (i.e. not `nil`). -// -// actualObj, err := SomeFunction() -// if assert.Error(t, err, "An error was expected") { -// assert.Equal(t, err, expectedError) -// } -// -// Returns whether the assertion was successful (true) or not (false). -func Error(t TestingT, err error, msgAndArgs ...interface{}) bool { - - message := messageFromMsgAndArgs(msgAndArgs...) - return NotNil(t, err, "An error is expected but got nil. %s", message) - -} - -// EqualError asserts that a function returned an error (i.e. not `nil`) -// and that it is equal to the provided error. -// -// actualObj, err := SomeFunction() -// if assert.Error(t, err, "An error was expected") { -// assert.Equal(t, err, expectedError) -// } -// -// Returns whether the assertion was successful (true) or not (false). -func EqualError(t TestingT, theError error, errString string, msgAndArgs ...interface{}) bool { - - message := messageFromMsgAndArgs(msgAndArgs...) - if !NotNil(t, theError, "An error is expected but got nil. %s", message) { - return false - } - s := "An error with value \"%s\" is expected but got \"%s\". %s" - return Equal(t, theError.Error(), errString, - s, errString, theError.Error(), message) -} - -// matchRegexp return true if a specified regexp matches a string. -func matchRegexp(rx interface{}, str interface{}) bool { - - var r *regexp.Regexp - if rr, ok := rx.(*regexp.Regexp); ok { - r = rr - } else { - r = regexp.MustCompile(fmt.Sprint(rx)) - } - - return (r.FindStringIndex(fmt.Sprint(str)) != nil) - -} - -// Regexp asserts that a specified regexp matches a string. -// -// assert.Regexp(t, regexp.MustCompile("start"), "it's starting") -// assert.Regexp(t, "start...$", "it's not starting") -// -// Returns whether the assertion was successful (true) or not (false). -func Regexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interface{}) bool { - - match := matchRegexp(rx, str) - - if !match { - Fail(t, fmt.Sprintf("Expect \"%v\" to match \"%v\"", str, rx), msgAndArgs...) - } - - return match -} - -// NotRegexp asserts that a specified regexp does not match a string. -// -// assert.NotRegexp(t, regexp.MustCompile("starts"), "it's starting") -// assert.NotRegexp(t, "^start", "it's not starting") -// -// Returns whether the assertion was successful (true) or not (false). -func NotRegexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interface{}) bool { - match := matchRegexp(rx, str) - - if match { - Fail(t, fmt.Sprintf("Expect \"%v\" to NOT match \"%v\"", str, rx), msgAndArgs...) - } - - return !match - -} diff --git a/vendor/github.com/stretchr/testify/assert/doc.go b/vendor/github.com/stretchr/testify/assert/doc.go deleted file mode 100644 index f678106..0000000 --- a/vendor/github.com/stretchr/testify/assert/doc.go +++ /dev/null @@ -1,154 +0,0 @@ -// Package assert provides a set of comprehensive testing tools for use with the normal Go testing system. 
-// -// Example Usage -// -// The following is a complete example using assert in a standard test function: -// import ( -// "testing" -// "github.com/stretchr/testify/assert" -// ) -// -// func TestSomething(t *testing.T) { -// -// var a string = "Hello" -// var b string = "Hello" -// -// assert.Equal(t, a, b, "The two words should be the same.") -// -// } -// -// if you assert many times, use the below: -// -// import ( -// "testing" -// "github.com/stretchr/testify/assert" -// ) -// -// func TestSomething(t *testing.T) { -// assert := assert.New(t) -// -// var a string = "Hello" -// var b string = "Hello" -// -// assert.Equal(a, b, "The two words should be the same.") -// } -// -// Assertions -// -// Assertions allow you to easily write test code, and are global funcs in the `assert` package. -// All assertion functions take, as the first argument, the `*testing.T` object provided by the -// testing framework. This allows the assertion funcs to write the failings and other details to -// the correct place. -// -// Every assertion function also takes an optional string message as the final argument, -// allowing custom error messages to be appended to the message the assertion method outputs. -// -// Here is an overview of the assert functions: -// -// assert.Equal(t, expected, actual [, message [, format-args]]) -// -// assert.EqualValues(t, expected, actual [, message [, format-args]]) -// -// assert.NotEqual(t, notExpected, actual [, message [, format-args]]) -// -// assert.True(t, actualBool [, message [, format-args]]) -// -// assert.False(t, actualBool [, message [, format-args]]) -// -// assert.Nil(t, actualObject [, message [, format-args]]) -// -// assert.NotNil(t, actualObject [, message [, format-args]]) -// -// assert.Empty(t, actualObject [, message [, format-args]]) -// -// assert.NotEmpty(t, actualObject [, message [, format-args]]) -// -// assert.Len(t, actualObject, expectedLength, [, message [, format-args]]) -// -// assert.Error(t, errorObject [, message [, format-args]]) -// -// assert.NoError(t, errorObject [, message [, format-args]]) -// -// assert.EqualError(t, theError, errString [, message [, format-args]]) -// -// assert.Implements(t, (*MyInterface)(nil), new(MyObject) [,message [, format-args]]) -// -// assert.IsType(t, expectedObject, actualObject [, message [, format-args]]) -// -// assert.Contains(t, stringOrSlice, substringOrElement [, message [, format-args]]) -// -// assert.NotContains(t, stringOrSlice, substringOrElement [, message [, format-args]]) -// -// assert.Panics(t, func(){ -// -// // call code that should panic -// -// } [, message [, format-args]]) -// -// assert.NotPanics(t, func(){ -// -// // call code that should not panic -// -// } [, message [, format-args]]) -// -// assert.WithinDuration(t, timeA, timeB, deltaTime, [, message [, format-args]]) -// -// assert.InDelta(t, numA, numB, delta, [, message [, format-args]]) -// -// assert.InEpsilon(t, numA, numB, epsilon, [, message [, format-args]]) -// -// assert package contains Assertions object. it has assertion methods. 
-// -// Here is an overview of the assert functions: -// assert.Equal(expected, actual [, message [, format-args]]) -// -// assert.EqualValues(expected, actual [, message [, format-args]]) -// -// assert.NotEqual(notExpected, actual [, message [, format-args]]) -// -// assert.True(actualBool [, message [, format-args]]) -// -// assert.False(actualBool [, message [, format-args]]) -// -// assert.Nil(actualObject [, message [, format-args]]) -// -// assert.NotNil(actualObject [, message [, format-args]]) -// -// assert.Empty(actualObject [, message [, format-args]]) -// -// assert.NotEmpty(actualObject [, message [, format-args]]) -// -// assert.Len(actualObject, expectedLength, [, message [, format-args]]) -// -// assert.Error(errorObject [, message [, format-args]]) -// -// assert.NoError(errorObject [, message [, format-args]]) -// -// assert.EqualError(theError, errString [, message [, format-args]]) -// -// assert.Implements((*MyInterface)(nil), new(MyObject) [,message [, format-args]]) -// -// assert.IsType(expectedObject, actualObject [, message [, format-args]]) -// -// assert.Contains(stringOrSlice, substringOrElement [, message [, format-args]]) -// -// assert.NotContains(stringOrSlice, substringOrElement [, message [, format-args]]) -// -// assert.Panics(func(){ -// -// // call code that should panic -// -// } [, message [, format-args]]) -// -// assert.NotPanics(func(){ -// -// // call code that should not panic -// -// } [, message [, format-args]]) -// -// assert.WithinDuration(timeA, timeB, deltaTime, [, message [, format-args]]) -// -// assert.InDelta(numA, numB, delta, [, message [, format-args]]) -// -// assert.InEpsilon(numA, numB, epsilon, [, message [, format-args]]) -package assert diff --git a/vendor/github.com/stretchr/testify/assert/errors.go b/vendor/github.com/stretchr/testify/assert/errors.go deleted file mode 100644 index ac9dc9d..0000000 --- a/vendor/github.com/stretchr/testify/assert/errors.go +++ /dev/null @@ -1,10 +0,0 @@ -package assert - -import ( - "errors" -) - -// AnError is an error instance useful for testing. If the code does not care -// about error specifics, and only needs to return the error for example, this -// error should be used to make the test code more readable. -var AnError = errors.New("assert.AnError general error for testing") diff --git a/vendor/github.com/stretchr/testify/assert/forward_assertions.go b/vendor/github.com/stretchr/testify/assert/forward_assertions.go deleted file mode 100644 index d8d3f53..0000000 --- a/vendor/github.com/stretchr/testify/assert/forward_assertions.go +++ /dev/null @@ -1,265 +0,0 @@ -package assert - -import "time" - -// Assertions provides assertion methods around the -// TestingT interface. -type Assertions struct { - t TestingT -} - -// New makes a new Assertions object for the specified TestingT. -func New(t TestingT) *Assertions { - return &Assertions{ - t: t, - } -} - -// Fail reports a failure through -func (a *Assertions) Fail(failureMessage string, msgAndArgs ...interface{}) bool { - return Fail(a.t, failureMessage, msgAndArgs...) -} - -// Implements asserts that an object is implemented by the specified interface. -// -// assert.Implements((*MyInterface)(nil), new(MyObject), "MyObject") -func (a *Assertions) Implements(interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) bool { - return Implements(a.t, interfaceObject, object, msgAndArgs...) -} - -// IsType asserts that the specified objects are of the same type. 
-func (a *Assertions) IsType(expectedType interface{}, object interface{}, msgAndArgs ...interface{}) bool { - return IsType(a.t, expectedType, object, msgAndArgs...) -} - -// Equal asserts that two objects are equal. -// -// assert.Equal(123, 123, "123 and 123 should be equal") -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) Equal(expected, actual interface{}, msgAndArgs ...interface{}) bool { - return Equal(a.t, expected, actual, msgAndArgs...) -} - -// EqualValues asserts that two objects are equal or convertable to the same types -// and equal. -// -// assert.EqualValues(uint32(123), int32(123), "123 and 123 should be equal") -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) EqualValues(expected, actual interface{}, msgAndArgs ...interface{}) bool { - return EqualValues(a.t, expected, actual, msgAndArgs...) -} - -// Exactly asserts that two objects are equal is value and type. -// -// assert.Exactly(int32(123), int64(123), "123 and 123 should NOT be equal") -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) Exactly(expected, actual interface{}, msgAndArgs ...interface{}) bool { - return Exactly(a.t, expected, actual, msgAndArgs...) -} - -// NotNil asserts that the specified object is not nil. -// -// assert.NotNil(err, "err should be something") -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) NotNil(object interface{}, msgAndArgs ...interface{}) bool { - return NotNil(a.t, object, msgAndArgs...) -} - -// Nil asserts that the specified object is nil. -// -// assert.Nil(err, "err should be nothing") -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) Nil(object interface{}, msgAndArgs ...interface{}) bool { - return Nil(a.t, object, msgAndArgs...) -} - -// Empty asserts that the specified object is empty. I.e. nil, "", false, 0 or a -// slice with len == 0. -// -// assert.Empty(obj) -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) Empty(object interface{}, msgAndArgs ...interface{}) bool { - return Empty(a.t, object, msgAndArgs...) -} - -// NotEmpty asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or a -// slice with len == 0. -// -// if assert.NotEmpty(obj) { -// assert.Equal("two", obj[1]) -// } -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) NotEmpty(object interface{}, msgAndArgs ...interface{}) bool { - return NotEmpty(a.t, object, msgAndArgs...) -} - -// Len asserts that the specified object has specific length. -// Len also fails if the object has a type that len() not accept. -// -// assert.Len(mySlice, 3, "The size of slice is not 3") -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) Len(object interface{}, length int, msgAndArgs ...interface{}) bool { - return Len(a.t, object, length, msgAndArgs...) -} - -// True asserts that the specified value is true. -// -// assert.True(myBool, "myBool should be true") -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) True(value bool, msgAndArgs ...interface{}) bool { - return True(a.t, value, msgAndArgs...) -} - -// False asserts that the specified value is true. 
-// -// assert.False(myBool, "myBool should be false") -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) False(value bool, msgAndArgs ...interface{}) bool { - return False(a.t, value, msgAndArgs...) -} - -// NotEqual asserts that the specified values are NOT equal. -// -// assert.NotEqual(obj1, obj2, "two objects shouldn't be equal") -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) NotEqual(expected, actual interface{}, msgAndArgs ...interface{}) bool { - return NotEqual(a.t, expected, actual, msgAndArgs...) -} - -// Contains asserts that the specified string contains the specified substring. -// -// assert.Contains("Hello World", "World", "But 'Hello World' does contain 'World'") -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) Contains(s, contains interface{}, msgAndArgs ...interface{}) bool { - return Contains(a.t, s, contains, msgAndArgs...) -} - -// NotContains asserts that the specified string does NOT contain the specified substring. -// -// assert.NotContains("Hello World", "Earth", "But 'Hello World' does NOT contain 'Earth'") -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) NotContains(s, contains interface{}, msgAndArgs ...interface{}) bool { - return NotContains(a.t, s, contains, msgAndArgs...) -} - -// Condition uses a Comparison to assert a complex condition. -func (a *Assertions) Condition(comp Comparison, msgAndArgs ...interface{}) bool { - return Condition(a.t, comp, msgAndArgs...) -} - -// Panics asserts that the code inside the specified PanicTestFunc panics. -// -// assert.Panics(func(){ -// GoCrazy() -// }, "Calling GoCrazy() should panic") -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) Panics(f PanicTestFunc, msgAndArgs ...interface{}) bool { - return Panics(a.t, f, msgAndArgs...) -} - -// NotPanics asserts that the code inside the specified PanicTestFunc does NOT panic. -// -// assert.NotPanics(func(){ -// RemainCalm() -// }, "Calling RemainCalm() should NOT panic") -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) NotPanics(f PanicTestFunc, msgAndArgs ...interface{}) bool { - return NotPanics(a.t, f, msgAndArgs...) -} - -// WithinDuration asserts that the two times are within duration delta of each other. -// -// assert.WithinDuration(time.Now(), time.Now(), 10*time.Second, "The difference should not be more than 10s") -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) WithinDuration(expected, actual time.Time, delta time.Duration, msgAndArgs ...interface{}) bool { - return WithinDuration(a.t, expected, actual, delta, msgAndArgs...) -} - -// InDelta asserts that the two numerals are within delta of each other. -// -// assert.InDelta(t, math.Pi, (22 / 7.0), 0.01) -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) InDelta(expected, actual interface{}, delta float64, msgAndArgs ...interface{}) bool { - return InDelta(a.t, expected, actual, delta, msgAndArgs...) -} - -// InEpsilon asserts that expected and actual have a relative error less than epsilon -// -// Returns whether the assertion was successful (true) or not (false). 
-func (a *Assertions) InEpsilon(expected, actual interface{}, epsilon float64, msgAndArgs ...interface{}) bool { - return InEpsilon(a.t, expected, actual, epsilon, msgAndArgs...) -} - -// NoError asserts that a function returned no error (i.e. `nil`). -// -// actualObj, err := SomeFunction() -// if assert.NoError(err) { -// assert.Equal(actualObj, expectedObj) -// } -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) NoError(theError error, msgAndArgs ...interface{}) bool { - return NoError(a.t, theError, msgAndArgs...) -} - -// Error asserts that a function returned an error (i.e. not `nil`). -// -// actualObj, err := SomeFunction() -// if assert.Error(err, "An error was expected") { -// assert.Equal(err, expectedError) -// } -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) Error(theError error, msgAndArgs ...interface{}) bool { - return Error(a.t, theError, msgAndArgs...) -} - -// EqualError asserts that a function returned an error (i.e. not `nil`) -// and that it is equal to the provided error. -// -// actualObj, err := SomeFunction() -// if assert.Error(err, "An error was expected") { -// assert.Equal(err, expectedError) -// } -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) EqualError(theError error, errString string, msgAndArgs ...interface{}) bool { - return EqualError(a.t, theError, errString, msgAndArgs...) -} - -// Regexp asserts that a specified regexp matches a string. -// -// assert.Regexp(t, regexp.MustCompile("start"), "it's starting") -// assert.Regexp(t, "start...$", "it's not starting") -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) Regexp(rx interface{}, str interface{}, msgAndArgs ...interface{}) bool { - return Regexp(a.t, rx, str, msgAndArgs...) -} - -// NotRegexp asserts that a specified regexp does not match a string. -// -// assert.NotRegexp(t, regexp.MustCompile("starts"), "it's starting") -// assert.NotRegexp(t, "^start", "it's not starting") -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) NotRegexp(rx interface{}, str interface{}, msgAndArgs ...interface{}) bool { - return NotRegexp(a.t, rx, str, msgAndArgs...) -} diff --git a/vendor/github.com/stretchr/testify/assert/http_assertions.go b/vendor/github.com/stretchr/testify/assert/http_assertions.go deleted file mode 100644 index 1246e58..0000000 --- a/vendor/github.com/stretchr/testify/assert/http_assertions.go +++ /dev/null @@ -1,157 +0,0 @@ -package assert - -import ( - "fmt" - "net/http" - "net/http/httptest" - "net/url" - "strings" -) - -// httpCode is a helper that returns HTTP code of the response. It returns -1 -// if building a new request fails. -func httpCode(handler http.HandlerFunc, mode, url string, values url.Values) int { - w := httptest.NewRecorder() - req, err := http.NewRequest(mode, url+"?"+values.Encode(), nil) - if err != nil { - return -1 - } - handler(w, req) - return w.Code -} - -// HTTPSuccess asserts that a specified handler returns a success status code. -// -// assert.HTTPSuccess(t, myHandler, "POST", "http://www.google.com", nil) -// -// Returns whether the assertion was successful (true) or not (false). 
-func HTTPSuccess(t TestingT, handler http.HandlerFunc, mode, url string, values url.Values) bool { - code := httpCode(handler, mode, url, values) - if code == -1 { - return false - } - return code >= http.StatusOK && code <= http.StatusPartialContent -} - -// HTTPRedirect asserts that a specified handler returns a redirect status code. -// -// assert.HTTPRedirect(t, myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}} -// -// Returns whether the assertion was successful (true) or not (false). -func HTTPRedirect(t TestingT, handler http.HandlerFunc, mode, url string, values url.Values) bool { - code := httpCode(handler, mode, url, values) - if code == -1 { - return false - } - return code >= http.StatusMultipleChoices && code <= http.StatusTemporaryRedirect -} - -// HTTPError asserts that a specified handler returns an error status code. -// -// assert.HTTPError(t, myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}} -// -// Returns whether the assertion was successful (true) or not (false). -func HTTPError(t TestingT, handler http.HandlerFunc, mode, url string, values url.Values) bool { - code := httpCode(handler, mode, url, values) - if code == -1 { - return false - } - return code >= http.StatusBadRequest -} - -// HTTPBody is a helper that returns HTTP body of the response. It returns -// empty string if building a new request fails. -func HTTPBody(handler http.HandlerFunc, mode, url string, values url.Values) string { - w := httptest.NewRecorder() - req, err := http.NewRequest(mode, url+"?"+values.Encode(), nil) - if err != nil { - return "" - } - handler(w, req) - return w.Body.String() -} - -// HTTPBodyContains asserts that a specified handler returns a -// body that contains a string. -// -// assert.HTTPBodyContains(t, myHandler, "www.google.com", nil, "I'm Feeling Lucky") -// -// Returns whether the assertion was successful (true) or not (false). -func HTTPBodyContains(t TestingT, handler http.HandlerFunc, mode, url string, values url.Values, str interface{}) bool { - body := HTTPBody(handler, mode, url, values) - - contains := strings.Contains(body, fmt.Sprint(str)) - if !contains { - Fail(t, fmt.Sprintf("Expected response body for \"%s\" to contain \"%s\" but found \"%s\"", url+"?"+values.Encode(), str, body)) - } - - return contains -} - -// HTTPBodyNotContains asserts that a specified handler returns a -// body that does not contain a string. -// -// assert.HTTPBodyNotContains(t, myHandler, "www.google.com", nil, "I'm Feeling Lucky") -// -// Returns whether the assertion was successful (true) or not (false). -func HTTPBodyNotContains(t TestingT, handler http.HandlerFunc, mode, url string, values url.Values, str interface{}) bool { - body := HTTPBody(handler, mode, url, values) - - contains := strings.Contains(body, fmt.Sprint(str)) - if contains { - Fail(t, "Expected response body for %s to NOT contain \"%s\" but found \"%s\"", url+"?"+values.Encode(), str, body) - } - - return !contains -} - -// -// Assertions Wrappers -// - -// HTTPSuccess asserts that a specified handler returns a success status code. -// -// assert.HTTPSuccess(myHandler, "POST", "http://www.google.com", nil) -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) HTTPSuccess(handler http.HandlerFunc, mode, url string, values url.Values) bool { - return HTTPSuccess(a.t, handler, mode, url, values) -} - -// HTTPRedirect asserts that a specified handler returns a redirect status code. 
-// -// assert.HTTPRedirect(myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}} -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) HTTPRedirect(handler http.HandlerFunc, mode, url string, values url.Values) bool { - return HTTPRedirect(a.t, handler, mode, url, values) -} - -// HTTPError asserts that a specified handler returns an error status code. -// -// assert.HTTPError(myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}} -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) HTTPError(handler http.HandlerFunc, mode, url string, values url.Values) bool { - return HTTPError(a.t, handler, mode, url, values) -} - -// HTTPBodyContains asserts that a specified handler returns a -// body that contains a string. -// -// assert.HTTPBodyContains(t, myHandler, "www.google.com", nil, "I'm Feeling Lucky") -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) HTTPBodyContains(handler http.HandlerFunc, mode, url string, values url.Values, str interface{}) bool { - return HTTPBodyContains(a.t, handler, mode, url, values, str) -} - -// HTTPBodyNotContains asserts that a specified handler returns a -// body that does not contain a string. -// -// assert.HTTPBodyNotContains(t, myHandler, "www.google.com", nil, "I'm Feeling Lucky") -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) HTTPBodyNotContains(handler http.HandlerFunc, mode, url string, values url.Values, str interface{}) bool { - return HTTPBodyNotContains(a.t, handler, mode, url, values, str) -} diff --git a/vendor/github.com/stretchr/testify/mock/doc.go b/vendor/github.com/stretchr/testify/mock/doc.go deleted file mode 100644 index dd38507..0000000 --- a/vendor/github.com/stretchr/testify/mock/doc.go +++ /dev/null @@ -1,43 +0,0 @@ -// Provides a system by which it is possible to mock your objects and verify calls are happening as expected. -// -// Example Usage -// -// The mock package provides an object, Mock, that tracks activity on another object. It is usually -// embedded into a test object as shown below: -// -// type MyTestObject struct { -// // add a Mock object instance -// mock.Mock -// -// // other fields go here as normal -// } -// -// When implementing the methods of an interface, you wire your functions up -// to call the Mock.Called(args...) method, and return the appropriate values. -// -// For example, to mock a method that saves the name and age of a person and returns -// the year of their birth or an error, you might write this: -// -// func (o *MyTestObject) SavePersonDetails(firstname, lastname string, age int) (int, error) { -// args := o.Called(firstname, lastname, age) -// return args.Int(0), args.Error(1) -// } -// -// The Int, Error and Bool methods are examples of strongly typed getters that take the argument -// index position. Given this argument list: -// -// (12, true, "Something") -// -// You could read them out strongly typed like this: -// -// args.Int(0) -// args.Bool(1) -// args.String(2) -// -// For objects of your own type, use the generic Arguments.Get(index) method and make a type assertion: -// -// return args.Get(0).(*MyObject), args.Get(1).(*AnotherObjectOfMine) -// -// This may cause a panic if the object you are getting is nil (the type assertion will fail), in those -// cases you should check for nil first. 
-package mock diff --git a/vendor/github.com/stretchr/testify/mock/mock.go b/vendor/github.com/stretchr/testify/mock/mock.go deleted file mode 100644 index e9fb923..0000000 --- a/vendor/github.com/stretchr/testify/mock/mock.go +++ /dev/null @@ -1,559 +0,0 @@ -package mock - -import ( - "fmt" - "github.com/stretchr/objx" - "github.com/stretchr/testify/assert" - "reflect" - "runtime" - "strings" - "sync" - "time" -) - -// TestingT is an interface wrapper around *testing.T -type TestingT interface { - Logf(format string, args ...interface{}) - Errorf(format string, args ...interface{}) -} - -/* - Call -*/ - -// Call represents a method call and is used for setting expectations, -// as well as recording activity. -type Call struct { - - // The name of the method that was or will be called. - Method string - - // Holds the arguments of the method. - Arguments Arguments - - // Holds the arguments that should be returned when - // this method is called. - ReturnArguments Arguments - - // The number of times to return the return arguments when setting - // expectations. 0 means to always return the value. - Repeatability int - - // Holds a channel that will be used to block the Return until it either - // recieves a message or is closed. nil means it returns immediately. - WaitFor <-chan time.Time - - // Holds a handler used to manipulate arguments content that are passed by - // reference. It's useful when mocking methods such as unmarshalers or - // decoders. - Run func(Arguments) -} - -// Mock is the workhorse used to track activity on another object. -// For an example of its usage, refer to the "Example Usage" section at the top of this document. -type Mock struct { - - // The method name that is currently - // being referred to by the On method. - onMethodName string - - // An array of the arguments that are - // currently being referred to by the On method. - onMethodArguments Arguments - - // Represents the calls that are expected of - // an object. - ExpectedCalls []Call - - // Holds the calls that were made to this mocked object. - Calls []Call - - // TestData holds any data that might be useful for testing. Testify ignores - // this data completely allowing you to do whatever you like with it. - testData objx.Map - - mutex sync.Mutex -} - -// TestData holds any data that might be useful for testing. Testify ignores -// this data completely allowing you to do whatever you like with it. -func (m *Mock) TestData() objx.Map { - - if m.testData == nil { - m.testData = make(objx.Map) - } - - return m.testData -} - -/* - Setting expectations -*/ - -// On starts a description of an expectation of the specified method -// being called. -// -// Mock.On("MyMethod", arg1, arg2) -func (m *Mock) On(methodName string, arguments ...interface{}) *Mock { - m.onMethodName = methodName - m.onMethodArguments = arguments - return m -} - -// Return finishes a description of an expectation of the method (and arguments) -// specified in the most recent On method call. -// -// Mock.On("MyMethod", arg1, arg2).Return(returnArg1, returnArg2) -func (m *Mock) Return(returnArguments ...interface{}) *Mock { - m.ExpectedCalls = append(m.ExpectedCalls, Call{m.onMethodName, m.onMethodArguments, returnArguments, 0, nil, nil}) - return m -} - -// Once indicates that that the mock should only return the value once. 
-// -// Mock.On("MyMethod", arg1, arg2).Return(returnArg1, returnArg2).Once() -func (m *Mock) Once() { - m.ExpectedCalls[len(m.ExpectedCalls)-1].Repeatability = 1 -} - -// Twice indicates that that the mock should only return the value twice. -// -// Mock.On("MyMethod", arg1, arg2).Return(returnArg1, returnArg2).Twice() -func (m *Mock) Twice() { - m.ExpectedCalls[len(m.ExpectedCalls)-1].Repeatability = 2 -} - -// Times indicates that that the mock should only return the indicated number -// of times. -// -// Mock.On("MyMethod", arg1, arg2).Return(returnArg1, returnArg2).Times(5) -func (m *Mock) Times(i int) { - m.ExpectedCalls[len(m.ExpectedCalls)-1].Repeatability = i -} - -// WaitUntil sets the channel that will block the mock's return until its closed -// or a message is received. -// -// Mock.On("MyMethod", arg1, arg2).WaitUntil(time.After(time.Second)) -func (m *Mock) WaitUntil(w <-chan time.Time) *Mock { - m.ExpectedCalls[len(m.ExpectedCalls)-1].WaitFor = w - return m -} - -// After sets how long to block until the call returns -// -// Mock.On("MyMethod", arg1, arg2).After(time.Second) -func (m *Mock) After(d time.Duration) *Mock { - return m.WaitUntil(time.After(d)) -} - -// Run sets a handler to be called before returning. It can be used when -// mocking a method such as unmarshalers that takes a pointer to a struct and -// sets properties in such struct -// -// Mock.On("Unmarshal", AnythingOfType("*map[string]interface{}").Return().Run(function(args Arguments) { -// arg := args.Get(0).(*map[string]interface{}) -// arg["foo"] = "bar" -// }) -func (m *Mock) Run(fn func(Arguments)) *Mock { - m.ExpectedCalls[len(m.ExpectedCalls)-1].Run = fn - return m -} - -/* - Recording and responding to activity -*/ - -func (m *Mock) findExpectedCall(method string, arguments ...interface{}) (int, *Call) { - for i, call := range m.ExpectedCalls { - if call.Method == method && call.Repeatability > -1 { - - _, diffCount := call.Arguments.Diff(arguments) - if diffCount == 0 { - return i, &call - } - - } - } - return -1, nil -} - -func (m *Mock) findClosestCall(method string, arguments ...interface{}) (bool, *Call) { - - diffCount := 0 - var closestCall *Call = nil - - for _, call := range m.ExpectedCalls { - if call.Method == method { - - _, tempDiffCount := call.Arguments.Diff(arguments) - if tempDiffCount < diffCount || diffCount == 0 { - diffCount = tempDiffCount - closestCall = &call - } - - } - } - - if closestCall == nil { - return false, nil - } - - return true, closestCall -} - -func callString(method string, arguments Arguments, includeArgumentValues bool) string { - - var argValsString string = "" - if includeArgumentValues { - var argVals []string - for argIndex, arg := range arguments { - argVals = append(argVals, fmt.Sprintf("%d: %v", argIndex, arg)) - } - argValsString = fmt.Sprintf("\n\t\t%s", strings.Join(argVals, "\n\t\t")) - } - - return fmt.Sprintf("%s(%s)%s", method, arguments.String(), argValsString) -} - -// Called tells the mock object that a method has been called, and gets an array -// of arguments to return. Panics if the call is unexpected (i.e. not preceeded by -// appropriate .On .Return() calls) -// If Call.WaitFor is set, blocks until the channel is closed or receives a message. 
-func (m *Mock) Called(arguments ...interface{}) Arguments { - defer m.mutex.Unlock() - m.mutex.Lock() - - // get the calling function's name - pc, _, _, ok := runtime.Caller(1) - if !ok { - panic("Couldn't get the caller information") - } - functionPath := runtime.FuncForPC(pc).Name() - parts := strings.Split(functionPath, ".") - functionName := parts[len(parts)-1] - - found, call := m.findExpectedCall(functionName, arguments...) - - switch { - case found < 0: - // we have to fail here - because we don't know what to do - // as the return arguments. This is because: - // - // a) this is a totally unexpected call to this method, - // b) the arguments are not what was expected, or - // c) the developer has forgotten to add an accompanying On...Return pair. - - closestFound, closestCall := m.findClosestCall(functionName, arguments...) - - if closestFound { - panic(fmt.Sprintf("\n\nmock: Unexpected Method Call\n-----------------------------\n\n%s\n\nThe closest call I have is: \n\n%s\n", callString(functionName, arguments, true), callString(functionName, closestCall.Arguments, true))) - } else { - panic(fmt.Sprintf("\nassert: mock: I don't know what to return because the method call was unexpected.\n\tEither do Mock.On(\"%s\").Return(...) first, or remove the %s() call.\n\tThis method was unexpected:\n\t\t%s\n\tat: %s", functionName, functionName, callString(functionName, arguments, true), assert.CallerInfo())) - } - case call.Repeatability == 1: - call.Repeatability = -1 - m.ExpectedCalls[found] = *call - case call.Repeatability > 1: - call.Repeatability -= 1 - m.ExpectedCalls[found] = *call - } - - // add the call - m.Calls = append(m.Calls, Call{functionName, arguments, make([]interface{}, 0), 0, nil, nil}) - - // block if specified - if call.WaitFor != nil { - <-call.WaitFor - } - - if call.Run != nil { - call.Run(arguments) - } - - return call.ReturnArguments - -} - -/* - Assertions -*/ - -// AssertExpectationsForObjects asserts that everything specified with On and Return -// of the specified objects was in fact called as expected. -// -// Calls may have occurred in any order. -func AssertExpectationsForObjects(t TestingT, testObjects ...interface{}) bool { - var success bool = true - for _, obj := range testObjects { - mockObj := obj.(Mock) - success = success && mockObj.AssertExpectations(t) - } - return success -} - -// AssertExpectations asserts that everything specified with On and Return was -// in fact called as expected. Calls may have occurred in any order. -func (m *Mock) AssertExpectations(t TestingT) bool { - - var somethingMissing bool = false - var failedExpectations int = 0 - - // iterate through each expectation - for _, expectedCall := range m.ExpectedCalls { - switch { - case !m.methodWasCalled(expectedCall.Method, expectedCall.Arguments): - somethingMissing = true - failedExpectations++ - t.Logf("\u274C\t%s(%s)", expectedCall.Method, expectedCall.Arguments.String()) - case expectedCall.Repeatability > 0: - somethingMissing = true - failedExpectations++ - default: - t.Logf("\u2705\t%s(%s)", expectedCall.Method, expectedCall.Arguments.String()) - } - } - - if somethingMissing { - t.Errorf("FAIL: %d out of %d expectation(s) were met.\n\tThe code you are testing needs to make %d more call(s).\n\tat: %s", len(m.ExpectedCalls)-failedExpectations, len(m.ExpectedCalls), failedExpectations, assert.CallerInfo()) - } - - return !somethingMissing -} - -// AssertNumberOfCalls asserts that the method was called expectedCalls times. 
-func (m *Mock) AssertNumberOfCalls(t TestingT, methodName string, expectedCalls int) bool { - var actualCalls int = 0 - for _, call := range m.Calls { - if call.Method == methodName { - actualCalls++ - } - } - return assert.Equal(t, actualCalls, expectedCalls, fmt.Sprintf("Expected number of calls (%d) does not match the actual number of calls (%d).", expectedCalls, actualCalls)) -} - -// AssertCalled asserts that the method was called. -func (m *Mock) AssertCalled(t TestingT, methodName string, arguments ...interface{}) bool { - if !assert.True(t, m.methodWasCalled(methodName, arguments), fmt.Sprintf("The \"%s\" method should have been called with %d argument(s), but was not.", methodName, len(arguments))) { - t.Logf("%v", m.ExpectedCalls) - return false - } - return true -} - -// AssertNotCalled asserts that the method was not called. -func (m *Mock) AssertNotCalled(t TestingT, methodName string, arguments ...interface{}) bool { - if !assert.False(t, m.methodWasCalled(methodName, arguments), fmt.Sprintf("The \"%s\" method was called with %d argument(s), but should NOT have been.", methodName, len(arguments))) { - t.Logf("%v", m.ExpectedCalls) - return false - } - return true -} - -func (m *Mock) methodWasCalled(methodName string, expected []interface{}) bool { - for _, call := range m.Calls { - if call.Method == methodName { - - _, differences := Arguments(expected).Diff(call.Arguments) - - if differences == 0 { - // found the expected call - return true - } - - } - } - // we didn't find the expected call - return false -} - -/* - Arguments -*/ - -// Arguments holds an array of method arguments or return values. -type Arguments []interface{} - -const ( - // The "any" argument. Used in Diff and Assert when - // the argument being tested shouldn't be taken into consideration. - Anything string = "mock.Anything" -) - -// AnythingOfTypeArgument is a string that contains the type of an argument -// for use when type checking. Used in Diff and Assert. -type AnythingOfTypeArgument string - -// AnythingOfType returns an AnythingOfTypeArgument object containing the -// name of the type to check for. Used in Diff and Assert. -// -// For example: -// Assert(t, AnythingOfType("string"), AnythingOfType("int")) -func AnythingOfType(t string) AnythingOfTypeArgument { - return AnythingOfTypeArgument(t) -} - -// Get Returns the argument at the specified index. -func (args Arguments) Get(index int) interface{} { - if index+1 > len(args) { - panic(fmt.Sprintf("assert: arguments: Cannot call Get(%d) because there are %d argument(s).", index, len(args))) - } - return args[index] -} - -// Is gets whether the objects match the arguments specified. -func (args Arguments) Is(objects ...interface{}) bool { - for i, obj := range args { - if obj != objects[i] { - return false - } - } - return true -} - -// Diff gets a string describing the differences between the arguments -// and the specified objects. -// -// Returns the diff string and number of differences found. 
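The Assert* helpers and the Anything matcher combine as follows; a short sketch reusing the hypothetical Calculator mock from the previous example:

```go
func TestSumArgumentMatching(t *testing.T) {
	c := new(Calculator)
	// mock.Anything matches any value in that argument position.
	c.On("Sum", 1, mock.Anything).Return(3)

	_ = c.Sum(1, 2)

	c.AssertCalled(t, "Sum", 1, 2)     // recorded with the concrete arguments
	c.AssertNumberOfCalls(t, "Sum", 1) // exactly one recorded call
	c.AssertNotCalled(t, "Product")    // no such method was invoked
}
```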
-func (args Arguments) Diff(objects []interface{}) (string, int) { - - var output string = "\n" - var differences int - - var maxArgCount int = len(args) - if len(objects) > maxArgCount { - maxArgCount = len(objects) - } - - for i := 0; i < maxArgCount; i++ { - var actual, expected interface{} - - if len(objects) <= i { - actual = "(Missing)" - } else { - actual = objects[i] - } - - if len(args) <= i { - expected = "(Missing)" - } else { - expected = args[i] - } - - if reflect.TypeOf(expected) == reflect.TypeOf((*AnythingOfTypeArgument)(nil)).Elem() { - - // type checking - if reflect.TypeOf(actual).Name() != string(expected.(AnythingOfTypeArgument)) && reflect.TypeOf(actual).String() != string(expected.(AnythingOfTypeArgument)) { - // not match - differences++ - output = fmt.Sprintf("%s\t%d: \u274C type %s != type %s - %s\n", output, i, expected, reflect.TypeOf(actual).Name(), actual) - } - - } else { - - // normal checking - - if assert.ObjectsAreEqual(expected, Anything) || assert.ObjectsAreEqual(actual, Anything) || assert.ObjectsAreEqual(actual, expected) { - // match - output = fmt.Sprintf("%s\t%d: \u2705 %s == %s\n", output, i, actual, expected) - } else { - // not match - differences++ - output = fmt.Sprintf("%s\t%d: \u274C %s != %s\n", output, i, actual, expected) - } - } - - } - - if differences == 0 { - return "No differences.", differences - } - - return output, differences - -} - -// Assert compares the arguments with the specified objects and fails if -// they do not exactly match. -func (args Arguments) Assert(t TestingT, objects ...interface{}) bool { - - // get the differences - diff, diffCount := args.Diff(objects) - - if diffCount == 0 { - return true - } - - // there are differences... report them... - t.Logf(diff) - t.Errorf("%sArguments do not match.", assert.CallerInfo()) - - return false - -} - -// String gets the argument at the specified index. Panics if there is no argument, or -// if the argument is of the wrong type. -// -// If no index is provided, String() returns a complete string representation -// of the arguments. -func (args Arguments) String(indexOrNil ...int) string { - - if len(indexOrNil) == 0 { - // normal String() method - return a string representation of the args - var argsStr []string - for _, arg := range args { - argsStr = append(argsStr, fmt.Sprintf("%s", reflect.TypeOf(arg))) - } - return strings.Join(argsStr, ",") - } else if len(indexOrNil) == 1 { - // Index has been specified - get the argument at that index - var index int = indexOrNil[0] - var s string - var ok bool - if s, ok = args.Get(index).(string); !ok { - panic(fmt.Sprintf("assert: arguments: String(%d) failed because object wasn't correct type: %s", index, args.Get(index))) - } - return s - } - - panic(fmt.Sprintf("assert: arguments: Wrong number of arguments passed to String. Must be 0 or 1, not %d", len(indexOrNil))) - -} - -// Int gets the argument at the specified index. Panics if there is no argument, or -// if the argument is of the wrong type. -func (args Arguments) Int(index int) int { - var s int - var ok bool - if s, ok = args.Get(index).(int); !ok { - panic(fmt.Sprintf("assert: arguments: Int(%d) failed because object wasn't correct type: %v", index, args.Get(index))) - } - return s -} - -// Error gets the argument at the specified index. Panics if there is no argument, or -// if the argument is of the wrong type. 
-func (args Arguments) Error(index int) error { - obj := args.Get(index) - var s error - var ok bool - if obj == nil { - return nil - } - if s, ok = obj.(error); !ok { - panic(fmt.Sprintf("assert: arguments: Error(%d) failed because object wasn't correct type: %v", index, args.Get(index))) - } - return s -} - -// Bool gets the argument at the specified index. Panics if there is no argument, or -// if the argument is of the wrong type. -func (args Arguments) Bool(index int) bool { - var s bool - var ok bool - if s, ok = args.Get(index).(bool); !ok { - panic(fmt.Sprintf("assert: arguments: Bool(%d) failed because object wasn't correct type: %v", index, args.Get(index))) - } - return s -} diff --git a/vendor/golang.org/x/net/context/context.go b/vendor/golang.org/x/net/context/context.go deleted file mode 100644 index 77b64d0..0000000 --- a/vendor/golang.org/x/net/context/context.go +++ /dev/null @@ -1,447 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package context defines the Context type, which carries deadlines, -// cancelation signals, and other request-scoped values across API boundaries -// and between processes. -// -// Incoming requests to a server should create a Context, and outgoing calls to -// servers should accept a Context. The chain of function calls between must -// propagate the Context, optionally replacing it with a modified copy created -// using WithDeadline, WithTimeout, WithCancel, or WithValue. -// -// Programs that use Contexts should follow these rules to keep interfaces -// consistent across packages and enable static analysis tools to check context -// propagation: -// -// Do not store Contexts inside a struct type; instead, pass a Context -// explicitly to each function that needs it. The Context should be the first -// parameter, typically named ctx: -// -// func DoSomething(ctx context.Context, arg Arg) error { -// // ... use ctx ... -// } -// -// Do not pass a nil Context, even if a function permits it. Pass context.TODO -// if you are unsure about which Context to use. -// -// Use context Values only for request-scoped data that transits processes and -// APIs, not for passing optional parameters to functions. -// -// The same Context may be passed to functions running in different goroutines; -// Contexts are safe for simultaneous use by multiple goroutines. -// -// See http://blog.golang.org/context for example code for a server that uses -// Contexts. -package context // import "golang.org/x/net/context" - -import ( - "errors" - "fmt" - "sync" - "time" -) - -// A Context carries a deadline, a cancelation signal, and other values across -// API boundaries. -// -// Context's methods may be called by multiple goroutines simultaneously. -type Context interface { - // Deadline returns the time when work done on behalf of this context - // should be canceled. Deadline returns ok==false when no deadline is - // set. Successive calls to Deadline return the same results. - Deadline() (deadline time.Time, ok bool) - - // Done returns a channel that's closed when work done on behalf of this - // context should be canceled. Done may return nil if this context can - // never be canceled. Successive calls to Done return the same value. 
- // - // WithCancel arranges for Done to be closed when cancel is called; - // WithDeadline arranges for Done to be closed when the deadline - // expires; WithTimeout arranges for Done to be closed when the timeout - // elapses. - // - // Done is provided for use in select statements: - // - // // Stream generates values with DoSomething and sends them to out - // // until DoSomething returns an error or ctx.Done is closed. - // func Stream(ctx context.Context, out <-chan Value) error { - // for { - // v, err := DoSomething(ctx) - // if err != nil { - // return err - // } - // select { - // case <-ctx.Done(): - // return ctx.Err() - // case out <- v: - // } - // } - // } - // - // See http://blog.golang.org/pipelines for more examples of how to use - // a Done channel for cancelation. - Done() <-chan struct{} - - // Err returns a non-nil error value after Done is closed. Err returns - // Canceled if the context was canceled or DeadlineExceeded if the - // context's deadline passed. No other values for Err are defined. - // After Done is closed, successive calls to Err return the same value. - Err() error - - // Value returns the value associated with this context for key, or nil - // if no value is associated with key. Successive calls to Value with - // the same key returns the same result. - // - // Use context values only for request-scoped data that transits - // processes and API boundaries, not for passing optional parameters to - // functions. - // - // A key identifies a specific value in a Context. Functions that wish - // to store values in Context typically allocate a key in a global - // variable then use that key as the argument to context.WithValue and - // Context.Value. A key can be any type that supports equality; - // packages should define keys as an unexported type to avoid - // collisions. - // - // Packages that define a Context key should provide type-safe accessors - // for the values stores using that key: - // - // // Package user defines a User type that's stored in Contexts. - // package user - // - // import "golang.org/x/net/context" - // - // // User is the type of value stored in the Contexts. - // type User struct {...} - // - // // key is an unexported type for keys defined in this package. - // // This prevents collisions with keys defined in other packages. - // type key int - // - // // userKey is the key for user.User values in Contexts. It is - // // unexported; clients use user.NewContext and user.FromContext - // // instead of using this key directly. - // var userKey key = 0 - // - // // NewContext returns a new Context that carries value u. - // func NewContext(ctx context.Context, u *User) context.Context { - // return context.WithValue(ctx, userKey, u) - // } - // - // // FromContext returns the User value stored in ctx, if any. - // func FromContext(ctx context.Context) (*User, bool) { - // u, ok := ctx.Value(userKey).(*User) - // return u, ok - // } - Value(key interface{}) interface{} -} - -// Canceled is the error returned by Context.Err when the context is canceled. -var Canceled = errors.New("context canceled") - -// DeadlineExceeded is the error returned by Context.Err when the context's -// deadline passes. -var DeadlineExceeded = errors.New("context deadline exceeded") - -// An emptyCtx is never canceled, has no values, and has no deadline. It is not -// struct{}, since vars of this type must have distinct addresses. 
-type emptyCtx int - -func (*emptyCtx) Deadline() (deadline time.Time, ok bool) { - return -} - -func (*emptyCtx) Done() <-chan struct{} { - return nil -} - -func (*emptyCtx) Err() error { - return nil -} - -func (*emptyCtx) Value(key interface{}) interface{} { - return nil -} - -func (e *emptyCtx) String() string { - switch e { - case background: - return "context.Background" - case todo: - return "context.TODO" - } - return "unknown empty Context" -} - -var ( - background = new(emptyCtx) - todo = new(emptyCtx) -) - -// Background returns a non-nil, empty Context. It is never canceled, has no -// values, and has no deadline. It is typically used by the main function, -// initialization, and tests, and as the top-level Context for incoming -// requests. -func Background() Context { - return background -} - -// TODO returns a non-nil, empty Context. Code should use context.TODO when -// it's unclear which Context to use or it is not yet available (because the -// surrounding function has not yet been extended to accept a Context -// parameter). TODO is recognized by static analysis tools that determine -// whether Contexts are propagated correctly in a program. -func TODO() Context { - return todo -} - -// A CancelFunc tells an operation to abandon its work. -// A CancelFunc does not wait for the work to stop. -// After the first call, subsequent calls to a CancelFunc do nothing. -type CancelFunc func() - -// WithCancel returns a copy of parent with a new Done channel. The returned -// context's Done channel is closed when the returned cancel function is called -// or when the parent context's Done channel is closed, whichever happens first. -// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this Context complete. -func WithCancel(parent Context) (ctx Context, cancel CancelFunc) { - c := newCancelCtx(parent) - propagateCancel(parent, &c) - return &c, func() { c.cancel(true, Canceled) } -} - -// newCancelCtx returns an initialized cancelCtx. -func newCancelCtx(parent Context) cancelCtx { - return cancelCtx{ - Context: parent, - done: make(chan struct{}), - } -} - -// propagateCancel arranges for child to be canceled when parent is. -func propagateCancel(parent Context, child canceler) { - if parent.Done() == nil { - return // parent is never canceled - } - if p, ok := parentCancelCtx(parent); ok { - p.mu.Lock() - if p.err != nil { - // parent has already been canceled - child.cancel(false, p.err) - } else { - if p.children == nil { - p.children = make(map[canceler]bool) - } - p.children[child] = true - } - p.mu.Unlock() - } else { - go func() { - select { - case <-parent.Done(): - child.cancel(false, parent.Err()) - case <-child.Done(): - } - }() - } -} - -// parentCancelCtx follows a chain of parent references until it finds a -// *cancelCtx. This function understands how each of the concrete types in this -// package represents its parent. -func parentCancelCtx(parent Context) (*cancelCtx, bool) { - for { - switch c := parent.(type) { - case *cancelCtx: - return c, true - case *timerCtx: - return &c.cancelCtx, true - case *valueCtx: - parent = c.Context - default: - return nil, false - } - } -} - -// removeChild removes a context from its parent. 
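Background, WithCancel and the cancelCtx tree above are normally consumed through just two calls: create a child context, then cancel it. A minimal sketch against this package:

```go
package main

import (
	"fmt"
	"time"

	"golang.org/x/net/context"
)

func main() {
	ctx, cancel := context.WithCancel(context.Background())

	go func() {
		// Block until the context is canceled.
		<-ctx.Done()
		fmt.Println("worker stopped:", ctx.Err()) // context.Canceled
	}()

	time.Sleep(10 * time.Millisecond)
	cancel() // closes ctx.Done() and releases the cancelCtx's resources
	time.Sleep(10 * time.Millisecond)
}
```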
-func removeChild(parent Context, child canceler) { - p, ok := parentCancelCtx(parent) - if !ok { - return - } - p.mu.Lock() - if p.children != nil { - delete(p.children, child) - } - p.mu.Unlock() -} - -// A canceler is a context type that can be canceled directly. The -// implementations are *cancelCtx and *timerCtx. -type canceler interface { - cancel(removeFromParent bool, err error) - Done() <-chan struct{} -} - -// A cancelCtx can be canceled. When canceled, it also cancels any children -// that implement canceler. -type cancelCtx struct { - Context - - done chan struct{} // closed by the first cancel call. - - mu sync.Mutex - children map[canceler]bool // set to nil by the first cancel call - err error // set to non-nil by the first cancel call -} - -func (c *cancelCtx) Done() <-chan struct{} { - return c.done -} - -func (c *cancelCtx) Err() error { - c.mu.Lock() - defer c.mu.Unlock() - return c.err -} - -func (c *cancelCtx) String() string { - return fmt.Sprintf("%v.WithCancel", c.Context) -} - -// cancel closes c.done, cancels each of c's children, and, if -// removeFromParent is true, removes c from its parent's children. -func (c *cancelCtx) cancel(removeFromParent bool, err error) { - if err == nil { - panic("context: internal error: missing cancel error") - } - c.mu.Lock() - if c.err != nil { - c.mu.Unlock() - return // already canceled - } - c.err = err - close(c.done) - for child := range c.children { - // NOTE: acquiring the child's lock while holding parent's lock. - child.cancel(false, err) - } - c.children = nil - c.mu.Unlock() - - if removeFromParent { - removeChild(c.Context, c) - } -} - -// WithDeadline returns a copy of the parent context with the deadline adjusted -// to be no later than d. If the parent's deadline is already earlier than d, -// WithDeadline(parent, d) is semantically equivalent to parent. The returned -// context's Done channel is closed when the deadline expires, when the returned -// cancel function is called, or when the parent context's Done channel is -// closed, whichever happens first. -// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this Context complete. -func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) { - if cur, ok := parent.Deadline(); ok && cur.Before(deadline) { - // The current deadline is already sooner than the new one. - return WithCancel(parent) - } - c := &timerCtx{ - cancelCtx: newCancelCtx(parent), - deadline: deadline, - } - propagateCancel(parent, c) - d := deadline.Sub(time.Now()) - if d <= 0 { - c.cancel(true, DeadlineExceeded) // deadline has already passed - return c, func() { c.cancel(true, Canceled) } - } - c.mu.Lock() - defer c.mu.Unlock() - if c.err == nil { - c.timer = time.AfterFunc(d, func() { - c.cancel(true, DeadlineExceeded) - }) - } - return c, func() { c.cancel(true, Canceled) } -} - -// A timerCtx carries a timer and a deadline. It embeds a cancelCtx to -// implement Done and Err. It implements cancel by stopping its timer then -// delegating to cancelCtx.cancel. -type timerCtx struct { - cancelCtx - timer *time.Timer // Under cancelCtx.mu. 
- - deadline time.Time -} - -func (c *timerCtx) Deadline() (deadline time.Time, ok bool) { - return c.deadline, true -} - -func (c *timerCtx) String() string { - return fmt.Sprintf("%v.WithDeadline(%s [%s])", c.cancelCtx.Context, c.deadline, c.deadline.Sub(time.Now())) -} - -func (c *timerCtx) cancel(removeFromParent bool, err error) { - c.cancelCtx.cancel(false, err) - if removeFromParent { - // Remove this timerCtx from its parent cancelCtx's children. - removeChild(c.cancelCtx.Context, c) - } - c.mu.Lock() - if c.timer != nil { - c.timer.Stop() - c.timer = nil - } - c.mu.Unlock() -} - -// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)). -// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this Context complete: -// -// func slowOperationWithTimeout(ctx context.Context) (Result, error) { -// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond) -// defer cancel() // releases resources if slowOperation completes before timeout elapses -// return slowOperation(ctx) -// } -func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) { - return WithDeadline(parent, time.Now().Add(timeout)) -} - -// WithValue returns a copy of parent in which the value associated with key is -// val. -// -// Use context Values only for request-scoped data that transits processes and -// APIs, not for passing optional parameters to functions. -func WithValue(parent Context, key interface{}, val interface{}) Context { - return &valueCtx{parent, key, val} -} - -// A valueCtx carries a key-value pair. It implements Value for that key and -// delegates all other calls to the embedded Context. -type valueCtx struct { - Context - key, val interface{} -} - -func (c *valueCtx) String() string { - return fmt.Sprintf("%v.WithValue(%#v, %#v)", c.Context, c.key, c.val) -} - -func (c *valueCtx) Value(key interface{}) interface{} { - if c.key == key { - return c.val - } - return c.Context.Value(key) -} diff --git a/vendor/golang.org/x/oauth2/AUTHORS b/vendor/golang.org/x/oauth2/AUTHORS deleted file mode 100644 index 15167cd..0000000 --- a/vendor/golang.org/x/oauth2/AUTHORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code refers to The Go Authors for copyright purposes. -# The master list of authors is in the main Go distribution, -# visible at http://tip.golang.org/AUTHORS. diff --git a/vendor/golang.org/x/oauth2/CONTRIBUTING.md b/vendor/golang.org/x/oauth2/CONTRIBUTING.md deleted file mode 100644 index 46aa2b1..0000000 --- a/vendor/golang.org/x/oauth2/CONTRIBUTING.md +++ /dev/null @@ -1,31 +0,0 @@ -# Contributing to Go - -Go is an open source project. - -It is the work of hundreds of contributors. We appreciate your help! - - -## Filing issues - -When [filing an issue](https://github.com/golang/oauth2/issues), make sure to answer these five questions: - -1. What version of Go are you using (`go version`)? -2. What operating system and processor architecture are you using? -3. What did you do? -4. What did you expect to see? -5. What did you see instead? - -General questions should go to the [golang-nuts mailing list](https://groups.google.com/group/golang-nuts) instead of the issue tracker. -The gophers there will answer or ask you to file an issue if you've tripped over a bug. - -## Contributing code - -Please read the [Contribution Guidelines](https://golang.org/doc/contribute.html) -before sending patches. 
- -**We do not accept GitHub pull requests** -(we use [Gerrit](https://code.google.com/p/gerrit/) instead for code review). - -Unless otherwise noted, the Go source files are distributed under -the BSD-style license found in the LICENSE file. - diff --git a/vendor/golang.org/x/oauth2/CONTRIBUTORS b/vendor/golang.org/x/oauth2/CONTRIBUTORS deleted file mode 100644 index 1c4577e..0000000 --- a/vendor/golang.org/x/oauth2/CONTRIBUTORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code was written by the Go contributors. -# The master list of contributors is in the main Go distribution, -# visible at http://tip.golang.org/CONTRIBUTORS. diff --git a/vendor/golang.org/x/oauth2/LICENSE b/vendor/golang.org/x/oauth2/LICENSE deleted file mode 100644 index d02f24f..0000000 --- a/vendor/golang.org/x/oauth2/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2009 The oauth2 Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/golang.org/x/oauth2/README.md b/vendor/golang.org/x/oauth2/README.md deleted file mode 100644 index 0d51417..0000000 --- a/vendor/golang.org/x/oauth2/README.md +++ /dev/null @@ -1,64 +0,0 @@ -# OAuth2 for Go - -[![Build Status](https://travis-ci.org/golang/oauth2.svg?branch=master)](https://travis-ci.org/golang/oauth2) - -oauth2 package contains a client implementation for OAuth 2.0 spec. - -## Installation - -~~~~ -go get golang.org/x/oauth2 -~~~~ - -See godoc for further documentation and examples. - -* [godoc.org/golang.org/x/oauth2](http://godoc.org/golang.org/x/oauth2) -* [godoc.org/golang.org/x/oauth2/google](http://godoc.org/golang.org/x/oauth2/google) - - -## App Engine - -In change 96e89be (March 2015) we removed the `oauth2.Context2` type in favor -of the [`context.Context`](https://golang.org/x/net/context#Context) type from -the `golang.org/x/net/context` package - -This means its no longer possible to use the "Classic App Engine" -`appengine.Context` type with the `oauth2` package. (You're using -Classic App Engine if you import the package `"appengine"`.) - -To work around this, you may use the new `"google.golang.org/appengine"` -package. 
This package has almost the same API as the `"appengine"` package, -but it can be fetched with `go get` and used on "Managed VMs" and well as -Classic App Engine. - -See the [new `appengine` package's readme](https://github.com/golang/appengine#updating-a-go-app-engine-app) -for information on updating your app. - -If you don't want to update your entire app to use the new App Engine packages, -you may use both sets of packages in parallel, using only the new packages -with the `oauth2` package. - - import ( - "golang.org/x/net/context" - "golang.org/x/oauth2" - "golang.org/x/oauth2/google" - newappengine "google.golang.org/appengine" - newurlfetch "google.golang.org/appengine/urlfetch" - - "appengine" - ) - - func handler(w http.ResponseWriter, r *http.Request) { - var c appengine.Context = appengine.NewContext(r) - c.Infof("Logging a message with the old package") - - var ctx context.Context = newappengine.NewContext(r) - client := &http.Client{ - Transport: &oauth2.Transport{ - Source: google.AppEngineTokenSource(ctx, "scope"), - Base: &newurlfetch.Transport{Context: ctx}, - }, - } - client.Get("...") - } - diff --git a/vendor/golang.org/x/oauth2/client_appengine.go b/vendor/golang.org/x/oauth2/client_appengine.go deleted file mode 100644 index 8962c49..0000000 --- a/vendor/golang.org/x/oauth2/client_appengine.go +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build appengine - -// App Engine hooks. - -package oauth2 - -import ( - "net/http" - - "golang.org/x/net/context" - "golang.org/x/oauth2/internal" - "google.golang.org/appengine/urlfetch" -) - -func init() { - internal.RegisterContextClientFunc(contextClientAppEngine) -} - -func contextClientAppEngine(ctx context.Context) (*http.Client, error) { - return urlfetch.Client(ctx), nil -} diff --git a/vendor/golang.org/x/oauth2/internal/oauth2.go b/vendor/golang.org/x/oauth2/internal/oauth2.go deleted file mode 100644 index fbe1028..0000000 --- a/vendor/golang.org/x/oauth2/internal/oauth2.go +++ /dev/null @@ -1,76 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package internal contains support packages for oauth2 package. -package internal - -import ( - "bufio" - "crypto/rsa" - "crypto/x509" - "encoding/pem" - "errors" - "fmt" - "io" - "strings" -) - -// ParseKey converts the binary contents of a private key file -// to an *rsa.PrivateKey. It detects whether the private key is in a -// PEM container or not. If so, it extracts the the private key -// from PEM container before conversion. It only supports PEM -// containers with no passphrase. 
-func ParseKey(key []byte) (*rsa.PrivateKey, error) { - block, _ := pem.Decode(key) - if block != nil { - key = block.Bytes - } - parsedKey, err := x509.ParsePKCS8PrivateKey(key) - if err != nil { - parsedKey, err = x509.ParsePKCS1PrivateKey(key) - if err != nil { - return nil, fmt.Errorf("private key should be a PEM or plain PKSC1 or PKCS8; parse error: %v", err) - } - } - parsed, ok := parsedKey.(*rsa.PrivateKey) - if !ok { - return nil, errors.New("private key is invalid") - } - return parsed, nil -} - -func ParseINI(ini io.Reader) (map[string]map[string]string, error) { - result := map[string]map[string]string{ - "": map[string]string{}, // root section - } - scanner := bufio.NewScanner(ini) - currentSection := "" - for scanner.Scan() { - line := strings.TrimSpace(scanner.Text()) - if strings.HasPrefix(line, ";") { - // comment. - continue - } - if strings.HasPrefix(line, "[") && strings.HasSuffix(line, "]") { - currentSection = strings.TrimSpace(line[1 : len(line)-1]) - result[currentSection] = map[string]string{} - continue - } - parts := strings.SplitN(line, "=", 2) - if len(parts) == 2 && parts[0] != "" { - result[currentSection][strings.TrimSpace(parts[0])] = strings.TrimSpace(parts[1]) - } - } - if err := scanner.Err(); err != nil { - return nil, fmt.Errorf("error scanning ini: %v", err) - } - return result, nil -} - -func CondVal(v string) []string { - if v == "" { - return nil - } - return []string{v} -} diff --git a/vendor/golang.org/x/oauth2/internal/token.go b/vendor/golang.org/x/oauth2/internal/token.go deleted file mode 100644 index 39caf6c..0000000 --- a/vendor/golang.org/x/oauth2/internal/token.go +++ /dev/null @@ -1,221 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package internal contains support packages for oauth2 package. -package internal - -import ( - "encoding/json" - "fmt" - "io" - "io/ioutil" - "mime" - "net/http" - "net/url" - "strconv" - "strings" - "time" - - "golang.org/x/net/context" -) - -// Token represents the crendentials used to authorize -// the requests to access protected resources on the OAuth 2.0 -// provider's backend. -// -// This type is a mirror of oauth2.Token and exists to break -// an otherwise-circular dependency. Other internal packages -// should convert this Token into an oauth2.Token before use. -type Token struct { - // AccessToken is the token that authorizes and authenticates - // the requests. - AccessToken string - - // TokenType is the type of token. - // The Type method returns either this or "Bearer", the default. - TokenType string - - // RefreshToken is a token that's used by the application - // (as opposed to the user) to refresh the access token - // if it expires. - RefreshToken string - - // Expiry is the optional expiration time of the access token. - // - // If zero, TokenSource implementations will reuse the same - // token forever and RefreshToken or equivalent - // mechanisms for that TokenSource will not be used. - Expiry time.Time - - // Raw optionally contains extra metadata from the server - // when updating a token. - Raw interface{} -} - -// tokenJSON is the struct representing the HTTP response from OAuth2 -// providers returning a token in JSON form. 
-type tokenJSON struct { - AccessToken string `json:"access_token"` - TokenType string `json:"token_type"` - RefreshToken string `json:"refresh_token"` - ExpiresIn expirationTime `json:"expires_in"` // at least PayPal returns string, while most return number - Expires expirationTime `json:"expires"` // broken Facebook spelling of expires_in -} - -func (e *tokenJSON) expiry() (t time.Time) { - if v := e.ExpiresIn; v != 0 { - return time.Now().Add(time.Duration(v) * time.Second) - } - if v := e.Expires; v != 0 { - return time.Now().Add(time.Duration(v) * time.Second) - } - return -} - -type expirationTime int32 - -func (e *expirationTime) UnmarshalJSON(b []byte) error { - var n json.Number - err := json.Unmarshal(b, &n) - if err != nil { - return err - } - i, err := n.Int64() - if err != nil { - return err - } - *e = expirationTime(i) - return nil -} - -var brokenAuthHeaderProviders = []string{ - "https://accounts.google.com/", - "https://api.dropbox.com/", - "https://api.instagram.com/", - "https://api.netatmo.net/", - "https://api.odnoklassniki.ru/", - "https://api.pushbullet.com/", - "https://api.soundcloud.com/", - "https://api.twitch.tv/", - "https://app.box.com/", - "https://connect.stripe.com/", - "https://login.microsoftonline.com/", - "https://login.salesforce.com/", - "https://oauth.sandbox.trainingpeaks.com/", - "https://oauth.trainingpeaks.com/", - "https://oauth.vk.com/", - "https://slack.com/", - "https://test-sandbox.auth.corp.google.com", - "https://test.salesforce.com/", - "https://user.gini.net/", - "https://www.douban.com/", - "https://www.googleapis.com/", - "https://www.linkedin.com/", - "https://www.strava.com/oauth/", -} - -func RegisterBrokenAuthHeaderProvider(tokenURL string) { - brokenAuthHeaderProviders = append(brokenAuthHeaderProviders, tokenURL) -} - -// providerAuthHeaderWorks reports whether the OAuth2 server identified by the tokenURL -// implements the OAuth2 spec correctly -// See https://code.google.com/p/goauth2/issues/detail?id=31 for background. -// In summary: -// - Reddit only accepts client secret in the Authorization header -// - Dropbox accepts either it in URL param or Auth header, but not both. -// - Google only accepts URL param (not spec compliant?), not Auth header -// - Stripe only accepts client secret in Auth header with Bearer method, not Basic -func providerAuthHeaderWorks(tokenURL string) bool { - for _, s := range brokenAuthHeaderProviders { - if strings.HasPrefix(tokenURL, s) { - // Some sites fail to implement the OAuth2 spec fully. - return false - } - } - - // Assume the provider implements the spec properly - // otherwise. We can add more exceptions as they're - // discovered. We will _not_ be adding configurable hooks - // to this package to let users select server bugs. 
- return true -} - -func RetrieveToken(ctx context.Context, ClientID, ClientSecret, TokenURL string, v url.Values) (*Token, error) { - hc, err := ContextClient(ctx) - if err != nil { - return nil, err - } - v.Set("client_id", ClientID) - bustedAuth := !providerAuthHeaderWorks(TokenURL) - if bustedAuth && ClientSecret != "" { - v.Set("client_secret", ClientSecret) - } - req, err := http.NewRequest("POST", TokenURL, strings.NewReader(v.Encode())) - if err != nil { - return nil, err - } - req.Header.Set("Content-Type", "application/x-www-form-urlencoded") - if !bustedAuth { - req.SetBasicAuth(ClientID, ClientSecret) - } - r, err := hc.Do(req) - if err != nil { - return nil, err - } - defer r.Body.Close() - body, err := ioutil.ReadAll(io.LimitReader(r.Body, 1<<20)) - if err != nil { - return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err) - } - if code := r.StatusCode; code < 200 || code > 299 { - return nil, fmt.Errorf("oauth2: cannot fetch token: %v\nResponse: %s", r.Status, body) - } - - var token *Token - content, _, _ := mime.ParseMediaType(r.Header.Get("Content-Type")) - switch content { - case "application/x-www-form-urlencoded", "text/plain": - vals, err := url.ParseQuery(string(body)) - if err != nil { - return nil, err - } - token = &Token{ - AccessToken: vals.Get("access_token"), - TokenType: vals.Get("token_type"), - RefreshToken: vals.Get("refresh_token"), - Raw: vals, - } - e := vals.Get("expires_in") - if e == "" { - // TODO(jbd): Facebook's OAuth2 implementation is broken and - // returns expires_in field in expires. Remove the fallback to expires, - // when Facebook fixes their implementation. - e = vals.Get("expires") - } - expires, _ := strconv.Atoi(e) - if expires != 0 { - token.Expiry = time.Now().Add(time.Duration(expires) * time.Second) - } - default: - var tj tokenJSON - if err = json.Unmarshal(body, &tj); err != nil { - return nil, err - } - token = &Token{ - AccessToken: tj.AccessToken, - TokenType: tj.TokenType, - RefreshToken: tj.RefreshToken, - Expiry: tj.expiry(), - Raw: make(map[string]interface{}), - } - json.Unmarshal(body, &token.Raw) // no error checks for optional fields - } - // Don't overwrite `RefreshToken` with an empty value - // if this was a token refreshing request. - if token.RefreshToken == "" { - token.RefreshToken = v.Get("refresh_token") - } - return token, nil -} diff --git a/vendor/golang.org/x/oauth2/internal/transport.go b/vendor/golang.org/x/oauth2/internal/transport.go deleted file mode 100644 index 33d3669..0000000 --- a/vendor/golang.org/x/oauth2/internal/transport.go +++ /dev/null @@ -1,67 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package internal contains support packages for oauth2 package. -package internal - -import ( - "net/http" - - "golang.org/x/net/context" -) - -// HTTPClient is the context key to use with golang.org/x/net/context's -// WithValue function to associate an *http.Client value with a context. -var HTTPClient ContextKey - -// ContextKey is just an empty struct. It exists so HTTPClient can be -// an immutable public variable with a unique type. It's immutable -// because nobody else can create a ContextKey, being unexported. -type ContextKey struct{} - -// ContextClientFunc is a func which tries to return an *http.Client -// given a Context value. If it returns an error, the search stops -// with that error. 
If it returns (nil, nil), the search continues -// down the list of registered funcs. -type ContextClientFunc func(context.Context) (*http.Client, error) - -var contextClientFuncs []ContextClientFunc - -func RegisterContextClientFunc(fn ContextClientFunc) { - contextClientFuncs = append(contextClientFuncs, fn) -} - -func ContextClient(ctx context.Context) (*http.Client, error) { - for _, fn := range contextClientFuncs { - c, err := fn(ctx) - if err != nil { - return nil, err - } - if c != nil { - return c, nil - } - } - if hc, ok := ctx.Value(HTTPClient).(*http.Client); ok { - return hc, nil - } - return http.DefaultClient, nil -} - -func ContextTransport(ctx context.Context) http.RoundTripper { - hc, err := ContextClient(ctx) - // This is a rare error case (somebody using nil on App Engine). - if err != nil { - return ErrorTransport{err} - } - return hc.Transport -} - -// ErrorTransport returns the specified error on RoundTrip. -// This RoundTripper should be used in rare error cases where -// error handling can be postponed to response handling time. -type ErrorTransport struct{ Err error } - -func (t ErrorTransport) RoundTrip(*http.Request) (*http.Response, error) { - return nil, t.Err -} diff --git a/vendor/golang.org/x/oauth2/oauth2.go b/vendor/golang.org/x/oauth2/oauth2.go deleted file mode 100644 index a682896..0000000 --- a/vendor/golang.org/x/oauth2/oauth2.go +++ /dev/null @@ -1,337 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package oauth2 provides support for making -// OAuth2 authorized and authenticated HTTP requests. -// It can additionally grant authorization with Bearer JWT. -package oauth2 // import "golang.org/x/oauth2" - -import ( - "bytes" - "errors" - "net/http" - "net/url" - "strings" - "sync" - - "golang.org/x/net/context" - "golang.org/x/oauth2/internal" -) - -// NoContext is the default context you should supply if not using -// your own context.Context (see https://golang.org/x/net/context). -var NoContext = context.TODO() - -// RegisterBrokenAuthHeaderProvider registers an OAuth2 server -// identified by the tokenURL prefix as an OAuth2 implementation -// which doesn't support the HTTP Basic authentication -// scheme to authenticate with the authorization server. -// Once a server is registered, credentials (client_id and client_secret) -// will be passed as query parameters rather than being present -// in the Authorization header. -// See https://code.google.com/p/goauth2/issues/detail?id=31 for background. -func RegisterBrokenAuthHeaderProvider(tokenURL string) { - internal.RegisterBrokenAuthHeaderProvider(tokenURL) -} - -// Config describes a typical 3-legged OAuth2 flow, with both the -// client application information and the server's endpoint URLs. -type Config struct { - // ClientID is the application's ID. - ClientID string - - // ClientSecret is the application's secret. - ClientSecret string - - // Endpoint contains the resource server's token endpoint - // URLs. These are constants specific to each server and are - // often available via site-specific packages, such as - // google.Endpoint or github.Endpoint. - Endpoint Endpoint - - // RedirectURL is the URL to redirect users going through - // the OAuth flow, after the resource owner's URLs. - RedirectURL string - - // Scope specifies optional requested permissions. - Scopes []string -} - -// A TokenSource is anything that can return a token. 
-type TokenSource interface { - // Token returns a token or an error. - // Token must be safe for concurrent use by multiple goroutines. - // The returned Token must not be modified. - Token() (*Token, error) -} - -// Endpoint contains the OAuth 2.0 provider's authorization and token -// endpoint URLs. -type Endpoint struct { - AuthURL string - TokenURL string -} - -var ( - // AccessTypeOnline and AccessTypeOffline are options passed - // to the Options.AuthCodeURL method. They modify the - // "access_type" field that gets sent in the URL returned by - // AuthCodeURL. - // - // Online is the default if neither is specified. If your - // application needs to refresh access tokens when the user - // is not present at the browser, then use offline. This will - // result in your application obtaining a refresh token the - // first time your application exchanges an authorization - // code for a user. - AccessTypeOnline AuthCodeOption = SetAuthURLParam("access_type", "online") - AccessTypeOffline AuthCodeOption = SetAuthURLParam("access_type", "offline") - - // ApprovalForce forces the users to view the consent dialog - // and confirm the permissions request at the URL returned - // from AuthCodeURL, even if they've already done so. - ApprovalForce AuthCodeOption = SetAuthURLParam("approval_prompt", "force") -) - -// An AuthCodeOption is passed to Config.AuthCodeURL. -type AuthCodeOption interface { - setValue(url.Values) -} - -type setParam struct{ k, v string } - -func (p setParam) setValue(m url.Values) { m.Set(p.k, p.v) } - -// SetAuthURLParam builds an AuthCodeOption which passes key/value parameters -// to a provider's authorization endpoint. -func SetAuthURLParam(key, value string) AuthCodeOption { - return setParam{key, value} -} - -// AuthCodeURL returns a URL to OAuth 2.0 provider's consent page -// that asks for permissions for the required scopes explicitly. -// -// State is a token to protect the user from CSRF attacks. You must -// always provide a non-zero string and validate that it matches the -// the state query parameter on your redirect callback. -// See http://tools.ietf.org/html/rfc6749#section-10.12 for more info. -// -// Opts may include AccessTypeOnline or AccessTypeOffline, as well -// as ApprovalForce. -func (c *Config) AuthCodeURL(state string, opts ...AuthCodeOption) string { - var buf bytes.Buffer - buf.WriteString(c.Endpoint.AuthURL) - v := url.Values{ - "response_type": {"code"}, - "client_id": {c.ClientID}, - "redirect_uri": internal.CondVal(c.RedirectURL), - "scope": internal.CondVal(strings.Join(c.Scopes, " ")), - "state": internal.CondVal(state), - } - for _, opt := range opts { - opt.setValue(v) - } - if strings.Contains(c.Endpoint.AuthURL, "?") { - buf.WriteByte('&') - } else { - buf.WriteByte('?') - } - buf.WriteString(v.Encode()) - return buf.String() -} - -// PasswordCredentialsToken converts a resource owner username and password -// pair into a token. -// -// Per the RFC, this grant type should only be used "when there is a high -// degree of trust between the resource owner and the client (e.g., the client -// is part of the device operating system or a highly privileged application), -// and when other authorization grant types are not available." -// See https://tools.ietf.org/html/rfc6749#section-4.3 for more info. -// -// The HTTP client to use is derived from the context. -// If nil, http.DefaultClient is used. 
-func (c *Config) PasswordCredentialsToken(ctx context.Context, username, password string) (*Token, error) { - return retrieveToken(ctx, c, url.Values{ - "grant_type": {"password"}, - "username": {username}, - "password": {password}, - "scope": internal.CondVal(strings.Join(c.Scopes, " ")), - }) -} - -// Exchange converts an authorization code into a token. -// -// It is used after a resource provider redirects the user back -// to the Redirect URI (the URL obtained from AuthCodeURL). -// -// The HTTP client to use is derived from the context. -// If a client is not provided via the context, http.DefaultClient is used. -// -// The code will be in the *http.Request.FormValue("code"). Before -// calling Exchange, be sure to validate FormValue("state"). -func (c *Config) Exchange(ctx context.Context, code string) (*Token, error) { - return retrieveToken(ctx, c, url.Values{ - "grant_type": {"authorization_code"}, - "code": {code}, - "redirect_uri": internal.CondVal(c.RedirectURL), - "scope": internal.CondVal(strings.Join(c.Scopes, " ")), - }) -} - -// Client returns an HTTP client using the provided token. -// The token will auto-refresh as necessary. The underlying -// HTTP transport will be obtained using the provided context. -// The returned client and its Transport should not be modified. -func (c *Config) Client(ctx context.Context, t *Token) *http.Client { - return NewClient(ctx, c.TokenSource(ctx, t)) -} - -// TokenSource returns a TokenSource that returns t until t expires, -// automatically refreshing it as necessary using the provided context. -// -// Most users will use Config.Client instead. -func (c *Config) TokenSource(ctx context.Context, t *Token) TokenSource { - tkr := &tokenRefresher{ - ctx: ctx, - conf: c, - } - if t != nil { - tkr.refreshToken = t.RefreshToken - } - return &reuseTokenSource{ - t: t, - new: tkr, - } -} - -// tokenRefresher is a TokenSource that makes "grant_type"=="refresh_token" -// HTTP requests to renew a token using a RefreshToken. -type tokenRefresher struct { - ctx context.Context // used to get HTTP requests - conf *Config - refreshToken string -} - -// WARNING: Token is not safe for concurrent access, as it -// updates the tokenRefresher's refreshToken field. -// Within this package, it is used by reuseTokenSource which -// synchronizes calls to this method with its own mutex. -func (tf *tokenRefresher) Token() (*Token, error) { - if tf.refreshToken == "" { - return nil, errors.New("oauth2: token expired and refresh token is not set") - } - - tk, err := retrieveToken(tf.ctx, tf.conf, url.Values{ - "grant_type": {"refresh_token"}, - "refresh_token": {tf.refreshToken}, - }) - - if err != nil { - return nil, err - } - if tf.refreshToken != tk.RefreshToken { - tf.refreshToken = tk.RefreshToken - } - return tk, err -} - -// reuseTokenSource is a TokenSource that holds a single token in memory -// and validates its expiry before each call to retrieve it with -// Token. If it's expired, it will be auto-refreshed using the -// new TokenSource. -type reuseTokenSource struct { - new TokenSource // called when t is expired. - - mu sync.Mutex // guards t - t *Token -} - -// Token returns the current token if it's still valid, else will -// refresh the current token (using r.Context for HTTP client -// information) and return the new one. 
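Config, AuthCodeURL, Exchange and Client together form the usual three-legged flow. A sketch for orientation; the endpoint URLs, credentials and authorization code are all placeholders:

```go
package main

import (
	"fmt"
	"log"

	"golang.org/x/oauth2"
)

func main() {
	conf := &oauth2.Config{
		ClientID:     "CLIENT_ID",
		ClientSecret: "CLIENT_SECRET",
		Scopes:       []string{"repo"},
		RedirectURL:  "https://example.com/callback",
		Endpoint: oauth2.Endpoint{
			AuthURL:  "https://provider.example.com/oauth/authorize",
			TokenURL: "https://provider.example.com/oauth/token",
		},
	}

	// 1. Send the user to the consent page; the "state" value must be
	//    checked against the state query parameter on the redirect back.
	fmt.Println("visit:", conf.AuthCodeURL("state", oauth2.AccessTypeOffline))

	// 2. Exchange the code from the redirect for a token, then build a
	//    client whose Transport injects and refreshes it automatically.
	tok, err := conf.Exchange(oauth2.NoContext, "authorization-code")
	if err != nil {
		log.Fatal(err)
	}
	client := conf.Client(oauth2.NoContext, tok)
	_ = client
}
```

Config.Client is generally preferable to assembling a Transport by hand, since the returned client wraps the token in a reuseTokenSource and refreshes it only when it expires.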
-func (s *reuseTokenSource) Token() (*Token, error) { - s.mu.Lock() - defer s.mu.Unlock() - if s.t.Valid() { - return s.t, nil - } - t, err := s.new.Token() - if err != nil { - return nil, err - } - s.t = t - return t, nil -} - -// StaticTokenSource returns a TokenSource that always returns the same token. -// Because the provided token t is never refreshed, StaticTokenSource is only -// useful for tokens that never expire. -func StaticTokenSource(t *Token) TokenSource { - return staticTokenSource{t} -} - -// staticTokenSource is a TokenSource that always returns the same Token. -type staticTokenSource struct { - t *Token -} - -func (s staticTokenSource) Token() (*Token, error) { - return s.t, nil -} - -// HTTPClient is the context key to use with golang.org/x/net/context's -// WithValue function to associate an *http.Client value with a context. -var HTTPClient internal.ContextKey - -// NewClient creates an *http.Client from a Context and TokenSource. -// The returned client is not valid beyond the lifetime of the context. -// -// As a special case, if src is nil, a non-OAuth2 client is returned -// using the provided context. This exists to support related OAuth2 -// packages. -func NewClient(ctx context.Context, src TokenSource) *http.Client { - if src == nil { - c, err := internal.ContextClient(ctx) - if err != nil { - return &http.Client{Transport: internal.ErrorTransport{err}} - } - return c - } - return &http.Client{ - Transport: &Transport{ - Base: internal.ContextTransport(ctx), - Source: ReuseTokenSource(nil, src), - }, - } -} - -// ReuseTokenSource returns a TokenSource which repeatedly returns the -// same token as long as it's valid, starting with t. -// When its cached token is invalid, a new token is obtained from src. -// -// ReuseTokenSource is typically used to reuse tokens from a cache -// (such as a file on disk) between runs of a program, rather than -// obtaining new tokens unnecessarily. -// -// The initial token t may be nil, in which case the TokenSource is -// wrapped in a caching version if it isn't one already. This also -// means it's always safe to wrap ReuseTokenSource around any other -// TokenSource without adverse effects. -func ReuseTokenSource(t *Token, src TokenSource) TokenSource { - // Don't wrap a reuseTokenSource in itself. That would work, - // but cause an unnecessary number of mutex operations. - // Just build the equivalent one. - if rt, ok := src.(*reuseTokenSource); ok { - if t == nil { - // Just use it directly. - return rt - } - src = rt.new - } - return &reuseTokenSource{ - t: t, - new: src, - } -} diff --git a/vendor/golang.org/x/oauth2/token.go b/vendor/golang.org/x/oauth2/token.go deleted file mode 100644 index 7a3167f..0000000 --- a/vendor/golang.org/x/oauth2/token.go +++ /dev/null @@ -1,158 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package oauth2 - -import ( - "net/http" - "net/url" - "strconv" - "strings" - "time" - - "golang.org/x/net/context" - "golang.org/x/oauth2/internal" -) - -// expiryDelta determines how earlier a token should be considered -// expired than its actual expiration time. It is used to avoid late -// expirations due to client-server time mismatches. -const expiryDelta = 10 * time.Second - -// Token represents the crendentials used to authorize -// the requests to access protected resources on the OAuth 2.0 -// provider's backend. 
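StaticTokenSource and NewClient cover the case where a long-lived token already exists, for example a pre-issued GitHub token. A brief sketch; the token value and request URL are placeholders:

```go
package main

import (
	"fmt"
	"log"

	"golang.org/x/oauth2"
)

func main() {
	// A pre-issued token that never needs refreshing (placeholder value).
	ts := oauth2.StaticTokenSource(&oauth2.Token{AccessToken: "pre-issued-token"})

	// NewClient wraps the TokenSource in a Transport that sets the
	// Authorization header on every outgoing request.
	client := oauth2.NewClient(oauth2.NoContext, ts)

	resp, err := client.Get("https://api.github.com/user")
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status)
}
```

Because the token is static, the Transport never attempts a refresh; Expiry is simply left at its zero value, which the package treats as "reuse forever".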
-// -// Most users of this package should not access fields of Token -// directly. They're exported mostly for use by related packages -// implementing derivative OAuth2 flows. -type Token struct { - // AccessToken is the token that authorizes and authenticates - // the requests. - AccessToken string `json:"access_token"` - - // TokenType is the type of token. - // The Type method returns either this or "Bearer", the default. - TokenType string `json:"token_type,omitempty"` - - // RefreshToken is a token that's used by the application - // (as opposed to the user) to refresh the access token - // if it expires. - RefreshToken string `json:"refresh_token,omitempty"` - - // Expiry is the optional expiration time of the access token. - // - // If zero, TokenSource implementations will reuse the same - // token forever and RefreshToken or equivalent - // mechanisms for that TokenSource will not be used. - Expiry time.Time `json:"expiry,omitempty"` - - // raw optionally contains extra metadata from the server - // when updating a token. - raw interface{} -} - -// Type returns t.TokenType if non-empty, else "Bearer". -func (t *Token) Type() string { - if strings.EqualFold(t.TokenType, "bearer") { - return "Bearer" - } - if strings.EqualFold(t.TokenType, "mac") { - return "MAC" - } - if strings.EqualFold(t.TokenType, "basic") { - return "Basic" - } - if t.TokenType != "" { - return t.TokenType - } - return "Bearer" -} - -// SetAuthHeader sets the Authorization header to r using the access -// token in t. -// -// This method is unnecessary when using Transport or an HTTP Client -// returned by this package. -func (t *Token) SetAuthHeader(r *http.Request) { - r.Header.Set("Authorization", t.Type()+" "+t.AccessToken) -} - -// WithExtra returns a new Token that's a clone of t, but using the -// provided raw extra map. This is only intended for use by packages -// implementing derivative OAuth2 flows. -func (t *Token) WithExtra(extra interface{}) *Token { - t2 := new(Token) - *t2 = *t - t2.raw = extra - return t2 -} - -// Extra returns an extra field. -// Extra fields are key-value pairs returned by the server as a -// part of the token retrieval response. -func (t *Token) Extra(key string) interface{} { - if raw, ok := t.raw.(map[string]interface{}); ok { - return raw[key] - } - - vals, ok := t.raw.(url.Values) - if !ok { - return nil - } - - v := vals.Get(key) - switch s := strings.TrimSpace(v); strings.Count(s, ".") { - case 0: // Contains no "."; try to parse as int - if i, err := strconv.ParseInt(s, 10, 64); err == nil { - return i - } - case 1: // Contains a single "."; try to parse as float - if f, err := strconv.ParseFloat(s, 64); err == nil { - return f - } - } - - return v -} - -// expired reports whether the token is expired. -// t must be non-nil. -func (t *Token) expired() bool { - if t.Expiry.IsZero() { - return false - } - return t.Expiry.Add(-expiryDelta).Before(time.Now()) -} - -// Valid reports whether t is non-nil, has an AccessToken, and is not expired. -func (t *Token) Valid() bool { - return t != nil && t.AccessToken != "" && !t.expired() -} - -// tokenFromInternal maps an *internal.Token struct into -// a *Token struct. -func tokenFromInternal(t *internal.Token) *Token { - if t == nil { - return nil - } - return &Token{ - AccessToken: t.AccessToken, - TokenType: t.TokenType, - RefreshToken: t.RefreshToken, - Expiry: t.Expiry, - raw: t.Raw, - } -} - -// retrieveToken takes a *Config and uses that to retrieve an *internal.Token. 
-// This token is then mapped from *internal.Token into an *oauth2.Token which is returned along -// with an error.. -func retrieveToken(ctx context.Context, c *Config, v url.Values) (*Token, error) { - tk, err := internal.RetrieveToken(ctx, c.ClientID, c.ClientSecret, c.Endpoint.TokenURL, v) - if err != nil { - return nil, err - } - return tokenFromInternal(tk), nil -} diff --git a/vendor/golang.org/x/oauth2/transport.go b/vendor/golang.org/x/oauth2/transport.go deleted file mode 100644 index 92ac7e2..0000000 --- a/vendor/golang.org/x/oauth2/transport.go +++ /dev/null @@ -1,132 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package oauth2 - -import ( - "errors" - "io" - "net/http" - "sync" -) - -// Transport is an http.RoundTripper that makes OAuth 2.0 HTTP requests, -// wrapping a base RoundTripper and adding an Authorization header -// with a token from the supplied Sources. -// -// Transport is a low-level mechanism. Most code will use the -// higher-level Config.Client method instead. -type Transport struct { - // Source supplies the token to add to outgoing requests' - // Authorization headers. - Source TokenSource - - // Base is the base RoundTripper used to make HTTP requests. - // If nil, http.DefaultTransport is used. - Base http.RoundTripper - - mu sync.Mutex // guards modReq - modReq map[*http.Request]*http.Request // original -> modified -} - -// RoundTrip authorizes and authenticates the request with an -// access token. If no token exists or token is expired, -// tries to refresh/fetch a new token. -func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) { - if t.Source == nil { - return nil, errors.New("oauth2: Transport's Source is nil") - } - token, err := t.Source.Token() - if err != nil { - return nil, err - } - - req2 := cloneRequest(req) // per RoundTripper contract - token.SetAuthHeader(req2) - t.setModReq(req, req2) - res, err := t.base().RoundTrip(req2) - if err != nil { - t.setModReq(req, nil) - return nil, err - } - res.Body = &onEOFReader{ - rc: res.Body, - fn: func() { t.setModReq(req, nil) }, - } - return res, nil -} - -// CancelRequest cancels an in-flight request by closing its connection. -func (t *Transport) CancelRequest(req *http.Request) { - type canceler interface { - CancelRequest(*http.Request) - } - if cr, ok := t.base().(canceler); ok { - t.mu.Lock() - modReq := t.modReq[req] - delete(t.modReq, req) - t.mu.Unlock() - cr.CancelRequest(modReq) - } -} - -func (t *Transport) base() http.RoundTripper { - if t.Base != nil { - return t.Base - } - return http.DefaultTransport -} - -func (t *Transport) setModReq(orig, mod *http.Request) { - t.mu.Lock() - defer t.mu.Unlock() - if t.modReq == nil { - t.modReq = make(map[*http.Request]*http.Request) - } - if mod == nil { - delete(t.modReq, orig) - } else { - t.modReq[orig] = mod - } -} - -// cloneRequest returns a clone of the provided *http.Request. -// The clone is a shallow copy of the struct and its Header map. -func cloneRequest(r *http.Request) *http.Request { - // shallow copy of the struct - r2 := new(http.Request) - *r2 = *r - // deep copy of the Header - r2.Header = make(http.Header, len(r.Header)) - for k, s := range r.Header { - r2.Header[k] = append([]string(nil), s...) 
- } - return r2 -} - -type onEOFReader struct { - rc io.ReadCloser - fn func() -} - -func (r *onEOFReader) Read(p []byte) (n int, err error) { - n, err = r.rc.Read(p) - if err == io.EOF { - r.runFunc() - } - return -} - -func (r *onEOFReader) Close() error { - err := r.rc.Close() - r.runFunc() - return err -} - -func (r *onEOFReader) runFunc() { - if fn := r.fn; fn != nil { - fn() - r.fn = nil - } -} diff --git a/vendor/gopkg.in/bluesuncorp/validator.v5/LICENSE b/vendor/gopkg.in/bluesuncorp/validator.v5/LICENSE deleted file mode 100644 index 6a2ae9a..0000000 --- a/vendor/gopkg.in/bluesuncorp/validator.v5/LICENSE +++ /dev/null @@ -1,22 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2015 Dean Karn - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - diff --git a/vendor/gopkg.in/bluesuncorp/validator.v5/README.md b/vendor/gopkg.in/bluesuncorp/validator.v5/README.md deleted file mode 100644 index 1a78bec..0000000 --- a/vendor/gopkg.in/bluesuncorp/validator.v5/README.md +++ /dev/null @@ -1,154 +0,0 @@ -Package validator -================ - -[![Join the chat at https://gitter.im/go-playground/validator](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/go-playground/validator?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) -[![Build Status](https://semaphoreci.com/api/v1/projects/ec20115f-ef1b-4c7d-9393-cc76aba74eb4/487382/badge.svg)](https://semaphoreci.com/joeybloggs/validator) -[![Coverage Status](https://coveralls.io/repos/go-playground/validator/badge.svg?branch=v5)](https://coveralls.io/r/go-playground/validator?branch=v5) -[![GoDoc](https://godoc.org/gopkg.in/go-playground/validator.v5?status.svg)](https://godoc.org/gopkg.in/go-playground/validator.v5) - -Package validator implements value validations for structs and individual fields based on tags. - -It has the following **unique** features: - -- Cross Field and Cross Struct validations. -- Slice, Array and Map diving, which allows any or all levels of a multidimensional field to be validated. -- Handles type interface by determining it's underlying type prior to validation. - -Installation ------------- - -Use go get. - - go get gopkg.in/go-playground/validator.v5 - -or to update - - go get -u gopkg.in/go-playground/validator.v5 - -Then import the validator package into your own code. - - import "gopkg.in/go-playground/validator.v5" - -Usage and documentation ------- - -Please see http://godoc.org/gopkg.in/go-playground/validator.v5 for detailed usage docs. 
- -##### Example: -```go -package main - -import ( - "fmt" - - "gopkg.in/go-playground/validator.v5" -) - -// User contains user information -type User struct { - FirstName string `validate:"required"` - LastName string `validate:"required"` - Age uint8 `validate:"gte=0,lte=130"` - Email string `validate:"required,email"` - FavouriteColor string `validate:"hexcolor|rgb|rgba"` - Addresses []*Address `validate:"required,dive,required"` // a person can have a home and cottage... -} - -// Address houses a users address information -type Address struct { - Street string `validate:"required"` - City string `validate:"required"` - Planet string `validate:"required"` - Phone string `validate:"required"` -} - -var validate *validator.Validate - -func main() { - - validate = validator.New("validate", validator.BakedInValidators) - - address := &Address{ - Street: "Eavesdown Docks", - Planet: "Persphone", - Phone: "none", - } - - user := &User{ - FirstName: "Badger", - LastName: "Smith", - Age: 135, - Email: "Badger.Smith@gmail.com", - FavouriteColor: "#000", - Addresses: []*Address{address}, - } - - // returns nil or *StructErrors - errs := validate.Struct(user) - - if errs != nil { - - // err will be of type *FieldError - err := errs.Errors["Age"] - fmt.Println(err.Error()) // output: Field validation for "Age" failed on the "lte" tag - fmt.Println(err.Field) // output: Age - fmt.Println(err.Tag) // output: lte - fmt.Println(err.Kind) // output: uint8 - fmt.Println(err.Type) // output: uint8 - fmt.Println(err.Param) // output: 130 - fmt.Println(err.Value) // output: 135 - - // or if you prefer you can use the Flatten function - // NOTE: I find this usefull when using a more hard static approach of checking field errors. - // The above, is best for passing to some generic code to say parse the errors. i.e. I pass errs - // to a routine which loops through the errors, creates and translates the error message into the - // users locale and returns a map of map[string]string // field and error which I then use - // within the HTML rendering. - - flat := errs.Flatten() - fmt.Println(flat) // output: map[Age:Field validation for "Age" failed on the "lte" tag Addresses[0].Address.City:Field validation for "City" failed on the "required" tag] - err = flat["Addresses[0].Address.City"] - fmt.Println(err.Field) // output: City - fmt.Println(err.Tag) // output: required - fmt.Println(err.Kind) // output: string - fmt.Println(err.Type) // output: string - fmt.Println(err.Param) // output: - fmt.Println(err.Value) // output: - - // from here you can create your own error messages in whatever language you wish - return - } - - // save user to database -} -``` - -Benchmarks ------- -###### Run on MacBook Pro (Retina, 15-inch, Late 2013) 2.6 GHz Intel Core i7 16 GB 1600 MHz DDR3 -```go -$ go test -cpu=4 -bench=. -benchmem=true -PASS -BenchmarkValidateField-4 3000000 429 ns/op 192 B/op 2 allocs/op -BenchmarkValidateStructSimple-4 500000 2877 ns/op 657 B/op 10 allocs/op -BenchmarkTemplateParallelSimple-4 500000 3097 ns/op 657 B/op 10 allocs/op -BenchmarkValidateStructLarge-4 100000 15228 ns/op 4350 B/op 62 allocs/op -BenchmarkTemplateParallelLarge-4 100000 14257 ns/op 4354 B/op 62 allocs/op -``` - -How to Contribute ------- - -There will always be a development branch for each version i.e. `v1-development`. In order to contribute, -please make your pull requests against those branches. 
- -If the changes being proposed or requested are breaking changes, please create an issue, for discussion -or create a pull request against the highest development branch for example this package has a -v1 and v1-development branch however, there will also be a v2-development brach even though v2 doesn't exist yet. - -I strongly encourage everyone whom creates a custom validation function to contribute them and -help make this package even better. - -License ------- -Distributed under MIT License, please see license file in code for more details. diff --git a/vendor/gopkg.in/bluesuncorp/validator.v5/baked_in.go b/vendor/gopkg.in/bluesuncorp/validator.v5/baked_in.go deleted file mode 100644 index 82868cc..0000000 --- a/vendor/gopkg.in/bluesuncorp/validator.v5/baked_in.go +++ /dev/null @@ -1,1001 +0,0 @@ -package validator - -import ( - "fmt" - "net/url" - "reflect" - "strconv" - "strings" - "time" - "unicode/utf8" -) - -// BakedInValidators is the default map of ValidationFunc -// you can add, remove or even replace items to suite your needs, -// or even disregard and use your own map if so desired. -var BakedInValidators = map[string]Func{ - "required": hasValue, - "len": hasLengthOf, - "min": hasMinOf, - "max": hasMaxOf, - "eq": isEq, - "ne": isNe, - "lt": isLt, - "lte": isLte, - "gt": isGt, - "gte": isGte, - "eqfield": isEqField, - "nefield": isNeField, - "gtefield": isGteField, - "gtfield": isGtField, - "ltefield": isLteField, - "ltfield": isLtField, - "alpha": isAlpha, - "alphanum": isAlphanum, - "numeric": isNumeric, - "number": isNumber, - "hexadecimal": isHexadecimal, - "hexcolor": isHexcolor, - "rgb": isRgb, - "rgba": isRgba, - "hsl": isHsl, - "hsla": isHsla, - "email": isEmail, - "url": isURL, - "uri": isURI, - "base64": isBase64, - "contains": contains, - "containsany": containsAny, - "containsrune": containsRune, - "excludes": excludes, - "excludesall": excludesAll, - "excludesrune": excludesRune, - "isbn": isISBN, - "isbn10": isISBN10, - "isbn13": isISBN13, - "uuid": isUUID, - "uuid3": isUUID3, - "uuid4": isUUID4, - "uuid5": isUUID5, - "ascii": isASCII, - "printascii": isPrintableASCII, - "multibyte": hasMultiByteCharacter, - "datauri": isDataURI, - "latitude": isLatitude, - "longitude": isLongitude, - "ssn": isSSN, -} - -func isSSN(top interface{}, current interface{}, field interface{}, param string) bool { - - if len(field.(string)) != 11 { - return false - } - - return matchesRegex(sSNRegex, field) -} - -func isLongitude(top interface{}, current interface{}, field interface{}, param string) bool { - return matchesRegex(longitudeRegex, field) -} - -func isLatitude(top interface{}, current interface{}, field interface{}, param string) bool { - return matchesRegex(latitudeRegex, field) -} - -func isDataURI(top interface{}, current interface{}, field interface{}, param string) bool { - - uri := strings.SplitN(field.(string), ",", 2) - - if len(uri) != 2 { - return false - } - - if !matchesRegex(dataURIRegex, uri[0]) { - return false - } - - return isBase64(top, current, uri[1], param) -} - -func hasMultiByteCharacter(top interface{}, current interface{}, field interface{}, param string) bool { - - if len(field.(string)) == 0 { - return true - } - - return matchesRegex(multibyteRegex, field) -} - -func isPrintableASCII(top interface{}, current interface{}, field interface{}, param string) bool { - return matchesRegex(printableASCIIRegex, field) -} - -func isASCII(top interface{}, current interface{}, field interface{}, param string) bool { - return matchesRegex(aSCIIRegex, 
field) -} - -func isUUID5(top interface{}, current interface{}, field interface{}, param string) bool { - return matchesRegex(uUID5Regex, field) -} - -func isUUID4(top interface{}, current interface{}, field interface{}, param string) bool { - return matchesRegex(uUID4Regex, field) -} - -func isUUID3(top interface{}, current interface{}, field interface{}, param string) bool { - return matchesRegex(uUID3Regex, field) -} - -func isUUID(top interface{}, current interface{}, field interface{}, param string) bool { - return matchesRegex(uUIDRegex, field) -} - -func isISBN(top interface{}, current interface{}, field interface{}, param string) bool { - return isISBN10(top, current, field, param) || isISBN13(top, current, field, param) -} - -func isISBN13(top interface{}, current interface{}, field interface{}, param string) bool { - - s := strings.Replace(strings.Replace(field.(string), "-", "", 4), " ", "", 4) - - if !matchesRegex(iSBN13Regex, s) { - return false - } - - var checksum int32 - var i int32 - - factor := []int32{1, 3} - - for i = 0; i < 12; i++ { - checksum += factor[i%2] * int32(s[i]-'0') - } - - if (int32(s[12]-'0'))-((10-(checksum%10))%10) == 0 { - return true - } - - return false -} - -func isISBN10(top interface{}, current interface{}, field interface{}, param string) bool { - - s := strings.Replace(strings.Replace(field.(string), "-", "", 3), " ", "", 3) - - if !matchesRegex(iSBN10Regex, s) { - return false - } - - var checksum int32 - var i int32 - - for i = 0; i < 9; i++ { - checksum += (i + 1) * int32(s[i]-'0') - } - - if s[9] == 'X' { - checksum += 10 * 10 - } else { - checksum += 10 * int32(s[9]-'0') - } - - if checksum%11 == 0 { - return true - } - - return false -} - -func excludesRune(top interface{}, current interface{}, field interface{}, param string) bool { - return !containsRune(top, current, field, param) -} - -func excludesAll(top interface{}, current interface{}, field interface{}, param string) bool { - return !containsAny(top, current, field, param) -} - -func excludes(top interface{}, current interface{}, field interface{}, param string) bool { - return !contains(top, current, field, param) -} - -func containsRune(top interface{}, current interface{}, field interface{}, param string) bool { - r, _ := utf8.DecodeRuneInString(param) - - return strings.ContainsRune(field.(string), r) -} - -func containsAny(top interface{}, current interface{}, field interface{}, param string) bool { - return strings.ContainsAny(field.(string), param) -} - -func contains(top interface{}, current interface{}, field interface{}, param string) bool { - return strings.Contains(field.(string), param) -} - -func isNeField(top interface{}, current interface{}, field interface{}, param string) bool { - return !isEqField(top, current, field, param) -} - -func isNe(top interface{}, current interface{}, field interface{}, param string) bool { - return !isEq(top, current, field, param) -} - -func isEqField(top interface{}, current interface{}, field interface{}, param string) bool { - - if current == nil { - panic("struct not passed for cross validation") - } - - currentVal := reflect.ValueOf(current) - - if currentVal.Kind() == reflect.Ptr && !currentVal.IsNil() { - currentVal = reflect.ValueOf(currentVal.Elem().Interface()) - } - - var currentFielVal reflect.Value - - switch currentVal.Kind() { - - case reflect.Struct: - - if currentVal.Type() == reflect.TypeOf(time.Time{}) { - currentFielVal = currentVal - break - } - - f := currentVal.FieldByName(param) - - if f.Kind() == 
reflect.Invalid { - panic(fmt.Sprintf("Field \"%s\" not found in struct", param)) - } - - currentFielVal = f - - default: - - currentFielVal = currentVal - } - - if currentFielVal.Kind() == reflect.Ptr && !currentFielVal.IsNil() { - - currentFielVal = reflect.ValueOf(currentFielVal.Elem().Interface()) - } - - fv := reflect.ValueOf(field) - - switch fv.Kind() { - - case reflect.String: - return fv.String() == currentFielVal.String() - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - - return fv.Int() == currentFielVal.Int() - - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - - return fv.Uint() == currentFielVal.Uint() - - case reflect.Float32, reflect.Float64: - - return fv.Float() == currentFielVal.Float() - case reflect.Slice, reflect.Map, reflect.Array: - - return int64(fv.Len()) == int64(currentFielVal.Len()) - case reflect.Struct: - - if fv.Type() == reflect.TypeOf(time.Time{}) { - - if currentFielVal.Type() != reflect.TypeOf(time.Time{}) { - panic("Bad Top Level field type") - } - - t := currentFielVal.Interface().(time.Time) - fieldTime := field.(time.Time) - - return fieldTime.Equal(t) - } - } - - panic(fmt.Sprintf("Bad field type %T", field)) -} - -func isEq(top interface{}, current interface{}, field interface{}, param string) bool { - - st := reflect.ValueOf(field) - - switch st.Kind() { - - case reflect.String: - - return st.String() == param - - case reflect.Slice, reflect.Map, reflect.Array: - p := asInt(param) - - return int64(st.Len()) == p - - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - p := asInt(param) - - return st.Int() == p - - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - p := asUint(param) - - return st.Uint() == p - - case reflect.Float32, reflect.Float64: - p := asFloat(param) - - return st.Float() == p - } - - panic(fmt.Sprintf("Bad field type %T", field)) -} - -func isBase64(top interface{}, current interface{}, field interface{}, param string) bool { - return matchesRegex(base64Regex, field) -} - -func isURI(top interface{}, current interface{}, field interface{}, param string) bool { - - st := reflect.ValueOf(field) - - switch st.Kind() { - - case reflect.String: - _, err := url.ParseRequestURI(field.(string)) - - return err == nil - } - - panic(fmt.Sprintf("Bad field type %T", field)) -} - -func isURL(top interface{}, current interface{}, field interface{}, param string) bool { - st := reflect.ValueOf(field) - - switch st.Kind() { - - case reflect.String: - url, err := url.ParseRequestURI(field.(string)) - - if err != nil { - return false - } - - if len(url.Scheme) == 0 { - return false - } - - return err == nil - } - - panic(fmt.Sprintf("Bad field type %T", field)) -} - -func isEmail(top interface{}, current interface{}, field interface{}, param string) bool { - return matchesRegex(emailRegex, field) -} - -func isHsla(top interface{}, current interface{}, field interface{}, param string) bool { - return matchesRegex(hslaRegex, field) -} - -func isHsl(top interface{}, current interface{}, field interface{}, param string) bool { - return matchesRegex(hslRegex, field) -} - -func isRgba(top interface{}, current interface{}, field interface{}, param string) bool { - return matchesRegex(rgbaRegex, field) -} - -func isRgb(top interface{}, current interface{}, field interface{}, param string) bool { - return matchesRegex(rgbRegex, field) -} - -func isHexcolor(top interface{}, current interface{}, 
field interface{}, param string) bool { - return matchesRegex(hexcolorRegex, field) -} - -func isHexadecimal(top interface{}, current interface{}, field interface{}, param string) bool { - return matchesRegex(hexadecimalRegex, field) -} - -func isNumber(top interface{}, current interface{}, field interface{}, param string) bool { - return matchesRegex(numberRegex, field) -} - -func isNumeric(top interface{}, current interface{}, field interface{}, param string) bool { - return matchesRegex(numericRegex, field) -} - -func isAlphanum(top interface{}, current interface{}, field interface{}, param string) bool { - return matchesRegex(alphaNumericRegex, field) -} - -func isAlpha(top interface{}, current interface{}, field interface{}, param string) bool { - return matchesRegex(alphaRegex, field) -} - -func hasValue(top interface{}, current interface{}, field interface{}, param string) bool { - - st := reflect.ValueOf(field) - - switch st.Kind() { - case reflect.Invalid: - return false - case reflect.Slice, reflect.Map, reflect.Ptr, reflect.Interface, reflect.Chan, reflect.Func: - return !st.IsNil() - case reflect.Array: - return field != reflect.Zero(reflect.TypeOf(field)).Interface() - default: - return field != nil && field != reflect.Zero(reflect.TypeOf(field)).Interface() - } -} - -func isGteField(top interface{}, current interface{}, field interface{}, param string) bool { - - if current == nil { - panic("struct not passed for cross validation") - } - - currentVal := reflect.ValueOf(current) - - if currentVal.Kind() == reflect.Ptr && !currentVal.IsNil() { - currentVal = reflect.ValueOf(currentVal.Elem().Interface()) - } - - var currentFielVal reflect.Value - - switch currentVal.Kind() { - - case reflect.Struct: - - if currentVal.Type() == reflect.TypeOf(time.Time{}) { - currentFielVal = currentVal - break - } - - f := currentVal.FieldByName(param) - - if f.Kind() == reflect.Invalid { - panic(fmt.Sprintf("Field \"%s\" not found in struct", param)) - } - - currentFielVal = f - - default: - - currentFielVal = currentVal - } - - if currentFielVal.Kind() == reflect.Ptr && !currentFielVal.IsNil() { - - currentFielVal = reflect.ValueOf(currentFielVal.Elem().Interface()) - } - - fv := reflect.ValueOf(field) - - switch fv.Kind() { - - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - - return fv.Int() >= currentFielVal.Int() - - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - - return fv.Uint() >= currentFielVal.Uint() - - case reflect.Float32, reflect.Float64: - - return fv.Float() >= currentFielVal.Float() - - case reflect.Struct: - - if fv.Type() == reflect.TypeOf(time.Time{}) { - - if currentFielVal.Type() != reflect.TypeOf(time.Time{}) { - panic("Bad Top Level field type") - } - - t := currentFielVal.Interface().(time.Time) - fieldTime := field.(time.Time) - - return fieldTime.After(t) || fieldTime.Equal(t) - } - } - - panic(fmt.Sprintf("Bad field type %T", field)) -} - -func isGtField(top interface{}, current interface{}, field interface{}, param string) bool { - - if current == nil { - panic("struct not passed for cross validation") - } - - currentVal := reflect.ValueOf(current) - - if currentVal.Kind() == reflect.Ptr && !currentVal.IsNil() { - currentVal = reflect.ValueOf(currentVal.Elem().Interface()) - } - - var currentFielVal reflect.Value - - switch currentVal.Kind() { - - case reflect.Struct: - - if currentVal.Type() == reflect.TypeOf(time.Time{}) { - currentFielVal = currentVal - break - } - - f := 
currentVal.FieldByName(param) - - if f.Kind() == reflect.Invalid { - panic(fmt.Sprintf("Field \"%s\" not found in struct", param)) - } - - currentFielVal = f - - default: - - currentFielVal = currentVal - } - - if currentFielVal.Kind() == reflect.Ptr && !currentFielVal.IsNil() { - - currentFielVal = reflect.ValueOf(currentFielVal.Elem().Interface()) - } - - fv := reflect.ValueOf(field) - - switch fv.Kind() { - - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - - return fv.Int() > currentFielVal.Int() - - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - - return fv.Uint() > currentFielVal.Uint() - - case reflect.Float32, reflect.Float64: - - return fv.Float() > currentFielVal.Float() - - case reflect.Struct: - - if fv.Type() == reflect.TypeOf(time.Time{}) { - - if currentFielVal.Type() != reflect.TypeOf(time.Time{}) { - panic("Bad Top Level field type") - } - - t := currentFielVal.Interface().(time.Time) - fieldTime := field.(time.Time) - - return fieldTime.After(t) - } - } - - panic(fmt.Sprintf("Bad field type %T", field)) -} - -func isGte(top interface{}, current interface{}, field interface{}, param string) bool { - - st := reflect.ValueOf(field) - - switch st.Kind() { - - case reflect.String: - p := asInt(param) - - return int64(utf8.RuneCountInString(st.String())) >= p - - case reflect.Slice, reflect.Map, reflect.Array: - p := asInt(param) - - return int64(st.Len()) >= p - - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - p := asInt(param) - - return st.Int() >= p - - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - p := asUint(param) - - return st.Uint() >= p - - case reflect.Float32, reflect.Float64: - p := asFloat(param) - - return st.Float() >= p - - case reflect.Struct: - - if st.Type() == reflect.TypeOf(time.Time{}) { - - now := time.Now().UTC() - t := field.(time.Time) - - return t.After(now) || t.Equal(now) - } - } - - panic(fmt.Sprintf("Bad field type %T", field)) -} - -func isGt(top interface{}, current interface{}, field interface{}, param string) bool { - - st := reflect.ValueOf(field) - - switch st.Kind() { - - case reflect.String: - p := asInt(param) - - return int64(utf8.RuneCountInString(st.String())) > p - - case reflect.Slice, reflect.Map, reflect.Array: - p := asInt(param) - - return int64(st.Len()) > p - - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - p := asInt(param) - - return st.Int() > p - - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - p := asUint(param) - - return st.Uint() > p - - case reflect.Float32, reflect.Float64: - p := asFloat(param) - - return st.Float() > p - case reflect.Struct: - - if st.Type() == reflect.TypeOf(time.Time{}) { - - return field.(time.Time).After(time.Now().UTC()) - } - } - - panic(fmt.Sprintf("Bad field type %T", field)) -} - -// length tests whether a variable's length is equal to a given -// value. For strings it tests the number of characters whereas -// for maps and slices it tests the number of items. 
-func hasLengthOf(top interface{}, current interface{}, field interface{}, param string) bool { - - st := reflect.ValueOf(field) - - switch st.Kind() { - - case reflect.String: - p := asInt(param) - - return int64(utf8.RuneCountInString(st.String())) == p - - case reflect.Slice, reflect.Map, reflect.Array: - p := asInt(param) - - return int64(st.Len()) == p - - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - p := asInt(param) - - return st.Int() == p - - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - p := asUint(param) - - return st.Uint() == p - - case reflect.Float32, reflect.Float64: - p := asFloat(param) - - return st.Float() == p - } - - panic(fmt.Sprintf("Bad field type %T", field)) -} - -// min tests whether a variable value is larger or equal to a given -// number. For number types, it's a simple lesser-than test; for -// strings it tests the number of characters whereas for maps -// and slices it tests the number of items. -func hasMinOf(top interface{}, current interface{}, field interface{}, param string) bool { - - return isGte(top, current, field, param) -} - -func isLteField(top interface{}, current interface{}, field interface{}, param string) bool { - - if current == nil { - panic("struct not passed for cross validation") - } - - currentVal := reflect.ValueOf(current) - - if currentVal.Kind() == reflect.Ptr && !currentVal.IsNil() { - currentVal = reflect.ValueOf(currentVal.Elem().Interface()) - } - - var currentFielVal reflect.Value - - switch currentVal.Kind() { - - case reflect.Struct: - - if currentVal.Type() == reflect.TypeOf(time.Time{}) { - currentFielVal = currentVal - break - } - - f := currentVal.FieldByName(param) - - if f.Kind() == reflect.Invalid { - panic(fmt.Sprintf("Field \"%s\" not found in struct", param)) - } - - currentFielVal = f - - default: - - currentFielVal = currentVal - } - - if currentFielVal.Kind() == reflect.Ptr && !currentFielVal.IsNil() { - - currentFielVal = reflect.ValueOf(currentFielVal.Elem().Interface()) - } - - fv := reflect.ValueOf(field) - - switch fv.Kind() { - - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - - return fv.Int() <= currentFielVal.Int() - - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - - return fv.Uint() <= currentFielVal.Uint() - - case reflect.Float32, reflect.Float64: - - return fv.Float() <= currentFielVal.Float() - - case reflect.Struct: - - if fv.Type() == reflect.TypeOf(time.Time{}) { - - if currentFielVal.Type() != reflect.TypeOf(time.Time{}) { - panic("Bad Top Level field type") - } - - t := currentFielVal.Interface().(time.Time) - fieldTime := field.(time.Time) - - return fieldTime.Before(t) || fieldTime.Equal(t) - } - } - - panic(fmt.Sprintf("Bad field type %T", field)) -} - -func isLtField(top interface{}, current interface{}, field interface{}, param string) bool { - - if current == nil { - panic("struct not passed for cross validation") - } - - currentVal := reflect.ValueOf(current) - - if currentVal.Kind() == reflect.Ptr && !currentVal.IsNil() { - currentVal = reflect.ValueOf(currentVal.Elem().Interface()) - } - - var currentFielVal reflect.Value - - switch currentVal.Kind() { - - case reflect.Struct: - - if currentVal.Type() == reflect.TypeOf(time.Time{}) { - currentFielVal = currentVal - break - } - - f := currentVal.FieldByName(param) - - if f.Kind() == reflect.Invalid { - panic(fmt.Sprintf("Field \"%s\" not found in struct", param)) 
- } - - currentFielVal = f - - default: - - currentFielVal = currentVal - } - - if currentFielVal.Kind() == reflect.Ptr && !currentFielVal.IsNil() { - - currentFielVal = reflect.ValueOf(currentFielVal.Elem().Interface()) - } - - fv := reflect.ValueOf(field) - - switch fv.Kind() { - - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - - return fv.Int() < currentFielVal.Int() - - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - - return fv.Uint() < currentFielVal.Uint() - - case reflect.Float32, reflect.Float64: - - return fv.Float() < currentFielVal.Float() - - case reflect.Struct: - - if fv.Type() == reflect.TypeOf(time.Time{}) { - - if currentFielVal.Type() != reflect.TypeOf(time.Time{}) { - panic("Bad Top Level field type") - } - - t := currentFielVal.Interface().(time.Time) - fieldTime := field.(time.Time) - - return fieldTime.Before(t) - } - } - - panic(fmt.Sprintf("Bad field type %T", field)) -} - -func isLte(top interface{}, current interface{}, field interface{}, param string) bool { - - st := reflect.ValueOf(field) - - switch st.Kind() { - - case reflect.String: - p := asInt(param) - - return int64(utf8.RuneCountInString(st.String())) <= p - - case reflect.Slice, reflect.Map, reflect.Array: - p := asInt(param) - - return int64(st.Len()) <= p - - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - p := asInt(param) - - return st.Int() <= p - - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - p := asUint(param) - - return st.Uint() <= p - - case reflect.Float32, reflect.Float64: - p := asFloat(param) - - return st.Float() <= p - - case reflect.Struct: - - if st.Type() == reflect.TypeOf(time.Time{}) { - - now := time.Now().UTC() - t := field.(time.Time) - - return t.Before(now) || t.Equal(now) - } - } - - panic(fmt.Sprintf("Bad field type %T", field)) -} - -func isLt(top interface{}, current interface{}, field interface{}, param string) bool { - - st := reflect.ValueOf(field) - - switch st.Kind() { - - case reflect.String: - p := asInt(param) - - return int64(utf8.RuneCountInString(st.String())) < p - - case reflect.Slice, reflect.Map, reflect.Array: - p := asInt(param) - - return int64(st.Len()) < p - - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - p := asInt(param) - - return st.Int() < p - - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - p := asUint(param) - - return st.Uint() < p - - case reflect.Float32, reflect.Float64: - p := asFloat(param) - - return st.Float() < p - - case reflect.Struct: - - if st.Type() == reflect.TypeOf(time.Time{}) { - - return field.(time.Time).Before(time.Now().UTC()) - } - } - - panic(fmt.Sprintf("Bad field type %T", field)) -} - -// max tests whether a variable value is lesser than a given -// value. For numbers, it's a simple lesser-than test; for -// strings it tests the number of characters whereas for maps -// and slices it tests the number of items. 
-func hasMaxOf(top interface{}, current interface{}, field interface{}, param string) bool { - - return isLte(top, current, field, param) -} - -// asInt retuns the parameter as a int64 -// or panics if it can't convert -func asInt(param string) int64 { - - i, err := strconv.ParseInt(param, 0, 64) - panicIf(err) - - return i -} - -// asUint returns the parameter as a uint64 -// or panics if it can't convert -func asUint(param string) uint64 { - - i, err := strconv.ParseUint(param, 0, 64) - panicIf(err) - - return i -} - -// asFloat returns the parameter as a float64 -// or panics if it can't convert -func asFloat(param string) float64 { - - i, err := strconv.ParseFloat(param, 64) - panicIf(err) - - return i -} - -func panicIf(err error) { - if err != nil { - panic(err.Error()) - } -} diff --git a/vendor/gopkg.in/bluesuncorp/validator.v5/doc.go b/vendor/gopkg.in/bluesuncorp/validator.v5/doc.go deleted file mode 100644 index 74db756..0000000 --- a/vendor/gopkg.in/bluesuncorp/validator.v5/doc.go +++ /dev/null @@ -1,483 +0,0 @@ -/* -Package validator implements value validations for structs and individual fields based on tags. It can also handle Cross Field and Cross Struct validation for nested structs. - -Validate - - validate := validator.New("validate", validator.BakedInValidators) - - errs := validate.Struct(//your struct) - valErr := validate.Field(field, "omitempty,min=1,max=10") - -A simple example usage: - - type UserDetail struct { - Details string `validate:"-"` - } - - type User struct { - Name string `validate:"required,max=60"` - PreferedName string `validate:"omitempty,max=60"` - Sub UserDetail - } - - user := &User { - Name: "", - } - - // errs will contain a hierarchical list of errors - // using the StructErrors struct - // or nil if no errors exist - errs := validate.Struct(user) - - // in this case 1 error Name is required - errs.Struct will be "User" - errs.StructErrors will be empty <-- fields that were structs - errs.Errors will have 1 error of type FieldError - - NOTE: Anonymous Structs - they don't have names so expect the Struct name - within StructErrors to be blank. - -Error Handling - -The error can be used like so - - fieldErr, _ := errs["Name"] - fieldErr.Field // "Name" - fieldErr.ErrorTag // "required" - -Both StructErrors and FieldError implement the Error interface but it's -intended use is for development + debugging, not a production error message. - - fieldErr.Error() // Field validation for "Name" failed on the "required" tag - errs.Error() - // Struct: User - // Field validation for "Name" failed on the "required" tag - -Why not a better error message? because this library intends for you to handle your own error messages - -Why should I handle my own errors? Many reasons, for us building an internationalized application -I needed to know the field and what validation failed so that I could provide an error in the users specific language. - - if fieldErr.Field == "Name" { - switch fieldErr.ErrorTag - case "required": - return "Translated string based on field + error" - default: - return "Translated string based on field" - } - -The hierarchical error structure is hard to work with sometimes.. Agreed Flatten function to the rescue! -Flatten will return a map of FieldError's but the field name will be namespaced. 
- - // if UserDetail Details field failed validation - Field will be "Sub.Details" - - // for Name - Field will be "Name" - -Custom Functions - -Custom functions can be added - - //Structure - func customFunc(top interface{}, current interface{}, field interface{}, param string) bool { - - if whatever { - return false - } - - return true - } - - validate.AddFunction("custom tag name", customFunc) - // NOTES: using the same tag name as an existing function - // will overwrite the existing one - -Cross Field Validation - -Cross Field Validation can be implemented, for example Start & End Date range validation - - // NOTE: when calling validate.Struct(val) val will be the top level struct passed - // into the function - // when calling validate.FieldWithValue(val, field, tag) val will be - // whatever you pass, struct, field... - // when calling validate.Field(field, tag) val will be nil - // - // Because of the specific requirements and field names within each persons project that - // uses this library it is likely that custom functions will need to be created for your - // Cross Field Validation needs, however there are some build in Generic Cross Field validations, - // see Baked In Validators and Tags below - - func isDateRangeValid(val interface{}, field interface{}, param string) bool { - - myStruct := val.(myStructType) - - if myStruct.Start.After(field.(time.Time)) { - return false - } - - return true - } - -Multiple Validators - -Multiple validators on a field will process in the order defined - - type Test struct { - Field `validate:"max=10,min=1"` - } - - // max will be checked then min - -Bad Validator definitions are not handled by the library - - type Test struct { - Field `validate:"min=10,max=0"` - } - - // this definition of min max will never validate - -Baked In Validators and Tags - -NOTE: Baked In Cross field validation only compares fields on the same struct, -if cross field + cross struct validation is needed your own custom validator -should be implemented. - -NOTE2: comma is the default separator of validation tags, if you wish to have a comma -included within the parameter i.e. excludesall=, you will need to use the UTF-8 hex -representation 0x2C, which is replaced in the code as a comma, so the above will -become excludesall=0x2C - -Here is a list of the current built in validators: - - - - Tells the validation to skip this struct field; this is particularily - handy in ignoring embedded structs from being validated. (Usage: -) - - | - This is the 'or' operator allowing multiple validators to be used and - accepted. (Usage: rbg|rgba) <-- this would allow either rgb or rgba - colors to be accepted. This can also be combined with 'and' for example - ( Usage: omitempty,rgb|rgba) - - structonly - When a field that is a nest struct in encountered and contains this flag - any validation on the nested struct such as "required" will be run, but - none of the nested struct fields will be validated. This is usefull if - inside of you program you know the struct will be valid, but need to - verify it has been assigned. - - exists - Is a special tag without a validation function attached. It is used when a field - is a Pointer, Interface or Invalid and you wish to validate that it exists. - Example: want to ensure a bool exists if you define the bool as a pointer and - use exists it will ensure there is a value; couldn't use required as it would - fail when the bool was false. exists will fail is the value is a Pointer, Interface - or Invalid and is nil. 
(Usage: exists) - - omitempty - Allows conditional validation, for example if a field is not set with - a value (Determined by the required validator) then other validation - such as min or max won't run, but if a value is set validation will run. - (Usage: omitempty) - - dive - This tells the validator to dive into a slice, array or map and validate that - level of the slice, array or map with the validation tags that follow. - Multidimensional nesting is also supported, each level you wish to dive will - require another dive tag. (Usage: dive) - Example: [][]string with validation tag "gt=0,dive,len=1,dive,required" - gt=0 will be applied to [] - len=1 will be applied to []string - required will be applied to string - Example2: [][]string with validation tag "gt=0,dive,dive,required" - gt=0 will be applied to [] - []string will be spared validation - required will be applied to string - NOTE: in Example2 if the required validation failed, but all others passed - the hierarchy of FieldError's in the middle with have their IsPlaceHolder field - set to true. If a FieldError has IsSliceOrMap=true or IsMap=true then the - FieldError is a Slice or Map field and if IsPlaceHolder=true then contains errors - within its SliceOrArrayErrs or MapErrs fields. - - required - This validates that the value is not the data types default zero value. - For numbers ensures value is not zero. For strings ensures value is - not "". For slices, maps, pointers, interfaces, channels and functions - ensures the value is not nil. - (Usage: required) - - len - For numbers, max will ensure that the value is - equal to the parameter given. For strings, it checks that - the string length is exactly that number of characters. For slices, - arrays, and maps, validates the number of items. (Usage: len=10) - - max - For numbers, max will ensure that the value is - less than or equal to the parameter given. For strings, it checks - that the string length is at most that number of characters. For - slices, arrays, and maps, validates the number of items. (Usage: max=10) - - min - For numbers, min will ensure that the value is - greater or equal to the parameter given. For strings, it checks that - the string length is at least that number of characters. For slices, - arrays, and maps, validates the number of items. (Usage: min=10) - - eq - For strings & numbers, eq will ensure that the value is - equal to the parameter given. For slices, arrays, and maps, - validates the number of items. (Usage: eq=10) - - ne - For strings & numbers, eq will ensure that the value is not - equal to the parameter given. For slices, arrays, and maps, - validates the number of items. (Usage: eq=10) - - gt - For numbers, this will ensure that the value is greater than the - parameter given. For strings, it checks that the string length - is greater than that number of characters. For slices, arrays - and maps it validates the number of items. (Usage: gt=10) - For time.Time ensures the time value is greater than time.Now.UTC() - (Usage: gt) - - gte - Same as 'min' above. Kept both to make terminology with 'len' easier - (Usage: gte=10) - For time.Time ensures the time value is greater than or equal to time.Now.UTC() - (Usage: gte) - - lt - For numbers, this will ensure that the value is - less than the parameter given. For strings, it checks - that the string length is less than that number of characters. - For slices, arrays, and maps it validates the number of items. 
- (Usage: lt=10) - For time.Time ensures the time value is less than time.Now.UTC() - (Usage: lt) - - lte - Same as 'max' above. Kept both to make terminology with 'len' easier - (Usage: lte=10) - For time.Time ensures the time value is less than or equal to time.Now.UTC() - (Usage: lte) - - eqfield - This will validate the field value against another fields value either within - a struct or passed in field. - usage examples are for validation of a password and confirm password: - Validation on Password field using validate.Struct Usage(eqfield=ConfirmPassword) - Validating by field validate.FieldWithValue(password, confirmpassword, "eqfield") - - nefield - This will validate the field value against another fields value either within - a struct or passed in field. - usage examples are for ensuring two colors are not the same: - Validation on Color field using validate.Struct Usage(nefield=Color2) - Validating by field validate.FieldWithValue(color1, color2, "nefield") - - gtfield - Only valid for Numbers and time.Time types, this will validate the field value - against another fields value either within a struct or passed in field. - usage examples are for validation of a Start and End date: - Validation on End field using validate.Struct Usage(gtfield=Start) - Validating by field validate.FieldWithValue(start, end, "gtfield") - - gtefield - Only valid for Numbers and time.Time types, this will validate the field value - against another fields value either within a struct or passed in field. - usage examples are for validation of a Start and End date: - Validation on End field using validate.Struct Usage(gtefield=Start) - Validating by field validate.FieldWithValue(start, end, "gtefield") - - ltfield - Only valid for Numbers and time.Time types, this will validate the field value - against another fields value either within a struct or passed in field. - usage examples are for validation of a Start and End date: - Validation on End field using validate.Struct Usage(ltfield=Start) - Validating by field validate.FieldWithValue(start, end, "ltfield") - - ltefield - Only valid for Numbers and time.Time types, this will validate the field value - against another fields value either within a struct or passed in field. - usage examples are for validation of a Start and End date: - Validation on End field using validate.Struct Usage(ltefield=Start) - Validating by field validate.FieldWithValue(start, end, "ltefield") - - alpha - This validates that a string value contains alpha characters only - (Usage: alpha) - - alphanum - This validates that a string value contains alphanumeric characters only - (Usage: alphanum) - - numeric - This validates that a string value contains a basic numeric value. - basic excludes exponents etc... - (Usage: numeric) - - hexadecimal - This validates that a string value contains a valid hexadecimal. 
- (Usage: hexadecimal) - - hexcolor - This validates that a string value contains a valid hex color including - hashtag (#) - (Usage: hexcolor) - - rgb - This validates that a string value contains a valid rgb color - (Usage: rgb) - - rgba - This validates that a string value contains a valid rgba color - (Usage: rgba) - - hsl - This validates that a string value contains a valid hsl color - (Usage: hsl) - - hsla - This validates that a string value contains a valid hsla color - (Usage: hsla) - - email - This validates that a string value contains a valid email - This may not conform to all possibilities of any rfc standard, but neither - does any email provider accept all posibilities... - (Usage: email) - - url - This validates that a string value contains a valid url - This will accept any url the golang request uri accepts but must contain - a schema for example http:// or rtmp:// - (Usage: url) - - uri - This validates that a string value contains a valid uri - This will accept any uri the golang request uri accepts (Usage: uri) - - base64 - This validates that a string value contains a valid base64 value. - Although an empty string is valid base64 this will report an empty string - as an error, if you wish to accept an empty string as valid you can use - this with the omitempty tag. (Usage: base64) - - contains - This validates that a string value contains the substring value. - (Usage: contains=@) - - containsany - This validates that a string value contains any Unicode code points - in the substring value. (Usage: containsany=!@#?) - - containsrune - This validates that a string value contains the supplied rune value. - (Usage: containsrune=@) - - excludes - This validates that a string value does not contain the substring value. - (Usage: excludes=@) - - excludesall - This validates that a string value does not contain any Unicode code - points in the substring value. (Usage: excludesall=!@#?) - - excludesrune - This validates that a string value does not contain the supplied rune value. - (Usage: excludesrune=@) - - isbn - This validates that a string value contains a valid isbn10 or isbn13 value. - (Usage: isbn) - - isbn10 - This validates that a string value contains a valid isbn10 value. - (Usage: isbn10) - - isbn13 - This validates that a string value contains a valid isbn13 value. - (Usage: isbn13) - - uuid - This validates that a string value contains a valid UUID. - (Usage: uuid) - - uuid3 - This validates that a string value contains a valid version 3 UUID. - (Usage: uuid3) - - uuid4 - This validates that a string value contains a valid version 4 UUID. - (Usage: uuid4) - - uuid5 - This validates that a string value contains a valid version 5 UUID. - (Usage: uuid5) - - ascii - This validates that a string value contains only ASCII characters. - NOTE: if the string is blank, this validates as true. - (Usage: ascii) - - asciiprint - This validates that a string value contains only printable ASCII characters. - NOTE: if the string is blank, this validates as true. - (Usage: asciiprint) - - multibyte - This validates that a string value contains one or more multibyte characters. - NOTE: if the string is blank, this validates as true. - (Usage: multibyte) - - datauri - This validates that a string value contains a valid DataURI. - NOTE: this will also validate that the data portion is valid base64 - (Usage: datauri) - - latitude - This validates that a string value contains a valid latitude. 
- (Usage: latitude) - - longitude - This validates that a string value contains a valid longitude. - (Usage: longitude) - - ssn - This validates that a string value contains a valid U.S. Social Security Number. - (Usage: ssn) - -Validator notes: - - regex - a regex validator won't be added because commas and = signs can be part of - a regex which conflict with the validation definitions, although workarounds - can be made, they take away from using pure regex's. Furthermore it's quick - and dirty but the regex's become harder to maintain and are not reusable, so - it's as much a programming philosiphy as anything. - - In place of this new validator functions should be created; a regex can be - used within the validator function and even be precompiled for better efficiency - within regexes.go. - - And the best reason, you can submit a pull request and we can keep on adding to the - validation library of this package! - -Panics - -This package panics when bad input is provided, this is by design, bad code like that should not make it to production. - - type Test struct { - TestField string `validate:"nonexistantfunction=1"` - } - - t := &Test{ - TestField: "Test" - } - - validate.Struct(t) // this will panic -*/ -package validator diff --git a/vendor/gopkg.in/bluesuncorp/validator.v5/regexes.go b/vendor/gopkg.in/bluesuncorp/validator.v5/regexes.go deleted file mode 100644 index d3e8d80..0000000 --- a/vendor/gopkg.in/bluesuncorp/validator.v5/regexes.go +++ /dev/null @@ -1,64 +0,0 @@ -package validator - -import "regexp" - -const ( - alphaRegexString = "^[a-zA-Z]+$" - alphaNumericRegexString = "^[a-zA-Z0-9]+$" - numericRegexString = "^[-+]?[0-9]+(?:\\.[0-9]+)?$" - numberRegexString = "^[0-9]+$" - hexadecimalRegexString = "^[0-9a-fA-F]+$" - hexcolorRegexString = "^#(?:[0-9a-fA-F]{3}|[0-9a-fA-F]{6})$" - rgbRegexString = "^rgb\\(\\s*(?:(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])\\s*,\\s*(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])\\s*,\\s*(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])|(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])%\\s*,\\s*(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])%\\s*,\\s*(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])%)\\s*\\)$" - rgbaRegexString = "^rgba\\(\\s*(?:(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])\\s*,\\s*(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])\\s*,\\s*(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])|(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])%\\s*,\\s*(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])%\\s*,\\s*(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])%)\\s*,\\s*(?:(?:0.[1-9]*)|[01])\\s*\\)$" - hslRegexString = "^hsl\\(\\s*(?:0|[1-9]\\d?|[12]\\d\\d|3[0-5]\\d|360)\\s*,\\s*(?:(?:0|[1-9]\\d?|100)%)\\s*,\\s*(?:(?:0|[1-9]\\d?|100)%)\\s*\\)$" - hslaRegexString = "^hsla\\(\\s*(?:0|[1-9]\\d?|[12]\\d\\d|3[0-5]\\d|360)\\s*,\\s*(?:(?:0|[1-9]\\d?|100)%)\\s*,\\s*(?:(?:0|[1-9]\\d?|100)%)\\s*,\\s*(?:(?:0.[1-9]*)|[01])\\s*\\)$" - emailRegexString = 
"^(?:(?:(?:(?:[a-zA-Z]|\\d|[!#\\$%&'\\*\\+\\-\\/=\\?\\^_`{\\|}~]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])+(?:\\.([a-zA-Z]|\\d|[!#\\$%&'\\*\\+\\-\\/=\\?\\^_`{\\|}~]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])+)*)|(?:(?:\\x22)(?:(?:(?:(?:\\x20|\\x09)*(?:\\x0d\\x0a))?(?:\\x20|\\x09)+)?(?:(?:[\\x01-\\x08\\x0b\\x0c\\x0e-\\x1f\\x7f]|\\x21|[\\x23-\\x5b]|[\\x5d-\\x7e]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(?:\\(?:[\\x01-\\x09\\x0b\\x0c\\x0d-\\x7f]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}]))))*(?:(?:(?:\\x20|\\x09)*(?:\\x0d\\x0a))?(\\x20|\\x09)+)?(?:\\x22)))@(?:(?:(?:[a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(?:(?:[a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])(?:[a-zA-Z]|\\d|-|\\.|_|~|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])*(?:[a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])))\\.)+(?:(?:[a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(?:(?:[a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])(?:[a-zA-Z]|\\d|-|\\.|_|~|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])*(?:[a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])))\\.?$" - base64RegexString = "^(?:[A-Za-z0-9+\\/]{4})*(?:[A-Za-z0-9+\\/]{2}==|[A-Za-z0-9+\\/]{3}=|[A-Za-z0-9+\\/]{4})$" - iSBN10RegexString = "^(?:[0-9]{9}X|[0-9]{10})$" - iSBN13RegexString = "^(?:(?:97(?:8|9))[0-9]{10})$" - uUID3RegexString = "^[0-9a-f]{8}-[0-9a-f]{4}-3[0-9a-f]{3}-[0-9a-f]{4}-[0-9a-f]{12}$" - uUID4RegexString = "^[0-9a-f]{8}-[0-9a-f]{4}-4[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$" - uUID5RegexString = "^[0-9a-f]{8}-[0-9a-f]{4}-5[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$" - uUIDRegexString = "^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$" - aSCIIRegexString = "^[\x00-\x7F]*$" - printableASCIIRegexString = "^[\x20-\x7E]*$" - multibyteRegexString = "[^\x00-\x7F]" - dataURIRegexString = "^data:.+\\/(.+);base64$" - latitudeRegexString = "^[-+]?([1-8]?\\d(\\.\\d+)?|90(\\.0+)?)$" - longitudeRegexString = "^[-+]?(180(\\.0+)?|((1[0-7]\\d)|([1-9]?\\d))(\\.\\d+)?)$" - sSNRegexString = `^\d{3}[- ]?\d{2}[- ]?\d{4}$` -) - -var ( - alphaRegex = regexp.MustCompile(alphaRegexString) - alphaNumericRegex = regexp.MustCompile(alphaNumericRegexString) - numericRegex = regexp.MustCompile(numericRegexString) - numberRegex = regexp.MustCompile(numberRegexString) - hexadecimalRegex = regexp.MustCompile(hexadecimalRegexString) - hexcolorRegex = regexp.MustCompile(hexcolorRegexString) - rgbRegex = regexp.MustCompile(rgbRegexString) - rgbaRegex = regexp.MustCompile(rgbaRegexString) - hslRegex = regexp.MustCompile(hslRegexString) - hslaRegex = regexp.MustCompile(hslaRegexString) - emailRegex = regexp.MustCompile(emailRegexString) - base64Regex = regexp.MustCompile(base64RegexString) - iSBN10Regex = regexp.MustCompile(iSBN10RegexString) - iSBN13Regex = regexp.MustCompile(iSBN13RegexString) - uUID3Regex = regexp.MustCompile(uUID3RegexString) - uUID4Regex = regexp.MustCompile(uUID4RegexString) - uUID5Regex = regexp.MustCompile(uUID5RegexString) - uUIDRegex = regexp.MustCompile(uUIDRegexString) - aSCIIRegex = regexp.MustCompile(aSCIIRegexString) - printableASCIIRegex = regexp.MustCompile(printableASCIIRegexString) - multibyteRegex = regexp.MustCompile(multibyteRegexString) - dataURIRegex = regexp.MustCompile(dataURIRegexString) - latitudeRegex = regexp.MustCompile(latitudeRegexString) - longitudeRegex = 
regexp.MustCompile(longitudeRegexString) - sSNRegex = regexp.MustCompile(sSNRegexString) -) - -func matchesRegex(regex *regexp.Regexp, field interface{}) bool { - fieldAsString := field.(string) //this will panic inherently - return regex.MatchString(fieldAsString) -} diff --git a/vendor/gopkg.in/bluesuncorp/validator.v5/validator.go b/vendor/gopkg.in/bluesuncorp/validator.v5/validator.go deleted file mode 100644 index f195647..0000000 --- a/vendor/gopkg.in/bluesuncorp/validator.v5/validator.go +++ /dev/null @@ -1,1031 +0,0 @@ -/** - * Package validator - * - * MISC: - * - anonymous structs - they don't have names so expect the Struct name within StructErrors to be blank - * - */ - -package validator - -import ( - "bytes" - "errors" - "fmt" - "reflect" - "strings" - "sync" - "time" - "unicode" -) - -const ( - utf8HexComma = "0x2C" - tagSeparator = "," - orSeparator = "|" - noValidationTag = "-" - tagKeySeparator = "=" - structOnlyTag = "structonly" - omitempty = "omitempty" - required = "required" - fieldErrMsg = "Field validation for \"%s\" failed on the \"%s\" tag" - sliceErrMsg = "Field validation for \"%s\" failed at index \"%d\" with error(s): %s" - mapErrMsg = "Field validation for \"%s\" failed on key \"%v\" with error(s): %s" - structErrMsg = "Struct:%s\n" - diveTag = "dive" - existsTag = "exists" - arrayIndexFieldName = "%s[%d]" - mapIndexFieldName = "%s[%v]" -) - -var structPool *sync.Pool - -// returns new *StructErrors to the pool -func newStructErrors() interface{} { - return &StructErrors{ - Errors: map[string]*FieldError{}, - StructErrors: map[string]*StructErrors{}, - } -} - -type cachedTags struct { - keyVals [][]string - isOrVal bool -} - -type cachedField struct { - index int - name string - tags []*cachedTags - tag string - kind reflect.Kind - typ reflect.Type - isTime bool - isSliceOrArray bool - isMap bool - isTimeSubtype bool - sliceSubtype reflect.Type - mapSubtype reflect.Type - sliceSubKind reflect.Kind - mapSubKind reflect.Kind - dive bool - diveTag string -} - -type cachedStruct struct { - children int - name string - kind reflect.Kind - fields []*cachedField -} - -type structsCacheMap struct { - lock sync.RWMutex - m map[reflect.Type]*cachedStruct -} - -func (s *structsCacheMap) Get(key reflect.Type) (*cachedStruct, bool) { - s.lock.RLock() - defer s.lock.RUnlock() - value, ok := s.m[key] - return value, ok -} - -func (s *structsCacheMap) Set(key reflect.Type, value *cachedStruct) { - s.lock.Lock() - defer s.lock.Unlock() - s.m[key] = value -} - -var structCache = &structsCacheMap{m: map[reflect.Type]*cachedStruct{}} - -type fieldsCacheMap struct { - lock sync.RWMutex - m map[string][]*cachedTags -} - -func (s *fieldsCacheMap) Get(key string) ([]*cachedTags, bool) { - s.lock.RLock() - defer s.lock.RUnlock() - value, ok := s.m[key] - return value, ok -} - -func (s *fieldsCacheMap) Set(key string, value []*cachedTags) { - s.lock.Lock() - defer s.lock.Unlock() - s.m[key] = value -} - -var fieldsCache = &fieldsCacheMap{m: map[string][]*cachedTags{}} - -// FieldError contains a single field's validation error along -// with other properties that may be needed for error message creation -type FieldError struct { - Field string - Tag string - Kind reflect.Kind - Type reflect.Type - Param string - Value interface{} - IsPlaceholderErr bool - IsSliceOrArray bool - IsMap bool - SliceOrArrayErrs map[int]error // counld be FieldError, StructErrors - MapErrs map[interface{}]error // counld be FieldError, StructErrors -} - -// This is intended for use in development + 
debugging and not intended to be a production error message. -// it also allows FieldError to be used as an Error interface -func (e *FieldError) Error() string { - - if e.IsPlaceholderErr { - - buff := bytes.NewBufferString("") - - if e.IsSliceOrArray { - - for j, err := range e.SliceOrArrayErrs { - buff.WriteString("\n") - buff.WriteString(fmt.Sprintf(sliceErrMsg, e.Field, j, "\n"+err.Error())) - } - - } else if e.IsMap { - - for key, err := range e.MapErrs { - buff.WriteString(fmt.Sprintf(mapErrMsg, e.Field, key, "\n"+err.Error())) - } - } - - return strings.TrimSpace(buff.String()) - } - - return fmt.Sprintf(fieldErrMsg, e.Field, e.Tag) -} - -// Flatten flattens the FieldError hierarchical structure into a flat namespace style field name -// for those that want/need it. -// This is now needed because of the new dive functionality -func (e *FieldError) Flatten() map[string]*FieldError { - - errs := map[string]*FieldError{} - - if e.IsPlaceholderErr { - - if e.IsSliceOrArray { - for key, err := range e.SliceOrArrayErrs { - - fe, ok := err.(*FieldError) - - if ok { - - if flat := fe.Flatten(); flat != nil && len(flat) > 0 { - for k, v := range flat { - if fe.IsPlaceholderErr { - errs[fmt.Sprintf("[%#v]%s", key, k)] = v - } else { - errs[fmt.Sprintf("[%#v]", key)] = v - } - - } - } - } else { - - se := err.(*StructErrors) - - if flat := se.Flatten(); flat != nil && len(flat) > 0 { - for k, v := range flat { - errs[fmt.Sprintf("[%#v].%s.%s", key, se.Struct, k)] = v - } - } - } - } - } - - if e.IsMap { - for key, err := range e.MapErrs { - - fe, ok := err.(*FieldError) - - if ok { - - if flat := fe.Flatten(); flat != nil && len(flat) > 0 { - for k, v := range flat { - if fe.IsPlaceholderErr { - errs[fmt.Sprintf("[%#v]%s", key, k)] = v - } else { - errs[fmt.Sprintf("[%#v]", key)] = v - } - } - } - } else { - - se := err.(*StructErrors) - - if flat := se.Flatten(); flat != nil && len(flat) > 0 { - for k, v := range flat { - errs[fmt.Sprintf("[%#v].%s.%s", key, se.Struct, k)] = v - } - } - } - } - } - - return errs - } - - errs[e.Field] = e - - return errs -} - -// StructErrors is hierarchical list of field and struct validation errors -// for a non hierarchical representation please see the Flatten method for StructErrors -type StructErrors struct { - // Name of the Struct - Struct string - // Struct Field Errors - Errors map[string]*FieldError - // Struct Fields of type struct and their errors - // key = Field Name of current struct, but internally Struct will be the actual struct name unless anonymous struct, it will be blank - StructErrors map[string]*StructErrors -} - -// This is intended for use in development + debugging and not intended to be a production error message. 
-// it also allows StructErrors to be used as an Error interface -func (e *StructErrors) Error() string { - buff := bytes.NewBufferString(fmt.Sprintf(structErrMsg, e.Struct)) - - for _, err := range e.Errors { - buff.WriteString(err.Error()) - buff.WriteString("\n") - } - - for _, err := range e.StructErrors { - buff.WriteString(err.Error()) - } - - return strings.TrimSpace(buff.String()) -} - -// Flatten flattens the StructErrors hierarchical structure into a flat namespace style field name -// for those that want/need it -func (e *StructErrors) Flatten() map[string]*FieldError { - - if e == nil { - return nil - } - - errs := map[string]*FieldError{} - - for _, f := range e.Errors { - - if flat := f.Flatten(); flat != nil && len(flat) > 0 { - - for k, fe := range flat { - - if f.IsPlaceholderErr { - errs[f.Field+k] = fe - } else { - errs[k] = fe - } - } - } - } - - for key, val := range e.StructErrors { - - otherErrs := val.Flatten() - - for _, f2 := range otherErrs { - - f2.Field = fmt.Sprintf("%s.%s", key, f2.Field) - errs[f2.Field] = f2 - } - } - - return errs -} - -// Func accepts all values needed for file and cross field validation -// top = top level struct when validating by struct otherwise nil -// current = current level struct when validating by struct otherwise optional comparison value -// f = field value for validation -// param = parameter used in validation i.e. gt=0 param would be 0 -type Func func(top interface{}, current interface{}, f interface{}, param string) bool - -// Validate implements the Validate Struct -// NOTE: Fields within are not thread safe and that is on purpose -// Functions and Tags should all be predifined before use, so subscribe to the philosiphy -// or make it thread safe on your end -type Validate struct { - // tagName being used. - tagName string - // validateFuncs is a map of validation functions and the tag keys - validationFuncs map[string]Func -} - -// New creates a new Validate instance for use. -func New(tagName string, funcs map[string]Func) *Validate { - - structPool = &sync.Pool{New: newStructErrors} - - return &Validate{ - tagName: tagName, - validationFuncs: funcs, - } -} - -// SetTag sets tagName of the Validator to one of your choosing after creation -// perhaps to dodge a tag name conflict in a specific section of code -// NOTE: this method is not thread-safe -func (v *Validate) SetTag(tagName string) { - v.tagName = tagName -} - -// SetMaxStructPoolSize sets the struct pools max size. this may be usefull for fine grained -// performance tuning towards your application, however, the default should be fine for -// nearly all cases. only increase if you have a deeply nested struct structure. -// NOTE: this method is not thread-safe -// NOTE: this is only here to keep compatibility with v5, in v6 the method will be removed -func (v *Validate) SetMaxStructPoolSize(max int) { - structPool = &sync.Pool{New: newStructErrors} -} - -// AddFunction adds a validation Func to a Validate's map of validators denoted by the key -// NOTE: if the key already exists, it will get replaced. 
-// NOTE: this method is not thread-safe -func (v *Validate) AddFunction(key string, f Func) error { - - if len(key) == 0 { - return errors.New("Function Key cannot be empty") - } - - if f == nil { - return errors.New("Function cannot be empty") - } - - v.validationFuncs[key] = f - - return nil -} - -// Struct validates a struct, even it's nested structs, and returns a struct containing the errors -// NOTE: Nested Arrays, or Maps of structs do not get validated only the Array or Map itself; the reason is that there is no good -// way to represent or report which struct within the array has the error, besides can validate the struct prior to adding it to -// the Array or Map. -func (v *Validate) Struct(s interface{}) *StructErrors { - - return v.structRecursive(s, s, s) -} - -// structRecursive validates a struct recursivly and passes the top level and current struct around for use in validator functions and returns a struct containing the errors -func (v *Validate) structRecursive(top interface{}, current interface{}, s interface{}) *StructErrors { - - structValue := reflect.ValueOf(s) - - if structValue.Kind() == reflect.Ptr && !structValue.IsNil() { - return v.structRecursive(top, current, structValue.Elem().Interface()) - } - - if structValue.Kind() != reflect.Struct && structValue.Kind() != reflect.Interface { - panic("interface passed for validation is not a struct") - } - - structType := reflect.TypeOf(s) - - var structName string - var numFields int - var cs *cachedStruct - var isCached bool - - cs, isCached = structCache.Get(structType) - - if isCached { - structName = cs.name - numFields = cs.children - } else { - structName = structType.Name() - numFields = structValue.NumField() - cs = &cachedStruct{name: structName, children: numFields} - } - - validationErrors := structPool.Get().(*StructErrors) - validationErrors.Struct = structName - - for i := 0; i < numFields; i++ { - - var valueField reflect.Value - var cField *cachedField - var typeField reflect.StructField - - if isCached { - cField = cs.fields[i] - valueField = structValue.Field(cField.index) - - if valueField.Kind() == reflect.Ptr && !valueField.IsNil() { - valueField = valueField.Elem() - } - } else { - valueField = structValue.Field(i) - - if valueField.Kind() == reflect.Ptr && !valueField.IsNil() { - valueField = valueField.Elem() - } - - typeField = structType.Field(i) - - cField = &cachedField{index: i, tag: typeField.Tag.Get(v.tagName), isTime: (valueField.Type() == reflect.TypeOf(time.Time{}) || valueField.Type() == reflect.TypeOf(&time.Time{}))} - - if cField.tag == noValidationTag { - cs.children-- - continue - } - - // if no validation and not a struct (which may containt fields for validation) - if cField.tag == "" && ((valueField.Kind() != reflect.Struct && valueField.Kind() != reflect.Interface) || valueField.Type() == reflect.TypeOf(time.Time{})) { - cs.children-- - continue - } - - cField.name = typeField.Name - cField.kind = valueField.Kind() - cField.typ = valueField.Type() - } - - // this can happen if the first cache value was nil - // but the second actually has a value - if cField.kind == reflect.Ptr { - cField.kind = valueField.Kind() - } - - switch cField.kind { - - case reflect.Struct, reflect.Interface: - - if !unicode.IsUpper(rune(cField.name[0])) { - cs.children-- - continue - } - - if cField.isTime { - - if fieldError := v.fieldWithNameAndValue(top, current, valueField.Interface(), cField.tag, cField.name, false, cField); fieldError != nil { - validationErrors.Errors[fieldError.Field] = 
fieldError - // free up memory reference - fieldError = nil - } - - } else { - - if strings.Contains(cField.tag, structOnlyTag) { - cs.children-- - continue - } - - if (valueField.Kind() == reflect.Ptr || cField.kind == reflect.Interface) && valueField.IsNil() { - - if strings.Contains(cField.tag, omitempty) { - goto CACHEFIELD - } - - tags := strings.Split(cField.tag, tagSeparator) - - if len(tags) > 0 { - - var param string - vals := strings.SplitN(tags[0], tagKeySeparator, 2) - - if len(vals) > 1 { - param = vals[1] - } - - validationErrors.Errors[cField.name] = &FieldError{ - Field: cField.name, - Tag: vals[0], - Param: param, - Value: valueField.Interface(), - Kind: valueField.Kind(), - Type: valueField.Type(), - } - - goto CACHEFIELD - } - } - - // if we get here, the field is interface and could be a struct or a field - // and we need to check the inner type and validate - if cField.kind == reflect.Interface { - - valueField = valueField.Elem() - - if valueField.Kind() == reflect.Ptr && !valueField.IsNil() { - valueField = valueField.Elem() - } - - if valueField.Kind() == reflect.Struct { - goto VALIDATESTRUCT - } - - // sending nil for cField as it was type interface and could be anything - // each time and so must be calculated each time and can't be cached reliably - if fieldError := v.fieldWithNameAndValue(top, current, valueField.Interface(), cField.tag, cField.name, false, nil); fieldError != nil { - validationErrors.Errors[fieldError.Field] = fieldError - // free up memory reference - fieldError = nil - } - - goto CACHEFIELD - } - - VALIDATESTRUCT: - if structErrors := v.structRecursive(top, valueField.Interface(), valueField.Interface()); structErrors != nil { - validationErrors.StructErrors[cField.name] = structErrors - // free up memory map no longer needed - structErrors = nil - } - } - - case reflect.Slice, reflect.Array: - cField.isSliceOrArray = true - cField.sliceSubtype = cField.typ.Elem() - cField.isTimeSubtype = (cField.sliceSubtype == reflect.TypeOf(time.Time{}) || cField.sliceSubtype == reflect.TypeOf(&time.Time{})) - cField.sliceSubKind = cField.sliceSubtype.Kind() - - if fieldError := v.fieldWithNameAndValue(top, current, valueField.Interface(), cField.tag, cField.name, false, cField); fieldError != nil { - validationErrors.Errors[fieldError.Field] = fieldError - // free up memory reference - fieldError = nil - } - - case reflect.Map: - cField.isMap = true - cField.mapSubtype = cField.typ.Elem() - cField.isTimeSubtype = (cField.mapSubtype == reflect.TypeOf(time.Time{}) || cField.mapSubtype == reflect.TypeOf(&time.Time{})) - cField.mapSubKind = cField.mapSubtype.Kind() - - if fieldError := v.fieldWithNameAndValue(top, current, valueField.Interface(), cField.tag, cField.name, false, cField); fieldError != nil { - validationErrors.Errors[fieldError.Field] = fieldError - // free up memory reference - fieldError = nil - } - - default: - if fieldError := v.fieldWithNameAndValue(top, current, valueField.Interface(), cField.tag, cField.name, false, cField); fieldError != nil { - validationErrors.Errors[fieldError.Field] = fieldError - // free up memory reference - fieldError = nil - } - } - - CACHEFIELD: - if !isCached { - cs.fields = append(cs.fields, cField) - } - } - - structCache.Set(structType, cs) - - if len(validationErrors.Errors) == 0 && len(validationErrors.StructErrors) == 0 { - structPool.Put(validationErrors) - return nil - } - - return validationErrors -} - -// Field allows validation of a single field, still using tag style validation to check multiple 
errors -func (v *Validate) Field(f interface{}, tag string) *FieldError { - return v.FieldWithValue(nil, f, tag) -} - -// FieldWithValue allows validation of a single field, possibly even against another fields value, still using tag style validation to check multiple errors -func (v *Validate) FieldWithValue(val interface{}, f interface{}, tag string) *FieldError { - return v.fieldWithNameAndValue(nil, val, f, tag, "", true, nil) -} - -func (v *Validate) fieldWithNameAndValue(val interface{}, current interface{}, f interface{}, tag string, name string, isSingleField bool, cacheField *cachedField) *FieldError { - - var cField *cachedField - var isCached bool - var valueField reflect.Value - - // This is a double check if coming from validate.Struct but need to be here in case function is called directly - if tag == noValidationTag || tag == "" { - return nil - } - - if strings.Contains(tag, omitempty) && !hasValue(val, current, f, "") { - return nil - } - - valueField = reflect.ValueOf(f) - - if cacheField == nil { - - if valueField.Kind() == reflect.Ptr && !valueField.IsNil() { - valueField = valueField.Elem() - f = valueField.Interface() - } - - cField = &cachedField{name: name, kind: valueField.Kind(), tag: tag} - - if cField.kind != reflect.Invalid { - cField.typ = valueField.Type() - } - - switch cField.kind { - case reflect.Slice, reflect.Array: - isSingleField = false // cached tags mean nothing because it will be split up while diving - cField.isSliceOrArray = true - cField.sliceSubtype = cField.typ.Elem() - cField.isTimeSubtype = (cField.sliceSubtype == reflect.TypeOf(time.Time{}) || cField.sliceSubtype == reflect.TypeOf(&time.Time{})) - cField.sliceSubKind = cField.sliceSubtype.Kind() - case reflect.Map: - isSingleField = false // cached tags mean nothing because it will be split up while diving - cField.isMap = true - cField.mapSubtype = cField.typ.Elem() - cField.isTimeSubtype = (cField.mapSubtype == reflect.TypeOf(time.Time{}) || cField.mapSubtype == reflect.TypeOf(&time.Time{})) - cField.mapSubKind = cField.mapSubtype.Kind() - } - } else { - cField = cacheField - } - - switch cField.kind { - case reflect.Invalid: - return &FieldError{ - Field: cField.name, - Tag: cField.tag, - Kind: cField.kind, - } - - case reflect.Struct, reflect.Interface: - - if cField.typ != reflect.TypeOf(time.Time{}) { - panic("Invalid field passed to fieldWithNameAndValue") - } - } - - if len(cField.tags) == 0 { - - if isSingleField { - cField.tags, isCached = fieldsCache.Get(tag) - } - - if !isCached { - - for _, t := range strings.Split(tag, tagSeparator) { - - if t == diveTag { - - cField.dive = true - cField.diveTag = strings.TrimLeft(strings.SplitN(tag, diveTag, 2)[1], ",") - break - } - - orVals := strings.Split(t, orSeparator) - cTag := &cachedTags{isOrVal: len(orVals) > 1, keyVals: make([][]string, len(orVals))} - cField.tags = append(cField.tags, cTag) - - for i, val := range orVals { - vals := strings.SplitN(val, tagKeySeparator, 2) - - key := strings.TrimSpace(vals[0]) - - if len(key) == 0 { - panic(fmt.Sprintf("Invalid validation tag on field %s", name)) - } - - param := "" - if len(vals) > 1 { - param = strings.Replace(vals[1], utf8HexComma, ",", -1) - } - - cTag.keyVals[i] = []string{key, param} - } - } - - if isSingleField { - fieldsCache.Set(cField.tag, cField.tags) - } - } - } - - var fieldErr *FieldError - var err error - - for _, cTag := range cField.tags { - - if cTag.isOrVal { - - errTag := "" - - for _, val := range cTag.keyVals { - - // if (idxField.Kind() == reflect.Ptr || 
idxField.Kind() == reflect.Interface) && idxField.IsNil() { - // if val[0] == existsTag { - // if (cField.kind == reflect.Ptr || cField.kind == reflect.Interface) && valueField.IsNil() { - // fieldErr = &FieldError{ - // Field: name, - // Tag: val[0], - // Value: f, - // Param: val[1], - // } - // err = errors.New(fieldErr.Tag) - // } - - // } else { - - fieldErr, err = v.fieldWithNameAndSingleTag(val, current, f, val[0], val[1], name) - // } - - if err == nil { - return nil - } - - errTag += orSeparator + fieldErr.Tag - } - - errTag = strings.TrimLeft(errTag, orSeparator) - - fieldErr.Tag = errTag - fieldErr.Kind = cField.kind - fieldErr.Type = cField.typ - - return fieldErr - } - - if cTag.keyVals[0][0] == existsTag { - if (cField.kind == reflect.Ptr || cField.kind == reflect.Interface) && valueField.IsNil() { - return &FieldError{ - Field: name, - Tag: cTag.keyVals[0][0], - Value: f, - Param: cTag.keyVals[0][1], - } - } - continue - } - - if fieldErr, err = v.fieldWithNameAndSingleTag(val, current, f, cTag.keyVals[0][0], cTag.keyVals[0][1], name); err != nil { - - fieldErr.Kind = cField.kind - fieldErr.Type = cField.typ - - return fieldErr - } - } - - if cField.dive { - - if cField.isSliceOrArray { - - if errs := v.traverseSliceOrArray(val, current, valueField, cField); errs != nil && len(errs) > 0 { - - return &FieldError{ - Field: cField.name, - Kind: cField.kind, - Type: cField.typ, - Value: f, - IsPlaceholderErr: true, - IsSliceOrArray: true, - SliceOrArrayErrs: errs, - } - } - - } else if cField.isMap { - if errs := v.traverseMap(val, current, valueField, cField); errs != nil && len(errs) > 0 { - - return &FieldError{ - Field: cField.name, - Kind: cField.kind, - Type: cField.typ, - Value: f, - IsPlaceholderErr: true, - IsMap: true, - MapErrs: errs, - } - } - } else { - // throw error, if not a slice or map then should not have gotten here - panic("dive error! 
can't dive on a non slice or map") - } - } - - return nil -} - -func (v *Validate) traverseMap(val interface{}, current interface{}, valueField reflect.Value, cField *cachedField) map[interface{}]error { - - errs := map[interface{}]error{} - - for _, key := range valueField.MapKeys() { - - idxField := valueField.MapIndex(key) - - if cField.mapSubKind == reflect.Ptr && !idxField.IsNil() { - idxField = idxField.Elem() - cField.mapSubKind = idxField.Kind() - } - - switch cField.mapSubKind { - case reflect.Struct, reflect.Interface: - - if cField.isTimeSubtype { - - if fieldError := v.fieldWithNameAndValue(val, current, idxField.Interface(), cField.diveTag, fmt.Sprintf(mapIndexFieldName, cField.name, key.Interface()), false, nil); fieldError != nil { - errs[key.Interface()] = fieldError - } - - continue - } - - if (idxField.Kind() == reflect.Ptr || idxField.Kind() == reflect.Interface) && idxField.IsNil() { - - if strings.Contains(cField.diveTag, omitempty) { - continue - } - - tags := strings.Split(cField.diveTag, tagSeparator) - - if len(tags) > 0 { - - var param string - vals := strings.SplitN(tags[0], tagKeySeparator, 2) - - if len(vals) > 1 { - param = vals[1] - } - - errs[key.Interface()] = &FieldError{ - Field: fmt.Sprintf(mapIndexFieldName, cField.name, key.Interface()), - Tag: vals[0], - Param: param, - Value: idxField.Interface(), - Kind: idxField.Kind(), - Type: cField.mapSubtype, - } - } - - continue - } - - // if we get here, the field is interface and could be a struct or a field - // and we need to check the inner type and validate - if idxField.Kind() == reflect.Interface { - - idxField = idxField.Elem() - - if idxField.Kind() == reflect.Ptr && !idxField.IsNil() { - idxField = idxField.Elem() - } - - if idxField.Kind() == reflect.Struct { - goto VALIDATESTRUCT - } - - // sending nil for cField as it was type interface and could be anything - // each time and so must be calculated each time and can't be cached reliably - if fieldError := v.fieldWithNameAndValue(val, current, idxField.Interface(), cField.diveTag, fmt.Sprintf(mapIndexFieldName, cField.name, key.Interface()), false, nil); fieldError != nil { - errs[key.Interface()] = fieldError - } - - continue - } - - VALIDATESTRUCT: - if structErrors := v.structRecursive(val, current, idxField.Interface()); structErrors != nil { - errs[key.Interface()] = structErrors - } - - default: - if fieldError := v.fieldWithNameAndValue(val, current, idxField.Interface(), cField.diveTag, fmt.Sprintf(mapIndexFieldName, cField.name, key.Interface()), false, nil); fieldError != nil { - errs[key.Interface()] = fieldError - } - } - } - - return errs -} - -func (v *Validate) traverseSliceOrArray(val interface{}, current interface{}, valueField reflect.Value, cField *cachedField) map[int]error { - - errs := map[int]error{} - - for i := 0; i < valueField.Len(); i++ { - - idxField := valueField.Index(i) - - if cField.sliceSubKind == reflect.Ptr && !idxField.IsNil() { - idxField = idxField.Elem() - cField.sliceSubKind = idxField.Kind() - } - - switch cField.sliceSubKind { - case reflect.Struct, reflect.Interface: - - if cField.isTimeSubtype { - - if fieldError := v.fieldWithNameAndValue(val, current, idxField.Interface(), cField.diveTag, fmt.Sprintf(arrayIndexFieldName, cField.name, i), false, nil); fieldError != nil { - errs[i] = fieldError - } - - continue - } - - if (idxField.Kind() == reflect.Ptr || idxField.Kind() == reflect.Interface) && idxField.IsNil() { - - if strings.Contains(cField.diveTag, omitempty) { - continue - } - - tags := 
strings.Split(cField.diveTag, tagSeparator) - - if len(tags) > 0 { - - var param string - vals := strings.SplitN(tags[0], tagKeySeparator, 2) - - if len(vals) > 1 { - param = vals[1] - } - - errs[i] = &FieldError{ - Field: fmt.Sprintf(arrayIndexFieldName, cField.name, i), - Tag: vals[0], - Param: param, - Value: idxField.Interface(), - Kind: idxField.Kind(), - Type: cField.sliceSubtype, - } - } - - continue - } - - // if we get here, the field is interface and could be a struct or a field - // and we need to check the inner type and validate - if idxField.Kind() == reflect.Interface { - - idxField = idxField.Elem() - - if idxField.Kind() == reflect.Ptr && !idxField.IsNil() { - idxField = idxField.Elem() - } - - if idxField.Kind() == reflect.Struct { - goto VALIDATESTRUCT - } - - // sending nil for cField as it was type interface and could be anything - // each time and so must be calculated each time and can't be cached reliably - if fieldError := v.fieldWithNameAndValue(val, current, idxField.Interface(), cField.diveTag, fmt.Sprintf(arrayIndexFieldName, cField.name, i), false, nil); fieldError != nil { - errs[i] = fieldError - } - - continue - } - - VALIDATESTRUCT: - if structErrors := v.structRecursive(val, current, idxField.Interface()); structErrors != nil { - errs[i] = structErrors - } - - default: - if fieldError := v.fieldWithNameAndValue(val, current, idxField.Interface(), cField.diveTag, fmt.Sprintf(arrayIndexFieldName, cField.name, i), false, nil); fieldError != nil { - errs[i] = fieldError - } - } - } - - return errs -} - -func (v *Validate) fieldWithNameAndSingleTag(val interface{}, current interface{}, f interface{}, key string, param string, name string) (*FieldError, error) { - - // OK to continue because we checked it's existance before getting into this loop - if key == omitempty { - return nil, nil - } - - // if key == existsTag { - // continue - // } - - valFunc, ok := v.validationFuncs[key] - if !ok { - panic(fmt.Sprintf("Undefined validation function on field %s", name)) - } - - if err := valFunc(val, current, f, param); err { - return nil, nil - } - - return &FieldError{ - Field: name, - Tag: key, - Value: f, - Param: param, - }, errors.New(key) -} diff --git a/vendor/gopkg.in/go-playground/validator.v8/LICENSE b/vendor/gopkg.in/go-playground/validator.v8/LICENSE deleted file mode 100644 index 6a2ae9a..0000000 --- a/vendor/gopkg.in/go-playground/validator.v8/LICENSE +++ /dev/null @@ -1,22 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2015 Dean Karn - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - diff --git a/vendor/gopkg.in/go-playground/validator.v8/README.md b/vendor/gopkg.in/go-playground/validator.v8/README.md deleted file mode 100644 index dd2a4f4..0000000 --- a/vendor/gopkg.in/go-playground/validator.v8/README.md +++ /dev/null @@ -1,368 +0,0 @@ -Package validator -================ - -[![Join the chat at https://gitter.im/bluesuncorp/validator](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/go-playground/validator?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) -![Project status](https://img.shields.io/badge/version-8.17.1-green.svg) -[![Build Status](https://semaphoreci.com/api/v1/projects/ec20115f-ef1b-4c7d-9393-cc76aba74eb4/530054/badge.svg)](https://semaphoreci.com/joeybloggs/validator) -[![Coverage Status](https://coveralls.io/repos/go-playground/validator/badge.svg?branch=v8&service=github)](https://coveralls.io/github/go-playground/validator?branch=v8) -[![Go Report Card](http://goreportcard.com/badge/go-playground/validator)](http://goreportcard.com/report/go-playground/validator) -[![GoDoc](https://godoc.org/gopkg.in/go-playground/validator.v8?status.svg)](https://godoc.org/gopkg.in/go-playground/validator.v8) -![License](https://img.shields.io/dub/l/vibe-d.svg) - -Package validator implements value validations for structs and individual fields based on tags. - -It has the following **unique** features: - -- Cross Field and Cross Struct validations by using validation tags or custom validators. -- Slice, Array and Map diving, which allows any or all levels of a multidimensional field to be validated. -- Handles type interface by determining it's underlying type prior to validation. -- Handles custom field types such as sql driver Valuer see [Valuer](https://golang.org/src/database/sql/driver/types.go?s=1210:1293#L29) -- Alias validation tags, which allows for mapping of several validations to a single tag for easier defining of validations on structs -- Extraction of custom defined Field Name e.g. can specify to extract the JSON name while validating and have it available in the resulting FieldError - -Installation ------------- - -Use go get. - - go get gopkg.in/go-playground/validator.v8 - -or to update - - go get -u gopkg.in/go-playground/validator.v8 - -Then import the validator package into your own code. - - import "gopkg.in/go-playground/validator.v8" - -Error Return Value -------- - -Validation functions return type error - -They return type error to avoid the issue discussed in the following, where err is always != nil: - -* http://stackoverflow.com/a/29138676/3158232 -* https://github.com/go-playground/validator/issues/134 - -validator only returns nil or ValidationErrors as type error; so in you code all you need to do -is check if the error returned is not nil, and if it's not type cast it to type ValidationErrors -like so: - -```go -err := validate.Struct(mystruct) -validationErrors := err.(validator.ValidationErrors) - ``` - -Usage and documentation ------- - -Please see http://godoc.org/gopkg.in/go-playground/validator.v8 for detailed usage docs. 
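
Building on the error-return contract described above, here is a minimal, hedged sketch of checking the type assertion's `ok` value before ranging over the individual field errors (the README's own snippet asserts unchecked). The `User` type and its tags are illustrative stand-ins only:

```go
package main

import (
	"fmt"

	"gopkg.in/go-playground/validator.v8"
)

// User is an illustrative struct; only Email carries validation tags here.
type User struct {
	Email string `validate:"required,email"`
}

func main() {
	validate := validator.New(&validator.Config{TagName: "validate"})

	err := validate.Struct(User{Email: "not-an-email"})
	if err != nil {
		// The returned error is either nil or ValidationErrors; checking the
		// assertion's ok value avoids a panic if some other error type appears.
		if verrs, ok := err.(validator.ValidationErrors); ok {
			for name, fe := range verrs {
				fmt.Printf("%s failed on the %q tag\n", name, fe.Tag)
			}
		}
	}
}
```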
- -##### Examples: - -Struct & Field validation -```go -package main - -import ( - "fmt" - - "gopkg.in/go-playground/validator.v8" -) - -// User contains user information -type User struct { - FirstName string `validate:"required"` - LastName string `validate:"required"` - Age uint8 `validate:"gte=0,lte=130"` - Email string `validate:"required,email"` - FavouriteColor string `validate:"hexcolor|rgb|rgba"` - Addresses []*Address `validate:"required,dive,required"` // a person can have a home and cottage... -} - -// Address houses a users address information -type Address struct { - Street string `validate:"required"` - City string `validate:"required"` - Planet string `validate:"required"` - Phone string `validate:"required"` -} - -var validate *validator.Validate - -func main() { - - config := &validator.Config{TagName: "validate"} - - validate = validator.New(config) - - validateStruct() - validateField() -} - -func validateStruct() { - - address := &Address{ - Street: "Eavesdown Docks", - Planet: "Persphone", - Phone: "none", - } - - user := &User{ - FirstName: "Badger", - LastName: "Smith", - Age: 135, - Email: "Badger.Smith@gmail.com", - FavouriteColor: "#000", - Addresses: []*Address{address}, - } - - // returns nil or ValidationErrors ( map[string]*FieldError ) - errs := validate.Struct(user) - - if errs != nil { - - fmt.Println(errs) // output: Key: "User.Age" Error:Field validation for "Age" failed on the "lte" tag - // Key: "User.Addresses[0].City" Error:Field validation for "City" failed on the "required" tag - err := errs.(validator.ValidationErrors)["User.Addresses[0].City"] - fmt.Println(err.Field) // output: City - fmt.Println(err.Tag) // output: required - fmt.Println(err.Kind) // output: string - fmt.Println(err.Type) // output: string - fmt.Println(err.Param) // output: - fmt.Println(err.Value) // output: - - // from here you can create your own error messages in whatever language you wish - return - } - - // save user to database -} - -func validateField() { - myEmail := "joeybloggs.gmail.com" - - errs := validate.Field(myEmail, "required,email") - - if errs != nil { - fmt.Println(errs) // output: Key: "" Error:Field validation for "" failed on the "email" tag - return - } - - // email ok, move on -} -``` - -Custom Field Type -```go -package main - -import ( - "database/sql" - "database/sql/driver" - "fmt" - "reflect" - - "gopkg.in/go-playground/validator.v8" -) - -// DbBackedUser User struct -type DbBackedUser struct { - Name sql.NullString `validate:"required"` - Age sql.NullInt64 `validate:"required"` -} - -func main() { - - config := &validator.Config{TagName: "validate"} - - validate := validator.New(config) - - // register all sql.Null* types to use the ValidateValuer CustomTypeFunc - validate.RegisterCustomTypeFunc(ValidateValuer, sql.NullString{}, sql.NullInt64{}, sql.NullBool{}, sql.NullFloat64{}) - - x := DbBackedUser{Name: sql.NullString{String: "", Valid: true}, Age: sql.NullInt64{Int64: 0, Valid: false}} - errs := validate.Struct(x) - - if len(errs.(validator.ValidationErrors)) > 0 { - fmt.Printf("Errs:\n%+v\n", errs) - } -} - -// ValidateValuer implements validator.CustomTypeFunc -func ValidateValuer(field reflect.Value) interface{} { - if valuer, ok := field.Interface().(driver.Valuer); ok { - val, err := valuer.Value() - if err == nil { - return val - } - // handle the error how you want - } - return nil -} -``` - -Struct Level Validation -```go -package main - -import ( - "fmt" - "reflect" - - "gopkg.in/go-playground/validator.v8" -) - -// User contains 
user information -type User struct { - FirstName string `json:"fname"` - LastName string `json:"lname"` - Age uint8 `validate:"gte=0,lte=130"` - Email string `validate:"required,email"` - FavouriteColor string `validate:"hexcolor|rgb|rgba"` - Addresses []*Address `validate:"required,dive,required"` // a person can have a home and cottage... -} - -// Address houses a users address information -type Address struct { - Street string `validate:"required"` - City string `validate:"required"` - Planet string `validate:"required"` - Phone string `validate:"required"` -} - -var validate *validator.Validate - -func main() { - - config := &validator.Config{TagName: "validate"} - - validate = validator.New(config) - validate.RegisterStructValidation(UserStructLevelValidation, User{}) - - validateStruct() -} - -// UserStructLevelValidation contains custom struct level validations that don't always -// make sense at the field validation level. For Example this function validates that either -// FirstName or LastName exist; could have done that with a custom field validation but then -// would have had to add it to both fields duplicating the logic + overhead, this way it's -// only validated once. -// -// NOTE: you may ask why wouldn't I just do this outside of validator, because doing this way -// hooks right into validator and you can combine with validation tags and still have a -// common error output format. -func UserStructLevelValidation(v *validator.Validate, structLevel *validator.StructLevel) { - - user := structLevel.CurrentStruct.Interface().(User) - - if len(user.FirstName) == 0 && len(user.LastName) == 0 { - structLevel.ReportError(reflect.ValueOf(user.FirstName), "FirstName", "fname", "fnameorlname") - structLevel.ReportError(reflect.ValueOf(user.LastName), "LastName", "lname", "fnameorlname") - } - - // plus can to more, even with different tag than "fnameorlname" -} - -func validateStruct() { - - address := &Address{ - Street: "Eavesdown Docks", - Planet: "Persphone", - Phone: "none", - City: "Unknown", - } - - user := &User{ - FirstName: "", - LastName: "", - Age: 45, - Email: "Badger.Smith@gmail.com", - FavouriteColor: "#000", - Addresses: []*Address{address}, - } - - // returns nil or ValidationErrors ( map[string]*FieldError ) - errs := validate.Struct(user) - - if errs != nil { - - fmt.Println(errs) // output: Key: 'User.LastName' Error:Field validation for 'LastName' failed on the 'fnameorlname' tag - // Key: 'User.FirstName' Error:Field validation for 'FirstName' failed on the 'fnameorlname' tag - err := errs.(validator.ValidationErrors)["User.FirstName"] - fmt.Println(err.Field) // output: FirstName - fmt.Println(err.Tag) // output: fnameorlname - fmt.Println(err.Kind) // output: string - fmt.Println(err.Type) // output: string - fmt.Println(err.Param) // output: - fmt.Println(err.Value) // output: - - // from here you can create your own error messages in whatever language you wish - return - } - - // save user to database -} -``` - -Benchmarks ------- -###### Run on MacBook Pro (Retina, 15-inch, Late 2013) 2.6 GHz Intel Core i7 16 GB 1600 MHz DDR3 using Go version go1.5.3 darwin/amd64 -```go -go test -cpu=4 -bench=. 
-benchmem=true -PASS -BenchmarkFieldSuccess-4 10000000 167 ns/op 0 B/op 0 allocs/op -BenchmarkFieldFailure-4 2000000 701 ns/op 432 B/op 4 allocs/op -BenchmarkFieldDiveSuccess-4 500000 2937 ns/op 480 B/op 27 allocs/op -BenchmarkFieldDiveFailure-4 500000 3536 ns/op 912 B/op 31 allocs/op -BenchmarkFieldCustomTypeSuccess-4 5000000 341 ns/op 32 B/op 2 allocs/op -BenchmarkFieldCustomTypeFailure-4 2000000 679 ns/op 432 B/op 4 allocs/op -BenchmarkFieldOrTagSuccess-4 1000000 1157 ns/op 16 B/op 1 allocs/op -BenchmarkFieldOrTagFailure-4 1000000 1109 ns/op 464 B/op 6 allocs/op -BenchmarkStructLevelValidationSuccess-4 2000000 694 ns/op 176 B/op 6 allocs/op -BenchmarkStructLevelValidationFailure-4 1000000 1311 ns/op 640 B/op 11 allocs/op -BenchmarkStructSimpleCustomTypeSuccess-4 2000000 894 ns/op 80 B/op 5 allocs/op -BenchmarkStructSimpleCustomTypeFailure-4 1000000 1496 ns/op 688 B/op 11 allocs/op -BenchmarkStructPartialSuccess-4 1000000 1229 ns/op 384 B/op 10 allocs/op -BenchmarkStructPartialFailure-4 1000000 1838 ns/op 832 B/op 15 allocs/op -BenchmarkStructExceptSuccess-4 2000000 961 ns/op 336 B/op 7 allocs/op -BenchmarkStructExceptFailure-4 1000000 1218 ns/op 384 B/op 10 allocs/op -BenchmarkStructSimpleCrossFieldSuccess-4 2000000 954 ns/op 128 B/op 6 allocs/op -BenchmarkStructSimpleCrossFieldFailure-4 1000000 1569 ns/op 592 B/op 11 allocs/op -BenchmarkStructSimpleCrossStructCrossFieldSuccess-4 1000000 1588 ns/op 192 B/op 10 allocs/op -BenchmarkStructSimpleCrossStructCrossFieldFailure-4 1000000 2217 ns/op 656 B/op 15 allocs/op -BenchmarkStructSimpleSuccess-4 2000000 925 ns/op 48 B/op 3 allocs/op -BenchmarkStructSimpleFailure-4 1000000 1650 ns/op 688 B/op 11 allocs/op -BenchmarkStructSimpleSuccessParallel-4 5000000 261 ns/op 48 B/op 3 allocs/op -BenchmarkStructSimpleFailureParallel-4 2000000 758 ns/op 688 B/op 11 allocs/op -BenchmarkStructComplexSuccess-4 300000 5868 ns/op 544 B/op 32 allocs/op -BenchmarkStructComplexFailure-4 200000 10767 ns/op 3912 B/op 77 allocs/op -BenchmarkStructComplexSuccessParallel-4 1000000 1559 ns/op 544 B/op 32 allocs/op -BenchmarkStructComplexFailureParallel-4 500000 3747 ns/op 3912 B/op 77 allocs -``` - -Complimentary Software ----------------------- - -Here is a list of software that compliments using this library either pre or post validation. - -* [Gorilla Schema](https://github.com/gorilla/schema) - Package gorilla/schema fills a struct with form values. -* [Conform](https://github.com/leebenson/conform) - Trims, sanitizes & scrubs data based on struct tags. - -How to Contribute ------- - -There will always be a development branch for each version i.e. `v1-development`. In order to contribute, -please make your pull requests against those branches. - -If the changes being proposed or requested are breaking changes, please create an issue, for discussion -or create a pull request against the highest development branch for example this package has a -v1 and v1-development branch however, there will also be a v2-development branch even though v2 doesn't exist yet. - -I strongly encourage everyone whom creates a custom validation function to contribute them and -help make this package even better. - -License ------- -Distributed under MIT License, please see license file in code for more details. 
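
The deleted baked_in.go below repeatedly notes that its validators are "exposed for use within your own custom functions." As a hedged sketch of what that reuse might look like, the example assumes validator.v8's RegisterValidation method and the Func signature shown in the file; the "rfc1918ish" tag name and the 10.x prefix check are purely illustrative and not part of the library:

```go
package main

import (
	"fmt"
	"reflect"
	"strings"

	"gopkg.in/go-playground/validator.v8"
)

// Host carries a single address validated by a custom tag that reuses
// the exported baked-in IsIPv4 validator.
type Host struct {
	Addr string `validate:"rfc1918ish"`
}

func main() {
	validate := validator.New(&validator.Config{TagName: "validate"})

	// Register a custom rule composed from IsIPv4. The closure's signature
	// matches the Func type used by the baked-in validators.
	validate.RegisterValidation("rfc1918ish", func(v *validator.Validate, top, current, field reflect.Value,
		fieldType reflect.Type, fieldKind reflect.Kind, param string) bool {
		return validator.IsIPv4(v, top, current, field, fieldType, fieldKind, param) &&
			strings.HasPrefix(field.String(), "10.")
	})

	fmt.Println(validate.Struct(Host{Addr: "10.0.0.7"}) == nil) // true
	fmt.Println(validate.Struct(Host{Addr: "8.8.8.8"}) == nil)  // false
}
```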
diff --git a/vendor/gopkg.in/go-playground/validator.v8/baked_in.go b/vendor/gopkg.in/go-playground/validator.v8/baked_in.go deleted file mode 100644 index 3dcb0de..0000000 --- a/vendor/gopkg.in/go-playground/validator.v8/baked_in.go +++ /dev/null @@ -1,1418 +0,0 @@ -package validator - -import ( - "fmt" - "net" - "net/url" - "reflect" - "strings" - "time" - "unicode/utf8" -) - -// BakedInAliasValidators is a default mapping of a single validationstag that -// defines a common or complex set of validation(s) to simplify -// adding validation to structs. i.e. set key "_ageok" and the tags -// are "gt=0,lte=130" or key "_preferredname" and tags "omitempty,gt=0,lte=60" -var bakedInAliasValidators = map[string]string{ - "iscolor": "hexcolor|rgb|rgba|hsl|hsla", -} - -// BakedInValidators is the default map of ValidationFunc -// you can add, remove or even replace items to suite your needs, -// or even disregard and use your own map if so desired. -var bakedInValidators = map[string]Func{ - "required": HasValue, - "len": HasLengthOf, - "min": HasMinOf, - "max": HasMaxOf, - "eq": IsEq, - "ne": IsNe, - "lt": IsLt, - "lte": IsLte, - "gt": IsGt, - "gte": IsGte, - "eqfield": IsEqField, - "eqcsfield": IsEqCrossStructField, - "necsfield": IsNeCrossStructField, - "gtcsfield": IsGtCrossStructField, - "gtecsfield": IsGteCrossStructField, - "ltcsfield": IsLtCrossStructField, - "ltecsfield": IsLteCrossStructField, - "nefield": IsNeField, - "gtefield": IsGteField, - "gtfield": IsGtField, - "ltefield": IsLteField, - "ltfield": IsLtField, - "alpha": IsAlpha, - "alphanum": IsAlphanum, - "numeric": IsNumeric, - "number": IsNumber, - "hexadecimal": IsHexadecimal, - "hexcolor": IsHEXColor, - "rgb": IsRGB, - "rgba": IsRGBA, - "hsl": IsHSL, - "hsla": IsHSLA, - "email": IsEmail, - "url": IsURL, - "uri": IsURI, - "base64": IsBase64, - "contains": Contains, - "containsany": ContainsAny, - "containsrune": ContainsRune, - "excludes": Excludes, - "excludesall": ExcludesAll, - "excludesrune": ExcludesRune, - "isbn": IsISBN, - "isbn10": IsISBN10, - "isbn13": IsISBN13, - "uuid": IsUUID, - "uuid3": IsUUID3, - "uuid4": IsUUID4, - "uuid5": IsUUID5, - "ascii": IsASCII, - "printascii": IsPrintableASCII, - "multibyte": HasMultiByteCharacter, - "datauri": IsDataURI, - "latitude": IsLatitude, - "longitude": IsLongitude, - "ssn": IsSSN, - "ipv4": IsIPv4, - "ipv6": IsIPv6, - "ip": IsIP, - "cidrv4": IsCIDRv4, - "cidrv6": IsCIDRv6, - "cidr": IsCIDR, - "tcp4_addr": IsTCP4AddrResolvable, - "tcp6_addr": IsTCP6AddrResolvable, - "tcp_addr": IsTCPAddrResolvable, - "udp4_addr": IsUDP4AddrResolvable, - "udp6_addr": IsUDP6AddrResolvable, - "udp_addr": IsUDPAddrResolvable, - "ip4_addr": IsIP4AddrResolvable, - "ip6_addr": IsIP6AddrResolvable, - "ip_addr": IsIPAddrResolvable, - "unix_addr": IsUnixAddrResolvable, - "mac": IsMAC, -} - -// IsMAC is the validation function for validating if the field's value is a valid MAC address. -// NOTE: This is exposed for use within your own custom functions and not intended to be called directly. -func IsMAC(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool { - _, err := net.ParseMAC(field.String()) - return err == nil -} - -// IsCIDRv4 is the validation function for validating if the field's value is a valid v4 CIDR address. -// NOTE: This is exposed for use within your own custom functions and not intended to be called directly. 
-func IsCIDRv4(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool { - - ip, _, err := net.ParseCIDR(field.String()) - - return err == nil && ip.To4() != nil -} - -// IsCIDRv6 is the validation function for validating if the field's value is a valid v6 CIDR address. -// NOTE: This is exposed for use within your own custom functions and not intended to be called directly. -func IsCIDRv6(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool { - - ip, _, err := net.ParseCIDR(field.String()) - - return err == nil && ip.To4() == nil -} - -// IsCIDR is the validation function for validating if the field's value is a valid v4 or v6 CIDR address. -// NOTE: This is exposed for use within your own custom functions and not intended to be called directly. -func IsCIDR(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool { - - _, _, err := net.ParseCIDR(field.String()) - - return err == nil -} - -// IsIPv4 is the validation function for validating if a value is a valid v4 IP address. -// NOTE: This is exposed for use within your own custom functions and not intended to be called directly. -func IsIPv4(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool { - - ip := net.ParseIP(field.String()) - - return ip != nil && ip.To4() != nil -} - -// IsIPv6 is the validation function for validating if the field's value is a valid v6 IP address. -// NOTE: This is exposed for use within your own custom functions and not intended to be called directly. -func IsIPv6(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool { - ip := net.ParseIP(field.String()) - - return ip != nil && ip.To4() == nil -} - -// IsIP is the validation function for validating if the field's value is a valid v4 or v6 IP address. -// NOTE: This is exposed for use within your own custom functions and not intended to be called directly. -func IsIP(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool { - - ip := net.ParseIP(field.String()) - - return ip != nil -} - -// IsSSN is the validation function for validating if the field's value is a valid SSN. -// NOTE: This is exposed for use within your own custom functions and not intended to be called directly. -func IsSSN(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool { - - if field.Len() != 11 { - return false - } - - return sSNRegex.MatchString(field.String()) -} - -// IsLongitude is the validation function for validating if the field's value is a valid longitude coordinate. -// NOTE: This is exposed for use within your own custom functions and not intended to be called directly. 
-func IsLongitude(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool { - return longitudeRegex.MatchString(field.String()) -} - -// IsLatitude is the validation function for validating if the field's value is a valid latitude coordinate. -// NOTE: This is exposed for use within your own custom functions and not intended to be called directly. -func IsLatitude(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool { - return latitudeRegex.MatchString(field.String()) -} - -// IsDataURI is the validation function for validating if the field's value is a valid data URI. -// NOTE: This is exposed for use within your own custom functions and not intended to be called directly. -func IsDataURI(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool { - - uri := strings.SplitN(field.String(), ",", 2) - - if len(uri) != 2 { - return false - } - - if !dataURIRegex.MatchString(uri[0]) { - return false - } - - fld := reflect.ValueOf(uri[1]) - - return IsBase64(v, topStruct, currentStructOrField, fld, fld.Type(), fld.Kind(), param) -} - -// HasMultiByteCharacter is the validation function for validating if the field's value has a multi byte character. -// NOTE: This is exposed for use within your own custom functions and not intended to be called directly. -func HasMultiByteCharacter(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool { - - if field.Len() == 0 { - return true - } - - return multibyteRegex.MatchString(field.String()) -} - -// IsPrintableASCII is the validation function for validating if the field's value is a valid printable ASCII character. -// NOTE: This is exposed for use within your own custom functions and not intended to be called directly. -func IsPrintableASCII(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool { - return printableASCIIRegex.MatchString(field.String()) -} - -// IsASCII is the validation function for validating if the field's value is a valid ASCII character. -// NOTE: This is exposed for use within your own custom functions and not intended to be called directly. -func IsASCII(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool { - return aSCIIRegex.MatchString(field.String()) -} - -// IsUUID5 is the validation function for validating if the field's value is a valid v5 UUID. -// NOTE: This is exposed for use within your own custom functions and not intended to be called directly. -func IsUUID5(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool { - return uUID5Regex.MatchString(field.String()) -} - -// IsUUID4 is the validation function for validating if the field's value is a valid v4 UUID. -// NOTE: This is exposed for use within your own custom functions and not intended to be called directly. 
-func IsUUID4(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool { - return uUID4Regex.MatchString(field.String()) -} - -// IsUUID3 is the validation function for validating if the field's value is a valid v3 UUID. -// NOTE: This is exposed for use within your own custom functions and not intended to be called directly. -func IsUUID3(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool { - return uUID3Regex.MatchString(field.String()) -} - -// IsUUID is the validation function for validating if the field's value is a valid UUID of any version. -// NOTE: This is exposed for use within your own custom functions and not intended to be called directly. -func IsUUID(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool { - return uUIDRegex.MatchString(field.String()) -} - -// IsISBN is the validation function for validating if the field's value is a valid v10 or v13 ISBN. -// NOTE: This is exposed for use within your own custom functions and not intended to be called directly. -func IsISBN(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool { - return IsISBN10(v, topStruct, currentStructOrField, field, fieldType, fieldKind, param) || IsISBN13(v, topStruct, currentStructOrField, field, fieldType, fieldKind, param) -} - -// IsISBN13 is the validation function for validating if the field's value is a valid v13 ISBN. -// NOTE: This is exposed for use within your own custom functions and not intended to be called directly. -func IsISBN13(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool { - - s := strings.Replace(strings.Replace(field.String(), "-", "", 4), " ", "", 4) - - if !iSBN13Regex.MatchString(s) { - return false - } - - var checksum int32 - var i int32 - - factor := []int32{1, 3} - - for i = 0; i < 12; i++ { - checksum += factor[i%2] * int32(s[i]-'0') - } - - if (int32(s[12]-'0'))-((10-(checksum%10))%10) == 0 { - return true - } - - return false -} - -// IsISBN10 is the validation function for validating if the field's value is a valid v10 ISBN. -// NOTE: This is exposed for use within your own custom functions and not intended to be called directly. -func IsISBN10(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool { - - s := strings.Replace(strings.Replace(field.String(), "-", "", 3), " ", "", 3) - - if !iSBN10Regex.MatchString(s) { - return false - } - - var checksum int32 - var i int32 - - for i = 0; i < 9; i++ { - checksum += (i + 1) * int32(s[i]-'0') - } - - if s[9] == 'X' { - checksum += 10 * 10 - } else { - checksum += 10 * int32(s[9]-'0') - } - - if checksum%11 == 0 { - return true - } - - return false -} - -// ExcludesRune is the validation function for validating that the field's value does not contain the rune specified withing the param. -// NOTE: This is exposed for use within your own custom functions and not intended to be called directly. 
-func ExcludesRune(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool { - return !ContainsRune(v, topStruct, currentStructOrField, field, fieldType, fieldKind, param) -} - -// ExcludesAll is the validation function for validating that the field's value does not contain any of the characters specified withing the param. -// NOTE: This is exposed for use within your own custom functions and not intended to be called directly. -func ExcludesAll(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool { - return !ContainsAny(v, topStruct, currentStructOrField, field, fieldType, fieldKind, param) -} - -// Excludes is the validation function for validating that the field's value does not contain the text specified withing the param. -// NOTE: This is exposed for use within your own custom functions and not intended to be called directly. -func Excludes(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool { - return !Contains(v, topStruct, currentStructOrField, field, fieldType, fieldKind, param) -} - -// ContainsRune is the validation function for validating that the field's value contains the rune specified withing the param. -// NOTE: This is exposed for use within your own custom functions and not intended to be called directly. -func ContainsRune(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool { - r, _ := utf8.DecodeRuneInString(param) - - return strings.ContainsRune(field.String(), r) -} - -// ContainsAny is the validation function for validating that the field's value contains any of the characters specified withing the param. -// NOTE: This is exposed for use within your own custom functions and not intended to be called directly. -func ContainsAny(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool { - return strings.ContainsAny(field.String(), param) -} - -// Contains is the validation function for validating that the field's value contains the text specified withing the param. -// NOTE: This is exposed for use within your own custom functions and not intended to be called directly. -func Contains(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool { - return strings.Contains(field.String(), param) -} - -// IsNeField is the validation function for validating if the current field's value is not equal to the field specified by the param's value. -// NOTE: This is exposed for use within your own custom functions and not intended to be called directly. 
-func IsNeField(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool { - - currentField, currentKind, ok := v.GetStructFieldOK(currentStructOrField, param) - - if !ok || currentKind != fieldKind { - return true - } - - switch fieldKind { - - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return field.Int() != currentField.Int() - - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return field.Uint() != currentField.Uint() - - case reflect.Float32, reflect.Float64: - return field.Float() != currentField.Float() - - case reflect.Slice, reflect.Map, reflect.Array: - return int64(field.Len()) != int64(currentField.Len()) - - case reflect.Struct: - - // Not Same underlying type i.e. struct and time - if fieldType != currentField.Type() { - return true - } - - if fieldType == timeType { - - t := currentField.Interface().(time.Time) - fieldTime := field.Interface().(time.Time) - - return !fieldTime.Equal(t) - } - - } - - // default reflect.String: - return field.String() != currentField.String() -} - -// IsNe is the validation function for validating that the field's value does not equal the provided param value. -// NOTE: This is exposed for use within your own custom functions and not intended to be called directly. -func IsNe(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool { - return !IsEq(v, topStruct, currentStructOrField, field, fieldType, fieldKind, param) -} - -// IsLteCrossStructField is the validation function for validating if the current field's value is less than or equal to the field, within a separate struct, specified by the param's value. -// NOTE: This is exposed for use within your own custom functions and not intended to be called directly. -func IsLteCrossStructField(v *Validate, topStruct reflect.Value, current reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool { - - topField, topKind, ok := v.GetStructFieldOK(topStruct, param) - if !ok || topKind != fieldKind { - return false - } - - switch fieldKind { - - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return field.Int() <= topField.Int() - - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return field.Uint() <= topField.Uint() - - case reflect.Float32, reflect.Float64: - return field.Float() <= topField.Float() - - case reflect.Slice, reflect.Map, reflect.Array: - return int64(field.Len()) <= int64(topField.Len()) - - case reflect.Struct: - - // Not Same underlying type i.e. struct and time - if fieldType != topField.Type() { - return false - } - - if fieldType == timeType { - - fieldTime := field.Interface().(time.Time) - topTime := topField.Interface().(time.Time) - - return fieldTime.Before(topTime) || fieldTime.Equal(topTime) - } - } - - // default reflect.String: - return field.String() <= topField.String() -} - -// IsLtCrossStructField is the validation function for validating if the current field's value is less than the field, within a separate struct, specified by the param's value. -// NOTE: This is exposed for use within your own custom functions and not intended to be called directly. 
-func IsLtCrossStructField(v *Validate, topStruct reflect.Value, current reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool { - - topField, topKind, ok := v.GetStructFieldOK(topStruct, param) - if !ok || topKind != fieldKind { - return false - } - - switch fieldKind { - - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return field.Int() < topField.Int() - - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return field.Uint() < topField.Uint() - - case reflect.Float32, reflect.Float64: - return field.Float() < topField.Float() - - case reflect.Slice, reflect.Map, reflect.Array: - return int64(field.Len()) < int64(topField.Len()) - - case reflect.Struct: - - // Not Same underlying type i.e. struct and time - if fieldType != topField.Type() { - return false - } - - if fieldType == timeType { - - fieldTime := field.Interface().(time.Time) - topTime := topField.Interface().(time.Time) - - return fieldTime.Before(topTime) - } - } - - // default reflect.String: - return field.String() < topField.String() -} - -// IsGteCrossStructField is the validation function for validating if the current field's value is greater than or equal to the field, within a separate struct, specified by the param's value. -// NOTE: This is exposed for use within your own custom functions and not intended to be called directly. -func IsGteCrossStructField(v *Validate, topStruct reflect.Value, current reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool { - - topField, topKind, ok := v.GetStructFieldOK(topStruct, param) - if !ok || topKind != fieldKind { - return false - } - - switch fieldKind { - - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return field.Int() >= topField.Int() - - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return field.Uint() >= topField.Uint() - - case reflect.Float32, reflect.Float64: - return field.Float() >= topField.Float() - - case reflect.Slice, reflect.Map, reflect.Array: - return int64(field.Len()) >= int64(topField.Len()) - - case reflect.Struct: - - // Not Same underlying type i.e. struct and time - if fieldType != topField.Type() { - return false - } - - if fieldType == timeType { - - fieldTime := field.Interface().(time.Time) - topTime := topField.Interface().(time.Time) - - return fieldTime.After(topTime) || fieldTime.Equal(topTime) - } - } - - // default reflect.String: - return field.String() >= topField.String() -} - -// IsGtCrossStructField is the validation function for validating if the current field's value is greater than the field, within a separate struct, specified by the param's value. -// NOTE: This is exposed for use within your own custom functions and not intended to be called directly. 
-func IsGtCrossStructField(v *Validate, topStruct reflect.Value, current reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool { - - topField, topKind, ok := v.GetStructFieldOK(topStruct, param) - if !ok || topKind != fieldKind { - return false - } - - switch fieldKind { - - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return field.Int() > topField.Int() - - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return field.Uint() > topField.Uint() - - case reflect.Float32, reflect.Float64: - return field.Float() > topField.Float() - - case reflect.Slice, reflect.Map, reflect.Array: - return int64(field.Len()) > int64(topField.Len()) - - case reflect.Struct: - - // Not Same underlying type i.e. struct and time - if fieldType != topField.Type() { - return false - } - - if fieldType == timeType { - - fieldTime := field.Interface().(time.Time) - topTime := topField.Interface().(time.Time) - - return fieldTime.After(topTime) - } - } - - // default reflect.String: - return field.String() > topField.String() -} - -// IsNeCrossStructField is the validation function for validating that the current field's value is not equal to the field, within a separate struct, specified by the param's value. -// NOTE: This is exposed for use within your own custom functions and not intended to be called directly. -func IsNeCrossStructField(v *Validate, topStruct reflect.Value, current reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool { - - topField, currentKind, ok := v.GetStructFieldOK(topStruct, param) - if !ok || currentKind != fieldKind { - return true - } - - switch fieldKind { - - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return topField.Int() != field.Int() - - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return topField.Uint() != field.Uint() - - case reflect.Float32, reflect.Float64: - return topField.Float() != field.Float() - - case reflect.Slice, reflect.Map, reflect.Array: - return int64(topField.Len()) != int64(field.Len()) - - case reflect.Struct: - - // Not Same underlying type i.e. struct and time - if fieldType != topField.Type() { - return true - } - - if fieldType == timeType { - - t := field.Interface().(time.Time) - fieldTime := topField.Interface().(time.Time) - - return !fieldTime.Equal(t) - } - } - - // default reflect.String: - return topField.String() != field.String() -} - -// IsEqCrossStructField is the validation function for validating that the current field's value is equal to the field, within a separate struct, specified by the param's value. -// NOTE: This is exposed for use within your own custom functions and not intended to be called directly. 
-func IsEqCrossStructField(v *Validate, topStruct reflect.Value, current reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool { - - topField, topKind, ok := v.GetStructFieldOK(topStruct, param) - if !ok || topKind != fieldKind { - return false - } - - switch fieldKind { - - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return topField.Int() == field.Int() - - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return topField.Uint() == field.Uint() - - case reflect.Float32, reflect.Float64: - return topField.Float() == field.Float() - - case reflect.Slice, reflect.Map, reflect.Array: - return int64(topField.Len()) == int64(field.Len()) - - case reflect.Struct: - - // Not Same underlying type i.e. struct and time - if fieldType != topField.Type() { - return false - } - - if fieldType == timeType { - - t := field.Interface().(time.Time) - fieldTime := topField.Interface().(time.Time) - - return fieldTime.Equal(t) - } - } - - // default reflect.String: - return topField.String() == field.String() -} - -// IsEqField is the validation function for validating if the current field's value is equal to the field specified by the param's value. -// NOTE: This is exposed for use within your own custom functions and not intended to be called directly. -func IsEqField(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool { - - currentField, currentKind, ok := v.GetStructFieldOK(currentStructOrField, param) - if !ok || currentKind != fieldKind { - return false - } - - switch fieldKind { - - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return field.Int() == currentField.Int() - - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return field.Uint() == currentField.Uint() - - case reflect.Float32, reflect.Float64: - return field.Float() == currentField.Float() - - case reflect.Slice, reflect.Map, reflect.Array: - return int64(field.Len()) == int64(currentField.Len()) - - case reflect.Struct: - - // Not Same underlying type i.e. struct and time - if fieldType != currentField.Type() { - return false - } - - if fieldType == timeType { - - t := currentField.Interface().(time.Time) - fieldTime := field.Interface().(time.Time) - - return fieldTime.Equal(t) - } - - } - - // default reflect.String: - return field.String() == currentField.String() -} - -// IsEq is the validation function for validating if the current field's value is equal to the param's value. -// NOTE: This is exposed for use within your own custom functions and not intended to be called directly. 
-func IsEq(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool { - - switch fieldKind { - - case reflect.String: - return field.String() == param - - case reflect.Slice, reflect.Map, reflect.Array: - p := asInt(param) - - return int64(field.Len()) == p - - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - p := asInt(param) - - return field.Int() == p - - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - p := asUint(param) - - return field.Uint() == p - - case reflect.Float32, reflect.Float64: - p := asFloat(param) - - return field.Float() == p - } - - panic(fmt.Sprintf("Bad field type %T", field.Interface())) -} - -// IsBase64 is the validation function for validating if the current field's value is a valid base 64. -// NOTE: This is exposed for use within your own custom functions and not intended to be called directly. -func IsBase64(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool { - return base64Regex.MatchString(field.String()) -} - -// IsURI is the validation function for validating if the current field's value is a valid URI. -// NOTE: This is exposed for use within your own custom functions and not intended to be called directly. -func IsURI(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool { - - switch fieldKind { - - case reflect.String: - - s := field.String() - - // checks needed as of Go 1.6 because of change https://github.com/golang/go/commit/617c93ce740c3c3cc28cdd1a0d712be183d0b328#diff-6c2d018290e298803c0c9419d8739885L195 - // emulate browser and strip the '#' suffix prior to validation. see issue-#237 - if i := strings.Index(s, "#"); i > -1 { - s = s[:i] - } - - if s == blank { - return false - } - - _, err := url.ParseRequestURI(s) - - return err == nil - } - - panic(fmt.Sprintf("Bad field type %T", field.Interface())) -} - -// IsURL is the validation function for validating if the current field's value is a valid URL. -// NOTE: This is exposed for use within your own custom functions and not intended to be called directly. -func IsURL(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool { - - switch fieldKind { - - case reflect.String: - - var i int - s := field.String() - - // checks needed as of Go 1.6 because of change https://github.com/golang/go/commit/617c93ce740c3c3cc28cdd1a0d712be183d0b328#diff-6c2d018290e298803c0c9419d8739885L195 - // emulate browser and strip the '#' suffix prior to validation. see issue-#237 - if i = strings.Index(s, "#"); i > -1 { - s = s[:i] - } - - if s == blank { - return false - } - - url, err := url.ParseRequestURI(s) - - if err != nil || url.Scheme == blank { - return false - } - - return err == nil - } - - panic(fmt.Sprintf("Bad field type %T", field.Interface())) -} - -// IsEmail is the validation function for validating if the current field's value is a valid email address. -// NOTE: This is exposed for use within your own custom functions and not intended to be called directly. 
-func IsEmail(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool { - return emailRegex.MatchString(field.String()) -} - -// IsHSLA is the validation function for validating if the current field's value is a valid HSLA color. -// NOTE: This is exposed for use within your own custom functions and not intended to be called directly. -func IsHSLA(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool { - return hslaRegex.MatchString(field.String()) -} - -// IsHSL is the validation function for validating if the current field's value is a valid HSL color. -// NOTE: This is exposed for use within your own custom functions and not intended to be called directly. -func IsHSL(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool { - return hslRegex.MatchString(field.String()) -} - -// IsRGBA is the validation function for validating if the current field's value is a valid RGBA color. -// NOTE: This is exposed for use within your own custom functions and not intended to be called directly. -func IsRGBA(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool { - return rgbaRegex.MatchString(field.String()) -} - -// IsRGB is the validation function for validating if the current field's value is a valid RGB color. -// NOTE: This is exposed for use within your own custom functions and not intended to be called directly. -func IsRGB(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool { - return rgbRegex.MatchString(field.String()) -} - -// IsHEXColor is the validation function for validating if the current field's value is a valid HEX color. -// NOTE: This is exposed for use within your own custom functions and not intended to be called directly. -func IsHEXColor(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool { - return hexcolorRegex.MatchString(field.String()) -} - -// IsHexadecimal is the validation function for validating if the current field's value is a valid hexadecimal. -// NOTE: This is exposed for use within your own custom functions and not intended to be called directly. -func IsHexadecimal(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool { - return hexadecimalRegex.MatchString(field.String()) -} - -// IsNumber is the validation function for validating if the current field's value is a valid number. -// NOTE: This is exposed for use within your own custom functions and not intended to be called directly. -func IsNumber(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool { - return numberRegex.MatchString(field.String()) -} - -// IsNumeric is the validation function for validating if the current field's value is a valid numeric value. 
-// NOTE: This is exposed for use within your own custom functions and not intended to be called directly. -func IsNumeric(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool { - return numericRegex.MatchString(field.String()) -} - -// IsAlphanum is the validation function for validating if the current field's value is a valid alphanumeric value. -// NOTE: This is exposed for use within your own custom functions and not intended to be called directly. -func IsAlphanum(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool { - return alphaNumericRegex.MatchString(field.String()) -} - -// IsAlpha is the validation function for validating if the current field's value is a valid alpha value. -// NOTE: This is exposed for use within your own custom functions and not intended to be called directly. -func IsAlpha(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool { - return alphaRegex.MatchString(field.String()) -} - -// HasValue is the validation function for validating if the current field's value is not the default static value. -// NOTE: This is exposed for use within your own custom functions and not intended to be called directly. -func HasValue(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool { - - switch fieldKind { - case reflect.Slice, reflect.Map, reflect.Ptr, reflect.Interface, reflect.Chan, reflect.Func: - return !field.IsNil() - default: - return field.IsValid() && field.Interface() != reflect.Zero(fieldType).Interface() - } -} - -// IsGteField is the validation function for validating if the current field's value is greater than or equal to the field specified by the param's value. -// NOTE: This is exposed for use within your own custom functions and not intended to be called directly. -func IsGteField(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool { - - currentField, currentKind, ok := v.GetStructFieldOK(currentStructOrField, param) - if !ok || currentKind != fieldKind { - return false - } - - switch fieldKind { - - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - - return field.Int() >= currentField.Int() - - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - - return field.Uint() >= currentField.Uint() - - case reflect.Float32, reflect.Float64: - - return field.Float() >= currentField.Float() - - case reflect.Struct: - - // Not Same underlying type i.e. struct and time - if fieldType != currentField.Type() { - return false - } - - if fieldType == timeType { - - t := currentField.Interface().(time.Time) - fieldTime := field.Interface().(time.Time) - - return fieldTime.After(t) || fieldTime.Equal(t) - } - } - - // default reflect.String - return len(field.String()) >= len(currentField.String()) -} - -// IsGtField is the validation function for validating if the current field's value is greater than the field specified by the param's value. -// NOTE: This is exposed for use within your own custom functions and not intended to be called directly. 
-func IsGtField(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool { - - currentField, currentKind, ok := v.GetStructFieldOK(currentStructOrField, param) - if !ok || currentKind != fieldKind { - return false - } - - switch fieldKind { - - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - - return field.Int() > currentField.Int() - - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - - return field.Uint() > currentField.Uint() - - case reflect.Float32, reflect.Float64: - - return field.Float() > currentField.Float() - - case reflect.Struct: - - // Not Same underlying type i.e. struct and time - if fieldType != currentField.Type() { - return false - } - - if fieldType == timeType { - - t := currentField.Interface().(time.Time) - fieldTime := field.Interface().(time.Time) - - return fieldTime.After(t) - } - } - - // default reflect.String - return len(field.String()) > len(currentField.String()) -} - -// IsGte is the validation function for validating if the current field's value is greater than or equal to the param's value. -// NOTE: This is exposed for use within your own custom functions and not intended to be called directly. -func IsGte(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool { - - switch fieldKind { - - case reflect.String: - p := asInt(param) - - return int64(utf8.RuneCountInString(field.String())) >= p - - case reflect.Slice, reflect.Map, reflect.Array: - p := asInt(param) - - return int64(field.Len()) >= p - - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - p := asInt(param) - - return field.Int() >= p - - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - p := asUint(param) - - return field.Uint() >= p - - case reflect.Float32, reflect.Float64: - p := asFloat(param) - - return field.Float() >= p - - case reflect.Struct: - - if fieldType == timeType || fieldType == timePtrType { - - now := time.Now().UTC() - t := field.Interface().(time.Time) - - return t.After(now) || t.Equal(now) - } - } - - panic(fmt.Sprintf("Bad field type %T", field.Interface())) -} - -// IsGt is the validation function for validating if the current field's value is greater than the param's value. -// NOTE: This is exposed for use within your own custom functions and not intended to be called directly. 
-func IsGt(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool { - - switch fieldKind { - - case reflect.String: - p := asInt(param) - - return int64(utf8.RuneCountInString(field.String())) > p - - case reflect.Slice, reflect.Map, reflect.Array: - p := asInt(param) - - return int64(field.Len()) > p - - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - p := asInt(param) - - return field.Int() > p - - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - p := asUint(param) - - return field.Uint() > p - - case reflect.Float32, reflect.Float64: - p := asFloat(param) - - return field.Float() > p - case reflect.Struct: - - if field.Type() == timeType || field.Type() == timePtrType { - - return field.Interface().(time.Time).After(time.Now().UTC()) - } - } - - panic(fmt.Sprintf("Bad field type %T", field.Interface())) -} - -// HasLengthOf is the validation function for validating if the current field's value is equal to the param's value. -// NOTE: This is exposed for use within your own custom functions and not intended to be called directly. -func HasLengthOf(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool { - - switch fieldKind { - - case reflect.String: - p := asInt(param) - - return int64(utf8.RuneCountInString(field.String())) == p - - case reflect.Slice, reflect.Map, reflect.Array: - p := asInt(param) - - return int64(field.Len()) == p - - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - p := asInt(param) - - return field.Int() == p - - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - p := asUint(param) - - return field.Uint() == p - - case reflect.Float32, reflect.Float64: - p := asFloat(param) - - return field.Float() == p - } - - panic(fmt.Sprintf("Bad field type %T", field.Interface())) -} - -// HasMinOf is the validation function for validating if the current field's value is greater than or equal to the param's value. -// NOTE: This is exposed for use within your own custom functions and not intended to be called directly. -func HasMinOf(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool { - - return IsGte(v, topStruct, currentStructOrField, field, fieldType, fieldKind, param) -} - -// IsLteField is the validation function for validating if the current field's value is less than or equal to the field specified by the param's value. -// NOTE: This is exposed for use within your own custom functions and not intended to be called directly. 
-func IsLteField(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool { - - currentField, currentKind, ok := v.GetStructFieldOK(currentStructOrField, param) - if !ok || currentKind != fieldKind { - return false - } - - switch fieldKind { - - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - - return field.Int() <= currentField.Int() - - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - - return field.Uint() <= currentField.Uint() - - case reflect.Float32, reflect.Float64: - - return field.Float() <= currentField.Float() - - case reflect.Struct: - - // Not Same underlying type i.e. struct and time - if fieldType != currentField.Type() { - return false - } - - if fieldType == timeType { - - t := currentField.Interface().(time.Time) - fieldTime := field.Interface().(time.Time) - - return fieldTime.Before(t) || fieldTime.Equal(t) - } - } - - // default reflect.String - return len(field.String()) <= len(currentField.String()) -} - -// IsLtField is the validation function for validating if the current field's value is less than the field specified by the param's value. -// NOTE: This is exposed for use within your own custom functions and not intended to be called directly. -func IsLtField(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool { - - currentField, currentKind, ok := v.GetStructFieldOK(currentStructOrField, param) - if !ok || currentKind != fieldKind { - return false - } - - switch fieldKind { - - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - - return field.Int() < currentField.Int() - - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - - return field.Uint() < currentField.Uint() - - case reflect.Float32, reflect.Float64: - - return field.Float() < currentField.Float() - - case reflect.Struct: - - // Not Same underlying type i.e. struct and time - if fieldType != currentField.Type() { - return false - } - - if fieldType == timeType { - - t := currentField.Interface().(time.Time) - fieldTime := field.Interface().(time.Time) - - return fieldTime.Before(t) - } - } - - // default reflect.String - return len(field.String()) < len(currentField.String()) -} - -// IsLte is the validation function for validating if the current field's value is less than or equal to the param's value. -// NOTE: This is exposed for use within your own custom functions and not intended to be called directly. 
-func IsLte(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool { - - switch fieldKind { - - case reflect.String: - p := asInt(param) - - return int64(utf8.RuneCountInString(field.String())) <= p - - case reflect.Slice, reflect.Map, reflect.Array: - p := asInt(param) - - return int64(field.Len()) <= p - - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - p := asInt(param) - - return field.Int() <= p - - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - p := asUint(param) - - return field.Uint() <= p - - case reflect.Float32, reflect.Float64: - p := asFloat(param) - - return field.Float() <= p - - case reflect.Struct: - - if fieldType == timeType || fieldType == timePtrType { - - now := time.Now().UTC() - t := field.Interface().(time.Time) - - return t.Before(now) || t.Equal(now) - } - } - - panic(fmt.Sprintf("Bad field type %T", field.Interface())) -} - -// IsLt is the validation function for validating if the current field's value is less than the param's value. -// NOTE: This is exposed for use within your own custom functions and not intended to be called directly. -func IsLt(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool { - - switch fieldKind { - - case reflect.String: - p := asInt(param) - - return int64(utf8.RuneCountInString(field.String())) < p - - case reflect.Slice, reflect.Map, reflect.Array: - p := asInt(param) - - return int64(field.Len()) < p - - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - p := asInt(param) - - return field.Int() < p - - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - p := asUint(param) - - return field.Uint() < p - - case reflect.Float32, reflect.Float64: - p := asFloat(param) - - return field.Float() < p - - case reflect.Struct: - - if field.Type() == timeType || field.Type() == timePtrType { - - return field.Interface().(time.Time).Before(time.Now().UTC()) - } - } - - panic(fmt.Sprintf("Bad field type %T", field.Interface())) -} - -// HasMaxOf is the validation function for validating if the current field's value is less than or equal to the param's value. -// NOTE: This is exposed for use within your own custom functions and not intended to be called directly. -func HasMaxOf(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool { - return IsLte(v, topStruct, currentStructOrField, field, fieldType, fieldKind, param) -} - -// IsTCP4AddrResolvable is the validation function for validating if the field's value is a resolvable tcp4 address. -// NOTE: This is exposed for use within your own custom functions and not intended to be called directly. -func IsTCP4AddrResolvable(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool { - - if !isIP4Addr(v, topStruct, currentStructOrField, field, fieldType, fieldKind, param) { - return false - } - - _, err := net.ResolveTCPAddr("tcp4", field.String()) - return err == nil -} - -// IsTCP6AddrResolvable is the validation function for validating if the field's value is a resolvable tcp6 address. 
-// NOTE: This is exposed for use within your own custom functions and not intended to be called directly. -func IsTCP6AddrResolvable(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool { - - if !isIP6Addr(v, topStruct, currentStructOrField, field, fieldType, fieldKind, param) { - return false - } - - _, err := net.ResolveTCPAddr("tcp6", field.String()) - return err == nil -} - -// IsTCPAddrResolvable is the validation function for validating if the field's value is a resolvable tcp address. -// NOTE: This is exposed for use within your own custom functions and not intended to be called directly. -func IsTCPAddrResolvable(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool { - - if !isIP4Addr(v, topStruct, currentStructOrField, field, fieldType, fieldKind, param) && - !isIP6Addr(v, topStruct, currentStructOrField, field, fieldType, fieldKind, param) { - return false - } - - _, err := net.ResolveTCPAddr("tcp", field.String()) - return err == nil -} - -// IsUDP4AddrResolvable is the validation function for validating if the field's value is a resolvable udp4 address. -// NOTE: This is exposed for use within your own custom functions and not intended to be called directly. -func IsUDP4AddrResolvable(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool { - - if !isIP4Addr(v, topStruct, currentStructOrField, field, fieldType, fieldKind, param) { - return false - } - - _, err := net.ResolveUDPAddr("udp4", field.String()) - return err == nil -} - -// IsUDP6AddrResolvable is the validation function for validating if the field's value is a resolvable udp6 address. -// NOTE: This is exposed for use within your own custom functions and not intended to be called directly. -func IsUDP6AddrResolvable(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool { - - if !isIP6Addr(v, topStruct, currentStructOrField, field, fieldType, fieldKind, param) { - return false - } - - _, err := net.ResolveUDPAddr("udp6", field.String()) - return err == nil -} - -// IsUDPAddrResolvable is the validation function for validating if the field's value is a resolvable udp address. -// NOTE: This is exposed for use within your own custom functions and not intended to be called directly. -func IsUDPAddrResolvable(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool { - - if !isIP4Addr(v, topStruct, currentStructOrField, field, fieldType, fieldKind, param) && - !isIP6Addr(v, topStruct, currentStructOrField, field, fieldType, fieldKind, param) { - return false - } - - _, err := net.ResolveUDPAddr("udp", field.String()) - return err == nil -} - -// IsIP4AddrResolvable is the validation function for validating if the field's value is a resolvable ip4 address. -// NOTE: This is exposed for use within your own custom functions and not intended to be called directly. 
-func IsIP4AddrResolvable(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool { - - if !IsIPv4(v, topStruct, currentStructOrField, field, fieldType, fieldKind, param) { - return false - } - - _, err := net.ResolveIPAddr("ip4", field.String()) - return err == nil -} - -// IsIP6AddrResolvable is the validation function for validating if the field's value is a resolvable ip6 address. -// NOTE: This is exposed for use within your own custom functions and not intended to be called directly. -func IsIP6AddrResolvable(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool { - - if !IsIPv6(v, topStruct, currentStructOrField, field, fieldType, fieldKind, param) { - return false - } - - _, err := net.ResolveIPAddr("ip6", field.String()) - return err == nil -} - -// IsIPAddrResolvable is the validation function for validating if the field's value is a resolvable ip address. -// NOTE: This is exposed for use within your own custom functions and not intended to be called directly. -func IsIPAddrResolvable(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool { - - if !IsIP(v, topStruct, currentStructOrField, field, fieldType, fieldKind, param) { - return false - } - - _, err := net.ResolveIPAddr("ip", field.String()) - return err == nil -} - -// IsUnixAddrResolvable is the validation function for validating if the field's value is a resolvable unix address. -// NOTE: This is exposed for use within your own custom functions and not intended to be called directly. 
-func IsUnixAddrResolvable(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool { - _, err := net.ResolveUnixAddr("unix", field.String()) - return err == nil -} - -func isIP4Addr(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool { - val := field.String() - - if idx := strings.LastIndex(val, ":"); idx != -1 { - val = val[0:idx] - } - - if !IsIPv4(v, topStruct, currentStructOrField, reflect.ValueOf(val), fieldType, fieldKind, param) { - return false - } - - return true -} - -func isIP6Addr(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool { - val := field.String() - - if idx := strings.LastIndex(val, ":"); idx != -1 { - if idx != 0 && val[idx-1:idx] == "]" { - val = val[1 : idx-1] - } - } - - if !IsIPv6(v, topStruct, currentStructOrField, reflect.ValueOf(val), fieldType, fieldKind, param) { - return false - } - - return true -} diff --git a/vendor/gopkg.in/go-playground/validator.v8/cache.go b/vendor/gopkg.in/go-playground/validator.v8/cache.go deleted file mode 100644 index 289226e..0000000 --- a/vendor/gopkg.in/go-playground/validator.v8/cache.go +++ /dev/null @@ -1,71 +0,0 @@ -package validator - -import ( - "reflect" - "sync" -) - -type cachedField struct { - Idx int - Name string - AltName string - CachedTag *cachedTag -} - -type cachedStruct struct { - Name string - fields map[int]cachedField -} - -type structCacheMap struct { - lock sync.RWMutex - m map[reflect.Type]*cachedStruct -} - -func (s *structCacheMap) Get(key reflect.Type) (*cachedStruct, bool) { - s.lock.RLock() - value, ok := s.m[key] - s.lock.RUnlock() - return value, ok -} - -func (s *structCacheMap) Set(key reflect.Type, value *cachedStruct) { - s.lock.Lock() - s.m[key] = value - s.lock.Unlock() -} - -type cachedTag struct { - tag string - isOmitEmpty bool - isNoStructLevel bool - isStructOnly bool - diveTag string - tags []*tagVals -} - -type tagVals struct { - tagVals [][]string - isOrVal bool - isAlias bool - tag string -} - -type tagCacheMap struct { - lock sync.RWMutex - m map[string]*cachedTag -} - -func (s *tagCacheMap) Get(key string) (*cachedTag, bool) { - s.lock.RLock() - value, ok := s.m[key] - s.lock.RUnlock() - - return value, ok -} - -func (s *tagCacheMap) Set(key string, value *cachedTag) { - s.lock.Lock() - s.m[key] = value - s.lock.Unlock() -} diff --git a/vendor/gopkg.in/go-playground/validator.v8/doc.go b/vendor/gopkg.in/go-playground/validator.v8/doc.go deleted file mode 100644 index c351a61..0000000 --- a/vendor/gopkg.in/go-playground/validator.v8/doc.go +++ /dev/null @@ -1,852 +0,0 @@ -/* -Package validator implements value validations for structs and individual fields -based on tags. - -It can also handle Cross-Field and Cross-Struct validation for nested structs -and has the ability to dive into arrays and maps of any type. - -Why not a better error message? -Because this library intends for you to handle your own error messages. - -Why should I handle my own errors? -Many reasons. We built an internationalized application and needed to know the -field, and what validation failed so we could provide a localized error. 
- - if fieldErr.Field == "Name" { - switch fieldErr.ErrorTag - case "required": - return "Translated string based on field + error" - default: - return "Translated string based on field" - } - - -Validation Functions Return Type error - -Doing things this way is actually the way the standard library does, see the -file.Open method here: - - https://golang.org/pkg/os/#Open. - -The authors return type "error" to avoid the issue discussed in the following, -where err is always != nil: - - http://stackoverflow.com/a/29138676/3158232 - https://github.com/go-playground/validator/issues/134 - -Validator only returns nil or ValidationErrors as type error; so, in your code -all you need to do is check if the error returned is not nil, and if it's not -type cast it to type ValidationErrors like so err.(validator.ValidationErrors). - -Custom Functions - -Custom functions can be added. Example: - - // Structure - func customFunc(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool { - - if whatever { - return false - } - - return true - } - - validate.RegisterValidation("custom tag name", customFunc) - // NOTES: using the same tag name as an existing function - // will overwrite the existing one - -Cross-Field Validation - -Cross-Field Validation can be done via the following tags: - - eqfield - - nefield - - gtfield - - gtefield - - ltfield - - ltefield - - eqcsfield - - necsfield - - gtcsfield - - ftecsfield - - ltcsfield - - ltecsfield - -If, however, some custom cross-field validation is required, it can be done -using a custom validation. - -Why not just have cross-fields validation tags (i.e. only eqcsfield and not -eqfield)? - -The reason is efficiency. If you want to check a field within the same struct -"eqfield" only has to find the field on the same struct (1 level). But, if we -used "eqcsfield" it could be multiple levels down. Example: - - type Inner struct { - StartDate time.Time - } - - type Outer struct { - InnerStructField *Inner - CreatedAt time.Time `validate:"ltecsfield=InnerStructField.StartDate"` - } - - now := time.Now() - - inner := &Inner{ - StartDate: now, - } - - outer := &Outer{ - InnerStructField: inner, - CreatedAt: now, - } - - errs := validate.Struct(outer) - - // NOTE: when calling validate.Struct(val) topStruct will be the top level struct passed - // into the function - // when calling validate.FieldWithValue(val, field, tag) val will be - // whatever you pass, struct, field... - // when calling validate.Field(field, tag) val will be nil - -Multiple Validators - -Multiple validators on a field will process in the order defined. Example: - - type Test struct { - Field `validate:"max=10,min=1"` - } - - // max will be checked then min - -Bad Validator definitions are not handled by the library. Example: - - type Test struct { - Field `validate:"min=10,max=0"` - } - - // this definition of min max will never succeed - -Using Validator Tags - -Baked In Cross-Field validation only compares fields on the same struct. -If Cross-Field + Cross-Struct validation is needed you should implement your -own custom validator. - -Comma (",") is the default separator of validation tags. If you wish to -have a comma included within the parameter (i.e. excludesall=,) you will need to -use the UTF-8 hex representation 0x2C, which is replaced in the code as a comma, -so the above will become excludesall=0x2C. - - type Test struct { - Field `validate:"excludesall=,"` // BAD! 
Do not include a comma. - Field `validate:"excludesall=0x2C"` // GOOD! Use the UTF-8 hex representation. - } - -Pipe ("|") is the default separator of validation tags. If you wish to -have a pipe included within the parameter i.e. excludesall=| you will need to -use the UTF-8 hex representation 0x7C, which is replaced in the code as a pipe, -so the above will become excludesall=0x7C - - type Test struct { - Field `validate:"excludesall=|"` // BAD! Do not include a a pipe! - Field `validate:"excludesall=0x7C"` // GOOD! Use the UTF-8 hex representation. - } - - -Baked In Validators and Tags - -Here is a list of the current built in validators: - - -Skip Field - -Tells the validation to skip this struct field; this is particularily -handy in ignoring embedded structs from being validated. (Usage: -) - Usage: - - - -Or Operator - -This is the 'or' operator allowing multiple validators to be used and -accepted. (Usage: rbg|rgba) <-- this would allow either rgb or rgba -colors to be accepted. This can also be combined with 'and' for example -( Usage: omitempty,rgb|rgba) - - Usage: | - -StructOnly - -When a field that is a nested struct is encountered, and contains this flag -any validation on the nested struct will be run, but none of the nested -struct fields will be validated. This is usefull if inside of you program -you know the struct will be valid, but need to verify it has been assigned. -NOTE: only "required" and "omitempty" can be used on a struct itself. - - Usage: structonly - -NoStructLevel - -Same as structonly tag except that any struct level validations will not run. - - Usage: nostructlevel - -Exists - -Is a special tag without a validation function attached. It is used when a field -is a Pointer, Interface or Invalid and you wish to validate that it exists. -Example: want to ensure a bool exists if you define the bool as a pointer and -use exists it will ensure there is a value; couldn't use required as it would -fail when the bool was false. exists will fail is the value is a Pointer, Interface -or Invalid and is nil. - - Usage: exists - -Omit Empty - -Allows conditional validation, for example if a field is not set with -a value (Determined by the "required" validator) then other validation -such as min or max won't run, but if a value is set validation will run. - - Usage: omitempty - -Dive - -This tells the validator to dive into a slice, array or map and validate that -level of the slice, array or map with the validation tags that follow. -Multidimensional nesting is also supported, each level you wish to dive will -require another dive tag. - - Usage: dive - -Example #1 - - [][]string with validation tag "gt=0,dive,len=1,dive,required" - // gt=0 will be applied to [] - // len=1 will be applied to []string - // required will be applied to string - -Example #2 - - [][]string with validation tag "gt=0,dive,dive,required" - // gt=0 will be applied to [] - // []string will be spared validation - // required will be applied to string - -Required - -This validates that the value is not the data types default zero value. -For numbers ensures value is not zero. For strings ensures value is -not "". For slices, maps, pointers, interfaces, channels and functions -ensures the value is not nil. - - Usage: required - -Length - -For numbers, max will ensure that the value is -equal to the parameter given. For strings, it checks that -the string length is exactly that number of characters. For slices, -arrays, and maps, validates the number of items. 
- - Usage: len=10 - -Maximum - -For numbers, max will ensure that the value is -less than or equal to the parameter given. For strings, it checks -that the string length is at most that number of characters. For -slices, arrays, and maps, validates the number of items. - - Usage: max=10 - -Mininum - -For numbers, min will ensure that the value is -greater or equal to the parameter given. For strings, it checks that -the string length is at least that number of characters. For slices, -arrays, and maps, validates the number of items. - - Usage: min=10 - -Equals - -For strings & numbers, eq will ensure that the value is -equal to the parameter given. For slices, arrays, and maps, -validates the number of items. - - Usage: eq=10 - -Not Equal - -For strings & numbers, eq will ensure that the value is not -equal to the parameter given. For slices, arrays, and maps, -validates the number of items. - - Usage: eq=10 - -Greater Than - -For numbers, this will ensure that the value is greater than the -parameter given. For strings, it checks that the string length -is greater than that number of characters. For slices, arrays -and maps it validates the number of items. - -Example #1 - - Usage: gt=10 - -Example #2 (time.Time) - -For time.Time ensures the time value is greater than time.Now.UTC(). - - Usage: gt - -Greater Than or Equal - -Same as 'min' above. Kept both to make terminology with 'len' easier. - - -Example #1 - - Usage: gte=10 - -Example #2 (time.Time) - -For time.Time ensures the time value is greater than or equal to time.Now.UTC(). - - Usage: gte - -Less Than - -For numbers, this will ensure that the value is less than the parameter given. -For strings, it checks that the string length is less than that number of -characters. For slices, arrays, and maps it validates the number of items. - -Example #1 - - Usage: lt=10 - -Example #2 (time.Time) -For time.Time ensures the time value is less than time.Now.UTC(). - - Usage: lt - -Less Than or Equal - -Same as 'max' above. Kept both to make terminology with 'len' easier. - -Example #1 - - Usage: lte=10 - -Example #2 (time.Time) - -For time.Time ensures the time value is less than or equal to time.Now.UTC(). - - Usage: lte - -Field Equals Another Field - -This will validate the field value against another fields value either within -a struct or passed in field. - -Example #1: - - // Validation on Password field using: - Usage: eqfield=ConfirmPassword - -Example #2: - - // Validating by field: - validate.FieldWithValue(password, confirmpassword, "eqfield") - -Field Equals Another Field (relative) - -This does the same as eqfield except that it validates the field provided relative -to the top level struct. - - Usage: eqcsfield=InnerStructField.Field) - -Field Does Not Equal Another Field - -This will validate the field value against another fields value either within -a struct or passed in field. - -Examples: - - // Confirm two colors are not the same: - // - // Validation on Color field: - Usage: nefield=Color2 - - // Validating by field: - validate.FieldWithValue(color1, color2, "nefield") - -Field Does Not Equal Another Field (relative) - -This does the same as nefield except that it validates the field provided -relative to the top level struct. - - Usage: necsfield=InnerStructField.Field - -Field Greater Than Another Field - -Only valid for Numbers and time.Time types, this will validate the field value -against another fields value either within a struct or passed in field. 
-usage examples are for validation of a Start and End date: - -Example #1: - - // Validation on End field using: - validate.Struct Usage(gtfield=Start) - -Example #2: - - // Validating by field: - validate.FieldWithValue(start, end, "gtfield") - - -Field Greater Than Another Relative Field - -This does the same as gtfield except that it validates the field provided -relative to the top level struct. - - Usage: gtcsfield=InnerStructField.Field - -Field Greater Than or Equal To Another Field - -Only valid for Numbers and time.Time types, this will validate the field value -against another fields value either within a struct or passed in field. -usage examples are for validation of a Start and End date: - -Example #1: - - // Validation on End field using: - validate.Struct Usage(gtefield=Start) - -Example #2: - - // Validating by field: - validate.FieldWithValue(start, end, "gtefield") - -Field Greater Than or Equal To Another Relative Field - -This does the same as gtefield except that it validates the field provided relative -to the top level struct. - - Usage: gtecsfield=InnerStructField.Field - -Less Than Another Field - -Only valid for Numbers and time.Time types, this will validate the field value -against another fields value either within a struct or passed in field. -usage examples are for validation of a Start and End date: - -Example #1: - - // Validation on End field using: - validate.Struct Usage(ltfield=Start) - -Example #2: - - // Validating by field: - validate.FieldWithValue(start, end, "ltfield") - -Less Than Another Relative Field - -This does the same as ltfield except that it validates the field provided relative -to the top level struct. - - Usage: ltcsfield=InnerStructField.Field - -Less Than or Equal To Another Field - -Only valid for Numbers and time.Time types, this will validate the field value -against another fields value either within a struct or passed in field. -usage examples are for validation of a Start and End date: - -Example #1: - - // Validation on End field using: - validate.Struct Usage(ltefield=Start) - -Example #2: - - // Validating by field: - validate.FieldWithValue(start, end, "ltefield") - -Less Than or Equal To Another Relative Field - -This does the same as ltefield except that it validates the field provided relative -to the top level struct. - - Usage: ltecsfield=InnerStructField.Field - -Alpha Only - -This validates that a string value contains alpha characters only - - Usage: alpha - -Alphanumeric - -This validates that a string value contains alphanumeric characters only - - Usage: alphanum - -Numeric - -This validates that a string value contains a basic numeric value. -basic excludes exponents etc... - - Usage: numeric - -Hexadecimal String - -This validates that a string value contains a valid hexadecimal. - - Usage: hexadecimal - -Hexcolor String - -This validates that a string value contains a valid hex color including -hashtag (#) - - Usage: hexcolor - -RGB String - -This validates that a string value contains a valid rgb color - - Usage: rgb - -RGBA String - -This validates that a string value contains a valid rgba color - - Usage: rgba - -HSL String - -This validates that a string value contains a valid hsl color - - Usage: hsl - -HSLA String - -This validates that a string value contains a valid hsla color - - Usage: hsla - -E-mail String - -This validates that a string value contains a valid email -This may not conform to all possibilities of any rfc standard, but neither -does any email provider accept all posibilities. 
- - Usage: email - -URL String - -This validates that a string value contains a valid url -This will accept any url the golang request uri accepts but must contain -a schema for example http:// or rtmp:// - - Usage: url - -URI String - -This validates that a string value contains a valid uri -This will accept any uri the golang request uri accepts - - Usage: uri - -Base64 String - -This validates that a string value contains a valid base64 value. -Although an empty string is valid base64 this will report an empty string -as an error, if you wish to accept an empty string as valid you can use -this with the omitempty tag. - - Usage: base64 - -Contains - -This validates that a string value contains the substring value. - - Usage: contains=@ - -Contains Any - -This validates that a string value contains any Unicode code points -in the substring value. - - Usage: containsany=!@#? - -Contains Rune - -This validates that a string value contains the supplied rune value. - - Usage: containsrune=@ - -Excludes - -This validates that a string value does not contain the substring value. - - Usage: excludes=@ - -Excludes All - -This validates that a string value does not contain any Unicode code -points in the substring value. - - Usage: excludesall=!@#? - -Excludes Rune - -This validates that a string value does not contain the supplied rune value. - - Usage: excludesrune=@ - -International Standard Book Number - -This validates that a string value contains a valid isbn10 or isbn13 value. - - Usage: isbn - -International Standard Book Number 10 - -This validates that a string value contains a valid isbn10 value. - - Usage: isbn10 - -International Standard Book Number 13 - -This validates that a string value contains a valid isbn13 value. - - Usage: isbn13 - - -Universally Unique Identifier UUID - -This validates that a string value contains a valid UUID. - - Usage: uuid - -Universally Unique Identifier UUID v3 - -This validates that a string value contains a valid version 3 UUID. - - Usage: uuid3 - -Universally Unique Identifier UUID v4 - -This validates that a string value contains a valid version 4 UUID. - - Usage: uuid4 - -Universally Unique Identifier UUID v5 - -This validates that a string value contains a valid version 5 UUID. - - Usage: uuid5 - -ASCII - -This validates that a string value contains only ASCII characters. -NOTE: if the string is blank, this validates as true. - - Usage: ascii - -Printable ASCII - -This validates that a string value contains only printable ASCII characters. -NOTE: if the string is blank, this validates as true. - - Usage: asciiprint - -Multi-Byte Characters - -This validates that a string value contains one or more multibyte characters. -NOTE: if the string is blank, this validates as true. - - Usage: multibyte - -Data URL - -This validates that a string value contains a valid DataURI. -NOTE: this will also validate that the data portion is valid base64 - - Usage: datauri - -Latitude - -This validates that a string value contains a valid latitude. - - Usage: latitude - -Longitude - -This validates that a string value contains a valid longitude. - - Usage: longitude - -Social Security Number SSN - -This validates that a string value contains a valid U.S. Social Security Number. - - Usage: ssn - -Internet Protocol Address IP - -This validates that a string value contains a valid IP Adress. - - Usage: ip - -Internet Protocol Address IPv4 - -This validates that a string value contains a valid v4 IP Adress. 
- - Usage: ipv4 - -Internet Protocol Address IPv6 - -This validates that a string value contains a valid v6 IP Adress. - - Usage: ipv6 - -Classless Inter-Domain Routing CIDR - -This validates that a string value contains a valid CIDR Adress. - - Usage: cidr - -Classless Inter-Domain Routing CIDRv4 - -This validates that a string value contains a valid v4 CIDR Adress. - - Usage: cidrv4 - -Classless Inter-Domain Routing CIDRv6 - -This validates that a string value contains a valid v6 CIDR Adress. - - Usage: cidrv6 - -Transmission Control Protocol Address TCP - -This validates that a string value contains a valid resolvable TCP Adress. - - Usage: tcp_addr - -Transmission Control Protocol Address TCPv4 - -This validates that a string value contains a valid resolvable v4 TCP Adress. - - Usage: tcp4_addr - -Transmission Control Protocol Address TCPv6 - -This validates that a string value contains a valid resolvable v6 TCP Adress. - - Usage: tcp6_addr - -User Datagram Protocol Address UDP - -This validates that a string value contains a valid resolvable UDP Adress. - - Usage: udp_addr - -User Datagram Protocol Address UDPv4 - -This validates that a string value contains a valid resolvable v4 UDP Adress. - - Usage: udp4_addr - -User Datagram Protocol Address UDPv6 - -This validates that a string value contains a valid resolvable v6 UDP Adress. - - Usage: udp6_addr - -Internet Protocol Address IP - -This validates that a string value contains a valid resolvable IP Adress. - - Usage: ip_addr - -Internet Protocol Address IPv4 - -This validates that a string value contains a valid resolvable v4 IP Adress. - - Usage: ip4_addr - -Internet Protocol Address IPv6 - -This validates that a string value contains a valid resolvable v6 IP Adress. - - Usage: ip6_addr - -Unix domain socket end point Address - -This validates that a string value contains a valid Unix Adress. - - Usage: unix_addr - -Media Access Control Address MAC - -This validates that a string value contains a valid MAC Adress. - - Usage: mac - -Note: See Go's ParseMAC for accepted formats and types: - - http://golang.org/src/net/mac.go?s=866:918#L29 - -Alias Validators and Tags - -NOTE: When returning an error, the tag returned in "FieldError" will be -the alias tag unless the dive tag is part of the alias. Everything after the -dive tag is not reported as the alias tag. Also, the "ActualTag" in the before -case will be the actual tag within the alias that failed. - -Here is a list of the current built in alias tags: - - "iscolor" - alias is "hexcolor|rgb|rgba|hsl|hsla" (Usage: iscolor) - -Validator notes: - - regex - a regex validator won't be added because commas and = signs can be part - of a regex which conflict with the validation definitions. Although - workarounds can be made, they take away from using pure regex's. - Furthermore it's quick and dirty but the regex's become harder to - maintain and are not reusable, so it's as much a programming philosiphy - as anything. - - In place of this new validator functions should be created; a regex can - be used within the validator function and even be precompiled for better - efficiency within regexes.go. - - And the best reason, you can submit a pull request and we can keep on - adding to the validation library of this package! - -Panics - -This package panics when bad input is provided, this is by design, bad code like -that should not make it to production. 
- - type Test struct { - TestField string `validate:"nonexistantfunction=1"` - } - - t := &Test{ - TestField: "Test" - } - - validate.Struct(t) // this will panic -*/ -package validator diff --git a/vendor/gopkg.in/go-playground/validator.v8/logo.png b/vendor/gopkg.in/go-playground/validator.v8/logo.png deleted file mode 100644 index 1a03886..0000000 Binary files a/vendor/gopkg.in/go-playground/validator.v8/logo.png and /dev/null differ diff --git a/vendor/gopkg.in/go-playground/validator.v8/regexes.go b/vendor/gopkg.in/go-playground/validator.v8/regexes.go deleted file mode 100644 index 83ae198..0000000 --- a/vendor/gopkg.in/go-playground/validator.v8/regexes.go +++ /dev/null @@ -1,59 +0,0 @@ -package validator - -import "regexp" - -const ( - alphaRegexString = "^[a-zA-Z]+$" - alphaNumericRegexString = "^[a-zA-Z0-9]+$" - numericRegexString = "^[-+]?[0-9]+(?:\\.[0-9]+)?$" - numberRegexString = "^[0-9]+$" - hexadecimalRegexString = "^[0-9a-fA-F]+$" - hexcolorRegexString = "^#(?:[0-9a-fA-F]{3}|[0-9a-fA-F]{6})$" - rgbRegexString = "^rgb\\(\\s*(?:(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])\\s*,\\s*(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])\\s*,\\s*(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])|(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])%\\s*,\\s*(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])%\\s*,\\s*(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])%)\\s*\\)$" - rgbaRegexString = "^rgba\\(\\s*(?:(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])\\s*,\\s*(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])\\s*,\\s*(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])|(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])%\\s*,\\s*(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])%\\s*,\\s*(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])%)\\s*,\\s*(?:(?:0.[1-9]*)|[01])\\s*\\)$" - hslRegexString = "^hsl\\(\\s*(?:0|[1-9]\\d?|[12]\\d\\d|3[0-5]\\d|360)\\s*,\\s*(?:(?:0|[1-9]\\d?|100)%)\\s*,\\s*(?:(?:0|[1-9]\\d?|100)%)\\s*\\)$" - hslaRegexString = "^hsla\\(\\s*(?:0|[1-9]\\d?|[12]\\d\\d|3[0-5]\\d|360)\\s*,\\s*(?:(?:0|[1-9]\\d?|100)%)\\s*,\\s*(?:(?:0|[1-9]\\d?|100)%)\\s*,\\s*(?:(?:0.[1-9]*)|[01])\\s*\\)$" - emailRegexString = "^(?:(?:(?:(?:[a-zA-Z]|\\d|[!#\\$%&'\\*\\+\\-\\/=\\?\\^_`{\\|}~]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])+(?:\\.([a-zA-Z]|\\d|[!#\\$%&'\\*\\+\\-\\/=\\?\\^_`{\\|}~]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])+)*)|(?:(?:\\x22)(?:(?:(?:(?:\\x20|\\x09)*(?:\\x0d\\x0a))?(?:\\x20|\\x09)+)?(?:(?:[\\x01-\\x08\\x0b\\x0c\\x0e-\\x1f\\x7f]|\\x21|[\\x23-\\x5b]|[\\x5d-\\x7e]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(?:\\(?:[\\x01-\\x09\\x0b\\x0c\\x0d-\\x7f]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}]))))*(?:(?:(?:\\x20|\\x09)*(?:\\x0d\\x0a))?(\\x20|\\x09)+)?(?:\\x22)))@(?:(?:(?:[a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(?:(?:[a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])(?:[a-zA-Z]|\\d|-|\\.|_|~|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])*(?:[a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])))\\.)+(?:(?:[a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(?:(?:[a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])(?:[a-zA-Z]|\\d|-|\\.|_|~|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])*(?:[a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])))\\.?$" - base64RegexString = "^(?:[A-Za-z0-9+\\/]{4})*(?:[A-Za-z0-9+\\/]{2}==|[A-Za-z0-9+\\/]{3}=|[A-Za-z0-9+\\/]{4})$" - iSBN10RegexString = "^(?:[0-9]{9}X|[0-9]{10})$" - iSBN13RegexString = 
"^(?:(?:97(?:8|9))[0-9]{10})$" - uUID3RegexString = "^[0-9a-f]{8}-[0-9a-f]{4}-3[0-9a-f]{3}-[0-9a-f]{4}-[0-9a-f]{12}$" - uUID4RegexString = "^[0-9a-f]{8}-[0-9a-f]{4}-4[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$" - uUID5RegexString = "^[0-9a-f]{8}-[0-9a-f]{4}-5[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$" - uUIDRegexString = "^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$" - aSCIIRegexString = "^[\x00-\x7F]*$" - printableASCIIRegexString = "^[\x20-\x7E]*$" - multibyteRegexString = "[^\x00-\x7F]" - dataURIRegexString = "^data:.+\\/(.+);base64$" - latitudeRegexString = "^[-+]?([1-8]?\\d(\\.\\d+)?|90(\\.0+)?)$" - longitudeRegexString = "^[-+]?(180(\\.0+)?|((1[0-7]\\d)|([1-9]?\\d))(\\.\\d+)?)$" - sSNRegexString = `^\d{3}[- ]?\d{2}[- ]?\d{4}$` -) - -var ( - alphaRegex = regexp.MustCompile(alphaRegexString) - alphaNumericRegex = regexp.MustCompile(alphaNumericRegexString) - numericRegex = regexp.MustCompile(numericRegexString) - numberRegex = regexp.MustCompile(numberRegexString) - hexadecimalRegex = regexp.MustCompile(hexadecimalRegexString) - hexcolorRegex = regexp.MustCompile(hexcolorRegexString) - rgbRegex = regexp.MustCompile(rgbRegexString) - rgbaRegex = regexp.MustCompile(rgbaRegexString) - hslRegex = regexp.MustCompile(hslRegexString) - hslaRegex = regexp.MustCompile(hslaRegexString) - emailRegex = regexp.MustCompile(emailRegexString) - base64Regex = regexp.MustCompile(base64RegexString) - iSBN10Regex = regexp.MustCompile(iSBN10RegexString) - iSBN13Regex = regexp.MustCompile(iSBN13RegexString) - uUID3Regex = regexp.MustCompile(uUID3RegexString) - uUID4Regex = regexp.MustCompile(uUID4RegexString) - uUID5Regex = regexp.MustCompile(uUID5RegexString) - uUIDRegex = regexp.MustCompile(uUIDRegexString) - aSCIIRegex = regexp.MustCompile(aSCIIRegexString) - printableASCIIRegex = regexp.MustCompile(printableASCIIRegexString) - multibyteRegex = regexp.MustCompile(multibyteRegexString) - dataURIRegex = regexp.MustCompile(dataURIRegexString) - latitudeRegex = regexp.MustCompile(latitudeRegexString) - longitudeRegex = regexp.MustCompile(longitudeRegexString) - sSNRegex = regexp.MustCompile(sSNRegexString) -) diff --git a/vendor/gopkg.in/go-playground/validator.v8/util.go b/vendor/gopkg.in/go-playground/validator.v8/util.go deleted file mode 100644 index ce01c7c..0000000 --- a/vendor/gopkg.in/go-playground/validator.v8/util.go +++ /dev/null @@ -1,382 +0,0 @@ -package validator - -import ( - "fmt" - "reflect" - "strconv" - "strings" -) - -const ( - dash = "-" - blank = "" - namespaceSeparator = "." - leftBracket = "[" - rightBracket = "]" - restrictedTagChars = ".[],|=+()`~!@#$%^&*\\\"/?<>{}" - restrictedAliasErr = "Alias '%s' either contains restricted characters or is the same as a restricted tag needed for normal operation" - restrictedTagErr = "Tag '%s' either contains restricted characters or is the same as a restricted tag needed for normal operation" -) - -var ( - restrictedTags = map[string]*struct{}{ - diveTag: emptyStructPtr, - existsTag: emptyStructPtr, - structOnlyTag: emptyStructPtr, - omitempty: emptyStructPtr, - skipValidationTag: emptyStructPtr, - utf8HexComma: emptyStructPtr, - utf8Pipe: emptyStructPtr, - noStructLevelTag: emptyStructPtr, - } -) - -// ExtractType gets the actual underlying type of field value. -// It will dive into pointers, customTypes and return you the -// underlying value and it's kind. 
-// it is exposed for use within you Custom Functions -func (v *Validate) ExtractType(current reflect.Value) (reflect.Value, reflect.Kind) { - - switch current.Kind() { - case reflect.Ptr: - - if current.IsNil() { - return current, reflect.Ptr - } - - return v.ExtractType(current.Elem()) - - case reflect.Interface: - - if current.IsNil() { - return current, reflect.Interface - } - - return v.ExtractType(current.Elem()) - - case reflect.Invalid: - return current, reflect.Invalid - - default: - - if v.hasCustomFuncs { - // fmt.Println("Type", current.Type()) - if fn, ok := v.customTypeFuncs[current.Type()]; ok { - - // fmt.Println("OK") - - return v.ExtractType(reflect.ValueOf(fn(current))) - } - - // fmt.Println("NOT OK") - } - - return current, current.Kind() - } -} - -// GetStructFieldOK traverses a struct to retrieve a specific field denoted by the provided namespace and -// returns the field, field kind and whether is was successful in retrieving the field at all. -// NOTE: when not successful ok will be false, this can happen when a nested struct is nil and so the field -// could not be retrived because it didnt exist. -func (v *Validate) GetStructFieldOK(current reflect.Value, namespace string) (reflect.Value, reflect.Kind, bool) { - - current, kind := v.ExtractType(current) - - if kind == reflect.Invalid { - return current, kind, false - } - - if namespace == blank { - return current, kind, true - } - - switch kind { - - case reflect.Ptr, reflect.Interface: - - return current, kind, false - - case reflect.Struct: - - typ := current.Type() - fld := namespace - ns := namespace - - if typ != timeType && typ != timePtrType { - - idx := strings.Index(namespace, namespaceSeparator) - - if idx != -1 { - fld = namespace[:idx] - ns = namespace[idx+1:] - } else { - ns = blank - idx = len(namespace) - } - - bracketIdx := strings.Index(fld, leftBracket) - if bracketIdx != -1 { - fld = fld[:bracketIdx] - - ns = namespace[bracketIdx:] - } - - current = current.FieldByName(fld) - - return v.GetStructFieldOK(current, ns) - } - - case reflect.Array, reflect.Slice: - idx := strings.Index(namespace, leftBracket) - idx2 := strings.Index(namespace, rightBracket) - - arrIdx, _ := strconv.Atoi(namespace[idx+1 : idx2]) - - if arrIdx >= current.Len() { - return current, kind, false - } - - startIdx := idx2 + 1 - - if startIdx < len(namespace) { - if namespace[startIdx:startIdx+1] == namespaceSeparator { - startIdx++ - } - } - - return v.GetStructFieldOK(current.Index(arrIdx), namespace[startIdx:]) - - case reflect.Map: - idx := strings.Index(namespace, leftBracket) + 1 - idx2 := strings.Index(namespace, rightBracket) - - endIdx := idx2 - - if endIdx+1 < len(namespace) { - if namespace[endIdx+1:endIdx+2] == namespaceSeparator { - endIdx++ - } - } - - key := namespace[idx:idx2] - - switch current.Type().Key().Kind() { - case reflect.Int: - i, _ := strconv.Atoi(key) - return v.GetStructFieldOK(current.MapIndex(reflect.ValueOf(i)), namespace[endIdx+1:]) - case reflect.Int8: - i, _ := strconv.ParseInt(key, 10, 8) - return v.GetStructFieldOK(current.MapIndex(reflect.ValueOf(int8(i))), namespace[endIdx+1:]) - case reflect.Int16: - i, _ := strconv.ParseInt(key, 10, 16) - return v.GetStructFieldOK(current.MapIndex(reflect.ValueOf(int16(i))), namespace[endIdx+1:]) - case reflect.Int32: - i, _ := strconv.ParseInt(key, 10, 32) - return v.GetStructFieldOK(current.MapIndex(reflect.ValueOf(int32(i))), namespace[endIdx+1:]) - case reflect.Int64: - i, _ := strconv.ParseInt(key, 10, 64) - return 
v.GetStructFieldOK(current.MapIndex(reflect.ValueOf(i)), namespace[endIdx+1:]) - case reflect.Uint: - i, _ := strconv.ParseUint(key, 10, 0) - return v.GetStructFieldOK(current.MapIndex(reflect.ValueOf(uint(i))), namespace[endIdx+1:]) - case reflect.Uint8: - i, _ := strconv.ParseUint(key, 10, 8) - return v.GetStructFieldOK(current.MapIndex(reflect.ValueOf(uint8(i))), namespace[endIdx+1:]) - case reflect.Uint16: - i, _ := strconv.ParseUint(key, 10, 16) - return v.GetStructFieldOK(current.MapIndex(reflect.ValueOf(uint16(i))), namespace[endIdx+1:]) - case reflect.Uint32: - i, _ := strconv.ParseUint(key, 10, 32) - return v.GetStructFieldOK(current.MapIndex(reflect.ValueOf(uint32(i))), namespace[endIdx+1:]) - case reflect.Uint64: - i, _ := strconv.ParseUint(key, 10, 64) - return v.GetStructFieldOK(current.MapIndex(reflect.ValueOf(i)), namespace[endIdx+1:]) - case reflect.Float32: - f, _ := strconv.ParseFloat(key, 32) - return v.GetStructFieldOK(current.MapIndex(reflect.ValueOf(float32(f))), namespace[endIdx+1:]) - case reflect.Float64: - f, _ := strconv.ParseFloat(key, 64) - return v.GetStructFieldOK(current.MapIndex(reflect.ValueOf(f)), namespace[endIdx+1:]) - case reflect.Bool: - b, _ := strconv.ParseBool(key) - return v.GetStructFieldOK(current.MapIndex(reflect.ValueOf(b)), namespace[endIdx+1:]) - - // reflect.Type = string - default: - return v.GetStructFieldOK(current.MapIndex(reflect.ValueOf(key)), namespace[endIdx+1:]) - } - } - - // if got here there was more namespace, cannot go any deeper - panic("Invalid field namespace") -} - -// asInt retuns the parameter as a int64 -// or panics if it can't convert -func asInt(param string) int64 { - - i, err := strconv.ParseInt(param, 0, 64) - panicIf(err) - - return i -} - -// asUint returns the parameter as a uint64 -// or panics if it can't convert -func asUint(param string) uint64 { - - i, err := strconv.ParseUint(param, 0, 64) - panicIf(err) - - return i -} - -// asFloat returns the parameter as a float64 -// or panics if it can't convert -func asFloat(param string) float64 { - - i, err := strconv.ParseFloat(param, 64) - panicIf(err) - - return i -} - -func panicIf(err error) { - if err != nil { - panic(err.Error()) - } -} - -func (v *Validate) parseStruct(current reflect.Value, sName string) *cachedStruct { - - typ := current.Type() - s := &cachedStruct{Name: sName, fields: map[int]cachedField{}} - - numFields := current.NumField() - - var fld reflect.StructField - var tag string - var customName string - - for i := 0; i < numFields; i++ { - - fld = typ.Field(i) - - if fld.PkgPath != blank { - continue - } - - tag = fld.Tag.Get(v.tagName) - - if tag == skipValidationTag { - continue - } - - customName = fld.Name - if v.fieldNameTag != blank { - - name := strings.SplitN(fld.Tag.Get(v.fieldNameTag), ",", 2)[0] - - // dash check is for json "-" (aka skipValidationTag) means don't output in json - if name != "" && name != skipValidationTag { - customName = name - } - } - - cTag, ok := v.tagCache.Get(tag) - if !ok { - cTag = v.parseTags(tag, fld.Name) - } - - s.fields[i] = cachedField{Idx: i, Name: fld.Name, AltName: customName, CachedTag: cTag} - } - - v.structCache.Set(typ, s) - - return s -} - -func (v *Validate) parseTags(tag, fieldName string) *cachedTag { - - cTag := &cachedTag{tag: tag} - - v.parseTagsRecursive(cTag, tag, fieldName, blank, false) - - v.tagCache.Set(tag, cTag) - - return cTag -} - -func (v *Validate) parseTagsRecursive(cTag *cachedTag, tag, fieldName, alias string, isAlias bool) bool { - - if tag == blank { - return true - 
} - - for _, t := range strings.Split(tag, tagSeparator) { - - if v.hasAliasValidators { - // check map for alias and process new tags, otherwise process as usual - if tagsVal, ok := v.aliasValidators[t]; ok { - - leave := v.parseTagsRecursive(cTag, tagsVal, fieldName, t, true) - - if leave { - return leave - } - - continue - } - } - - switch t { - - case diveTag: - cTag.diveTag = tag - tVals := &tagVals{tagVals: [][]string{{t}}} - cTag.tags = append(cTag.tags, tVals) - return true - - case omitempty: - cTag.isOmitEmpty = true - - case structOnlyTag: - cTag.isStructOnly = true - - case noStructLevelTag: - cTag.isNoStructLevel = true - } - - // if a pipe character is needed within the param you must use the utf8Pipe representation "0x7C" - orVals := strings.Split(t, orSeparator) - tagVal := &tagVals{isAlias: isAlias, isOrVal: len(orVals) > 1, tagVals: make([][]string, len(orVals))} - cTag.tags = append(cTag.tags, tagVal) - - var key string - var param string - - for i, val := range orVals { - vals := strings.SplitN(val, tagKeySeparator, 2) - key = vals[0] - - tagVal.tag = key - - if isAlias { - tagVal.tag = alias - } - - if key == blank { - panic(strings.TrimSpace(fmt.Sprintf(invalidValidation, fieldName))) - } - - if len(vals) > 1 { - param = strings.Replace(strings.Replace(vals[1], utf8HexComma, ",", -1), utf8Pipe, "|", -1) - } - - tagVal.tagVals[i] = []string{key, param} - } - } - - return false -} diff --git a/vendor/gopkg.in/go-playground/validator.v8/validator.go b/vendor/gopkg.in/go-playground/validator.v8/validator.go deleted file mode 100644 index 0e6db7e..0000000 --- a/vendor/gopkg.in/go-playground/validator.v8/validator.go +++ /dev/null @@ -1,797 +0,0 @@ -/** - * Package validator - * - * MISC: - * - anonymous structs - they don't have names so expect the Struct name within StructErrors to be blank - * - */ - -package validator - -import ( - "bytes" - "errors" - "fmt" - "reflect" - "strings" - "sync" - "time" -) - -const ( - utf8HexComma = "0x2C" - utf8Pipe = "0x7C" - tagSeparator = "," - orSeparator = "|" - tagKeySeparator = "=" - structOnlyTag = "structonly" - noStructLevelTag = "nostructlevel" - omitempty = "omitempty" - skipValidationTag = "-" - diveTag = "dive" - existsTag = "exists" - fieldErrMsg = "Key: '%s' Error:Field validation for '%s' failed on the '%s' tag" - arrayIndexFieldName = "%s" + leftBracket + "%d" + rightBracket - mapIndexFieldName = "%s" + leftBracket + "%v" + rightBracket - invalidValidation = "Invalid validation tag on field %s" - undefinedValidation = "Undefined validation function on field %s" - validatorNotInitialized = "Validator instance not initialized" - fieldNameRequired = "Field Name Required" - tagRequired = "Tag Required" -) - -var ( - timeType = reflect.TypeOf(time.Time{}) - timePtrType = reflect.TypeOf(&time.Time{}) - emptyStructPtr = new(struct{}) -) - -// StructLevel contains all of the information and helper methods -// for reporting errors during struct level validation -type StructLevel struct { - TopStruct reflect.Value - CurrentStruct reflect.Value - errPrefix string - nsPrefix string - errs ValidationErrors - v *Validate -} - -// ReportValidationErrors accepts the key relative to the top level struct and validatin errors. 
-// Example: had a triple nested struct User, ContactInfo, Country and ran errs := validate.Struct(country) -// from within a User struct level validation would call this method like so: -// ReportValidationErrors("ContactInfo.", errs) -// NOTE: relativeKey can contain both the Field Relative and Custom name relative paths -// i.e. ReportValidationErrors("ContactInfo.|cInfo", errs) where cInfo represents say the JSON name of -// the relative path; this will be split into 2 variables in the next valiator version. -func (sl *StructLevel) ReportValidationErrors(relativeKey string, errs ValidationErrors) { - for _, e := range errs { - - idx := strings.Index(relativeKey, "|") - var rel string - var cRel string - - if idx != -1 { - rel = relativeKey[:idx] - cRel = relativeKey[idx+1:] - } else { - rel = relativeKey - } - - key := sl.errPrefix + rel + e.Field - - e.FieldNamespace = key - e.NameNamespace = sl.nsPrefix + cRel + e.Name - - sl.errs[key] = e - } -} - -// ReportError reports an error just by passing the field and tag information -// NOTE: tag can be an existing validation tag or just something you make up -// and precess on the flip side it's up to you. -func (sl *StructLevel) ReportError(field reflect.Value, fieldName string, customName string, tag string) { - - field, kind := sl.v.ExtractType(field) - - if fieldName == blank { - panic(fieldNameRequired) - } - - if customName == blank { - customName = fieldName - } - - if tag == blank { - panic(tagRequired) - } - - ns := sl.errPrefix + fieldName - - switch kind { - case reflect.Invalid: - sl.errs[ns] = &FieldError{ - FieldNamespace: ns, - NameNamespace: sl.nsPrefix + customName, - Name: customName, - Field: fieldName, - Tag: tag, - ActualTag: tag, - Param: blank, - Kind: kind, - } - default: - sl.errs[ns] = &FieldError{ - FieldNamespace: ns, - NameNamespace: sl.nsPrefix + customName, - Name: customName, - Field: fieldName, - Tag: tag, - ActualTag: tag, - Param: blank, - Value: field.Interface(), - Kind: kind, - Type: field.Type(), - } - } -} - -// Validate contains the validator settings passed in using the Config struct -type Validate struct { - tagName string - fieldNameTag string - validationFuncs map[string]Func - structLevelFuncs map[reflect.Type]StructLevelFunc - customTypeFuncs map[reflect.Type]CustomTypeFunc - aliasValidators map[string]string - hasCustomFuncs bool - hasAliasValidators bool - hasStructLevelFuncs bool - tagCache *tagCacheMap - structCache *structCacheMap - errsPool *sync.Pool -} - -func (v *Validate) initCheck() { - if v == nil { - panic(validatorNotInitialized) - } -} - -// Config contains the options that a Validator instance will use. -// It is passed to the New() function -type Config struct { - TagName string - FieldNameTag string -} - -// CustomTypeFunc allows for overriding or adding custom field type handler functions -// field = field value of the type to return a value to be validated -// example Valuer from sql drive see https://golang.org/src/database/sql/driver/types.go?s=1210:1293#L29 -type CustomTypeFunc func(field reflect.Value) interface{} - -// Func accepts all values needed for file and cross field validation -// v = validator instance, needed but some built in functions for it's custom types -// topStruct = top level struct when validating by struct otherwise nil -// currentStruct = current level struct when validating by struct otherwise optional comparison value -// field = field value for validation -// param = parameter used in validation i.e. 
gt=0 param would be 0 -type Func func(v *Validate, topStruct reflect.Value, currentStruct reflect.Value, field reflect.Value, fieldtype reflect.Type, fieldKind reflect.Kind, param string) bool - -// StructLevelFunc accepts all values needed for struct level validation -type StructLevelFunc func(v *Validate, structLevel *StructLevel) - -// ValidationErrors is a type of map[string]*FieldError -// it exists to allow for multiple errors to be passed from this library -// and yet still subscribe to the error interface -type ValidationErrors map[string]*FieldError - -// Error is intended for use in development + debugging and not intended to be a production error message. -// It allows ValidationErrors to subscribe to the Error interface. -// All information to create an error message specific to your application is contained within -// the FieldError found within the ValidationErrors map -func (ve ValidationErrors) Error() string { - - buff := bytes.NewBufferString(blank) - - for key, err := range ve { - buff.WriteString(fmt.Sprintf(fieldErrMsg, key, err.Field, err.Tag)) - buff.WriteString("\n") - } - - return strings.TrimSpace(buff.String()) -} - -// FieldError contains a single field's validation error along -// with other properties that may be needed for error message creation -type FieldError struct { - FieldNamespace string - NameNamespace string - Field string - Name string - Tag string - ActualTag string - Kind reflect.Kind - Type reflect.Type - Param string - Value interface{} -} - -// New creates a new Validate instance for use. -func New(config *Config) *Validate { - - v := &Validate{ - tagName: config.TagName, - fieldNameTag: config.FieldNameTag, - tagCache: &tagCacheMap{m: map[string]*cachedTag{}}, - structCache: &structCacheMap{m: map[reflect.Type]*cachedStruct{}}, - errsPool: &sync.Pool{New: func() interface{} { - return ValidationErrors{} - }}} - - if len(v.aliasValidators) == 0 { - // must copy alias validators for separate validations to be used in each validator instance - v.aliasValidators = map[string]string{} - for k, val := range bakedInAliasValidators { - v.RegisterAliasValidation(k, val) - } - } - - if len(v.validationFuncs) == 0 { - // must copy validators for separate validations to be used in each instance - v.validationFuncs = map[string]Func{} - for k, val := range bakedInValidators { - v.RegisterValidation(k, val) - } - } - - return v -} - -// RegisterStructValidation registers a StructLevelFunc against a number of types -// NOTE: this method is not thread-safe it is intended that these all be registered prior to any validation -func (v *Validate) RegisterStructValidation(fn StructLevelFunc, types ...interface{}) { - v.initCheck() - - if v.structLevelFuncs == nil { - v.structLevelFuncs = map[reflect.Type]StructLevelFunc{} - } - - for _, t := range types { - v.structLevelFuncs[reflect.TypeOf(t)] = fn - } - - v.hasStructLevelFuncs = true -} - -// RegisterValidation adds a validation Func to a Validate's map of validators denoted by the key -// NOTE: if the key already exists, the previous validation function will be replaced. 
-// NOTE: this method is not thread-safe it is intended that these all be registered prior to any validation -func (v *Validate) RegisterValidation(key string, fn Func) error { - v.initCheck() - - if key == blank { - return errors.New("Function Key cannot be empty") - } - - if fn == nil { - return errors.New("Function cannot be empty") - } - - _, ok := restrictedTags[key] - - if ok || strings.ContainsAny(key, restrictedTagChars) { - panic(fmt.Sprintf(restrictedTagErr, key)) - } - - v.validationFuncs[key] = fn - - return nil -} - -// RegisterCustomTypeFunc registers a CustomTypeFunc against a number of types -// NOTE: this method is not thread-safe it is intended that these all be registered prior to any validation -func (v *Validate) RegisterCustomTypeFunc(fn CustomTypeFunc, types ...interface{}) { - v.initCheck() - - if v.customTypeFuncs == nil { - v.customTypeFuncs = map[reflect.Type]CustomTypeFunc{} - } - - for _, t := range types { - v.customTypeFuncs[reflect.TypeOf(t)] = fn - } - - v.hasCustomFuncs = true -} - -// RegisterAliasValidation registers a mapping of a single validationstag that -// defines a common or complex set of validation(s) to simplify adding validation -// to structs. NOTE: when returning an error the tag returned in FieldError will be -// the alias tag unless the dive tag is part of the alias; everything after the -// dive tag is not reported as the alias tag. Also the ActualTag in the before case -// will be the actual tag within the alias that failed. -// NOTE: this method is not thread-safe it is intended that these all be registered prior to any validation -func (v *Validate) RegisterAliasValidation(alias, tags string) { - v.initCheck() - - _, ok := restrictedTags[alias] - - if ok || strings.ContainsAny(alias, restrictedTagChars) { - panic(fmt.Sprintf(restrictedAliasErr, alias)) - } - - v.aliasValidators[alias] = tags - v.hasAliasValidators = true -} - -// Field validates a single field using tag style validation and returns nil or ValidationErrors as type error. -// You will need to assert the error if it's not nil i.e. err.(validator.ValidationErrors) to access the map of errors. -// NOTE: it returns ValidationErrors instead of a single FieldError because this can also -// validate Array, Slice and maps fields which may contain more than one error -func (v *Validate) Field(field interface{}, tag string) error { - v.initCheck() - - errs := v.errsPool.Get().(ValidationErrors) - fieldVal := reflect.ValueOf(field) - - v.traverseField(fieldVal, fieldVal, fieldVal, blank, blank, errs, false, tag, blank, blank, false, false, nil, nil) - - if len(errs) == 0 { - v.errsPool.Put(errs) - return nil - } - - return errs -} - -// FieldWithValue validates a single field, against another fields value using tag style validation and returns nil or ValidationErrors. -// You will need to assert the error if it's not nil i.e. err.(validator.ValidationErrors) to access the map of errors. 
-// NOTE: it returns ValidationErrors instead of a single FieldError because this can also -// validate Array, Slice and maps fields which may contain more than one error -func (v *Validate) FieldWithValue(val interface{}, field interface{}, tag string) error { - v.initCheck() - - errs := v.errsPool.Get().(ValidationErrors) - topVal := reflect.ValueOf(val) - - v.traverseField(topVal, topVal, reflect.ValueOf(field), blank, blank, errs, false, tag, blank, blank, false, false, nil, nil) - - if len(errs) == 0 { - v.errsPool.Put(errs) - return nil - } - - return errs -} - -// StructPartial validates the fields passed in only, ignoring all others. -// Fields may be provided in a namespaced fashion relative to the struct provided -// i.e. NestedStruct.Field or NestedArrayField[0].Struct.Name and returns nil or ValidationErrors as error -// You will need to assert the error if it's not nil i.e. err.(validator.ValidationErrors) to access the map of errors. -func (v *Validate) StructPartial(current interface{}, fields ...string) error { - v.initCheck() - - sv, _ := v.ExtractType(reflect.ValueOf(current)) - name := sv.Type().Name() - m := map[string]*struct{}{} - - if fields != nil { - for _, k := range fields { - - flds := strings.Split(k, namespaceSeparator) - if len(flds) > 0 { - - key := name + namespaceSeparator - for _, s := range flds { - - idx := strings.Index(s, leftBracket) - - if idx != -1 { - for idx != -1 { - key += s[:idx] - m[key] = emptyStructPtr - - idx2 := strings.Index(s, rightBracket) - idx2++ - key += s[idx:idx2] - m[key] = emptyStructPtr - s = s[idx2:] - idx = strings.Index(s, leftBracket) - } - } else { - - key += s - m[key] = emptyStructPtr - } - - key += namespaceSeparator - } - } - } - } - - errs := v.errsPool.Get().(ValidationErrors) - - v.tranverseStruct(sv, sv, sv, blank, blank, errs, true, len(m) != 0, false, m, false) - - if len(errs) == 0 { - v.errsPool.Put(errs) - return nil - } - - return errs -} - -// StructExcept validates all fields except the ones passed in. -// Fields may be provided in a namespaced fashion relative to the struct provided -// i.e. NestedStruct.Field or NestedArrayField[0].Struct.Name and returns nil or ValidationErrors as error -// You will need to assert the error if it's not nil i.e. err.(validator.ValidationErrors) to access the map of errors. -func (v *Validate) StructExcept(current interface{}, fields ...string) error { - v.initCheck() - - sv, _ := v.ExtractType(reflect.ValueOf(current)) - name := sv.Type().Name() - m := map[string]*struct{}{} - - for _, key := range fields { - m[name+namespaceSeparator+key] = emptyStructPtr - } - - errs := v.errsPool.Get().(ValidationErrors) - - v.tranverseStruct(sv, sv, sv, blank, blank, errs, true, len(m) != 0, true, m, false) - - if len(errs) == 0 { - v.errsPool.Put(errs) - return nil - } - - return errs -} - -// Struct validates a structs exposed fields, and automatically validates nested structs, unless otherwise specified. -// it returns nil or ValidationErrors as error. -// You will need to assert the error if it's not nil i.e. err.(validator.ValidationErrors) to access the map of errors. 
-func (v *Validate) Struct(current interface{}) error { - v.initCheck() - - errs := v.errsPool.Get().(ValidationErrors) - sv := reflect.ValueOf(current) - - v.tranverseStruct(sv, sv, sv, blank, blank, errs, true, false, false, nil, false) - - if len(errs) == 0 { - v.errsPool.Put(errs) - return nil - } - - return errs -} - -// tranverseStruct traverses a structs fields and then passes them to be validated by traverseField -func (v *Validate) tranverseStruct(topStruct reflect.Value, currentStruct reflect.Value, current reflect.Value, errPrefix string, nsPrefix string, errs ValidationErrors, useStructName bool, partial bool, exclude bool, includeExclude map[string]*struct{}, isStructOnly bool) { - - if current.Kind() == reflect.Ptr && !current.IsNil() { - current = current.Elem() - } - - if current.Kind() != reflect.Struct && current.Kind() != reflect.Interface { - panic("value passed for validation is not a struct") - } - - // var ok bool - typ := current.Type() - - sName := typ.Name() - - if useStructName { - errPrefix += sName + namespaceSeparator - - if v.fieldNameTag != blank { - nsPrefix += sName + namespaceSeparator - } - } - - // structonly tag present don't tranverseFields - // but must still check and run below struct level validation - // if present - if !isStructOnly { - - var fld reflect.StructField - - // is anonymous struct, cannot parse or cache as - // it has no name to index by - if sName == blank { - - var customName string - var ok bool - numFields := current.NumField() - - for i := 0; i < numFields; i++ { - - fld = typ.Field(i) - - if fld.PkgPath != blank && !fld.Anonymous { - continue - } - - if partial { - - _, ok = includeExclude[errPrefix+fld.Name] - - if (ok && exclude) || (!ok && !exclude) { - continue - } - } - - customName = fld.Name - - if v.fieldNameTag != blank { - - name := strings.SplitN(fld.Tag.Get(v.fieldNameTag), ",", 2)[0] - - // dash check is for json "-" means don't output in json - if name != blank && name != dash { - customName = name - } - } - - v.traverseField(topStruct, currentStruct, current.Field(i), errPrefix, nsPrefix, errs, true, fld.Tag.Get(v.tagName), fld.Name, customName, partial, exclude, includeExclude, nil) - } - } else { - s, ok := v.structCache.Get(typ) - if !ok { - s = v.parseStruct(current, sName) - } - - for i, f := range s.fields { - - if partial { - - _, ok = includeExclude[errPrefix+f.Name] - - if (ok && exclude) || (!ok && !exclude) { - continue - } - } - fld = typ.Field(i) - - v.traverseField(topStruct, currentStruct, current.Field(i), errPrefix, nsPrefix, errs, true, f.CachedTag.tag, fld.Name, f.AltName, partial, exclude, includeExclude, f.CachedTag) - } - } - } - - // check if any struct level validations, after all field validations already checked. 
- if v.hasStructLevelFuncs { - if fn, ok := v.structLevelFuncs[current.Type()]; ok { - fn(v, &StructLevel{v: v, TopStruct: topStruct, CurrentStruct: current, errPrefix: errPrefix, nsPrefix: nsPrefix, errs: errs}) - } - } -} - -// traverseField validates any field, be it a struct or single field, ensures it's validity and passes it along to be validated via it's tag options -func (v *Validate) traverseField(topStruct reflect.Value, currentStruct reflect.Value, current reflect.Value, errPrefix string, nsPrefix string, errs ValidationErrors, isStructField bool, tag, name, customName string, partial bool, exclude bool, includeExclude map[string]*struct{}, cTag *cachedTag) { - - if tag == skipValidationTag { - return - } - - if cTag == nil { - var isCached bool - cTag, isCached = v.tagCache.Get(tag) - - if !isCached { - cTag = v.parseTags(tag, name) - } - } - - current, kind := v.ExtractType(current) - var typ reflect.Type - - switch kind { - case reflect.Ptr, reflect.Interface, reflect.Invalid: - if cTag.isOmitEmpty { - return - } - - if tag != blank { - - ns := errPrefix + name - - if kind == reflect.Invalid { - errs[ns] = &FieldError{ - FieldNamespace: ns, - NameNamespace: nsPrefix + customName, - Name: customName, - Field: name, - Tag: cTag.tags[0].tag, - ActualTag: cTag.tags[0].tagVals[0][0], - Param: cTag.tags[0].tagVals[0][1], - Kind: kind, - } - return - } - - errs[ns] = &FieldError{ - FieldNamespace: ns, - NameNamespace: nsPrefix + customName, - Name: customName, - Field: name, - Tag: cTag.tags[0].tag, - ActualTag: cTag.tags[0].tagVals[0][0], - Param: cTag.tags[0].tagVals[0][1], - Value: current.Interface(), - Kind: kind, - Type: current.Type(), - } - - return - } - - // if we get here tag length is zero and we can leave - if kind == reflect.Invalid { - return - } - - case reflect.Struct: - typ = current.Type() - - if typ != timeType { - - if cTag.isNoStructLevel { - return - } - - v.tranverseStruct(topStruct, current, current, errPrefix+name+namespaceSeparator, nsPrefix+customName+namespaceSeparator, errs, false, partial, exclude, includeExclude, cTag.isStructOnly) - return - } - } - - if tag == blank { - return - } - - typ = current.Type() - - var dive bool - var diveSubTag string - - for _, valTag := range cTag.tags { - - if valTag.tagVals[0][0] == existsTag { - continue - } - - if valTag.tagVals[0][0] == diveTag { - dive = true - diveSubTag = strings.TrimLeft(strings.SplitN(cTag.diveTag, diveTag, 2)[1], ",") - break - } - - if valTag.tagVals[0][0] == omitempty { - - if !HasValue(v, topStruct, currentStruct, current, typ, kind, blank) { - return - } - continue - } - - if v.validateField(topStruct, currentStruct, current, typ, kind, errPrefix, nsPrefix, errs, valTag, name, customName) { - return - } - } - - if dive { - // traverse slice or map here - // or panic ;) - switch kind { - case reflect.Slice, reflect.Array: - v.traverseSlice(topStruct, currentStruct, current, errPrefix, nsPrefix, errs, diveSubTag, name, customName, partial, exclude, includeExclude, nil) - case reflect.Map: - v.traverseMap(topStruct, currentStruct, current, errPrefix, nsPrefix, errs, diveSubTag, name, customName, partial, exclude, includeExclude, nil) - default: - // throw error, if not a slice or map then should not have gotten here - // bad dive tag - panic("dive error! 
can't dive on a non slice or map") - } - } -} - -// traverseSlice traverses a Slice or Array's elements and passes them to traverseField for validation -func (v *Validate) traverseSlice(topStruct reflect.Value, currentStruct reflect.Value, current reflect.Value, errPrefix string, nsPrefix string, errs ValidationErrors, tag, name, customName string, partial bool, exclude bool, includeExclude map[string]*struct{}, cTag *cachedTag) { - - for i := 0; i < current.Len(); i++ { - v.traverseField(topStruct, currentStruct, current.Index(i), errPrefix, nsPrefix, errs, false, tag, fmt.Sprintf(arrayIndexFieldName, name, i), fmt.Sprintf(arrayIndexFieldName, customName, i), partial, exclude, includeExclude, cTag) - } -} - -// traverseMap traverses a map's elements and passes them to traverseField for validation -func (v *Validate) traverseMap(topStruct reflect.Value, currentStruct reflect.Value, current reflect.Value, errPrefix string, nsPrefix string, errs ValidationErrors, tag, name, customName string, partial bool, exclude bool, includeExclude map[string]*struct{}, cTag *cachedTag) { - - for _, key := range current.MapKeys() { - v.traverseField(topStruct, currentStruct, current.MapIndex(key), errPrefix, nsPrefix, errs, false, tag, fmt.Sprintf(mapIndexFieldName, name, key.Interface()), fmt.Sprintf(mapIndexFieldName, customName, key.Interface()), partial, exclude, includeExclude, cTag) - } -} - -// validateField validates a field based on the provided tag's key and param values and returns true if there is an error or false if all ok -func (v *Validate) validateField(topStruct reflect.Value, currentStruct reflect.Value, current reflect.Value, currentType reflect.Type, currentKind reflect.Kind, errPrefix string, nsPrefix string, errs ValidationErrors, valTag *tagVals, name, customName string) bool { - - var valFunc Func - var ok bool - - if valTag.isOrVal { - - errTag := blank - - for _, val := range valTag.tagVals { - - valFunc, ok = v.validationFuncs[val[0]] - if !ok { - panic(strings.TrimSpace(fmt.Sprintf(undefinedValidation, name))) - } - - if valFunc(v, topStruct, currentStruct, current, currentType, currentKind, val[1]) { - return false - } - - errTag += orSeparator + val[0] - } - - ns := errPrefix + name - - if valTag.isAlias { - errs[ns] = &FieldError{ - FieldNamespace: ns, - NameNamespace: nsPrefix + customName, - Name: customName, - Field: name, - Tag: valTag.tag, - ActualTag: errTag[1:], - Value: current.Interface(), - Type: currentType, - Kind: currentKind, - } - } else { - errs[errPrefix+name] = &FieldError{ - FieldNamespace: ns, - NameNamespace: nsPrefix + customName, - Name: customName, - Field: name, - Tag: errTag[1:], - ActualTag: errTag[1:], - Value: current.Interface(), - Type: currentType, - Kind: currentKind, - } - } - - return true - } - - valFunc, ok = v.validationFuncs[valTag.tagVals[0][0]] - if !ok { - panic(strings.TrimSpace(fmt.Sprintf(undefinedValidation, name))) - } - - if valFunc(v, topStruct, currentStruct, current, currentType, currentKind, valTag.tagVals[0][1]) { - return false - } - - ns := errPrefix + name - - errs[ns] = &FieldError{ - FieldNamespace: ns, - NameNamespace: nsPrefix + customName, - Name: customName, - Field: name, - Tag: valTag.tag, - ActualTag: valTag.tagVals[0][0], - Value: current.Interface(), - Param: valTag.tagVals[0][1], - Type: currentType, - Kind: currentKind, - } - - return true -} diff --git a/vendor/gopkg.in/gorp.v1/LICENSE b/vendor/gopkg.in/gorp.v1/LICENSE deleted file mode 100644 index b661111..0000000 --- a/vendor/gopkg.in/gorp.v1/LICENSE 
+++ /dev/null @@ -1,22 +0,0 @@ -(The MIT License) - -Copyright (c) 2012 James Cooper - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -'Software'), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, -TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/gopkg.in/gorp.v1/Makefile b/vendor/gopkg.in/gorp.v1/Makefile deleted file mode 100644 index edf771c..0000000 --- a/vendor/gopkg.in/gorp.v1/Makefile +++ /dev/null @@ -1,6 +0,0 @@ -include $(GOROOT)/src/Make.inc - -TARG = github.com/coopernurse/gorp -GOFILES = gorp.go dialect.go - -include $(GOROOT)/src/Make.pkg \ No newline at end of file diff --git a/vendor/gopkg.in/gorp.v1/README.md b/vendor/gopkg.in/gorp.v1/README.md deleted file mode 100644 index c7cd5d8..0000000 --- a/vendor/gopkg.in/gorp.v1/README.md +++ /dev/null @@ -1,672 +0,0 @@ -# Go Relational Persistence - -[![build status](https://secure.travis-ci.org/go-gorp/gorp.png)](http://travis-ci.org/go-gorp/gorp) - -I hesitate to call gorp an ORM. Go doesn't really have objects, at least -not in the classic Smalltalk/Java sense. There goes the "O". gorp doesn't -know anything about the relationships between your structs (at least not -yet). So the "R" is questionable too (but I use it in the name because, -well, it seemed more clever). - -The "M" is alive and well. Given some Go structs and a database, gorp -should remove a fair amount of boilerplate busy-work from your code. - -I hope that gorp saves you time, minimizes the drudgery of getting data -in and out of your database, and helps your code focus on algorithms, -not infrastructure. - -* Bind struct fields to table columns via API or tag -* Support for embedded structs -* Support for transactions -* Forward engineer db schema from structs (great for unit tests) -* Pre/post insert/update/delete hooks -* Automatically generate insert/update/delete statements for a struct -* Automatic binding of auto increment PKs back to struct after insert -* Delete by primary key(s) -* Select by primary key(s) -* Optional trace sql logging -* Bind arbitrary SQL queries to a struct -* Bind slice to SELECT query results without type assertions -* Use positional or named bind parameters in custom SELECT queries -* Optional optimistic locking using a version column (for update/deletes) - -## Installation - - # install the library: - go get gopkg.in/gorp.v1 - - // use in your .go code: - import ( - "gopkg.in/gorp.v1" - ) - -## Versioning - -This project provides a stable release (v1.x tags) and a bleeding edge codebase (master). - -`gopkg.in/gorp.v1` points to the latest v1.x tag. The API's for v1 are stable and shouldn't change. Development takes place at the master branch. 
Althought the code in master should always compile and test successfully, it might break API's. We aim to maintain backwards compatibility, but API's and behaviour might be changed to fix a bug. Also note that API's that are new in the master branch can change until released as v2. - -If you want to use bleeding edge, use `github.com/go-gorp/gorp` as import path. - -## API Documentation - -Full godoc output from the latest v1 release is available here: - -https://godoc.org/gopkg.in/gorp.v1 - -For the latest code in master: - -https://godoc.org/github.com/go-gorp/gorp - -## Quickstart - -```go -package main - -import ( - "database/sql" - "gopkg.in/gorp.v1" - _ "github.com/mattn/go-sqlite3" - "log" - "time" -) - -func main() { - // initialize the DbMap - dbmap := initDb() - defer dbmap.Db.Close() - - // delete any existing rows - err := dbmap.TruncateTables() - checkErr(err, "TruncateTables failed") - - // create two posts - p1 := newPost("Go 1.1 released!", "Lorem ipsum lorem ipsum") - p2 := newPost("Go 1.2 released!", "Lorem ipsum lorem ipsum") - - // insert rows - auto increment PKs will be set properly after the insert - err = dbmap.Insert(&p1, &p2) - checkErr(err, "Insert failed") - - // use convenience SelectInt - count, err := dbmap.SelectInt("select count(*) from posts") - checkErr(err, "select count(*) failed") - log.Println("Rows after inserting:", count) - - // update a row - p2.Title = "Go 1.2 is better than ever" - count, err = dbmap.Update(&p2) - checkErr(err, "Update failed") - log.Println("Rows updated:", count) - - // fetch one row - note use of "post_id" instead of "Id" since column is aliased - // - // Postgres users should use $1 instead of ? placeholders - // See 'Known Issues' below - // - err = dbmap.SelectOne(&p2, "select * from posts where post_id=?", p2.Id) - checkErr(err, "SelectOne failed") - log.Println("p2 row:", p2) - - // fetch all rows - var posts []Post - _, err = dbmap.Select(&posts, "select * from posts order by post_id") - checkErr(err, "Select failed") - log.Println("All rows:") - for x, p := range posts { - log.Printf(" %d: %v\n", x, p) - } - - // delete row by PK - count, err = dbmap.Delete(&p1) - checkErr(err, "Delete failed") - log.Println("Rows deleted:", count) - - // delete row manually via Exec - _, err = dbmap.Exec("delete from posts where post_id=?", p2.Id) - checkErr(err, "Exec failed") - - // confirm count is zero - count, err = dbmap.SelectInt("select count(*) from posts") - checkErr(err, "select count(*) failed") - log.Println("Row count - should be zero:", count) - - log.Println("Done!") -} - -type Post struct { - // db tag lets you specify the column name if it differs from the struct field - Id int64 `db:"post_id"` - Created int64 - Title string - Body string -} - -func newPost(title, body string) Post { - return Post{ - Created: time.Now().UnixNano(), - Title: title, - Body: body, - } -} - -func initDb() *gorp.DbMap { - // connect to db using standard Go database/sql API - // use whatever database/sql driver you wish - db, err := sql.Open("sqlite3", "/tmp/post_db.bin") - checkErr(err, "sql.Open failed") - - // construct a gorp DbMap - dbmap := &gorp.DbMap{Db: db, Dialect: gorp.SqliteDialect{}} - - // add a table, setting the table name to 'posts' and - // specifying that the Id property is an auto incrementing PK - dbmap.AddTableWithName(Post{}, "posts").SetKeys(true, "Id") - - // create the table. 
in a production system you'd generally - // use a migration tool, or create the tables via scripts - err = dbmap.CreateTablesIfNotExists() - checkErr(err, "Create tables failed") - - return dbmap -} - -func checkErr(err error, msg string) { - if err != nil { - log.Fatalln(msg, err) - } -} -``` - -## Examples - -### Mapping structs to tables - -First define some types: - -```go -type Invoice struct { - Id int64 - Created int64 - Updated int64 - Memo string - PersonId int64 -} - -type Person struct { - Id int64 - Created int64 - Updated int64 - FName string - LName string -} - -// Example of using tags to alias fields to column names -// The 'db' value is the column name -// -// A hyphen will cause gorp to skip this field, similar to the -// Go json package. -// -// This is equivalent to using the ColMap methods: -// -// table := dbmap.AddTableWithName(Product{}, "product") -// table.ColMap("Id").Rename("product_id") -// table.ColMap("Price").Rename("unit_price") -// table.ColMap("IgnoreMe").SetTransient(true) -// -type Product struct { - Id int64 `db:"product_id"` - Price int64 `db:"unit_price"` - IgnoreMe string `db:"-"` -} -``` - -Then create a mapper, typically you'd do this one time at app startup: - -```go -// connect to db using standard Go database/sql API -// use whatever database/sql driver you wish -db, err := sql.Open("mymysql", "tcp:localhost:3306*mydb/myuser/mypassword") - -// construct a gorp DbMap -dbmap := &gorp.DbMap{Db: db, Dialect: gorp.MySQLDialect{"InnoDB", "UTF8"}} - -// register the structs you wish to use with gorp -// you can also use the shorter dbmap.AddTable() if you -// don't want to override the table name -// -// SetKeys(true) means we have a auto increment primary key, which -// will get automatically bound to your struct post-insert -// -t1 := dbmap.AddTableWithName(Invoice{}, "invoice_test").SetKeys(true, "Id") -t2 := dbmap.AddTableWithName(Person{}, "person_test").SetKeys(true, "Id") -t3 := dbmap.AddTableWithName(Product{}, "product_test").SetKeys(true, "Id") -``` - -### Struct Embedding - -gorp supports embedding structs. For example: - -```go -type Names struct { - FirstName string - LastName string -} - -type WithEmbeddedStruct struct { - Id int64 - Names -} - -es := &WithEmbeddedStruct{-1, Names{FirstName: "Alice", LastName: "Smith"}} -err := dbmap.Insert(es) -``` - -See the `TestWithEmbeddedStruct` function in `gorp_test.go` for a full example. - -### Create/Drop Tables ### - -Automatically create / drop registered tables. This is useful for unit tests -but is entirely optional. You can of course use gorp with tables created manually, -or with a separate migration tool (like goose: https://bitbucket.org/liamstask/goose). - -```go -// create all registered tables -dbmap.CreateTables() - -// same as above, but uses "if not exists" clause to skip tables that are -// already defined -dbmap.CreateTablesIfNotExists() - -// drop -dbmap.DropTables() -``` - -### SQL Logging - -Optionally you can pass in a logger to trace all SQL statements. -I recommend enabling this initially while you're getting the feel for what -gorp is doing on your behalf. - -Gorp defines a `GorpLogger` interface that Go's built in `log.Logger` satisfies. -However, you can write your own `GorpLogger` implementation, or use a package such -as `glog` if you want more control over how statements are logged. 
- -```go -// Will log all SQL statements + args as they are run -// The first arg is a string prefix to prepend to all log messages -dbmap.TraceOn("[gorp]", log.New(os.Stdout, "myapp:", log.Lmicroseconds)) - -// Turn off tracing -dbmap.TraceOff() -``` - -### Insert - -```go -// Must declare as pointers so optional callback hooks -// can operate on your data, not copies -inv1 := &Invoice{0, 100, 200, "first order", 0} -inv2 := &Invoice{0, 100, 200, "second order", 0} - -// Insert your rows -err := dbmap.Insert(inv1, inv2) - -// Because we called SetKeys(true) on Invoice, the Id field -// will be populated after the Insert() automatically -fmt.Printf("inv1.Id=%d inv2.Id=%d\n", inv1.Id, inv2.Id) -``` - -### Update - -Continuing the above example, use the `Update` method to modify an Invoice: - -```go -// count is the # of rows updated, which should be 1 in this example -count, err := dbmap.Update(inv1) -``` - -### Delete - -If you have primary key(s) defined for a struct, you can use the `Delete` -method to remove rows: - -```go -count, err := dbmap.Delete(inv1) -``` - -### Select by Key - -Use the `Get` method to fetch a single row by primary key. It returns -nil if no row is found. - -```go -// fetch Invoice with Id=99 -obj, err := dbmap.Get(Invoice{}, 99) -inv := obj.(*Invoice) -``` - -### Ad Hoc SQL - -#### SELECT - -`Select()` and `SelectOne()` provide a simple way to bind arbitrary queries to a slice -or a single struct. - -```go -// Select a slice - first return value is not needed when a slice pointer is passed to Select() -var posts []Post -_, err := dbmap.Select(&posts, "select * from post order by id") - -// You can also use primitive types -var ids []string -_, err := dbmap.Select(&ids, "select id from post") - -// Select a single row. -// Returns an error if no row found, or if more than one row is found -var post Post -err := dbmap.SelectOne(&post, "select * from post where id=?", id) -``` - -Want to do joins? Just write the SQL and the struct. gorp will bind them: - -```go -// Define a type for your join -// It *must* contain all the columns in your SELECT statement -// -// The names here should match the aliased column names you specify -// in your SQL - no additional binding work required. simple. -// -type InvoicePersonView struct { - InvoiceId int64 - PersonId int64 - Memo string - FName string -} - -// Create some rows -p1 := &Person{0, 0, 0, "bob", "smith"} -dbmap.Insert(p1) - -// notice how we can wire up p1.Id to the invoice easily -inv1 := &Invoice{0, 0, 0, "xmas order", p1.Id} -dbmap.Insert(inv1) - -// Run your query -query := "select i.Id InvoiceId, p.Id PersonId, i.Memo, p.FName " + - "from invoice_test i, person_test p " + - "where i.PersonId = p.Id" - -// pass a slice to Select() -var list []InvoicePersonView -_, err := dbmap.Select(&list, query) - -// this should test true -expected := InvoicePersonView{inv1.Id, p1.Id, inv1.Memo, p1.FName} -if reflect.DeepEqual(list[0], expected) { - fmt.Println("Woot! My join worked!") -} -``` - -#### SELECT string or int64 - -gorp provides a few convenience methods for selecting a single string or int64. - -```go -// select single int64 from db (use $1 instead of ? for postgresql) -i64, err := dbmap.SelectInt("select count(*) from foo where blah=?", blahVal) - -// select single string from db: -s, err := dbmap.SelectStr("select name from foo where blah=?", blahVal) - -``` - -#### Named bind parameters - -You may use a map or struct to bind parameters by name. This is currently -only supported in SELECT queries. 
- -```go -_, err := dbm.Select(&dest, "select * from Foo where name = :name and age = :age", map[string]interface{}{ - "name": "Rob", - "age": 31, -}) -``` - -#### UPDATE / DELETE - -You can execute raw SQL if you wish. Particularly good for batch operations. - -```go -res, err := dbmap.Exec("delete from invoice_test where PersonId=?", 10) -``` - -### Transactions - -You can batch operations into a transaction: - -```go -func InsertInv(dbmap *DbMap, inv *Invoice, per *Person) error { - // Start a new transaction - trans, err := dbmap.Begin() - if err != nil { - return err - } - - trans.Insert(per) - inv.PersonId = per.Id - trans.Insert(inv) - - // if the commit is successful, a nil error is returned - return trans.Commit() -} -``` - -### Hooks - -Use hooks to update data before/after saving to the db. Good for timestamps: - -```go -// implement the PreInsert and PreUpdate hooks -func (i *Invoice) PreInsert(s gorp.SqlExecutor) error { - i.Created = time.Now().UnixNano() - i.Updated = i.Created - return nil -} - -func (i *Invoice) PreUpdate(s gorp.SqlExecutor) error { - i.Updated = time.Now().UnixNano() - return nil -} - -// You can use the SqlExecutor to cascade additional SQL -// Take care to avoid cycles. gorp won't prevent them. -// -// Here's an example of a cascading delete -// -func (p *Person) PreDelete(s gorp.SqlExecutor) error { - query := "delete from invoice_test where PersonId=?" - err := s.Exec(query, p.Id); if err != nil { - return err - } - return nil -} -``` - -Full list of hooks that you can implement: - - PostGet - PreInsert - PostInsert - PreUpdate - PostUpdate - PreDelete - PostDelete - - All have the same signature. for example: - - func (p *MyStruct) PostUpdate(s gorp.SqlExecutor) error - -### Optimistic Locking - -gorp provides a simple optimistic locking feature, similar to Java's JPA, that -will raise an error if you try to update/delete a row whose `version` column -has a value different than the one in memory. This provides a safe way to do -"select then update" style operations without explicit read and write locks. - -```go -// Version is an auto-incremented number, managed by gorp -// If this property is present on your struct, update -// operations will be constrained -// -// For example, say we defined Person as: - -type Person struct { - Id int64 - Created int64 - Updated int64 - FName string - LName string - - // automatically used as the Version col - // use table.SetVersionCol("columnName") to map a different - // struct field as the version field - Version int64 -} - -p1 := &Person{0, 0, 0, "Bob", "Smith", 0} -dbmap.Insert(p1) // Version is now 1 - -obj, err := dbmap.Get(Person{}, p1.Id) -p2 := obj.(*Person) -p2.LName = "Edwards" -dbmap.Update(p2) // Version is now 2 - -p1.LName = "Howard" - -// Raises error because p1.Version == 1, which is out of date -count, err := dbmap.Update(p1) -_, ok := err.(gorp.OptimisticLockError) -if ok { - // should reach this statement - - // in a real app you might reload the row and retry, or - // you might propegate this to the user, depending on the desired - // semantics - fmt.Printf("Tried to update row with stale data: %v\n", err) -} else { - // some other db error occurred - log or return up the stack - fmt.Printf("Unknown db err: %v\n", err) -} -``` - -## Database Drivers - -gorp uses the Go 1 `database/sql` package. A full list of compliant drivers is available here: - -http://code.google.com/p/go-wiki/wiki/SQLDrivers - -Sadly, SQL databases differ on various issues. 
gorp provides a Dialect interface that should be -implemented per database vendor. Dialects are provided for: - -* MySQL -* PostgreSQL -* sqlite3 - -Each of these three databases pass the test suite. See `gorp_test.go` for example -DSNs for these three databases. - -Support is also provided for: - -* Oracle (contributed by @klaidliadon) -* SQL Server (contributed by @qrawl) - use driver: github.com/denisenkom/go-mssqldb - -Note that these databases are not covered by CI and I (@coopernurse) have no good way to -test them locally. So please try them and send patches as needed, but expect a bit more -unpredicability. - -## Known Issues - -### SQL placeholder portability - -Different databases use different strings to indicate variable placeholders in -prepared SQL statements. Unlike some database abstraction layers (such as JDBC), -Go's `database/sql` does not standardize this. - -SQL generated by gorp in the `Insert`, `Update`, `Delete`, and `Get` methods delegates -to a Dialect implementation for each database, and will generate portable SQL. - -Raw SQL strings passed to `Exec`, `Select`, `SelectOne`, `SelectInt`, etc will not be -parsed. Consequently you may have portability issues if you write a query like this: - -```go -// works on MySQL and Sqlite3, but not with Postgresql -err := dbmap.SelectOne(&val, "select * from foo where id = ?", 30) -``` - -In `Select` and `SelectOne` you can use named parameters to work around this. -The following is portable: - -```go -err := dbmap.SelectOne(&val, "select * from foo where id = :id", - map[string]interface{} { "id": 30}) -``` - -### time.Time and time zones - -gorp will pass `time.Time` fields through to the `database/sql` driver, but note that -the behavior of this type varies across database drivers. - -MySQL users should be especially cautious. See: https://github.com/ziutek/mymysql/pull/77 - -To avoid any potential issues with timezone/DST, consider using an integer field for time -data and storing UNIX time. - -## Running the tests - -The included tests may be run against MySQL, Postgresql, or sqlite3. -You must set two environment variables so the test code knows which driver to -use, and how to connect to your database. - -```sh -# MySQL example: -export GORP_TEST_DSN=gomysql_test/gomysql_test/abc123 -export GORP_TEST_DIALECT=mysql - -# run the tests -go test - -# run the tests and benchmarks -go test -bench="Bench" -benchtime 10 -``` - -Valid `GORP_TEST_DIALECT` values are: "mysql", "postgres", "sqlite3" -See the `test_all.sh` script for examples of all 3 databases. This is the script I run -locally to test the library. - -## Performance - -gorp uses reflection to construct SQL queries and bind parameters. See the BenchmarkNativeCrud vs BenchmarkGorpCrud in gorp_test.go for a simple perf test. On my MacBook Pro gorp is about 2-3% slower than hand written SQL. - -## Help/Support - -IRC: #gorp -Mailing list: gorp-dev@googlegroups.com -Bugs/Enhancements: Create a github issue - -## Pull requests / Contributions - -Contributions are very welcome. Please follow these guidelines: - -* Fork the `master` branch and issue pull requests targeting the `master` branch -* If you are adding an enhancement, please open an issue first with your proposed change. -* Changes that break backwards compatibility in the public API are only accepted after we - discuss on a GitHub issue for a while. - -Thanks! 
- -## Contributors - -* matthias-margush - column aliasing via tags -* Rob Figueiredo - @robfig -* Quinn Slack - @sqs diff --git a/vendor/gopkg.in/gorp.v1/dialect.go b/vendor/gopkg.in/gorp.v1/dialect.go deleted file mode 100644 index 5e8fdc6..0000000 --- a/vendor/gopkg.in/gorp.v1/dialect.go +++ /dev/null @@ -1,692 +0,0 @@ -package gorp - -import ( - "errors" - "fmt" - "reflect" - "strings" -) - -// The Dialect interface encapsulates behaviors that differ across -// SQL databases. At present the Dialect is only used by CreateTables() -// but this could change in the future -type Dialect interface { - - // adds a suffix to any query, usually ";" - QuerySuffix() string - - // ToSqlType returns the SQL column type to use when creating a - // table of the given Go Type. maxsize can be used to switch based on - // size. For example, in MySQL []byte could map to BLOB, MEDIUMBLOB, - // or LONGBLOB depending on the maxsize - ToSqlType(val reflect.Type, maxsize int, isAutoIncr bool) string - - // string to append to primary key column definitions - AutoIncrStr() string - - // string to bind autoincrement columns to. Empty string will - // remove reference to those columns in the INSERT statement. - AutoIncrBindValue() string - - AutoIncrInsertSuffix(col *ColumnMap) string - - // string to append to "create table" statement for vendor specific - // table attributes - CreateTableSuffix() string - - // string to truncate tables - TruncateClause() string - - // bind variable string to use when forming SQL statements - // in many dbs it is "?", but Postgres appears to use $1 - // - // i is a zero based index of the bind variable in this statement - // - BindVar(i int) string - - // Handles quoting of a field name to ensure that it doesn't raise any - // SQL parsing exceptions by using a reserved word as a field name. - QuoteField(field string) string - - // Handles building up of a schema.database string that is compatible with - // the given dialect - // - // schema - The schema that lives in - // table - The table name - QuotedTableForQuery(schema string, table string) string - - // Existance clause for table creation / deletion - IfSchemaNotExists(command, schema string) string - IfTableExists(command, schema, table string) string - IfTableNotExists(command, schema, table string) string -} - -// IntegerAutoIncrInserter is implemented by dialects that can perform -// inserts with automatically incremented integer primary keys. If -// the dialect can handle automatic assignment of more than just -// integers, see TargetedAutoIncrInserter. -type IntegerAutoIncrInserter interface { - InsertAutoIncr(exec SqlExecutor, insertSql string, params ...interface{}) (int64, error) -} - -// TargetedAutoIncrInserter is implemented by dialects that can -// perform automatic assignment of any primary key type (i.e. strings -// for uuids, integers for serials, etc). -type TargetedAutoIncrInserter interface { - // InsertAutoIncrToTarget runs an insert operation and assigns the - // automatically generated primary key directly to the passed in - // target. The target should be a pointer to the primary key - // field of the value being inserted. - InsertAutoIncrToTarget(exec SqlExecutor, insertSql string, target interface{}, params ...interface{}) error -} - -func standardInsertAutoIncr(exec SqlExecutor, insertSql string, params ...interface{}) (int64, error) { - res, err := exec.Exec(insertSql, params...) 
- if err != nil { - return 0, err - } - return res.LastInsertId() -} - -/////////////////////////////////////////////////////// -// sqlite3 // -///////////// - -type SqliteDialect struct { - suffix string -} - -func (d SqliteDialect) QuerySuffix() string { return ";" } - -func (d SqliteDialect) ToSqlType(val reflect.Type, maxsize int, isAutoIncr bool) string { - switch val.Kind() { - case reflect.Ptr: - return d.ToSqlType(val.Elem(), maxsize, isAutoIncr) - case reflect.Bool: - return "integer" - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: - return "integer" - case reflect.Float64, reflect.Float32: - return "real" - case reflect.Slice: - if val.Elem().Kind() == reflect.Uint8 { - return "blob" - } - } - - switch val.Name() { - case "NullInt64": - return "integer" - case "NullFloat64": - return "real" - case "NullBool": - return "integer" - case "Time": - return "datetime" - } - - if maxsize < 1 { - maxsize = 255 - } - return fmt.Sprintf("varchar(%d)", maxsize) -} - -// Returns autoincrement -func (d SqliteDialect) AutoIncrStr() string { - return "autoincrement" -} - -func (d SqliteDialect) AutoIncrBindValue() string { - return "null" -} - -func (d SqliteDialect) AutoIncrInsertSuffix(col *ColumnMap) string { - return "" -} - -// Returns suffix -func (d SqliteDialect) CreateTableSuffix() string { - return d.suffix -} - -// With sqlite, there technically isn't a TRUNCATE statement, -// but a DELETE FROM uses a truncate optimization: -// http://www.sqlite.org/lang_delete.html -func (d SqliteDialect) TruncateClause() string { - return "delete from" -} - -// Returns "?" -func (d SqliteDialect) BindVar(i int) string { - return "?" -} - -func (d SqliteDialect) InsertAutoIncr(exec SqlExecutor, insertSql string, params ...interface{}) (int64, error) { - return standardInsertAutoIncr(exec, insertSql, params...) 
-} - -func (d SqliteDialect) QuoteField(f string) string { - return `"` + f + `"` -} - -// sqlite does not have schemas like PostgreSQL does, so just escape it like normal -func (d SqliteDialect) QuotedTableForQuery(schema string, table string) string { - return d.QuoteField(table) -} - -func (d SqliteDialect) IfSchemaNotExists(command, schema string) string { - return fmt.Sprintf("%s if not exists", command) -} - -func (d SqliteDialect) IfTableExists(command, schema, table string) string { - return fmt.Sprintf("%s if exists", command) -} - -func (d SqliteDialect) IfTableNotExists(command, schema, table string) string { - return fmt.Sprintf("%s if not exists", command) -} - -/////////////////////////////////////////////////////// -// PostgreSQL // -//////////////// - -type PostgresDialect struct { - suffix string -} - -func (d PostgresDialect) QuerySuffix() string { return ";" } - -func (d PostgresDialect) ToSqlType(val reflect.Type, maxsize int, isAutoIncr bool) string { - switch val.Kind() { - case reflect.Ptr: - return d.ToSqlType(val.Elem(), maxsize, isAutoIncr) - case reflect.Bool: - return "boolean" - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Uint8, reflect.Uint16, reflect.Uint32: - if isAutoIncr { - return "serial" - } - return "integer" - case reflect.Int64, reflect.Uint64: - if isAutoIncr { - return "bigserial" - } - return "bigint" - case reflect.Float64: - return "double precision" - case reflect.Float32: - return "real" - case reflect.Slice: - if val.Elem().Kind() == reflect.Uint8 { - return "bytea" - } - } - - switch val.Name() { - case "NullInt64": - return "bigint" - case "NullFloat64": - return "double precision" - case "NullBool": - return "boolean" - case "Time": - return "timestamp with time zone" - } - - if maxsize > 0 { - return fmt.Sprintf("varchar(%d)", maxsize) - } else { - return "text" - } - -} - -// Returns empty string -func (d PostgresDialect) AutoIncrStr() string { - return "" -} - -func (d PostgresDialect) AutoIncrBindValue() string { - return "default" -} - -func (d PostgresDialect) AutoIncrInsertSuffix(col *ColumnMap) string { - return " returning " + col.ColumnName -} - -// Returns suffix -func (d PostgresDialect) CreateTableSuffix() string { - return d.suffix -} - -func (d PostgresDialect) TruncateClause() string { - return "truncate" -} - -// Returns "$(i+1)" -func (d PostgresDialect) BindVar(i int) string { - return fmt.Sprintf("$%d", i+1) -} - -func (d PostgresDialect) InsertAutoIncrToTarget(exec SqlExecutor, insertSql string, target interface{}, params ...interface{}) error { - rows, err := exec.query(insertSql, params...) - if err != nil { - return err - } - defer rows.Close() - - if rows.Next() { - err := rows.Scan(target) - return err - } - - return errors.New("No serial value returned for insert: " + insertSql + " Encountered error: " + rows.Err().Error()) -} - -func (d PostgresDialect) QuoteField(f string) string { - return `"` + strings.ToLower(f) + `"` -} - -func (d PostgresDialect) QuotedTableForQuery(schema string, table string) string { - if strings.TrimSpace(schema) == "" { - return d.QuoteField(table) - } - - return schema + "." 
+ d.QuoteField(table) -} - -func (d PostgresDialect) IfSchemaNotExists(command, schema string) string { - return fmt.Sprintf("%s if not exists", command) -} - -func (d PostgresDialect) IfTableExists(command, schema, table string) string { - return fmt.Sprintf("%s if exists", command) -} - -func (d PostgresDialect) IfTableNotExists(command, schema, table string) string { - return fmt.Sprintf("%s if not exists", command) -} - -/////////////////////////////////////////////////////// -// MySQL // -/////////// - -// Implementation of Dialect for MySQL databases. -type MySQLDialect struct { - - // Engine is the storage engine to use "InnoDB" vs "MyISAM" for example - Engine string - - // Encoding is the character encoding to use for created tables - Encoding string -} - -func (d MySQLDialect) QuerySuffix() string { return ";" } - -func (d MySQLDialect) ToSqlType(val reflect.Type, maxsize int, isAutoIncr bool) string { - switch val.Kind() { - case reflect.Ptr: - return d.ToSqlType(val.Elem(), maxsize, isAutoIncr) - case reflect.Bool: - return "boolean" - case reflect.Int8: - return "tinyint" - case reflect.Uint8: - return "tinyint unsigned" - case reflect.Int16: - return "smallint" - case reflect.Uint16: - return "smallint unsigned" - case reflect.Int, reflect.Int32: - return "int" - case reflect.Uint, reflect.Uint32: - return "int unsigned" - case reflect.Int64: - return "bigint" - case reflect.Uint64: - return "bigint unsigned" - case reflect.Float64, reflect.Float32: - return "double" - case reflect.Slice: - if val.Elem().Kind() == reflect.Uint8 { - return "mediumblob" - } - } - - switch val.Name() { - case "NullInt64": - return "bigint" - case "NullFloat64": - return "double" - case "NullBool": - return "tinyint" - case "Time": - return "datetime" - } - - if maxsize < 1 { - maxsize = 255 - } - return fmt.Sprintf("varchar(%d)", maxsize) -} - -// Returns auto_increment -func (d MySQLDialect) AutoIncrStr() string { - return "auto_increment" -} - -func (d MySQLDialect) AutoIncrBindValue() string { - return "null" -} - -func (d MySQLDialect) AutoIncrInsertSuffix(col *ColumnMap) string { - return "" -} - -// Returns engine=%s charset=%s based on values stored on struct -func (d MySQLDialect) CreateTableSuffix() string { - if d.Engine == "" || d.Encoding == "" { - msg := "gorp - undefined" - - if d.Engine == "" { - msg += " MySQLDialect.Engine" - } - if d.Engine == "" && d.Encoding == "" { - msg += "," - } - if d.Encoding == "" { - msg += " MySQLDialect.Encoding" - } - msg += ". Check that your MySQLDialect was correctly initialized when declared." - panic(msg) - } - - return fmt.Sprintf(" engine=%s charset=%s", d.Engine, d.Encoding) -} - -func (d MySQLDialect) TruncateClause() string { - return "truncate" -} - -// Returns "?" -func (d MySQLDialect) BindVar(i int) string { - return "?" -} - -func (d MySQLDialect) InsertAutoIncr(exec SqlExecutor, insertSql string, params ...interface{}) (int64, error) { - return standardInsertAutoIncr(exec, insertSql, params...) -} - -func (d MySQLDialect) QuoteField(f string) string { - return "`" + f + "`" -} - -func (d MySQLDialect) QuotedTableForQuery(schema string, table string) string { - if strings.TrimSpace(schema) == "" { - return d.QuoteField(table) - } - - return schema + "." 
+ d.QuoteField(table) -} - -func (d MySQLDialect) IfSchemaNotExists(command, schema string) string { - return fmt.Sprintf("%s if not exists", command) -} - -func (d MySQLDialect) IfTableExists(command, schema, table string) string { - return fmt.Sprintf("%s if exists", command) -} - -func (d MySQLDialect) IfTableNotExists(command, schema, table string) string { - return fmt.Sprintf("%s if not exists", command) -} - -/////////////////////////////////////////////////////// -// Sql Server // -//////////////// - -// Implementation of Dialect for Microsoft SQL Server databases. -// Tested on SQL Server 2008 with driver: github.com/denisenkom/go-mssqldb - -type SqlServerDialect struct { - suffix string -} - -func (d SqlServerDialect) ToSqlType(val reflect.Type, maxsize int, isAutoIncr bool) string { - switch val.Kind() { - case reflect.Ptr: - return d.ToSqlType(val.Elem(), maxsize, isAutoIncr) - case reflect.Bool: - return "bit" - case reflect.Int8: - return "tinyint" - case reflect.Uint8: - return "smallint" - case reflect.Int16: - return "smallint" - case reflect.Uint16: - return "int" - case reflect.Int, reflect.Int32: - return "int" - case reflect.Uint, reflect.Uint32: - return "bigint" - case reflect.Int64: - return "bigint" - case reflect.Uint64: - return "bigint" - case reflect.Float32: - return "real" - case reflect.Float64: - return "float(53)" - case reflect.Slice: - if val.Elem().Kind() == reflect.Uint8 { - return "varbinary" - } - } - - switch val.Name() { - case "NullInt64": - return "bigint" - case "NullFloat64": - return "float(53)" - case "NullBool": - return "tinyint" - case "Time": - return "datetime" - } - - if maxsize < 1 { - maxsize = 255 - } - return fmt.Sprintf("varchar(%d)", maxsize) -} - -// Returns auto_increment -func (d SqlServerDialect) AutoIncrStr() string { - return "identity(0,1)" -} - -// Empty string removes autoincrement columns from the INSERT statements. -func (d SqlServerDialect) AutoIncrBindValue() string { - return "" -} - -func (d SqlServerDialect) AutoIncrInsertSuffix(col *ColumnMap) string { - return "" -} - -// Returns suffix -func (d SqlServerDialect) CreateTableSuffix() string { - - return d.suffix -} - -func (d SqlServerDialect) TruncateClause() string { - return "delete from" -} - -// Returns "?" -func (d SqlServerDialect) BindVar(i int) string { - return "?" -} - -func (d SqlServerDialect) InsertAutoIncr(exec SqlExecutor, insertSql string, params ...interface{}) (int64, error) { - return standardInsertAutoIncr(exec, insertSql, params...) -} - -func (d SqlServerDialect) QuoteField(f string) string { - return `"` + f + `"` -} - -func (d SqlServerDialect) QuotedTableForQuery(schema string, table string) string { - if strings.TrimSpace(schema) == "" { - return table - } - return schema + "." 
+ table -} - -func (d SqlServerDialect) QuerySuffix() string { return ";" } - -func (d SqlServerDialect) IfSchemaNotExists(command, schema string) string { - s := fmt.Sprintf("if not exists (select name from sys.schemas where name = '%s') %s", schema, command) - return s -} - -func (d SqlServerDialect) IfTableExists(command, schema, table string) string { - var schema_clause string - if strings.TrimSpace(schema) != "" { - schema_clause = fmt.Sprintf("table_schema = '%s' and ", schema) - } - s := fmt.Sprintf("if exists (select * from information_schema.tables where %stable_name = '%s') %s", schema_clause, table, command) - return s -} - -func (d SqlServerDialect) IfTableNotExists(command, schema, table string) string { - var schema_clause string - if strings.TrimSpace(schema) != "" { - schema_clause = fmt.Sprintf("table_schema = '%s' and ", schema) - } - s := fmt.Sprintf("if not exists (select * from information_schema.tables where %stable_name = '%s') %s", schema_clause, table, command) - return s -} - -/////////////////////////////////////////////////////// -// Oracle // -/////////// - -// Implementation of Dialect for Oracle databases. -type OracleDialect struct{} - -func (d OracleDialect) QuerySuffix() string { return "" } - -func (d OracleDialect) ToSqlType(val reflect.Type, maxsize int, isAutoIncr bool) string { - switch val.Kind() { - case reflect.Ptr: - return d.ToSqlType(val.Elem(), maxsize, isAutoIncr) - case reflect.Bool: - return "boolean" - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Uint8, reflect.Uint16, reflect.Uint32: - if isAutoIncr { - return "serial" - } - return "integer" - case reflect.Int64, reflect.Uint64: - if isAutoIncr { - return "bigserial" - } - return "bigint" - case reflect.Float64: - return "double precision" - case reflect.Float32: - return "real" - case reflect.Slice: - if val.Elem().Kind() == reflect.Uint8 { - return "bytea" - } - } - - switch val.Name() { - case "NullInt64": - return "bigint" - case "NullFloat64": - return "double precision" - case "NullBool": - return "boolean" - case "NullTime", "Time": - return "timestamp with time zone" - } - - if maxsize > 0 { - return fmt.Sprintf("varchar(%d)", maxsize) - } else { - return "text" - } - -} - -// Returns empty string -func (d OracleDialect) AutoIncrStr() string { - return "" -} - -func (d OracleDialect) AutoIncrBindValue() string { - return "default" -} - -func (d OracleDialect) AutoIncrInsertSuffix(col *ColumnMap) string { - return " returning " + col.ColumnName -} - -// Returns suffix -func (d OracleDialect) CreateTableSuffix() string { - return "" -} - -func (d OracleDialect) TruncateClause() string { - return "truncate" -} - -// Returns "$(i+1)" -func (d OracleDialect) BindVar(i int) string { - return fmt.Sprintf(":%d", i+1) -} - -func (d OracleDialect) InsertAutoIncr(exec SqlExecutor, insertSql string, params ...interface{}) (int64, error) { - rows, err := exec.query(insertSql, params...) - if err != nil { - return 0, err - } - defer rows.Close() - - if rows.Next() { - var id int64 - err := rows.Scan(&id) - return id, err - } - - return 0, errors.New("No serial value returned for insert: " + insertSql + " Encountered error: " + rows.Err().Error()) -} - -func (d OracleDialect) QuoteField(f string) string { - return `"` + strings.ToUpper(f) + `"` -} - -func (d OracleDialect) QuotedTableForQuery(schema string, table string) string { - if strings.TrimSpace(schema) == "" { - return d.QuoteField(table) - } - - return schema + "." 
+ d.QuoteField(table) -} - -func (d OracleDialect) IfSchemaNotExists(command, schema string) string { - return fmt.Sprintf("%s if not exists", command) -} - -func (d OracleDialect) IfTableExists(command, schema, table string) string { - return fmt.Sprintf("%s if exists", command) -} - -func (d OracleDialect) IfTableNotExists(command, schema, table string) string { - return fmt.Sprintf("%s if not exists", command) -} diff --git a/vendor/gopkg.in/gorp.v1/errors.go b/vendor/gopkg.in/gorp.v1/errors.go deleted file mode 100644 index 356d684..0000000 --- a/vendor/gopkg.in/gorp.v1/errors.go +++ /dev/null @@ -1,26 +0,0 @@ -package gorp - -import ( - "fmt" -) - -// A non-fatal error, when a select query returns columns that do not exist -// as fields in the struct it is being mapped to -type NoFieldInTypeError struct { - TypeName string - MissingColNames []string -} - -func (err *NoFieldInTypeError) Error() string { - return fmt.Sprintf("gorp: No fields %+v in type %s", err.MissingColNames, err.TypeName) -} - -// returns true if the error is non-fatal (ie, we shouldn't immediately return) -func NonFatalError(err error) bool { - switch err.(type) { - case *NoFieldInTypeError: - return true - default: - return false - } -} diff --git a/vendor/gopkg.in/gorp.v1/gorp.go b/vendor/gopkg.in/gorp.v1/gorp.go deleted file mode 100644 index 1ad6187..0000000 --- a/vendor/gopkg.in/gorp.v1/gorp.go +++ /dev/null @@ -1,2085 +0,0 @@ -// Copyright 2012 James Cooper. All rights reserved. -// Use of this source code is governed by a MIT-style -// license that can be found in the LICENSE file. - -// Package gorp provides a simple way to marshal Go structs to and from -// SQL databases. It uses the database/sql package, and should work with any -// compliant database/sql driver. -// -// Source code and project home: -// https://github.com/coopernurse/gorp -// -package gorp - -import ( - "bytes" - "database/sql" - "database/sql/driver" - "errors" - "fmt" - "reflect" - "regexp" - "strings" - "time" - "log" - "os" -) - -// Oracle String (empty string is null) -type OracleString struct { - sql.NullString -} - -// Scan implements the Scanner interface. -func (os *OracleString) Scan(value interface{}) error { - if value == nil { - os.String, os.Valid = "", false - return nil - } - os.Valid = true - return os.NullString.Scan(value) -} - -// Value implements the driver Valuer interface. -func (os OracleString) Value() (driver.Value, error) { - if !os.Valid || os.String == "" { - return nil, nil - } - return os.String, nil -} - -// A nullable Time value -type NullTime struct { - Time time.Time - Valid bool // Valid is true if Time is not NULL -} - -// Scan implements the Scanner interface. -func (nt *NullTime) Scan(value interface{}) error { - nt.Time, nt.Valid = value.(time.Time) - return nil -} - -// Value implements the driver Valuer interface. -func (nt NullTime) Value() (driver.Value, error) { - if !nt.Valid { - return nil, nil - } - return nt.Time, nil -} - -var zeroVal reflect.Value -var versFieldConst = "[gorp_ver_field]" - -// OptimisticLockError is returned by Update() or Delete() if the -// struct being modified has a Version field and the value is not equal to -// the current value in the database -type OptimisticLockError struct { - // Table name where the lock error occurred - TableName string - - // Primary key values of the row being updated/deleted - Keys []interface{} - - // true if a row was found with those keys, indicating the - // LocalVersion is stale. 
false if no value was found with those - // keys, suggesting the row has been deleted since loaded, or - // was never inserted to begin with - RowExists bool - - // Version value on the struct passed to Update/Delete. This value is - // out of sync with the database. - LocalVersion int64 -} - -// Error returns a description of the cause of the lock error -func (e OptimisticLockError) Error() string { - if e.RowExists { - return fmt.Sprintf("gorp: OptimisticLockError table=%s keys=%v out of date version=%d", e.TableName, e.Keys, e.LocalVersion) - } - - return fmt.Sprintf("gorp: OptimisticLockError no row found for table=%s keys=%v", e.TableName, e.Keys) -} - -// The TypeConverter interface provides a way to map a value of one -// type to another type when persisting to, or loading from, a database. -// -// Example use cases: Implement type converter to convert bool types to "y"/"n" strings, -// or serialize a struct member as a JSON blob. -type TypeConverter interface { - // ToDb converts val to another type. Called before INSERT/UPDATE operations - ToDb(val interface{}) (interface{}, error) - - // FromDb returns a CustomScanner appropriate for this type. This will be used - // to hold values returned from SELECT queries. - // - // In particular the CustomScanner returned should implement a Binder - // function appropriate for the Go type you wish to convert the db value to - // - // If bool==false, then no custom scanner will be used for this field. - FromDb(target interface{}) (CustomScanner, bool) -} - -// CustomScanner binds a database column value to a Go type -type CustomScanner struct { - // After a row is scanned, Holder will contain the value from the database column. - // Initialize the CustomScanner with the concrete Go type you wish the database - // driver to scan the raw column into. - Holder interface{} - // Target typically holds a pointer to the target struct field to bind the Holder - // value to. - Target interface{} - // Binder is a custom function that converts the holder value to the target type - // and sets target accordingly. This function should return error if a problem - // occurs converting the holder to the target. - Binder func(holder interface{}, target interface{}) error -} - -// Bind is called automatically by gorp after Scan() -func (me CustomScanner) Bind() error { - return me.Binder(me.Holder, me.Target) -} - -// DbMap is the root gorp mapping object. Create one of these for each -// database schema you wish to map. Each DbMap contains a list of -// mapped tables. -// -// Example: -// -// dialect := gorp.MySQLDialect{"InnoDB", "UTF8"} -// dbmap := &gorp.DbMap{Db: db, Dialect: dialect} -// -type DbMap struct { - // Db handle to use with this map - Db *sql.DB - - // Dialect implementation to use with this map - Dialect Dialect - - TypeConverter TypeConverter - - tables []*TableMap - logger GorpLogger - logPrefix string -} - -// TableMap represents a mapping between a Go struct and a database table -// Use dbmap.AddTable() or dbmap.AddTableWithName() to create these -type TableMap struct { - // Name of database table. - TableName string - SchemaName string - gotype reflect.Type - Columns []*ColumnMap - keys []*ColumnMap - uniqueTogether [][]string - version *ColumnMap - insertPlan bindPlan - updatePlan bindPlan - deletePlan bindPlan - getPlan bindPlan - dbmap *DbMap -} - -// ResetSql removes cached insert/update/select/delete SQL strings -// associated with this TableMap. Call this if you've modified -// any column names or the table name itself. 
-func (t *TableMap) ResetSql() { - t.insertPlan = bindPlan{} - t.updatePlan = bindPlan{} - t.deletePlan = bindPlan{} - t.getPlan = bindPlan{} -} - -// SetKeys lets you specify the fields on a struct that map to primary -// key columns on the table. If isAutoIncr is set, result.LastInsertId() -// will be used after INSERT to bind the generated id to the Go struct. -// -// Automatically calls ResetSql() to ensure SQL statements are regenerated. -// -// Panics if isAutoIncr is true, and fieldNames length != 1 -// -func (t *TableMap) SetKeys(isAutoIncr bool, fieldNames ...string) *TableMap { - if isAutoIncr && len(fieldNames) != 1 { - panic(fmt.Sprintf( - "gorp: SetKeys: fieldNames length must be 1 if key is auto-increment. (Saw %v fieldNames)", - len(fieldNames))) - } - t.keys = make([]*ColumnMap, 0) - for _, name := range fieldNames { - colmap := t.ColMap(name) - colmap.isPK = true - colmap.isAutoIncr = isAutoIncr - t.keys = append(t.keys, colmap) - } - t.ResetSql() - - return t -} - -// SetUniqueTogether lets you specify uniqueness constraints across multiple -// columns on the table. Each call adds an additional constraint for the -// specified columns. -// -// Automatically calls ResetSql() to ensure SQL statements are regenerated. -// -// Panics if fieldNames length < 2. -// -func (t *TableMap) SetUniqueTogether(fieldNames ...string) *TableMap { - if len(fieldNames) < 2 { - panic(fmt.Sprintf( - "gorp: SetUniqueTogether: must provide at least two fieldNames to set uniqueness constraint.")) - } - - columns := make([]string, 0) - for _, name := range fieldNames { - columns = append(columns, name) - } - t.uniqueTogether = append(t.uniqueTogether, columns) - t.ResetSql() - - return t -} - -// ColMap returns the ColumnMap pointer matching the given struct field -// name. It panics if the struct does not contain a field matching this -// name. -func (t *TableMap) ColMap(field string) *ColumnMap { - col := colMapOrNil(t, field) - if col == nil { - e := fmt.Sprintf("No ColumnMap in table %s type %s with field %s", - t.TableName, t.gotype.Name(), field) - - panic(e) - } - return col -} - -func colMapOrNil(t *TableMap, field string) *ColumnMap { - for _, col := range t.Columns { - if col.fieldName == field || col.ColumnName == field { - return col - } - } - return nil -} - -// SetVersionCol sets the column to use as the Version field. By default -// the "Version" field is used. Returns the column found, or panics -// if the struct does not contain a field matching this name. -// -// Automatically calls ResetSql() to ensure SQL statements are regenerated. 
-func (t *TableMap) SetVersionCol(field string) *ColumnMap { - c := t.ColMap(field) - t.version = c - t.ResetSql() - return c -} - -type bindPlan struct { - query string - argFields []string - keyFields []string - versField string - autoIncrIdx int - autoIncrFieldName string -} - -func (plan bindPlan) createBindInstance(elem reflect.Value, conv TypeConverter) (bindInstance, error) { - bi := bindInstance{query: plan.query, autoIncrIdx: plan.autoIncrIdx, autoIncrFieldName: plan.autoIncrFieldName, versField: plan.versField} - if plan.versField != "" { - bi.existingVersion = elem.FieldByName(plan.versField).Int() - } - - var err error - - for i := 0; i < len(plan.argFields); i++ { - k := plan.argFields[i] - if k == versFieldConst { - newVer := bi.existingVersion + 1 - bi.args = append(bi.args, newVer) - if bi.existingVersion == 0 { - elem.FieldByName(plan.versField).SetInt(int64(newVer)) - } - } else { - val := elem.FieldByName(k).Interface() - if conv != nil { - val, err = conv.ToDb(val) - if err != nil { - return bindInstance{}, err - } - } - bi.args = append(bi.args, val) - } - } - - for i := 0; i < len(plan.keyFields); i++ { - k := plan.keyFields[i] - val := elem.FieldByName(k).Interface() - if conv != nil { - val, err = conv.ToDb(val) - if err != nil { - return bindInstance{}, err - } - } - bi.keys = append(bi.keys, val) - } - - return bi, nil -} - -type bindInstance struct { - query string - args []interface{} - keys []interface{} - existingVersion int64 - versField string - autoIncrIdx int - autoIncrFieldName string -} - -func (t *TableMap) bindInsert(elem reflect.Value) (bindInstance, error) { - plan := t.insertPlan - if plan.query == "" { - plan.autoIncrIdx = -1 - - s := bytes.Buffer{} - s2 := bytes.Buffer{} - s.WriteString(fmt.Sprintf("insert into %s (", t.dbmap.Dialect.QuotedTableForQuery(t.SchemaName, t.TableName))) - - x := 0 - first := true - for y := range t.Columns { - col := t.Columns[y] - if !(col.isAutoIncr && t.dbmap.Dialect.AutoIncrBindValue() == "") { - if !col.Transient { - if !first { - s.WriteString(",") - s2.WriteString(",") - } - s.WriteString(t.dbmap.Dialect.QuoteField(col.ColumnName)) - - if col.isAutoIncr { - s2.WriteString(t.dbmap.Dialect.AutoIncrBindValue()) - plan.autoIncrIdx = y - plan.autoIncrFieldName = col.fieldName - } else { - s2.WriteString(t.dbmap.Dialect.BindVar(x)) - if col == t.version { - plan.versField = col.fieldName - plan.argFields = append(plan.argFields, versFieldConst) - } else { - plan.argFields = append(plan.argFields, col.fieldName) - } - - x++ - } - first = false - } - } else { - plan.autoIncrIdx = y - plan.autoIncrFieldName = col.fieldName - } - } - s.WriteString(") values (") - s.WriteString(s2.String()) - s.WriteString(")") - if plan.autoIncrIdx > -1 { - s.WriteString(t.dbmap.Dialect.AutoIncrInsertSuffix(t.Columns[plan.autoIncrIdx])) - } - s.WriteString(t.dbmap.Dialect.QuerySuffix()) - - plan.query = s.String() - t.insertPlan = plan - } - - return plan.createBindInstance(elem, t.dbmap.TypeConverter) -} - -func (t *TableMap) bindUpdate(elem reflect.Value) (bindInstance, error) { - plan := t.updatePlan - if plan.query == "" { - - s := bytes.Buffer{} - s.WriteString(fmt.Sprintf("update %s set ", t.dbmap.Dialect.QuotedTableForQuery(t.SchemaName, t.TableName))) - x := 0 - - for y := range t.Columns { - col := t.Columns[y] - if !col.isAutoIncr && !col.Transient { - if x > 0 { - s.WriteString(", ") - } - s.WriteString(t.dbmap.Dialect.QuoteField(col.ColumnName)) - s.WriteString("=") - s.WriteString(t.dbmap.Dialect.BindVar(x)) - - if col == 
t.version { - plan.versField = col.fieldName - plan.argFields = append(plan.argFields, versFieldConst) - } else { - plan.argFields = append(plan.argFields, col.fieldName) - } - x++ - } - } - - s.WriteString(" where ") - for y := range t.keys { - col := t.keys[y] - if y > 0 { - s.WriteString(" and ") - } - s.WriteString(t.dbmap.Dialect.QuoteField(col.ColumnName)) - s.WriteString("=") - s.WriteString(t.dbmap.Dialect.BindVar(x)) - - plan.argFields = append(plan.argFields, col.fieldName) - plan.keyFields = append(plan.keyFields, col.fieldName) - x++ - } - if plan.versField != "" { - s.WriteString(" and ") - s.WriteString(t.dbmap.Dialect.QuoteField(t.version.ColumnName)) - s.WriteString("=") - s.WriteString(t.dbmap.Dialect.BindVar(x)) - plan.argFields = append(plan.argFields, plan.versField) - } - s.WriteString(t.dbmap.Dialect.QuerySuffix()) - - plan.query = s.String() - t.updatePlan = plan - } - - return plan.createBindInstance(elem, t.dbmap.TypeConverter) -} - -func (t *TableMap) bindDelete(elem reflect.Value) (bindInstance, error) { - plan := t.deletePlan - if plan.query == "" { - - s := bytes.Buffer{} - s.WriteString(fmt.Sprintf("delete from %s", t.dbmap.Dialect.QuotedTableForQuery(t.SchemaName, t.TableName))) - - for y := range t.Columns { - col := t.Columns[y] - if !col.Transient { - if col == t.version { - plan.versField = col.fieldName - } - } - } - - s.WriteString(" where ") - for x := range t.keys { - k := t.keys[x] - if x > 0 { - s.WriteString(" and ") - } - s.WriteString(t.dbmap.Dialect.QuoteField(k.ColumnName)) - s.WriteString("=") - s.WriteString(t.dbmap.Dialect.BindVar(x)) - - plan.keyFields = append(plan.keyFields, k.fieldName) - plan.argFields = append(plan.argFields, k.fieldName) - } - if plan.versField != "" { - s.WriteString(" and ") - s.WriteString(t.dbmap.Dialect.QuoteField(t.version.ColumnName)) - s.WriteString("=") - s.WriteString(t.dbmap.Dialect.BindVar(len(plan.argFields))) - - plan.argFields = append(plan.argFields, plan.versField) - } - s.WriteString(t.dbmap.Dialect.QuerySuffix()) - - plan.query = s.String() - t.deletePlan = plan - } - - return plan.createBindInstance(elem, t.dbmap.TypeConverter) -} - -func (t *TableMap) bindGet() bindPlan { - plan := t.getPlan - if plan.query == "" { - - s := bytes.Buffer{} - s.WriteString("select ") - - x := 0 - for _, col := range t.Columns { - if !col.Transient { - if x > 0 { - s.WriteString(",") - } - s.WriteString(t.dbmap.Dialect.QuoteField(col.ColumnName)) - plan.argFields = append(plan.argFields, col.fieldName) - x++ - } - } - s.WriteString(" from ") - s.WriteString(t.dbmap.Dialect.QuotedTableForQuery(t.SchemaName, t.TableName)) - s.WriteString(" where ") - for x := range t.keys { - col := t.keys[x] - if x > 0 { - s.WriteString(" and ") - } - s.WriteString(t.dbmap.Dialect.QuoteField(col.ColumnName)) - s.WriteString("=") - s.WriteString(t.dbmap.Dialect.BindVar(x)) - - plan.keyFields = append(plan.keyFields, col.fieldName) - } - s.WriteString(t.dbmap.Dialect.QuerySuffix()) - - plan.query = s.String() - t.getPlan = plan - } - - return plan -} - -// ColumnMap represents a mapping between a Go struct field and a single -// column in a table. -// Unique and MaxSize only inform the -// CreateTables() function and are not used by Insert/Update/Delete/Get. -type ColumnMap struct { - // Column name in db table - ColumnName string - - // If true, this column is skipped in generated SQL statements - Transient bool - - // If true, " unique" is added to create table statements. 
- // Not used elsewhere - Unique bool - - // Passed to Dialect.ToSqlType() to assist in informing the - // correct column type to map to in CreateTables() - // Not used elsewhere - MaxSize int - - fieldName string - gotype reflect.Type - isPK bool - isAutoIncr bool - isNotNull bool -} - -// Rename allows you to specify the column name in the table -// -// Example: table.ColMap("Updated").Rename("date_updated") -// -func (c *ColumnMap) Rename(colname string) *ColumnMap { - c.ColumnName = colname - return c -} - -// SetTransient allows you to mark the column as transient. If true -// this column will be skipped when SQL statements are generated -func (c *ColumnMap) SetTransient(b bool) *ColumnMap { - c.Transient = b - return c -} - -// SetUnique adds "unique" to the create table statements for this -// column, if b is true. -func (c *ColumnMap) SetUnique(b bool) *ColumnMap { - c.Unique = b - return c -} - -// SetNotNull adds "not null" to the create table statements for this -// column, if nn is true. -func (c *ColumnMap) SetNotNull(nn bool) *ColumnMap { - c.isNotNull = nn - return c -} - -// SetMaxSize specifies the max length of values of this column. This is -// passed to the dialect.ToSqlType() function, which can use the value -// to alter the generated type for "create table" statements -func (c *ColumnMap) SetMaxSize(size int) *ColumnMap { - c.MaxSize = size - return c -} - -// Transaction represents a database transaction. -// Insert/Update/Delete/Get/Exec operations will be run in the context -// of that transaction. Transactions should be terminated with -// a call to Commit() or Rollback() -type Transaction struct { - dbmap *DbMap - tx *sql.Tx - closed bool -} - -// SqlExecutor exposes gorp operations that can be run from Pre/Post -// hooks. This hides whether the current operation that triggered the -// hook is in a transaction. -// -// See the DbMap function docs for each of the functions below for more -// information. -type SqlExecutor interface { - Get(i interface{}, keys ...interface{}) (interface{}, error) - Insert(list ...interface{}) error - Update(list ...interface{}) (int64, error) - Delete(list ...interface{}) (int64, error) - Exec(query string, args ...interface{}) (sql.Result, error) - Select(i interface{}, query string, - args ...interface{}) ([]interface{}, error) - SelectInt(query string, args ...interface{}) (int64, error) - SelectNullInt(query string, args ...interface{}) (sql.NullInt64, error) - SelectFloat(query string, args ...interface{}) (float64, error) - SelectNullFloat(query string, args ...interface{}) (sql.NullFloat64, error) - SelectStr(query string, args ...interface{}) (string, error) - SelectNullStr(query string, args ...interface{}) (sql.NullString, error) - SelectOne(holder interface{}, query string, args ...interface{}) error - query(query string, args ...interface{}) (*sql.Rows, error) - queryRow(query string, args ...interface{}) *sql.Row -} - -// Compile-time check that DbMap and Transaction implement the SqlExecutor -// interface. -var _, _ SqlExecutor = &DbMap{}, &Transaction{} - -type GorpLogger interface { - Printf(format string, v ...interface{}) -} - -// TraceOn turns on SQL statement logging for this DbMap. After this is -// called, all SQL statements will be sent to the logger. If prefix is -// a non-empty string, it will be written to the front of all logged -// strings, which can aid in filtering log lines. -// -// Use TraceOn if you want to spy on the SQL statements that gorp -// generates. 
-// -// Note that the base log.Logger type satisfies GorpLogger, but adapters can -// easily be written for other logging packages (e.g., the golang-sanctioned -// glog framework). -func (m *DbMap) TraceOn(prefix string, logger GorpLogger) { - m.logger = logger - if prefix == "" { - m.logPrefix = prefix - } else { - m.logPrefix = fmt.Sprintf("%s ", prefix) - } -} - -// TraceOff turns off tracing. It is idempotent. -func (m *DbMap) TraceOff() { - m.logger = nil - m.logPrefix = "" -} - -// AddTable registers the given interface type with gorp. The table name -// will be given the name of the TypeOf(i). You must call this function, -// or AddTableWithName, for any struct type you wish to persist with -// the given DbMap. -// -// This operation is idempotent. If i's type is already mapped, the -// existing *TableMap is returned -func (m *DbMap) AddTable(i interface{}) *TableMap { - return m.AddTableWithName(i, "") -} - -// AddTableWithName has the same behavior as AddTable, but sets -// table.TableName to name. -func (m *DbMap) AddTableWithName(i interface{}, name string) *TableMap { - return m.AddTableWithNameAndSchema(i, "", name) -} - -// AddTableWithNameAndSchema has the same behavior as AddTable, but sets -// table.TableName to name. -func (m *DbMap) AddTableWithNameAndSchema(i interface{}, schema string, name string) *TableMap { - t := reflect.TypeOf(i) - if name == "" { - name = t.Name() - } - - // check if we have a table for this type already - // if so, update the name and return the existing pointer - for i := range m.tables { - table := m.tables[i] - if table.gotype == t { - table.TableName = name - return table - } - } - - tmap := &TableMap{gotype: t, TableName: name, SchemaName: schema, dbmap: m} - tmap.Columns, tmap.version = m.readStructColumns(t) - m.tables = append(m.tables, tmap) - - return tmap -} - -func (m *DbMap) readStructColumns(t reflect.Type) (cols []*ColumnMap, version *ColumnMap) { - n := t.NumField() - for i := 0; i < n; i++ { - f := t.Field(i) - if f.Anonymous && f.Type.Kind() == reflect.Struct { - // Recursively add nested fields in embedded structs. - subcols, subversion := m.readStructColumns(f.Type) - // Don't append nested fields that have the same field - // name as an already-mapped field. - for _, subcol := range subcols { - shouldAppend := true - for _, col := range cols { - if !subcol.Transient && subcol.fieldName == col.fieldName { - shouldAppend = false - break - } - } - if shouldAppend { - cols = append(cols, subcol) - } - } - if subversion != nil { - version = subversion - } - } else { - columnName := f.Tag.Get("db") - if columnName == "" { - columnName = f.Name - } - gotype := f.Type - if m.TypeConverter != nil { - // Make a new pointer to a value of type gotype and - // pass it to the TypeConverter's FromDb method to see - // if a different type should be used for the column - // type during table creation. - value := reflect.New(gotype).Interface() - scanner, useHolder := m.TypeConverter.FromDb(value) - if useHolder { - gotype = reflect.TypeOf(scanner.Holder) - } - } - cm := &ColumnMap{ - ColumnName: columnName, - Transient: columnName == "-", - fieldName: f.Name, - gotype: gotype, - } - // Check for nested fields of the same field name and - // override them. 
- shouldAppend := true - for index, col := range cols { - if !col.Transient && col.fieldName == cm.fieldName { - cols[index] = cm - shouldAppend = false - break - } - } - if shouldAppend { - cols = append(cols, cm) - } - if cm.fieldName == "Version" { - log.New(os.Stderr, "", log.LstdFlags).Println("Warning: Automatic mapping of Version struct members to version columns (see optimistic locking) will be deprecated in next version (V2) See: https://github.com/go-gorp/gorp/pull/214") - version = cm - } - } - } - return -} - -// CreateTables iterates through TableMaps registered to this DbMap and -// executes "create table" statements against the database for each. -// -// This is particularly useful in unit tests where you want to create -// and destroy the schema automatically. -func (m *DbMap) CreateTables() error { - return m.createTables(false) -} - -// CreateTablesIfNotExists is similar to CreateTables, but starts -// each statement with "create table if not exists" so that existing -// tables do not raise errors -func (m *DbMap) CreateTablesIfNotExists() error { - return m.createTables(true) -} - -func (m *DbMap) createTables(ifNotExists bool) error { - var err error - for i := range m.tables { - table := m.tables[i] - - s := bytes.Buffer{} - - if strings.TrimSpace(table.SchemaName) != "" { - schemaCreate := "create schema" - if ifNotExists { - s.WriteString(m.Dialect.IfSchemaNotExists(schemaCreate, table.SchemaName)) - } else { - s.WriteString(schemaCreate) - } - s.WriteString(fmt.Sprintf(" %s;", table.SchemaName)) - } - - tableCreate := "create table" - if ifNotExists { - s.WriteString(m.Dialect.IfTableNotExists(tableCreate, table.SchemaName, table.TableName)) - } else { - s.WriteString(tableCreate) - } - s.WriteString(fmt.Sprintf(" %s (", m.Dialect.QuotedTableForQuery(table.SchemaName, table.TableName))) - - x := 0 - for _, col := range table.Columns { - if !col.Transient { - if x > 0 { - s.WriteString(", ") - } - stype := m.Dialect.ToSqlType(col.gotype, col.MaxSize, col.isAutoIncr) - s.WriteString(fmt.Sprintf("%s %s", m.Dialect.QuoteField(col.ColumnName), stype)) - - if col.isPK || col.isNotNull { - s.WriteString(" not null") - } - if col.isPK && len(table.keys) == 1 { - s.WriteString(" primary key") - } - if col.Unique { - s.WriteString(" unique") - } - if col.isAutoIncr { - s.WriteString(fmt.Sprintf(" %s", m.Dialect.AutoIncrStr())) - } - - x++ - } - } - if len(table.keys) > 1 { - s.WriteString(", primary key (") - for x := range table.keys { - if x > 0 { - s.WriteString(", ") - } - s.WriteString(m.Dialect.QuoteField(table.keys[x].ColumnName)) - } - s.WriteString(")") - } - if len(table.uniqueTogether) > 0 { - for _, columns := range table.uniqueTogether { - s.WriteString(", unique (") - for i, column := range columns { - if i > 0 { - s.WriteString(", ") - } - s.WriteString(m.Dialect.QuoteField(column)) - } - s.WriteString(")") - } - } - s.WriteString(") ") - s.WriteString(m.Dialect.CreateTableSuffix()) - s.WriteString(m.Dialect.QuerySuffix()) - _, err = m.Exec(s.String()) - if err != nil { - break - } - } - return err -} - -// DropTable drops an individual table. Will throw an error -// if the table does not exist. -func (m *DbMap) DropTable(table interface{}) error { - t := reflect.TypeOf(table) - return m.dropTable(t, false) -} - -// DropTable drops an individual table. Will NOT throw an error -// if the table does not exist. 
-func (m *DbMap) DropTableIfExists(table interface{}) error { - t := reflect.TypeOf(table) - return m.dropTable(t, true) -} - -// DropTables iterates through TableMaps registered to this DbMap and -// executes "drop table" statements against the database for each. -func (m *DbMap) DropTables() error { - return m.dropTables(false) -} - -// DropTablesIfExists is the same as DropTables, but uses the "if exists" clause to -// avoid errors for tables that do not exist. -func (m *DbMap) DropTablesIfExists() error { - return m.dropTables(true) -} - -// Goes through all the registered tables, dropping them one by one. -// If an error is encountered, then it is returned and the rest of -// the tables are not dropped. -func (m *DbMap) dropTables(addIfExists bool) (err error) { - for _, table := range m.tables { - err = m.dropTableImpl(table, addIfExists) - if err != nil { - return - } - } - return err -} - -// Implementation of dropping a single table. -func (m *DbMap) dropTable(t reflect.Type, addIfExists bool) error { - table := tableOrNil(m, t) - if table == nil { - return errors.New(fmt.Sprintf("table %s was not registered!", table.TableName)) - } - - return m.dropTableImpl(table, addIfExists) -} - -func (m *DbMap) dropTableImpl(table *TableMap, ifExists bool) (err error) { - tableDrop := "drop table" - if ifExists { - tableDrop = m.Dialect.IfTableExists(tableDrop, table.SchemaName, table.TableName) - } - _, err = m.Exec(fmt.Sprintf("%s %s;", tableDrop, m.Dialect.QuotedTableForQuery(table.SchemaName, table.TableName))) - return err -} - -// TruncateTables iterates through TableMaps registered to this DbMap and -// executes "truncate table" statements against the database for each, or in the case of -// sqlite, a "delete from" with no "where" clause, which uses the truncate optimization -// (http://www.sqlite.org/lang_delete.html) -func (m *DbMap) TruncateTables() error { - var err error - for i := range m.tables { - table := m.tables[i] - _, e := m.Exec(fmt.Sprintf("%s %s;", m.Dialect.TruncateClause(), m.Dialect.QuotedTableForQuery(table.SchemaName, table.TableName))) - if e != nil { - err = e - } - } - return err -} - -// Insert runs a SQL INSERT statement for each element in list. List -// items must be pointers. -// -// Any interface whose TableMap has an auto-increment primary key will -// have its last insert id bound to the PK field on the struct. -// -// The hook functions PreInsert() and/or PostInsert() will be executed -// before/after the INSERT statement if the interface defines them. -// -// Panics if any interface in the list has not been registered with AddTable -func (m *DbMap) Insert(list ...interface{}) error { - return insert(m, m, list...) -} - -// Update runs a SQL UPDATE statement for each element in list. List -// items must be pointers. -// -// The hook functions PreUpdate() and/or PostUpdate() will be executed -// before/after the UPDATE statement if the interface defines them. -// -// Returns the number of rows updated. -// -// Returns an error if SetKeys has not been called on the TableMap -// Panics if any interface in the list has not been registered with AddTable -func (m *DbMap) Update(list ...interface{}) (int64, error) { - return update(m, m, list...) -} - -// Delete runs a SQL DELETE statement for each element in list. List -// items must be pointers. -// -// The hook functions PreDelete() and/or PostDelete() will be executed -// before/after the DELETE statement if the interface defines them. -// -// Returns the number of rows deleted. 
-// -// Returns an error if SetKeys has not been called on the TableMap -// Panics if any interface in the list has not been registered with AddTable -func (m *DbMap) Delete(list ...interface{}) (int64, error) { - return delete(m, m, list...) -} - -// Get runs a SQL SELECT to fetch a single row from the table based on the -// primary key(s) -// -// i should be an empty value for the struct to load. keys should be -// the primary key value(s) for the row to load. If multiple keys -// exist on the table, the order should match the column order -// specified in SetKeys() when the table mapping was defined. -// -// The hook function PostGet() will be executed after the SELECT -// statement if the interface defines them. -// -// Returns a pointer to a struct that matches or nil if no row is found. -// -// Returns an error if SetKeys has not been called on the TableMap -// Panics if any interface in the list has not been registered with AddTable -func (m *DbMap) Get(i interface{}, keys ...interface{}) (interface{}, error) { - return get(m, m, i, keys...) -} - -// Select runs an arbitrary SQL query, binding the columns in the result -// to fields on the struct specified by i. args represent the bind -// parameters for the SQL statement. -// -// Column names on the SELECT statement should be aliased to the field names -// on the struct i. Returns an error if one or more columns in the result -// do not match. It is OK if fields on i are not part of the SQL -// statement. -// -// The hook function PostGet() will be executed after the SELECT -// statement if the interface defines them. -// -// Values are returned in one of two ways: -// 1. If i is a struct or a pointer to a struct, returns a slice of pointers to -// matching rows of type i. -// 2. If i is a pointer to a slice, the results will be appended to that slice -// and nil returned. -// -// i does NOT need to be registered with AddTable() -func (m *DbMap) Select(i interface{}, query string, args ...interface{}) ([]interface{}, error) { - return hookedselect(m, m, i, query, args...) -} - -// Exec runs an arbitrary SQL statement. args represent the bind parameters. -// This is equivalent to running: Exec() using database/sql -func (m *DbMap) Exec(query string, args ...interface{}) (sql.Result, error) { - m.trace(query, args...) - return m.Db.Exec(query, args...) -} - -// SelectInt is a convenience wrapper around the gorp.SelectInt function -func (m *DbMap) SelectInt(query string, args ...interface{}) (int64, error) { - return SelectInt(m, query, args...) -} - -// SelectNullInt is a convenience wrapper around the gorp.SelectNullInt function -func (m *DbMap) SelectNullInt(query string, args ...interface{}) (sql.NullInt64, error) { - return SelectNullInt(m, query, args...) -} - -// SelectFloat is a convenience wrapper around the gorp.SelectFlot function -func (m *DbMap) SelectFloat(query string, args ...interface{}) (float64, error) { - return SelectFloat(m, query, args...) -} - -// SelectNullFloat is a convenience wrapper around the gorp.SelectNullFloat function -func (m *DbMap) SelectNullFloat(query string, args ...interface{}) (sql.NullFloat64, error) { - return SelectNullFloat(m, query, args...) -} - -// SelectStr is a convenience wrapper around the gorp.SelectStr function -func (m *DbMap) SelectStr(query string, args ...interface{}) (string, error) { - return SelectStr(m, query, args...) 
-} - -// SelectNullStr is a convenience wrapper around the gorp.SelectNullStr function -func (m *DbMap) SelectNullStr(query string, args ...interface{}) (sql.NullString, error) { - return SelectNullStr(m, query, args...) -} - -// SelectOne is a convenience wrapper around the gorp.SelectOne function -func (m *DbMap) SelectOne(holder interface{}, query string, args ...interface{}) error { - return SelectOne(m, m, holder, query, args...) -} - -// Begin starts a gorp Transaction -func (m *DbMap) Begin() (*Transaction, error) { - m.trace("begin;") - tx, err := m.Db.Begin() - if err != nil { - return nil, err - } - return &Transaction{m, tx, false}, nil -} - -// TableFor returns the *TableMap corresponding to the given Go Type -// If no table is mapped to that type an error is returned. -// If checkPK is true and the mapped table has no registered PKs, an error is returned. -func (m *DbMap) TableFor(t reflect.Type, checkPK bool) (*TableMap, error) { - table := tableOrNil(m, t) - if table == nil { - return nil, errors.New(fmt.Sprintf("No table found for type: %v", t.Name())) - } - - if checkPK && len(table.keys) < 1 { - e := fmt.Sprintf("gorp: No keys defined for table: %s", - table.TableName) - return nil, errors.New(e) - } - - return table, nil -} - -// Prepare creates a prepared statement for later queries or executions. -// Multiple queries or executions may be run concurrently from the returned statement. -// This is equivalent to running: Prepare() using database/sql -func (m *DbMap) Prepare(query string) (*sql.Stmt, error) { - m.trace(query, nil) - return m.Db.Prepare(query) -} - -func tableOrNil(m *DbMap, t reflect.Type) *TableMap { - for i := range m.tables { - table := m.tables[i] - if table.gotype == t { - return table - } - } - return nil -} - -func (m *DbMap) tableForPointer(ptr interface{}, checkPK bool) (*TableMap, reflect.Value, error) { - ptrv := reflect.ValueOf(ptr) - if ptrv.Kind() != reflect.Ptr { - e := fmt.Sprintf("gorp: passed non-pointer: %v (kind=%v)", ptr, - ptrv.Kind()) - return nil, reflect.Value{}, errors.New(e) - } - elem := ptrv.Elem() - etype := reflect.TypeOf(elem.Interface()) - t, err := m.TableFor(etype, checkPK) - if err != nil { - return nil, reflect.Value{}, err - } - - return t, elem, nil -} - -func (m *DbMap) queryRow(query string, args ...interface{}) *sql.Row { - m.trace(query, args...) - return m.Db.QueryRow(query, args...) -} - -func (m *DbMap) query(query string, args ...interface{}) (*sql.Rows, error) { - m.trace(query, args...) - return m.Db.Query(query, args...) -} - -func (m *DbMap) trace(query string, args ...interface{}) { - if m.logger != nil { - var margs = argsString(args...) - m.logger.Printf("%s%s [%s]", m.logPrefix, query, margs) - } -} - -func argsString(args ...interface{}) string { - var margs string - for i, a := range args { - var v interface{} = a - if x, ok := v.(driver.Valuer); ok { - y, err := x.Value() - if err == nil { - v = y - } - } - switch v.(type) { - case string: - v = fmt.Sprintf("%q", v) - default: - v = fmt.Sprintf("%v", v) - } - margs += fmt.Sprintf("%d:%s", i+1, v) - if i+1 < len(args) { - margs += " " - } - } - return margs -} - -/////////////// - -// Insert has the same behavior as DbMap.Insert(), but runs in a transaction. -func (t *Transaction) Insert(list ...interface{}) error { - return insert(t.dbmap, t, list...) -} - -// Update had the same behavior as DbMap.Update(), but runs in a transaction. -func (t *Transaction) Update(list ...interface{}) (int64, error) { - return update(t.dbmap, t, list...) 
-} - -// Delete has the same behavior as DbMap.Delete(), but runs in a transaction. -func (t *Transaction) Delete(list ...interface{}) (int64, error) { - return delete(t.dbmap, t, list...) -} - -// Get has the same behavior as DbMap.Get(), but runs in a transaction. -func (t *Transaction) Get(i interface{}, keys ...interface{}) (interface{}, error) { - return get(t.dbmap, t, i, keys...) -} - -// Select has the same behavior as DbMap.Select(), but runs in a transaction. -func (t *Transaction) Select(i interface{}, query string, args ...interface{}) ([]interface{}, error) { - return hookedselect(t.dbmap, t, i, query, args...) -} - -// Exec has the same behavior as DbMap.Exec(), but runs in a transaction. -func (t *Transaction) Exec(query string, args ...interface{}) (sql.Result, error) { - t.dbmap.trace(query, args...) - return t.tx.Exec(query, args...) -} - -// SelectInt is a convenience wrapper around the gorp.SelectInt function. -func (t *Transaction) SelectInt(query string, args ...interface{}) (int64, error) { - return SelectInt(t, query, args...) -} - -// SelectNullInt is a convenience wrapper around the gorp.SelectNullInt function. -func (t *Transaction) SelectNullInt(query string, args ...interface{}) (sql.NullInt64, error) { - return SelectNullInt(t, query, args...) -} - -// SelectFloat is a convenience wrapper around the gorp.SelectFloat function. -func (t *Transaction) SelectFloat(query string, args ...interface{}) (float64, error) { - return SelectFloat(t, query, args...) -} - -// SelectNullFloat is a convenience wrapper around the gorp.SelectNullFloat function. -func (t *Transaction) SelectNullFloat(query string, args ...interface{}) (sql.NullFloat64, error) { - return SelectNullFloat(t, query, args...) -} - -// SelectStr is a convenience wrapper around the gorp.SelectStr function. -func (t *Transaction) SelectStr(query string, args ...interface{}) (string, error) { - return SelectStr(t, query, args...) -} - -// SelectNullStr is a convenience wrapper around the gorp.SelectNullStr function. -func (t *Transaction) SelectNullStr(query string, args ...interface{}) (sql.NullString, error) { - return SelectNullStr(t, query, args...) -} - -// SelectOne is a convenience wrapper around the gorp.SelectOne function. -func (t *Transaction) SelectOne(holder interface{}, query string, args ...interface{}) error { - return SelectOne(t.dbmap, t, holder, query, args...) -} - -// Commit commits the underlying database transaction. -func (t *Transaction) Commit() error { - if !t.closed { - t.closed = true - t.dbmap.trace("commit;") - return t.tx.Commit() - } - - return sql.ErrTxDone -} - -// Rollback rolls back the underlying database transaction. -func (t *Transaction) Rollback() error { - if !t.closed { - t.closed = true - t.dbmap.trace("rollback;") - return t.tx.Rollback() - } - - return sql.ErrTxDone -} - -// Savepoint creates a savepoint with the given name. The name is interpolated -// directly into the SQL SAVEPOINT statement, so you must sanitize it if it is -// derived from user input. -func (t *Transaction) Savepoint(name string) error { - query := "savepoint " + t.dbmap.Dialect.QuoteField(name) - t.dbmap.trace(query, nil) - _, err := t.tx.Exec(query) - return err -} - -// RollbackToSavepoint rolls back to the savepoint with the given name. The -// name is interpolated directly into the SQL SAVEPOINT statement, so you must -// sanitize it if it is derived from user input. 
-func (t *Transaction) RollbackToSavepoint(savepoint string) error { - query := "rollback to savepoint " + t.dbmap.Dialect.QuoteField(savepoint) - t.dbmap.trace(query, nil) - _, err := t.tx.Exec(query) - return err -} - -// ReleaseSavepint releases the savepoint with the given name. The name is -// interpolated directly into the SQL SAVEPOINT statement, so you must sanitize -// it if it is derived from user input. -func (t *Transaction) ReleaseSavepoint(savepoint string) error { - query := "release savepoint " + t.dbmap.Dialect.QuoteField(savepoint) - t.dbmap.trace(query, nil) - _, err := t.tx.Exec(query) - return err -} - -// Prepare has the same behavior as DbMap.Prepare(), but runs in a transaction. -func (t *Transaction) Prepare(query string) (*sql.Stmt, error) { - t.dbmap.trace(query, nil) - return t.tx.Prepare(query) -} - -func (t *Transaction) queryRow(query string, args ...interface{}) *sql.Row { - t.dbmap.trace(query, args...) - return t.tx.QueryRow(query, args...) -} - -func (t *Transaction) query(query string, args ...interface{}) (*sql.Rows, error) { - t.dbmap.trace(query, args...) - return t.tx.Query(query, args...) -} - -/////////////// - -// SelectInt executes the given query, which should be a SELECT statement for a single -// integer column, and returns the value of the first row returned. If no rows are -// found, zero is returned. -func SelectInt(e SqlExecutor, query string, args ...interface{}) (int64, error) { - var h int64 - err := selectVal(e, &h, query, args...) - if err != nil && err != sql.ErrNoRows { - return 0, err - } - return h, nil -} - -// SelectNullInt executes the given query, which should be a SELECT statement for a single -// integer column, and returns the value of the first row returned. If no rows are -// found, the empty sql.NullInt64 value is returned. -func SelectNullInt(e SqlExecutor, query string, args ...interface{}) (sql.NullInt64, error) { - var h sql.NullInt64 - err := selectVal(e, &h, query, args...) - if err != nil && err != sql.ErrNoRows { - return h, err - } - return h, nil -} - -// SelectFloat executes the given query, which should be a SELECT statement for a single -// float column, and returns the value of the first row returned. If no rows are -// found, zero is returned. -func SelectFloat(e SqlExecutor, query string, args ...interface{}) (float64, error) { - var h float64 - err := selectVal(e, &h, query, args...) - if err != nil && err != sql.ErrNoRows { - return 0, err - } - return h, nil -} - -// SelectNullFloat executes the given query, which should be a SELECT statement for a single -// float column, and returns the value of the first row returned. If no rows are -// found, the empty sql.NullInt64 value is returned. -func SelectNullFloat(e SqlExecutor, query string, args ...interface{}) (sql.NullFloat64, error) { - var h sql.NullFloat64 - err := selectVal(e, &h, query, args...) - if err != nil && err != sql.ErrNoRows { - return h, err - } - return h, nil -} - -// SelectStr executes the given query, which should be a SELECT statement for a single -// char/varchar column, and returns the value of the first row returned. If no rows are -// found, an empty string is returned. -func SelectStr(e SqlExecutor, query string, args ...interface{}) (string, error) { - var h string - err := selectVal(e, &h, query, args...) 
- if err != nil && err != sql.ErrNoRows { - return "", err - } - return h, nil -} - -// SelectNullStr executes the given query, which should be a SELECT -// statement for a single char/varchar column, and returns the value -// of the first row returned. If no rows are found, the empty -// sql.NullString is returned. -func SelectNullStr(e SqlExecutor, query string, args ...interface{}) (sql.NullString, error) { - var h sql.NullString - err := selectVal(e, &h, query, args...) - if err != nil && err != sql.ErrNoRows { - return h, err - } - return h, nil -} - -// SelectOne executes the given query (which should be a SELECT statement) -// and binds the result to holder, which must be a pointer. -// -// If no row is found, an error (sql.ErrNoRows specifically) will be returned -// -// If more than one row is found, an error will be returned. -// -func SelectOne(m *DbMap, e SqlExecutor, holder interface{}, query string, args ...interface{}) error { - t := reflect.TypeOf(holder) - if t.Kind() == reflect.Ptr { - t = t.Elem() - } else { - return fmt.Errorf("gorp: SelectOne holder must be a pointer, but got: %t", holder) - } - - // Handle pointer to pointer - isptr := false - if t.Kind() == reflect.Ptr { - isptr = true - t = t.Elem() - } - - if t.Kind() == reflect.Struct { - var nonFatalErr error - - list, err := hookedselect(m, e, holder, query, args...) - if err != nil { - if !NonFatalError(err) { - return err - } - nonFatalErr = err - } - - dest := reflect.ValueOf(holder) - if isptr { - dest = dest.Elem() - } - - if list != nil && len(list) > 0 { - // check for multiple rows - if len(list) > 1 { - return fmt.Errorf("gorp: multiple rows returned for: %s - %v", query, args) - } - - // Initialize if nil - if dest.IsNil() { - dest.Set(reflect.New(t)) - } - - // only one row found - src := reflect.ValueOf(list[0]) - dest.Elem().Set(src.Elem()) - } else { - // No rows found, return a proper error. - return sql.ErrNoRows - } - - return nonFatalErr - } - - return selectVal(e, holder, query, args...) -} - -func selectVal(e SqlExecutor, holder interface{}, query string, args ...interface{}) error { - if len(args) == 1 { - switch m := e.(type) { - case *DbMap: - query, args = maybeExpandNamedQuery(m, query, args) - case *Transaction: - query, args = maybeExpandNamedQuery(m.dbmap, query, args) - } - } - rows, err := e.query(query, args...) - if err != nil { - return err - } - defer rows.Close() - - if !rows.Next() { - return sql.ErrNoRows - } - - return rows.Scan(holder) -} - -/////////////// - -func hookedselect(m *DbMap, exec SqlExecutor, i interface{}, query string, - args ...interface{}) ([]interface{}, error) { - - var nonFatalErr error - - list, err := rawselect(m, exec, i, query, args...) - if err != nil { - if !NonFatalError(err) { - return nil, err - } - nonFatalErr = err - } - - // Determine where the results are: written to i, or returned in list - if t, _ := toSliceType(i); t == nil { - for _, v := range list { - if v, ok := v.(HasPostGet); ok { - err := v.PostGet(exec) - if err != nil { - return nil, err - } - } - } - } else { - resultsValue := reflect.Indirect(reflect.ValueOf(i)) - for i := 0; i < resultsValue.Len(); i++ { - if v, ok := resultsValue.Index(i).Interface().(HasPostGet); ok { - err := v.PostGet(exec) - if err != nil { - return nil, err - } - } - } - } - return list, nonFatalErr -} - -func rawselect(m *DbMap, exec SqlExecutor, i interface{}, query string, - args ...interface{}) ([]interface{}, error) { - var ( - appendToSlice = false // Write results to i directly? 
- intoStruct = true // Selecting into a struct? - pointerElements = true // Are the slice elements pointers (vs values)? - ) - - var nonFatalErr error - - // get type for i, verifying it's a supported destination - t, err := toType(i) - if err != nil { - var err2 error - if t, err2 = toSliceType(i); t == nil { - if err2 != nil { - return nil, err2 - } - return nil, err - } - pointerElements = t.Kind() == reflect.Ptr - if pointerElements { - t = t.Elem() - } - appendToSlice = true - intoStruct = t.Kind() == reflect.Struct - } - - // If the caller supplied a single struct/map argument, assume a "named - // parameter" query. Extract the named arguments from the struct/map, create - // the flat arg slice, and rewrite the query to use the dialect's placeholder. - if len(args) == 1 { - query, args = maybeExpandNamedQuery(m, query, args) - } - - // Run the query - rows, err := exec.query(query, args...) - if err != nil { - return nil, err - } - defer rows.Close() - - // Fetch the column names as returned from db - cols, err := rows.Columns() - if err != nil { - return nil, err - } - - if !intoStruct && len(cols) > 1 { - return nil, fmt.Errorf("gorp: select into non-struct slice requires 1 column, got %d", len(cols)) - } - - var colToFieldIndex [][]int - if intoStruct { - if colToFieldIndex, err = columnToFieldIndex(m, t, cols); err != nil { - if !NonFatalError(err) { - return nil, err - } - nonFatalErr = err - } - } - - conv := m.TypeConverter - - // Add results to one of these two slices. - var ( - list = make([]interface{}, 0) - sliceValue = reflect.Indirect(reflect.ValueOf(i)) - ) - - for { - if !rows.Next() { - // if error occured return rawselect - if rows.Err() != nil { - return nil, rows.Err() - } - // time to exit from outer "for" loop - break - } - v := reflect.New(t) - dest := make([]interface{}, len(cols)) - - custScan := make([]CustomScanner, 0) - - for x := range cols { - f := v.Elem() - if intoStruct { - index := colToFieldIndex[x] - if index == nil { - // this field is not present in the struct, so create a dummy - // value for rows.Scan to scan into - var dummy sql.RawBytes - dest[x] = &dummy - continue - } - f = f.FieldByIndex(index) - } - target := f.Addr().Interface() - if conv != nil { - scanner, ok := conv.FromDb(target) - if ok { - target = scanner.Holder - custScan = append(custScan, scanner) - } - } - dest[x] = target - } - - err = rows.Scan(dest...) - if err != nil { - return nil, err - } - - for _, c := range custScan { - err = c.Bind() - if err != nil { - return nil, err - } - } - - if appendToSlice { - if !pointerElements { - v = v.Elem() - } - sliceValue.Set(reflect.Append(sliceValue, v)) - } else { - list = append(list, v.Interface()) - } - } - - if appendToSlice && sliceValue.IsNil() { - sliceValue.Set(reflect.MakeSlice(sliceValue.Type(), 0, 0)) - } - - return list, nonFatalErr -} - -// maybeExpandNamedQuery checks the given arg to see if it's eligible to be used -// as input to a named query. If so, it rewrites the query to use -// dialect-dependent bindvars and instantiates the corresponding slice of -// parameters by extracting data from the map / struct. -// If not, returns the input values unchanged. 
-func maybeExpandNamedQuery(m *DbMap, query string, args []interface{}) (string, []interface{}) { - arg := reflect.ValueOf(args[0]) - for arg.Kind() == reflect.Ptr { - arg = arg.Elem() - } - switch { - case arg.Kind() == reflect.Map && arg.Type().Key().Kind() == reflect.String: - return expandNamedQuery(m, query, func(key string) reflect.Value { - return arg.MapIndex(reflect.ValueOf(key)) - }) - // #84 - ignore time.Time structs here - there may be a cleaner way to do this - case arg.Kind() == reflect.Struct && !(arg.Type().PkgPath() == "time" && arg.Type().Name() == "Time"): - return expandNamedQuery(m, query, arg.FieldByName) - } - return query, args -} - -var keyRegexp = regexp.MustCompile(`:[[:word:]]+`) - -// expandNamedQuery accepts a query with placeholders of the form ":key", and a -// single arg of Kind Struct or Map[string]. It returns the query with the -// dialect's placeholders, and a slice of args ready for positional insertion -// into the query. -func expandNamedQuery(m *DbMap, query string, keyGetter func(key string) reflect.Value) (string, []interface{}) { - var ( - n int - args []interface{} - ) - return keyRegexp.ReplaceAllStringFunc(query, func(key string) string { - val := keyGetter(key[1:]) - if !val.IsValid() { - return key - } - args = append(args, val.Interface()) - newVar := m.Dialect.BindVar(n) - n++ - return newVar - }), args -} - -func columnToFieldIndex(m *DbMap, t reflect.Type, cols []string) ([][]int, error) { - colToFieldIndex := make([][]int, len(cols)) - - // check if type t is a mapped table - if so we'll - // check the table for column aliasing below - tableMapped := false - table := tableOrNil(m, t) - if table != nil { - tableMapped = true - } - - // Loop over column names and find field in i to bind to - // based on column name. all returned columns must match - // a field in the i struct - missingColNames := []string{} - for x := range cols { - colName := strings.ToLower(cols[x]) - field, found := t.FieldByNameFunc(func(fieldName string) bool { - field, _ := t.FieldByName(fieldName) - fieldName = field.Tag.Get("db") - - if fieldName == "-" { - return false - } else if fieldName == "" { - fieldName = field.Name - } - if tableMapped { - colMap := colMapOrNil(table, fieldName) - if colMap != nil { - fieldName = colMap.ColumnName - } - } - return colName == strings.ToLower(fieldName) - }) - if found { - colToFieldIndex[x] = field.Index - } - if colToFieldIndex[x] == nil { - missingColNames = append(missingColNames, colName) - } - } - if len(missingColNames) > 0 { - return colToFieldIndex, &NoFieldInTypeError{ - TypeName: t.Name(), - MissingColNames: missingColNames, - } - } - return colToFieldIndex, nil -} - -func fieldByName(val reflect.Value, fieldName string) *reflect.Value { - // try to find field by exact match - f := val.FieldByName(fieldName) - - if f != zeroVal { - return &f - } - - // try to find by case insensitive match - only the Postgres driver - // seems to require this - in the case where columns are aliased in the sql - fieldNameL := strings.ToLower(fieldName) - fieldCount := val.NumField() - t := val.Type() - for i := 0; i < fieldCount; i++ { - sf := t.Field(i) - if strings.ToLower(sf.Name) == fieldNameL { - f := val.Field(i) - return &f - } - } - - return nil -} - -// toSliceType returns the element type of the given object, if the object is a -// "*[]*Element" or "*[]Element". If not, returns nil. -// err is returned if the user was trying to pass a pointer-to-slice but failed. 
-func toSliceType(i interface{}) (reflect.Type, error) { - t := reflect.TypeOf(i) - if t.Kind() != reflect.Ptr { - // If it's a slice, return a more helpful error message - if t.Kind() == reflect.Slice { - return nil, fmt.Errorf("gorp: Cannot SELECT into a non-pointer slice: %v", t) - } - return nil, nil - } - if t = t.Elem(); t.Kind() != reflect.Slice { - return nil, nil - } - return t.Elem(), nil -} - -func toType(i interface{}) (reflect.Type, error) { - t := reflect.TypeOf(i) - - // If a Pointer to a type, follow - for t.Kind() == reflect.Ptr { - t = t.Elem() - } - - if t.Kind() != reflect.Struct { - return nil, fmt.Errorf("gorp: Cannot SELECT into this type: %v", reflect.TypeOf(i)) - } - return t, nil -} - -func get(m *DbMap, exec SqlExecutor, i interface{}, - keys ...interface{}) (interface{}, error) { - - t, err := toType(i) - if err != nil { - return nil, err - } - - table, err := m.TableFor(t, true) - if err != nil { - return nil, err - } - - plan := table.bindGet() - - v := reflect.New(t) - dest := make([]interface{}, len(plan.argFields)) - - conv := m.TypeConverter - custScan := make([]CustomScanner, 0) - - for x, fieldName := range plan.argFields { - f := v.Elem().FieldByName(fieldName) - target := f.Addr().Interface() - if conv != nil { - scanner, ok := conv.FromDb(target) - if ok { - target = scanner.Holder - custScan = append(custScan, scanner) - } - } - dest[x] = target - } - - row := exec.queryRow(plan.query, keys...) - err = row.Scan(dest...) - if err != nil { - if err == sql.ErrNoRows { - err = nil - } - return nil, err - } - - for _, c := range custScan { - err = c.Bind() - if err != nil { - return nil, err - } - } - - if v, ok := v.Interface().(HasPostGet); ok { - err := v.PostGet(exec) - if err != nil { - return nil, err - } - } - - return v.Interface(), nil -} - -func delete(m *DbMap, exec SqlExecutor, list ...interface{}) (int64, error) { - count := int64(0) - for _, ptr := range list { - table, elem, err := m.tableForPointer(ptr, true) - if err != nil { - return -1, err - } - - eval := elem.Addr().Interface() - if v, ok := eval.(HasPreDelete); ok { - err = v.PreDelete(exec) - if err != nil { - return -1, err - } - } - - bi, err := table.bindDelete(elem) - if err != nil { - return -1, err - } - - res, err := exec.Exec(bi.query, bi.args...) - if err != nil { - return -1, err - } - rows, err := res.RowsAffected() - if err != nil { - return -1, err - } - - if rows == 0 && bi.existingVersion > 0 { - return lockError(m, exec, table.TableName, - bi.existingVersion, elem, bi.keys...) - } - - count += rows - - if v, ok := eval.(HasPostDelete); ok { - err := v.PostDelete(exec) - if err != nil { - return -1, err - } - } - } - - return count, nil -} - -func update(m *DbMap, exec SqlExecutor, list ...interface{}) (int64, error) { - count := int64(0) - for _, ptr := range list { - table, elem, err := m.tableForPointer(ptr, true) - if err != nil { - return -1, err - } - - eval := elem.Addr().Interface() - if v, ok := eval.(HasPreUpdate); ok { - err = v.PreUpdate(exec) - if err != nil { - return -1, err - } - } - - bi, err := table.bindUpdate(elem) - if err != nil { - return -1, err - } - - res, err := exec.Exec(bi.query, bi.args...) - if err != nil { - return -1, err - } - - rows, err := res.RowsAffected() - if err != nil { - return -1, err - } - - if rows == 0 && bi.existingVersion > 0 { - return lockError(m, exec, table.TableName, - bi.existingVersion, elem, bi.keys...) 
- } - - if bi.versField != "" { - elem.FieldByName(bi.versField).SetInt(bi.existingVersion + 1) - } - - count += rows - - if v, ok := eval.(HasPostUpdate); ok { - err = v.PostUpdate(exec) - if err != nil { - return -1, err - } - } - } - return count, nil -} - -func insert(m *DbMap, exec SqlExecutor, list ...interface{}) error { - for _, ptr := range list { - table, elem, err := m.tableForPointer(ptr, false) - if err != nil { - return err - } - - eval := elem.Addr().Interface() - if v, ok := eval.(HasPreInsert); ok { - err := v.PreInsert(exec) - if err != nil { - return err - } - } - - bi, err := table.bindInsert(elem) - if err != nil { - return err - } - - if bi.autoIncrIdx > -1 { - f := elem.FieldByName(bi.autoIncrFieldName) - switch inserter := m.Dialect.(type) { - case IntegerAutoIncrInserter: - id, err := inserter.InsertAutoIncr(exec, bi.query, bi.args...) - if err != nil { - return err - } - k := f.Kind() - if (k == reflect.Int) || (k == reflect.Int16) || (k == reflect.Int32) || (k == reflect.Int64) { - f.SetInt(id) - } else if (k == reflect.Uint) || (k == reflect.Uint16) || (k == reflect.Uint32) || (k == reflect.Uint64) { - f.SetUint(uint64(id)) - } else { - return fmt.Errorf("gorp: Cannot set autoincrement value on non-Int field. SQL=%s autoIncrIdx=%d autoIncrFieldName=%s", bi.query, bi.autoIncrIdx, bi.autoIncrFieldName) - } - case TargetedAutoIncrInserter: - err := inserter.InsertAutoIncrToTarget(exec, bi.query, f.Addr().Interface(), bi.args...) - if err != nil { - return err - } - default: - return fmt.Errorf("gorp: Cannot use autoincrement fields on dialects that do not implement an autoincrementing interface") - } - } else { - _, err := exec.Exec(bi.query, bi.args...) - if err != nil { - return err - } - } - - if v, ok := eval.(HasPostInsert); ok { - err := v.PostInsert(exec) - if err != nil { - return err - } - } - } - return nil -} - -func lockError(m *DbMap, exec SqlExecutor, tableName string, - existingVer int64, elem reflect.Value, - keys ...interface{}) (int64, error) { - - existing, err := get(m, exec, elem.Interface(), keys...) - if err != nil { - return -1, err - } - - ole := OptimisticLockError{tableName, keys, true, existingVer} - if existing == nil { - ole.RowExists = false - } - return -1, ole -} - -// PostUpdate() will be executed after the GET statement. -type HasPostGet interface { - PostGet(SqlExecutor) error -} - -// PostUpdate() will be executed after the DELETE statement -type HasPostDelete interface { - PostDelete(SqlExecutor) error -} - -// PostUpdate() will be executed after the UPDATE statement -type HasPostUpdate interface { - PostUpdate(SqlExecutor) error -} - -// PostInsert() will be executed after the INSERT statement -type HasPostInsert interface { - PostInsert(SqlExecutor) error -} - -// PreDelete() will be executed before the DELETE statement. -type HasPreDelete interface { - PreDelete(SqlExecutor) error -} - -// PreUpdate() will be executed before UPDATE statement. -type HasPreUpdate interface { - PreUpdate(SqlExecutor) error -} - -// PreInsert() will be executed before INSERT statement. 
-type HasPreInsert interface { - PreInsert(SqlExecutor) error -} diff --git a/vendor/gopkg.in/gorp.v1/test_all.sh b/vendor/gopkg.in/gorp.v1/test_all.sh deleted file mode 100755 index f870b39..0000000 --- a/vendor/gopkg.in/gorp.v1/test_all.sh +++ /dev/null @@ -1,22 +0,0 @@ -#!/bin/sh - -# on macs, you may need to: -# export GOBUILDFLAG=-ldflags -linkmode=external - -set -e - -export GORP_TEST_DSN=gorptest/gorptest/gorptest -export GORP_TEST_DIALECT=mysql -go test $GOBUILDFLAG . - -export GORP_TEST_DSN=gorptest:gorptest@/gorptest -export GORP_TEST_DIALECT=gomysql -go test $GOBUILDFLAG . - -export GORP_TEST_DSN="user=gorptest password=gorptest dbname=gorptest sslmode=disable" -export GORP_TEST_DIALECT=postgres -go test $GOBUILDFLAG . - -export GORP_TEST_DSN=/tmp/gorptest.bin -export GORP_TEST_DIALECT=sqlite -go test $GOBUILDFLAG . diff --git a/vendor/gopkg.in/yaml.v2/LICENSE b/vendor/gopkg.in/yaml.v2/LICENSE deleted file mode 100644 index a68e67f..0000000 --- a/vendor/gopkg.in/yaml.v2/LICENSE +++ /dev/null @@ -1,188 +0,0 @@ - -Copyright (c) 2011-2014 - Canonical Inc. - -This software is licensed under the LGPLv3, included below. - -As a special exception to the GNU Lesser General Public License version 3 -("LGPL3"), the copyright holders of this Library give you permission to -convey to a third party a Combined Work that links statically or dynamically -to this Library without providing any Minimal Corresponding Source or -Minimal Application Code as set out in 4d or providing the installation -information set out in section 4e, provided that you comply with the other -provisions of LGPL3 and provided that you meet, for the Application the -terms and conditions of the license(s) which apply to the Application. - -Except as stated in this special exception, the provisions of LGPL3 will -continue to comply in full to this Library. If you modify this Library, you -may apply this exception to your version of this Library, but you are not -obliged to do so. If you do not wish to do so, delete this exception -statement from your version. This exception does not (and cannot) modify any -license terms which apply to the Application, with which you must still -comply. - - - GNU LESSER GENERAL PUBLIC LICENSE - Version 3, 29 June 2007 - - Copyright (C) 2007 Free Software Foundation, Inc. - Everyone is permitted to copy and distribute verbatim copies - of this license document, but changing it is not allowed. - - - This version of the GNU Lesser General Public License incorporates -the terms and conditions of version 3 of the GNU General Public -License, supplemented by the additional permissions listed below. - - 0. Additional Definitions. - - As used herein, "this License" refers to version 3 of the GNU Lesser -General Public License, and the "GNU GPL" refers to version 3 of the GNU -General Public License. - - "The Library" refers to a covered work governed by this License, -other than an Application or a Combined Work as defined below. - - An "Application" is any work that makes use of an interface provided -by the Library, but which is not otherwise based on the Library. -Defining a subclass of a class defined by the Library is deemed a mode -of using an interface provided by the Library. - - A "Combined Work" is a work produced by combining or linking an -Application with the Library. The particular version of the Library -with which the Combined Work was made is also called the "Linked -Version". 
- - The "Minimal Corresponding Source" for a Combined Work means the -Corresponding Source for the Combined Work, excluding any source code -for portions of the Combined Work that, considered in isolation, are -based on the Application, and not on the Linked Version. - - The "Corresponding Application Code" for a Combined Work means the -object code and/or source code for the Application, including any data -and utility programs needed for reproducing the Combined Work from the -Application, but excluding the System Libraries of the Combined Work. - - 1. Exception to Section 3 of the GNU GPL. - - You may convey a covered work under sections 3 and 4 of this License -without being bound by section 3 of the GNU GPL. - - 2. Conveying Modified Versions. - - If you modify a copy of the Library, and, in your modifications, a -facility refers to a function or data to be supplied by an Application -that uses the facility (other than as an argument passed when the -facility is invoked), then you may convey a copy of the modified -version: - - a) under this License, provided that you make a good faith effort to - ensure that, in the event an Application does not supply the - function or data, the facility still operates, and performs - whatever part of its purpose remains meaningful, or - - b) under the GNU GPL, with none of the additional permissions of - this License applicable to that copy. - - 3. Object Code Incorporating Material from Library Header Files. - - The object code form of an Application may incorporate material from -a header file that is part of the Library. You may convey such object -code under terms of your choice, provided that, if the incorporated -material is not limited to numerical parameters, data structure -layouts and accessors, or small macros, inline functions and templates -(ten or fewer lines in length), you do both of the following: - - a) Give prominent notice with each copy of the object code that the - Library is used in it and that the Library and its use are - covered by this License. - - b) Accompany the object code with a copy of the GNU GPL and this license - document. - - 4. Combined Works. - - You may convey a Combined Work under terms of your choice that, -taken together, effectively do not restrict modification of the -portions of the Library contained in the Combined Work and reverse -engineering for debugging such modifications, if you also do each of -the following: - - a) Give prominent notice with each copy of the Combined Work that - the Library is used in it and that the Library and its use are - covered by this License. - - b) Accompany the Combined Work with a copy of the GNU GPL and this license - document. - - c) For a Combined Work that displays copyright notices during - execution, include the copyright notice for the Library among - these notices, as well as a reference directing the user to the - copies of the GNU GPL and this license document. - - d) Do one of the following: - - 0) Convey the Minimal Corresponding Source under the terms of this - License, and the Corresponding Application Code in a form - suitable for, and under terms that permit, the user to - recombine or relink the Application with a modified version of - the Linked Version to produce a modified Combined Work, in the - manner specified by section 6 of the GNU GPL for conveying - Corresponding Source. - - 1) Use a suitable shared library mechanism for linking with the - Library. 
A suitable mechanism is one that (a) uses at run time - a copy of the Library already present on the user's computer - system, and (b) will operate properly with a modified version - of the Library that is interface-compatible with the Linked - Version. - - e) Provide Installation Information, but only if you would otherwise - be required to provide such information under section 6 of the - GNU GPL, and only to the extent that such information is - necessary to install and execute a modified version of the - Combined Work produced by recombining or relinking the - Application with a modified version of the Linked Version. (If - you use option 4d0, the Installation Information must accompany - the Minimal Corresponding Source and Corresponding Application - Code. If you use option 4d1, you must provide the Installation - Information in the manner specified by section 6 of the GNU GPL - for conveying Corresponding Source.) - - 5. Combined Libraries. - - You may place library facilities that are a work based on the -Library side by side in a single library together with other library -facilities that are not Applications and are not covered by this -License, and convey such a combined library under terms of your -choice, if you do both of the following: - - a) Accompany the combined library with a copy of the same work based - on the Library, uncombined with any other library facilities, - conveyed under the terms of this License. - - b) Give prominent notice with the combined library that part of it - is a work based on the Library, and explaining where to find the - accompanying uncombined form of the same work. - - 6. Revised Versions of the GNU Lesser General Public License. - - The Free Software Foundation may publish revised and/or new versions -of the GNU Lesser General Public License from time to time. Such new -versions will be similar in spirit to the present version, but may -differ in detail to address new problems or concerns. - - Each version is given a distinguishing version number. If the -Library as you received it specifies that a certain numbered version -of the GNU Lesser General Public License "or any later version" -applies to it, you have the option of following the terms and -conditions either of that published version or of any later version -published by the Free Software Foundation. If the Library as you -received it does not specify a version number of the GNU Lesser -General Public License, you may choose any version of the GNU Lesser -General Public License ever published by the Free Software Foundation. - - If the Library as you received it specifies that a proxy can decide -whether future versions of the GNU Lesser General Public License shall -apply, that proxy's public statement of acceptance of any version is -permanent authorization for you to choose that version for the -Library. 
diff --git a/vendor/gopkg.in/yaml.v2/LICENSE.libyaml b/vendor/gopkg.in/yaml.v2/LICENSE.libyaml deleted file mode 100644 index 8da58fb..0000000 --- a/vendor/gopkg.in/yaml.v2/LICENSE.libyaml +++ /dev/null @@ -1,31 +0,0 @@ -The following files were ported to Go from C files of libyaml, and thus -are still covered by their original copyright and license: - - apic.go - emitterc.go - parserc.go - readerc.go - scannerc.go - writerc.go - yamlh.go - yamlprivateh.go - -Copyright (c) 2006 Kirill Simonov - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies -of the Software, and to permit persons to whom the Software is furnished to do -so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/vendor/gopkg.in/yaml.v2/README.md b/vendor/gopkg.in/yaml.v2/README.md deleted file mode 100644 index 7b8bd86..0000000 --- a/vendor/gopkg.in/yaml.v2/README.md +++ /dev/null @@ -1,131 +0,0 @@ -# YAML support for the Go language - -Introduction ------------- - -The yaml package enables Go programs to comfortably encode and decode YAML -values. It was developed within [Canonical](https://www.canonical.com) as -part of the [juju](https://juju.ubuntu.com) project, and is based on a -pure Go port of the well-known [libyaml](http://pyyaml.org/wiki/LibYAML) -C library to parse and generate YAML data quickly and reliably. - -Compatibility -------------- - -The yaml package supports most of YAML 1.1 and 1.2, including support for -anchors, tags, map merging, etc. Multi-document unmarshalling is not yet -implemented, and base-60 floats from YAML 1.1 are purposefully not -supported since they're a poor design and are gone in YAML 1.2. - -Installation and usage ----------------------- - -The import path for the package is *gopkg.in/yaml.v2*. - -To install it, run: - - go get gopkg.in/yaml.v2 - -API documentation ------------------ - -If opened in a browser, the import path itself leads to the API documentation: - - * [https://gopkg.in/yaml.v2](https://gopkg.in/yaml.v2) - -API stability -------------- - -The package API for yaml v2 will remain stable as described in [gopkg.in](https://gopkg.in). - - -License -------- - -The yaml package is licensed under the LGPL with an exception that allows it to be linked statically. Please see the LICENSE file for details. - - -Example -------- - -```Go -package main - -import ( - "fmt" - "log" - - "gopkg.in/yaml.v2" -) - -var data = ` -a: Easy! 
-b: - c: 2 - d: [3, 4] -` - -type T struct { - A string - B struct { - RenamedC int `yaml:"c"` - D []int `yaml:",flow"` - } -} - -func main() { - t := T{} - - err := yaml.Unmarshal([]byte(data), &t) - if err != nil { - log.Fatalf("error: %v", err) - } - fmt.Printf("--- t:\n%v\n\n", t) - - d, err := yaml.Marshal(&t) - if err != nil { - log.Fatalf("error: %v", err) - } - fmt.Printf("--- t dump:\n%s\n\n", string(d)) - - m := make(map[interface{}]interface{}) - - err = yaml.Unmarshal([]byte(data), &m) - if err != nil { - log.Fatalf("error: %v", err) - } - fmt.Printf("--- m:\n%v\n\n", m) - - d, err = yaml.Marshal(&m) - if err != nil { - log.Fatalf("error: %v", err) - } - fmt.Printf("--- m dump:\n%s\n\n", string(d)) -} -``` - -This example will generate the following output: - -``` ---- t: -{Easy! {2 [3 4]}} - ---- t dump: -a: Easy! -b: - c: 2 - d: [3, 4] - - ---- m: -map[a:Easy! b:map[c:2 d:[3 4]]] - ---- m dump: -a: Easy! -b: - c: 2 - d: - - 3 - - 4 -``` - diff --git a/vendor/gopkg.in/yaml.v2/apic.go b/vendor/gopkg.in/yaml.v2/apic.go deleted file mode 100644 index 95ec014..0000000 --- a/vendor/gopkg.in/yaml.v2/apic.go +++ /dev/null @@ -1,742 +0,0 @@ -package yaml - -import ( - "io" - "os" -) - -func yaml_insert_token(parser *yaml_parser_t, pos int, token *yaml_token_t) { - //fmt.Println("yaml_insert_token", "pos:", pos, "typ:", token.typ, "head:", parser.tokens_head, "len:", len(parser.tokens)) - - // Check if we can move the queue at the beginning of the buffer. - if parser.tokens_head > 0 && len(parser.tokens) == cap(parser.tokens) { - if parser.tokens_head != len(parser.tokens) { - copy(parser.tokens, parser.tokens[parser.tokens_head:]) - } - parser.tokens = parser.tokens[:len(parser.tokens)-parser.tokens_head] - parser.tokens_head = 0 - } - parser.tokens = append(parser.tokens, *token) - if pos < 0 { - return - } - copy(parser.tokens[parser.tokens_head+pos+1:], parser.tokens[parser.tokens_head+pos:]) - parser.tokens[parser.tokens_head+pos] = *token -} - -// Create a new parser object. -func yaml_parser_initialize(parser *yaml_parser_t) bool { - *parser = yaml_parser_t{ - raw_buffer: make([]byte, 0, input_raw_buffer_size), - buffer: make([]byte, 0, input_buffer_size), - } - return true -} - -// Destroy a parser object. -func yaml_parser_delete(parser *yaml_parser_t) { - *parser = yaml_parser_t{} -} - -// String read handler. -func yaml_string_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) { - if parser.input_pos == len(parser.input) { - return 0, io.EOF - } - n = copy(buffer, parser.input[parser.input_pos:]) - parser.input_pos += n - return n, nil -} - -// File read handler. -func yaml_file_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) { - return parser.input_file.Read(buffer) -} - -// Set a string input. -func yaml_parser_set_input_string(parser *yaml_parser_t, input []byte) { - if parser.read_handler != nil { - panic("must set the input source only once") - } - parser.read_handler = yaml_string_read_handler - parser.input = input - parser.input_pos = 0 -} - -// Set a file input. -func yaml_parser_set_input_file(parser *yaml_parser_t, file *os.File) { - if parser.read_handler != nil { - panic("must set the input source only once") - } - parser.read_handler = yaml_file_read_handler - parser.input_file = file -} - -// Set the source encoding. 
-func yaml_parser_set_encoding(parser *yaml_parser_t, encoding yaml_encoding_t) { - if parser.encoding != yaml_ANY_ENCODING { - panic("must set the encoding only once") - } - parser.encoding = encoding -} - -// Create a new emitter object. -func yaml_emitter_initialize(emitter *yaml_emitter_t) bool { - *emitter = yaml_emitter_t{ - buffer: make([]byte, output_buffer_size), - raw_buffer: make([]byte, 0, output_raw_buffer_size), - states: make([]yaml_emitter_state_t, 0, initial_stack_size), - events: make([]yaml_event_t, 0, initial_queue_size), - } - return true -} - -// Destroy an emitter object. -func yaml_emitter_delete(emitter *yaml_emitter_t) { - *emitter = yaml_emitter_t{} -} - -// String write handler. -func yaml_string_write_handler(emitter *yaml_emitter_t, buffer []byte) error { - *emitter.output_buffer = append(*emitter.output_buffer, buffer...) - return nil -} - -// File write handler. -func yaml_file_write_handler(emitter *yaml_emitter_t, buffer []byte) error { - _, err := emitter.output_file.Write(buffer) - return err -} - -// Set a string output. -func yaml_emitter_set_output_string(emitter *yaml_emitter_t, output_buffer *[]byte) { - if emitter.write_handler != nil { - panic("must set the output target only once") - } - emitter.write_handler = yaml_string_write_handler - emitter.output_buffer = output_buffer -} - -// Set a file output. -func yaml_emitter_set_output_file(emitter *yaml_emitter_t, file io.Writer) { - if emitter.write_handler != nil { - panic("must set the output target only once") - } - emitter.write_handler = yaml_file_write_handler - emitter.output_file = file -} - -// Set the output encoding. -func yaml_emitter_set_encoding(emitter *yaml_emitter_t, encoding yaml_encoding_t) { - if emitter.encoding != yaml_ANY_ENCODING { - panic("must set the output encoding only once") - } - emitter.encoding = encoding -} - -// Set the canonical output style. -func yaml_emitter_set_canonical(emitter *yaml_emitter_t, canonical bool) { - emitter.canonical = canonical -} - -//// Set the indentation increment. -func yaml_emitter_set_indent(emitter *yaml_emitter_t, indent int) { - if indent < 2 || indent > 9 { - indent = 2 - } - emitter.best_indent = indent -} - -// Set the preferred line width. -func yaml_emitter_set_width(emitter *yaml_emitter_t, width int) { - if width < 0 { - width = -1 - } - emitter.best_width = width -} - -// Set if unescaped non-ASCII characters are allowed. -func yaml_emitter_set_unicode(emitter *yaml_emitter_t, unicode bool) { - emitter.unicode = unicode -} - -// Set the preferred line break character. -func yaml_emitter_set_break(emitter *yaml_emitter_t, line_break yaml_break_t) { - emitter.line_break = line_break -} - -///* -// * Destroy a token object. -// */ -// -//YAML_DECLARE(void) -//yaml_token_delete(yaml_token_t *token) -//{ -// assert(token); // Non-NULL token object expected. 
-// -// switch (token.type) -// { -// case YAML_TAG_DIRECTIVE_TOKEN: -// yaml_free(token.data.tag_directive.handle); -// yaml_free(token.data.tag_directive.prefix); -// break; -// -// case YAML_ALIAS_TOKEN: -// yaml_free(token.data.alias.value); -// break; -// -// case YAML_ANCHOR_TOKEN: -// yaml_free(token.data.anchor.value); -// break; -// -// case YAML_TAG_TOKEN: -// yaml_free(token.data.tag.handle); -// yaml_free(token.data.tag.suffix); -// break; -// -// case YAML_SCALAR_TOKEN: -// yaml_free(token.data.scalar.value); -// break; -// -// default: -// break; -// } -// -// memset(token, 0, sizeof(yaml_token_t)); -//} -// -///* -// * Check if a string is a valid UTF-8 sequence. -// * -// * Check 'reader.c' for more details on UTF-8 encoding. -// */ -// -//static int -//yaml_check_utf8(yaml_char_t *start, size_t length) -//{ -// yaml_char_t *end = start+length; -// yaml_char_t *pointer = start; -// -// while (pointer < end) { -// unsigned char octet; -// unsigned int width; -// unsigned int value; -// size_t k; -// -// octet = pointer[0]; -// width = (octet & 0x80) == 0x00 ? 1 : -// (octet & 0xE0) == 0xC0 ? 2 : -// (octet & 0xF0) == 0xE0 ? 3 : -// (octet & 0xF8) == 0xF0 ? 4 : 0; -// value = (octet & 0x80) == 0x00 ? octet & 0x7F : -// (octet & 0xE0) == 0xC0 ? octet & 0x1F : -// (octet & 0xF0) == 0xE0 ? octet & 0x0F : -// (octet & 0xF8) == 0xF0 ? octet & 0x07 : 0; -// if (!width) return 0; -// if (pointer+width > end) return 0; -// for (k = 1; k < width; k ++) { -// octet = pointer[k]; -// if ((octet & 0xC0) != 0x80) return 0; -// value = (value << 6) + (octet & 0x3F); -// } -// if (!((width == 1) || -// (width == 2 && value >= 0x80) || -// (width == 3 && value >= 0x800) || -// (width == 4 && value >= 0x10000))) return 0; -// -// pointer += width; -// } -// -// return 1; -//} -// - -// Create STREAM-START. -func yaml_stream_start_event_initialize(event *yaml_event_t, encoding yaml_encoding_t) bool { - *event = yaml_event_t{ - typ: yaml_STREAM_START_EVENT, - encoding: encoding, - } - return true -} - -// Create STREAM-END. -func yaml_stream_end_event_initialize(event *yaml_event_t) bool { - *event = yaml_event_t{ - typ: yaml_STREAM_END_EVENT, - } - return true -} - -// Create DOCUMENT-START. -func yaml_document_start_event_initialize(event *yaml_event_t, version_directive *yaml_version_directive_t, - tag_directives []yaml_tag_directive_t, implicit bool) bool { - *event = yaml_event_t{ - typ: yaml_DOCUMENT_START_EVENT, - version_directive: version_directive, - tag_directives: tag_directives, - implicit: implicit, - } - return true -} - -// Create DOCUMENT-END. -func yaml_document_end_event_initialize(event *yaml_event_t, implicit bool) bool { - *event = yaml_event_t{ - typ: yaml_DOCUMENT_END_EVENT, - implicit: implicit, - } - return true -} - -///* -// * Create ALIAS. -// */ -// -//YAML_DECLARE(int) -//yaml_alias_event_initialize(event *yaml_event_t, anchor *yaml_char_t) -//{ -// mark yaml_mark_t = { 0, 0, 0 } -// anchor_copy *yaml_char_t = NULL -// -// assert(event) // Non-NULL event object is expected. -// assert(anchor) // Non-NULL anchor is expected. -// -// if (!yaml_check_utf8(anchor, strlen((char *)anchor))) return 0 -// -// anchor_copy = yaml_strdup(anchor) -// if (!anchor_copy) -// return 0 -// -// ALIAS_EVENT_INIT(*event, anchor_copy, mark, mark) -// -// return 1 -//} - -// Create SCALAR. 
-func yaml_scalar_event_initialize(event *yaml_event_t, anchor, tag, value []byte, plain_implicit, quoted_implicit bool, style yaml_scalar_style_t) bool { - *event = yaml_event_t{ - typ: yaml_SCALAR_EVENT, - anchor: anchor, - tag: tag, - value: value, - implicit: plain_implicit, - quoted_implicit: quoted_implicit, - style: yaml_style_t(style), - } - return true -} - -// Create SEQUENCE-START. -func yaml_sequence_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_sequence_style_t) bool { - *event = yaml_event_t{ - typ: yaml_SEQUENCE_START_EVENT, - anchor: anchor, - tag: tag, - implicit: implicit, - style: yaml_style_t(style), - } - return true -} - -// Create SEQUENCE-END. -func yaml_sequence_end_event_initialize(event *yaml_event_t) bool { - *event = yaml_event_t{ - typ: yaml_SEQUENCE_END_EVENT, - } - return true -} - -// Create MAPPING-START. -func yaml_mapping_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_mapping_style_t) bool { - *event = yaml_event_t{ - typ: yaml_MAPPING_START_EVENT, - anchor: anchor, - tag: tag, - implicit: implicit, - style: yaml_style_t(style), - } - return true -} - -// Create MAPPING-END. -func yaml_mapping_end_event_initialize(event *yaml_event_t) bool { - *event = yaml_event_t{ - typ: yaml_MAPPING_END_EVENT, - } - return true -} - -// Destroy an event object. -func yaml_event_delete(event *yaml_event_t) { - *event = yaml_event_t{} -} - -///* -// * Create a document object. -// */ -// -//YAML_DECLARE(int) -//yaml_document_initialize(document *yaml_document_t, -// version_directive *yaml_version_directive_t, -// tag_directives_start *yaml_tag_directive_t, -// tag_directives_end *yaml_tag_directive_t, -// start_implicit int, end_implicit int) -//{ -// struct { -// error yaml_error_type_t -// } context -// struct { -// start *yaml_node_t -// end *yaml_node_t -// top *yaml_node_t -// } nodes = { NULL, NULL, NULL } -// version_directive_copy *yaml_version_directive_t = NULL -// struct { -// start *yaml_tag_directive_t -// end *yaml_tag_directive_t -// top *yaml_tag_directive_t -// } tag_directives_copy = { NULL, NULL, NULL } -// value yaml_tag_directive_t = { NULL, NULL } -// mark yaml_mark_t = { 0, 0, 0 } -// -// assert(document) // Non-NULL document object is expected. -// assert((tag_directives_start && tag_directives_end) || -// (tag_directives_start == tag_directives_end)) -// // Valid tag directives are expected. 
-// -// if (!STACK_INIT(&context, nodes, INITIAL_STACK_SIZE)) goto error -// -// if (version_directive) { -// version_directive_copy = yaml_malloc(sizeof(yaml_version_directive_t)) -// if (!version_directive_copy) goto error -// version_directive_copy.major = version_directive.major -// version_directive_copy.minor = version_directive.minor -// } -// -// if (tag_directives_start != tag_directives_end) { -// tag_directive *yaml_tag_directive_t -// if (!STACK_INIT(&context, tag_directives_copy, INITIAL_STACK_SIZE)) -// goto error -// for (tag_directive = tag_directives_start -// tag_directive != tag_directives_end; tag_directive ++) { -// assert(tag_directive.handle) -// assert(tag_directive.prefix) -// if (!yaml_check_utf8(tag_directive.handle, -// strlen((char *)tag_directive.handle))) -// goto error -// if (!yaml_check_utf8(tag_directive.prefix, -// strlen((char *)tag_directive.prefix))) -// goto error -// value.handle = yaml_strdup(tag_directive.handle) -// value.prefix = yaml_strdup(tag_directive.prefix) -// if (!value.handle || !value.prefix) goto error -// if (!PUSH(&context, tag_directives_copy, value)) -// goto error -// value.handle = NULL -// value.prefix = NULL -// } -// } -// -// DOCUMENT_INIT(*document, nodes.start, nodes.end, version_directive_copy, -// tag_directives_copy.start, tag_directives_copy.top, -// start_implicit, end_implicit, mark, mark) -// -// return 1 -// -//error: -// STACK_DEL(&context, nodes) -// yaml_free(version_directive_copy) -// while (!STACK_EMPTY(&context, tag_directives_copy)) { -// value yaml_tag_directive_t = POP(&context, tag_directives_copy) -// yaml_free(value.handle) -// yaml_free(value.prefix) -// } -// STACK_DEL(&context, tag_directives_copy) -// yaml_free(value.handle) -// yaml_free(value.prefix) -// -// return 0 -//} -// -///* -// * Destroy a document object. -// */ -// -//YAML_DECLARE(void) -//yaml_document_delete(document *yaml_document_t) -//{ -// struct { -// error yaml_error_type_t -// } context -// tag_directive *yaml_tag_directive_t -// -// context.error = YAML_NO_ERROR // Eliminate a compliler warning. -// -// assert(document) // Non-NULL document object is expected. -// -// while (!STACK_EMPTY(&context, document.nodes)) { -// node yaml_node_t = POP(&context, document.nodes) -// yaml_free(node.tag) -// switch (node.type) { -// case YAML_SCALAR_NODE: -// yaml_free(node.data.scalar.value) -// break -// case YAML_SEQUENCE_NODE: -// STACK_DEL(&context, node.data.sequence.items) -// break -// case YAML_MAPPING_NODE: -// STACK_DEL(&context, node.data.mapping.pairs) -// break -// default: -// assert(0) // Should not happen. -// } -// } -// STACK_DEL(&context, document.nodes) -// -// yaml_free(document.version_directive) -// for (tag_directive = document.tag_directives.start -// tag_directive != document.tag_directives.end -// tag_directive++) { -// yaml_free(tag_directive.handle) -// yaml_free(tag_directive.prefix) -// } -// yaml_free(document.tag_directives.start) -// -// memset(document, 0, sizeof(yaml_document_t)) -//} -// -///** -// * Get a document node. -// */ -// -//YAML_DECLARE(yaml_node_t *) -//yaml_document_get_node(document *yaml_document_t, index int) -//{ -// assert(document) // Non-NULL document object is expected. -// -// if (index > 0 && document.nodes.start + index <= document.nodes.top) { -// return document.nodes.start + index - 1 -// } -// return NULL -//} -// -///** -// * Get the root object. 
-// */ -// -//YAML_DECLARE(yaml_node_t *) -//yaml_document_get_root_node(document *yaml_document_t) -//{ -// assert(document) // Non-NULL document object is expected. -// -// if (document.nodes.top != document.nodes.start) { -// return document.nodes.start -// } -// return NULL -//} -// -///* -// * Add a scalar node to a document. -// */ -// -//YAML_DECLARE(int) -//yaml_document_add_scalar(document *yaml_document_t, -// tag *yaml_char_t, value *yaml_char_t, length int, -// style yaml_scalar_style_t) -//{ -// struct { -// error yaml_error_type_t -// } context -// mark yaml_mark_t = { 0, 0, 0 } -// tag_copy *yaml_char_t = NULL -// value_copy *yaml_char_t = NULL -// node yaml_node_t -// -// assert(document) // Non-NULL document object is expected. -// assert(value) // Non-NULL value is expected. -// -// if (!tag) { -// tag = (yaml_char_t *)YAML_DEFAULT_SCALAR_TAG -// } -// -// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error -// tag_copy = yaml_strdup(tag) -// if (!tag_copy) goto error -// -// if (length < 0) { -// length = strlen((char *)value) -// } -// -// if (!yaml_check_utf8(value, length)) goto error -// value_copy = yaml_malloc(length+1) -// if (!value_copy) goto error -// memcpy(value_copy, value, length) -// value_copy[length] = '\0' -// -// SCALAR_NODE_INIT(node, tag_copy, value_copy, length, style, mark, mark) -// if (!PUSH(&context, document.nodes, node)) goto error -// -// return document.nodes.top - document.nodes.start -// -//error: -// yaml_free(tag_copy) -// yaml_free(value_copy) -// -// return 0 -//} -// -///* -// * Add a sequence node to a document. -// */ -// -//YAML_DECLARE(int) -//yaml_document_add_sequence(document *yaml_document_t, -// tag *yaml_char_t, style yaml_sequence_style_t) -//{ -// struct { -// error yaml_error_type_t -// } context -// mark yaml_mark_t = { 0, 0, 0 } -// tag_copy *yaml_char_t = NULL -// struct { -// start *yaml_node_item_t -// end *yaml_node_item_t -// top *yaml_node_item_t -// } items = { NULL, NULL, NULL } -// node yaml_node_t -// -// assert(document) // Non-NULL document object is expected. -// -// if (!tag) { -// tag = (yaml_char_t *)YAML_DEFAULT_SEQUENCE_TAG -// } -// -// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error -// tag_copy = yaml_strdup(tag) -// if (!tag_copy) goto error -// -// if (!STACK_INIT(&context, items, INITIAL_STACK_SIZE)) goto error -// -// SEQUENCE_NODE_INIT(node, tag_copy, items.start, items.end, -// style, mark, mark) -// if (!PUSH(&context, document.nodes, node)) goto error -// -// return document.nodes.top - document.nodes.start -// -//error: -// STACK_DEL(&context, items) -// yaml_free(tag_copy) -// -// return 0 -//} -// -///* -// * Add a mapping node to a document. -// */ -// -//YAML_DECLARE(int) -//yaml_document_add_mapping(document *yaml_document_t, -// tag *yaml_char_t, style yaml_mapping_style_t) -//{ -// struct { -// error yaml_error_type_t -// } context -// mark yaml_mark_t = { 0, 0, 0 } -// tag_copy *yaml_char_t = NULL -// struct { -// start *yaml_node_pair_t -// end *yaml_node_pair_t -// top *yaml_node_pair_t -// } pairs = { NULL, NULL, NULL } -// node yaml_node_t -// -// assert(document) // Non-NULL document object is expected. 
-// -// if (!tag) { -// tag = (yaml_char_t *)YAML_DEFAULT_MAPPING_TAG -// } -// -// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error -// tag_copy = yaml_strdup(tag) -// if (!tag_copy) goto error -// -// if (!STACK_INIT(&context, pairs, INITIAL_STACK_SIZE)) goto error -// -// MAPPING_NODE_INIT(node, tag_copy, pairs.start, pairs.end, -// style, mark, mark) -// if (!PUSH(&context, document.nodes, node)) goto error -// -// return document.nodes.top - document.nodes.start -// -//error: -// STACK_DEL(&context, pairs) -// yaml_free(tag_copy) -// -// return 0 -//} -// -///* -// * Append an item to a sequence node. -// */ -// -//YAML_DECLARE(int) -//yaml_document_append_sequence_item(document *yaml_document_t, -// sequence int, item int) -//{ -// struct { -// error yaml_error_type_t -// } context -// -// assert(document) // Non-NULL document is required. -// assert(sequence > 0 -// && document.nodes.start + sequence <= document.nodes.top) -// // Valid sequence id is required. -// assert(document.nodes.start[sequence-1].type == YAML_SEQUENCE_NODE) -// // A sequence node is required. -// assert(item > 0 && document.nodes.start + item <= document.nodes.top) -// // Valid item id is required. -// -// if (!PUSH(&context, -// document.nodes.start[sequence-1].data.sequence.items, item)) -// return 0 -// -// return 1 -//} -// -///* -// * Append a pair of a key and a value to a mapping node. -// */ -// -//YAML_DECLARE(int) -//yaml_document_append_mapping_pair(document *yaml_document_t, -// mapping int, key int, value int) -//{ -// struct { -// error yaml_error_type_t -// } context -// -// pair yaml_node_pair_t -// -// assert(document) // Non-NULL document is required. -// assert(mapping > 0 -// && document.nodes.start + mapping <= document.nodes.top) -// // Valid mapping id is required. -// assert(document.nodes.start[mapping-1].type == YAML_MAPPING_NODE) -// // A mapping node is required. -// assert(key > 0 && document.nodes.start + key <= document.nodes.top) -// // Valid key id is required. -// assert(value > 0 && document.nodes.start + value <= document.nodes.top) -// // Valid value id is required. -// -// pair.key = key -// pair.value = value -// -// if (!PUSH(&context, -// document.nodes.start[mapping-1].data.mapping.pairs, pair)) -// return 0 -// -// return 1 -//} -// -// diff --git a/vendor/gopkg.in/yaml.v2/decode.go b/vendor/gopkg.in/yaml.v2/decode.go deleted file mode 100644 index 085cddc..0000000 --- a/vendor/gopkg.in/yaml.v2/decode.go +++ /dev/null @@ -1,683 +0,0 @@ -package yaml - -import ( - "encoding" - "encoding/base64" - "fmt" - "math" - "reflect" - "strconv" - "time" -) - -const ( - documentNode = 1 << iota - mappingNode - sequenceNode - scalarNode - aliasNode -) - -type node struct { - kind int - line, column int - tag string - value string - implicit bool - children []*node - anchors map[string]*node -} - -// ---------------------------------------------------------------------------- -// Parser, produces a node tree out of a libyaml event stream. 
- -type parser struct { - parser yaml_parser_t - event yaml_event_t - doc *node -} - -func newParser(b []byte) *parser { - p := parser{} - if !yaml_parser_initialize(&p.parser) { - panic("failed to initialize YAML emitter") - } - - if len(b) == 0 { - b = []byte{'\n'} - } - - yaml_parser_set_input_string(&p.parser, b) - - p.skip() - if p.event.typ != yaml_STREAM_START_EVENT { - panic("expected stream start event, got " + strconv.Itoa(int(p.event.typ))) - } - p.skip() - return &p -} - -func (p *parser) destroy() { - if p.event.typ != yaml_NO_EVENT { - yaml_event_delete(&p.event) - } - yaml_parser_delete(&p.parser) -} - -func (p *parser) skip() { - if p.event.typ != yaml_NO_EVENT { - if p.event.typ == yaml_STREAM_END_EVENT { - failf("attempted to go past the end of stream; corrupted value?") - } - yaml_event_delete(&p.event) - } - if !yaml_parser_parse(&p.parser, &p.event) { - p.fail() - } -} - -func (p *parser) fail() { - var where string - var line int - if p.parser.problem_mark.line != 0 { - line = p.parser.problem_mark.line - } else if p.parser.context_mark.line != 0 { - line = p.parser.context_mark.line - } - if line != 0 { - where = "line " + strconv.Itoa(line) + ": " - } - var msg string - if len(p.parser.problem) > 0 { - msg = p.parser.problem - } else { - msg = "unknown problem parsing YAML content" - } - failf("%s%s", where, msg) -} - -func (p *parser) anchor(n *node, anchor []byte) { - if anchor != nil { - p.doc.anchors[string(anchor)] = n - } -} - -func (p *parser) parse() *node { - switch p.event.typ { - case yaml_SCALAR_EVENT: - return p.scalar() - case yaml_ALIAS_EVENT: - return p.alias() - case yaml_MAPPING_START_EVENT: - return p.mapping() - case yaml_SEQUENCE_START_EVENT: - return p.sequence() - case yaml_DOCUMENT_START_EVENT: - return p.document() - case yaml_STREAM_END_EVENT: - // Happens when attempting to decode an empty buffer. - return nil - default: - panic("attempted to parse unknown event: " + strconv.Itoa(int(p.event.typ))) - } - panic("unreachable") -} - -func (p *parser) node(kind int) *node { - return &node{ - kind: kind, - line: p.event.start_mark.line, - column: p.event.start_mark.column, - } -} - -func (p *parser) document() *node { - n := p.node(documentNode) - n.anchors = make(map[string]*node) - p.doc = n - p.skip() - n.children = append(n.children, p.parse()) - if p.event.typ != yaml_DOCUMENT_END_EVENT { - panic("expected end of document event but got " + strconv.Itoa(int(p.event.typ))) - } - p.skip() - return n -} - -func (p *parser) alias() *node { - n := p.node(aliasNode) - n.value = string(p.event.anchor) - p.skip() - return n -} - -func (p *parser) scalar() *node { - n := p.node(scalarNode) - n.value = string(p.event.value) - n.tag = string(p.event.tag) - n.implicit = p.event.implicit - p.anchor(n, p.event.anchor) - p.skip() - return n -} - -func (p *parser) sequence() *node { - n := p.node(sequenceNode) - p.anchor(n, p.event.anchor) - p.skip() - for p.event.typ != yaml_SEQUENCE_END_EVENT { - n.children = append(n.children, p.parse()) - } - p.skip() - return n -} - -func (p *parser) mapping() *node { - n := p.node(mappingNode) - p.anchor(n, p.event.anchor) - p.skip() - for p.event.typ != yaml_MAPPING_END_EVENT { - n.children = append(n.children, p.parse(), p.parse()) - } - p.skip() - return n -} - -// ---------------------------------------------------------------------------- -// Decoder, unmarshals a node into a provided value. 
- -type decoder struct { - doc *node - aliases map[string]bool - mapType reflect.Type - terrors []string -} - -var ( - mapItemType = reflect.TypeOf(MapItem{}) - durationType = reflect.TypeOf(time.Duration(0)) - defaultMapType = reflect.TypeOf(map[interface{}]interface{}{}) - ifaceType = defaultMapType.Elem() -) - -func newDecoder() *decoder { - d := &decoder{mapType: defaultMapType} - d.aliases = make(map[string]bool) - return d -} - -func (d *decoder) terror(n *node, tag string, out reflect.Value) { - if n.tag != "" { - tag = n.tag - } - value := n.value - if tag != yaml_SEQ_TAG && tag != yaml_MAP_TAG { - if len(value) > 10 { - value = " `" + value[:7] + "...`" - } else { - value = " `" + value + "`" - } - } - d.terrors = append(d.terrors, fmt.Sprintf("line %d: cannot unmarshal %s%s into %s", n.line+1, shortTag(tag), value, out.Type())) -} - -func (d *decoder) callUnmarshaler(n *node, u Unmarshaler) (good bool) { - terrlen := len(d.terrors) - err := u.UnmarshalYAML(func(v interface{}) (err error) { - defer handleErr(&err) - d.unmarshal(n, reflect.ValueOf(v)) - if len(d.terrors) > terrlen { - issues := d.terrors[terrlen:] - d.terrors = d.terrors[:terrlen] - return &TypeError{issues} - } - return nil - }) - if e, ok := err.(*TypeError); ok { - d.terrors = append(d.terrors, e.Errors...) - return false - } - if err != nil { - fail(err) - } - return true -} - -// d.prepare initializes and dereferences pointers and calls UnmarshalYAML -// if a value is found to implement it. -// It returns the initialized and dereferenced out value, whether -// unmarshalling was already done by UnmarshalYAML, and if so whether -// its types unmarshalled appropriately. -// -// If n holds a null value, prepare returns before doing anything. -func (d *decoder) prepare(n *node, out reflect.Value) (newout reflect.Value, unmarshaled, good bool) { - if n.tag == yaml_NULL_TAG || n.kind == scalarNode && n.tag == "" && (n.value == "null" || n.value == "") { - return out, false, false - } - again := true - for again { - again = false - if out.Kind() == reflect.Ptr { - if out.IsNil() { - out.Set(reflect.New(out.Type().Elem())) - } - out = out.Elem() - again = true - } - if out.CanAddr() { - if u, ok := out.Addr().Interface().(Unmarshaler); ok { - good = d.callUnmarshaler(n, u) - return out, true, good - } - } - } - return out, false, false -} - -func (d *decoder) unmarshal(n *node, out reflect.Value) (good bool) { - switch n.kind { - case documentNode: - return d.document(n, out) - case aliasNode: - return d.alias(n, out) - } - out, unmarshaled, good := d.prepare(n, out) - if unmarshaled { - return good - } - switch n.kind { - case scalarNode: - good = d.scalar(n, out) - case mappingNode: - good = d.mapping(n, out) - case sequenceNode: - good = d.sequence(n, out) - default: - panic("internal error: unknown node kind: " + strconv.Itoa(n.kind)) - } - return good -} - -func (d *decoder) document(n *node, out reflect.Value) (good bool) { - if len(n.children) == 1 { - d.doc = n - d.unmarshal(n.children[0], out) - return true - } - return false -} - -func (d *decoder) alias(n *node, out reflect.Value) (good bool) { - an, ok := d.doc.anchors[n.value] - if !ok { - failf("unknown anchor '%s' referenced", n.value) - } - if d.aliases[n.value] { - failf("anchor '%s' value contains itself", n.value) - } - d.aliases[n.value] = true - good = d.unmarshal(an, out) - delete(d.aliases, n.value) - return good -} - -var zeroValue reflect.Value - -func resetMap(out reflect.Value) { - for _, k := range out.MapKeys() { - out.SetMapIndex(k, 
zeroValue) - } -} - -func (d *decoder) scalar(n *node, out reflect.Value) (good bool) { - var tag string - var resolved interface{} - if n.tag == "" && !n.implicit { - tag = yaml_STR_TAG - resolved = n.value - } else { - tag, resolved = resolve(n.tag, n.value) - if tag == yaml_BINARY_TAG { - data, err := base64.StdEncoding.DecodeString(resolved.(string)) - if err != nil { - failf("!!binary value contains invalid base64 data") - } - resolved = string(data) - } - } - if resolved == nil { - if out.Kind() == reflect.Map && !out.CanAddr() { - resetMap(out) - } else { - out.Set(reflect.Zero(out.Type())) - } - return true - } - if s, ok := resolved.(string); ok && out.CanAddr() { - if u, ok := out.Addr().Interface().(encoding.TextUnmarshaler); ok { - err := u.UnmarshalText([]byte(s)) - if err != nil { - fail(err) - } - return true - } - } - switch out.Kind() { - case reflect.String: - if tag == yaml_BINARY_TAG { - out.SetString(resolved.(string)) - good = true - } else if resolved != nil { - out.SetString(n.value) - good = true - } - case reflect.Interface: - if resolved == nil { - out.Set(reflect.Zero(out.Type())) - } else { - out.Set(reflect.ValueOf(resolved)) - } - good = true - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - switch resolved := resolved.(type) { - case int: - if !out.OverflowInt(int64(resolved)) { - out.SetInt(int64(resolved)) - good = true - } - case int64: - if !out.OverflowInt(resolved) { - out.SetInt(resolved) - good = true - } - case uint64: - if resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) { - out.SetInt(int64(resolved)) - good = true - } - case float64: - if resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) { - out.SetInt(int64(resolved)) - good = true - } - case string: - if out.Type() == durationType { - d, err := time.ParseDuration(resolved) - if err == nil { - out.SetInt(int64(d)) - good = true - } - } - } - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - switch resolved := resolved.(type) { - case int: - if resolved >= 0 && !out.OverflowUint(uint64(resolved)) { - out.SetUint(uint64(resolved)) - good = true - } - case int64: - if resolved >= 0 && !out.OverflowUint(uint64(resolved)) { - out.SetUint(uint64(resolved)) - good = true - } - case uint64: - if !out.OverflowUint(uint64(resolved)) { - out.SetUint(uint64(resolved)) - good = true - } - case float64: - if resolved <= math.MaxUint64 && !out.OverflowUint(uint64(resolved)) { - out.SetUint(uint64(resolved)) - good = true - } - } - case reflect.Bool: - switch resolved := resolved.(type) { - case bool: - out.SetBool(resolved) - good = true - } - case reflect.Float32, reflect.Float64: - switch resolved := resolved.(type) { - case int: - out.SetFloat(float64(resolved)) - good = true - case int64: - out.SetFloat(float64(resolved)) - good = true - case uint64: - out.SetFloat(float64(resolved)) - good = true - case float64: - out.SetFloat(resolved) - good = true - } - case reflect.Ptr: - if out.Type().Elem() == reflect.TypeOf(resolved) { - // TODO DOes this make sense? When is out a Ptr except when decoding a nil value? 
- elem := reflect.New(out.Type().Elem()) - elem.Elem().Set(reflect.ValueOf(resolved)) - out.Set(elem) - good = true - } - } - if !good { - d.terror(n, tag, out) - } - return good -} - -func settableValueOf(i interface{}) reflect.Value { - v := reflect.ValueOf(i) - sv := reflect.New(v.Type()).Elem() - sv.Set(v) - return sv -} - -func (d *decoder) sequence(n *node, out reflect.Value) (good bool) { - l := len(n.children) - - var iface reflect.Value - switch out.Kind() { - case reflect.Slice: - out.Set(reflect.MakeSlice(out.Type(), l, l)) - case reflect.Interface: - // No type hints. Will have to use a generic sequence. - iface = out - out = settableValueOf(make([]interface{}, l)) - default: - d.terror(n, yaml_SEQ_TAG, out) - return false - } - et := out.Type().Elem() - - j := 0 - for i := 0; i < l; i++ { - e := reflect.New(et).Elem() - if ok := d.unmarshal(n.children[i], e); ok { - out.Index(j).Set(e) - j++ - } - } - out.Set(out.Slice(0, j)) - if iface.IsValid() { - iface.Set(out) - } - return true -} - -func (d *decoder) mapping(n *node, out reflect.Value) (good bool) { - switch out.Kind() { - case reflect.Struct: - return d.mappingStruct(n, out) - case reflect.Slice: - return d.mappingSlice(n, out) - case reflect.Map: - // okay - case reflect.Interface: - if d.mapType.Kind() == reflect.Map { - iface := out - out = reflect.MakeMap(d.mapType) - iface.Set(out) - } else { - slicev := reflect.New(d.mapType).Elem() - if !d.mappingSlice(n, slicev) { - return false - } - out.Set(slicev) - return true - } - default: - d.terror(n, yaml_MAP_TAG, out) - return false - } - outt := out.Type() - kt := outt.Key() - et := outt.Elem() - - mapType := d.mapType - if outt.Key() == ifaceType && outt.Elem() == ifaceType { - d.mapType = outt - } - - if out.IsNil() { - out.Set(reflect.MakeMap(outt)) - } - l := len(n.children) - for i := 0; i < l; i += 2 { - if isMerge(n.children[i]) { - d.merge(n.children[i+1], out) - continue - } - k := reflect.New(kt).Elem() - if d.unmarshal(n.children[i], k) { - kkind := k.Kind() - if kkind == reflect.Interface { - kkind = k.Elem().Kind() - } - if kkind == reflect.Map || kkind == reflect.Slice { - failf("invalid map key: %#v", k.Interface()) - } - e := reflect.New(et).Elem() - if d.unmarshal(n.children[i+1], e) { - out.SetMapIndex(k, e) - } - } - } - d.mapType = mapType - return true -} - -func (d *decoder) mappingSlice(n *node, out reflect.Value) (good bool) { - outt := out.Type() - if outt.Elem() != mapItemType { - d.terror(n, yaml_MAP_TAG, out) - return false - } - - mapType := d.mapType - d.mapType = outt - - var slice []MapItem - var l = len(n.children) - for i := 0; i < l; i += 2 { - if isMerge(n.children[i]) { - d.merge(n.children[i+1], out) - continue - } - item := MapItem{} - k := reflect.ValueOf(&item.Key).Elem() - if d.unmarshal(n.children[i], k) { - v := reflect.ValueOf(&item.Value).Elem() - if d.unmarshal(n.children[i+1], v) { - slice = append(slice, item) - } - } - } - out.Set(reflect.ValueOf(slice)) - d.mapType = mapType - return true -} - -func (d *decoder) mappingStruct(n *node, out reflect.Value) (good bool) { - sinfo, err := getStructInfo(out.Type()) - if err != nil { - panic(err) - } - name := settableValueOf("") - l := len(n.children) - - var inlineMap reflect.Value - var elemType reflect.Type - if sinfo.InlineMap != -1 { - inlineMap = out.Field(sinfo.InlineMap) - inlineMap.Set(reflect.New(inlineMap.Type()).Elem()) - elemType = inlineMap.Type().Elem() - } - - for i := 0; i < l; i += 2 { - ni := n.children[i] - if isMerge(ni) { - d.merge(n.children[i+1], out) 
- continue - } - if !d.unmarshal(ni, name) { - continue - } - if info, ok := sinfo.FieldsMap[name.String()]; ok { - var field reflect.Value - if info.Inline == nil { - field = out.Field(info.Num) - } else { - field = out.FieldByIndex(info.Inline) - } - d.unmarshal(n.children[i+1], field) - } else if sinfo.InlineMap != -1 { - if inlineMap.IsNil() { - inlineMap.Set(reflect.MakeMap(inlineMap.Type())) - } - value := reflect.New(elemType).Elem() - d.unmarshal(n.children[i+1], value) - inlineMap.SetMapIndex(name, value) - } - } - return true -} - -func failWantMap() { - failf("map merge requires map or sequence of maps as the value") -} - -func (d *decoder) merge(n *node, out reflect.Value) { - switch n.kind { - case mappingNode: - d.unmarshal(n, out) - case aliasNode: - an, ok := d.doc.anchors[n.value] - if ok && an.kind != mappingNode { - failWantMap() - } - d.unmarshal(n, out) - case sequenceNode: - // Step backwards as earlier nodes take precedence. - for i := len(n.children) - 1; i >= 0; i-- { - ni := n.children[i] - if ni.kind == aliasNode { - an, ok := d.doc.anchors[ni.value] - if ok && an.kind != mappingNode { - failWantMap() - } - } else if ni.kind != mappingNode { - failWantMap() - } - d.unmarshal(ni, out) - } - default: - failWantMap() - } -} - -func isMerge(n *node) bool { - return n.kind == scalarNode && n.value == "<<" && (n.implicit == true || n.tag == yaml_MERGE_TAG) -} diff --git a/vendor/gopkg.in/yaml.v2/emitterc.go b/vendor/gopkg.in/yaml.v2/emitterc.go deleted file mode 100644 index 2befd55..0000000 --- a/vendor/gopkg.in/yaml.v2/emitterc.go +++ /dev/null @@ -1,1685 +0,0 @@ -package yaml - -import ( - "bytes" -) - -// Flush the buffer if needed. -func flush(emitter *yaml_emitter_t) bool { - if emitter.buffer_pos+5 >= len(emitter.buffer) { - return yaml_emitter_flush(emitter) - } - return true -} - -// Put a character to the output buffer. -func put(emitter *yaml_emitter_t, value byte) bool { - if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { - return false - } - emitter.buffer[emitter.buffer_pos] = value - emitter.buffer_pos++ - emitter.column++ - return true -} - -// Put a line break to the output buffer. -func put_break(emitter *yaml_emitter_t) bool { - if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { - return false - } - switch emitter.line_break { - case yaml_CR_BREAK: - emitter.buffer[emitter.buffer_pos] = '\r' - emitter.buffer_pos += 1 - case yaml_LN_BREAK: - emitter.buffer[emitter.buffer_pos] = '\n' - emitter.buffer_pos += 1 - case yaml_CRLN_BREAK: - emitter.buffer[emitter.buffer_pos+0] = '\r' - emitter.buffer[emitter.buffer_pos+1] = '\n' - emitter.buffer_pos += 2 - default: - panic("unknown line break setting") - } - emitter.column = 0 - emitter.line++ - return true -} - -// Copy a character from a string into buffer. -func write(emitter *yaml_emitter_t, s []byte, i *int) bool { - if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { - return false - } - p := emitter.buffer_pos - w := width(s[*i]) - switch w { - case 4: - emitter.buffer[p+3] = s[*i+3] - fallthrough - case 3: - emitter.buffer[p+2] = s[*i+2] - fallthrough - case 2: - emitter.buffer[p+1] = s[*i+1] - fallthrough - case 1: - emitter.buffer[p+0] = s[*i+0] - default: - panic("unknown character width") - } - emitter.column++ - emitter.buffer_pos += w - *i += w - return true -} - -// Write a whole string into buffer. 
-func write_all(emitter *yaml_emitter_t, s []byte) bool { - for i := 0; i < len(s); { - if !write(emitter, s, &i) { - return false - } - } - return true -} - -// Copy a line break character from a string into buffer. -func write_break(emitter *yaml_emitter_t, s []byte, i *int) bool { - if s[*i] == '\n' { - if !put_break(emitter) { - return false - } - *i++ - } else { - if !write(emitter, s, i) { - return false - } - emitter.column = 0 - emitter.line++ - } - return true -} - -// Set an emitter error and return false. -func yaml_emitter_set_emitter_error(emitter *yaml_emitter_t, problem string) bool { - emitter.error = yaml_EMITTER_ERROR - emitter.problem = problem - return false -} - -// Emit an event. -func yaml_emitter_emit(emitter *yaml_emitter_t, event *yaml_event_t) bool { - emitter.events = append(emitter.events, *event) - for !yaml_emitter_need_more_events(emitter) { - event := &emitter.events[emitter.events_head] - if !yaml_emitter_analyze_event(emitter, event) { - return false - } - if !yaml_emitter_state_machine(emitter, event) { - return false - } - yaml_event_delete(event) - emitter.events_head++ - } - return true -} - -// Check if we need to accumulate more events before emitting. -// -// We accumulate extra -// - 1 event for DOCUMENT-START -// - 2 events for SEQUENCE-START -// - 3 events for MAPPING-START -// -func yaml_emitter_need_more_events(emitter *yaml_emitter_t) bool { - if emitter.events_head == len(emitter.events) { - return true - } - var accumulate int - switch emitter.events[emitter.events_head].typ { - case yaml_DOCUMENT_START_EVENT: - accumulate = 1 - break - case yaml_SEQUENCE_START_EVENT: - accumulate = 2 - break - case yaml_MAPPING_START_EVENT: - accumulate = 3 - break - default: - return false - } - if len(emitter.events)-emitter.events_head > accumulate { - return false - } - var level int - for i := emitter.events_head; i < len(emitter.events); i++ { - switch emitter.events[i].typ { - case yaml_STREAM_START_EVENT, yaml_DOCUMENT_START_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT: - level++ - case yaml_STREAM_END_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_END_EVENT, yaml_MAPPING_END_EVENT: - level-- - } - if level == 0 { - return false - } - } - return true -} - -// Append a directive to the directives stack. -func yaml_emitter_append_tag_directive(emitter *yaml_emitter_t, value *yaml_tag_directive_t, allow_duplicates bool) bool { - for i := 0; i < len(emitter.tag_directives); i++ { - if bytes.Equal(value.handle, emitter.tag_directives[i].handle) { - if allow_duplicates { - return true - } - return yaml_emitter_set_emitter_error(emitter, "duplicate %TAG directive") - } - } - - // [Go] Do we actually need to copy this given garbage collection - // and the lack of deallocating destructors? - tag_copy := yaml_tag_directive_t{ - handle: make([]byte, len(value.handle)), - prefix: make([]byte, len(value.prefix)), - } - copy(tag_copy.handle, value.handle) - copy(tag_copy.prefix, value.prefix) - emitter.tag_directives = append(emitter.tag_directives, tag_copy) - return true -} - -// Increase the indentation level. -func yaml_emitter_increase_indent(emitter *yaml_emitter_t, flow, indentless bool) bool { - emitter.indents = append(emitter.indents, emitter.indent) - if emitter.indent < 0 { - if flow { - emitter.indent = emitter.best_indent - } else { - emitter.indent = 0 - } - } else if !indentless { - emitter.indent += emitter.best_indent - } - return true -} - -// State dispatcher. 
-func yaml_emitter_state_machine(emitter *yaml_emitter_t, event *yaml_event_t) bool { - switch emitter.state { - default: - case yaml_EMIT_STREAM_START_STATE: - return yaml_emitter_emit_stream_start(emitter, event) - - case yaml_EMIT_FIRST_DOCUMENT_START_STATE: - return yaml_emitter_emit_document_start(emitter, event, true) - - case yaml_EMIT_DOCUMENT_START_STATE: - return yaml_emitter_emit_document_start(emitter, event, false) - - case yaml_EMIT_DOCUMENT_CONTENT_STATE: - return yaml_emitter_emit_document_content(emitter, event) - - case yaml_EMIT_DOCUMENT_END_STATE: - return yaml_emitter_emit_document_end(emitter, event) - - case yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE: - return yaml_emitter_emit_flow_sequence_item(emitter, event, true) - - case yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE: - return yaml_emitter_emit_flow_sequence_item(emitter, event, false) - - case yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE: - return yaml_emitter_emit_flow_mapping_key(emitter, event, true) - - case yaml_EMIT_FLOW_MAPPING_KEY_STATE: - return yaml_emitter_emit_flow_mapping_key(emitter, event, false) - - case yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE: - return yaml_emitter_emit_flow_mapping_value(emitter, event, true) - - case yaml_EMIT_FLOW_MAPPING_VALUE_STATE: - return yaml_emitter_emit_flow_mapping_value(emitter, event, false) - - case yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE: - return yaml_emitter_emit_block_sequence_item(emitter, event, true) - - case yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE: - return yaml_emitter_emit_block_sequence_item(emitter, event, false) - - case yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE: - return yaml_emitter_emit_block_mapping_key(emitter, event, true) - - case yaml_EMIT_BLOCK_MAPPING_KEY_STATE: - return yaml_emitter_emit_block_mapping_key(emitter, event, false) - - case yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE: - return yaml_emitter_emit_block_mapping_value(emitter, event, true) - - case yaml_EMIT_BLOCK_MAPPING_VALUE_STATE: - return yaml_emitter_emit_block_mapping_value(emitter, event, false) - - case yaml_EMIT_END_STATE: - return yaml_emitter_set_emitter_error(emitter, "expected nothing after STREAM-END") - } - panic("invalid emitter state") -} - -// Expect STREAM-START. -func yaml_emitter_emit_stream_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { - if event.typ != yaml_STREAM_START_EVENT { - return yaml_emitter_set_emitter_error(emitter, "expected STREAM-START") - } - if emitter.encoding == yaml_ANY_ENCODING { - emitter.encoding = event.encoding - if emitter.encoding == yaml_ANY_ENCODING { - emitter.encoding = yaml_UTF8_ENCODING - } - } - if emitter.best_indent < 2 || emitter.best_indent > 9 { - emitter.best_indent = 2 - } - if emitter.best_width >= 0 && emitter.best_width <= emitter.best_indent*2 { - emitter.best_width = 80 - } - if emitter.best_width < 0 { - emitter.best_width = 1<<31 - 1 - } - if emitter.line_break == yaml_ANY_BREAK { - emitter.line_break = yaml_LN_BREAK - } - - emitter.indent = -1 - emitter.line = 0 - emitter.column = 0 - emitter.whitespace = true - emitter.indention = true - - if emitter.encoding != yaml_UTF8_ENCODING { - if !yaml_emitter_write_bom(emitter) { - return false - } - } - emitter.state = yaml_EMIT_FIRST_DOCUMENT_START_STATE - return true -} - -// Expect DOCUMENT-START or STREAM-END. 
-func yaml_emitter_emit_document_start(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { - - if event.typ == yaml_DOCUMENT_START_EVENT { - - if event.version_directive != nil { - if !yaml_emitter_analyze_version_directive(emitter, event.version_directive) { - return false - } - } - - for i := 0; i < len(event.tag_directives); i++ { - tag_directive := &event.tag_directives[i] - if !yaml_emitter_analyze_tag_directive(emitter, tag_directive) { - return false - } - if !yaml_emitter_append_tag_directive(emitter, tag_directive, false) { - return false - } - } - - for i := 0; i < len(default_tag_directives); i++ { - tag_directive := &default_tag_directives[i] - if !yaml_emitter_append_tag_directive(emitter, tag_directive, true) { - return false - } - } - - implicit := event.implicit - if !first || emitter.canonical { - implicit = false - } - - if emitter.open_ended && (event.version_directive != nil || len(event.tag_directives) > 0) { - if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { - return false - } - if !yaml_emitter_write_indent(emitter) { - return false - } - } - - if event.version_directive != nil { - implicit = false - if !yaml_emitter_write_indicator(emitter, []byte("%YAML"), true, false, false) { - return false - } - if !yaml_emitter_write_indicator(emitter, []byte("1.1"), true, false, false) { - return false - } - if !yaml_emitter_write_indent(emitter) { - return false - } - } - - if len(event.tag_directives) > 0 { - implicit = false - for i := 0; i < len(event.tag_directives); i++ { - tag_directive := &event.tag_directives[i] - if !yaml_emitter_write_indicator(emitter, []byte("%TAG"), true, false, false) { - return false - } - if !yaml_emitter_write_tag_handle(emitter, tag_directive.handle) { - return false - } - if !yaml_emitter_write_tag_content(emitter, tag_directive.prefix, true) { - return false - } - if !yaml_emitter_write_indent(emitter) { - return false - } - } - } - - if yaml_emitter_check_empty_document(emitter) { - implicit = false - } - if !implicit { - if !yaml_emitter_write_indent(emitter) { - return false - } - if !yaml_emitter_write_indicator(emitter, []byte("---"), true, false, false) { - return false - } - if emitter.canonical { - if !yaml_emitter_write_indent(emitter) { - return false - } - } - } - - emitter.state = yaml_EMIT_DOCUMENT_CONTENT_STATE - return true - } - - if event.typ == yaml_STREAM_END_EVENT { - if emitter.open_ended { - if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { - return false - } - if !yaml_emitter_write_indent(emitter) { - return false - } - } - if !yaml_emitter_flush(emitter) { - return false - } - emitter.state = yaml_EMIT_END_STATE - return true - } - - return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-START or STREAM-END") -} - -// Expect the root node. -func yaml_emitter_emit_document_content(emitter *yaml_emitter_t, event *yaml_event_t) bool { - emitter.states = append(emitter.states, yaml_EMIT_DOCUMENT_END_STATE) - return yaml_emitter_emit_node(emitter, event, true, false, false, false) -} - -// Expect DOCUMENT-END. -func yaml_emitter_emit_document_end(emitter *yaml_emitter_t, event *yaml_event_t) bool { - if event.typ != yaml_DOCUMENT_END_EVENT { - return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-END") - } - if !yaml_emitter_write_indent(emitter) { - return false - } - if !event.implicit { - // [Go] Allocate the slice elsewhere. 
- if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { - return false - } - if !yaml_emitter_write_indent(emitter) { - return false - } - } - if !yaml_emitter_flush(emitter) { - return false - } - emitter.state = yaml_EMIT_DOCUMENT_START_STATE - emitter.tag_directives = emitter.tag_directives[:0] - return true -} - -// Expect a flow item node. -func yaml_emitter_emit_flow_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { - if first { - if !yaml_emitter_write_indicator(emitter, []byte{'['}, true, true, false) { - return false - } - if !yaml_emitter_increase_indent(emitter, true, false) { - return false - } - emitter.flow_level++ - } - - if event.typ == yaml_SEQUENCE_END_EVENT { - emitter.flow_level-- - emitter.indent = emitter.indents[len(emitter.indents)-1] - emitter.indents = emitter.indents[:len(emitter.indents)-1] - if emitter.canonical && !first { - if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { - return false - } - if !yaml_emitter_write_indent(emitter) { - return false - } - } - if !yaml_emitter_write_indicator(emitter, []byte{']'}, false, false, false) { - return false - } - emitter.state = emitter.states[len(emitter.states)-1] - emitter.states = emitter.states[:len(emitter.states)-1] - - return true - } - - if !first { - if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { - return false - } - } - - if emitter.canonical || emitter.column > emitter.best_width { - if !yaml_emitter_write_indent(emitter) { - return false - } - } - emitter.states = append(emitter.states, yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE) - return yaml_emitter_emit_node(emitter, event, false, true, false, false) -} - -// Expect a flow key node. -func yaml_emitter_emit_flow_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { - if first { - if !yaml_emitter_write_indicator(emitter, []byte{'{'}, true, true, false) { - return false - } - if !yaml_emitter_increase_indent(emitter, true, false) { - return false - } - emitter.flow_level++ - } - - if event.typ == yaml_MAPPING_END_EVENT { - emitter.flow_level-- - emitter.indent = emitter.indents[len(emitter.indents)-1] - emitter.indents = emitter.indents[:len(emitter.indents)-1] - if emitter.canonical && !first { - if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { - return false - } - if !yaml_emitter_write_indent(emitter) { - return false - } - } - if !yaml_emitter_write_indicator(emitter, []byte{'}'}, false, false, false) { - return false - } - emitter.state = emitter.states[len(emitter.states)-1] - emitter.states = emitter.states[:len(emitter.states)-1] - return true - } - - if !first { - if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { - return false - } - } - if emitter.canonical || emitter.column > emitter.best_width { - if !yaml_emitter_write_indent(emitter) { - return false - } - } - - if !emitter.canonical && yaml_emitter_check_simple_key(emitter) { - emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE) - return yaml_emitter_emit_node(emitter, event, false, false, true, true) - } - if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, false) { - return false - } - emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_VALUE_STATE) - return yaml_emitter_emit_node(emitter, event, false, false, true, false) -} - -// Expect a flow value node. 
-func yaml_emitter_emit_flow_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool { - if simple { - if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) { - return false - } - } else { - if emitter.canonical || emitter.column > emitter.best_width { - if !yaml_emitter_write_indent(emitter) { - return false - } - } - if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, false) { - return false - } - } - emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_KEY_STATE) - return yaml_emitter_emit_node(emitter, event, false, false, true, false) -} - -// Expect a block item node. -func yaml_emitter_emit_block_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { - if first { - if !yaml_emitter_increase_indent(emitter, false, emitter.mapping_context && !emitter.indention) { - return false - } - } - if event.typ == yaml_SEQUENCE_END_EVENT { - emitter.indent = emitter.indents[len(emitter.indents)-1] - emitter.indents = emitter.indents[:len(emitter.indents)-1] - emitter.state = emitter.states[len(emitter.states)-1] - emitter.states = emitter.states[:len(emitter.states)-1] - return true - } - if !yaml_emitter_write_indent(emitter) { - return false - } - if !yaml_emitter_write_indicator(emitter, []byte{'-'}, true, false, true) { - return false - } - emitter.states = append(emitter.states, yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE) - return yaml_emitter_emit_node(emitter, event, false, true, false, false) -} - -// Expect a block key node. -func yaml_emitter_emit_block_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { - if first { - if !yaml_emitter_increase_indent(emitter, false, false) { - return false - } - } - if event.typ == yaml_MAPPING_END_EVENT { - emitter.indent = emitter.indents[len(emitter.indents)-1] - emitter.indents = emitter.indents[:len(emitter.indents)-1] - emitter.state = emitter.states[len(emitter.states)-1] - emitter.states = emitter.states[:len(emitter.states)-1] - return true - } - if !yaml_emitter_write_indent(emitter) { - return false - } - if yaml_emitter_check_simple_key(emitter) { - emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE) - return yaml_emitter_emit_node(emitter, event, false, false, true, true) - } - if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, true) { - return false - } - emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_VALUE_STATE) - return yaml_emitter_emit_node(emitter, event, false, false, true, false) -} - -// Expect a block value node. -func yaml_emitter_emit_block_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool { - if simple { - if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) { - return false - } - } else { - if !yaml_emitter_write_indent(emitter) { - return false - } - if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, true) { - return false - } - } - emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_KEY_STATE) - return yaml_emitter_emit_node(emitter, event, false, false, true, false) -} - -// Expect a node. 
-func yaml_emitter_emit_node(emitter *yaml_emitter_t, event *yaml_event_t, - root bool, sequence bool, mapping bool, simple_key bool) bool { - - emitter.root_context = root - emitter.sequence_context = sequence - emitter.mapping_context = mapping - emitter.simple_key_context = simple_key - - switch event.typ { - case yaml_ALIAS_EVENT: - return yaml_emitter_emit_alias(emitter, event) - case yaml_SCALAR_EVENT: - return yaml_emitter_emit_scalar(emitter, event) - case yaml_SEQUENCE_START_EVENT: - return yaml_emitter_emit_sequence_start(emitter, event) - case yaml_MAPPING_START_EVENT: - return yaml_emitter_emit_mapping_start(emitter, event) - default: - return yaml_emitter_set_emitter_error(emitter, - "expected SCALAR, SEQUENCE-START, MAPPING-START, or ALIAS") - } - return false -} - -// Expect ALIAS. -func yaml_emitter_emit_alias(emitter *yaml_emitter_t, event *yaml_event_t) bool { - if !yaml_emitter_process_anchor(emitter) { - return false - } - emitter.state = emitter.states[len(emitter.states)-1] - emitter.states = emitter.states[:len(emitter.states)-1] - return true -} - -// Expect SCALAR. -func yaml_emitter_emit_scalar(emitter *yaml_emitter_t, event *yaml_event_t) bool { - if !yaml_emitter_select_scalar_style(emitter, event) { - return false - } - if !yaml_emitter_process_anchor(emitter) { - return false - } - if !yaml_emitter_process_tag(emitter) { - return false - } - if !yaml_emitter_increase_indent(emitter, true, false) { - return false - } - if !yaml_emitter_process_scalar(emitter) { - return false - } - emitter.indent = emitter.indents[len(emitter.indents)-1] - emitter.indents = emitter.indents[:len(emitter.indents)-1] - emitter.state = emitter.states[len(emitter.states)-1] - emitter.states = emitter.states[:len(emitter.states)-1] - return true -} - -// Expect SEQUENCE-START. -func yaml_emitter_emit_sequence_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { - if !yaml_emitter_process_anchor(emitter) { - return false - } - if !yaml_emitter_process_tag(emitter) { - return false - } - if emitter.flow_level > 0 || emitter.canonical || event.sequence_style() == yaml_FLOW_SEQUENCE_STYLE || - yaml_emitter_check_empty_sequence(emitter) { - emitter.state = yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE - } else { - emitter.state = yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE - } - return true -} - -// Expect MAPPING-START. -func yaml_emitter_emit_mapping_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { - if !yaml_emitter_process_anchor(emitter) { - return false - } - if !yaml_emitter_process_tag(emitter) { - return false - } - if emitter.flow_level > 0 || emitter.canonical || event.mapping_style() == yaml_FLOW_MAPPING_STYLE || - yaml_emitter_check_empty_mapping(emitter) { - emitter.state = yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE - } else { - emitter.state = yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE - } - return true -} - -// Check if the document content is an empty scalar. -func yaml_emitter_check_empty_document(emitter *yaml_emitter_t) bool { - return false // [Go] Huh? -} - -// Check if the next events represent an empty sequence. -func yaml_emitter_check_empty_sequence(emitter *yaml_emitter_t) bool { - if len(emitter.events)-emitter.events_head < 2 { - return false - } - return emitter.events[emitter.events_head].typ == yaml_SEQUENCE_START_EVENT && - emitter.events[emitter.events_head+1].typ == yaml_SEQUENCE_END_EVENT -} - -// Check if the next events represent an empty mapping. 
-func yaml_emitter_check_empty_mapping(emitter *yaml_emitter_t) bool { - if len(emitter.events)-emitter.events_head < 2 { - return false - } - return emitter.events[emitter.events_head].typ == yaml_MAPPING_START_EVENT && - emitter.events[emitter.events_head+1].typ == yaml_MAPPING_END_EVENT -} - -// Check if the next node can be expressed as a simple key. -func yaml_emitter_check_simple_key(emitter *yaml_emitter_t) bool { - length := 0 - switch emitter.events[emitter.events_head].typ { - case yaml_ALIAS_EVENT: - length += len(emitter.anchor_data.anchor) - case yaml_SCALAR_EVENT: - if emitter.scalar_data.multiline { - return false - } - length += len(emitter.anchor_data.anchor) + - len(emitter.tag_data.handle) + - len(emitter.tag_data.suffix) + - len(emitter.scalar_data.value) - case yaml_SEQUENCE_START_EVENT: - if !yaml_emitter_check_empty_sequence(emitter) { - return false - } - length += len(emitter.anchor_data.anchor) + - len(emitter.tag_data.handle) + - len(emitter.tag_data.suffix) - case yaml_MAPPING_START_EVENT: - if !yaml_emitter_check_empty_mapping(emitter) { - return false - } - length += len(emitter.anchor_data.anchor) + - len(emitter.tag_data.handle) + - len(emitter.tag_data.suffix) - default: - return false - } - return length <= 128 -} - -// Determine an acceptable scalar style. -func yaml_emitter_select_scalar_style(emitter *yaml_emitter_t, event *yaml_event_t) bool { - - no_tag := len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0 - if no_tag && !event.implicit && !event.quoted_implicit { - return yaml_emitter_set_emitter_error(emitter, "neither tag nor implicit flags are specified") - } - - style := event.scalar_style() - if style == yaml_ANY_SCALAR_STYLE { - style = yaml_PLAIN_SCALAR_STYLE - } - if emitter.canonical { - style = yaml_DOUBLE_QUOTED_SCALAR_STYLE - } - if emitter.simple_key_context && emitter.scalar_data.multiline { - style = yaml_DOUBLE_QUOTED_SCALAR_STYLE - } - - if style == yaml_PLAIN_SCALAR_STYLE { - if emitter.flow_level > 0 && !emitter.scalar_data.flow_plain_allowed || - emitter.flow_level == 0 && !emitter.scalar_data.block_plain_allowed { - style = yaml_SINGLE_QUOTED_SCALAR_STYLE - } - if len(emitter.scalar_data.value) == 0 && (emitter.flow_level > 0 || emitter.simple_key_context) { - style = yaml_SINGLE_QUOTED_SCALAR_STYLE - } - if no_tag && !event.implicit { - style = yaml_SINGLE_QUOTED_SCALAR_STYLE - } - } - if style == yaml_SINGLE_QUOTED_SCALAR_STYLE { - if !emitter.scalar_data.single_quoted_allowed { - style = yaml_DOUBLE_QUOTED_SCALAR_STYLE - } - } - if style == yaml_LITERAL_SCALAR_STYLE || style == yaml_FOLDED_SCALAR_STYLE { - if !emitter.scalar_data.block_allowed || emitter.flow_level > 0 || emitter.simple_key_context { - style = yaml_DOUBLE_QUOTED_SCALAR_STYLE - } - } - - if no_tag && !event.quoted_implicit && style != yaml_PLAIN_SCALAR_STYLE { - emitter.tag_data.handle = []byte{'!'} - } - emitter.scalar_data.style = style - return true -} - -// Write an achor. -func yaml_emitter_process_anchor(emitter *yaml_emitter_t) bool { - if emitter.anchor_data.anchor == nil { - return true - } - c := []byte{'&'} - if emitter.anchor_data.alias { - c[0] = '*' - } - if !yaml_emitter_write_indicator(emitter, c, true, false, false) { - return false - } - return yaml_emitter_write_anchor(emitter, emitter.anchor_data.anchor) -} - -// Write a tag. 
-func yaml_emitter_process_tag(emitter *yaml_emitter_t) bool { - if len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0 { - return true - } - if len(emitter.tag_data.handle) > 0 { - if !yaml_emitter_write_tag_handle(emitter, emitter.tag_data.handle) { - return false - } - if len(emitter.tag_data.suffix) > 0 { - if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) { - return false - } - } - } else { - // [Go] Allocate these slices elsewhere. - if !yaml_emitter_write_indicator(emitter, []byte("!<"), true, false, false) { - return false - } - if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) { - return false - } - if !yaml_emitter_write_indicator(emitter, []byte{'>'}, false, false, false) { - return false - } - } - return true -} - -// Write a scalar. -func yaml_emitter_process_scalar(emitter *yaml_emitter_t) bool { - switch emitter.scalar_data.style { - case yaml_PLAIN_SCALAR_STYLE: - return yaml_emitter_write_plain_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context) - - case yaml_SINGLE_QUOTED_SCALAR_STYLE: - return yaml_emitter_write_single_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context) - - case yaml_DOUBLE_QUOTED_SCALAR_STYLE: - return yaml_emitter_write_double_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context) - - case yaml_LITERAL_SCALAR_STYLE: - return yaml_emitter_write_literal_scalar(emitter, emitter.scalar_data.value) - - case yaml_FOLDED_SCALAR_STYLE: - return yaml_emitter_write_folded_scalar(emitter, emitter.scalar_data.value) - } - panic("unknown scalar style") -} - -// Check if a %YAML directive is valid. -func yaml_emitter_analyze_version_directive(emitter *yaml_emitter_t, version_directive *yaml_version_directive_t) bool { - if version_directive.major != 1 || version_directive.minor != 1 { - return yaml_emitter_set_emitter_error(emitter, "incompatible %YAML directive") - } - return true -} - -// Check if a %TAG directive is valid. -func yaml_emitter_analyze_tag_directive(emitter *yaml_emitter_t, tag_directive *yaml_tag_directive_t) bool { - handle := tag_directive.handle - prefix := tag_directive.prefix - if len(handle) == 0 { - return yaml_emitter_set_emitter_error(emitter, "tag handle must not be empty") - } - if handle[0] != '!' { - return yaml_emitter_set_emitter_error(emitter, "tag handle must start with '!'") - } - if handle[len(handle)-1] != '!' { - return yaml_emitter_set_emitter_error(emitter, "tag handle must end with '!'") - } - for i := 1; i < len(handle)-1; i += width(handle[i]) { - if !is_alpha(handle, i) { - return yaml_emitter_set_emitter_error(emitter, "tag handle must contain alphanumerical characters only") - } - } - if len(prefix) == 0 { - return yaml_emitter_set_emitter_error(emitter, "tag prefix must not be empty") - } - return true -} - -// Check if an anchor is valid. 
-func yaml_emitter_analyze_anchor(emitter *yaml_emitter_t, anchor []byte, alias bool) bool { - if len(anchor) == 0 { - problem := "anchor value must not be empty" - if alias { - problem = "alias value must not be empty" - } - return yaml_emitter_set_emitter_error(emitter, problem) - } - for i := 0; i < len(anchor); i += width(anchor[i]) { - if !is_alpha(anchor, i) { - problem := "anchor value must contain alphanumerical characters only" - if alias { - problem = "alias value must contain alphanumerical characters only" - } - return yaml_emitter_set_emitter_error(emitter, problem) - } - } - emitter.anchor_data.anchor = anchor - emitter.anchor_data.alias = alias - return true -} - -// Check if a tag is valid. -func yaml_emitter_analyze_tag(emitter *yaml_emitter_t, tag []byte) bool { - if len(tag) == 0 { - return yaml_emitter_set_emitter_error(emitter, "tag value must not be empty") - } - for i := 0; i < len(emitter.tag_directives); i++ { - tag_directive := &emitter.tag_directives[i] - if bytes.HasPrefix(tag, tag_directive.prefix) { - emitter.tag_data.handle = tag_directive.handle - emitter.tag_data.suffix = tag[len(tag_directive.prefix):] - return true - } - } - emitter.tag_data.suffix = tag - return true -} - -// Check if a scalar is valid. -func yaml_emitter_analyze_scalar(emitter *yaml_emitter_t, value []byte) bool { - var ( - block_indicators = false - flow_indicators = false - line_breaks = false - special_characters = false - - leading_space = false - leading_break = false - trailing_space = false - trailing_break = false - break_space = false - space_break = false - - preceeded_by_whitespace = false - followed_by_whitespace = false - previous_space = false - previous_break = false - ) - - emitter.scalar_data.value = value - - if len(value) == 0 { - emitter.scalar_data.multiline = false - emitter.scalar_data.flow_plain_allowed = false - emitter.scalar_data.block_plain_allowed = true - emitter.scalar_data.single_quoted_allowed = true - emitter.scalar_data.block_allowed = false - return true - } - - if len(value) >= 3 && ((value[0] == '-' && value[1] == '-' && value[2] == '-') || (value[0] == '.' && value[1] == '.' 
&& value[2] == '.')) { - block_indicators = true - flow_indicators = true - } - - preceeded_by_whitespace = true - for i, w := 0, 0; i < len(value); i += w { - w = width(value[i]) - followed_by_whitespace = i+w >= len(value) || is_blank(value, i+w) - - if i == 0 { - switch value[i] { - case '#', ',', '[', ']', '{', '}', '&', '*', '!', '|', '>', '\'', '"', '%', '@', '`': - flow_indicators = true - block_indicators = true - case '?', ':': - flow_indicators = true - if followed_by_whitespace { - block_indicators = true - } - case '-': - if followed_by_whitespace { - flow_indicators = true - block_indicators = true - } - } - } else { - switch value[i] { - case ',', '?', '[', ']', '{', '}': - flow_indicators = true - case ':': - flow_indicators = true - if followed_by_whitespace { - block_indicators = true - } - case '#': - if preceeded_by_whitespace { - flow_indicators = true - block_indicators = true - } - } - } - - if !is_printable(value, i) || !is_ascii(value, i) && !emitter.unicode { - special_characters = true - } - if is_space(value, i) { - if i == 0 { - leading_space = true - } - if i+width(value[i]) == len(value) { - trailing_space = true - } - if previous_break { - break_space = true - } - previous_space = true - previous_break = false - } else if is_break(value, i) { - line_breaks = true - if i == 0 { - leading_break = true - } - if i+width(value[i]) == len(value) { - trailing_break = true - } - if previous_space { - space_break = true - } - previous_space = false - previous_break = true - } else { - previous_space = false - previous_break = false - } - - // [Go]: Why 'z'? Couldn't be the end of the string as that's the loop condition. - preceeded_by_whitespace = is_blankz(value, i) - } - - emitter.scalar_data.multiline = line_breaks - emitter.scalar_data.flow_plain_allowed = true - emitter.scalar_data.block_plain_allowed = true - emitter.scalar_data.single_quoted_allowed = true - emitter.scalar_data.block_allowed = true - - if leading_space || leading_break || trailing_space || trailing_break { - emitter.scalar_data.flow_plain_allowed = false - emitter.scalar_data.block_plain_allowed = false - } - if trailing_space { - emitter.scalar_data.block_allowed = false - } - if break_space { - emitter.scalar_data.flow_plain_allowed = false - emitter.scalar_data.block_plain_allowed = false - emitter.scalar_data.single_quoted_allowed = false - } - if space_break || special_characters { - emitter.scalar_data.flow_plain_allowed = false - emitter.scalar_data.block_plain_allowed = false - emitter.scalar_data.single_quoted_allowed = false - emitter.scalar_data.block_allowed = false - } - if line_breaks { - emitter.scalar_data.flow_plain_allowed = false - emitter.scalar_data.block_plain_allowed = false - } - if flow_indicators { - emitter.scalar_data.flow_plain_allowed = false - } - if block_indicators { - emitter.scalar_data.block_plain_allowed = false - } - return true -} - -// Check if the event data is valid. 
-func yaml_emitter_analyze_event(emitter *yaml_emitter_t, event *yaml_event_t) bool { - - emitter.anchor_data.anchor = nil - emitter.tag_data.handle = nil - emitter.tag_data.suffix = nil - emitter.scalar_data.value = nil - - switch event.typ { - case yaml_ALIAS_EVENT: - if !yaml_emitter_analyze_anchor(emitter, event.anchor, true) { - return false - } - - case yaml_SCALAR_EVENT: - if len(event.anchor) > 0 { - if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { - return false - } - } - if len(event.tag) > 0 && (emitter.canonical || (!event.implicit && !event.quoted_implicit)) { - if !yaml_emitter_analyze_tag(emitter, event.tag) { - return false - } - } - if !yaml_emitter_analyze_scalar(emitter, event.value) { - return false - } - - case yaml_SEQUENCE_START_EVENT: - if len(event.anchor) > 0 { - if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { - return false - } - } - if len(event.tag) > 0 && (emitter.canonical || !event.implicit) { - if !yaml_emitter_analyze_tag(emitter, event.tag) { - return false - } - } - - case yaml_MAPPING_START_EVENT: - if len(event.anchor) > 0 { - if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { - return false - } - } - if len(event.tag) > 0 && (emitter.canonical || !event.implicit) { - if !yaml_emitter_analyze_tag(emitter, event.tag) { - return false - } - } - } - return true -} - -// Write the BOM character. -func yaml_emitter_write_bom(emitter *yaml_emitter_t) bool { - if !flush(emitter) { - return false - } - pos := emitter.buffer_pos - emitter.buffer[pos+0] = '\xEF' - emitter.buffer[pos+1] = '\xBB' - emitter.buffer[pos+2] = '\xBF' - emitter.buffer_pos += 3 - return true -} - -func yaml_emitter_write_indent(emitter *yaml_emitter_t) bool { - indent := emitter.indent - if indent < 0 { - indent = 0 - } - if !emitter.indention || emitter.column > indent || (emitter.column == indent && !emitter.whitespace) { - if !put_break(emitter) { - return false - } - } - for emitter.column < indent { - if !put(emitter, ' ') { - return false - } - } - emitter.whitespace = true - emitter.indention = true - return true -} - -func yaml_emitter_write_indicator(emitter *yaml_emitter_t, indicator []byte, need_whitespace, is_whitespace, is_indention bool) bool { - if need_whitespace && !emitter.whitespace { - if !put(emitter, ' ') { - return false - } - } - if !write_all(emitter, indicator) { - return false - } - emitter.whitespace = is_whitespace - emitter.indention = (emitter.indention && is_indention) - emitter.open_ended = false - return true -} - -func yaml_emitter_write_anchor(emitter *yaml_emitter_t, value []byte) bool { - if !write_all(emitter, value) { - return false - } - emitter.whitespace = false - emitter.indention = false - return true -} - -func yaml_emitter_write_tag_handle(emitter *yaml_emitter_t, value []byte) bool { - if !emitter.whitespace { - if !put(emitter, ' ') { - return false - } - } - if !write_all(emitter, value) { - return false - } - emitter.whitespace = false - emitter.indention = false - return true -} - -func yaml_emitter_write_tag_content(emitter *yaml_emitter_t, value []byte, need_whitespace bool) bool { - if need_whitespace && !emitter.whitespace { - if !put(emitter, ' ') { - return false - } - } - for i := 0; i < len(value); { - var must_write bool - switch value[i] { - case ';', '/', '?', ':', '@', '&', '=', '+', '$', ',', '_', '.', '~', '*', '\'', '(', ')', '[', ']': - must_write = true - default: - must_write = is_alpha(value, i) - } - if must_write { - if !write(emitter, value, &i) { - return false - } - } 
else { - w := width(value[i]) - for k := 0; k < w; k++ { - octet := value[i] - i++ - if !put(emitter, '%') { - return false - } - - c := octet >> 4 - if c < 10 { - c += '0' - } else { - c += 'A' - 10 - } - if !put(emitter, c) { - return false - } - - c = octet & 0x0f - if c < 10 { - c += '0' - } else { - c += 'A' - 10 - } - if !put(emitter, c) { - return false - } - } - } - } - emitter.whitespace = false - emitter.indention = false - return true -} - -func yaml_emitter_write_plain_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { - if !emitter.whitespace { - if !put(emitter, ' ') { - return false - } - } - - spaces := false - breaks := false - for i := 0; i < len(value); { - if is_space(value, i) { - if allow_breaks && !spaces && emitter.column > emitter.best_width && !is_space(value, i+1) { - if !yaml_emitter_write_indent(emitter) { - return false - } - i += width(value[i]) - } else { - if !write(emitter, value, &i) { - return false - } - } - spaces = true - } else if is_break(value, i) { - if !breaks && value[i] == '\n' { - if !put_break(emitter) { - return false - } - } - if !write_break(emitter, value, &i) { - return false - } - emitter.indention = true - breaks = true - } else { - if breaks { - if !yaml_emitter_write_indent(emitter) { - return false - } - } - if !write(emitter, value, &i) { - return false - } - emitter.indention = false - spaces = false - breaks = false - } - } - - emitter.whitespace = false - emitter.indention = false - if emitter.root_context { - emitter.open_ended = true - } - - return true -} - -func yaml_emitter_write_single_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { - - if !yaml_emitter_write_indicator(emitter, []byte{'\''}, true, false, false) { - return false - } - - spaces := false - breaks := false - for i := 0; i < len(value); { - if is_space(value, i) { - if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 && !is_space(value, i+1) { - if !yaml_emitter_write_indent(emitter) { - return false - } - i += width(value[i]) - } else { - if !write(emitter, value, &i) { - return false - } - } - spaces = true - } else if is_break(value, i) { - if !breaks && value[i] == '\n' { - if !put_break(emitter) { - return false - } - } - if !write_break(emitter, value, &i) { - return false - } - emitter.indention = true - breaks = true - } else { - if breaks { - if !yaml_emitter_write_indent(emitter) { - return false - } - } - if value[i] == '\'' { - if !put(emitter, '\'') { - return false - } - } - if !write(emitter, value, &i) { - return false - } - emitter.indention = false - spaces = false - breaks = false - } - } - if !yaml_emitter_write_indicator(emitter, []byte{'\''}, false, false, false) { - return false - } - emitter.whitespace = false - emitter.indention = false - return true -} - -func yaml_emitter_write_double_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { - spaces := false - if !yaml_emitter_write_indicator(emitter, []byte{'"'}, true, false, false) { - return false - } - - for i := 0; i < len(value); { - if !is_printable(value, i) || (!emitter.unicode && !is_ascii(value, i)) || - is_bom(value, i) || is_break(value, i) || - value[i] == '"' || value[i] == '\\' { - - octet := value[i] - - var w int - var v rune - switch { - case octet&0x80 == 0x00: - w, v = 1, rune(octet&0x7F) - case octet&0xE0 == 0xC0: - w, v = 2, rune(octet&0x1F) - case octet&0xF0 == 0xE0: - w, v = 3, rune(octet&0x0F) - case octet&0xF8 == 0xF0: - w, v = 4, 
rune(octet&0x07) - } - for k := 1; k < w; k++ { - octet = value[i+k] - v = (v << 6) + (rune(octet) & 0x3F) - } - i += w - - if !put(emitter, '\\') { - return false - } - - var ok bool - switch v { - case 0x00: - ok = put(emitter, '0') - case 0x07: - ok = put(emitter, 'a') - case 0x08: - ok = put(emitter, 'b') - case 0x09: - ok = put(emitter, 't') - case 0x0A: - ok = put(emitter, 'n') - case 0x0b: - ok = put(emitter, 'v') - case 0x0c: - ok = put(emitter, 'f') - case 0x0d: - ok = put(emitter, 'r') - case 0x1b: - ok = put(emitter, 'e') - case 0x22: - ok = put(emitter, '"') - case 0x5c: - ok = put(emitter, '\\') - case 0x85: - ok = put(emitter, 'N') - case 0xA0: - ok = put(emitter, '_') - case 0x2028: - ok = put(emitter, 'L') - case 0x2029: - ok = put(emitter, 'P') - default: - if v <= 0xFF { - ok = put(emitter, 'x') - w = 2 - } else if v <= 0xFFFF { - ok = put(emitter, 'u') - w = 4 - } else { - ok = put(emitter, 'U') - w = 8 - } - for k := (w - 1) * 4; ok && k >= 0; k -= 4 { - digit := byte((v >> uint(k)) & 0x0F) - if digit < 10 { - ok = put(emitter, digit+'0') - } else { - ok = put(emitter, digit+'A'-10) - } - } - } - if !ok { - return false - } - spaces = false - } else if is_space(value, i) { - if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 { - if !yaml_emitter_write_indent(emitter) { - return false - } - if is_space(value, i+1) { - if !put(emitter, '\\') { - return false - } - } - i += width(value[i]) - } else if !write(emitter, value, &i) { - return false - } - spaces = true - } else { - if !write(emitter, value, &i) { - return false - } - spaces = false - } - } - if !yaml_emitter_write_indicator(emitter, []byte{'"'}, false, false, false) { - return false - } - emitter.whitespace = false - emitter.indention = false - return true -} - -func yaml_emitter_write_block_scalar_hints(emitter *yaml_emitter_t, value []byte) bool { - if is_space(value, 0) || is_break(value, 0) { - indent_hint := []byte{'0' + byte(emitter.best_indent)} - if !yaml_emitter_write_indicator(emitter, indent_hint, false, false, false) { - return false - } - } - - emitter.open_ended = false - - var chomp_hint [1]byte - if len(value) == 0 { - chomp_hint[0] = '-' - } else { - i := len(value) - 1 - for value[i]&0xC0 == 0x80 { - i-- - } - if !is_break(value, i) { - chomp_hint[0] = '-' - } else if i == 0 { - chomp_hint[0] = '+' - emitter.open_ended = true - } else { - i-- - for value[i]&0xC0 == 0x80 { - i-- - } - if is_break(value, i) { - chomp_hint[0] = '+' - emitter.open_ended = true - } - } - } - if chomp_hint[0] != 0 { - if !yaml_emitter_write_indicator(emitter, chomp_hint[:], false, false, false) { - return false - } - } - return true -} - -func yaml_emitter_write_literal_scalar(emitter *yaml_emitter_t, value []byte) bool { - if !yaml_emitter_write_indicator(emitter, []byte{'|'}, true, false, false) { - return false - } - if !yaml_emitter_write_block_scalar_hints(emitter, value) { - return false - } - if !put_break(emitter) { - return false - } - emitter.indention = true - emitter.whitespace = true - breaks := true - for i := 0; i < len(value); { - if is_break(value, i) { - if !write_break(emitter, value, &i) { - return false - } - emitter.indention = true - breaks = true - } else { - if breaks { - if !yaml_emitter_write_indent(emitter) { - return false - } - } - if !write(emitter, value, &i) { - return false - } - emitter.indention = false - breaks = false - } - } - - return true -} - -func yaml_emitter_write_folded_scalar(emitter *yaml_emitter_t, value []byte) bool { - if 
!yaml_emitter_write_indicator(emitter, []byte{'>'}, true, false, false) { - return false - } - if !yaml_emitter_write_block_scalar_hints(emitter, value) { - return false - } - - if !put_break(emitter) { - return false - } - emitter.indention = true - emitter.whitespace = true - - breaks := true - leading_spaces := true - for i := 0; i < len(value); { - if is_break(value, i) { - if !breaks && !leading_spaces && value[i] == '\n' { - k := 0 - for is_break(value, k) { - k += width(value[k]) - } - if !is_blankz(value, k) { - if !put_break(emitter) { - return false - } - } - } - if !write_break(emitter, value, &i) { - return false - } - emitter.indention = true - breaks = true - } else { - if breaks { - if !yaml_emitter_write_indent(emitter) { - return false - } - leading_spaces = is_blank(value, i) - } - if !breaks && is_space(value, i) && !is_space(value, i+1) && emitter.column > emitter.best_width { - if !yaml_emitter_write_indent(emitter) { - return false - } - i += width(value[i]) - } else { - if !write(emitter, value, &i) { - return false - } - } - emitter.indention = false - breaks = false - } - } - return true -} diff --git a/vendor/gopkg.in/yaml.v2/encode.go b/vendor/gopkg.in/yaml.v2/encode.go deleted file mode 100644 index 84f8499..0000000 --- a/vendor/gopkg.in/yaml.v2/encode.go +++ /dev/null @@ -1,306 +0,0 @@ -package yaml - -import ( - "encoding" - "fmt" - "reflect" - "regexp" - "sort" - "strconv" - "strings" - "time" -) - -type encoder struct { - emitter yaml_emitter_t - event yaml_event_t - out []byte - flow bool -} - -func newEncoder() (e *encoder) { - e = &encoder{} - e.must(yaml_emitter_initialize(&e.emitter)) - yaml_emitter_set_output_string(&e.emitter, &e.out) - yaml_emitter_set_unicode(&e.emitter, true) - e.must(yaml_stream_start_event_initialize(&e.event, yaml_UTF8_ENCODING)) - e.emit() - e.must(yaml_document_start_event_initialize(&e.event, nil, nil, true)) - e.emit() - return e -} - -func (e *encoder) finish() { - e.must(yaml_document_end_event_initialize(&e.event, true)) - e.emit() - e.emitter.open_ended = false - e.must(yaml_stream_end_event_initialize(&e.event)) - e.emit() -} - -func (e *encoder) destroy() { - yaml_emitter_delete(&e.emitter) -} - -func (e *encoder) emit() { - // This will internally delete the e.event value. 
- if !yaml_emitter_emit(&e.emitter, &e.event) && e.event.typ != yaml_DOCUMENT_END_EVENT && e.event.typ != yaml_STREAM_END_EVENT { - e.must(false) - } -} - -func (e *encoder) must(ok bool) { - if !ok { - msg := e.emitter.problem - if msg == "" { - msg = "unknown problem generating YAML content" - } - failf("%s", msg) - } -} - -func (e *encoder) marshal(tag string, in reflect.Value) { - if !in.IsValid() { - e.nilv() - return - } - iface := in.Interface() - if m, ok := iface.(Marshaler); ok { - v, err := m.MarshalYAML() - if err != nil { - fail(err) - } - if v == nil { - e.nilv() - return - } - in = reflect.ValueOf(v) - } else if m, ok := iface.(encoding.TextMarshaler); ok { - text, err := m.MarshalText() - if err != nil { - fail(err) - } - in = reflect.ValueOf(string(text)) - } - switch in.Kind() { - case reflect.Interface: - if in.IsNil() { - e.nilv() - } else { - e.marshal(tag, in.Elem()) - } - case reflect.Map: - e.mapv(tag, in) - case reflect.Ptr: - if in.IsNil() { - e.nilv() - } else { - e.marshal(tag, in.Elem()) - } - case reflect.Struct: - e.structv(tag, in) - case reflect.Slice: - if in.Type().Elem() == mapItemType { - e.itemsv(tag, in) - } else { - e.slicev(tag, in) - } - case reflect.String: - e.stringv(tag, in) - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - if in.Type() == durationType { - e.stringv(tag, reflect.ValueOf(iface.(time.Duration).String())) - } else { - e.intv(tag, in) - } - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - e.uintv(tag, in) - case reflect.Float32, reflect.Float64: - e.floatv(tag, in) - case reflect.Bool: - e.boolv(tag, in) - default: - panic("cannot marshal type: " + in.Type().String()) - } -} - -func (e *encoder) mapv(tag string, in reflect.Value) { - e.mappingv(tag, func() { - keys := keyList(in.MapKeys()) - sort.Sort(keys) - for _, k := range keys { - e.marshal("", k) - e.marshal("", in.MapIndex(k)) - } - }) -} - -func (e *encoder) itemsv(tag string, in reflect.Value) { - e.mappingv(tag, func() { - slice := in.Convert(reflect.TypeOf([]MapItem{})).Interface().([]MapItem) - for _, item := range slice { - e.marshal("", reflect.ValueOf(item.Key)) - e.marshal("", reflect.ValueOf(item.Value)) - } - }) -} - -func (e *encoder) structv(tag string, in reflect.Value) { - sinfo, err := getStructInfo(in.Type()) - if err != nil { - panic(err) - } - e.mappingv(tag, func() { - for _, info := range sinfo.FieldsList { - var value reflect.Value - if info.Inline == nil { - value = in.Field(info.Num) - } else { - value = in.FieldByIndex(info.Inline) - } - if info.OmitEmpty && isZero(value) { - continue - } - e.marshal("", reflect.ValueOf(info.Key)) - e.flow = info.Flow - e.marshal("", value) - } - if sinfo.InlineMap >= 0 { - m := in.Field(sinfo.InlineMap) - if m.Len() > 0 { - e.flow = false - keys := keyList(m.MapKeys()) - sort.Sort(keys) - for _, k := range keys { - if _, found := sinfo.FieldsMap[k.String()]; found { - panic(fmt.Sprintf("Can't have key %q in inlined map; conflicts with struct field", k.String())) - } - e.marshal("", k) - e.flow = false - e.marshal("", m.MapIndex(k)) - } - } - } - }) -} - -func (e *encoder) mappingv(tag string, f func()) { - implicit := tag == "" - style := yaml_BLOCK_MAPPING_STYLE - if e.flow { - e.flow = false - style = yaml_FLOW_MAPPING_STYLE - } - e.must(yaml_mapping_start_event_initialize(&e.event, nil, []byte(tag), implicit, style)) - e.emit() - f() - e.must(yaml_mapping_end_event_initialize(&e.event)) - e.emit() -} - -func (e *encoder) slicev(tag 
string, in reflect.Value) { - implicit := tag == "" - style := yaml_BLOCK_SEQUENCE_STYLE - if e.flow { - e.flow = false - style = yaml_FLOW_SEQUENCE_STYLE - } - e.must(yaml_sequence_start_event_initialize(&e.event, nil, []byte(tag), implicit, style)) - e.emit() - n := in.Len() - for i := 0; i < n; i++ { - e.marshal("", in.Index(i)) - } - e.must(yaml_sequence_end_event_initialize(&e.event)) - e.emit() -} - -// isBase60 returns whether s is in base 60 notation as defined in YAML 1.1. -// -// The base 60 float notation in YAML 1.1 is a terrible idea and is unsupported -// in YAML 1.2 and by this package, but these should be marshalled quoted for -// the time being for compatibility with other parsers. -func isBase60Float(s string) (result bool) { - // Fast path. - if s == "" { - return false - } - c := s[0] - if !(c == '+' || c == '-' || c >= '0' && c <= '9') || strings.IndexByte(s, ':') < 0 { - return false - } - // Do the full match. - return base60float.MatchString(s) -} - -// From http://yaml.org/type/float.html, except the regular expression there -// is bogus. In practice parsers do not enforce the "\.[0-9_]*" suffix. -var base60float = regexp.MustCompile(`^[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+(?:\.[0-9_]*)?$`) - -func (e *encoder) stringv(tag string, in reflect.Value) { - var style yaml_scalar_style_t - s := in.String() - rtag, rs := resolve("", s) - if rtag == yaml_BINARY_TAG { - if tag == "" || tag == yaml_STR_TAG { - tag = rtag - s = rs.(string) - } else if tag == yaml_BINARY_TAG { - failf("explicitly tagged !!binary data must be base64-encoded") - } else { - failf("cannot marshal invalid UTF-8 data as %s", shortTag(tag)) - } - } - if tag == "" && (rtag != yaml_STR_TAG || isBase60Float(s)) { - style = yaml_DOUBLE_QUOTED_SCALAR_STYLE - } else if strings.Contains(s, "\n") { - style = yaml_LITERAL_SCALAR_STYLE - } else { - style = yaml_PLAIN_SCALAR_STYLE - } - e.emitScalar(s, "", tag, style) -} - -func (e *encoder) boolv(tag string, in reflect.Value) { - var s string - if in.Bool() { - s = "true" - } else { - s = "false" - } - e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) -} - -func (e *encoder) intv(tag string, in reflect.Value) { - s := strconv.FormatInt(in.Int(), 10) - e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) -} - -func (e *encoder) uintv(tag string, in reflect.Value) { - s := strconv.FormatUint(in.Uint(), 10) - e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) -} - -func (e *encoder) floatv(tag string, in reflect.Value) { - // FIXME: Handle 64 bits here. - s := strconv.FormatFloat(float64(in.Float()), 'g', -1, 32) - switch s { - case "+Inf": - s = ".inf" - case "-Inf": - s = "-.inf" - case "NaN": - s = ".nan" - } - e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) -} - -func (e *encoder) nilv() { - e.emitScalar("null", "", "", yaml_PLAIN_SCALAR_STYLE) -} - -func (e *encoder) emitScalar(value, anchor, tag string, style yaml_scalar_style_t) { - implicit := tag == "" - e.must(yaml_scalar_event_initialize(&e.event, []byte(anchor), []byte(tag), []byte(value), implicit, implicit, style)) - e.emit() -} diff --git a/vendor/gopkg.in/yaml.v2/parserc.go b/vendor/gopkg.in/yaml.v2/parserc.go deleted file mode 100644 index 0a7037a..0000000 --- a/vendor/gopkg.in/yaml.v2/parserc.go +++ /dev/null @@ -1,1096 +0,0 @@ -package yaml - -import ( - "bytes" -) - -// The parser implements the following grammar: -// -// stream ::= STREAM-START implicit_document? 
explicit_document* STREAM-END -// implicit_document ::= block_node DOCUMENT-END* -// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* -// block_node_or_indentless_sequence ::= -// ALIAS -// | properties (block_content | indentless_block_sequence)? -// | block_content -// | indentless_block_sequence -// block_node ::= ALIAS -// | properties block_content? -// | block_content -// flow_node ::= ALIAS -// | properties flow_content? -// | flow_content -// properties ::= TAG ANCHOR? | ANCHOR TAG? -// block_content ::= block_collection | flow_collection | SCALAR -// flow_content ::= flow_collection | SCALAR -// block_collection ::= block_sequence | block_mapping -// flow_collection ::= flow_sequence | flow_mapping -// block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END -// indentless_sequence ::= (BLOCK-ENTRY block_node?)+ -// block_mapping ::= BLOCK-MAPPING_START -// ((KEY block_node_or_indentless_sequence?)? -// (VALUE block_node_or_indentless_sequence?)?)* -// BLOCK-END -// flow_sequence ::= FLOW-SEQUENCE-START -// (flow_sequence_entry FLOW-ENTRY)* -// flow_sequence_entry? -// FLOW-SEQUENCE-END -// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? -// flow_mapping ::= FLOW-MAPPING-START -// (flow_mapping_entry FLOW-ENTRY)* -// flow_mapping_entry? -// FLOW-MAPPING-END -// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? - -// Peek the next token in the token queue. -func peek_token(parser *yaml_parser_t) *yaml_token_t { - if parser.token_available || yaml_parser_fetch_more_tokens(parser) { - return &parser.tokens[parser.tokens_head] - } - return nil -} - -// Remove the next token from the queue (must be called after peek_token). -func skip_token(parser *yaml_parser_t) { - parser.token_available = false - parser.tokens_parsed++ - parser.stream_end_produced = parser.tokens[parser.tokens_head].typ == yaml_STREAM_END_TOKEN - parser.tokens_head++ -} - -// Get the next event. -func yaml_parser_parse(parser *yaml_parser_t, event *yaml_event_t) bool { - // Erase the event object. - *event = yaml_event_t{} - - // No events after the end of the stream or error. - if parser.stream_end_produced || parser.error != yaml_NO_ERROR || parser.state == yaml_PARSE_END_STATE { - return true - } - - // Generate the next event. - return yaml_parser_state_machine(parser, event) -} - -// Set parser error. -func yaml_parser_set_parser_error(parser *yaml_parser_t, problem string, problem_mark yaml_mark_t) bool { - parser.error = yaml_PARSER_ERROR - parser.problem = problem - parser.problem_mark = problem_mark - return false -} - -func yaml_parser_set_parser_error_context(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string, problem_mark yaml_mark_t) bool { - parser.error = yaml_PARSER_ERROR - parser.context = context - parser.context_mark = context_mark - parser.problem = problem - parser.problem_mark = problem_mark - return false -} - -// State dispatcher. 
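The state dispatcher that follows turns the grammar above into a table of per-state handler functions, and each call to yaml_parser_parse emits exactly one event. As an illustrative aside (not part of the patch, and using only the package's public API, since the yaml_parser_* functions are unexported), this is roughly the event stream that grammar implies for a one-line document:

    package main

    import (
        "fmt"

        "gopkg.in/yaml.v2"
    )

    func main() {
        // For "a: [1, 2]" the grammar above implies roughly this internal
        // event sequence:
        //   STREAM-START, DOCUMENT-START, MAPPING-START,
        //     SCALAR("a"), SEQUENCE-START, SCALAR("1"), SCALAR("2"), SEQUENCE-END,
        //   MAPPING-END, DOCUMENT-END, STREAM-END
        var out map[string]interface{}
        if err := yaml.Unmarshal([]byte("a: [1, 2]\n"), &out); err != nil {
            panic(err)
        }
        fmt.Println(out) // map[a:[1 2]]
    }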
-func yaml_parser_state_machine(parser *yaml_parser_t, event *yaml_event_t) bool { - //trace("yaml_parser_state_machine", "state:", parser.state.String()) - - switch parser.state { - case yaml_PARSE_STREAM_START_STATE: - return yaml_parser_parse_stream_start(parser, event) - - case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE: - return yaml_parser_parse_document_start(parser, event, true) - - case yaml_PARSE_DOCUMENT_START_STATE: - return yaml_parser_parse_document_start(parser, event, false) - - case yaml_PARSE_DOCUMENT_CONTENT_STATE: - return yaml_parser_parse_document_content(parser, event) - - case yaml_PARSE_DOCUMENT_END_STATE: - return yaml_parser_parse_document_end(parser, event) - - case yaml_PARSE_BLOCK_NODE_STATE: - return yaml_parser_parse_node(parser, event, true, false) - - case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE: - return yaml_parser_parse_node(parser, event, true, true) - - case yaml_PARSE_FLOW_NODE_STATE: - return yaml_parser_parse_node(parser, event, false, false) - - case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE: - return yaml_parser_parse_block_sequence_entry(parser, event, true) - - case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE: - return yaml_parser_parse_block_sequence_entry(parser, event, false) - - case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE: - return yaml_parser_parse_indentless_sequence_entry(parser, event) - - case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE: - return yaml_parser_parse_block_mapping_key(parser, event, true) - - case yaml_PARSE_BLOCK_MAPPING_KEY_STATE: - return yaml_parser_parse_block_mapping_key(parser, event, false) - - case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE: - return yaml_parser_parse_block_mapping_value(parser, event) - - case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE: - return yaml_parser_parse_flow_sequence_entry(parser, event, true) - - case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE: - return yaml_parser_parse_flow_sequence_entry(parser, event, false) - - case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE: - return yaml_parser_parse_flow_sequence_entry_mapping_key(parser, event) - - case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE: - return yaml_parser_parse_flow_sequence_entry_mapping_value(parser, event) - - case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE: - return yaml_parser_parse_flow_sequence_entry_mapping_end(parser, event) - - case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE: - return yaml_parser_parse_flow_mapping_key(parser, event, true) - - case yaml_PARSE_FLOW_MAPPING_KEY_STATE: - return yaml_parser_parse_flow_mapping_key(parser, event, false) - - case yaml_PARSE_FLOW_MAPPING_VALUE_STATE: - return yaml_parser_parse_flow_mapping_value(parser, event, false) - - case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE: - return yaml_parser_parse_flow_mapping_value(parser, event, true) - - default: - panic("invalid parser state") - } - return false -} - -// Parse the production: -// stream ::= STREAM-START implicit_document? 
explicit_document* STREAM-END -// ************ -func yaml_parser_parse_stream_start(parser *yaml_parser_t, event *yaml_event_t) bool { - token := peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_STREAM_START_TOKEN { - return yaml_parser_set_parser_error(parser, "did not find expected ", token.start_mark) - } - parser.state = yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE - *event = yaml_event_t{ - typ: yaml_STREAM_START_EVENT, - start_mark: token.start_mark, - end_mark: token.end_mark, - encoding: token.encoding, - } - skip_token(parser) - return true -} - -// Parse the productions: -// implicit_document ::= block_node DOCUMENT-END* -// * -// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* -// ************************* -func yaml_parser_parse_document_start(parser *yaml_parser_t, event *yaml_event_t, implicit bool) bool { - - token := peek_token(parser) - if token == nil { - return false - } - - // Parse extra document end indicators. - if !implicit { - for token.typ == yaml_DOCUMENT_END_TOKEN { - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - } - } - - if implicit && token.typ != yaml_VERSION_DIRECTIVE_TOKEN && - token.typ != yaml_TAG_DIRECTIVE_TOKEN && - token.typ != yaml_DOCUMENT_START_TOKEN && - token.typ != yaml_STREAM_END_TOKEN { - // Parse an implicit document. - if !yaml_parser_process_directives(parser, nil, nil) { - return false - } - parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE) - parser.state = yaml_PARSE_BLOCK_NODE_STATE - - *event = yaml_event_t{ - typ: yaml_DOCUMENT_START_EVENT, - start_mark: token.start_mark, - end_mark: token.end_mark, - } - - } else if token.typ != yaml_STREAM_END_TOKEN { - // Parse an explicit document. - var version_directive *yaml_version_directive_t - var tag_directives []yaml_tag_directive_t - start_mark := token.start_mark - if !yaml_parser_process_directives(parser, &version_directive, &tag_directives) { - return false - } - token = peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_DOCUMENT_START_TOKEN { - yaml_parser_set_parser_error(parser, - "did not find expected ", token.start_mark) - return false - } - parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE) - parser.state = yaml_PARSE_DOCUMENT_CONTENT_STATE - end_mark := token.end_mark - - *event = yaml_event_t{ - typ: yaml_DOCUMENT_START_EVENT, - start_mark: start_mark, - end_mark: end_mark, - version_directive: version_directive, - tag_directives: tag_directives, - implicit: false, - } - skip_token(parser) - - } else { - // Parse the stream end. - parser.state = yaml_PARSE_END_STATE - *event = yaml_event_t{ - typ: yaml_STREAM_END_EVENT, - start_mark: token.start_mark, - end_mark: token.end_mark, - } - skip_token(parser) - } - - return true -} - -// Parse the productions: -// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? 
DOCUMENT-END* -// *********** -// -func yaml_parser_parse_document_content(parser *yaml_parser_t, event *yaml_event_t) bool { - token := peek_token(parser) - if token == nil { - return false - } - if token.typ == yaml_VERSION_DIRECTIVE_TOKEN || - token.typ == yaml_TAG_DIRECTIVE_TOKEN || - token.typ == yaml_DOCUMENT_START_TOKEN || - token.typ == yaml_DOCUMENT_END_TOKEN || - token.typ == yaml_STREAM_END_TOKEN { - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - return yaml_parser_process_empty_scalar(parser, event, - token.start_mark) - } - return yaml_parser_parse_node(parser, event, true, false) -} - -// Parse the productions: -// implicit_document ::= block_node DOCUMENT-END* -// ************* -// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* -// -func yaml_parser_parse_document_end(parser *yaml_parser_t, event *yaml_event_t) bool { - token := peek_token(parser) - if token == nil { - return false - } - - start_mark := token.start_mark - end_mark := token.start_mark - - implicit := true - if token.typ == yaml_DOCUMENT_END_TOKEN { - end_mark = token.end_mark - skip_token(parser) - implicit = false - } - - parser.tag_directives = parser.tag_directives[:0] - - parser.state = yaml_PARSE_DOCUMENT_START_STATE - *event = yaml_event_t{ - typ: yaml_DOCUMENT_END_EVENT, - start_mark: start_mark, - end_mark: end_mark, - implicit: implicit, - } - return true -} - -// Parse the productions: -// block_node_or_indentless_sequence ::= -// ALIAS -// ***** -// | properties (block_content | indentless_block_sequence)? -// ********** * -// | block_content | indentless_block_sequence -// * -// block_node ::= ALIAS -// ***** -// | properties block_content? -// ********** * -// | block_content -// * -// flow_node ::= ALIAS -// ***** -// | properties flow_content? -// ********** * -// | flow_content -// * -// properties ::= TAG ANCHOR? | ANCHOR TAG? 
-// ************************* -// block_content ::= block_collection | flow_collection | SCALAR -// ****** -// flow_content ::= flow_collection | SCALAR -// ****** -func yaml_parser_parse_node(parser *yaml_parser_t, event *yaml_event_t, block, indentless_sequence bool) bool { - //defer trace("yaml_parser_parse_node", "block:", block, "indentless_sequence:", indentless_sequence)() - - token := peek_token(parser) - if token == nil { - return false - } - - if token.typ == yaml_ALIAS_TOKEN { - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - *event = yaml_event_t{ - typ: yaml_ALIAS_EVENT, - start_mark: token.start_mark, - end_mark: token.end_mark, - anchor: token.value, - } - skip_token(parser) - return true - } - - start_mark := token.start_mark - end_mark := token.start_mark - - var tag_token bool - var tag_handle, tag_suffix, anchor []byte - var tag_mark yaml_mark_t - if token.typ == yaml_ANCHOR_TOKEN { - anchor = token.value - start_mark = token.start_mark - end_mark = token.end_mark - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - if token.typ == yaml_TAG_TOKEN { - tag_token = true - tag_handle = token.value - tag_suffix = token.suffix - tag_mark = token.start_mark - end_mark = token.end_mark - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - } - } else if token.typ == yaml_TAG_TOKEN { - tag_token = true - tag_handle = token.value - tag_suffix = token.suffix - start_mark = token.start_mark - tag_mark = token.start_mark - end_mark = token.end_mark - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - if token.typ == yaml_ANCHOR_TOKEN { - anchor = token.value - end_mark = token.end_mark - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - } - } - - var tag []byte - if tag_token { - if len(tag_handle) == 0 { - tag = tag_suffix - tag_suffix = nil - } else { - for i := range parser.tag_directives { - if bytes.Equal(parser.tag_directives[i].handle, tag_handle) { - tag = append([]byte(nil), parser.tag_directives[i].prefix...) - tag = append(tag, tag_suffix...) 
- break - } - } - if len(tag) == 0 { - yaml_parser_set_parser_error_context(parser, - "while parsing a node", start_mark, - "found undefined tag handle", tag_mark) - return false - } - } - } - - implicit := len(tag) == 0 - if indentless_sequence && token.typ == yaml_BLOCK_ENTRY_TOKEN { - end_mark = token.end_mark - parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE - *event = yaml_event_t{ - typ: yaml_SEQUENCE_START_EVENT, - start_mark: start_mark, - end_mark: end_mark, - anchor: anchor, - tag: tag, - implicit: implicit, - style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE), - } - return true - } - if token.typ == yaml_SCALAR_TOKEN { - var plain_implicit, quoted_implicit bool - end_mark = token.end_mark - if (len(tag) == 0 && token.style == yaml_PLAIN_SCALAR_STYLE) || (len(tag) == 1 && tag[0] == '!') { - plain_implicit = true - } else if len(tag) == 0 { - quoted_implicit = true - } - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - - *event = yaml_event_t{ - typ: yaml_SCALAR_EVENT, - start_mark: start_mark, - end_mark: end_mark, - anchor: anchor, - tag: tag, - value: token.value, - implicit: plain_implicit, - quoted_implicit: quoted_implicit, - style: yaml_style_t(token.style), - } - skip_token(parser) - return true - } - if token.typ == yaml_FLOW_SEQUENCE_START_TOKEN { - // [Go] Some of the events below can be merged as they differ only on style. - end_mark = token.end_mark - parser.state = yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE - *event = yaml_event_t{ - typ: yaml_SEQUENCE_START_EVENT, - start_mark: start_mark, - end_mark: end_mark, - anchor: anchor, - tag: tag, - implicit: implicit, - style: yaml_style_t(yaml_FLOW_SEQUENCE_STYLE), - } - return true - } - if token.typ == yaml_FLOW_MAPPING_START_TOKEN { - end_mark = token.end_mark - parser.state = yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE - *event = yaml_event_t{ - typ: yaml_MAPPING_START_EVENT, - start_mark: start_mark, - end_mark: end_mark, - anchor: anchor, - tag: tag, - implicit: implicit, - style: yaml_style_t(yaml_FLOW_MAPPING_STYLE), - } - return true - } - if block && token.typ == yaml_BLOCK_SEQUENCE_START_TOKEN { - end_mark = token.end_mark - parser.state = yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE - *event = yaml_event_t{ - typ: yaml_SEQUENCE_START_EVENT, - start_mark: start_mark, - end_mark: end_mark, - anchor: anchor, - tag: tag, - implicit: implicit, - style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE), - } - return true - } - if block && token.typ == yaml_BLOCK_MAPPING_START_TOKEN { - end_mark = token.end_mark - parser.state = yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE - *event = yaml_event_t{ - typ: yaml_MAPPING_START_EVENT, - start_mark: start_mark, - end_mark: end_mark, - anchor: anchor, - tag: tag, - implicit: implicit, - style: yaml_style_t(yaml_BLOCK_MAPPING_STYLE), - } - return true - } - if len(anchor) > 0 || len(tag) > 0 { - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - - *event = yaml_event_t{ - typ: yaml_SCALAR_EVENT, - start_mark: start_mark, - end_mark: end_mark, - anchor: anchor, - tag: tag, - implicit: implicit, - quoted_implicit: false, - style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE), - } - return true - } - - context := "while parsing a flow node" - if block { - context = "while parsing a block node" - } - yaml_parser_set_parser_error_context(parser, context, start_mark, - "did not find expected node content", token.start_mark) - return false -} - -// Parse the productions: -// 
block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END -// ******************** *********** * ********* -// -func yaml_parser_parse_block_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { - if first { - token := peek_token(parser) - parser.marks = append(parser.marks, token.start_mark) - skip_token(parser) - } - - token := peek_token(parser) - if token == nil { - return false - } - - if token.typ == yaml_BLOCK_ENTRY_TOKEN { - mark := token.end_mark - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_BLOCK_ENTRY_TOKEN && token.typ != yaml_BLOCK_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE) - return yaml_parser_parse_node(parser, event, true, false) - } else { - parser.state = yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE - return yaml_parser_process_empty_scalar(parser, event, mark) - } - } - if token.typ == yaml_BLOCK_END_TOKEN { - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - parser.marks = parser.marks[:len(parser.marks)-1] - - *event = yaml_event_t{ - typ: yaml_SEQUENCE_END_EVENT, - start_mark: token.start_mark, - end_mark: token.end_mark, - } - - skip_token(parser) - return true - } - - context_mark := parser.marks[len(parser.marks)-1] - parser.marks = parser.marks[:len(parser.marks)-1] - return yaml_parser_set_parser_error_context(parser, - "while parsing a block collection", context_mark, - "did not find expected '-' indicator", token.start_mark) -} - -// Parse the productions: -// indentless_sequence ::= (BLOCK-ENTRY block_node?)+ -// *********** * -func yaml_parser_parse_indentless_sequence_entry(parser *yaml_parser_t, event *yaml_event_t) bool { - token := peek_token(parser) - if token == nil { - return false - } - - if token.typ == yaml_BLOCK_ENTRY_TOKEN { - mark := token.end_mark - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_BLOCK_ENTRY_TOKEN && - token.typ != yaml_KEY_TOKEN && - token.typ != yaml_VALUE_TOKEN && - token.typ != yaml_BLOCK_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE) - return yaml_parser_parse_node(parser, event, true, false) - } - parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE - return yaml_parser_process_empty_scalar(parser, event, mark) - } - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - - *event = yaml_event_t{ - typ: yaml_SEQUENCE_END_EVENT, - start_mark: token.start_mark, - end_mark: token.start_mark, // [Go] Shouldn't this be token.end_mark? - } - return true -} - -// Parse the productions: -// block_mapping ::= BLOCK-MAPPING_START -// ******************* -// ((KEY block_node_or_indentless_sequence?)? 
-// *** * -// (VALUE block_node_or_indentless_sequence?)?)* -// -// BLOCK-END -// ********* -// -func yaml_parser_parse_block_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { - if first { - token := peek_token(parser) - parser.marks = append(parser.marks, token.start_mark) - skip_token(parser) - } - - token := peek_token(parser) - if token == nil { - return false - } - - if token.typ == yaml_KEY_TOKEN { - mark := token.end_mark - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_KEY_TOKEN && - token.typ != yaml_VALUE_TOKEN && - token.typ != yaml_BLOCK_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_VALUE_STATE) - return yaml_parser_parse_node(parser, event, true, true) - } else { - parser.state = yaml_PARSE_BLOCK_MAPPING_VALUE_STATE - return yaml_parser_process_empty_scalar(parser, event, mark) - } - } else if token.typ == yaml_BLOCK_END_TOKEN { - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - parser.marks = parser.marks[:len(parser.marks)-1] - *event = yaml_event_t{ - typ: yaml_MAPPING_END_EVENT, - start_mark: token.start_mark, - end_mark: token.end_mark, - } - skip_token(parser) - return true - } - - context_mark := parser.marks[len(parser.marks)-1] - parser.marks = parser.marks[:len(parser.marks)-1] - return yaml_parser_set_parser_error_context(parser, - "while parsing a block mapping", context_mark, - "did not find expected key", token.start_mark) -} - -// Parse the productions: -// block_mapping ::= BLOCK-MAPPING_START -// -// ((KEY block_node_or_indentless_sequence?)? -// -// (VALUE block_node_or_indentless_sequence?)?)* -// ***** * -// BLOCK-END -// -// -func yaml_parser_parse_block_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool { - token := peek_token(parser) - if token == nil { - return false - } - if token.typ == yaml_VALUE_TOKEN { - mark := token.end_mark - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_KEY_TOKEN && - token.typ != yaml_VALUE_TOKEN && - token.typ != yaml_BLOCK_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_KEY_STATE) - return yaml_parser_parse_node(parser, event, true, true) - } - parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE - return yaml_parser_process_empty_scalar(parser, event, mark) - } - parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE - return yaml_parser_process_empty_scalar(parser, event, token.start_mark) -} - -// Parse the productions: -// flow_sequence ::= FLOW-SEQUENCE-START -// ******************* -// (flow_sequence_entry FLOW-ENTRY)* -// * ********** -// flow_sequence_entry? -// * -// FLOW-SEQUENCE-END -// ***************** -// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
-// * -// -func yaml_parser_parse_flow_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { - if first { - token := peek_token(parser) - parser.marks = append(parser.marks, token.start_mark) - skip_token(parser) - } - token := peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { - if !first { - if token.typ == yaml_FLOW_ENTRY_TOKEN { - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - } else { - context_mark := parser.marks[len(parser.marks)-1] - parser.marks = parser.marks[:len(parser.marks)-1] - return yaml_parser_set_parser_error_context(parser, - "while parsing a flow sequence", context_mark, - "did not find expected ',' or ']'", token.start_mark) - } - } - - if token.typ == yaml_KEY_TOKEN { - parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE - *event = yaml_event_t{ - typ: yaml_MAPPING_START_EVENT, - start_mark: token.start_mark, - end_mark: token.end_mark, - implicit: true, - style: yaml_style_t(yaml_FLOW_MAPPING_STYLE), - } - skip_token(parser) - return true - } else if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE) - return yaml_parser_parse_node(parser, event, false, false) - } - } - - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - parser.marks = parser.marks[:len(parser.marks)-1] - - *event = yaml_event_t{ - typ: yaml_SEQUENCE_END_EVENT, - start_mark: token.start_mark, - end_mark: token.end_mark, - } - - skip_token(parser) - return true -} - -// -// Parse the productions: -// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? -// *** * -// -func yaml_parser_parse_flow_sequence_entry_mapping_key(parser *yaml_parser_t, event *yaml_event_t) bool { - token := peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_VALUE_TOKEN && - token.typ != yaml_FLOW_ENTRY_TOKEN && - token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE) - return yaml_parser_parse_node(parser, event, false, false) - } - mark := token.end_mark - skip_token(parser) - parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE - return yaml_parser_process_empty_scalar(parser, event, mark) -} - -// Parse the productions: -// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? -// ***** * -// -func yaml_parser_parse_flow_sequence_entry_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool { - token := peek_token(parser) - if token == nil { - return false - } - if token.typ == yaml_VALUE_TOKEN { - skip_token(parser) - token := peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE) - return yaml_parser_parse_node(parser, event, false, false) - } - } - parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE - return yaml_parser_process_empty_scalar(parser, event, token.start_mark) -} - -// Parse the productions: -// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
-// * -// -func yaml_parser_parse_flow_sequence_entry_mapping_end(parser *yaml_parser_t, event *yaml_event_t) bool { - token := peek_token(parser) - if token == nil { - return false - } - parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE - *event = yaml_event_t{ - typ: yaml_MAPPING_END_EVENT, - start_mark: token.start_mark, - end_mark: token.start_mark, // [Go] Shouldn't this be end_mark? - } - return true -} - -// Parse the productions: -// flow_mapping ::= FLOW-MAPPING-START -// ****************** -// (flow_mapping_entry FLOW-ENTRY)* -// * ********** -// flow_mapping_entry? -// ****************** -// FLOW-MAPPING-END -// **************** -// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? -// * *** * -// -func yaml_parser_parse_flow_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { - if first { - token := peek_token(parser) - parser.marks = append(parser.marks, token.start_mark) - skip_token(parser) - } - - token := peek_token(parser) - if token == nil { - return false - } - - if token.typ != yaml_FLOW_MAPPING_END_TOKEN { - if !first { - if token.typ == yaml_FLOW_ENTRY_TOKEN { - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - } else { - context_mark := parser.marks[len(parser.marks)-1] - parser.marks = parser.marks[:len(parser.marks)-1] - return yaml_parser_set_parser_error_context(parser, - "while parsing a flow mapping", context_mark, - "did not find expected ',' or '}'", token.start_mark) - } - } - - if token.typ == yaml_KEY_TOKEN { - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_VALUE_TOKEN && - token.typ != yaml_FLOW_ENTRY_TOKEN && - token.typ != yaml_FLOW_MAPPING_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_VALUE_STATE) - return yaml_parser_parse_node(parser, event, false, false) - } else { - parser.state = yaml_PARSE_FLOW_MAPPING_VALUE_STATE - return yaml_parser_process_empty_scalar(parser, event, token.start_mark) - } - } else if token.typ != yaml_FLOW_MAPPING_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE) - return yaml_parser_parse_node(parser, event, false, false) - } - } - - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - parser.marks = parser.marks[:len(parser.marks)-1] - *event = yaml_event_t{ - typ: yaml_MAPPING_END_EVENT, - start_mark: token.start_mark, - end_mark: token.end_mark, - } - skip_token(parser) - return true -} - -// Parse the productions: -// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? -// * ***** * -// -func yaml_parser_parse_flow_mapping_value(parser *yaml_parser_t, event *yaml_event_t, empty bool) bool { - token := peek_token(parser) - if token == nil { - return false - } - if empty { - parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE - return yaml_parser_process_empty_scalar(parser, event, token.start_mark) - } - if token.typ == yaml_VALUE_TOKEN { - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_MAPPING_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_KEY_STATE) - return yaml_parser_parse_node(parser, event, false, false) - } - } - parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE - return yaml_parser_process_empty_scalar(parser, event, token.start_mark) -} - -// Generate an empty scalar event. 
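yaml_parser_process_empty_scalar, removed just below, is what gives omitted keys and values their implicit null: whenever the grammar expects a node and none is present, the parser fabricates a zero-length plain scalar at the current mark and lets the resolver type it. A small, hedged illustration of the visible effect through the public API (an aside, not part of the patch):

    package main

    import (
        "fmt"

        "gopkg.in/yaml.v2"
    )

    func main() {
        var out map[string]interface{}
        // The missing value after "b:" becomes an empty plain scalar,
        // which the resolver should then type as null.
        if err := yaml.Unmarshal([]byte("{a: 1, b: }"), &out); err != nil {
            panic(err)
        }
        fmt.Println(out["a"], out["b"] == nil) // expected: 1 true
    }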
-func yaml_parser_process_empty_scalar(parser *yaml_parser_t, event *yaml_event_t, mark yaml_mark_t) bool { - *event = yaml_event_t{ - typ: yaml_SCALAR_EVENT, - start_mark: mark, - end_mark: mark, - value: nil, // Empty - implicit: true, - style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE), - } - return true -} - -var default_tag_directives = []yaml_tag_directive_t{ - {[]byte("!"), []byte("!")}, - {[]byte("!!"), []byte("tag:yaml.org,2002:")}, -} - -// Parse directives. -func yaml_parser_process_directives(parser *yaml_parser_t, - version_directive_ref **yaml_version_directive_t, - tag_directives_ref *[]yaml_tag_directive_t) bool { - - var version_directive *yaml_version_directive_t - var tag_directives []yaml_tag_directive_t - - token := peek_token(parser) - if token == nil { - return false - } - - for token.typ == yaml_VERSION_DIRECTIVE_TOKEN || token.typ == yaml_TAG_DIRECTIVE_TOKEN { - if token.typ == yaml_VERSION_DIRECTIVE_TOKEN { - if version_directive != nil { - yaml_parser_set_parser_error(parser, - "found duplicate %YAML directive", token.start_mark) - return false - } - if token.major != 1 || token.minor != 1 { - yaml_parser_set_parser_error(parser, - "found incompatible YAML document", token.start_mark) - return false - } - version_directive = &yaml_version_directive_t{ - major: token.major, - minor: token.minor, - } - } else if token.typ == yaml_TAG_DIRECTIVE_TOKEN { - value := yaml_tag_directive_t{ - handle: token.value, - prefix: token.prefix, - } - if !yaml_parser_append_tag_directive(parser, value, false, token.start_mark) { - return false - } - tag_directives = append(tag_directives, value) - } - - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - } - - for i := range default_tag_directives { - if !yaml_parser_append_tag_directive(parser, default_tag_directives[i], true, token.start_mark) { - return false - } - } - - if version_directive_ref != nil { - *version_directive_ref = version_directive - } - if tag_directives_ref != nil { - *tag_directives_ref = tag_directives - } - return true -} - -// Append a tag directive to the directives stack. -func yaml_parser_append_tag_directive(parser *yaml_parser_t, value yaml_tag_directive_t, allow_duplicates bool, mark yaml_mark_t) bool { - for i := range parser.tag_directives { - if bytes.Equal(value.handle, parser.tag_directives[i].handle) { - if allow_duplicates { - return true - } - return yaml_parser_set_parser_error(parser, "found duplicate %TAG directive", mark) - } - } - - // [Go] I suspect the copy is unnecessary. This was likely done - // because there was no way to track ownership of the data. - value_copy := yaml_tag_directive_t{ - handle: make([]byte, len(value.handle)), - prefix: make([]byte, len(value.prefix)), - } - copy(value_copy.handle, value.handle) - copy(value_copy.prefix, value.prefix) - parser.tag_directives = append(parser.tag_directives, value_copy) - return true -} diff --git a/vendor/gopkg.in/yaml.v2/readerc.go b/vendor/gopkg.in/yaml.v2/readerc.go deleted file mode 100644 index f450791..0000000 --- a/vendor/gopkg.in/yaml.v2/readerc.go +++ /dev/null @@ -1,394 +0,0 @@ -package yaml - -import ( - "io" -) - -// Set the reader error and return 0. -func yaml_parser_set_reader_error(parser *yaml_parser_t, problem string, offset int, value int) bool { - parser.error = yaml_READER_ERROR - parser.problem = problem - parser.problem_offset = offset - parser.problem_value = value - return false -} - -// Byte order marks. 
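The reader starts by sniffing a byte order mark to choose the stream encoding, falling back to UTF-8 when none is found (yaml_parser_determine_encoding below). A standalone sketch of the same check, assuming the BOM byte sequences given by the constants that follow:

    package main

    import (
        "bytes"
        "fmt"
    )

    // detectEncoding is a simplified analogue of the BOM sniffing below:
    // the UTF-16 BOMs are two bytes, the UTF-8 BOM is three, and anything
    // else is treated as UTF-8.
    func detectEncoding(in []byte) string {
        switch {
        case bytes.HasPrefix(in, []byte{0xFF, 0xFE}):
            return "utf-16le"
        case bytes.HasPrefix(in, []byte{0xFE, 0xFF}):
            return "utf-16be"
        case bytes.HasPrefix(in, []byte{0xEF, 0xBB, 0xBF}):
            return "utf-8 (BOM)"
        default:
            return "utf-8 (assumed)"
        }
    }

    func main() {
        fmt.Println(detectEncoding([]byte("\xff\xfea\x00:\x00 \x001\x00"))) // utf-16le
        fmt.Println(detectEncoding([]byte("a: 1\n")))                       // utf-8 (assumed)
    }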
-const ( - bom_UTF8 = "\xef\xbb\xbf" - bom_UTF16LE = "\xff\xfe" - bom_UTF16BE = "\xfe\xff" -) - -// Determine the input stream encoding by checking the BOM symbol. If no BOM is -// found, the UTF-8 encoding is assumed. Return 1 on success, 0 on failure. -func yaml_parser_determine_encoding(parser *yaml_parser_t) bool { - // Ensure that we had enough bytes in the raw buffer. - for !parser.eof && len(parser.raw_buffer)-parser.raw_buffer_pos < 3 { - if !yaml_parser_update_raw_buffer(parser) { - return false - } - } - - // Determine the encoding. - buf := parser.raw_buffer - pos := parser.raw_buffer_pos - avail := len(buf) - pos - if avail >= 2 && buf[pos] == bom_UTF16LE[0] && buf[pos+1] == bom_UTF16LE[1] { - parser.encoding = yaml_UTF16LE_ENCODING - parser.raw_buffer_pos += 2 - parser.offset += 2 - } else if avail >= 2 && buf[pos] == bom_UTF16BE[0] && buf[pos+1] == bom_UTF16BE[1] { - parser.encoding = yaml_UTF16BE_ENCODING - parser.raw_buffer_pos += 2 - parser.offset += 2 - } else if avail >= 3 && buf[pos] == bom_UTF8[0] && buf[pos+1] == bom_UTF8[1] && buf[pos+2] == bom_UTF8[2] { - parser.encoding = yaml_UTF8_ENCODING - parser.raw_buffer_pos += 3 - parser.offset += 3 - } else { - parser.encoding = yaml_UTF8_ENCODING - } - return true -} - -// Update the raw buffer. -func yaml_parser_update_raw_buffer(parser *yaml_parser_t) bool { - size_read := 0 - - // Return if the raw buffer is full. - if parser.raw_buffer_pos == 0 && len(parser.raw_buffer) == cap(parser.raw_buffer) { - return true - } - - // Return on EOF. - if parser.eof { - return true - } - - // Move the remaining bytes in the raw buffer to the beginning. - if parser.raw_buffer_pos > 0 && parser.raw_buffer_pos < len(parser.raw_buffer) { - copy(parser.raw_buffer, parser.raw_buffer[parser.raw_buffer_pos:]) - } - parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)-parser.raw_buffer_pos] - parser.raw_buffer_pos = 0 - - // Call the read handler to fill the buffer. - size_read, err := parser.read_handler(parser, parser.raw_buffer[len(parser.raw_buffer):cap(parser.raw_buffer)]) - parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)+size_read] - if err == io.EOF { - parser.eof = true - } else if err != nil { - return yaml_parser_set_reader_error(parser, "input error: "+err.Error(), parser.offset, -1) - } - return true -} - -// Ensure that the buffer contains at least `length` characters. -// Return true on success, false on failure. -// -// The length is supposed to be significantly less that the buffer size. -func yaml_parser_update_buffer(parser *yaml_parser_t, length int) bool { - if parser.read_handler == nil { - panic("read handler must be set") - } - - // If the EOF flag is set and the raw buffer is empty, do nothing. - if parser.eof && parser.raw_buffer_pos == len(parser.raw_buffer) { - return true - } - - // Return if the buffer contains enough characters. - if parser.unread >= length { - return true - } - - // Determine the input encoding if it is not known yet. - if parser.encoding == yaml_ANY_ENCODING { - if !yaml_parser_determine_encoding(parser) { - return false - } - } - - // Move the unread characters to the beginning of the buffer. - buffer_len := len(parser.buffer) - if parser.buffer_pos > 0 && parser.buffer_pos < buffer_len { - copy(parser.buffer, parser.buffer[parser.buffer_pos:]) - buffer_len -= parser.buffer_pos - parser.buffer_pos = 0 - } else if parser.buffer_pos == buffer_len { - buffer_len = 0 - parser.buffer_pos = 0 - } - - // Open the whole buffer for writing, and cut it before returning. 
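The decode loop below expands the raw byte buffer into UTF-8 in parser.buffer one character at a time, starting from the width of each sequence as given by its leading octet (the RFC 3629 table reproduced in the comment below). A hedged sketch of just that width check, using the same bit masks:

    package main

    import "fmt"

    // utf8Width reports the byte length of a UTF-8 sequence from its leading
    // octet, or 0 when the octet cannot start a sequence (for example a
    // continuation byte). The masks match the table quoted from RFC 3629 below.
    func utf8Width(octet byte) int {
        switch {
        case octet&0x80 == 0x00:
            return 1
        case octet&0xE0 == 0xC0:
            return 2
        case octet&0xF0 == 0xE0:
            return 3
        case octet&0xF8 == 0xF0:
            return 4
        default:
            return 0
        }
    }

    func main() {
        for _, b := range []byte{'a', 0xC3, 0xE2, 0xF0, 0xBF} {
            fmt.Printf("%#02x -> %d\n", b, utf8Width(b))
        }
    }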
- parser.buffer = parser.buffer[:cap(parser.buffer)] - - // Fill the buffer until it has enough characters. - first := true - for parser.unread < length { - - // Fill the raw buffer if necessary. - if !first || parser.raw_buffer_pos == len(parser.raw_buffer) { - if !yaml_parser_update_raw_buffer(parser) { - parser.buffer = parser.buffer[:buffer_len] - return false - } - } - first = false - - // Decode the raw buffer. - inner: - for parser.raw_buffer_pos != len(parser.raw_buffer) { - var value rune - var width int - - raw_unread := len(parser.raw_buffer) - parser.raw_buffer_pos - - // Decode the next character. - switch parser.encoding { - case yaml_UTF8_ENCODING: - // Decode a UTF-8 character. Check RFC 3629 - // (http://www.ietf.org/rfc/rfc3629.txt) for more details. - // - // The following table (taken from the RFC) is used for - // decoding. - // - // Char. number range | UTF-8 octet sequence - // (hexadecimal) | (binary) - // --------------------+------------------------------------ - // 0000 0000-0000 007F | 0xxxxxxx - // 0000 0080-0000 07FF | 110xxxxx 10xxxxxx - // 0000 0800-0000 FFFF | 1110xxxx 10xxxxxx 10xxxxxx - // 0001 0000-0010 FFFF | 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx - // - // Additionally, the characters in the range 0xD800-0xDFFF - // are prohibited as they are reserved for use with UTF-16 - // surrogate pairs. - - // Determine the length of the UTF-8 sequence. - octet := parser.raw_buffer[parser.raw_buffer_pos] - switch { - case octet&0x80 == 0x00: - width = 1 - case octet&0xE0 == 0xC0: - width = 2 - case octet&0xF0 == 0xE0: - width = 3 - case octet&0xF8 == 0xF0: - width = 4 - default: - // The leading octet is invalid. - return yaml_parser_set_reader_error(parser, - "invalid leading UTF-8 octet", - parser.offset, int(octet)) - } - - // Check if the raw buffer contains an incomplete character. - if width > raw_unread { - if parser.eof { - return yaml_parser_set_reader_error(parser, - "incomplete UTF-8 octet sequence", - parser.offset, -1) - } - break inner - } - - // Decode the leading octet. - switch { - case octet&0x80 == 0x00: - value = rune(octet & 0x7F) - case octet&0xE0 == 0xC0: - value = rune(octet & 0x1F) - case octet&0xF0 == 0xE0: - value = rune(octet & 0x0F) - case octet&0xF8 == 0xF0: - value = rune(octet & 0x07) - default: - value = 0 - } - - // Check and decode the trailing octets. - for k := 1; k < width; k++ { - octet = parser.raw_buffer[parser.raw_buffer_pos+k] - - // Check if the octet is valid. - if (octet & 0xC0) != 0x80 { - return yaml_parser_set_reader_error(parser, - "invalid trailing UTF-8 octet", - parser.offset+k, int(octet)) - } - - // Decode the octet. - value = (value << 6) + rune(octet&0x3F) - } - - // Check the length of the sequence against the value. - switch { - case width == 1: - case width == 2 && value >= 0x80: - case width == 3 && value >= 0x800: - case width == 4 && value >= 0x10000: - default: - return yaml_parser_set_reader_error(parser, - "invalid length of a UTF-8 sequence", - parser.offset, -1) - } - - // Check the range of the value. - if value >= 0xD800 && value <= 0xDFFF || value > 0x10FFFF { - return yaml_parser_set_reader_error(parser, - "invalid Unicode character", - parser.offset, int(value)) - } - - case yaml_UTF16LE_ENCODING, yaml_UTF16BE_ENCODING: - var low, high int - if parser.encoding == yaml_UTF16LE_ENCODING { - low, high = 0, 1 - } else { - low, high = 1, 0 - } - - // The UTF-16 encoding is not as simple as one might - // naively think. Check RFC 2781 - // (http://www.ietf.org/rfc/rfc2781.txt). 
- // - // Normally, two subsequent bytes describe a Unicode - // character. However a special technique (called a - // surrogate pair) is used for specifying character - // values larger than 0xFFFF. - // - // A surrogate pair consists of two pseudo-characters: - // high surrogate area (0xD800-0xDBFF) - // low surrogate area (0xDC00-0xDFFF) - // - // The following formulas are used for decoding - // and encoding characters using surrogate pairs: - // - // U = U' + 0x10000 (0x01 00 00 <= U <= 0x10 FF FF) - // U' = yyyyyyyyyyxxxxxxxxxx (0 <= U' <= 0x0F FF FF) - // W1 = 110110yyyyyyyyyy - // W2 = 110111xxxxxxxxxx - // - // where U is the character value, W1 is the high surrogate - // area, W2 is the low surrogate area. - - // Check for incomplete UTF-16 character. - if raw_unread < 2 { - if parser.eof { - return yaml_parser_set_reader_error(parser, - "incomplete UTF-16 character", - parser.offset, -1) - } - break inner - } - - // Get the character. - value = rune(parser.raw_buffer[parser.raw_buffer_pos+low]) + - (rune(parser.raw_buffer[parser.raw_buffer_pos+high]) << 8) - - // Check for unexpected low surrogate area. - if value&0xFC00 == 0xDC00 { - return yaml_parser_set_reader_error(parser, - "unexpected low surrogate area", - parser.offset, int(value)) - } - - // Check for a high surrogate area. - if value&0xFC00 == 0xD800 { - width = 4 - - // Check for incomplete surrogate pair. - if raw_unread < 4 { - if parser.eof { - return yaml_parser_set_reader_error(parser, - "incomplete UTF-16 surrogate pair", - parser.offset, -1) - } - break inner - } - - // Get the next character. - value2 := rune(parser.raw_buffer[parser.raw_buffer_pos+low+2]) + - (rune(parser.raw_buffer[parser.raw_buffer_pos+high+2]) << 8) - - // Check for a low surrogate area. - if value2&0xFC00 != 0xDC00 { - return yaml_parser_set_reader_error(parser, - "expected low surrogate area", - parser.offset+2, int(value2)) - } - - // Generate the value of the surrogate pair. - value = 0x10000 + ((value & 0x3FF) << 10) + (value2 & 0x3FF) - } else { - width = 2 - } - - default: - panic("impossible") - } - - // Check if the character is in the allowed range: - // #x9 | #xA | #xD | [#x20-#x7E] (8 bit) - // | #x85 | [#xA0-#xD7FF] | [#xE000-#xFFFD] (16 bit) - // | [#x10000-#x10FFFF] (32 bit) - switch { - case value == 0x09: - case value == 0x0A: - case value == 0x0D: - case value >= 0x20 && value <= 0x7E: - case value == 0x85: - case value >= 0xA0 && value <= 0xD7FF: - case value >= 0xE000 && value <= 0xFFFD: - case value >= 0x10000 && value <= 0x10FFFF: - default: - return yaml_parser_set_reader_error(parser, - "control characters are not allowed", - parser.offset, int(value)) - } - - // Move the raw pointers. - parser.raw_buffer_pos += width - parser.offset += width - - // Finally put the character into the buffer. - if value <= 0x7F { - // 0000 0000-0000 007F . 0xxxxxxx - parser.buffer[buffer_len+0] = byte(value) - buffer_len += 1 - } else if value <= 0x7FF { - // 0000 0080-0000 07FF . 110xxxxx 10xxxxxx - parser.buffer[buffer_len+0] = byte(0xC0 + (value >> 6)) - parser.buffer[buffer_len+1] = byte(0x80 + (value & 0x3F)) - buffer_len += 2 - } else if value <= 0xFFFF { - // 0000 0800-0000 FFFF . 1110xxxx 10xxxxxx 10xxxxxx - parser.buffer[buffer_len+0] = byte(0xE0 + (value >> 12)) - parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 6) & 0x3F)) - parser.buffer[buffer_len+2] = byte(0x80 + (value & 0x3F)) - buffer_len += 3 - } else { - // 0001 0000-0010 FFFF . 
11110xxx 10xxxxxx 10xxxxxx 10xxxxxx - parser.buffer[buffer_len+0] = byte(0xF0 + (value >> 18)) - parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 12) & 0x3F)) - parser.buffer[buffer_len+2] = byte(0x80 + ((value >> 6) & 0x3F)) - parser.buffer[buffer_len+3] = byte(0x80 + (value & 0x3F)) - buffer_len += 4 - } - - parser.unread++ - } - - // On EOF, put NUL into the buffer and return. - if parser.eof { - parser.buffer[buffer_len] = 0 - buffer_len++ - parser.unread++ - break - } - } - parser.buffer = parser.buffer[:buffer_len] - return true -} diff --git a/vendor/gopkg.in/yaml.v2/resolve.go b/vendor/gopkg.in/yaml.v2/resolve.go deleted file mode 100644 index 93a8632..0000000 --- a/vendor/gopkg.in/yaml.v2/resolve.go +++ /dev/null @@ -1,203 +0,0 @@ -package yaml - -import ( - "encoding/base64" - "math" - "strconv" - "strings" - "unicode/utf8" -) - -type resolveMapItem struct { - value interface{} - tag string -} - -var resolveTable = make([]byte, 256) -var resolveMap = make(map[string]resolveMapItem) - -func init() { - t := resolveTable - t[int('+')] = 'S' // Sign - t[int('-')] = 'S' - for _, c := range "0123456789" { - t[int(c)] = 'D' // Digit - } - for _, c := range "yYnNtTfFoO~" { - t[int(c)] = 'M' // In map - } - t[int('.')] = '.' // Float (potentially in map) - - var resolveMapList = []struct { - v interface{} - tag string - l []string - }{ - {true, yaml_BOOL_TAG, []string{"y", "Y", "yes", "Yes", "YES"}}, - {true, yaml_BOOL_TAG, []string{"true", "True", "TRUE"}}, - {true, yaml_BOOL_TAG, []string{"on", "On", "ON"}}, - {false, yaml_BOOL_TAG, []string{"n", "N", "no", "No", "NO"}}, - {false, yaml_BOOL_TAG, []string{"false", "False", "FALSE"}}, - {false, yaml_BOOL_TAG, []string{"off", "Off", "OFF"}}, - {nil, yaml_NULL_TAG, []string{"", "~", "null", "Null", "NULL"}}, - {math.NaN(), yaml_FLOAT_TAG, []string{".nan", ".NaN", ".NAN"}}, - {math.Inf(+1), yaml_FLOAT_TAG, []string{".inf", ".Inf", ".INF"}}, - {math.Inf(+1), yaml_FLOAT_TAG, []string{"+.inf", "+.Inf", "+.INF"}}, - {math.Inf(-1), yaml_FLOAT_TAG, []string{"-.inf", "-.Inf", "-.INF"}}, - {"<<", yaml_MERGE_TAG, []string{"<<"}}, - } - - m := resolveMap - for _, item := range resolveMapList { - for _, s := range item.l { - m[s] = resolveMapItem{item.v, item.tag} - } - } -} - -const longTagPrefix = "tag:yaml.org,2002:" - -func shortTag(tag string) string { - // TODO This can easily be made faster and produce less garbage. - if strings.HasPrefix(tag, longTagPrefix) { - return "!!" + tag[len(longTagPrefix):] - } - return tag -} - -func longTag(tag string) string { - if strings.HasPrefix(tag, "!!") { - return longTagPrefix + tag[2:] - } - return tag -} - -func resolvableTag(tag string) bool { - switch tag { - case "", yaml_STR_TAG, yaml_BOOL_TAG, yaml_INT_TAG, yaml_FLOAT_TAG, yaml_NULL_TAG: - return true - } - return false -} - -func resolve(tag string, in string) (rtag string, out interface{}) { - if !resolvableTag(tag) { - return tag, in - } - - defer func() { - switch tag { - case "", rtag, yaml_STR_TAG, yaml_BINARY_TAG: - return - } - failf("cannot decode %s `%s` as a %s", shortTag(rtag), in, shortTag(tag)) - }() - - // Any data is accepted as a !!str or !!binary. - // Otherwise, the prefix is enough of a hint about what it might be. - hint := byte('N') - if in != "" { - hint = resolveTable[in[0]] - } - if hint != 0 && tag != yaml_STR_TAG && tag != yaml_BINARY_TAG { - // Handle things we can lookup in a map. 
- if item, ok := resolveMap[in]; ok { - return item.tag, item.value - } - - // Base 60 floats are a bad idea, were dropped in YAML 1.2, and - // are purposefully unsupported here. They're still quoted on - // the way out for compatibility with other parser, though. - - switch hint { - case 'M': - // We've already checked the map above. - - case '.': - // Not in the map, so maybe a normal float. - floatv, err := strconv.ParseFloat(in, 64) - if err == nil { - return yaml_FLOAT_TAG, floatv - } - - case 'D', 'S': - // Int, float, or timestamp. - plain := strings.Replace(in, "_", "", -1) - intv, err := strconv.ParseInt(plain, 0, 64) - if err == nil { - if intv == int64(int(intv)) { - return yaml_INT_TAG, int(intv) - } else { - return yaml_INT_TAG, intv - } - } - uintv, err := strconv.ParseUint(plain, 0, 64) - if err == nil { - return yaml_INT_TAG, uintv - } - floatv, err := strconv.ParseFloat(plain, 64) - if err == nil { - return yaml_FLOAT_TAG, floatv - } - if strings.HasPrefix(plain, "0b") { - intv, err := strconv.ParseInt(plain[2:], 2, 64) - if err == nil { - if intv == int64(int(intv)) { - return yaml_INT_TAG, int(intv) - } else { - return yaml_INT_TAG, intv - } - } - uintv, err := strconv.ParseUint(plain[2:], 2, 64) - if err == nil { - return yaml_INT_TAG, uintv - } - } else if strings.HasPrefix(plain, "-0b") { - intv, err := strconv.ParseInt(plain[3:], 2, 64) - if err == nil { - if intv == int64(int(intv)) { - return yaml_INT_TAG, -int(intv) - } else { - return yaml_INT_TAG, -intv - } - } - } - // XXX Handle timestamps here. - - default: - panic("resolveTable item not yet handled: " + string(rune(hint)) + " (with " + in + ")") - } - } - if tag == yaml_BINARY_TAG { - return yaml_BINARY_TAG, in - } - if utf8.ValidString(in) { - return yaml_STR_TAG, in - } - return yaml_BINARY_TAG, encodeBase64(in) -} - -// encodeBase64 encodes s as base64 that is broken up into multiple lines -// as appropriate for the resulting length. -func encodeBase64(s string) string { - const lineLen = 70 - encLen := base64.StdEncoding.EncodedLen(len(s)) - lines := encLen/lineLen + 1 - buf := make([]byte, encLen*2+lines) - in := buf[0:encLen] - out := buf[encLen:] - base64.StdEncoding.Encode(in, []byte(s)) - k := 0 - for i := 0; i < len(in); i += lineLen { - j := i + lineLen - if j > len(in) { - j = len(in) - } - k += copy(out[k:], in[i:j]) - if lines > 1 { - out[k] = '\n' - k++ - } - } - return string(out[:k]) -} diff --git a/vendor/gopkg.in/yaml.v2/scannerc.go b/vendor/gopkg.in/yaml.v2/scannerc.go deleted file mode 100644 index 2580800..0000000 --- a/vendor/gopkg.in/yaml.v2/scannerc.go +++ /dev/null @@ -1,2710 +0,0 @@ -package yaml - -import ( - "bytes" - "fmt" -) - -// Introduction -// ************ -// -// The following notes assume that you are familiar with the YAML specification -// (http://yaml.org/spec/cvs/current.html). We mostly follow it, although in -// some cases we are less restrictive that it requires. -// -// The process of transforming a YAML stream into a sequence of events is -// divided on two steps: Scanning and Parsing. -// -// The Scanner transforms the input stream into a sequence of tokens, while the -// parser transform the sequence of tokens produced by the Scanner into a -// sequence of parsing events. -// -// The Scanner is rather clever and complicated. The Parser, on the contrary, -// is a straightforward implementation of a recursive-descendant parser (or, -// LL(1) parser, as it is usually called). 
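The resolve table removed in the resolve.go hunk just above is what decides how untagged plain scalars are typed (including the often-surprising yes/no/on/off booleans), before the scanner and parser described next ever hand a value to the decoder. A hedged sketch of the behaviour those rules imply, using only the public API (the expected values are inferred from the table above, not asserted by this patch):

    package main

    import (
        "fmt"

        "gopkg.in/yaml.v2"
    )

    func main() {
        var out map[string]interface{}
        doc := "a: yes\nb: 0x1A\nc: 1.5\nd: ~\ne: '1.5'\n"
        if err := yaml.Unmarshal([]byte(doc), &out); err != nil {
            panic(err)
        }
        // Per the resolve rules one would expect:
        //   a -> true (bool), b -> 26 (int, parsed with base 0),
        //   c -> 1.5 (float64), d -> nil, e -> "1.5" (quoted, so a string)
        for _, k := range []string{"a", "b", "c", "d", "e"} {
            fmt.Printf("%s: %#v\n", k, out[k])
        }
    }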
-// -// Actually there are two issues of Scanning that might be called "clever", the -// rest is quite straightforward. The issues are "block collection start" and -// "simple keys". Both issues are explained below in details. -// -// Here the Scanning step is explained and implemented. We start with the list -// of all the tokens produced by the Scanner together with short descriptions. -// -// Now, tokens: -// -// STREAM-START(encoding) # The stream start. -// STREAM-END # The stream end. -// VERSION-DIRECTIVE(major,minor) # The '%YAML' directive. -// TAG-DIRECTIVE(handle,prefix) # The '%TAG' directive. -// DOCUMENT-START # '---' -// DOCUMENT-END # '...' -// BLOCK-SEQUENCE-START # Indentation increase denoting a block -// BLOCK-MAPPING-START # sequence or a block mapping. -// BLOCK-END # Indentation decrease. -// FLOW-SEQUENCE-START # '[' -// FLOW-SEQUENCE-END # ']' -// BLOCK-SEQUENCE-START # '{' -// BLOCK-SEQUENCE-END # '}' -// BLOCK-ENTRY # '-' -// FLOW-ENTRY # ',' -// KEY # '?' or nothing (simple keys). -// VALUE # ':' -// ALIAS(anchor) # '*anchor' -// ANCHOR(anchor) # '&anchor' -// TAG(handle,suffix) # '!handle!suffix' -// SCALAR(value,style) # A scalar. -// -// The following two tokens are "virtual" tokens denoting the beginning and the -// end of the stream: -// -// STREAM-START(encoding) -// STREAM-END -// -// We pass the information about the input stream encoding with the -// STREAM-START token. -// -// The next two tokens are responsible for tags: -// -// VERSION-DIRECTIVE(major,minor) -// TAG-DIRECTIVE(handle,prefix) -// -// Example: -// -// %YAML 1.1 -// %TAG ! !foo -// %TAG !yaml! tag:yaml.org,2002: -// --- -// -// The correspoding sequence of tokens: -// -// STREAM-START(utf-8) -// VERSION-DIRECTIVE(1,1) -// TAG-DIRECTIVE("!","!foo") -// TAG-DIRECTIVE("!yaml","tag:yaml.org,2002:") -// DOCUMENT-START -// STREAM-END -// -// Note that the VERSION-DIRECTIVE and TAG-DIRECTIVE tokens occupy a whole -// line. -// -// The document start and end indicators are represented by: -// -// DOCUMENT-START -// DOCUMENT-END -// -// Note that if a YAML stream contains an implicit document (without '---' -// and '...' indicators), no DOCUMENT-START and DOCUMENT-END tokens will be -// produced. -// -// In the following examples, we present whole documents together with the -// produced tokens. -// -// 1. An implicit document: -// -// 'a scalar' -// -// Tokens: -// -// STREAM-START(utf-8) -// SCALAR("a scalar",single-quoted) -// STREAM-END -// -// 2. An explicit document: -// -// --- -// 'a scalar' -// ... -// -// Tokens: -// -// STREAM-START(utf-8) -// DOCUMENT-START -// SCALAR("a scalar",single-quoted) -// DOCUMENT-END -// STREAM-END -// -// 3. Several documents in a stream: -// -// 'a scalar' -// --- -// 'another scalar' -// --- -// 'yet another scalar' -// -// Tokens: -// -// STREAM-START(utf-8) -// SCALAR("a scalar",single-quoted) -// DOCUMENT-START -// SCALAR("another scalar",single-quoted) -// DOCUMENT-START -// SCALAR("yet another scalar",single-quoted) -// STREAM-END -// -// We have already introduced the SCALAR token above. The following tokens are -// used to describe aliases, anchors, tag, and scalars: -// -// ALIAS(anchor) -// ANCHOR(anchor) -// TAG(handle,suffix) -// SCALAR(value,style) -// -// The following series of examples illustrate the usage of these tokens: -// -// 1. A recursive sequence: -// -// &A [ *A ] -// -// Tokens: -// -// STREAM-START(utf-8) -// ANCHOR("A") -// FLOW-SEQUENCE-START -// ALIAS("A") -// FLOW-SEQUENCE-END -// STREAM-END -// -// 2. 
A tagged scalar: -// -// !!float "3.14" # A good approximation. -// -// Tokens: -// -// STREAM-START(utf-8) -// TAG("!!","float") -// SCALAR("3.14",double-quoted) -// STREAM-END -// -// 3. Various scalar styles: -// -// --- # Implicit empty plain scalars do not produce tokens. -// --- a plain scalar -// --- 'a single-quoted scalar' -// --- "a double-quoted scalar" -// --- |- -// a literal scalar -// --- >- -// a folded -// scalar -// -// Tokens: -// -// STREAM-START(utf-8) -// DOCUMENT-START -// DOCUMENT-START -// SCALAR("a plain scalar",plain) -// DOCUMENT-START -// SCALAR("a single-quoted scalar",single-quoted) -// DOCUMENT-START -// SCALAR("a double-quoted scalar",double-quoted) -// DOCUMENT-START -// SCALAR("a literal scalar",literal) -// DOCUMENT-START -// SCALAR("a folded scalar",folded) -// STREAM-END -// -// Now it's time to review collection-related tokens. We will start with -// flow collections: -// -// FLOW-SEQUENCE-START -// FLOW-SEQUENCE-END -// FLOW-MAPPING-START -// FLOW-MAPPING-END -// FLOW-ENTRY -// KEY -// VALUE -// -// The tokens FLOW-SEQUENCE-START, FLOW-SEQUENCE-END, FLOW-MAPPING-START, and -// FLOW-MAPPING-END represent the indicators '[', ']', '{', and '}' -// correspondingly. FLOW-ENTRY represent the ',' indicator. Finally the -// indicators '?' and ':', which are used for denoting mapping keys and values, -// are represented by the KEY and VALUE tokens. -// -// The following examples show flow collections: -// -// 1. A flow sequence: -// -// [item 1, item 2, item 3] -// -// Tokens: -// -// STREAM-START(utf-8) -// FLOW-SEQUENCE-START -// SCALAR("item 1",plain) -// FLOW-ENTRY -// SCALAR("item 2",plain) -// FLOW-ENTRY -// SCALAR("item 3",plain) -// FLOW-SEQUENCE-END -// STREAM-END -// -// 2. A flow mapping: -// -// { -// a simple key: a value, # Note that the KEY token is produced. -// ? a complex key: another value, -// } -// -// Tokens: -// -// STREAM-START(utf-8) -// FLOW-MAPPING-START -// KEY -// SCALAR("a simple key",plain) -// VALUE -// SCALAR("a value",plain) -// FLOW-ENTRY -// KEY -// SCALAR("a complex key",plain) -// VALUE -// SCALAR("another value",plain) -// FLOW-ENTRY -// FLOW-MAPPING-END -// STREAM-END -// -// A simple key is a key which is not denoted by the '?' indicator. Note that -// the Scanner still produce the KEY token whenever it encounters a simple key. -// -// For scanning block collections, the following tokens are used (note that we -// repeat KEY and VALUE here): -// -// BLOCK-SEQUENCE-START -// BLOCK-MAPPING-START -// BLOCK-END -// BLOCK-ENTRY -// KEY -// VALUE -// -// The tokens BLOCK-SEQUENCE-START and BLOCK-MAPPING-START denote indentation -// increase that precedes a block collection (cf. the INDENT token in Python). -// The token BLOCK-END denote indentation decrease that ends a block collection -// (cf. the DEDENT token in Python). However YAML has some syntax pecularities -// that makes detections of these tokens more complex. -// -// The tokens BLOCK-ENTRY, KEY, and VALUE are used to represent the indicators -// '-', '?', and ':' correspondingly. -// -// The following examples show how the tokens BLOCK-SEQUENCE-START, -// BLOCK-MAPPING-START, and BLOCK-END are emitted by the Scanner: -// -// 1. 
Block sequences: -// -// - item 1 -// - item 2 -// - -// - item 3.1 -// - item 3.2 -// - -// key 1: value 1 -// key 2: value 2 -// -// Tokens: -// -// STREAM-START(utf-8) -// BLOCK-SEQUENCE-START -// BLOCK-ENTRY -// SCALAR("item 1",plain) -// BLOCK-ENTRY -// SCALAR("item 2",plain) -// BLOCK-ENTRY -// BLOCK-SEQUENCE-START -// BLOCK-ENTRY -// SCALAR("item 3.1",plain) -// BLOCK-ENTRY -// SCALAR("item 3.2",plain) -// BLOCK-END -// BLOCK-ENTRY -// BLOCK-MAPPING-START -// KEY -// SCALAR("key 1",plain) -// VALUE -// SCALAR("value 1",plain) -// KEY -// SCALAR("key 2",plain) -// VALUE -// SCALAR("value 2",plain) -// BLOCK-END -// BLOCK-END -// STREAM-END -// -// 2. Block mappings: -// -// a simple key: a value # The KEY token is produced here. -// ? a complex key -// : another value -// a mapping: -// key 1: value 1 -// key 2: value 2 -// a sequence: -// - item 1 -// - item 2 -// -// Tokens: -// -// STREAM-START(utf-8) -// BLOCK-MAPPING-START -// KEY -// SCALAR("a simple key",plain) -// VALUE -// SCALAR("a value",plain) -// KEY -// SCALAR("a complex key",plain) -// VALUE -// SCALAR("another value",plain) -// KEY -// SCALAR("a mapping",plain) -// BLOCK-MAPPING-START -// KEY -// SCALAR("key 1",plain) -// VALUE -// SCALAR("value 1",plain) -// KEY -// SCALAR("key 2",plain) -// VALUE -// SCALAR("value 2",plain) -// BLOCK-END -// KEY -// SCALAR("a sequence",plain) -// VALUE -// BLOCK-SEQUENCE-START -// BLOCK-ENTRY -// SCALAR("item 1",plain) -// BLOCK-ENTRY -// SCALAR("item 2",plain) -// BLOCK-END -// BLOCK-END -// STREAM-END -// -// YAML does not always require to start a new block collection from a new -// line. If the current line contains only '-', '?', and ':' indicators, a new -// block collection may start at the current line. The following examples -// illustrate this case: -// -// 1. Collections in a sequence: -// -// - - item 1 -// - item 2 -// - key 1: value 1 -// key 2: value 2 -// - ? complex key -// : complex value -// -// Tokens: -// -// STREAM-START(utf-8) -// BLOCK-SEQUENCE-START -// BLOCK-ENTRY -// BLOCK-SEQUENCE-START -// BLOCK-ENTRY -// SCALAR("item 1",plain) -// BLOCK-ENTRY -// SCALAR("item 2",plain) -// BLOCK-END -// BLOCK-ENTRY -// BLOCK-MAPPING-START -// KEY -// SCALAR("key 1",plain) -// VALUE -// SCALAR("value 1",plain) -// KEY -// SCALAR("key 2",plain) -// VALUE -// SCALAR("value 2",plain) -// BLOCK-END -// BLOCK-ENTRY -// BLOCK-MAPPING-START -// KEY -// SCALAR("complex key") -// VALUE -// SCALAR("complex value") -// BLOCK-END -// BLOCK-END -// STREAM-END -// -// 2. Collections in a mapping: -// -// ? a sequence -// : - item 1 -// - item 2 -// ? a mapping -// : key 1: value 1 -// key 2: value 2 -// -// Tokens: -// -// STREAM-START(utf-8) -// BLOCK-MAPPING-START -// KEY -// SCALAR("a sequence",plain) -// VALUE -// BLOCK-SEQUENCE-START -// BLOCK-ENTRY -// SCALAR("item 1",plain) -// BLOCK-ENTRY -// SCALAR("item 2",plain) -// BLOCK-END -// KEY -// SCALAR("a mapping",plain) -// VALUE -// BLOCK-MAPPING-START -// KEY -// SCALAR("key 1",plain) -// VALUE -// SCALAR("value 1",plain) -// KEY -// SCALAR("key 2",plain) -// VALUE -// SCALAR("value 2",plain) -// BLOCK-END -// BLOCK-END -// STREAM-END -// -// YAML also permits non-indented sequences if they are included into a block -// mapping. In this case, the token BLOCK-SEQUENCE-START is not produced: -// -// key: -// - item 1 # BLOCK-SEQUENCE-START is NOT produced here. 
-// - item 2 -// -// Tokens: -// -// STREAM-START(utf-8) -// BLOCK-MAPPING-START -// KEY -// SCALAR("key",plain) -// VALUE -// BLOCK-ENTRY -// SCALAR("item 1",plain) -// BLOCK-ENTRY -// SCALAR("item 2",plain) -// BLOCK-END -// - -// Ensure that the buffer contains the required number of characters. -// Return true on success, false on failure (reader error or memory error). -func cache(parser *yaml_parser_t, length int) bool { - // [Go] This was inlined: !cache(A, B) -> unread < B && !update(A, B) - return parser.unread >= length || yaml_parser_update_buffer(parser, length) -} - -// Advance the buffer pointer. -func skip(parser *yaml_parser_t) { - parser.mark.index++ - parser.mark.column++ - parser.unread-- - parser.buffer_pos += width(parser.buffer[parser.buffer_pos]) -} - -func skip_line(parser *yaml_parser_t) { - if is_crlf(parser.buffer, parser.buffer_pos) { - parser.mark.index += 2 - parser.mark.column = 0 - parser.mark.line++ - parser.unread -= 2 - parser.buffer_pos += 2 - } else if is_break(parser.buffer, parser.buffer_pos) { - parser.mark.index++ - parser.mark.column = 0 - parser.mark.line++ - parser.unread-- - parser.buffer_pos += width(parser.buffer[parser.buffer_pos]) - } -} - -// Copy a character to a string buffer and advance pointers. -func read(parser *yaml_parser_t, s []byte) []byte { - w := width(parser.buffer[parser.buffer_pos]) - if w == 0 { - panic("invalid character sequence") - } - if len(s) == 0 { - s = make([]byte, 0, 32) - } - if w == 1 && len(s)+w <= cap(s) { - s = s[:len(s)+1] - s[len(s)-1] = parser.buffer[parser.buffer_pos] - parser.buffer_pos++ - } else { - s = append(s, parser.buffer[parser.buffer_pos:parser.buffer_pos+w]...) - parser.buffer_pos += w - } - parser.mark.index++ - parser.mark.column++ - parser.unread-- - return s -} - -// Copy a line break character to a string buffer and advance pointers. -func read_line(parser *yaml_parser_t, s []byte) []byte { - buf := parser.buffer - pos := parser.buffer_pos - switch { - case buf[pos] == '\r' && buf[pos+1] == '\n': - // CR LF . LF - s = append(s, '\n') - parser.buffer_pos += 2 - parser.mark.index++ - parser.unread-- - case buf[pos] == '\r' || buf[pos] == '\n': - // CR|LF . LF - s = append(s, '\n') - parser.buffer_pos += 1 - case buf[pos] == '\xC2' && buf[pos+1] == '\x85': - // NEL . LF - s = append(s, '\n') - parser.buffer_pos += 2 - case buf[pos] == '\xE2' && buf[pos+1] == '\x80' && (buf[pos+2] == '\xA8' || buf[pos+2] == '\xA9'): - // LS|PS . LS|PS - s = append(s, buf[parser.buffer_pos:pos+3]...) - parser.buffer_pos += 3 - default: - return s - } - parser.mark.index++ - parser.mark.column = 0 - parser.mark.line++ - parser.unread-- - return s -} - -// Get the next token. -func yaml_parser_scan(parser *yaml_parser_t, token *yaml_token_t) bool { - // Erase the token object. - *token = yaml_token_t{} // [Go] Is this necessary? - - // No tokens after STREAM-END or error. - if parser.stream_end_produced || parser.error != yaml_NO_ERROR { - return true - } - - // Ensure that the tokens queue contains enough tokens. - if !parser.token_available { - if !yaml_parser_fetch_more_tokens(parser) { - return false - } - } - - // Fetch the next token from the queue. - *token = parser.tokens[parser.tokens_head] - parser.tokens_head++ - parser.tokens_parsed++ - parser.token_available = false - - if token.typ == yaml_STREAM_END_TOKEN { - parser.stream_end_produced = true - } - return true -} - -// Set the scanner error and return false. 
-func yaml_parser_set_scanner_error(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string) bool { - parser.error = yaml_SCANNER_ERROR - parser.context = context - parser.context_mark = context_mark - parser.problem = problem - parser.problem_mark = parser.mark - return false -} - -func yaml_parser_set_scanner_tag_error(parser *yaml_parser_t, directive bool, context_mark yaml_mark_t, problem string) bool { - context := "while parsing a tag" - if directive { - context = "while parsing a %TAG directive" - } - return yaml_parser_set_scanner_error(parser, context, context_mark, "did not find URI escaped octet") -} - -func trace(args ...interface{}) func() { - pargs := append([]interface{}{"+++"}, args...) - fmt.Println(pargs...) - pargs = append([]interface{}{"---"}, args...) - return func() { fmt.Println(pargs...) } -} - -// Ensure that the tokens queue contains at least one token which can be -// returned to the Parser. -func yaml_parser_fetch_more_tokens(parser *yaml_parser_t) bool { - // While we need more tokens to fetch, do it. - for { - // Check if we really need to fetch more tokens. - need_more_tokens := false - - if parser.tokens_head == len(parser.tokens) { - // Queue is empty. - need_more_tokens = true - } else { - // Check if any potential simple key may occupy the head position. - if !yaml_parser_stale_simple_keys(parser) { - return false - } - - for i := range parser.simple_keys { - simple_key := &parser.simple_keys[i] - if simple_key.possible && simple_key.token_number == parser.tokens_parsed { - need_more_tokens = true - break - } - } - } - - // We are finished. - if !need_more_tokens { - break - } - // Fetch the next token. - if !yaml_parser_fetch_next_token(parser) { - return false - } - } - - parser.token_available = true - return true -} - -// The dispatcher for token fetchers. -func yaml_parser_fetch_next_token(parser *yaml_parser_t) bool { - // Ensure that the buffer is initialized. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - - // Check if we just started scanning. Fetch STREAM-START then. - if !parser.stream_start_produced { - return yaml_parser_fetch_stream_start(parser) - } - - // Eat whitespaces and comments until we reach the next token. - if !yaml_parser_scan_to_next_token(parser) { - return false - } - - // Remove obsolete potential simple keys. - if !yaml_parser_stale_simple_keys(parser) { - return false - } - - // Check the indentation level against the current column. - if !yaml_parser_unroll_indent(parser, parser.mark.column) { - return false - } - - // Ensure that the buffer contains at least 4 characters. 4 is the length - // of the longest indicators ('--- ' and '... '). - if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { - return false - } - - // Is it the end of the stream? - if is_z(parser.buffer, parser.buffer_pos) { - return yaml_parser_fetch_stream_end(parser) - } - - // Is it a directive? - if parser.mark.column == 0 && parser.buffer[parser.buffer_pos] == '%' { - return yaml_parser_fetch_directive(parser) - } - - buf := parser.buffer - pos := parser.buffer_pos - - // Is it the document start indicator? - if parser.mark.column == 0 && buf[pos] == '-' && buf[pos+1] == '-' && buf[pos+2] == '-' && is_blankz(buf, pos+3) { - return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_START_TOKEN) - } - - // Is it the document end indicator? - if parser.mark.column == 0 && buf[pos] == '.' && buf[pos+1] == '.' && buf[pos+2] == '.' 
&& is_blankz(buf, pos+3) { - return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_END_TOKEN) - } - - // Is it the flow sequence start indicator? - if buf[pos] == '[' { - return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_SEQUENCE_START_TOKEN) - } - - // Is it the flow mapping start indicator? - if parser.buffer[parser.buffer_pos] == '{' { - return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_MAPPING_START_TOKEN) - } - - // Is it the flow sequence end indicator? - if parser.buffer[parser.buffer_pos] == ']' { - return yaml_parser_fetch_flow_collection_end(parser, - yaml_FLOW_SEQUENCE_END_TOKEN) - } - - // Is it the flow mapping end indicator? - if parser.buffer[parser.buffer_pos] == '}' { - return yaml_parser_fetch_flow_collection_end(parser, - yaml_FLOW_MAPPING_END_TOKEN) - } - - // Is it the flow entry indicator? - if parser.buffer[parser.buffer_pos] == ',' { - return yaml_parser_fetch_flow_entry(parser) - } - - // Is it the block entry indicator? - if parser.buffer[parser.buffer_pos] == '-' && is_blankz(parser.buffer, parser.buffer_pos+1) { - return yaml_parser_fetch_block_entry(parser) - } - - // Is it the key indicator? - if parser.buffer[parser.buffer_pos] == '?' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) { - return yaml_parser_fetch_key(parser) - } - - // Is it the value indicator? - if parser.buffer[parser.buffer_pos] == ':' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) { - return yaml_parser_fetch_value(parser) - } - - // Is it an alias? - if parser.buffer[parser.buffer_pos] == '*' { - return yaml_parser_fetch_anchor(parser, yaml_ALIAS_TOKEN) - } - - // Is it an anchor? - if parser.buffer[parser.buffer_pos] == '&' { - return yaml_parser_fetch_anchor(parser, yaml_ANCHOR_TOKEN) - } - - // Is it a tag? - if parser.buffer[parser.buffer_pos] == '!' { - return yaml_parser_fetch_tag(parser) - } - - // Is it a literal scalar? - if parser.buffer[parser.buffer_pos] == '|' && parser.flow_level == 0 { - return yaml_parser_fetch_block_scalar(parser, true) - } - - // Is it a folded scalar? - if parser.buffer[parser.buffer_pos] == '>' && parser.flow_level == 0 { - return yaml_parser_fetch_block_scalar(parser, false) - } - - // Is it a single-quoted scalar? - if parser.buffer[parser.buffer_pos] == '\'' { - return yaml_parser_fetch_flow_scalar(parser, true) - } - - // Is it a double-quoted scalar? - if parser.buffer[parser.buffer_pos] == '"' { - return yaml_parser_fetch_flow_scalar(parser, false) - } - - // Is it a plain scalar? - // - // A plain scalar may start with any non-blank characters except - // - // '-', '?', ':', ',', '[', ']', '{', '}', - // '#', '&', '*', '!', '|', '>', '\'', '\"', - // '%', '@', '`'. - // - // In the block context (and, for the '-' indicator, in the flow context - // too), it may also start with the characters - // - // '-', '?', ':' - // - // if it is followed by a non-space character. - // - // The last rule is more restrictive than the specification requires. - // [Go] Make this logic more reasonable. - //switch parser.buffer[parser.buffer_pos] { - //case '-', '?', ':', ',', '?', '-', ',', ':', ']', '[', '}', '{', '&', '#', '!', '*', '>', '|', '"', '\'', '@', '%', '-', '`': - //} - if !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '-' || - parser.buffer[parser.buffer_pos] == '?' 
|| parser.buffer[parser.buffer_pos] == ':' || - parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '[' || - parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' || - parser.buffer[parser.buffer_pos] == '}' || parser.buffer[parser.buffer_pos] == '#' || - parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '*' || - parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '|' || - parser.buffer[parser.buffer_pos] == '>' || parser.buffer[parser.buffer_pos] == '\'' || - parser.buffer[parser.buffer_pos] == '"' || parser.buffer[parser.buffer_pos] == '%' || - parser.buffer[parser.buffer_pos] == '@' || parser.buffer[parser.buffer_pos] == '`') || - (parser.buffer[parser.buffer_pos] == '-' && !is_blank(parser.buffer, parser.buffer_pos+1)) || - (parser.flow_level == 0 && - (parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':') && - !is_blankz(parser.buffer, parser.buffer_pos+1)) { - return yaml_parser_fetch_plain_scalar(parser) - } - - // If we don't determine the token type so far, it is an error. - return yaml_parser_set_scanner_error(parser, - "while scanning for the next token", parser.mark, - "found character that cannot start any token") -} - -// Check the list of potential simple keys and remove the positions that -// cannot contain simple keys anymore. -func yaml_parser_stale_simple_keys(parser *yaml_parser_t) bool { - // Check for a potential simple key for each flow level. - for i := range parser.simple_keys { - simple_key := &parser.simple_keys[i] - - // The specification requires that a simple key - // - // - is limited to a single line, - // - is shorter than 1024 characters. - if simple_key.possible && (simple_key.mark.line < parser.mark.line || simple_key.mark.index+1024 < parser.mark.index) { - - // Check if the potential simple key to be removed is required. - if simple_key.required { - return yaml_parser_set_scanner_error(parser, - "while scanning a simple key", simple_key.mark, - "could not find expected ':'") - } - simple_key.possible = false - } - } - return true -} - -// Check if a simple key may start at the current position and add it if -// needed. -func yaml_parser_save_simple_key(parser *yaml_parser_t) bool { - // A simple key is required at the current position if the scanner is in - // the block context and the current column coincides with the indentation - // level. - - required := parser.flow_level == 0 && parser.indent == parser.mark.column - - // A simple key is required only when it is the first token in the current - // line. Therefore it is always allowed. But we add a check anyway. - if required && !parser.simple_key_allowed { - panic("should not happen") - } - - // - // If the current position may start a simple key, save it. - // - if parser.simple_key_allowed { - simple_key := yaml_simple_key_t{ - possible: true, - required: required, - token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head), - } - simple_key.mark = parser.mark - - if !yaml_parser_remove_simple_key(parser) { - return false - } - parser.simple_keys[len(parser.simple_keys)-1] = simple_key - } - return true -} - -// Remove a potential simple key at the current flow level. -func yaml_parser_remove_simple_key(parser *yaml_parser_t) bool { - i := len(parser.simple_keys) - 1 - if parser.simple_keys[i].possible { - // If the key is required, it is an error. 
- if parser.simple_keys[i].required { - return yaml_parser_set_scanner_error(parser, - "while scanning a simple key", parser.simple_keys[i].mark, - "could not find expected ':'") - } - } - // Remove the key from the stack. - parser.simple_keys[i].possible = false - return true -} - -// Increase the flow level and resize the simple key list if needed. -func yaml_parser_increase_flow_level(parser *yaml_parser_t) bool { - // Reset the simple key on the next level. - parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{}) - - // Increase the flow level. - parser.flow_level++ - return true -} - -// Decrease the flow level. -func yaml_parser_decrease_flow_level(parser *yaml_parser_t) bool { - if parser.flow_level > 0 { - parser.flow_level-- - parser.simple_keys = parser.simple_keys[:len(parser.simple_keys)-1] - } - return true -} - -// Push the current indentation level to the stack and set the new level -// the current column is greater than the indentation level. In this case, -// append or insert the specified token into the token queue. -func yaml_parser_roll_indent(parser *yaml_parser_t, column, number int, typ yaml_token_type_t, mark yaml_mark_t) bool { - // In the flow context, do nothing. - if parser.flow_level > 0 { - return true - } - - if parser.indent < column { - // Push the current indentation level to the stack and set the new - // indentation level. - parser.indents = append(parser.indents, parser.indent) - parser.indent = column - - // Create a token and insert it into the queue. - token := yaml_token_t{ - typ: typ, - start_mark: mark, - end_mark: mark, - } - if number > -1 { - number -= parser.tokens_parsed - } - yaml_insert_token(parser, number, &token) - } - return true -} - -// Pop indentation levels from the indents stack until the current level -// becomes less or equal to the column. For each indentation level, append -// the BLOCK-END token. -func yaml_parser_unroll_indent(parser *yaml_parser_t, column int) bool { - // In the flow context, do nothing. - if parser.flow_level > 0 { - return true - } - - // Loop through the indentation levels in the stack. - for parser.indent > column { - // Create a token and append it to the queue. - token := yaml_token_t{ - typ: yaml_BLOCK_END_TOKEN, - start_mark: parser.mark, - end_mark: parser.mark, - } - yaml_insert_token(parser, -1, &token) - - // Pop the indentation level. - parser.indent = parser.indents[len(parser.indents)-1] - parser.indents = parser.indents[:len(parser.indents)-1] - } - return true -} - -// Initialize the scanner and produce the STREAM-START token. -func yaml_parser_fetch_stream_start(parser *yaml_parser_t) bool { - - // Set the initial indentation. - parser.indent = -1 - - // Initialize the simple key stack. - parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{}) - - // A simple key is allowed at the beginning of the stream. - parser.simple_key_allowed = true - - // We have started. - parser.stream_start_produced = true - - // Create the STREAM-START token and append it to the queue. - token := yaml_token_t{ - typ: yaml_STREAM_START_TOKEN, - start_mark: parser.mark, - end_mark: parser.mark, - encoding: parser.encoding, - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the STREAM-END token and shut down the scanner. -func yaml_parser_fetch_stream_end(parser *yaml_parser_t) bool { - - // Force new line. - if parser.mark.column != 0 { - parser.mark.column = 0 - parser.mark.line++ - } - - // Reset the indentation level. 
- if !yaml_parser_unroll_indent(parser, -1) { - return false - } - - // Reset simple keys. - if !yaml_parser_remove_simple_key(parser) { - return false - } - - parser.simple_key_allowed = false - - // Create the STREAM-END token and append it to the queue. - token := yaml_token_t{ - typ: yaml_STREAM_END_TOKEN, - start_mark: parser.mark, - end_mark: parser.mark, - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce a VERSION-DIRECTIVE or TAG-DIRECTIVE token. -func yaml_parser_fetch_directive(parser *yaml_parser_t) bool { - // Reset the indentation level. - if !yaml_parser_unroll_indent(parser, -1) { - return false - } - - // Reset simple keys. - if !yaml_parser_remove_simple_key(parser) { - return false - } - - parser.simple_key_allowed = false - - // Create the YAML-DIRECTIVE or TAG-DIRECTIVE token. - token := yaml_token_t{} - if !yaml_parser_scan_directive(parser, &token) { - return false - } - // Append the token to the queue. - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the DOCUMENT-START or DOCUMENT-END token. -func yaml_parser_fetch_document_indicator(parser *yaml_parser_t, typ yaml_token_type_t) bool { - // Reset the indentation level. - if !yaml_parser_unroll_indent(parser, -1) { - return false - } - - // Reset simple keys. - if !yaml_parser_remove_simple_key(parser) { - return false - } - - parser.simple_key_allowed = false - - // Consume the token. - start_mark := parser.mark - - skip(parser) - skip(parser) - skip(parser) - - end_mark := parser.mark - - // Create the DOCUMENT-START or DOCUMENT-END token. - token := yaml_token_t{ - typ: typ, - start_mark: start_mark, - end_mark: end_mark, - } - // Append the token to the queue. - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the FLOW-SEQUENCE-START or FLOW-MAPPING-START token. -func yaml_parser_fetch_flow_collection_start(parser *yaml_parser_t, typ yaml_token_type_t) bool { - // The indicators '[' and '{' may start a simple key. - if !yaml_parser_save_simple_key(parser) { - return false - } - - // Increase the flow level. - if !yaml_parser_increase_flow_level(parser) { - return false - } - - // A simple key may follow the indicators '[' and '{'. - parser.simple_key_allowed = true - - // Consume the token. - start_mark := parser.mark - skip(parser) - end_mark := parser.mark - - // Create the FLOW-SEQUENCE-START of FLOW-MAPPING-START token. - token := yaml_token_t{ - typ: typ, - start_mark: start_mark, - end_mark: end_mark, - } - // Append the token to the queue. - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the FLOW-SEQUENCE-END or FLOW-MAPPING-END token. -func yaml_parser_fetch_flow_collection_end(parser *yaml_parser_t, typ yaml_token_type_t) bool { - // Reset any potential simple key on the current flow level. - if !yaml_parser_remove_simple_key(parser) { - return false - } - - // Decrease the flow level. - if !yaml_parser_decrease_flow_level(parser) { - return false - } - - // No simple keys after the indicators ']' and '}'. - parser.simple_key_allowed = false - - // Consume the token. - - start_mark := parser.mark - skip(parser) - end_mark := parser.mark - - // Create the FLOW-SEQUENCE-END of FLOW-MAPPING-END token. - token := yaml_token_t{ - typ: typ, - start_mark: start_mark, - end_mark: end_mark, - } - // Append the token to the queue. - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the FLOW-ENTRY token. 
-func yaml_parser_fetch_flow_entry(parser *yaml_parser_t) bool { - // Reset any potential simple keys on the current flow level. - if !yaml_parser_remove_simple_key(parser) { - return false - } - - // Simple keys are allowed after ','. - parser.simple_key_allowed = true - - // Consume the token. - start_mark := parser.mark - skip(parser) - end_mark := parser.mark - - // Create the FLOW-ENTRY token and append it to the queue. - token := yaml_token_t{ - typ: yaml_FLOW_ENTRY_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the BLOCK-ENTRY token. -func yaml_parser_fetch_block_entry(parser *yaml_parser_t) bool { - // Check if the scanner is in the block context. - if parser.flow_level == 0 { - // Check if we are allowed to start a new entry. - if !parser.simple_key_allowed { - return yaml_parser_set_scanner_error(parser, "", parser.mark, - "block sequence entries are not allowed in this context") - } - // Add the BLOCK-SEQUENCE-START token if needed. - if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_SEQUENCE_START_TOKEN, parser.mark) { - return false - } - } else { - // It is an error for the '-' indicator to occur in the flow context, - // but we let the Parser detect and report about it because the Parser - // is able to point to the context. - } - - // Reset any potential simple keys on the current flow level. - if !yaml_parser_remove_simple_key(parser) { - return false - } - - // Simple keys are allowed after '-'. - parser.simple_key_allowed = true - - // Consume the token. - start_mark := parser.mark - skip(parser) - end_mark := parser.mark - - // Create the BLOCK-ENTRY token and append it to the queue. - token := yaml_token_t{ - typ: yaml_BLOCK_ENTRY_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the KEY token. -func yaml_parser_fetch_key(parser *yaml_parser_t) bool { - - // In the block context, additional checks are required. - if parser.flow_level == 0 { - // Check if we are allowed to start a new key (not nessesary simple). - if !parser.simple_key_allowed { - return yaml_parser_set_scanner_error(parser, "", parser.mark, - "mapping keys are not allowed in this context") - } - // Add the BLOCK-MAPPING-START token if needed. - if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) { - return false - } - } - - // Reset any potential simple keys on the current flow level. - if !yaml_parser_remove_simple_key(parser) { - return false - } - - // Simple keys are allowed after '?' in the block context. - parser.simple_key_allowed = parser.flow_level == 0 - - // Consume the token. - start_mark := parser.mark - skip(parser) - end_mark := parser.mark - - // Create the KEY token and append it to the queue. - token := yaml_token_t{ - typ: yaml_KEY_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the VALUE token. -func yaml_parser_fetch_value(parser *yaml_parser_t) bool { - - simple_key := &parser.simple_keys[len(parser.simple_keys)-1] - - // Have we found a simple key? - if simple_key.possible { - // Create the KEY token and insert it into the queue. 
- token := yaml_token_t{ - typ: yaml_KEY_TOKEN, - start_mark: simple_key.mark, - end_mark: simple_key.mark, - } - yaml_insert_token(parser, simple_key.token_number-parser.tokens_parsed, &token) - - // In the block context, we may need to add the BLOCK-MAPPING-START token. - if !yaml_parser_roll_indent(parser, simple_key.mark.column, - simple_key.token_number, - yaml_BLOCK_MAPPING_START_TOKEN, simple_key.mark) { - return false - } - - // Remove the simple key. - simple_key.possible = false - - // A simple key cannot follow another simple key. - parser.simple_key_allowed = false - - } else { - // The ':' indicator follows a complex key. - - // In the block context, extra checks are required. - if parser.flow_level == 0 { - - // Check if we are allowed to start a complex value. - if !parser.simple_key_allowed { - return yaml_parser_set_scanner_error(parser, "", parser.mark, - "mapping values are not allowed in this context") - } - - // Add the BLOCK-MAPPING-START token if needed. - if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) { - return false - } - } - - // Simple keys after ':' are allowed in the block context. - parser.simple_key_allowed = parser.flow_level == 0 - } - - // Consume the token. - start_mark := parser.mark - skip(parser) - end_mark := parser.mark - - // Create the VALUE token and append it to the queue. - token := yaml_token_t{ - typ: yaml_VALUE_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the ALIAS or ANCHOR token. -func yaml_parser_fetch_anchor(parser *yaml_parser_t, typ yaml_token_type_t) bool { - // An anchor or an alias could be a simple key. - if !yaml_parser_save_simple_key(parser) { - return false - } - - // A simple key cannot follow an anchor or an alias. - parser.simple_key_allowed = false - - // Create the ALIAS or ANCHOR token and append it to the queue. - var token yaml_token_t - if !yaml_parser_scan_anchor(parser, &token, typ) { - return false - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the TAG token. -func yaml_parser_fetch_tag(parser *yaml_parser_t) bool { - // A tag could be a simple key. - if !yaml_parser_save_simple_key(parser) { - return false - } - - // A simple key cannot follow a tag. - parser.simple_key_allowed = false - - // Create the TAG token and append it to the queue. - var token yaml_token_t - if !yaml_parser_scan_tag(parser, &token) { - return false - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the SCALAR(...,literal) or SCALAR(...,folded) tokens. -func yaml_parser_fetch_block_scalar(parser *yaml_parser_t, literal bool) bool { - // Remove any potential simple keys. - if !yaml_parser_remove_simple_key(parser) { - return false - } - - // A simple key may follow a block scalar. - parser.simple_key_allowed = true - - // Create the SCALAR token and append it to the queue. - var token yaml_token_t - if !yaml_parser_scan_block_scalar(parser, &token, literal) { - return false - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the SCALAR(...,single-quoted) or SCALAR(...,double-quoted) tokens. -func yaml_parser_fetch_flow_scalar(parser *yaml_parser_t, single bool) bool { - // A plain scalar could be a simple key. - if !yaml_parser_save_simple_key(parser) { - return false - } - - // A simple key cannot follow a flow scalar. - parser.simple_key_allowed = false - - // Create the SCALAR token and append it to the queue. 
- var token yaml_token_t - if !yaml_parser_scan_flow_scalar(parser, &token, single) { - return false - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the SCALAR(...,plain) token. -func yaml_parser_fetch_plain_scalar(parser *yaml_parser_t) bool { - // A plain scalar could be a simple key. - if !yaml_parser_save_simple_key(parser) { - return false - } - - // A simple key cannot follow a flow scalar. - parser.simple_key_allowed = false - - // Create the SCALAR token and append it to the queue. - var token yaml_token_t - if !yaml_parser_scan_plain_scalar(parser, &token) { - return false - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Eat whitespaces and comments until the next token is found. -func yaml_parser_scan_to_next_token(parser *yaml_parser_t) bool { - - // Until the next token is not found. - for { - // Allow the BOM mark to start a line. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - if parser.mark.column == 0 && is_bom(parser.buffer, parser.buffer_pos) { - skip(parser) - } - - // Eat whitespaces. - // Tabs are allowed: - // - in the flow context - // - in the block context, but not at the beginning of the line or - // after '-', '?', or ':' (complex value). - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - - for parser.buffer[parser.buffer_pos] == ' ' || ((parser.flow_level > 0 || !parser.simple_key_allowed) && parser.buffer[parser.buffer_pos] == '\t') { - skip(parser) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - // Eat a comment until a line break. - if parser.buffer[parser.buffer_pos] == '#' { - for !is_breakz(parser.buffer, parser.buffer_pos) { - skip(parser) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - } - - // If it is a line break, eat it. - if is_break(parser.buffer, parser.buffer_pos) { - if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { - return false - } - skip_line(parser) - - // In the block context, a new line may start a simple key. - if parser.flow_level == 0 { - parser.simple_key_allowed = true - } - } else { - break // We have found a token. - } - } - - return true -} - -// Scan a YAML-DIRECTIVE or TAG-DIRECTIVE token. -// -// Scope: -// %YAML 1.1 # a comment \n -// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -// %TAG !yaml! tag:yaml.org,2002: \n -// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -// -func yaml_parser_scan_directive(parser *yaml_parser_t, token *yaml_token_t) bool { - // Eat '%'. - start_mark := parser.mark - skip(parser) - - // Scan the directive name. - var name []byte - if !yaml_parser_scan_directive_name(parser, start_mark, &name) { - return false - } - - // Is it a YAML directive? - if bytes.Equal(name, []byte("YAML")) { - // Scan the VERSION directive value. - var major, minor int8 - if !yaml_parser_scan_version_directive_value(parser, start_mark, &major, &minor) { - return false - } - end_mark := parser.mark - - // Create a VERSION-DIRECTIVE token. - *token = yaml_token_t{ - typ: yaml_VERSION_DIRECTIVE_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - major: major, - minor: minor, - } - - // Is it a TAG directive? - } else if bytes.Equal(name, []byte("TAG")) { - // Scan the TAG directive value. - var handle, prefix []byte - if !yaml_parser_scan_tag_directive_value(parser, start_mark, &handle, &prefix) { - return false - } - end_mark := parser.mark - - // Create a TAG-DIRECTIVE token. 
- *token = yaml_token_t{ - typ: yaml_TAG_DIRECTIVE_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - value: handle, - prefix: prefix, - } - - // Unknown directive. - } else { - yaml_parser_set_scanner_error(parser, "while scanning a directive", - start_mark, "found unknown directive name") - return false - } - - // Eat the rest of the line including any comments. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - - for is_blank(parser.buffer, parser.buffer_pos) { - skip(parser) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - if parser.buffer[parser.buffer_pos] == '#' { - for !is_breakz(parser.buffer, parser.buffer_pos) { - skip(parser) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - } - - // Check if we are at the end of the line. - if !is_breakz(parser.buffer, parser.buffer_pos) { - yaml_parser_set_scanner_error(parser, "while scanning a directive", - start_mark, "did not find expected comment or line break") - return false - } - - // Eat a line break. - if is_break(parser.buffer, parser.buffer_pos) { - if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { - return false - } - skip_line(parser) - } - - return true -} - -// Scan the directive name. -// -// Scope: -// %YAML 1.1 # a comment \n -// ^^^^ -// %TAG !yaml! tag:yaml.org,2002: \n -// ^^^ -// -func yaml_parser_scan_directive_name(parser *yaml_parser_t, start_mark yaml_mark_t, name *[]byte) bool { - // Consume the directive name. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - - var s []byte - for is_alpha(parser.buffer, parser.buffer_pos) { - s = read(parser, s) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - // Check if the name is empty. - if len(s) == 0 { - yaml_parser_set_scanner_error(parser, "while scanning a directive", - start_mark, "could not find expected directive name") - return false - } - - // Check for an blank character after the name. - if !is_blankz(parser.buffer, parser.buffer_pos) { - yaml_parser_set_scanner_error(parser, "while scanning a directive", - start_mark, "found unexpected non-alphabetical character") - return false - } - *name = s - return true -} - -// Scan the value of VERSION-DIRECTIVE. -// -// Scope: -// %YAML 1.1 # a comment \n -// ^^^^^^ -func yaml_parser_scan_version_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, major, minor *int8) bool { - // Eat whitespaces. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - for is_blank(parser.buffer, parser.buffer_pos) { - skip(parser) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - // Consume the major version number. - if !yaml_parser_scan_version_directive_number(parser, start_mark, major) { - return false - } - - // Eat '.'. - if parser.buffer[parser.buffer_pos] != '.' { - return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", - start_mark, "did not find expected digit or '.' character") - } - - skip(parser) - - // Consume the minor version number. - if !yaml_parser_scan_version_directive_number(parser, start_mark, minor) { - return false - } - return true -} - -const max_number_length = 2 - -// Scan the version number of VERSION-DIRECTIVE. 
-// -// Scope: -// %YAML 1.1 # a comment \n -// ^ -// %YAML 1.1 # a comment \n -// ^ -func yaml_parser_scan_version_directive_number(parser *yaml_parser_t, start_mark yaml_mark_t, number *int8) bool { - - // Repeat while the next character is digit. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - var value, length int8 - for is_digit(parser.buffer, parser.buffer_pos) { - // Check if the number is too long. - length++ - if length > max_number_length { - return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", - start_mark, "found extremely long version number") - } - value = value*10 + int8(as_digit(parser.buffer, parser.buffer_pos)) - skip(parser) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - // Check if the number was present. - if length == 0 { - return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", - start_mark, "did not find expected version number") - } - *number = value - return true -} - -// Scan the value of a TAG-DIRECTIVE token. -// -// Scope: -// %TAG !yaml! tag:yaml.org,2002: \n -// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -// -func yaml_parser_scan_tag_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, handle, prefix *[]byte) bool { - var handle_value, prefix_value []byte - - // Eat whitespaces. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - - for is_blank(parser.buffer, parser.buffer_pos) { - skip(parser) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - // Scan a handle. - if !yaml_parser_scan_tag_handle(parser, true, start_mark, &handle_value) { - return false - } - - // Expect a whitespace. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - if !is_blank(parser.buffer, parser.buffer_pos) { - yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive", - start_mark, "did not find expected whitespace") - return false - } - - // Eat whitespaces. - for is_blank(parser.buffer, parser.buffer_pos) { - skip(parser) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - // Scan a prefix. - if !yaml_parser_scan_tag_uri(parser, true, nil, start_mark, &prefix_value) { - return false - } - - // Expect a whitespace or line break. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - if !is_blankz(parser.buffer, parser.buffer_pos) { - yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive", - start_mark, "did not find expected whitespace or line break") - return false - } - - *handle = handle_value - *prefix = prefix_value - return true -} - -func yaml_parser_scan_anchor(parser *yaml_parser_t, token *yaml_token_t, typ yaml_token_type_t) bool { - var s []byte - - // Eat the indicator character. - start_mark := parser.mark - skip(parser) - - // Consume the value. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - - for is_alpha(parser.buffer, parser.buffer_pos) { - s = read(parser, s) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - end_mark := parser.mark - - /* - * Check if length of the anchor is greater than 0 and it is followed by - * a whitespace character or one of the indicators: - * - * '?', ':', ',', ']', '}', '%', '@', '`'. - */ - - if len(s) == 0 || - !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '?' 
|| - parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == ',' || - parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '}' || - parser.buffer[parser.buffer_pos] == '%' || parser.buffer[parser.buffer_pos] == '@' || - parser.buffer[parser.buffer_pos] == '`') { - context := "while scanning an alias" - if typ == yaml_ANCHOR_TOKEN { - context = "while scanning an anchor" - } - yaml_parser_set_scanner_error(parser, context, start_mark, - "did not find expected alphabetic or numeric character") - return false - } - - // Create a token. - *token = yaml_token_t{ - typ: typ, - start_mark: start_mark, - end_mark: end_mark, - value: s, - } - - return true -} - -/* - * Scan a TAG token. - */ - -func yaml_parser_scan_tag(parser *yaml_parser_t, token *yaml_token_t) bool { - var handle, suffix []byte - - start_mark := parser.mark - - // Check if the tag is in the canonical form. - if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { - return false - } - - if parser.buffer[parser.buffer_pos+1] == '<' { - // Keep the handle as '' - - // Eat '!<' - skip(parser) - skip(parser) - - // Consume the tag value. - if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) { - return false - } - - // Check for '>' and eat it. - if parser.buffer[parser.buffer_pos] != '>' { - yaml_parser_set_scanner_error(parser, "while scanning a tag", - start_mark, "did not find the expected '>'") - return false - } - - skip(parser) - } else { - // The tag has either the '!suffix' or the '!handle!suffix' form. - - // First, try to scan a handle. - if !yaml_parser_scan_tag_handle(parser, false, start_mark, &handle) { - return false - } - - // Check if it is, indeed, handle. - if handle[0] == '!' && len(handle) > 1 && handle[len(handle)-1] == '!' { - // Scan the suffix now. - if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) { - return false - } - } else { - // It wasn't a handle after all. Scan the rest of the tag. - if !yaml_parser_scan_tag_uri(parser, false, handle, start_mark, &suffix) { - return false - } - - // Set the handle to '!'. - handle = []byte{'!'} - - // A special case: the '!' tag. Set the handle to '' and the - // suffix to '!'. - if len(suffix) == 0 { - handle, suffix = suffix, handle - } - } - } - - // Check the character which ends the tag. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - if !is_blankz(parser.buffer, parser.buffer_pos) { - yaml_parser_set_scanner_error(parser, "while scanning a tag", - start_mark, "did not find expected whitespace or line break") - return false - } - - end_mark := parser.mark - - // Create a token. - *token = yaml_token_t{ - typ: yaml_TAG_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - value: handle, - suffix: suffix, - } - return true -} - -// Scan a tag handle. -func yaml_parser_scan_tag_handle(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, handle *[]byte) bool { - // Check the initial '!' character. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - if parser.buffer[parser.buffer_pos] != '!' { - yaml_parser_set_scanner_tag_error(parser, directive, - start_mark, "did not find expected '!'") - return false - } - - var s []byte - - // Copy the '!' character. - s = read(parser, s) - - // Copy all subsequent alphabetical and numerical characters. 
- if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - for is_alpha(parser.buffer, parser.buffer_pos) { - s = read(parser, s) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - // Check if the trailing character is '!' and copy it. - if parser.buffer[parser.buffer_pos] == '!' { - s = read(parser, s) - } else { - // It's either the '!' tag or not really a tag handle. If it's a %TAG - // directive, it's an error. If it's a tag token, it must be a part of URI. - if directive && !(s[0] == '!' && s[1] == 0) { - yaml_parser_set_scanner_tag_error(parser, directive, - start_mark, "did not find expected '!'") - return false - } - } - - *handle = s - return true -} - -// Scan a tag. -func yaml_parser_scan_tag_uri(parser *yaml_parser_t, directive bool, head []byte, start_mark yaml_mark_t, uri *[]byte) bool { - //size_t length = head ? strlen((char *)head) : 0 - var s []byte - - // Copy the head if needed. - // - // Note that we don't copy the leading '!' character. - if len(head) > 1 { - s = append(s, head[1:]...) - } - - // Scan the tag. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - - // The set of characters that may appear in URI is as follows: - // - // '0'-'9', 'A'-'Z', 'a'-'z', '_', '-', ';', '/', '?', ':', '@', '&', - // '=', '+', '$', ',', '.', '!', '~', '*', '\'', '(', ')', '[', ']', - // '%'. - // [Go] Convert this into more reasonable logic. - for is_alpha(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == ';' || - parser.buffer[parser.buffer_pos] == '/' || parser.buffer[parser.buffer_pos] == '?' || - parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == '@' || - parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '=' || - parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '$' || - parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '.' || - parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '~' || - parser.buffer[parser.buffer_pos] == '*' || parser.buffer[parser.buffer_pos] == '\'' || - parser.buffer[parser.buffer_pos] == '(' || parser.buffer[parser.buffer_pos] == ')' || - parser.buffer[parser.buffer_pos] == '[' || parser.buffer[parser.buffer_pos] == ']' || - parser.buffer[parser.buffer_pos] == '%' { - // Check if it is a URI-escape sequence. - if parser.buffer[parser.buffer_pos] == '%' { - if !yaml_parser_scan_uri_escapes(parser, directive, start_mark, &s) { - return false - } - } else { - s = read(parser, s) - } - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - // Check if the tag is non-empty. - if len(s) == 0 { - yaml_parser_set_scanner_tag_error(parser, directive, - start_mark, "did not find expected tag URI") - return false - } - *uri = s - return true -} - -// Decode an URI-escape sequence corresponding to a single UTF-8 character. -func yaml_parser_scan_uri_escapes(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, s *[]byte) bool { - - // Decode the required number of characters. - w := 1024 - for w > 0 { - // Check for a URI-escaped octet. 
- if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) { - return false - } - - if !(parser.buffer[parser.buffer_pos] == '%' && - is_hex(parser.buffer, parser.buffer_pos+1) && - is_hex(parser.buffer, parser.buffer_pos+2)) { - return yaml_parser_set_scanner_tag_error(parser, directive, - start_mark, "did not find URI escaped octet") - } - - // Get the octet. - octet := byte((as_hex(parser.buffer, parser.buffer_pos+1) << 4) + as_hex(parser.buffer, parser.buffer_pos+2)) - - // If it is the leading octet, determine the length of the UTF-8 sequence. - if w == 1024 { - w = width(octet) - if w == 0 { - return yaml_parser_set_scanner_tag_error(parser, directive, - start_mark, "found an incorrect leading UTF-8 octet") - } - } else { - // Check if the trailing octet is correct. - if octet&0xC0 != 0x80 { - return yaml_parser_set_scanner_tag_error(parser, directive, - start_mark, "found an incorrect trailing UTF-8 octet") - } - } - - // Copy the octet and move the pointers. - *s = append(*s, octet) - skip(parser) - skip(parser) - skip(parser) - w-- - } - return true -} - -// Scan a block scalar. -func yaml_parser_scan_block_scalar(parser *yaml_parser_t, token *yaml_token_t, literal bool) bool { - // Eat the indicator '|' or '>'. - start_mark := parser.mark - skip(parser) - - // Scan the additional block scalar indicators. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - - // Check for a chomping indicator. - var chomping, increment int - if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' { - // Set the chomping method and eat the indicator. - if parser.buffer[parser.buffer_pos] == '+' { - chomping = +1 - } else { - chomping = -1 - } - skip(parser) - - // Check for an indentation indicator. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - if is_digit(parser.buffer, parser.buffer_pos) { - // Check that the indentation is greater than 0. - if parser.buffer[parser.buffer_pos] == '0' { - yaml_parser_set_scanner_error(parser, "while scanning a block scalar", - start_mark, "found an indentation indicator equal to 0") - return false - } - - // Get the indentation level and eat the indicator. - increment = as_digit(parser.buffer, parser.buffer_pos) - skip(parser) - } - - } else if is_digit(parser.buffer, parser.buffer_pos) { - // Do the same as above, but in the opposite order. - - if parser.buffer[parser.buffer_pos] == '0' { - yaml_parser_set_scanner_error(parser, "while scanning a block scalar", - start_mark, "found an indentation indicator equal to 0") - return false - } - increment = as_digit(parser.buffer, parser.buffer_pos) - skip(parser) - - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' { - if parser.buffer[parser.buffer_pos] == '+' { - chomping = +1 - } else { - chomping = -1 - } - skip(parser) - } - } - - // Eat whitespaces and comments to the end of the line. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - for is_blank(parser.buffer, parser.buffer_pos) { - skip(parser) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - if parser.buffer[parser.buffer_pos] == '#' { - for !is_breakz(parser.buffer, parser.buffer_pos) { - skip(parser) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - } - - // Check if we are at the end of the line. 
- if !is_breakz(parser.buffer, parser.buffer_pos) { - yaml_parser_set_scanner_error(parser, "while scanning a block scalar", - start_mark, "did not find expected comment or line break") - return false - } - - // Eat a line break. - if is_break(parser.buffer, parser.buffer_pos) { - if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { - return false - } - skip_line(parser) - } - - end_mark := parser.mark - - // Set the indentation level if it was specified. - var indent int - if increment > 0 { - if parser.indent >= 0 { - indent = parser.indent + increment - } else { - indent = increment - } - } - - // Scan the leading line breaks and determine the indentation level if needed. - var s, leading_break, trailing_breaks []byte - if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) { - return false - } - - // Scan the block scalar content. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - var leading_blank, trailing_blank bool - for parser.mark.column == indent && !is_z(parser.buffer, parser.buffer_pos) { - // We are at the beginning of a non-empty line. - - // Is it a trailing whitespace? - trailing_blank = is_blank(parser.buffer, parser.buffer_pos) - - // Check if we need to fold the leading line break. - if !literal && !leading_blank && !trailing_blank && len(leading_break) > 0 && leading_break[0] == '\n' { - // Do we need to join the lines by space? - if len(trailing_breaks) == 0 { - s = append(s, ' ') - } - } else { - s = append(s, leading_break...) - } - leading_break = leading_break[:0] - - // Append the remaining line breaks. - s = append(s, trailing_breaks...) - trailing_breaks = trailing_breaks[:0] - - // Is it a leading whitespace? - leading_blank = is_blank(parser.buffer, parser.buffer_pos) - - // Consume the current line. - for !is_breakz(parser.buffer, parser.buffer_pos) { - s = read(parser, s) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - // Consume the line break. - if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { - return false - } - - leading_break = read_line(parser, leading_break) - - // Eat the following indentation spaces and line breaks. - if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) { - return false - } - } - - // Chomp the tail. - if chomping != -1 { - s = append(s, leading_break...) - } - if chomping == 1 { - s = append(s, trailing_breaks...) - } - - // Create a token. - *token = yaml_token_t{ - typ: yaml_SCALAR_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - value: s, - style: yaml_LITERAL_SCALAR_STYLE, - } - if !literal { - token.style = yaml_FOLDED_SCALAR_STYLE - } - return true -} - -// Scan indentation spaces and line breaks for a block scalar. Determine the -// indentation level if needed. -func yaml_parser_scan_block_scalar_breaks(parser *yaml_parser_t, indent *int, breaks *[]byte, start_mark yaml_mark_t, end_mark *yaml_mark_t) bool { - *end_mark = parser.mark - - // Eat the indentation spaces and line breaks. - max_indent := 0 - for { - // Eat the indentation spaces. 
- if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - for (*indent == 0 || parser.mark.column < *indent) && is_space(parser.buffer, parser.buffer_pos) { - skip(parser) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - if parser.mark.column > max_indent { - max_indent = parser.mark.column - } - - // Check for a tab character messing the indentation. - if (*indent == 0 || parser.mark.column < *indent) && is_tab(parser.buffer, parser.buffer_pos) { - return yaml_parser_set_scanner_error(parser, "while scanning a block scalar", - start_mark, "found a tab character where an indentation space is expected") - } - - // Have we found a non-empty line? - if !is_break(parser.buffer, parser.buffer_pos) { - break - } - - // Consume the line break. - if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { - return false - } - // [Go] Should really be returning breaks instead. - *breaks = read_line(parser, *breaks) - *end_mark = parser.mark - } - - // Determine the indentation level if needed. - if *indent == 0 { - *indent = max_indent - if *indent < parser.indent+1 { - *indent = parser.indent + 1 - } - if *indent < 1 { - *indent = 1 - } - } - return true -} - -// Scan a quoted scalar. -func yaml_parser_scan_flow_scalar(parser *yaml_parser_t, token *yaml_token_t, single bool) bool { - // Eat the left quote. - start_mark := parser.mark - skip(parser) - - // Consume the content of the quoted scalar. - var s, leading_break, trailing_breaks, whitespaces []byte - for { - // Check that there are no document indicators at the beginning of the line. - if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { - return false - } - - if parser.mark.column == 0 && - ((parser.buffer[parser.buffer_pos+0] == '-' && - parser.buffer[parser.buffer_pos+1] == '-' && - parser.buffer[parser.buffer_pos+2] == '-') || - (parser.buffer[parser.buffer_pos+0] == '.' && - parser.buffer[parser.buffer_pos+1] == '.' && - parser.buffer[parser.buffer_pos+2] == '.')) && - is_blankz(parser.buffer, parser.buffer_pos+3) { - yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar", - start_mark, "found unexpected document indicator") - return false - } - - // Check for EOF. - if is_z(parser.buffer, parser.buffer_pos) { - yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar", - start_mark, "found unexpected end of stream") - return false - } - - // Consume non-blank characters. - leading_blanks := false - for !is_blankz(parser.buffer, parser.buffer_pos) { - if single && parser.buffer[parser.buffer_pos] == '\'' && parser.buffer[parser.buffer_pos+1] == '\'' { - // Is is an escaped single quote. - s = append(s, '\'') - skip(parser) - skip(parser) - - } else if single && parser.buffer[parser.buffer_pos] == '\'' { - // It is a right single quote. - break - } else if !single && parser.buffer[parser.buffer_pos] == '"' { - // It is a right double quote. - break - - } else if !single && parser.buffer[parser.buffer_pos] == '\\' && is_break(parser.buffer, parser.buffer_pos+1) { - // It is an escaped line break. - if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) { - return false - } - skip(parser) - skip_line(parser) - leading_blanks = true - break - - } else if !single && parser.buffer[parser.buffer_pos] == '\\' { - // It is an escape sequence. - code_length := 0 - - // Check the escape character. 
- switch parser.buffer[parser.buffer_pos+1] { - case '0': - s = append(s, 0) - case 'a': - s = append(s, '\x07') - case 'b': - s = append(s, '\x08') - case 't', '\t': - s = append(s, '\x09') - case 'n': - s = append(s, '\x0A') - case 'v': - s = append(s, '\x0B') - case 'f': - s = append(s, '\x0C') - case 'r': - s = append(s, '\x0D') - case 'e': - s = append(s, '\x1B') - case ' ': - s = append(s, '\x20') - case '"': - s = append(s, '"') - case '\'': - s = append(s, '\'') - case '\\': - s = append(s, '\\') - case 'N': // NEL (#x85) - s = append(s, '\xC2') - s = append(s, '\x85') - case '_': // #xA0 - s = append(s, '\xC2') - s = append(s, '\xA0') - case 'L': // LS (#x2028) - s = append(s, '\xE2') - s = append(s, '\x80') - s = append(s, '\xA8') - case 'P': // PS (#x2029) - s = append(s, '\xE2') - s = append(s, '\x80') - s = append(s, '\xA9') - case 'x': - code_length = 2 - case 'u': - code_length = 4 - case 'U': - code_length = 8 - default: - yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar", - start_mark, "found unknown escape character") - return false - } - - skip(parser) - skip(parser) - - // Consume an arbitrary escape code. - if code_length > 0 { - var value int - - // Scan the character value. - if parser.unread < code_length && !yaml_parser_update_buffer(parser, code_length) { - return false - } - for k := 0; k < code_length; k++ { - if !is_hex(parser.buffer, parser.buffer_pos+k) { - yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar", - start_mark, "did not find expected hexdecimal number") - return false - } - value = (value << 4) + as_hex(parser.buffer, parser.buffer_pos+k) - } - - // Check the value and write the character. - if (value >= 0xD800 && value <= 0xDFFF) || value > 0x10FFFF { - yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar", - start_mark, "found invalid Unicode character escape code") - return false - } - if value <= 0x7F { - s = append(s, byte(value)) - } else if value <= 0x7FF { - s = append(s, byte(0xC0+(value>>6))) - s = append(s, byte(0x80+(value&0x3F))) - } else if value <= 0xFFFF { - s = append(s, byte(0xE0+(value>>12))) - s = append(s, byte(0x80+((value>>6)&0x3F))) - s = append(s, byte(0x80+(value&0x3F))) - } else { - s = append(s, byte(0xF0+(value>>18))) - s = append(s, byte(0x80+((value>>12)&0x3F))) - s = append(s, byte(0x80+((value>>6)&0x3F))) - s = append(s, byte(0x80+(value&0x3F))) - } - - // Advance the pointer. - for k := 0; k < code_length; k++ { - skip(parser) - } - } - } else { - // It is a non-escaped non-blank character. - s = read(parser, s) - } - if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { - return false - } - } - - // Check if we are at the end of the scalar. - if single { - if parser.buffer[parser.buffer_pos] == '\'' { - break - } - } else { - if parser.buffer[parser.buffer_pos] == '"' { - break - } - } - - // Consume blank characters. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - - for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) { - if is_blank(parser.buffer, parser.buffer_pos) { - // Consume a space or a tab character. - if !leading_blanks { - whitespaces = read(parser, whitespaces) - } else { - skip(parser) - } - } else { - if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { - return false - } - - // Check if it is a first line break. 
- if !leading_blanks { - whitespaces = whitespaces[:0] - leading_break = read_line(parser, leading_break) - leading_blanks = true - } else { - trailing_breaks = read_line(parser, trailing_breaks) - } - } - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - // Join the whitespaces or fold line breaks. - if leading_blanks { - // Do we need to fold line breaks? - if len(leading_break) > 0 && leading_break[0] == '\n' { - if len(trailing_breaks) == 0 { - s = append(s, ' ') - } else { - s = append(s, trailing_breaks...) - } - } else { - s = append(s, leading_break...) - s = append(s, trailing_breaks...) - } - trailing_breaks = trailing_breaks[:0] - leading_break = leading_break[:0] - } else { - s = append(s, whitespaces...) - whitespaces = whitespaces[:0] - } - } - - // Eat the right quote. - skip(parser) - end_mark := parser.mark - - // Create a token. - *token = yaml_token_t{ - typ: yaml_SCALAR_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - value: s, - style: yaml_SINGLE_QUOTED_SCALAR_STYLE, - } - if !single { - token.style = yaml_DOUBLE_QUOTED_SCALAR_STYLE - } - return true -} - -// Scan a plain scalar. -func yaml_parser_scan_plain_scalar(parser *yaml_parser_t, token *yaml_token_t) bool { - - var s, leading_break, trailing_breaks, whitespaces []byte - var leading_blanks bool - var indent = parser.indent + 1 - - start_mark := parser.mark - end_mark := parser.mark - - // Consume the content of the plain scalar. - for { - // Check for a document indicator. - if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { - return false - } - if parser.mark.column == 0 && - ((parser.buffer[parser.buffer_pos+0] == '-' && - parser.buffer[parser.buffer_pos+1] == '-' && - parser.buffer[parser.buffer_pos+2] == '-') || - (parser.buffer[parser.buffer_pos+0] == '.' && - parser.buffer[parser.buffer_pos+1] == '.' && - parser.buffer[parser.buffer_pos+2] == '.')) && - is_blankz(parser.buffer, parser.buffer_pos+3) { - break - } - - // Check for a comment. - if parser.buffer[parser.buffer_pos] == '#' { - break - } - - // Consume non-blank characters. - for !is_blankz(parser.buffer, parser.buffer_pos) { - - // Check for 'x:x' in the flow context. TODO: Fix the test "spec-08-13". - if parser.flow_level > 0 && - parser.buffer[parser.buffer_pos] == ':' && - !is_blankz(parser.buffer, parser.buffer_pos+1) { - yaml_parser_set_scanner_error(parser, "while scanning a plain scalar", - start_mark, "found unexpected ':'") - return false - } - - // Check for indicators that may end a plain scalar. - if (parser.buffer[parser.buffer_pos] == ':' && is_blankz(parser.buffer, parser.buffer_pos+1)) || - (parser.flow_level > 0 && - (parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == ':' || - parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == '[' || - parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' || - parser.buffer[parser.buffer_pos] == '}')) { - break - } - - // Check if we need to join whitespaces and breaks. - if leading_blanks || len(whitespaces) > 0 { - if leading_blanks { - // Do we need to fold line breaks? - if leading_break[0] == '\n' { - if len(trailing_breaks) == 0 { - s = append(s, ' ') - } else { - s = append(s, trailing_breaks...) - } - } else { - s = append(s, leading_break...) - s = append(s, trailing_breaks...) - } - trailing_breaks = trailing_breaks[:0] - leading_break = leading_break[:0] - leading_blanks = false - } else { - s = append(s, whitespaces...) 
- whitespaces = whitespaces[:0] - } - } - - // Copy the character. - s = read(parser, s) - - end_mark = parser.mark - if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { - return false - } - } - - // Is it the end? - if !(is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos)) { - break - } - - // Consume blank characters. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - - for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) { - if is_blank(parser.buffer, parser.buffer_pos) { - - // Check for tab character that abuse indentation. - if leading_blanks && parser.mark.column < indent && is_tab(parser.buffer, parser.buffer_pos) { - yaml_parser_set_scanner_error(parser, "while scanning a plain scalar", - start_mark, "found a tab character that violate indentation") - return false - } - - // Consume a space or a tab character. - if !leading_blanks { - whitespaces = read(parser, whitespaces) - } else { - skip(parser) - } - } else { - if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { - return false - } - - // Check if it is a first line break. - if !leading_blanks { - whitespaces = whitespaces[:0] - leading_break = read_line(parser, leading_break) - leading_blanks = true - } else { - trailing_breaks = read_line(parser, trailing_breaks) - } - } - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - // Check indentation level. - if parser.flow_level == 0 && parser.mark.column < indent { - break - } - } - - // Create a token. - *token = yaml_token_t{ - typ: yaml_SCALAR_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - value: s, - style: yaml_PLAIN_SCALAR_STYLE, - } - - // Note that we change the 'simple_key_allowed' flag. 
- if leading_blanks { - parser.simple_key_allowed = true - } - return true -} diff --git a/vendor/gopkg.in/yaml.v2/sorter.go b/vendor/gopkg.in/yaml.v2/sorter.go deleted file mode 100644 index 5958822..0000000 --- a/vendor/gopkg.in/yaml.v2/sorter.go +++ /dev/null @@ -1,104 +0,0 @@ -package yaml - -import ( - "reflect" - "unicode" -) - -type keyList []reflect.Value - -func (l keyList) Len() int { return len(l) } -func (l keyList) Swap(i, j int) { l[i], l[j] = l[j], l[i] } -func (l keyList) Less(i, j int) bool { - a := l[i] - b := l[j] - ak := a.Kind() - bk := b.Kind() - for (ak == reflect.Interface || ak == reflect.Ptr) && !a.IsNil() { - a = a.Elem() - ak = a.Kind() - } - for (bk == reflect.Interface || bk == reflect.Ptr) && !b.IsNil() { - b = b.Elem() - bk = b.Kind() - } - af, aok := keyFloat(a) - bf, bok := keyFloat(b) - if aok && bok { - if af != bf { - return af < bf - } - if ak != bk { - return ak < bk - } - return numLess(a, b) - } - if ak != reflect.String || bk != reflect.String { - return ak < bk - } - ar, br := []rune(a.String()), []rune(b.String()) - for i := 0; i < len(ar) && i < len(br); i++ { - if ar[i] == br[i] { - continue - } - al := unicode.IsLetter(ar[i]) - bl := unicode.IsLetter(br[i]) - if al && bl { - return ar[i] < br[i] - } - if al || bl { - return bl - } - var ai, bi int - var an, bn int64 - for ai = i; ai < len(ar) && unicode.IsDigit(ar[ai]); ai++ { - an = an*10 + int64(ar[ai]-'0') - } - for bi = i; bi < len(br) && unicode.IsDigit(br[bi]); bi++ { - bn = bn*10 + int64(br[bi]-'0') - } - if an != bn { - return an < bn - } - if ai != bi { - return ai < bi - } - return ar[i] < br[i] - } - return len(ar) < len(br) -} - -// keyFloat returns a float value for v if it is a number/bool -// and whether it is a number/bool or not. -func keyFloat(v reflect.Value) (f float64, ok bool) { - switch v.Kind() { - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return float64(v.Int()), true - case reflect.Float32, reflect.Float64: - return v.Float(), true - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return float64(v.Uint()), true - case reflect.Bool: - if v.Bool() { - return 1, true - } - return 0, true - } - return 0, false -} - -// numLess returns whether a < b. -// a and b must necessarily have the same kind. -func numLess(a, b reflect.Value) bool { - switch a.Kind() { - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return a.Int() < b.Int() - case reflect.Float32, reflect.Float64: - return a.Float() < b.Float() - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return a.Uint() < b.Uint() - case reflect.Bool: - return !a.Bool() && b.Bool() - } - panic("not a number") -} diff --git a/vendor/gopkg.in/yaml.v2/writerc.go b/vendor/gopkg.in/yaml.v2/writerc.go deleted file mode 100644 index 190362f..0000000 --- a/vendor/gopkg.in/yaml.v2/writerc.go +++ /dev/null @@ -1,89 +0,0 @@ -package yaml - -// Set the writer error and return false. -func yaml_emitter_set_writer_error(emitter *yaml_emitter_t, problem string) bool { - emitter.error = yaml_WRITER_ERROR - emitter.problem = problem - return false -} - -// Flush the output buffer. -func yaml_emitter_flush(emitter *yaml_emitter_t) bool { - if emitter.write_handler == nil { - panic("write handler not set") - } - - // Check if the buffer is empty. - if emitter.buffer_pos == 0 { - return true - } - - // If the output encoding is UTF-8, we don't need to recode the buffer. 
- if emitter.encoding == yaml_UTF8_ENCODING { - if err := emitter.write_handler(emitter, emitter.buffer[:emitter.buffer_pos]); err != nil { - return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error()) - } - emitter.buffer_pos = 0 - return true - } - - // Recode the buffer into the raw buffer. - var low, high int - if emitter.encoding == yaml_UTF16LE_ENCODING { - low, high = 0, 1 - } else { - high, low = 1, 0 - } - - pos := 0 - for pos < emitter.buffer_pos { - // See the "reader.c" code for more details on UTF-8 encoding. Note - // that we assume that the buffer contains a valid UTF-8 sequence. - - // Read the next UTF-8 character. - octet := emitter.buffer[pos] - - var w int - var value rune - switch { - case octet&0x80 == 0x00: - w, value = 1, rune(octet&0x7F) - case octet&0xE0 == 0xC0: - w, value = 2, rune(octet&0x1F) - case octet&0xF0 == 0xE0: - w, value = 3, rune(octet&0x0F) - case octet&0xF8 == 0xF0: - w, value = 4, rune(octet&0x07) - } - for k := 1; k < w; k++ { - octet = emitter.buffer[pos+k] - value = (value << 6) + (rune(octet) & 0x3F) - } - pos += w - - // Write the character. - if value < 0x10000 { - var b [2]byte - b[high] = byte(value >> 8) - b[low] = byte(value & 0xFF) - emitter.raw_buffer = append(emitter.raw_buffer, b[0], b[1]) - } else { - // Write the character using a surrogate pair (check "reader.c"). - var b [4]byte - value -= 0x10000 - b[high] = byte(0xD8 + (value >> 18)) - b[low] = byte((value >> 10) & 0xFF) - b[high+2] = byte(0xDC + ((value >> 8) & 0xFF)) - b[low+2] = byte(value & 0xFF) - emitter.raw_buffer = append(emitter.raw_buffer, b[0], b[1], b[2], b[3]) - } - } - - // Write the raw buffer. - if err := emitter.write_handler(emitter, emitter.raw_buffer); err != nil { - return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error()) - } - emitter.buffer_pos = 0 - emitter.raw_buffer = emitter.raw_buffer[:0] - return true -} diff --git a/vendor/gopkg.in/yaml.v2/yaml.go b/vendor/gopkg.in/yaml.v2/yaml.go deleted file mode 100644 index 36d6b88..0000000 --- a/vendor/gopkg.in/yaml.v2/yaml.go +++ /dev/null @@ -1,346 +0,0 @@ -// Package yaml implements YAML support for the Go language. -// -// Source code and other details for the project are available at GitHub: -// -// https://github.com/go-yaml/yaml -// -package yaml - -import ( - "errors" - "fmt" - "reflect" - "strings" - "sync" -) - -// MapSlice encodes and decodes as a YAML map. -// The order of keys is preserved when encoding and decoding. -type MapSlice []MapItem - -// MapItem is an item in a MapSlice. -type MapItem struct { - Key, Value interface{} -} - -// The Unmarshaler interface may be implemented by types to customize their -// behavior when being unmarshaled from a YAML document. The UnmarshalYAML -// method receives a function that may be called to unmarshal the original -// YAML value into a field or variable. It is safe to call the unmarshal -// function parameter more than once if necessary. -type Unmarshaler interface { - UnmarshalYAML(unmarshal func(interface{}) error) error -} - -// The Marshaler interface may be implemented by types to customize their -// behavior when being marshaled into a YAML document. The returned value -// is marshaled in place of the original value implementing Marshaler. -// -// If an error is returned by MarshalYAML, the marshaling procedure stops -// and returns with the provided error. 
-type Marshaler interface { - MarshalYAML() (interface{}, error) -} - -// Unmarshal decodes the first document found within the in byte slice -// and assigns decoded values into the out value. -// -// Maps and pointers (to a struct, string, int, etc) are accepted as out -// values. If an internal pointer within a struct is not initialized, -// the yaml package will initialize it if necessary for unmarshalling -// the provided data. The out parameter must not be nil. -// -// The type of the decoded values should be compatible with the respective -// values in out. If one or more values cannot be decoded due to a type -// mismatches, decoding continues partially until the end of the YAML -// content, and a *yaml.TypeError is returned with details for all -// missed values. -// -// Struct fields are only unmarshalled if they are exported (have an -// upper case first letter), and are unmarshalled using the field name -// lowercased as the default key. Custom keys may be defined via the -// "yaml" name in the field tag: the content preceding the first comma -// is used as the key, and the following comma-separated options are -// used to tweak the marshalling process (see Marshal). -// Conflicting names result in a runtime error. -// -// For example: -// -// type T struct { -// F int `yaml:"a,omitempty"` -// B int -// } -// var t T -// yaml.Unmarshal([]byte("a: 1\nb: 2"), &t) -// -// See the documentation of Marshal for the format of tags and a list of -// supported tag options. -// -func Unmarshal(in []byte, out interface{}) (err error) { - defer handleErr(&err) - d := newDecoder() - p := newParser(in) - defer p.destroy() - node := p.parse() - if node != nil { - v := reflect.ValueOf(out) - if v.Kind() == reflect.Ptr && !v.IsNil() { - v = v.Elem() - } - d.unmarshal(node, v) - } - if len(d.terrors) > 0 { - return &TypeError{d.terrors} - } - return nil -} - -// Marshal serializes the value provided into a YAML document. The structure -// of the generated document will reflect the structure of the value itself. -// Maps and pointers (to struct, string, int, etc) are accepted as the in value. -// -// Struct fields are only unmarshalled if they are exported (have an upper case -// first letter), and are unmarshalled using the field name lowercased as the -// default key. Custom keys may be defined via the "yaml" name in the field -// tag: the content preceding the first comma is used as the key, and the -// following comma-separated options are used to tweak the marshalling process. -// Conflicting names result in a runtime error. -// -// The field tag format accepted is: -// -// `(...) yaml:"[][,[,]]" (...)` -// -// The following flags are currently supported: -// -// omitempty Only include the field if it's not set to the zero -// value for the type or to empty slices or maps. -// Does not apply to zero valued structs. -// -// flow Marshal using a flow style (useful for structs, -// sequences and maps). -// -// inline Inline the field, which must be a struct or a map, -// causing all of its fields or keys to be processed as if -// they were part of the outer struct. For maps, keys must -// not conflict with the yaml keys of other struct fields. -// -// In addition, if the key is "-", the field is ignored. 
-// -// For example: -// -// type T struct { -// F int "a,omitempty" -// B int -// } -// yaml.Marshal(&T{B: 2}) // Returns "b: 2\n" -// yaml.Marshal(&T{F: 1}} // Returns "a: 1\nb: 0\n" -// -func Marshal(in interface{}) (out []byte, err error) { - defer handleErr(&err) - e := newEncoder() - defer e.destroy() - e.marshal("", reflect.ValueOf(in)) - e.finish() - out = e.out - return -} - -func handleErr(err *error) { - if v := recover(); v != nil { - if e, ok := v.(yamlError); ok { - *err = e.err - } else { - panic(v) - } - } -} - -type yamlError struct { - err error -} - -func fail(err error) { - panic(yamlError{err}) -} - -func failf(format string, args ...interface{}) { - panic(yamlError{fmt.Errorf("yaml: "+format, args...)}) -} - -// A TypeError is returned by Unmarshal when one or more fields in -// the YAML document cannot be properly decoded into the requested -// types. When this error is returned, the value is still -// unmarshaled partially. -type TypeError struct { - Errors []string -} - -func (e *TypeError) Error() string { - return fmt.Sprintf("yaml: unmarshal errors:\n %s", strings.Join(e.Errors, "\n ")) -} - -// -------------------------------------------------------------------------- -// Maintain a mapping of keys to structure field indexes - -// The code in this section was copied from mgo/bson. - -// structInfo holds details for the serialization of fields of -// a given struct. -type structInfo struct { - FieldsMap map[string]fieldInfo - FieldsList []fieldInfo - - // InlineMap is the number of the field in the struct that - // contains an ,inline map, or -1 if there's none. - InlineMap int -} - -type fieldInfo struct { - Key string - Num int - OmitEmpty bool - Flow bool - - // Inline holds the field index if the field is part of an inlined struct. 
- Inline []int -} - -var structMap = make(map[reflect.Type]*structInfo) -var fieldMapMutex sync.RWMutex - -func getStructInfo(st reflect.Type) (*structInfo, error) { - fieldMapMutex.RLock() - sinfo, found := structMap[st] - fieldMapMutex.RUnlock() - if found { - return sinfo, nil - } - - n := st.NumField() - fieldsMap := make(map[string]fieldInfo) - fieldsList := make([]fieldInfo, 0, n) - inlineMap := -1 - for i := 0; i != n; i++ { - field := st.Field(i) - if field.PkgPath != "" && !field.Anonymous { - continue // Private field - } - - info := fieldInfo{Num: i} - - tag := field.Tag.Get("yaml") - if tag == "" && strings.Index(string(field.Tag), ":") < 0 { - tag = string(field.Tag) - } - if tag == "-" { - continue - } - - inline := false - fields := strings.Split(tag, ",") - if len(fields) > 1 { - for _, flag := range fields[1:] { - switch flag { - case "omitempty": - info.OmitEmpty = true - case "flow": - info.Flow = true - case "inline": - inline = true - default: - return nil, errors.New(fmt.Sprintf("Unsupported flag %q in tag %q of type %s", flag, tag, st)) - } - } - tag = fields[0] - } - - if inline { - switch field.Type.Kind() { - case reflect.Map: - if inlineMap >= 0 { - return nil, errors.New("Multiple ,inline maps in struct " + st.String()) - } - if field.Type.Key() != reflect.TypeOf("") { - return nil, errors.New("Option ,inline needs a map with string keys in struct " + st.String()) - } - inlineMap = info.Num - case reflect.Struct: - sinfo, err := getStructInfo(field.Type) - if err != nil { - return nil, err - } - for _, finfo := range sinfo.FieldsList { - if _, found := fieldsMap[finfo.Key]; found { - msg := "Duplicated key '" + finfo.Key + "' in struct " + st.String() - return nil, errors.New(msg) - } - if finfo.Inline == nil { - finfo.Inline = []int{i, finfo.Num} - } else { - finfo.Inline = append([]int{i}, finfo.Inline...) 
- } - fieldsMap[finfo.Key] = finfo - fieldsList = append(fieldsList, finfo) - } - default: - //return nil, errors.New("Option ,inline needs a struct value or map field") - return nil, errors.New("Option ,inline needs a struct value field") - } - continue - } - - if tag != "" { - info.Key = tag - } else { - info.Key = strings.ToLower(field.Name) - } - - if _, found = fieldsMap[info.Key]; found { - msg := "Duplicated key '" + info.Key + "' in struct " + st.String() - return nil, errors.New(msg) - } - - fieldsList = append(fieldsList, info) - fieldsMap[info.Key] = info - } - - sinfo = &structInfo{fieldsMap, fieldsList, inlineMap} - - fieldMapMutex.Lock() - structMap[st] = sinfo - fieldMapMutex.Unlock() - return sinfo, nil -} - -func isZero(v reflect.Value) bool { - switch v.Kind() { - case reflect.String: - return len(v.String()) == 0 - case reflect.Interface, reflect.Ptr: - return v.IsNil() - case reflect.Slice: - return v.Len() == 0 - case reflect.Map: - return v.Len() == 0 - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return v.Int() == 0 - case reflect.Float32, reflect.Float64: - return v.Float() == 0 - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return v.Uint() == 0 - case reflect.Bool: - return !v.Bool() - case reflect.Struct: - vt := v.Type() - for i := v.NumField() - 1; i >= 0; i-- { - if vt.Field(i).PkgPath != "" { - continue // Private field - } - if !isZero(v.Field(i)) { - return false - } - } - return true - } - return false -} diff --git a/vendor/gopkg.in/yaml.v2/yamlh.go b/vendor/gopkg.in/yaml.v2/yamlh.go deleted file mode 100644 index d60a6b6..0000000 --- a/vendor/gopkg.in/yaml.v2/yamlh.go +++ /dev/null @@ -1,716 +0,0 @@ -package yaml - -import ( - "io" -) - -// The version directive data. -type yaml_version_directive_t struct { - major int8 // The major version number. - minor int8 // The minor version number. -} - -// The tag directive data. -type yaml_tag_directive_t struct { - handle []byte // The tag handle. - prefix []byte // The tag prefix. -} - -type yaml_encoding_t int - -// The stream encoding. -const ( - // Let the parser choose the encoding. - yaml_ANY_ENCODING yaml_encoding_t = iota - - yaml_UTF8_ENCODING // The default UTF-8 encoding. - yaml_UTF16LE_ENCODING // The UTF-16-LE encoding with BOM. - yaml_UTF16BE_ENCODING // The UTF-16-BE encoding with BOM. -) - -type yaml_break_t int - -// Line break types. -const ( - // Let the parser choose the break type. - yaml_ANY_BREAK yaml_break_t = iota - - yaml_CR_BREAK // Use CR for line breaks (Mac style). - yaml_LN_BREAK // Use LN for line breaks (Unix style). - yaml_CRLN_BREAK // Use CR LN for line breaks (DOS style). -) - -type yaml_error_type_t int - -// Many bad things could happen with the parser and emitter. -const ( - // No error is produced. - yaml_NO_ERROR yaml_error_type_t = iota - - yaml_MEMORY_ERROR // Cannot allocate or reallocate a block of memory. - yaml_READER_ERROR // Cannot read or decode the input stream. - yaml_SCANNER_ERROR // Cannot scan the input stream. - yaml_PARSER_ERROR // Cannot parse the input stream. - yaml_COMPOSER_ERROR // Cannot compose a YAML document. - yaml_WRITER_ERROR // Cannot write to the output stream. - yaml_EMITTER_ERROR // Cannot emit a YAML stream. -) - -// The pointer position. -type yaml_mark_t struct { - index int // The position index. - line int // The position line. - column int // The position column. 
-} - -// Node Styles - -type yaml_style_t int8 - -type yaml_scalar_style_t yaml_style_t - -// Scalar styles. -const ( - // Let the emitter choose the style. - yaml_ANY_SCALAR_STYLE yaml_scalar_style_t = iota - - yaml_PLAIN_SCALAR_STYLE // The plain scalar style. - yaml_SINGLE_QUOTED_SCALAR_STYLE // The single-quoted scalar style. - yaml_DOUBLE_QUOTED_SCALAR_STYLE // The double-quoted scalar style. - yaml_LITERAL_SCALAR_STYLE // The literal scalar style. - yaml_FOLDED_SCALAR_STYLE // The folded scalar style. -) - -type yaml_sequence_style_t yaml_style_t - -// Sequence styles. -const ( - // Let the emitter choose the style. - yaml_ANY_SEQUENCE_STYLE yaml_sequence_style_t = iota - - yaml_BLOCK_SEQUENCE_STYLE // The block sequence style. - yaml_FLOW_SEQUENCE_STYLE // The flow sequence style. -) - -type yaml_mapping_style_t yaml_style_t - -// Mapping styles. -const ( - // Let the emitter choose the style. - yaml_ANY_MAPPING_STYLE yaml_mapping_style_t = iota - - yaml_BLOCK_MAPPING_STYLE // The block mapping style. - yaml_FLOW_MAPPING_STYLE // The flow mapping style. -) - -// Tokens - -type yaml_token_type_t int - -// Token types. -const ( - // An empty token. - yaml_NO_TOKEN yaml_token_type_t = iota - - yaml_STREAM_START_TOKEN // A STREAM-START token. - yaml_STREAM_END_TOKEN // A STREAM-END token. - - yaml_VERSION_DIRECTIVE_TOKEN // A VERSION-DIRECTIVE token. - yaml_TAG_DIRECTIVE_TOKEN // A TAG-DIRECTIVE token. - yaml_DOCUMENT_START_TOKEN // A DOCUMENT-START token. - yaml_DOCUMENT_END_TOKEN // A DOCUMENT-END token. - - yaml_BLOCK_SEQUENCE_START_TOKEN // A BLOCK-SEQUENCE-START token. - yaml_BLOCK_MAPPING_START_TOKEN // A BLOCK-SEQUENCE-END token. - yaml_BLOCK_END_TOKEN // A BLOCK-END token. - - yaml_FLOW_SEQUENCE_START_TOKEN // A FLOW-SEQUENCE-START token. - yaml_FLOW_SEQUENCE_END_TOKEN // A FLOW-SEQUENCE-END token. - yaml_FLOW_MAPPING_START_TOKEN // A FLOW-MAPPING-START token. - yaml_FLOW_MAPPING_END_TOKEN // A FLOW-MAPPING-END token. - - yaml_BLOCK_ENTRY_TOKEN // A BLOCK-ENTRY token. - yaml_FLOW_ENTRY_TOKEN // A FLOW-ENTRY token. - yaml_KEY_TOKEN // A KEY token. - yaml_VALUE_TOKEN // A VALUE token. - - yaml_ALIAS_TOKEN // An ALIAS token. - yaml_ANCHOR_TOKEN // An ANCHOR token. - yaml_TAG_TOKEN // A TAG token. - yaml_SCALAR_TOKEN // A SCALAR token. 
-) - -func (tt yaml_token_type_t) String() string { - switch tt { - case yaml_NO_TOKEN: - return "yaml_NO_TOKEN" - case yaml_STREAM_START_TOKEN: - return "yaml_STREAM_START_TOKEN" - case yaml_STREAM_END_TOKEN: - return "yaml_STREAM_END_TOKEN" - case yaml_VERSION_DIRECTIVE_TOKEN: - return "yaml_VERSION_DIRECTIVE_TOKEN" - case yaml_TAG_DIRECTIVE_TOKEN: - return "yaml_TAG_DIRECTIVE_TOKEN" - case yaml_DOCUMENT_START_TOKEN: - return "yaml_DOCUMENT_START_TOKEN" - case yaml_DOCUMENT_END_TOKEN: - return "yaml_DOCUMENT_END_TOKEN" - case yaml_BLOCK_SEQUENCE_START_TOKEN: - return "yaml_BLOCK_SEQUENCE_START_TOKEN" - case yaml_BLOCK_MAPPING_START_TOKEN: - return "yaml_BLOCK_MAPPING_START_TOKEN" - case yaml_BLOCK_END_TOKEN: - return "yaml_BLOCK_END_TOKEN" - case yaml_FLOW_SEQUENCE_START_TOKEN: - return "yaml_FLOW_SEQUENCE_START_TOKEN" - case yaml_FLOW_SEQUENCE_END_TOKEN: - return "yaml_FLOW_SEQUENCE_END_TOKEN" - case yaml_FLOW_MAPPING_START_TOKEN: - return "yaml_FLOW_MAPPING_START_TOKEN" - case yaml_FLOW_MAPPING_END_TOKEN: - return "yaml_FLOW_MAPPING_END_TOKEN" - case yaml_BLOCK_ENTRY_TOKEN: - return "yaml_BLOCK_ENTRY_TOKEN" - case yaml_FLOW_ENTRY_TOKEN: - return "yaml_FLOW_ENTRY_TOKEN" - case yaml_KEY_TOKEN: - return "yaml_KEY_TOKEN" - case yaml_VALUE_TOKEN: - return "yaml_VALUE_TOKEN" - case yaml_ALIAS_TOKEN: - return "yaml_ALIAS_TOKEN" - case yaml_ANCHOR_TOKEN: - return "yaml_ANCHOR_TOKEN" - case yaml_TAG_TOKEN: - return "yaml_TAG_TOKEN" - case yaml_SCALAR_TOKEN: - return "yaml_SCALAR_TOKEN" - } - return "" -} - -// The token structure. -type yaml_token_t struct { - // The token type. - typ yaml_token_type_t - - // The start/end of the token. - start_mark, end_mark yaml_mark_t - - // The stream encoding (for yaml_STREAM_START_TOKEN). - encoding yaml_encoding_t - - // The alias/anchor/scalar value or tag/tag directive handle - // (for yaml_ALIAS_TOKEN, yaml_ANCHOR_TOKEN, yaml_SCALAR_TOKEN, yaml_TAG_TOKEN, yaml_TAG_DIRECTIVE_TOKEN). - value []byte - - // The tag suffix (for yaml_TAG_TOKEN). - suffix []byte - - // The tag directive prefix (for yaml_TAG_DIRECTIVE_TOKEN). - prefix []byte - - // The scalar style (for yaml_SCALAR_TOKEN). - style yaml_scalar_style_t - - // The version directive major/minor (for yaml_VERSION_DIRECTIVE_TOKEN). - major, minor int8 -} - -// Events - -type yaml_event_type_t int8 - -// Event types. -const ( - // An empty event. - yaml_NO_EVENT yaml_event_type_t = iota - - yaml_STREAM_START_EVENT // A STREAM-START event. - yaml_STREAM_END_EVENT // A STREAM-END event. - yaml_DOCUMENT_START_EVENT // A DOCUMENT-START event. - yaml_DOCUMENT_END_EVENT // A DOCUMENT-END event. - yaml_ALIAS_EVENT // An ALIAS event. - yaml_SCALAR_EVENT // A SCALAR event. - yaml_SEQUENCE_START_EVENT // A SEQUENCE-START event. - yaml_SEQUENCE_END_EVENT // A SEQUENCE-END event. - yaml_MAPPING_START_EVENT // A MAPPING-START event. - yaml_MAPPING_END_EVENT // A MAPPING-END event. -) - -// The event structure. -type yaml_event_t struct { - - // The event type. - typ yaml_event_type_t - - // The start and end of the event. - start_mark, end_mark yaml_mark_t - - // The document encoding (for yaml_STREAM_START_EVENT). - encoding yaml_encoding_t - - // The version directive (for yaml_DOCUMENT_START_EVENT). - version_directive *yaml_version_directive_t - - // The list of tag directives (for yaml_DOCUMENT_START_EVENT). - tag_directives []yaml_tag_directive_t - - // The anchor (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_ALIAS_EVENT). 
- anchor []byte - - // The tag (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT). - tag []byte - - // The scalar value (for yaml_SCALAR_EVENT). - value []byte - - // Is the document start/end indicator implicit, or the tag optional? - // (for yaml_DOCUMENT_START_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_SCALAR_EVENT). - implicit bool - - // Is the tag optional for any non-plain style? (for yaml_SCALAR_EVENT). - quoted_implicit bool - - // The style (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT). - style yaml_style_t -} - -func (e *yaml_event_t) scalar_style() yaml_scalar_style_t { return yaml_scalar_style_t(e.style) } -func (e *yaml_event_t) sequence_style() yaml_sequence_style_t { return yaml_sequence_style_t(e.style) } -func (e *yaml_event_t) mapping_style() yaml_mapping_style_t { return yaml_mapping_style_t(e.style) } - -// Nodes - -const ( - yaml_NULL_TAG = "tag:yaml.org,2002:null" // The tag !!null with the only possible value: null. - yaml_BOOL_TAG = "tag:yaml.org,2002:bool" // The tag !!bool with the values: true and false. - yaml_STR_TAG = "tag:yaml.org,2002:str" // The tag !!str for string values. - yaml_INT_TAG = "tag:yaml.org,2002:int" // The tag !!int for integer values. - yaml_FLOAT_TAG = "tag:yaml.org,2002:float" // The tag !!float for float values. - yaml_TIMESTAMP_TAG = "tag:yaml.org,2002:timestamp" // The tag !!timestamp for date and time values. - - yaml_SEQ_TAG = "tag:yaml.org,2002:seq" // The tag !!seq is used to denote sequences. - yaml_MAP_TAG = "tag:yaml.org,2002:map" // The tag !!map is used to denote mapping. - - // Not in original libyaml. - yaml_BINARY_TAG = "tag:yaml.org,2002:binary" - yaml_MERGE_TAG = "tag:yaml.org,2002:merge" - - yaml_DEFAULT_SCALAR_TAG = yaml_STR_TAG // The default scalar tag is !!str. - yaml_DEFAULT_SEQUENCE_TAG = yaml_SEQ_TAG // The default sequence tag is !!seq. - yaml_DEFAULT_MAPPING_TAG = yaml_MAP_TAG // The default mapping tag is !!map. -) - -type yaml_node_type_t int - -// Node types. -const ( - // An empty node. - yaml_NO_NODE yaml_node_type_t = iota - - yaml_SCALAR_NODE // A scalar node. - yaml_SEQUENCE_NODE // A sequence node. - yaml_MAPPING_NODE // A mapping node. -) - -// An element of a sequence node. -type yaml_node_item_t int - -// An element of a mapping node. -type yaml_node_pair_t struct { - key int // The key of the element. - value int // The value of the element. -} - -// The node structure. -type yaml_node_t struct { - typ yaml_node_type_t // The node type. - tag []byte // The node tag. - - // The node data. - - // The scalar parameters (for yaml_SCALAR_NODE). - scalar struct { - value []byte // The scalar value. - length int // The length of the scalar value. - style yaml_scalar_style_t // The scalar style. - } - - // The sequence parameters (for YAML_SEQUENCE_NODE). - sequence struct { - items_data []yaml_node_item_t // The stack of sequence items. - style yaml_sequence_style_t // The sequence style. - } - - // The mapping parameters (for yaml_MAPPING_NODE). - mapping struct { - pairs_data []yaml_node_pair_t // The stack of mapping pairs (key, value). - pairs_start *yaml_node_pair_t // The beginning of the stack. - pairs_end *yaml_node_pair_t // The end of the stack. - pairs_top *yaml_node_pair_t // The top of the stack. - style yaml_mapping_style_t // The mapping style. - } - - start_mark yaml_mark_t // The beginning of the node. - end_mark yaml_mark_t // The end of the node. - -} - -// The document structure. 
-type yaml_document_t struct { - - // The document nodes. - nodes []yaml_node_t - - // The version directive. - version_directive *yaml_version_directive_t - - // The list of tag directives. - tag_directives_data []yaml_tag_directive_t - tag_directives_start int // The beginning of the tag directives list. - tag_directives_end int // The end of the tag directives list. - - start_implicit int // Is the document start indicator implicit? - end_implicit int // Is the document end indicator implicit? - - // The start/end of the document. - start_mark, end_mark yaml_mark_t -} - -// The prototype of a read handler. -// -// The read handler is called when the parser needs to read more bytes from the -// source. The handler should write not more than size bytes to the buffer. -// The number of written bytes should be set to the size_read variable. -// -// [in,out] data A pointer to an application data specified by -// yaml_parser_set_input(). -// [out] buffer The buffer to write the data from the source. -// [in] size The size of the buffer. -// [out] size_read The actual number of bytes read from the source. -// -// On success, the handler should return 1. If the handler failed, -// the returned value should be 0. On EOF, the handler should set the -// size_read to 0 and return 1. -type yaml_read_handler_t func(parser *yaml_parser_t, buffer []byte) (n int, err error) - -// This structure holds information about a potential simple key. -type yaml_simple_key_t struct { - possible bool // Is a simple key possible? - required bool // Is a simple key required? - token_number int // The number of the token. - mark yaml_mark_t // The position mark. -} - -// The states of the parser. -type yaml_parser_state_t int - -const ( - yaml_PARSE_STREAM_START_STATE yaml_parser_state_t = iota - - yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE // Expect the beginning of an implicit document. - yaml_PARSE_DOCUMENT_START_STATE // Expect DOCUMENT-START. - yaml_PARSE_DOCUMENT_CONTENT_STATE // Expect the content of a document. - yaml_PARSE_DOCUMENT_END_STATE // Expect DOCUMENT-END. - yaml_PARSE_BLOCK_NODE_STATE // Expect a block node. - yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE // Expect a block node or indentless sequence. - yaml_PARSE_FLOW_NODE_STATE // Expect a flow node. - yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE // Expect the first entry of a block sequence. - yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE // Expect an entry of a block sequence. - yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE // Expect an entry of an indentless sequence. - yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping. - yaml_PARSE_BLOCK_MAPPING_KEY_STATE // Expect a block mapping key. - yaml_PARSE_BLOCK_MAPPING_VALUE_STATE // Expect a block mapping value. - yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE // Expect the first entry of a flow sequence. - yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE // Expect an entry of a flow sequence. - yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE // Expect a key of an ordered mapping. - yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE // Expect a value of an ordered mapping. - yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE // Expect the and of an ordered mapping entry. - yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping. - yaml_PARSE_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping. - yaml_PARSE_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping. - yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE // Expect an empty value of a flow mapping. 
- yaml_PARSE_END_STATE // Expect nothing. -) - -func (ps yaml_parser_state_t) String() string { - switch ps { - case yaml_PARSE_STREAM_START_STATE: - return "yaml_PARSE_STREAM_START_STATE" - case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE: - return "yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE" - case yaml_PARSE_DOCUMENT_START_STATE: - return "yaml_PARSE_DOCUMENT_START_STATE" - case yaml_PARSE_DOCUMENT_CONTENT_STATE: - return "yaml_PARSE_DOCUMENT_CONTENT_STATE" - case yaml_PARSE_DOCUMENT_END_STATE: - return "yaml_PARSE_DOCUMENT_END_STATE" - case yaml_PARSE_BLOCK_NODE_STATE: - return "yaml_PARSE_BLOCK_NODE_STATE" - case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE: - return "yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE" - case yaml_PARSE_FLOW_NODE_STATE: - return "yaml_PARSE_FLOW_NODE_STATE" - case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE: - return "yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE" - case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE: - return "yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE" - case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE: - return "yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE" - case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE: - return "yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE" - case yaml_PARSE_BLOCK_MAPPING_KEY_STATE: - return "yaml_PARSE_BLOCK_MAPPING_KEY_STATE" - case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE: - return "yaml_PARSE_BLOCK_MAPPING_VALUE_STATE" - case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE: - return "yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE" - case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE: - return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE" - case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE: - return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE" - case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE: - return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE" - case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE: - return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE" - case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE: - return "yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE" - case yaml_PARSE_FLOW_MAPPING_KEY_STATE: - return "yaml_PARSE_FLOW_MAPPING_KEY_STATE" - case yaml_PARSE_FLOW_MAPPING_VALUE_STATE: - return "yaml_PARSE_FLOW_MAPPING_VALUE_STATE" - case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE: - return "yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE" - case yaml_PARSE_END_STATE: - return "yaml_PARSE_END_STATE" - } - return "" -} - -// This structure holds aliases data. -type yaml_alias_data_t struct { - anchor []byte // The anchor. - index int // The node id. - mark yaml_mark_t // The anchor mark. -} - -// The parser structure. -// -// All members are internal. Manage the structure using the -// yaml_parser_ family of functions. -type yaml_parser_t struct { - - // Error handling - - error yaml_error_type_t // Error type. - - problem string // Error description. - - // The byte about which the problem occured. - problem_offset int - problem_value int - problem_mark yaml_mark_t - - // The error context. - context string - context_mark yaml_mark_t - - // Reader stuff - - read_handler yaml_read_handler_t // Read handler. - - input_file io.Reader // File input data. - input []byte // String input data. - input_pos int - - eof bool // EOF flag - - buffer []byte // The working buffer. - buffer_pos int // The current position of the buffer. - - unread int // The number of unread characters in the buffer. - - raw_buffer []byte // The raw buffer. - raw_buffer_pos int // The current position of the buffer. - - encoding yaml_encoding_t // The input encoding. 
- - offset int // The offset of the current position (in bytes). - mark yaml_mark_t // The mark of the current position. - - // Scanner stuff - - stream_start_produced bool // Have we started to scan the input stream? - stream_end_produced bool // Have we reached the end of the input stream? - - flow_level int // The number of unclosed '[' and '{' indicators. - - tokens []yaml_token_t // The tokens queue. - tokens_head int // The head of the tokens queue. - tokens_parsed int // The number of tokens fetched from the queue. - token_available bool // Does the tokens queue contain a token ready for dequeueing. - - indent int // The current indentation level. - indents []int // The indentation levels stack. - - simple_key_allowed bool // May a simple key occur at the current position? - simple_keys []yaml_simple_key_t // The stack of simple keys. - - // Parser stuff - - state yaml_parser_state_t // The current parser state. - states []yaml_parser_state_t // The parser states stack. - marks []yaml_mark_t // The stack of marks. - tag_directives []yaml_tag_directive_t // The list of TAG directives. - - // Dumper stuff - - aliases []yaml_alias_data_t // The alias data. - - document *yaml_document_t // The currently parsed document. -} - -// Emitter Definitions - -// The prototype of a write handler. -// -// The write handler is called when the emitter needs to flush the accumulated -// characters to the output. The handler should write @a size bytes of the -// @a buffer to the output. -// -// @param[in,out] data A pointer to an application data specified by -// yaml_emitter_set_output(). -// @param[in] buffer The buffer with bytes to be written. -// @param[in] size The size of the buffer. -// -// @returns On success, the handler should return @c 1. If the handler failed, -// the returned value should be @c 0. -// -type yaml_write_handler_t func(emitter *yaml_emitter_t, buffer []byte) error - -type yaml_emitter_state_t int - -// The emitter states. -const ( - // Expect STREAM-START. - yaml_EMIT_STREAM_START_STATE yaml_emitter_state_t = iota - - yaml_EMIT_FIRST_DOCUMENT_START_STATE // Expect the first DOCUMENT-START or STREAM-END. - yaml_EMIT_DOCUMENT_START_STATE // Expect DOCUMENT-START or STREAM-END. - yaml_EMIT_DOCUMENT_CONTENT_STATE // Expect the content of a document. - yaml_EMIT_DOCUMENT_END_STATE // Expect DOCUMENT-END. - yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a flow sequence. - yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE // Expect an item of a flow sequence. - yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping. - yaml_EMIT_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping. - yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a flow mapping. - yaml_EMIT_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping. - yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a block sequence. - yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE // Expect an item of a block sequence. - yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping. - yaml_EMIT_BLOCK_MAPPING_KEY_STATE // Expect the key of a block mapping. - yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a block mapping. - yaml_EMIT_BLOCK_MAPPING_VALUE_STATE // Expect a value of a block mapping. - yaml_EMIT_END_STATE // Expect nothing. -) - -// The emitter structure. -// -// All members are internal. Manage the structure using the @c yaml_emitter_ -// family of functions. 
-type yaml_emitter_t struct { - - // Error handling - - error yaml_error_type_t // Error type. - problem string // Error description. - - // Writer stuff - - write_handler yaml_write_handler_t // Write handler. - - output_buffer *[]byte // String output data. - output_file io.Writer // File output data. - - buffer []byte // The working buffer. - buffer_pos int // The current position of the buffer. - - raw_buffer []byte // The raw buffer. - raw_buffer_pos int // The current position of the buffer. - - encoding yaml_encoding_t // The stream encoding. - - // Emitter stuff - - canonical bool // If the output is in the canonical style? - best_indent int // The number of indentation spaces. - best_width int // The preferred width of the output lines. - unicode bool // Allow unescaped non-ASCII characters? - line_break yaml_break_t // The preferred line break. - - state yaml_emitter_state_t // The current emitter state. - states []yaml_emitter_state_t // The stack of states. - - events []yaml_event_t // The event queue. - events_head int // The head of the event queue. - - indents []int // The stack of indentation levels. - - tag_directives []yaml_tag_directive_t // The list of tag directives. - - indent int // The current indentation level. - - flow_level int // The current flow level. - - root_context bool // Is it the document root context? - sequence_context bool // Is it a sequence context? - mapping_context bool // Is it a mapping context? - simple_key_context bool // Is it a simple mapping key context? - - line int // The current line. - column int // The current column. - whitespace bool // If the last character was a whitespace? - indention bool // If the last character was an indentation character (' ', '-', '?', ':')? - open_ended bool // If an explicit document end is required? - - // Anchor analysis. - anchor_data struct { - anchor []byte // The anchor value. - alias bool // Is it an alias? - } - - // Tag analysis. - tag_data struct { - handle []byte // The tag handle. - suffix []byte // The tag suffix. - } - - // Scalar analysis. - scalar_data struct { - value []byte // The scalar value. - multiline bool // Does the scalar contain line breaks? - flow_plain_allowed bool // Can the scalar be expessed in the flow plain style? - block_plain_allowed bool // Can the scalar be expressed in the block plain style? - single_quoted_allowed bool // Can the scalar be expressed in the single quoted style? - block_allowed bool // Can the scalar be expressed in the literal or folded styles? - style yaml_scalar_style_t // The output style. - } - - // Dumper stuff - - opened bool // If the stream was already opened? - closed bool // If the stream was already closed? - - // The information associated with the document nodes. - anchors *struct { - references int // The number of references. - anchor int // The anchor id. - serialized bool // If the node has been emitted? - } - - last_anchor_id int // The last assigned anchor id. - - document *yaml_document_t // The currently emitted document. -} diff --git a/vendor/gopkg.in/yaml.v2/yamlprivateh.go b/vendor/gopkg.in/yaml.v2/yamlprivateh.go deleted file mode 100644 index 8110ce3..0000000 --- a/vendor/gopkg.in/yaml.v2/yamlprivateh.go +++ /dev/null @@ -1,173 +0,0 @@ -package yaml - -const ( - // The size of the input raw buffer. - input_raw_buffer_size = 512 - - // The size of the input buffer. - // It should be possible to decode the whole raw buffer. - input_buffer_size = input_raw_buffer_size * 3 - - // The size of the output buffer. 
- output_buffer_size = 128 - - // The size of the output raw buffer. - // It should be possible to encode the whole output buffer. - output_raw_buffer_size = (output_buffer_size*2 + 2) - - // The size of other stacks and queues. - initial_stack_size = 16 - initial_queue_size = 16 - initial_string_size = 16 -) - -// Check if the character at the specified position is an alphabetical -// character, a digit, '_', or '-'. -func is_alpha(b []byte, i int) bool { - return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'Z' || b[i] >= 'a' && b[i] <= 'z' || b[i] == '_' || b[i] == '-' -} - -// Check if the character at the specified position is a digit. -func is_digit(b []byte, i int) bool { - return b[i] >= '0' && b[i] <= '9' -} - -// Get the value of a digit. -func as_digit(b []byte, i int) int { - return int(b[i]) - '0' -} - -// Check if the character at the specified position is a hex-digit. -func is_hex(b []byte, i int) bool { - return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'F' || b[i] >= 'a' && b[i] <= 'f' -} - -// Get the value of a hex-digit. -func as_hex(b []byte, i int) int { - bi := b[i] - if bi >= 'A' && bi <= 'F' { - return int(bi) - 'A' + 10 - } - if bi >= 'a' && bi <= 'f' { - return int(bi) - 'a' + 10 - } - return int(bi) - '0' -} - -// Check if the character is ASCII. -func is_ascii(b []byte, i int) bool { - return b[i] <= 0x7F -} - -// Check if the character at the start of the buffer can be printed unescaped. -func is_printable(b []byte, i int) bool { - return ((b[i] == 0x0A) || // . == #x0A - (b[i] >= 0x20 && b[i] <= 0x7E) || // #x20 <= . <= #x7E - (b[i] == 0xC2 && b[i+1] >= 0xA0) || // #0xA0 <= . <= #xD7FF - (b[i] > 0xC2 && b[i] < 0xED) || - (b[i] == 0xED && b[i+1] < 0xA0) || - (b[i] == 0xEE) || - (b[i] == 0xEF && // #xE000 <= . <= #xFFFD - !(b[i+1] == 0xBB && b[i+2] == 0xBF) && // && . != #xFEFF - !(b[i+1] == 0xBF && (b[i+2] == 0xBE || b[i+2] == 0xBF)))) -} - -// Check if the character at the specified position is NUL. -func is_z(b []byte, i int) bool { - return b[i] == 0x00 -} - -// Check if the beginning of the buffer is a BOM. -func is_bom(b []byte, i int) bool { - return b[0] == 0xEF && b[1] == 0xBB && b[2] == 0xBF -} - -// Check if the character at the specified position is space. -func is_space(b []byte, i int) bool { - return b[i] == ' ' -} - -// Check if the character at the specified position is tab. -func is_tab(b []byte, i int) bool { - return b[i] == '\t' -} - -// Check if the character at the specified position is blank (space or tab). -func is_blank(b []byte, i int) bool { - //return is_space(b, i) || is_tab(b, i) - return b[i] == ' ' || b[i] == '\t' -} - -// Check if the character at the specified position is a line break. -func is_break(b []byte, i int) bool { - return (b[i] == '\r' || // CR (#xD) - b[i] == '\n' || // LF (#xA) - b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) - b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) - b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9) // PS (#x2029) -} - -func is_crlf(b []byte, i int) bool { - return b[i] == '\r' && b[i+1] == '\n' -} - -// Check if the character is a line break or NUL. 
-func is_breakz(b []byte, i int) bool { - //return is_break(b, i) || is_z(b, i) - return ( // is_break: - b[i] == '\r' || // CR (#xD) - b[i] == '\n' || // LF (#xA) - b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) - b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) - b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) - // is_z: - b[i] == 0) -} - -// Check if the character is a line break, space, or NUL. -func is_spacez(b []byte, i int) bool { - //return is_space(b, i) || is_breakz(b, i) - return ( // is_space: - b[i] == ' ' || - // is_breakz: - b[i] == '\r' || // CR (#xD) - b[i] == '\n' || // LF (#xA) - b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) - b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) - b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) - b[i] == 0) -} - -// Check if the character is a line break, space, tab, or NUL. -func is_blankz(b []byte, i int) bool { - //return is_blank(b, i) || is_breakz(b, i) - return ( // is_blank: - b[i] == ' ' || b[i] == '\t' || - // is_breakz: - b[i] == '\r' || // CR (#xD) - b[i] == '\n' || // LF (#xA) - b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) - b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) - b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) - b[i] == 0) -} - -// Determine the width of the character. -func width(b byte) int { - // Don't replace these by a switch without first - // confirming that it is being inlined. - if b&0x80 == 0x00 { - return 1 - } - if b&0xE0 == 0xC0 { - return 2 - } - if b&0xF0 == 0xE0 { - return 3 - } - if b&0xF8 == 0xF0 { - return 4 - } - return 0 - -} diff --git a/vendor/vendor.json b/vendor/vendor.json deleted file mode 100644 index 24c004d..0000000 --- a/vendor/vendor.json +++ /dev/null @@ -1,176 +0,0 @@ -{ - "comment": "", - "ignore": "test", - "package": [ - { - "path": "github.com/BurntSushi/toml", - "revision": "056c9bc7be7190eaa7715723883caffa5f8fa3e4", - "revisionTime": "2015-05-01T06:40:42-04:00" - }, - { - "path": "github.com/Sirupsen/logrus", - "revision": "2cea0f0d141f56fae06df5b813ec4119d1c8ccbd", - "revisionTime": "2015-03-09T11:58:39-04:00" - }, - { - "path": "github.com/dgrijalva/jwt-go", - "revision": "c48cfd5d9711c75acb6036d2698ef3aef7bb655a", - "revisionTime": "2015-04-01T11:06:36-07:00" - }, - { - "path": "github.com/elazarl/go-bindata-assetfs", - "revision": "bea323321994103859d60197d229f1a94699dde3", - "revisionTime": "2015-04-14T21:44:09+03:00" - }, - { - "path": "github.com/franela/goblin", - "revision": "2042c4d610d2da4e4d2afd754da0b4eef31753bb", - "revisionTime": "2015-01-11T21:09:40-03:00" - }, - { - "path": "github.com/gin-gonic/contrib/ginrus", - "revision": "14f66d54cdb96059bafca98665bcc6d9df4951f2", - "revisionTime": "2015-08-15T19:25:43+02:00" - }, - { - "path": "github.com/gin-gonic/gin", - "revision": "5caaac4c5c712a9e7a7de29e6c24ef46c753017f", - "revisionTime": "2016-04-15T01:39:28+02:00" - }, - { - "path": "github.com/gin-gonic/gin/binding", - "revision": "5caaac4c5c712a9e7a7de29e6c24ef46c753017f", - "revisionTime": "2016-04-15T01:39:28+02:00" - }, - { - "path": "github.com/gin-gonic/gin/render", - "revision": "5caaac4c5c712a9e7a7de29e6c24ef46c753017f", - "revisionTime": "2016-04-15T01:39:28+02:00" - }, - { - "path": "github.com/go-sql-driver/mysql", - "revision": "d512f204a577a4ab037a1816604c48c9c13210be", - "revisionTime": "2015-11-12T17:33:55+01:00" - }, - { - "path": "github.com/golang/protobuf/proto", - "revision": "2ebff28ac76fb19e2d25e5ddd4885708dfdd5611", - 
"revisionTime": "2016-04-18T12:44:00+10:00" - }, - { - "path": "github.com/google/go-github/github", - "revision": "47f2593dad1971eec2f0a0971aa007fef5edbc50", - "revisionTime": "2015-11-20T13:11:25-08:00" - }, - { - "path": "github.com/google/go-querystring/query", - "revision": "2a60fc2ba6c19de80291203597d752e9ba58e4c0", - "revisionTime": "2015-10-28T14:53:53-04:00" - }, - { - "path": "github.com/ianschenck/envflag", - "revision": "9111d830d133f952887a936367fb0211c3134f0d", - "revisionTime": "2014-07-20T15:03:42-06:00" - }, - { - "path": "github.com/joho/godotenv", - "revision": "4ed13390c0acd2ff4e371e64d8b97c8954138243", - "revisionTime": "2015-09-07T11:02:28+10:00" - }, - { - "path": "github.com/joho/godotenv/autoload", - "revision": "4ed13390c0acd2ff4e371e64d8b97c8954138243", - "revisionTime": "2015-09-07T11:02:28+10:00" - }, - { - "path": "github.com/koding/cache", - "revision": "487fc0ca06f9aa1a02d796f5510784b47d5afae2", - "revisionTime": "2014-09-12T11:56:02+03:00" - }, - { - "path": "github.com/manucorporat/sse", - "revision": "fe6ea2c8e398672518ef204bf0fbd9af858d0e15", - "revisionTime": "2015-07-15T20:48:05+02:00" - }, - { - "path": "github.com/mattn/go-colorable", - "revision": "3dac7b4f76f6e17fb39b768b89e3783d16e237fe", - "revisionTime": "2015-11-27T10:45:55+09:00" - }, - { - "path": "github.com/mattn/go-isatty", - "revision": "7fcbc72f853b92b5720db4a6b8482be612daef24", - "revisionTime": "2015-08-14T09:26:29+09:00" - }, - { - "path": "github.com/mattn/go-sqlite3", - "revision": "542ae647f8601bafd96233961b150cae198e0295", - "revisionTime": "2015-04-28T08:57:25+09:00" - }, - { - "path": "github.com/rubenv/sql-migrate", - "revision": "53184e1edfb4f9655b0fa8dd2c23e7763f452bda", - "revisionTime": "2015-07-13T16:07:51+02:00" - }, - { - "path": "github.com/rubenv/sql-migrate/sqlparse", - "revision": "53184e1edfb4f9655b0fa8dd2c23e7763f452bda", - "revisionTime": "2015-07-13T16:07:51+02:00" - }, - { - "path": "github.com/russross/meddler", - "revision": "cd98050d9328eb98ed57de34c74660dea3ca5df4", - "revisionTime": "2015-01-03T13:11:39-07:00" - }, - { - "path": "github.com/stretchr/objx", - "revision": "cbeaeb16a013161a98496fad62933b1d21786672", - "revisionTime": "2014-05-26T12:09:21-06:00" - }, - { - "path": "github.com/stretchr/testify/assert", - "revision": "73a8112e05603706e47153d5cbb86aac8d85adb0", - "revisionTime": "2015-05-04T23:00:33+01:00" - }, - { - "path": "github.com/stretchr/testify/mock", - "revision": "73a8112e05603706e47153d5cbb86aac8d85adb0", - "revisionTime": "2015-05-04T23:00:33+01:00" - }, - { - "path": "golang.org/x/net/context", - "revision": "62ac18b461605b4be188bbc7300e9aa2bc836cd4", - "revisionTime": "2015-11-04T16:49:45+09:00" - }, - { - "path": "golang.org/x/oauth2", - "revision": "442624c9ec9243441e83b374a9e22ac549b5c51d", - "revisionTime": "2015-11-16T13:49:40-08:00" - }, - { - "path": "golang.org/x/oauth2/internal", - "revision": "442624c9ec9243441e83b374a9e22ac549b5c51d", - "revisionTime": "2015-11-16T13:49:40-08:00" - }, - { - "path": "gopkg.in/bluesuncorp/validator.v5", - "revision": "d5acf1dac43705f8bfbb71d878e290e2bed3950b", - "revisionTime": "2015-09-24T07:44:28-04:00" - }, - { - "path": "gopkg.in/go-playground/validator.v8", - "revision": "014792cf3e266caff1e916876be12282b33059e0", - "revisionTime": "2016-02-23T15:26:51-05:00" - }, - { - "path": "gopkg.in/gorp.v1", - "revision": "c87af80f3cc5036b55b83d77171e156791085e2e", - "revisionTime": "2015-02-04T09:55:30+01:00" - }, - { - "path": "gopkg.in/yaml.v2", - "revision": 
"a83829b6f1293c91addabc89d0571c246397bbf4", - "revisionTime": "2016-03-01T17:40:22-03:00" - } - ] -} diff --git a/version/version.go b/version/version.go index d7f1eb8..4a07d07 100644 --- a/version/version.go +++ b/version/version.go @@ -1,19 +1,3 @@ package version -import "fmt" - -var ( - // VersionMajor is for an API incompatible changes - VersionMajor = 1 - // VersionMinor is for functionality in a backwards-compatible manner - VersionMinor = 0 - // VersionPatch is for backwards-compatible bug fixes - VersionPatch = 0 - - // VersionDev indicates development branch. Releases will be empty string. - VersionDev = "dev" -) - -// Version is the specification version that the package types support. -var Version = fmt.Sprintf("%d.%d.%d+%s", - VersionMajor, VersionMinor, VersionPatch, VersionDev) +var Version = "dev" diff --git a/web/approval.go b/web/approval.go new file mode 100644 index 0000000..d2699aa --- /dev/null +++ b/web/approval.go @@ -0,0 +1,336 @@ +/* + +SPDX-Copyright: Copyright (c) Capital One Services, LLC +SPDX-License-Identifier: Apache-2.0 +Copyright 2017 Capital One Services, LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and limitations under the License. + +*/ +package web + +import ( + "context" + "fmt" + "regexp" + "strconv" + "strings" + + "github.com/capitalone/checks-out/envvars" + "github.com/capitalone/checks-out/logstats" + "github.com/capitalone/checks-out/model" + "github.com/capitalone/checks-out/remote" + "github.com/capitalone/checks-out/set" + + log "github.com/Sirupsen/logrus" +) + +// https://help.github.com/articles/closing-issues-via-commit-messages/ +var closePullRequest = regexp.MustCompile(`(closes|closed|close|fixes|fixed|fix|resolves|resolved|resolve) #(\d+)`) + +type CurCommentStatus int + +const ( + CurCommentOther CurCommentStatus = iota + CurCommentNoChange + CurCommentApproval + CurCommentDisapproval + CurCommentPRAuthor + CurCommentPRTitle + CurCommentPRAudit +) + +type CurCommentInfo struct { + Status CurCommentStatus + Author string +} + +type ApprovalInfo struct { + Policy *model.ApprovalPolicy + Approved bool + AuthorApproved bool + TitleApproved bool + AuditApproved bool + Approvers set.Set + Disapprovers set.Set + CurCommentInfo +} + +type PullRequestFeedback struct { + All []model.Feedback + Approval []model.Feedback + Disapproval []model.Feedback +} + +func approve(c context.Context, params HookParams, id int, setStatus bool) (*ApprovalInfo, error) { + user := params.User + repo := params.Repo + + pullRequest, err := remote.GetPullRequest(c, user, repo, id) + if err != nil { + return nil, err + } + + return approvePullRequest(c, params, id, &pullRequest, setStatus) +} + +func approvePullRequest(c context.Context, params HookParams, id int, pullRequest *model.PullRequest, setStatus bool) (*ApprovalInfo, error) { + user := params.User + repo := params.Repo + config := params.Config + maintainer := params.Snapshot + + request := model.ApprovalRequest{ + Repository: repo, + Config: config, + Maintainer: maintainer, + PullRequest: pullRequest, + } + + approval, err := buildApprovers(c, user, 
&request) + if err != nil { + return nil, err + } + + if setStatus { + status, desc := generateStatus(approval) + + err = remote.SetStatus(c, user, repo, pullRequest.Branch.CompareSHA, model.ServiceName, status, desc) + if err != nil { + return nil, err + } + + recordStats(approval, repo, id) + } + + log.Debugf("processed comment for %s. received %d approvals and %d disapprovals", + repo.Slug, len(approval.Approvers), len(approval.Disapprovers)) + + return approval, nil + +} + +func getFeedbackRanges(c context.Context, + user *model.User, + request *model.ApprovalRequest, + policy *model.ApprovalPolicy) (PullRequestFeedback, error) { + var err error + config := request.Config + fb := PullRequestFeedback{} + fb.All, err = getFeedback(c, user, request, policy, model.All, config.Commit.IgnoreUIMerge) + if err != nil { + return fb, err + } + if config.Commit.Range == model.All { + fb.Approval = fb.All + } else { + fb.Approval, err = getFeedback(c, user, request, policy, config.Commit.Range, config.Commit.IgnoreUIMerge) + } + if err != nil { + return fb, err + } + if config.Commit.Range == config.Commit.AntiRange { + fb.Disapproval = fb.Approval + } else { + fb.Disapproval, err = getFeedback(c, user, request, policy, config.Commit.AntiRange, config.Commit.IgnoreUIMerge) + } + return fb, err +} + +func getIssuesFromMessage(c context.Context, user *model.User, repo *model.Repo, + message string, numbers set.Set, issues []*model.Issue) []*model.Issue { + message = strings.ToLower(message) + comments := closePullRequest.FindAllStringSubmatch(message, -1) + for _, comment := range comments { + if len(comment) != 3 { + log.Errorf("Issue close does not have 3 parts: %v", comment) + continue + } + numbers.Add(comment[2]) + } + for _, number := range numbers.Keys() { + num, err := strconv.Atoi(number) + if err != nil { + log.Errorf("Unable to convert match (\\d+) into number: %s", number) + continue + } + i, err := remote.GetIssue(c, user, repo, num) + if err != nil { + log.Warnf("Unable to fetch issue %s/%s/%d", repo.Owner, repo.Name, num) + continue + } + issues = append(issues, &i) + } + return issues +} + +func getIssues(c context.Context, u *model.User, r *model.Repo, + pr *model.PullRequest, feedback []model.Feedback) []*model.Issue { + numbers := set.Empty() + issues := getIssuesFromMessage(c, u, r, pr.Body, numbers, nil) + for _, fb := range feedback { + issues = getIssuesFromMessage(c, u, r, fb.GetBody(), numbers, issues) + } + return issues +} + +func buildApprovers(c context.Context, + user *model.User, + request *model.ApprovalRequest) (*ApprovalInfo, error) { + repo := request.Repository + pr := request.PullRequest + files, err := getPullRequestFiles(c, user, repo, pr.Number) + if err != nil { + return nil, err + } + request.Files = files + policy := model.FindApprovalPolicy(request) + feedback, err := getFeedbackRanges(c, user, request, policy) + if err != nil { + return nil, err + } + request.ApprovalComments = feedback.Approval + request.DisapprovalComments = feedback.Disapproval + request.Issues = getIssues(c, user, repo, pr, feedback.All) + audit, err := calculateAuditInfo(c, user, request) + if err != nil { + return nil, err + } + return calculateApprovalInfo(request, policy, audit) +} + +func calculateAuditInfo(c context.Context, user *model.User, request *model.ApprovalRequest) (bool, error) { + var validAudit bool + var err error + if requireAudit(request.Config, request.PullRequest) { + validAudit, err = testAudit(c, user, request.Repository, request.PullRequest) + } else { + 
validAudit = true + } + return validAudit, err +} + +func calculateApprovalInfo(request *model.ApprovalRequest, policy *model.ApprovalPolicy, audit bool) (*ApprovalInfo, error) { + approvers := set.Empty() + disapprovers := set.Empty() + validAuthor := false + validTitle := false + validAudit := audit + approved, err := model.Approve(request, policy, + func(f model.Feedback, op model.ApprovalOp) { + author := f.GetAuthor().String() + switch op { + case model.Approval: + approvers.Add(author) + case model.DisapprovalInsert: + disapprovers.Add(author) + case model.DisapprovalRemove: + disapprovers.Remove(author) + case model.ValidAuthor: + validAuthor = true + case model.ValidTitle: + validTitle = true + default: + panic(fmt.Sprintf("Unknown approval operation %d", op)) + } + }) + if err != nil { + return nil, err + } + approved = approved && audit + + ai := ApprovalInfo{ + Policy: policy, + Approved: approved, + AuthorApproved: validAuthor, + TitleApproved: validTitle, + AuditApproved: validAudit, + Approvers: approvers, + Disapprovers: disapprovers, + CurCommentInfo: CurCommentInfo{ + Author: "", + Status: CurCommentNoChange, + }, + } + + if !validAudit { + ai.Status = CurCommentPRAudit + // need to check title before author, since it's processed first and + // if title is triggered, then author never gets processed + } else if !validTitle { + ai.Status = CurCommentPRTitle + } else if !validAuthor { + ai.Status = CurCommentPRAuthor + } else if len(request.ApprovalComments) > 0 && len(request.DisapprovalComments) > 0 { + //go back and check if the last comment is an approval or a block + lookback := *request + lookback.ApprovalComments = request.ApprovalComments[len(request.ApprovalComments)-1:] + lookback.DisapprovalComments = request.DisapprovalComments[len(request.DisapprovalComments)-1:] + model.Approve(&lookback, policy, + func(f model.Feedback, op model.ApprovalOp) { + switch op { + case model.Approval: + ai.Status = CurCommentApproval + ai.Author = f.GetAuthor().String() + case model.DisapprovalInsert: + ai.Status = CurCommentDisapproval + ai.Author = f.GetAuthor().String() + } + }) + } + + return &ai, nil +} + +func generateStatus(info *ApprovalInfo) (string, string) { + status := "pending" + var desc string + if info.Approved { + status = "success" + if len(info.Approvers) > 0 { + desc = "approved by " + info.Approvers.Print(",") + } else { + desc = "approval did not require approvers" + } + } else if !info.AuditApproved { + status = "error" + desc = "audit chain must be manually approved" + } else if !info.TitleApproved { + // put title first because we check title first + // longer term fix is to get rid of the side effects + status = "error" + desc = "pull request title is blocking merge" + } else if !info.AuthorApproved { + status = "error" + desc = "pull request author not allowed" + } else if len(info.Disapprovers) > 0 { + desc = "blocked by " + info.Disapprovers.Print(",") + } else if len(info.Approvers) > 0 { + desc = fmt.Sprintf("more approvals needed. 
%s: %s", envvars.Env.Branding.ShortName, info.Approvers.Print(",")) + } else { + desc = "no approvals received" + } + return status, desc +} + +func recordStats(approval *ApprovalInfo, repo *model.Repo, pr int) { + if approval.Approved && len(approval.Approvers) > 0 { + id := fmt.Sprintf("%s/%s/%d", repo.Owner, repo.Name, pr) + logstats.RecordPR(id) + } + for id := range approval.Approvers { + logstats.RecordApprover(id) + } + for id := range approval.Disapprovers { + logstats.RecordDisapprover(id) + } +} diff --git a/web/approval_hook.go b/web/approval_hook.go new file mode 100644 index 0000000..92c7b80 --- /dev/null +++ b/web/approval_hook.go @@ -0,0 +1,49 @@ +/* + +SPDX-Copyright: Copyright (c) Capital One Services, LLC +SPDX-License-Identifier: Apache-2.0 +Copyright 2017 Capital One Services, LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and limitations under the License. + +*/ +package web + +import ( + "context" + + "github.com/capitalone/checks-out/notifier" +) + +func doApprovalHook(c context.Context, hook *ApprovalHook) (*ApprovalOutput, error) { + params, err := GetHookParameters(c, hook.Repo.Slug, true) + if err != nil { + return nil, err + } + approvalInfo, err := approve(c, params, hook.Issue.Number, true) + + if err != nil { + notifier.SendErrorMessage(c, params.Config, hook.Issue.Title, + hook.Issue.Number, hook.Repo.Slug, err.Error()) + return nil, err + } + mw := handleApprovalNotification(hook, &approvalInfo.CurCommentInfo) + notifier.SendMessage(c, params.Config, *mw) + approvalOutput := ApprovalOutput{ + Policy: approvalInfo.Policy, + Settings: params.Config, + Approved: approvalInfo.Approved, + Approvers: approvalInfo.Approvers, + Disapprovers: approvalInfo.Disapprovers, + } + return &approvalOutput, nil +} diff --git a/web/approval_test.go b/web/approval_test.go new file mode 100644 index 0000000..ee2812f --- /dev/null +++ b/web/approval_test.go @@ -0,0 +1,117 @@ +/* + +SPDX-Copyright: Copyright (c) Capital One Services, LLC +SPDX-License-Identifier: Apache-2.0 +Copyright 2017 Capital One Services, LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and limitations under the License. 
+ +*/ +package web + +import ( + "fmt" + "testing" + + "github.com/capitalone/checks-out/envvars" + "github.com/capitalone/checks-out/set" +) + +func TestGenerateStatus(t *testing.T) { + testCases := map[*ApprovalInfo]struct { + status string + desc string + }{ + &ApprovalInfo{ + Approved: true, + }: { + status: "success", + desc: "approval did not require approvers", + }, + + &ApprovalInfo{ + Approved: true, + Approvers: set.New("bob", "frank"), + }: { + status: "success", + desc: "approved by bob,frank", + }, + + &ApprovalInfo{ + Approved: false, + AuditApproved: false, + TitleApproved: false, + AuthorApproved: false, + }: { + status: "error", + desc: "audit chain must be manually approved", + }, + + &ApprovalInfo{ + Approved: false, + AuditApproved: true, + TitleApproved: false, + AuthorApproved: false, + }: { + status: "error", + desc: "pull request title is blocking merge", + }, + + &ApprovalInfo{ + Approved: false, + AuditApproved: true, + TitleApproved: true, + AuthorApproved: false, + }: { + status: "error", + desc: "pull request author not allowed", + }, + + &ApprovalInfo{ + Approved: false, + AuditApproved: true, + TitleApproved: true, + AuthorApproved: true, + Disapprovers: set.New("bob", "frank"), + }: { + status: "pending", + desc: "blocked by bob,frank", + }, + + &ApprovalInfo{ + Approved: false, + AuditApproved: true, + TitleApproved: true, + AuthorApproved: true, + Approvers: set.New("bob", "frank"), + }: { + status: "pending", + desc: fmt.Sprintf("more approvals needed. %s: bob,frank", envvars.Env.Branding.ShortName), + }, + + &ApprovalInfo{ + Approved: false, + AuditApproved: true, + TitleApproved: true, + AuthorApproved: true, + }: { + status: "pending", + desc: "no approvals received", + }, + } + for k, v := range testCases { + status, desc := generateStatus(k) + if v.status != status || v.desc != desc { + t.Errorf("Expected status %s and desc %s but got %s and %s", v.status, v.desc, status, desc) + } + } +} diff --git a/web/audit.go b/web/audit.go new file mode 100644 index 0000000..6d73904 --- /dev/null +++ b/web/audit.go @@ -0,0 +1,137 @@ +/* + +SPDX-Copyright: Copyright (c) Capital One Services, LLC +SPDX-License-Identifier: Apache-2.0 +Copyright 2017 Capital One Services, LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and limitations under the License. + +*/ +package web + +import ( + "context" + "fmt" + "time" + + "github.com/capitalone/checks-out/model" + "github.com/capitalone/checks-out/remote" +) + +const noAudit = "the entire branch history. 
No audits found in the past 30 commits" + +func hasAuditStatus(c context.Context, u *model.User, r *model.Repo, context, sha string) (bool, error) { + status, err := remote.GetStatus(c, u, r, sha) + if err != nil { + return false, err + } + _, ok := status.Statuses[context] + return ok, nil +} + +func requireAudit(cfg *model.Config, pr *model.PullRequest) bool { + return cfg.Audit.Enable && cfg.Audit.Branches.Contains(pr.Branch.BaseName) +} + +// testAudit determines whether a pull request preserves the audit chain +func testAudit(c context.Context, u *model.User, r *model.Repo, pr *model.PullRequest) (bool, error) { + ctx := model.AuditName + "/" + pr.Branch.BaseName + // If the base branch has an audit stamp then the chain is preserved + ok, err := hasAuditStatus(c, u, r, ctx, pr.Branch.BaseSHA) + if err != nil { + return false, err + } + if ok { + return true, nil + } + // If the compare branch has an audit stamp then the stamp was created + // by the service to indicate manual approval of the audit chain. + ok, err = hasAuditStatus(c, u, r, ctx, pr.Branch.CompareSHA) + if err != nil { + return false, err + } + if ok { + return true, nil + } + return false, nil +} + +func applyAudit(c context.Context, u *model.User, r *model.Repo, pr *model.PullRequest) error { + ctx := model.AuditName + "/" + pr.Branch.BaseName + state := "success" + desc := fmt.Sprintf("audited by pr %d", pr.Number) + return remote.SetStatus(c, u, r, pr.Branch.MergeCommitSHA, ctx, state, desc) +} + +func findAuditRange(c context.Context, u *model.User, r *model.Repo, pr *model.PullRequest) (string, error) { + ctx := model.AuditName + "/" + pr.Branch.BaseName + commits, _, err := remote.GetCommits(c, u, r, pr.Branch.BaseSHA, 1, 30) + if err != nil { + return "", err + } + for _, commit := range commits { + var ok bool + ok, err = hasAuditStatus(c, u, r, ctx, commit) + if err != nil { + return "", err + } + if ok { + auditRange := fmt.Sprintf("the range %s", + remote.CreateURLCompare(c, u, r, commit, pr.Branch.BaseSHA)) + return auditRange, nil + } + } + return noAudit, nil +} + +func manualAudit(c context.Context, u *model.User, r *model.Repo, pr *model.PullRequest) error { + base := pr.Branch.BaseName + ctx := model.AuditName + "/" + base + branchName := fmt.Sprintf("pr-%d-audit-%d", pr.Number, time.Now().Unix()) + prTitle := fmt.Sprintf("Audit branch %s for pr %d", base, pr.Number) + auditRange, err := findAuditRange(c, u, r, pr) + if err != nil { + return nil + } + prBody := fmt.Sprintf(`Please review the commits in %s. `+ + `You must review commits that were not submitted via pull request. `+ + `Approving this pull request indicates `+ + `that you have reviewed the commits to branch %s.`, auditRange, base) + commitMsg := fmt.Sprintf(`empty commit. Added commit status branch '%s' manual audit + +The commits have been reviewed in %s`, base, auditRange) + commitSha, err := remote.CreateEmptyCommit(c, u, r, pr.Branch.BaseSHA, commitMsg) + if err != nil { + return err + } + state := "success" + desc := fmt.Sprintf("manual audit of branch %s", base) + err = remote.SetStatus(c, u, r, commitSha, ctx, state, desc) + if err != nil { + return err + } + _, err = remote.CreateReference(c, u, r, commitSha, "heads/"+branchName) + if err != nil { + return err + } + num, err := remote.CreatePR(c, u, r, prTitle, branchName, base, prBody) + if err != nil { + return err + } + message := fmt.Sprintf(`You must approve pr #%d to preserve the audit chain. 
`+ + `Use the "Update branch" button after you have merged the other pull request.`, num) + err = remote.WriteComment(c, u, r, pr.Number, message) + if err != nil { + return err + } + return nil +} diff --git a/web/comment_hook.go b/web/comment_hook.go new file mode 100644 index 0000000..586edbd --- /dev/null +++ b/web/comment_hook.go @@ -0,0 +1,43 @@ +/* + +SPDX-Copyright: Copyright (c) Capital One Services, LLC +SPDX-License-Identifier: Apache-2.0 +Copyright 2017 Capital One Services, LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and limitations under the License. + +*/ +package web + +import ( + "context" + "strings" + + multierror "github.com/mspiegel/go-multierror" + "github.com/capitalone/checks-out/model" +) + +func (hook *CommentHook) Process(c context.Context) (interface{}, error) { + approvalOutput, e1 := doCommentHook(c, hook) + if e1 != nil { + e2 := sendErrorStatus(c, &hook.ApprovalHook, e1) + e1 = multierror.Append(e1, e2) + } + return approvalOutput, e1 +} + +func doCommentHook(c context.Context, hook *CommentHook) (*ApprovalOutput, error) { + if strings.HasPrefix(hook.Comment, model.CommentPrefix) { + return nil, nil + } + return doApprovalHook(c, &hook.ApprovalHook) +} diff --git a/web/endpoints.go b/web/endpoints.go new file mode 100644 index 0000000..ff29637 --- /dev/null +++ b/web/endpoints.go @@ -0,0 +1,59 @@ +/* + +SPDX-Copyright: Copyright (c) Capital One Services, LLC +SPDX-License-Identifier: Apache-2.0 +Copyright 2017 Capital One Services, LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and limitations under the License. 
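// A minimal sketch of the audit-chain rule implemented by testAudit in
// web/audit.go above: a pull request preserves the chain when either its base
// SHA or its compare (head) SHA already carries a commit status whose context
// is "<audit name>/<base branch>". The remote.GetStatus lookup is replaced by
// an in-memory map, and "audit" stands in for model.AuditName, whose real
// value is not shown in this section.
package main

import "fmt"

// statuses maps a commit SHA to the set of status contexts attached to it.
type statuses map[string]map[string]bool

func auditPreserved(s statuses, auditName, baseName, baseSHA, compareSHA string) bool {
	ctx := auditName + "/" + baseName
	return s[baseSHA][ctx] || s[compareSHA][ctx]
}

func main() {
	s := statuses{
		"abc123": {"audit/master": true},
	}
	fmt.Println(auditPreserved(s, "audit", "master", "abc123", "def456")) // true: base SHA is stamped
	fmt.Println(auditPreserved(s, "audit", "master", "000000", "def456")) // false: no stamp on either SHA
}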
+ +*/ +package web + +import ( + "path" + "strconv" + + "github.com/gin-gonic/gin" +) + +// ApprovalStatus generates a response with the current status of a pull request +func ApprovalStatus(c *gin.Context) { + var ( + owner = c.Param("owner") + name = c.Param("repo") + id = c.Param("id") + ) + + pr, err := strconv.Atoi(id) + if err != nil { + c.String(400, "Unable to convert pull request id %s to number", id) + return + } + + params, err := GetHookParameters(c, path.Join(owner, name), false) + if err != nil { + c.Error(err) + return + } + approvalInfo, err := approve(c, params, pr, false) + if err != nil { + c.Error(err) + } else { + c.IndentedJSON(200, gin.H{ + "policy": approvalInfo.Policy, + "settings": params.Config, + "approved": approvalInfo.Approved, + "approvers": approvalInfo.Approvers, + "disapprovers": approvalInfo.Disapprovers, + }) + } +} diff --git a/web/github_create.go b/web/github_create.go new file mode 100644 index 0000000..68d0f17 --- /dev/null +++ b/web/github_create.go @@ -0,0 +1,256 @@ +/* + +SPDX-Copyright: Copyright (c) Capital One Services, LLC +SPDX-License-Identifier: Apache-2.0 +Copyright 2017 Capital One Services, LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and limitations under the License. + +*/ +package web + +import ( + "bytes" + "context" + "encoding/json" + "io" + "io/ioutil" + "net/http" + + "github.com/capitalone/checks-out/exterror" + "github.com/capitalone/checks-out/model" + "github.com/capitalone/checks-out/shared/httputil" + "github.com/capitalone/checks-out/strings/lowercase" + "github.com/capitalone/checks-out/usage" + + log "github.com/Sirupsen/logrus" + "github.com/google/go-github/github" +) + +// TODO: move this into its own package when +// we support backends other than GitHub + +func createHook(c context.Context, r *http.Request) (Hook, context.Context, error) { + + // For server requests the Request Body is always non-nil + // but will return EOF immediately when no body is present. + // The Server will close the request body. The ServeHTTP + // Handler does not need to. 
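// For context, a hypothetical GitHub webhook delivery as createHook below
// expects it: the event name arrives in the X-Github-Event header and the JSON
// payload in the request body. This sketch only constructs such a request with
// net/http/httptest; the payload fields and the /hook path are illustrative
// and are not taken from the service's routing code.
package main

import (
	"fmt"
	"net/http/httptest"
	"strings"
)

func main() {
	payload := `{"action":"created","issue":{"number":7,"state":"open"}}`
	req := httptest.NewRequest("POST", "/hook", strings.NewReader(payload))
	req.Header.Set("X-Github-Event", "issue_comment")
	// createHook switches on this header to pick the hook constructor.
	fmt.Println(req.Header.Get("X-Github-Event")) // issue_comment
}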
+ + body, err := ioutil.ReadAll(r.Body) + if err != nil { + return nil, c, err + } + + event := r.Header.Get("X-Github-Event") + usage.RecordIncomingWebHook(event) + + var hook Hook + switch event { + case "pull_request_review": + hook, err = createReviewHook(body) + case "issue_comment": + hook, err = createCommentHook(body) + case "status": + hook, err = createStatusHook(body) + case "pull_request": + hook, err = createPRHook(body) + case "repository": + hook, err = createRepoHook(r, body) + } + c2 := usage.AddEventToContext(c, event) + return hook, c2, err +} + +func createError(msg string, body []byte, e error) error { + if e == io.EOF { + log.Errorf("Logging request body on eof error: %s", body) + } + e = exterror.Create(http.StatusInternalServerError, e) + return exterror.Append(e, msg) +} + +func createReviewHook(body []byte) (Hook, error) { + + data := github.PullRequestReviewEvent{} + err := json.NewDecoder(bytes.NewReader(body)).Decode(&data) + if err != nil { + err = createError("Getting pull request review hook", body, err) + return nil, err + } + + // don't process reviews on closed pull requests + if data.PullRequest.GetState() == "closed" { + log.Debugf("PR %s is closed -- not processing comments for it any more", data.PullRequest.Title) + return nil, nil + } + + hook := &ReviewHook{ + ApprovalHook: ApprovalHook{ + Issue: &model.Issue{ + Title: data.PullRequest.GetTitle(), + Number: data.PullRequest.GetNumber(), + Author: lowercase.Create(data.PullRequest.User.GetLogin()), + }, + Repo: &model.Repo{ + Owner: data.Repo.Owner.GetLogin(), + Name: data.Repo.GetName(), + Slug: data.Repo.GetFullName(), + }, + }, + } + + return hook, nil +} + +func createCommentHook(body []byte) (Hook, error) { + + data := github.IssueCommentEvent{} + err := json.NewDecoder(bytes.NewReader(body)).Decode(&data) + if err != nil { + err = createError("Getting comment hook", body, err) + return nil, err + } + + // don't process comments on GitHub issues + if len(data.Issue.PullRequestLinks.GetURL()) == 0 { + return nil, nil + } + + // don't process comments on closed pull requests + if data.Issue.GetState() == "closed" { + log.Debugf("PR %s is closed -- not processing comments for it any more", data.Issue.Title) + return nil, nil + } + + hook := &CommentHook{ + ApprovalHook: ApprovalHook{ + Issue: &model.Issue{ + Title: data.Issue.GetTitle(), + Number: data.Issue.GetNumber(), + Author: lowercase.Create(data.Issue.User.GetLogin()), + }, + Repo: &model.Repo{ + Owner: data.Repo.Owner.GetLogin(), + Name: data.Repo.GetName(), + Slug: data.Repo.GetFullName(), + }, + }, + Comment: data.Comment.GetBody(), + } + + return hook, nil +} + +func createStatusHook(body []byte) (Hook, error) { + + data := github.StatusEvent{} + err := json.NewDecoder(bytes.NewReader(body)).Decode(&data) + if err != nil { + err = createError("Getting status hook", body, err) + return nil, err + } + + log.Debug(data) + + hook := &StatusHook{ + SHA: data.GetSHA(), + Status: &model.CommitStatus{ + State: data.GetState(), + Context: data.GetContext(), + Description: data.GetDescription(), + }, + Repo: &model.Repo{ + Owner: data.Repo.Owner.GetLogin(), + Name: data.Repo.GetName(), + Slug: data.Repo.GetFullName(), + }, + } + + return hook, nil +} + +func createPRHook(body []byte) (Hook, error) { + + data := github.PullRequestEvent{} + err := json.NewDecoder(bytes.NewReader(body)).Decode(&data) + if err != nil { + err = createError("Getting pull request hook", body, err) + return nil, err + } + + log.Debug(data) + + mergeable := true + if 
data.PullRequest.Mergeable != nil { + mergeable = data.PullRequest.GetMergeable() + } + + hook := &PRHook{ + ApprovalHook: ApprovalHook{ + Issue: &model.Issue{ + Title: data.PullRequest.GetTitle(), + Number: data.PullRequest.GetNumber(), + Author: lowercase.Create(data.PullRequest.User.GetLogin()), + }, + Repo: &model.Repo{ + Owner: data.Repo.Owner.GetLogin(), + Name: data.Repo.GetName(), + Slug: data.Repo.GetFullName(), + }, + }, + PullRequest: &model.PullRequest{ + Issue: model.Issue{ + Number: data.PullRequest.GetNumber(), + Title: data.PullRequest.GetTitle(), + Author: lowercase.Create(data.PullRequest.User.GetLogin()), + }, + // head branch contains what you like to be applied + // base branch contains where changes should be applied + Branch: model.Branch{ + CompareName: data.PullRequest.Head.GetRef(), + CompareSHA: data.PullRequest.Head.GetSHA(), + CompareOwner: data.PullRequest.Head.User.GetLogin(), + Mergeable: mergeable, + Merged: data.PullRequest.GetMerged(), + MergeCommitSHA: data.PullRequest.GetMergeCommitSHA(), + BaseName: data.PullRequest.Base.GetRef(), + BaseSHA: data.PullRequest.Base.GetSHA(), + }, + Body: data.PullRequest.GetBody(), + }, + Action: data.GetAction(), + } + + return hook, nil +} + +func createRepoHook(r *http.Request, body []byte) (Hook, error) { + + data := github.RepositoryEvent{} + err := json.NewDecoder(bytes.NewReader(body)).Decode(&data) + if err != nil { + err = createError("Getting repository hook", body, err) + return nil, err + } + + log.Debug(data) + + hook := &RepoHook{ + Action: data.GetAction(), + Name: data.Repo.GetName(), + Owner: data.Repo.Owner.GetLogin(), + BaseURL: httputil.GetURL(r), + } + + return hook, nil +} diff --git a/web/hook.go b/web/hook.go index 1378415..37b1fbf 100644 --- a/web/hook.go +++ b/web/hook.go @@ -1,132 +1,92 @@ +/* + +SPDX-Copyright: Copyright (c) Brad Rydzewski, project contributors, Capital One Services, LLC +SPDX-License-Identifier: Apache-2.0 +Copyright 2017 Brad Rydzewski, project contributors, Capital One Services, LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and limitations under the License. + +*/ package web import ( - "regexp" - - "github.com/lgtmco/lgtm/cache" - "github.com/lgtmco/lgtm/model" - "github.com/lgtmco/lgtm/remote" - "github.com/lgtmco/lgtm/store" + "context" - log "github.com/Sirupsen/logrus" "github.com/gin-gonic/gin" + "github.com/capitalone/checks-out/model" ) -func Hook(c *gin.Context) { - hook, err := remote.GetHook(c, c.Request) - if err != nil { - log.Errorf("Error parsing hook. %s", err) - c.String(500, "Error parsing hook. %s", err) - return - } - if hook == nil { - c.String(200, "pong") - return - } - - repo, err := store.GetRepoSlug(c, hook.Repo.Slug) - if err != nil { - log.Errorf("Error getting repository %s. %s", hook.Repo.Slug, err) - c.String(404, "Repository not found.") - return - } - user, err := store.GetUser(c, repo.UserID) - if err != nil { - log.Errorf("Error getting repository owner %s. 
%s", repo.Slug, err) - c.String(404, "Repository owner not found.") - return - } +type Hook interface { + Process(c context.Context) (interface{}, error) +} - rcfile, _ := remote.GetContents(c, user, repo, ".lgtm") - config, err := model.ParseConfig(rcfile) - if err != nil { - log.Errorf("Error parsing .lgtm file for %s. %s", repo.Slug, err) - c.String(500, "Error parsing .lgtm file. %s.", err) - return - } +type ApprovalHook struct { + Repo *model.Repo + Issue *model.Issue +} - // THIS IS COMPLETELY DUPLICATED IN THE API SECTION. NOT IDEAL - file, err := remote.GetContents(c, user, repo, "MAINTAINERS") - if err != nil { - log.Debugf("no MAINTAINERS file for %s. Checking for team members.", repo.Slug) - members, merr := cache.GetMembers(c, user, repo.Owner) - if merr != nil { - log.Errorf("Error getting repository %s. %s", repo.Slug, err) - log.Errorf("Error getting org members %s. %s", repo.Owner, merr) - c.String(404, "MAINTAINERS file not found. %s", err) - return - } else { - for _, member := range members { - file = append(file, member.Login...) - file = append(file, '\n') - } - } - } +type CommentHook struct { + ApprovalHook + Comment string +} - maintainer, err := model.ParseMaintainer(file) - if err != nil { - log.Errorf("Error parsing MAINTAINERS file for %s. %s", repo.Slug, err) - c.String(500, "Error parsing MAINTAINERS file. %s.", err) - return - } +type ReviewHook struct { + ApprovalHook +} - comments, err := remote.GetComments(c, user, repo, hook.Issue.Number) - if err != nil { - log.Errorf("Error retrieving comments for %s pr %d. %s", repo.Slug, hook.Issue.Number, err) - c.String(500, "Error retrieving comments. %s.", err) - return - } - approvers := getApprovers(config, maintainer, hook.Issue, comments) - approved := len(approvers) >= config.Approvals - err = remote.SetStatus(c, user, repo, hook.Issue.Number, approved) - if err != nil { - log.Errorf("Error setting status for %s pr %d. %s", repo.Slug, hook.Issue.Number, err) - c.String(500, "Error setting status. %s.", err) - return - } +type PRHook struct { + ApprovalHook + PullRequest *model.PullRequest + Action string +} - log.Debugf("processed comment for %s. received %d of %d approvals", repo.Slug, len(approvers), config.Approvals) +type RepoHook struct { + Name string + Owner string + Action string + BaseURL string +} - c.IndentedJSON(200, gin.H{ - "approvers": maintainer.People, - "settings": config, - "approved": approved, - "approved_by": approvers, - }) +type StatusHook struct { + SHA string + Status *model.CommitStatus + Repo *model.Repo } -// getApprovers is a helper function that analyzes the list of comments -// and returns the list of approvers. 
-func getApprovers(config *model.Config, maintainer *model.Maintainer, issue *model.Issue, comments []*model.Comment) []*model.Person { - approverm := map[string]bool{} - approvers := []*model.Person{} +type HookParams struct { + Repo *model.Repo + User *model.User + Cap *model.Capabilities + Config *model.Config + Snapshot *model.MaintainerSnapshot +} - matcher, err := regexp.Compile(config.Pattern) +func ProcessHook(c *gin.Context) { + hook, c2, err := createHook(c, c.Request) if err != nil { - // this should never happen - return approvers - } + c.Error(err) + } else if hook == nil { + c.String(200, "pong") + } else { + output, err := hook.Process(c2) + if err != nil { + c.Error(err) + } else { + if output == nil { + c.String(200, "pong") + } else { + c.IndentedJSON(200, output) + } - for _, comment := range comments { - // cannot lgtm your own pull request - if config.SelfApprovalOff && comment.Author == issue.Author { - continue - } - // the user must be a valid maintainer of the project - person, ok := maintainer.People[comment.Author] - if !ok { - continue - } - // the same author can't approve something twice - if _, ok := approverm[comment.Author]; ok { - continue - } - // verify the comment matches the approval pattern - if matcher.MatchString(comment.Body) { - approverm[comment.Author] = true - approvers = append(approvers, person) } } - - return approvers } diff --git a/web/index.go b/web/index.go index 26ed102..b744599 100644 --- a/web/index.go +++ b/web/index.go @@ -1,22 +1,43 @@ +/* + +SPDX-Copyright: Copyright (c) Brad Rydzewski, project contributors, Capital One Services, LLC +SPDX-License-Identifier: Apache-2.0 +Copyright 2017 Brad Rydzewski, project contributors, Capital One Services, LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and limitations under the License. 
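// A small sketch of the Hook abstraction introduced in web/hook.go above: each
// webhook payload is parsed into a value implementing Process, and ProcessHook
// only distinguishes between an error, a nil result (answered with "pong"), and
// JSON output. The pingHook type below is hypothetical and exists only to show
// the shape of an implementation; real hooks call into the remote and the store.
package main

import (
	"context"
	"fmt"
)

// Hook mirrors the interface declared above.
type Hook interface {
	Process(c context.Context) (interface{}, error)
}

type pingHook struct{}

func (pingHook) Process(c context.Context) (interface{}, error) {
	return map[string]bool{"processed": true}, nil
}

func main() {
	var h Hook = pingHook{}
	out, err := h.Process(context.Background())
	fmt.Println(out, err) // map[processed:true] <nil>
}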
+ +*/ package web import ( - "github.com/lgtmco/lgtm/cache" - "github.com/lgtmco/lgtm/router/middleware/session" - "github.com/lgtmco/lgtm/shared/token" + "github.com/capitalone/checks-out/remote" + "github.com/capitalone/checks-out/router/middleware/session" + "github.com/capitalone/checks-out/shared/token" "github.com/gin-gonic/gin" + "github.com/capitalone/checks-out/envvars" ) func Index(c *gin.Context) { user := session.User(c) + docsUrl := envvars.Env.Monitor.DocsUrl + switch { case user == nil: - c.HTML(200, "brand.html", gin.H{}) + c.HTML(200, "brand.html", gin.H{"DocsUrl": docsUrl}) default: - teams, _ := cache.GetTeams(c, user) + teams, _ := remote.GetOrgs(c, user) csrf, _ := token.New(token.CsrfToken, user.Login).Sign(user.Secret) - c.HTML(200, "index.html", gin.H{"user": user, "csrf": csrf, "teams": teams}) + c.HTML(200, "index.html", gin.H{"user": user, "csrf": csrf, "teams": teams, "docsUrl": docsUrl}) } } diff --git a/web/login.go b/web/login.go index c608839..46947c0 100644 --- a/web/login.go +++ b/web/login.go @@ -1,16 +1,39 @@ +/* + +SPDX-Copyright: Copyright (c) Brad Rydzewski, project contributors, Capital One Services, LLC +SPDX-License-Identifier: Apache-2.0 +Copyright 2017 Brad Rydzewski, project contributors, Capital One Services, LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and limitations under the License. + +*/ package web import ( + "context" + "database/sql" + "errors" + "fmt" "net/http" "time" - "github.com/lgtmco/lgtm/model" - "github.com/lgtmco/lgtm/remote" - "github.com/lgtmco/lgtm/shared/httputil" - "github.com/lgtmco/lgtm/shared/token" - "github.com/lgtmco/lgtm/store" - log "github.com/Sirupsen/logrus" + "github.com/capitalone/checks-out/envvars" + "github.com/capitalone/checks-out/exterror" + "github.com/capitalone/checks-out/model" + "github.com/capitalone/checks-out/remote" + "github.com/capitalone/checks-out/shared/httputil" + "github.com/capitalone/checks-out/shared/token" + "github.com/capitalone/checks-out/store" "github.com/gin-gonic/gin" ) @@ -27,12 +50,12 @@ func Login(c *gin.Context) { // when dealing with redirects we may need // to adjust the content type. I cannot, however, - // rememver why, so need to revisit this line. + // remember why, so need to revisit this line. c.Writer.Header().Del("Content-Type") tmpuser, err := remote.GetUser(c, c.Writer, c.Request) if err != nil { - log.Errorf("cannot authenticate user. %s", err) + log.Warnf("cannot authenticate user. %s", err) c.Redirect(303, "/login?error=oauth_error") return } @@ -44,17 +67,27 @@ func Login(c *gin.Context) { // get the user from the database u, err := store.GetUserLogin(c, tmpuser.Login) - if err != nil { + if err != nil && err != sql.ErrNoRows { + c.HTML(500, "error.html", gin.H{"error": err}) + return + } else if err == sql.ErrNoRows { + err = validateUserAccess(c, tmpuser) + if err != nil { + log.Warnf("cannot create account for user. 
%s", tmpuser.Login, err) + c.Redirect(303, "/login?error=no_access_error") + return + } // create the user account u = &model.User{} u.Login = tmpuser.Login u.Token = tmpuser.Token u.Avatar = tmpuser.Avatar + u.Scopes = tmpuser.Scopes u.Secret = model.Rand() // insert the user into the database - if err := store.CreateUser(c, u); err != nil { + if err = store.CreateUser(c, u); err != nil { log.Errorf("cannot insert %s. %s", u.Login, err) c.Redirect(303, "/login?error=internal_error") return @@ -66,15 +99,15 @@ func Login(c *gin.Context) { u.Token = tmpuser.Token u.Avatar = tmpuser.Avatar - if err := store.UpdateUser(c, u); err != nil { + if err = store.UpdateUser(c, u); err != nil { log.Errorf("cannot update %s. %s", u.Login, err) c.Redirect(303, "/login?error=internal_error") return } exp := time.Now().Add(time.Hour * 72).Unix() - token := token.New(token.SessToken, u.Login) - tokenstr, err := token.SignExpires(u.Secret, exp) + sessToken := token.New(token.SessToken, u.Login) + tokenstr, err := sessToken.SignExpires(u.Secret, exp) if err != nil { log.Errorf("cannot create token for %s. %s", u.Login, err) c.Redirect(303, "/login?error=internal_error") @@ -85,25 +118,70 @@ func Login(c *gin.Context) { c.Redirect(303, "/") } +func validateUserAccess(c context.Context, u *model.User) error { + if !envvars.Env.Access.LimitUsers && !envvars.Env.Access.LimitOrgs { + return nil + } + checkedUsers := false + if envvars.Env.Access.LimitUsers { + checkedUsers = true + valid, err := store.CheckValidUser(c, u.Login) + if err != nil || valid { + return err + } + } + checkedOrgs := false + if envvars.Env.Access.LimitOrgs { + checkedOrgs = true + orgs, err := remote.GetOrgs(c, u) + if err != nil { + return err + } + validOrgs, err := store.GetValidOrgs(c) + if err != nil { + return err + } + for _, v := range orgs { + if validOrgs.Contains(v.Login) { + return nil + } + } + } + msg := "User %s is not on " + if checkedUsers { + msg += "the allowed users " + if checkedOrgs { + msg += " or " + } + } + if checkedOrgs { + msg += "the allowed orgs " + } + msg += "list." + return fmt.Errorf(msg, u.Login) +} + // LoginToken authenticates a user with their GitHub token and -// returns an LGTM API token in the response. +// returns a service API token in the response. func LoginToken(c *gin.Context) { access := c.Query("access_token") login, err := remote.GetUserToken(c, access) if err != nil { - c.String(403, "Unable to authenticate user. %s", err) + c.Error(exterror.Append(err, "Unable to authenticate user")) return } user, err := store.GetUserLogin(c, login) if err != nil { - c.String(404, "Unable to authenticate user %s. Not registered.", user.Login) + c.Error(exterror.Create(http.StatusUnauthorized, errors.New("Unable to authenticate user"))) return } exp := time.Now().Add(time.Hour * 72).Unix() - token := token.New(token.UserToken, user.Login) - tokenstr, err := token.SignExpires(user.Secret, exp) + userToken := token.New(token.UserToken, user.Login) + tokenstr, err := userToken.SignExpires(user.Secret, exp) if err != nil { - c.AbortWithError(http.StatusInternalServerError, err) + log.Errorf("cannot create token for %s. 
%s", user.Login, err) + err = errors.New("Internal error attempting to authenticate user") + c.Error(exterror.Create(http.StatusInternalServerError, err)) return } c.IndentedJSON(http.StatusOK, &tokenPayload{ diff --git a/web/login_test.go b/web/login_test.go new file mode 100644 index 0000000..1292d40 --- /dev/null +++ b/web/login_test.go @@ -0,0 +1,152 @@ +/* + +SPDX-Copyright: Copyright (c) Capital One Services, LLC +SPDX-License-Identifier: Apache-2.0 +Copyright 2017 Capital One Services, LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and limitations under the License. + +*/ +package web + +import ( + "context" + "testing" + "time" + + "github.com/capitalone/checks-out/cache" + "github.com/capitalone/checks-out/envvars" + "github.com/capitalone/checks-out/model" + "github.com/capitalone/checks-out/remote" + "github.com/capitalone/checks-out/store" +) + +type mockStore struct { + store.Store + callCount map[string]int +} + +func (ms *mockStore) GetValidOrgs() ([]string, error) { + ms.callCount["getValidOrgs"]++ + return []string{"beatles", "stones", "ledzep"}, nil +} + +func (ms *mockStore) CheckValidUser(login string) (bool, error) { + ms.callCount["getValidOrgs"]++ + return login == "frankie", nil +} + +type mockRemote struct { + remote.Remote + callCount map[string]int +} + +func (mr *mockRemote) GetOrgs(c context.Context, user *model.User) ([]*model.GitHubOrg, error) { + mr.callCount["getOrgs"]++ + if user.Login == "john" { + return []*model.GitHubOrg{ + { + Login: "beatles", + }, + }, nil + } + + if user.Login == "eric" { + return []*model.GitHubOrg{ + { + Login: "cream", + }, + }, nil + } + + return []*model.GitHubOrg{}, nil +} + +func TestValidateUserAccess(t *testing.T) { + s := &mockStore{callCount: map[string]int{}} + r := &mockRemote{callCount: map[string]int{}} + + c := context.Background() + c = context.WithValue(c, "store", s) + c = context.WithValue(c, "cache", cache.NewTTL(40*time.Millisecond)) + c = context.WithValue(c, "remote", r) + + // case 1 -- no validation + envvars.Env.Access.LimitOrgs = false + envvars.Env.Access.LimitUsers = false + + u := &model.User{ + Login: "john", + } + err := validateUserAccess(c, u) + if err != nil { + t.Errorf("Expected no err, got %v", err) + } + + // case 2 -- validate user only + envvars.Env.Access.LimitUsers = true + err = validateUserAccess(c, u) + if err == nil { + t.Errorf("Expected err, got %v", err) + } + + //2a -- valid user + u.Login = "frankie" + err = validateUserAccess(c, u) + if err != nil { + t.Errorf("Expected no err, got %v", err) + } + + // case 3 -- validate org only (frankie fails, john passes) + envvars.Env.Access.LimitOrgs = true + envvars.Env.Access.LimitUsers = false + + err = validateUserAccess(c, u) + if err == nil { + t.Errorf("Expected err, got %v", err) + } + + //case 3a -- valid user + u.Login = "john" + err = validateUserAccess(c, u) + if err != nil { + t.Errorf("Expected no err, got %v", err) + } + + // case 4 - valid org and user (frankie works, john works, eric fails, waldo fails) + envvars.Env.Access.LimitOrgs = true + envvars.Env.Access.LimitUsers = 
true + + u.Login = "frankie" + err = validateUserAccess(c, u) + if err != nil { + t.Errorf("Expected no err, got %v", err) + } + + u.Login = "john" + err = validateUserAccess(c, u) + if err != nil { + t.Errorf("Expected no err, got %v", err) + } + + u.Login = "eric" + err = validateUserAccess(c, u) + if err == nil { + t.Errorf("Expected err, got %v", err) + } + + u.Login = "waldo" + err = validateUserAccess(c, u) + if err == nil { + t.Errorf("Expected err, got %v", err) + } +} diff --git a/web/merge.go b/web/merge.go new file mode 100644 index 0000000..aeea045 --- /dev/null +++ b/web/merge.go @@ -0,0 +1,67 @@ +/* + +SPDX-Copyright: Copyright (c) Capital One Services, LLC +SPDX-License-Identifier: Apache-2.0 +Copyright 2017 Capital One Services, LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and limitations under the License. + +*/ +package web + +import ( + "context" + + log "github.com/Sirupsen/logrus" + "github.com/capitalone/checks-out/model" + "github.com/capitalone/checks-out/remote" +) + +func isBehind(c context.Context, user *model.User, repo *model.Repo, branch model.Branch) (bool, error) { + resp, err := remote.CompareBranches(c, user, repo, branch.BaseName, branch.CompareName, branch.CompareOwner) + if err != nil { + return false, err + } + return resp.BehindBy > 0, nil +} + +func doMerge(c context.Context, user *model.User, + hook *StatusHook, req *model.ApprovalRequest, policy *model.ApprovalPolicy, mergeMethod string) (*string, error) { + approvals, err := buildApprovers(c, user, req) + if err != nil { + return nil, err + } + var people []*model.Person + for id := range approvals.Approvers { + if p, ok := req.Maintainer.People[id]; ok { + people = append(people, p) + } else { + people = append(people, &model.Person{Login: id}) + } + } + message := getCommitComment(req, policy) + log.Debugf("parsed out commit comment message, got: %v", message) + + SHA, err := remote.MergePR(c, user, hook.Repo, *req.PullRequest, people, message, mergeMethod) + if err != nil { + return nil, err + } + return SHA, nil +} + +func doMergeDelete(c context.Context, user *model.User, + hook *StatusHook, req *model.ApprovalRequest) error { + // Head branch contains what changes you like to be applied. + // Do not delete the base branch. + ref := req.PullRequest.Branch.CompareName + return remote.DeleteBranch(c, user, hook.Repo, ref) +} diff --git a/web/notification.go b/web/notification.go new file mode 100644 index 0000000..19c0e9d --- /dev/null +++ b/web/notification.go @@ -0,0 +1,67 @@ +/* + +SPDX-Copyright: Copyright (c) Capital One Services, LLC +SPDX-License-Identifier: Apache-2.0 +Copyright 2017 Capital One Services, LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and limitations under the License. + +*/ +package web + +import ( + "fmt" + + "github.com/capitalone/checks-out/model" + "github.com/capitalone/checks-out/notifier" + + log "github.com/Sirupsen/logrus" +) + +func handleApprovalNotification(hook *ApprovalHook, curCommentInfo *CurCommentInfo) *notifier.MessageWrapper { + mw := ¬ifier.MessageWrapper{ + MessageHeader: notifier.MessageHeader{ + PrName: hook.Issue.Title, + PrNumber: hook.Issue.Number, + Slug: hook.Repo.Slug, + }, + } + if curCommentInfo != nil { + var mi notifier.MessageInfo + switch curCommentInfo.Status { + case CurCommentNoChange: + // do nothing + case CurCommentApproval: + mi.Message = fmt.Sprintf("approval added by %s.", curCommentInfo.Author) + mi.Type = model.CommentApprove + case CurCommentDisapproval: + mi.Message = fmt.Sprintf("blocked by %s.", curCommentInfo.Author) + mi.Type = model.CommentBlock + case CurCommentPRAuthor: + mi.Message = fmt.Sprintf("blocked because it was created by unapproved author %s.", hook.Issue.Author) + mi.Type = model.CommentBlock + case CurCommentPRTitle: + mi.Message = "blocked because its title indicates that it should not be merged" + mi.Type = model.CommentBlock + case CurCommentPRAudit: + mi.Message = "blocked by gap in audit chain" + mi.Type = model.CommentBlock + default: + log.Warnf("Invalid curCommentInfo.Status found, skipping: %v", curCommentInfo.Status) + } + + if mi.Message != "" { + mw.Messages = append(mw.Messages, mi) + } + } + return mw +} diff --git a/web/pr_hook.go b/web/pr_hook.go new file mode 100644 index 0000000..c6adcbd --- /dev/null +++ b/web/pr_hook.go @@ -0,0 +1,229 @@ +/* + +SPDX-Copyright: Copyright (c) Capital One Services, LLC +SPDX-License-Identifier: Apache-2.0 +Copyright 2017 Capital One Services, LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and limitations under the License. 
+ +*/ +package web + +import ( + "context" + "fmt" + + log "github.com/Sirupsen/logrus" + multierror "github.com/mspiegel/go-multierror" + "github.com/capitalone/checks-out/model" + "github.com/capitalone/checks-out/notifier" + "github.com/capitalone/checks-out/remote" + "github.com/capitalone/checks-out/set" +) + +type ApprovalOutput struct { + Policy *model.ApprovalPolicy `json:"policy"` + Settings *model.Config `json:"config"` + Approved bool `json:"approved"` + Approvers set.Set `json:"approvers"` + Disapprovers set.Set `json:"disapprovers"` +} + +var actionWhiteList = set.New("synchronize", "opened", "reopened", "closed") + +func (hook *PRHook) Process(c context.Context) (interface{}, error) { + approvalOutput, e1 := doPRHookAndNotify(c, hook) + if e1 != nil { + e2 := sendErrorStatusPR(c, &hook.ApprovalHook, hook.PullRequest, e1) + e1 = multierror.Append(e1, e2) + } + return approvalOutput, e1 +} + +func doPRHookAndNotify(c context.Context, hook *PRHook) (*ApprovalOutput, error) { + if !actionWhiteList.Contains(hook.Action) { + return nil, nil + } + params, err := GetHookParameters(c, hook.Repo.Slug, true) + if err != nil { + return nil, err + } + approvalOutput, mw, err := doPRHook(c, hook, params) + mw = createMessage(hook, mw, err) + if mw != nil { + notifier.SendMessage(c, params.Config, *mw) + } + return approvalOutput, err +} + +func doPRHook(c context.Context, hook *PRHook, params HookParams) (*ApprovalOutput, *notifier.MessageWrapper, error) { + switch hook.Action { + case "closed": + mw, err := prClosed(c, hook, params) + return nil, mw, err + case "opened", "reopened": + err := prOpened(c, hook, params) + if err != nil { + return nil, nil, err + } + } + return prHandle(c, hook, params) +} + +func prOpened(c context.Context, hook *PRHook, params HookParams) error { + request := model.ApprovalRequest{ + Repository: params.Repo, + Config: params.Config, + PullRequest: hook.PullRequest, + } + success, err := calculateAuditInfo(c, params.User, &request) + if err != nil { + return err + } + if success { + return nil + } + return manualAudit(c, params.User, params.Repo, hook.PullRequest) +} + +func prHandle(c context.Context, hook *PRHook, params HookParams) (*ApprovalOutput, *notifier.MessageWrapper, error) { + var approvalOutput *ApprovalOutput + approvalInfo, err := approvePullRequest(c, params, hook.Issue.Number, hook.PullRequest, true) + + if err == nil { + approvalOutput = &ApprovalOutput{ + Policy: approvalInfo.Policy, + Settings: params.Config, + Approved: approvalInfo.Approved, + Approvers: approvalInfo.Approvers, + Disapprovers: approvalInfo.Disapprovers, + } + } + + mw := handleNotification(c, hook, params, approvalInfo) + + if err == nil { + mw.Merge(*handleApprovalNotification(&hook.ApprovalHook, &approvalInfo.CurCommentInfo)) + } + return approvalOutput, mw, err +} + +func prClosed(c context.Context, hook *PRHook, params HookParams) (*notifier.MessageWrapper, error) { + user := params.User + repo := params.Repo + config := params.Config + + pr, err := remote.GetPullRequest(c, user, repo, hook.Issue.Number) + if err != nil { + return nil, err + } + + if requireAudit(config, &pr) { + audit, err := testAudit(c, user, repo, &pr) + if err != nil { + return nil, err + } + if audit { + err = applyAudit(c, user, repo, &pr) + if err != nil { + return nil, err + } + } + } + mw := handleNotification(c, hook, params, nil) + return mw, nil +} + +func policyDescription(ai *ApprovalInfo) string { + if ai == nil { + return "" + } + if len(ai.Policy.Name) > 0 { + return ai.Policy.Name 
+ } + if ai.Policy.Position > 0 { + return fmt.Sprintf("# %d", ai.Policy.Position) + } + return "" +} + +func handleNotification(c context.Context, prHook *PRHook, params HookParams, ai *ApprovalInfo) *notifier.MessageWrapper { + mw := ¬ifier.MessageWrapper{ + MessageHeader: notifier.MessageHeader{ + PrName: prHook.Issue.Title, + PrNumber: prHook.Issue.Number, + Slug: prHook.Repo.Slug, + }, + } + if ai == nil { + return mw + } + var mi notifier.MessageInfo + switch prHook.Action { + case "opened": + desc := policyDescription(ai) + mi.Message = "opened" + if len(desc) > 0 { + mi.Message += fmt.Sprintf(" Applying approval policy %s", desc) + } + mi.Type = model.CommentOpen + case "closed": + if prHook.PullRequest.Branch.Merged { + mi.Message = "merged" + mi.Type = model.CommentAccept + } else { + mi.Message = "closed without being merged" + mi.Type = model.CommentClose + } + case "reopened": + mi.Message = "reopened" + mi.Type = model.CommentOpen + case "synchronize": + if params.Config.Commit.Range == model.Head { + if params.Config.Commit.IgnoreUIMerge { + merge, err := remote.IsHeadUIMerge(c, params.User, prHook.Repo, prHook.Issue.Number) + if err != nil { + log.Warnf("Unable to test HEAD of pull request %s/%s/%d", + prHook.Repo.Owner, prHook.Repo.Name, prHook.Issue.Number) + } else if merge { + mi.Message = "merged through the user interface. Merge commit ignored." + mi.Type = model.CommentPushIgnore + } else { + mi.Message = "updated. No comments before this one will count for approval." + mi.Type = model.CommentReset + } + } else { + mi.Message = "updated. No comments before this one will count for approval." + mi.Type = model.CommentReset + } + } + } + if mi.Message != "" { + mw.Messages = append(mw.Messages, mi) + } + return mw +} + +func createMessage(hook *PRHook, mw *notifier.MessageWrapper, err error) *notifier.MessageWrapper { + if mw == nil && err == nil { + return nil + } + if err == nil { + return mw + } + mw2 := notifier.BuildErrorMessage(hook.Issue.Title, hook.Issue.Number, hook.Repo.Slug, err.Error()) + if mw == nil { + return &mw2 + } + mw.Merge(mw2) + return mw +} diff --git a/web/remote.go b/web/remote.go new file mode 100644 index 0000000..0880d65 --- /dev/null +++ b/web/remote.go @@ -0,0 +1,175 @@ +/* + +SPDX-Copyright: Copyright (c) Capital One Services, LLC +SPDX-License-Identifier: Apache-2.0 +Copyright 2017 Capital One Services, LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and limitations under the License. 
+ +*/ +package web + +import ( + "context" + "fmt" + "sort" + "strings" + + "github.com/capitalone/checks-out/exterror" + "github.com/capitalone/checks-out/model" + "github.com/capitalone/checks-out/remote" +) + +func getPullRequestFiles(c context.Context, user *model.User, repo *model.Repo, num int) ([]model.CommitFile, error) { + files, err := remote.GetPullRequestFiles(c, user, repo, num) + if err != nil { + msg := fmt.Sprintf("Error retrieving files for %s pr %d", repo.Slug, num) + err = exterror.Append(err, msg) + return nil, err + } + return files, nil +} + +func getFeedback(c context.Context, + user *model.User, + request *model.ApprovalRequest, + policy *model.ApprovalPolicy, + crange model.CommitRange, + noUIMerge bool) ([]model.Feedback, error) { + repo := request.Repository + pr := request.PullRequest + var feedback []model.Feedback + var err error + fbConfig := request.Config.GetFeedbackConfig(policy) + switch crange { + case model.All: + feedback, err = getAllFeedback(c, user, repo, pr.Number, fbConfig.Types) + case model.Head: + feedback, err = getFeedbackSinceHead(c, user, repo, pr.Number, noUIMerge, fbConfig.Types) + default: + feedback, err = nil, fmt.Errorf("Unknown commit range '%s' in configuration", + crange.String()) + } + if err != nil { + return nil, err + } + return feedback, nil +} + +func hasFeedbackType(types []model.FeedbackType, target model.FeedbackType) bool { + for _, t := range types { + if t == target { + return true + } + } + return false +} + +func filterFeedback(feedback []model.Feedback) []model.Feedback { + filter := feedback[:0] + for _, msg := range feedback { + if !strings.HasPrefix(msg.GetBody(), model.CommentPrefix) { + filter = append(filter, msg) + } + } + return filter +} + +func sortFeedback(feedback []model.Feedback) { + sort.SliceStable(feedback, func(i, j int) bool { + return feedback[i].GetSubmittedAt().Before(feedback[j].GetSubmittedAt()) + }) +} + +func getAllFeedback(c context.Context, user *model.User, repo *model.Repo, num int, types []model.FeedbackType) ([]model.Feedback, error) { + var feedback []model.Feedback + var comments []*model.Comment + var reviews []*model.Review + var err error + if hasFeedbackType(types, model.CommentType) { + comments, err = remote.GetAllComments(c, user, repo, num) + if err != nil { + msg := fmt.Sprintf("Error retrieving comments for %s pr %d", repo.Slug, num) + err = exterror.Append(err, msg) + return nil, err + } + } + if hasFeedbackType(types, model.ReviewType) { + reviews, err = remote.GetAllReviews(c, user, repo, num) + if err != nil { + msg := fmt.Sprintf("Error retrieving reviews for %s pr %d", repo.Slug, num) + err = exterror.Append(err, msg) + return nil, err + } + } + for _, c := range comments { + feedback = append(feedback, c) + } + for _, r := range reviews { + feedback = append(feedback, r) + } + feedback = filterFeedback(feedback) + sortFeedback(feedback) + return feedback, nil +} + +func getFeedbackSinceHead(c context.Context, user *model.User, repo *model.Repo, num int, noUIMerge bool, types []model.FeedbackType) ([]model.Feedback, error) { + var feedback []model.Feedback + var comments []*model.Comment + var reviews []*model.Review + var err error + if hasFeedbackType(types, model.CommentType) { + comments, err = remote.GetCommentsSinceHead(c, user, repo, num, noUIMerge) + if err != nil { + msg := fmt.Sprintf("Error retrieving comments for %s pr %d", repo.Slug, num) + err = exterror.Append(err, msg) + return nil, err + } + } + if hasFeedbackType(types, model.ReviewType) { + reviews, 
err = remote.GetReviewsSinceHead(c, user, repo, num, noUIMerge) + if err != nil { + msg := fmt.Sprintf("Error retrieving reviews for %s pr %d", repo.Slug, num) + err = exterror.Append(err, msg) + return nil, err + } + } + for _, c := range comments { + feedback = append(feedback, c) + } + for _, r := range reviews { + feedback = append(feedback, r) + } + feedback = filterFeedback(feedback) + sortFeedback(feedback) + return feedback, nil +} + +func sendErrorStatusPR(c context.Context, hook *ApprovalHook, pr *model.PullRequest, e error) error { + repo, user, _, err := GetRepoAndUser(c, hook.Repo.Slug) + if err != nil { + return err + } + return remote.SetStatus(c, user, repo, pr.Branch.CompareSHA, model.ServiceName, "error", e.Error()) +} + +func sendErrorStatus(c context.Context, hook *ApprovalHook, e error) error { + repo, user, _, err := GetRepoAndUser(c, hook.Repo.Slug) + if err != nil { + return err + } + pr, err := remote.GetPullRequest(c, user, repo, hook.Issue.Number) + if err != nil { + return err + } + return remote.SetStatus(c, user, repo, pr.Branch.CompareSHA, model.ServiceName, "error", e.Error()) +} diff --git a/web/repo_hook.go b/web/repo_hook.go new file mode 100644 index 0000000..3b769a7 --- /dev/null +++ b/web/repo_hook.go @@ -0,0 +1,67 @@ +/* + +SPDX-Copyright: Copyright (c) Capital One Services, LLC +SPDX-License-Identifier: Apache-2.0 +Copyright 2017 Capital One Services, LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and limitations under the License. + +*/ +package web + +import ( + "context" + + "github.com/capitalone/checks-out/api" + "github.com/capitalone/checks-out/store" +) + +func (hook *RepoHook) Process(c context.Context) (interface{}, error) { + return doRepoHook(c, hook) +} + +type RepoOutput struct { + Action string `json:"action"` + Owner string `json:"owner"` + Name string `json:"name"` +} + +func doRepoHook(c context.Context, hook *RepoHook) (*RepoOutput, error) { + + orgDb, err := store.GetOrgName(c, hook.Owner) + if err != nil { + return nil, err + } + user, err := store.GetUser(c, orgDb.UserID) + if err != nil { + return nil, err + } + + repoOutput := &RepoOutput{ + Action: hook.Action, + Owner: hook.Owner, + Name: hook.Name, + } + + switch hook.Action { + case "created": + api.TurnOnRepoQuiet(c, user, hook.Owner, hook.Name, hook.BaseURL) + case "deleted": + repo, err := store.GetRepoOwnerName(c, hook.Owner, hook.Name) + if err != nil { + api.TurnOffRepo(c, user, repo, hook.Owner, hook.Name, hook.BaseURL) + } + default: + repoOutput = nil + } + return repoOutput, nil +} diff --git a/web/review_hook.go b/web/review_hook.go new file mode 100644 index 0000000..7852a44 --- /dev/null +++ b/web/review_hook.go @@ -0,0 +1,34 @@ +/* + +SPDX-Copyright: Copyright (c) Capital One Services, LLC +SPDX-License-Identifier: Apache-2.0 +Copyright 2017 Capital One Services, LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and limitations under the License. + +*/ +package web + +import ( + "context" + + multierror "github.com/mspiegel/go-multierror" +) + +func (hook *ReviewHook) Process(c context.Context) (interface{}, error) { + approvalOutput, e1 := doApprovalHook(c, &hook.ApprovalHook) + if e1 != nil { + e2 := sendErrorStatus(c, &hook.ApprovalHook, e1) + e1 = multierror.Append(e1, e2) + } + return approvalOutput, e1 +} diff --git a/web/static/files/angular-toggle-switch.min.js b/web/static/files/angular-toggle-switch.min.js new file mode 100644 index 0000000..6cdfd9c --- /dev/null +++ b/web/static/files/angular-toggle-switch.min.js @@ -0,0 +1 @@ +!function(){var module=angular.module("toggle-switch",["ng"]);module.provider("toggleSwitchConfig",[function(){this.onLabel="On",this.offLabel="Off",this.knobLabel=" ";var self=this;this.$get=function(){return{onLabel:self.onLabel,offLabel:self.offLabel,knobLabel:self.knobLabel}}}]),module.directive("toggleSwitch",["toggleSwitchConfig",function(toggleSwitchConfig){return{restrict:"EA",replace:!0,require:"ngModel",scope:{disabled:"@",onLabel:"@",offLabel:"@",knobLabel:"@"},template:'',compile:function(element,attrs){return attrs.onLabel||(attrs.onLabel=toggleSwitchConfig.onLabel),attrs.offLabel||(attrs.offLabel=toggleSwitchConfig.offLabel),attrs.knobLabel||(attrs.knobLabel=toggleSwitchConfig.knobLabel),this.link},link:function(scope,element,attrs,ngModelCtrl){var KEY_SPACE=32;element.on("click",function(){scope.$apply(scope.toggle)}),element.on("keydown",function(e){var key=e.which?e.which:e.keyCode;key===KEY_SPACE&&scope.$apply(scope.toggle)}),ngModelCtrl.$formatters.push(function(modelValue){return modelValue}),ngModelCtrl.$parsers.push(function(viewValue){return viewValue}),ngModelCtrl.$viewChangeListeners.push(function(){scope.$eval(attrs.ngChange)}),ngModelCtrl.$render=function(){scope.model=ngModelCtrl.$viewValue},scope.toggle=function(){scope.disabled||(scope.model=!scope.model,ngModelCtrl.$setViewValue(scope.model))}}}}])}(); \ No newline at end of file diff --git a/web/static/files/checksout.html b/web/static/files/checksout.html new file mode 100644 index 0000000..ae58f7b --- /dev/null +++ b/web/static/files/checksout.html @@ -0,0 +1,65 @@ + + + + +
+ [checksout.html Angular template body: markup not recoverable; surviving text: "There are no repositories to manage.", "{{error.data}}", "{{ repo.slug }} link"]
    + diff --git a/web/static/files/checksout.js b/web/static/files/checksout.js new file mode 100644 index 0000000..8b27485 --- /dev/null +++ b/web/static/files/checksout.js @@ -0,0 +1,320 @@ +(function () { + + /** + * Creates the angular application. + */ + angular.module('app', [ + 'ngRoute', + 'toggle-switch' + ]); + + /** + * Defines the route configuration for the + * main application. + */ + function Config ($routeProvider, $httpProvider, $locationProvider) { + $routeProvider + .when('/', { + templateUrl: '/static/checksout.html', + controller: 'RepoCtrl' + }) + .when('/:org', { + templateUrl: '/static/checksout.html', + controller: 'RepoCtrl' + }) + ; + + // Enables html5 mode + $locationProvider.html5Mode(true); + + // Enables XSRF protection + $httpProvider.defaults.headers.common['X-CSRF-TOKEN'] = window.STATE_FROM_SERVER.csrf; + } + + function Noop($rootScope) {} + + angular + .module('app') + .config(Config) + .run(Noop); +})(); + +(function () { + + function parseRepo() { + return function(conf_url) { + var parts = conf_url.split("/"); + return parts[3]+"/"+parts[4]; + } + } + + angular + .module('app') + .filter('parseRepo', parseRepo); + +})(); + +(function () { + function PropService($http) { + var docsUrl_ = window.STATE_FROM_SERVER.docsUrl; + + this.docsUrl = function() { + return docsUrl_; + } + } + + angular + .module('app') + .service('prop', PropService); +})(); + +(function () { + function UserService($http) { + var user_ = window.STATE_FROM_SERVER.user; + var deleted_ = false; + + this.current = function() { + return user_; + }; + + this.deleted = function() { + return deleted_; + }; + + this.delete = function() { + _deleted = true; + return $http.delete('/api/user'); + }; + } + + angular + .module('app') + .service('user', UserService); +})(); + +(function () { + function TeamService($http) { + var teams_ = window.STATE_FROM_SERVER.teams || []; + teams_.unshift(window.STATE_FROM_SERVER.user); + + this.list = function() { + return teams_; + }; + + this.get = function(name) { + for (var i=0; i 0) { + $scope.validInfo.fileContent = d.file; + } + }).catch(function(err) { + err.slug = repo.slug; + $scope.validInfo = { + slug: repo.slug, + message: err.data + }; + }); + }; + + $scope.saving = false; + } + + angular + .module('app') + .controller('RepoCtrl', RepoCtrl); +})(); diff --git a/web/static/files/images/meowser.png b/web/static/files/images/meowser.png new file mode 100644 index 0000000..90b57b4 Binary files /dev/null and b/web/static/files/images/meowser.png differ diff --git a/web/static/files/lgtm.html b/web/static/files/lgtm.html deleted file mode 100644 index 875d7a5..0000000 --- a/web/static/files/lgtm.html +++ /dev/null @@ -1,193 +0,0 @@ - - - - -
- [lgtm.html Angular template body (deleted): markup not recoverable; surviving text: "There are no repositories to manage.", "{{error.data}}", "{{ repo.slug }} link"]
    - diff --git a/web/static/files/lgtm.js b/web/static/files/lgtm.js deleted file mode 100644 index 11ed8a6..0000000 --- a/web/static/files/lgtm.js +++ /dev/null @@ -1,165 +0,0 @@ -(function () { - - /** - * Creates the angular application. - */ - angular.module('app', [ - 'ngRoute' - ]); - - /** - * Defines the route configuration for the - * main application. - */ - function Config ($routeProvider, $httpProvider, $locationProvider) { - $routeProvider - .when('/', { - templateUrl: '/static/lgtm.html', - controller: 'RepoCtrl' - }) - .when('/:org', { - templateUrl: '/static/lgtm.html', - controller: 'RepoCtrl' - }) - ; - - // Enables html5 mode - $locationProvider.html5Mode(true); - - // Enables XSRF protection - $httpProvider.defaults.headers.common['X-CSRF-TOKEN'] = window.STATE_FROM_SERVER.csrf; - } - - function Noop($rootScope) {} - - angular - .module('app') - .config(Config) - .run(Noop); -})(); - -(function () { - - function parseRepo() { - return function(conf_url) { - var parts = conf_url.split("/") - return parts[3]+"/"+parts[4]; - } - } - - angular - .module('app') - .filter('parseRepo', parseRepo); - -})(); - -(function () { - function UserService($http) { - var user_ = window.STATE_FROM_SERVER.user; - - this.current = function() { - return user_; - }; - } - - angular - .module('app') - .service('user', UserService); -})(); - -(function () { - function TeamService($http) { - var teams_ = window.STATE_FROM_SERVER.teams || []; - teams_.unshift(window.STATE_FROM_SERVER.user); - - this.list = function() { - return teams_; - }; - - this.get = function(name) { - for (var i=0; iul,ol li>ul,ul li>ol,ol li>ol{margin-top:30px}ul li ul li,ol li ul li{list-style:circle}ul li li ul li,ol li li ul li{list-style:square}ul li{list-style:disc}.list-unstyled{padding-left:0}.list-unstyled li{list-style:none}.list-inline li{display:inline-block}dl dt{font-weight:500;color:#000}dl dd{margin:0}.container{margin:0 auto}.grid-flex-container{padding:0;list-style:none;display:-webkit-flex;display:-ms-flexbox;display:flex;-ms-flex-flow:row wrap;flex-flow:row wrap;-webkit-flex-flow:row wrap}[class^="grid-flex-cell"]{margin-bottom:30px}@media screen and (max-width:600px){.container{width:90%}[class^="grid-flex-cell"]{-webkit-flex:0 0 100%;-ms-flex:0 0 100%;flex:0 0 100%}}@media screen and (min-width:601px){[class^="grid-flex-cell"]{-webkit-flex:1;-ms-flex:1;flex:1;padding:15px 0}[class^="grid-flex-cell"]:not(:first-child){margin-left:30px}.grid-flex-cell-1of2,.grid-flex-cell-1of3,.grid-flex-cell-1of4{-webkit-flex:none;-ms-flex:none;flex:none}.grid-flex-cell-1of2{width:50%}.grid-flex-cell-1of3{width:34%}.grid-flex-cell-1of4{width:25%}}@media screen and (min-width:481px) and (max-width:900px){.container{width:85%}}@media screen and (min-width:901px){.container{width:70%}}select,.form-input,.form-checkbox,.form-radio{margin-bottom:15px}legend,label{font-weight:800}legend{padding:0 15px;font-size:22px;color:#000}fieldset{border:1px solid #e6eaed;border-radius:3px;padding:30px;margin:0 0 30px 0}label{display:block;margin-bottom:5px;font-size:12px;cursor:pointer}select,.form-input,.form-radio,.form-checkbox{outline:0}.form-inline{overflow:hidden}.form-input{display:block;padding:10px;width:100%;border-radius:3px;border:1px solid #e6eaed;-webkit-appearance:none}.form-input:focus{transition:all .2s ease-in;border:1px solid #74ddff;background-color:#f8fdff}.form-input:disabled{background-color:#fafbfb;cursor:not-allowed}.form-input:required:focus{border:1px solid 
#fec57b;background-color:#fff9f2}.form-input.input-invalid{border:1px solid #ffc7d8;color:#ff749d}.form-input.input-invalid:focus{border:1px solid #e80044;background-color:#fff4f7}.form-input.input-valid{border:1px solid #c6ebd3;color:#8ed6a8}.form-input.input-valid:focus{border:1px solid #43bb6e;background-color:#f4fbf6}textarea{resize:none;min-height:90px}input[type=file],label.radio,label.checkbox,select{cursor:pointer}input[type=file]{padding:10px;background-color:#f8f9fa;font-size:12px;color:#b7c0c3}select{border:1px solid #e6eaed;height:40px;padding:10px;width:100%;background-color:#fff}select:focus{border:1px solid #74ddff}select:focus:-moz-focusring{color:transparent;text-shadow:0 0 0 #000}.input-radio,.input-checkbox{margin:5px 0;font-size:16px;font-weight:normal}.input-radio.disabled,.input-checkbox.disabled{color:#b7c0c3;cursor:not-allowed}.input-radio input[type=checkbox],.input-checkbox input[type=checkbox],.input-radio input[type=radio],.input-checkbox input[type=radio]{display:inline-block;margin-right:10px}@media screen and (min-width:768px){.form-inline.form-no-labels .button{margin-top:0}.form-inline .form-element{float:left;width:50%}.form-inline .button{margin-top:22.5px}.form-inline .form-element:not(:first-of-type){padding-left:5px}.form-inline .form-input,.form-inline .input-checkbox,.form-inline .input-radio,.form-inline select{margin:0}}.button{display:inline-block;border:none;border-radius:3px;padding-left:15px;padding-right:15px;text-align:center;cursor:pointer;background-color:#00b0e9;transition-delay:0;transition-duration:.3s;transition-timing-function:ease-in;transition-property:background,border-color;-webkit-user-select:none;-moz-user-select:none;-ms-user-select:none;user-select:none;appearance:none}.button:not([class*='button-outlined']){padding-top:10px;padding-bottom:10px}.button:hover{background-color:#009ed2}.button:disabled,.button .button-disabled{background-color:#74ddff;cursor:not-allowed}.button:focus{outline:none}.button,.button:hover{color:#fff}.button-small{padding:3px 5px;font-size:12px}.button-large{padding:15px 30px;font-size:22px}.button-unstyled,.button-unstyled:hover{background-color:transparent}.button-unstyled{padding:0;color:#00b0e9}.button-unstyled:hover{color:#008dba}.button-neutral{background-color:#e6eaed}.button-neutral:hover{background-color:#b0bcc6}.button-neutral:disabled,.button-neutral .button-disabled{background-color:#e6eaed;color:#b7c0c3}.button-neutral:disabled:hover,.button-neutral .button-disabled:hover{color:#b7c0c3}.button-neutral,.button-neutral:hover{color:#272b2d}.button-approve{background-color:#43bb6e}.button-approve:hover{background-color:#369658}.button-approve:disabled,.button-approve .button-disabled{background-color:#c6ebd3}.button-warn{background-color:#e80044}.button-warn:hover{background-color:#ba0036}.button-warn:disabled,.button-warn.button-disabled{background-color:#ffc7d8}.button-caution{background-color:transparent;color:#e80044;text-decoration:underline}.button-caution:hover{background-color:transparent;color:#ba0036}.button-caution:disabled,.button-caution 
.button-disabled{color:#b7c0c3}*[class*='button-outlined']{border-width:2px;border-style:solid;padding-top:8px;padding-bottom:8px}*[class*='button-outlined'],*[class*='button-outlined']:hover,*[class*='button-outlined']:disabled{background-color:transparent}.button-outlined{border-color:#00b0e9;color:#00b0e9}.button-outlined:hover{border-color:#008dba;color:#008dba}.button-outlined:disabled{border-color:#74ddff;color:#74ddff}.button-outlined-neutral{border-color:#b7c0c3;color:#828c8f}.button-outlined-neutral:hover{border-color:#828c8f;color:#4d5659}.button-outlined-neutral:disabled,.button-outlined-neutral.button-disabled{color:#c5cdcf;border-color:#e6eaed}.button-outlined-neutral:disabled:hover,.button-outlined-neutral.button-disabled:hover{color:#c5cdcf}.button-outlined-approve{border-color:#43bb6e;color:#43bb6e}.button-outlined-approve:hover{border-color:#369658;color:#369658}.button-outlined-approve:disabled{border-color:#c6ebd3;color:#c6ebd3}.button-outlined-warn{border-color:#e80044;color:#e80044}.button-outlined-warn:hover{border-color:#ba0036;color:#ba0036}.button-outlined-warn:disabled{border-color:#ffc7d8;color:#ffc7d8}.button-group .button{border-radius:0;margin-left:-1px;border:1px solid #008dba}.button-group .button:first-child,.button-group .button:first-child:only-child{margin:0}.button-group .button:first-child:only-child{border-radius:3px}.button-group .button:first-child{border-radius:3px 0 0 3px}.button-group .button:last-child{border-radius:0 3px 3px 0}.button-group .button-neutral,.button-group .button-outlined{border:1px solid #b7c0c3}@media screen and (max-width:480px){.button-group .button{border-radius:none;margin:0;width:100%}.button-group .button:first-of-type{border-radius:3px 3px 0 0}.button-group .button:last-of-type{border-radius:0 0 3px 3px}.button-group .button:not(:last-of-type){border-bottom:none}}code{font-family:Menlo,Monaco,Consolas,monospace;font-size:12px;padding:3px 5px}pre{display:block;padding:15px;margin-bottom:30px;max-width:100%;line-height:1.5;word-break:break-all;word-wrap:break-word}pre code{padding:0;white-space:pre-wrap}code,pre{border-radius:3px}.code-light{color:#000;background:#f5f7f8}.code-dark{color:#5ca1b5;background:#003546}.dropdown-standalone{position:relative;display:inline-block;cursor:pointer;padding:10px;border-radius:3px}.dropdown-standalone:hover{border-radius:3px 3px 0 0}.dropdown-standalone:hover .dropdown{display:block;position:absolute;top:100%;left:0;width:100%;border-radius:0 0 3px 3px}.dropdown-standalone:hover .dropdown a{padding:10px;display:block}.dropdown-standalone .dropdown{display:none}.dropdown-standalone .icon-arrow-down{position:relative;top:-2px;margin-left:5px}.dropdown-outlined,.dropdown-outlined .dropdown{border:1px solid #e6eaed}.dropdown-standalone.dropdown-outlined .dropdown{width:101%;left:-1px}.dropdown-outlined .dropdown{background-color:#fff}.dropdown-outlined .dropdown li:not(:last-child){border-bottom:1px solid #e6eaed}.dropdown-coloured,.dropdown-coloured .dropdown{background-color:#00b0e9}.dropdown-coloured .button-unstyled,.dropdown-coloured .dropdown a{color:#fff}.dropdown-coloured .icon-arrow-down{border-top-color:#fff}.dropdown-coloured .dropdown li:not(:last-child){border-bottom:1px solid #009ed2}.dropdown-coloured .dropdown a:hover{color:#e3f8ff}.dropdown-neutral,.dropdown-neutral .dropdown{background-color:#e6eaed}.dropdown-neutral a{color:#4d5659}.dropdown-neutral a:hover,.dropdown-neutral .button-unstyled:hover{color:#272b2d}.dropdown-neutral 
.icon-arrow-down{border-top-color:#4d5659}.dropdown-neutral .dropdown li:not(:last-child){border-bottom:1px solid #b7c0c3}.icon-arrow-left,.icon-arrow-right,.icon-arrow-up,.icon-arrow-down{display:inline-block;width:0;height:0;line-height:0;font-size:0}.icon-arrow-up,.icon-arrow-down{border-left:5px solid transparent;border-right:5px solid transparent}.icon-arrow-left,.icon-arrow-right{border-top:5px solid transparent;border-bottom:5px solid transparent}.icon-arrow-left{border-right:5px solid #00b0e9}.icon-arrow-right{border-left:5px solid #00b0e9}.icon-arrow-down{border-top:5px solid #00b0e9}.icon-arrow-up{border-bottom:5px solid #00b0e9}.media-outlined,img{border-radius:3px}figure{width:auto;margin:0}figure img{vertical-align:middle}figure figcaption{color:#828c8f;font-size:12px}.media-outlined{display:inline-block;padding:5px;border:1px solid #e6eaed}@media screen and (max-width:480px){figure{text-align:center}figure img+img{margin:5px auto 0}}@media screen and (min-width:481px){figure img:not(:last-child):not(:only-child){margin-right:5px}figure img:not(:only-child){margin-top:5px}figure img+figcaption{margin-bottom:5px}figure img+figcaption:not(:only-child){margin-right:5px}}.message{padding:15px;position:relative;background-color:#e3f8ff;color:#00b0e9}.message a{font-weight:bold}.message p:last-of-type{margin:0}.message:after{border:solid transparent;content:'';height:0;width:0;position:absolute;pointer-events:none;border-color:rgba(255,255,255,0);border-width:5px}.message-dismissable{padding-right:30px}.message-full-width{text-align:center}.message-above:after,.message-below:after{left:50%;margin-left:-6px}.message-below:after{border-bottom-color:#e3f8ff;bottom:100%}.message-below.message-error:after{border-bottom-color:#ffe3eb}.message-below.message-success:after{border-bottom-color:#e8f7ef}.message-below.message-alert:after{border-bottom-color:#ffeed7}.message-above:after{border-top-color:#e3f8ff;top:100%}.message-above.message-error:after{border-top-color:#ffe3eb}.message-above.message-success:after{border-top-color:#e8f7ef}.message-above.message-alert:after{border-top-color:#ffeed7}.message-error{background-color:#ffe3eb;color:#e80044}.message-error a{color:#ba0036}.message-error a:hover{color:#8b0029}.message-alert{background-color:#ffeed7;color:#f18902}.message-alert a{color:#c16e02}.message-alert a:hover{color:#915201}.message-success{background-color:#e8f7ef;color:#43bb6e}.message-success a{color:#369658}.message-success a:hover{color:#287042}.close{display:block;position:absolute;top:50%;right:15px;margin-top:-15px;cursor:pointer;font-size:large;font-weight:bold}form .message{padding:10px}.modal{position:fixed;top:50%;left:50%;border-radius:3px;z-index:20;-webkit-transform:translate(-50%, -50%);-ms-transform:translate(-50%, -50%);transform:translate(-50%, -50%);background-color:#fff;-webkit-overflow-scrolling:touch}.modal.modal-no-sections{padding:30px;text-align:center}.modal-title{margin:0;float:left}.modal-head,.modal-body{padding:15px}.modal-head{border-bottom:1px solid #e6eaed}.modal-footer{padding:15px 0;text-align:center;border-top:1px solid #e6eaed}.modal-footer button,.modal-footer .button{min-width:120px}.modal-footer button+button,.modal-footer .button+.button{margin-left:5px}.modal-close{float:right;color:#b7c0c3;line-height:1.5;font-weight:bold;font-size:22px}.modal-close:hover{color:#828c8f}.has-modal:before{content:''}.overlay,.has-modal:before{position:fixed;top:0;left:0;width:100%;height:100%;z-index:10;box-shadow:inset 0 0 100px 
rgba(0,0,0,0.7);background-color:rgba(0,0,0,0.4)}@media screen and (max-width:480px){.modal{height:100%;width:100%;top:0;left:0;border-radius:0;-webkit-transform:none;-ms-transform:none;transform:none}}@media screen and (min-width:481px) and (max-width:768px){.modal{width:80%}}@media screen and (min-width:769px){.modal{width:50%}}.top-nav{margin-bottom:30px}.top-nav ul{margin:0}.top-nav li{padding:15px}.top-nav a{display:inline-block}.top-nav label{font-weight:normal;font-size:16px;line-height:1}.top-nav .menu-toggle{visibility:hidden;position:absolute;left:-100%}.top-nav .icon-arrow-down{position:relative;top:-2px;margin-left:5px}.top-nav-light{border-bottom:1px solid #e6eaed}.top-nav-light label{color:#00b0e9}.top-nav-light,.top-nav-light li{background-color:#fff}.top-nav-dark label{color:#438193}.top-nav-dark a{color:#438193}.top-nav-dark a:hover,.top-nav-dark a.active,.top-nav-dark label{color:#85b8c7}.top-nav-dark,.top-nav-dark li{background-color:#003546}.top-nav-dark .icon-arrow-down{border-top-color:#438193}.breadcrumbs{padding:10px 0}.breadcrumbs li{color:#b7c0c3}.breadcrumbs a{padding:0 10px}.pagination li:not(:last-child){border-right:1px solid #e6eaed}.pagination em[class^='icon']{top:1px;position:relative}.pagination .icon-arrow-right{border-left:5px solid #00b0e9}.pagination .icon-arrow-left{border-right:5px solid #00b0e9}.tabs{border-bottom:1px solid #e6eaed}.tabs a{display:block}.side-nav li:not(:last-child) a{border-bottom:1px solid #e6eaed}.unavailable-item{color:#b7c0c3}.unavailable-item:not([href]),.unavailable-item:not([href]):hover{color:#b7c0c3;cursor:default;pointer-events:none}.current-item{color:#4d5659;font-weight:800}.side-nav a,.pagination a,.side-nav li,.pagination li{display:block}.side-nav a:hover:not(.unavailable-item),.pagination a:hover:not(.unavailable-item),.side-nav .current-item,.pagination .current-item{background-color:#fafbfb}.pagination,.breadcrumbs{display:inline-block}.pagination li,.breadcrumbs li{float:left}.breadcrumbs,.side-nav,.pagination{border-radius:3px;border:1px solid #e6eaed}.side-nav a,.pagination a{padding:10px 15px}.breadcrumbs a,.pagination a{font-size:12px}@media screen and (max-width:480px){.tabs a{position:relative;padding:10px}}@media screen and (min-width:481px){.tabs a{padding:10px 15px}.tabs .current-item{position:relative;top:1px;border:1px solid #e6eaed;border-bottom-color:transparent;border-radius:3px 3px 0 0;background-color:#fff}}@media screen and (max-width:768px){.top-nav ul{display:none}.top-nav ul>li{padding:10px}.top-nav .icon-arrow-down{display:none}.top-nav label,.top-nav .menu-toggle:checked~ul,.top-nav .menu-toggle:checked~ul>li{display:block}.top-nav label{padding:15px 15px 15px 0;margin-bottom:0;text-align:right;cursor:pointer}.top-nav .menu-toggle:checked~ul .dropdown{display:block;z-index:1000}.top-nav .menu-toggle:checked~ul .dropdown li{padding:5px 15px;float:left;border:0}.top-nav .menu-toggle:checked~ul .dropdown a{font-size:12px;font-weight:normal}.top-nav-light .menu-toggle:checked~ul>li:not(:last-child){border-bottom:1px solid #e6eaed}.top-nav-dark .menu-toggle:checked~ul>li:not(:last-child){border-bottom:1px solid #1f4753}}@media screen and (min-width:769px){.top-nav label{display:none}.top-nav .has-dropdown{position:relative}.top-nav .has-dropdown:hover .dropdown{display:block}.top-nav .dropdown{display:none;position:absolute;top:100%;left:0}.top-nav .dropdown li{display:block}.top-nav-light .dropdown{border-left:1px solid #e6eaed;border-right:1px solid #e6eaed}.top-nav-light .dropdown 
li{border-bottom:1px solid #e6eaed}.top-nav-dark .dropdown li{border-bottom:1px solid #1f4753}}.progress,.meter{border-radius:3px;line-height:0}.progress.progress-rounded,.progress-rounded .meter{border-radius:50px}.progress{border:1px solid #e6eaed;background-color:#fdfdfd}.progress .meter{display:inline-block;background-color:#00b0e9;height:100%}.progress-small{height:15px}.progress-large{height:25px}.progress-success .meter{background-color:#43bb6e}.progress-warning .meter{background-color:#f18902}.progress-error .meter{background-color:#e80044}.table-responsive{overflow-x:auto}table{width:100%;max-width:100%;margin-bottom:30px}td,th{text-align:left;padding:10px;border-bottom:1px solid #e6eaed}thead th{font-weight:800;color:#4d5659}tr.table-cell-error{background-color:#ffe3eb;color:#e80044}tr.table-cell-error a{color:#ba0036}tr.table-cell-error a:hover{color:#8b0029}tr.table-cell-success{background-color:#e8f7ef;color:#43bb6e}tr.table-cell-success a{color:#369658}tr.table-cell-success a:hover{color:#287042}tr.table-cell-alert{background-color:#ffeed7;color:#f18902}tr.table-cell-alert a{color:#c16e02}tr.table-cell-alert a:hover{color:#915201}.table-striped tr:nth-child(even){color:#828c8f}.table-outlined{border:1px solid #e6eaed;border-radius:3px}.table-outlined tr:last-of-type td{border:none}.table-with-hover tr:hover,.table-striped tr:nth-child(even){background-color:#fafbfb}@media screen and (max-width:767px){table td,table th{padding:5px 10px}}.avatar-radius{border-radius:3px}.avatar-rounded{border-radius:100%}.avatar-small{height:25px;width:25px}.avatar-medium{height:50px;width:50px}.avatar-large{height:100px;width:100px}@media print{*{background:transparent !important;color:#000 !important;box-shadow:none !important;text-shadow:none !important}a,a:visited{text-decoration:underline}a[href]:after{content:" (" attr(href) ")"}abbr[title]:after{content:" (" attr(title) ")"}a[href^="#"]:after,a[href^="javascript:"]:after{content:''}pre,blockquote{page-break-inside:avoid}thead{display:table-header-group}tr,img{page-break-inside:avoid}img{max-width:100% !important}p,h2,h3{orphans:3;widows:3}h2,h3{page-break-after:avoid}}body{border-style:solid;border-color:#f2f4f6}pre{color:#000;background:#f5f7f8}code{color:#000;background:#f5f7f8}header{padding-bottom:30px;border-bottom:1px solid #f2f4f6}header h1{margin-bottom:0}header p{font-size:22px}header .button{margin:30px 0}footer[role='contentinfo']{padding:30px 0;border-top:1px solid #f2f4f6;color:#828c8f;text-align:center}footer[role='contentinfo'] li:not(:last-of-type):after{content:'\2219';margin:0 5px 0 10px}footer[role='contentinfo'] p{margin-bottom:0}footer[role='contentinfo'] 
.logo{margin-top:30px;display:inline-block;width:17px;height:20px;background:url("data:image/svg+xml;base64,PD94bWwgdmVyc2lvbj0iMS4wIiBlbmNvZGluZz0idXRmLTgiPz4NCjwhLS0gR2VuZXJhdG9yOiBBZG9iZSBJbGx1c3RyYXRvciAxNy4xLjAsIFNWRyBFeHBvcnQgUGx1Zy1JbiAuIFNWRyBWZXJzaW9uOiA2LjAwIEJ1aWxkIDApICAtLT4NCjwhRE9DVFlQRSBzdmcgUFVCTElDICItLy9XM0MvL0RURCBTVkcgMS4xLy9FTiIgImh0dHA6Ly93d3cudzMub3JnL0dyYXBoaWNzL1NWRy8xLjEvRFREL3N2ZzExLmR0ZCI+DQo8c3ZnIHZlcnNpb249IjEuMSIgaWQ9IkxheWVyXzEiIHhtbG5zPSJodHRwOi8vd3d3LnczLm9yZy8yMDAwL3N2ZyIgeG1sbnM6eGxpbms9Imh0dHA6Ly93d3cudzMub3JnLzE5OTkveGxpbmsiIHg9IjBweCIgeT0iMHB4Ig0KCSB2aWV3Qm94PSIxNjIgMjQ2LjEgMjg4IDMzNS42IiBlbmFibGUtYmFja2dyb3VuZD0ibmV3IDE2MiAyNDYuMSAyODggMzM1LjYiIHhtbDpzcGFjZT0icHJlc2VydmUiPg0KPGc+DQoJPGc+DQoJCTxwYXRoIGZpbGw9IiNCN0MwQzMiIGQ9Ik0zMDIuMSwzOTUuMmw1OC4xLDcwLjNsMi0yYzYuMy02LjcsMTIuMS0xMy4zLDE3LjctMjBjNS41LTYuNywyMC0yMiwyNS4yLTI5LjFMNDI4LDQ1Ng0KCQkJYy0zLjksNS41LTkuMSwxMi4xLTE1LjMsMTkuNmMtNi4zLDcuNS0xMy43LDE2LjEtMjIuOCwyNS42bDYwLjEsNzQuM2gtNjAuOWwtMzIuMy0zOS43Yy0yOS41LDMwLjctNjAuNSw0Ni05Mi4zLDQ2DQoJCQljLTI4LjcsMC01My4xLTkuMS03My4xLTI3LjVjLTE5LjYtMTguNC0yOS41LTQxLjItMjkuNS02OC40YzAtMzIuMywxNS4zLTU5LjMsNDYuOC04MC41bDIxLjItMTQuNWMwLjQsMCwwLjgtMC40LDEuNi0xLjINCgkJCWMwLjgtMC40LDEuNi0xLjIsMi44LTIuNGMtMjEuMi0yMi44LTMyLjMtNDUuMi0zMi4zLTY3LjJzNy4xLTM5LjcsMjEuMi01My41YzE0LjUtMTMuNywzMy4zLTIwLjQsNTYuNS0yMC40DQoJCQljMjIuNCwwLDQwLjUsNi43LDU1LjEsMjBjMTQuNSwxMy4zLDIyLDMwLjMsMjIsNTAuM2MwLDE0LjEtMy45LDI2LjctMTEuNywzOC4xQzMzNi43LDM2NS43LDMyMi41LDM3OS41LDMwMi4xLDM5NS4yeg0KCQkJIE0yNjMuMiw0MjIuN2wtMi44LDJjLTIwLDEzLjctMzMuNywyNS4yLTQwLjgsMzMuM2MtNy4xLDguMS0xMC43LDE3LjMtMTAuNywyNi43YzAsMTMuMyw1LjUsMjUuOSwxNi4xLDM2LjkNCgkJCWMxMS4xLDEwLjcsMjMuNiwxNi4xLDM2LjksMTYuMWMxOC44LDAsNDAuNS0xMi4xLDY1LjItMzYuOUwyNjMuMiw0MjIuN3ogTTI3My45LDM2MC45bDMuOS0zLjJjNi43LTUuMSwxMi41LTkuOSwxNi45LTEzLjMNCgkJCWM0LjMtMy45LDcuOS03LjEsOS45LTkuOWM0LjMtNS4xLDYuMy0xMS4zLDYuMy0xOS4yYzAtOC43LTIuOC0xNS4zLTguNy0yMC44cy0xMy4zLTcuOS0yMy4yLTcuOWMtOC43LDAtMTYuMSwyLjgtMjIuNCw4LjMNCgkJCWMtNS45LDUuMS05LjEsMTEuNy05LjEsMjBjMCw5LjUsMy45LDE4LjgsMTEuMywyOC4zbDEyLjEsMTQuNUMyNzEuNSwzNTguMywyNzIuMywzNTkuMywyNzMuOSwzNjAuOXoiLz4NCgk8L2c+DQo8L2c+DQo8L3N2Zz4NCg==") no-repeat 0 0}section{padding:30px 0}section:not(:last-child){border-bottom:1px solid #f2f4f6}.sample{border:1px solid #e6eaed;padding:30px;border-radius:3px}.sample *:last-child{margin-bottom:0}.sample h1,.sample h2,.sample h3,.sample h4,.sample h5,.sample h6{margin-bottom:10px}.avatar{margin:0 30px 30px 0}.buttons .button-group{margin-bottom:10px}.buttons .button-group .button{margin:0 0 0 -1px}.buttons .button{margin:5px}.top-nav ul{z-index:10}.progress{margin-bottom:10px}.progress .meter{min-width:50%}.modal-sample{padding:15px;margin-bottom:30px;box-shadow:inset 0 0 100px rgba(0,0,0,0.7);background-color:rgba(0,0,0,0.4)}.modal-sample 
.modal{position:relative;top:0;left:50%;margin-left:-25%;-webkit-transform:none;-ms-transform:none;transform:none}.message,.form-inline{margin-bottom:10px}.color{display:inline-block;padding:15px;color:#fff;text-align:center}.blue1{background-color:#001620}.blue2{background-color:#002531}.blue3{background-color:#003546}.blue4{background-color:#1f4753}.blue5{background-color:#438193}.blue6{background-color:#00b0e9}.blue7{background-color:#74ddff}.blue8{background-color:#e3f8ff;color:#00b0e9}.gray1{background-color:#272b2d}.gray2{background-color:#4d5659}.gray3{background-color:#828c8f}.gray4{background-color:#b7c0c3}.gray5{background-color:#e6eaed}.gray4,.gray5{color:#272b2d}.pink1{background-color:#ee0788}.pink2{background-color:#fec1d1}.pink3{background-color:#ffedf2}.pink2,.pink3{color:#ee0788}.red1{background-color:#e80044}.red2{background-color:#ffc7d8}.red3{background-color:#ffe3eb}.red2,.red3{color:#e80044}.green1{background-color:#43bb6e}.green2{background-color:#c6ebd3}.green3{background-color:#e8f7ef}.green2,.green3{color:#43bb6e}.orange1{background-color:#f18902}.orange2{background-color:#fec57b}.orange3{background-color:#ffeed7}.orange2,.orange3{color:#f18902}.icon-sample{margin:0 5px}[class^="grid-flex-cell"]{border:1px solid #e6eaed;border-radius:3px;text-align:center}[class^="grid-flex-cell"] p{margin:0}@media screen and (min-width:769px){body{border-width:10px}}@media screen and (max-width:768px){body{border-width:5px}.modal-sample .modal{width:auto;left:0;margin-left:0}}@media screen and (max-width:480px){.button{display:inline-block;width:auto}.logo{margin:30px auto;display:block;text-align:center}.color{width:100%}}@media screen and (min-width:481px){.logo{margin:20px;display:inline-block}.color{width:250px;margin-bottom:5px}}html,body,h1,h2,h3,p{font-family:"Roboto"}body{border:none}.hidden{display:none}.top-nav{margin-bottom:60px;border-bottom:1px solid #e6eaed;background-color:white}.top-nav ul{margin:0}.top-nav li{padding:15px}.top-nav a{display:inline-block}.top-nav label{font-weight:normal;font-size:16px;line-height:1}.navbar{background-color:#FFF;border-radius:0;z-index:2;margin-top:15px;padding-bottom:15px;margin-bottom:30px;border-bottom:1px solid 
#e6eaed;background-color:white}.navbar-brand{background-image:url("data:image/svg+xml,%3C%3Fxml%20version%3D%221.0%22%20encoding%3D%22utf-8%22%3F%3E%0D%0A%3C!--%20Generator%3A%20Adobe%20Illustrator%2019.1.0%2C%20SVG%20Export%20Plug-In%20.%20SVG%20Version%3A%206.00%20Build%200)%20%20--%3E%0D%0A%3Csvg%20version%3D%221.1%22%20id%3D%22Layer_1%22%20xmlns%3D%22http%3A%2F%2Fwww.w3.org%2F2000%2Fsvg%22%20xmlns%3Axlink%3D%22http%3A%2F%2Fwww.w3.org%2F1999%2Fxlink%22%20x%3D%220px%22%20y%3D%220px%22%0D%0A%09%20viewBox%3D%220%200%20133%2032%22%20style%3D%22enable-background%3Anew%200%200%20133%2032%3B%22%20xml%3Aspace%3D%22preserve%22%3E%0D%0A%3Cstyle%20type%3D%22text%2Fcss%22%3E%0D%0A%09.st0%7Bfill%3A%23E48125%3B%7D%0D%0A%09.st1%7Bfill%3A%23414042%3B%7D%0D%0A%3C%2Fstyle%3E%0D%0A%3Cpath%20class%3D%22st0%22%20d%3D%22M133%2C14.2c0-2.2-1.6-4.1-4-4.1l-8%2C1.4c0.7-0.7%2C3-3.7%2C3.6-4.3c1.6-1.7%2C1.7-4.3%2C0.1-5.8c-0.8-0.8-1.8-1.2-2.9-1.2%0D%0A%09c-1.1%2C0-2.2%2C0.4-3%2C1.3c0%2C0-2.8%2C2.7-5.4%2C5.2c-1.6%2C1.3-5.3%2C3.4-6.4%2C3.5h-2V28h1.9c0.8%2C0%2C2.2%2C0.8%2C3.5%2C1.5c2.2%2C1.2%2C4.5%2C2.5%2C7.2%2C2.5%0D%0A%09c0.3%2C0%2C6.9%2C0%2C6.9%2C0c2.1%2C0%2C4.3-1.3%2C4.3-3.3c0-0.3-0.2-0.9-0.3-1.2c1.2-0.8%2C2-2.1%2C2-3.7c0-0.4-0.1-0.9-0.2-1.3%0D%0A%09c1.3-0.8%2C2.1-2.1%2C2.1-3.7c0-0.7-0.2-1.3-0.5-1.9C132.9%2C16.2%2C133%2C15.3%2C133%2C14.2L133%2C14.2z%20M131%2C14.2c0%2C1.2-0.7%2C2.2-1.9%2C2.2h-0.8%0D%0A%09c1.4%2C0%2C2.2%2C1%2C2.2%2C2.4c0%2C1.4-0.8%2C2.3-2.2%2C2.3h-1.9c1.4%2C0%2C2.8%2C1%2C2.5%2C2.8c-0.2%2C1.4-0.8%2C2.2-2.2%2C2.2H125c1.1%2C0%2C2%2C1.3%2C2%2C2.3%0D%0A%09c0%2C1-0.9%2C1.7-2%2C1.7h-7c-4.1%2C0-8.2-3.9-10.9-3.9v-14c2.1%2C0%2C6.1-2.7%2C8.8-4.9l4.4-4.4c0.5-0.4%2C1.1-0.7%2C1.6-0.7s1.1%2C0.2%2C1.5%2C0.6%0D%0A%09c0.8%2C0.8%2C0.8%2C2.2-0.1%2C3.1c0%2C0-6.4%2C8.3-6.2%2C8.3l11.9-2C130.3%2C12.1%2C131%2C13%2C131%2C14.2L131%2C14.2z%22%2F%3E%0D%0A%3Cg%3E%0D%0A%09%3Cpath%20class%3D%22st1%22%20d%3D%22M5.5%2C23.9h8.8v4.4H0V4.6h5.5V23.9z%22%2F%3E%0D%0A%09%3Cpath%20class%3D%22st1%22%20d%3D%22M29.9%2C24.3c0.8%2C0%2C1.4-0.1%2C2-0.2c0.6-0.1%2C1.1-0.3%2C1.7-0.5V20h-2.3c-0.3%2C0-0.6-0.1-0.8-0.3%0D%0A%09%09c-0.2-0.2-0.3-0.4-0.3-0.7v-3h8.4v10c-0.6%2C0.4-1.2%2C0.8-1.9%2C1.1c-0.7%2C0.3-1.4%2C0.6-2.1%2C0.8c-0.7%2C0.2-1.5%2C0.4-2.4%2C0.5%0D%0A%09%09c-0.8%2C0.1-1.7%2C0.1-2.7%2C0.1c-1.7%2C0-3.3-0.3-4.8-0.9c-1.5-0.6-2.7-1.5-3.8-2.5c-1.1-1.1-1.9-2.4-2.5-3.8c-0.6-1.5-0.9-3.1-0.9-4.8%0D%0A%09%09c0-1.8%2C0.3-3.4%2C0.9-4.9c0.6-1.5%2C1.4-2.7%2C2.5-3.8c1.1-1.1%2C2.4-1.9%2C3.9-2.5c1.5-0.6%2C3.3-0.9%2C5.2-0.9c1%2C0%2C1.9%2C0.1%2C2.8%2C0.2%0D%0A%09%09s1.7%2C0.4%2C2.4%2C0.7s1.4%2C0.6%2C2%2C1c0.6%2C0.4%2C1.1%2C0.8%2C1.6%2C1.3l-1.6%2C2.4c-0.2%2C0.2-0.3%2C0.4-0.5%2C0.5c-0.2%2C0.1-0.4%2C0.2-0.7%2C0.2%0D%0A%09%09c-0.3%2C0-0.7-0.1-1-0.3c-0.4-0.3-0.8-0.5-1.2-0.7c-0.4-0.2-0.8-0.3-1.2-0.5C32.1%2C9.1%2C31.6%2C9%2C31.2%2C9c-0.4-0.1-0.9-0.1-1.5-0.1%0D%0A%09%09c-1%2C0-2%2C0.2-2.8%2C0.5c-0.8%2C0.4-1.5%2C0.9-2.1%2C1.5c-0.6%2C0.7-1%2C1.5-1.3%2C2.4s-0.5%2C2-0.5%2C3.1c0%2C1.3%2C0.2%2C2.4%2C0.5%2C3.3c0.3%2C1%2C0.8%2C1.8%2C1.4%2C2.5%0D%0A%09%09c0.6%2C0.7%2C1.3%2C1.2%2C2.2%2C1.5C28%2C24.1%2C28.9%2C24.3%2C29.9%2C24.3z%22%2F%3E%0D%0A%09%3Cpath%20class%3D%22st1%22%20d%3D%22M62.9%2C4.6V9h-6.6v19.3h-5.5V9h-6.6V4.6H62.9z%22%2F%3E%0D%0A%09%3Cpath%20class%3D%22st1%22%20d%3D%22M81.8%2C18.7c0.2%2C0.4%2C0.4%2C0.9%2C0.6%2C1.4c0.2-0.5%2C0.4-0.9%2C0.6-1.4c0.2-0.4%2C0.4-0.9%2C0.6-1.3l6.1-11.9%0D%0A%09%09C89.7%2C5.2%2C89.8%2C5.1%2C90%2C5c0.1-0.1%2C0.2-0.2%2C0.4-0.3c0.1-0.1%2C0.3-0.1%2C0.5-0.1c0.2%2C0%2C0.4%2C0%2C0.6%2C0h4.2v23.6h-4.8V14.6%0D%0A%09%09c0-0.7%2C0-1.4%2C0.1-2.1l-6.3%2C12.1c-0
.2%2C0.4-0.5%2C0.7-0.8%2C0.9s-0.7%2C0.3-1.1%2C0.3H82c-0.4%2C0-0.8-0.1-1.1-0.3c-0.3-0.2-0.6-0.5-0.8-0.9%0D%0A%09%09l-6.3-12.2c0%2C0.4%2C0.1%2C0.8%2C0.1%2C1.1c0%2C0.4%2C0%2C0.7%2C0%2C1v13.6h-4.8V4.6h4.2c0.2%2C0%2C0.4%2C0%2C0.6%2C0c0.2%2C0%2C0.3%2C0%2C0.5%2C0.1%0D%0A%09%09c0.1%2C0.1%2C0.3%2C0.1%2C0.4%2C0.3c0.1%2C0.1%2C0.2%2C0.3%2C0.3%2C0.5l6.1%2C12C81.4%2C17.8%2C81.6%2C18.3%2C81.8%2C18.7z%22%2F%3E%0D%0A%3C%2Fg%3E%0D%0A%3C%2Fsvg%3E%0D%0A");width:133px;min-width:133px;max-width:133px;height:32px;min-height:32px;max-height:32px;background-size:133px;background-repeat:no-repeat;background-position:center center;background-size:116px;background-position-x:0;display:inline-block;float:left;border-radius:3px;margin-left:12px}.navbar img{width:32px;min-width:32px;max-width:32px;height:32px;min-height:32px;max-height:32px;border-radius:50%}.navbar-nav.navbar-right{margin:0;list-style:none;display:inline-block;float:right}.navbar-nav.navbar-right li{line-height:32px;max-height:32px;padding:0;display:inline-block;vertical-align:middle}.navbar.navbar-homepage{margin-bottom:0;border:none}.navbar.navbar-homepage .button{padding:0 30px;text-transform:uppercase;font-size:14px;line-height:28px;color:#E48125;border-color:#E48125}.footer{background:#FFF;padding:30px 0;color:#AAA;text-align:right}.footer a{color:#666}.message{margin-top:100px}.banner{height:calc(100vh - 10px);min-height:500px;background:#FFF;display:flex;align-items:center;justify-content:center}.banner-text{text-align:center}.banner-text p{font-size:22px;max-width:700px;margin:40px auto}.banner-text p a{color:#4d5659}.banner-text .button{color:#E48125;border:2px solid #E48125;background:#FFF}.banner-text .button:last-child{color:#CCC;border:2px solid #CCC}.banner-actions{max-width:500px;margin:0 auto;display:flex}.banner-actions a{text-transform:uppercase;flex:1 1 50%}.banner-actions a:first-child{margin-right:10px}.banner-actions a span{margin-right:10px}.ribbon{background:#565656;background:#666;padding:60px 0;margin-top:120px}.ribbon h4{color:#FFF;text-shadow:0 1px 2px rgba(0,0,0,0.2);text-align:center;margin-bottom:30px;padding-bottom:0;font-size:32px}.ribbon p{font-weight:300;color:#FFF;padding-bottom:0;text-align:center;max-width:400px;margin:0 auto;margin-bottom:30px;margin-top:0;font-size:18px}.ribbon span.separator{color:rgba(255,255,255,0.5);margin:0 20px}.ribbon .button-outlined{color:#FFF;border-color:#FFF;text-transform:uppercase;border-radius:20px;padding:5px 40px;padding:5px 20px;border-radius:3px}.ribbon .button-outlined .octicon{margin-right:6px}.ribbon .container{text-align:center}.ribbon-login{background:#fff;margin-top:10px;padding-top:0;padding-bottom:0;margin-bottom:200px}.ribbon-login .button{text-transform:uppercase;border-radius:30px;padding:7px 30px;font-size:18px}.pricing{margin-top:100px}.pricing .grid-flex-cell{min-height:300px}.feature{padding-top:60px;border-top:1px solid #EEE}.features .grid-flex-container{background:#FFF;margin-bottom:90px}.features .grid-flex-container:last-child{margin-bottom:90px}.features .grid-flex-cell{border:none;text-align:left}.issues .container{max-width:600px !IMPORTANT}.feature{margin-bottom:90px}.feature>div:first-child{text-align:center}.feature>div:first-child p{font-size:18px;max-width:600px;margin:0 auto}.maintainers{max-width:600px;margin:0 auto;margin-top:60px;-webkit-touch-callout:none;-webkit-user-select:none;-khtml-user-select:none;-moz-user-select:none;-ms-user-select:none;user-select:none;border:1px solid #e6eaed;box-shadow:3px 3px 
rgba(0,0,0,0.02);border-radius:3px}.maintainers *{cursor:default}.maintainers div{font-family:"Roboto Mono";font-size:14px;color:#CCC;background:#fafafa;border-bottom:1px solid #f0f0f0;text-align:left;padding:10px}.maintainers table{margin-bottom:0}.maintainers table td{border:none;font-family:"Roboto Mono";font-size:13px;padding:5px 15px;color:#999}.maintainers table td:first-child{text-align:right;width:50px;border-right:1px solid #f0f0f0}.maintainers table tr:first-child td{padding-top:10px}.maintainers table tr:last-child td{padding-bottom:10px}.footer{background:#FFF;padding:30px 0;color:#AAA;text-align:right}.footer a{color:#666}.issues{padding-left:10px;margin:60px auto;-webkit-touch-callout:none;-webkit-user-select:none;-khtml-user-select:none;-moz-user-select:none;-ms-user-select:none;user-select:none}.issues *{cursor:default}.issues .issue{display:flex;margin-bottom:30px}.issues .issue img{width:32px;height:32px;border-radius:3px;margin-right:20px;opacity:.5}.issues .issue em{font-weight:bold;color:#999;font-style:normal}.issues .issue b{font-size:28px}.issues .issue>div{flex:1 1 auto;box-shadow:3px 3px rgba(0,0,0,0.02)}.issues .issue>div div:first-child{color:#CCC;background:#fafafa;border:1px solid #f0f0f0;line-height:32px;padding:0 10px;border-top-right-radius:1px;border-top-left-radius:1px;font-size:14px}.issues .issue>div div:last-child{border:1px solid #f0f0f0;border-top:none;border-bottom-right-radius:1px;border-bottom-left-radius:1px;padding:10px}.issues .checks{display:flex;margin-bottom:20px}.issues .checks>div:first-child{width:32px;height:32px;border-radius:3px;margin-right:20px;background:#e2cc7a}.issues .checks>div:first-child .octicon{color:#FFF;font-size:28px;text-align:center;line-height:32px;display:inline-block;width:32px}.issues .checks>div:last-child{border:1px solid #e2cc7a;border-radius:4px;overflow:hidden;flex:1 1 auto;box-shadow:3px 3px rgba(0,0,0,0.02);box-shadow:3px 3px rgba(226,204,122,0.2)}.issues .checks>div:last-child>div{padding:10px 15px}.issues .checks strong{font-size:14px;font-weight:bold;color:#cea61b}.issues .checks p{font-size:14px;color:#767676;margin:0;margin-bottom:10px;padding:0}.issues .checks ul{padding:0;margin:0;border-top:1px solid #eee}.issues .checks li{list-style:none;padding:7px 15px;font-size:14px;background:#fafafa;border-bottom:1px solid #f0f0f0}.issues .checks li:last-child{border-bottom:none}.issues .checks li .dot-green{margin-right:5px;color:#6cc644}.issues .checks li .dot-yellow{color:#e2cc7a;margin-right:5px}.issues .checks li em{font-style:normal;font-weight:bold}.issues .checks-success>div:first-child{background:#6cc644}.issues .checks-success>div:last-child{border:1px solid #6cc644;box-shadow:3px 3px rgba(0,0,0,0.02)}.issues .checks-success>div:last-child .button{background:#6cc644;font-size:14px;padding:5px 15px;margin:5px}.issues .checks-success strong{color:#6cc644}.issues .more{text-align:center;color:#DDD}.issues .more .octicon{font-size:26px} \ No newline at end of file +html { + font-family: sans-serif; + -ms-text-size-adjust: 100%; + -webkit-text-size-adjust: 100% +} + +body { + margin: 0 +} + +article, +aside, +details, +figcaption, +figure, +footer, +header, +hgroup, +main, +menu, +nav, +section, +summary { + display: block +} + +audio, +canvas, +progress, +video { + display: inline-block; + vertical-align: baseline +} + +[hidden], +template { + display: none +} + +a { + background-color: transparent +} + +a:active, +a:hover { + outline: 0 +} + +abbr[title] { + border-bottom: 1px dotted +} + +b, +strong { + 
font-weight: bold +} + +dfn { + font-style: italic +} + +h1 { + font-size: 2em; + margin: .67em 0 +} + +mark { + background: #ff0; + color: #000 +} + +small { + font-size: 80% +} + +sub, +sup { + font-size: 75%; + line-height: 0; + position: relative; + vertical-align: baseline +} + +sup { + top: -0.5em +} + +sub { + bottom: -0.25em +} + +img { + border: 0 +} + +figure { + margin: 1em 40px +} + +hr { + -moz-box-sizing: content-box; + box-sizing: content-box; + height: 0 +} + +pre { + overflow: auto +} + +code, +kbd, +pre, +samp { + font-family: monospace, monospace; + font-size: 1em +} + +button, +input, +optgroup, +select, +textarea { + color: inherit; + font: inherit; + margin: 0 +} + +button { + overflow: visible +} + +button, +select { + text-transform: none +} + +button, +html input[type="button"], +input[type="reset"], +input[type="submit"] { + -webkit-appearance: button; + cursor: pointer +} + +button[disabled], +html input[disabled] { + cursor: default +} + +button::-moz-focus-inner, +input::-moz-focus-inner { + border: 0; + padding: 0 +} + +input { + line-height: normal +} + +input[type="checkbox"], +input[type="radio"] { + box-sizing: border-box; + padding: 0 +} + +input[type="number"]::-webkit-inner-spin-button, +input[type="number"]::-webkit-outer-spin-button { + height: auto +} + +input[type="search"] { + -webkit-appearance: textfield; + box-sizing: content-box +} + +input[type="search"]::-webkit-search-cancel-button, +input[type="search"]::-webkit-search-decoration { + -webkit-appearance: none +} + +fieldset { + border: 1px solid #c0c0c0; + margin: 0 2px; + padding: .35em .625em .75em +} + +legend { + border: 0; + padding: 0 +} + +textarea { + overflow: auto +} + +optgroup { + font-weight: bold +} + +table { + border-collapse: collapse; + border-spacing: 0 +} + +td, +th { + padding: 0 +} + +html { + font-family: sans-serif; + -ms-text-size-adjust: 100%; + -webkit-text-size-adjust: 100% +} + +body { + margin: 0 +} + +article, +aside, +details, +figcaption, +figure, +footer, +header, +hgroup, +main, +menu, +nav, +section, +summary { + display: block +} + +audio, +canvas, +progress, +video { + display: inline-block; + vertical-align: baseline +} + +audio:not([controls]) { + display: none; + height: 0 +} + +[hidden], +template { + display: none +} + +a { + background-color: transparent +} + +a:active, +a:hover { + outline: 0 +} + +abbr[title] { + border-bottom: 1px dotted +} + +b, +strong { + font-weight: bold +} + +dfn { + font-style: italic +} + +h1 { + font-size: 2em; + margin: .67em 0 +} + +mark { + background: #ff0; + color: #000 +} + +small { + font-size: 80% +} + +sub, +sup { + font-size: 75%; + line-height: 0; + position: relative; + vertical-align: baseline +} + +sup { + top: -0.5em +} + +sub { + bottom: -0.25em +} + +img { + border: 0 +} + +svg:not(:root) { + overflow: hidden +} + +figure { + margin: 1em 40px +} + +hr { + -moz-box-sizing: content-box; + box-sizing: content-box; + height: 0 +} + +pre { + overflow: auto +} + +code, +kbd, +pre, +samp { + font-family: monospace, monospace; + font-size: 1em +} + +button, +input, +optgroup, +select, +textarea { + color: inherit; + font: inherit; + margin: 0 +} + +button { + overflow: visible +} + +button, +select { + text-transform: none +} + +button, +html input[type="button"], +input[type="reset"], +input[type="submit"] { + -webkit-appearance: button; + cursor: pointer +} + +button[disabled], +html input[disabled] { + cursor: default +} + +button::-moz-focus-inner, +input::-moz-focus-inner { + border: 0; + padding: 0 +} + +input
{ + line-height: normal +} + +input[type="checkbox"], +input[type="radio"] { + -moz-box-sizing: border-box; + box-sizing: border-box; + padding: 0 +} + +input[type="number"]::-webkit-inner-spin-button, +input[type="number"]::-webkit-outer-spin-button { + height: auto +} + +input[type="search"] { + -webkit-appearance: textfield; + -moz-box-sizing: content-box; + box-sizing: content-box +} + +input[type="search"]::-webkit-search-cancel-button, +input[type="search"]::-webkit-search-decoration { + -webkit-appearance: none +} + +fieldset { + border: 1px solid #c0c0c0; + margin: 0 2px; + padding: .35em .625em .75em +} + +legend { + border: 0; + padding: 0 +} + +textarea { + overflow: auto +} + +optgroup { + font-weight: bold +} + +table { + border-collapse: collapse; + border-spacing: 0 +} + +td, +th { + padding: 0 +} + +*, +*:after, +*:before { + -moz-box-sizing: border-box; + box-sizing: border-box +} + +html { + height: 100% +} + +body { + min-height: 100%; + font-family: 'proxima-nova', helvetica neue, helvetica, arial, sans-serif; + font-size: 16px; + line-height: 1.5; + color: #4d5659; + text-rendering: optimizeSpeed +} + +p { + margin-top: 0; + margin-bottom: 30px +} + +::-moz-selection { + background-color: #74ddff; + color: #fff +} + +::selection { + background-color: #74ddff; + color: #fff +} + +a { + color: #00b0e9; + text-decoration: none; + cursor: pointer +} + +a:focus { + outline: none +} + +a:hover { + color: #008dba +} + +.pull-left { + float: left +} + +.pull-right { + float: right +} + +.hide { + display: none +} + +.cf:before, +.cf:after { + content: " "; + display: table +} + +.cf:after { + clear: both +} + +img, +iframe, +video { + max-width: 100% +} + +h1, +h2, +h3, +h4, +h5, +h6 { + margin-top: 0; + margin-bottom: 30px; + line-height: 1.25; + font-family: 'aktiv-grotesk', helvetica neue, helvetica, arial, sans-serif +} + +h1, +h2, +h3, +h4, +h5, +h6, +strong { + color: #000 +} + +h1, +h2, +h3, +h4, +h5, +h6 { + font-weight: 300 +} + +h6 { + text-transform: uppercase; + color: #828c8f +} + +abbr { + color: #828c8f; + border-bottom: 1px dotted #b7c0c3; + cursor: help +} + +small, +sup, +sub { + font-size: 12px +} + +mark { + padding: 3px 5px; + background: #ffffb3 +} + +blockquote { + margin: 30px 0; + padding: 0 15px; + color: #828c8f; + font: italic 16px/1.55 Georgia, Times, serif +} + +blockquote p:only-child, +blockquote p:last-child { + margin: 0 +} + +blockquote footer { + margin-top: 15px; + font-size: 16px; + color: #9ba3a5 +} + +blockquote footer:before { + content: '\2014 \00A0' +} + +.blockquote-centered { + text-align: center +} + +.blockquote-medium { + font-size: 22px +} + +.blockquote-large { + font-size: 36px; + line-height: 1.3 +} + +@media screen and (max-width:899px) { + h1 { + font-size: 38px + } + h2 { + font-size: 32px + } + h3 { + font-size: 26px + } + h4 { + font-size: 20px + } + h5 { + font-size: 16px + } + h6 { + font-size: 14px + } + .blockquote-large { + font-size: 32px + } +} + +@media screen and (min-width:900px) { + h1 { + font-size: 42px + } + h2 { + font-size: 36px + } + h3 { + font-size: 30px + } + h4 { + font-size: 24px + } + h5 { + font-size: 18px + } + h6 { + font-size: 16px + } +} + +ul, +ol { + margin: 0 0 30px 0; + padding-left: 30px +} + +ul li>ul, +ol li>ul, +ul li>ol, +ol li>ol { + margin-top: 30px +} + +ul li ul li, +ol li ul li { + list-style: circle +} + +ul li li ul li, +ol li li ul li { + list-style: square +} + +ul li { + list-style: disc +} + +.list-unstyled { + padding-left: 0 +} + +.list-unstyled li { + list-style: none +} 
+ +.list-inline li { + display: inline-block +} + +dl dt { + font-weight: 500; + color: #000 +} + +dl dd { + margin: 0 +} + +.container { + margin: 0 auto +} + +.grid-flex-container { + padding: 0; + list-style: none; + display: -webkit-flex; + display: -ms-flexbox; + display: flex; + -ms-flex-flow: row wrap; + flex-flow: row wrap; + -webkit-flex-flow: row wrap +} + +[class^="grid-flex-cell"] { + margin-bottom: 30px +} + +@media screen and (max-width:600px) { + .container { + width: 90% + } + [class^="grid-flex-cell"] { + -webkit-flex: 0 0 100%; + -ms-flex: 0 0 100%; + flex: 0 0 100% + } +} + +@media screen and (min-width:601px) { + [class^="grid-flex-cell"] { + -webkit-flex: 1; + -ms-flex: 1; + flex: 1; + padding: 15px 0 + } + [class^="grid-flex-cell"]:not(:first-child) { + margin-left: 30px + } + .grid-flex-cell-1of2, + .grid-flex-cell-1of3, + .grid-flex-cell-1of4 { + -webkit-flex: none; + -ms-flex: none; + flex: none + } + .grid-flex-cell-1of2 { + width: 50% + } + .grid-flex-cell-1of3 { + width: 34% + } + .grid-flex-cell-1of4 { + width: 25% + } +} + +@media screen and (min-width:481px) and (max-width:900px) { + .container { + width: 85% + } +} + +@media screen and (min-width:901px) { + .container { + width: 70% + } +} + +select, +.form-input, +.form-checkbox, +.form-radio { + margin-bottom: 15px +} + +legend, +label { + font-weight: 800 +} + +legend { + padding: 0 15px; + font-size: 22px; + color: #000 +} + +fieldset { + border: 1px solid #e6eaed; + border-radius: 3px; + padding: 30px; + margin: 0 0 30px 0 +} + +label { + display: block; + margin-bottom: 5px; + font-size: 12px; + cursor: pointer +} + +select, +.form-input, +.form-radio, +.form-checkbox { + outline: 0 +} + +.form-inline { + overflow: hidden +} + +.form-input { + display: block; + padding: 10px; + width: 100%; + border-radius: 3px; + border: 1px solid #e6eaed; + -webkit-appearance: none +} + +.form-input:focus { + transition: all .2s ease-in; + border: 1px solid #74ddff; + background-color: #f8fdff +} + +.form-input:disabled { + background-color: #fafbfb; + cursor: not-allowed +} + +.form-input:required:focus { + border: 1px solid #fec57b; + background-color: #fff9f2 +} + +.form-input.input-invalid { + border: 1px solid #ffc7d8; + color: #ff749d +} + +.form-input.input-invalid:focus { + border: 1px solid #e80044; + background-color: #fff4f7 +} + +.form-input.input-valid { + border: 1px solid #c6ebd3; + color: #8ed6a8 +} + +.form-input.input-valid:focus { + border: 1px solid #43bb6e; + background-color: #f4fbf6 +} + +textarea { + resize: none; + min-height: 90px +} + +input[type=file], +label.radio, +label.checkbox, +select { + cursor: pointer +} + +input[type=file] { + padding: 10px; + background-color: #f8f9fa; + font-size: 12px; + color: #b7c0c3 +} + +select { + border: 1px solid #e6eaed; + height: 40px; + padding: 10px; + width: 100%; + background-color: #fff +} + +select:focus { + border: 1px solid #74ddff +} + +select:focus:-moz-focusring { + color: transparent; + text-shadow: 0 0 0 #000 +} + +.input-radio, +.input-checkbox { + margin: 5px 0; + font-size: 16px; + font-weight: normal +} + +.input-radio.disabled, +.input-checkbox.disabled { + color: #b7c0c3; + cursor: not-allowed +} + +.input-radio input[type=checkbox], +.input-checkbox input[type=checkbox], +.input-radio input[type=radio], +.input-checkbox input[type=radio] { + display: inline-block; + margin-right: 10px +} + +@media screen and (min-width:768px) { + .form-inline.form-no-labels .button { + margin-top: 0 + } + .form-inline .form-element { + float: left; 
+ width: 50% + } + .form-inline .button { + margin-top: 22.5px + } + .form-inline .form-element:not(:first-of-type) { + padding-left: 5px + } + .form-inline .form-input, + .form-inline .input-checkbox, + .form-inline .input-radio, + .form-inline select { + margin: 0 + } +} + +.button { + display: inline-block; + border: none; + border-radius: 3px; + padding-left: 15px; + padding-right: 15px; + text-align: center; + cursor: pointer; + background-color: #00b0e9; + transition-delay: 0; + transition-duration: .3s; + transition-timing-function: ease-in; + transition-property: background, border-color; + -webkit-user-select: none; + -moz-user-select: none; + -ms-user-select: none; + user-select: none; + appearance: none +} + +.button:not([class*='button-outlined']) { + padding-top: 10px; + padding-bottom: 10px +} + +.button:hover { + background-color: #009ed2 +} + +.button:disabled, +.button .button-disabled { + background-color: #74ddff; + cursor: not-allowed +} + +.button:focus { + outline: none +} + +.button, +.button:hover { + color: #fff +} + +.button-small { + padding: 3px 5px; + font-size: 12px +} + +.button-large { + padding: 15px 30px; + font-size: 22px +} + +.button-unstyled, +.button-unstyled:hover { + background-color: transparent +} + +.button-unstyled { + padding: 0; + color: #00b0e9 +} + +.button-unstyled:hover { + color: #008dba +} + +.button-neutral { + background-color: #e6eaed +} + +.button-neutral:hover { + background-color: #b0bcc6 +} + +.button-neutral:disabled, +.button-neutral .button-disabled { + background-color: #e6eaed; + color: #b7c0c3 +} + +.button-neutral:disabled:hover, +.button-neutral .button-disabled:hover { + color: #b7c0c3 +} + +.button-neutral, +.button-neutral:hover { + color: #272b2d +} + +.button-approve { + background-color: #43bb6e +} + +.button-approve:hover { + background-color: #369658 +} + +.button-approve:disabled, +.button-approve .button-disabled { + background-color: #c6ebd3 +} + +.button-warn { + background-color: #e80044 +} + +.button-warn:hover { + background-color: #ba0036 +} + +.button-warn:disabled, +.button-warn.button-disabled { + background-color: #ffc7d8 +} + +.button-caution { + background-color: transparent; + color: #e80044; + text-decoration: underline +} + +.button-caution:hover { + background-color: transparent; + color: #ba0036 +} + +.button-caution:disabled, +.button-caution .button-disabled { + color: #b7c0c3 +} + +*[class*='button-outlined'] { + border-width: 2px; + border-style: solid; + padding-top: 8px; + padding-bottom: 8px +} + +*[class*='button-outlined'], +*[class*='button-outlined']:hover, +*[class*='button-outlined']:disabled { + background-color: transparent +} + +.button-outlined { + border-color: #00b0e9; + color: #00b0e9 +} + +.button-outlined:hover { + border-color: #008dba; + color: #008dba +} + +.button-outlined:disabled { + border-color: #74ddff; + color: #74ddff +} + +.button-outlined-neutral { + border-color: #b7c0c3; + color: #828c8f +} + +.button-outlined-neutral:hover { + border-color: #828c8f; + color: #4d5659 +} + +.button-outlined-neutral:disabled, +.button-outlined-neutral.button-disabled { + color: #c5cdcf; + border-color: #e6eaed +} + +.button-outlined-neutral:disabled:hover, +.button-outlined-neutral.button-disabled:hover { + color: #c5cdcf +} + +.button-outlined-approve { + border-color: #43bb6e; + color: #43bb6e +} + +.button-outlined-approve:hover { + border-color: #369658; + color: #369658 +} + +.button-outlined-approve:disabled { + border-color: #c6ebd3; + color: #c6ebd3 +} + 
+.button-outlined-validate { + border-color: #43bb6e; + color: #43bb6e +} + +.button-outlined-validate:hover { + border-color: #369658; + color: #369658 +} + +.button-outlined-validate:disabled { + border-color: #c6ebd3; + color: #c6ebd3 +} + +.button-outlined-warn { + border-color: #e80044; + color: #e80044 +} + +.button-outlined-warn:hover { + border-color: #ba0036; + color: #ba0036 +} + +.button-outlined-warn:disabled { + border-color: #ffc7d8; + color: #ffc7d8 +} + +.button-group .button { + border-radius: 0; + margin-left: -1px; + border: 1px solid #008dba +} + +.button-group .button:first-child, +.button-group .button:first-child:only-child { + margin: 0 +} + +.button-group .button:first-child:only-child { + border-radius: 3px +} + +.button-group .button:first-child { + border-radius: 3px 0 0 3px +} + +.button-group .button:last-child { + border-radius: 0 3px 3px 0 +} + +.button-group .button-neutral, +.button-group .button-outlined { + border: 1px solid #b7c0c3 +} + +@media screen and (max-width:480px) { + .button-group .button { + border-radius: none; + margin: 0; + width: 100% + } + .button-group .button:first-of-type { + border-radius: 3px 3px 0 0 + } + .button-group .button:last-of-type { + border-radius: 0 0 3px 3px + } + .button-group .button:not(:last-of-type) { + border-bottom: none + } +} + +code { + font-family: Menlo, Monaco, Consolas, monospace; + font-size: 12px; + padding: 3px 5px +} + +pre { + display: block; + padding: 15px; + margin-bottom: 30px; + max-width: 100%; + line-height: 1.5; + word-break: break-all; + word-wrap: break-word +} + +pre code { + padding: 0; + white-space: pre-wrap +} + +code, +pre { + border-radius: 3px +} + +.code-light { + color: #000; + background: #f5f7f8 +} + +.code-dark { + color: #5ca1b5; + background: #003546 +} + +.dropdown-standalone { + position: relative; + display: inline-block; + cursor: pointer; + padding: 10px; + border-radius: 3px +} + +.dropdown-standalone:hover { + border-radius: 3px 3px 0 0 +} + +.dropdown-standalone:hover .dropdown { + display: block; + position: absolute; + top: 100%; + left: 0; + width: 100%; + border-radius: 0 0 3px 3px +} + +.dropdown-standalone:hover .dropdown a { + padding: 10px; + display: block +} + +.dropdown-standalone .dropdown { + display: none +} + +.dropdown-standalone .icon-arrow-down { + position: relative; + top: -2px; + margin-left: 5px +} + +.dropdown-outlined, +.dropdown-outlined .dropdown { + border: 1px solid #e6eaed +} + +.dropdown-standalone.dropdown-outlined .dropdown { + width: 101%; + left: -1px +} + +.dropdown-outlined .dropdown { + background-color: #fff +} + +.dropdown-outlined .dropdown li:not(:last-child) { + border-bottom: 1px solid #e6eaed +} + +.dropdown-coloured, +.dropdown-coloured .dropdown { + background-color: #00b0e9 +} + +.dropdown-coloured .button-unstyled, +.dropdown-coloured .dropdown a { + color: #fff +} + +.dropdown-coloured .icon-arrow-down { + border-top-color: #fff +} + +.dropdown-coloured .dropdown li:not(:last-child) { + border-bottom: 1px solid #009ed2 +} + +.dropdown-coloured .dropdown a:hover { + color: #e3f8ff +} + +.dropdown-neutral, +.dropdown-neutral .dropdown { + background-color: #e6eaed +} + +.dropdown-neutral a { + color: #4d5659 +} + +.dropdown-neutral a:hover, +.dropdown-neutral .button-unstyled:hover { + color: #272b2d +} + +.dropdown-neutral .icon-arrow-down { + border-top-color: #4d5659 +} + +.dropdown-neutral .dropdown li:not(:last-child) { + border-bottom: 1px solid #b7c0c3 +} + +.icon-arrow-left, +.icon-arrow-right, +.icon-arrow-up, 
+.icon-arrow-down { + display: inline-block; + width: 0; + height: 0; + line-height: 0; + font-size: 0 +} + +.icon-arrow-up, +.icon-arrow-down { + border-left: 5px solid transparent; + border-right: 5px solid transparent +} + +.icon-arrow-left, +.icon-arrow-right { + border-top: 5px solid transparent; + border-bottom: 5px solid transparent +} + +.icon-arrow-left { + border-right: 5px solid #00b0e9 +} + +.icon-arrow-right { + border-left: 5px solid #00b0e9 +} + +.icon-arrow-down { + border-top: 5px solid #00b0e9 +} + +.icon-arrow-up { + border-bottom: 5px solid #00b0e9 +} + +.media-outlined, +img { + border-radius: 3px +} + +figure { + width: auto; + margin: 0 +} + +figure img { + vertical-align: middle +} + +figure figcaption { + color: #828c8f; + font-size: 12px +} + +.media-outlined { + display: inline-block; + padding: 5px; + border: 1px solid #e6eaed +} + +@media screen and (max-width:480px) { + figure { + text-align: center + } + figure img+img { + margin: 5px auto 0 + } +} + +@media screen and (min-width:481px) { + figure img:not(:last-child):not(:only-child) { + margin-right: 5px + } + figure img:not(:only-child) { + margin-top: 5px + } + figure img+figcaption { + margin-bottom: 5px + } + figure img+figcaption:not(:only-child) { + margin-right: 5px + } +} + +.message { + padding: 15px; + position: relative; + background-color: #e3f8ff; + color: #00b0e9 +} + +.message a { + font-weight: bold +} + +.message p:last-of-type { + margin: 0 +} + +.message:after { + border: solid transparent; + content: ''; + height: 0; + width: 0; + position: absolute; + pointer-events: none; + border-color: rgba(255, 255, 255, 0); + border-width: 5px +} + +.message-dismissable { + padding-right: 30px +} + +.message-full-width { + text-align: center +} + +.message-above:after, +.message-below:after { + left: 50%; + margin-left: -6px +} + +.message-below:after { + border-bottom-color: #e3f8ff; + bottom: 100% +} + +.message-below.message-error:after { + border-bottom-color: #ffe3eb +} + +.message-below.message-success:after { + border-bottom-color: #e8f7ef +} + +.message-below.message-alert:after { + border-bottom-color: #ffeed7 +} + +.message-above:after { + border-top-color: #e3f8ff; + top: 100% +} + +.message-above.message-error:after { + border-top-color: #ffe3eb +} + +.message-above.message-success:after { + border-top-color: #e8f7ef +} + +.message-above.message-alert:after { + border-top-color: #ffeed7 +} + +.message-error { + background-color: #ffe3eb; + color: #e80044 +} + +.message-error a { + color: #ba0036 +} + +.message-error a:hover { + color: #8b0029 +} + +.message-alert { + background-color: #ffeed7; + color: #f18902 +} + +.message-alert a { + color: #c16e02 +} + +.message-alert a:hover { + color: #915201 +} + +.message-success { + background-color: #e8f7ef; + color: #43bb6e +} + +.message-success a { + color: #369658 +} + +.message-success a:hover { + color: #287042 +} + +.close { + display: block; + position: absolute; + top: 50%; + right: 15px; + margin-top: -15px; + cursor: pointer; + font-size: large; + font-weight: bold +} + +form .message { + padding: 10px +} + +.modal { + position: fixed; + top: 50%; + left: 50%; + border-radius: 3px; + z-index: 20; + -webkit-transform: translate(-50%, -50%); + -ms-transform: translate(-50%, -50%); + transform: translate(-50%, -50%); + background-color: #fff; + -webkit-overflow-scrolling: touch +} + +.modal.modal-no-sections { + padding: 30px; + text-align: center +} + +.modal-title { + margin: 0; + float: left +} + +.modal-head, +.modal-body { + 
padding: 15px +} + +.modal-head { + border-bottom: 1px solid #e6eaed +} + +.modal-footer { + padding: 15px 0; + text-align: center; + border-top: 1px solid #e6eaed +} + +.modal-footer button, +.modal-footer .button { + min-width: 120px +} + +.modal-footer button+button, +.modal-footer .button+.button { + margin-left: 5px +} + +.modal-close { + float: right; + color: #b7c0c3; + line-height: 1.5; + font-weight: bold; + font-size: 22px +} + +.modal-close:hover { + color: #828c8f +} + +.has-modal:before { + content: '' +} + +.overlay, +.has-modal:before { + position: fixed; + top: 0; + left: 0; + width: 100%; + height: 100%; + z-index: 10; + box-shadow: inset 0 0 100px rgba(0, 0, 0, 0.7); + background-color: rgba(0, 0, 0, 0.4) +} + +@media screen and (max-width:480px) { + .modal { + height: 100%; + width: 100%; + top: 0; + left: 0; + border-radius: 0; + -webkit-transform: none; + -ms-transform: none; + transform: none + } +} + +@media screen and (min-width:481px) and (max-width:768px) { + .modal { + width: 80% + } +} + +@media screen and (min-width:769px) { + .modal { + width: 50% + } +} + +.top-nav { + margin-bottom: 30px +} + +.top-nav ul { + margin: 0 +} + +.top-nav li { + padding: 15px +} + +.top-nav a { + display: inline-block +} + +.top-nav label { + font-weight: normal; + font-size: 16px; + line-height: 1 +} + +.top-nav .menu-toggle { + visibility: hidden; + position: absolute; + left: -100% +} + +.top-nav .icon-arrow-down { + position: relative; + top: -2px; + margin-left: 5px +} + +.top-nav-light { + border-bottom: 1px solid #e6eaed +} + +.top-nav-light label { + color: #00b0e9 +} + +.top-nav-light, +.top-nav-light li { + background-color: #fff +} + +.top-nav-dark label { + color: #438193 +} + +.top-nav-dark a { + color: #438193 +} + +.top-nav-dark a:hover, +.top-nav-dark a.active, +.top-nav-dark label { + color: #85b8c7 +} + +.top-nav-dark, +.top-nav-dark li { + background-color: #003546 +} + +.top-nav-dark .icon-arrow-down { + border-top-color: #438193 +} + +.breadcrumbs { + padding: 10px 0 +} + +.breadcrumbs li { + color: #b7c0c3 +} + +.breadcrumbs a { + padding: 0 10px +} + +.pagination li:not(:last-child) { + border-right: 1px solid #e6eaed +} + +.pagination em[class^='icon'] { + top: 1px; + position: relative +} + +.pagination .icon-arrow-right { + border-left: 5px solid #00b0e9 +} + +.pagination .icon-arrow-left { + border-right: 5px solid #00b0e9 +} + +.tabs { + border-bottom: 1px solid #e6eaed +} + +.tabs a { + display: block +} + +.side-nav li:not(:last-child) a { + border-bottom: 1px solid #e6eaed +} + +.unavailable-item { + color: #b7c0c3 +} + +.unavailable-item:not([href]), +.unavailable-item:not([href]):hover { + color: #b7c0c3; + cursor: default; + pointer-events: none +} + +.current-item { + color: #4d5659; + font-weight: 800 +} + +.side-nav a, +.pagination a, +.side-nav li, +.pagination li { + display: block +} + +.side-nav a:hover:not(.unavailable-item), +.pagination a:hover:not(.unavailable-item), +.side-nav .current-item, +.pagination .current-item { + background-color: #fafbfb +} + +.pagination, +.breadcrumbs { + display: inline-block +} + +.pagination li, +.breadcrumbs li { + float: left +} + +.breadcrumbs, +.side-nav, +.pagination { + border-radius: 3px; + border: 1px solid #e6eaed +} + +.side-nav a, +.pagination a { + padding: 10px 15px +} + +.breadcrumbs a, +.pagination a { + font-size: 12px +} + +@media screen and (max-width:480px) { + .tabs a { + position: relative; + padding: 10px + } +} + +@media screen and (min-width:481px) { + .tabs a { + padding: 10px 
15px + } + .tabs .current-item { + position: relative; + top: 1px; + border: 1px solid #e6eaed; + border-bottom-color: transparent; + border-radius: 3px 3px 0 0; + background-color: #fff + } +} + +@media screen and (max-width:768px) { + .top-nav ul { + display: none + } + .top-nav ul>li { + padding: 10px + } + .top-nav .icon-arrow-down { + display: none + } + .top-nav label, + .top-nav .menu-toggle:checked~ul, + .top-nav .menu-toggle:checked~ul>li { + display: block + } + .top-nav label { + padding: 15px 15px 15px 0; + margin-bottom: 0; + text-align: right; + cursor: pointer + } + .top-nav .menu-toggle:checked~ul .dropdown { + display: block; + z-index: 1000 + } + .top-nav .menu-toggle:checked~ul .dropdown li { + padding: 5px 15px; + float: left; + border: 0 + } + .top-nav .menu-toggle:checked~ul .dropdown a { + font-size: 12px; + font-weight: normal + } + .top-nav-light .menu-toggle:checked~ul>li:not(:last-child) { + border-bottom: 1px solid #e6eaed + } + .top-nav-dark .menu-toggle:checked~ul>li:not(:last-child) { + border-bottom: 1px solid #1f4753 + } +} + +@media screen and (min-width:769px) { + .top-nav label { + display: none + } + .top-nav .has-dropdown { + position: relative + } + .top-nav .has-dropdown:hover .dropdown { + display: block + } + .top-nav .dropdown { + display: none; + position: absolute; + top: 100%; + left: 0 + } + .top-nav .dropdown li { + display: block + } + .top-nav-light .dropdown { + border-left: 1px solid #e6eaed; + border-right: 1px solid #e6eaed + } + .top-nav-light .dropdown li { + border-bottom: 1px solid #e6eaed + } + .top-nav-dark .dropdown li { + border-bottom: 1px solid #1f4753 + } +} + +.progress, +.meter { + border-radius: 3px; + line-height: 0 +} + +.progress.progress-rounded, +.progress-rounded .meter { + border-radius: 50px +} + +.progress { + border: 1px solid #e6eaed; + background-color: #fdfdfd +} + +.progress .meter { + display: inline-block; + background-color: #00b0e9; + height: 100% +} + +.progress-small { + height: 15px +} + +.progress-large { + height: 25px +} + +.progress-success .meter { + background-color: #43bb6e +} + +.progress-warning .meter { + background-color: #f18902 +} + +.progress-error .meter { + background-color: #e80044 +} + +.table-responsive { + overflow-x: auto +} + +table { + width: 100%; + max-width: 100%; + margin-bottom: 30px +} + +td, +th { + text-align: left; + padding: 10px; + border-bottom: 1px solid #e6eaed +} + +thead th { + font-weight: 800; + color: #4d5659 +} + +tr.table-cell-error { + background-color: #ffe3eb; + color: #e80044 +} + +tr.table-cell-error a { + color: #ba0036 +} + +tr.table-cell-error a:hover { + color: #8b0029 +} + +tr.table-cell-success { + background-color: #e8f7ef; + color: #43bb6e +} + +tr.table-cell-success a { + color: #369658 +} + +tr.table-cell-success a:hover { + color: #287042 +} + +tr.table-cell-alert { + background-color: #ffeed7; + color: #f18902 +} + +tr.table-cell-alert a { + color: #c16e02 +} + +tr.table-cell-alert a:hover { + color: #915201 +} + +.table-striped tr:nth-child(even) { + color: #828c8f +} + +.table-outlined { + border: 1px solid #e6eaed; + border-radius: 3px +} + +.table-outlined tr:last-of-type td { + border: none +} + +.table-with-hover tr:hover, +.table-striped tr:nth-child(even) { + background-color: #fafbfb +} + +@media screen and (max-width:767px) { + table td, + table th { + padding: 5px 10px + } +} + +.avatar-radius { + border-radius: 3px +} + +.avatar-rounded { + border-radius: 100% +} + +.avatar-small { + height: 25px; + width: 25px +} + 
+.avatar-medium { + height: 50px; + width: 50px +} + +.avatar-large { + height: 100px; + width: 100px +} + +@media print { + * { + background: transparent !important; + color: #000 !important; + box-shadow: none !important; + text-shadow: none !important + } + a, + a:visited { + text-decoration: underline + } + a[href]:after { + content: " (" attr(href) ")" + } + abbr[title]:after { + content: " (" attr(title) ")" + } + a[href^="#"]:after, + a[href^="javascript:"]:after { + content: '' + } + pre, + blockquote { + page-break-inside: avoid + } + thead { + display: table-header-group + } + tr, + img { + page-break-inside: avoid + } + img { + max-width: 100% !important + } + p, + h2, + h3 { + orphans: 3; + widows: 3 + } + h2, + h3 { + page-break-after: avoid + } +} + +body { + border-style: solid; + border-color: #f2f4f6 +} + +pre { + color: #000; + background: #f5f7f8 +} + +code { + color: #000; + background: #f5f7f8 +} + +header { + padding-bottom: 30px; + border-bottom: 1px solid #f2f4f6 +} + +header h1 { + margin-bottom: 0 +} + +header p { + font-size: 22px +} + +header .button { + margin: 30px 0 +} + +footer[role='contentinfo'] { + padding: 30px 0; + border-top: 1px solid #f2f4f6; + color: #828c8f; + text-align: center +} + +footer[role='contentinfo'] li:not(:last-of-type):after { + content: '\2219'; + margin: 0 5px 0 10px +} + +footer[role='contentinfo'] p { + margin-bottom: 0 +} + +footer[role='contentinfo'] .logo { + margin-top: 30px; + display: inline-block; + width: 17px; + height: 20px; + background: url("data:image/svg+xml;base64,PD94bWwgdmVyc2lvbj0iMS4wIiBlbmNvZGluZz0idXRmLTgiPz4NCjwhLS0gR2VuZXJhdG9yOiBBZG9iZSBJbGx1c3RyYXRvciAxNy4xLjAsIFNWRyBFeHBvcnQgUGx1Zy1JbiAuIFNWRyBWZXJzaW9uOiA2LjAwIEJ1aWxkIDApICAtLT4NCjwhRE9DVFlQRSBzdmcgUFVCTElDICItLy9XM0MvL0RURCBTVkcgMS4xLy9FTiIgImh0dHA6Ly93d3cudzMub3JnL0dyYXBoaWNzL1NWRy8xLjEvRFREL3N2ZzExLmR0ZCI+DQo8c3ZnIHZlcnNpb249IjEuMSIgaWQ9IkxheWVyXzEiIHhtbG5zPSJodHRwOi8vd3d3LnczLm9yZy8yMDAwL3N2ZyIgeG1sbnM6eGxpbms9Imh0dHA6Ly93d3cudzMub3JnLzE5OTkveGxpbmsiIHg9IjBweCIgeT0iMHB4Ig0KCSB2aWV3Qm94PSIxNjIgMjQ2LjEgMjg4IDMzNS42IiBlbmFibGUtYmFja2dyb3VuZD0ibmV3IDE2MiAyNDYuMSAyODggMzM1LjYiIHhtbDpzcGFjZT0icHJlc2VydmUiPg0KPGc+DQoJPGc+DQoJCTxwYXRoIGZpbGw9IiNCN0MwQzMiIGQ9Ik0zMDIuMSwzOTUuMmw1OC4xLDcwLjNsMi0yYzYuMy02LjcsMTIuMS0xMy4zLDE3LjctMjBjNS41LTYuNywyMC0yMiwyNS4yLTI5LjFMNDI4LDQ1Ng0KCQkJYy0zLjksNS41LTkuMSwxMi4xLTE1LjMsMTkuNmMtNi4zLDcuNS0xMy43LDE2LjEtMjIuOCwyNS42bDYwLjEsNzQuM2gtNjAuOWwtMzIuMy0zOS43Yy0yOS41LDMwLjctNjAuNSw0Ni05Mi4zLDQ2DQoJCQljLTI4LjcsMC01My4xLTkuMS03My4xLTI3LjVjLTE5LjYtMTguNC0yOS41LTQxLjItMjkuNS02OC40YzAtMzIuMywxNS4zLTU5LjMsNDYuOC04MC41bDIxLjItMTQuNWMwLjQsMCwwLjgtMC40LDEuNi0xLjINCgkJCWMwLjgtMC40LDEuNi0xLjIsMi44LTIuNGMtMjEuMi0yMi44LTMyLjMtNDUuMi0zMi4zLTY3LjJzNy4xLTM5LjcsMjEuMi01My41YzE0LjUtMTMuNywzMy4zLTIwLjQsNTYuNS0yMC40DQoJCQljMjIuNCwwLDQwLjUsNi43LDU1LjEsMjBjMTQuNSwxMy4zLDIyLDMwLjMsMjIsNTAuM2MwLDE0LjEtMy45LDI2LjctMTEuNywzOC4xQzMzNi43LDM2NS43LDMyMi41LDM3OS41LDMwMi4xLDM5NS4yeg0KCQkJIE0yNjMuMiw0MjIuN2wtMi44LDJjLTIwLDEzLjctMzMuNywyNS4yLTQwLjgsMzMuM2MtNy4xLDguMS0xMC43LDE3LjMtMTAuNywyNi43YzAsMTMuMyw1LjUsMjUuOSwxNi4xLDM2LjkNCgkJCWMxMS4xLDEwLjcsMjMuNiwxNi4xLDM2LjksMTYuMWMxOC44LDAsNDAuNS0xMi4xLDY1LjItMzYuOUwyNjMuMiw0MjIuN3ogTTI3My45LDM2MC45bDMuOS0zLjJjNi43LTUuMSwxMi41LTkuOSwxNi45LTEzLjMNCgkJCWM0LjMtMy45LDcuOS03LjEsOS45LTkuOWM0LjMtNS4xLDYuMy0xMS4zLDYuMy0xOS4yYzAtOC43LTIuOC0xNS4zLTguNy0yMC44cy0xMy4zLTcuOS0yMy4yLTcuOWMtOC43LDAtMTYuMSwyLjgtMjIuNCw4LjMNCgkJCWMtNS45LDUuMS05LjEsMTEuNy05LjEsMjBjMCw5LjUsMy45LDE4LjgsMTEuMywyOC4zbDEyLjEsMTQuNUMyNzEuNSwzNTguMywyNzIuMywzNTkuMyw
yNzMuOSwzNjAuOXoiLz4NCgk8L2c+DQo8L2c+DQo8L3N2Zz4NCg==") no-repeat 0 0 +} + +section { + padding: 30px 0 +} + +section:not(:last-child) { + border-bottom: 1px solid #f2f4f6 +} + +.sample { + border: 1px solid #e6eaed; + padding: 30px; + border-radius: 3px +} + +.sample *:last-child { + margin-bottom: 0 +} + +.sample h1, +.sample h2, +.sample h3, +.sample h4, +.sample h5, +.sample h6 { + margin-bottom: 10px +} + +.avatar { + margin: 0 30px 30px 0 +} + +.buttons .button-group { + margin-bottom: 10px +} + +.buttons .button-group .button { + margin: 0 0 0 -1px +} + +.buttons .button { + margin: 5px +} + +.top-nav ul { + z-index: 10 +} + +.progress { + margin-bottom: 10px +} + +.progress .meter { + min-width: 50% +} + +.modal-sample { + padding: 15px; + margin-bottom: 30px; + box-shadow: inset 0 0 100px rgba(0, 0, 0, 0.7); + background-color: rgba(0, 0, 0, 0.4) +} + +.modal-sample .modal { + position: relative; + top: 0; + left: 50%; + margin-left: -25%; + -webkit-transform: none; + -ms-transform: none; + transform: none +} + +.message, +.form-inline { + margin-bottom: 10px +} + +.color { + display: inline-block; + padding: 15px; + color: #fff; + text-align: center +} + +.blue1 { + background-color: #001620 +} + +.blue2 { + background-color: #002531 +} + +.blue3 { + background-color: #003546 +} + +.blue4 { + background-color: #1f4753 +} + +.blue5 { + background-color: #438193 +} + +.blue6 { + background-color: #00b0e9 +} + +.blue7 { + background-color: #74ddff +} + +.blue8 { + background-color: #e3f8ff; + color: #00b0e9 +} + +.gray1 { + background-color: #272b2d +} + +.gray2 { + background-color: #4d5659 +} + +.gray3 { + background-color: #828c8f +} + +.gray4 { + background-color: #b7c0c3 +} + +.gray5 { + background-color: #e6eaed +} + +.gray4, +.gray5 { + color: #272b2d +} + +.pink1 { + background-color: #ee0788 +} + +.pink2 { + background-color: #fec1d1 +} + +.pink3 { + background-color: #ffedf2 +} + +.pink2, +.pink3 { + color: #ee0788 +} + +.red1 { + background-color: #e80044 +} + +.red2 { + background-color: #ffc7d8 +} + +.red3 { + background-color: #ffe3eb +} + +.red2, +.red3 { + color: #e80044 +} + +.green1 { + background-color: #43bb6e +} + +.green2 { + background-color: #c6ebd3 +} + +.green3 { + background-color: #e8f7ef +} + +.green2, +.green3 { + color: #43bb6e +} + +.orange1 { + background-color: #f18902 +} + +.orange2 { + background-color: #fec57b +} + +.orange3 { + background-color: #ffeed7 +} + +.orange2, +.orange3 { + color: #f18902 +} + +.icon-sample { + margin: 0 5px +} + +[class^="grid-flex-cell"] { + border: 1px solid #e6eaed; + border-radius: 3px; + text-align: center +} + +[class^="grid-flex-cell"] p { + margin: 0 +} + +@media screen and (min-width:769px) { + body { + border-width: 10px + } +} + +@media screen and (max-width:768px) { + body { + border-width: 5px + } + .modal-sample .modal { + width: auto; + left: 0; + margin-left: 0 + } +} + +@media screen and (max-width:480px) { + .button { + display: inline-block; + width: auto + } + .logo { + margin: 30px auto; + display: block; + text-align: center + } + .color { + width: 100% + } +} + +@media screen and (min-width:481px) { + .logo { + margin: 20px; + display: inline-block + } + .color { + width: 250px; + margin-bottom: 5px + } +} + +html, +body, +h1, +h2, +h3, +p { + font-family: "Roboto" +} + +body { + border: none +} + +.hidden { + display: none +} + +.top-nav { + margin-bottom: 60px; + border-bottom: 1px solid #e6eaed; + background-color: white +} + +.top-nav ul { + margin: 0 +} + +.top-nav li { + padding: 15px +} + 
+.top-nav a { + display: inline-block +} + +.top-nav label { + font-weight: normal; + font-size: 16px; + line-height: 1 +} + +.navbar { + background-color: #FFF; + border-radius: 0; + z-index: 2; + margin-top: 15px; + padding-bottom: 15px; + margin-bottom: 30px; + border-bottom: 1px solid #e6eaed; + background-color: white +} + +.navbar img { + width: 32px; + min-width: 32px; + max-width: 32px; + height: 32px; + min-height: 32px; + max-height: 32px; + border-radius: 50% +} + +.navbar-nav.navbar-right { + margin: 0; + list-style: none; + display: inline-block; + float: right +} + +.navbar-nav.navbar-right li { + line-height: 32px; + max-height: 32px; + padding: 0; + display: inline-block; + vertical-align: middle +} + +.navbar.navbar-homepage { + margin-bottom: 0; + border: none +} + +.navbar.navbar-homepage .button { + padding: 0 30px; + text-transform: uppercase; + font-size: 14px; + line-height: 28px; + color: #E48125; + border-color: #E48125 +} + +.footer { + background: #FFF; + padding: 30px 0; + color: #AAA; + text-align: right +} + +.footer a { + color: #666 +} + +.message { + margin-top: 100px +} + +.banner { + height: calc(100vh - 10px); + min-height: 500px; + background: #FFF; + display: flex; + align-items: center; + justify-content: center +} + +.banner-text { + text-align: center +} + +.banner-text p { + font-size: 22px; + max-width: 700px; + margin: 40px auto +} + +.banner-text p a { + color: #4d5659 +} + +.banner-text .button { + color: #E48125; + border: 2px solid #E48125; + background: #FFF +} + +.banner-text .button:last-child { + color: #CCC; + border: 2px solid #CCC +} + +.banner-actions { + max-width: 500px; + margin: 0 auto; + display: flex +} + +.banner-actions a { + text-transform: uppercase; + flex: 1 1 50% +} + +.banner-actions a:first-child { + margin-right: 10px +} + +.banner-actions a span { + margin-right: 10px +} + +.ribbon { + background: #565656; + background: #666; + padding: 60px 0; + margin-top: 120px +} + +.ribbon h4 { + color: #FFF; + text-shadow: 0 1px 2px rgba(0, 0, 0, 0.2); + text-align: center; + margin-bottom: 30px; + padding-bottom: 0; + font-size: 32px +} + +.ribbon p { + font-weight: 300; + color: #FFF; + padding-bottom: 0; + text-align: center; + max-width: 400px; + margin: 0 auto; + margin-bottom: 30px; + margin-top: 0; + font-size: 18px +} + +.ribbon span.separator { + color: rgba(255, 255, 255, 0.5); + margin: 0 20px +} + +.ribbon .button-outlined { + color: #FFF; + border-color: #FFF; + text-transform: uppercase; + border-radius: 20px; + padding: 5px 40px; + padding: 5px 20px; + border-radius: 3px +} + +.ribbon .button-outlined .octicon { + margin-right: 6px +} + +.ribbon .container { + text-align: center +} + +.ribbon-login { + background: #fff; + margin-top: 10px; + padding-top: 0; + padding-bottom: 0; + margin-bottom: 200px +} + +.ribbon-login .button { + text-transform: uppercase; + border-radius: 30px; + padding: 7px 30px; + font-size: 18px +} + +.pricing { + margin-top: 100px +} + +.pricing .grid-flex-cell { + min-height: 300px +} + +.feature { + padding-top: 60px; + border-top: 1px solid #EEE +} + +.features .grid-flex-container { + background: #FFF; + margin-bottom: 90px +} + +.features .grid-flex-container:last-child { + margin-bottom: 90px +} + +.features .grid-flex-cell { + border: none; + text-align: left +} + +.issues .container { + max-width: 600px !IMPORTANT +} + +.feature { + margin-bottom: 90px +} + +.feature>div:first-child { + text-align: center +} + +.feature>div:first-child p { + font-size: 18px; + max-width: 600px; + 
margin: 0 auto +} + +.maintainers { + max-width: 600px; + margin: 0 auto; + margin-top: 60px; + -webkit-touch-callout: none; + -webkit-user-select: none; + -khtml-user-select: none; + -moz-user-select: none; + -ms-user-select: none; + user-select: none; + border: 1px solid #e6eaed; + box-shadow: 3px 3px rgba(0, 0, 0, 0.02); + border-radius: 3px +} + +.maintainers * { + cursor: default +} + +.maintainers div { + font-family: "Roboto Mono"; + font-size: 14px; + color: #CCC; + background: #fafafa; + border-bottom: 1px solid #f0f0f0; + text-align: left; + padding: 10px +} + +.maintainers table { + margin-bottom: 0 +} + +.maintainers table td { + border: none; + font-family: "Roboto Mono"; + font-size: 13px; + padding: 5px 15px; + color: #999 +} + +.maintainers table td:first-child { + text-align: right; + width: 50px; + border-right: 1px solid #f0f0f0 +} + +.maintainers table tr:first-child td { + padding-top: 10px +} + +.maintainers table tr:last-child td { + padding-bottom: 10px +} + +.footer { + background: #FFF; + padding: 30px 0; + color: #AAA; + text-align: right +} + +.footer a { + color: #666 +} + +.issues { + padding-left: 10px; + margin: 60px auto; + -webkit-touch-callout: none; + -webkit-user-select: none; + -khtml-user-select: none; + -moz-user-select: none; + -ms-user-select: none; + user-select: none +} + +.issues * { + cursor: default +} + +.issues .issue { + display: flex; + margin-bottom: 30px +} + +.issues .issue img { + width: 32px; + height: 32px; + border-radius: 3px; + margin-right: 20px; + opacity: .5 +} + +.issues .issue em { + font-weight: bold; + color: #999; + font-style: normal +} + +.issues .issue b { + font-size: 28px +} + +.issues .issue>div { + flex: 1 1 auto; + box-shadow: 3px 3px rgba(0, 0, 0, 0.02) +} + +.issues .issue>div div:first-child { + color: #CCC; + background: #fafafa; + border: 1px solid #f0f0f0; + line-height: 32px; + padding: 0 10px; + border-top-right-radius: 1px; + border-top-left-radius: 1px; + font-size: 14px +} + +.issues .issue>div div:last-child { + border: 1px solid #f0f0f0; + border-top: none; + border-bottom-right-radius: 1px; + border-bottom-left-radius: 1px; + padding: 10px +} + +.issues .checks { + display: flex; + margin-bottom: 20px +} + +.issues .checks>div:first-child { + width: 32px; + height: 32px; + border-radius: 3px; + margin-right: 20px; + background: #e2cc7a +} + +.issues .checks>div:first-child .octicon { + color: #FFF; + font-size: 28px; + text-align: center; + line-height: 32px; + display: inline-block; + width: 32px +} + +.issues .checks>div:last-child { + border: 1px solid #e2cc7a; + border-radius: 4px; + overflow: hidden; + flex: 1 1 auto; + box-shadow: 3px 3px rgba(0, 0, 0, 0.02); + box-shadow: 3px 3px rgba(226, 204, 122, 0.2) +} + +.issues .checks>div:last-child>div { + padding: 10px 15px +} + +.issues .checks strong { + font-size: 14px; + font-weight: bold; + color: #cea61b +} + +.issues .checks p { + font-size: 14px; + color: #767676; + margin: 0; + margin-bottom: 10px; + padding: 0 +} + +.issues .checks ul { + padding: 0; + margin: 0; + border-top: 1px solid #eee +} + +.issues .checks li { + list-style: none; + padding: 7px 15px; + font-size: 14px; + background: #fafafa; + border-bottom: 1px solid #f0f0f0 +} + +.issues .checks li:last-child { + border-bottom: none +} + +.issues .checks li .dot-green { + margin-right: 5px; + color: #6cc644 +} + +.issues .checks li .dot-yellow { + color: #e2cc7a; + margin-right: 5px +} + +.issues .checks li em { + font-style: normal; + font-weight: bold +} + +.issues 
.checks-success>div:first-child { + background: #6cc644 +} + +.issues .checks-success>div:last-child { + border: 1px solid #6cc644; + box-shadow: 3px 3px rgba(0, 0, 0, 0.02) +} + +.issues .checks-success>div:last-child .button { + background: #6cc644; + font-size: 14px; + padding: 5px 15px; + margin: 5px +} + +.issues .checks-success strong { + color: #6cc644 +} + +.issues .more { + text-align: center; + color: #DDD +} + +.issues .more .octicon { + font-size: 26px +} \ No newline at end of file diff --git a/web/static/files/toggle_switch.css b/web/static/files/toggle_switch.css new file mode 100644 index 0000000..554ac3a --- /dev/null +++ b/web/static/files/toggle_switch.css @@ -0,0 +1,76 @@ +.toggle-switch { + position: absolute; + top: 20px; + right: 20px; + border: 1px solid; + cursor: pointer; + display: inline-block; + text-align: left; + overflow: hidden; + line-height: 8px; + min-width: 75px; +} + +.toggle-switch.disabled > div > span.knob { + background: #AAA; +} + +.toggle-switch span { + cursor: pointer; + display: inline-block; + float: left; + height: 100%; + line-height: 20px; + padding: 4px; + text-align: center; + width: 33%; + white-space: nowrap; + + box-sizing: border-box; + -o-box-sizing: border-box; + -moz-box-sizing: border-box; + -webkit-box-sizing: border-box; +} + +.toggle-switch > div { + position: relative; + width: 150%; +} + +.toggle-switch .knob { + background: red; + border-left: 1px solid #ccc; + border-right: 1px solid #ccc; + background-color: #f5f5f5; + width: 34%; + z-index: 100; +} + +.toggle-switch .switch-on { + left: 0%; +} + +.toggle-switch .switch-off { + left: -50% +} + +.toggle-switch .switch-left, .toggle-switch .switch-right { + z-index: 1; +} + +.toggle-switch .switch-left { + color: #fff; + background: #005fcc; +} + +.toggle-switch .switch-right { + color: #fff; + background: #222222; +} + +.toggle-switch-animate { + transition: left 0.5s; + -o-transition: left 0.5s; + -moz-transition: left 0.5s; + -webkit-transition: left 0.5s; +} diff --git a/web/static/static.go b/web/static/static.go index cb59dd8..428dad4 100644 --- a/web/static/static.go +++ b/web/static/static.go @@ -1,3 +1,21 @@ +/* + +SPDX-Copyright: Copyright (c) Capital One Services, LLC +SPDX-License-Identifier: Apache-2.0 +Copyright 2017 Capital One Services, LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and limitations under the License. + +*/ package static //go:generate go-bindata -pkg static -o static_gen.go files/... @@ -9,7 +27,7 @@ import ( ) func FileSystem() http.FileSystem { - fs := &assetfs.AssetFS{Asset: Asset, AssetDir: AssetDir, Prefix: "files"} + fs := &assetfs.AssetFS{Asset: Asset, AssetDir: AssetDir, AssetInfo: AssetInfo, Prefix: "files"} return &binaryFileSystem{ fs, } diff --git a/web/static/static_gen.go b/web/static/static_gen.go new file mode 100644 index 0000000..16458c0 --- /dev/null +++ b/web/static/static_gen.go @@ -0,0 +1,492 @@ +// Code generated by go-bindata. 
+// sources: +// files/angular-toggle-switch.min.js +// files/checksout.html +// files/checksout.js +// files/favicon.ico +// files/images/maintainers.png +// files/images/meowser.png +// files/images/pending_approval.png +// files/images/received_approval.png +// files/logo.svg +// files/main_styles.css +// files/styles.css +// files/toggle_switch.css +// DO NOT EDIT! + +package static + +import ( + "bytes" + "compress/gzip" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "strings" + "time" +) + +func bindataRead(data []byte, name string) ([]byte, error) { + gz, err := gzip.NewReader(bytes.NewBuffer(data)) + if err != nil { + return nil, fmt.Errorf("Read %q: %v", name, err) + } + + var buf bytes.Buffer + _, err = io.Copy(&buf, gz) + clErr := gz.Close() + + if err != nil { + return nil, fmt.Errorf("Read %q: %v", name, err) + } + if clErr != nil { + return nil, err + } + + return buf.Bytes(), nil +} + +type asset struct { + bytes []byte + info os.FileInfo +} + +type bindataFileInfo struct { + name string + size int64 + mode os.FileMode + modTime time.Time +} + +func (fi bindataFileInfo) Name() string { + return fi.name +} +func (fi bindataFileInfo) Size() int64 { + return fi.size +} +func (fi bindataFileInfo) Mode() os.FileMode { + return fi.mode +} +func (fi bindataFileInfo) ModTime() time.Time { + return fi.modTime +} +func (fi bindataFileInfo) IsDir() bool { + return false +} +func (fi bindataFileInfo) Sys() interface{} { + return nil +} + +var _filesAngularToggleSwitchMinJs = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x7c\x54\xdd\x6a\xe3\x38\x18\x7d\x15\x47\x94\x26\x06\xd5\x5d\x76\xef\x9c\xaa\xdb\x12\x7a\xb5\x5d\x66\xa0\x50\x18\xda\x32\x28\xf6\x67\x47\x44\x91\x3c\x92\xe2\x10\x12\xbf\xcb\x3c\xcb\x3c\xd9\x20\x5b\x92\xed\x3a\xcc\x4d\xe2\xef\xe7\x9c\xf3\x1d\xfd\xcd\x8a\xbd\xc8\x0c\x93\x62\x11\x9f\x6a\xaa\xa2\x9d\xcc\xf7\x1c\x08\x15\xe5\x9e\x53\x95\x74\xe1\x02\x19\x59\x96\x1c\x6e\xf4\x81\x99\x6c\x83\xf0\x1b\x12\x25\xfa\x88\x97\x5d\x39\xa9\x94\xac\x59\x0e\xca\xf7\xbd\xb4\x6d\x2b\x29\x0a\x56\x22\xfc\x36\x90\x30\x1b\xa6\x13\x29\x9e\xe9\x1a\x38\x41\x5f\x04\xc2\x5d\xa6\x28\x7c\xaa\x28\x5c\x6e\x2b\xe4\xda\x25\x7f\xfd\x44\x4b\x3b\x9c\x06\x5e\x10\x5b\x5c\xb6\x1d\x57\x25\x18\x32\x20\x57\x60\xf6\x4a\x9c\x1c\x7d\x6a\xbb\xbd\x16\xf6\x0a\x2e\xeb\x22\x1c\x44\xba\x7c\x08\x9b\xa6\xf9\x88\xb1\xb3\x97\x33\x05\x99\x61\x35\x8c\xfd\xd9\x65\xb8\xe4\x37\x4c\x34\x2d\x86\x19\x15\x68\xa3\x58\x66\x52\xf4\xf4\x88\xb0\x82\x8a\xd3\x0c\xd2\xd9\x5f\x58\xc1\x8f\x3d\x53\x90\x22\x51\xfe\x2f\x73\xe0\x08\xeb\x4c\x56\x90\x9e\x72\xa6\xe9\x9a\x43\x9e\xa2\x07\x84\xbd\xc5\xf6\xdb\x3b\xb3\x41\xef\x07\x3d\xa0\x06\x1b\xd8\x55\x9c\x1a\x48\xe7\x77\x39\xab\x23\x25\x39\x10\xa4\x68\xce\x24\x8a\x32\x4e\xb5\x26\x9f\x76\x36\x12\xe5\x8d\x2b\x9c\xa2\xf7\xb9\x17\x7d\x9f\xa7\x91\xff\x8e\x1a\x74\xdf\xb2\x5d\x22\xb8\xa1\x82\xed\xa8\x81\x11\xd1\xfb\xdc\x15\x65\x51\x58\xa6\xd9\xce\x3a\xc3\x51\x9f\x17\x36\xdd\x66\x2d\xb9\xae\xa8\xf0\xec\xae\x83\x43\x61\x5a\xce\x35\x13\x39\x41\xce\x3f\xba\xbf\xbb\xb5\xcd\x63\x88\x5d\x83\x41\x6f\x58\x92\xcb\xdd\x4e\x40\xb1\x72\x33\x52\x70\xab\xda\x83\x6e\x73\x56\xbb\xdf\x39\xce\xe4\xae\x62\x1c\xd2\xb0\xd7\xc0\x61\x07\xc2\x60\x6a\x8c\xd2\x7e\x9b\xa3\x36\xf2\x67\xf0\x7c\x5e\x8c\x62\x32\x3d\x1f\xbe\x14\x63\xd7\xe9\xa6\xe8\xa1\xfe\xa2\x5c\xc2\xba\x9a\x07\x07\xe3\x01\xdd\x5f\xa9\x0b\xf0\x50\x8c\xbb\xfb\xc7\x99\xd8\x36\xd8\xfe\xf6\x2e\xdb\xb3\x88\x47\x5e\xb1\x3b\xa8\x2b\xa3\x78\xf7\x84\xfc\xf7\xf4\xed\xfb\xcb\xd7\xc7\xd5\x13\xf9\xe7\xef\xa5\xeb\x4d\xa4\x58\xa0\x8c\xb3\x6c\x3b\xb8\x1f\xf1\xa9\xe5\x4b\xae\x68\x55\xf
1\x63\x47\x9e\x74\x93\xc5\x4d\x8c\x87\xd0\x2d\x1c\x73\x79\x10\x03\x30\x74\x62\x5b\x38\x12\x48\x0e\x1b\x96\x6d\xfe\x75\xff\x29\x24\x5b\x38\xae\x64\x0e\x4b\x5b\x25\x24\x0c\x74\x7d\xfd\x47\xc1\x81\x93\xe4\xaa\x90\x6a\x47\x8d\x01\xa5\x93\x6a\xaf\x37\x8b\x20\xdc\x1e\xd3\x57\xca\xf7\x10\xb6\xb9\x4f\x7d\x66\xa9\xa8\xd2\x53\x8a\x9a\xc1\x61\xcc\x10\x32\x9f\x09\x6c\x61\xb5\xa1\xa2\x84\x67\xa6\x0d\x88\x29\x59\x58\x45\xa8\x29\x77\x1b\x2d\xca\x0e\x33\x71\xa5\x40\xe4\xa0\xc8\x04\xdc\x3a\x20\x13\xe5\x6e\x24\x3c\x5c\xa8\x29\xd6\x3f\x0c\xe7\xf3\x62\x48\x36\x1b\x04\xe3\x21\x34\x98\x57\x4f\x3e\x84\xc4\x71\xd3\xbe\xbb\xcd\x22\x5e\xfe\x0e\x00\x00\xff\xff\x6a\x1f\x75\x60\x98\x06\x00\x00") + +func filesAngularToggleSwitchMinJsBytes() ([]byte, error) { + return bindataRead( + _filesAngularToggleSwitchMinJs, + "files/angular-toggle-switch.min.js", + ) +} + +func filesAngularToggleSwitchMinJs() (*asset, error) { + bytes, err := filesAngularToggleSwitchMinJsBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "files/angular-toggle-switch.min.js", size: 1688, mode: os.FileMode(420), modTime: time.Unix(1509717508, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _filesChecksoutHtml = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xbc\x55\xc1\x8e\xe3\x36\x0c\xbd\xef\x57\x68\x75\x58\xcf\xa0\xb5\xdd\x62\x6e\x5b\x5b\x40\xd1\x53\x4f\xbd\x74\x7b\x0d\x18\x9b\xb6\x89\x51\xa4\x40\xa2\xdd\x5d\x04\xf9\x8e\x7e\x50\x7f\xac\x90\x6c\x67\xec\x24\x4e\xa7\x97\x5e\x12\x8b\x22\x45\xea\xf1\x3d\xea\x43\xa1\xc9\xbc\x8a\xce\x61\x53\xca\xdc\x33\x30\x55\xf9\x01\xc8\xec\x3c\x7f\xd3\xe8\xb3\xca\x7b\x29\x1c\xea\x52\x8e\x86\x0e\x91\xa5\xc8\xd5\xdd\x40\xb6\x6d\xab\x71\xe7\xff\x24\xae\xba\x07\xa1\x35\x0d\xa2\xd2\xe0\x7d\x29\x0d\x0c\x7b\x70\x52\x7d\x10\x42\x88\xe5\x46\x65\x0d\x03\x19\x74\xa2\x6a\xa6\xed\xe8\xd2\xeb\x75\x68\x6a\x60\x10\xd3\xa7\xa3\xb6\xe3\x85\x73\x0c\xd0\xa4\x0a\x10\xa6\x4d\x2b\x4d\xd5\x6b\x29\x6b\xd4\xc8\xf8\xc5\xa3\x7b\x7a\x96\x6a\x5c\x89\xde\xa3\x2b\x72\x50\x45\xae\xe9\x6e\xfc\x78\xd1\xd3\xa9\xb6\x95\xff\xe2\xf4\xf9\x2c\x05\x83\x6b\x91\x4b\xb9\xdb\x6b\x30\xaf\x52\x85\xad\x77\x9c\x91\x6b\xdb\xda\x9e\x17\xf1\x1e\x75\x23\xd5\x68\x7e\x70\x00\x1d\xda\x70\x0d\xef\xaa\x50\x48\xa8\x38\x83\x01\x18\x5c\x28\x26\xbf\x8a\x2a\xf2\x5e\x4f\xa0\xe6\x35\x0d\xea\xc3\xfc\x17\x7f\x37\xc1\x5e\x22\xfd\xb6\x9f\xb4\x8e\xea\xb4\xd1\xf8\x35\xbd\x78\x26\xcb\x0a\x37\x7c\x51\x6b\xb1\x5e\xa6\x3f\xda\xe6\x25\x59\x5f\x4e\x88\x22\xf4\xd0\x59\x8d\x65\x62\x60\xa0\x16\x98\xac\xb9\xf1\x5a\xb7\x3f\xd1\xe4\x39\xed\x4d\xe4\x56\x2d\x3c\xd5\x18\xa8\x70\x37\x6a\x82\x30\xa0\xe7\xf0\x88\xc0\xa5\xb4\x82\x8c\xb0\xae\xf5\x72\x2b\x20\x06\x45\xe2\x4c\x7d\x3b\x9d\x6c\xa6\x6d\x4b\x26\xe0\x1d\xf9\x14\xb1\x3b\x25\x55\xef\x1c\x1a\x4e\x89\xf1\x90\x7c\x16\x93\x97\x28\xcb\x32\x64\x98\x62\x1e\xe6\x89\xb9\xd6\xfd\xb5\xab\xe6\xfe\x5b\xac\x3f\x82\x51\x8b\x02\x8b\x3c\x5a\x1e\x5d\x2d\x87\x87\xdb\xa3\x96\xd3\x51\xcb\xa1\x2e\x6a\x4a\x39\x5f\xed\x63\x19\x15\x33\xad\x3e\x7d\x12\x36\x83\xfa\x40\x26\xe2\x72\xb0\x75\x10\xbd\xcd\xd0\xc0\x5e\x63\x2d\x17\xe2\x1b\x4f\xfd\xcd\xb5\x4f\xf6\x59\xaa\x22\x5f\x65\xd9\x6c\xdd\x8d\x1e\x66\xfb\xcc\xf1\xa5\xcd\xc0\xb0\xa2\x66\x7e\x61\xfc\x64\xd8\xe4\xea\x2d\x2f\x17\x12\x39\xa0\xf7\xd0\xa2\x9c\xa1\x70\x78\xb4\x3e\x5c\x3d\x7e\x64\x1a\x4d\xcb\x9d\x28\x4b\xf1\x83\x54\xbf\x77\xe8\x50\x80\x43\x61\xec\xb8\x4f\x6c\x1d\xa1\x17\x6c\xc5\x01\x0c\xb4\x98\xdd\xd4\xb5\x91\x50\x4c\xff\x29\x3a\x67\xdd\x25\xfd\xb8\x52\xa7\x53\xfc\xc8\x6a\x60\x08\x5d\xbf\x77\xe6\xdb\xc4\x5c\x4b\x26\xae\x62\x75\x77\xb9\x79\x25\x98\xe0\x18\x34\xb3\x1d\x1
0\x83\xba\x17\x75\x3a\x45\xa7\xcc\xeb\xbe\x15\xe7\x73\x10\xd1\xd5\xa8\x5c\x62\x98\xd1\x48\x90\x79\xc0\x46\x5b\x78\x5b\x76\xbd\xd3\xe2\x3b\x91\xe4\x1e\x99\xc9\xb4\x3e\xdf\x3b\x30\x55\x87\x3e\x39\x9f\xa5\x2a\xe8\x02\x14\x30\x3a\x02\x9d\x52\x65\x8d\x97\x2a\x04\x17\x39\xa9\x71\x94\x76\x2f\x9b\xb5\xee\x7b\x66\x6b\x6e\x8a\x59\xd0\x75\x00\x4d\x35\x30\x3e\x85\xcd\x67\x29\xe6\x94\x53\xe4\xf8\x97\xda\x9e\x35\x19\xac\xd3\xd9\x5d\xaa\x3f\xa6\xaf\x22\x1f\x7d\x36\x6b\xb8\x91\xd9\xa4\x9e\x25\x34\x2b\xed\x4c\xa5\xbc\x47\x3b\xf7\x74\x73\xad\x99\x2b\xca\x2c\x96\xeb\x77\x63\x7c\xb4\x17\x50\xc9\x19\x0b\x3b\xa0\xd3\xf0\x2d\x54\x74\x79\x61\xee\x3a\x1e\x6c\x0d\xfa\xf6\xa5\x4f\xa2\x3d\xed\x10\x6a\x51\x35\x17\x0d\x16\xdd\xcb\xda\x81\x89\x35\x26\x6a\x62\x48\x20\x57\x20\xfc\x5b\x7b\x2f\x0f\xec\x3a\x61\x5a\x69\xeb\x71\x89\x63\x34\x84\xb7\xff\xef\xbf\x2e\x43\x70\x79\xef\xa5\x06\xe3\x09\x8d\xb5\xbc\x7e\x1a\xa7\xfe\x3f\x66\x03\x1c\x8f\xce\x0e\xab\xd4\x50\x31\x0d\x33\x9f\xbe\x17\x81\xa9\xe3\xef\x8e\x03\xbd\x26\xd4\x3e\xc6\x1b\x56\xd6\x34\x41\x03\xd1\x5c\x93\x8f\xb3\xb4\x94\x1e\x06\x32\xad\x54\x3f\x4f\x47\xad\x19\xf6\xa0\x69\x91\x9c\xbf\x9a\xe6\x7d\x9d\xbb\xf5\xfe\x6f\xed\x7b\xd4\xc0\xcb\xd9\x77\xba\xf8\xff\xf6\xf1\xce\xb8\x9d\x35\x9c\x3a\xf4\xbd\x66\xb9\xaa\x77\x72\x0a\x43\xed\xe8\x50\xc4\x41\x5a\x4a\xc6\xaf\x9c\x82\xa6\xd6\x7c\xd6\xd8\xf0\x4f\xf2\x06\xc4\xac\x21\x8d\xbf\x58\xc3\x68\xae\x4e\x5c\x6c\x04\x20\x8e\x0e\xd5\xa6\x06\xff\x09\x00\x00\xff\xff\xad\x8b\x41\xa9\xad\x0b\x00\x00") + +func filesChecksoutHtmlBytes() ([]byte, error) { + return bindataRead( + _filesChecksoutHtml, + "files/checksout.html", + ) +} + +func filesChecksoutHtml() (*asset, error) { + bytes, err := filesChecksoutHtmlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "files/checksout.html", size: 2989, mode: os.FileMode(420), modTime: time.Unix(1509733223, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _filesChecksoutJs = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xe4\x59\x5f\x6f\xdb\x38\x12\x7f\x96\x3f\xc5\x5c\x51\x40\x72\xa3\x4a\x05\xee\xf6\xa5\xae\x0f\x38\xe4\x52\xa0\x38\xec\x66\x91\xb4\x87\x05\x82\x20\x60\x24\x4a\x66\x96\x16\x05\x92\xb2\x2f\x68\xfd\xdd\x0f\x43\x52\x12\x2d\x4b\xb2\x77\x71\x6f\xe7\x87\x36\x26\xe7\xff\xfc\xe6\x8f\xe4\xa8\x68\xaa\x4c\x33\x51\x41\xb4\x84\xef\x8b\x45\x90\xbe\x7b\xb7\x08\xe0\x1d\x5c\x4b\x4a\x34\x55\xa0\x37\x14\x48\x55\x36\x9c\x48\x20\x75\xcd\x59\x46\x90\x3c\x41\xa2\x74\x11\xb8\xab\x64\x2b\xf2\x86\xd3\x28\x24\x75\x1d\xc6\xf0\xb0\x08\x82\x20\xac\xca\x3b\xd1\x68\x1a\xc6\xe6\x9b\x16\x65\xc9\xe9\x7b\xb5\x67\x3a\xdb\x84\x8b\x20\x78\x5c\xae\x3c\x7d\xff\xa4\x05\xab\x9c\x3e\x89\x6c\x90\x89\xaa\x60\x65\x23\x8d\x3e\x28\x84\xc4\x3b\x43\xbb\x25\xac\x1a\x33\xa6\xf3\xe5\xda\xb0\x42\xf4\xd6\x48\xfa\x55\x8a\x1d\xcb\xa9\x8c\xe1\xed\x46\xeb\xda\xfb\xca\x85\x15\xd0\x1e\x61\x08\x82\xe0\x98\x6b\x11\x04\xc9\x7e\x43\xab\x28\x4c\xc3\xd8\xdc\x07\x9a\x6e\x6b\x4e\x34\xfd\x26\xf9\x47\x08\x53\xa5\x89\x66\x59\x9a\x6d\x68\xf6\xbb\x12\x8d\x4e\x36\x7a\xcb\xad\xd7\x99\xa8\xb4\x14\x9c\x53\xf9\x11\xc2\x3b\x5a\x8b\x6b\x2d\x39\x3a\x7f\x58\x7a\x72\x3f\x0a\x59\xfe\xaf\x65\x63\x6c\x83\x34\x85\x9b\x8a\x3c\x73\xaa\x00\xf9\x7e\x82\xad\xc8\x29\x7a\x38\x74\xdc\x88\xfd\xe9\x67\x91\xd3\x48\xcb\x86\x2e\x87\xdc\xbf\xdd\xdf\x7d\x86\x5a\x0a\x4d\x4d\x80\x51\x84\x1f\xca\x24\xa7\x05\x69\xb8\x56\xc9\x86\x92\x9c\x4a\x95\x64\x62\xbb\x15\xd5\x43\xf8\xdb\xfb\xeb\xfb\xbb\xcf\xef\xbf\xde\xfe\xeb\xe6\x97\xf0\x11\xd6\xb0\x67\x55\x2e\xf6\xc9\xfd\xd7\x7f\x7c\xbd\x79\xfa\x7c\x77\xfb\xf3\xd3\xfd\xcd\xdd\xbf\x6f\xee\x92\x4c\xc9\x62\xb5\x08\x0e\x0b\x2f\x8f\xbf\x08\x51\x63\x12\x85\xbe\xcf\x44\x4d\x97\xf0\x1d\xaf\x1d\xe6\x30\x80\x3e\xec\x4c\x44\x2d\x68\x22\x0b\x00\x73\x22\x9b\x2a\x42\x39\xcb\xd5\xe2\xb0\x8c\xd0\xb3\x13\xcc\x77\xdf\x6b\x22\x15\xc5\x50\x46\x16\x09\x92\xea\x46\x56\xd0\xde\x47\x28\xfd\xa9\x91\xdc\xde\x06\x3b\x22\x91\x45\x2b\x58\x43\x7b\x95\xa8\x9a\x33\x1d\xbd\x49\xdf\x2c\x57\x48\xe3\x44\x18\xb2\x87\xbf\x3e\x5e\xbd\x49\xdf\x5c\xd9\x2f\x7f\x7b\x44\x82\x83\x75\x79\xce\xa7\x82\x71\x4d\x65\x14\x76\xd6\x85\x71\x6f\x29\x3a\x34\xee\x57\xef\xd6\xaf\x52\xd4\xf7\x54\xee\x58\x46\x23\x93\x37\x73\x0d\x00\x80\x1e\xe4\x22\x53\xdf\x24\x7f\x9a\x4b\x8e\xa3\x41\x58\x20\x9b\xde\x30\xd5\x9e\xc1\xba\x8f\x4f\x27\x17\x3f\xce\xf3\x56\xfc\xca\xde\x5c\xe0\xaf\x72\xa6\x86\xb5\x14\xd8\x4d\x3c\xf3\x27\x93\xd8\x3b\xfb\x4d\x51\x79\xea\xac\xc9\x55\xa3\xa8\x9c\x75\x13\x09\x56\x8e\x38\xa7\x9c\x6a\x9a\x23\x7d\x41\xb8\xa2\xa6\x24\x8c\xe3\x59\x23\x25\xad\xf4\x89\xe3\x5d\xb2\x8d\x1e\x93\xdc\x9e\xc9\x89\x9b\x66\x6a\xf5\x8d\xf2\x8d\xb0\x3d\xf5\x12\xb1\x64\x7d\xb0\x19\xb7\x1d\x67\x14\xa6\xa4\x66\x29\x9a\x14\x2e\x9d\xec\xcb\x33\x60\xd8\x62\x3f\xa6\x17\x64\xe0\x2b\x25\xdb\x89\x0c\x68\x4a\xb6\x6a\x36\x05\x86\x02\x7e\xfc\x80\x07\x53\x1e\x96\x21\x69\x2a\xb5\x61\x85\x8e\x66\x33\xb7\xec\xa3\xc6\x99\x9a\xc9\x8f\x15\x3a\x08\x74\x49\x8f\x38\x2a\xb2\xa5\x8e\x0b\x47\x4f\x84\xd6\xb3\xf5\x87\x15\xb0\x4f\xce\x26\x4e\xab\x52\x6f\x56\xc0\xae\xae\x1c\x61\xc0\x0a\x88\xec\xed\x03\x7b\x4c\xb8\x28\x59\x05\xeb\xf5\x1a\x3c\x61\x03\x23\x1e\x98\x71\xd3\xf4\x01\xfb\xcf\x1f\x29\x10\x23\x23\x8c\xfd\x88\x5f\x90\x1f\x6c\x1b\x27\xf9\x99\x8a\x1c\x06\xf6\x4b\x1e\x83\x90\xe5\x97\xdc\xf9\x80\x6e\xda\x73\xe3\x9d\x7f\x75\x0c\xc1\x92\x6a\x0f\x7f\xa9\xa4\xb5\x50\x16\x85\xc1\x01\x28\x57\xf4\x52\xa6\x34\x84\x2b\xa7\x67\xd5\x05\xaa\xcf\x5d\x2d\x8e\x8d\x46\x9e\x18\x9e\x45\xfe\x7a\x9c\x78\xab\x00\xa9\x9d\x06\x27\xfc\x0a\xff\x4f\xc4\xbe\xa2\xf2\x2a\x6c\xbf\x62\xd2\x9c\x90\xb3\x35\x89\x0c\x63\xaa\x8e\x6a\xf0\x9c\xb2\xa
1\x9a\x1d\xe1\x2c\x27\x97\x29\xea\x83\x76\x4e\xcb\x55\x98\xb6\x82\xff\x44\x43\xb0\x29\x8c\x7d\x0c\x4d\x22\x0e\xdb\x7d\x77\x72\x2b\xcb\x53\xcc\xb5\xa3\x62\xb2\x66\xc1\xfb\xcc\x81\x44\xc8\x52\xa5\xd4\x2c\x2b\x39\x7a\xd5\xf2\x60\x34\x8f\x94\x9c\xa6\x4e\xc8\xd2\xd7\x74\x41\xfe\x84\x2c\x6d\x6d\x77\x09\x3b\x52\x41\xf2\xfc\x62\xf9\x23\x50\xec\xa4\xc7\xf0\xfd\xe0\x27\x08\xb9\xdb\x1c\xb5\xd2\x06\x99\xea\x8e\xbb\x7c\x61\x60\xc2\xd8\x8b\xfe\x85\xfd\x01\x77\xc9\xe8\xad\xc2\xad\x2b\x06\xb7\x10\x13\x49\xb6\xca\xdb\x9a\x63\x30\x36\xc7\xb6\x95\xc5\x66\xec\xc5\xb8\x25\xd6\xa6\x5d\x28\xd7\x56\xac\x94\x44\xd2\x42\x52\xb5\x99\xcf\x70\x2d\x2c\x0e\x9c\x6a\xd3\xd6\xdb\x68\xb8\xa3\x3e\xfa\x89\xc6\xf5\xb9\x93\x56\x93\x57\x2e\x48\xbe\x3c\x96\x89\x9f\xce\x82\x5a\xe0\xaa\xe6\x08\x93\x9c\x68\xb2\x3a\x21\x76\x08\x71\x3c\x54\x4a\x21\x8f\x89\x0e\xcb\x24\x23\x3a\xdb\xf4\x9a\xa9\x94\xd3\x5a\x8d\x04\x58\x03\x95\x27\x72\x3c\xe4\xe0\x07\x63\x66\xbd\xbf\xdc\xb5\x34\xad\x84\x06\x5a\x14\x2c\x63\xb4\xd2\x31\x3c\x37\x1a\x53\xf0\x4c\x9e\xf9\x2b\xe0\x9d\x16\x02\xb6\xa4\x7a\x05\x57\x1e\x46\xcd\x88\x9c\x8c\x54\xf0\x4c\x41\xd2\x1d\x53\x0c\xf7\x0a\x56\x80\xe2\x62\x7f\x42\xda\x8f\x43\x58\xc3\x87\x15\xfb\x74\x14\xd0\x76\x2c\xba\xa9\x38\x64\x06\xb7\x74\x32\x4d\xb7\x83\x54\x98\x51\x38\x46\xdf\x29\x7c\x31\x0a\x5f\x3e\xf5\x48\x50\xad\xba\x97\x69\x75\xf8\xc1\x89\xe5\x71\x3d\xbc\xf8\xd3\x19\x6d\x71\x90\x9a\x11\x01\x7d\x4a\x5b\x11\x6d\x44\xdb\x0d\x6c\x8e\xf5\x59\x52\xf2\xfb\x34\xc9\x61\xf4\xe6\xf4\xf4\x70\x01\x14\x47\x9c\x38\x8f\xc5\x6e\xf4\x60\xa8\xfe\x62\xea\xce\x2d\x98\xd1\xd2\x15\x72\xd0\xbb\x8f\x2e\x63\xd1\x9b\x3e\xec\xb7\x07\x73\xf9\xe3\x87\xe9\x05\xed\xa2\x1c\x2d\xbd\x76\xe9\x09\x51\x9d\x14\x0b\x7a\xff\x1a\xf9\x61\x3d\x10\xe3\x13\xf4\x4f\x1f\xd8\x70\xda\xaf\x91\xdd\x01\x07\x3d\xc7\x72\x1e\xbc\x66\x44\x32\xcd\x76\xd3\xa3\xd5\x20\xb4\xca\xe9\x7f\x60\x7d\xd4\x3c\x12\x73\x78\x5b\x58\x5a\xdf\x1c\x45\x76\xac\x2a\x8f\x77\x71\x64\x30\x0d\xde\xae\x23\xdf\x0f\xd3\x65\x8d\x4b\xd0\x71\xe3\x41\x9e\xd5\xc8\xb9\x6b\x48\x81\xef\x64\x2d\xd4\x83\xb1\xec\xf1\xa4\xbb\x05\x23\x26\xba\xc7\x19\x5c\xa3\x26\x5a\xd9\x9c\x39\x63\x50\xf2\xe8\xcd\x9e\xc1\xf2\xf3\x9a\x87\xb3\x73\x90\x99\x5b\x03\xb2\xa9\x29\xea\x31\x0c\x22\xef\x53\x98\x06\x41\xf2\xdc\x70\x5f\xdc\x52\xc7\x1c\x9f\xa7\x19\x19\x12\x63\x06\x3a\xef\x27\xc8\x3c\xac\x9e\x2f\xf1\x3f\x67\xf5\xb9\x2e\xe0\x62\xe6\xf5\xb5\x79\x93\xe7\x3c\x3b\x0c\xb6\xb0\xbe\x70\xe7\x96\xe7\x11\x10\xd9\x3a\x72\x8b\x98\x21\x9d\xc6\xec\x04\x36\x27\xc1\x66\xa5\x9e\x87\xda\xd9\xa0\x18\xa4\x39\x1b\xff\x0f\xc0\xf6\x07\x81\x34\x56\x9b\x67\xf0\xf1\xcd\xf6\xff\xe1\x03\xbc\x37\x97\x66\x96\x24\x84\x82\x7b\x53\xd0\xee\xaa\xc9\x46\xd2\x02\xd6\x10\xa6\x5c\x94\xa2\xd1\xe1\x4c\xff\x83\x0b\xc1\xd4\x91\x64\x1b\x52\x95\x43\x18\xed\x08\x6f\xda\x87\xfe\x89\xc1\x69\x49\x4e\xc4\xd1\x9c\x9d\x3c\xcd\x1e\x0b\xc2\x13\x58\x43\xdb\x94\x8f\xb8\xed\x7b\xf6\x09\x7e\x33\xdc\x5d\x75\x0d\xfc\xf4\x2b\xec\xf4\x09\x7d\xd0\x9d\x7d\xb2\xf1\xd2\xb2\x66\x9c\x2f\x2d\x63\x91\x87\x95\x99\xed\xa5\xab\x56\x23\x67\x00\xa7\xd6\xd8\x09\x5e\x6f\xac\x8c\x71\x8f\x43\x31\xe3\x42\x8d\xbd\x7a\x9b\x18\x8d\xc7\xc7\xe6\x21\xfb\x4b\x55\x9c\x66\xe8\xec\x73\x3d\x36\xbc\x96\xc8\xb5\xbc\x39\xa8\xb7\x13\x5f\xf1\xa6\x74\xa8\x30\x7f\x1f\xcd\xe0\xce\x9c\xb5\x23\xb7\xb7\xe6\x4d\x67\xbf\xe5\x74\x54\xde\xfa\x90\xa6\x39\xa8\x8d\x68\x78\x8e\xcf\x07\x04\x5e\x94\xa8\x60\xcf\xf4\x06\xf4\x5e\x40\xc1\x28\xcf\x55\x0c\x5b\xaa\x14\x29\x29\x90\x2a\x87\x82\x71\x9a\x98\x7f\x61\xcf\x38\x47\x36\xba\xad\xf5\x2b\x26\x5b\x6f\xa8\xa4\xa1\x82\x4a\x58\x02\x2d\x20\x13\xd5\x8e\x4a\xed\x94\xb5\x82\x9c\x4a\xc2\xf7\xe4\x55\xa1\x
88\x5a\xd4\x0d\x27\x9a\xe6\x53\x39\xee\x8d\x6f\x65\xac\x21\x6f\xff\x5e\x75\x2f\xe8\xf2\xc4\xd8\x67\x1f\x1c\xe0\xef\xf0\xa1\x2d\x84\x51\x51\x48\x7b\x2d\x2a\x6d\x5f\xf8\x5a\xde\xd3\x66\x77\x38\xdb\x4d\xa8\x94\x17\x26\x08\xd6\x9e\x3d\x48\xf6\xb1\xe7\x88\xbb\x0b\xe7\xd6\x47\xec\x4b\x26\x59\xf6\x2d\xe2\x54\x8b\x3a\x59\xc1\x2e\xf8\x21\xc5\xfd\xba\x14\xf5\x3f\x2e\xc5\xdd\xbb\x81\xee\x35\xc2\x7f\x03\x00\x00\xff\xff\x32\x6d\xc9\xd5\x41\x1c\x00\x00") + +func filesChecksoutJsBytes() ([]byte, error) { + return bindataRead( + _filesChecksoutJs, + "files/checksout.js", + ) +} + +func filesChecksoutJs() (*asset, error) { + bytes, err := filesChecksoutJsBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "files/checksout.js", size: 7233, mode: os.FileMode(420), modTime: time.Unix(1509733218, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _filesFaviconIco = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xc4\x57\xfb\x6f\x14\x55\x14\x1e\xfe\x00\x13\x7e\xf2\x47\x5d\x10\xb5\xa8\x28\x56\x34\x44\x10\x8d\x9a\xe0\x2b\x0a\xd1\x04\x35\xc6\xc4\x18\x31\xa6\x46\x8d\x46\xa3\x51\x5a\x5b\x28\x7d\xd9\x16\x0a\x2d\x6d\xc5\x36\x08\xb4\x85\xa2\xa9\xc5\xf8\x04\x29\xa5\x80\xb4\xd5\x22\x18\x29\x16\xa1\x65\x67\x66\x67\x1f\xb3\x8f\xd9\xd9\xee\xee\xcc\xec\x7e\xe6\x9c\xb6\x66\xdb\xee\xd6\xee\xa6\x5d\xbe\xe4\xa6\x69\x7b\xe7\x7e\xe7\x9e\xc7\x77\xce\x15\x84\x05\xc2\x02\xc1\x66\xa3\x9f\x36\xa1\x6d\xa1\x20\x5c\x2f\x08\x42\x8e\x20\x08\x36\x41\x10\x1e\x12\xc6\xfe\xce\x58\x28\x08\x17\xae\x1b\x5b\x13\xc0\x2c\x10\x8b\x03\x97\x3d\x26\x1a\x7b\x34\x6c\x68\x72\x21\xb7\x4c\xc6\x92\x4f\x45\xdc\x34\xc3\x5a\x56\x2c\xe1\xa5\x3d\x6e\x1c\x19\x0c\xc3\x8a\xcd\x86\x65\x3a\xe2\x00\xae\xa8\x26\xaa\x7e\x09\xe0\xd1\x1d\x0a\x96\x14\xd2\xd9\x76\xdc\x55\x22\x61\x75\x95\x03\x6b\xb6\xa5\x5e\x2b\x3f\x93\x79\xff\x7d\x15\x32\x0e\x9f\x1f\x4d\x9b\x57\xf4\x59\xa8\xeb\xd6\xf0\x48\x8d\x02\x5b\x81\x9d\xef\xf3\xf2\x97\x6e\x34\x9d\x0e\xa2\x6f\x24\x8a\x11\xd5\x84\xe4\xb7\x52\xae\x4b\x6e\x13\xa5\x3f\xf9\x71\x73\xa1\x88\xe7\x76\xbb\x10\x08\xa7\x76\x02\xf9\x47\x0e\x98\xb0\xfb\x4c\x9c\x15\xa3\xa8\xe9\x0a\x60\xed\x4e\x05\x8b\x0b\x44\x2c\xdb\x2a\x21\xef\xa0\x8a\xae\xa1\x30\xf4\x68\x7a\x8e\xbc\xea\xb5\xd8\x4f\x6b\x6b\x15\xb8\xf5\xd4\xdf\x2a\x9a\xc5\x36\xae\xaa\x72\xe0\xee\x52\x09\xb6\x7c\x3b\xee\x28\x96\xf0\x7a\xab\x87\x79\xc3\x46\x3c\x2d\xde\x09\x9c\x19\x8e\x60\x79\x89\x84\x27\xea\x14\x0c\xab\x66\xca\x7d\xe4\xaf\x07\xaa\x1d\xec\x2b\xda\xfb\x51\xa7\x97\x79\x43\xd1\xcc\x78\x09\xce\xa0\x85\x0f\xbf\xf1\xe2\xce\xad\x12\xe7\xe3\x6b\x2d\x1e\xa8\xa1\xe4\x3e\x90\xc7\xf9\xef\xaf\x74\x60\xd0\x69\x20\x9e\x39\xed\x7f\x20\xdb\x29\x87\xba\x2f\x85\xf1\xe4\x2e\x27\xe7\x62\x4b\xbf\x3e\x23\x3f\x2d\xf2\xc5\x5c\xa3\xa5\x4f\xe7\x5c\x7a\xe7\x90\xca\xb5\x9c\x6d\xfe\x5f\xaf\x44\x70\xdb\x16\x09\x6f\xb4\x79\x92\x6a\xc1\xbc\xdf\xbf\x5f\x67\xff\xbf\xd8\xec\x66\xae\x6c\xf1\x1b\x56\x1c\xbd\xc3\x11\xbc\xd0\xec\x46\x4e\x91\xc8\x9a\x99\x77\xc0\x03\xff\x68\x2c\x2b\xfc\xe4\x6b\x77\xd0\xe2\xda\xdb\xdb\x1b\xc4\x3d\x65\x32\x6e\x29\x12\x71\x68\x20\x94\x15\xfe\x44\x90\x2f\xde\x6e\x57\x71\xc3\x26\x3b\x3e\xe8\xf0\x4e\xaa\xb1\x6c\xf0\x13\x8a\xbe\xf7\x33\xff\x5b\xed\x93\xeb\x20\x1b\xfc\x1e\x3d\x86\xe7\x9b\x5d\xac\xad\x3b\xba\xb4\x49\xff\x9b\x6f\xfe\x51\x23\x8e\xfd\x7d\x3a\xf7\x6d\xd2\x81\x8a\x23\xfe\xac\xf2\x53\xac\x49\x0f\x1b\x4f\x06\x39\xff\x9e\x6e\x70\xc2\x97\x50\x03\xe9\xf2\x53\xec\xe8\xfb\xa8\x99\x9e\x50\x53\x1d\x90\xc6\xaf\xae\x76\x40\x4c\xe0\x49\x97\x9f\x72\x79\xe7\x71\x0d\xaf\xec\x73\xa3\xe8\x3b\x1f\xf6\x9c\x09\xe2\xf8\x50\x18\x43\x2e\x83\xe3\x1c\xb5\x92\xdb\x75\x41\
x31\x78\x1e\x79\xb8\x46\xe1\x9e\x3b\x95\x9f\x6c\xfb\x53\x36\x92\x6a\xf4\x54\x0c\xb9\x4c\x3c\x56\xab\xe0\xc6\xfc\x31\x5d\x59\xba\x59\xe2\xb3\x1f\xaf\x53\xf0\xea\x7e\x0f\x0a\xd9\x2e\x1d\xc7\xfe\x0e\xe3\xa2\xd3\x60\xee\xf7\x3b\xbc\x58\x54\x20\x72\x1d\x1a\x09\x36\x4e\xf4\x5f\xd2\x48\x9a\xb3\xde\xfb\x5a\xe5\xb9\x4d\xff\x9f\xfe\x7b\xf4\x62\x18\x2b\xca\xc7\x34\x65\x5d\xa3\x0b\xeb\x3f\x77\xf1\xdc\x45\xb6\x10\x0f\xad\x5b\x8b\x44\xdc\x5b\x2e\xf3\x3e\xfa\x7d\x4d\xb5\x83\x35\x31\x11\x4e\xcd\xc2\x86\x26\x37\x1e\xdc\xa6\x8c\xef\xb3\x73\xbf\xa0\xf9\x83\x7a\x47\xaa\xf9\x91\xfc\x54\x7f\x42\x63\x0e\x8a\x05\xcd\x5c\x14\xe3\x93\xff\x44\xd0\xda\xaf\xa3\xe4\x47\x3f\x9f\xf1\x54\xbd\x93\xef\xf5\xe6\x41\x95\x67\x92\xa9\xb7\xa2\xf3\x5d\x41\x0b\x8e\x80\x85\xbf\x1c\x06\xea\x7b\x34\xee\xd9\x8b\x0b\xec\x58\x51\x26\xa3\xf2\x68\x80\x75\x34\x19\x82\x91\x18\xde\xfd\x4a\xe5\x19\x63\xd3\xb7\x3e\xae\xb5\x44\x90\x9f\xbd\xa1\x18\x9f\x9f\x4e\xbe\x52\x4e\x6c\x3f\xa6\x61\x55\xa5\x83\xed\xa0\x59\x7a\x40\x8c\x26\xdd\x3b\xe2\x35\xb1\xae\xd1\xc9\x71\xd8\x75\x42\xcb\x78\xde\x9e\x0a\xaa\xdb\x73\x52\x14\x1b\x5b\x3d\x9c\x63\x14\xbb\x8e\x3f\x42\xb0\x92\x24\xe8\xa9\xcb\x11\x8e\x3d\xcd\x7b\x9d\xe7\x42\x73\x63\xc0\x38\xfc\xe1\x18\xca\x7f\xf6\x23\x67\xb3\xc8\xe7\xef\xeb\xd5\x93\xde\x71\x6f\xaf\xce\xb9\x47\x76\x9e\x4d\xe1\xab\x4c\x11\x36\xe3\xd8\xde\x15\xc0\xd2\x71\x1b\x68\xa6\x98\x6a\x43\xc4\x8c\xe3\x93\xc3\x3e\xd6\xf8\x8f\x3b\x7d\xb3\xaa\xe3\x74\x40\xe7\xd7\x76\x6b\x5c\x1b\xcb\x4b\x25\xb4\xfd\xa6\x4f\xe3\xf8\xfd\x6a\x94\xdf\x47\xeb\x1b\x67\x7e\x73\x64\x0a\xd2\xb6\x86\x1e\x0d\xb7\x17\x4b\xc8\x2d\x95\xd1\x3e\x30\xd9\x86\xbe\x91\x08\xcf\xdc\xcf\xee\x76\x41\x8b\xcc\x3d\x3f\xc6\x6b\xea\x8b\x53\x41\x7e\x93\xd1\x4c\xd3\x7c\x3a\xc8\x6f\xa7\x41\xc5\x40\xde\x01\x15\x8b\xf2\x45\x6c\xf9\xc1\x3f\xad\xce\xe7\x12\x66\x2c\xce\xba\x9f\x5b\x2a\xb1\xfe\x50\xce\xad\xac\x90\x59\x07\xa8\x16\x49\x8b\xe6\x1b\xa6\x15\xe7\x5a\x7b\xa6\xc1\xc9\x79\x49\x71\xdf\xd8\xe2\xc1\x79\x69\xf6\xb9\x2f\x5c\x63\xfc\x1b\x00\x00\xff\xff\x5a\xa0\x6a\xac\xbe\x10\x00\x00") + +func filesFaviconIcoBytes() ([]byte, error) { + return bindataRead( + _filesFaviconIco, + "files/favicon.ico", + ) +} + +func filesFaviconIco() (*asset, error) { + bytes, err := filesFaviconIcoBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "files/favicon.ico", size: 4286, mode: os.FileMode(420), modTime: time.Unix(1509717368, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _filesImagesMaintainersPng = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x84\xfb\x75\x50\x9c\x5d\xb3\xc6\x8d\x0e\x3a\x38\x81\x09\xee\x36\x48\x90\xc1\x5d\x83\x33\x04\x77\x82\xbb\x5b\x70\xd7\x40\x08\x01\x06\x19\xdc\xdd\xdd\x9d\x10\xdc\x25\x78\xd0\x00\xc1\x25\x38\x9c\x7a\x9e\xf7\xdd\x7b\x9f\x73\xea\xab\xfa\xfe\xbb\x6a\xad\x5a\x3d\xab\x7a\xfa\xae\xbb\xfb\x57\xd7\x1d\xad\xfa\x41\x0e\x1b\x83\x14\x03\x00\x00\x60\x2b\xc8\x4b\xab\x03\x00\xc8\x50\x00\x00\xa1\x19\x0d\x00\x00\x00\x24\x0c\xa0\xdd\x00\x40\xf4\x8a\x82\xb4\xa4\xa6\x37\xc2\xf1\x7c\x04\x36\x92\x24\x16\xa0\x76\x35\xe8\xc7\x81\x0c\x23\xab\x86\xb4\x96\x90\x44\xda\x08\x65\x7f\x15\x25\x20\x97\x20\x18\x0a\x48\x03\x70\x21\xc4\xa3\x53\xc3\x10\x2d\x10\x87\x91\x25\xc0\xff\xb7\x02\x92\x00\x03\xaa\x00\x16\x97\x87\x12\xe0\x2f\x62\xc8\xbd\xdf\xf5\x29\xea\x4f\x10\xd6\x54\xc0\xa2\x43\xe8\x2e\x11\xda\x40\x56\xb3\xcc\xde\x75\x8b\x59\xba\x90\xc7\x11\x8b\xcf\xc8\xa7\x8c\xb0\xcf\xc8\xff\x39\xd4\xcb\xf1\xdf\xc3\x5c\x08\xff\x84\x8c\x47\xff\x6f\x48\x0b\xc4\x7f\x56\x83\xa1\xc1\xc2\xff\xf7\x3b\x34\xb4\x08\x69\xc3\xc8\x16\xb4\xa0\xbb\x2b\xa8\xe7\x71\x26\x50\x85\x6c\x99\x30\x38\x68\x9e\x91\xe6\x6b\xb8\x05\x73\xfd\xee\xc2\xdf\x0d\x3f\x63\xed\x55\x04\xc7\xb3\xf5\xd5\x2a\x64\x6f\xec\xae\xeb\x23\x8b\x19\xba\x55\x05\x9a\x05\xb8\x08\xa4\x6b\x34\x3c\x40\xa0\x11\xab\x2f\xbb\xed\xc8\xb7\x4d\xe8\x6c\x34\x45\xf4\xe7\xa0\x41\x96\x97\x38\xc1\x63\x9c\x55\x22\xb7\xdc\xda\xd4\x2d\x25\xd5\xb7\xe4\xe8\xef\x66\x96\x97\xbf\xe9\x42\x60\xda\x3d\x0c\x81\x49\x44\x5d\x9b\x6a\x60\xc2\x2f\xc9\x0a\x9e\x26\xe5\xce\xb2\x28\x5b\x1b\xae\xab\x32\x46\xe3\x04\x93\xbd\x44\xa4\x25\x0b\x35\xbd\x47\x4d\x3c\xbe\x56\x90\x0c\xf9\xcc\x91\xd2\x4f\xd3\x24\x6d\xc4\x2a\x01\x35\xfa\x44\x8c\xb9\x1d\xbe\x69\xad\xd5\x35\xc8\x41\x5c\x09\xf2\x56\x8b\x09\xd5\x95\x15\x7a\x6a\xaf\xfc\xba\x95\xd0\x7a\xe0\x7b\x97\x95\xd8\x87\xf7\x00\x2a\x0b\x2d\x66\x25\x9c\xde\xf5\x08\x2e\x85\x3e\xe6\x9b\x77\xd3\xe8\x61\x8a\xba\x36\xb2\x73\x9d\xa5\x2c\xf5\xd8\xba\x5e\x80\x8d\xe7\xb5\xb4\x0a\x75\xb0\x11\x92\x08\x0b\x11\x15\x6e\x81\x4f\x50\x99\x7a\xfb\x1a\x1a\x29\xed\x89\x12\x0b\x9a\xb8\x8f\x05\x54\x8f\x35\x35\x01\xaa\xfd\x57\x28\x82\xbb\x1e\xb8\x55\x16\xec\xf3\xfb\xf6\xe6\xaf\xb1\x1e\x2d\xcc\xe9\x66\xec\xc2\x69\x37\x21\x13\x3f\x35\x1c\xaf\xfa\x93\x16\x83\x15\xec\x79\x75\xc9\xb9\x81\xfd\xa0\x4c\x1d\x9c\x25\x30\x1c\xf0\x1a\xde\x8b\xda\x22\xd5\xbe\x06\x41\x9c\x2c\xc5\x30\x11\x1c\x24\x1a\x35\x6b\xf3\xb7\xd8\x16\x47\x41\xe1\xd2\xc3\x79\x3f\xb6\x6d\x42\xcc\x57\x41\xfd\x04\xa6\x03\xd1\x34\xbc\x13\xab\x09\x14\xec\x72\x51\x45\xe7\x6c\xf5\x95\x52\x36\x42\x0f\xd2\xf9\x63\xe2\xf0\xcc\xa5\x87\x31\x77\xac\xc3\x00\x83\xa2\xed\x76\xeb\xd4\x84\x73\x10\x69\xe5\xd0\x97\x3a\xb4\xaf\xa1\xdd\x7c\x94\x8b\x3b\xc3\xe8\xbb\xd1\x23\x68\x75\xb3\xd5\x22\x1c\xb3\xa4\x87\xc9\x1e\x79\x7a\x99\x5b\x59\x10\xf1\xf8\x28\x7e\xea\x6f\x5a\xe6\xe2\xba\x4f\xbe\xd8\xfc\x91\xc6\x9d\xf3\x5b\xa0\x81\x2b\x88\x21\x14\x03\xab\xca\x18\xcc\x12\xc3\x6d\xf0\x15\xfa\xb4\xc4\xc9\x45\xf2\x9e\x90\xe6\x35\xf2\xdb\x68\x91\x92\xc5\x45\x69\x89\x71\xcd\xe0\x48\xa9\xba\x1b\xde\x4c\xd1\x77\xb7\xb6\x61\xfa\xe6\xbd\x4e\xd2\x48\x0d\x2b\x6d\xee\xd2\xb1\x3c\x55\x7e\x6d\x35\xf0\xa6\xc3\x5a\xa3\xed\x1c\x11\x37\xd7\xdd\x12\x30\x83\xc8\x1c\xa7\xb3\x8e\xc9\xfc\x35\xaa\x23\x84\x88\x31\x39\xde\x01\xb9\x3a\xf6\x6f\x19\xf5\x1a\xda\xd9\x83\x55\xea\x40\x87\xe2\x9c\xec\x7a\x21\x0b\x03\xc5\x27\xea\x4b\x33\x1d\x76\x79\xf0\x88\x5f\x5b\x5a\xd4\xb8\xee\x7d\x96\x50\x78\x09\x4e\xf5\xfe\xad\x85\xb8\x26\x1d\x0c\x34\x4f\xd1\xa1\x7d\x2a\x1c\x11\xde\x42\xba\xf5\x60\x78\xef\xd1\x86\xd7\x36\xe8\x1c\x2a\xd6\x74\xfe\x22\x97\xe5\xe6\x7f\x9c\xe5\x35\x59\xfa\xe2\x2
5\xe6\x87\xe2\xf4\x72\xdc\x30\x25\xf6\xc0\x4c\xfd\xf1\x45\x52\xb4\xfd\xa4\xc6\xd6\xb2\x6e\x3e\x06\x1a\x63\x55\x48\x46\xd3\x58\xaa\x29\xdc\xdb\xa7\xc8\x75\x41\x25\x35\xe7\xd8\xac\x24\x80\xf2\x1d\xf9\x16\x1d\x39\xed\xdd\x09\xda\xd2\xef\xd0\x5b\x96\xc6\x7d\xc6\xa7\xcd\x24\x2b\x95\x5f\x89\x3b\x3c\xa1\xdc\x3a\x2c\xa0\x1e\x8a\x3c\xad\x41\x1e\x3f\x06\x29\xaf\xc0\x96\x4b\x4b\x02\x2e\x57\x3a\xff\xcc\x2f\x26\xda\xe0\xb3\x43\x73\x6e\x1c\x2a\x60\xc9\x3b\x17\x4b\xae\x12\x39\x32\x69\x3b\xb5\x03\xb5\x57\xe4\x81\x1f\x45\x87\x97\xe3\xc3\x84\x5a\xac\x4d\xef\x60\xb5\xec\xe1\x1f\xf2\x6d\xcb\x5a\xb9\x04\xeb\x7d\xe4\x34\xab\x93\xa6\x8c\x9d\x84\xdb\xa9\xd2\xb4\x8d\x90\x04\x8c\x73\xa2\x69\xfe\x7c\xe1\x2c\xb5\x57\xc9\x76\xc0\x95\x41\xb4\xbc\xa4\x20\x9f\xf2\x93\x21\x7c\x75\xfd\x5c\x61\xe6\x31\xb2\xbd\x3e\x37\x72\xd7\xf5\x09\x81\xeb\x10\x75\x25\xae\x05\x49\x7c\xd8\x02\xd5\x3a\xc8\xd9\x3a\x2c\x93\xda\x8b\xb1\xcf\x38\x1d\x6a\x36\x6f\x3d\x5e\xb4\xd1\xe5\x22\x5d\x96\x4c\xf4\xcb\x65\xd9\x23\xda\x16\x35\x32\xaa\xa7\xa3\x0e\x27\xc0\x56\x18\xff\x87\xba\x6e\xaa\xea\x6c\x87\x10\xdf\x80\x06\xc5\x85\x77\x67\x0d\xcf\xc7\x2c\xa1\xcb\x17\x3a\xb5\x77\x20\xa5\x89\x51\xe9\x94\xfd\x0d\x90\x61\x4e\xf8\xe2\x25\x17\xde\xd0\x15\x57\x82\x9d\x94\xf8\x01\xc5\x98\x82\xf7\x6c\xf2\x7e\xdc\x6d\x23\x6d\x3c\x7a\x76\x96\xaa\xe4\x71\xb1\x4b\xbf\xf8\x9e\x42\x16\x2d\x35\xe2\xad\xe2\xa9\x56\xfa\xcc\xac\x63\x9c\x97\xf8\x53\x29\x41\x54\x78\x2b\x19\xe5\x2c\x4d\xa9\xb3\x86\xe2\xb0\xd1\x18\x5d\x2a\x3b\x8c\xe6\xb3\x92\x59\x92\xd8\x3b\x2c\xdc\xce\xad\x5e\x1d\xa3\x65\x8f\xe8\xf1\xf0\x92\xfa\x07\x29\xef\x85\x54\xae\x31\x31\x3c\xf3\x6e\x2b\xf5\xcc\xea\x75\x79\xe0\x66\x7f\x6c\xbd\xae\x9d\x0e\xb8\xa6\xec\xc3\x01\x99\x71\x09\x34\x12\xc3\x92\xcb\xa6\x5d\xf0\x5a\x9e\x85\x32\x90\x8e\x46\xa7\x4c\xe1\x40\xdd\xbe\x01\x98\x88\x62\xff\x6d\xae\xfd\x29\xe4\xbd\xbd\xaa\xb3\x81\xbb\xea\x41\x7a\x23\x5d\xf9\x39\x99\xcb\xfe\x75\x99\xc0\xe1\x65\x5f\x8d\x12\x78\xe3\xeb\x5a\x02\x2f\x17\x9e\x8e\x24\x05\x59\xbd\xe4\x21\x46\xca\x48\x32\xf3\x32\xf0\xa8\xf6\x49\x36\x73\x79\xdb\xd9\x3d\x66\xae\x09\xf8\xbb\xb2\x7e\x5e\x48\x1b\x5c\xc3\x1e\x79\x27\x8f\xad\x45\x03\x62\xa4\xab\xf9\xb6\xdb\xa8\xde\x7a\x5f\x27\xc7\xee\xd6\x24\x56\xa9\x42\xc7\xd7\xaa\x69\x04\xee\x47\x30\xd1\x31\x23\x43\xae\x86\xf6\x4f\x7b\x3c\x64\x52\xd1\xc3\xde\x36\xee\x57\xb0\x3d\xcc\x83\xcf\x74\xe2\x47\x3c\x0f\x34\x7c\xd2\x06\xdb\x42\x4b\x8c\xb0\x76\xc2\xb9\xf6\x66\x19\xda\x3a\x3d\x5f\xd6\xda\xa2\x89\xcd\x9a\x9c\x3e\xbf\xe3\xfd\x73\x5e\x6e\xcc\x0a\xdb\xf6\x38\x41\xb5\x7e\x4c\xaa\x60\xe5\xfd\x9a\xac\x30\xe0\x17\x4e\x58\x9e\x38\x42\xa1\xe7\x1e\x7a\xda\x23\x11\xaf\x5f\x76\x8b\x98\xde\xfe\x95\x88\xc5\xd1\x7d\x57\x93\x86\x3a\xb4\x2a\xd7\x27\xa0\x98\x09\xe0\xb3\xf4\x91\x1a\x76\x5f\x7b\x4c\x1b\x5e\xde\x22\x08\x7b\x03\x13\x1d\x29\xc8\xc6\xfa\x13\x95\x7c\x69\x90\xbb\xcc\x01\x63\x1f\x29\x7a\x72\xee\xd8\x3a\xe3\xea\xe6\x2e\xf1\x3c\xfc\x15\x63\xa9\x9e\x59\xed\x43\x62\x3c\x73\xf8\x4e\xe9\xc4\x0c\xb3\x56\x22\x55\x9d\xd9\xa6\x4d\x33\x6d\x18\x3c\x30\x5c\xf8\x22\xbd\x1d\x74\x30\x10\x28\x12\xde\xd8\x94\xc2\xdc\x8b\x11\x28\x87\x76\xb4\x82\xef\x56\x09\xdf\xf1\x2a\x70\x05\x87\x59\xb8\xe9\xdb\x76\xcb\x52\x29\x0a\x3f\x29\x93\x94\x73\xf4\x3a\x45\x45\xa6\x28\x67\x88\xa1\x76\xdb\xe9\xcd\xe4\x1f\x4d\x47\x75\xd9\xbb\x72\x59\xce\x25\x1b\x8f\xb3\x46\x30\xf0\x3a\x42\x0e\xd6\x87\x23\x17\x00\x69\x6f\x8b\xec\x05\x51\xa9\xf7\x18\x95\xaa\xcc\x74\xc0\xb3\x91\x4d\xd6\x55\x35\x91\x64\x3e\xf4\xaa\xe0\x21\xae\xcb\x76\xdf\x2a\xff\x78\xa2\x7a\xc8\x39\xfa\xb4\xda\x4b\x85\xa9\xe9\xf2\xcd\x42\x48\x53\xfb\x8c\x36\xb8\x59\x50\x7d\x7c\xbe\x30\xf7\x47\x78\xbf\x55\x03\x97\x60\x
b5\x73\x44\x29\x97\xe6\xeb\xc2\x39\xfb\xa5\x80\xfb\xec\xb3\x99\xe7\xa7\xbd\x39\x4b\x0b\x5a\xe7\x5d\x76\x03\x70\x9e\xa8\xf3\x9b\x06\x0f\x0c\xc5\xa5\xb5\xb2\x92\x42\xbf\xef\xea\x30\xdd\xb4\xbb\xb3\xb0\x6a\xdc\x87\x9a\xea\x1b\x2f\x72\x06\x8f\xee\x7a\xe8\x93\xec\x11\x77\x79\x38\x72\xa3\xa6\x8d\x3e\xb7\xf7\x2d\xea\x0b\x66\xbf\x83\xee\x19\x31\x74\xf9\x53\x09\x27\x7e\xcd\xbd\x1b\xae\x6f\x7f\xc7\x56\xbf\x7c\x85\x93\xbe\xb0\xef\x0f\xee\x74\xb9\x10\x46\x32\xc2\x05\x59\xf9\x7d\x4b\xfc\xf8\xea\xdc\x64\x05\xf9\xfc\x9b\xec\x5f\xa3\x44\x2e\xb3\x3e\x61\x22\x0c\xa8\xe8\x1f\x52\xef\xf9\x96\xd5\x95\x74\xd3\x62\xcd\x31\x72\x3f\xd6\xd2\x94\xcc\xa3\xd9\xf3\x36\x9c\x45\x8a\x25\x5b\x13\xfb\x22\x5a\xe4\x7b\x4a\x42\x27\x87\x0f\xf4\xdc\xfa\x08\xd0\x05\x1a\x93\x9c\x4b\xf8\xef\xaa\xce\xed\x7a\xa3\xaf\x86\x78\x64\xc1\x4e\x70\xd9\x29\x4d\x69\xf2\x80\x2f\xdc\x71\xc7\xf4\x62\x11\x7c\xa0\x90\x73\xd3\xe4\x93\x6b\x75\xee\xb6\x55\xd8\xea\xb7\xd1\xac\x83\xcf\x63\xc5\x63\x3c\x55\xf7\xc6\x7d\x7e\x56\xfc\x51\x68\xd2\x5c\x56\xfa\x77\x8a\xb3\x4d\xac\xbf\x3d\x0c\xd2\xe6\xdf\x6a\x83\x8d\x7e\x74\xdf\x31\x9e\x1b\x89\x50\xf6\x50\x17\xf5\x72\x10\xc9\x85\x05\xb6\x81\xaf\x83\xaf\x34\xa6\xa2\x1b\x37\x5a\xa0\x2a\x53\x4f\x00\xe1\xc9\x0f\x2a\x39\x1c\x29\xd1\x64\x91\x4b\xe0\x86\xab\x80\x9c\x5c\xb2\x9a\xe6\x39\x21\x6f\x31\x61\x37\x20\x79\xf5\xf5\x7b\x71\x7e\x7c\xe5\x17\x91\x59\xac\x58\x54\xb6\x3c\xa3\x33\xc1\x54\xd1\x47\xc2\x7a\xe3\x84\x6f\x7e\x62\x1f\xc0\xf8\xd9\xea\x06\xb9\x81\xbd\x54\x31\xd4\xdf\x13\x79\xce\x4f\x1d\x3c\xab\x61\x4e\x57\xef\xc5\xd3\x9f\x58\x2c\xeb\xc9\xfb\x1a\xe7\x40\x59\x06\x87\xe8\xef\x61\x3a\xe0\x8f\xb4\xa0\xdf\x12\x3f\xf8\x33\x06\x7a\x99\x41\xc1\xef\x75\xb6\x38\x02\x8d\xc8\xc8\xa9\x26\x4b\xb7\x3d\xee\x55\x04\xfd\x8a\xd7\x92\x9d\xe4\x76\xad\xcb\xf0\x2a\xa1\x29\x52\xf7\x89\x12\xcb\x9b\xf3\x98\x46\x74\xc1\x4f\x23\x61\xb2\x91\x81\x05\xe3\x7c\x57\x2b\xdf\x24\x2d\x42\x5d\xac\x9b\x8f\xe9\xd0\x2c\x86\xd0\xc5\xcb\xd4\xd5\xd1\x50\xd8\xe8\xad\xa7\x0f\xe2\x74\x6f\x65\x99\x2a\x2f\x5f\xc2\x4d\x2c\x66\x51\x4a\xb4\x3e\x71\x8d\x2c\x39\xad\x62\xd0\x32\xa5\xb2\xe3\x61\x4b\x75\xe0\x9b\xbd\x75\xea\x7a\xb8\xba\x4f\xb8\x2d\x4a\x7b\x56\xd7\xb3\x03\xbe\x03\xdd\x2a\xbe\x91\xc2\xdc\xcb\xf9\x4b\x77\x97\x41\x61\xd2\xa0\xfe\xf2\xc3\xe4\x67\x99\x17\xcb\xb7\xfd\xdd\x2b\x6d\xb0\xf6\xbe\x72\xda\x9e\x73\xec\x01\xc9\x2b\xb2\x8d\xd9\x3b\x02\xaa\xb8\xdd\x80\xf0\xfd\x66\x53\x82\xde\x35\xfb\x5c\x5b\x45\x22\x4d\x67\x32\xe8\xf1\xf2\xd4\x8c\x34\x07\xb1\xe8\x70\xa1\xbf\xdd\x10\x8f\xbb\x61\x4e\x95\x3e\x18\x6f\xae\xc9\x4d\x97\xfc\x22\x9b\xcf\x79\xd9\x23\x6d\x70\xe3\x81\x08\x5d\xdb\x83\x50\xcb\x47\xd6\xf8\xbd\xba\x5e\x66\x75\x9c\xf0\xb4\xc0\xa3\xa8\xd6\x98\xee\xb1\x39\x19\xf0\xd1\xcf\x7a\xbe\xa6\xb7\x5f\xc2\x6e\xd7\x5a\x05\xfd\xdd\xe1\xd6\x7c\x6d\x05\x62\xc2\xd0\x52\xf1\x53\x19\x52\x97\x46\x37\x06\xf8\x36\x27\x61\x5e\xc2\x6a\xe2\x95\x2b\xf9\x68\xea\xed\x28\x63\x32\xa9\x9e\xa6\x54\x7e\xfa\x0f\x3e\xd4\x5f\x25\x1a\x65\x79\xdf\xcb\x67\x9b\xf0\x1e\x16\x53\xab\xf7\xa2\x43\xe6\x35\x09\x58\xea\x74\x8f\x91\x8a\x54\x9e\xf6\x71\x93\x02\xb3\xf6\xd7\xa8\x1e\x25\x0e\xf5\xe4\x9a\x7d\x08\xbf\x24\x8f\x58\x4e\x38\x67\xe6\x24\x67\xd3\xbd\x5a\x96\xb0\x63\xbc\x89\x41\x04\x04\x47\xf8\xaa\x87\x60\x85\xe7\xe3\xf0\xc6\x83\x2c\x7a\x9f\x23\xe9\x7b\x7b\x9c\x69\x16\x31\x4d\xbd\x11\xd2\x9e\xa4\xc1\x73\xe7\xd3\xe2\x86\x12\x60\xae\xfb\x11\x91\x70\x11\x5d\x23\x09\x30\x57\xc8\xd4\x88\x34\x38\x18\x6a\x81\xc8\x95\x80\x3e\x2c\x0d\xa6\x06\xfd\x47\xfd\xdb\x7b\xfd\xb7\xcd\xea\x04\xff\x8f\xfa\xa7\xd1\xca\x41\xfa\x7f\xec\xc2\x00\x69\xe3\x62\x88\xd4\x30\xda\x10\xa8\x13\x83\x6e\x8d\x73\x15\xc2\xf0\x2e\xc1\x7f\xd6\
x87\xc3\x2f\xb2\xba\x1a\x2e\xa2\xb7\x57\x5e\x5f\xa9\x6e\x3e\xf0\xa5\x85\x5b\xc0\x32\x94\xd1\x68\x22\x79\x8a\x13\x24\xa0\xca\x64\xef\xc9\x1c\xf9\x36\x79\x58\x86\x9e\xaa\x57\x18\x1b\xc5\x72\x17\x8d\x34\x4b\x17\xd4\x95\x9c\xcb\xa4\x84\xf5\x7d\x25\x8b\x58\x74\x76\xd9\x31\x50\xde\x6a\x53\x12\x1c\x94\x57\xa8\xfa\xe8\x5b\x78\xfe\x7e\xba\x1b\xbf\xdf\xb5\x10\x64\xdf\x39\xa5\x35\xf2\x13\x3e\xa3\xef\xa8\xe6\x76\x9f\xfc\xed\x3f\xf9\xbb\x73\x5f\x04\xcb\xdd\x44\x7a\xf0\x4f\x3e\x61\x65\x1a\x61\xf6\x4b\x3e\x24\xda\xc3\xf3\xaf\x28\xf4\x33\x51\x79\xb1\x85\xb1\x21\x6b\xc5\xcf\x4e\xef\x84\xe3\x98\xc4\x9d\x66\x55\x3b\xf6\x65\x3c\x90\x1e\x13\xd9\x95\x99\x4c\xc4\x97\x15\x3c\x22\xf6\x98\x00\x55\x37\xe5\xca\x79\x91\x0e\xdf\x47\x12\x76\x73\xf3\x53\xb3\x14\x41\x51\xb3\x85\x26\xd5\x60\xc7\xe8\xd4\x23\xd1\x0e\xff\xf5\xfc\x13\x67\x28\x3a\x7f\x5d\x98\xc7\x99\xf2\xc3\x45\xc4\x9d\xd2\x68\x98\xc2\x99\x7e\xbc\xff\x23\x6b\x82\x94\xb6\xc1\x3b\x63\xc6\x63\xea\x45\x8e\x4d\x62\x0e\x40\x8b\xd1\xcf\xe9\xe6\x7c\x01\xe6\x08\x21\x62\xd0\x3b\xb6\xf9\xaf\x64\x98\xb6\x69\xfc\xe6\x80\x69\x95\x69\x1e\x32\x89\x22\xdd\x32\x1f\x4f\xd5\xe4\x0b\xbf\x78\xa7\x68\x2d\x1d\xd0\xf4\xab\x5b\x97\xb5\x33\xf2\x2f\xfd\x40\x4f\x55\x96\xb3\xac\x70\x3c\x82\xec\x97\x50\x88\x2d\xea\xbb\x4f\xbb\xe1\x21\x7c\x34\x96\xa9\xed\xbc\x43\x0f\x3c\xe9\x5f\x53\xa4\x44\x63\x15\xea\xb8\x02\x54\xac\x4a\xcc\xb3\xd7\x57\xe7\x52\x71\xbd\xe0\x83\x6e\x6a\x0c\xea\x70\x6b\xde\xce\xd4\xd4\xd4\x1e\x8e\x44\x4a\x28\xdf\x0e\xf6\x24\xc3\xca\xba\xce\x23\x8b\x49\xa2\xee\x97\xc4\xd6\x07\xde\x4e\x6b\x65\x23\x23\x05\x0d\x33\xcb\x0d\xb2\x53\x92\xbb\xa0\xd3\x85\x01\x1c\xc5\xad\x2e\x34\x17\x7d\xef\x07\x8d\x01\xfa\x45\xeb\xf4\x16\xae\x88\x11\x0d\x71\x55\xca\x68\x19\x03\xbe\x3b\x75\x4a\x2a\x80\x53\xd7\x10\x44\x4d\xc1\x65\xfb\xb4\xcd\x1a\x08\x13\x13\x2f\x64\x8f\xf6\x98\x7f\x30\x97\x67\xfa\xda\x89\x01\xbf\x06\x10\x11\xe8\x8a\x45\xcd\x0f\x1d\x26\xfb\x9e\x49\x35\x43\xea\x5a\xfa\xa9\x40\xd9\x2f\x9d\x90\xb4\x13\x4e\xa2\x9b\x1c\xa8\xe9\x2c\x32\x0e\x42\x07\xbf\xe5\xfb\x0e\xa0\x07\xc7\x57\xfe\xb1\x51\x1c\x70\x8a\xb2\xda\x10\x9e\x52\x59\x27\x16\xdc\x50\x62\xb2\xd2\xc9\x48\x1b\x12\x6d\x20\x35\x88\xb8\x8b\xb9\xde\xa9\x0d\x20\x08\xb8\xe5\x20\xd6\x8b\x65\x80\x7f\xaf\x78\xe7\xc7\xa1\x09\x1c\xf0\x25\x1a\x76\xa6\xec\xe9\xa2\x60\x6a\xd2\xd9\x16\xf5\x5a\xfc\x73\x53\xe2\x53\x43\x62\x03\xca\x7e\x69\x84\xa4\xf1\x4e\x54\x9a\xa8\x97\xd4\x20\x8d\x2f\xdd\x21\x18\x7f\xc7\x7a\x7e\x34\x7e\x8c\x73\x00\x84\xca\xc5\x91\x33\x77\xfc\x1e\x5c\xed\x68\x20\x40\x3d\xdf\xba\xfe\x62\xca\xf5\xf0\x0c\x46\xd2\xf5\x4a\xe4\x6e\x9a\x22\x95\x70\x29\x8d\xcd\x70\x2f\x05\x30\x39\x38\xba\x44\x8c\x38\xcc\xad\x30\xb7\x5f\x6e\xfb\x02\x22\x8c\x65\xb2\xb9\x39\x5c\x8f\x7a\xf1\xc4\x6b\xfd\x66\x3a\xc3\x38\xd7\x45\xbc\xe0\x07\xfb\x13\x94\xf9\x0a\x05\x9b\x57\x6a\x0c\x1e\x57\xb2\xc1\x99\x7e\x93\x9d\x66\xec\xcd\xab\x01\x39\x94\x4c\xfc\xb5\x7f\x46\x18\x7b\xad\x93\x16\x2d\x76\x26\x45\xa8\xbd\x8c\xc5\x1f\xef\xa8\x18\x09\x6f\x1a\xb2\x08\xa1\xc7\x60\x16\xbd\xf3\xed\x50\xea\x4f\xce\xbd\x19\x3f\x0f\x32\x3b\x79\x60\x33\xa1\x98\x9a\xd4\x4b\x38\x5a\x69\x55\xad\xa1\x8a\x25\xb1\x9b\x0a\xd7\x13\x59\x7a\x48\x77\x8a\x81\x16\x56\xd2\xb7\xa4\x12\x46\x01\xfd\x0b\x7e\x42\xd0\x46\x33\x3b\x9c\xd9\x2e\xb9\x74\xf4\x5f\xb9\x77\x73\xa3\x61\x23\x8d\xb6\x1b\xe9\xa7\xe3\xe4\x60\xdf\x28\x33\x1e\x80\x9c\xa0\xb9\xd4\xcf\xef\xaa\xed\x5b\x83\x64\x58\xf1\x2f\x84\x7e\x6a\x52\x0d\x81\xf2\x24\x48\xd7\xdc\x75\x28\xac\x25\x82\x4b\x12\xa3\x28\xe0\x67\xbf\x3d\xd6\x0f\x66\xa0\xc2\x38\xbb\xfd\x11\x41\xad\x14\x67\x29\xee\x29\x0a\x70\xbb\xcf\x0c\x40\x57\x4b\x8a\x7b\x6a\x23\x9a\xdc\x9a\x6f\x06\xdf\x91\xa9
\xd6\x0e\xa1\x88\xd4\x3a\x3b\x71\x81\x5a\x3e\x80\xba\x0b\x4f\x05\x08\x9e\xf1\x97\x6f\x12\xb4\x97\x30\x41\x72\xa0\x49\xb1\xc2\x47\xad\x6d\xd9\x2c\xca\xe0\x94\xb2\xaa\x08\x0a\x64\xfe\xaa\x5c\xf9\x9d\xc0\xda\x54\xa6\xfc\x63\x7c\xb5\xa4\x9d\xf9\xf8\x82\x1f\xe9\xff\xdc\x53\xd1\x21\x5f\xe1\x40\xbc\x42\x04\x3e\x28\x64\x1d\x7e\x30\x42\x0e\x26\x02\x14\xac\x1b\x71\x45\x5c\x4e\x7e\x9d\x74\xec\xdd\x64\xa5\x11\x0a\xb2\xfb\x89\x99\xc4\x0f\xb7\x5a\x07\x91\x38\x33\x75\xba\x52\xf6\x7e\x27\xea\xb3\x18\xfa\x76\x82\xbd\xce\x1f\xda\xbb\x79\x30\xf4\x88\x6d\x62\x78\xea\x93\x61\x3a\x8b\x92\xdb\x7e\xd6\xb0\x67\xca\x75\xe3\x8e\x00\xb8\x2b\x09\xb7\x41\x8e\xbd\x0a\xa8\xda\x43\x06\x4d\x88\x3c\xc6\x2c\x6b\x22\x1b\x4f\xe4\xfb\xfc\xce\xb1\xdd\x6f\xeb\x97\xe6\xe7\xb8\xe0\x53\xf5\xbe\x4a\xdf\xb4\xc5\x49\xf7\xc1\xcb\xd5\xfb\x7c\xe0\x64\x62\x5e\x65\x0a\x9d\x47\x2a\xaa\x0a\x34\x31\x67\x0a\xf6\xda\x99\x1f\xdd\x28\x3b\x39\x28\x14\xbe\x2a\x35\x37\x56\x76\x74\x04\x30\x1c\xf6\xd2\xa7\x28\x31\xfc\xb1\x5d\x39\xce\x9c\x46\xfd\x35\xa6\xca\x2b\x81\xa6\x9b\x3e\x52\x8a\x00\xb8\x1f\xf3\x52\x49\x0f\x79\x93\x5a\x4b\x8e\x92\x52\xcb\x51\x15\x52\x63\x27\xcc\x12\xd1\x45\xe6\x5d\xc2\xb0\xf6\xe4\x25\x78\xd4\x8a\x6d\xa2\xa2\xa3\xc4\x5e\x2b\xd7\xea\xab\x12\x16\xad\x0d\xad\x7a\xe1\x39\xbb\x28\x8c\xe6\xa4\x11\x79\x8e\x66\x34\x69\x08\x45\xe7\x55\x21\xa6\xe2\x8d\xbd\x28\xa7\x54\x95\xc9\xfa\x65\x17\xc7\xec\x38\x89\xdc\xd7\xea\xfe\xc8\xd6\x60\x0c\x00\x3d\xce\x22\xcb\xf1\xfb\x66\x45\x9a\xc7\xce\x62\x8f\x85\xa0\x17\xbd\x77\x4f\x33\x2e\x8e\x8c\x5e\x37\x40\x9d\xad\xae\x96\x9e\x85\x38\x1c\xe5\x39\x65\xf3\x1d\x79\xf1\xaa\x84\x45\x5b\x6d\x00\x54\x1b\xa9\x07\xcd\xc5\x79\x81\x13\x9d\x42\xe9\x4a\x49\x27\x69\x81\x4b\x30\xc1\xa6\x73\x6c\x8e\xc0\xbb\xdf\xe1\x68\xd8\xae\x7e\xb2\x5f\x4d\xca\x0c\xd7\x4c\xd5\x19\x85\xd1\xa4\x0c\x71\xc6\x50\x7d\x16\xd2\x72\xf2\xbe\x34\x41\x8f\x4f\x3f\x52\x9f\x9c\x36\x69\x94\x14\x5e\x7e\x46\x31\x51\xcf\x2b\x2c\x2f\x55\xeb\x31\x9d\x09\x41\x4b\x60\xba\x20\x74\xc9\x6a\x67\x33\xf8\x8c\x5e\xbd\x31\x67\x92\x68\x08\x5d\x97\x4c\xbf\x25\xb6\xf3\x18\x9c\x33\xc6\x06\xd3\x1e\x47\x15\xfc\x02\xf9\x74\x3c\x65\x80\x02\x30\x11\x1c\x5a\x72\x8c\x27\x24\x7b\x5c\x68\x7b\x04\xb2\xf7\x91\x8f\xa9\x5d\xbe\x88\xed\x99\x43\xbd\x4a\xab\x54\xc1\x93\x12\x26\xe0\x78\xda\xdd\xec\x45\x20\xc0\xe4\xe3\x40\x63\xcc\x81\x5c\x68\x51\x5a\x09\x4a\x67\x86\x0f\x29\x94\xe7\xea\xc8\x92\x0e\xd4\x34\x4f\x9e\x92\x6e\x7d\xf6\x9d\x28\x89\xb6\x9b\xec\x31\xc6\x94\x8b\xf9\x28\xe1\x4a\xb3\xb1\x34\xa3\xa6\x51\x8e\xb7\xf0\xda\xa4\x5e\x35\x5e\x44\x8f\x80\xd3\xc2\x6b\x68\x03\x5a\x26\x96\x33\x52\xc9\xdd\x15\xda\xd9\x5e\x36\x4f\xa7\xb5\xff\xcc\x14\x5b\x7a\x80\x80\xe1\xd7\xe1\x46\x00\x4d\xc4\xec\x80\xbd\x22\x68\xb4\xfa\xaa\x88\xa7\x6b\x42\x27\x24\xd3\x6f\x75\x8b\x81\xb0\x1e\x7a\xe8\xe0\x13\x2e\x0a\x35\x49\x81\x5b\xec\x73\x20\x7a\x74\x81\x41\x58\x34\x27\x73\x8f\x92\x11\x16\x6c\x98\xff\x8c\x40\x9d\xeb\xba\x67\xc2\xdf\x81\x4f\x02\xdb\x31\x29\x3f\x4e\x78\xc0\x77\x78\x59\x12\xe0\x1c\xc8\x9b\xee\xbf\xca\x6b\xf7\xc3\x2c\xb4\x30\xa3\x04\x71\x63\x5d\xe4\x9f\xa7\xc0\x8e\xd7\x22\x18\xcb\xc0\x6e\x63\x3b\x8a\x39\x32\xd8\x38\xf3\xf4\xcd\x40\x61\x8a\x58\xa4\x85\x53\x03\xb6\x01\x06\x7f\x4a\xa5\x64\x31\x24\x1a\x45\x35\x5d\xb5\xa7\xdf\xa3\xeb\x32\x3f\x05\x9d\xcc\x50\x79\xc1\x63\x9e\x00\x8c\x74\xfe\xe3\x58\xfd\x2d\xab\x80\xe8\x24\x4b\xd4\x9b\x3f\xe9\x54\xdc\x2a\x4e\x88\x77\x3d\xf8\xf8\x7b\xfb\x47\xa2\xcf\x36\xea\x9c\xbc\x40\x06\x9f\xa9\xb0\x68\x20\x08\x23\xf9\x05\x06\x40\xf1\x22\x56\xf4\xa6\x43\x81\xc6\xb0\x2e\x18\xae\x99\x9a\xe9\x84\x46\xb0\x47\xa1\x39\xd6\x5b\x4b\xea\xa8\x8f\x6d\x5d\xab\x0f\xec\xda\x24\x16\xa9\x20\x5
1\xef\xf1\xae\x48\xd9\xec\xdb\x22\x9c\x39\x19\x5d\x22\x4c\x88\xd8\xbd\x55\x5b\x20\x4f\x49\x6f\xb9\x9b\x91\x71\xf4\x18\x25\xc1\xe3\x82\x0a\x1d\x9a\xf7\x8e\xdc\x93\xea\x0a\x64\x89\x41\x5b\x0d\xfd\x51\x72\x16\x8b\xfb\xa4\x2d\xf3\x8f\xce\x88\x83\x83\x5a\x71\xdc\x51\x55\xf1\x5b\x5a\x46\xf1\x74\x78\x93\xf3\x33\x18\x0d\x3b\x5a\xff\xe4\x6a\x32\x1b\x76\x95\x50\x29\xb3\x17\x6b\x07\x8d\x6c\x5c\x45\x0a\x52\x42\xe5\xe9\x22\xb1\x8c\x3a\x37\xd7\x99\x17\x4a\x5a\x50\xbb\x6d\x8b\x50\x60\x56\x00\xbf\xf7\x59\xeb\x9b\x97\x02\x88\x5f\x31\xc0\x49\x16\x69\x38\xa0\xa2\x0a\x14\xbb\x24\x3a\x5e\x03\x04\xfc\xaf\xbb\xbc\x57\x8d\xc1\x5e\xfc\x1c\x87\xc1\x56\x94\xbc\xc0\xa7\xea\xf4\x30\xae\x6f\xa6\xa1\x06\x66\x12\x46\xfc\xe6\x10\x28\xea\xfc\x0f\xa6\xe0\xdb\x8c\x0e\xe1\x1a\xcb\x35\xee\x68\x62\x81\x26\x64\x30\x45\xe8\x35\x77\x56\x8d\x65\xa2\xc8\x64\xdb\x96\x10\x02\x19\xe7\xe2\x8f\x13\xf5\xfb\x8a\xc5\x3a\x9b\xca\xb7\x71\xec\x0a\x07\x9a\x7f\x3d\x84\xec\xa8\xdc\x6f\xb3\x3d\x79\xda\xe0\xe9\x61\x1e\x67\xe8\x5d\x53\x87\x87\x35\x40\x90\x2b\x9c\xec\x47\xcb\x83\x86\xce\xc1\x6c\xba\x02\x29\xa4\x2e\x0c\xf2\xb0\xfb\xc4\xf6\x5c\x33\x12\xcd\x04\xcf\xfc\x80\x0e\x9a\x74\xe5\x90\x17\x16\xda\x1e\xbd\x34\x90\xec\xe5\xa7\x55\x87\xeb\xc8\x71\x63\x8e\xa5\x11\x91\x90\x93\xfb\xb3\x26\x1d\x57\xd5\x77\x1d\xec\xcf\x1c\x9d\xbd\x9d\x62\xd0\x8c\x2e\xda\x17\x54\xe6\x66\xfd\xed\x28\x7b\x4e\x0e\xb5\x0b\xc9\xc9\x1c\x09\x5f\xb3\x34\xc1\xf7\x54\x9c\x4c\x37\x6e\xb4\x66\xca\x2b\x0e\x1a\x4d\xf5\x6e\x8b\xec\xfa\x3c\xed\x0e\xa9\xb3\x41\x4d\x21\xfa\xfc\x31\x35\x72\x80\xa9\x38\x7b\x44\x92\xab\x20\xf6\x95\xfb\xa5\xd4\xa7\xca\xb0\xd1\x0e\xf5\xd4\xe2\x97\xa8\xeb\x07\x0a\x65\x96\xe2\xd3\x5c\xfa\x33\x1f\xa5\x01\x56\x5c\x30\x01\xcf\xf5\x57\x41\xcc\xec\x11\x52\x94\xc0\x51\x44\x70\x91\x37\x7f\x5e\xfb\xaf\xf9\x42\xbb\xc3\x25\xe2\x83\xd1\x65\x40\x28\x7a\x76\x58\xa6\xe1\xbe\x2b\x2f\xe0\x3c\xcb\x1f\xac\x9b\x9f\x3e\xab\xb6\x7f\x18\xb4\x9d\xb2\x22\xe2\x57\xc0\x37\xa2\x8a\xf3\x6a\x88\x71\x75\xd0\x90\xea\x7c\xa1\x65\x56\xe6\xf3\x57\x3b\x13\x5f\x0d\xc9\xd2\xd5\x34\xe0\x5c\x04\x7a\xd4\x46\x13\x51\x87\x05\xa5\x45\x9d\x04\xc3\x50\xba\xd1\x79\xad\xfb\x87\x20\xb8\x9e\xcb\x97\x79\xdc\x9b\xa5\xa8\xb0\xe5\x1a\xac\x69\x69\xf5\xb9\x6b\x4c\x2a\x7d\xcc\x39\x5e\x02\x1c\xa6\xf7\x0e\x78\x2d\x8e\xe6\xfd\x21\x12\x4d\xf2\x73\x7e\x85\x84\x9e\x5f\xe4\xd9\x8b\x4f\xa9\x27\xfc\x8e\xf2\x9a\x44\xfb\xe7\x8c\x71\xc0\x09\x73\x53\xde\x4e\xa0\x17\xe6\x92\x29\xdf\x2c\x5c\x56\x97\xc2\x6d\x89\x76\xc8\xe1\x87\x53\xe4\xb7\xba\x49\x76\x80\xd6\x3d\x7a\xa3\xda\x5f\x80\x2f\xd5\xd0\x4e\xb6\x54\xf0\xaa\x21\xa5\x3e\x99\xe0\x60\x90\xaf\x40\xc6\x9b\x42\x3e\x45\xc6\x08\x9f\xb0\xde\x64\x4b\x53\x75\x0c\x82\x4d\x7a\x82\xbf\xed\x3e\x59\x12\x24\x1c\x59\xda\xe8\x13\x9d\xdc\xf3\xa3\xef\x82\x3f\x19\xa3\xc4\x63\x80\x65\xc3\xab\xd7\x32\xb1\x40\xa8\x94\xdc\x74\x87\x7b\x00\x24\x79\x96\x0b\xd3\x5f\x4a\x87\x3e\x47\x7e\x6f\x7b\x73\x13\x7e\xbc\x0f\x7b\x78\xa3\xc4\x4d\x83\xbc\x35\xed\x22\xf4\x6d\x4e\xfc\xe6\x31\x12\x3d\x80\x89\x03\x26\x13\x94\xee\x51\xf0\x46\x03\x81\x18\x77\x97\xa0\x00\xdd\xeb\xac\xae\x0d\xab\x7f\xd7\xec\x08\x67\xb6\x93\x71\x01\xba\xb0\x7b\xc8\x01\x84\x41\xfc\x19\x1f\x5f\xbc\x67\x77\x81\xa3\x02\x1b\x15\x8c\x55\xd1\xcd\x8e\x22\x74\xad\xf1\xb8\xfe\xa2\x5c\x3c\x8d\xf1\xa8\xd0\xd1\x71\x2a\xf2\x8f\xf5\x65\x14\x41\x7c\x04\x67\x38\x12\x52\x26\xfc\xe7\x2f\xa0\x0a\x54\x97\x6a\x07\x44\x37\x54\x69\x79\xde\xaa\xa4\xf4\xd7\xe4\x97\xeb\x69\x1c\xd2\x1b\xf3\xba\x7b\x3e\x24\x59\x9e\x64\x1f\xba\x60\xd7\xfb\x8d\x5f\x5d\xd2\x42\x90\x43\xc4\x58\xfb\x9e\x26\xf1\x37\xe9\x3e\x9a\x43\xa3\x11\x65\x14\x93\x87\xe9\x3e\x13\xf3\x2e\xac\x42\xbb\xd3\xca\x61\x
d1\xe8\x6f\xd7\x5b\xe8\x24\xbb\x67\xe3\x62\xbf\x89\x7b\x13\xbd\x5c\x5c\x5c\x44\xf1\x79\x61\x04\x87\x7d\x99\x13\x89\xfe\x6d\x82\x8f\xa0\x4a\xd8\x04\x26\xcd\x4a\xf4\x07\xee\xae\x35\x01\xdd\x24\xb9\x99\x85\xa0\x8b\xbc\x53\x45\x37\x51\x81\x9a\xf1\x59\xf3\xed\x45\x71\x31\xf0\xd7\x13\x63\x2a\x03\x6e\x71\x89\xea\x37\x4b\x48\xb2\x57\x6b\x4f\x05\x43\x37\x43\xe1\x1d\xb7\xc4\xca\xbd\xcb\xd9\x93\x81\x79\x04\x1a\x15\xe2\x13\x7e\x1b\x54\x0f\xcf\x53\xbd\xbb\xda\xfb\x10\xd3\x0b\x8d\x1d\xb0\xae\xcd\xce\x45\xfd\xc3\xf7\xa0\xe2\x1f\x9b\xed\x47\x29\xac\x8d\x58\x12\xd1\xd0\x98\x87\xbe\xfc\x77\xa0\x0c\x43\x63\xbf\xdc\x6b\x26\xc9\x06\xb2\x4a\x0e\x69\x96\xab\x03\x81\x92\x82\x34\xb9\xd7\x69\x93\xc4\x9f\xa3\x5f\xa1\xfc\x35\x14\xea\x1e\xed\x56\x94\x4d\x2a\x06\x9f\xe3\x46\x47\x23\x0f\x4f\xe3\xbc\x29\x9e\x37\x83\x1f\xe0\x13\xca\xdd\x89\x89\x0b\x4a\x9e\x64\xaa\x61\x19\x21\x0c\x0c\xdd\x29\x86\x89\x3f\xf5\xe5\x09\x06\xc3\x75\xd3\xa3\x29\x14\x5e\x09\x90\x95\xaf\x0b\x33\xe0\x22\x5e\xdb\x7f\x5e\xa2\xc9\xf0\x48\xe3\x6b\xab\x85\x71\x96\x0b\xf4\x7e\x6f\x8b\xce\x0a\x56\x2b\xb3\x5b\x29\x02\x0f\x37\xe8\x64\xd0\xa2\xb3\x5f\x06\x21\x69\x14\xdf\xa4\x03\x79\x39\x5e\x28\x49\x72\x46\x91\xc1\x0d\xc6\x04\xbd\x6a\xdf\xb1\xbc\x9c\xf8\x83\x93\xfc\x8e\xcb\xe2\x31\x04\x5f\x67\x94\xd7\x6d\x97\x43\x5a\xbc\xe6\x1a\x12\xdb\xd3\x73\xa5\xe5\x3a\x90\xdc\x08\x17\x38\x18\xa9\x0b\x64\x00\xe7\x03\x51\x97\x5b\x18\xf6\x54\x00\x72\x11\xa4\xd7\x46\xef\x10\xd2\x18\x09\xae\x49\x46\xf5\x0c\xeb\xe7\xd6\x48\x09\x81\xe4\x7e\x11\x3f\xc6\xc6\xd2\x66\x7b\xac\xe1\xb2\x0f\x13\x9d\x1f\xc4\xf7\x5c\x99\xa9\xac\x23\x9c\x36\x50\xe8\x6b\xb6\xf8\xe6\x4c\x5c\x24\x02\x3e\x1f\xc8\x02\x74\xe7\xf4\x8e\xa1\x72\xb1\xa4\x26\x33\x92\x65\x09\x13\x47\x4b\x00\x3d\xa1\xb2\x59\x52\x4c\x45\x67\x51\x8f\xcb\x27\xf2\xc2\x4b\x6b\x66\x2e\x85\xd7\x3d\xf9\x3a\xc3\x3b\x66\xc1\x94\x4f\x6b\xb8\xb3\xed\x1f\x68\x3e\x69\xbd\xb0\x57\x7e\xeb\xf0\xfb\xb6\xa0\xff\x6b\xbe\x58\x1b\x3a\x21\xd9\x7e\xdb\x99\x4a\xf7\x7c\x6f\xca\x65\x55\x3f\xde\x87\xc5\x68\x52\x72\x1a\x35\x40\xc7\x99\xf7\x16\x3b\x2a\xea\xe5\xcd\xbb\xe4\x00\x71\x91\x78\x91\xcf\x24\xd8\xb8\x50\xc6\xdf\xae\xb7\xae\xed\x49\x9d\x41\xfc\x22\x29\xba\x96\x57\x95\x79\xe7\x9f\x93\x67\x45\x7c\x9f\x9a\xe6\x8b\x90\x08\xd0\x37\x03\x98\xea\xe7\x07\xcd\xd4\x92\x34\x8a\xfb\xb7\xba\x15\x12\x92\x09\x2f\xd1\x67\x0b\x3d\x7b\xbd\x26\x3e\x08\x5d\x5f\x1b\x09\x64\x5a\xe4\x88\xfa\xfe\x3e\x7d\x26\x49\x48\x68\xfc\xa7\xaa\xbb\x7e\x77\x35\x5f\x4c\xe1\xc2\x80\x18\xe0\x82\x0d\xcc\x22\xe3\xe2\x4a\x36\xc0\x5f\x9e\x8f\xd8\x11\x4e\xe3\x31\x4b\x9a\x27\x47\xcb\x4f\x6f\x82\xf9\x5f\x7d\x6e\xf4\x0f\x64\x73\xee\x38\x0e\x7f\xcf\xaf\x33\xab\x6c\xac\x1e\x8a\x2e\x17\xf3\x7d\xbd\xd6\x7f\x87\x1f\xae\x49\xcd\x6e\xc4\xed\x73\x69\xb2\x6d\x5b\x7d\xdf\x1b\x87\x1b\xf3\x05\xc3\xf7\x7b\x8e\xe4\xc0\x42\xd0\xa6\x9a\xba\xe6\x53\x02\x10\x84\x0b\xa6\x84\x8a\xf8\x5c\xee\xf6\x0d\xc2\x18\x55\xfe\x47\x86\x45\xff\xab\xe1\xde\x97\xbb\x23\xff\xe8\xec\xff\x91\x40\x90\x4a\xb6\x88\x0f\xe4\x62\x77\x04\x06\x84\x2d\xc4\xff\xdf\xce\xc8\x1f\x8e\xff\xee\x30\xd7\xa1\xec\x6d\x07\x29\x7b\x0a\x7a\xd2\x87\x7f\x11\x81\x8f\xff\x90\xd7\x06\x7f\xa8\x62\x12\x74\xb2\x4e\xc6\x31\x4e\xeb\xaa\xfc\x16\xf6\xa3\xe9\x81\x7c\xb5\xfe\x93\x61\xa7\xee\x02\xc3\xe3\x0e\x2d\x37\x6f\xea\xf1\x67\x74\xa2\xae\x59\xb3\x21\xb1\x95\x59\x16\xae\xa4\xd3\x5c\x5d\xb5\xad\x7c\xcf\xc1\xef\xe5\x15\x24\x07\x01\x0a\x4d\x90\xf6\xad\x2e\x3f\x5a\xd1\xef\x62\x4c\x6d\x60\xed\xfc\x83\xfd\x5b\xf6\x92\x4b\xca\x8a\xf5\x67\x86\xb7\x94\xd0\x48\x6a\xad\xf9\x24\x4f\xb6\x55\x03\x8d\xe3\x18\x2e\x40\x99\x89\xce\x8a\x8e\x55\xde\x32\xe0\x1a\xb9\xbf\xb7\x8a\xc7\x01\xc7\x2c\xc0\x4a\
xf0\xaa\x42\xf7\xe3\x35\xcb\x17\x85\x57\x2d\xe6\x7f\x62\x93\x20\x49\xec\x07\xea\x8e\x64\x6c\x6a\x1b\x61\x22\x39\xdd\x74\xb8\x51\x1d\x56\x97\x1b\x2e\xfa\x75\xe2\x5e\x19\x23\x81\xa3\xdb\xca\xc4\x8b\xb7\x49\xfa\xc3\x69\x7a\x6b\xb8\x3c\x13\xc6\x50\x7f\xe9\x0b\x75\x6c\x1f\x7e\x4c\x8b\xf0\x26\xe2\xe8\x6a\x5e\xad\x6f\x5c\xfe\xd8\x59\x90\xf5\xc5\xc8\x92\xd3\xfa\x49\xc1\x00\x80\x7d\xa6\x07\x4b\xd5\x99\x4d\x7a\x73\x3a\x4b\x61\x71\xfb\xf7\x7a\xaf\x07\x03\xde\x48\x8f\x64\xb2\x5d\xb1\xdd\xd9\xf9\xc9\xda\x6a\x99\xb3\x02\x08\x72\x33\x48\x1e\xd6\x7e\xf0\x1c\x69\xbf\x6d\x35\xfc\x18\x84\x02\x87\xde\xc2\xe3\x50\xf1\x49\x06\x2c\xcc\x4b\x6f\x64\x09\x82\x2f\x09\xfa\xc2\x67\xd1\x3f\xfa\x92\x05\xdf\x2b\x1a\x2c\xef\x0a\x5e\x1d\x1a\x03\xc0\xda\x03\x6d\xa6\x34\xc9\x77\xe4\xea\x64\x25\x11\x15\x1e\x40\xe6\x39\x6d\xbd\xe8\x89\xc6\x47\xdc\x3f\x44\xf2\x24\x61\x41\x62\xf0\x83\x8d\x0c\x74\x93\x8e\x67\xa2\xe0\x11\x65\xd8\x27\xc1\x6c\x66\xe8\x07\x61\xbe\xc2\xe5\xd7\x26\x07\xc7\x0a\x4c\xb6\x2c\x3b\x51\x8a\xe7\xd1\xe4\x96\xfa\xe3\x55\xfe\xe5\xf8\x6c\xd6\x77\x06\xf0\x4a\x5f\xce\x2a\xaf\x90\xa7\xfd\x8b\xd5\xd5\x10\x75\x61\x61\xf1\xca\xfa\xa1\xe0\xd1\x42\xec\xe3\xaf\xed\x5b\x79\xe6\xc1\xe2\x29\xae\x6c\x83\x6c\x8c\x10\xe7\x22\x1f\x3b\xd6\xf3\x7a\xd3\xfc\x0a\x72\xd9\x6b\x0e\x00\x6c\x73\x3c\xe3\xdb\x6f\x22\xca\xb4\xe2\xe5\x1c\x66\x68\xb9\x37\x99\x5e\x9b\x73\xdf\xc9\x37\x5c\x5d\xd1\xb3\x39\x2b\xfc\x06\x29\x28\xdf\xe9\x38\x92\xdb\xad\x6b\xfa\x5f\x3d\x24\x34\x9c\xe4\xa2\x48\x05\x82\x81\x47\x67\x4e\xce\x26\xaf\x20\x98\x0b\xcf\xe5\x61\x49\xcb\xcd\xbb\x2a\x3a\xaa\x33\xaf\xeb\x91\xe8\x70\xea\x1b\x27\x24\xd5\xf9\x0f\x19\x23\xc7\x8f\xfd\xf8\x95\x93\x01\x40\x42\xb6\x7a\x8e\x6a\xa0\xea\x9d\x16\x7d\x5b\x72\xc4\x2d\xd5\x7d\x66\x37\x07\x85\xb8\xeb\xf6\xe0\x4c\xd4\x69\x55\xc7\x4e\x96\x13\xc6\xce\x2f\x17\xe6\x5f\x8f\x91\x6c\x39\x3f\x35\xf8\x08\xd7\xed\x7b\x6f\x67\xee\xdc\xbf\xbf\xcd\x43\xcc\xad\x6d\xda\xf2\x81\xdb\xe0\xd4\xab\x9c\xd4\x24\x1d\x7e\xc7\x39\xeb\xc9\x25\x9a\x13\xbb\xdd\x0a\x9c\x2d\x79\x14\xcf\x26\xa4\x1f\xc5\xf5\xb9\x41\xbf\x54\x4c\xe3\x12\x6a\x2f\x89\xb5\x22\xee\x18\x01\xde\xef\x75\x95\xc5\x2c\xdf\x0a\x12\x5c\xf1\x6f\xf4\xe0\xc6\xa7\x05\x02\xaf\x19\x3f\x1c\x8b\x77\x23\xc8\x32\xed\xa6\x76\x30\xae\x76\x69\x7d\x07\x28\x06\xee\x66\x87\xec\x53\x7b\xc1\x63\x13\x03\xda\xe4\x64\x51\x0e\xfb\xa2\x86\x65\xb2\x1b\x06\x86\x67\xa1\xc7\x31\xeb\xcf\xa7\x3c\xad\x12\x64\xa9\x0a\xbc\x88\x67\x63\x27\x90\xe0\x1c\x23\x55\xcc\x2b\x47\xbf\x45\x40\xf7\xc9\x6b\xf3\x82\x36\xf5\xf0\x33\xa3\x8a\x7e\x3c\x1e\xdb\x22\xbf\x7d\x70\xa2\x81\xd9\xaf\x39\x95\x81\xce\xfb\xe9\x6e\x95\xec\x69\x87\x11\x9a\xde\x96\xb3\x5b\x7f\x4d\xf4\x96\xf3\xfa\x9e\x8d\xd7\x82\xfa\xcb\x35\xeb\x4d\xfa\x70\xb8\xf0\x7b\x9f\x94\x27\x81\x0c\xf2\xc1\xd4\xdb\xb3\x9a\xa5\xce\x12\x54\x51\x9d\x17\x44\x88\x0f\xd3\xce\xa7\x51\x9e\x9a\xd5\xe5\x6b\x28\x6a\x2f\x4c\xa9\x09\x12\xd7\xa7\x46\x76\xb2\x38\x66\xf7\x0b\x45\x5c\xdb\x0e\xee\x22\x0c\x01\x1f\xc0\x84\xdb\xf7\x24\x52\x03\xf7\x62\xeb\xe8\x29\x3a\x30\xd4\x93\x9b\xef\x22\xff\xa2\x59\x53\x14\x0c\x55\xf2\xf9\x0b\xa5\xcf\x8f\xfa\x02\x0c\x09\x42\x4f\xdc\xb3\xb8\xa3\xb3\x39\x83\x0d\xde\x0e\x55\x7e\x25\x8c\x42\x29\x7f\xca\xac\xd5\x86\xaa\x21\x2d\xba\x43\x66\xc8\xa5\xcb\xe2\xce\x6d\x48\x4b\x6e\xcc\xca\x2d\x7c\x68\x6f\x49\x7f\x86\x12\x13\xe4\xca\xd4\x4a\x7d\xb0\x2f\xd3\x26\x83\x8b\x1a\x43\x2f\x1c\x67\xf9\x2c\xe8\x3a\xab\x4a\x65\xfc\x9c\x3f\xa6\x61\x4b\x30\x5b\xa7\xc7\x7c\x67\xc9\xf5\x9e\xa6\xd8\xc5\x81\x35\x8c\x80\xc3\x7b\x87\x26\x31\xeb\x78\x40\xc2\x68\xbb\xc6\x2e\x24\xa6\xcb\x1d\xcb\xae\x1f\x24\x15\x87\x37\x5d\x0d\xc8\x87\x10\x6e\xf5\xdf\x6c\x86\x69\xa5\x98\x3c\xd5
\x3a\x5d\x1c\xdb\xe9\xe1\xad\xf2\x03\x8e\x1b\xb3\x3e\xb3\xba\xf0\x3d\x47\xb3\x20\x55\x84\x1b\x78\x2f\x67\xec\x2d\x46\x16\x0e\x66\x78\x78\xcf\xe2\x96\xfd\x2d\xa4\x16\x03\x12\xdf\x7a\x9c\x95\x7d\x14\x79\x91\xaa\x10\xa4\x69\x70\xa5\x79\x0f\x77\xa0\x22\x21\x17\xb0\x46\x76\x89\xb1\xc6\x00\x54\x56\x08\x77\x28\xf4\xef\x66\xad\xc4\x4d\xbe\x8f\x6f\x49\x77\x06\xec\xb8\x4d\xad\x56\x43\x48\xc2\x34\x8c\x37\x5d\xd0\xc6\x91\x0f\x4a\x3c\x2d\xb7\x4b\xa8\x08\x17\xce\x11\x7e\x9c\x90\x00\x16\x02\x3e\x01\xd8\x63\x56\x25\x32\xd2\x85\x4b\x02\x8e\x48\x11\x76\xfc\x8d\xed\x7d\xb8\xa0\x4c\xe3\x52\xa1\xf3\xfa\x57\xef\xdb\x94\xf8\x7f\x3e\x4c\x59\xc5\xfb\x5a\x12\x13\xbd\x78\x4e\xa8\x38\x26\x9e\xa0\xc4\xc2\xf6\xd9\xc9\x17\xbe\x2c\xef\xfd\xbc\xd2\x63\x81\x1f\xe0\x1c\x6d\xab\xaf\x1a\x94\xbd\x56\x08\x97\x36\x79\xd6\xc1\xc5\xb3\x64\x83\x4d\xa4\x64\x06\xb2\xab\x53\x0c\x4d\x70\xdb\x64\x50\x2c\x79\xdd\xc0\xe8\x78\xdd\x47\xa7\x19\x0b\x2f\x6b\xde\x31\x66\x38\x8b\x4f\xe7\xbc\x02\x25\x6e\x2b\xe0\xe5\xee\x46\xd4\xee\xf2\x93\xd2\xd8\xbd\x7e\x2f\x5b\xaa\x60\xe3\x38\xe1\x79\x9c\x81\x96\x33\x45\x15\x73\xc0\xcf\xb4\x7e\xc9\x6a\x08\x27\xc2\xf1\x69\xeb\x7d\x91\xbb\x46\x34\xe2\x4f\x12\x3b\x1e\x09\x61\x65\x8e\x89\x41\x31\xe4\xd9\x53\x23\x83\xc2\x45\x29\xc9\xd9\x8d\xdf\xdd\x9f\xdf\x88\x92\x05\xaf\x6b\x7d\x39\x6c\x5f\x40\xec\xa3\x3f\xb3\xb0\x9f\xf2\x42\x75\x49\xe0\x67\x5c\x5f\x30\x18\x83\xb8\xc6\x02\x5c\x6a\xb7\x4a\x2f\x64\x33\xbd\xcc\xbc\x4c\xae\x23\x40\xcc\x4c\x9d\x0a\x65\x89\xdf\xd6\xfa\x2b\xb4\xb1\x35\x98\x9e\x35\x06\x45\x3b\xe7\xf0\x9d\x5e\x84\x57\x34\xdf\x7c\x21\xed\x7d\x85\x7c\xf2\xfe\x8e\xa8\xd9\xd8\x83\x05\xad\xce\x48\xad\x3a\xd6\x76\xb7\x73\x58\xd0\xfe\xc0\x58\xba\x29\x4a\x23\x6d\xf3\x99\x8a\x4c\xe2\xeb\xb1\x47\xec\xec\x7e\x3c\x3b\x84\xe4\x23\xe3\x8e\x45\xec\x19\x05\x5b\xaf\xca\x98\x61\x1a\x53\x4a\xc0\x90\x02\x92\xe0\x6d\xb8\xbb\xdf\x7d\x86\x8c\x56\xa5\x55\x07\xd3\x77\xa1\x0d\x3c\xab\xa3\xbc\x5f\xda\x29\x36\x48\xfe\xeb\x2c\x3b\x7e\x63\xfd\xf4\x97\xc9\x5b\x6b\xc8\x35\xe3\x82\x5f\xd1\xbe\xa7\xbc\xc1\x7d\x1a\x56\xeb\xf8\x83\x62\x3d\x3c\x1b\x58\x05\xf7\x38\x62\xa0\x19\x4c\x65\x36\xc9\x17\x1e\x66\x89\xef\x38\x92\xc5\x4e\x5a\x19\x13\x38\xa0\x92\x68\x63\xe3\xff\xdb\xcd\xde\xb2\x75\xf1\x15\xcc\x07\x62\xa2\x58\xce\x38\xdc\xc5\x3b\x15\x37\xb8\xd5\x20\x82\x45\xe6\x69\x91\x4f\x82\xf1\x64\xca\x59\x52\xde\xcd\x34\xc2\x70\x58\x7d\x7d\xcb\x7d\x42\x56\x1b\x82\x14\xdd\xe3\x85\xa1\x07\xbe\x86\xee\x22\x1f\x29\xbc\x6a\xef\x70\x33\xd6\x64\xaa\x54\xa6\x2c\xf2\xd9\x01\x00\x9f\x4f\xb0\xb1\xca\x69\x75\xca\xcc\x55\x4f\x83\x9b\x61\xc5\xed\xd3\x26\x08\x7e\x7d\xc4\x1a\x21\x27\x9c\xba\x59\xd0\xc9\x5e\x25\x32\x3a\x28\x33\xd1\xb9\xa5\xac\x98\xd4\xfb\xf6\x36\x90\x31\x39\x1f\x85\x86\xf2\x91\x19\xf5\xe3\x40\x05\xf0\xe0\x8d\x7a\x0b\xf3\x47\xbf\x77\x5e\x5a\xb4\x73\x2c\x9f\x2b\x10\x5e\x77\xe6\xb7\x64\x6e\x3e\x5e\x2e\x85\x28\x8c\x17\x76\x8d\x35\x84\x2a\x9c\x33\xb7\x10\x94\x8f\x15\xe9\xbb\xac\xdc\x50\x6f\xaa\x9c\xb2\x22\x81\x2a\xdf\x8d\x87\xe1\xca\x70\xde\x4b\x87\x70\xf6\x79\x1d\x27\xe9\xcc\xd2\x68\xba\x45\x2a\x67\x72\x3f\x80\x32\x6c\x52\x47\x87\x9e\x12\x89\x8b\x88\xf3\x5f\x35\x77\x28\x49\xd4\x56\xab\x11\x64\x37\x31\xad\x1f\x73\x9e\xfd\x90\x00\xc4\x0b\x78\x38\xf6\x62\x27\xf4\x41\xb8\x80\x97\x65\xcd\xef\x85\x22\x1b\x9f\x95\xe0\x45\xc1\xd1\xfb\xb9\xfa\xd4\xf4\xc2\x43\x8e\x63\x02\xa5\x38\xf5\x25\xe2\xc3\x59\x28\x0f\x51\xcc\xe5\x65\xbc\x5f\xdf\x56\x92\x14\x17\x7b\x49\x7d\x56\xcb\xfd\x5e\x05\xb7\x2b\x32\x4b\x22\x5f\x41\x7a\xfb\xd7\x73\x4f\x11\x9e\x99\x6f\x5e\x12\xd1\xb4\x28\x18\x72\x43\xd7\x66\xfc\x4f\x62\xd2\x43\x2e\x77\xd7\xbf\xb8\x67\xba\x08\xbc\xdf\x53\x4a\x18\xe9\xa5\x89\xf
7\x3c\x5c\x07\xf1\xf1\x12\xff\xfe\xad\x3c\x2d\xfb\x11\xb0\xb3\x32\xed\x79\xa9\x40\xda\xd4\x0b\x9e\x2d\xc1\xfb\xa4\x9b\x81\x0c\xdb\x28\xb4\x4c\xde\x6e\xf5\xa5\x2c\xa1\x6c\x9d\x4c\x56\x63\x30\x1c\xb0\x9f\x49\x21\x35\xa5\xf2\x76\xcf\x61\xcb\xa1\x55\x87\xa8\x55\x97\xdf\xa7\x44\x7d\x6e\xbf\x4d\x72\x0c\xa2\x80\x7b\xa4\x5b\xd1\x6b\xc0\x8f\x73\x1e\x77\xbe\x09\xaf\xf6\x3a\xed\xa5\x9b\x4e\xf0\x39\x30\xaf\xe9\xd6\x0f\x95\x15\x59\xf1\xfa\x2a\x63\x83\xd4\x01\x67\x0c\xc8\x65\xd8\x90\x2c\xe1\xbf\x1f\x84\x6f\x92\x17\x7e\xc5\x0b\xa3\x13\xbf\xea\xd4\x97\x9a\x09\xfd\xa1\x7f\x72\x7f\x8b\xd6\xb5\x1d\x0a\x52\xfe\x14\xac\x26\x4c\xa8\xe1\x4a\xaa\x5a\xa0\xeb\x05\xf7\xc7\xed\x65\x3a\x53\x2e\x65\xfa\xb5\xb8\x00\x34\xdc\x1a\xeb\x96\xff\x38\x3e\x53\x98\xf7\x05\x07\xb4\x2e\xa9\xd9\x7b\xc2\xd7\xd4\xd5\xc3\x56\xc8\xbe\xef\x1c\xbc\xa9\xb0\x07\x06\x94\xc0\x19\x24\x08\x9e\x8f\x18\x90\x17\xd4\x73\x0a\x95\xdb\x6b\xf2\x83\xc0\x32\x9c\xbc\x70\xac\x5e\xaa\x06\x25\xb5\x5a\x09\x8a\x4f\x40\xb0\x56\x92\x31\x41\xf2\xb4\x60\xde\xad\xaa\x2b\xcd\x9e\x29\xd4\x0a\x15\x26\x94\x6b\x7c\x66\x78\xde\xa5\x3f\x26\x6d\xd7\xed\xc8\x38\x85\x5a\x14\x8e\xce\xb8\xf3\x87\x08\xec\x4f\xd7\x68\x08\x85\xb8\xd6\xa0\x54\x9c\xcc\xf5\xd3\x62\x84\x25\xb6\x92\xbe\x8b\xf6\x45\x81\xed\x7a\x06\x11\x13\x7b\x86\x56\x86\x14\x17\x0a\x6a\xfd\xd0\x5d\x3a\x57\xec\xfc\x3a\x3b\xe8\x00\x78\xb1\xf5\x63\x74\xfd\x7c\xd0\x5c\x7d\x5f\xd7\xc4\xf3\x39\x6b\x50\x04\xfe\xd4\xfd\x93\xb3\x8e\x3b\x62\x84\xd9\x13\xd4\xde\x7a\xe9\x1b\xd1\xad\x21\xee\xb2\x82\xde\xc2\x57\xbd\x98\x04\x19\xa9\xa9\x5a\x8e\x34\xf4\x70\x91\x56\xb3\xcc\x2a\x28\x97\xf7\xb1\x14\xf6\xbe\x19\xc6\x09\x45\x11\x6a\xa3\xe8\xde\x4a\x03\xa8\xd2\xfd\x15\xea\xbb\xe2\x01\xa8\x1b\x15\x4c\xa0\xc7\x37\xff\x98\x0b\x56\x00\x90\x45\xc1\x0a\x7a\xa9\x34\x70\x05\x47\xc7\x26\x8b\xd9\xad\x88\xb1\x60\xb3\x6f\x21\x33\xe4\x83\xfc\xaa\xed\xf4\x9f\x38\x0b\x02\xcf\x97\x9b\x54\xa5\x8a\xe0\x90\x69\x09\xfe\x75\x93\xfc\x11\x53\xb0\xca\xfa\x43\x9c\xf8\xdc\x97\xb2\x81\x70\x59\x4c\x03\xb5\x1d\x1e\xe2\x48\x05\x33\x98\xdd\xef\x3a\xaf\xbe\xd1\x91\x37\x60\xeb\x56\x8e\xfe\x36\x35\x3a\x1d\xc8\x0c\x57\x42\xbd\xf0\x13\x29\x94\x07\x5f\x8d\xeb\x8b\xed\xe2\x58\x9c\x21\x63\xdf\x60\x13\xe1\x10\xa2\x49\x17\x59\x89\xf4\xc6\xb4\x69\xbe\x50\x38\x94\x66\x48\xd4\x2b\x68\x59\xf7\xf6\xfc\xc1\x2d\xe1\xc1\x14\xb0\x7e\xe1\x5f\x12\xbe\x2e\x42\xb5\x96\x8c\x05\x6b\x10\x82\xbe\x24\x0e\x64\xa2\xcf\x39\xc4\x46\x57\xa6\x44\x7e\x77\x2c\x56\x0c\x50\x57\xfd\x8a\xe8\xdd\xd3\xd6\x71\x45\xaa\x2a\x23\x37\x7d\x80\x0a\xab\xc9\xb2\x1a\xd9\x92\xd2\xc0\x79\xf6\xff\xf1\xc1\x75\xaf\x15\x61\xf3\xf3\xd1\x37\xa3\x87\xaa\x39\xb5\xc7\xb7\xeb\x47\x06\xfd\x6f\x55\x16\xf5\x97\x4f\xb2\xab\x82\x0b\xb8\x32\x0c\xd2\xbe\x55\xac\x52\x80\x91\x32\x7c\x92\x0a\x48\xd5\xea\xbe\xab\x0d\x7d\xc5\xff\xc8\xfc\x45\x41\x6e\xd4\xd0\x34\x9f\x7c\x90\x22\x2e\x67\xd0\xf4\x4e\x56\x83\x70\x4e\x1f\xf3\x29\xff\xf4\x29\x59\xd4\x70\x9c\x33\x5d\x72\x99\xec\x0a\x49\xb0\x21\x6a\x0f\x7b\x89\xef\xf9\x89\xbc\x5e\x1b\x8a\x0d\xc6\x1b\xb0\x85\xd1\x44\xa9\x60\x81\x3a\x34\x03\x92\xe6\xfc\xad\xe4\xac\xa7\xe2\x62\xa3\x04\xf7\x14\x8a\x92\x91\xc8\x66\xd1\xda\xa4\xc7\xe8\x7d\x32\xaf\xb6\x3a\x57\x91\x1d\x58\xbd\x38\x27\x7c\x7d\x14\x70\xfb\x06\x8b\x08\x4b\xf3\x68\x7b\x02\x67\x44\xe3\x6e\xcd\x10\xee\xd8\xb0\xc1\x25\x5a\x84\xbb\xe2\xaf\x0d\xa5\x21\x5c\x71\xb9\x9b\x5a\xac\x1d\x51\x3a\x64\xfe\xdf\xd0\xfe\x18\xab\xb8\x6c\xfe\x4b\x19\x5c\x11\xd1\x82\xcf\x6b\x78\xce\x75\xb2\x82\xb3\xb8\x94\xdd\xf5\x2d\xd0\x41\x0f\x51\x2f\x6b\xd6\x6c\x4d\x6f\x79\x1f\xad\x11\x3f\x3a\x41\xca\xb6\x8b\x7e\xb9\x93\x47\x96\x92\xa3\x94\x47\xc8\xbb\x5a\xb2\xe6\xfc\x99\x3d\x2c\xdc\x
1f\x8f\xd9\x9d\xf1\xb5\xc4\xb6\x8b\x71\x73\x02\xce\x27\xbe\x0c\x56\x51\x67\xfe\xca\x45\x7e\xd2\xc0\x45\xb8\xee\x18\x90\x66\xc3\x40\x3a\xe3\x99\x62\xe6\x92\x6f\x20\x6d\x1c\x6b\x34\xbb\x34\xf3\xc1\x87\x82\x81\x40\xe0\x0f\x75\x71\x62\xa5\x39\xc4\x3f\xd6\x6e\x87\x46\xe9\xf9\x70\x4f\x12\xd5\xea\x7d\x7a\x17\x39\xa6\xe8\x01\x26\xf4\x3c\x07\xaa\x15\xbe\xad\xe0\xf8\xf4\x76\x25\xec\x46\xc9\x77\xc5\xa8\x30\x48\x4c\x99\xb5\x9b\x6a\x71\xf5\x80\x05\x5f\x2f\xd2\x0b\xe6\x18\xb6\x7b\x33\x78\x53\x9f\xee\x0b\x8a\x1f\xab\x66\x1d\xad\x42\x16\xa6\x1e\x2d\x27\x9a\xe6\x11\x64\x8b\x44\x37\x20\x44\xd7\x80\xd4\x0a\x0e\xfd\xac\xaf\x30\x26\x1b\x1a\x6d\xc0\x16\xde\x17\x14\x5d\xb7\x1a\x82\xfe\xa8\xd9\x27\xed\xd7\xd9\x84\x77\x57\x25\xf9\xb0\xba\x31\x32\xd7\x16\xf6\x36\xec\x74\x4b\xe6\xb6\x9c\x6d\x88\x43\xb6\x56\x8b\x80\xff\xa5\xd4\x87\x6d\x57\xd9\x19\xf0\x33\xc5\x06\xe1\xc9\xdd\xd7\xfb\x79\x2f\x25\xd2\x5a\x79\xce\x9f\xb6\x3e\x42\x01\x57\xb5\xac\x25\xd9\xb7\x24\xa5\xf5\xb7\x4a\x4e\x13\xce\xf8\x16\x22\xac\xb4\x4f\x2b\xe1\x67\xf8\x13\x9b\xa1\xf9\x43\xb2\x27\xb3\xe5\x50\xf9\x0e\x78\x98\x45\xce\x28\xe4\x6c\xcb\x14\xf9\xd0\xa9\x46\xf3\x64\x9e\x39\x4a\x97\xee\xbc\x05\x4e\x06\xd5\xd5\xfe\xa1\x1d\xef\x29\x5e\x48\x1e\x30\x50\x9f\xbc\x95\xe2\x99\xa6\x3d\xbe\x0c\x08\x45\xf7\x09\xb3\x54\xf1\x9f\xc2\x35\x2e\x0c\x8d\xbe\x43\xed\xc6\x15\xf4\xb8\x31\xbb\xdc\x7d\xb7\x3a\x62\x62\xd9\xd4\xfb\xf3\x73\x0a\x5f\xe7\xce\x0f\x99\xbc\x79\x74\x4d\xbb\xef\xf3\xa6\x52\x6d\x3a\x02\xee\x0d\x34\x07\x10\x57\xa7\xcd\x15\xff\x6f\xfb\xb9\x6e\xd5\x94\x37\x6f\x47\xd5\xb4\x58\xb9\xb2\x8b\xb2\x41\x76\x88\xec\x47\x10\x74\x47\xd4\xc3\x86\xc0\xa5\xa8\x45\xb8\x02\x45\xa8\xf5\xd4\x41\xf6\xa9\x3e\x26\x92\x44\x0b\x29\x83\x1a\xfa\xb0\x6a\xa8\xa4\x22\x5c\xe4\xc6\xfc\xca\xf7\xad\xee\xf1\xbe\xd1\xfe\xa7\x64\x17\xdc\x9a\x41\xfd\x4e\xc7\x27\xb6\xe6\xc5\xdb\x6a\x77\x36\x25\x76\xde\xab\x80\x70\xb7\x35\x5b\xfb\xad\x1b\x1d\x5f\x9b\xaf\x3c\x41\xcb\x3c\x3e\x5b\x74\x5f\xa2\x16\xcd\x9c\x11\x26\x7c\x0d\xb6\x3a\xb4\xcc\xb8\x55\xf7\x4c\xda\x82\x69\x2b\xfb\xb1\xc1\x66\x54\x9f\x94\x5b\xed\xfa\x06\x0c\x9f\x47\x49\x4d\xfc\xa4\x6a\x1f\x7a\x03\x8c\x39\xc5\x27\x53\xbb\x4c\x9b\x42\x9c\x09\x1b\xec\xc4\x52\x4d\xa5\x42\x9a\x41\x3f\x2d\x31\x3d\x29\xa7\xf7\xd4\x0f\xce\x9e\x01\x2b\x99\x77\x2c\xa9\x98\x91\xb2\xd2\x98\xf8\x55\xf0\xe8\x16\x4b\x03\x1b\x0d\x18\xfc\x06\xa0\x98\x12\xa6\x80\x2c\xd1\x2b\x9e\x5c\x0e\xef\x41\xb8\x04\xd8\xb2\x00\x76\x42\xa8\x97\x0b\xd0\x81\x02\x10\x4a\x65\xe2\xf8\x34\xd1\x2f\xe8\xb2\xae\x96\xa3\x66\x2c\x50\x91\x9b\xc7\xc8\x43\x9e\xa4\x54\x1a\x14\x28\xa2\x40\xaf\xaa\x19\x1d\x2f\xb0\x2c\xf2\xc7\xd7\x38\xc5\xca\xd1\x04\xd0\xa7\x17\xaf\x9f\x9e\xa4\xf3\x04\x87\xec\x9b\x9f\x51\x41\x19\xba\xf5\xd5\xb6\xa7\xdf\xbf\xcd\xc1\xe4\xc8\x4a\x9f\xf3\xaf\xc3\x77\xa4\x96\xf4\x89\x5d\xea\x35\x83\xe2\x5d\x13\x21\xb2\x94\x0a\xd8\xa0\x18\xee\xdc\x48\xbc\x58\x2e\xc1\xeb\x3c\x0d\xc3\x81\xb1\xe2\xb1\xd1\xb2\xc8\xda\x55\xb0\x17\x86\x1c\x01\x06\xd8\x4d\x10\x17\x9b\xb6\xe8\xf6\xef\x33\x2f\x65\x09\xd1\x54\x20\xef\xc4\xa6\x0c\x1f\x84\xb9\x73\xde\xe5\x10\x13\x69\x97\xb1\xaa\x0a\x2e\x31\x13\x20\x8c\x9f\xc4\x69\x7d\x86\xba\x11\x64\xb6\xd0\x4b\x00\x2e\xf8\xc0\x91\x90\xaa\xed\xd3\xa5\xd1\xe1\x86\x2c\xb1\xaa\xea\xad\x46\x47\x3e\x6b\x7f\xca\x3a\xbe\xf1\x3a\xf9\x2a\x6b\xd8\x5c\xcf\x51\xfa\x51\x08\x7b\xe4\xbf\x8c\x25\xac\x52\x03\xe9\xf3\xb2\x21\x89\xad\xc6\xce\x0e\x6e\x0d\x40\x5c\x84\x94\xca\x98\xaf\xf9\xe4\x54\x3d\x12\x9d\x85\xa8\xd8\x13\x50\x84\xce\x16\xdb\x64\x36\xbb\x2e\x60\x2d\x11\x5e\x8b\x0c\xfe\xa3\x31\x10\xce\x2a\x0d\xd8\x2b\x8b\x51\x90\xbc\x9f\x10\x65\xd9\x5f\x6f\x85\x7e\xb7\xc9\xce\x08\x9a\x6f\xdd\x80\xbb\
x44\xca\x31\xc5\x31\x6e\x58\x59\x2d\xe6\x97\x07\x4d\x92\xb2\x20\x56\x8b\x1b\x8f\x5b\xef\xec\xf4\xdc\x3e\xd6\xb1\x09\x89\xbd\x58\x5c\xb4\x67\xcd\xfb\x17\xc6\x89\x6d\x72\x87\x90\x64\x38\x9b\xad\x6a\x08\x94\x62\x42\xaa\x9f\x17\x5f\x24\xc2\xa2\x1d\xb2\x64\xed\x1d\x02\x6d\xfc\xa8\xd9\x73\x6a\xed\x3f\x40\xfd\x25\xd0\xa6\x4d\xf1\x07\xf3\xb6\xae\x5f\xa6\xa7\x36\x94\x5e\x3f\xef\x87\xe6\x18\x7e\xaa\x8a\xa0\xe9\x3d\x90\x17\x11\x9d\xdc\x20\x43\x71\x59\xa0\x9c\xf8\xce\x05\x41\x1f\xf8\x21\x4d\x6b\xec\x56\xee\x11\xe3\xe7\xcd\x3b\x27\x39\x83\x4e\xa9\xf2\xb0\xf9\x72\x53\xc7\x7b\x85\xc4\x61\xe9\xf6\x26\x2a\xfa\xbc\xfd\x56\xa0\x50\xe9\xa4\x07\x00\xad\x7c\xd7\xc8\x94\x49\x00\xd5\xed\x8e\xff\x4b\x88\x4b\x32\x51\xc9\x62\x94\x4e\xdd\xd6\x1f\x7e\x5a\x62\x4d\x72\x1b\x4b\x70\xa2\x98\xcc\x88\x4b\x59\x7c\x09\x0b\x52\xb7\xf0\x10\xf8\x46\x04\x57\x02\xef\xf4\xc8\x1b\x31\xb5\x62\x13\x17\x1c\x51\x1f\x96\x1a\x8c\x98\xcf\xdb\x5b\x91\xe0\x75\x3d\x96\x13\x69\x88\xf6\xa9\x64\x4f\x9b\xe4\x0b\x7e\x16\x56\x3e\x50\x56\x83\x72\xa5\xec\x15\x60\x1e\x8b\x8e\x8c\xe0\x7b\x7f\x40\xbf\x50\x31\xe9\x38\xe9\x0d\xd7\x72\x38\xfc\x9e\x04\x4c\x8e\x4a\xcf\x4c\xe8\xdf\x6d\x03\x19\xdf\xaa\xac\x7d\x2e\x45\x31\x60\xc9\xcb\x5c\x52\x1c\xa5\x8f\x08\xc3\x37\x58\xe0\x68\xa7\x47\x07\xd7\x79\xc1\xec\x24\x45\x09\xad\x59\xad\x8a\x2d\xb9\x04\x61\x7f\xdf\xa1\xc9\x66\xf1\xd9\xf3\x66\x95\x3a\x64\xb4\x2d\xff\xd6\xc7\x4c\x42\x78\xf7\xeb\xed\x8b\xfd\xb3\xeb\x43\x11\x44\xe1\xed\x7b\xb6\xb3\x46\xc6\xa5\x1c\x3f\xef\xaf\xb3\x42\x44\x9f\x40\x33\x1c\x20\xcb\x8f\x2a\x38\xdb\x41\xb3\xd2\xde\x18\x67\x4d\xaa\x36\x2f\x67\x03\x83\xdd\x4e\xec\xfd\x16\x6b\x6a\xb4\x20\x78\xac\xc4\xe8\xbd\x06\xa3\x3a\x9d\x60\x06\x43\x3e\x19\x82\x89\xb2\x35\x67\x79\x56\xce\xbf\x6c\xc7\xdf\x78\x66\xb6\x4c\x4d\x97\x39\xba\xe1\xcf\x18\x8a\x42\x8f\x1b\x32\x6b\xb6\x79\xc3\x64\xaf\x5a\xad\xf2\xc6\x86\xad\x5a\x0d\xa5\x98\x35\x74\x43\x15\x39\xd1\x55\xac\x0f\x13\x65\x15\x7f\x8d\x68\x83\xf9\x38\x4c\x55\xe8\x9d\xa1\xe5\x99\xcc\x39\x39\xe0\xed\x4b\xf5\xf1\x89\x7e\x35\x3c\x65\x2e\x42\xcc\x3b\x12\x8b\x26\xcd\xa1\xec\x94\xba\x1b\xee\x64\x5f\x16\x43\x32\xb6\x94\x62\x5b\xf3\x20\xc9\x52\x59\xd2\x18\xe8\xf9\x86\x87\x3a\x8d\xd9\x89\xbb\x97\x65\x3f\x87\xa3\xd8\x26\x52\x1d\x22\x09\x6b\xc0\x85\xe9\x7c\x98\xc4\xef\xe9\x29\xc0\xf5\xf5\x5f\x42\x2e\x6b\x0f\xb9\x84\x1e\x52\x77\x0c\x01\x54\x07\xd6\xbc\x3e\x0f\xd5\x4f\x85\xee\x56\x34\x1c\xca\x21\xc9\x9f\x89\x40\x13\x14\x1e\x38\xe4\xf2\xc9\xa5\x0e\x17\x43\x6f\xda\x34\xe6\x74\xbe\xd5\x37\x28\xff\x08\x06\x0f\x2c\x8d\xc1\xca\x17\x68\xf5\xa8\x18\x45\x7f\x2f\x1d\x96\xbc\xe3\xd8\xd3\x60\xfe\x38\xf0\x25\x71\x39\x85\xba\xe3\x89\x53\xc8\xe3\x5f\x90\x64\x45\xc5\x40\x4d\x74\x44\xe6\x71\x36\x03\x80\xdb\x3d\x04\x51\x81\x2f\x8c\x95\x53\x56\x37\xf7\x2d\xf2\xe4\xef\xaa\x41\xed\x95\x33\xe7\xbd\x8d\xf2\x1a\x3f\x42\x36\x78\x6a\xfc\xf9\xde\x52\x12\x7f\x8b\x4e\x4a\xef\x0a\xe2\x17\x49\x79\xc3\x21\xe9\xfe\x7e\xcb\xcf\x53\xff\x7b\x6c\xe4\x99\x18\xa1\x6d\x39\x76\xa9\x72\x03\xfc\xef\x41\x56\xc7\xad\xb5\xb4\xe2\x84\x1c\xea\x40\x18\xe7\x85\x59\x13\x52\xb7\xfd\xdf\xcb\xea\x63\x5c\xdc\xef\xd2\x9e\xf5\x91\x8f\xa7\x62\xf2\x68\xc9\xd3\x59\x40\x5a\x61\xde\x31\xb2\x1e\xff\xc1\x2e\xa1\x6c\x27\x36\x91\x2e\xb2\xee\xd4\x37\x8a\x73\x5e\x03\x3b\x65\x53\xbb\xdf\x4d\xf7\x6c\x4d\xb1\x23\xe4\x2a\x2b\xfb\xff\x1c\x6e\x4b\x5c\xe7\xc9\x2b\x65\x05\x25\x2c\xf1\xad\xbc\x6a\x43\xda\x0e\x70\x60\x3a\xc5\xa5\x92\x01\x7f\x5f\x7d\x18\x16\xbb\x8f\x14\xda\xfe\x17\x2e\x6d\xda\xff\x49\x52\x66\xb2\x56\x04\x02\x76\x7e\x2e\x54\xe4\xa9\xfa\x19\x76\x42\x9b\x7e\x69\xd5\xcc\xae\x74\x1c\x72\xf6\xeb\xb0\x9f\x9d\x5c\x5f\xf5\x34\x5d\x1e\x6f\xbd\x36\x1e
\x0b\x53\xac\x06\xab\x7c\xaf\xd0\xa2\x47\x2b\x93\x30\xec\xfc\x9b\xfe\x2a\x62\xbb\x7d\x60\xc9\xbc\xe2\x93\xcd\xa8\xf0\x7a\xbb\xd7\x47\x8f\x05\x90\x49\xb9\xa3\x5a\x8a\xa5\xca\x25\xd8\xd4\xa9\xda\x3a\xea\x39\x63\x62\x66\x79\x61\x62\x7f\x41\x8e\x59\x0c\xa8\xdf\xe9\xea\xf7\xda\x48\xc7\xe7\x50\xde\xbc\x4a\x7a\x32\x96\xa4\x84\xca\x02\x36\xb7\x48\x2e\x7a\xd8\xd3\xcd\xd7\x5d\x8d\xbd\x7d\xbf\xdb\x21\x5f\x1e\xe1\x6f\xab\x70\xe0\x82\xb3\x45\x7c\x2e\x4d\xb6\x47\x60\x8c\xff\x7f\x10\x4b\xdb\xf8\xff\x6b\xe7\x3f\xf2\xff\x07\x6f\xc1\x18\x55\x5e\x29\xfe\x4b\xb1\x40\x2a\xd9\xf8\x06\xaa\xec\x2f\x6f\xeb\x63\x5c\x3f\x3c\x2d\xed\xf5\xd1\x37\x97\xea\x09\x8f\x09\x75\xce\x6d\x6c\x50\xfc\xf6\x88\xc1\x20\x6a\xa2\x63\xd7\x96\xa9\xc3\x55\xac\x8e\xbe\xb6\xd0\x49\x22\x6f\xf9\x8e\x61\x91\x5a\x63\xc0\x3a\x42\xa6\xb1\xfd\x49\xfd\xb8\x80\xad\xaa\x66\xa5\xd3\x2a\x77\xe9\xc6\x7b\x24\x0e\x2e\x6f\x5c\xa1\x17\x64\xdc\x8d\xea\xec\xfb\xd7\x44\x1d\x98\x78\xa6\x2a\x6d\x51\x5d\x4b\x05\x88\x5d\x0d\xe5\x31\xb7\xe1\x88\xfa\xb4\x20\x6f\xef\x25\x8b\xc0\x4d\x26\x61\xd9\xde\xd7\x4a\xf0\xfd\x9d\x54\x7a\x57\xc2\x66\x5e\x7a\x4a\x65\xf3\xa2\xd6\xd9\x65\x1d\x12\xcf\x95\xb6\xde\xd3\xa5\x0c\xd1\x42\xd5\x5a\xcc\xf1\x9f\x6f\xb4\x36\x4d\x7c\x8d\x17\x26\xf8\x39\x19\x0f\xe6\xa3\xa3\x6f\xc1\x37\x26\x5a\x8c\xd2\x91\x47\x08\x15\x03\xd1\x21\x1b\xd9\xdd\x4f\x1e\xa6\x66\xce\xbb\x5f\x6d\xd6\x8a\x9c\x6d\x7e\x57\xa7\x36\x53\x1d\xbb\x4c\xf5\xbe\x86\x71\xa6\x81\xd5\x42\x9a\x59\x0f\x5f\xb4\x5c\x8d\x36\xda\xb9\x1b\x0e\xbb\x05\xf7\x4a\xad\x36\x58\xbe\x18\x63\x4e\xd4\x93\x6e\x8c\x8d\x8d\x6d\x12\xe9\xe1\x0b\x57\x98\xa0\x2c\x19\x0f\xe1\x69\xc6\x65\xa6\x5a\x7e\x01\xc2\xa2\x65\xe7\xd4\x7e\x8e\x12\x83\x81\xbe\x2f\x4d\x21\xb7\x9b\xe3\xb5\x01\xe6\x53\xc7\xc7\x08\xbd\xe2\xa9\xb6\xdc\xc5\xdc\xc6\x06\xf0\x52\x8a\x8b\xb5\x42\xc7\xfb\x94\x88\xc0\x90\xa4\x04\xed\x13\x03\xec\xdf\x91\xbc\x57\xda\x3a\x9d\xa6\x28\x15\x53\xb8\x65\x4a\x35\xe8\xd0\xeb\x86\xfe\x60\x66\xa3\x38\x11\xf8\x93\x53\x3d\x3b\x87\x05\x06\x68\x45\xee\x04\xff\x4c\x26\x2d\x64\x25\x24\xb1\x15\xe4\x6e\x33\xd2\x4a\x72\xd6\xdb\x81\x9e\x50\xbc\x4d\xe8\x78\x99\x4e\x09\x65\xcd\x68\x1f\xd1\x1b\xa0\xff\xe2\xe9\xf6\x8d\x1c\xde\x76\xac\xd8\x5b\x4a\x46\x29\x84\x7a\x67\x5e\x61\x63\xd4\xbc\x5d\xd5\xa8\x5b\x05\x60\x79\x38\x35\x86\x68\x26\xe9\x2e\xda\xe9\xe6\x1d\x71\xec\xa0\xd5\x63\xd4\x53\xad\x1d\x5d\x9a\xcc\x0a\x08\x75\xd5\x9e\xef\xd6\xdb\x6e\x8c\x60\x81\xfd\xcd\x45\x81\xd9\x6d\x41\xe7\x34\x6e\x09\x8c\x5e\x29\xf2\x99\x0f\x85\xbc\xf5\x98\xe7\x5a\x25\xa5\x25\x6e\xa7\x9b\x5e\x08\x7e\x1e\x70\xe8\xf1\xba\x31\x9e\xc4\x96\x37\xb6\xea\x75\x14\xf5\x27\x67\xa4\x78\xe3\x0d\x25\xe0\x18\x2e\x15\x6a\xec\x25\x9d\x1c\xce\xe2\xb0\xf7\x41\x51\x98\xcc\x09\x57\xc2\x46\xb9\x99\xf5\x1f\xec\xc8\xed\xe3\xff\x20\x1b\x7e\xb8\x2e\xc5\x99\xc7\x5c\x91\x35\x36\xb5\x80\xe6\x90\xf6\x78\x0e\x8f\x7f\xe4\xd9\xcf\x4a\xea\xe7\x3f\x0d\xe9\x87\x18\x48\x77\x90\x9b\xbf\xad\xd8\x28\xba\x61\xeb\x3b\x53\x2d\x8b\x83\xd1\xcf\x3f\xf8\x7b\xd1\x22\x9e\x1a\x19\x18\x99\xd5\x9d\x00\x6d\xc2\xfa\xdb\x49\x47\xdd\x74\x64\xc1\xbf\x12\xe8\xe4\x46\x4f\xd1\xd5\x3d\x6a\x10\x76\x17\xc6\xcc\xbe\x06\xf6\x8c\x00\x16\xb3\x32\xdf\x5c\xce\x67\x49\x4c\xdc\xa1\xfe\xea\xf4\xd7\xf9\xf9\x18\xc7\x9c\xd5\xab\xf2\x20\xb9\x0e\x0c\xc8\xc9\x11\x70\xdd\x3e\x70\x3e\xec\xb3\x78\xa8\xe0\x0d\x3e\x1b\x60\x37\xb8\x0a\x49\x19\x69\xfa\xea\x68\x58\x76\x9a\xe1\x9f\x28\x1b\x4c\x5e\xe2\x0f\x6b\x73\xcb\xc8\x51\x45\x50\xfd\xb9\x22\xa1\x7d\xb0\xf1\xe7\xf1\xfa\x57\x63\x1b\xea\xcf\x03\x43\x88\x9a\x11\xdf\x4a\x1d\xc2\xb0\x7b\xf1\x14\xc7\xd9\xe3\x75\xd9\xf5\xed\xd2\xa3\x8a\x4d\x5b\xc7\x42\x0e\x65\xb4\x2f\x72\x82\xdd\xc2\x7e\x1f\x2
5\xd4\x28\x71\x9f\x6c\xe2\xf5\xe7\x72\x2f\xbd\xdd\xb0\xb0\x05\x9a\x3a\xd9\xef\x3d\x89\xac\x26\x0d\x95\x24\x3e\x3c\xce\x7c\x91\xdb\x26\xb1\xea\x3e\x7a\x2c\xe6\xa6\xf7\x36\x9f\x66\xc5\xe4\x03\x54\x31\xa6\x0f\x3d\x09\x0a\x59\x4e\x0a\x95\x7c\x80\xa7\x36\x00\x56\x6c\xf0\x0a\xce\x5e\x4b\x66\x32\x34\xc5\x5c\x26\xbc\x66\x9c\xe4\xf7\x44\x27\x16\xa0\x33\xa0\x06\xfc\x07\x49\xbe\xe9\xa8\x2a\x19\x2c\x96\xec\x36\x9c\x1f\xb7\xab\x5c\x1d\x93\xe7\x11\x36\xd9\x8d\xfa\xd7\x28\x96\xfd\xe6\xfd\x28\x71\x3c\x2a\xd3\xcd\x6c\xed\xc4\x72\x84\x80\xd3\x97\x01\x81\x5c\x8a\xf9\x00\xea\x6e\x94\xc3\x2f\x8d\x26\xb3\x65\xc7\x73\xfe\xb9\x96\xea\x98\x77\xdd\xdb\xfb\xbf\x9b\x9f\x05\xce\xb0\x28\x3f\x3d\xd1\xaf\x64\xb2\xf9\xda\xf7\x72\x81\x3f\x9d\xa9\xb5\x73\x69\x6c\x9e\x27\x4a\xdc\xc9\x9c\x86\xcf\x51\x87\x54\xf2\xd1\xaa\xbb\x55\x88\x8b\xe2\xdf\xa0\xe9\x67\x05\x30\x75\x2e\x64\x52\xae\xf0\x33\xba\xc3\xdf\x90\x31\x3d\xe5\x6b\x7e\xcc\x93\x45\x12\xe8\x9d\xa3\xfd\xb9\xe0\x0a\x0d\x02\x09\xb4\x70\xc5\xbc\xb3\x46\xde\xce\x59\x34\xe1\x02\xec\xb6\x10\x21\x09\x9e\x50\x51\xff\x2e\xf3\x67\x70\xf8\x74\x3b\x61\x17\xc4\x7e\xe2\xe1\xd5\xe7\x00\x89\x77\xdf\xe0\x99\xf8\x13\xfe\x8d\x1b\x50\xfe\x78\x04\x30\x4c\x5a\x15\x3b\xed\x98\x9d\x23\x06\x78\x2a\x36\xa8\x7a\x5f\xa5\x07\xe9\xc1\x75\x5f\x97\xb1\x9c\x42\x37\x01\x43\x7b\xd9\x2d\x82\xc4\xb6\x5f\x56\xfb\x53\x16\x3f\x51\x73\x8f\xf0\xe4\x9e\x0f\xf9\x4c\x4a\x6b\xa0\x98\x64\xe2\x06\xa0\xdd\xd1\xea\xdb\x33\x10\xdd\xb1\x9f\x14\x0c\xb9\x90\x1f\x24\x92\xe1\xa4\x7c\xf2\xc8\x20\xf3\xdf\x9d\x9c\xcc\x21\xfd\x8b\xb8\x87\x5d\xac\x77\x2b\x7d\xda\x1b\x25\xfa\x91\x8d\x27\x8d\x9e\xac\xf7\x6f\xe4\x29\x99\xe4\xf4\x1a\x12\x89\x4b\x7b\x76\xfc\x34\x5a\x26\x21\xdb\x07\xed\x7f\xd9\x0f\x53\x73\x0a\xf9\xbc\xbf\x04\x82\xfa\xd5\xc0\x92\x84\x96\x10\x84\x04\x70\xee\xa9\x74\xdb\x16\xcc\x99\x77\x2f\xd1\x90\xab\xa7\x5e\x33\x81\x5f\xd6\xa4\x9a\x2b\x4b\x86\x67\x4d\x76\xf4\xaf\x49\xa1\x0e\xee\x94\xaa\x61\x9a\xc3\x09\xe5\x39\x53\xc3\x19\xea\xb1\x71\x0e\xe5\xe2\x03\x1a\xcf\xbd\x7b\x7a\xb5\x9a\x91\x1d\xc9\xb4\x62\x73\xf9\x68\xe3\x56\xf6\x6f\xce\xd4\x3b\x2e\xba\xa0\x3e\x90\x68\x63\x9a\x5b\x2b\x04\x50\x47\x12\x6d\x18\xcb\x35\x55\xf3\x1b\xdc\x27\x48\x7e\x3d\x86\xbd\x0d\x36\x44\x06\x54\xe8\x67\x2f\x62\x8b\xe9\x43\x01\xd6\x91\x26\xb6\x79\x13\x36\xc9\xe4\x17\xce\x5f\xd4\x7c\x8a\x59\xb6\xae\x4a\xb7\xa1\xf0\xfe\x6f\x5e\x01\x02\xe3\xaf\xe2\x74\x22\x81\x94\x67\x8d\x0f\x52\x32\x98\x8d\xa9\x1a\x47\xaa\xd1\xfe\x96\x02\xca\xdb\xa3\x99\xe6\xf1\x9c\x47\xf0\x1d\xdd\x19\x86\xcd\x38\xe7\x2e\xcc\x70\x7a\x68\xe8\xfc\x3b\x45\xce\x19\x28\xa7\x53\x2c\x3a\x0d\x4b\xc8\xe8\x5b\xc8\xf2\x7b\xbf\xf1\x53\x7a\x9b\x97\xdd\x4e\xa9\x07\x36\x49\xd2\x38\x71\x7e\xeb\x44\x6f\xa5\x66\x3c\xfb\x2f\xa0\xcb\xbf\x0c\x3f\xd8\x3c\x05\xa7\x95\x50\xdf\xea\xfe\xb5\xba\x23\x3a\xa6\x56\xdc\x29\xfa\xe9\x54\x11\x08\x8d\x6e\xda\x35\x91\xba\x0a\x29\x88\x58\x6b\x34\x31\x73\x46\x61\x01\x59\x8c\x2d\xcd\xa5\xe3\x3a\xb0\xc1\xda\x8c\xf9\x01\xc1\x67\x3f\x7f\x72\xd9\xcf\x60\x80\x3f\xcd\xdb\x52\xf0\xf6\x66\xe1\x5d\x32\x4c\x0c\x04\x2a\x88\x7b\x9c\xa9\x49\xdc\x33\xa2\xb4\x55\x9a\xf4\x95\x89\x1b\x03\xd3\x18\x9c\xe5\xfc\x42\x4c\x25\x73\x73\xdf\xf2\xb6\x2d\xe6\x50\x46\xb3\x84\x26\x0d\x89\xc0\x3b\x2d\xe0\x22\x8f\x13\x3c\x8f\x32\xcd\x10\x92\x22\xfd\xe2\x38\xe4\x1b\xc2\x56\x8c\xe7\xba\xf9\x8a\xbb\xce\xb6\x66\x72\x49\xd5\xe1\x3f\x90\xc3\xdc\x6f\xec\x78\x9c\x4c\xcf\x6a\xb9\x0c\x0b\x86\x22\x06\xd3\x83\xaf\x12\x89\x27\xe2\x42\x9f\xcb\x7f\x1b\xe8\x3a\xc6\xa8\xfa\xfc\x75\x2e\xe1\x25\x5e\xc0\x3b\x94\x91\xa7\x3b\x1b\x36\x11\x81\x82\x87\x37\xfa\xd1\xd1\x61\x81\x25\x25\x30\xb3\xec\x6a\x14\x81\xef\x63\x35\x1e\xaa\x51\x23\x
cc\x0f\x02\x6c\x95\x4d\xa5\x6e\xa4\x77\x7f\xf7\xb4\xdd\x51\xb5\x54\xa4\xd8\xd8\xce\x6b\x0c\x2c\x24\x14\x0c\x03\x93\xcc\x03\x58\xa9\x1c\x3f\xbc\xeb\xe4\x6b\xa4\xd0\x75\x66\x13\x24\x5e\xf2\xef\xf5\x2c\x44\x81\xcd\x14\x76\x29\xf2\x52\xfd\x9c\xb2\xbe\x3c\x88\xdd\x34\x75\xfd\x92\x6c\xf7\x1c\x7b\x30\x96\xe1\x1b\x28\x19\x37\xe0\x65\x6e\x8c\x27\x6e\xd6\xc6\xa3\xcf\x60\x14\x4f\xd0\x7e\x7b\xd4\x76\xcc\x48\xb1\x0a\xf4\xe6\xd8\xcb\x70\x7b\x83\x0f\x7e\x9f\xc9\x51\xce\x32\xa6\xee\x6b\xd5\xb1\x90\x43\x18\xed\x8b\x7c\x88\x73\x69\x52\x6f\xc6\xb0\xe2\xd4\x08\x2f\xf6\xe1\xce\x16\x85\xde\x79\x16\x0a\x7d\xc5\xc2\x7a\x10\xcb\x60\x86\x3a\xa9\x0d\xaf\xe5\xcf\x39\x09\xf6\xa8\xcd\x88\xc3\xf0\xc1\x8c\x08\x3b\xaf\x8a\x84\x5a\xe3\xc5\xd7\x94\x2c\xec\xac\xcc\x79\xb5\x45\x02\x7c\xe0\x24\x51\x66\x3e\x15\xce\x51\x3f\xd0\x53\x43\x1a\x47\xaa\x65\x6d\xcb\xec\xcd\xa5\xb5\x42\xa6\xb0\x51\x12\xe7\xf3\xac\xe6\x98\xce\x25\xaf\x10\xb4\x3c\xd1\xf3\xb4\xf8\x05\x5d\x51\xf8\xa5\x56\xa3\x6f\x17\x0c\x28\xa1\x67\x70\x70\xdc\xcd\xb8\x25\x54\x92\xe2\x16\xaa\x1f\xf4\x8c\x87\x72\xd7\x05\xb2\xfc\xfa\x0f\x8b\x21\x95\x85\x28\x88\x11\x1b\x6f\x87\x8d\x9b\x70\x01\x32\x41\xac\x13\xc0\xe6\x14\x74\x15\xaf\x39\x0f\xae\x15\x43\xd3\x7a\x82\x62\xe2\xc4\x06\xca\x05\x08\x58\xae\xcd\x23\xb3\x73\x5f\x14\x87\x89\x2f\xe1\xcf\xd7\x6e\xbf\x88\xa3\x44\x72\x4f\x45\xb1\x50\xfa\x2c\xb7\x1a\x71\x51\x7c\x1d\x12\xf0\xc6\x7b\xfb\x54\xe5\x86\x3e\x9e\xe8\x4d\x3d\x5c\x77\xb3\x36\xac\x40\x14\x98\x5b\x57\x7e\xa3\x29\xce\x54\x73\x61\xe8\x48\x61\x8e\x69\xfe\xfe\xa2\xff\x8f\x1c\x5b\xc8\xcc\x3e\x8a\xf3\x85\xd5\x53\xe7\x7a\x0b\x48\x7d\x66\xcf\x14\xbf\x77\x68\x6b\xd7\xda\x67\xf5\xed\x1e\x53\x94\x99\xfd\xc8\x14\xed\x3b\x2d\x2c\xb7\x35\xb9\xe5\xf2\x79\x88\x82\x83\x3a\x04\xef\x35\xbc\x60\x1d\x97\x17\x2e\xcf\xc4\xac\x35\xba\x60\xac\x4d\x64\x6d\xa8\x4c\x75\xa3\x2b\x0e\xcb\xb8\xd2\x9c\x1f\x6b\x3b\xfc\x10\x19\x2d\x44\xac\xf2\x57\x32\x12\x72\x98\xd7\xa9\xfb\x4f\x35\x82\x16\x21\x7b\x0b\xfb\x5c\xe3\x79\x16\xed\x83\xc1\x9d\xe6\x22\x37\x64\x30\xd3\x5c\xc5\x36\xa5\x5b\xca\xe7\x58\xd6\x90\x91\x1b\x0b\x8e\x34\x09\xb2\x2b\xc9\xf0\x84\xc2\x82\xd3\x9b\x12\x39\xda\xc7\x39\xb2\xfd\x4a\x7c\xe8\xf1\xcc\xcc\x56\x50\x86\xce\x84\x78\x7d\xa8\xc2\x36\x9b\x54\xe8\x5c\x19\xe3\x57\x08\xf4\x80\x9b\x17\x6a\x95\x25\xc9\xa9\xd2\x3b\xa4\xde\x47\x9f\x32\x46\xe5\x31\x7c\x1d\xa6\x99\x9b\xa3\xd0\xd9\x5b\xb4\x29\x44\xd1\xa4\x22\xc8\x8d\x35\x34\x12\x37\x99\x4d\x61\x81\x4b\xf1\x14\x20\x08\xc7\x72\xd0\x2e\x20\x67\x57\x30\x77\x83\x70\xfd\x31\xbb\x67\xda\xca\x19\xeb\x0f\x5f\xad\xec\x12\xe6\x85\x1e\xb0\xa0\x80\xf0\xf4\x71\x13\x39\xcb\x07\xc2\xd3\xdf\x5b\xe2\xa6\x5a\x8c\x1f\xb2\x98\x3a\x97\x36\x3c\x1a\x2c\x70\xe3\xdd\x60\x80\xbf\x8c\x2c\x8d\x5d\xfb\x12\x83\x8f\x4c\xfc\xa3\x71\xb9\xc2\x14\x30\x71\x71\xe4\xd3\x71\x7f\x22\xde\x24\x88\x4c\x92\x78\xa5\x05\x17\x59\x38\x45\x23\xdc\xdb\x0f\x41\xc0\xc0\x60\x2e\x19\xdc\x09\xbf\x51\x9e\x17\xab\xb3\xd2\x33\x05\xa4\xa2\xbf\x3d\x7b\x83\x3f\x1c\xe6\x18\x8a\x82\x6e\x3a\x96\xa8\x08\x36\x29\xc0\xbe\xe1\xde\x10\xbd\x3a\x4a\xf8\xfe\x9e\x34\x1f\x24\x5a\xc0\x1e\xa7\x01\xc1\xa8\x9e\x0c\x99\x85\x17\x5e\x58\xc7\xd3\x49\xc0\x50\x66\x2a\x95\xde\xee\x87\x26\x81\x94\x73\xb8\x3b\x40\x77\x99\xb9\x5c\x14\x9b\xae\x8d\xeb\x33\x9b\xbf\xfb\x76\x55\xa3\x69\x3c\x1f\x72\xfe\xe1\x2a\xb3\xf3\xf0\x13\x03\xca\x8e\xb6\x0f\xc3\x33\x6b\x9c\xfb\x31\xc3\x0f\x13\x0b\x17\x97\x73\xd7\xcb\x30\x3f\x0f\x55\xde\x0d\x77\x11\xaf\xc2\x81\xc1\x99\xa7\xe7\x33\xec\x26\xfd\xd3\x71\xac\xb2\x90\x45\x8a\x3c\x0b\xe0\xfd\x48\x92\x1f\xb3\x29\x17\x8d\xdd\xde\xb5\x2c\x9d\x07\x9c\xf5\x22\x04\x15\x34\x21\x32\xbb\x6b\xa6\x30\x34\xff\x84\x55\x8d\x96\
xb2\x8f\x44\x01\xb6\x43\xfd\x5b\xf1\xf9\xcf\x2c\x24\x8d\x12\xdf\x94\xf8\x5b\x66\x46\x2c\x12\x6c\x0e\x03\x9c\x7e\x6b\x68\xda\x5f\xf6\xe1\x98\xba\x81\xa7\x6d\x31\x07\x15\xc4\x16\xda\x59\xb4\xb5\xa0\x4f\x4a\x09\x99\x10\xaf\x0f\x51\xe0\x3d\x47\x59\x20\x10\x97\x5a\x47\x36\xf4\x88\x42\x9f\x88\x6f\x72\x65\xa3\x54\x9d\xaa\xbb\xff\x9d\x12\x86\x9e\xa3\x3f\x82\x9c\x60\x9d\x78\xa4\x69\xbf\x5b\xf9\xa1\x02\xaf\xd5\xbd\x36\x75\xd5\x56\xeb\xe8\xc7\x42\x73\xa8\x1c\x85\x69\x6e\xca\x63\xc7\xdb\xd8\x8e\x2d\xb2\xad\x1c\xbf\x70\x1c\xd0\x8b\x7b\x5b\xc1\x93\x37\x21\xb7\x13\x0d\x15\x4a\x4a\xfb\xc0\x6d\x71\x38\xfa\x3b\xd2\xdc\x37\xbf\x99\xa7\x72\xe6\x27\xf0\x16\x94\xc0\x69\x9b\xca\x2e\x08\x9f\xe0\x50\x94\x69\x9a\x15\xc0\x9c\x82\x42\x54\xf4\x3f\x77\x1e\x15\x59\xbc\x7d\x7a\xc2\xc9\x78\x03\x45\x63\x9e\x00\x92\x42\x65\xc7\x24\x30\x36\x62\x3a\x44\x1b\x87\xac\xfe\xe3\x01\xf1\x2d\x1c\x84\x67\x62\x0f\x8b\x10\x62\x0c\x8b\xac\xfb\x9d\x42\xf0\x3e\xf6\xb2\x8b\xa3\xc4\xb2\xb7\x3c\xcc\xd1\xaa\x7d\xa3\x3e\xcb\x6b\xd2\x0a\x1d\x91\xdf\x87\xb4\xbd\x9f\xa2\x44\x24\xce\x63\x49\xf4\x6a\x96\xe6\xd4\x2a\x12\x86\x3a\xf5\x93\x2c\x80\xf9\xca\x0c\x10\xf9\x9f\xdf\x42\x7c\x08\x61\xca\xd3\x26\xf0\xf3\xe1\x15\xb5\x1c\x18\x8e\x6d\xe0\x1a\x09\x0f\x85\xc9\x29\x31\x6c\x52\x87\x84\xa3\xdd\x8e\x90\x49\xd9\xff\x0c\x98\x06\x39\x4c\xdd\xa9\xc6\x60\x24\x46\x17\xdf\x93\x7a\x71\x90\x47\x81\xa9\x97\xd2\xff\xa0\xa6\x99\x2d\x6f\x27\xd3\x0c\x6e\x60\x36\x35\x13\xdf\xd3\x32\x41\x07\x09\x49\xf2\x7e\x2d\x03\xc4\x02\x45\x76\xa1\xaa\x64\x1e\xea\x59\x58\x22\xb8\xb0\x88\x09\x9b\xdc\x65\xf9\xa4\x6c\x80\x6a\xfe\x00\xfd\x32\x90\xd4\x82\xf7\xaa\x15\x63\xa1\x92\xab\xe8\x9e\x8a\x04\x0d\x33\x1d\x97\x07\x2e\x30\xb7\x64\x9d\x7f\x28\xf2\x21\x08\xb5\xc3\x41\xd1\x7c\x5d\x07\xa1\x9e\xe9\xa2\x79\x10\x4a\xf6\x3c\xdd\x75\x4b\x0b\xd7\x45\x41\x70\x49\xfa\xe7\x4d\xc6\x86\xdf\x84\xb6\xed\xc1\x1c\x63\x54\x2e\x02\x8f\xb5\xc4\x81\xf5\xb4\x2c\x22\xf7\xfe\x71\x6b\x41\xe0\x61\x04\x8b\xe5\x9e\x7b\x84\xa1\x3b\xaf\x7c\x14\xba\xb8\x3e\x53\x08\x32\x9c\x45\x21\x5e\x0a\xb4\x0f\x3a\x27\x87\xda\xa9\x2d\x88\x5b\x63\x13\x94\x59\x27\x93\x1e\xea\x35\xfa\x85\xe1\xb6\xea\xd5\xb4\x35\x40\xfc\x5e\x85\x5f\xc9\x3e\x04\x11\x5a\x39\x74\xa3\x73\xa9\x1f\x92\xd4\x8d\x46\x94\x51\xb8\x90\xe1\xad\xae\x17\xd9\x14\xf8\x91\x6c\x04\x99\x2d\x48\xa0\xee\xc4\x52\x95\x66\x66\x88\x33\xcc\xb9\x46\x44\x77\x91\x78\x8b\x93\xf6\x60\xe4\x2a\x89\xfa\x22\x66\x9e\x1c\x1e\x07\xe1\x80\x91\x7e\xae\xdf\x44\xdd\x1c\x98\xff\x4d\x6d\x3c\xff\x93\x90\x97\x87\x72\x08\x2d\xf0\x71\x7a\x51\xdb\xf1\x35\xef\x1f\xd9\x6a\x28\xf3\xf6\x02\x4c\x0d\x0c\xae\x28\x89\xf6\xda\xb0\x93\x77\xe5\xf5\x49\x5f\x85\x78\xf3\x89\xbd\xf8\xf5\x50\xad\xe6\xef\xec\xa8\xf0\x5c\xb5\x62\x1c\x2a\xf2\xce\xf7\x8b\x97\x54\x0d\xeb\x74\xa3\x4f\x00\x85\xa0\xaa\x2b\xc9\xf2\x67\xb8\xcf\xe9\xe8\x94\x8b\x38\x37\x16\x8e\xa0\x78\x6e\x6a\x29\x87\xec\x7d\x51\x2f\xb8\x2a\xd3\xee\xe5\x51\x13\x38\xf5\x3f\xff\xad\xba\xee\xa7\x37\xa2\xf0\x4a\x5d\x6e\xf2\x95\x9e\xf4\x7d\x4d\x2b\x88\x69\xda\xd3\x3e\x85\x3c\xc9\xc0\x24\x32\xaa\x4e\xde\xd3\xe2\x3c\x50\xbb\x26\xa6\xa7\x8a\x9a\x42\x39\x52\x41\xa2\x3f\x5c\xcb\xa1\xae\x0d\xe1\xc6\xa3\x0b\x3e\x0a\xfc\x12\x98\x6e\x32\x72\xff\xc5\xb3\x76\xad\x64\x09\x6c\x26\x25\xfd\xac\x44\xf9\xa3\x1b\x95\x50\xf9\xef\x85\xf1\xb7\x32\xe7\x87\x28\x45\x75\xd1\xf8\x08\xae\x85\xce\xc9\x3a\x49\x3b\x2d\x78\x75\x69\x7e\xae\xa5\x2b\x95\x74\x72\xd5\x57\xfc\x75\x98\xc3\x23\x20\xc9\x69\x24\x16\x86\x4a\x32\xf9\xbc\x68\x27\xa4\xb6\xcc\xe7\xde\xa0\x22\x5c\x7a\x1f\xec\x6e\x12\x96\x92\x7e\x4b\x0e\x3d\x0e\x2a\x7d\xc7\x4b\x65\x93\x36\x2e\xae\x70\x4b\x80\x3c\xa8\x74\xe4\xee\xa1\xa9\xcd\x07\xf5\xf2
\x1a\x2c\xe3\x2e\xe7\x9a\xe9\x2a\x31\x72\x53\x9f\x12\x9b\x36\xc5\xe7\xf5\x16\x28\x18\x29\x7a\xb9\x96\x9f\xa7\xfb\xac\x39\x89\x65\xd0\x26\x02\x1f\xe4\x30\xee\x12\xe7\xa5\x7a\xec\x9a\x19\x3f\x47\xd7\x05\x15\x76\xda\x89\x31\x1d\xa3\xc6\x6f\xf8\x0c\x73\x3f\x94\xb4\x82\xd4\x61\x76\xf2\xc9\x98\x80\x7b\x85\xc5\x55\x5e\xd4\x60\x52\xce\xd1\xa0\xa0\x6d\x7f\x0f\x4c\xd7\x8c\x81\x1a\x33\xa3\xda\x62\x16\xac\xe0\x1c\x8b\xda\xc9\x5e\xb5\x69\xdb\xf8\xd9\xfc\x23\x54\x23\xfa\x83\x6b\x24\x08\xfb\x9e\x7b\xcf\x86\xf2\x0c\x2d\xe0\x6e\xeb\x7d\x60\xc9\xb7\x39\x46\x89\x49\x67\x91\xc1\xc0\xa5\x90\x11\xa7\xc9\xb9\x0d\xf4\x5f\x49\xed\x5c\x68\x89\xb6\x98\x45\x8b\x88\x3f\xba\x65\x88\x32\x8b\x60\x3d\x05\xdf\x48\x69\xc0\x9e\x7d\x6a\xb6\xd2\x59\x24\x2b\x80\x73\xfa\xc2\xd6\x23\x60\x59\xad\x58\x35\xd6\xcf\x99\x2d\x39\x22\x92\x5a\x36\x1a\xe8\x68\x96\x18\xd3\x63\xbc\xc9\x21\xf0\x13\x15\x8f\x7d\xd0\x41\x92\x05\xa5\x0e\x0d\xc1\x87\x34\x10\xa0\x6c\x6d\xd6\x3f\xd7\x32\x71\x64\x2c\x81\x7d\x98\x59\x87\xe1\xbf\x36\x96\x6b\xf7\x0d\xfb\x7d\xdf\x47\x92\x17\x66\x20\x0f\xc5\x87\xd2\xd2\xdf\xe1\xa0\x36\x08\x6d\xda\xe2\xbf\x0d\xba\x0f\x87\x9c\x6f\x1b\x84\x53\x0f\x9b\xfb\x9d\x3f\xf0\xb8\xe1\xe5\x3c\x0b\x48\xcb\xb6\x3c\xd0\xe2\x8c\x0b\x96\x16\x4a\xff\x86\x15\x09\x34\x1f\xdb\x17\x7c\x4b\x49\xfc\x0d\xdd\xb2\x57\x75\x36\x6d\x51\xd5\x7b\x10\xff\xd0\x96\x47\x04\x92\xd5\xa9\xbb\x41\x44\xa9\xf0\xb9\xf1\x6d\x1b\xa8\x9b\x4c\x69\x6a\x41\x25\xb2\xdf\x8b\x31\xd5\x38\xb7\xc7\xfd\xf6\x25\xf6\x17\xd5\xed\x00\xd2\x58\xe6\xfd\x4f\xdc\xc2\x07\x03\x87\x75\xa0\xde\xe7\x51\x37\x8e\xe2\x47\x6a\x4c\x5e\xa4\xe7\x4e\x37\x39\xba\x15\x86\x13\x31\xff\x5b\x57\xed\x8d\x82\x6c\x05\xc8\xc5\xec\x9f\x9a\x15\x3f\xc7\x9d\xb1\xb0\xe8\x80\x9a\x09\x8f\xb7\x46\x77\x1a\x9d\xaa\x2b\x1a\xf4\x00\xe2\x1f\xd5\x46\xd8\x11\x2b\x3b\xdb\x37\x9a\x66\xa0\x51\x92\x03\x03\xc0\xb1\xad\x3e\xcf\xdf\xa1\x4a\xdd\x34\x16\x6d\x73\x74\xfe\x62\x2f\x43\x51\x78\x25\x54\xdb\xc1\xca\x97\xef\xc2\xda\x47\xb8\xab\xe4\x7e\x8a\x66\xef\x11\xa4\x19\xeb\x18\xda\x44\x8b\x1f\xeb\x41\x85\xa4\xfc\xc7\xf7\xb2\xb9\xdd\xbb\xec\xf2\x6d\xd5\xa3\xc5\x0f\xbf\x88\xb3\x4e\x5e\x86\x17\xe4\x74\x7b\xb1\xda\x9d\xb0\xed\xfc\xd0\x0d\x82\x9e\xf2\xc7\xf6\xf3\x7f\x46\xf4\x92\xe3\xef\x15\x3d\x67\x8d\x98\xff\x8c\xf3\x0a\xff\xaf\xe3\x3c\x2b\x2e\xb8\x26\xef\xff\xd1\xc6\x92\xfd\x7f\x2e\x96\x15\xbe\x9d\xbd\xed\xa9\x9a\xcf\x5b\x7f\x08\x38\x14\x28\xa1\xfb\xe6\xf4\x4a\x85\x89\x37\xae\x23\x3e\x75\x99\xdb\xfe\x09\xb5\x07\x54\xb6\x23\xd5\x4c\x82\x4e\x98\x65\x8b\xb8\x62\x69\x44\x1c\xda\x07\xeb\xb7\xb2\xd2\x7c\x31\x4e\x81\x15\x1c\x1d\x61\x27\x0c\x19\xc5\x32\x56\xff\x3a\xf6\xe5\x97\x17\x56\xa7\x7e\x6e\x7f\x2f\x47\x0f\x7c\x03\xe2\x50\xa8\xfb\xd6\xdc\xe8\x76\xba\xf7\x07\x03\x13\x73\x13\x8e\x5f\x51\x6d\x95\xeb\xc5\x6b\xdf\xb7\x3e\xc3\xc1\x16\x3a\x1f\x58\xf7\x2e\x15\x08\xa3\x91\x27\xe0\xfd\xae\x71\x7a\xa4\xdd\xc6\x4a\xac\xe8\x63\x0e\xda\x68\xe3\xa4\x58\x36\x68\xdb\x9f\xef\x90\x5a\x2f\x6a\x3e\x1d\x97\x32\x6d\x87\x54\x84\x35\x71\xd4\x8f\x2f\xa8\x79\x42\xaa\x0e\x1a\xc3\x9c\xda\x2c\xb2\xba\xba\xf0\x7a\x47\x35\x3e\xbc\x66\xc5\x95\xf1\xcc\xae\x38\x5a\x1b\x63\x1e\x0a\xf9\xee\xe1\xed\x76\x6f\xc6\x01\x61\xd2\xb4\xef\xe1\x58\x12\x8a\x8e\xcd\xa9\xa7\x2f\x55\x76\x3f\xca\x75\xa9\x45\xe1\x36\xab\xb5\x01\x39\x46\x4c\xf3\xd6\x98\x30\x84\x7f\x02\x3e\x78\xfa\x9c\xce\x52\x58\x3a\x0f\x91\x5d\x34\x1a\x6e\xa5\x8f\x9d\x67\xf3\xfa\xf8\x30\x28\xa3\x27\xc4\x59\x3d\x09\xb0\xdc\xb5\x67\x87\xbc\xca\x31\xd7\x34\xeb\x27\x8a\xda\xfc\xd7\xd1\x9f\x1f\x3c\x44\x39\xdc\x4d\xb8\x46\xf1\xcc\x45\xc7\xa4\x96\x7b\xb2\xbf\xd7\x17\xbe\x60\x9b\xde\xc2\x15\x39\x32\xa4\xfa\xe7\x2b\x0b\x60\x8f\x64\xc0\x2
2\x51\x65\x14\x15\x8c\x64\x1d\x14\xfd\xca\xd1\x2f\xdd\xf8\x42\x8c\xff\x2e\x42\x61\xdb\x5d\xde\x11\x6a\xdf\x6b\xc9\x9b\x49\x46\xa8\x48\xa4\x4d\x05\xee\xc4\x45\xfd\x97\x0a\x94\xa7\xe8\x50\x81\x39\xd8\x42\xed\x42\x4c\x71\xc1\x53\x7f\xf2\x96\x6d\x5e\x7e\x4c\xbe\x05\x97\x50\x90\xe5\xd2\x49\x6a\x7d\x5c\xf3\x65\x86\xd4\x39\x6c\x38\x4a\x2e\xd9\xf5\x0f\xd6\x73\x89\xcb\x67\xf0\xd8\x00\xd7\xa5\x4f\x67\xfe\x22\xea\x31\x43\x2b\xff\xd7\xb0\x62\x60\x01\x8e\xdc\x2a\x8f\xf1\x3a\x30\x6b\x42\x2c\x3d\xe8\x08\xea\x03\x3c\xa0\xd8\x02\x9d\x2d\x4b\xc9\xab\xe4\xe2\x03\x5d\x6e\xc8\xb3\x64\x91\x65\x82\x39\x44\xe0\x22\x25\x28\xbe\x32\xcf\x41\x5c\xa7\x69\x9a\x82\x4b\xfb\x97\xdd\x28\xb8\x62\xd2\xec\x97\x93\x8c\x70\xd0\xa6\x67\x15\xf0\xb2\xba\xeb\x0d\x38\xf5\x36\xe0\x41\x33\xfe\xb8\xe6\x36\xe7\xb3\x86\xa7\xdf\xe8\x50\xbe\x1f\xf6\x9d\x18\x69\xc9\xab\xa8\x3d\xa0\x2f\x8f\x0b\x1d\xf4\x67\x15\xf4\x45\x88\xa1\x80\xc9\xf5\x08\xb6\x38\xcc\xde\xab\x33\x91\x19\x9d\x44\x26\x71\x1a\x97\xd2\x8a\x81\x22\x74\xd8\xcb\xb8\xfe\xaf\xbb\xa3\x7f\xa6\x29\x20\x47\x04\x6f\x8c\xf9\x61\x22\xae\x15\x54\x86\x6d\x1f\xfc\xf5\xbd\x54\xb9\x1f\xd2\xc6\xa0\x2c\x63\x75\x63\x18\xf3\x5f\x4e\xfe\xc3\x5f\xa2\x5c\x4f\xf3\x2f\x8a\x1b\xed\x67\xe7\x2f\x98\xd7\x01\x8f\x3b\x48\xda\x53\x8f\x12\xa7\x75\x7a\xa4\x83\x54\x88\xfe\x8f\x81\x53\xa9\x46\x99\x77\x40\x82\x63\x6b\x03\xa1\xce\x4e\x87\x46\xf3\xf7\x89\xb7\xed\x2d\x08\xc3\x1c\x67\xca\x9e\x9b\xf3\x5b\x03\x1d\x94\xf6\x1b\x0d\xb7\x88\x5c\x73\x67\xc1\x2f\xff\x4b\x17\xdc\xce\xcc\x3a\x5a\xca\x89\x25\x46\xbe\xad\x71\x1f\x3c\xfa\x73\x8e\x15\xf4\x14\x01\x61\x33\x5d\xbb\xab\x74\x05\xa1\x3c\x41\xb3\xe7\x24\x62\x25\xc4\xdb\xc6\x8f\x71\xc7\xc8\x76\x43\xfc\x80\xdf\x11\x1f\xeb\x3a\x72\x68\xf6\xbb\xbd\x98\x6c\x91\x87\x69\x54\xa2\x47\x0c\x1d\x5a\x18\x47\xff\x17\x7d\xd0\x93\xcb\x79\xbd\xa6\x1c\x23\x61\xa0\xef\x2e\x68\x78\xdb\x02\x3f\xd2\xc9\x7b\x79\x34\xe3\x15\x5a\xf7\xef\xa8\x8f\x85\x45\x57\x1b\x71\xbf\x83\x67\xee\x3b\x5b\x6f\x40\x90\xcb\xbe\x5e\x25\x82\x2a\x31\x84\xf7\x90\x59\xcd\xe5\x4f\xc9\x16\x7a\x73\x53\xcd\xa7\x21\xaa\x7b\xc0\xc3\xd2\x86\xb8\xe1\xed\xae\x6b\x9f\xf4\x78\xe0\x93\x7a\x27\xbf\x35\x50\xde\x1f\xfe\x9e\xe6\xe2\x35\x38\x9f\x84\x4b\x3d\x1b\xc6\x94\x3b\xae\x31\x8a\x81\xc2\x4c\x24\x94\x7a\xe3\xdc\x36\x8c\x58\xc6\xa2\x28\x21\x5c\x3a\x80\x8c\x10\xef\x3f\xf0\x81\x93\xb3\xbf\x01\x59\x60\xe2\xaa\xe1\xc7\x7c\x47\x72\x41\x08\x83\x5f\xf8\x0d\x83\x33\x16\x01\xaa\x6e\xcf\xb9\x27\xd6\xaf\x01\x0c\xf0\x3c\xe0\xe2\x5b\xf8\x7e\x17\xde\x2f\xa5\x12\xa6\xc6\xd5\xdf\xc9\xcb\x80\x24\xb7\x62\xad\xfa\x5f\x13\x13\xdf\x5b\xa8\x03\x12\x08\xa1\x93\xe0\x8c\x37\xe8\x2e\xf9\x3b\x93\xfe\x90\x24\xdf\x43\x9d\x17\x14\x4a\xd3\xb6\x33\xbb\xd3\x63\x36\xaa\xe4\xc5\xe6\xbc\xde\xa3\x30\x75\xea\x53\x81\x95\xd4\xe3\x30\xfb\x5f\xe7\x31\xe8\xcf\xb9\xf6\xc8\x87\x71\x93\x5c\xa1\x8d\xd0\x94\x75\xf0\x40\xd8\x48\xe1\x32\xe4\xfe\x8c\xd5\xa4\x3e\xc0\xc2\xe5\x57\x26\x9e\x8b\xa7\x60\x8c\xfa\xd7\x70\x52\xec\x5f\x80\x01\xe4\x0e\x5f\x7b\x84\xc6\xa2\x60\xf4\xea\xb5\xd3\xc3\xb6\x80\x01\xdb\x0e\x95\xe7\x6b\xe6\xa8\xdc\xa5\x85\xf1\x6a\xd7\xd9\x35\x99\xbc\xf3\x7f\xbf\x30\x20\xc3\x82\x0d\x7e\xe3\x15\x47\x16\x24\x17\x84\xc5\x99\x07\x3b\x27\xb5\xac\xb2\xfe\x39\xe0\xe3\xbf\xd3\x64\x12\x60\xc5\x10\x7a\x24\x75\x0f\x0e\x4f\xfd\x93\x2e\xdb\xed\x96\xcf\x3a\x0c\x29\x3b\x7d\xc4\xe2\xd2\x5c\x01\xf8\x93\x94\x47\xae\x29\xe9\xfd\x93\x07\xc8\xcb\x42\xfa\xbb\x16\xc1\xd3\xed\xf3\xa6\x96\xcb\x36\x0f\xe2\x3f\x47\x93\x4e\x52\x19\x0d\x79\xd0\xad\x09\xfb\xc0\x73\x05\x15\x7b\x3b\x7b\xa4\xce\x8b\x9a\xf1\xdd\x3f\x71\x8e\xbd\x9b\xac\x3e\xc0\x5b\x2d\xaf\xcd\xca\xbf\xbf\x87\x16\x9e\x3e\xf1\x50\xa3\x37\x0d\x
fa\x79\x0a\x3a\x6e\xd8\x1b\xeb\x23\x19\x2f\x78\x6a\x0e\xb9\x6d\x75\x34\x7e\xd3\x5f\x73\xcd\xfd\x32\x7a\xba\xea\x55\x54\xc2\x09\x38\x4e\x9a\xd1\xb8\x1a\x58\x4a\x0a\x20\xfd\xaf\xfd\x84\x61\x9d\x1f\x4d\x93\xc9\xc1\xf6\x23\x90\xb1\x45\xfd\x97\x87\xa8\x64\x19\x9e\x80\x23\x15\xa2\xad\x57\x0d\x9b\xba\x7d\x5e\xf0\xc9\x27\x7d\xcb\x86\x37\x33\xeb\xbd\x43\xfa\x03\x70\x78\x97\x09\x1f\xf1\xb0\xa0\xf3\xe8\xfa\x2f\x1e\xaa\x35\xd4\xda\x92\x0a\x06\xa2\x9f\x05\xc2\xc3\xa2\xa1\x1e\x72\xd6\x64\x92\xbe\xe0\xb2\x1f\x92\x94\x50\x7c\x9d\xf1\x40\x57\x06\x4c\x78\xad\x9d\xc3\x56\xe3\x43\xd0\x4f\x29\x91\xf3\x81\xcf\xa3\xfa\xe5\xb9\x6a\x9c\xe6\x86\x51\x0a\x6e\x5e\xe6\x1c\x10\xd6\xcc\x37\x37\x3f\x04\x4c\x91\xad\x6d\xbc\xd7\x55\x0e\x89\x0b\x06\x11\x85\xb4\x72\xc0\x85\x50\xbb\xdd\xea\xdf\x44\x7b\x84\x4c\xce\xe8\xb3\x10\x2f\xf5\x19\x99\x25\x47\xf7\x40\xde\xab\xaf\x7b\xb5\xe4\xd9\xf6\xf5\x2b\x7c\x48\xc1\x67\x3f\x21\xbf\x83\xb4\xd0\xd4\x0e\x0b\x3f\xf4\xc9\xf7\xe8\x8f\x20\x9e\x98\x60\xc0\xef\x18\x6d\x5f\x15\xac\xe9\x1c\x2b\x24\x08\x99\xec\x72\xe7\xf7\x07\x04\xb2\x72\xd5\xc6\x90\x8b\x34\x1a\xa9\xbd\xe6\xd5\xab\xf4\x94\xc5\x67\xbf\xb6\x84\x31\xd1\x9b\xfb\x4b\xbb\x41\x75\x6f\x51\x06\x2c\xc0\xa4\xbf\x9e\x52\xbd\x39\x08\x83\xcb\xfe\xda\xca\x68\xb1\x12\xf9\x0d\xc3\xb3\x78\x4c\xcf\x61\x6f\x34\x26\x30\xd7\xb2\x9f\x35\xa4\xec\x68\xf8\x29\xe0\xd3\x37\x2e\xb2\x6f\x35\x16\x5c\xa3\x12\x65\x52\x32\x73\xd0\x13\x04\x51\x52\xba\x48\x5e\x50\x84\xe5\xed\xbb\x41\x9f\x10\x00\xc3\x15\x4a\x59\xb3\x0e\x12\x5a\x16\x14\x6d\x51\x5e\x68\x7b\xb1\x5e\x15\x82\xa0\x09\x60\x74\x8b\x05\xa8\xb9\x79\x5a\xe5\xe4\x3f\xd5\x25\x14\xda\x08\x15\x28\x15\x81\xdb\x90\x72\x71\xb0\xd3\x07\x4e\x50\x82\xfb\x4a\xe2\xbe\xb9\x67\x72\xee\x91\xc4\x40\x80\x88\xa0\xd0\x74\xda\x85\x0c\x61\x68\x7f\x2f\xfa\x82\x75\x3c\xfa\xa3\x35\x0a\x51\xfc\xeb\x15\x4e\x56\x79\x84\x88\xe5\xf6\xb7\x32\x0a\xca\xb2\x04\x07\xe3\xd9\xfa\x80\xd7\x0d\xc4\xe3\x98\x2b\xd4\x86\x49\xe5\x4f\x8b\xf7\xb3\x6f\xcb\xb6\x45\x65\xd9\x26\x37\x47\xbe\x9c\xc9\xe0\xbd\xff\x4b\x74\x3c\xa1\x49\x7b\x92\x5b\xec\xbb\xe1\x7d\x5f\xb8\x72\xc8\x4f\xef\x75\xeb\xb8\x33\x80\x06\x9e\x2f\x6b\x9d\xbc\x15\x6c\xe2\xc9\xc4\x82\x9d\x84\xe7\x64\x5c\x8e\xaa\xe5\x5a\x46\x3f\x3e\x65\x21\x4c\xda\x96\xdd\xca\xa2\xef\x8a\x4a\x98\x3a\xeb\x54\x87\x8f\x30\xbb\xcd\xda\x9a\x56\x29\x5b\x0d\x7d\x9b\x15\x51\x67\x15\x85\xff\x7f\xe8\x7a\x0b\xb7\xb6\xb6\x7e\xfb\x37\x78\xf1\x16\x8a\x53\x9c\xe0\x25\xc5\xdd\x4a\xd1\x20\xc5\x9d\xe0\x10\xdc\xdd\xbd\x94\xe2\x10\xbc\xb8\xbb\x15\xf7\xe2\x14\x77\x2b\x5a\xdc\xdd\xee\xb3\xdb\xbd\xf7\x79\x7f\xe7\xbd\xe7\x1f\xc8\xb3\x56\xb2\x32\xe7\x1a\x73\x8c\xf1\xf9\x72\x94\xe0\xf1\x23\x3b\xa0\x1c\xab\xfa\x06\x0c\x33\x84\x0f\x45\x39\xd4\xfb\xae\xbd\x94\x6a\xbe\x38\x42\x35\x23\xdc\x24\x0a\x6d\xbd\xe9\x12\xbd\x74\x35\xd2\x7f\x07\x9e\x43\x60\xf0\x02\x15\x7a\x28\xfb\x15\x91\x3b\x45\x52\x2c\x8f\x6b\x4a\x46\x27\xa2\x5a\xf4\x07\x86\x97\x6b\x4e\x7e\x0e\x13\xe7\x12\x8c\x7b\x74\xf4\x7d\xb0\x37\x23\xb1\xfe\xfe\xc3\x95\x34\x23\x69\xab\x51\x35\x83\x2e\x4c\x00\x1b\x57\x7f\xf5\xaf\xcd\xf9\x48\x39\x50\xa5\x54\x18\x63\xf5\x05\x5d\x69\xb1\x6c\x55\x66\xec\x2c\x24\x9b\x2d\xeb\xb8\xc6\xeb\x9e\xf9\xf0\xa6\x5b\xad\xe0\xf4\x44\xbb\xce\xcf\xc0\x6c\x78\x82\x82\xa3\xf7\x01\x20\x33\xaa\xb0\xc7\x3e\xe4\x12\x8d\x26\xb1\x61\x3b\x5d\x30\x8d\x0d\x8d\x2d\xa0\x47\xcf\x41\x0e\xf6\x32\x64\xa9\x08\x76\xd0\x71\x94\x5d\x47\x16\xac\xc4\x00\x17\xdf\x34\x27\x55\xe9\x8e\x0b\xd7\xc4\xf7\xa2\xdc\x98\x55\x5f\x23\xaf\x45\x7d\x54\x30\xe8\xf4\xd7\x4d\x78\x8f\x56\xe7\xdb\xc4\xce\xed\x52\xf8\xad\x32\x37\x28\xc5\xc6\x14\x54\x24\x7b\xf4\x63\x5e\xfb\xb6\x56\x93\x38\x89\x57\x6d\x51\x94\x6d\
x59\xc5\x99\x63\xb5\x3e\xce\x8c\x4c\x07\xf2\xcf\x69\x03\x1f\xd8\x89\x7c\x09\xf1\xc4\x97\x96\xe6\xab\x8c\xf0\x2f\x7c\xd1\xa6\x9c\x9a\x28\x0a\x41\x5c\x6c\x60\x91\xd3\xb2\x55\x4d\x4d\xe0\x00\x03\x96\x8d\xd3\x47\xe7\xfe\x3b\x65\xd0\x7c\xf2\x4b\x2f\x57\xf9\x77\x70\xf7\x1c\x8d\x8c\x72\x5f\xe5\x24\x80\x05\x7e\x35\xd2\x1f\x97\xbe\x17\xe4\x07\x49\xbb\x3b\x44\xdc\xd0\x91\x1e\xfa\xc0\x5e\xa3\xb5\x91\x51\xaa\xf8\x15\xb9\x7c\xcd\x35\xfd\xe9\x13\x4e\x4b\x30\xfc\xba\x98\x95\x0d\x50\xcb\xa0\xf4\x0b\x85\x73\x9a\x9d\x77\x5f\x75\x11\x94\x0d\xa6\x96\xcf\xe2\x2e\x12\x95\x58\x86\x38\xe8\x96\xf9\x15\x4b\x9b\xaf\x9e\x9a\x49\x83\x3e\x52\x27\xd0\x1c\x63\x2a\x6e\xac\x30\xf6\x09\x4f\x63\xc4\xf5\x00\x91\xe2\xe6\x9a\x21\x5b\xc6\x2f\xe2\xd8\xab\xbf\xc7\xa7\xf7\x26\x16\xbf\xb6\x70\xe6\xf4\x9e\x9a\x0c\x8d\x5f\xfe\x39\x6b\x90\x8d\xe9\x48\x9f\xfc\xf6\x9d\xc8\x16\xc5\x85\x05\x68\xdc\x5b\x11\x9d\x5e\x75\x0f\x65\x99\x4a\xca\xd5\xe9\xea\x55\xae\xd9\x3a\xa6\xa3\x92\xf9\x30\x47\x41\x10\x02\x2e\x7f\x49\x9e\x81\x7d\x6a\xe1\xba\x53\x91\x34\x35\x06\xae\xa9\x2c\xd2\xfd\xe2\x21\x6f\x9c\x74\x8a\xca\x0d\xb7\xdd\x47\xda\xa5\x04\xa7\x0a\x5b\xa7\x40\xe9\x84\x3b\x60\xf0\x4c\xa0\xf1\xe7\xb3\xbc\x7e\x04\x56\x6d\xfe\x1c\x9b\xdf\x71\x89\x2c\xa6\xd3\xb2\x2c\x09\xde\x01\x83\x34\x8d\x21\xd0\xee\xcb\xdc\x4c\x7f\xed\x3b\x51\x39\xab\xe4\xbb\x36\xd4\x07\xf1\x86\xea\x8f\x42\xa5\x29\x34\xbd\x3c\x3e\xae\x81\x03\x65\x9b\xb6\x8b\x17\x25\x92\x91\xd3\xf4\xf6\x24\x9d\x1e\x45\xd7\x54\xc8\x9c\x36\xd2\xc7\x5a\x21\xe1\xba\xc5\x42\x83\x16\x6a\xd5\x39\x35\x66\x92\xdf\xbb\xd3\xce\x72\x11\x1c\x20\xdd\xf7\x17\x89\x20\xf0\xde\xb4\xe4\x04\xcf\x4e\xb7\x7b\x12\xaa\x86\xcc\x76\x22\x21\x80\x75\xe2\xf7\x7d\xa0\xfc\x29\x7c\x4c\x62\x37\x7d\x88\x3e\xfa\x75\x40\x2d\x35\xd4\x93\xea\x27\x97\x05\xb3\xc0\x8a\x99\xac\x30\x3e\x45\xb4\x4a\xb8\x43\x79\xa5\x3d\x5a\x8b\x05\x9f\x20\x1d\xb9\x1a\x54\xb1\x8a\x07\x6c\xd1\xe1\xa9\x0a\x4c\xb2\xe2\x15\xd3\x00\x8e\x25\xbc\x05\x4b\x9d\x5f\xb7\x73\xbb\xa5\x57\x90\x15\xbe\xda\x52\xf4\x7d\x31\xf1\xe2\x61\xa6\x63\xc0\xa2\xda\x06\x19\xae\x43\xfd\x5d\x8b\x31\xac\x06\xd7\x37\x62\xbc\x82\x78\x14\x9f\xf2\x91\x22\x78\x6e\x6d\x6d\x4d\x03\x0a\x1b\xdb\xeb\xb8\x4b\xd6\x71\x01\xc7\xfe\x74\xe2\x25\x70\x05\x61\xc2\xa2\xf3\x8f\x94\x10\xac\xd7\x3e\x1a\xba\x66\xfb\xf1\x7c\x90\xb9\xd2\x23\xec\x9d\x62\xe7\x95\x7a\xd6\xce\xef\x0c\x2a\x0e\x9b\x0d\x62\x65\xe6\x1e\xe6\x4d\x5a\x82\x0b\x67\xc6\x3f\xa8\x9a\xd9\x5a\xbf\x2a\x7a\xaa\xff\x74\x36\x69\xd0\xc5\x5e\x59\x2b\x72\xd0\xc4\x71\xa0\xc8\x8a\xb1\x11\xe3\x88\x12\x0b\x9a\x2f\xf6\xca\xbb\xfd\x4a\xf4\x8d\x5a\xe3\x4e\x74\xf6\xc5\xc4\x9b\x74\x6f\xc4\xdd\x64\x37\xd9\x6e\x06\x7e\x60\x7a\x77\x77\xf7\xe3\x36\x95\x4a\x08\xee\x7c\x47\x6e\xe8\xc9\xab\xa1\xad\x64\x7f\xa0\x58\x43\xca\xfc\xfa\x9c\x7d\x68\xf8\x2f\x78\x97\x5b\x7b\x91\x88\xd7\x19\x97\xc5\xce\xa5\xe2\x5a\x20\xca\x0f\xee\x5f\xea\x56\x88\xbc\xb7\x4a\xa4\x63\x0b\x7c\xac\xf4\x14\x22\x92\x19\xf0\x04\x8d\x00\x22\x7a\x39\x65\xa6\xd7\xb9\xee\xd4\x1d\x6f\x17\x89\xbc\x94\xb1\xdf\xf3\x7f\x2b\x3c\x9e\xde\xa0\x24\x06\x4b\xad\x76\x5c\x53\xda\x7d\x60\x4f\x53\xe9\x7b\x9f\xb1\x7d\x01\x11\xdb\x43\x64\x9c\xa6\xb7\x47\x04\xef\x1a\x1f\x07\x33\x04\xa2\xed\xa0\xd6\xbd\x74\x82\xf1\x28\x57\x61\x04\x9a\x40\x26\xba\x24\x8a\x84\x1f\xf8\x8b\x99\x25\xad\xd9\xb0\xbd\xa8\x75\x18\x3c\x83\x90\xe3\xc6\x4c\xa3\x85\xf6\x5d\xbd\xcd\x23\x47\x78\x68\x0b\x4b\x08\x09\x66\x50\x30\xe2\x5a\x4f\x7e\xe4\xad\x23\x79\xab\x35\x77\xf4\xd0\xfe\xb5\x31\x90\x8b\x6a\x01\x39\xa6\x74\x89\xe5\xc9\xc0\x3e\xdc\x09\xc3\xdf\x83\x99\xc9\xb4\x22\x5a\x91\x38\x94\x49\x79\x22\x81\x1f\x0f\xc4\xb5\x98\xd5\x9f\xbc\xcc\x4f\x04\xf2\x98\xc0\xa5\x01\x80
\x6f\xa3\x75\x8a\xb0\x8b\x1c\xbf\xcf\xc5\x76\x90\xec\x48\xfc\x5a\xc5\xca\xcc\x33\xb7\x90\xf7\xfe\x39\x8a\x72\xb0\x43\x84\x6f\x49\x31\x4a\x2e\xb3\x5c\x03\xc1\x70\x1d\x84\x71\x72\x25\x69\x56\x39\xfa\x2a\x38\xd9\x79\x2e\x8a\xa6\x81\xb1\x5b\x10\x1c\xb8\x97\x27\x0a\x16\xef\x86\x8d\xe9\xe3\xbe\x03\xbb\x37\x97\xe3\x41\xee\xf8\x71\xef\x6c\x40\xe0\x34\x63\xb0\xdd\xac\x94\x7b\x94\xdb\x2e\x49\xfe\xa4\xa2\x5e\xc8\x6b\x10\xea\x1a\xc0\xac\xec\x00\xd2\x11\x3d\xa5\x13\x58\x8d\x3c\x58\xfb\xe2\xe6\x96\xcd\x73\x4e\xcd\x92\x5f\x85\x01\x93\x18\xcc\x6c\x38\xd0\x94\xdf\xe8\x44\x56\x20\xfc\x3e\xbe\x1c\x30\xe3\x8d\x07\x88\xdf\xa0\x62\x70\x7f\x87\xe7\x77\x2a\xb2\xc0\x99\x32\x86\xdd\x24\x11\x7d\xf4\xab\xf4\xaf\x87\xcf\x33\x4d\x28\x18\xb8\xe0\x43\xbd\xcc\x26\x57\x53\xc3\x9e\x41\x04\x46\x63\xe5\xd4\xb9\xeb\x8a\xfe\x61\xbb\x8c\x3c\x96\x06\xbb\x11\xe1\x62\x09\x08\x3f\x2e\x16\x4c\x19\x6a\xb5\x77\x85\x85\x69\x6d\xb2\x39\xd5\x2a\x56\xba\x22\x1c\x0b\x57\x41\x3b\x40\xe1\x5f\xa2\x84\x85\xe0\x9c\xc4\xbf\xfb\x36\x5c\x28\x12\x76\x1c\x95\x0b\x97\x4a\x92\x3b\xe2\x9d\xfb\xd5\x63\x05\x51\x2b\x11\x0a\x7d\x52\x25\x90\x9b\xb9\xa3\x05\xaf\x91\xe3\x94\x01\xee\xb6\x02\x77\x79\x5d\x0f\xfd\x75\xcc\xda\x34\xf9\xfb\x41\x9a\xed\xa4\xe0\x03\x0a\x78\xfa\x59\xdb\x30\x01\x78\xb1\xad\xae\x29\x2c\x9b\x2a\x17\x63\xaf\x87\xce\xe6\x93\xa8\xa8\x28\xe1\xda\xcb\x7a\xa5\x88\xe2\x27\x5e\xcb\xc2\x17\xdf\xca\x97\x76\xc0\xc8\xf1\xf3\xd2\x2b\x71\xe1\x47\xb9\xdb\xd4\xce\x17\xb7\x19\xa8\xc3\x73\x5e\xb8\xdf\x76\x85\x48\xc8\x9f\x5b\x57\xa3\xac\x23\x84\xbc\xb3\xf9\x6c\x79\x16\x27\xe7\x2b\x3a\xc5\x03\xe6\xcc\x23\x47\xb0\xad\xb1\x7b\xb6\x3e\x3b\x6f\x2d\x3d\x30\xc8\x48\x86\x4a\xbf\x0c\x0d\x99\x7c\x78\xdf\x6e\xba\x97\xa3\xc1\x1f\x9b\xc9\x6b\x9d\x8b\x32\x7c\x84\x29\x34\x06\xa1\x5a\xb6\xe2\xf8\x31\x22\x0c\xd5\xb1\x05\xff\xfa\x9f\xf5\x45\x4c\x78\xfc\xb8\x93\x26\xf8\x00\x4f\xa0\x76\x1b\xfd\x23\xfc\x56\x34\x5b\xc8\x56\xae\x60\x49\x3f\x4e\x4a\x61\xaa\x60\x95\x93\xa9\x30\xb4\xd0\xa7\x76\xa6\x31\xea\xae\x33\x5b\xd9\xc8\x87\xbe\x6e\xf2\x92\xed\xf5\xa6\x2b\x21\xf3\x06\xdc\xb5\xad\xed\xc0\x5a\x2f\x9c\x76\xbf\x3c\xae\xe0\xce\x4f\xc6\xfd\x18\x46\x42\x9e\x5c\x43\xcb\xb8\x95\xda\xaf\x6f\xa6\x20\x4a\x05\x19\xdd\xf9\xb6\x4a\xa9\x4d\x09\x45\xdd\xa5\x2e\x95\x3c\x9f\x13\x3e\x87\xef\x18\x74\x31\xd6\xc7\x2e\x5d\x73\x00\x74\x26\x5a\x66\x37\x05\xfa\x85\xdd\x8f\x7a\x66\x1f\x71\xb9\xa9\xa0\xaa\x6e\xa7\xc4\x83\xbd\xe0\x41\x6a\x86\xf9\x5c\xfb\x05\xde\xcb\x88\xb7\x1f\x41\x1f\xd1\x45\xbd\x0e\x88\x35\x08\x76\x5b\x68\xbb\xd3\x06\x92\x6b\xf5\x45\x45\x7e\xd5\x04\xff\x2a\x2c\xb5\x82\x1d\x1d\x85\x9d\x4f\x8a\xa4\x85\x1f\x21\x78\x62\x6b\x30\x40\xd6\x71\xfe\x59\xef\x63\xe8\xd7\xde\x16\x01\xb1\x81\xa7\x9f\xd5\x1d\x25\x28\x54\xcf\x36\x4a\x8d\xc5\x11\xa4\x8e\x05\x9c\xa0\x05\x80\x84\x50\x38\xc8\x6c\xcb\x98\xc9\x71\x90\xef\x27\x75\x84\x9b\xe4\x40\xf1\xb7\xd7\x44\x3c\x3f\xdf\x66\xbd\x66\x81\xf7\x23\x17\x12\xb7\xe8\xd2\x44\x57\xdc\xcb\xd3\x2f\xe0\x49\x27\x66\x73\x24\x20\x4d\x18\x8c\x4f\xf9\x46\xa4\x80\xdc\xb1\x3c\x15\xe5\x8f\x7f\x6a\x18\xd9\x65\x66\xb3\x49\xe7\x29\x3d\xb3\x43\x91\x20\x2c\xeb\x1c\xad\x55\x98\xc2\x69\xb5\xcd\x39\x9b\x39\xdd\x65\xfd\x5a\xba\xa1\x5a\x82\xaf\x74\x6a\x17\x05\xa2\x33\xd8\xa7\x29\x33\x03\xd0\x78\x62\xb8\x9b\xee\xcc\xf8\x95\xbf\x55\x2d\x67\xa7\xf2\xf3\x66\x62\x65\xa8\xc8\xf5\x4c\x44\xd2\x5d\x41\x5e\x55\x10\xac\xc7\xcb\x2b\xbb\xf2\xf4\x45\x08\x6a\xfe\xf1\x45\xee\x1e\xb2\x85\x24\x7b\xfc\x6b\x23\xed\x81\x4e\x15\x06\x27\xd4\xfe\x56\xe6\x7b\x66\xa9\x36\xbe\x82\xb9\x65\xf9\x9c\x1f\xb7\x23\xef\x42\x67\x12\xc0\x4d\x61\x42\x6c\xee\xbc\x13\x48\x64\xf4\x74\x16\xc5\x5d\x10\x04\x0e\xf2\x17\xf6\xd3\xd8\x4
a\x06\xad\x3e\x63\x9c\x7f\xfe\xbb\x1e\xc0\xcd\xe7\xfd\xae\x40\x69\x34\x5a\x1f\xe7\x50\x69\x3e\x8f\x19\xf2\xb6\x73\x8a\xdb\xa6\x66\x83\xcb\x49\xdd\x8f\xdd\x79\x3f\x40\xc4\x64\x30\xf9\xb8\xb0\xdf\x67\x0c\x1f\x18\x4f\xd7\x4f\x6a\xa5\x94\x63\x4e\x96\x85\xa7\xa2\x8a\xec\xbe\xd9\x2a\x09\xc3\x12\x17\x6e\xd4\x12\x65\x7e\x1f\x4f\x58\x58\x08\xb1\x11\x92\x7d\xce\xfb\xd9\xea\xd2\xe1\x3a\xe2\x52\x92\x95\x05\xeb\x97\x9f\xb1\x5b\x5d\x46\x49\x8b\xac\x94\xbc\xef\xa2\x0e\x2f\xd5\x5f\x9c\xfb\xc1\x2a\x84\x28\x33\xf9\x89\x65\xf4\x7c\x74\xf9\x9e\x5d\x53\xfc\x25\xbf\x47\xde\xab\x5c\x93\xd8\x39\x71\xc1\x5e\x95\x33\x88\x43\xef\xc1\xed\x71\xab\x62\xd3\xba\x6a\x0b\x5c\xc5\xd6\xf4\x8f\xa4\xf1\x49\xfa\x8f\xa7\x8c\xad\x04\xf0\x27\xdb\xc2\x15\x7a\xe8\xcb\xbf\x70\x2b\x0a\x0a\xf1\x12\x1d\x99\x56\xe1\x21\x0a\x93\xd3\x8a\xbf\xbe\x1f\xec\xff\xb5\x06\x51\xbb\x3d\xea\xd1\x1f\xb6\xa6\xfb\xe4\xde\x13\x9a\x2b\xd9\xa8\xe2\x1d\x0a\x60\xa7\x9c\x78\x06\x57\x1f\x3b\x59\x21\x08\xe1\xad\x27\x78\xb0\xe2\x80\xef\xf8\x9f\x66\x75\xeb\x51\xd5\x16\xf7\xc9\x44\x19\xe3\x66\x35\xc8\xc0\x07\x91\x1d\x7b\x51\x42\xed\x28\x7a\xce\x7d\xc6\xdf\x7f\x28\xbb\x57\xcc\x79\xfe\xd3\x99\xf9\xa8\xf3\x80\xca\x31\xeb\x05\xe9\x43\xe9\xfd\x80\xd4\x5a\xe1\x8e\x58\x04\xd6\xb3\x74\x07\xcd\x63\xb6\x4c\xbe\x55\xd3\x8c\x14\xd1\xd3\x15\x6b\x91\xe9\x3d\xf3\x7f\x73\x8d\x64\x31\xdd\x7a\xad\x8e\x7f\xdf\xe1\x58\xfb\x77\x6f\xc6\xfd\x71\xf3\x3f\x0e\x31\xfe\x27\xb8\xf0\xdf\x07\x1a\xff\xdd\xbe\x09\xdf\xea\xec\x89\x2b\x18\x00\x68\xf5\x64\xea\xb3\x68\xb0\xb3\xe8\xb1\xbb\x83\xc2\x79\xab\x3f\xb8\x10\x2f\x56\x3b\xeb\xe4\x2a\x19\xd9\x18\x18\x18\xc8\x47\xf5\xab\x9c\xbf\x53\xdb\x56\xfb\xc0\xdd\xee\xaa\xd9\x62\x18\xbc\xe2\x6d\x2b\x4e\x6f\x72\xdc\xfa\x77\xa3\x06\x3a\xb4\x92\x3a\xd3\x9c\xb7\x20\xcb\x65\xb2\xaa\xa6\xde\xfe\xc9\x8d\x69\x6c\x56\x96\x0c\xbc\xf7\xb3\xbf\xe1\xf8\xba\x56\xac\xaa\xd9\xd9\x62\x11\x15\xa1\x21\xd1\x57\x13\xf0\x66\x08\x5c\x05\x88\x55\xa1\x65\xfb\x48\x58\xf8\x5a\x73\x10\x6a\x39\x52\xbc\xa1\x96\xfa\x81\x48\xd9\xdd\xf6\xfb\x1a\xc1\x66\x79\xbd\x84\xbf\xa3\x85\x8a\x0d\x0b\x86\xba\xef\xe7\x5a\x17\xf6\x5d\x75\xb5\x0f\xb8\xdc\x36\xcc\xc6\xea\x1f\xe8\xd4\x8f\x83\x55\xc9\xc0\x38\xcc\xfc\x24\xb0\x68\x1f\x1c\xd6\x99\x52\x72\xc9\xcb\x5f\xe6\xb8\x45\xaf\x0f\x8f\x86\xf2\xb3\x46\x6e\x62\x68\x06\x99\xbc\x31\x8d\x65\xc8\x5b\x49\x35\xa5\x56\x31\x47\xb1\xac\x87\x86\x86\x16\xc7\x13\x2c\xca\x5d\x6c\x15\xf4\x9f\x4a\xfc\x06\x7e\x6e\xbe\x89\x06\xb6\x31\x8e\x0e\x69\xbb\x9a\xaa\x95\xe8\x24\xe9\xc2\x69\x5e\x49\x5c\x5e\xed\x25\xa8\x8e\x5f\x23\x96\x95\x16\xd3\xc3\xa7\xe1\x97\x50\x61\x03\x6b\x80\xfb\xd9\x09\x0c\xe0\x2f\x21\x62\xce\x3e\xdf\xda\x4f\xf4\x17\x0f\xa5\x2d\xce\xd2\x6e\x9d\xe6\x9b\xd5\xf1\x41\x55\xe5\x3a\x4d\x4e\xbb\x7b\xa1\x27\xd3\x9c\xac\x4f\x39\xcf\x09\x9d\xc6\x7e\x56\xba\xbc\x60\x20\xb7\x58\xb1\x00\xe5\x52\x9e\xa7\xc2\xe1\x4a\x5d\x93\xc7\x9e\x64\xf2\xdd\x29\x7a\xee\x67\xd1\x56\x87\x65\x83\xae\xfa\x72\x52\x60\x9a\x82\x9a\xaa\xb1\x6d\xb0\x30\x50\x4d\x95\x0c\x4c\x4c\xc0\x16\x2c\xcd\xcd\xb9\x2e\x67\x11\xf9\x6a\xbd\x91\xa7\x75\x10\x33\xde\x09\x8e\xb7\x46\x53\x02\x9a\xd8\x84\xe2\x54\x41\x0a\x7c\x25\x5c\x59\x66\x54\x6a\xcd\x72\x79\xce\x36\x0d\xed\x16\x5f\xcd\x5d\x2c\x47\xfc\xfc\xa7\x54\x93\xaf\xfa\xd6\xf6\xdc\x40\xd3\x90\xe8\xb0\xa3\x39\x3c\xa1\x3b\xc8\x92\xaf\x99\x11\xaa\xd7\x7b\xdd\xeb\x01\xda\xf5\x7d\x30\xff\x18\x75\x38\x0c\xe9\x1a\x77\x96\x3c\xca\xb4\xb8\x56\xc3\xe3\x27\xd1\x07\xc5\x1e\x44\xec\xc3\xbd\xf6\xf9\xa0\x8d\x70\xe1\xed\xb1\x79\xdc\x91\x2a\xf9\x85\x74\xde\x57\x2e\xfa\xed\xdf\xc5\x98\x80\xe9\x56\x4c\x11\x1f\x77\x5b\x34\xd8\x70\xc3\xa1\x26\xc2\x4d\xfd\xf3\xa2\xcb\x04\x03\x49\x5c\xf8\x
ec\x81\x70\x16\x37\xf9\xa1\x4f\x02\x0f\xed\x79\xf5\x97\xc3\x76\x4a\xa9\x06\x61\x21\xc8\x65\x0b\xf6\x26\x59\xaa\xf3\x15\x2a\xd5\xb9\xcb\xe7\xb7\xe1\x53\x43\xf7\xd7\xb3\xcd\xc3\x70\x3e\x1b\x60\x47\xf9\x03\x06\x92\x88\x23\xce\xb5\x7b\x73\x9d\x07\xcb\xd5\x18\x4c\x11\xbb\x2a\x88\xc9\x75\x7f\xdb\xe3\xd6\xe8\xa6\xd1\xce\x53\xce\xbb\xbe\x10\xdc\x7b\xed\x52\xdb\x19\x00\xad\xbc\xfd\xb6\xe5\x83\x5f\x85\xa1\x44\xe3\x8f\x77\xe7\x1d\x74\xcb\xa3\xb2\xf3\xe5\x7c\xea\x56\xfe\xc2\x5c\x55\xae\xbe\x1f\x00\xc1\x1b\x9c\x22\xe3\xe4\x29\xd8\x4a\x6e\x3f\xde\x00\xdf\x68\x17\x12\xb7\xa5\xe2\xe4\xa2\x90\x53\x11\xb8\xef\xe0\x4e\xd4\x6f\x90\x40\x82\xc4\x7c\x16\xe4\xa7\xca\xe7\x96\x6d\x9b\xdb\xcd\xc1\x47\xe2\x1f\xa0\x88\x2b\x49\x5b\x34\x17\x16\x09\xda\x51\x69\xd2\x8e\x98\x95\x31\xa4\x62\x7b\x79\x83\x2c\x50\x57\xf8\xa2\x87\x13\x83\x0e\x7a\x61\x87\x4d\x88\x1c\xd5\x3d\x23\x55\xc1\x9b\x53\x84\x66\x82\x73\x01\x42\x5c\x9a\xe2\x97\x9b\xd3\xec\xf9\xdb\xa3\xd1\xed\x9b\x64\xcc\x5a\x78\xa7\x47\x02\x28\xbd\x8c\x03\x81\xe1\x7b\xef\xcc\x10\xe2\x5e\xf1\xa4\x32\x84\x74\xc6\xa6\x1a\xd1\x71\xd1\x8e\xe8\x79\xa1\xe7\x05\x0a\xd8\xc9\xfb\x84\xdb\xd5\xd5\xe3\xa9\xb0\x81\xdb\x50\x4a\x98\x7f\x1e\x40\x75\x7a\xbd\x6d\x4a\x5d\x43\x51\x47\xb2\x54\xce\xe4\xf5\x9f\x5f\x0a\xb0\xbb\x61\xac\x79\x19\x59\x91\x95\xd3\x7e\xd8\xe3\xed\xf8\x46\xa7\x34\x84\x86\x44\x79\x8e\x81\xa0\xea\xa3\xc9\xd1\x4c\x28\xc3\x3d\xb5\x99\x73\x97\xdf\x32\xf4\x49\x65\x5c\x02\xc1\x2e\x9e\x3d\x7f\x89\x0e\xe7\x19\x9b\x3e\xe5\x15\x81\xdf\x33\x4f\x63\x18\x6a\x77\x58\x50\x56\x10\x44\xec\xdc\xff\x48\xd0\x15\xa6\x41\x4e\xa7\x40\x15\x4c\x51\x67\xbf\x52\x25\xc7\x60\xb6\x80\x75\xa6\xb6\xf1\xb6\x79\x39\xbe\x67\x3a\xac\xae\x27\xfb\x29\x8c\x05\x6b\xc3\x4f\xaf\xf1\xd2\x0f\x3c\xcf\x7d\x30\xa7\x86\x54\x24\xa3\x1d\x26\x44\x8f\xb4\xce\x54\x48\x95\xe2\x64\x7a\x4b\x2a\xa9\x8f\x40\xc0\xfc\x7d\x9f\x53\xe9\x50\xec\x97\x6a\x4f\x95\xf4\xab\x66\xc6\x33\xc8\x59\x9f\x0c\x64\x02\xc3\x8b\x92\x7b\x75\xe9\x8f\x8a\xdf\x6c\x4e\xb2\x6f\xab\xa8\x5b\x0a\x5f\xef\x6f\x0c\x21\xa6\x98\x83\xa4\x98\x56\x50\xd4\xcd\x3c\x28\x96\xfb\xbd\x83\xbf\x29\x6e\xb6\x55\xbb\x77\x54\xee\xfa\xb7\x30\xc2\xe5\xb9\x95\x9e\xad\xed\xa7\x1f\xc4\x04\xbe\x48\x27\x77\xef\x72\xd3\x5d\x9e\x97\x33\x77\xe4\xb2\x60\xf7\x69\xd3\x85\xbb\x91\x50\xcd\x9d\x11\x39\x84\xe4\x31\x9d\x25\xc0\xad\x90\x7e\x92\x47\xfe\xdd\x81\xc1\x4b\x17\xe6\x3c\x40\x1a\x24\x40\x57\xbb\x16\x5f\x24\x8b\x3f\xcf\x07\xce\xd8\x4d\xb9\x51\xcb\x03\x7a\xf8\xb9\xf9\x1a\xe0\x7c\x6b\xe6\xbc\xa7\x5e\x89\x8e\x65\x3b\x1c\xbe\xea\xe2\x33\xa0\x38\xbe\x77\x7e\x81\x09\xb8\x3d\xec\x15\x91\xbf\x47\xe9\x77\x2f\xbe\x15\xb9\xce\x40\x07\xbb\xd1\x95\xb1\x36\x6b\x0f\xa5\xa9\x91\x20\x55\xb2\x4f\x99\x03\x50\x48\x17\xbd\x92\xae\xe0\x6e\xb6\x78\xc6\x4c\xfe\x39\x65\x88\xe7\x3d\xe2\x59\x25\x78\x95\x6d\x86\x50\xd9\x8c\xd6\x6b\x5f\xe6\x30\x30\xf2\xbd\x5d\xcb\x91\xb2\x40\x63\x14\x50\x78\x54\x12\x68\x74\x7f\x80\x54\xd3\x29\x32\x7a\xdb\xab\x5c\xb1\x38\xaf\xfe\xca\x5a\xf8\x37\xaa\xc1\x30\x63\x27\x13\xb5\x91\xf3\x30\xa3\x3c\x51\x27\x9a\x64\x6b\x51\x4b\xbb\x6b\xc4\x97\x8e\x66\x57\x35\xbe\x25\x6f\x13\xda\x1f\x38\x60\xdc\xf5\xc9\x0c\x15\x97\xdf\xb8\xdf\xd5\x45\x81\x93\xf5\xe4\xad\xc3\xa8\x8f\xa3\x73\xcb\xf5\xfc\x6b\xee\x1f\x48\xde\xa3\x9b\x7a\x6c\xe0\xf2\xb1\x06\x67\xe1\x5e\x90\x19\x8d\x31\x7f\x15\x73\x83\xf0\x1d\xf2\xaf\xad\x43\x17\xba\x6a\xff\x81\x5d\x44\xcc\x62\x60\x8f\xdc\xda\x23\x6c\xa9\x06\x8a\x2d\xc9\x78\x19\xf5\x10\xa5\x5c\xc1\x6c\x83\x0c\x58\xad\x32\xe0\xc2\x7a\x31\xa1\x68\x52\xaa\x4f\x12\x5d\x10\x14\xee\x9b\xe6\xb5\x64\x13\x11\x27\xa2\x2a\x76\xd5\xfd\xf6\x0c\xff\x7e\xca\x7b\x11\x9f\xc0\xf0\x25\xa1\xda\x62\xf4\xfa\
x0c\x80\x97\x06\x83\x63\xcc\xdf\x67\x19\x25\xe5\x5a\xc7\x80\x0d\xb0\xc9\xe3\x5d\xd6\x06\xfc\xe9\x76\xfc\x77\xc6\xc2\x78\x91\x6c\xef\xe4\xf8\x75\x15\x60\x64\x12\x3c\x85\x04\xa0\xa8\xf8\xc2\x3c\xdb\xf3\xf8\x2e\x61\xd1\x45\xfb\x12\x49\x66\x5d\x6b\x6b\x06\xe1\x49\x5a\x94\x61\xb2\xd2\x7c\x22\x1a\x59\x1c\x21\x48\x71\xc2\x86\xd3\x2c\xd6\x1b\xcf\xc2\x11\x18\xde\x60\xfc\x42\x4b\x9b\xf4\x64\x4e\xdb\x9d\xa0\xb7\xdf\x69\x8f\x17\xb7\x39\x6d\xcf\xec\x72\x53\xae\x63\x05\x36\x4c\x0a\xa4\x67\xe6\x78\xd9\xd1\xb9\x49\x54\x47\x6e\x00\xee\xbf\xba\xe8\xb7\xe6\xa4\x43\x25\xcc\x11\x89\x90\xf2\x06\xdb\xb2\x52\x4f\xa1\x3b\x88\xd5\x4e\x8d\xc2\x03\x3b\x83\x64\x22\xb3\xe5\xe5\x0e\xb0\x01\x9c\xab\x67\x00\x39\xf5\x5c\xb4\x5c\x48\x6a\xde\xe7\x40\x5f\xb5\x9a\x69\x24\x60\xeb\x20\xdc\x52\x1a\x3e\xfc\x9c\xcd\x24\x6f\x9b\xaf\x2d\x9c\xcb\x47\x23\xab\x6c\x9b\x6b\x6e\x91\x79\x3b\x5c\x78\x2c\x30\x1f\xfd\xf2\xdb\x57\xef\x12\x0d\xbe\x8b\x68\x81\xaa\xae\xd9\x0a\xdc\x25\x41\x2b\x0f\xcf\x18\x2f\xa9\x3c\xe5\xc7\xfb\x2c\x0c\xca\x4c\xb0\x31\x49\xe1\x2d\xd4\x8b\x8f\x50\x1b\xa5\x30\xdd\x16\x0a\x53\x4d\x74\x48\x16\xb0\x06\x35\xf4\x47\xfc\x07\x14\x1c\xc1\x3c\xed\x29\x37\xed\xe3\x15\x8b\x29\x26\x78\xbb\x9c\x72\xfd\xcb\x03\xcf\x7e\x03\x20\x7b\x4c\xba\xc9\xc0\x01\x68\xc5\x76\x50\x8d\xd7\x68\xae\xbc\xd4\xe5\xef\xbc\x81\x7f\x69\x21\x5f\x47\xe1\x9b\x88\xc6\x6e\xf1\x47\x74\xbc\x47\xf6\xbb\x77\xd3\xac\x37\xb5\x95\x85\xb9\x1a\x2b\x08\xb8\xb6\xbc\x8b\x14\x9c\xe3\xe9\xf4\x46\xb0\x93\x0f\x2d\xac\x5b\xa9\xc7\x09\xcc\xd0\x48\xc7\x27\x00\x69\x1e\xa3\xe9\x30\xf6\xd7\xcd\x07\xe3\x58\x7b\xc4\x5a\x27\x83\x2e\x83\x11\x84\xdb\x6b\x59\xa9\x10\xe5\x78\x0f\x4e\x67\x91\xd1\x85\xe4\xb0\xad\xec\x61\x97\x6d\x0e\x5b\x5e\xe9\x09\x23\x8b\xf8\x0e\x96\x11\x14\x8b\xee\x20\x92\xf5\x34\xc8\xa9\xd0\xa9\x57\x52\x92\xcf\x47\xed\xd4\x92\xdd\xa3\x74\xc0\xc7\x0e\x24\x60\x0d\x9c\x7e\x3b\x28\x7d\xd9\x15\x37\x55\xb9\x6b\x33\xf1\x0c\xce\xb9\xef\xf3\x73\x8e\x18\x01\x42\x83\x93\x76\x4d\x19\xd5\x1b\x20\x37\x1c\x4f\x72\xbc\x62\x27\x3d\x7a\xa9\xa5\x8f\x95\x3b\xc8\xfd\x12\x94\xcc\xe5\x49\x0e\x16\x6a\xb1\x49\xd9\x0e\x04\x1b\x54\x5f\x86\xda\xce\x52\xb3\xd2\x0d\xde\x6f\xd6\x38\x00\xb7\xc8\x1e\x73\x5b\x24\x16\xf1\x80\x2d\x01\xa2\x67\x6f\x7c\x13\xb3\xaf\xfa\xe0\x21\xad\x24\x3b\x9d\xfe\x95\xdc\xdd\xc2\x09\x98\x60\x0f\xd5\x1a\xf8\x5a\xb9\x4b\x69\xb3\x24\x4f\xfc\xa3\xa4\x39\xa5\xee\x1f\xf9\xd8\xb8\x9c\x20\xd4\xf7\xa8\xb6\xf4\xb0\x1b\x0f\x63\xbb\xcc\xfc\x4e\x89\x8a\xc9\x30\x42\x60\x9c\x4f\x48\xf8\x76\x20\x18\x12\x3b\x37\x7c\x63\x10\x05\x42\xc2\x03\x9b\x33\xc6\x1a\x74\x55\xf3\x3f\x14\x41\xba\x66\xed\xd8\x78\x15\x3e\xb4\x32\x6b\x98\x31\xc2\x08\x5f\x6d\x21\xf0\xbf\xa0\x7d\x03\x9c\x57\x8f\xc1\x8a\xd3\xa2\xe7\x75\x7c\xbf\x2c\x40\xb2\x98\xc7\x6b\x39\x1d\xca\x4b\x06\xb6\x0a\x3f\x79\x35\xd9\x92\xd6\xff\x12\xe8\x1a\x54\xf9\x0c\x57\x30\x6b\x93\x34\x59\x6a\x2c\x69\xf6\x03\xeb\x44\x46\x02\x89\x34\x61\xbe\x1c\x44\x39\x9f\x4f\x9e\x1b\x00\xf3\xf9\x9c\x0f\x8c\xd1\x5f\x9d\x9f\x4f\x66\x32\x02\x89\xc9\x22\x7f\x44\xf0\x0c\xe2\x07\xd3\x13\x5f\x4a\xe5\xc9\xb7\xf1\x25\x4e\xae\x0c\x7c\x1a\x92\xc2\x14\x91\xa8\xce\xef\x69\xf8\x3e\xf1\xf1\x0b\xa9\x6f\x6e\x88\xf4\xf2\x39\x88\x37\xdd\x69\xe5\x7e\x39\xcd\x84\xdc\xf5\xe0\x8c\xe2\x79\x29\x6f\xde\x62\xe9\xcb\xea\x81\x52\x88\x34\x73\x92\x30\x07\xb9\x31\x3c\xae\x65\x19\x72\x9c\xb2\x80\x74\x2f\x28\x8b\xe9\x96\xfa\x77\x6b\x63\x08\x91\x99\x88\x8f\x61\x6f\xab\x82\x0c\x13\xc8\xa7\x78\x58\xf6\x77\xd0\x61\xb7\xde\x54\xfb\x03\x1f\x78\xcf\x6f\x90\x6c\xd4\x78\xb8\x11\x21\xa1\x6b\x3c\xcf\x04\x52\x3d\x1d\x38\xb0\xd7\x21\xbe\xbb\x7a\x31\xa5\xb5\x5d\xc2\x1b\x0e\xff\x35\x93\x31\xb3\xaf\x86\xe0\x2b\x23
\x74\x5d\xed\xe5\xd4\x9b\xed\x35\x9e\x43\x36\x56\x36\xe7\xa4\xf5\x6f\xfe\xbe\xc1\xaa\x9e\x98\xe7\xa5\x01\x73\x2c\x46\xd1\x7c\xb4\x3f\x88\x6f\x2a\x3d\x7f\xd0\x90\xeb\x9b\xc7\xec\x91\xae\x6b\x1a\x66\x20\xa9\xd4\x5b\xc9\xd6\xfa\xf5\x21\xa6\x94\xa6\x8a\x02\x4e\x54\x31\x2c\x43\x84\x7f\x24\x4a\x10\x7e\xc4\x04\xaa\x4a\x97\x9d\x6a\x8f\x78\x73\x66\x93\xd2\xcb\x29\x5a\xda\x49\x90\xb2\x38\x48\x88\x05\x86\x6f\x07\xaa\x80\xe6\x53\x7a\x30\x3c\xdd\xec\xec\xec\xe4\x3f\x4a\x60\xd6\x14\xa2\x7d\x33\x63\x7f\xa1\xb2\x07\xfa\xc8\x5e\x79\x07\x5f\xd5\xc6\xc1\x5c\xa4\xce\x84\x6b\x43\x13\xf1\xcd\x41\xb9\xcb\xb8\x22\x05\xa8\x5a\x5f\xfd\x8a\xa9\x2c\x9b\x8f\xfe\x84\x63\xd2\xda\xba\x68\x3d\x2a\x65\xf6\x6c\xb4\x83\xef\x49\xbf\x73\xa2\x12\x25\xee\xa3\x16\xd5\xa8\xa1\xc5\xb7\x60\xe2\xa0\x26\x54\xcb\xa9\x8a\xed\x38\x7a\x9e\x00\xd4\x59\xcd\x2a\x79\x34\xfb\xa2\x30\xff\x9a\xbd\x52\x1b\xce\x95\x9d\x8e\xc6\xc6\x84\xec\x9e\x37\x40\xe3\xb0\x16\x43\x44\xad\xdc\x41\x23\xde\x38\x59\x6b\x5e\x39\xff\xa8\xf4\x7a\xa9\xbc\x5f\x44\xef\x23\xe7\x9d\xba\x25\xfd\x33\x6b\xd9\xe6\x9b\x95\x3b\x68\xbe\xcb\xf1\x0e\x78\xeb\x4e\xa5\x85\x7d\xad\x8b\x08\xc5\xb1\xe5\xe5\x28\x0f\xc1\xc2\x5d\x46\x61\x68\x51\x2f\x5f\x20\x8c\x1a\x5a\x78\x0d\x1e\xcb\x9c\x62\x37\x68\xe7\x84\x23\x70\x86\x05\x71\x05\x5e\x24\xf3\xe9\xc6\xb5\xc6\xa9\x99\xcd\x7d\xc6\x1a\xdf\x06\x3b\x4b\xcc\x53\xa9\x36\xf2\x81\xc5\xd3\x47\xbd\x3d\x59\x17\x9b\x26\x18\xa1\x4a\x1f\x72\x8c\xa6\xc5\x08\x28\xbb\x61\x63\xc5\x52\x82\xb5\xf5\x92\xa5\xf9\x02\xb0\x87\x0c\x5a\x30\xf3\x5e\x8a\xa2\x36\x6f\x87\x94\x1d\xbc\xa1\x1a\xb2\x3a\x89\x9d\xec\x32\xcd\x86\x2c\x58\xdd\xe0\xea\x33\xb9\x21\x87\x62\x11\x31\x59\x3d\x2d\x8f\xbc\x5d\x70\x16\x0d\xc9\x44\xae\xfe\xa2\xff\x12\x12\x50\x7e\x4a\xed\x57\x8c\x0c\x1f\x8a\xca\x49\xcb\xaf\xdc\x94\x97\x61\x5b\x4a\x43\xf2\x2c\xcd\x96\x24\x3f\x9c\xcd\x0f\x61\xe5\x7c\x14\x1a\x06\x3c\x2f\xbd\xcc\xcd\x41\x06\x9e\x76\xe9\x9a\x8d\x5f\x97\xb8\xd7\x9c\x96\x3d\xa2\xd9\x3f\x94\xb3\xc9\xdb\x20\x3b\xc1\x7e\x09\x8d\xb5\xf4\x3d\x3e\xac\xd0\x78\xc6\xfb\x23\xde\xe1\x0c\x15\xe5\xda\xd7\xd5\x53\xe3\xf4\xc6\xef\x28\xc1\xe6\xe2\x4f\x9b\xf2\x1b\x39\xc8\x0a\x14\xac\x24\x10\xca\x30\xb2\x2e\x10\x59\x31\x1d\xb7\xf2\x03\xa4\xc9\x15\xad\x50\x27\x1e\xdc\xce\xd2\x06\x07\x4b\xcc\x15\xf1\x43\x38\xf0\x00\xc7\xd8\xb6\x51\x4a\x73\x76\x51\x79\x01\x90\x6c\xf5\x57\x45\xee\x75\xdc\x14\xd7\x90\x09\x03\x2e\xa1\x21\x64\x60\x76\x78\x13\x50\x65\xc4\xd4\xaa\x0d\x15\x07\xe8\x32\x56\x48\x1e\x31\xd2\x40\x04\xca\x86\xae\xee\xe8\x3e\x5e\x23\xa7\x43\x92\x89\xc9\x7d\xdd\x90\x6e\x7e\x8e\xdf\xdc\x90\x2e\x27\x95\xdc\xed\x94\x66\x51\x95\x1b\xfa\x78\x5d\xad\xad\x23\xbe\x17\xc3\x80\x93\x90\x5c\xdf\xb5\xb7\x7b\xef\x30\x01\x77\xa7\x6b\x40\xe5\x8c\x41\xef\xcd\xe6\x08\xef\x91\xcb\x56\x56\x69\x39\xe5\xb1\xe0\xf9\x42\x52\x35\xf7\x13\x48\x98\x22\x41\x2b\xae\xce\xe7\x0d\x70\xe9\xb9\x6f\x96\xfa\xfd\x7b\xf4\xd8\xd0\xb6\x12\x35\x5b\x00\x1f\x18\x16\x51\x98\x51\x0b\x5c\xa8\xcf\x92\x6b\x8c\xe2\x51\xac\x04\xed\x09\x78\x9c\xbd\xc9\x5d\xe0\x80\x99\x0b\x8d\xb5\x48\xee\xfd\x81\x51\x8c\x41\xa8\x44\xf5\xdd\x66\x53\x5a\x93\x7d\xe3\x33\x0f\xa1\xff\xd5\x5b\x78\x93\x66\x5e\x9e\x22\x2b\x5f\xac\xf3\x06\x18\xcf\x35\xb3\xa4\x09\x6b\x7d\x7e\xc3\x1b\x35\xf5\xe2\xf5\x4a\x0b\xf7\x0d\x24\x4f\x95\x03\x9e\x30\xe9\x18\x73\x45\x56\x4a\x9f\x78\xc4\xf1\x3c\x8a\x78\x16\x57\xc8\xf0\xa9\xc1\xd7\xdb\x6a\xe4\x01\x40\xfc\x69\x57\x75\xf3\x31\x6b\xb5\xb8\x2b\xe8\xf1\x0e\xf6\x64\xc5\xc3\x04\x53\xbc\x99\x30\x34\x38\xdb\x6c\x4e\xf9\xae\x9e\x1e\x26\x74\xa9\xc1\x4d\xae\xd2\x6e\xd9\xc9\xa8\x76\xbb\xba\x2d\x99\xc7\x40\xa8\x09\xd7\xc6\xd4\xe0\x1f\xd0\x83\x04\xb4\x2a\x25\x1c\xd7\xb3\x51\xe
a\x0b\x9d\x23\x56\x73\xc8\xfd\xe4\x5b\x2c\xbd\x16\x5f\x4d\x96\x47\x9c\x59\x75\x6b\xfd\xc9\xe9\x00\x81\xb3\xbb\x3f\x96\x4c\x69\x67\xe6\x34\x7e\xd2\x74\x4d\xc4\xb0\x01\x77\xa5\x7b\x9a\x0e\x9a\x43\xcd\x6d\x1a\x07\xc1\xe9\x60\xf9\x6b\xfb\x94\x0a\x2f\xdb\x23\xd0\x8d\x35\x5e\x83\x09\x17\x11\x1f\x38\x48\x11\x75\x1b\xa7\x06\x0c\xcc\xf4\x46\xfd\x96\xf0\xa0\xbd\x0b\xac\xd2\x7c\xdc\x04\x6b\x76\xee\xfb\xda\x41\x13\xee\x92\x14\xd8\x20\x4a\x84\x01\x1b\xee\x9a\x3e\x53\x31\x95\x0b\x02\xff\xc2\x4a\xdc\xa6\x2b\x23\xd1\x7e\x93\xde\x87\x2f\x54\x1d\x0c\x55\xc4\x67\x10\x9c\xf6\xa5\xbb\xfb\x8b\x87\x03\x3f\x8b\x94\x1c\x22\x16\x4c\xee\xe2\x1d\xb4\x1f\xca\x87\x73\x40\xa9\x69\x4c\x87\xd2\xc5\xdf\x6d\x03\x4a\x8d\xee\x66\xc0\x6d\x65\x80\xf7\x4b\x19\xb3\xe0\x6f\xad\xa8\x5f\x18\xb7\x83\x27\xfa\x4d\x3b\x34\x9f\xeb\x84\x5c\x63\x1b\x3d\x5e\x0b\x6c\x80\x52\xd1\xbd\xe8\x12\xe3\xa4\x1b\x6b\xb4\xe0\x28\xda\x6b\x5e\x70\x5a\x55\x38\x86\x71\xde\xbe\x92\x6a\xc3\x0e\x35\x3e\xe7\x19\x8a\x98\x9a\x92\x5f\xe2\xcc\x56\x40\x08\xe2\x02\x1c\x44\xcc\xa1\x00\xec\xee\xcc\xd8\x74\x47\x41\x40\xec\x6d\x14\xae\xdf\xe4\x8b\x83\x1f\x97\x67\x43\x2f\xbd\xa9\xa9\x45\xc6\x64\x32\x42\x76\x69\x61\x3c\xe4\xcb\x29\xd8\x1c\xc7\xd2\xf3\x9c\xc8\xb7\x6a\x97\x60\x3e\x15\xad\x9a\x50\xca\xe5\xb5\xb4\x19\xc0\x04\x3f\xf6\x96\x40\x9f\x66\x0a\xea\xed\x17\x21\x28\x9b\x1b\x2f\x4e\x7a\x54\xb4\xe2\x04\x63\x43\xf2\xf2\xb0\xd0\xcb\x19\xa2\x13\x65\x5f\x31\x68\x71\xd7\xd7\xcc\xd3\x04\xb5\x98\xcb\x0d\x62\xbf\xef\xa1\xb9\x25\x3e\xd1\x68\xfb\x1b\x62\x1a\x7d\x5b\x51\xce\xe9\x1e\x78\xee\xac\x92\x5a\xd6\xb0\x07\x68\xb5\x4f\x8d\xfa\x15\xd6\x9c\xd9\xe3\xcf\x2c\x98\xe2\xdf\x45\x83\x5d\xf3\xdf\xf2\x3c\x7c\xca\xc3\x83\x1b\xec\x3d\xcf\xcc\xe1\xca\x2d\x60\x13\xb1\xc5\xe1\xcc\x2d\x70\xc1\x43\xa9\x57\x2d\xd1\x8d\x30\x69\xa8\xf9\xa1\xfb\x67\x56\x3c\xae\xb2\x3e\xbc\x65\xe1\x48\x8b\x31\x79\x25\x01\x72\x06\xcd\xf4\x78\x3a\x0a\x93\xb7\xf5\x86\x91\xb0\xe2\x1b\x82\xdd\xbd\x79\x45\x37\x88\x40\xfd\xa0\xaa\x71\xd4\x4a\xa3\xa7\xaf\xc4\xed\xbe\x1e\xcf\xc3\x49\x42\x7b\xfd\x78\x60\xb8\xf7\xa2\xd3\x14\xe1\xa9\x1e\xfd\xe3\x6a\x9c\x2e\x26\xc5\xcd\xc6\xf4\x8c\xa2\xa5\x78\x4b\xbe\x07\x29\x40\xc1\x5d\x02\xde\xcf\xfe\x60\x6f\x5a\xb1\x50\xf0\xed\x93\xd5\xfa\xdb\xc9\xbd\x7e\x4f\xeb\x8d\x9a\xf9\xdd\x5d\xd8\xb5\xcb\xdd\x73\x79\x13\x02\x34\x64\xed\x72\xf6\x61\x67\xc0\x16\x70\x72\xa1\x35\xb0\xb8\xd7\x15\xb7\x7c\x1a\xf5\x86\x5f\xe6\x13\xda\x7e\x4f\xa6\xe8\x78\x93\x82\x4b\x8b\xfa\xcd\xa8\x76\xc5\xb2\xdc\x4a\xdc\x7f\xe2\x29\x70\x7f\xcb\x6c\xd0\xe9\xff\x93\x1b\xf8\xef\xae\x80\x80\xfb\x59\xee\xbf\x2a\xfb\xdf\x4a\xc0\xef\x0f\x29\xc2\x95\x10\x7f\x60\x98\x0a\xe0\x9c\x5c\x57\xa0\xc1\x06\x8e\x8f\xd8\x5e\x86\x3f\x87\x68\x09\xdf\x18\x3f\x29\xdc\x77\xc5\x1d\x9a\x1d\x24\xed\x07\xbe\xf2\x35\x30\xf8\x99\xe2\x68\x38\x5d\xa7\x28\x72\x8c\xe2\x1b\x29\x2e\xe4\xe6\xbd\x92\xb6\xd9\xd9\xb3\x32\xe2\x09\x9b\xf3\xec\xe4\x87\x99\x57\x2f\x68\xc5\x95\xd7\x34\x6b\x4b\x22\x0e\xcd\xdf\xb2\xa1\x73\xc2\xdf\x7c\x58\x5d\xa2\xf0\x3e\xe5\x97\xd0\x2f\xaf\x35\xfc\x14\x33\x63\xe3\xa9\x0a\xc1\xd1\xb9\xd5\x63\x4d\xc1\xee\xa2\x50\x0d\x57\xc5\x3f\x96\x89\x09\x06\xda\x2c\x69\x8c\x51\x9b\x6f\x39\x44\x05\xe2\xb5\x4f\x8e\x07\x0e\x74\x6d\x92\x4f\x9e\x27\x77\x1a\x33\x39\xbc\x72\xd0\x7b\x99\x96\xda\x04\xed\x12\x49\x55\x4c\xb9\xdc\x28\x6c\x33\x91\xcf\x2f\x46\x71\x3b\x3f\xf6\x3f\x20\xee\xbc\x66\x25\x07\x6a\xbd\x3b\x8c\xbc\x28\xde\x54\x8d\x6e\x75\x9f\x0f\x3a\x1c\xab\x6f\x69\x69\x79\xa3\x9b\x7c\x07\x29\xdf\xbf\x8a\x54\x87\x8b\xeb\x89\xf5\x46\x0a\xbd\x2e\x1d\x18\x62\xb2\x90\xc9\xb5\x2d\x26\x13\x6d\xd2\x83\x92\x8a\xfb\x4d\x92\x81\xd5\xbe\xf6\xe8\xc6\x4d\xf1\x24\xa1\x82\x65\x
e0\x0e\xea\x83\xcd\x37\x46\x58\x38\xde\xb1\xd5\x46\x0e\xa9\x63\xc4\x09\xe8\x68\xc2\x81\x91\x47\x9f\xce\x62\xb5\xf9\x08\x24\x56\x8b\x1c\xf1\xa6\xee\x8c\xa4\x72\xa7\x36\x25\xa4\x12\xe8\xd8\xae\x3a\xe1\xa7\x07\xa1\x25\x28\xdf\x0a\xc5\x0e\x1d\xa5\xac\xd3\xe4\x0b\xda\x22\xa5\x05\x03\xd4\xde\x5e\xf3\x68\xcc\x0b\x11\x92\x81\x4b\x2d\xf3\xa7\xc8\x93\xbb\x7a\x84\xb7\x30\x5c\x95\xba\x37\xc9\xc6\x58\xe2\xd7\x1b\xc1\x5e\xd3\x4f\xeb\x6a\xf8\x20\xd1\x35\x4c\xd9\xce\x4d\xe7\x68\x35\x70\x55\xa0\x4b\x9d\x57\xc2\xa7\x1b\x7f\xeb\x37\x46\xd6\x96\xfe\x99\xe2\xf9\x9f\x5a\xe1\x61\x73\x9e\x59\x77\x70\x33\xcd\x22\x3e\xe7\x37\x87\x87\x22\x45\x2c\x7a\xb9\xe8\xfd\xc9\xd3\xd4\xe6\xd3\xd3\x18\xb5\xee\xa0\x01\xad\xf4\xfc\x79\x1e\x03\x41\xcf\x89\x3b\x0e\x76\xd8\x66\x79\xa7\xf3\xe9\xd4\x31\x93\x3d\xc4\xf0\x5a\x0d\x0f\x81\xfe\x76\x9c\x68\xfb\xe9\xb7\xc4\x3d\xe9\x8f\xa1\x36\xb9\x0e\xfa\x75\x7f\xd1\xbf\xea\xcf\x4d\xe0\x7b\xd8\xfe\xdd\xd3\xe7\xa9\x1e\xae\x3f\x87\x3c\xf7\x97\x1e\x42\x7f\x89\xc2\x8b\x77\x67\x16\xf3\xef\x3b\x3f\x1d\x65\x59\x3a\xc3\xbe\xb2\xa0\x02\x5d\xde\xc3\xcd\x40\xae\xbf\x45\x37\x3d\x9e\xc2\x95\xec\x78\x2f\x80\x03\x02\x40\xf6\x86\xbe\x11\x97\x23\xc4\x5f\x37\xf1\x7b\x4f\x1b\xe8\xf0\x9d\x07\x71\x94\x1c\xcd\x66\x00\x6d\xb1\xe4\x66\xcb\xf4\x8b\xa7\xf2\xdc\x09\xc6\x74\x8a\xec\x6d\x70\x3c\xac\xf8\x9c\x7f\xd0\x80\x0e\xeb\xb7\x86\xf2\xf1\xe4\x42\x18\x84\x5b\x92\x5f\x48\x4b\xc7\xc3\xc5\xe4\x79\x78\xac\xcb\xe1\xb8\x1c\xb2\x38\x16\x49\x84\xf8\x8d\xf4\x99\x91\xd1\x7e\xd0\xf5\x6f\x79\x0e\x7e\xef\x67\x6a\x8b\xfa\xbe\x3b\x2b\xdc\x7f\x95\x79\xf8\xba\xbe\xef\xcc\xbf\xa4\x05\x25\x48\x78\xe6\x7a\xd7\xdf\x01\xa1\x39\xc3\x0b\xde\x0b\xbf\x40\x4f\xf6\x0f\x15\xb0\x9d\x62\x57\xc8\x2e\x34\x7c\xb9\x96\x4c\xe4\x5f\x22\xa0\xdb\x10\x6a\x68\xeb\x3d\x13\x3c\x3d\x69\x05\xdc\xa7\xa7\x97\x6a\x03\x49\x31\x04\x70\x25\xc3\x6a\x3a\xa2\xb4\xf3\xb4\x6d\x08\x77\x04\xfb\x3b\x37\xc8\x84\x59\xd1\xe4\x12\x99\x63\x54\xf4\x7f\x88\x84\xf7\xfd\xd9\xec\x46\x5a\x88\x31\x05\x43\x12\xfc\x3c\x01\x4e\xe7\x4e\xc7\x8d\x7d\xfe\xde\x31\x32\x9a\x37\xaa\x34\x16\xc7\x63\x81\x64\xe0\xea\xe9\x5a\x8b\xfa\x13\x01\x75\x53\xd0\xfc\x1d\x9e\xcb\x27\x7c\xab\x87\x02\x84\x38\x90\x46\xad\x51\xb9\x2e\x46\x1e\x89\xd8\x5f\x8a\x1b\x07\x49\x28\xda\x8d\xef\x92\x38\x97\xeb\x7b\xa1\xec\x12\x95\x06\xe5\x5a\x22\x56\x5c\x7a\x6d\x73\x59\x40\xf8\x25\x5c\xae\x56\x70\x8e\x16\x61\xdf\x5a\xa1\x9c\x8d\xbf\xaa\x5a\xae\x8b\x57\xca\xbb\x4d\xa0\x1c\x74\x46\xaf\x58\x37\x17\xdf\x16\xe4\x35\x84\x25\x86\xda\x3a\xda\xaf\x3b\x48\xe8\x56\x0e\xf3\x56\x44\xeb\x09\x2e\xd7\xa2\xa2\x1c\xe0\x66\xce\x68\x58\xc6\x5a\x4f\x57\xc8\x02\xff\x25\xcb\x2f\x7d\x76\x85\xde\x7e\x40\x5b\x64\x05\x3c\xdb\xc7\xe2\xe2\x86\xbc\x2a\x0a\xc6\x81\xa6\x11\x91\xaa\x05\x52\x30\x90\xe7\xc2\xc5\x99\x0b\x7d\xd5\xb1\x50\x7f\x5f\x3c\xb8\x9d\x62\x8a\x84\x5b\x03\xb7\xf6\xc8\x35\xe4\x9c\x99\xc5\x78\xa9\xe0\xf5\x07\x35\xd8\x14\x1b\x9c\x66\xc1\x12\xd0\xaf\x92\x68\x2b\x16\x41\x21\x94\x40\x2d\xc5\xad\x70\x9d\x61\x49\x3a\xb6\x12\xb3\xd7\x83\x77\x0c\x3d\xea\x34\xee\xe1\x28\x10\xe7\x02\xa1\x62\x51\x99\xb9\xa0\x40\x49\x88\x80\x6d\xde\xea\x1e\xc4\x08\x1d\xf2\x7b\x87\x0f\x53\x95\x04\xf4\xb6\xa0\x91\x26\x3a\x31\xf1\x51\xce\x29\x40\x67\xaf\xbe\x69\x45\x10\xea\x89\x73\x1f\x57\x86\x7c\xeb\xdf\x34\xbf\xd0\x23\x74\xa8\x45\x39\x31\x6f\x1c\x1f\x18\x3a\x0a\xf7\x06\x0d\x88\xe7\x67\x2f\x74\x41\xb9\x16\x51\x58\x8e\x3e\x33\x9d\xbe\x1c\xc4\x35\xaf\x44\x7f\x45\xfc\x85\xf8\xb5\x78\x38\xb9\xa5\xcf\x0b\xbe\xc4\x33\xad\x29\x7e\x96\x9f\xdb\xed\xac\xb3\x79\x75\x4f\x35\x9a\xeb\x81\x21\x8e\x05\x0c\xcb\xac\xc4\xfe\x93\xa3\x6d\x6b\x8b\x24\x4d\xb6\x88\x16\x11\xde\xad\xb0\x1a\x82\x61\x64\
x04\xde\x1d\xb9\xc9\x16\x9b\x02\xd2\x5c\xa2\x67\xe8\x08\xd2\x60\x90\x0d\x3f\x36\xc9\xab\x87\xc6\xa6\x0e\x18\x84\xa0\x93\xc7\xe2\xc8\x21\xfb\xc1\x42\xce\xda\x5a\x64\xd4\x10\x5a\xaf\x8d\x34\x57\x73\xe3\xde\x25\xb2\x72\xbd\xff\xb2\x18\x45\x46\xfb\xb1\xe3\xfb\x4b\xeb\x38\x79\x90\x7b\x41\x7d\xce\xf9\xcb\xe3\xa2\x22\xaf\x35\x2c\x5c\x21\xb1\xab\x4b\x0e\xeb\xef\x2c\x2b\x12\xa7\x43\x2f\x39\xe7\x99\xbe\x58\xff\xab\xe5\x9b\xd6\x00\xd3\x0d\x5e\xd7\xb9\x9a\xd6\xf3\x26\x49\x39\x1d\x38\x57\x79\x9c\x27\xaa\xd6\x92\x5b\x86\xf8\x6d\xb2\xf7\x2f\x5e\xf4\xf9\xbf\x65\x09\x38\x60\x32\x55\x7e\xe5\xc5\x19\x51\x2e\xf8\x3c\xcc\xf9\xe7\xc1\xe9\xf5\x37\xcf\x55\xba\xaa\x4b\x64\x07\xbb\xed\x3c\xa2\x62\x03\xe7\x07\xda\xe4\x63\xc8\x1b\xd6\x89\xba\x37\x75\x2d\x59\xa0\xf0\x84\xb6\x76\xc5\xa6\xc5\xee\xfe\x99\x0c\x27\x0a\x79\x30\xb0\x86\x06\x9f\xd9\xfa\xfe\x57\xa8\xa3\xa1\xc0\x69\xe2\x52\xdf\xf8\x42\xf5\x0d\x7b\x95\xe6\xc8\xc3\x62\xea\x65\xe3\xb0\xc7\xda\x3e\x51\xa0\x56\x78\xd0\x80\xb2\xf0\xe7\x77\x94\xe2\xb4\x8a\x45\x0b\xfb\xbb\x88\x97\x1e\x74\x3c\x61\x0f\x53\x3f\xd2\xad\x1b\xdc\x5f\x96\x43\xaa\x13\x1a\x08\x23\x73\xd3\x50\x3e\x7b\x99\xf0\x08\xe6\x31\x39\xaa\x8c\xc2\x0f\xad\x60\x3c\x07\xbc\x11\xb7\x10\x9b\x4b\x02\x7e\xd3\xfd\xaa\xce\x0a\xb4\x70\x1d\xe4\xc5\xe5\x1a\x15\xde\x75\xca\x41\xc7\xe7\x58\x24\x02\xb6\xa9\xdd\xf5\xf9\xae\x2f\x91\x06\xb0\xd7\xb6\x9b\xb5\x47\xaf\x27\xeb\x8e\x5c\x89\x42\x34\x7a\x66\xc4\xf6\x12\x6b\x98\xdc\x58\x2e\xbd\xa1\xe9\xc1\xf7\x63\x2f\x1b\xcc\xa6\x0a\xc3\xc3\x2e\x20\x4a\x3b\x4e\xaf\x09\x9c\x79\x99\xc5\xd7\xad\x79\x13\xa1\x48\x44\x3f\xad\x72\xc9\x90\x89\xe9\xaf\x3f\x9c\x4b\x30\xe5\x36\x28\x5d\xdb\xba\x93\xab\xac\x34\x4e\xd1\xfc\x90\x56\xed\x75\xd8\x5a\x36\xa2\x7b\x73\x23\x1d\x41\x61\x3d\xda\x43\xfb\xcf\xd7\x52\xb8\xd1\xb7\x9b\x3f\xb5\xb6\xf0\xb6\x20\x70\x7e\x70\x71\xfc\xf8\x69\xd3\x64\x61\x8c\x35\x91\x12\x3c\x43\x37\xca\xa3\xdb\x97\x17\x77\x24\xcc\x36\xb7\xac\xee\x58\xaf\x5c\xb7\x82\xb8\x90\x76\x55\xa9\x8c\x8f\x0c\x2a\xe2\x67\xa3\x95\x40\x9b\xc1\x50\x3e\xbe\x08\x9a\x21\x45\xa5\xec\xf3\xd3\x0b\x91\x46\xb7\x25\xe6\xdf\xd8\xc9\xb0\x10\xbc\x6c\x8e\xab\x11\xaf\x48\xf4\xc3\x8a\x7b\xed\x71\x75\x8c\x09\x80\xa8\x38\x46\xc1\xd1\xda\xdf\x7e\x2c\x5f\x03\x46\xb6\x26\x3c\x41\xb1\x48\xcc\xce\xe2\x30\x70\x9b\x0e\x9d\xb2\xa0\x27\x69\xc5\xbe\x6a\xe7\x0d\x58\xb8\x4a\xb8\xba\x8f\x98\x8a\x5c\x4b\x41\x00\x1b\x5e\xa4\xd2\x82\x1d\x1f\xad\x34\x1d\x0d\x74\x38\x95\x5d\x21\xbb\x60\xd4\x3a\x73\xa8\x49\x69\x98\x68\x18\x5d\x9b\x72\x31\x82\x08\x8a\xb7\xd3\xf8\x33\xbf\x49\x83\x8d\x0d\x02\x60\xb5\xaa\x6a\x6a\xcb\x74\xe1\x4b\x57\xd8\x84\x48\x7d\x65\x03\x93\x7a\xd7\x66\x7c\xd8\x2b\xaa\xd2\xba\xc2\x09\x2a\x29\xf6\xe9\x7f\xee\x13\xee\x71\x3f\xc6\xb4\x62\xb7\x93\xc6\x92\xb6\x1c\x66\xca\x55\xc2\xfb\x7a\x89\x1a\x00\x55\x99\xfe\x53\x22\x80\x7b\x76\x08\x95\xc6\xf3\xbd\xe6\x83\xf3\x7f\x66\xa2\x84\x53\xbd\x28\x51\x4f\x43\x80\xf6\xbe\x18\x4b\x84\x07\x7e\x44\xc8\xab\x07\x67\x25\x3a\x56\x36\x14\xa7\x5f\xdb\x0b\xf2\xca\x46\x12\x5a\x31\x70\x2f\x3b\x8f\x44\x95\x4d\x61\x53\x8e\xd1\x40\x6d\xa9\x8a\xd2\x5c\xa2\xb5\xaa\x19\x52\x1d\x37\x53\xaf\xf1\x23\x2c\x8c\x11\xe2\xb4\x07\x5f\x2c\xbb\x34\x8d\x85\x01\xbb\x83\x42\x3d\xb5\xba\x9a\x0e\x9d\xe7\x92\xd7\x88\x46\xe5\x41\x60\xe8\xae\x91\x14\x90\x7f\xb1\x32\xcf\xce\x68\x80\xd8\x18\x2d\x4e\x03\x90\x77\x00\x97\x11\xa8\x68\x12\x4d\x8b\x55\x32\x32\x9d\x97\xc1\x65\xe5\x70\xc4\x6c\x03\x22\xf2\x13\x36\x4d\x72\xe8\xc3\x50\x95\xdf\x86\x3e\x3b\xb1\x04\x70\x28\xeb\x52\xfd\x5e\x16\x64\x49\xa3\x9f\x21\x53\x5f\x7a\xf5\xa8\xc0\x02\xb0\x0b\xb0\xc2\xa3\x69\x4a\x12\x2e\x51\x17\x4d\x15\x3e\xe1\xcf\xe2\xf0\x56\x74\x35\x79\x62\x50\x63
\xf9\xe8\x57\xd3\x3c\x15\x06\x20\x06\xc7\x5c\x6a\x51\xe8\x78\x4f\x99\x6c\x60\x38\x3d\x8a\xc4\xf1\x78\x3b\xfd\x06\xfd\x62\x36\x64\xa2\x99\x66\x08\x62\x98\x4c\xb1\xb7\xcd\xb0\x5a\xae\x02\x42\x85\x6c\x9c\x4c\x48\xe8\xc8\x91\x5f\x5e\x18\xa8\x07\x04\x97\x83\x0d\x0c\xc5\xe3\x5f\x35\x1d\x5e\x1c\x66\xea\xc1\xc7\xd5\x14\x41\xb2\x17\x6d\xd6\x39\x38\x1f\xf4\x44\xd3\xbd\x43\xa4\xe9\xbc\x31\x78\x06\xf4\x10\xd0\x0b\xd8\x03\x07\x26\xfd\x5a\xc4\x34\x27\xf3\x3b\x8d\x27\x2a\x20\x54\x9c\xab\x2f\xbe\xf2\x1c\x71\x0d\x13\x7d\x7d\x5a\x56\x9b\x87\x2b\x31\x85\xd1\x70\xbd\x03\xd7\xbe\x98\x00\x3b\x87\x53\x2c\x0b\x2c\x7b\xda\xd8\x8b\x6c\xd1\x6c\x71\x2e\xb0\x5a\xc0\x36\x3a\x69\x86\x99\x7a\x14\xd8\x82\xc2\xcd\x1f\xda\xbd\xed\x35\x39\x5d\x78\xe5\xdc\x9d\x9d\xae\x0e\xa4\x2c\x61\x8e\xb2\xde\xca\x95\xaa\xbf\xbd\xf5\x1c\xa2\xf8\xd6\x77\xc2\xd1\xec\x9e\xb0\x63\x3a\x3e\xf0\x77\xbf\xd6\x6b\x22\x20\xdb\xde\xc1\x32\x1a\x50\x1a\xfb\x6a\x88\xac\x36\x16\x3f\xdd\x28\xcf\xce\x49\xfa\x17\xf1\x5c\xa9\x04\x6f\x71\xda\xe5\x5e\x0e\x51\xc7\x8a\xa2\xd9\x75\x83\xe6\x05\x98\xde\xf1\x69\xc6\x6a\xe7\xa7\x4a\xec\xab\x61\x46\x41\x83\xe5\x2b\x53\x15\x96\xc4\x49\x98\xdd\x28\x04\x86\x51\x18\xf5\x11\x70\xcc\xed\xea\xaa\x37\xf4\xcd\x0c\xe7\x9d\x1d\x34\x9c\xf3\x4d\x8b\x02\x6b\x77\x50\xf9\x66\xd4\xa6\xfd\xa8\xb0\x75\x03\xea\x5c\x58\x55\x86\xc4\xe7\xd1\x0a\xad\x90\x70\xdd\x41\x72\x2d\x44\x8b\xfc\x26\xf1\x68\x9b\x8c\x23\xc1\x6b\x1e\x58\x19\x40\xba\x4c\xcf\x51\xfd\x9d\xb6\x3b\xc8\x22\x8c\x37\x9a\x6b\xe4\x27\x86\x41\xac\x0a\x67\x0d\x9c\xd6\xb8\x72\x9c\xb6\x47\xe4\xc4\xe9\x64\xd1\xfb\x2c\xa2\x6f\xd4\xf1\x3d\x06\xf1\x41\x8a\xf5\xd8\xd6\xa9\x76\x22\xb9\x01\xa8\x9c\xed\x12\xef\xbb\x10\x97\xad\x22\xde\xfd\x24\xf3\xa6\x39\x74\x7b\xf8\x78\xae\xd5\x08\x8c\x73\xf8\x48\xe8\x56\xe0\x2e\x1f\x8a\x8a\xe9\xf6\x59\x28\xc3\xb5\x35\x21\x1b\x76\xa5\x96\x42\xae\x96\x42\x4a\x4a\x4a\x2a\x8b\x8a\xbb\xec\x72\x55\x70\x44\xb5\xbc\xda\x74\xe8\xec\x90\x3b\x26\x36\x49\x7a\x3b\x66\xa0\x84\xfe\xd0\xe3\x98\x44\x0c\xbe\xd0\xa2\x9c\xef\x8e\x98\x19\x65\x58\x4d\x60\xad\xca\x0f\x61\x95\x90\x0a\x45\xfd\xd1\x71\x06\x5f\xb5\x38\x61\xf7\x27\x07\xe0\x9e\xb0\x0b\x92\x7e\x25\xea\xec\x65\x6b\x17\x45\xb1\x2c\xf0\x74\x5c\xbb\x21\x02\x5e\x42\x83\x6f\x8b\x49\xaa\xe6\x38\xe8\xcc\x9d\x77\x61\x1c\xe2\x90\xac\x0e\x45\xdd\x10\x46\x3d\xc8\x21\x1d\x80\x4a\xb0\xc2\x34\xde\xbe\x2e\x0e\xc6\x19\x21\xb5\x11\x97\x25\x59\x39\xdd\x30\x94\x81\x85\x51\x46\x17\xd3\xef\x8d\x51\x18\xb3\x33\x56\xb2\x05\xa1\xa2\x60\x29\x4b\x25\x4d\x57\xe1\xe5\x2e\x63\x3b\xc3\xe2\xcf\xa0\x98\xae\x2b\x97\x86\x0b\x9e\x16\x78\x1f\x76\x3d\x33\x24\xc7\x0c\xb2\xe7\x33\xde\x81\xe3\x0f\xeb\x22\x0e\x64\x5a\xb6\x6a\xbb\xa0\xbf\x40\x59\x47\x3f\x5e\xff\x70\xcf\x0d\x09\xaf\x4b\xb4\x0c\xde\x09\xf0\xe7\xe2\x34\xeb\xda\x82\x90\x1d\x54\x9b\xec\x7a\xe5\x10\x83\x0f\x32\xd1\x8c\x15\xde\xde\x75\x85\xce\x74\xc5\x3b\x07\xbf\xdf\x78\xe9\x04\x33\x7f\x95\xfb\xe8\xff\x55\x16\xd1\x62\xdb\x04\x0b\x97\xd2\xbb\x80\x4c\xab\x9b\x2c\xa6\x4e\x00\xa6\xe3\x4b\x04\x42\xed\xff\x22\x4e\xf7\x8b\x60\xf6\xa7\xff\xe6\xb9\x6a\xc9\x84\x98\x01\x23\x60\xc3\x5f\xc0\x8c\xe3\xe7\x67\x43\x88\x40\x0a\xa8\xc3\x3f\xab\x53\x5c\x82\xc4\xad\x83\x9a\xc4\x90\xa9\x72\x52\x1e\x1d\xb8\xa7\xd6\xa7\x8f\x57\x64\x20\x96\x72\xb3\x62\xa1\xcd\x07\xae\x1f\x64\xa6\x43\xd0\x41\x56\x05\x5b\x9b\x8a\x6b\x7e\x8e\x7d\x0d\x1a\xb7\x52\xc0\x06\x4e\x6d\x8d\x5b\x1f\x53\xea\xd4\x8e\xdf\x40\xd8\x95\x3a\xc4\x89\xe3\xe6\xc0\x47\x13\xde\x1d\x98\x49\xb0\xac\x07\xa4\x84\x7d\x03\x9f\x36\x30\x58\x7e\x62\x92\xd9\xe1\xcd\x7d\x96\x7f\x3b\x8b\x39\xc8\xfe\xe5\xeb\x96\x04\xa9\x0d\xfb\xbb\x02\x9f\x55\x90\xdc\xd7\x9c\x16\x8c\x31\x83\x58\xaa\x0
0\x64\x7f\x26\x0b\xf1\x68\xd3\x0c\x7e\x30\x07\xf9\xb0\x4c\xf3\xe2\xd7\xb7\xe6\xc8\xe5\x9e\xa0\x01\x35\xd7\xe1\xef\x66\x0f\x34\x07\x27\x9f\x3c\x80\x20\xa3\x73\x39\xc0\xe3\xba\xe7\xdb\x36\x32\x22\x62\x2d\xc5\x28\xad\xc3\xbd\x76\x6e\x9e\xa2\x50\x78\x5d\xec\x7f\xf2\xf9\x62\x7a\x6a\xa4\x0a\x2d\x72\xad\x37\x53\xa0\x57\x06\x21\xa8\x72\x53\x6d\xe4\x91\xfa\x5b\x03\x8a\xae\x30\xf1\xbe\x3e\xf2\x4c\x00\x1f\xd5\x46\xbb\x36\xd5\xb6\xef\xfd\x6c\xdf\xcc\x30\xf6\x9a\x7b\x72\x7c\x75\xb3\x6c\x66\xa8\x34\xc3\xc9\xbc\x4a\x0b\x7f\xc7\xa1\x3c\x6b\xbe\x39\x22\x9a\xed\x0a\x89\xd7\x57\x66\x53\xee\x5d\xd0\xb8\xb5\x6b\x60\xf8\x1b\x22\x72\xdf\x93\xb7\x0f\x1b\x1b\x1b\xd8\x87\x79\xf5\x51\x01\x77\xb6\xab\xa2\xc5\xff\x52\x22\x77\xdf\x8e\xa0\x7c\x69\x83\x5b\x69\x9c\x9e\x17\xba\x27\x05\x53\x01\x06\xb3\x67\xc4\xa9\x17\x82\xe2\x44\x47\x75\x3d\x29\x08\x78\x68\x39\x60\x3d\x0f\xdf\x85\x73\x2c\x1e\xb3\xc8\x1b\xa1\x74\x9d\x62\x7b\x85\x58\x71\x3b\x54\xe6\x88\xa0\xb7\xab\x24\x2d\x49\x76\x91\x14\xcb\x02\xea\x30\x73\xa1\xca\x40\x89\x47\x0d\xab\xb6\x91\xb5\x5d\xbb\x63\xec\x47\xb5\x42\xc9\x7c\xf7\x55\x8b\xec\xfc\x5b\xfa\x38\xf5\x97\x34\x75\x38\x01\x7e\xa8\x89\x4d\xf3\xb3\x0e\x41\xe8\x92\xde\xa4\x62\x3f\x42\x03\x5a\xe9\x6e\x65\x3c\x1b\x70\x71\x9e\x37\xe9\x50\x95\x3a\x69\xaa\x92\xb5\xfb\x0a\xe1\x8a\xaa\xc5\x3f\x5d\xab\xf0\x94\x7c\x61\xcc\x7e\x50\x84\x16\xbc\x67\xbe\x6e\x1e\x13\x07\x27\xd6\xe8\x79\x63\x88\xec\x26\xea\x81\x8f\x68\xcf\x03\x17\xf9\xc0\x73\x3c\x2e\x49\x36\xfc\xf7\xca\xa1\xa8\xcf\x52\x57\xd6\xd5\x87\x9b\x2c\x9f\x1e\x7f\xee\xe1\xd6\x8d\xc7\x90\xd4\xc8\x93\x21\xec\x19\x71\x11\x30\x9f\x05\x6d\x23\xe0\x38\xa3\x27\x9c\xbe\xfb\x20\x35\xc1\x59\xf9\x0b\xe6\xc1\x5c\x4e\x51\x8f\x08\xf4\xef\x8d\x92\x3c\xad\xd5\x17\x1d\xdb\x7c\x60\xe8\xfc\x97\x45\xa0\x6a\xc0\xe8\xa5\xfa\xe9\x45\x75\xe0\xc3\xd0\xdf\xef\x0f\x3d\x70\x3b\x22\x77\x62\xa2\xf1\xfe\xf1\x91\x02\x30\x8d\x24\x02\x8b\xd8\xca\x10\xb9\xf3\xec\x84\xf1\xa1\x98\x00\xd2\x0f\xc2\xfc\x0a\xcc\x0b\x7f\x89\x75\x45\xec\xce\x50\x2e\x80\xe2\x8f\x50\xac\x63\x78\x3f\xe2\xf9\x2f\x7c\x37\xa3\xd4\xd8\x7a\x97\x81\x2a\x1a\xe4\xae\xb1\xef\x2b\x70\xb1\xf3\xc7\xa6\x5e\xe8\x9d\xee\x6f\x06\x89\x73\xbc\x88\xb5\x38\x47\x65\xb4\x55\x62\x26\x9f\x1e\xa0\xb6\xc9\xd3\x33\x78\x93\xc4\x5b\xbe\x0f\xde\xb0\xd5\x6b\xa0\x0d\x92\x0e\x76\x16\x0a\x9a\x64\xe8\xfc\x82\x25\xdc\xa4\x84\xef\x38\x98\x1c\x69\xbc\x0a\xf3\x23\x4e\xe7\xa8\xb1\xdf\x7c\xbb\xec\x32\x28\x99\xde\x5f\xfe\x73\x33\x99\x9f\xf9\x39\xde\xf4\xfe\xc5\xcb\xda\xa9\xe4\x11\xa7\x2e\xbf\xbb\x97\xe3\x91\x80\xca\xfc\x1a\x98\x40\xd6\xad\xdf\xc8\xf2\xd0\x77\xc7\x1f\xb9\xef\xf3\x38\x81\x87\xe8\x42\x4e\x01\x9b\x8e\xd1\x83\x99\x31\xbc\x22\xac\xe9\x07\x5e\x4e\x05\xf7\x0e\x34\xa3\x9d\xcf\xa0\x46\x99\x51\x34\x4f\xba\xb6\x9c\xa8\x14\x95\x57\xc6\x8f\xdf\x90\x45\x46\xe3\x2d\x11\x77\xb7\x77\x57\x1c\x23\x10\xc6\x15\x11\x63\xed\x33\xc2\x8e\x5a\x49\x2d\xb6\x3e\xa5\x94\x4a\x32\xfd\x25\x81\x9f\x4e\x14\xc1\xc6\xb4\x9e\x17\xa3\xcb\xf7\xec\x54\xe8\x1f\x8e\x82\x8e\x4d\x13\x76\x27\x3d\x7f\x9e\x74\x97\x5a\xb5\x92\x93\xc1\x5e\x37\x3d\x59\xd6\xfb\xa2\x96\x12\x39\x0c\x50\x7b\x5d\x67\xf5\x2b\xe1\xfe\x3c\x19\xab\x85\xc8\x05\x77\x7b\xcf\xe3\xa5\x86\x70\x3f\x20\xea\xf0\xaf\x35\x9b\x56\x48\x20\x9a\x58\xaf\x1c\x3c\x36\xff\x7c\x27\x11\x7e\xbd\xf0\x3d\x61\x7f\xc0\x44\x6c\xa0\xae\xc0\xb7\x8b\x40\xc6\xb3\xba\x04\x0e\xd1\xb7\x65\x81\x18\x1a\x19\x36\xab\x72\x08\x55\xe3\x43\x5c\x1b\xf1\xd5\x6b\xc5\x6f\x1a\xa7\xd3\x7b\x5f\x64\x93\x83\x12\x77\xc0\x29\x2c\x28\x78\x4d\xe7\xcd\xdf\x5a\x70\x04\x98\xb2\xea\xb2\xac\x90\x52\xf4\x95\x0e\xa4\x3b\x3e\xbc\xc8\xd0\x23\x99\x7f\xf3\x3c\x57\x95\x93\x5b\x58\x29\xff\x40\xc4\xa1\x
f3\x78\xfd\xc1\x28\x42\xf8\x3f\xcc\xec\xff\xe5\x60\x4b\xff\x2b\xad\xff\x0f\x4d\xfe\x7f\xd8\xe0\x32\x56\xa2\x8d\x37\xfa\x71\xaa\x9f\x1d\xd7\x9d\x93\x50\xe2\xdc\x86\x63\xd2\x0d\xb6\xbe\xc7\x1a\x87\x4f\xab\x38\x9d\x6b\x7b\x1b\x72\x72\xae\xb3\x32\x33\xeb\xd4\x30\x56\xf1\xd5\x9b\xec\x7f\xb3\x7b\xcd\xc4\xa8\x33\xf5\xe5\x9b\x86\x33\x27\x42\xeb\x90\xda\x62\x1f\xed\xb6\x6f\xb7\xa7\xe6\x3d\xc4\xa9\xdb\xf3\x1a\x71\x77\x68\xf6\xf9\xd0\x82\x0c\x7c\xa0\x74\xc9\x36\x85\x61\x8c\x12\x93\x7b\x59\xb2\x20\xde\x65\xac\x83\x9f\xf2\xcd\xde\x88\x72\x04\x43\x4a\xb5\xc4\xf2\xfe\x73\xd5\x5d\xc6\x84\x5e\x78\xa2\x58\x0d\x9c\x4a\xfe\x40\xd8\x8c\xcd\xfd\x96\x3b\xab\x25\x39\x06\xa0\x41\xef\x70\x7f\x52\x00\x36\x87\x9a\x07\x36\xa8\x7e\x47\x02\x3b\x51\x2d\xe5\x56\x23\x01\x5a\x53\xa1\x7b\x63\x53\xb7\x66\x2d\xeb\xcc\x1e\x92\xec\x76\x1b\xbb\xfd\x48\x69\x69\x89\xcd\x17\x0f\xa9\x5f\x6c\x15\xf4\xbe\xad\x80\x07\xb2\x0c\x38\xf3\x9e\xfe\x5d\xc8\x97\x5e\xdc\x23\x95\x6b\x08\x28\x52\xd3\x2f\xbf\x9e\x43\xd1\xec\x14\x8f\x3f\xd7\x36\x11\x2c\xbb\x6a\xe3\x6d\x4f\x91\xf1\x35\xe6\x19\xde\x6d\x71\xda\xdd\x4b\x22\x5a\x7d\xe0\x7f\xcb\xa2\xa7\x86\xe9\x65\xef\x7c\x4a\x4e\xa9\x5d\x9a\xbd\x9d\xea\xeb\x4e\x53\x1d\x5d\x2f\x1b\x97\xea\xd6\xdc\xc2\x11\xff\x1b\x0f\xf9\x81\x38\x45\xc0\x4c\x68\xda\x15\x5f\x53\x97\x96\x8b\xce\x9a\xf5\x0c\x3d\x17\x03\xeb\x9d\x41\x03\xd7\x05\x9a\xeb\x6e\x7d\x1b\xf8\xf8\x63\xd3\x7c\x17\x0d\x06\xac\x6b\x72\xaf\x9b\xc6\x87\xd7\x13\x56\xe8\xcd\xc6\x8e\xaf\x3b\xc3\xa2\xa7\xad\x2b\x32\x5a\x2a\xa5\x5c\xc5\x91\x1a\x78\xf9\x36\x7a\x55\xc0\xc7\x93\x5f\x31\x45\x48\x54\x9f\x16\xfc\xfd\x2b\x32\x9b\x0b\x2d\x64\x0a\x13\xfb\xcd\x7b\xfc\xc5\xfe\x83\x8e\xe6\x70\x70\x4b\x03\x9a\x50\x98\xb6\x18\x46\x71\x7f\x2d\x9a\xde\x48\x62\xe6\x0a\x68\xe7\x94\x5a\xe9\x22\xcb\x42\xaf\x9e\x2d\x20\x7c\xf4\xf9\xb7\xe5\x0b\x9d\xf4\x5b\xb1\x06\x7c\x5a\x58\x66\xa5\x1e\xf9\xd7\xa1\xae\xd7\x9b\x5b\xed\xa1\x3e\x56\x10\x6e\x95\x5b\x5d\x26\x39\x01\x72\x9d\x0d\xac\x5f\xf5\xa1\x94\x66\x5c\xfb\x2f\x19\x0d\x35\x5c\xcd\xe7\xd7\xd5\x5e\x7f\x9f\x47\x90\xfc\x42\x7a\x1b\x2f\x98\x9b\x6a\xa6\xaf\x89\x0d\xe9\x33\xff\x74\x7f\x5e\x84\x20\xf9\xd0\x71\xd4\x1b\x81\xfd\x7d\x69\x64\x32\x35\x30\x43\x7a\xbd\xe7\x09\x7f\x3f\x95\x53\x64\x80\x65\x61\x08\x3e\x41\x5e\xdc\x17\xe5\x82\x95\x8c\xf7\x37\xc7\x31\x74\xbc\xc0\x44\xac\xc9\xe9\xa1\x27\x1c\x57\x92\x3c\x7f\x36\x5a\xe2\x9a\x58\xe4\x28\xe6\xe3\x72\x40\x5e\xf2\x1c\x5e\xf3\xbd\xb5\x6b\x90\x98\x75\xf6\xdc\x8b\x96\xe3\x8b\x4c\xbe\xad\xa3\x82\xc3\x7b\xfc\xe5\xaa\x76\xcd\xca\x6d\x05\x62\xa1\xee\x82\x62\x0a\x19\xe4\xcd\x69\xe5\x0c\xdc\xad\x50\x6c\x04\x89\xdb\xec\x73\xe8\xaf\xeb\xbf\xd9\xfb\xed\xca\xbb\x99\x3e\xc4\xdb\xba\xee\x57\xde\x67\x9b\x1f\x56\x52\xe7\x94\xb9\x24\x18\xee\x85\x88\x7d\x47\x51\x72\x56\x6d\xd9\xfc\x9c\xe9\x4c\x8c\x9f\xfe\x26\x02\xd8\x69\x1d\x4e\xf8\x2f\xf9\x02\xe5\x60\xd3\x5d\x61\x61\xea\x40\x5b\x78\x97\x19\x8e\xfb\x5e\x19\x08\x4e\xc0\xd4\x77\xf7\x82\xed\x12\x0a\x55\x8b\xf6\x17\xc0\xd5\x40\xd5\x44\xbb\x80\x88\xe6\x85\x2d\x11\x82\x76\xaf\x74\x15\x13\x74\xac\x08\x23\xa8\x63\x61\xfc\xde\xea\x50\xb3\xbb\x35\x38\x65\x6c\x7c\x24\xc2\x3a\x60\xaa\x2e\x5e\xd9\x9b\x79\x35\xa8\xd9\x5d\xcb\x71\x8c\x3d\x8b\x77\xa8\x9a\x2a\x83\x98\xf8\xe7\x23\x17\xcb\x42\xfc\xde\xe0\xfa\xab\xbc\x34\x0b\x16\x5c\x13\x4f\x6d\x2d\x10\x2f\xba\x1d\x59\x0a\x92\x2c\x7b\x55\xa0\xc8\x13\x52\xd2\x2b\xfc\xc3\x17\xc2\x45\x77\xe8\xb4\xf8\xcb\x6b\xd6\xf6\x73\xf3\xb5\xe7\xda\x9a\x49\x62\x8c\x38\x6b\xe2\x7d\xb4\x44\x1d\xee\xdf\x83\x7e\x05\x2a\x51\x64\x08\x88\xce\xac\xa9\xf0\x1d\x6f\xa4\xd0\x8b\x8c\xc6\x97\x5c\xbb\x85\x1a\x0b\x4b\x49\x3a\x93\x0f\xdf\x51\x9a\x60\x9c\xc4\xb7\xc1\x4d\
xac\x1a\x62\x88\x30\xa8\xde\xa6\x6f\x0e\xa9\x03\xb1\x81\x06\x93\x69\x7a\xa8\xe0\x6c\x80\x4b\x08\x31\x26\x4f\x27\xdd\x6f\x97\x16\xc1\xee\xb4\x17\x34\xc0\x3f\xbf\x70\x9e\x4e\xa9\xf0\x18\x82\x3d\x8c\x98\x56\xc6\xbf\x08\xf0\x22\xa2\xca\xd6\xf9\xe8\xb9\x69\xa0\xbb\x3c\x2f\x57\x94\x63\xb4\xb7\x03\xbf\x52\x22\x13\xa6\x56\x63\xc0\x4a\x97\x9f\x9a\x34\x87\x41\x28\xbc\x5a\x3e\xc6\xc5\xb1\x2d\x14\x31\xaf\x82\xa8\x2f\x17\x68\x91\xe1\xbb\x6d\x93\xb1\x42\xca\x41\x3e\x46\x09\x40\x21\xf9\x42\x6c\xab\xb1\x55\xdb\x50\xe9\x6d\x81\xe9\xa5\xba\xf8\xb1\xd1\xad\xe7\x0f\x4b\x11\x41\x57\x0b\x68\xec\x39\x82\xbd\x35\xe8\xe2\x0a\x83\x01\x5c\x5c\x97\x1e\x70\x7a\xed\x47\xfe\xfa\x81\x43\x45\x16\x96\x2d\x23\xa6\xb4\xe8\x15\xdd\x6b\x3b\xfe\xb6\x95\x07\xa5\x5d\xd7\x0a\x42\x88\xed\x78\xc3\x43\xe8\xad\x2c\x74\xfb\x16\x69\x57\xff\xc4\xd7\x6b\x37\xae\xff\x98\xcb\xee\x93\xb9\x2c\xa5\xa8\xc0\xe3\x29\x17\x27\xb2\xd8\x2f\x6a\x24\xbd\xa1\xee\xe8\x82\xea\xc7\x98\x3c\xb5\x3c\x16\x47\x13\x10\x2a\xba\xc0\x06\x6e\xa7\xd5\x17\x01\x63\xf2\x28\x7e\x5d\x24\x3b\xbc\x73\x19\x77\xfe\xba\xad\x75\x03\x20\x22\x8a\x45\xfd\x84\xbf\xf6\xa7\xa2\xb3\x9a\x50\xd4\xba\xea\xdf\x81\x77\xfe\x88\xbb\x38\x64\x87\xa3\x8f\x7e\x88\x27\xdc\xfb\x48\x20\x22\xd4\xf9\x16\x5b\xdd\x68\xae\xb7\x3b\xcd\xea\x11\x93\xed\xf9\x82\xa1\x15\x4a\x1b\xf0\x48\x38\xdd\x9b\x4e\x01\x12\xc2\xb8\x0c\xa6\x0f\x43\x22\x0b\xd3\x09\x63\x40\x27\xa7\x73\x1b\xf8\xe3\xbe\xcd\x7c\x45\xcf\xa5\x1d\xf0\xd2\xf0\x64\x4a\x60\x22\x6a\x77\x8f\x82\xeb\xf1\xfc\x55\x23\x00\x6c\x90\xf4\xfe\x13\xd7\xe1\xca\x7b\x91\x05\x69\x6d\xab\xa7\x5c\x93\xec\xc5\x23\xb4\x38\x0d\xbf\x66\x75\xe5\x69\xfb\x11\x78\xc1\xb8\xf0\x26\x00\x60\x73\x5a\xee\x6d\xcd\xd5\x90\xbb\xa2\xcd\xa2\xf5\xf4\x4f\xfa\x70\x4b\xe5\x3c\x5d\x91\x18\x5a\x3a\x7f\x31\x31\x9a\x95\xd8\xd4\x52\x2d\xed\x0e\xe3\x95\x74\x0e\x33\x1c\xe2\x75\x14\x0d\x45\xea\x76\x62\x37\xed\x0b\x1f\xd7\xe3\x77\xe0\x00\x76\x0e\xe8\xee\xac\x37\x44\x3f\x7b\xc1\xf5\x61\x5e\x16\x65\x13\xff\x93\xb7\x48\xf2\xe1\x58\xdd\x92\xcb\x9e\x1a\xaf\xdd\x70\xe0\x40\x49\xd0\x00\xc2\x24\x29\x01\x93\x9f\xb4\x21\xcb\x31\xb3\xef\xf7\xf0\x4a\x8f\xaf\xd2\x76\x1b\xc7\xae\xa1\xe1\x33\x7a\x07\x5f\x49\x73\xad\x9c\xc5\x18\x0d\xfb\x35\xea\xb9\x4b\xd2\x3d\x54\x1d\x27\x2f\x6a\x85\xd6\x2e\xb4\x88\xf3\x2f\x75\x9c\xce\xe9\x99\x56\x0b\x17\xf2\x17\xbc\x39\xfb\x7a\x05\xca\x78\xcd\x93\x41\x60\x0f\xf7\x68\x85\x9d\x6b\x40\x70\xab\x6e\x23\x0e\xb0\xe0\xd7\x89\x85\x41\xd9\x07\x55\x19\x53\x8c\x5c\x94\x38\xd3\xaf\x3a\x95\x1c\xe8\x4a\xc6\x6c\x76\x52\xeb\x6a\xc7\x19\x95\x75\xa2\xdb\x12\x42\x0b\xbf\x90\x6d\x75\x48\x16\xe4\x5f\xcf\x0e\xba\xc3\x5c\x5f\xb5\x62\xa6\xd9\x6d\xf3\xba\x0e\x42\x10\x87\xb7\xac\x74\x5f\x8c\xf5\x8a\x73\xed\xed\x0c\x4f\x24\x95\x5c\xcf\xc1\x3d\x69\x5e\xa3\x6a\xa4\x18\xe5\x21\xb0\xf5\xe3\xd8\x2a\x57\xbb\x30\x34\xa6\x88\x85\x60\x41\x1b\x5b\xd0\xdc\x41\xa8\xc9\x91\x3f\xaa\x43\xc2\x9b\xca\x8a\xc8\xb6\xa4\x3b\x6e\xda\x63\x3f\xe0\x0d\x75\x97\x83\xaa\x16\x84\x58\x98\xb3\xd9\xdf\xd8\xd4\xf2\xbe\x23\xe2\x4c\x23\x13\x81\x29\x19\x85\x2d\xad\x1f\xef\x0f\x09\xa3\x5f\x6f\xab\xe7\x14\x39\x8c\x8f\xe4\x3c\x8d\xa6\x1e\xc7\xb8\x5b\x3a\x41\x7f\x81\xec\x39\x15\xd0\x55\x46\x50\x9c\xc2\xbe\x8b\x59\x85\x0b\x9b\x26\x65\x43\x3a\x77\x43\xd4\x79\x09\x8a\xd2\x9b\xeb\xc0\x6c\x01\xd2\x6f\x26\xf9\x82\x91\x3c\x80\x02\xcb\xad\xb8\x14\x14\x4d\x1d\xc6\x5c\x7f\x4a\xee\x1c\x14\x53\x10\x2a\x51\x3e\x06\x05\x6a\x4b\x5b\x78\xbb\xe2\x26\xf1\x44\xad\x1a\xe7\x7a\x03\xd9\x9f\xdf\x63\x39\xbd\x0e\x40\xa0\x83\x9b\x4a\xd1\x6f\x1e\x51\xed\xed\x1a\x2e\x9c\x74\x67\x23\x9e\xc1\xb4\xb4\xc8\xdd\x41\x18\xd9\xb6\xd5\x92\x91\x33\x7b\x82\xb0\x16\x0f\xd8\xc9\x85\x65\x94
\x06\x1c\xb8\xb0\xa9\x4d\xb1\x5b\x19\x3a\x2f\xc3\x1a\x34\xa0\x01\x28\x75\xdd\x90\x97\x71\xf2\xe5\xa5\x8b\xc2\x06\x58\xf3\x81\x3d\x92\x42\x07\xc6\xc3\xe3\x1b\x7f\x6d\x17\x25\x4b\xb7\x6a\xee\xfe\xb9\x5c\x41\x5c\x9b\x0c\xd4\x7a\x44\x5a\x7b\xcf\x92\x25\x9b\xd6\x14\x83\xd8\xb9\x9e\x43\x9d\x21\x1a\x6a\x58\xda\xcf\x7b\x77\x34\xf4\x5a\xb0\x86\x4b\xa8\x3e\x53\x9d\x04\x95\x8c\xe9\xbb\x11\x0f\x0b\xcf\x02\x2c\x5c\x99\xd2\xb9\xd8\xe7\xf7\x7d\xd0\xac\x20\x99\x65\x35\x32\xa6\x18\x79\x26\x11\x50\xe4\x52\x4b\xdf\xaa\x99\x72\x67\x90\x7a\xb9\xa6\xae\x93\x5f\x88\xae\x17\x3c\xde\x06\xe6\x1b\x60\x85\x03\x6c\xa3\xd3\xb8\x83\x83\x4e\x0d\x2d\x4e\x39\x80\x63\xbd\xc3\x98\xc9\x6e\xb7\x41\x5e\xf2\x50\xf9\x08\x54\x85\xe6\xf7\x70\xd7\x8b\xcb\xc0\xe1\xce\x74\xf4\xd4\xa3\xd5\x08\x65\xeb\x10\x84\x9e\xd6\x04\x84\x43\xf9\x53\xca\xf1\xcd\x73\x48\x13\x22\x6c\xc8\x7a\xb5\xe5\x3d\xe9\x98\x8e\xd0\x70\x09\xce\x6b\xb7\xbf\x9a\x74\x3d\xdb\x6b\x49\x7d\xf8\x7c\x69\xdf\xac\x72\xb5\x6a\x49\x04\x74\x61\xf0\x35\x4f\x36\x46\x8c\x73\xe0\xe2\x0c\x46\xd2\x02\x0e\x6c\xa4\x61\xef\x2c\x03\x21\x46\x8d\x11\x16\x33\x43\xb2\x1e\x4a\x1d\x41\xcd\x0a\x55\x5e\x77\x92\x6f\x14\xe7\xbb\x55\x05\xf3\x17\xf9\xd0\xd6\x1e\xb7\x8f\x46\x2d\x0f\x62\x7a\xb4\x4f\x84\x23\x88\xe9\x71\x8f\x56\x8c\x50\x9c\x58\x10\xfe\x99\x50\x00\xdf\x30\x47\xe1\x2c\xc6\x0c\x68\xcf\x83\x9c\x0d\xd2\x41\x04\x61\x3d\xbe\x9c\xac\x74\xd5\x81\xd2\x05\xc4\xdb\xfd\x9e\x7b\x14\x8a\xd2\xd8\xaa\x9b\xc3\xf9\xcf\x6e\xeb\x6f\x77\x06\x97\x49\x99\xa8\x45\xb6\xe8\x15\x47\x0a\xa6\xed\xce\x25\xa4\x8e\xd9\x7f\x11\xcf\xa1\x4f\x7c\xf9\xf2\x2d\x31\xc2\x66\x45\xcc\xff\x49\x63\xfa\x61\x6d\x6d\x4d\x51\xec\xb1\x54\xb9\xdf\x81\x3b\x2a\x0e\x68\xf8\x2b\xd9\xbc\x75\xff\x37\x9f\x2f\xda\xb0\x0d\x1c\x1a\x9e\x28\xcd\x08\xb3\xda\x42\x3e\xa0\xaf\x36\x3f\x34\xea\x15\xbc\x7c\x11\x6c\xe3\x9f\x88\xfa\xa3\x03\x8a\xde\x73\x37\x01\xea\x36\x16\x9b\x1a\xa4\xd4\x32\xfa\x64\x80\x55\xb6\x5f\x2c\xaf\xc5\x6e\x27\x59\x79\xc3\x43\xf2\xd3\x0e\x57\x98\x25\xb9\xd5\x1a\xd1\xc7\x92\x42\xe6\x9b\xa7\x82\xa3\xdf\x36\x42\xca\xb7\x3d\xe5\xb8\x29\x8c\x57\x48\x71\x3a\xf2\x05\x4f\xed\x7f\x29\x4a\x1a\x68\x4a\x37\x15\x3f\x6c\x6a\x80\x51\x19\x1b\xc2\x01\x47\x90\xfe\x7c\x28\x83\x0b\x44\x10\x71\x75\x60\x4d\xe5\xdb\xbe\xd5\xfe\x16\xfe\x19\xd0\x6c\x45\x19\x26\xdf\xf6\xd1\x15\x76\xb2\x01\x05\xac\xf1\xdf\x82\x87\x4c\x8a\xdc\xa7\x48\x13\xf3\x71\xb6\x0b\x64\x34\xe4\x04\x5e\xfd\x84\x92\x0d\x69\x0d\x19\x11\xb1\xe0\x62\x02\x8d\x5d\x83\x27\x91\xc5\xc6\x0c\xb2\xcd\xb2\xdd\xc3\x54\xb0\x9e\x97\x62\xdb\x6d\xf8\x8a\x21\x01\x5a\x9d\x13\xcc\x63\x6c\xff\x02\xf5\xe6\x3e\x63\x75\xc6\xbd\x87\x7b\x03\x44\x7c\xb3\xfa\x02\x9b\xec\x2a\x3a\x1d\x43\x0d\x7f\x3a\xa1\xba\x2e\xa3\x31\x67\xbb\x2d\xe3\xae\xca\x5d\x90\x85\xa1\xf9\xcf\x65\x37\xc7\xdb\x22\x3b\x33\x40\x0c\x6d\xf3\x26\xfb\x3f\x3f\x0f\xbe\xb7\x24\x03\x2f\xc8\x39\x72\xbd\x87\xf5\xa0\x35\x35\xea\xd9\x47\x94\x1c\xa1\x71\xd6\x19\xd2\x4d\xdc\x75\x6c\x40\xdf\x09\x3f\x19\xc2\xc6\x82\xb3\xb5\xca\x48\x38\x10\xd6\x5e\xd6\xa0\x41\x14\x57\x6f\xb3\x3a\x63\xb6\xd1\x70\x27\xfa\x29\xcd\xa8\xc4\x87\x3e\xb0\x1f\x93\x2a\x52\x92\xca\xc9\x9f\x85\x4b\xc9\xbb\x45\x5b\x3b\x70\x46\xf5\x76\x96\x98\x65\x77\x6e\x16\xad\x72\x83\x27\xeb\xe9\x59\xdf\xa3\xc1\xcc\x93\x2b\x31\x75\x65\x61\xcb\xa9\x53\xab\xa1\x32\x3c\xf0\x9b\xe2\xe9\xcf\x6f\xea\x39\xd1\x13\x4d\x0d\x71\x65\x67\x38\x47\xf2\x4e\x79\xbc\x0d\xd9\x89\xd3\x0a\xa4\x65\x14\x00\xf4\xe3\x90\x6c\xfc\x68\xc6\x34\xf7\xa9\x22\xc0\x67\x94\x89\xf2\xf1\xa0\x15\xcd\x75\x5f\x20\x17\x53\xe0\x35\xdc\xd2\xa2\xd3\x75\x06\x16\xee\x15\x9c\x3e\x8d\x3d\x07\x91\x14\x1c\xae\xbd\x52\x57\x58\x5a\x99\xc4\xc4\xeb\xc8\x0c\xb5\x37\x4
0\x81\xc7\x3b\x72\xfe\xf7\xb0\x61\x64\xc5\x85\x77\xb7\x9b\x8e\x1d\xc2\xbd\xfb\x41\xe6\xf2\x60\x2f\x8a\xc0\xa8\x75\xef\x91\xf4\xe0\xf9\xfb\xd5\xdb\xd5\xb0\xe6\x6e\xe3\x17\x20\x3e\x1c\x85\xa8\xf7\x9c\xb9\xc9\x00\x8c\xda\x17\x92\xab\x70\xbf\xf3\xdc\x6b\x2a\x4d\xbe\x01\xdc\xcf\xfb\xc8\x99\xc8\xb8\xcb\x2d\x1a\x80\x39\x4e\xa8\xe0\xa1\x60\x50\x4e\x11\x84\xed\x6f\x43\xf6\x5b\x4a\xdf\x1d\xb2\xdb\x8e\x9a\xd9\x7d\x69\x46\x38\xd4\xb7\x65\xa1\x49\x59\x30\x35\x3c\x87\x18\x7c\x01\xce\xff\xde\x47\xae\x07\xe7\xe3\xc6\x21\x24\x54\x90\x71\xf8\x5f\xfc\xba\x72\xcd\xaf\x9d\x34\x61\xef\xbb\x77\x1f\xb3\x32\x5b\x93\x9e\x5e\x8d\x04\xe7\x70\x3b\xc1\x86\x6f\xfc\x07\x1b\x6f\x95\x0f\xaa\x3d\x3a\xfe\xe0\xf2\x32\x88\x56\x7e\xbb\xc0\x52\xf5\x29\xbf\x09\x7e\x41\x6c\x3c\x18\xe4\x35\xdf\x47\x3c\x89\xc1\x1c\xe4\xd1\x08\x42\xa9\x9f\x5c\x51\xa2\xd1\x80\x39\x80\xf2\x85\x9c\x46\x29\x32\xe9\x67\x1a\xbd\x48\x01\x18\x46\xea\x36\x42\x8c\xc5\x0f\x9a\x4a\xe3\x14\x3d\xc1\x68\xa4\xee\xf0\xb9\x22\xaf\xca\x25\x48\x97\x5c\x20\xde\xda\x0c\x23\xe3\x57\xbe\xc1\xaa\xba\x68\xc6\xb0\xd3\xe5\x70\xea\x19\xd5\x28\x1c\xad\xb7\x4d\xee\xde\x10\xda\x6e\x63\x31\xdf\x9d\x4f\xef\x7e\x42\xc9\x2c\x65\x3c\xb2\x0f\x1b\xc1\x9b\xd5\xcc\x25\xa8\x6d\x06\x77\xca\x6b\xff\x72\xe9\x9a\x86\xd4\x2c\x92\x15\xaf\x52\xbc\xa9\xaa\x54\x1f\xb3\x65\x63\x21\x38\x9e\xea\xbc\x5a\x35\x22\x43\xc6\x0e\xc0\x43\xf5\x97\x9f\xf9\xbc\xb1\x4a\x76\xc5\x4d\x16\x87\x47\x84\xac\x8b\x55\x58\xc6\x4f\xec\x7a\x56\x72\xca\x3e\x8f\x20\xc6\x0b\xd6\x60\xd0\xd0\x07\xe2\xf6\x2e\x95\x39\x5e\x66\x60\xe1\x8e\xa0\x20\x28\x36\x7d\x78\x82\xc0\x4e\xae\x67\x07\x50\x2e\xa5\xa1\x0a\x25\xe8\x71\x3d\x78\x35\xf6\x94\xca\xb0\xb0\x19\x39\x0a\xa2\x48\x2e\xee\x8e\xdf\x65\x72\xdd\x21\x76\xbb\xd7\x56\xb8\x4d\xb5\x45\x1e\xfb\x8e\x74\xe9\x25\x92\xd3\x2d\xba\xe8\xd7\x39\xb7\xf4\x45\x69\x66\xa2\xc4\xec\x1e\x9c\x1f\xa2\x3e\xd1\x2d\xb7\x0c\x44\x89\x9c\x79\xf3\xe4\x42\x62\xd3\x2a\x2f\x37\x88\xef\x32\x19\x5c\xcf\x35\x94\x47\x7b\x6c\x1b\x12\xc3\x75\x8e\x0a\xe5\x53\x46\x4f\x84\xc1\x7a\xbc\xbc\x4c\x07\xe9\x37\x5f\x84\xa0\x17\xe0\xdf\xa0\xba\xdc\x03\xe2\xc0\x3e\x14\x27\x54\xd7\x29\xd2\x79\x33\x93\x8f\x7b\x56\x7f\xed\xd6\x17\xfa\x5f\xc4\x79\x83\x6a\x03\x15\x30\xe9\xfd\x78\xbd\xe1\x37\x1f\xbe\x0b\xff\x26\xd5\x45\xd5\xbc\x67\xec\x9f\xa1\x78\xfc\x64\x83\xe8\xc7\x99\xde\x3b\x88\x4b\x8e\xc0\x62\xe1\x6e\x8b\xc8\x28\xee\x44\x5e\x09\xc7\xcf\xf7\xe0\xf4\xa9\x55\x5d\x2d\xb2\x55\x7b\x9b\x01\x4c\x97\x0c\xec\x9e\xbd\x2f\xd6\xff\xb2\xd9\xb9\x59\x65\xb1\x32\xb0\x2d\x67\xe9\x78\x7d\xbc\x42\x56\x1b\x34\xf1\xbb\x33\x2e\x2d\x1c\xda\x85\xb0\xf1\x2e\x26\x24\x63\x05\x73\x32\x4c\x6c\x97\x67\x2c\xf5\xb5\xf5\x4f\x3b\x7d\x17\x6e\x7c\x1f\x31\xb6\x3a\x77\x65\xd7\xee\xf7\x51\xb8\xf3\xa9\x05\xb2\x3f\xc4\xd5\xff\x12\x89\x05\xe9\xd2\xc0\x3f\xfc\x38\x73\x7c\xe3\x1c\xd3\xaf\xfa\x21\x41\xe4\x85\x5f\xf6\x75\xc7\x73\x25\x9d\x7f\x47\xb4\x73\x33\x5d\x8f\x56\x32\x5f\x31\xe0\xa1\xae\xaa\xbe\xdc\xea\xfd\xb9\x3b\xdc\x5a\x30\xd4\x46\x4e\xa6\x21\x44\x9f\x7f\x1c\xd2\x67\x7d\x59\x72\x37\xdd\x5a\x3f\x1e\x38\xe0\x67\xf1\x74\xca\xd7\xaa\xc8\x07\x6d\x29\xbd\xad\x3e\x17\xf9\x55\xf7\x8e\xf5\x3f\x34\xa5\x32\x2d\x57\x96\xce\xa3\x01\xe8\xb3\x91\x7a\x45\x88\xd8\xa1\x3d\x67\x83\x2a\x83\x5a\x39\x7e\xcb\x9e\x8d\x18\x33\xb9\x59\x70\x41\xfb\xfa\xe2\xee\x8e\xaf\xe5\x48\x5e\xe7\x8d\x8c\x89\x6b\x28\xdf\x0f\x38\x89\x8b\xcb\x77\xac\x38\xe0\xbb\x8d\x87\x59\xdd\xf3\x4c\xba\xd6\x50\x03\x8a\x29\x7b\x16\x40\x6d\x19\xac\x3b\x7d\xa7\x73\xff\x47\x86\x00\xac\xb4\xa9\x0b\xf0\x6b\xf7\x57\x02\xf4\x3b\xf6\xc4\x6c\x45\x82\xfb\x7f\x06\xb5\xff\xa3\x66\x7e\x10\xb9\x27\xfd\x9c\xb0\xeb\xb9\x96\xe0\x0c\x16\x70\x07\x11\x
c1\x79\x85\x9a\xe9\xc9\xec\xe0\xf7\x5b\xea\x59\x9f\x3f\xc1\xaf\x5f\x12\xeb\x5f\x0e\xbe\x9a\x7b\xb1\x39\x4d\x1b\xf4\x7f\xf6\xcc\x53\xfe\xd7\x20\x04\xfb\xb8\xff\xa7\x61\xfe\xdf\x1e\xb2\xf4\xef\x71\x08\x0a\x94\x25\x84\x3b\x3d\x25\x31\xbf\xc5\x2b\x4b\xf5\xd8\x5c\x62\xd9\xc6\x7a\x6f\x4a\xd4\x7c\x9a\xfa\x27\x2f\x89\x87\xe0\xe0\x60\xcb\x6f\x6d\xc1\xba\x6d\x36\xa7\x3b\x24\xca\xeb\x26\x85\xad\xc6\x93\xb5\xfd\x32\x65\xe4\x5e\x6c\xac\x7a\xa5\x63\xcd\xc7\x91\x61\x5b\x53\xb0\x66\x8d\xd8\x34\xb2\x27\xbc\x3f\xe2\xd6\xed\xf6\x93\x77\x6d\x9e\xea\xf5\x0a\xb4\x84\xe8\x17\x9e\xe6\x60\x39\x3d\xaf\x4d\x45\x18\x36\x39\x4a\x14\x9d\x78\xd3\xdd\x99\x6a\xab\xda\x40\x60\xb8\x6e\xb1\xf5\xfc\x0e\x55\xf3\xef\x50\x77\x81\xf3\x32\xe0\x06\xaf\x66\x1c\xa0\xc8\xc4\x11\x2c\x5a\xac\xc4\x2f\xf2\x8b\xff\xa2\x4c\xa4\x6e\x17\x68\x5c\x57\xf7\x56\xb0\xea\x6d\x2b\x93\xe5\xf2\xe0\xf3\x23\x4e\xfb\x28\xe1\x1a\xb7\x5c\xbb\xd3\x08\xb2\x62\x23\xda\x8a\xec\xc6\x72\xf4\xfc\xc0\xca\xa7\xf5\x1b\x34\xf4\x83\x94\x0e\x29\x4b\xee\x6e\xc6\x66\x02\x21\x0f\x50\x55\xfe\xd3\x8d\x11\x17\x3a\xcc\x0a\x4c\x0e\xac\xf1\x21\x9d\x16\x1d\x4b\xc4\x07\x17\x23\xda\x42\x15\x13\x5f\xa6\x6c\x37\xd9\xac\xae\xe1\x4a\x75\x0b\x22\x3c\x1f\x9b\xb4\xf2\xdd\x78\xce\xba\xe4\x25\x50\xa7\x32\xed\x2f\x3f\xe0\x1f\x1e\xc9\x06\x44\x2e\x2f\x4e\x26\x65\x72\x81\x43\xfe\x31\xa1\xef\x67\x92\x72\x9a\xae\xd5\x34\x59\x06\x90\x1a\xfe\x26\xd4\xbd\x3a\x9a\x70\xbd\x1c\xd2\xab\xd2\x11\x84\x13\xcc\x5d\x95\x12\xd2\xf9\x92\x26\x8d\x0d\x24\xa2\xf7\x99\xa5\xe1\x94\x37\x16\xf8\x5b\x23\xef\x76\xd0\xf4\xce\x8c\xc7\xed\x0c\x17\x58\x74\x1b\x33\x92\xdf\x78\x0e\x12\x6a\x68\xa4\x1c\x05\x27\x4d\x93\x66\x3f\x37\x5d\x80\x73\x73\xe0\x5a\x1f\x10\xcc\x7c\xc3\xcd\xb0\x71\xf3\x5e\xf1\x65\xb8\x53\xcb\x78\x61\x15\x45\x5a\x44\x4e\x94\x86\x38\x76\x97\xac\xb5\x3a\x5d\x4d\x52\x5c\xde\x52\x3a\xef\x72\xfa\x87\x67\xf8\x10\x82\x04\x61\xb9\xac\xf7\xab\xc0\xfd\x8f\x5c\xb8\x95\xd7\x13\x81\xd2\x69\xf5\xcb\x23\x1a\x18\x71\x1a\x70\x2f\x3f\x37\x8b\xc8\x2c\xd2\xdc\xa8\xee\x91\x8d\xb1\x2c\x25\xe0\x81\x91\xab\x03\xc2\xff\x95\xcd\xd0\x08\x70\xdd\xdd\xca\x46\x7a\xdb\x3f\xd0\x3a\xbf\x27\x27\x34\xb3\x49\x90\x99\xaa\x01\x95\x4b\x61\xa4\xd1\xf4\x95\xae\x3d\x36\xd0\x62\x3f\xc6\x42\x2f\x0d\xe7\xd4\x60\xb2\x85\xf4\x87\xcf\x86\xc3\x78\x43\xfb\x52\x23\x81\xcb\x91\x30\xb1\x63\xd7\xf3\x9f\x31\x71\xbf\xe6\x0a\x24\x9f\xb0\x5a\xdc\x37\xfb\x63\x1e\x67\x11\x77\x27\x4e\x71\xe9\xe4\x49\xbd\xe9\x49\xff\x61\xce\xd9\x79\x10\xc1\x34\x7a\x45\xc2\x7b\x71\xab\x39\x7d\x3a\xd4\xdf\xc5\x61\x81\x3d\x1a\xe0\xe8\xa1\x3b\xfa\xff\x13\xf8\xd6\x6d\x13\x3e\xc3\x94\x73\xd4\x92\xc4\x62\x4d\xf1\xf0\x1b\x69\xac\x7d\xd6\xa6\x6b\x19\x7e\x5c\x5d\x1c\x9d\xdb\x1b\x26\xee\x50\x3f\xdb\x60\x4e\x6e\x3b\xee\x47\xa6\xf8\xe6\xe6\x32\x2f\x48\x77\xd5\x26\x58\x55\xa8\x3c\x31\xd3\x53\xa4\xa0\x45\xdd\x5c\xab\x1e\xbf\x0b\x92\x4e\xe3\x3b\x91\x6e\x88\xbb\xfc\x6d\x8a\xe7\xc2\xe1\xde\xa3\x35\xba\xf4\xd1\x3f\x5b\xdd\x1c\x7c\xf6\xae\x22\xa4\x17\xc8\xac\xbc\x86\x4f\xb0\x5f\x69\x29\x15\xb6\x37\x7c\x3e\xc5\x04\x89\x51\xb6\x2f\xa5\xcc\x1e\x95\x31\x22\x2b\x9e\x51\x5f\x63\xdc\x09\x15\x68\xdd\xb0\x08\xfe\x99\xa4\x60\x9c\x7f\x4e\x87\x99\xf9\xd3\x91\x54\x5a\x68\x0e\x75\xea\xb3\x2d\x60\x14\xbd\xd9\xd0\x55\xf8\xfd\xb0\x37\xb6\xd2\x91\xd7\x16\x8f\x4d\xea\x70\xaf\x8b\xe7\x3b\x19\x2c\xf9\x54\x4b\xbf\xc8\xeb\xa0\x44\xd4\x01\xa1\xdf\x55\xfc\xab\xc0\xed\x40\x9b\xe2\xbd\x87\x33\xbd\x43\x19\xb5\x3e\x7c\xb4\xde\xe3\xfc\x58\x08\xce\x8a\xef\xe6\xf9\x37\x7c\x9a\xbf\x85\x7a\x0c\x35\x8a\x25\xce\xbc\x15\xf3\x5e\x67\xee\x2c\x47\x41\xda\x1d\x88\x64\xa7\x26\x34\x3c\xa5\x6a\xf3\xb8\x27\x3f\x57\x2e\xb7\x2d\x78\xa2\x33\x77\x20\x91\
xab\xee\x67\xc2\x5f\x7a\xb6\x48\xe9\x39\x25\x96\xb1\x9a\x27\x17\xc1\x2e\x9e\xfd\x4a\x0c\x36\xa7\x1a\x5d\x5c\x8b\x44\xe2\x82\x91\x66\x2b\xa3\x99\x45\x21\xe7\x78\x16\x44\x87\x17\x38\xc2\xf4\x85\xce\x5b\xe1\x8e\x72\xfa\x6d\xd0\x6f\x77\xdb\x5c\x7d\xf4\xa6\x73\xc1\x82\x15\x25\xae\xe7\x19\xe1\x93\x69\xec\x55\xdd\x02\xa1\x27\x25\xd8\xf0\x40\x4b\xc7\x08\xe1\x74\xb4\x20\xfa\xf6\x0b\x40\x9f\xef\x43\xd6\x88\x60\xc5\x30\xf2\xd7\x78\x76\x39\x6f\x17\x35\xff\x6e\xe9\xf2\x44\xd6\x89\x80\x81\xf9\x4d\x96\x69\x2a\x38\x79\x27\x1d\xb5\x4c\x25\xd6\x47\xa5\xfb\xec\xe6\xe3\x2f\x6f\x0f\xd2\x5b\xaa\x8a\x68\x17\xcc\x9b\x7f\x68\x89\xe4\xef\x93\xe0\x6b\x01\x47\x3d\xd8\x22\x1e\xbe\xde\x7f\xd5\xd2\x2c\xd0\x65\x20\xb0\x53\xa1\x67\x84\x5d\x00\xbe\x15\xa3\x48\x93\xc7\xc3\x9f\x5a\xd5\x0b\x5c\x43\x26\xac\x53\xe4\x01\xc2\xa0\x91\xf0\x03\x83\x6c\xf8\x98\x76\x92\xdf\x63\x0d\x1a\x95\x03\x63\x1c\x31\x39\x45\x2a\x3b\x39\x46\xf4\x44\xa1\x65\xb7\x59\x69\xe5\xca\x96\x62\xc1\xdc\xa3\x08\xdf\x60\x8c\x97\x83\x51\xfe\x54\x36\x86\x0a\x8f\x1d\x7f\x87\xb7\xf5\x10\xe3\x7e\x13\xee\x54\xaa\x2f\x01\x95\xe5\x47\x07\xf9\x53\xe3\xaf\x6e\x07\x01\xc7\xa4\x26\x9c\x30\xb9\x0f\x69\xd1\xf5\xd7\x34\x9c\xc8\xbf\xc7\x07\x34\xc7\xc9\xec\x28\x60\x7a\xfa\x9f\xf5\x06\x4c\x0b\xda\x8b\x3d\xf5\xa5\x90\x07\x38\xd1\xa6\x22\x1f\xec\xf1\x20\x3b\xb3\xce\x18\xac\x73\xc4\x7c\xed\x6d\x4e\x3a\xda\x85\x0e\x20\x5d\x7d\xeb\x0a\xba\x3e\xe4\xf4\x69\xd3\x15\x1d\x60\x7c\xa6\xbc\x71\x74\xf6\x3f\xcd\x4d\x23\xce\xfb\x5b\xbe\x13\x0d\x05\x1f\x4f\x08\x01\xda\x49\x51\x9a\x5e\xfc\x6b\xa7\x13\x09\x72\x0a\x7b\x61\xaa\xe4\x21\xe0\xe2\x76\x1c\x7d\x49\xd5\xc7\x51\x44\xfd\x53\x48\x1f\x8c\xac\x16\xd6\x55\x50\x84\x22\xca\x2f\xfe\x8f\xa6\x47\x0a\x07\x8c\x88\x89\xbb\x22\x90\x58\x62\x1a\x75\x15\x51\x6c\xe8\x97\x35\x8a\x24\x8f\xb1\xd9\xbb\xbc\xe2\x7c\x7b\xdd\x32\x02\x1a\xf7\x02\x89\x2d\xb7\xff\x9e\xc6\xf0\xcd\x87\xd6\x56\xf3\xca\xc4\xd5\x92\x88\x11\x0f\xcc\x07\x17\x94\x81\x1e\x3b\x8f\x6e\x1a\xe8\x9f\x76\x9e\x6f\xa1\xd7\x50\x87\xe7\xc7\xb1\x75\x09\x26\x36\xfa\xb2\x58\x68\x76\x36\x01\x51\xea\x5b\xee\x2e\x0a\x25\x15\xbf\x4b\x1b\x68\x35\x86\x29\xe2\x1e\x21\x0c\x1e\xec\x28\x73\x03\xd6\xb0\x9e\x05\x50\x55\x68\x37\xd3\x7f\xad\xa3\x43\xd8\x2c\xfa\x85\x9c\x38\x44\xd6\x0c\x60\x69\xf9\x11\xcd\x74\x4a\xa5\xc7\x7f\x22\xe1\xd3\x24\xfa\x58\x40\xed\xd3\x90\x38\x1d\x63\xad\x4c\xf6\xf1\x0a\x1a\xd8\x54\x34\x53\x7c\x15\x31\xb8\xa9\xf0\x47\x1d\xcb\x5b\xd7\xfc\x2e\xe7\x6a\x23\x74\xdc\x2e\x24\x20\xb7\xff\xb5\x91\x83\x72\x85\xa0\xd7\x8b\xdf\xbe\xb3\x29\x3c\xc8\x3d\x49\x6e\xe5\x8d\x37\xf6\xe1\x74\xa3\x72\x0f\x7c\xed\xfc\x91\xca\x39\x7c\x83\xd9\x73\xbc\x90\xef\x21\x56\x9c\x86\xdf\x3d\x40\x99\xc3\x78\x3f\x3b\x23\x3b\x2d\x13\xf5\xe3\xd5\xff\x6f\x01\x1d\x2f\x17\xeb\x09\x46\xc8\x29\x92\xfd\x79\x1f\xee\x39\x97\x22\x10\x1d\xbf\x16\xdc\x9f\xf4\xbd\xf0\xe0\x78\x62\x2b\x05\x5b\xc2\x6d\xd5\x75\xbf\xed\xe5\x42\xc2\xa6\x14\x19\xf8\x92\xf3\x1d\x18\xc7\xda\xaf\x82\xdf\x24\x3a\x6c\xd9\x61\x54\xdc\x62\x38\x50\x1a\x98\xc2\x31\xb2\x8a\x73\xf1\x81\xc7\x47\x13\x94\x55\x21\x59\x5e\xc2\x8b\x30\x00\xda\x2d\x40\xbe\xe4\xfa\xe1\x6c\x61\x66\xef\x2b\x0a\xec\x72\x6e\x7b\xd9\xe1\xb2\xf8\x06\x78\x29\x82\xfb\x8c\xf0\xf8\x30\xe2\xb9\xb4\xf7\x49\xe5\x54\x22\x3f\x31\x23\x1a\x1d\x5c\xeb\x60\xae\x5e\x6d\xf1\x34\x57\x8d\xcd\x34\xd5\x7d\x7d\xcc\xd2\x51\xfe\x4d\x39\xaa\xb8\x06\x00\x2b\x07\x65\xf9\xda\x47\x19\x97\x80\x33\x1e\xd4\xbb\x03\x45\x08\x3f\xee\x75\x12\xfc\x19\x5b\x80\xf3\x2e\xfb\x5b\x1d\xd7\xbc\xac\xb0\x18\x8b\x5e\xdf\x44\xa0\x34\xbb\xe2\xdb\x78\x2c\x84\x37\x2d\x44\x0f\xa8\x83\x6d\x34\x6f\x85\x07\xe7\xbf\xfb\xbf\xa7\x38\x9c\x68\xa6\x4b\xa3\x54\xcf\x6f\x25
\xd0\x57\xcc\xdb\x68\xae\xaa\x20\x04\x56\x1f\xd0\x19\x80\xcd\x93\xa3\xf5\x49\xc9\x57\xe0\xb6\xb2\xee\x21\xaf\xcd\xa5\xad\xc9\x4c\xba\x73\x3f\xde\xda\x36\x23\x13\x7a\x17\x05\x04\x3d\x96\xce\xef\x77\xd2\x20\x44\xc2\x4c\xdf\x1f\xfb\x82\xcf\xd7\xd3\xdc\x92\x05\x71\x09\xeb\x8c\xaa\x6a\x21\x54\x15\x5f\xfd\x74\xb0\x80\x4e\xfa\x5b\xb7\x9f\xae\x65\xa5\x90\xd3\xf7\xd1\xe2\xe2\xfc\x4e\x7d\x9d\xa0\x55\x6c\x2f\x67\x3f\x30\xfe\x6e\x99\x9f\x3b\x1f\x81\xb2\x58\x4b\x2d\x7d\xac\xf2\x50\x70\x9b\x02\xda\x7e\xe4\xfb\x49\x9f\xcd\x3d\x00\x01\x7d\x73\x38\x5f\x07\xdf\x00\xa7\x1e\x56\xbc\xa2\xe4\x75\x73\x1e\x4c\xc5\x6f\x89\x6f\x8c\xa6\x94\x35\xec\x90\x83\x54\x37\x53\x73\x97\x87\x08\xa6\x3d\x0b\xbf\x45\x9f\x81\x80\xec\x81\xd2\xba\x67\x0e\x11\xad\x09\x0a\xa9\x98\x2f\xc5\xfa\xd9\xf8\xa5\xb6\x93\x8b\x29\x47\x1b\x0e\x37\x1b\xa9\x09\x60\x56\xc1\x0c\x21\x7e\xfa\x85\x05\x03\xa0\x8f\x31\xb9\x26\x62\x1c\xcd\x79\x7c\x5c\x29\xd8\xc6\xf1\x2a\x17\x3f\x72\xd9\x7e\xfc\x1c\xd1\x71\x78\x82\xe3\xb4\xce\x47\x09\x47\x89\x4b\xfa\xea\x9b\xda\x9e\x62\x9a\x43\x84\xc7\xfa\x0a\xf8\x67\xa0\x9f\x45\x47\x6e\xbf\x8c\x6a\x94\x97\xda\x68\xcf\x9b\x0e\xc2\x2d\x55\xdf\x3c\xee\x25\x48\xf6\x08\x7a\xb5\xff\x2f\x5d\x9f\xe6\x72\x7d\x43\x2a\x16\x58\x98\x4d\x06\xfd\xa1\x0c\x0a\x70\xab\x29\x9c\x0f\xec\x25\x28\xc0\x5b\x1a\x24\xe3\x0a\x2b\xd4\x54\xb5\x29\x73\xea\x17\x1e\x99\x55\xdc\x58\x61\xbc\x67\xf8\x5f\x2d\xf2\x7e\xf7\x14\x2b\xd9\xaa\x3c\x73\x06\x96\x92\xfc\x28\x8f\xbb\xe8\x6a\xfe\x07\x06\x83\x2e\x8e\x35\xe3\xa6\x17\xbe\xca\x01\x63\x92\xa0\xac\x98\x77\xda\x59\x26\x62\x1c\x96\xca\xed\x1c\xe4\x85\xf3\x9a\xab\xa7\x66\x29\xee\x88\x19\x23\x41\xf1\xc6\x3c\xd1\xf5\x7d\x75\x14\xe3\x90\x75\x4c\xc7\xea\x38\xe7\xb2\xac\xd3\xbd\xc3\xf2\x0f\x5f\x3f\x57\x20\x02\x9d\x65\x0d\xa8\x4c\x99\xd6\x0a\x87\x7d\xde\x81\x65\x54\x2c\xe5\x1e\x79\xbc\x79\x15\x49\x93\xda\x57\x8e\x8f\xe2\xf7\x70\x8f\x40\xf3\x68\x3a\x77\x72\x19\x0b\x42\xb4\xd8\xf6\x81\x63\x09\x37\x33\x6b\x1f\xe5\xf7\x82\x38\xce\xbf\xcb\xfc\xfc\xa5\x44\x0c\x2a\x72\x08\x95\x26\x83\x21\x38\x8b\x98\x60\x93\xf0\x5f\x89\xc1\x36\x6b\x5d\xab\xd7\x65\x3d\x9d\x5e\x75\x5b\x2c\xfd\xcb\xfd\x47\xf8\x83\xbd\x23\xc2\x76\x1c\x8c\xc3\x2e\xc4\x30\xba\xa5\x37\x80\x4d\x95\x44\xe5\xda\x8f\x86\x9e\xbf\xc5\xad\x8c\x1f\x66\x79\x47\xe1\x3b\x42\x88\x03\x64\x0c\xa8\x05\xf7\x6b\x47\x0e\x80\xc6\xbd\x21\x77\xa2\x8c\x61\xbb\xdb\xe3\x2f\x14\x42\xc3\xb1\x3b\xe6\xa7\x44\xb3\x51\xb2\xb8\xe1\x52\x93\xbb\xf2\x79\xc5\x45\x9b\xe6\xf8\x93\x47\xe0\x81\xe9\xa4\x17\x91\xa0\x6c\x2d\x11\xf9\x25\x34\xf8\xb6\x57\x36\x08\x3c\xe9\xc4\xc0\x85\xb2\xcc\xdc\xd9\x2d\x66\x05\x93\x2f\xf0\xab\x68\x79\x2a\xbb\x19\x46\x86\x63\x8a\xbc\x66\x5c\xcb\x74\xd5\x7f\x67\xc5\x0b\xfc\xa4\x89\x3e\xd5\x7d\x51\x7e\xf8\x78\xae\x35\xe6\x98\x66\x65\xa0\x9e\x02\xaa\x0a\x45\x41\x41\xd9\x99\xb2\x28\xa4\x94\x49\x04\x6e\xa6\xf5\xbc\x69\xd5\x52\x69\x42\xb3\x58\x22\xae\x63\xe8\xa2\xe9\xee\xfa\x3d\x2b\xd0\xa2\x1e\x2b\x6e\x1e\x50\x81\xaa\x3f\x16\xc2\x9b\x72\xad\x11\xf3\xc1\x88\x9c\xc9\x28\x52\xaa\x9a\x85\x57\xd9\xcb\xfe\xf4\x1c\xf4\x93\x48\x43\x8b\xc2\x45\x97\xd8\xa5\x90\x20\x51\xd9\x92\x7a\x0c\x92\x6d\x42\x05\x3e\x0b\x09\x08\x3f\x42\x98\x98\xd2\xa5\xd2\x08\xb4\x21\x33\xc9\x01\x36\xba\x51\x59\xee\x51\xfa\xf9\xb0\x48\xa9\xcc\xbb\xe1\xaa\xd8\x5a\xf0\xa0\xd3\xfa\x70\xa6\x5a\xdf\xf5\x19\x4d\x92\x0e\xc0\x03\x9d\x11\x91\x90\x50\xf0\x98\x4b\xa1\x42\x32\x97\xe4\x60\x63\x74\xdc\xf1\xb7\x59\xaf\xd1\xcd\x55\x59\x7e\x7d\x8f\x4b\xb3\x41\x1f\x2b\x39\x90\x59\xee\x57\x66\xf2\xf8\xbe\x66\x90\xec\xc2\xf5\x2c\xeb\xe9\xc4\x56\x15\x7e\x7c\x84\xf8\x2d\x73\x22\x00\x35\xaa\x60\x21\x56\xcd\xf1\xd7\x76\x38\xb2\xe6\x30\xd4\x5b\x6d\x4
e\x83\x6d\x52\x09\xeb\x39\x0d\x3a\x18\x37\xd4\x19\x54\x5d\x6e\x67\x4a\xf0\x7e\xcc\x60\x1d\xdc\xeb\x80\xc6\x07\xe6\x38\x2e\x32\xf3\xff\xc2\x68\xf4\x31\xb5\x07\xa1\xed\xfd\x67\xb8\xff\xdd\x16\xcf\x46\x1e\x5c\xef\xba\x13\x09\x0d\x4f\x44\xcd\xaa\x53\x7b\xca\xfe\x1c\xeb\xf5\x95\xf3\x41\x66\x59\x80\xb3\xbd\x02\x0d\x4c\x39\xd5\x8f\xcd\xb9\x2b\x2a\xe6\x66\x20\xec\x89\x5d\x66\x92\x15\x3c\x64\xaa\x11\x6d\xca\x1e\x28\x5d\xfa\x55\x3b\x84\xed\x3d\x25\x73\xf3\x17\x6c\x4c\x22\x9c\x1a\x54\x51\xa8\xac\x18\xf2\x0f\x87\x4a\x64\xff\x72\x27\x03\x31\x38\x31\x5c\xa6\x2b\x35\xce\x70\x62\xcd\x97\x34\xde\x3e\xd6\x7e\xa5\x76\x53\x28\x64\x7c\x11\xb7\xf6\xae\xb0\xca\x83\x72\x6b\x00\x51\x51\xba\x2e\x93\xb6\x06\x7a\xae\xbb\xd7\x7e\x39\x04\xa6\xe8\x44\xf6\x52\x06\xca\x93\xd7\x1d\xf4\xdc\x79\xa4\x88\x5d\x86\x29\x25\x23\x8f\x04\x11\x88\x56\xfb\x0c\x9c\x82\x1e\xd4\x8b\xe3\xd4\xa8\xf2\x0b\xa3\xa8\x9c\xf0\x2d\xe5\x22\xc7\x15\xf8\xac\x82\xea\x14\xfe\x30\xf6\x27\x26\xe4\x99\xfd\x07\x76\x5f\xaf\x6c\x7c\x52\xfd\x1a\xee\x97\xc9\x5a\x13\x6a\xad\xb7\x68\xef\xe5\x87\x8c\x09\xc4\xaf\x32\xd6\x9d\xd4\xe1\x8b\x9a\xcc\xec\x7c\x9b\x3a\xa1\x71\xad\x0a\xe0\x03\xd7\x82\xcd\xf7\x5a\x21\x9d\xb7\x1f\x88\x76\x85\xdc\xf0\x71\x73\x0b\x45\xa5\x2d\xd5\x3f\xe1\x9f\x6c\x4c\xce\x7c\x6b\xa2\x9b\xe3\x17\x4d\xde\x17\xed\xf2\x06\x1d\xda\x7b\x0c\x05\x4a\x37\x79\xb8\xcc\xb7\x24\x60\x82\x4d\x11\xe8\x0f\x61\x5e\x67\xbc\x6b\x99\xeb\x29\x17\xba\x75\x1e\x26\x3a\x2f\xf8\xcd\xa7\xa2\x7a\xb7\x17\x31\xcc\x4e\xba\x79\xe6\xa6\x7d\x6d\xc7\x50\x93\x8a\x4e\x4f\x43\xd5\xfd\xc8\xc9\x22\xa3\x7e\xe4\xc1\xe1\x66\xfc\x28\x73\xf1\xc9\xf6\x98\x48\x31\x5f\xcc\x3f\x3a\x59\x05\x1c\x2f\xed\x59\x65\xaa\x2a\xd4\xfd\xb4\x52\x30\x6b\xb7\xc2\xf8\x9b\xbb\xaf\xe0\x67\x17\x8a\x6a\x59\x3d\xf3\x2c\x00\x5e\x80\x7b\x5e\x7b\xf8\xd0\x0f\xfa\x72\xfb\x37\xa5\xbe\x23\x7d\x27\xd8\x6b\x6d\x3f\x26\x8b\x12\xc5\x66\xfc\xf4\xb4\x8f\xce\xef\x4b\xff\xcd\xb1\x1a\x1f\x88\x52\x4a\xf1\xff\x93\xf6\xc2\x78\xf8\x45\x84\xaf\x7e\x3f\x78\xf0\x88\x45\x90\x79\x29\xe4\xc8\xfc\x03\x0b\x79\xca\x7a\xa3\xfc\xab\xf9\x1f\x66\x08\xf9\xdc\x98\x36\x97\x85\x7b\xe5\xe2\xf3\xdf\x7d\x12\x8e\x3d\x9b\x7d\xc0\xa0\x12\x7a\xb6\x9d\x58\x6f\xdd\xa2\x4a\xff\xf6\xce\x1b\xec\x12\x6b\x37\xfe\x5c\xf8\xb6\x71\xc7\x9b\xc4\xd0\x73\x67\xe7\x2f\x2a\x29\x99\xc1\x52\x98\x61\x71\xd6\xfe\x4b\xf6\x32\xbb\xae\xad\xce\x67\x17\xab\x1d\x76\x39\x7d\x84\x73\xfe\xf7\xf9\x11\xa6\x31\xdc\xb3\xf8\xa1\x67\xf5\x5d\x5c\xeb\xb6\x7f\x52\x45\xc3\xa1\xdd\xd7\xde\x9c\xad\xf9\xca\xb2\x46\xc6\x22\xfe\x4a\x73\x0b\x74\xfd\xba\x03\xaf\xd9\xc7\xc9\x1e\xf1\xf7\x0c\xda\xe5\xa3\xa4\x8d\xce\x3e\xf4\xa9\x52\x63\xdc\xe0\xa7\x1d\xb5\x62\xc7\x83\xca\x4f\x4f\x96\x6f\xfd\x6d\x71\x68\xab\xe7\x97\xc5\xaf\xf2\xb7\xff\xb0\x36\xf5\x71\xe9\x96\x79\x36\x2b\x41\x73\x86\x0f\xeb\xb4\x3f\x36\xcb\xa7\x55\x2d\xfb\xc4\xf8\x61\xfd\x96\xfb\xc6\x49\xb6\x97\xd8\xb4\x7b\x18\x26\xf5\x38\x85\xbe\x79\xbd\x4a\x79\x0d\x4f\xc7\xcf\x85\x57\xfc\x96\x7b\xcd\xb8\xb0\xbd\xe8\xe6\xda\x25\x0f\x17\xe6\x44\x5b\x24\x70\x57\xf3\x47\x9c\x3a\x71\x5b\xe1\xe7\x8f\xf4\xa4\xd5\x45\xf1\xf1\x93\x3e\x9b\xec\x37\xb5\xec\x5f\xe4\x3f\xcf\xfa\x63\xe9\xff\x23\xbd\x1d\x51\xa2\xcb\x8a\x57\xdd\xa8\x9c\xd3\x69\x34\xc7\x76\x49\x75\x3f\xd2\xb6\x73\xcb\x6a\xd6\xc9\x3f\x9e\xf8\xdc\x5a\xaf\xa0\xf0\xe5\xa6\x13\xec\x10\xbc\x50\xd6\xab\xa7\xdf\x2f\xf1\xe1\xb8\xfc\xef\x49\xd4\x85\x9a\x03\xaa\x0f\xf3\xda\xca\x96\x4a\x7c\x5e\x1d\x10\xb2\xda\xf3\x49\x6c\x72\x08\xb7\xd0\x25\x9f\x86\xfa\x97\xc6\xe7\xe5\x7c\x2e\x05\xf6\x67\xc6\xcd\xf1\xd1\xe1\x99\xbd\xcc\x72\xf2\xc9\x39\x9e\xaf\xd5\x26\xf0\x6c\xfd\xf1\xbb\xd1\x79\xda\xfc\x54\xff\xa5\x0b\x3d\xe4\x6c\xe7\x0b\x
ac\x79\x18\xdb\x30\xc7\x7d\x2b\xbf\xd3\x4b\x1d\xde\xe0\xfb\xbd\xde\x52\xac\xc1\x07\x9f\xe8\xfc\x57\x6c\x9f\xb3\x5a\xe6\x06\x43\xd0\xbb\x8d\xcb\x9b\x4c\xb7\x8a\x33\xec\x9a\x1a\xc9\xda\xe0\x73\xc2\x8a\xd9\xdd\x91\x73\xd6\xb1\x9e\x38\xdf\x8f\xa2\x21\x22\xbd\x82\xbe\x35\xfb\xe5\x0a\x82\xb6\xf6\xfd\x76\x8c\xba\xbe\xd2\x8f\x37\x37\x81\x39\x23\xc9\x63\xe5\xf4\xba\xb9\x8f\x42\x5b\xa5\x5a\x55\x4b\x56\x94\xf5\xc7\xd5\x05\x77\x1b\x1f\xd9\xaf\x75\x59\x77\x4d\xb8\x9d\xc4\x4d\x9e\x77\xdf\x26\xbe\x91\x6a\xbe\xab\xae\x66\xcf\x74\xf2\xbb\xa2\xef\x1c\xed\xff\x85\x7c\xff\xda\x4f\x6a\xab\xa7\x9f\x2c\xbc\x7f\xd1\x9d\x77\x6d\x65\x43\xc8\x5d\x05\xdb\x8b\x3c\x4b\x13\x9c\x3e\xcf\x2b\xb1\xb8\x79\x7e\xd3\xf9\x53\x36\xaf\xe7\xdc\x7a\x57\x36\x2b\xe6\x5c\xbe\xdf\x37\x13\x19\xe3\x97\x4a\xbf\x4e\x3c\x79\xea\x21\xfe\xc3\xed\x48\xd6\x33\xae\x45\x1c\x97\x8c\x14\x67\x59\x30\x7e\x3d\xdc\x5e\xb2\xf1\x5d\xf3\xed\xd9\x0f\x56\x24\xc5\x4d\xd8\xa2\x7d\xf4\x4d\xe2\x84\x4f\xd6\xb2\xf3\x3f\xbc\xd8\x28\x75\x63\x25\x3b\x67\xe6\xaa\x43\x09\xe2\x15\xfa\xee\xa7\xb2\xe6\x1c\x96\x3a\x53\x90\x59\xf1\xee\xdf\xdd\xdb\xcd\xd3\x02\xa4\x78\xf9\x7d\xb8\x97\xde\xfc\x5d\x7e\x38\x99\xf3\xe3\x25\x51\xde\xf6\xf6\x7f\x1e\x9b\x33\x9c\x1f\x36\x7e\x9c\x71\x22\x66\xcf\xa5\xb3\x9b\x7e\x1f\x56\xf6\x3c\x5a\x53\xa5\x2f\x53\xa7\x11\xb7\xeb\xcd\x45\xeb\xf2\xf7\xe7\x3b\xb6\x40\xd6\xba\x4f\xcd\xcf\x5c\xf6\x6b\xd6\xde\x3d\xce\xbf\x0e\x3b\x1b\x4e\x9c\xbb\xe6\xe1\xbe\xdd\xfc\x42\xa7\x6e\xec\xfb\xe0\xa1\xf4\xfa\xba\xd9\xb7\xd5\xcf\xdf\x7b\x05\x1f\x9f\x36\xaf\x53\x2e\x6e\x17\xff\x33\xe5\xe6\x87\x07\x67\xd4\x2d\xbd\x74\xe7\xee\x87\x03\x93\x75\x9a\x96\xdc\xaa\xce\x4d\x6e\xdb\xf4\x6e\xd6\x92\xd5\xfd\x7b\xc5\xbd\xb4\x6f\x5a\x27\xfe\xd1\xf4\x96\x45\xda\x9c\xbe\x52\x68\x76\xc0\xb9\x89\xdb\x59\x0f\xcc\x5b\xb6\x7a\xe1\x85\xde\xc8\x8f\x52\x5a\xd3\xfa\x37\x1f\x6a\xf6\x4c\xeb\x3a\xef\xe3\x7f\xbc\x65\x0a\xfb\xe4\x9f\xdf\x02\x9e\x27\xfe\x89\x4c\xff\xa8\xec\xf8\x77\xfd\x8b\xf8\xe4\x9b\xcb\xec\x58\x9f\x9d\xbc\xfd\xea\x51\xbd\x67\xa2\xc5\x2b\x81\xfa\x20\xde\xf0\xff\xb5\xee\x9e\x81\x1f\xdd\x0b\x3f\xaa\x5f\x88\xf9\xdc\x28\x2b\x2f\x22\x2e\xfa\xe5\xdf\xff\xb2\x57\xb7\x18\x12\x19\x18\x18\x24\xce\x17\x99\xbc\x33\x78\xcc\x7d\x6b\xef\xbb\x49\x32\x13\xf5\x8d\x5e\xce\x2f\x0d\x38\x59\xf1\xeb\xad\xce\x04\x4e\x05\x61\x07\x35\x23\xc6\x99\x6b\x52\x98\x52\x27\x9c\x4c\xfc\x38\x85\x53\x44\x7d\xf2\xe9\x47\x2a\xc2\x1a\x7e\x6a\xf7\x0f\x3f\xfa\x52\x73\x9a\x45\x63\xd6\xc9\xdf\x1b\x4e\x30\x25\x88\x4c\x3c\xf3\x74\x43\x14\x43\xca\xc9\x25\x16\x48\x17\xf0\x43\x2f\xde\x87\x18\xd4\xe0\xc3\x80\xe3\x3a\x7e\x06\xc8\x1d\xfe\x0e\x6a\x04\x34\x1a\x44\xd8\x47\x85\xdd\x3f\x26\x3f\xd9\xa0\x82\x81\x81\x81\xc1\xd3\xd5\xcf\x65\x9d\x53\x42\x13\x20\x00\x00\xff\xff\x80\x9f\xaf\x90\x13\x8a\x00\x00") + +func filesImagesMaintainersPngBytes() ([]byte, error) { + return bindataRead( + _filesImagesMaintainersPng, + "files/images/maintainers.png", + ) +} + +func filesImagesMaintainersPng() (*asset, error) { + bytes, err := filesImagesMaintainersPngBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "files/images/maintainers.png", size: 35347, mode: os.FileMode(420), modTime: time.Unix(1509717368, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _filesImagesMeowserPng = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x74\xcb\x77\x3c\xd4\x8f\x03\xf8\xf1\xb7\x7d\x2e\xe3\x92\x15\xc2\xd9\x9b\x3b\x32\xb3\xcf\x5e\xe7\x10\xca\x1e\x99\xc7\x65\x6f\x67\x2b\x8a\x50\xc8\xca\xcc\x28\x94\x0f\x47\xe8\x8c\x90\x75\x22\x49\xd9\x23\x3b\xdb\x19\x71\x7e\x8f\xef\xef\xbf\xdf\x1f\xbf\xc7\xeb\xf1\xfa\xf3\xf9\x04\x69\xaa\x47\x0f\xe6\x00\x03\x00\x40\x6f\xa0\x8f\x40\x01\x00\x20\xf3\xbf\x41\xe4\x00\x00\x4c\x3b\xf9\x7d\x03\x00\x80\xd2\x43\xd3\x44\x13\x00\x3e\x64\xdc\xb8\x74\xa6\x02\x00\x80\x2c\x10\xa5\xa7\x05\xd4\x13\xb8\x36\x01\x00\xe0\x75\xd5\x47\x99\x00\x40\x84\x10\x00\x60\x13\x01\xe0\x02\x00\x00\xec\x06\x00\x04\xcb\x00\xc0\x96\x13\x00\x28\xe7\x01\x00\x1b\xa6\xf0\x93\xf9\x3d\x00\x00\xa8\x5d\x8c\xf4\x10\xc0\xf5\xff\x7a\xd3\xf9\x36\x1b\x00\x00\x5a\x8c\xbe\x6d\x20\x00\x00\xfa\xff\x77\xdd\x41\x1f\x35\x00\x80\x54\x1a\x20\x34\x2d\x6d\x7a\x47\x6d\x23\xf3\x2c\xfb\x5e\x5e\x5b\xfd\xe8\xc4\x2d\xe0\xde\xb6\x7b\x54\x45\xce\xcb\xc8\xc8\x50\x50\x80\xf8\x52\x29\x91\x93\xe9\x2f\xd8\xdd\xad\x99\x79\x38\x57\xf8\x3f\x49\x09\x08\x3d\xe5\xb8\xdb\x7d\x52\xe4\xb6\x52\x3b\x18\xe3\x39\xf9\x03\x61\x12\x6f\xa6\x0b\x45\x94\x99\x23\x99\x35\xbf\x91\xe9\xce\x67\xe4\xc7\x85\x9b\x9d\x1e\xff\x0b\x59\x88\xac\xf3\xbe\xde\x20\xa3\x16\xf8\xe2\x86\xbf\xfa\xac\xea\x18\x7d\xda\x9e\x51\x6b\xf1\x96\xa0\x74\xa2\xb2\x5e\xcc\x87\x32\xbc\x8c\xe5\x73\x3f\x4b\x58\xe4\x4a\xd4\x30\xd4\x02\x95\x1e\x5f\x7b\x1a\x8a\x56\x32\xbb\x78\xf2\x19\xd2\x42\x7b\xf1\x59\x16\x9e\xfa\x18\xd2\x1f\xb2\x6b\x4e\xcd\x44\x48\xce\x07\x8b\x53\x65\xd3\x37\x66\xf3\x11\x7f\xd2\xcc\xe4\x2d\x33\x6a\x4a\x22\xff\xe0\xf2\xd7\x38\xa4\x6e\x57\x32\x5f\x57\xbb\xc5\x1f\xdd\x5a\x9d\x6e\xdd\x3a\x99\xca\xf8\x69\x9d\x71\x3b\x92\x14\x4a\x3a\x71\x03\xe7\xa0\x0c\xc3\x67\x46\x2e\x9e\x74\x45\xd1\x7b\x90\x67\xc7\xe9\x78\x98\x3e\x38\x3d\xf4\xff\x73\xf0\x05\xfd\x30\xc6\x9d\xa9\xf0\x06\x21\x05\x7f\xbe\xdc\x8b\x97\xe8\x5c\xdd\x96\xc5\x8a\x76\x45\xd1\x5b\xc4\x89\x0f\xe7\x94\x12\xb4\x98\xfd\x90\x79\xae\xc3\x25\xf3\xd5\x14\x53\xf3\x6a\xb6\xf5\xe8\xa1\x4e\xc5\x7f\x7f\x75\x82\x15\xee\x2f\xc9\x39\x51\x37\x1e\x67\x55\x4d\x31\x2e\xa8\xfa\x09\x41\xc1\xd6\xdd\x4a\x38\x34\x01\xed\x37\x93\x54\x50\x1e\x95\x62\xb1\x5e\x2c\x5d\xdc\x7a\xae\xb2\x5e\xeb\x49\x29\x75\xb4\x14\x6a\x6d\xd9\x5f\xac\x66\xf4\x79\xe1\xbf\x23\x0f\xd9\x7e\x0a\xff\xeb\x0b\xf5\xca\x9c\xd9\x16\x70\x11\x8b\x06\xb9\xbe\x6e\xb9\x9a\x53\x40\x02\x5b\x4d\x61\x7f\xd8\xf7\x8e\xa1\xaa\xb4\x5a\xd7\x3b\xfd\xaf\x9b\x53\x5e\x5b\xd7\xdb\x4e\x77\x08\x95\x2d\xf5\xf8\xd4\x3b\x60\xb6\x8b\x5e\x9e\x3c\xa3\x6a\xe0\x4b\x5c\xcc\x21\xb9\x0b\xdc\x6a\x50\x25\xee\x3f\x75\x1f\x26\x12\x2b\xc5\xe3\xd2\xa4\x41\xd1\x75\x5e\x96\xa3\xec\x3c\x79\x88\x09\x44\x4f\x75\xdd\x54\x10\xef\xcd\x3b\x22\x8a\x66\xce\x39\xa8\xd6\x23\x2a\x6d\xf9\x6f\x73\x25\x3f\x29\x08\x88\xdf\x8f\x92\x31\xca\xcd\x4e\xd3\xc1\x7d\xc1\xf4\xd6\x76\xaa\xc4\x6f\xbb\xab\x05\x3e\xc4\xeb\x83\xfc\xdc\x98\xd3\x11\xa3\xef\xc2\x08\xd2\x4a\xef\x7d\x9c\x3a\x25\x6f\xb6\x21\x23\x91\x68\x40\x4b\x40\xa7\x68\x07\x87\xcf\xe9\x08\x2e\x91\x2f\xfc\x61\x94\x08\xf7\xdd\x2c\x67\x58\xec\xf0\xda\xfd\xc7\x5a\x24\xef\x1c\x00\x8b\x02\xc3\x5b\xee\x28\x34\x25\x8b\x52\x3e\x7d\xda\x9c\x90\x92\x4e\x87\x54\xad\x7f\x7c\x06\xcf\x89\x77\xeb\xfe\x18\x40\x46\xf7\x25\xc0\xd5\x0a\x57\x26\xf2\xba\x3f\x2b\xe1\x71\x7b\x89\x8a\x2a\x77\xc9\x98\x85\xd5\x5b\x69\xb5\xed\xf4\x3e\x75\xf7\x99\x85\x9d\x4f\x7f\xe6\xc7\xa4\xdf\xd7\x56\xb3\x2b\xfc\xe9\x5a\x2f\xc9\xab\x72\x03\x28\x40\x99\xd4\x94\x8b\xd4\x3d\xe5\x7a\xd6\xe9\xd5\x97\x1b\x73\xcd\xd6\x0e\x90\x36\xf7\x4d\xb7\x56\x10\x75\x6b\xfa\xeb\x03\x82\xa6\xac\xbf\xf2\x9c\x29\x6e\xc0\x95\x3d\xdc\xf3\xbf\xf
7\xb2\x14\x6f\x76\x8f\x65\x53\x44\x41\x60\x32\x32\x08\x37\xec\x37\xed\x31\xa3\x9b\x66\x5a\x7e\xcf\xef\x90\xbd\xe7\x7a\xae\xe9\x30\x5a\x0e\xda\x7f\x62\xdb\x75\x91\x6a\x95\xd2\xf5\x1c\xd3\x1d\xfc\x16\x16\xd6\x76\x8e\x1c\x0d\x44\x7f\x9b\x28\xd9\xf7\x12\xdf\x8b\xa4\x4c\xac\x6e\x97\x0b\x14\x35\x07\x24\xdf\x31\x73\x7b\x18\xb2\xe3\xf3\x0b\xd7\xe0\x6c\x89\x99\x93\x1f\x8f\xde\x7e\x5e\x27\x45\x67\x52\x0d\x53\x99\x61\x38\xa5\x5b\xe5\x85\x0a\x36\xbc\xbb\xc3\x22\xf2\x4c\xa3\x94\x46\x99\x49\x8d\xd7\xd2\x81\x0a\xcb\x25\x7b\x1b\x0a\xcc\xea\x40\xd9\xbe\xcd\xe5\x26\x69\x9b\xf4\xd1\x2c\x8f\xe7\xcd\x6c\x0b\x26\xe6\x6c\x74\x48\xbf\xba\xec\x8b\x8e\x54\xf3\xac\xa4\x67\xf9\x15\x5b\x30\xb0\x66\x7c\x02\x95\x03\x7b\xdf\xf4\x65\xb1\x7b\x21\x0f\x6e\x5d\x62\xec\x19\x73\xcb\x3b\xfc\xd0\x74\x94\xfb\xd0\xeb\xcb\xca\x43\xbc\xfe\x55\x84\xfb\xa1\xe3\xab\xfe\x6d\x71\xcc\x8a\xc2\xd9\xaa\xb2\x5e\x6c\x96\x3b\xfc\xbe\xff\xfa\x92\x86\x01\x6c\xd1\xd7\x27\x23\xc9\xc8\xfe\xcd\x50\xa6\x1e\xf7\x41\x29\x54\xec\xb0\x69\x77\x9e\x99\xb9\x4f\xde\x44\xab\xba\x2a\x8b\xa9\x08\x7f\xa4\x14\xe9\x52\x89\xff\x2c\x71\x9d\xc2\x63\xeb\x2a\x9c\xab\x95\xb6\xff\xee\xa7\xbe\x16\x88\x41\x14\x64\x1c\x0f\x65\xcf\xe6\x65\x02\x44\x16\xcb\x46\xb6\x83\x10\x42\xa3\x23\x52\x2a\x4d\xb8\x48\xeb\x9d\x03\x96\xa0\x41\xff\x8e\xdc\xb1\x8f\x0d\x4a\x3f\xa7\xd8\x9f\xf5\xa3\x41\x9c\xf1\xbb\xea\x79\xef\x6f\xab\x6f\x82\x78\xb3\xca\x85\x38\xcb\xb2\x86\x86\x3e\x7f\x0e\xc1\xd4\x78\xd7\x0e\xc7\xf1\x65\x84\x75\xd8\xda\xb7\x3d\x99\x8b\x30\xb8\xd8\xa2\x72\x50\xec\xe7\xf0\xc2\xbd\x6c\x11\x0d\xa0\xcc\xa2\x77\x85\x7d\xc1\xe8\xc3\x19\xa6\x21\x5f\x6f\x23\x56\xb9\x8c\x6b\x24\x39\xad\x0f\x9a\x92\x5b\x8e\xac\x07\x62\x2f\xb3\xde\x36\xdb\xb7\x7b\xc4\x99\xa3\x10\x45\xa6\x26\x55\x3b\x9f\x35\x0b\x65\xdd\xbd\xc9\x6e\x64\x78\x6f\xb0\x93\x92\xc5\xc5\x5d\x61\xbc\xe8\x97\x88\x4b\x4d\x7c\xc4\x5e\x9a\xef\x15\xc3\xc4\x65\xf6\xf1\x0f\xeb\xdc\x7c\x62\xeb\xea\x1b\x21\x89\x84\x6e\x01\x6d\xee\xb0\xa7\x57\xa6\x60\x56\x27\x19\x67\x8f\xec\xc7\xde\x6e\x03\xc5\xa1\xe3\xa9\xa1\x3c\xf2\x15\xab\x69\xc5\x96\xb2\xb3\x69\x62\xa1\x1e\xfe\x52\x64\xe9\xc5\x01\xca\x80\xe8\xd9\x78\x37\x33\x96\x99\x5c\xe3\x57\xea\xcd\x71\x01\xa7\x1b\x39\xca\xdc\x51\x3f\x02\xab\x1d\xdf\x7d\x5e\xa8\x2e\x56\x14\x7a\xf9\xae\x65\x84\x2b\x41\x89\x4b\xf2\x33\xc7\x4d\xac\xcc\x74\x0f\x3c\x2b\xc1\x79\xef\xeb\x81\xb4\x64\x73\x9d\xe1\x59\x99\xe3\x63\x81\xc0\x57\xee\xeb\x71\x5b\xc9\x07\xc2\x8d\xaa\xac\x13\xfc\xfc\x3e\x16\x40\x10\x71\x36\x3b\x45\x6d\xe6\x65\xf4\x95\xf0\xab\xf2\x0e\xb5\x7c\x10\xb2\xf8\x43\x1b\x84\xdc\x4e\x5f\x8a\xf4\xdc\x4e\xc7\x70\xda\xce\x90\x16\x2a\x10\x26\x74\x3b\xb4\x47\x76\x57\xe1\x54\xe3\x27\xdc\xe8\xd5\xee\x42\x5b\x68\x23\x88\x5d\xe9\xe0\x78\x89\x5b\x75\x75\x12\x65\x48\x9e\xa8\xf7\x5c\x8a\xfc\xbe\x42\xa8\xfa\xe9\xa3\xeb\x1b\xb8\x40\x3d\x2b\x7b\xb1\x1a\xd9\x17\xc1\x9e\x23\xb7\xc6\x35\x40\xa0\xcc\x3f\x9b\x9a\x47\xfe\xd4\xb6\xe4\x20\xda\x8c\xec\x17\x99\xf2\xc7\x85\x6c\xba\x72\xb9\x23\xad\x41\xd7\x79\xb9\xbb\xbf\xc3\xcf\xf9\xfb\x4b\x9f\x93\x7e\xc3\xab\x4d\xc7\x30\x19\x6a\x7e\xb9\x56\x93\x3f\xea\xba\xa4\x73\x19\xed\xae\x83\x7d\x46\x26\x31\x62\xb6\xc6\x3a\xfd\x2b\x56\x4b\x8f\x1a\xe4\x78\x48\xad\x77\x97\x02\xc5\x7f\x3a\xda\xc8\xbb\x14\xaa\xcf\xa0\x0b\x95\xeb\x56\x98\xd4\xf0\x60\xc3\xe2\x98\xa5\xc5\x94\x99\x65\xd3\x67\x0e\xb1\xce\xe7\xf9\xb2\x46\xfa\x0d\x2f\x8c\xb7\x2e\xaa\x87\x04\x73\x89\x97\x67\x35\xb3\x85\x4d\x03\xf4\x92\x8b\x88\x38\x10\x70\x46\xde\x7d\x0f\xb9\xa9\xd0\xd0\x89\x64\x39\x37\xe4\x3b\x34\x7d\xd6\x3d\x58\xdf\xd1\xe4\x7e\xf2\xbc\x95\x92\x7d\x7f\xa7\xdd\x1e\x8f\x8f\x9e\x55\x5a\x47\xa0\xec\x6d\x0d\xcd\xea\x45\xa0\x4b\x47\x62\x90\x
c3\x8e\x3d\x7f\x52\x87\xb8\x0f\x85\xe9\xe2\xcb\xb1\xb2\xb1\x79\x4b\x9c\x53\xcc\xcb\x70\x45\x9f\x2a\xfe\xf5\xc2\xa2\x77\x91\xfc\xc5\x09\xd7\xc3\xbd\xa2\x80\x96\x08\x76\xe2\x66\x39\xfa\xe9\x11\x47\xb6\x08\xad\x87\xb1\x15\x26\x2d\xa9\xc8\x4c\xd2\xdf\xb1\x93\xdf\x61\x7b\x9c\xdb\x01\x19\x38\x9e\x88\xbd\x71\xdc\x9e\xb1\x3b\x74\xba\x1e\x15\x63\x92\x9a\x5d\x97\x62\xce\x5b\x2d\x19\x11\xf4\x93\x58\xef\x5b\x69\x14\x7d\x91\x3c\x9e\x6b\x2a\x2e\x25\x14\xb4\x79\x44\xb3\x43\xea\xd8\x75\xff\x75\x6f\x51\xe2\x9c\xa8\xf3\xfb\xa2\x99\x53\x82\x9f\x5e\x63\xc8\xb5\xc3\x13\x9a\x06\x64\x1a\x08\xb5\x75\xb5\x59\x58\x4d\x93\x81\x86\xff\xba\xe9\xff\x1a\xbe\x9e\x58\xa1\x20\x78\x4f\xb2\xa9\xca\x1c\xe4\x0b\xd2\xab\x29\xc8\x26\x86\x5d\x36\x33\x6c\x3c\x4d\x04\x56\x69\x47\xd7\x7a\xee\x4c\xb4\xa0\xb9\x6c\x9a\x1f\x77\xe9\x6d\x94\x19\x89\x49\xe1\x23\x2f\xaf\x7e\x4e\x29\xdf\x7d\x2d\x77\x3e\x44\x4f\xf1\xcd\x0a\x74\x2b\x59\x2d\x54\x57\x83\x6c\xea\x92\x18\x0e\xcc\xae\xa1\x52\xb5\x48\x08\xd1\x4a\x63\x73\xe0\x5d\xec\xd1\x7b\x08\xf2\xfe\x1d\x99\x43\x86\x19\x38\xcb\xb3\x66\xdc\xdb\x76\xd7\x2a\x70\x84\x89\x85\x12\xb1\xb2\x19\xfc\xb3\xcf\x23\x29\xa0\xab\x96\x1e\xb6\xca\xfc\xb6\x97\x66\x99\xdf\x52\x78\x32\xb6\x62\x33\x31\x62\xe9\x98\xe4\xad\x63\x46\xe3\x26\x50\x3d\x1f\xf6\x03\x14\xc5\x71\xc3\x2a\x5c\x17\x51\x08\x10\xb9\x09\xf1\x10\xbf\x99\x9a\x8e\x78\xbe\xb1\x49\xb1\x77\x3e\x31\xb7\xb0\x65\xb5\x89\xa8\x21\x76\x77\xe3\x27\xc4\x90\xad\xb8\xcc\x5a\x25\x46\xbf\x19\xb5\xce\x9b\xec\xb7\x02\xdc\xa3\xa5\xea\xdf\x7c\xf9\x35\xf3\x7a\xda\x35\x50\x01\xb2\x0f\x73\x43\x43\xac\x57\xaa\x3f\xae\xb6\x7f\x08\xd3\x7d\x12\xac\x47\xe1\xc0\x5f\x90\xc6\x94\x87\x58\x23\x83\x3e\xa1\x40\xb2\xf4\xa5\xd9\x54\x97\xd9\x14\xd9\xa2\x66\x83\x74\x41\x23\xf9\xa3\x42\x73\xa6\x4c\x26\x36\xf6\x5d\xa5\xf3\x5c\x43\x73\xd0\xa4\xf8\x56\x08\x88\x50\x1c\xb5\x68\xf6\xaf\x1d\xfc\x6a\xa8\x8a\x29\xe1\x4a\xb5\x33\x65\x86\x16\x10\x86\x50\x5e\x62\xc7\x12\x9e\x36\x6e\x9d\x2a\x1b\x05\x55\xca\xbd\x19\x2d\xb1\xff\xb0\xde\xb8\xf4\x6d\x63\x84\xd6\xd9\xd0\x4a\x74\x22\x31\xf4\x11\x1c\xcd\x40\x4d\x8e\x21\x4b\xa1\xd6\x21\x63\xd3\xae\xe7\x17\x84\x53\x8b\x2c\x4a\x51\xe5\x81\xf9\xb3\x70\xc3\x94\x97\x52\xc2\xe7\x9e\x3e\x63\x53\x57\x83\x9b\x27\xa4\xb0\xa1\xb0\x48\x0b\xfc\x8b\xdc\x83\xa0\x5d\x8c\xbf\x94\x7c\x6a\x0e\x82\x4e\x06\xa8\x6b\x5c\x21\xfa\xcd\xcc\xce\xc5\xc8\xe4\x8e\xa9\x86\x9f\xda\xc3\x63\x14\x99\xa7\x18\x88\x4f\x8a\xab\x3b\x07\xfa\xe3\x06\xaa\x2c\x8b\x1c\xe2\xf3\x1a\x99\xae\xda\x46\xe4\x07\x93\x6a\xbb\xfe\x94\x15\x26\xa2\x6f\x06\x84\x25\xdc\x7f\xfe\xb8\xbb\xd0\x8f\x06\x04\x7b\xd6\x01\x99\xdd\xd4\x94\xba\x9b\x37\xa6\x57\xb3\x21\x90\x75\x83\xac\xe7\x8d\xe2\x40\xbb\x96\x3b\x06\xf0\x0b\xf9\x15\xec\x63\xbd\x37\x6c\x06\xce\x1c\x82\x94\xf7\xf8\xa9\xa7\xf7\xa9\xc7\x8a\xa0\x2c\x1e\x32\xd3\xdd\x4f\xe7\xb2\x47\x99\x07\xef\x34\xac\x24\xbf\x60\x19\x9b\x7f\xd5\x23\xe8\x01\x7d\xdf\x3b\x02\x7d\x3d\x80\x7d\x61\x20\x7e\x3f\x46\x1a\xc6\x68\x41\x85\x78\x1f\x8f\xa2\x0a\x6b\xc5\xaa\x7f\x7d\xf0\xe0\x5f\xc1\x2b\x3f\x5a\x95\xc5\x53\x2c\xb0\xc9\x45\xf4\x38\xe8\x29\xbb\x48\x93\x66\xcf\xdf\x69\x62\x2f\x87\x50\x87\xb1\x29\x9c\xf6\x26\x23\xef\x7d\x23\xec\xb7\x49\xac\x85\xf2\xee\xc4\xcf\xd0\xce\x7d\x69\x79\x25\xce\xca\xf5\x60\xea\x93\x02\x52\xe5\x82\x9d\x01\x6d\xfb\x2f\x64\x7e\x04\x8f\xbb\xec\x43\xa3\xed\xd5\xc3\xad\xb6\x23\xfd\xb4\x25\xf2\xf7\xb8\xbe\x7a\x04\xd8\xe0\x77\x8c\x33\xb2\x0d\x6d\x35\x99\x59\x86\x40\x5a\x96\xa2\x47\x99\x5e\xe4\x64\xf5\x2c\xe1\x49\x54\x36\xac\xe0\x30\xb7\x99\xc6\x38\x0c\xb6\x87\xa0\xb5\xaa\x21\x92\x4a\xde\x7c\xdf\x5c\xcf\x70\x1b\x28\x57\xce\x2d\xa4\xe2\x62\x24\x7d\x3c\xd9\xbc\x1b\x6d\xa1\xeb\xf9\x69\
xc0\xfc\x0e\x5c\x45\x32\x2a\x96\x34\xeb\xb3\xf7\x2f\xbf\x56\xa4\x92\xc5\xaa\x54\x5f\xc9\x59\x03\xd8\x27\xd3\x54\x00\x25\x0d\x21\xcd\x77\x40\x34\x77\xee\xde\x3c\xcd\x4a\x2a\x6f\xcb\x54\xba\x91\xa3\x5c\x31\xa2\x1d\xc0\xd5\x1d\x0e\x5d\x7a\xd6\x1a\xe2\x97\x16\x7d\xfd\xe4\x6f\x64\x64\x2b\xa5\x2e\xeb\x77\x3d\x3f\xef\x3c\x98\x6c\xd6\xe6\x20\x03\xa3\x27\x6f\x3a\x84\xa6\x62\xb4\x6d\x53\x3c\xe4\xfd\xbf\xd6\xf5\x93\xe0\xa2\x5f\x5c\x7b\x1b\xa3\x03\xdf\x5b\x7a\x20\x26\xbc\xf4\x5f\xb0\x76\x71\x2a\xeb\xf2\x95\x3b\x55\x35\xd9\x0d\xdc\x3f\xe1\xdb\xdd\x4d\xce\xb9\xd0\x02\xcd\x89\xad\xd9\x89\xf5\x09\x9b\xae\xb7\x82\x70\x56\xe8\x61\x85\x68\x0a\xfe\x0b\xad\x62\x8b\x6f\xc4\x42\xcb\xe8\xe6\x77\x42\x12\xc5\x60\xf1\x48\x6e\xbc\x21\x59\x19\xc8\x75\x72\x1b\x15\x12\x5a\xd1\x3f\x45\x83\xfc\x1b\x37\x43\xaf\xfa\x22\xcc\xf9\xa8\x87\x47\xaf\x80\xa6\x8c\x41\x35\x2f\x95\x58\xb5\x0c\x4b\x42\x51\xf3\xfe\x08\xdd\xc0\xd0\xdb\x9e\x7c\xef\x1c\xe5\x54\x5b\xf8\x28\xfb\x41\x21\x83\x6e\xff\x7b\x78\x65\xe8\x7c\x70\x84\xb9\x1c\xcd\x4b\x72\x40\x70\xe0\x09\x02\x90\x5a\x0a\x2a\x89\x89\x03\x7b\x69\x45\x38\xb1\x47\x3c\x0b\xd5\x4f\x5e\x2a\x45\x90\x6b\xc5\x67\xfa\xfa\x87\x2c\x17\x49\x0a\x21\xa9\xe4\xee\x7b\x2f\x14\x3a\x58\xae\x35\x9b\xdc\x48\x97\x01\x48\xc8\x69\x1e\x70\x76\xdf\x42\x5e\xcb\x66\xa7\xc9\x93\x7b\x59\x58\xb2\xf4\x1f\xb0\xf7\xdd\xea\x3e\x2e\x9e\x13\x9e\xd1\x23\x67\x4e\xfe\x0f\xbd\xbf\xef\x6a\xd2\x96\x89\xb9\xf1\x3a\xaf\x19\xc7\xb8\x80\xf8\x39\xc6\x70\x82\x4d\x21\xe6\xe6\x92\x50\xae\xde\xb2\xfa\x9c\x2c\x26\xd4\x83\x55\xa0\x2b\xc9\xb7\x9a\xf8\x3c\xdf\x86\x4b\x35\x6f\x36\x86\xf7\x7c\xb3\xbe\x9b\xb1\xa5\x3f\x24\xc8\xc6\x96\x17\x98\xee\x3a\x53\xbe\x4e\xaf\xf2\x1f\x6d\x3b\x83\xc3\xa9\x5a\x37\x7e\xa7\x29\xd4\x3b\xe0\x30\x0f\x2e\x5f\x6f\x0b\xf7\x5a\x50\x7b\x77\x33\x7f\x6b\x3e\x9d\x57\xdc\x37\xcb\xd8\x95\xea\x41\x6e\x99\x7f\xcd\x1f\x94\x31\xca\xe4\x37\x89\x07\x50\xdc\xe1\xe1\xff\x51\x00\x06\xbc\x64\x7c\xe3\x34\xf7\x62\xac\x7e\xd0\x2b\x3c\xd3\x9b\x48\x4c\x88\xf7\xd6\x70\x71\x3d\x9a\x52\x29\x36\x1c\x6b\x77\x76\x1e\xae\xca\x50\x9f\x9e\x18\xa7\x10\x79\x58\x41\x89\xd1\xb3\xb0\x78\xb7\x3e\xb6\xff\x20\x9f\xc0\x85\x83\xcb\xeb\x35\x4e\x34\x5d\x5e\xd5\x43\x30\x9f\x6b\x6d\xec\xe2\xb1\x86\x99\xfc\xf9\xb2\xdd\xdf\xe4\x10\x5c\x2f\x2c\x51\x06\xbc\x9b\x2f\xcc\xf4\x94\x31\xe4\xb4\x8a\x58\xfa\x44\x3c\x51\x73\x8c\xfe\x86\xae\xe0\x45\x20\x36\x67\xd2\xca\xf2\xd5\x16\xe7\x57\x15\x50\xa7\xeb\x2f\x1e\xb0\x8f\xcc\xc4\x60\x69\xfa\xe3\x9e\x77\x4d\x53\x51\xe0\xa0\x01\x4b\x21\x76\xf7\x74\x26\x64\xc0\x7c\xce\x32\x5d\xa6\xb9\x00\x4b\xd6\x0d\x2f\x03\x2f\xb0\x17\xad\x17\xad\x17\x78\x9a\x89\x35\x75\x30\xd5\xa0\xc6\xa0\x66\x30\x55\x27\x55\xd2\x19\x6c\xe3\x52\xfd\x56\x54\x90\x2d\x2b\xc0\x0b\x8e\x40\x8f\x14\x43\xb0\x0f\xa1\x94\xe0\x72\x86\x97\x7c\xd9\x36\x6d\x84\xda\x7a\x9e\xe0\xc9\x21\xa9\x06\xd3\xa4\x45\x60\xcd\x3e\xd4\x5e\xa1\xb1\xd3\x1d\x19\x28\xa8\x38\xce\x9e\x94\xfa\x47\xfa\xdf\x23\x13\x7b\xda\x16\x59\xa9\xdb\xa5\x4c\xf1\x5a\x4c\x58\x14\x99\x97\x61\x31\xeb\x3d\xe8\x8e\x66\xcc\x3b\xc1\xc4\x22\xd6\x7b\xd2\x6d\x58\xa0\xf2\x0d\x02\x02\xa1\x25\xdf\x79\x1c\x88\x4b\x7e\xc8\x96\xae\xd5\x23\x80\xc5\x6e\x05\x78\x94\xd1\x58\x70\xb6\x63\x36\x97\xfb\x44\x26\x87\x00\xe8\x19\xac\x82\xb4\x34\x6b\x4b\x51\xf2\x3e\xf6\xe0\xd7\x53\x3b\x3f\x3a\xdc\xa1\x1d\x48\x75\x59\xad\x55\x45\x6d\xbb\x65\xcc\xfb\xd2\x53\x1f\x0e\x4d\x22\xf3\x02\xe8\x30\x80\xa6\xea\xc6\xe3\xcd\xda\xb2\xda\xda\x6c\x88\xcb\x6b\xa5\x61\xf9\xc0\x73\x63\xb9\x79\x7f\x80\x0b\x43\x2e\x05\xa1\xaa\xe0\xe7\x55\xa0\x1f\x5a\x9b\xd9\x5e\x2f\x74\xcc\xdf\xd1\x97\x35\xec\x9a\xe5\x8e\x5e\x88\x66\x6c\xc6\xe3\x6f\xfb\x54\x7e\x69\x54\x2f\xd5\x32
\x95\x04\x95\x9e\x76\x5e\x6e\xa5\x4f\x7c\x54\xff\xfc\x4b\x61\x0f\xee\x0f\xbf\x5f\x93\xe9\x5b\xab\xa0\x12\xa0\xb5\xf1\x3e\x92\x53\xba\x0d\xff\xe3\xf8\xf0\xb1\xed\x86\x1d\xfa\x1d\x5a\x42\x9c\x67\xf3\x34\xb9\x90\x41\x01\x04\x4d\xa4\xfb\x2b\x81\xb2\x29\x90\x1b\xb5\xe3\x63\xd9\xde\xf4\xda\x9e\xfe\xf9\xfa\x65\x7f\x3d\x5f\x5b\x24\x64\x6c\x56\x31\x80\x40\xf5\x23\x9f\x98\xa3\xe5\x7c\x13\x87\xaf\x0e\x1d\x10\x7e\xb2\x80\xee\x97\x8e\xbd\xb2\x71\xd6\xe6\x1e\x75\xc9\xa8\x7b\xd3\x79\x30\xcd\x1f\x3a\x17\x71\x5a\xb2\xf7\xc3\x47\xc8\x5a\x98\xa6\x17\xfc\x51\x4b\xdf\xbf\x79\x0f\xe9\xb3\xb3\x1c\xbb\x30\x91\xdf\xd5\xfc\xec\x96\x74\x49\x94\x8e\x58\x76\xfa\xa3\x72\x5b\x59\x7e\xca\xdc\x81\x5e\x41\xe8\x64\xa9\x72\xda\x5c\x5f\x8c\xa3\xd3\xe8\x73\x2b\x33\xbf\xf3\xaf\xa5\x4b\xa7\xa1\xcf\xd5\x57\x0a\x9f\xfa\x46\x82\x51\x0f\x30\xff\xd2\x98\xce\x09\x09\x81\x78\xb6\x02\xf9\xc0\x65\xd7\x6d\x5c\x31\x50\x2e\x3c\xc5\xce\xf3\x29\x80\x34\x1c\xeb\x49\xfa\x4e\x11\xd1\x57\xb1\x65\x29\x61\xac\x63\x0a\x9a\xb1\xde\xd5\xb9\x17\x3b\xdc\xd0\xa2\x8a\x83\xda\xee\xf5\xab\x67\x94\xcc\x41\x38\x56\x2c\xa2\x59\xd7\x5f\x35\xfa\x3a\x1b\x61\x99\x6a\x61\x56\xe5\x62\x29\xcd\xc2\x2d\xd7\x7f\xa6\x42\xd5\x8c\x7e\x2f\xbc\xfb\x59\x7c\xd9\xed\xd5\x6d\x4e\x0a\xd2\x03\x27\x82\x97\x27\x28\x60\xe3\x73\xb0\xbd\xb5\x6a\x9c\x71\x6c\xb3\xb8\x39\x72\x6c\x1f\xff\x3c\x9b\x59\x75\x0f\xbf\xf3\xb0\xa1\xe3\x53\xc0\x39\x93\xd9\x4a\x61\x80\x8a\xad\x5f\x1e\xe2\xb0\xb0\xbe\x4e\x24\x04\x1d\xd5\x70\x54\xbc\x8e\x3f\xab\xb8\x45\xe6\x14\x11\xee\xe6\xd2\x2a\xb5\x31\x45\xc1\x60\x5f\x6a\x38\x05\xbf\x80\x7f\xb0\xa7\xf8\x5b\xc0\xf0\xd7\x7f\xc5\x93\x6e\xf1\xc5\xf6\x00\x01\x5d\xab\xfb\xdb\x58\xf7\x28\x51\xb9\x90\xa1\x76\x6b\x4e\x75\x26\x92\x6d\x5e\xd5\x86\x6b\xdc\x71\xdb\x74\xd1\x2d\xe8\x50\x6c\xe7\xc8\x68\x2f\x6a\x23\xae\x64\x21\x44\xde\x8f\xe4\x47\x59\x0f\xe7\xbf\xf5\xb5\xa0\xc5\xd0\xd7\x6c\x9a\x6c\x28\xa4\x85\xae\xe3\x98\xd0\x1d\xfa\x80\x09\x0d\x9b\xd6\x33\xbe\xaf\xf1\xb5\x91\xc6\xc3\x0c\xe3\x70\x17\x17\xd2\x9f\x0e\xb2\x74\x39\x66\x57\x2a\x50\x92\x1b\xfd\x6b\xc2\xbb\x70\x75\xf1\xf4\x73\xa4\xc3\x07\xa9\xbc\xbb\x27\x07\x8c\x6a\x7d\x8e\xa5\xf8\x07\xac\xf3\x07\x07\x34\x0a\x84\xc2\x72\x0a\xc6\x6b\x0d\x55\x74\x88\xa0\xe7\x57\x42\xd8\x84\x52\x19\x7c\x89\x56\x64\x5f\xe4\xeb\xf4\x26\x69\x67\x57\xbb\x26\xc7\x59\xb3\xe7\x8f\x53\xf9\x50\x74\xdf\x4d\xc6\x28\xb1\x43\x3e\xb6\x26\xba\x5b\x6a\x6c\xe6\xf7\x6e\x67\x76\x47\x17\x44\xec\x9e\xe9\x44\x98\x71\x33\xde\x23\x7d\xbd\x7c\x18\x93\x88\x37\x7d\x5b\x6b\x41\x11\xb6\x1c\xa9\xa0\x7e\xed\x68\x66\xb2\xbf\xd8\x9f\x94\x77\xb7\x9d\xae\xdb\x05\xd4\x20\x97\xd3\xd1\xc7\x5e\x59\xdb\xbf\xe8\xa4\xcd\xc2\xcc\x90\x57\x58\x30\xfb\xb1\xcd\xe2\xc3\xd2\x1a\xf2\x63\x2a\x83\xef\xf4\xb7\xeb\x1e\xf0\x51\x2f\xb7\xdc\x68\xd1\xc9\xe2\xd3\x47\xb9\xdc\x5c\xac\x61\x24\xee\xb8\xe7\xe4\x09\xe8\xd7\x66\x97\xfb\x34\xe2\xaf\x54\x6b\xc8\x1c\xeb\x7a\xdb\x1c\x7b\x2b\x1e\xeb\xe8\x38\xab\x77\x48\x55\x73\x08\x70\xc2\x0b\xf8\xba\x94\x2a\x34\x84\x38\xb7\x4d\x7d\xe7\x6e\x2f\xcc\xf2\x5d\x5f\xde\xa6\xc6\x06\x12\x54\xff\xb5\xb8\x0f\x8b\xf5\xcb\x2d\x8a\x7f\xab\x91\x93\xeb\xf0\xa5\x58\x2c\xb7\xaa\x9b\x8f\x58\xc3\x3f\x9c\x8f\x15\x7e\x39\xb4\xa1\x51\xc3\x59\x0a\x74\xf8\x31\xd3\x64\xe2\x20\x69\xfc\x05\xda\x62\xcf\xea\x99\x99\xb0\xba\x30\xbf\x8b\xf4\xaa\x3a\x56\x82\xe3\x7f\x67\x9e\x06\xda\xe0\x03\x05\x33\x09\x52\xf2\xa0\xe9\x90\xe0\xdd\xbc\xe9\x0f\x5e\x16\x03\xc5\x9d\x68\x95\xd0\x47\xa8\x1a\xb7\xd8\x06\xf4\xeb\xb8\x8f\x01\xb9\x03\xf6\x30\xef\xa0\xd1\xe9\x4f\x9f\x9e\xdc\x1a\xf6\xf6\x49\x61\x3c\x0f\x80\x5d\xf3\xb0\x6d\x54\xb7\xac\x5a\x2b\xd6\x28\xc6\x51\xae\xc7\xee\x29\x11\xac\x59\x36\xc2\xc6\xdd\x9c\x9
9\x4b\xff\x43\x6a\xab\x2f\x7d\x47\x9a\x5c\xf1\x28\x73\x9e\xf3\x10\x0f\xdb\xdf\xe4\x95\xa9\xdc\x04\x00\x40\x30\x48\xc7\x26\xc8\xcd\x39\xc8\x5d\xd9\x35\xc0\xdd\x39\xc8\x1d\x80\xcb\xc0\xe4\x25\x61\x30\x49\x98\xbc\xa5\x8c\xbc\xf2\x5d\x45\x65\x59\x79\x49\x19\x45\x65\x19\x19\xd4\xdf\xa9\x96\xff\x07\xa0\xfd\xdd\xbc\x1e\x85\xff\xff\x01\x7f\xa3\x1f\x18\x00\x00\xc0\x40\xc7\x14\x51\xaf\xe5\x14\xff\x7f\x02\x00\x00\xff\xff\xfd\x02\xe2\x19\xb0\x11\x00\x00") + +func filesImagesMeowserPngBytes() ([]byte, error) { + return bindataRead( + _filesImagesMeowserPng, + "files/images/meowser.png", + ) +} + +func filesImagesMeowserPng() (*asset, error) { + bytes, err := filesImagesMeowserPngBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "files/images/meowser.png", size: 4528, mode: os.FileMode(420), modTime: time.Unix(1509717508, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _filesImagesPending_approvalPng = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xb4\xba\x77\x50\xd4\x4f\xd3\x2f\xba\x80\xa2\xe4\x20\x48\x12\x90\x9c\x33\x48\x06\x15\x11\x05\x04\x01\x77\x59\x72\x06\x5d\x72\xce\x41\x41\x72\xce\x19\x01\x49\xbb\x0b\xec\x92\x73\xce\x51\x84\x25\x4a\x92\xbc\xe4\x9c\xe1\x16\xfa\xfc\x9e\xe7\x3d\x55\xef\x7b\xce\x3d\xb7\xea\xee\x1f\x5b\x53\xd3\xdf\xe9\xe9\xfe\xf4\x74\x4f\x7f\xaa\x26\xe4\xbd\x8a\x02\x01\x2e\x35\x2e\x00\x00\x20\x78\xfb\xe6\x95\x3a\x00\x70\x6f\x19\x00\xc0\x70\x7b\x88\x09\x00\x00\x3c\x65\x91\x63\x00\xc0\x1b\xbf\xb7\xaf\x5e\x7c\x70\xc3\xd8\x9a\xad\x74\xe4\xd2\x24\x6b\x59\xbc\x9a\x31\x32\x57\x81\xda\xa9\x17\x28\x52\x99\x63\x3b\x26\xdf\x37\xd7\xd1\xf6\x6f\xb1\x9c\x4d\x47\x9a\x9f\x9c\x0b\xf0\xb1\xcf\x8b\x16\x80\xa9\x37\xa6\x67\x55\x2b\xaa\x99\x01\x00\x40\x21\x15\x12\x17\x00\xf0\x03\xdf\x07\x00\x00\xff\xe7\x51\xcb\x6d\xec\xdd\xf8\x39\x1c\xf3\x5f\xb3\xff\xdd\xe8\xff\x85\x9e\xff\x8c\x00\xc6\xe4\xff\xdb\x91\xb3\xef\xe6\xe5\xb1\x81\x3e\x64\x70\xf6\x74\xd7\xb5\x1a\xe5\x62\xec\x8e\xec\x01\x43\xa0\xd3\x8b\x3b\x7d\xe8\xbe\x43\xe4\xf7\x94\x27\x26\xe1\x02\x5d\x94\x26\xe1\x45\x32\x20\xb6\xf3\xa7\xb8\x08\xad\xdb\x4a\xb0\x1d\xb7\x24\xb0\xa0\xfc\x6c\xf2\xdd\x15\x56\xc4\x1f\xa1\xc0\x6b\xf8\xe3\xbc\xe9\x1a\xfd\x77\xa8\x94\x12\x06\x0c\x00\xe0\x8c\x72\xef\xdd\xc5\x91\xad\xa3\x9c\xec\x63\x3e\x90\x05\x31\x00\xf0\x14\x34\xe1\xeb\x83\xac\x45\x52\x87\x51\xc0\x84\x1e\x02\x00\xc4\xb0\x4e\x3e\x4e\x11\x7b\xe3\x7d\x63\x67\x6c\x0c\x00\xc0\xb8\x72\xc3\xe0\x9c\x1c\x76\x67\x50\xa5\x39\x13\x08\x03\x00\xe8\x80\x58\xee\x34\xf9\x9c\x5e\xbf\xd8\x5d\x5b\xb2\x7f\xfc\x88\x06\x49\xf3\xa0\x13\x22\x02\x00\x00\x3a\xd0\x5d\xb6\xb6\xcb\x4b\x3e\x12\xaa\x4f\xc2\x35\x8c\x2b\xad\xee\xd6\x88\x38\x08\x67\x08\xf6\x3e\x10\xd8\x88\x57\xf7\x04\xde\xe1\x14\x8c\x1a\xff\xe5\xdd\xe9\x09\x6d\xdf\x3a\xdd\x39\xd0\x18\x5d\x42\x92\xd4\x77\x5e\xc2\xec\x53\xc0\x3c\x38\x77\xe0\x6c\xbb\xce\xf3\x29\x1e\x5f\x95\xcb\x97\x76\x99\x19\x57\x32\xff\xc1\xe1\xc7\xa5\x78\xc6\x3a\xaf\xac\xf7\xcd\x94\x58\xb2\x4a\xc7\x3f\xd3\xc7\x6e\xe9\xef\x9a\x0b\x3c\x90\x78\xf9\xf3\x06\x11\xfd\x1d\x90\xb7\x77\x16\x40\x2a\xd6\x13\x3a\x20\x32\xd3\xe4\x30\x16\x8c\x3b\x57\x96\xfc\x87\x12\xbc\xf0\xcf\x0c\x3d\xf5\xe1\x89\xf7\xee\x22\x75\x22\xb1\xab\xdb\x4b\xc4\xbd\x7d\x75\x98\xf8\xaf\x1d\xad\x87\x65\xf9\xaf\xa7\x21\x94\x47\xbe\x9b\xbc\xd2\x02\xd2\x74\x34\xc8\x34\x95\x27\x04\xd5\xda\x8e\xa7\xa1\x28\x07\xc0\x9d\xc1\x9f\x92\x64\x80\xbf\x20\x22\x1d\x27\x1b\xb3\xbd\xfb\x88\xb3\x00\xc4\x5d\x08\xd9\xfd\xc1\x3c\x73\x0a\x83\x0a\x8d\x4e\x0c\x53\xd3\x05\xe4\xf1\xd0\xbb\x49\x62\x98\xc0\xf5\xce\xe5\xc6\x4b\xd6\x73\x44\xe3\x1e\x02\x5c\x4c\x72\x17\x4c\x6a\xc2\x2a\x8a\x0
e\x48\xe3\x1f\x55\x88\x9b\x7e\x2e\xe3\x4a\xfd\x3f\x60\x8d\xd8\x1d\xce\xef\xee\x92\xc1\x8e\x31\xee\x82\x10\x46\x84\x35\xcf\x0b\xb2\xfc\x63\x64\xe0\x75\xf1\x6e\xe7\x3f\x2b\xaa\xf7\x2b\xc1\xc5\x14\x7f\x14\x95\x1d\x0f\xff\xa3\x88\x7f\xc0\x67\x97\x57\xda\x7d\xaa\xdb\x6a\x58\xf6\xc7\x66\x83\x90\xcc\xba\xf2\x41\xeb\xbe\x18\xcd\x91\x4f\xa4\xc5\xa7\xc1\x59\xb6\xbb\x40\xd9\x48\x21\xa9\x59\xe3\x72\x6f\xd0\x0a\x19\x9d\x0a\x7c\x27\x7e\xb3\x79\x05\x8c\xef\x47\xdd\x0f\xec\x7d\x97\xaa\x45\x3b\xd0\x22\x98\x77\x06\xe7\x5e\xfb\x8e\x32\x37\x3b\x55\x10\x7a\x34\x5b\xdd\x1e\x9b\x5b\xec\xef\x97\x2b\x1d\x87\x04\xff\x14\xa2\x58\xf1\x4f\x76\x52\x3c\x82\x82\x1b\x1f\xff\x09\x24\x61\xc9\xed\x81\xfa\x48\x4a\x52\x62\x54\xf9\x8b\xdb\x89\xeb\x62\xfa\xb2\x4a\xd5\x03\xb5\xdd\x81\x07\x81\x49\x59\xa9\x32\xf2\x56\x1d\x36\xfb\x37\x10\xcb\x1d\x19\xa4\x07\xcf\x1f\xf7\xde\x9a\xca\x64\x44\xb9\x8c\xa2\xc2\x84\xa5\x6f\x43\x7e\x1b\xee\x2f\x18\x1f\x53\x63\xdc\x99\x9e\x7c\x72\x25\x97\x7b\x3b\x29\x69\x9b\x35\xe5\xc2\x19\x7a\xcb\x10\x3f\x36\x36\x76\x8a\x6a\xa0\xf6\x58\xcf\x66\x95\x93\x0b\x0c\xd4\x8d\x71\x1c\x12\x5f\x4b\x77\x17\x5f\x4b\x77\xd4\x8e\xcd\x63\xb1\xf4\xc9\x1d\xdc\x62\xf2\x15\xfa\xb5\x5b\xc2\xb2\x89\xd6\xa5\x1f\x3e\x71\x49\x56\xbd\x14\x3d\xd9\x77\x5f\xee\x2a\xc1\xca\xd1\x05\x7d\x1f\xb0\xd7\x5c\xe3\xb9\x0f\x72\x25\x93\xf6\x18\x36\xb0\x39\xdb\xb6\x7c\x5d\x77\x5a\xc7\x97\xde\x47\x00\xb4\x16\x5d\xfe\x99\x32\xee\x3c\x33\x3f\xbf\x51\x37\xb4\x92\x3c\xb7\x37\xa3\xa9\x8a\x45\x5d\x9c\x6e\xd8\x9d\x92\x52\x77\x26\xbb\xdd\xcd\x1e\x9a\xc3\x1e\x8a\x09\x53\x39\x9d\xdb\x43\x59\x64\xb9\xc3\x4b\x9c\x7c\x54\x12\x2a\xc3\x85\x4f\xc8\x37\xcf\xf2\x5a\xd0\x3f\xb0\xaa\xaf\xde\x93\x2c\x3a\x37\xbb\xa1\x7c\x9b\xbc\xa7\x50\xb2\xee\x5f\x4b\x12\x1c\x7b\xb1\xf7\x7d\xcf\x07\xc2\x5e\x93\x2e\xa7\x47\x97\x6e\x75\x69\x31\x8d\xfb\x36\x79\xb7\x3a\x67\xf4\x0b\x74\xfa\xbd\xe6\x62\x9b\x24\x31\xcf\xe7\x75\x13\xbd\xd3\x9c\x33\xca\x85\xe8\xab\x1e\x91\x72\x18\xce\xfa\x9a\xf3\x19\x05\xb4\x62\x7d\x0e\x14\xeb\x39\x2a\x2d\x6d\x3e\x6a\x60\xb0\x34\x14\x41\x50\xdf\x07\x51\x70\xfe\xf9\xb2\x85\x73\x94\xf6\xf6\xbc\x45\xce\xf5\x66\xba\x80\xdd\x8c\x66\x6f\xf5\x3e\x50\x10\x3a\xfa\x16\x6b\xaa\x05\x62\xe8\x7c\xcd\xac\x59\xca\xfb\xc9\xc1\x30\x61\x3e\xde\xf1\x01\xec\x1a\xfb\xb5\x1a\x99\xbc\x1a\x19\xf8\x7e\xa5\xcf\x5e\xf6\x8c\xef\xcd\xd1\x92\xeb\xa7\x6c\xe1\x07\xdc\xce\xf4\x43\x57\x9b\x6b\x06\x5b\x1f\xb7\xe8\x0d\x7c\x7a\xc9\xe7\x7d\x53\x8c\x05\xaf\x25\xd7\x36\xb5\x59\xb0\x60\x9f\x51\x61\x1f\x3c\x7e\x8d\x14\x3f\xaa\xc1\x06\x61\xc1\x2c\x66\x5b\xfe\xe5\xba\x72\x69\xfd\xf1\xae\x6d\xf3\x52\x33\x7d\xdd\xda\xf4\xf0\x9c\xef\xa1\x6b\xc3\xfa\xc5\x99\xbf\xe6\x66\x06\xcc\xe7\x1c\x7a\x93\xa6\x39\x81\x3f\x7b\x55\x20\x2a\x46\x56\x7f\xd1\xcd\x97\x65\x2b\x7d\xbc\x5e\xdc\xb7\x3f\x6b\xe9\xbb\xdf\xd7\x02\x31\x3c\x66\x4c\xf0\xf9\xba\x3f\x2c\xd0\x6d\xe3\x70\xe7\x8c\xfc\x9d\x59\x86\x09\x6f\x75\x39\x9f\x54\xd2\xea\x5d\x36\xcb\xb9\xea\x5d\xdb\x93\xd7\xc7\x1b\x01\xb9\x0a\x09\x3b\x5f\x15\x0c\x89\x96\x42\xde\x25\xa8\xc9\x10\xa1\x1b\xce\x87\x6d\x1b\x4e\xeb\xf8\x38\x83\x1e\xc7\xcb\xc6\x96\x74\xfb\x76\x2b\x78\xf9\xc5\x29\xf5\x05\x17\x0f\x56\xb3\xcf\x56\x92\x50\x59\xc1\x2f\xda\x1c\xf3\x16\x75\x68\xb3\x39\xbb\x77\x50\x4d\xb3\xe7\xf3\x51\x75\xe4\x88\x5f\x76\xb9\xe6\x38\xe7\x0b\x91\x95\xb2\x52\xbf\x8f\xab\x7c\x1b\xfa\x33\x46\xbf\x00\xbd\xd1\x43\x5c\x32\x10\xa6\x8f\xf7\xbb\x30\xb1\x14\x64\x4f\x79\xb7\x86\x47\x4d\xd6\x12\x25\x75\x33\x27\xb9\x79\x36\x73\x5c\xdc\xf4\x41\xa8\xf5\x1a\x75\xe9\xa8\x6d\xaf\x23\xe6\x00\xba\x2a\x5b\x41\x30\xc5\x2a\x71\xf9\x80\xdd\xf3\x7c\xa9\x29\xab\xe6\x64\x7e\x79\xdf\x48\x5e\x75\x4d\x32\xa0\x26\x42\x5b\x
c5\xfc\x12\xee\xa5\xf3\xa3\xc8\xea\x49\xdf\x34\xa1\x7c\x16\x0e\xac\x93\x9e\x2a\xf2\x9b\xde\x8a\x9e\xf7\x0f\xe7\x8d\xb6\xeb\x29\x1b\xc3\x3b\xdc\xe5\x3f\xa3\x8a\x1b\x2e\xf4\x69\xb3\x5f\xa5\xe5\x80\x62\xd1\x2c\xf7\x61\xaf\xe2\x41\x18\x3c\xbd\xa9\x26\x19\xf1\x4b\xd1\x49\x63\x05\x8e\x81\xe7\x24\x9d\xbe\xa1\x57\x0d\x2d\x92\x05\x5c\x4d\xbe\xe7\xfa\x3e\xab\x5a\xef\x05\x61\x4f\x6f\xec\x94\x8f\x25\xd2\xe2\x1c\xc8\x54\x47\xb7\x17\xb1\xfb\x47\xab\x49\x2c\x76\x2b\xe1\x57\xd6\x36\xd1\x2c\x6f\x0c\x14\xab\x4a\xf1\x5c\x09\x25\xcf\xf7\x52\xbe\xc5\x18\x13\xf5\x0b\x08\xfa\xb9\x95\x96\xa8\xe0\x1d\x1c\x95\x5f\x14\xdf\xcb\x4a\x6d\xcc\x69\xcf\x1d\xfd\xd0\x64\x37\xe9\x79\xef\x68\xf7\x17\x4c\xb9\x83\x81\x50\x20\x0d\x5e\xf7\x4a\xd2\x93\x2a\x44\x0e\xce\x7c\x33\xc5\x21\x3e\xcc\x35\xad\x19\xc9\xfe\x61\xfc\x83\x9a\xf7\x4d\x4a\x2a\x88\x28\x27\x9c\x0c\x31\x9d\x3a\x70\xcc\xb4\x33\xd3\x44\x46\xfd\xf4\xd9\x0f\x7a\xbe\xd7\xdd\xd2\xb9\x78\x84\x54\xa2\x51\xe1\x7c\x24\x97\x1a\x06\xf4\x83\x89\xaf\xb1\x66\xfe\x9c\x3f\x76\x44\x68\xae\x3f\x7d\xcd\xf1\x03\xdc\x17\x4e\x57\xa5\x2d\x40\x36\xce\xde\x85\xc9\x6b\x1a\xb0\x73\xfc\x2e\xcb\x2b\xc1\x84\x03\x05\x4e\x63\xc5\x97\x9d\xca\xde\xce\xa9\x26\x5a\x82\x8b\x2a\xbf\x7a\xcf\xca\x2a\xd5\x6a\x01\xe4\x2c\x02\x87\x18\x36\x4c\x7b\xf7\x9f\x0b\x52\xa5\x6b\xdb\x44\xb3\x80\x63\x6e\x74\x5c\x23\x73\xf7\xe4\x0c\x2b\x55\xf4\x5f\x7e\xa7\x31\xc5\xf7\xec\xc4\x10\x94\x2b\x13\x4a\xc0\xd1\xf6\x9f\x7a\x47\x58\x8f\x25\xda\x0e\xc5\x8e\x91\x1e\xa8\xd5\x3e\x25\xca\x7e\x03\xf1\x56\x70\xeb\xcf\x0f\xcd\xb7\x6f\xb3\xfb\x52\xc9\xea\x38\xe8\x10\x34\xb3\x58\x63\x4b\x49\xfb\x8e\x27\xb9\xd8\x66\x9f\x8f\x32\x6b\x98\x08\x7e\x42\xfe\x17\xf4\x3c\x0e\x04\x74\xdd\x9a\x2d\x70\xbc\xaa\x7d\x55\x81\x97\xfd\x80\xe6\x33\xab\xbf\xb8\x5e\xb4\xe8\x11\xc9\x83\xf9\x06\x33\x0d\xdd\xc8\x34\xdb\xde\x2f\x68\xa0\xfb\xe3\xfc\xf5\x81\x3d\xdd\xcf\xdc\x3c\xdf\x98\xbd\xb0\xf0\xce\xd3\x39\x29\x5c\xb2\xfc\xd8\xa9\xef\x50\x86\x7a\x71\x3b\x8a\x3d\x4d\xd3\xae\xbe\x87\x45\x8f\x0b\x83\x44\x3a\xc4\xbf\xae\x0f\x8e\x97\xdc\x4e\x80\xd5\xc1\x94\x41\x05\xf5\xbf\xde\x0e\xcd\xd6\xbe\xc2\xd3\xd2\x78\x14\xff\xb1\x6a\x73\x4f\xa5\x91\x5b\xdc\x21\x7c\x94\x2c\x8d\x7b\xe9\xa8\x38\x99\xc8\x25\xb9\x99\x20\x46\x56\x7c\x6a\x55\x47\xb1\xb3\x94\x26\xbd\x5a\x32\x28\x0f\xc3\x46\xba\x95\x3a\xce\xbd\x26\xdb\xeb\x22\x7f\xca\xb3\x86\xd6\x0d\xb3\x1a\x0f\xcc\x93\xa8\xba\xdf\x18\x5d\xf9\x9b\x2f\x39\xec\xdc\x35\xe8\x76\xea\x6f\xae\x0d\x49\xc3\x7f\x93\x68\xd0\x86\xdc\xe3\xd0\xa8\xc6\x84\x59\x7c\x6f\xd4\x61\xaf\x8c\x8b\x1b\xeb\xa3\x75\x15\xe2\xde\x48\xe5\x32\xc4\x16\x6a\xfb\xae\x16\xfa\x2b\x6f\x21\x4d\x40\x7a\xf8\x8b\x71\xba\x76\x29\x7d\x1f\x17\x68\x6d\x01\x95\xb8\xf1\x8c\x22\x37\x7d\xb8\x6a\x8c\xe0\x53\x0d\x46\xe2\xde\x34\x87\xac\x64\xa9\x7a\x1f\x55\x89\xbc\x26\x52\x66\x52\x95\xae\xcf\x70\xff\x57\x19\x94\x95\x83\x40\xc6\xc8\x99\xf3\xbd\x2a\x40\x23\x20\x28\x56\xa6\x10\x9d\x52\x99\x4a\x28\xf7\xd1\x61\xc9\xf2\xf7\x46\x10\x2b\x4c\x62\xe8\x01\x16\x22\xc8\x9f\xcd\x2a\x4e\x6a\xca\x79\xa5\xa2\xb9\xa5\x77\x23\xf2\x77\x5d\x5e\x28\x39\x62\x3a\x79\x00\x75\x2e\x47\x20\x49\x27\xb8\x03\x04\xc5\x4d\x29\x87\xbe\x47\x8f\x18\xed\x0e\x3a\x1b\xbd\x54\x69\x8c\xc1\x4a\xfc\x7b\x36\x24\x64\x75\x25\xb2\x7c\xbe\xf3\xd4\x2b\x1e\x32\xe5\x73\x12\x83\xf5\xb8\x1e\x94\x2d\x03\x1f\xcc\x4b\xb8\xde\xeb\x87\x06\xb7\x91\x68\x49\x4c\x3b\x16\x76\x1f\xd4\xc9\xd7\x54\x9e\x71\x24\x2c\x7e\xc1\xdf\x34\xc3\x0a\x43\x24\xd8\x43\xc3\x89\xe6\xc1\x93\x2e\x94\x90\x47\x64\xc5\xe7\x21\xf5\x25\x34\x42\x4a\x67\x0c\x46\x5d\x83\x5b\x6b\x74\xb1\xd9\xc9\xe9\xf0\xfa\x89\xb0\x38\x7d\xb3\x1c\x5a\xf7\x30\xba\x72\xdb\x49\x2c\x5d\xa9\x05\x5c\x29\x90\x49\
x1a\x21\x9f\x1f\x30\x93\x4d\xf9\x82\x3a\xb3\xf0\x53\x67\x07\x48\x4e\x2c\xf8\x98\xa2\xf5\x6d\xe2\xc6\xed\x15\x07\xd5\xb2\xb1\x88\xc2\xe7\x95\x59\x58\x71\xaf\x2e\x42\xb7\x9f\xf0\x32\xa0\x59\x00\x85\xe8\xab\x72\xfc\xf8\x1a\xcb\xd9\xf2\x46\x05\x68\xb0\xdd\x02\xb9\xcb\xd2\xfb\xa0\x6b\x70\x51\x26\xeb\xb1\xa5\x9a\xed\xe5\x84\xa7\x42\x13\xef\xd5\xcb\x0f\xe4\xef\x91\x2d\x68\x07\x71\xd6\x34\x8a\xc7\x6e\x4d\x78\xf6\x6d\x29\xf2\x15\x9a\x41\x1e\xc7\x3c\x0a\x85\x3d\xb5\x56\xe9\xbd\x1b\xcb\xe5\xa6\xeb\xc0\xc6\x8d\x0f\x8c\xde\x95\xbf\x56\x19\xd4\x25\x9b\x7a\x75\xe4\x54\x26\x08\x76\xd0\xcc\x2b\xb1\x04\xf1\x5d\x6d\x35\x82\x49\x4f\x2a\x45\xea\x91\xae\x0f\x56\xbd\xda\xca\xc5\x3b\xbe\xe2\x2c\x7a\xed\x73\xbc\x70\x2d\xa0\x39\xce\xed\x6d\x50\x9e\xb0\x73\xc3\x80\x7f\xa9\x1e\xc1\x12\xaa\xa3\x09\xda\x82\x44\x40\xd2\xb7\x9d\xca\x9a\xf2\x3e\xc0\xe3\x1d\xc8\x42\x0f\x32\x7a\x75\x91\xba\xfd\x91\xb3\x0b\xa7\xd5\x3b\x9f\x0f\x26\xd8\xad\x5e\x28\xc4\x89\x14\xb9\x3c\x19\xed\x98\x41\xda\xd9\x1a\x8a\xfc\xad\xc7\x12\x80\xde\xef\x7d\xf5\x3e\x3c\xd0\xd0\xfb\x20\x0a\x3c\x8c\x2c\x10\xcd\x93\xc5\xfd\x8d\x32\x89\xa6\xb5\xd0\x75\x4e\x70\x65\x06\x23\xc3\xeb\xf9\x1b\xc5\xde\xc4\x99\xf2\x15\x09\xe7\xd9\x6f\x66\x25\x5b\x28\xb6\xf5\x50\x5c\xa7\x13\xee\x22\x67\x1c\x78\x9f\xef\x54\xe9\x8c\xe4\x2b\x61\x72\x7c\x84\xf8\x03\x97\xdc\x2b\x71\x41\x78\x55\x70\x1e\x9d\x71\x49\x30\x1d\x06\xc8\x5d\x53\xe3\xcb\x4a\x7d\x5c\xed\x7a\x4a\x22\x6d\x49\xc0\xa9\x11\xea\x23\x1a\xe8\x7d\xd3\xc1\xaf\xc3\x15\xb7\x89\x0b\x57\xfc\x7b\xa7\x38\xf3\x7b\xcf\x34\xcf\x55\x9f\x6a\x81\xf8\xe8\xef\x83\xa2\x98\x48\xbe\x32\xbd\x51\xf0\x24\x3c\x0e\x56\x16\x31\xa5\x23\x00\x21\x2b\x4b\x01\xc9\x8f\x5a\xa0\x1e\x95\x2b\xd8\x3c\xa3\x79\x75\x52\x59\xbc\xfa\xf7\x61\xab\xc3\x8c\xfb\x4e\x8c\xaa\xdf\xcf\x9b\xf5\xc0\x28\xdc\xc5\x0d\x43\x72\xe2\x87\xf6\x19\x88\x92\x53\x66\x04\xb4\x7e\x60\x43\x65\x95\x05\x21\x31\x9e\x47\xd1\xc2\x59\xc5\xf3\x01\x76\x2a\x2f\xb8\x83\x0d\x0a\xfa\xd7\xc1\xa2\x53\x45\x35\x37\x88\xc4\x66\x6a\xc9\x7f\x6e\x41\x7f\x71\xfe\xb0\xeb\xbb\xf4\x9b\x66\xd8\xdf\xc8\xcb\x4b\x1b\x3c\x8b\xef\x00\x99\x78\x6b\x08\x34\xd7\xb1\xf9\x21\xa8\x16\x60\x02\x55\xb5\xb7\xf6\x0a\xd8\xb5\x34\x24\x6f\x67\xf0\xf8\x90\x12\x53\x43\x00\x9e\xd6\x52\x4c\x60\x6b\xa9\xc5\x86\xf7\xa1\x1e\xb5\x41\xec\x0b\x1d\xde\x85\x17\x04\x01\xdf\x0b\xf7\x57\xb3\x1e\x92\x82\x69\x89\x9e\x72\xfb\x01\xa5\x1e\x0e\x0e\xae\x55\x8d\x83\x25\x08\xe1\x7f\x2e\x4e\x43\xe0\x70\x2e\x1d\x26\x8c\xf3\x31\x58\x8f\xfd\xeb\xac\x48\x2f\xff\xd3\x55\x95\x97\xd8\x61\x9b\xd5\x18\xe5\x76\x1a\xba\x4f\xb8\x6a\x1a\x37\x9f\xf1\x94\x3a\x98\xec\x58\xef\x17\x80\x42\xed\xc6\x2d\xc9\x6b\x2c\x95\xa5\xbd\x74\xf1\xe7\xb1\x61\x22\xe7\x14\x2e\xcb\x03\xbf\x1e\x1b\x58\xb5\xc7\x2e\xb5\x71\x62\xd3\xea\x49\xdf\xae\xe7\x1a\x15\x37\x83\xac\x6e\xec\x0d\x81\x57\xa0\x24\xdb\x77\xab\xa9\x2b\x12\x2e\x67\x9a\x66\x78\xf0\x83\xe0\x4f\x62\x81\xb5\x6a\x5c\x08\x89\x7f\xdd\x1b\xdd\xf4\x35\xbf\xa3\xad\x5b\xee\xb6\xce\xf9\xfd\x70\x4a\xe2\xe9\x33\xbe\xfd\x3d\xca\xa6\x71\x33\x16\xf8\x81\x18\xc8\x30\xb3\xa8\xe8\x94\xf5\xb5\xd4\xde\x94\x73\x1a\x5b\x3d\x22\x8b\xf6\x4b\xf5\xaf\xa7\x4a\xeb\x4c\x6e\xaa\x44\xc3\x0b\x3c\x91\x28\xad\x16\x9f\x63\x9b\xbd\xb6\x48\x5c\xc7\xe1\x67\x9f\x42\xb9\x4a\x84\xf6\x33\x71\x41\xe7\x3e\xd4\x0e\x67\x94\xf9\x1e\x32\x7b\xdf\x36\x96\x5a\x38\xe7\xd7\xdb\x0f\x8a\x7b\x75\xcb\x75\xfb\x09\x6f\xa1\x56\x62\x04\xc9\xf9\xfc\x49\xa1\x75\xfc\xdd\x98\x30\xd0\xdf\xac\xa0\x6f\xd8\xee\x0e\x7a\x82\xf8\x6e\xfc\x19\x25\xf2\xf2\x0a\xfa\x3d\xce\x2d\xb4\x45\x3b\x86\x51\x4e\x3f\x9b\xe2\x14\xdf\xf3\xab\x29\x15\x1c\x73\xd9\x69\x7a\x9b\xb0\x88\x33\xc7\xec\x48\x2b\xbd\x99\xd1\x76\x84\x68
\xc6\x95\x6f\x46\xd0\x3b\x9a\x83\xca\x35\x82\x47\xb4\x87\xd4\xcb\x88\x55\x52\x81\xe5\xad\x5f\xbf\x21\x39\x40\x99\xf4\x1b\xad\x44\xef\xb0\xc3\xb1\x73\x20\xa3\x9e\xe5\x0f\xe9\xa3\xee\x20\xa0\xf8\x30\x43\x3e\xed\x2a\xa5\x8f\x98\x57\x9d\xb3\x0b\xc2\xa3\xf3\x94\xcf\x22\x6b\x6c\x57\xcb\xfa\xdd\x6a\xec\x0a\xb5\xcb\x99\xe6\x7d\x21\xdc\x0c\x8d\xbe\xfc\x1a\x8f\xe6\x43\xfd\x5f\x8b\x80\x67\x69\xca\x8e\x7c\x7f\xbb\x07\x75\xc3\xe3\xd5\xe6\x26\x20\x8d\xf7\x20\x4c\xdf\x00\xf5\xd5\x40\x2f\x17\xf3\x0e\x2b\xa6\x00\x44\x24\xd9\x35\x21\x29\xcf\x2f\x7b\x92\x81\xfd\xf4\x5c\xe9\xc6\xa4\x97\xf1\x30\xdf\xda\x5d\x1b\x16\x38\x66\xf1\x4a\x66\xfa\x58\x3f\x9b\xae\xf3\xf1\x53\xf6\xf4\xec\xb5\x99\xc7\x01\x9f\xdc\x01\x09\xdc\x5c\x1b\xc6\x25\xc8\x35\x6c\x30\xa6\x2c\xae\x98\x6f\x33\x16\x9a\x9e\x04\xcf\xc3\x61\xe5\x99\xa1\xe8\x0b\xe7\xfd\x17\x5a\xbc\x36\x4b\xe9\xdf\xfb\x08\x06\xbb\x04\x3a\x7c\x98\x22\x0b\x8c\x47\x8d\x1e\x5e\x89\xa7\xbe\xff\xd1\x93\xec\xb3\x65\x24\xb9\xca\xf2\xaa\x82\xec\xab\xc0\x93\x7e\x23\x54\x7a\xbe\x22\xf0\x38\x19\xfb\x17\xa4\x1f\x67\x3c\x63\x40\xb0\x52\x23\xc3\x43\x7d\x70\x77\x7a\x31\xfc\x20\xeb\x1b\x2f\x1c\x42\x86\x7e\x34\x01\x4e\x5a\x9d\x2c\x24\x8c\x2b\xff\xb8\x20\xfe\x2f\x8c\x63\x76\xea\x2e\x96\x6e\x7a\xdc\x7c\x80\x9f\x86\xd4\x3e\x9b\x3d\xa4\x06\xcb\x5b\x69\xb2\x25\x18\xc5\x0f\x74\xe4\x85\x03\x11\x7d\x09\xa2\xac\x42\xfa\xd7\x6f\x74\x66\x07\x9a\x66\x9e\xd2\x33\x97\x2e\x3a\xcd\x3e\x55\xc2\xdf\x5f\x04\x86\x9b\x2c\xb6\x60\x82\x81\x11\xab\x25\x8c\xc8\x2e\xe3\xca\x08\x3c\x03\xe7\x35\xab\x40\xa0\x93\xc9\x57\x39\x5d\x17\x93\xdf\xea\xfc\xb5\x31\xed\xd9\x89\x99\x3c\x70\x46\xfb\x76\x59\xb6\xc9\xc0\xde\x81\x94\xa7\x1a\x3c\x92\x9d\xe1\x90\x76\xce\xa7\xee\x3b\x16\x43\xed\x10\xf6\x4d\xec\x4c\xdf\xbf\x9d\x18\x79\x03\xfa\x18\xa5\xd5\xa7\xab\x0c\xfd\x0c\x7c\xb5\xcd\x7a\x0f\x3b\xfe\xbb\x48\x60\x27\xfe\xd7\xa4\x76\xe8\x04\xfc\xf5\xb7\xb7\xb2\xb5\x01\xea\x4c\x0f\xa4\xe7\xb7\xaa\xb4\x52\x67\xc5\x40\x0f\xcd\xa2\x6d\xac\xcc\x71\x64\x1a\xb4\xf1\x43\x89\x16\xf4\x3c\xf5\x63\xb0\x5f\x9a\x8b\xe3\x96\xb5\xcb\xf3\x54\x4d\x87\xbf\xe3\x98\xe7\x1f\x1e\x1a\x99\x05\x22\x20\x4c\x22\x84\xf6\xb7\x68\xfd\xc8\x97\x8e\xfc\xf4\xef\xcf\x94\x8d\x12\x80\x87\x0f\x30\x55\x10\x27\xa6\xd8\x4f\xa6\x3f\xf4\x13\x9e\xbe\xa9\x27\xe7\xec\xa5\x2b\x5c\x89\xea\x31\x3c\x2d\xb4\x46\x19\x37\x15\xd3\xb8\xdd\xfc\xbd\xc4\x81\x3b\x0f\x7c\x16\xc6\xe9\x37\x9e\xb2\xd0\x5e\xc1\xbe\xe3\x49\x17\x78\x80\xea\x7d\xb1\x60\xc1\xc5\x5f\x9f\x87\xa2\x5e\x93\x52\x3e\xe4\xa1\xfc\x32\x5c\x93\x83\x79\x13\xfc\x45\x96\xd7\x98\x5d\xe1\x9c\x49\xa0\x4d\x72\xf9\xf1\xc4\xfc\x72\xac\xa0\x73\x7f\x49\x9b\xea\x43\x7d\x82\xe1\x59\xf5\x15\x2b\x97\xb4\x44\x33\x3b\xf0\x63\xa7\xd6\xdb\x40\x9a\xb2\xdd\x3a\xaf\xfe\x8c\x6f\xca\x6c\x11\x9d\x56\xa3\xd2\x4c\x4b\x2e\xdf\x12\x43\xa7\xa8\x71\x22\xaa\x03\xb7\x9a\xb5\x25\xd2\xe1\xf5\x43\xae\x1b\xe4\x16\xb9\xa1\xbd\xdc\x12\x05\xdb\x76\xc5\xb5\xf5\x90\xd2\xd5\x0d\x36\xdf\x5f\xed\xf7\xeb\x7a\xd6\x6a\x11\xbc\xe0\xe4\xf2\x8c\xc1\x65\x9e\x13\xc0\xe2\x28\x10\x46\x23\xfd\xb6\x0f\x67\x3c\x79\xc0\x8b\xa7\x5c\xb2\x23\xbf\xce\x85\xf9\x72\x8b\xca\x8c\x6d\x73\x64\x31\x65\x39\xc0\xb1\x8e\xa7\xdc\x81\x52\x6e\xca\x72\x01\x6f\x6b\xb6\x9a\xa4\xed\x39\x71\xe0\xc8\xf4\x27\xff\x58\x04\x87\x38\x0e\xef\x77\xc0\x4d\x4a\xd8\x1e\x5d\x45\xee\xe9\x2a\x2e\xf8\x6b\xd6\x40\xe8\x05\x80\x18\x36\x2f\x65\x4c\xfe\x74\x63\x65\xc3\x00\xf3\x78\x79\xf3\x17\x0c\x00\x40\xd6\xa5\x13\xa1\x13\x21\x81\x7c\x24\x30\x80\x71\xa5\xf7\xa1\x97\xb5\xff\x04\x07\xe0\x39\xbc\x84\xa2\xbd\x0e\x99\xba\x58\xff\x04\x06\x30\x16\xa1\x48\x1b\xd6\x70\xcc\xbc\x2c\x23\x03\x10\xc3\x2e\x67\x2b\x6c\xe0\xe6\x6e\x48\x5c\xa7\x74\x70\x6
3\x53\xcb\x13\x00\x31\xec\x94\xae\xc4\x03\x7e\x31\x08\xbe\x4f\x0c\x2b\xe0\xaa\xdc\xc5\x02\x74\x40\x6e\x92\xb5\x4b\x34\x85\xec\xba\x1e\xc3\x9e\x93\x5b\xbb\xda\x48\x36\x4e\xf3\xc4\x87\x59\x6f\xe6\x96\xbb\xd4\xf0\x48\x63\x03\x3a\xd0\xa3\xbb\x58\x05\x00\xc9\x26\x5b\x8e\x25\xb3\x9e\x4d\xdb\xdb\x50\x20\x00\xe0\x77\x81\x0c\xfd\x4b\x5b\x8d\xc9\xef\x78\x27\xe3\x1f\x7a\xff\x2f\x26\x1b\x0c\xbc\xe3\x9c\x7f\xe4\x1d\x9c\xff\xa3\x9c\xf3\xcf\xd4\xbf\xe4\xa1\xff\x59\x42\x0c\xfb\x10\x09\xec\x5a\xc2\xba\x3c\x46\xb0\x4a\x12\x03\xde\x9f\x15\xe7\x5a\xea\x98\xcc\x54\xd1\x1b\x5a\xe5\x56\xd2\xea\x24\x94\xe8\x18\xe4\xba\x4c\x24\x2f\x5c\xa5\xbd\xde\x4a\x2f\x2a\x29\x29\x39\x39\x39\x69\x6c\x94\xde\x90\x1e\xf9\x58\x51\x5c\xfc\xae\x78\xae\xaf\x2f\xf4\x22\xc4\x54\x50\x5c\x4a\x0a\x4e\xa8\xe5\xeb\x7b\xbb\xbd\x5d\x7c\xc4\x56\xb9\x0a\xb9\x46\x81\xc2\x23\x23\xeb\x4a\xb8\x76\x83\xb5\x9d\x3c\x44\xb0\xb1\xb1\x03\x88\x49\xef\x78\x3c\x0b\xe8\xb6\xd2\x4d\x6a\x6e\x6a\xaa\xb6\xb9\x99\xcf\x37\xe0\xc0\xfd\xfa\xa2\x32\x46\x50\x40\x80\xa1\x51\x81\x56\x81\x29\xaf\xac\x4c\xfd\x26\xc7\xdf\x1f\x2b\x81\x71\x73\x73\x53\xf0\xf9\xd0\xd0\x10\x77\xbf\x20\x7e\xbb\xf0\x3b\xef\xc0\xac\xac\xac\xa4\x84\x04\xef\x9b\x2b\x77\x0f\x8f\x69\x03\x9d\xfd\x85\x56\x2c\x86\x80\x24\x17\x56\x00\xa0\x45\x36\x18\x48\x0c\x53\x50\x50\x68\x6f\x6f\x27\xa2\x97\xb9\x3a\xbf\x60\x04\xc9\xfa\x5c\x7d\xfc\x3e\x30\x30\xf0\x66\x77\x63\x73\x53\x48\x1d\x8d\x46\x2f\x0f\xd7\xd6\xd5\xa5\xa6\xa7\x8f\xcd\x00\x81\x40\x16\xac\xe5\x83\x83\x9f\xf6\xe9\x4e\x06\x45\xd4\x49\x1c\x26\x26\x26\x81\x39\x33\xc3\xb2\x3a\xfb\x47\x1b\x1a\x13\xce\x8d\x65\x42\x18\x00\xc0\x8d\x0d\xfc\x0b\x98\xfc\xe5\xc0\xd0\xd0\xea\xf2\x32\x65\x6a\x09\x5f\x00\x07\x88\x29\x26\x58\x51\x43\xee\xb2\xb3\x8f\x05\x14\x72\x87\xa2\x71\x02\xf8\x9a\xad\x96\x7c\x15\xcc\x02\xf2\xf4\xf4\x24\xe0\x31\x45\x7a\xf8\xf8\xe8\x6b\x68\xc4\x65\x65\xc9\xca\xca\xf2\x49\x7f\xf9\x1b\x97\x60\x14\x11\xbd\x0c\x25\x47\xfe\x27\x17\x17\x97\xe9\xaa\x77\xf0\x9b\xe9\xb9\x43\x5f\x38\x5a\xe0\xb8\x87\xcf\xa0\x09\xdf\x14\x9e\x99\x91\xe1\xde\x5c\xea\xc3\x01\xfa\x4a\xdc\xc1\x09\x00\x3c\x05\xf9\x87\x80\x07\x07\x07\x79\xb8\xb9\x7f\x4d\x4e\xbe\x9f\x47\xc9\x8a\x8b\x8b\x37\x6b\x1b\x84\xbc\x32\xe6\xe8\x74\x9e\xf5\xf7\xf7\x9f\x9b\x9b\x9b\x8e\xcf\xcf\xcf\x7f\xc3\x7c\x90\x21\x6f\x6b\x63\x73\x72\x74\x54\xf8\x44\x9d\xa9\xba\xb2\xb2\x28\x6b\x9e\xca\xdd\x7e\x9f\xcb\xd3\xa2\x55\x67\x30\xe6\xd5\xc8\x2d\xad\xb9\x79\x55\x39\x99\xf9\x50\xf7\x00\xf3\x43\xa1\x25\x41\x21\xfa\xbc\xef\xb9\xaa\x66\xb6\x9b\xc7\xd3\xee\x2e\xf1\x9e\x89\xdd\xd2\xd2\xd2\x22\xd7\x20\xef\x35\xd0\x44\xc3\xc9\x72\x9c\x2a\x68\x98\xec\xe6\xfc\xea\x70\xf8\x41\xc2\xe7\x37\x5b\xf0\xc4\x28\xe9\x13\x4a\x49\xfe\xdb\x9b\xd3\xe6\xea\x67\x07\x6b\xb2\xb7\xe7\x1d\x21\x8f\x54\xa5\xf6\xef\x45\x50\xe1\x44\x54\x42\x3b\x04\x10\x51\x13\x23\xbf\xdb\x80\x47\xf9\x6d\x9d\x91\xef\x63\x4b\x38\x86\x37\x23\xbc\xae\x8f\x27\x2c\xf9\x8e\x4e\x66\x6c\x1f\x0d\x73\xda\xbc\xda\xa0\x7e\xa9\x7d\x7a\xd0\x86\xc7\x87\x9f\xd3\xa1\xe5\xcc\x64\xb7\xd0\x82\x61\x8a\x7f\xdb\xf4\x6d\xf5\x57\xa3\xe7\x29\x4a\xd3\x60\x70\x5b\xe3\x53\xd5\xcd\xda\x2d\x72\xea\x38\x42\x74\xce\x70\xf2\x17\x49\x75\x8b\xb5\x43\x89\x3f\x95\x14\x26\x75\xf3\x05\x1a\x76\x6c\xe7\x7b\x94\x4b\x3f\x42\x09\x73\xe4\xef\xae\xf9\x11\xc3\xa4\x00\xb5\xe0\x6d\xad\x2d\x46\xfe\x20\x9a\x41\x64\x44\x04\x44\x1a\x09\x7b\x03\x99\x0a\xe2\xa8\xcf\xbb\x4a\xdf\xac\x0f\x35\x1b\xa3\x4c\xf3\xe9\xbf\xc8\x69\xe2\xbd\x8e\x4d\x59\xcd\x95\x72\xdb\x5b\xa0\xbf\xd9\x0b\xfa\x5c\xde\x17\x81\x6f\xd1\xf3\xaa\x70\x86\xf5\xa4\x3d\x88\xc8\x91\xe2\x26\xf2\xc9\x2d\x18\xee\xd1\x2f\x12\x54\xbb\xee\xfb\xf2\x21\x43\x00\x31\x23\x88\x09\x
94\x94\x32\x81\xe6\x08\x4e\x97\x58\xf9\xfc\x80\x6e\x61\x9e\xe6\x74\xde\xb7\xd9\x52\xd7\xfd\xf6\xfa\xb4\xd2\xee\xfe\x3a\xae\x75\x55\x95\x7d\x71\x08\x4d\xf5\x9c\x07\x2c\xcc\xed\x81\x7f\x22\x02\xe3\x23\x35\xbd\xac\xcf\x2f\xf3\x2f\xb1\xc8\x08\x5d\xa7\x11\x79\x4e\xa3\xf3\xf3\x61\x5f\x9f\xc5\xaf\x14\x55\x89\x8f\x73\xb5\xf9\x75\x83\x08\x69\xcb\xbc\xf1\x62\x6e\xc0\xb0\xe3\xe9\xd7\x85\x7e\x56\x98\x80\xaf\x6f\x1f\x5b\x5a\x76\xf9\xd5\xda\xab\x9e\x57\x71\x09\x31\x28\xa1\xe3\xfb\x79\xeb\xb7\xf2\xd3\x6c\xb4\x2f\x1f\x2a\x7a\x8a\xfe\x10\xda\x1c\x0a\xd0\x28\x50\xa9\x14\x07\x65\x10\xcb\xcd\xf4\x1f\x4f\x18\x44\xd9\x41\xa4\x0f\x9f\xb5\xce\x24\xa1\x15\xbc\x57\x7a\x8a\x53\x1a\x5a\xb9\xa6\x9f\xef\xd4\xae\xd1\xa9\x18\x34\x98\xf2\xfe\x8c\xef\x62\xac\x77\xd9\x3e\xc8\x86\x91\x52\x2d\xcf\x35\x7a\x3e\x10\x32\x7d\x36\xe6\x13\x87\xfd\xb9\x6e\x67\xb7\x47\x79\xe3\x33\x38\xaf\xb0\xac\x60\xef\x96\x4d\x27\xae\xf0\xc9\x9d\x9b\xc1\x28\x62\x24\x9a\x03\x0b\x41\xaf\x85\x88\x9c\x38\x6a\xf3\xa8\xd3\x15\x23\x28\x79\x61\xb2\xfc\x44\x62\x30\x66\x0f\x23\xf6\xc5\x52\xae\x42\xd4\x4f\xe5\x4c\xef\x13\xad\x5b\x66\xde\xbe\xda\x9d\x73\x90\xb5\xc1\x8a\xaa\xed\xfe\x76\x92\x25\xad\xb1\x4d\x29\x0a\x80\x37\x87\x87\x3c\xc6\x93\x60\x54\x22\x86\x35\x69\x7c\xbb\xa6\x9c\x16\x3e\x17\xfe\x18\x6e\xa5\xe3\x15\x59\x5c\x52\x12\xe9\x9c\xeb\x84\x44\xf7\x34\x4e\x9d\x73\x41\x7f\x3c\xae\xfd\x0d\x20\xdb\x38\x63\x18\x1a\x8c\x3e\x7f\x9f\x66\x2d\xf0\x31\xbc\x53\x82\x18\xe6\xcc\xc0\x72\x3d\x4e\x01\xc7\x65\x08\x20\x7e\xd9\x01\x09\x06\x03\x93\x8c\xe9\x91\x2f\x59\x28\x48\x82\x06\x97\x98\xe9\xa1\x70\x61\xe6\x4f\x2d\xa2\x7a\x30\x46\xe4\x63\xc6\xbd\x79\x9d\x35\x33\x37\x50\x89\x60\x48\x98\xe9\xe2\x74\xb2\xb2\xb6\x10\x91\xf4\x1b\xff\x3e\x5b\x53\xca\x24\x7d\x8f\x4f\xb4\x1d\x25\xcc\x1b\xd5\x29\x9f\x8e\x9d\x1f\x27\x4e\x07\xeb\x5f\x68\xca\xcd\xd9\xb5\x13\xe0\x57\x76\xd9\x4d\xe8\xb9\x4e\x25\x0b\x2b\x49\x18\xb5\x70\x4f\x89\xec\x74\x2d\x62\x1e\x28\x59\xbb\xcf\x7b\x9f\x92\xe4\x38\xbc\x2f\x6e\xc2\x0b\x6f\xfe\x04\x98\xf1\x3d\x46\x69\xe2\x47\x71\x1b\x44\x49\x4a\x15\xea\x1a\x78\x37\xa0\xd3\xef\x59\x15\x2f\xac\x84\x01\x19\xfd\x0d\xba\x93\x7d\x4f\xeb\xe7\xa5\x6f\xaf\x0e\xe9\x2c\xb8\xac\x08\x66\xb6\x73\xa9\x33\x0d\x7c\xce\x59\xb6\x7d\x4f\xca\xb3\x66\xdd\x96\x22\x64\x4a\x3d\x82\xad\x23\xf5\x9c\x51\x40\x29\x0e\xea\xdb\xa3\x02\xd9\x4e\xbd\x6a\x81\x09\x68\xa6\xfb\xd4\x62\xa3\x10\xa9\x29\x73\x44\x08\x94\x29\x85\x18\x76\xa8\x79\x34\x57\xcb\x78\x3e\x87\x17\x0a\x64\x0b\x11\x8c\xb2\x04\x92\x9e\xf5\xf5\x29\xa6\x31\x3f\x12\x32\x15\x1c\x7b\x4e\x88\xa9\x30\x90\xb0\xf0\x2b\x55\xe5\x5e\xeb\x29\xf7\xd5\xbd\x27\x81\xf9\xc8\x98\xa3\x97\x4d\x6f\xfe\x20\x33\x29\x88\xa4\x66\x04\xe9\x09\xd7\xcb\x94\xfc\x1a\xeb\xe7\x6b\xd6\x25\xd9\xac\xab\x15\xce\xb4\xf4\x36\xe2\xd2\x13\x61\xbd\x8f\x04\xf2\xff\x26\x4b\x5c\xaa\xba\xd7\x57\x9d\x5a\x37\xc5\xac\x50\xde\x00\x7e\xed\x58\xa7\x84\x46\x3f\x16\x58\x70\x68\xc8\xbb\x04\x99\x7c\xfd\xac\x20\xfc\x6d\xd9\x9f\x48\x66\x4f\x75\xc9\x8e\xe3\x3b\xd0\x52\xf1\xd4\xb2\xb4\x71\x9c\x2e\x15\x0c\x94\x09\xb9\xa6\x94\xf6\x78\x1f\x11\xa7\x62\x75\x60\xf1\x75\xfa\xf4\x7e\x3f\x19\xac\xe3\xbb\x78\xa1\x95\xf7\x87\xf6\xb1\x4d\x9c\x6f\xa9\x9a\x87\xf3\xe1\x21\xa3\xe6\xf3\x4d\xde\x43\x02\x58\x85\xf7\x20\xc8\x97\xaa\xe3\x2f\x9e\x79\x3d\x0b\xd2\x5d\xb7\x03\xc1\x87\xf4\x63\x2c\x84\x9b\x8c\xe5\x7c\xce\xda\xe9\xab\xfc\x1e\x72\x60\x45\xda\x41\x02\x95\xbf\x4e\xbb\xbe\x6d\xda\xff\x54\xdb\xca\x2c\x28\xa6\x99\x06\x36\x2b\xdb\x37\xbd\x16\xff\xf1\x53\x78\x83\x87\x35\x80\xf8\xe5\x43\x12\x18\x83\x31\x90\x63\xb2\x4c\x40\x4f\xa2\xbc\xb9\x62\xba\xbc\x50\x7e\x35\xc1\xfe\x5e\x9b\xca\x48\xd5\x0d\x08\xde\x56\xeb\xb8\x66\x85\
xc3\xdd\xc1\xc1\x59\xee\xb8\xa6\xf2\x8b\x79\x50\x5d\x73\x79\xf0\xa7\xb8\xcb\x16\x25\x4f\x2f\xf2\xbb\x53\xc4\x45\xf9\x6b\x79\xf2\x94\x67\xe1\x0a\x74\xee\x1d\x4e\x0a\x4a\x04\x18\xfd\x1e\x14\x17\xe5\x8b\x39\x2c\x73\x64\xae\xd7\xfd\x06\x37\x07\xbe\xdb\x69\xd6\xc6\x95\xa0\xab\x62\xde\x11\x6d\x3e\x25\xfb\xdf\x24\xf6\x7b\xfd\x64\xb0\x6d\x32\xe7\xc8\xa6\x27\x27\xe6\xb4\xd9\x62\xef\x1d\x67\xbf\xbe\x3f\x9b\x6e\x13\x83\x39\x77\x47\xf9\x31\x3d\xc6\x39\x2c\x0e\x02\x1b\x7e\x62\x07\xb1\x95\x7c\x9c\x83\xf3\x5e\x33\x6a\x92\x8d\xa4\x01\xb1\xaa\x99\xf5\x33\x0e\x8b\xa8\x04\x69\x20\x15\xde\x87\xda\x9d\x13\x6f\x9b\x2a\xf7\xb5\x85\xf3\xbd\xe2\xb5\x70\xd0\xcb\xdb\xcf\xff\x56\x09\xb9\x0e\x88\x41\x98\xe5\x0e\x01\xb4\x3f\x3a\x32\x78\xed\xb5\x31\x8f\x62\xac\x84\xc0\xa0\x0c\xa6\x99\x1f\x8d\xe4\xbc\xc7\x66\x71\x9f\x92\x55\xa1\x73\x31\x4f\xc0\x43\xe6\x52\x75\x9a\xf2\x5e\x4e\x4d\xd4\xc3\x37\xc4\x12\x2f\x88\xc4\xf0\xcd\xd2\x5e\x8f\x76\xb5\xcb\x9b\x0b\xf2\x26\xba\x16\x77\xcf\x74\x40\x9f\x6d\xd5\xdc\x40\x4f\x8d\x4d\xec\xf7\x74\xb8\x40\x69\xb5\x05\xa2\x85\x55\xd9\xd4\x87\x41\xf4\x3e\xeb\x4c\xcf\x7d\x92\xfc\x4e\xf3\xc3\x85\x86\xc3\xea\xe9\x64\x31\x0d\xec\x84\xad\xcd\x01\x05\x1d\xe8\x31\x72\x69\xc1\xa6\x9c\xe0\xe6\xf8\x2a\x02\x1d\xb1\x83\x5e\xd1\xbd\x32\x13\x48\xe0\x40\xe0\xb4\xa8\x4c\xcd\xd1\xe6\xb8\xc4\x6a\x06\x5e\x85\xb0\x61\x51\x30\xea\x69\xb5\xd0\xdd\x31\x0a\x06\x13\x90\xc0\xf2\x6b\x4f\x9a\x0f\xf4\x1e\x95\x13\x74\xa3\xfc\xc4\xfa\xef\x23\xa3\x67\xfb\x40\xc0\x59\x9a\x17\x18\xd3\xd8\xca\x2a\x3e\x87\x83\xa2\x83\x51\xce\x9a\x73\x0f\x83\xb2\xfb\xd4\xcf\x07\x82\xfa\xbb\x0a\x35\x0e\x99\xe5\xb7\xb2\xbd\x93\x4d\xb9\xe5\xed\x2a\xd0\x4e\x32\x02\xc6\xe5\xdb\xac\xa0\xe3\xc9\x6b\xae\x5d\x12\xfc\x4e\x3f\x56\x50\xe3\x26\xd2\xe9\xfb\xaf\x0f\xb6\x52\x7c\xa0\x82\x92\x8f\x26\xca\x97\x1c\x48\xce\xa5\x24\x42\x41\x49\x39\x73\xb9\x6d\x47\x57\x09\x1e\x7e\xfc\xb7\xe7\xf3\xbb\x84\xff\xe4\xf8\xf1\xa3\xa7\x37\xa8\xd8\x82\x36\x8f\xd3\x2e\x5b\x3b\x85\xf4\xd4\x56\x11\x81\xf3\x20\xde\x9a\xce\x08\x97\xa7\x5e\x91\x95\x15\xa1\x85\xb6\x83\x67\x40\xdf\x48\x23\x51\xee\x25\x85\xcd\x93\x03\xe8\xe1\xa2\xa0\x28\x70\xf2\xa3\xb6\xeb\xa0\xb7\xf5\x2b\xf8\x81\xbc\x96\xeb\x7d\x5b\x1d\x24\x24\xb6\xca\xf7\xd8\x37\xd2\x48\x31\xda\x43\x9a\x85\x16\x87\xa4\xe9\x22\xb6\x13\x7a\x23\x0b\x0c\xcb\xef\x80\x84\x45\x11\x79\x97\x77\xa0\xad\x0a\xd3\x30\x64\xb8\x40\x3c\x2f\x4f\xb2\xac\x95\xfe\x09\x17\x54\x72\x1f\x7c\x4f\x34\x6b\xed\x6a\x37\x0c\xfd\xaa\xf6\xba\x8c\x27\x3e\x8f\x13\x9f\xc9\x89\xd6\x68\x34\xba\xb8\x90\x54\xc4\x76\x3d\xe8\x7b\x47\xe5\x7c\xed\xe2\xf8\xaf\xd9\x3e\xd6\x46\xa2\x71\x7f\x6f\x7f\x7c\xf0\xa7\xed\xc7\xe3\xe7\x76\xce\x7c\x14\xbe\xa8\x59\x3f\x3f\xf9\x06\x12\x64\xdf\x7b\x82\xe9\x2a\x72\xd8\x38\xf8\xa0\x14\xbf\x89\x8e\xe0\x56\xa5\x90\xa0\x3e\x39\xa1\xf1\x5e\x96\xa3\x9c\x31\x4a\xd8\x70\x53\x7c\x30\x0c\x88\x55\xb6\x31\x84\xfe\x50\x28\xd2\x62\x91\x01\xdc\x74\xbe\x9c\x9f\x7d\xdb\xf4\x0e\xe7\x1f\x14\x9c\xb3\xb7\x32\x81\xb1\x9f\x9c\x31\xdf\x0d\xf2\xa4\xee\x38\xf0\xd1\x57\xf4\xe5\x04\x13\xe3\xcf\x8c\x13\x75\xc6\x41\x91\xb3\xe3\x7b\xdc\xdd\x90\xcf\xf3\x21\xf1\x4a\xd6\x93\x20\x49\x0b\x41\xdd\x28\x99\x8b\x42\x45\xe7\xed\x02\x91\x6d\x23\xac\xa6\x14\x75\xaf\xcd\x4e\xc8\x84\xf8\xeb\x96\x97\x32\x36\xdf\x2b\xba\x50\x7e\xee\x1b\xeb\xcf\x16\xe2\xd8\x54\x79\x5f\xc0\x3d\xe4\x0d\x32\x06\xc2\x80\x58\xc6\x66\xb9\x37\xf9\x81\x04\x73\x06\x5e\xfb\xf7\x91\xa9\xa9\x0c\xe5\xd9\xac\x64\xfe\x56\xfa\xd8\x5d\x94\x94\xbd\xd3\xd2\xb0\x78\xbf\xb2\xd8\x47\xe0\xf7\xd6\x9b\xff\x36\xe9\xab\xa0\x45\x07\xa4\x42\x91\xa3\x32\x2b\xf6\xb5\xf0\xfc\x55\x9c\xe2\xc0\x13\xcd\x11\xda\xca\x37\xa5\x5f\x30\xc0\xca\xd5\x24\x65
\xb2\x20\xd7\x8a\xda\x7a\x2b\x0b\x3d\xd7\x5e\x95\xac\xed\x97\x7a\xf2\x35\xd2\x0e\x9f\xed\x1a\xcf\x7e\x87\xa1\xc2\xb9\x57\xc6\xb1\x4e\xca\x3c\x8d\x45\x7f\xa7\x15\xeb\xc8\x9f\x14\x15\x64\x6a\xe7\x83\x2b\x09\xc7\xeb\xc8\xed\x8b\xf7\x35\xc8\xfd\x07\x2e\xaa\x1c\x12\xc6\x84\x2e\x06\x6d\x7d\x84\xb8\x68\x8b\xd7\xe0\xbf\x73\x5e\xc0\x4c\x5e\xd5\x3c\x53\x83\x53\xaf\x9d\x38\x6b\xbc\xa0\xaf\xe2\x0a\xad\xf0\x13\xee\x84\x2d\x7e\x12\x59\x08\xa1\x12\xa4\x82\xf8\x52\x6f\x9b\xa8\x6d\x47\xb4\xc7\x30\x29\x34\x24\x0a\x4e\x4a\xcc\x4c\x35\x50\xff\xbd\xb4\xac\x5f\x75\x40\x43\x3f\x48\xe6\xfd\xf4\xd0\xab\x85\xca\x02\xbf\x17\x0f\x41\xfb\xed\x35\xaf\x50\x72\x4a\xdc\x64\x04\x6f\x85\x39\x23\x2c\xe7\xbf\xf5\x55\xa9\x68\x66\x15\xc6\x2b\x72\x4d\xd7\x04\x8e\x08\x9f\x64\xcb\x05\xfd\x5a\x42\x8b\xca\x6f\x69\x91\xe0\x4d\x7e\x2f\x96\xfa\xb9\xf0\x28\xb5\x64\x61\xfb\xb3\x11\x98\x44\x91\x41\x80\x4b\x1e\xa2\x59\xbc\xaf\xe9\x0c\x20\x59\x5a\x38\xe8\x84\x62\x3b\x3c\x5a\xde\xce\x65\xdd\xa9\xfa\x7a\x77\xf9\x96\xd4\xbc\x58\xb0\xee\x6c\x70\x3a\xba\xe4\xf2\xbe\x65\x6e\xad\x18\x3b\x5c\x0d\x05\xe6\xbf\xaf\xcf\x21\x1d\xdb\x5d\x27\xec\x84\xda\xba\x5c\xa4\x5b\x65\x44\x0b\xcb\x88\xca\xad\xca\x6d\xab\xfa\x2f\xdf\x17\xa2\x06\x6c\x43\x80\x46\xa4\x07\x13\xcd\x89\x59\xe9\xce\x15\x0c\x7f\x8b\x9e\x8e\x4a\x07\x94\xcb\x61\x99\xbd\xef\x70\xca\x22\xeb\xf8\x9b\x78\xcb\xca\xbd\x03\x59\xe9\x48\x4d\xff\x37\xa3\x59\x56\x22\xe6\x67\xae\xef\xd4\xdc\x16\x3e\x3f\x78\x9f\xe6\x65\x87\x3a\xae\x98\xdf\x76\x32\x28\x5c\x09\xa3\xc4\x2e\x0a\x01\xa7\x52\x47\x69\xe7\x42\xf1\xb2\xfd\x1e\x45\x6c\xbc\xf0\x04\x8d\x02\xc9\x8d\xb2\x59\x6f\xd8\x8c\x07\x04\xf8\x48\x79\xd3\x0b\x0f\xbb\x03\x49\x82\x3c\x26\x76\x21\x8a\x9f\x14\x43\xd9\x37\x08\x38\xaa\xc3\x63\x35\x5b\x79\x0d\xe0\x75\x0c\x29\x41\x5f\x85\x05\x8d\x66\x9e\x2c\xac\xb2\xa0\xbb\xd5\x99\x52\xf9\xa1\x56\x9f\x9f\x0f\x2a\x76\xe7\xd7\xf8\xfd\x04\x79\x31\x3e\x24\x28\x1a\x9f\xb6\x42\x4e\xe3\x7d\x1c\xf1\xe3\x8b\x39\x18\x6f\x0f\xaa\x0c\x06\x57\x0a\x7f\x68\xe5\x0b\x2e\x1a\xca\x75\xc2\xbc\xf4\xb1\x30\x0e\x54\x0e\x9c\xde\x58\x26\x3f\xa7\xcd\x90\xf1\xd6\x56\xad\x8c\x5d\x73\x5f\x14\xec\xb1\x5b\x75\x85\x98\xcc\xde\x10\xff\x0d\x48\x30\x8a\xc0\xa8\x28\xd9\x1d\x29\xf1\x45\xa4\x7a\x7b\xf0\xe7\x13\xc3\x21\x3b\x96\xc7\x50\x81\x57\x60\xd1\x19\xd2\x1f\xe9\x61\x44\x58\x1c\x22\x1f\xc3\x39\xc6\x1a\x5b\x19\x96\xec\xb2\xd9\xf1\xe1\xc1\xe8\x12\x6b\x83\x0f\xa1\xe1\xcd\x3a\x30\x9a\x5f\x53\xea\x48\x09\x37\x70\x1b\x0f\xfb\x30\xb5\xc4\xb2\x8a\xd2\xae\x87\x94\x09\x41\xfc\x81\x72\x47\x09\xb6\xb1\xb3\x63\x69\x8f\x75\x64\x27\xc4\x95\x6f\x82\x86\x7f\x29\x3d\x83\xa1\x3c\x9b\xf7\x7d\x81\xff\x3c\x2b\x0d\x68\x86\x7f\xba\xb2\xcc\x5d\xe4\x2f\xb2\x24\xb0\xb4\xf0\xc4\xf8\x4f\xd5\x1a\xc6\x09\x40\x8e\xd5\xe5\xe2\xea\xb9\xe9\xf1\xf5\x4d\x6d\x2e\x4a\x1b\xde\xbe\x04\x08\x7c\x4c\x04\xa2\x7b\x89\xe9\xb2\xee\x4f\x24\xd3\xd3\x3a\x2e\x44\x58\xc0\x65\xd5\x2a\x58\x77\xd0\xc7\x35\x51\xd2\xa9\x93\x02\xb2\xd6\xa7\x7d\xc3\x80\xa3\xa5\x5a\xe9\xcd\x52\x59\xd9\x21\x7d\xf8\x2d\x4d\xed\xc1\x67\x87\xe6\x92\x85\x34\x2d\x7b\xc5\x76\xc5\xa0\x08\x26\x4b\x6f\xa1\xb1\x97\xfd\x05\x22\x51\xd3\xd1\x90\x57\x8e\x88\x15\x4a\xbe\x87\x48\x91\x22\x67\x10\xdc\x38\xeb\x37\x73\xf5\x5a\x27\x4d\x68\x9d\xe4\xc8\x91\x89\xd5\x33\xac\x08\x7b\xc6\xf1\x7c\x33\x25\x5a\x5b\xad\x1f\x32\x98\x9e\xcf\x2e\xa7\xc5\xdb\xc9\x3a\x31\x45\xfc\x06\x95\xbc\x7e\x87\xb0\x99\x97\x4e\x66\x0e\xa1\xd1\x90\x7a\x46\xd1\x9f\xe4\xb3\xc4\x12\x8b\x69\x6f\x79\x9e\x72\x71\x71\x65\x4a\x8a\xc8\x99\xbf\x9c\x15\x2d\xbb\x20\x55\xac\x0c\x9f\xda\x94\x73\xa2\xd4\x8b\x39\x71\x55\x1e\x6a\xfc\x37\xc0\x2b\x23\x8f\x58\xa7\x96\xb5\xe8\x08\xdf\x56\x79\xd9\xc5\xb6\x6
5\x5f\x39\x6f\x31\x5d\x1f\x24\x35\x7f\xa7\xfb\x39\xe9\x89\x7c\xca\xe9\x7d\x73\x54\x20\x9b\x27\x6d\x11\xe1\xd9\x65\x00\x35\x07\xf1\xd2\xfa\x51\xde\x1c\x8d\xaa\xc2\x04\xd8\x08\xb5\xd5\xed\x6a\xb4\x7d\x0e\x7a\xd9\x52\x03\x34\xec\x28\x08\x07\xe8\x9e\xed\x6b\x7d\x58\xc3\x77\x38\x77\x01\x1d\xb2\xd5\x6a\xe8\x13\x8b\x25\xcb\xb7\xa9\x13\x4d\x58\x57\xa7\x61\x7e\xb1\x6e\x9d\x3a\xe3\xd1\x8b\x91\x1e\xf6\x37\x6e\xc0\xea\xd5\x16\x8a\x98\xef\xb9\x92\x9a\x0a\x78\xef\xc9\xa7\xec\x1c\xf8\xc2\x13\x68\xff\xf0\x4f\xb9\x40\xbb\x46\x05\x03\xa5\x0b\xf7\xa4\xbf\x87\xa0\x76\x89\x3c\x42\xae\x88\xe3\x57\x72\x14\xac\xfb\x4a\xae\x92\x1a\x2f\x9f\x3d\xa7\x0c\x6c\xe3\xaf\x11\x79\x54\xc3\x48\xb4\x80\x94\x7a\x20\x9d\xd2\xdc\x86\x64\xca\xad\x27\xe8\xf1\xaa\x3a\xea\xa0\xc1\x00\x18\x93\x23\xb5\x9f\x74\x40\x24\x5b\xd9\xab\x55\xd3\xf2\x6b\x7d\x85\xcb\xde\x7b\x49\x6d\x53\x01\x00\xc8\x0b\x56\x63\x20\x9c\x11\x54\x6f\x02\x2b\x8f\x73\x44\x82\x9d\x5c\xb7\xcd\x59\xec\xc6\xb1\x0c\xe6\x71\xfe\x50\x59\xc7\x5f\x4a\x92\xb2\x81\xea\x2b\xdf\x86\x4e\xbc\x52\x0f\x4c\xd7\x1c\x31\x9d\x9f\xe5\x41\xa1\x2e\xae\xec\x00\x80\x31\x10\xde\xf4\xee\xf5\xb4\xf9\x16\xa4\xeb\xbc\x8a\xe6\x84\xdd\x61\xd4\x5c\x6f\xc2\x9d\x66\xb9\x66\xda\xb2\x48\x90\x9c\x83\xc9\xb1\x7a\xc6\xba\x4d\xc4\x24\xef\x30\xa0\x22\x2e\x0f\xe3\xff\x1f\xfe\x7c\x27\xff\x67\xc9\x7f\x27\xff\xaf\x2a\xff\xc8\xff\x3f\x6d\xf9\x47\xfe\x5f\xb7\xfc\xbf\x32\xe9\x7f\xd9\xf2\xff\x88\x02\xda\x96\x18\x26\x09\x00\xf8\xc9\xb2\x80\x3a\x39\xef\x28\xbb\x37\x1d\xd2\x03\x00\x00\x9c\x69\x18\xff\x7d\x1a\x00\xec\xf8\x33\xfd\xf7\xb5\x01\x39\x12\xf7\x8e\xb0\xfe\xfd\x52\xfe\x3f\xf2\xa7\x20\x92\xff\x51\xfe\x77\xea\xbf\xc8\xff\x51\xf9\x47\xfe\xcf\x92\xbf\xf2\xff\x76\x4b\x0f\x93\x5d\xee\xe4\xa5\x49\x24\xff\xc5\x52\x62\xca\xa9\x80\x90\x90\xdc\x82\x80\x5c\x72\xca\xdd\xaf\x95\xcc\x3c\x52\x5a\xde\x93\x13\x7a\x01\x81\x26\xac\x1f\x57\xcc\x6e\x86\x6f\x4a\xe6\xf9\xa1\x20\x95\xd4\x8f\x55\x4a\x9c\x80\xa7\xdc\xe0\x06\xd3\xd6\xd8\xbc\xf8\x77\x3a\x55\xe3\x63\x37\x7b\x23\xb9\x09\xf5\x21\x8d\x97\xa7\x51\x4f\x44\x7d\xc6\xce\x70\x08\x22\x22\x12\x0e\xc4\x9d\x2f\xf0\x27\xa8\x45\x6c\xda\xea\x22\xd2\x3e\x68\x71\x8a\xda\xcc\xa5\x0b\x5b\x5f\x4c\x86\xc4\xad\xf1\x82\xc7\xc6\xc7\x4f\xc2\xb6\x87\xb3\x64\x87\x1f\x7e\x6a\xfe\xb8\x51\x26\x59\x29\x0a\x9b\x28\xa1\x11\x32\x22\xdd\xdd\x5f\x2e\xd2\x20\xff\xf3\xd8\xa3\x1f\x8a\x84\x77\x2d\xf8\x50\x1a\xea\xba\xec\xce\xba\x2e\x7f\x6c\x9f\x72\x5c\xe3\xdc\x5e\x45\xa3\x60\xb9\x6c\xca\x4f\xc4\xac\x6a\xa4\x1e\xdd\x1c\xcc\x5f\xbe\x3c\x3f\x59\x9b\x4b\x78\x52\x28\x3d\xae\x5d\x9b\x46\x29\xed\x63\x4c\x39\xd9\x27\x62\xc9\xb5\x52\xee\x78\x9a\x69\x57\x4b\x7f\x79\x52\x1a\xdf\xf2\xdb\xac\x5f\x76\xfd\x7c\xe3\xb4\xf1\x42\xb7\xc1\x4d\x60\x6c\x2f\xc9\x57\x66\x8f\x5f\xd3\x6b\xaf\xd7\xc2\x66\xae\x21\xc5\x31\x20\xe4\x04\x3b\xc8\xf7\xbc\xd8\x55\x3d\xeb\xef\x43\x12\x33\x14\x18\x64\x2f\xeb\xbe\x60\x8d\x97\x39\xa5\x62\x46\xcb\xef\xe4\xb6\x8e\x46\xae\xe9\x3f\xd7\xad\x4f\x86\xab\xa4\x4b\x48\xe3\x2b\x67\x0f\xc5\x62\xd5\x3e\x68\x5b\xb9\x20\xa0\x9a\xde\x7f\x31\x46\x23\x4a\xe7\x81\x9e\x1a\xd8\xd6\x6d\x72\x58\x69\x26\x92\x7d\x3d\x18\xc7\xd7\xf4\x73\x7a\x3a\xd5\x37\x60\xb8\xf9\x64\x35\x63\x5a\x07\x34\xbf\x3d\x9e\x7b\x66\xf1\xf7\x15\x8b\x96\x48\x22\x30\x21\x71\xa9\x9b\x4b\xfe\x85\xc2\x92\x0f\x5a\x05\x54\x30\x90\xdc\x89\xba\xf5\x18\xbe\xd6\xa9\xb2\xde\x85\x96\x71\xf2\xe5\x59\xcd\xd4\x96\x67\x94\x41\x4f\xba\xca\x20\x7f\xb1\xd8\x28\x2b\xf3\x38\x80\x1c\x6e\x75\x91\x5c\x9f\xfa\x1c\xb5\xae\x7a\xce\xcd\x1c\x65\x35\x79\xce\x67\xa1\xc4\xe0\x2b\x07\x8b\x91\x09\x28\x9b\x2c\xdf\x10\x89\xc0\x83\x1e\xa1\x61\x58\xe6\xc1\x74\x3d\x3d\x3b\xbb\x36\x17\x17\xa8\x54\x
ed\xf6\x58\x92\x40\x2c\xf1\x67\x09\x1b\x0c\xe1\x31\x7b\x41\x67\xe0\x23\x6d\x37\x19\xac\x94\xc4\xa7\x03\xe2\x81\xdd\x39\x8b\x84\x53\xf7\xa3\x69\x3f\xc1\x2a\x78\x24\x9f\x1d\x1f\x1f\xfb\xfa\xd8\x6f\x97\x0a\xad\xa0\x9c\x7d\xc1\x3f\xa4\x4a\x8a\x7f\x4e\xab\x95\xa5\xf6\xea\x75\x6c\x19\x1c\x09\x4a\x68\xdf\xc3\x84\x4a\x34\xaa\x98\x39\xa3\x07\xb4\x8c\x00\x69\xd5\x36\x9b\x6d\x65\xd4\x26\x5a\x3c\x72\xa0\xf9\x9b\xc2\x34\x33\x14\xa5\x49\xb8\x40\x22\x50\x63\xbe\x4c\xc2\xce\x40\xb4\x41\x58\xcd\x67\xee\xda\x9c\xab\x49\xfb\x0b\x1c\x53\x04\xc9\xc2\x88\x5c\xd6\xc7\x65\x79\x64\xb3\xd2\x7d\x46\x9c\x0f\xed\x4b\x2f\xeb\x73\x7e\x2b\x25\x97\xc9\xe6\xb3\x06\x57\x89\x07\xee\x7c\xf1\xea\x64\x26\xb8\x40\x07\x65\xb5\x01\xb0\xa3\x28\x3b\xa3\x66\x5c\xb6\x3a\x3a\x98\x61\x2f\x8c\x63\xa2\x8f\x69\x44\x8f\x69\xa4\x2a\x86\xbf\xb5\x92\xfd\x5c\x64\x15\xc4\xa7\x95\xb5\x37\x1e\x6f\x38\xcf\xde\x5f\x20\x61\xea\xfd\x08\x8c\x83\x3b\xe0\xca\x0a\xb2\xd5\xde\x46\x1b\x1d\x3d\x6f\xca\x13\xc7\x7b\xac\x5f\xef\xc2\xd3\xd4\x97\xb8\x3d\x53\xfb\xf5\x90\x82\xb7\x23\xe4\x6b\x51\xc1\x8f\xd7\x09\x41\x84\xf4\xd9\xc3\x76\xae\xee\xa6\x95\x45\x79\x5f\xc9\x34\x16\x7d\xde\xda\x7a\xa2\x8f\x36\x72\x09\x22\xc4\xcd\x12\xaa\x07\x26\xd9\x8b\xad\xad\x0b\x86\xd3\x53\xf8\xcd\xb4\xd7\x2d\x7e\xec\xc5\x3c\x20\xa4\x53\x4c\xa6\x1b\x86\x5c\x2d\x17\x2d\x8e\x44\x50\x4a\xda\x0c\x37\x5d\x87\x74\xfd\x1a\xfa\xa1\x66\xf2\x72\xf8\xbb\x88\xcc\xcf\x1f\xd1\x3d\x15\x03\x34\x38\x3c\x21\x3c\xa5\xda\xf5\x3b\xd3\xc7\x9b\xdb\xbe\xde\xe1\x9f\x87\xc1\xb9\xb3\x08\x77\x3c\x91\x2b\xa1\xe8\x7f\x23\xd2\x5d\x56\x77\x75\xe6\xf4\xf6\x53\xce\x80\x25\xf5\xf5\x6d\x0c\xea\x88\x03\x52\x2a\xb7\x9f\x8a\x0a\x80\x2b\x12\x7a\x7a\xed\x03\xc5\xbe\xbf\x64\x5d\x54\x89\x01\xae\x54\x45\xf4\xcf\xef\x78\x03\xaa\x8b\x12\x51\x6f\x47\xcd\x1b\xf6\x6f\x14\x52\xcc\x6f\x6b\x76\x5b\x27\x23\x7b\x4c\x8e\xe4\xb2\x9a\xec\xb3\x94\x09\x56\x4e\x1c\xfb\xd0\x92\xf4\xae\x0a\x6e\xbf\xd5\x04\x16\xd2\x7b\x43\x9d\xd7\xbb\x65\xae\xbf\x03\x87\x1b\x2f\x5e\x83\x53\x69\x76\x78\x79\x60\x26\x6c\xef\x86\xf0\xfd\x1e\xd1\xbb\xfd\xde\x90\x2a\x83\x5a\xaa\xf9\xec\x88\x3e\xf5\xa1\x9e\xbf\x1a\x0b\x1b\x5d\x9a\xb9\x1d\x96\x8b\xea\x8a\xd8\xe0\x9c\xc3\x0a\xec\x0a\x97\xb2\xb9\xf2\x39\x55\xe5\xd5\x6d\x48\xa6\xc2\x17\x74\x9f\xa9\x6e\x30\x3d\x0f\x35\x2e\x55\x55\xf1\xdc\x35\x51\x35\xd7\xe6\x0c\x4b\xf8\xb7\x0f\x49\x65\x1e\x68\x48\x98\xf9\x6d\x3e\xde\xe1\xe7\x34\x3e\x7a\x4e\x39\x50\xe5\xaf\x0a\x2b\x2d\x20\x7e\x62\x78\xeb\x87\xae\xbc\x72\xd3\x04\xeb\x0f\x87\x3d\x90\x9c\xa7\xb6\xeb\x11\x12\x56\xa8\x2a\x16\xd7\x8f\x5e\xf2\x6d\x71\xf1\xa8\x57\x6a\xdf\x05\xa4\x95\xb4\x82\x84\x73\xc1\x96\x1c\xb5\x92\x2f\x33\x3e\x69\xb1\xe8\xa6\xd0\xbd\xd6\x06\x39\x17\xbf\xda\x38\x99\xa2\x90\xe2\xcd\x78\xa9\x2c\x38\x72\x66\x40\x24\x93\xf3\xbc\x50\xa6\xb3\xe6\xa3\x0e\xad\x7e\xe8\xeb\x83\x6d\xd5\x46\xd2\x88\x90\x68\x0a\x4f\x6f\x2f\x06\xa4\x36\x0c\x0c\x8a\xf8\x9b\x3d\xc7\x9c\xb0\xc3\xb4\xee\xa7\x39\xe2\xfd\x15\x81\x9f\x69\x59\x61\xa9\x37\x75\xce\x96\x95\x12\x82\x97\xd5\x56\x5a\x40\x25\xda\x1c\xa7\x12\x38\xa6\x68\x24\xa2\x77\x70\x95\x47\xf7\xd5\x01\x83\xb5\x1c\x75\xbc\x2a\x76\xd8\xdc\x68\x72\xee\x80\x19\xb0\x51\x70\xb3\x2d\x80\x43\x03\x28\xeb\xb8\xd3\xd0\x9f\xc4\x77\x58\x48\xc1\xab\x95\xaf\xc8\x3a\xba\x1a\x0f\x6c\x4b\x1e\x78\x65\xe6\xb3\xdb\xa9\x94\x44\xcc\x59\x42\xa7\x65\x8c\x53\x46\xdd\xf8\x51\x26\xf6\xfa\x01\x91\x03\xaf\xcd\xf8\x61\x7f\x69\x58\x2f\xa7\x7e\xdf\xc6\x40\x6a\x57\x05\xbd\xef\x0d\x0b\xea\x90\x24\xf0\xc4\x29\xcf\x8f\x99\xa0\x6d\xc2\x79\x69\x75\x70\xa3\x7c\xad\x50\xc0\x37\x4a\xb2\xf9\x76\xbf\xc0\x65\x77\x56\x89\xc0\x36\x42\xab\x66\x8c\x9d\x94\x8e\x9a\x5e\xca\x0b\x9c\xc0\xd4\x1d\
x51\x40\x95\xa0\xbc\x09\x8c\x14\xa8\x4a\x59\x61\x18\xbb\xb8\xa8\x31\x3e\x7e\xf2\xd9\x3e\x2e\xff\x07\x8e\x68\xcf\x96\x31\x5d\xe9\x7f\x39\x2e\x12\x1f\x0d\x4e\xaf\xca\x2e\x0e\x77\xaa\x15\x67\xeb\x02\x13\x22\x84\x79\x55\x41\xce\x25\x07\xd8\xb9\xc1\x3f\xcd\x7d\x06\x38\x60\xc9\xd8\x4a\xf4\x6c\xaa\x59\x6a\x1a\x46\x18\xcc\x0e\xea\x62\x34\x08\xf3\xab\xc0\xce\x2c\x87\x84\x28\xd9\x6b\xa9\x27\xc6\x75\x56\x85\xaf\x8c\xc9\x21\x1a\xb2\xb8\xc6\xdd\x24\x6d\x15\xf0\x9d\xc9\x4a\x9d\xe3\x96\x97\x1a\xb4\xc6\x83\x16\x45\x65\xd3\x96\x2d\xcf\xdd\x73\x5f\xd3\x78\x18\xda\xdb\x16\x97\x5d\x9c\x57\xcf\x07\x3d\x84\xae\x4e\xc9\xec\xb7\x29\x5d\x90\x1e\xad\x53\x02\xdd\x86\xd6\x86\xb3\x0a\x58\x23\xf7\x4f\x68\x85\x99\xe3\x5a\x83\x35\x52\x53\xd3\x8a\x48\xa8\x6b\x59\x65\x37\x63\x04\x0f\x06\x48\xc7\xed\xe4\x7d\x2c\x78\x6c\x8a\x4b\xa3\xa2\x3e\x3f\x3a\x26\x7a\x92\x92\xd4\xc3\xd0\x0f\xbd\x8b\x0d\x37\x2c\x5f\xdf\xac\x72\x26\xf1\xc1\x61\x3f\x7a\x86\xe6\x56\x2d\x13\xee\x33\xc1\x75\x66\x5a\x5b\xd6\x47\xda\x77\xa6\x7b\x0e\xa1\xd3\x42\x91\x8e\x62\xca\xa3\xd5\xc1\xa0\x35\x5e\x30\x99\xed\x60\x2a\xb2\x55\x64\x53\x34\x20\x95\x42\xdf\x97\x8b\xc9\x61\x72\x7a\x94\xaf\x11\x8a\xd3\x85\xa3\x95\xb0\xc0\x72\x14\x15\x2f\xbb\xd2\x0b\x85\xb0\xc3\x26\x4a\x60\x0f\xef\x73\x3a\xf6\xa3\x33\x76\x5c\x97\x56\x79\x42\x21\xa4\x12\x84\xdb\xe7\x9c\x35\xd7\xab\x89\x26\x29\x5d\x70\xdc\xf1\x08\x53\x53\x11\x3a\x86\x1c\x9a\x1b\x71\x39\x68\xa9\xed\x2a\xca\x75\x8e\xe9\x83\x52\x04\xb8\xc1\x4a\x9c\xc7\x3d\xca\xb4\x04\x7a\x7c\xcc\xe1\x2d\xfc\xfb\xdd\x73\x94\xce\x84\x93\xd4\x3f\x35\x8f\xd3\xec\x28\x20\xc4\xac\x1f\x9a\xba\x88\xff\xb5\xba\x64\xca\x75\xc1\x10\x43\x23\x61\x96\x3c\x11\xf8\x0e\xfc\x6e\xe6\x69\x2a\x19\x24\xd3\x77\x7b\xf0\x9b\x3e\xa7\x58\xa6\xfa\xa3\x0d\x52\x1c\x2d\xb3\x54\x7b\x2f\xa7\x69\xf8\x54\x9d\x71\x51\x19\x35\x66\xf8\x3d\x71\x1c\x3f\x8a\x99\x3a\xd6\xa6\xc4\xf7\xf8\x87\x2a\x9c\x33\x93\x63\x03\x16\x2d\x1d\xda\xca\x0e\x5d\xe9\xb5\x17\x0e\x4e\x4c\x8a\xd5\x5c\xc8\xf9\x26\xc7\x24\x73\x1b\x8b\xf7\x17\xc6\xb8\xbd\xd1\xf9\xc3\xa9\x72\xa1\x26\xf2\x1e\xf2\xc5\xa5\xaa\x51\x49\xb5\xdd\x73\x16\xff\x0a\xf7\x28\xaf\x14\x41\x72\x51\xf8\x95\x81\x1d\x39\xe1\xa7\xa9\x4c\x73\xcc\x24\x36\x98\x18\x14\x82\xdb\xcf\x19\xbd\x98\xff\xbc\x95\x1e\xb7\x1f\xe2\x43\xa3\xcc\x76\x90\xbd\x9c\x04\x3c\xa6\xa2\xe7\xd1\x01\x99\xc5\x8a\xd1\x88\x98\xf5\x70\x96\x9c\xfd\x4c\xdb\xed\x50\xa8\xc5\x2b\xd6\x8d\x08\xb3\xb9\xf7\xfe\x97\x18\x39\x7f\xbf\xe5\x54\xe3\xeb\xf2\xe0\xfb\x73\x34\x05\x22\xaf\xed\xb5\xac\xe0\x44\xb9\xe3\x13\xdd\xcd\xbb\xeb\x06\x02\x12\xca\x7f\x33\x88\x1b\xd6\x85\x93\x18\xaf\xf3\xa6\xe9\x62\xba\x9e\x3e\x9e\x19\x06\xe5\xe0\x84\xb1\xbc\xff\x02\xcf\x61\x27\x3d\x70\x32\x4a\x4b\xa9\xd7\x02\x1d\x27\xf6\xc7\x01\xad\xad\x72\x73\x53\x41\xc5\x6d\x85\xfa\xbe\xbb\x58\xa1\x2f\x53\xeb\x48\x06\xe9\xe8\x9f\x79\xe0\x4b\x77\xa8\xba\xad\xa6\xbb\xa7\x70\x95\xd1\x5a\xb2\x0e\x96\x3e\x5e\x09\x5c\xb7\x7f\x57\x1a\xd5\x55\x63\x73\x16\x73\xa4\x44\xcb\xf9\xe7\x5a\x11\x48\x04\x2e\x15\x5b\x32\xa2\xb8\x81\x20\xe4\xef\x12\x1e\xd8\x0f\xdf\x1e\xfe\x41\x09\x19\xa7\xb3\x87\xfa\x14\x49\x7a\x9e\x27\xbf\x83\x3a\x4d\x13\xf2\x32\xcd\xfa\xa1\x90\x0a\xea\x72\x7c\xad\x46\x9f\x6b\x8f\xf7\xef\x69\xcd\x20\x36\xc7\x8f\xe1\x7d\x66\xe4\xa0\x5e\xd6\xcc\xde\x08\x1d\xaa\x40\x12\x3d\x97\xfc\x08\xd1\xef\x24\xa1\xdd\x82\xcd\x7e\x3e\x60\x2e\x9a\x5d\x47\x8d\xd1\xcd\xa5\x50\x6d\xe1\x99\x3c\xd3\x7f\x41\x68\xa2\xf5\x75\xa5\xbb\x60\x2d\x06\xe5\xfe\x2e\xf7\x93\x6e\x28\x9c\x73\x37\xc6\xa5\x94\xc3\x77\x90\x93\xc6\xc7\x09\xa8\x95\xc7\xba\x46\x3a\x15\x00\x67\xf5\x78\x1a\x5d\xd8\x50\xb3\xd2\xcb\x29\xb9\xf2\xf1\xc5\x06\x72\x89\xad\x2f\x1a\xf8\x5d\x60\x3d
\x67\x1c\x2e\x88\xa7\x97\xf6\xcb\xce\xa6\xe1\xbd\x38\x13\x8c\xc2\x36\x66\xaa\xe2\x71\x27\x8b\x21\x4c\xaa\x3d\x9b\x1e\x13\x91\x6d\x7f\xa6\xd6\xce\x74\x01\xe5\x68\x3d\xb3\x2b\x2e\x55\x1d\xf5\xf3\xdd\x9a\x69\x86\xf5\xfe\xeb\xbc\x77\x95\xe1\x62\x9d\x3a\x5f\xe6\xa1\xa0\xb6\x55\x20\xed\xfe\x38\x20\x0b\x94\xe1\x80\x0a\xf2\x9d\x60\x9f\xc3\xea\xa3\x16\x18\x64\xc2\xc5\x1b\x4d\x50\x23\xf5\xfb\x95\xe2\x7d\x2b\xbb\xd6\xd8\x3c\x45\x87\xfe\xbe\xde\x49\xa3\xc9\x17\x6c\x05\x70\x46\xbd\x94\xb1\xca\xe4\x6c\xd6\x0e\x46\x6d\x1c\xe9\xc4\xa9\xc7\xd1\x40\xb3\x26\xd4\x49\xf6\xd4\x79\xc5\x4c\xf5\x04\x33\xac\xf0\xe9\x86\xd7\x92\xb2\x5a\x28\x24\x70\x26\xfe\xb2\x53\x54\xd0\x77\x84\x94\x56\xfd\x8b\x41\x8a\x75\xfe\xfe\x34\x01\x0d\x2c\xee\x08\xa4\xba\xc3\x62\xab\x61\x1e\xa0\x4d\xaf\xe4\x54\x31\x3e\x51\x80\x37\x92\x4c\x9b\x20\xc8\x93\xe2\xa0\x3b\xff\x4f\xdb\x80\x84\x57\x01\xa7\x13\x69\xd3\x52\x53\x88\xe0\x53\xcd\x78\xa9\xdd\x95\xbd\x1f\x24\xb5\x41\x6a\xaf\xf0\x19\x1a\x19\x38\x2f\x1d\x3f\x54\x5b\xe8\x59\x06\x6d\x95\x4b\xd8\x4d\xda\x79\x84\xe5\xd6\xbb\xfa\xc3\x55\xb2\xa4\x0b\x10\x70\xc1\xfa\x04\xaf\x72\x45\x0f\xb8\x27\x3e\x66\xfb\x89\xb8\x51\x24\xec\x09\x5f\xe3\x30\x5a\xf5\xfa\xc2\x31\x51\x65\x4d\x81\x53\xe8\xad\x87\x08\x15\x79\x14\x6a\xcb\x78\x43\x57\x40\xeb\xeb\xd4\x21\x05\x3b\xba\x07\xa2\xea\xec\xb9\x73\xe5\x6e\xc3\xdf\x0c\x87\x58\x19\xf9\x52\xbf\xdb\x34\x03\x28\x7a\x70\x33\x25\x5c\x7e\xb2\x42\x2c\x8e\x0f\x58\xf0\xc0\x56\x43\xb3\x1f\x10\xd8\xae\xed\xcc\xa6\x5c\x07\x1c\x5d\x7d\x82\x37\xb4\xb6\x5a\x89\x2c\x10\x3f\x07\x85\xb2\xee\x20\xf3\x27\x0a\xba\xd0\x54\x02\x25\xd4\xed\xca\x9c\x9b\xff\xae\xf6\x89\x40\xfb\xff\xb9\xf1\x31\x12\xf9\x58\x00\x87\xdc\xf0\xe8\x40\xa1\xd1\x12\x22\x63\x4a\x32\x2c\x79\x28\xad\x2f\x24\xda\x8c\x2a\xeb\x79\xca\x95\xc9\x70\x68\x5e\x87\xbf\x4d\x08\x42\x80\x13\xe8\x92\x03\x87\xcc\x90\x4a\xe1\x2e\xa6\xf0\x9b\x71\x0e\x67\x83\xb4\x12\x9c\x8a\xb8\x6c\x88\xd3\xda\xce\x53\xc0\x5c\xa8\x0f\x63\xbc\x0c\x30\x5c\xd7\xb8\x39\x97\xb9\x36\xf9\xd4\xd9\xd4\x14\xd7\x3e\x61\x3d\xd4\x92\x8a\xb0\x79\x69\x52\x94\xec\xb5\x6e\x42\xa7\x29\x30\x01\xfd\xd9\x06\xc7\x0a\xca\xda\x0c\x47\xab\xed\x99\x0b\x0c\xab\x2d\xb1\x15\xaa\x3b\xfa\x7d\x6e\xde\xcb\x45\x3f\xc0\xa7\x1d\x4a\xe4\x0f\x55\x35\x2f\xe9\xd9\x45\xbd\xf5\xf7\xdd\x11\x35\x3c\x1c\xb7\x86\x4d\x14\x27\x25\xa9\x77\x29\x27\x93\x79\xe8\xd7\x19\x9b\x1d\x1f\x9a\x52\xde\x0b\x72\x6f\x06\xdb\x3f\xa1\x2e\xde\x38\x56\x33\xc8\x08\x4e\xee\xda\xaa\xcc\x15\x98\x48\x82\x53\xdf\xb5\x86\x65\x10\x6e\x58\x27\x87\x73\x16\x30\x75\x45\x16\x86\x80\x3b\x84\x0a\x8b\x89\xc4\x0e\xf9\x05\x48\x50\xfb\x4b\x71\x32\xc0\x0e\x74\xa6\xbe\x51\xca\xba\x43\xbc\x0c\xec\x99\xf9\x79\x60\xf6\x6a\x81\x73\xd1\x04\x6f\xbe\x41\x42\x4e\xa8\xb4\xbc\x2f\x73\x6d\xa8\x94\xc6\x4a\xa0\xea\xad\x62\x37\x54\x49\x3f\xf4\x17\x3b\x2a\xe0\x5d\x2c\x2a\xef\xbe\x5f\xbd\x0e\xc9\x51\x7e\x63\xe4\xce\xd9\xae\x22\x9d\x07\x45\x12\xd0\x8a\xb0\x4a\x87\x1b\x97\x46\x79\x5e\xa1\x83\xb2\x70\x6d\x34\xe2\xff\x64\xeb\x9f\xbd\x95\x5d\xe3\x5d\x70\xbf\x84\x06\x54\xd9\xcc\xd4\x24\xc3\x43\x13\x97\xba\xf7\x51\x35\x3a\x9c\x92\x9a\xa0\xcf\x00\x73\x6e\x98\xb6\x5e\xad\x4c\x59\xe4\xd4\xe1\xe3\x00\xb9\x0e\x00\xd3\xda\x2f\x7a\xdb\xf8\x83\x4e\x57\x59\xbb\xcb\x04\xe6\xd4\x05\x09\xe3\x81\x1a\xa0\xe5\x84\x99\x13\x72\xc2\xef\xf3\x99\x11\xfc\x6b\x5c\xfe\xe0\x7b\x9a\xfb\x34\x13\xf0\x79\xe2\x1b\x15\x7e\x0d\x1e\xb9\xad\x98\x3d\x43\x67\x53\x11\x65\x0b\xef\xc5\xe3\x6a\x66\x18\xee\x0c\x67\x0e\xcc\x4d\x4c\x82\x76\xfa\xf0\x54\x2a\x06\x58\x22\xcf\x7c\xd8\xb8\x23\x0d\xbb\xcc\x7e\x30\x2e\xab\xc9\x98\x58\xac\xcb\x72\x1f\x68\x6e\x38\x9e\x4a\xba\x3a\x28\xf6\x8c\x98\x70\x5b\xda\xf0\xfd\x2
a\xc9\xec\x61\xcf\x52\x43\xd9\xf8\x47\x1d\x3d\xfd\xf9\x51\xbe\xe6\x7c\x73\xc5\x36\xa7\xe5\x8f\x58\x0a\x78\xcd\x32\x36\x1a\x71\x3e\x2c\x15\x5e\x6f\x02\xc0\x30\x56\x4a\x85\x3f\x3e\x81\x41\x11\x66\xe4\x79\xaf\x6e\x6d\x74\x10\xf7\x71\xd8\x3a\x7e\xf8\x85\x44\x4e\xb2\x7e\x3d\x61\xd6\x37\x2e\xfa\xae\x32\x51\xf6\x16\x0d\x2c\x72\x04\xb6\x8f\x4c\x06\x4c\xc9\x04\x38\x43\x9d\xfa\x77\xaf\x32\xa8\x9d\x73\x18\x9c\x71\xcf\x54\x1d\xfb\x05\xd2\x97\x07\xa6\xca\x13\x5c\xda\x99\x24\x9c\x96\x7b\x7a\x35\x0a\xd6\x35\x18\x5a\x53\x9f\x4a\x05\xbf\x23\x91\x5f\x46\xea\xd6\xb8\x09\x8c\x1c\x4c\x57\x5a\x56\x2c\x6b\x07\x04\xa3\xca\x32\xde\x68\x51\xc9\x76\xb9\xfd\x5e\xee\x95\xda\x5e\xe2\xe8\x5e\xd4\x68\x15\x8a\x7a\x26\x7c\x60\x45\x43\x4d\x23\x9e\x2a\x56\x5d\x94\x62\x99\xf1\x3b\xd4\xdd\xe6\x9e\xe7\xc4\x43\x71\xae\x59\xa1\xcd\x0f\x34\xa9\xa9\x6e\x8a\x57\x91\x79\xae\x63\x9b\xa3\xed\x05\xde\x48\x0a\xeb\xf0\x73\x7a\x4f\x57\x2b\x00\xe0\x9e\x9c\x9a\xdd\xad\x75\xc4\xc2\xe9\x27\x8c\x9d\x2a\xb7\x68\x7e\xfa\x3b\xce\xf6\xfc\x9a\x1e\xef\x0f\xb9\x9b\x03\x20\x71\xff\x0c\xbe\x80\xef\xff\x21\xad\xc4\xb0\x94\x34\x60\x01\x23\xc6\x1f\xb6\x87\xd0\x18\x47\x99\x67\xd8\x76\x9d\x8f\x8b\xbf\xfd\xbf\x67\x99\x75\x24\xff\x07\xce\xf8\x6f\x4e\xf9\xbf\x28\xf8\xdf\x6d\x80\x9c\xa6\xbb\xff\x9f\xc9\x4e\x21\x43\x00\x00\x30\xec\xf7\x3f\x31\xd9\xee\xff\x9e\xab\xa3\x86\x7c\x68\x1f\x02\xee\x7e\xe7\xcd\xcf\xda\xbe\x42\xa1\x50\xad\x0c\xd4\xdd\xbf\x35\x08\x06\x85\x9a\x3b\xff\xf9\xaf\xd4\x2a\x86\x42\x53\x51\xe6\x95\xff\xfc\x43\x40\x69\xa8\xff\xed\xa7\xff\x59\x00\x01\xb5\x67\x4a\xb3\xf7\x92\xdf\x03\x90\x5d\xb7\x4e\x56\x1d\x3e\x40\x5a\x97\x58\xcf\x54\x59\xd3\x98\xc0\xeb\x94\xdc\x81\xb4\x8e\x8d\xa3\x53\x9e\xd0\xb0\xc3\x2d\x1d\x39\xf8\xef\x53\xc8\x26\x4d\xd9\x21\xde\x58\xf4\x34\x7a\x1d\x8d\x1b\x12\x8e\x1b\x11\x4a\xf7\x84\x3a\x36\x93\x80\xf8\x25\x73\x5e\x9e\x42\x47\x47\x27\x2b\xeb\xf0\xe0\xb0\x92\xb5\x11\x64\x9a\x8c\x9a\xca\x3a\x7c\xfa\x02\x1b\x07\xf0\x14\xf4\x02\xce\xcb\xd4\x7e\x81\x8d\xbd\x4c\xbf\x30\x95\xe6\xaa\x73\x4a\xfb\x41\x0e\xc2\xc5\xcd\xf9\x7a\xfb\xe5\x5b\xf7\xaf\xba\x01\x0d\x6d\x39\x30\xd7\x9d\x46\xb7\xcb\x39\x9b\xea\x7e\x33\x86\xea\x97\x8c\xb9\xe1\x26\x94\xb4\x59\x88\xa7\x1d\x75\xbc\xaf\x7c\x5d\xea\xbd\x27\x16\x4d\xba\x56\xea\xae\x5b\x8c\x72\x5d\x8e\xa4\xa9\x3f\x7e\xb2\xd4\x54\xcd\xd4\xa4\x6f\x58\x66\xc4\x00\x04\x03\x89\x61\x36\xd5\x97\x96\xfe\xb3\x3e\x67\xe0\x9b\x89\x60\x54\x72\x00\x73\x25\x5e\x82\x48\xe5\x81\xc1\xea\xea\x6a\x64\x66\xe3\xde\xec\x91\x52\xe0\x5c\x93\x8f\x33\x00\x68\x53\xbc\xf9\xf2\x68\xcd\xa9\xf6\x84\x56\xb2\xc8\xc5\x6b\xda\xf4\xe7\xe9\xc7\x6f\xb9\x30\x9d\x8c\xca\xa8\x1b\x0c\x52\xc0\x73\xf8\x17\xb0\xde\xd5\x3c\xd4\x80\x66\x80\xd2\x3c\x4c\x1e\x1d\x05\x8e\x50\x55\x11\xa9\x72\x9c\xb9\x91\x3b\x8e\x78\xd7\x1f\x09\x4c\xc0\x85\x5c\x74\x25\x3e\xae\xf5\x08\x46\xed\xaa\x46\xab\x20\x15\xff\x9c\x44\x63\xf2\x0e\xf4\xd5\x40\x85\xb5\xf0\xc4\x18\x8d\xe0\xdc\xd5\x47\xf8\x46\xab\x93\x6a\xb4\xb5\xb0\xc5\x90\x08\xaf\xe6\xcc\xe0\x83\xa8\xa6\x28\x54\x40\xfc\x87\x22\x59\xe3\xe3\x4b\x76\x3d\x7d\x38\xa7\x59\x02\xb0\x9a\x5e\x7a\x1f\x07\xdf\x45\x2c\xc3\x61\xe3\x69\x40\xf0\x3b\x65\xc1\x48\x23\x63\xc8\xf4\x17\xe2\x30\x31\x92\x30\x70\x8f\x79\x04\x8a\x40\x96\xfa\x29\x08\xc3\x0f\x7c\x1f\xc9\xf9\x24\xce\x2c\x97\x40\x14\x45\xed\x4b\x79\x2c\xba\xb5\xf4\xb5\xeb\x9e\x43\x42\x32\x4f\x62\x29\x46\x7e\x57\x6f\x58\x52\xde\x4d\xf0\x9d\xf2\xca\x22\x67\x82\x88\xb0\x48\xa0\xd5\x15\xce\x05\xdd\x16\x61\x37\x11\xb2\x0e\xf9\x1c\x8e\x09\x40\xe2\x1a\xa3\xc6\x68\x20\xf0\x75\x3e\x13\x68\xbf\x6a\xdf\x57\xe0\x95\xf1\x4d\xc7\x9c\xd6\xde\x3a\x92\x09\xef\x88\x8f\x57\x48\x48\xcf\xdc\xdc\x3c\x9f\xbf\xa9\x
7f\x60\x60\x00\xdf\x68\x68\xc7\x74\x23\x67\x8f\xb6\xf7\xe7\x14\xc8\x5d\x7d\xc9\x62\x70\x48\x63\xb5\xce\x54\x3e\x6e\x07\xe2\xe8\x7b\xb0\xc1\x99\x24\xe7\xb1\xd6\x78\x0a\x19\xb4\x96\x74\xb1\x9a\xac\x09\xe9\xa0\xbf\x4a\x7a\xaf\xfc\x6a\x9d\x81\xf5\xd1\xad\x03\x72\x5c\x21\x7b\xbd\xbd\xdd\x2a\x6c\x85\x28\xfe\x2d\x5f\xf3\x89\x76\xaf\x11\xa7\xfc\x0b\x6f\x52\x08\xcb\x61\x6a\x09\x41\xef\xe5\x90\xe7\xee\xe1\x3c\x73\xd6\x77\xd0\x78\xab\x15\x47\x7f\xba\xda\xc7\x20\xa0\xe3\x89\x3d\xa7\xed\x06\x5e\x33\x16\x55\xb3\x8d\x71\x4a\xba\x6b\xaf\x59\x1e\x54\x09\x54\xb2\xda\x5c\xda\x7c\x3e\x9e\x64\xe0\xc3\x01\x7f\xfc\xba\xed\xe5\x71\x9b\x12\x9f\x41\xb9\xe9\x14\x50\x94\x9b\x41\xfa\x6a\x72\xc9\xe0\x03\x86\xa6\x7d\x6d\x87\x2a\xaa\x5b\xce\x6d\xb8\x7e\xd7\xf7\x9b\x54\xd8\xc6\xf1\xc6\x5a\xe6\xed\x51\xf9\x70\xc3\x0c\x23\xcd\x33\xeb\x36\x9a\xaa\x0f\x86\x2b\x75\xc7\xb6\x8d\x87\x2a\x39\x03\xdf\xae\x86\x79\xaa\xb3\xb5\xe2\xa9\xae\xb5\xd2\x54\x81\x06\xde\x4b\x11\x3c\x3a\xd5\x07\x06\xba\x49\x9f\x64\x2e\x34\xe7\x11\x47\x37\x41\x74\x99\x29\x29\x23\xc2\xa7\xab\x33\xbb\xc4\xa4\x29\x3a\x01\x9e\xad\xcf\xbb\x8b\x3d\x9e\xf7\x06\x05\x6c\xef\xd8\x0a\x09\xad\x7f\xb3\x92\xee\x5d\x94\x7d\x74\x10\xa3\xa0\xf4\xc8\x06\x8c\x0c\x6d\x3a\x18\xaf\xe0\x3e\x30\x42\x0e\xcd\x3b\x12\x46\xc7\x9a\xc6\x9e\xe2\xa7\x56\xaa\x8c\x6b\xce\x54\x7d\x4d\xb1\xdb\xf5\x27\x62\x91\x0b\x11\x14\xf2\x9a\x42\x57\x7e\xd4\x77\x40\xa1\xd8\x4d\x9c\x4b\xd7\xb0\xc5\x47\xd5\x25\x9e\xa5\x98\xbf\xe8\x59\x8b\xac\xdf\x95\x2f\x70\x5a\xad\xdf\xf5\xdd\x9b\xb2\xc8\x02\x22\xb4\x4f\x5f\xfb\x86\x7c\x94\xb9\x19\x1b\xfd\x98\xf8\xb6\x2d\x0a\xd0\xde\x68\xb2\xa6\xd2\xb5\x5f\x29\x3a\x77\xc5\xb6\x0d\xb9\x5c\x3e\x9d\x8c\x53\x15\x1f\xa5\xf3\x38\xb2\x97\xfc\x5e\x5f\x6d\x4d\xa8\x45\x82\x9d\x62\x40\x2e\xac\x74\xe3\x65\xce\x18\x64\x5a\xf7\x93\x87\xc5\x11\x93\x03\x36\x75\x46\x3a\xb2\xc6\x92\xe5\xf1\x2c\x96\x9c\x8f\xac\x3a\xb9\x84\x41\x47\x5f\xd4\x07\x57\x31\xfb\x8a\xec\xbd\x1c\xbd\xc1\x07\x84\x84\x69\x4f\x34\x03\x16\x28\x08\x1f\x7c\xdd\x5d\xb2\x46\xaa\x2c\xb2\x3d\x23\x23\x5b\x50\x70\x75\x75\xea\xcb\x89\x17\xbc\x99\x58\x9c\x7c\x93\xf1\x8d\xfa\x45\x32\x6f\xdd\x6f\xd9\xc9\xd5\x4e\x3f\x8b\xb9\x3a\x9f\xf3\xd0\xf6\x48\x69\x86\xf5\xd2\xda\x7d\x56\x90\xed\x68\xc2\xb8\x30\xac\x68\xd5\x49\x9e\xb2\x3f\x42\x8b\x8a\xbb\x0f\xf4\x6a\xae\xa4\x31\xa3\x37\x91\x7a\x7c\x4a\xfd\xf1\x85\x38\x3c\xd5\x5a\x1f\xf9\x95\xa8\x56\x8b\xcd\xf5\x14\x07\x96\x7e\xdf\xa1\x63\x9f\xed\x7e\xa5\xed\xcd\x1e\x1b\x60\xd9\xc9\xfd\xa2\x5b\x2e\x8a\x5d\x87\xab\x7f\x49\xdf\xc2\x48\xc2\x92\x23\x4a\x66\xdf\x54\x49\x1a\x23\xaa\x3d\x2a\xac\xfc\xcb\x51\x90\x5c\xd4\x64\x95\xb7\xe7\x7b\x73\xef\xdb\x20\x39\xc2\xd2\x84\xa2\x99\x92\xd9\x7a\xb9\xc3\x41\xb6\xb3\x6a\x4b\xd5\x40\xa9\x0c\xf4\xe8\xbd\xc1\x71\x02\x36\xbd\x7a\x27\xf1\x6d\xdc\xbe\x20\x3a\xc7\x27\x57\xed\x75\xcd\x43\x6b\xd1\xb9\x51\x33\x0a\xcb\xcb\x9c\x99\xdb\xaa\x3e\x87\x9e\x74\x29\x8e\x7e\x90\x22\xa1\x7d\x09\x43\x97\x65\x1c\x36\x54\xed\xf5\xba\x66\xf3\x0a\x97\xdc\xd2\x87\xd6\x94\xdf\x85\x8f\x54\x4b\x36\xfd\x5f\x5e\x93\x91\x9d\xce\x66\xdd\x30\xb2\x0b\x49\x67\xba\x5d\xa4\x68\xf9\xab\x38\xa2\x95\x65\x06\xe3\x29\x0c\xdc\x2b\x77\x2c\x14\x5e\x80\x8b\x3d\x73\xeb\x5f\x7d\x3b\x7a\xd0\x7c\xf5\xee\x87\xd4\x7a\x8f\x09\xe5\xaf\xae\xe0\xa9\xe8\xc5\x84\x89\x59\x3f\xf3\x8c\x33\xdc\xd1\x89\x99\x6c\xf1\x82\x2c\x4f\x99\x1a\x7d\xd7\xb0\xe6\x6a\xa3\xba\x4c\xef\xc5\x4e\xe8\x94\x6c\x04\x1f\x49\xc0\xf9\x92\xc1\xed\x90\x68\x9e\xcc\x31\xf4\x57\xce\x61\x9e\xe5\x5c\xfe\xde\xa1\x4d\x90\xdc\x25\x16\x88\x22\x01\x9f\x4a\xd0\xb4\x05\x1d\x8d\x01\x33\xf0\xaa\x73\x9d\xb1\x2c\x50\xfc\xae\xe5\x71\xfc\x7d\xf8\x66\x25\xb5\x49\x03\xf1\x4d\xa5\
xb8\x48\x83\xcb\xf2\x1b\xb7\x1a\xcf\x5b\xee\xb6\x7c\xa7\xa4\x8f\xb8\x79\x89\x8c\xf0\xf1\xb7\x4f\x99\x6f\xaf\x37\x10\xda\x65\x1f\x29\x9d\x7d\x8b\x11\x9f\xbb\x62\xfa\xca\x57\x1e\x43\x70\xe0\xac\xe2\x6f\x1a\xf0\x7c\xc3\x21\x78\x70\xd6\x10\x14\xe7\x0b\x3f\xbd\x7b\xf4\x9e\x7d\x9f\x2a\xf5\x36\xdb\xfa\x4b\xde\x38\xb4\x44\x8b\x14\xd8\x5a\xf8\x05\xbc\x07\xb2\xbf\x5c\xc9\xa8\x77\x41\x8a\xa9\xc4\x8b\xda\x4c\x3f\xa1\xe8\x68\xdc\x68\x8f\x92\x21\x24\xd8\x55\xea\xce\xf0\x90\x10\x0f\xd0\x4b\xf2\x95\xd8\x7a\x63\x68\x38\x84\x01\xcd\x1b\xea\x00\x0f\xe2\x83\x7e\x0a\xa5\x55\x5b\x15\x97\x43\x36\xb8\x48\xe2\xa7\xb1\xd5\x8f\x6d\x54\x2e\xb7\x47\xe1\x3d\x4a\xa5\x0d\xa9\x9b\x92\x58\x9b\x73\xeb\x5c\x31\xe2\x89\x03\x5c\x9f\x79\xd8\x50\x7e\x19\x0c\x46\x84\xdb\x5b\xdb\x81\xab\x5e\x97\x6c\xb2\x05\xf4\x07\x1b\x9c\xf0\xaa\xe6\x57\x5e\xbe\x24\x1e\xc5\xe1\xb2\xc5\x3f\xf3\x57\x0d\xb0\x19\x3f\x91\xc8\x6e\xdd\x1d\xb8\xbf\xde\x77\x62\xc1\x7c\xe7\x6f\x28\xe5\x85\xf3\xbd\x75\x76\x77\x79\xd8\x78\x65\xf8\x85\xef\xb7\x2f\xc5\x0d\x4b\x49\xb2\xc1\x1d\x3a\xc3\x44\xd2\xdc\x47\x18\xdf\x13\x76\xf1\x5e\xb1\xd1\x18\x55\x4a\x15\xaa\x85\xa5\x5a\xe2\x56\xad\xa7\xb6\xb4\x6f\x1c\x52\x2b\xf7\x14\xdb\x90\xb9\x66\x6b\xc5\x42\x52\x2d\x2c\x57\xa2\x0b\xa6\x0e\x6a\x1a\xa9\x3d\x1c\x5a\xef\xb5\x98\x93\x87\x25\xe5\x3f\xfc\xcc\xc6\xd0\x55\xe8\x41\x32\xc0\x98\x80\xce\x1e\xd0\x6b\xe0\x6d\x8c\x2c\x37\xc0\x84\xd1\x7b\x1f\x84\xa4\x3e\x71\xd5\x92\xa2\xdb\x67\x33\x15\x13\x6e\x88\xb6\x50\x97\xd3\xaa\xfe\x40\x14\xd0\x32\x42\xc2\x67\x3c\x31\xca\x96\xe5\xfe\x4c\x9e\x82\x4e\xec\xac\xd0\x44\xf7\xd2\x61\x9d\x4b\xf3\x7d\x97\xb3\x63\x1d\xe3\x9a\x62\xc4\xbd\x3d\x7e\xc4\x7e\xa9\xef\x6d\xda\xb8\x91\x35\x0c\xdf\xc0\x75\x70\x6b\x46\x41\x9a\x43\xd3\x9f\xae\xa4\xb6\xbc\xe4\xa6\xb4\xe6\xc4\x63\x32\x50\xfd\xdb\x94\xc1\xf5\xc6\x37\xc3\xe4\xdf\x8c\x79\x7b\x17\x86\xf7\x60\x2f\xd4\xca\x2e\x06\x0d\x08\x39\x4c\x13\x88\xde\xf3\x38\xb3\xfa\x78\x21\xc5\x24\xbe\x3b\x31\xf9\x4e\x3e\xa9\x17\xf8\x0c\x74\x6c\x11\x56\xc4\xa6\xec\xcb\xf7\x3e\x5e\x6b\x8f\x2a\x13\xab\x8e\x4a\x04\xd9\x4e\x97\xdd\xbf\xde\x96\x74\x31\xee\x72\xfa\xf5\xe9\x4d\xb7\x70\xfb\x44\x52\xab\xfa\xf9\x14\x85\x3e\xb3\x45\x93\x70\x71\xe3\xfe\xea\x13\xd0\x76\x44\x64\x88\xf4\xf5\x5b\xad\xc6\x9b\xd7\x10\xc2\x09\x71\x2a\x6d\x43\xf2\x09\x03\x11\x23\xa9\x50\xae\xe6\x1c\xa7\x54\xa9\xde\x8e\xf5\x75\xeb\xb4\xdc\x8c\x2b\xff\x28\x99\x9c\x21\x53\x9f\x21\xf9\x24\x37\xef\xde\x3e\xa0\x51\xdd\x45\xc2\x76\xe6\x03\x3a\x66\x61\xed\x7d\x98\xad\x6f\x48\x28\xc6\xb3\x70\x73\x6b\xa7\xe3\xb4\x48\x3a\x16\xb3\xb1\x9e\xaa\xb3\x39\x4b\x4f\x57\x6c\x42\x30\xad\xa0\x69\x4f\x34\x06\x4c\xee\x31\x16\x7c\xe7\xdc\xed\x9d\x24\x7c\xe7\xdc\x6e\xc3\x8b\xb6\x34\xb4\x3f\x65\x3d\xaf\x58\xf0\x33\xf0\x26\x33\xf5\x71\xc1\x8f\xf5\x82\x23\x40\xde\xe2\xf7\x27\x14\xc0\x0f\xb9\x81\xc0\xa3\x02\xd6\x77\xe3\x19\xa3\x39\x05\x2a\xcd\x70\xb1\xcd\xe2\xaa\x4a\x59\xc7\x88\xdf\xd8\x49\xb8\xa9\x22\x34\x6c\x08\x6d\x4f\xda\x20\x19\x64\xe9\x81\x77\x72\xf5\x2f\xfb\xb5\x5b\x8e\x59\xb7\x4d\xeb\xfa\xd3\xad\x8d\x48\x6c\x78\x96\xbe\x80\x61\xdd\x9c\x99\x99\x96\x4d\x71\xfd\x79\xe2\x3d\x90\x9e\x53\xa8\x4b\xaa\xd6\x7b\xae\xbe\xb7\xc7\x2d\x34\x92\xdb\x5f\x93\x0c\x52\xe6\xda\x1a\x1e\xf5\xdc\xf3\x4f\x0b\xb3\x72\x19\x25\x17\x49\xd1\x2a\x71\x5b\xad\x5d\x93\x9b\x15\x1e\xe6\xe4\x1a\x2f\x29\xd7\xf5\x06\x61\x39\x5f\x5d\x0c\x3f\x78\xc1\x6d\x74\x1c\xd5\xee\x78\xf6\x78\x41\x5b\xf7\x1a\xf1\xad\x5a\x51\x44\xeb\x58\xbe\xae\x6f\x9f\x3b\xb9\x2c\xcb\x73\x3f\xcf\x10\x8c\xa7\x7e\x0a\x44\x7c\x3b\xc0\x9c\xde\xde\x99\xef\x2a\x9e\x1d\x96\xc8\x46\x1d\xc9\xd5\x3a\x4f\x34\x9e\xba\x2a\x0b\x19\xd8\x41\x7e\x13\xbd\x89\xa9\xdf\xa9\x6f\xa6
\x05\xcd\x28\xbd\xfa\x8c\x2f\x97\xb1\xfd\xd8\x76\x45\xae\x9d\x77\x30\xa7\xca\x61\xfb\x82\x5f\xee\xa9\x74\xf1\x8a\x77\xa3\xb5\x63\xb2\x9a\xcf\xc5\x48\xe9\x0b\xca\x2e\xda\x89\x0d\xb4\x6b\xc0\x97\x0c\xc5\x4b\xa5\x0f\x65\xb1\x56\x4a\x87\x28\xa7\xa1\x38\xdf\x91\xb1\x22\x66\xaf\xc7\xcb\xcd\xfb\xf7\xa9\xfe\x5d\x5f\xb9\x31\xc1\xb3\xfb\xe3\xf7\xb7\x11\x75\xd7\xc7\x42\x75\x78\xb8\xf7\x1e\xea\xb2\x50\x58\x7e\x78\x44\x61\x79\xe3\xc4\x57\xa8\x29\x6c\xd2\xb5\x0d\x26\xcd\x47\x68\x3b\x9a\xbc\xea\x06\x57\xb4\x70\x3c\x4e\x6d\xbe\x08\x21\xe2\x8f\x8a\x7f\xf5\xe8\xf7\x81\xc0\x89\x5c\x00\x10\x66\xdd\x2e\xf9\xf2\x41\x12\xd4\x85\xd9\x75\xd4\xb9\x1d\xed\x4d\x24\xa3\x4b\x3f\x88\x86\x08\x7e\x06\x3e\x9b\xc0\xe1\x31\xd2\x0a\x1f\xc1\x4e\x64\xcd\x52\x95\xbd\x16\x36\x2d\x31\x3c\xdc\xf7\xef\x49\x02\x72\x33\x56\xa3\xc8\x3f\xda\xeb\xa9\x50\x7e\xef\x10\xf1\x53\x4c\x84\xeb\x12\x3f\x5a\x37\x03\x3d\x73\x0c\xa1\xe7\xa7\xa9\xfb\x51\x2c\x2c\xac\x3b\x57\x58\x0c\xbe\x87\x28\x82\x99\x56\xdf\x56\x1b\x1d\x77\xe9\xd8\x12\x3d\x7e\x36\xeb\x8f\xa3\xd1\xf1\xbc\x65\x79\xac\x8a\x49\x62\x33\x29\xee\x73\x4b\x83\x56\xc9\xd5\xce\x37\xb9\x7e\xf2\x1c\xae\xfb\xde\xfb\x93\x36\xb6\xe9\xe6\x18\xac\x9b\x6a\x42\x2d\xea\x98\x30\xda\x47\x33\x06\x58\x3c\xa1\x46\x5e\xb4\xa9\x57\x9e\xf4\xde\x6f\xa0\x4b\xfc\xb4\xbe\x68\xaa\xf1\xda\xdc\xea\xc3\x0d\x06\x7c\x4b\xae\xaa\xa1\x05\xf6\x0f\xe2\xc0\x1d\xb7\x10\x3a\xb7\xb3\x91\x0b\x4b\xb2\x91\x1e\xcc\x3d\x0a\x21\xee\xed\x36\x34\x8d\xbe\x29\x36\x0b\xe1\x0f\x3f\x14\x83\x5c\x20\xfe\x1d\x56\x2d\x90\xdc\x8f\xaf\xc8\xc5\x31\x2f\x3f\xd9\x58\x17\x94\x4e\x47\x83\x85\x4c\xf9\x1d\x20\xaf\x12\x2e\x1a\x62\xc1\x7e\x8e\x73\xb0\xc3\xa4\x97\xd6\xb6\x7e\x46\x34\x91\xef\xda\xfd\x5a\xea\xaa\xdf\xbe\xab\x35\x43\x05\x2b\xba\xb5\x57\x3d\x00\x3a\x50\xcd\xc6\xdc\x0b\xd4\x07\x8b\x04\xb9\x71\x05\x01\xb7\xb3\xea\xa6\xdb\xe4\xe1\x5a\x33\x51\xf5\x63\xcf\x29\xb6\x3e\xa7\x36\x04\xe3\x1f\xed\x07\x12\x98\x8a\x89\x1c\x10\x5f\x57\x82\xc6\xa7\xdb\x53\x29\xb1\x54\xca\x9e\x9e\x24\xbe\x4a\xcd\x35\xb4\xaa\xf9\x5d\x5c\x41\x44\x01\x00\x7d\xc5\xdd\xfe\xe4\xf5\x69\xfa\x72\xa2\x79\xa8\x5f\x8d\x91\x30\xce\xb5\xc8\xef\xc4\xf4\x97\xc2\xe2\x23\x3a\xbe\x90\x7d\x63\xc2\x80\x26\xeb\x2c\xf3\xa3\xb5\x11\xb6\x0c\xd2\x78\xb9\x76\x47\xf0\x76\xf2\xa4\x3e\x43\x4d\x09\x04\x07\x5e\x15\xb0\xb0\xaa\xc7\xfa\x91\x6a\x67\xc5\xab\xed\x5b\x29\xdc\x51\xc1\x76\x6d\xd4\x9f\x3a\xd1\x40\x68\x57\xdb\x02\xde\xc7\xf0\x43\xaf\x5b\x89\x19\x7f\x73\x78\xb7\x00\x57\xfb\xc3\x6f\xab\x09\x64\x58\x8a\x4f\xe7\xae\xff\xaa\x52\xd5\x7f\x4a\xe1\x22\xf6\x2f\x26\x11\xde\x71\xfa\xe9\xe3\x91\x62\x35\x9e\x28\xf2\xf4\x16\x28\x29\x7e\x2c\x0e\x9c\xb5\x1a\xc2\x55\x48\x4e\xf8\x91\x33\x20\xac\x14\xb7\x5a\x97\x23\x30\xbe\x1d\xe2\x70\x42\x04\xc7\x3b\x7d\xa6\xce\x55\x3f\x2d\x11\x1b\x75\xda\x0a\x9d\x90\x34\x24\xd7\xf2\x3d\x2c\x76\xe6\x27\x67\x2c\x33\x5c\x7b\xab\xec\xfe\x59\x59\xac\xd1\x64\x28\xd3\xc8\xbf\x50\x69\xef\xde\xe2\xd1\xb3\x01\x23\xd4\x7d\x27\x59\xf8\x2a\x7f\x39\x02\x81\xfb\x32\xdd\xed\x42\xcb\xe8\xe9\x7b\x93\x21\x1a\xbe\x02\x22\xd0\x31\xed\x31\xef\x3c\xd3\x11\xc5\xbb\x58\xc4\xb4\xcc\xe0\x77\x8e\xfa\x3a\x22\xd0\x8c\xd2\x51\x28\xe1\xe2\x2c\xa2\xaf\x64\xa8\xbf\xb4\x96\x97\x08\x17\x35\x32\xe3\xa8\x42\x24\x2a\xe5\x28\x9a\x52\xe2\xcd\x47\x52\xbb\x2f\xb4\xa3\x84\x8e\x42\xd4\x43\xda\x37\x34\xba\xd3\xbd\x20\xb3\x27\x18\xb0\xbb\x38\x0a\x1a\x3a\xbf\x29\x08\x07\x28\x6d\xcf\x15\x38\xe8\x49\xc2\x63\xa9\x8d\x50\xf5\x3c\x08\x88\xb7\x03\x98\x9c\x1b\xab\x15\x3d\xc7\xfd\xa3\x0b\xdc\xd4\x88\x80\x68\x39\x33\x5a\x70\x35\x18\x96\x01\x10\x10\xc3\xc9\x36\x4a\x06\xa4\x20\xa1\xe5\xef\x0e\xf6\xb1\x87\xb0\xc4\xcc\x68\xfc\xaa\x8c\x52\x72\xb8\x3
6\x55\x83\xbe\x45\x04\x8e\x57\xab\x99\xe6\xb7\xa9\x07\x30\x09\x4f\xcf\x89\xdb\xd5\x5e\xbd\xb2\xed\xd1\x70\xb5\x2e\x99\xe6\x16\x04\x54\x01\x6b\x26\xe1\xd2\x50\xe4\x55\xfa\xcf\xa6\xc1\xcf\xca\xdf\x9f\x80\x0e\xe7\x86\xd2\x72\x1f\xb8\xe1\xc2\xae\xf9\x62\xba\x1f\x6e\xe0\xa4\xd7\x80\x67\xd8\x9b\xc1\x8b\x33\xaa\x87\x46\xd3\xff\xd5\xc0\x5c\xcb\x6f\x95\xe9\x3f\x5e\x06\xcf\xba\x80\x51\x45\x35\xb4\xa0\xd6\x55\xf7\x0a\x38\x6e\x62\x6b\x3c\xba\x44\x33\x74\xc6\x40\xf6\x9b\x1c\x88\x45\x6d\xed\x35\xb8\x52\x2c\xb3\x8c\x2c\x2e\xcd\x81\x82\x0a\x8e\x7d\x2a\x7b\x14\x36\xff\xad\xe3\x37\x15\x43\x88\x1a\xfb\xa9\xe9\xf8\x39\x3f\x87\x9e\xaf\xbc\x34\x65\xb3\x0d\x02\x4a\xec\x95\x4c\xf5\x50\x3b\x5e\x8a\x25\xa2\x49\x1a\x76\x41\xe9\x39\x61\xdd\x78\xd5\x1f\xbf\x3a\xa5\x15\xf7\x78\xf6\xe4\x93\xb9\x2f\xf5\x23\x4b\x06\x1e\xd8\x45\x8e\x6b\x0b\xd4\xf3\x85\x80\xb6\x8c\xc2\xbd\xa5\xe5\x96\x82\xaa\x39\x1b\x57\xac\x13\x7e\xd4\x3d\x50\xbd\x01\xa3\xe7\x24\xc1\xdc\x73\xb7\x0c\x7f\x36\x36\xaa\x47\x33\xbd\xab\x2a\x4b\xff\xb6\x37\xa6\xc7\x0f\xb8\x42\x5f\x9f\x69\x29\x08\xb3\xf1\x85\xf3\xf9\x2e\x31\x88\xb0\x16\x9b\x8a\xc3\xdb\x0a\xb7\x3d\x41\xbd\x42\xf3\xe5\xa9\x17\xf8\x0a\x53\xf3\xfb\x88\x9c\x5c\x5e\x6b\x0c\x5a\x9e\xb1\xb3\x16\x71\xc0\x9a\xf4\x53\x80\x46\x40\x6a\x17\x50\xff\x09\x02\x42\x23\x4e\x72\x8f\xc3\x52\x43\x24\x04\x1a\x6a\x1e\x7c\x10\x4b\xf9\x35\x7c\x8c\x74\xdd\x75\x74\x75\xf0\x1b\x45\x98\x95\x77\xa0\xdc\xf3\x22\xe2\x89\x35\x4b\x3c\xd8\x4a\x0b\xcd\x96\xa7\x11\xf6\x5d\x87\xca\x10\xff\x81\x10\xf6\x64\x27\xb6\x63\xd6\x48\xb3\xc0\x13\xf4\xc8\x56\x3d\x0f\x0b\x74\x7d\xdc\xc2\xe1\x6a\x64\x48\x3e\xec\xeb\xde\x44\x04\x92\x26\x62\x0d\x98\x2c\x66\x53\x55\xb0\xd5\x59\x9b\xd6\x99\xf3\x53\x8e\x40\x4c\xef\x93\x92\xd7\xfe\xae\xdb\x54\xda\x7c\xa8\x49\x38\xd8\x15\x17\x7d\xe5\xb3\x18\x52\x6a\x3f\x86\x0d\x27\xa4\x12\x34\xed\xf1\x43\x75\xb2\x60\x81\x12\x1a\xe9\x4b\x87\xde\x3a\x9f\x3e\x50\x02\x23\x9d\xd6\xe8\x07\x02\x9d\xbf\xfc\xc2\x94\x58\x2d\xec\x62\x6a\x55\x4f\x6d\x81\xb2\x17\x4e\x09\x05\xa1\x96\x9f\x67\x18\xd2\xca\xf3\x58\x38\xe4\x3c\x47\xa1\xa2\x7b\x25\x0d\x51\x5c\x63\xd9\xfa\xde\xd8\x2b\x5f\xe3\x89\x6d\xcd\x9b\x0c\x45\xbe\x7d\x55\x08\x81\x5e\x8d\xce\xdf\x30\x3c\xf9\xa5\x8e\xb7\x0f\x93\x8a\x6b\x7f\x0e\x2f\x69\xf3\xbd\xb9\x94\x55\x11\x6a\x3e\xa8\xa3\x89\xfd\xd1\xfa\xf2\x2b\xb9\x0c\xc2\x10\x85\x81\xf6\x03\x52\x02\x3f\x3c\x74\xb1\x02\x54\x2d\xc3\x4d\x2c\x08\x61\xc9\x1b\xe6\x11\xad\xb0\x5c\xb8\xb9\x30\xd1\x44\x06\x27\x0d\x65\xe2\x9e\x8a\xf7\xe8\xd6\x74\x5e\x3f\xc1\x1b\x2e\xc2\xa9\xc7\xa9\xe2\xbb\x88\x0b\x34\x8d\xad\x5b\x6a\x61\xc5\x2e\xf3\x5d\xd7\x47\x0a\xae\x34\xfe\x79\x39\x30\xa0\xbc\x2e\x7d\xa1\x88\xd8\x5d\xdf\xee\x38\x1c\x8f\x7e\x13\x28\xd9\x57\x60\xd9\xad\x24\x5c\x6c\xf7\xc9\xc4\x50\xcf\xa8\xf2\xcc\x76\xe9\xc8\xb2\xaa\xcb\x30\x97\x75\xc7\x88\xee\x9f\xf2\x4f\x11\x84\x0f\x06\x36\x5d\x74\x97\x58\x54\x35\xba\xf4\x59\xcd\x35\xb3\xdd\x7a\x77\x2f\xb1\x3b\x59\x64\x3c\xe1\xa5\xce\xb9\xc4\x96\x21\x1b\xa8\x90\x0b\x0d\x9b\xe0\x33\x88\x49\x2f\x47\x6f\xf0\x81\xfc\xa4\xf2\x14\xe8\x86\x8d\x44\xbe\x2d\x6d\xbb\xce\x6b\xd8\x36\x15\x6f\x97\x38\x17\xea\xbf\xd0\x8e\xe9\x35\x5b\xf5\x60\xb2\x24\x84\xc5\x83\x61\x5a\xc8\x5b\x57\xf0\xb1\x5b\xc2\xf9\x57\xe0\xbb\xf1\x82\x74\xe7\x48\xae\x11\xc4\xd7\x28\xda\x62\x71\xd7\x4f\x0a\xa9\x6d\xbd\x42\xc3\xcd\x4f\x5f\x7e\x03\x11\xf7\xd8\xe8\x36\xde\x74\x7d\x92\xa1\xf4\x6a\xc8\x63\x38\x7d\x1e\x00\x7c\x50\x48\x0b\x92\x13\x25\x0d\x78\x66\xe5\x57\x41\xb3\x52\x78\x0f\xf4\xb6\x03\x91\xf1\xec\xbd\xab\xf1\xdb\x01\x13\x79\x91\xcf\xe3\x16\x71\xb1\xec\x75\x26\xa9\xef\x0c\x7c\x8d\x5d\xdf\xd5\xee\x0f\x1c\xf2\x2d\xff\x96\x6a\x83\x7a\x81\x9f\x87\x39\x
fd\x2a\x2a\x50\xe5\x09\xed\x07\x95\x22\x05\x71\xe0\x1e\x75\x70\xa1\xd1\x2c\xda\x5c\x7d\xf7\x0a\x65\x05\x5b\x1d\x0b\x6a\x95\xf0\xfd\x0d\x1d\xd0\xc9\x37\xea\x1d\xf6\xac\x12\x5a\xbe\x18\x02\xee\xf9\xcc\x74\x25\xf0\x81\x5c\x15\x99\x26\xbc\x74\xa5\xc5\x94\x48\xbd\xca\x36\xe3\xf3\xbf\x4b\x5e\x4e\x0c\x06\x6c\xc7\x60\xb6\xdc\x76\xa6\xdd\x53\x56\xbf\x72\x69\xd0\xb1\xba\xd9\xfe\x24\x33\x0a\xb4\x35\x73\xf2\x63\xed\x36\xf6\x42\x42\x7e\x4f\xa9\x4a\x23\xd4\x08\x1a\x02\x61\x05\x54\x3e\x39\xca\x89\xb3\x16\x53\x17\x09\xa0\x6e\xdb\x2d\x59\xef\xff\xfc\xdb\x0c\x13\x56\x72\x5b\xb1\x4a\x65\xe9\xea\xd1\x12\xfe\x2b\x2d\x59\x14\x4b\x48\xa7\x30\x84\x3a\xb6\x60\x61\x16\x00\x13\x78\x30\xf3\x91\x95\x3a\x91\x42\xfd\xa8\xca\x19\xa9\x24\x96\x42\x63\xdb\x18\x71\x93\x42\x21\x6a\x18\x2e\x3b\x6a\x63\xd1\x80\xd0\x88\x69\x9e\x01\xe9\x4d\x8c\xab\x9b\x10\x35\xc5\xab\xca\x9e\x4a\x15\x64\xf7\xb4\xd4\x29\xa2\x23\xd7\x9d\x7f\x47\xe6\x84\x4c\x3d\x6e\xd9\xcb\x29\x50\xa0\x63\x47\xa4\xe2\xbf\x97\x84\xd7\x59\xa8\xa7\x31\x03\xde\xea\xa6\x6f\xa4\xb3\x8d\x0d\x26\x11\x78\x82\x9c\x83\x57\x1e\x61\xcd\xea\x3c\x63\x1f\x67\xab\xff\xa8\x8b\x7a\x19\x1c\xc4\xa9\xf2\x9d\x42\x19\x8a\x74\xd8\xbe\x68\xb2\x65\x8c\xf4\xb1\x17\x6d\x3c\x9c\xbb\xdc\x23\x9d\x80\x8b\xee\x22\x20\xe1\x2c\x05\x02\x05\xbb\x76\x8a\x76\x38\x56\x97\xd7\x59\xb7\xd0\x4e\xb9\xcb\xf4\x30\xde\x94\x93\xdb\x3e\xe7\x4d\x71\xb6\xf1\xe4\xde\xd5\x1f\x13\x33\xe3\x5a\x81\x16\x76\xbb\xbe\x67\x24\xf3\x82\x42\xaa\xf9\x96\x13\x4a\x64\x44\x63\x02\xfa\x9a\x35\x34\xab\x58\xe5\x31\x74\x62\x9b\x5f\xc2\x24\x7e\x33\xf0\x85\xdf\xa1\x77\xd7\xa0\x61\x81\x88\x00\x31\x17\x47\x21\xef\x86\xaa\x46\x9e\x72\x40\x59\x38\x7e\xd8\xe6\x98\xdf\x47\x94\xad\x22\xa0\xd1\x42\x17\x04\xf2\xdd\x05\x1f\x7e\xbc\xad\xc9\x9f\x92\xfa\xf4\x3a\x12\x01\xbd\x2f\x62\x9d\x1b\xd0\xd0\xd2\xb6\x91\x04\x92\x8b\xfa\xe1\xf5\x8e\x4f\xa6\x36\xd5\xa9\x3d\x2d\x2a\xe8\xe9\x5b\xe7\x3e\x6f\xbf\x41\x97\xde\x3e\xe0\xb1\xbf\x7a\x74\x30\xb9\xb2\xaa\xb3\x31\xf8\x98\xc5\x26\x08\x55\xc4\x32\x48\x90\x32\xa7\x48\xbd\x53\x6c\x0e\x16\x71\xec\xe5\xa4\xff\xc6\xfe\x96\xf5\xe8\xa7\x1a\x5b\xa6\xee\x69\x88\xb9\xc4\x12\x39\x0b\x22\x35\x4b\xce\x64\x1e\x7b\x78\x38\x1a\xa2\xe4\x79\x29\x3c\x6a\xc8\x7e\x9e\x8c\xbe\x29\xf2\xcc\xad\xef\xf0\xd2\x4f\xb8\x72\x04\x5f\xcf\x0c\x28\xab\x29\x91\x62\xa4\x4b\x78\xcc\xac\x32\x87\x88\xbe\x59\x31\xad\x73\x69\x78\x35\xc2\x0c\x0b\xa9\x89\x9d\x98\x73\x8b\xd3\x39\x2f\xa4\x72\xb4\x6c\x81\x70\xfe\xab\xd6\xf5\xf8\x01\x17\xdf\xdd\x7b\x67\x06\xd9\x6c\x59\x15\x92\x17\xa9\x15\xc8\x2e\x7b\x95\xf2\x6a\x75\x79\x8a\xf2\x40\xd3\xa7\x4a\xd1\x28\x2f\x01\xc0\x90\xc7\x36\x35\x5d\x12\x1d\x30\x79\xe0\x3a\x3f\x22\xf2\x81\x9b\xf1\xa8\xad\xef\x9a\x48\x5a\xb7\xf4\x78\x3f\x77\xce\x60\x21\xd1\x73\xc7\x17\xf1\xab\xed\x25\xa9\xb2\x7e\xb4\x2a\x62\x72\xef\x6c\x97\x50\x06\x4e\x37\x4e\x40\x23\x2a\x02\x22\x52\x52\xdb\x17\x12\x46\x1a\xa1\xa4\x3d\x5f\x20\xee\xdf\xe7\xf7\x4b\x5d\x77\x47\x3e\xb3\x5a\xd3\x29\x73\x70\x7e\xb7\xeb\x51\x35\x0c\xd1\x42\x95\x0a\xda\xd6\x20\xc1\xfe\xd5\xe5\x27\x12\xae\x39\xa9\xe2\x40\x91\x43\xf1\x07\xec\x8a\xa9\x38\x09\xad\x61\xe0\x01\xa1\x16\xf2\x44\xe1\x67\xc0\x67\x99\xe5\xa1\x2f\x88\x5c\xfd\x0c\x49\x4d\xe9\x8b\xde\xc8\x92\x17\x06\x4a\xc3\xa0\xe2\x76\xe1\x5f\xe9\x66\xb7\x47\xb9\xc1\xe8\xa3\x04\xbb\x60\x81\x69\x93\x8f\xa8\x6c\xe1\x8d\x88\x48\xd2\x22\x7e\xba\xb1\xec\xcd\xbd\x11\xe9\xdf\x67\x71\xd4\x26\x06\x6a\x79\x45\xde\x95\x60\x33\x71\x61\x72\xb9\x42\xc3\xf1\x91\x19\x06\x7d\x81\x48\xec\xec\xd8\x57\x44\x20\xa1\x3f\x99\xf3\x9b\xa4\x46\x19\x21\xd1\x54\x6d\x9b\x3e\x92\xf9\xdd\x06\x00\x34\xf8\x78\xf9\xc3\xf5\x91\xc2\x66\x72\x7f\xf4\x7f\x7a\x25\xa3\x97\xaa\
x16\x24\x7b\xd4\x09\xf2\x22\xaa\xbc\xf7\x5d\x6b\x7b\x9a\xaf\x22\x22\x84\xc5\xdf\x26\xd4\x9b\x62\x1e\xef\x8b\x96\x34\x5b\x11\x13\xd0\x45\xb1\x83\x1f\xaf\xac\x22\x5d\x46\x18\xb2\xd4\x8f\xd2\x1f\xa4\xe4\xbc\x2f\xcb\x0b\x0e\x58\x2c\x02\xf6\x8e\xe2\x82\xe9\xc3\xf8\x61\xa9\x9e\xbd\xe9\x2a\x0b\xfb\x0d\xc6\x78\x7c\x35\x4b\x36\xb3\x55\x19\x8b\x42\x67\x83\x5e\x21\xd8\xff\x8f\x36\xfa\x46\x6f\x7b\x73\x6b\xef\xec\xfa\x5e\x8e\x5b\x1a\xef\x0a\x92\x33\xbd\xf9\x96\x58\x4e\xcd\xf4\xc8\xf4\x0b\x79\x37\xe9\xa8\x89\x43\x83\x9e\xb3\x83\x1d\xcb\x32\xae\x98\x8b\x15\x25\xbc\x22\xa7\xda\xdb\xe6\xee\x2e\x33\xfe\xff\x7d\xe9\x82\xe8\x29\x9a\x1d\xb3\x3e\xe7\xc7\x9f\x15\x2e\x56\xfd\xdc\xd8\x26\x20\x12\xf5\xeb\xef\xfa\x53\xcb\xe7\xd7\x79\x1d\x9e\x19\x28\x9f\x7f\xc3\xe1\x7a\xe6\xd2\x2f\xdd\xce\xa7\x15\xa6\x6d\x6f\xe0\x7b\xf8\xf1\xdd\x3d\x3f\x9f\x2f\x22\x89\xaa\x9f\x3f\xc6\x3e\x98\x7e\xe2\xe5\xf7\xfd\x6f\xef\x32\xe7\xb9\x24\x32\xb4\x5e\x5c\x7b\xbd\x27\xab\x5c\xf5\x29\x5f\x9b\xf6\x8a\xc5\x6c\x5b\xf3\x73\xab\xef\x59\xee\x74\x14\xbc\xf2\xd7\x28\xff\xdf\xf5\x04\xfb\x19\x0c\x49\xa2\x1b\x7f\x9d\xdf\x53\xf7\x78\xda\xf3\xe4\xaf\x0f\xf4\x19\x63\xa6\xcd\x7c\x72\x3a\x79\xdb\xd4\xa8\xb7\xad\x9b\xcb\x2e\x27\xec\xff\x72\x73\x9f\xfc\x6a\x86\x80\x96\x08\xd7\x35\x5d\xa1\x0e\x6b\x9e\x7d\x3c\x3f\xfb\xf1\xca\x22\xc3\x6f\x67\x4b\x37\x3c\xcd\xd2\x15\x3f\x75\x6f\x7f\x4c\xcc\x1a\x26\x85\x30\xc1\xd5\x49\xa2\x02\xab\xed\x97\xd6\xa7\xbc\x79\xb7\xd9\xa1\xd8\x24\x6f\xde\xcc\xca\xef\xa6\xd3\xb7\x17\x09\xae\x66\xe8\x08\x3d\xb2\x2a\x82\xb5\x2e\xe4\x5d\x67\xd5\xff\x3e\xcd\xd6\xea\x82\xd2\x94\xa2\x5f\x3b\x8d\x73\x3e\x9f\xfc\x79\xf7\x7c\x59\x9e\xb4\x72\x18\xa3\xc0\xea\x8e\xd0\x8d\x5c\xae\x5b\x36\xed\x9a\xe5\x9d\x58\xec\x3e\xf9\x66\xeb\xd1\xe6\x45\xa7\x36\x07\x15\xf7\x14\x64\x97\xe7\xbf\xf3\x9c\xbd\xb9\x75\xf6\xee\xaa\x22\xb9\xd2\x75\xff\xfe\x7e\x3d\x5f\xd6\x27\x72\xce\x3a\xdd\xf2\xe8\x2f\xf0\xfa\xa9\xaf\x57\x8d\x55\x8a\x0b\x9e\x3e\xe5\xbb\xf7\xe0\xd9\xdc\xf5\xfd\xb2\x6c\xb4\x59\x75\x45\xe4\x42\xb0\x4d\x41\x2f\x3d\xd7\x98\xbd\x3a\x03\x06\x47\x5e\xed\xdf\xb2\x39\x32\x7e\xf5\xf7\xdf\xdf\x4c\x8d\x41\x60\xe3\xaf\xe5\x1b\xf7\xef\xbd\x63\x00\xe6\x18\x0b\xaf\xde\xef\xbc\x66\x16\x78\x55\x54\x67\xe8\xc6\xac\x34\x10\x50\x0b\x13\x5c\x0d\x66\xa4\x25\x4d\x6d\xbe\x5b\x7a\xff\x2c\x54\xf4\xbf\x58\x1f\x7f\x39\x5f\x99\x79\x8d\xf3\x6c\xb0\x86\x88\xbf\x62\x0c\x0c\x0c\x0a\x61\xe0\xe3\x59\x88\x63\x81\xcf\xd7\x21\x96\x85\xf3\x6c\x1e\xec\xa7\xf4\x44\x69\xc0\x0e\xdd\x39\xa2\x85\xc2\x02\x07\x11\x7e\x96\x05\x73\x6b\xae\x8f\x80\x8d\x4e\xa6\x2b\x03\x03\x03\x83\xa7\xab\x9f\xcb\x3a\xa7\x84\x26\x40\x00\x00\x00\xff\xff\x14\x78\x87\xe7\xb9\x48\x00\x00") + +func filesImagesPending_approvalPngBytes() ([]byte, error) { + return bindataRead( + _filesImagesPending_approvalPng, + "files/images/pending_approval.png", + ) +} + +func filesImagesPending_approvalPng() (*asset, error) { + bytes, err := filesImagesPending_approvalPngBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "files/images/pending_approval.png", size: 18617, mode: os.FileMode(420), modTime: time.Unix(1509717368, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _filesImagesReceived_approvalPng = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xa4\xbb\x05\x50\x5c\xcd\xb3\x37\x4c\x02\x01\x42\x70\x77\x27\x24\x84\x04\x09\xae\xc1\x09\x0e\x61\x59\x64\x81\xe0\xee\xee\x12\x5c\x42\x02\x59\x5c\x82\xc3\x6e\x58\xdc\x2d\x40\x70\x0b\xb2\xf8\xe2\xb0\xb8\xbb\x7e\xf5\x24\xcf\xff\x91\x5b\xf7\xbd\xdf\xad\xf7\x9d\x3a\x35\x9c\xea\xd3\xdd\xfb\xeb\x99\xee\x9e\x39\x9c\x9e\x48\x35\x15\x39\x1c\x2c\x4a\x2c\x14\x14\x14\x9c\xb7\xf2\xd2\x1a\x28\x28\xe8\x42\x28\x28\x0f\xbf\x63\x3e\x44\x41\x41\xc9\xbf\x12\xb1\x46\x41\x49\x88\x7c\x2b\x2d\xa1\xe9\xf9\x60\x67\xea\xc1\x03\x14\x7c\x7c\x34\xce\x5b\x22\xc9\x64\x22\x54\xff\xe8\x72\x52\x14\x14\x14\x94\xba\x30\xd8\x1f\xbc\x6f\x80\x0f\x50\x50\x50\x02\xff\x9f\xef\x51\xf4\x49\xfe\xe8\x5d\x7f\xf5\x1b\x6c\xff\xaf\xf7\x3f\x01\xbf\xee\x21\xf5\x5d\xe6\x48\x58\x2b\x91\x5c\x5d\x77\xdc\xfd\xf6\x3b\x91\x1f\x41\xf8\x0f\x18\x99\x1f\x48\x3a\x26\x49\x3a\xd9\x44\x03\x53\x36\xeb\xc4\x1b\x99\xf4\x77\x81\x1d\x3d\xdd\x88\x4c\x51\xc4\xb5\x10\x59\xf8\xea\xf9\x79\x7d\x9e\xda\xb3\x51\x62\x22\xcc\x32\x02\x1d\xd3\x3a\xfa\xe0\xaf\x06\x68\xac\x6f\x0d\x05\x08\x13\xbf\x44\x90\xe3\xbc\x4d\x25\x57\xed\xe6\x06\x6b\x16\xa4\x2a\x81\x89\xca\xab\x68\x49\x3b\x62\xe2\x1e\xab\x3d\x48\x3d\x59\x11\x08\xcd\x09\xe9\xde\x75\x13\x14\x9d\x68\xe6\xb5\x2f\x64\x5d\xc4\x8b\xaa\x20\xd6\xca\xd7\x7e\x35\x9f\xe6\xd1\xb5\x6d\x6f\xa1\x6b\x75\xe6\x32\x68\x70\x57\x66\x68\x7b\xf6\x64\x57\xbd\x5e\xbc\x44\x55\x65\xb6\xb0\x7f\xde\x17\x41\x35\xa8\x57\x22\x21\x98\x01\xf7\x1a\x22\x33\xf5\xcd\xa1\x5e\x92\x91\x50\x29\xca\x29\xc0\x67\x56\x7e\xaf\xa9\x14\xed\xc0\x37\x7b\x7f\x3e\xe9\xee\xe1\xe1\xe1\xe3\x73\x4d\x80\xf2\x9f\xb6\xa8\xba\x18\x20\x04\x44\xdc\x96\x36\xb6\x7d\xa3\x52\x47\xfb\xb8\xa2\x1e\x82\x5d\x71\x53\x0b\x5a\xeb\x54\xe3\x90\x78\xe7\xff\x36\x91\x99\x85\xbd\x98\x40\x27\xe9\x1b\x08\x5c\x92\x5f\xe9\x36\xc6\xa6\x15\xa9\xb4\x63\x03\x3b\x4a\xf0\x9f\x53\xa4\xb7\x1f\x5f\x39\x6a\xa0\xb6\x2f\x44\x98\x21\xee\x07\x89\xb0\x3b\x50\xd9\x89\x14\x37\x73\xbd\x02\x6a\xb6\x3c\x58\xc1\x67\xf4\x0b\x3b\xbb\x85\xbc\xcf\xdc\x9f\xc2\xdb\x9c\x4c\x84\xc4\x53\x4b\xaf\xfb\x77\xdf\x57\x58\xd6\x62\xac\x69\x6a\xad\x08\xd0\x51\x9d\xc8\xe5\x71\x72\xec\xba\x00\xa4\xc1\x4e\xf1\xce\x82\xf8\x71\x44\x87\xde\xe5\x21\x46\x37\x57\xdb\xa5\x01\x9d\xa0\xd3\xcb\x63\x64\xdc\x3d\xcd\x5f\x38\x51\xe8\x81\xca\xeb\x1d\xc6\xaf\x5f\xc8\x76\x88\x70\xbc\x7f\x66\xb6\x06\x29\x63\xaf\x10\x41\xca\x38\x11\xb3\x58\xf6\xb1\xbe\x25\x50\xc7\xed\x85\x14\x75\xe5\x1e\x2a\xb1\xeb\xf1\x3e\x19\x74\x85\x99\x53\xa6\x68\x4b\x63\x5f\x5b\x89\x7f\x5c\xa4\x0d\xef\x36\xf3\xa8\xdf\xb5\x9f\xac\x75\x12\x6c\x8d\x39\xf5\xcd\x82\xa5\xbf\xca\x8a\xb3\xf6\xd8\x2a\x84\x2e\x39\x6a\x39\xe0\x61\x95\xf8\x67\x8f\xf4\xf2\x09\xd5\xe7\x1a\x79\x79\xce\x09\x18\xb8\x27\xa6\xb5\x59\x0b\xa7\xb3\xc7\x7d\x44\x07\x6b\xca\xae\x08\xd2\x75\xa3\x4b\x12\x04\x25\x49\x3a\x4b\xe2\xc8\xb1\x60\xb3\x22\x70\x4b\xbb\x37\x65\xd4\xb1\x3b\x0f\xa5\x19\x2f\x81\x03\x00\xba\xd6\xba\x6b\xa2\xbf\x71\x5e\xd0\x0d\x02\x1a\xe5\x80\x60\xad\xec\xd1\x57\x3d\x0d\x75\xc3\x00\x72\x92\x55\x4f\x75\xb0\xa7\xc4\x9b\x12\xd0\x1a\x8b\x6f\x95\xa1\x7a\x5e\x04\x73\x84\x82\xb6\x72\x91\x2c\x5f\x66\x69\xbe\xc2\x60\x95\x5b\x96\x52\x37\xd3\x67\xcc\xa0\xcf\xb4\x8d\xb3\xb7\x66\xe9\x7c\x85\x2b\xdf\x8d\xc4\x1d\x05\x6c\x55\x0a\x11\xbb\x65\x8d\x3b\x7c\x49\x1d\x7b\x77\x47\xa3\x43\xe8\x7e\xbb\x2a\xd5\xfc\xc5\x1e\xb2\x66\x50\x6d\x1a\x77\x97\x41\x7b\x98\x70\xf1\x47\x55\x3c\xd5\xc4\x0c\xb8\x7b\x99\x18\xd5\x50\x62\xc6\x50\x59\xbc\x42\xc0\x30\xc0\x88\x21\x4f\x3a\xe7\xad\x68\x5e\x4f\x11\x18\x9a\x6e\xf0\xc3\xac\x93\xed\x61\xc7\x63\x62\x96\xc9\xed\xdd\xe6\x3e\xc0\xdf\x40\x3f\x41\xd9\x62\x8d\xab\x0
1\x45\x1e\x0a\xd6\x20\x52\x49\x06\xea\xc2\x0f\x22\x1c\x56\x15\xf8\xb8\xb9\xcf\x82\xb2\x43\x16\x90\x71\xb2\x2c\xea\x84\xcc\x2f\xa8\x3a\x73\x64\xf9\x12\x4b\xf3\xfb\xc3\xa7\x8d\x5f\xa6\x29\xb1\x0a\x7c\x94\x03\x19\x24\x21\x6e\xdd\x92\xfa\xec\x5d\x66\x70\xa9\xaf\xcb\x75\xbc\xfa\x36\xcb\xf6\xf5\xc0\x88\x9a\x80\x05\xa3\x58\x44\x57\xe8\x43\x73\x03\x87\xb3\x28\x39\x54\x1b\xac\x5d\xff\xec\xd7\x6c\xb1\x1f\xd3\x58\xa1\x1f\xe1\x90\x09\x24\xd3\xf6\x5b\x8f\x2c\xa8\x66\xe6\xe9\xa1\xed\x1b\xcc\xe3\xa5\xa2\xa2\x94\xc3\xfd\x7a\x2c\x19\x10\x79\x18\x81\x80\x44\x36\x39\xc5\x58\xac\x0f\x18\xfa\xcf\x19\xdf\x0c\xd3\xb0\x21\x4b\x6d\x76\x9e\xd3\x94\x5a\xe2\x94\xf8\x99\x8e\x1f\xcf\x23\x83\xe9\xd7\x0f\x78\xd0\x6a\x38\x49\xac\xf3\x58\x51\x41\x3a\x56\x26\xa1\xde\x33\x3b\x11\x46\xbb\xae\x00\x07\xb1\x17\x7c\x46\x5b\xab\xf0\x01\x5d\x3f\x5d\xbb\x11\xe2\x4b\xeb\x38\x33\x6b\xdb\xf2\xc9\x71\x16\x3c\xab\xef\x33\x9b\x8d\xeb\xd8\x74\xff\x89\x6d\x50\x94\x8c\x30\x2a\x16\x66\x77\x97\xcb\xdf\xab\x13\x49\x87\xab\x26\x66\x9c\x5e\x3a\xba\x70\x89\xe9\x7b\x34\x7e\x88\xb2\x92\xad\xa7\x7f\xe9\x18\x83\xbf\x2e\xad\x3c\x31\xf6\x58\x22\x51\x2d\xc6\x55\xb8\xc9\xe0\xbc\x16\x14\x6a\x44\xac\xfc\x68\x1f\xf0\x3b\xbf\xfc\x6a\x81\x20\x96\xe2\x60\x30\x74\xa6\x23\xf5\xf3\x84\xd4\x12\xbe\xc4\x95\xb6\x44\x44\x11\x61\xf7\x13\x01\x59\xdd\x0e\x4e\x25\x46\x98\x0c\x3c\xe2\x59\x78\x7e\x91\xa9\x39\xd1\x9a\x2c\xcc\x5d\x5b\xde\x8c\x90\xba\xbf\x3e\xc3\x20\x89\xc6\x69\x46\x34\x60\xac\x30\x9d\x2f\x69\xfb\xfb\xa6\x98\xa3\x17\x2e\x68\xef\x96\x28\xfc\x74\xd4\xc8\x65\x16\x77\xee\x1a\xfc\x22\xbc\x63\xd7\x99\x59\xb9\x5a\xf8\x99\x3b\x46\x68\x5b\x95\x90\x50\x71\x9c\x6a\xda\xb1\xf2\x70\x1f\x60\x62\x60\xb2\xf9\xae\x28\x51\x89\xcd\xf7\x13\xee\xa1\x84\xd8\x8f\xb7\x0d\x39\x16\xd8\x2a\x84\x04\x5f\x1c\x85\xd5\xc6\x63\x57\xc8\x65\x3e\xf1\x84\xa9\x47\x84\x87\x80\x1e\xfd\x85\xd1\xb1\xd4\x19\x4c\xbe\x66\x5d\x72\x33\x52\x64\xbf\xbe\xac\x05\x63\x7c\x9e\x85\x1f\xd9\xe3\xfd\x85\x4c\x12\x4d\x65\xe5\x59\xef\x05\x6a\xaf\xa9\x12\x5c\x9a\x85\x2d\x2a\x8a\x5c\xd1\x7a\x30\xd7\x3a\x3f\x93\x2c\x84\x7e\x59\xf6\x9b\x96\x19\x0b\x73\xe1\xa0\x49\xc0\xa2\xa3\xa8\x23\x3d\x1f\x99\xb5\x0b\x91\x4a\x48\xe7\x26\x3e\xc5\xa2\x53\x94\x44\x53\x1d\x62\xcd\x7f\xd1\x9d\xda\xd3\x56\xab\xaa\x62\x9a\x06\x83\x0f\x80\x0d\x4f\x2b\xde\x2b\x9b\xa2\x4a\xba\x4a\x38\xba\xc8\x13\x5b\x70\x2d\xd1\x72\x73\x17\xac\x69\x00\x38\x68\x2b\xfb\xf5\x02\xda\x93\x40\x2c\x5b\xd7\xbb\x6c\xaf\x08\xcf\x3c\x9d\x92\x88\x64\x44\x7f\x2c\xac\xb2\x71\x3c\xac\xc7\xfa\x3b\x05\xf9\xaf\x41\x22\x7e\x68\xe9\x5d\xee\xe5\xa5\xbc\x7a\xda\xa1\x11\x52\x70\xe3\xfb\x2c\xaf\xab\xe5\x86\x0b\x9f\xc9\x89\x83\xd6\x34\x59\x93\x2d\x9b\x95\x58\x90\x52\x2b\x27\x87\x14\x08\xa9\xf6\xad\x4e\x7d\xc2\xfd\xa5\xc3\xbe\x76\x16\xa4\x55\xee\x3c\x0d\xa2\x4d\x4b\x38\x7c\xef\x1d\xa8\x28\x97\xa7\x25\x56\x9b\x89\x3b\x91\xbb\x57\xf1\x6e\x7f\x75\xf9\x71\xa2\x91\xe8\x17\x64\xd0\xa4\xb9\x8b\x68\xd9\x6c\x06\x1f\x95\xa2\x7a\xbd\x6f\x93\x38\xe4\x04\xfd\x72\xfe\x99\xb8\x58\xf1\xcb\x61\x1f\x0b\x60\xee\x31\xf4\x33\x18\x7a\xd5\x61\xcd\xcd\x70\x88\xdd\x79\x8c\x4d\x66\xbe\x16\x92\x93\x6f\x0c\x12\xd8\x91\xe0\x4c\x7a\x72\xc8\x48\xfe\xf1\x1d\xf1\x66\x07\x27\xa1\x04\x67\xcf\x3f\x02\xe7\xdc\xa6\x51\x61\x1d\x32\x08\x72\x98\x19\x00\x8c\xe6\xb0\x5f\xf9\x06\xa9\x3d\xcb\x2b\xe2\xe6\x38\x60\x56\x21\x78\xd6\xf6\x48\x87\x89\x62\xac\xc4\x3c\x4f\xb1\xdc\x93\x47\x5f\x5e\x8b\x2a\x54\x81\x97\xec\xf1\xf1\xab\xa4\x59\x23\x0f\x2f\xea\x88\xc9\xa5\x75\xf9\x9c\xf8\x88\xac\x53\x1f\x0e\x0e\x3e\x3a\x03\xd1\x96\xdb\xa5\xe9\xdd\xd9\xb4\x2c\x03\x90\x98\x21\x4e\x57\xa8\xa6\x7a\x14\xfa\x5c\x48\x7f\x10\xb5\x00\xc8\x3d\x8d\xe9\x8d\x46\x
a8\x64\xd3\xd8\x8b\x98\xa7\x6b\xd3\x4d\x5c\x3a\x3c\x64\x16\xcd\xd1\xdb\x87\x6b\x70\xcf\x96\xdd\x53\x74\x33\x26\xd2\xcb\x44\x28\x23\x68\x5c\xcd\x64\x53\x80\x0c\xcb\xa7\x29\x79\x76\x57\xa6\x59\xe0\xf0\x25\x90\xe0\xd3\x3f\x62\x68\x4f\x1f\x16\x3f\xa5\xc0\x34\x96\x45\x43\x6a\x4d\xa1\x1e\x75\xe1\xa7\xf4\x8c\x58\xab\xba\x08\x1f\xeb\xb3\x80\x8c\x31\x73\x3f\xda\x3b\x2b\xa6\x4c\xcd\xd8\xbc\x44\x85\xee\x77\x9d\xd2\x6b\x65\x6a\xc5\x6b\x6e\xca\xc7\x8c\x75\xb3\x4b\x32\x34\xb4\x1f\xe9\x75\x05\x57\xa6\xbd\xaf\x96\x0e\xa7\x77\xb7\x11\xc8\x56\x91\xab\x03\xff\xf3\xcd\xdd\x53\x24\x12\x29\x6e\x30\x94\x39\x3c\x03\x95\x55\xc7\xe3\xd6\x82\x3b\x0b\xe0\x37\x0e\x2a\x13\x49\xf3\x51\xed\xba\x72\x57\x26\xcc\xb8\xce\x93\x67\xe8\xd6\x43\x02\x9c\x78\x5b\x1a\x28\x39\x6a\x9b\xed\x5b\xbf\xfc\xec\xb8\x06\x75\x72\x81\x22\xa2\x9f\x7e\x03\x81\x31\xb1\x82\xa9\x24\xce\xeb\xb1\x58\xe4\x83\x0b\xe4\xe9\xa2\x24\xff\x11\x55\x75\xe1\xf5\x51\x2f\x26\xc7\x6e\x75\x2a\xe7\x06\x7a\xd5\xb9\x16\xf1\xc2\x3a\x08\x00\xef\x1e\x31\x48\x2f\x11\x2f\xb1\x68\x87\xf5\x9a\x0f\x86\x14\x77\x12\x29\xf0\xd9\x98\xf6\xc9\x9e\x72\x1a\xb2\x21\x89\x05\x15\x3c\xad\xc6\x83\x08\x71\x3d\xd8\x08\xbd\x45\xec\x02\xe6\x33\xfc\xfd\x05\xee\x4f\x4c\x2f\x0f\xd7\xd6\x3b\xfa\x77\x0b\x2d\x66\x67\xd3\x4e\x77\x4f\x0f\xbb\x2b\x97\x12\xde\xe6\x92\x26\x61\x86\xf7\x9b\x1d\x0b\xed\x62\xae\xcb\x44\xe5\x5f\x58\x82\x32\x87\xac\xc7\xc6\xba\x5c\x47\x74\x6d\xaf\x93\x80\x09\x88\x6a\x30\xb4\x01\x61\xf0\x1c\x16\x56\x87\xd2\x59\x89\x4d\xb7\x0e\x90\x8a\xce\x27\x94\x50\x39\xf0\xe4\xac\xa5\xea\x54\x8b\x67\xf6\x25\xde\x94\x53\x27\x74\x26\xf9\x0b\xef\x77\xbf\x7e\x40\xfc\x40\x12\x64\xfd\xfd\xc4\xb7\xea\x27\x0a\x9a\xd8\xdc\xb6\xcc\xec\x90\xd4\xa7\x07\x0c\x84\xb9\x98\x21\x8b\xe4\x7c\x19\x7c\xf1\xcf\x45\x69\x81\xdc\x29\xd2\xd9\xc6\x13\x78\xc7\x6b\x1a\x2a\xd8\x91\x68\x92\x46\xb8\xe3\x48\x49\x4a\xd9\x1b\xb8\xf7\xc1\xe1\xd2\x61\x21\xeb\xb1\x2a\x87\x16\x9d\x21\x4c\xbc\x65\xe6\xee\x66\x7d\x2b\x09\x98\xd4\x2a\xa6\xe3\xd7\x90\x29\x2a\x68\x17\x47\x75\x59\xf7\xac\x3b\x57\x76\xa6\xc7\x7c\x74\x73\xbe\xac\x7a\x26\x86\x0c\x3a\x6f\x8c\x10\x35\xa8\x02\x5e\x83\x78\xe7\x77\xbb\xa2\x95\x6b\x67\x5f\x76\xe5\x04\xe3\x7f\xbd\xa4\x67\x0a\x79\x34\x18\xb1\xd9\x8b\xd6\x71\xa4\x05\x3b\xaa\x53\x7f\x98\x6b\xc5\xfd\xe6\xb3\xbf\x6d\x16\xf4\xe0\xfa\xdf\x79\xf5\xad\x6d\x4b\x3a\x39\x0d\x3f\x9e\x70\x67\xd1\xd3\x72\xec\x17\xb2\xcc\xca\x93\x8f\xc4\xa4\x8d\x78\x82\xf0\x7d\x3f\x29\x5a\xc4\xbf\x7d\xa7\x98\x6f\x9c\xe2\xda\xa4\xcd\x87\xa7\x97\xaf\x20\x48\xb7\xfc\x68\x3c\xd6\x8b\x46\xfe\x7b\x34\xcd\xda\x6b\x8e\xda\xb1\x38\xd5\x09\xf1\x66\x91\xeb\xcb\xc1\x4e\x0e\x07\x8e\x00\x7f\x9f\xdb\xe3\xc6\xf0\x2c\x43\xfd\xdb\x8b\xd5\x9d\x6e\x8b\x24\x87\x4c\x83\x05\x90\xe1\x60\x1c\x30\x61\x64\xd3\xd5\xd5\xd9\xbb\x62\x6a\x8f\xb6\xe5\x6e\x69\xd7\xd0\xdf\xf7\xee\x6c\x77\xef\xc6\xb8\x4a\x70\xdf\xd0\x20\x0e\xe6\xbe\xb2\x56\x30\x5b\xee\x0c\x66\xc4\xbc\x2b\xad\x67\xcb\xa3\x7a\xfc\x02\xf8\x88\x7c\x68\xa5\xeb\x9b\x98\x0c\xa1\x18\x1d\xc5\x57\x4c\xf1\x2d\x16\x82\xe1\xdb\x7f\xaf\x58\xa6\x29\xfd\x24\x26\x5a\x30\xeb\xf3\xfe\xd4\x57\x2e\x12\x5a\x31\x3d\x10\xc8\x21\x28\xa2\xdd\x37\x24\x7c\x91\x82\x2f\xe1\x6d\xaa\x95\x68\x42\x24\xb2\x42\xb6\x3c\xdd\x9a\x06\xb0\x75\x9a\xf3\x39\x46\xb1\x82\x70\x6c\x65\x0d\x74\xb7\xe3\x79\x65\x5f\x7b\xbb\xb4\xb4\x8e\x58\xbb\xe9\x46\x2c\x9f\x1d\x9e\x8d\xe5\x0e\x56\x9e\xae\x8e\x58\x1f\x1d\x6d\xee\x2d\x5f\x41\x26\x28\x42\x4d\xbc\x62\x3e\x83\x0c\x5a\xbb\xa5\x7c\xd4\xc3\xb0\x36\xd7\xdc\xef\x3b\xa1\x85\xaa\xf6\x5a\x78\xa5\x4f\x67\xed\x90\xce\xfc\x0d\xde\xa6\x6b\x10\x83\xc2\x77\x3f\x59\x86\xfa\x00\xa0\xc0\xa5\x50\x35\x76\xab\x8d\x30\x23\xd1\x6a\x67\x30\
x19\xc3\xb3\xb9\xfd\xc2\x60\x1e\x82\xef\x67\xf5\x51\x75\xc0\x7f\xe4\x36\x14\x67\x70\x8e\xe3\x74\x93\xf9\x1a\x64\xaf\xac\xa0\x3b\x17\xf4\x29\x41\x5a\xe7\x1d\x6f\x26\xc9\xb3\x39\x39\xcf\x5e\xfc\x7e\x8b\xd7\xbd\x9a\x57\xfd\xd6\x80\x79\x6b\x00\x7c\xec\xf4\x70\x7a\xcf\xd8\xcf\x8d\x4b\x48\x70\x13\x3b\x0c\x31\xbf\x78\x72\xe1\xea\xe1\x75\xb7\xdc\xbb\x07\x1f\x83\x02\xf9\xd3\xe2\x38\x5e\x89\x37\xcf\x34\xfb\xf9\x9f\xde\xdd\x1e\x0f\xb9\xf6\x15\x15\x84\x47\xcf\xa2\x36\xc4\x26\xe2\xfe\xfc\x3a\xf8\x76\x9a\xba\x2a\x41\x43\xde\x80\x66\xb8\x60\x38\xd3\x9e\x63\xe8\x6a\xc1\x19\x3e\xe8\x59\xbd\x4b\x53\x7d\xac\x1b\x42\xe2\xab\x05\xc3\x0a\x61\xfa\xac\xcd\x9d\xdd\xb9\xf2\x40\x1c\xe9\x64\xb4\xb1\xb0\xca\xc6\x1d\x98\xfd\x85\xac\xde\xf0\xdf\x60\x49\xd7\x2a\x05\xed\x9c\xe7\xf6\xe2\x70\x4f\x43\xfa\x0e\xd3\x9f\x45\x35\x40\x74\xc1\x51\xed\xcf\xe6\x14\x0f\x24\x5e\x00\x53\x28\x5e\x4f\x40\x48\x4a\x6b\xa2\x93\x1c\x32\x33\xc5\xfd\x12\x37\x07\x82\x0f\x73\x66\x59\x05\xf0\xfb\x36\xcd\x44\x43\xe4\x47\x08\xde\x72\xf1\x21\x93\xc4\x11\x77\x07\x4b\x83\x7b\xb7\xc7\xc7\xc3\x74\x06\x86\x62\xfe\x39\x5a\xde\x4a\xdf\x96\xf3\x4c\x85\x69\x05\xe9\x19\xf9\x44\x4e\xbe\x4b\xcc\xda\xe6\x8d\xbd\xa8\xb3\xe2\x9a\xb4\x05\x89\xea\x0d\x03\x4b\x2a\x2b\xdb\x4e\xed\x9d\xfd\x5e\x83\xec\xe2\x66\xc2\xeb\x2d\x23\xd6\x7c\xfd\xf9\xbe\x87\x30\x2d\xd5\x59\xad\x41\x92\xb3\xcc\x09\xc8\xc2\x18\x56\x93\x40\x9d\x6a\x5a\xb0\xa3\x51\x03\x3c\xaf\xd2\x7f\x44\x20\xca\x2a\x9b\x3e\x09\x37\x85\x1b\xb7\x0d\x5e\x57\x49\x72\xf6\xe1\x43\x56\xbf\x41\x40\x9b\x43\x7d\x94\xc9\xa5\x26\x77\xe6\x30\xf3\x2b\x2d\x56\xba\xf9\xe6\x06\xaf\x43\x8d\x3e\x1e\xfc\xf2\x90\x90\x68\x5c\x49\x77\x6f\x6d\x56\xc2\x9a\x7a\x96\xc6\xd7\x1b\x92\xc9\x9f\x8a\x9a\x7d\x8e\x97\x2f\xed\xcb\xa9\xe1\x01\xbe\xd7\xbe\xe5\xf5\xef\x63\x21\x40\x79\xba\xc5\xa8\x0f\x11\x33\x98\x9b\x28\x46\x74\x53\x8e\x8d\x19\x22\x8e\xe5\x2a\xdc\xf1\x76\x13\xb5\xfc\x85\xc7\xf0\xa5\xcb\x69\xc7\xda\xb4\x3d\xda\x38\x60\x9a\x3b\x34\x05\x10\xe2\x07\x5b\x26\x7f\xca\x2e\xbb\x5c\x1c\x42\x39\xe6\x47\xbc\xe2\x66\x5c\x97\x94\xe7\xb8\x20\x06\x24\x78\x40\x9c\x8d\xcf\x6f\xfb\xeb\xcd\xe4\x77\x9b\xab\x04\x91\x90\x68\x14\x70\x67\xd6\x74\x55\xaa\xb4\x8f\x87\xe2\x97\xf6\x38\x2b\xc1\x43\xfc\x60\x39\x4e\x1c\xbd\xe5\xa0\xa7\x66\xa6\x63\x57\xde\x62\xde\x87\x3b\x23\x4b\x96\x7b\x4b\xca\x72\x65\x8b\x5f\x5f\xe3\x6a\x3a\x09\xe8\x34\x26\x54\xd6\xb7\x63\xe0\x4e\xec\x99\x37\x79\x2e\x2e\x9d\x5e\x1a\x66\xea\x8b\xb4\x8a\xb4\xf8\xdd\xfb\x7a\x34\x33\x09\xde\x06\xaa\xed\x8a\x8d\x75\xac\x8b\xb4\x24\x23\x88\xaa\x9c\xe1\x41\x19\xde\x8b\x4b\xfa\x62\xe5\x62\x1b\xb0\xd2\xa7\x8a\x2f\x80\x76\xd7\x49\x64\x1d\x88\x95\x5e\xef\x31\x2b\x5e\x6e\x42\xba\x55\x2b\x71\xb2\xf7\xd2\xcf\xe3\xe5\x29\xd4\xf5\x73\x1f\xb7\x4e\x56\x0f\x03\x26\x88\xc4\x6f\xf9\x67\x01\xe3\xff\x65\x7f\xb8\xac\xfb\xe3\x26\x1e\x5a\xc0\xaa\xff\x9a\x06\x2f\xfc\x91\x4c\xa2\x1b\x60\x05\x95\x3a\x72\xcf\x85\x84\x6e\x75\xb4\x58\x7a\xf4\x93\xde\xe9\xdc\x4a\x5a\xbf\x73\xe1\x7d\x20\x31\xdd\x62\x39\x2a\x8f\xb9\x1c\x73\x78\x74\x75\xa3\x76\x1a\xab\xf3\x22\x29\xde\xdc\x43\x95\x23\x55\xba\x5d\x7f\x6f\x0f\x97\xf3\xa5\x5e\x0e\x71\x91\xcb\x83\x3d\x99\xb9\x9e\x69\xc7\xde\x5e\x12\x11\xc9\x69\x99\x6e\x3b\xd8\x62\xd0\xa0\xc7\x22\x26\xed\x7b\x23\xee\xbc\xcc\xcc\x97\x49\xb3\x77\xc6\xf3\x02\x0b\xee\x1c\x1c\xe1\x8d\x72\x63\xf0\xab\x22\x1d\xc5\xb7\xb6\xdf\xfb\x00\xdf\xb2\x08\x58\xb8\x52\x54\x20\x0d\xb2\x04\xfe\xe3\xb1\x04\xcf\xd6\x72\x6f\x1e\x18\x7e\x6a\x49\xfa\xb7\xe7\x12\x24\x0c\xe4\x89\x50\xf2\x5b\xb0\x22\x5c\x9f\x1a\xad\x41\x8a\xd8\xb7\xd5\x5f\x6e\x3c\xf1\x1f\x04\x50\x77\x98\x6b\x84\x59\x3d\x9b\xfe\x22\x44\x4b\x13\x8b\x7c\x42\x46\x52\x61\x14\x85
\x03\xdf\x89\x30\x3e\x0f\xc1\x13\x31\xa2\x82\x08\x39\x76\xec\x81\x65\x9e\x5f\x26\x01\x39\xec\x17\x44\x6e\xc4\xb3\x32\x6f\xcf\x3a\x72\xe3\x4a\x3d\x1a\x8f\x8f\x51\x4a\x45\xe9\x35\xd3\x38\xac\xa4\xcc\x5e\x68\x8a\x3a\x72\x62\x64\x7e\xca\xc0\x36\xb3\x8e\x8d\x0e\x4f\x83\x43\x84\xc5\xed\xd2\xed\xe2\x60\x30\xf4\x38\xfb\x8f\x40\xa8\x4a\xae\x71\x54\x1b\x8a\xdc\x65\xfd\x1b\x35\x62\x99\xd7\x09\x51\xd6\x6c\xca\x17\xdf\x40\x32\xc6\xd7\xa0\xce\x8b\x0f\xc4\x9b\x72\xcd\xc4\xa0\xc6\x7f\x6c\x79\x5a\xd6\x20\xa6\x4a\x2a\x4f\x7f\xb4\x95\xb9\x73\xef\x5b\x38\x77\x25\x81\xf2\x2d\xa3\xba\x2e\xc8\xb1\x8e\x97\x7a\xc4\x80\xf9\x83\x21\x9f\xd5\xb8\x9f\x37\xeb\xf6\x66\xea\x83\x7a\xac\x67\x3e\x4a\xdb\x1a\xbc\x00\x6e\xba\x97\xba\x07\x4a\xd3\xd1\xa7\x0e\x42\x85\x45\x97\xfc\x7d\x7c\xaf\xaf\xd6\x86\x39\x3c\x5a\x9b\xe7\x6f\x8f\x6e\x96\xbb\xe5\xd2\x90\xb3\x70\xf7\x72\x02\xe9\x8e\xa0\x50\xba\xba\x60\xbc\x9d\xf1\xa0\x04\xf7\xe5\xf4\xcc\x5e\x07\xd8\x04\x74\xd0\xb1\x72\x6f\x10\x39\x78\x5a\x53\x3d\x8b\x43\xb7\x40\xcd\x41\xb6\x6b\xcd\x55\x3e\x0c\xc8\x8b\xb3\xd0\x56\x8f\x61\x1e\x33\xd7\x04\x3c\x4f\xaf\x71\x06\xbb\xed\x17\xa2\x4a\x88\x2b\x7e\xf6\xdf\xa1\x5d\x67\xfb\x7b\x74\xa7\x60\xa3\x66\x4c\xaf\xce\xae\xe1\x63\x32\xe2\x49\xaa\xb2\x8c\x0c\x32\xf1\x80\xb7\x38\x3f\xb1\x43\xf5\xea\xa3\x96\x29\x18\x12\x14\x28\x1f\xf3\x66\x44\x91\x3c\xd9\xec\x35\xf5\x14\x31\x34\xa9\xf2\x0d\x94\xc7\x88\x30\x3e\x96\xe7\xab\xf5\x5c\x8c\x9a\x02\x49\x82\x38\xe6\xbe\x0d\x27\x85\x57\x2b\xe7\x56\xc1\x48\x0b\x36\x57\xa7\x94\xce\x62\x7e\x14\x4c\x36\x27\x73\xcc\x86\xeb\x77\xf4\x40\xac\xdb\x07\xd6\x3f\xca\xde\xee\xae\x6d\x9d\x54\x35\x0b\xd8\xd9\x4e\x36\x77\x1e\x59\xba\x38\x9d\x1c\x98\x9f\xac\xae\x5f\x9c\x54\xe9\xf4\x6d\x12\x74\x0d\x7f\x6e\x51\xfb\x94\xc4\x2c\x98\xc2\x6d\xc6\xa2\x60\x4c\xbc\xde\xf6\xdc\x38\x0b\x4a\x6e\xf8\x89\xd1\x97\x1c\xab\x7e\xcb\x01\x31\x37\x87\xdd\xea\x73\x7f\x77\x8b\xa0\x7b\x8c\xd2\x7a\x7f\x77\x0d\x87\xc3\x2f\x2f\x2f\x3b\x3b\x3b\x87\xc5\xef\x7d\xcf\x39\xfe\xda\x64\xcc\x02\xa2\x08\xc8\x7c\x6e\xfb\x8c\x05\x7b\xc8\x94\x64\x48\x49\xd4\x09\x1f\x8d\xf4\x01\x78\x08\xa6\x90\x90\xaf\x60\xe8\x40\x5e\x76\xfe\x80\x76\x79\xb2\x15\x0f\xd6\x76\xa5\x9b\x83\xb3\xc2\x71\x2a\x8d\x19\xde\x9c\x7f\x78\xbf\xea\x98\x31\x43\x80\x67\xb5\x08\x5d\x52\xbf\xd4\x6b\xae\x18\x2b\xb9\xf8\x39\x02\xb8\x17\xa4\xcc\xa6\x46\xd6\xbc\x22\x8d\x4f\xb8\xf3\x46\xbd\x59\x88\xd2\x9d\xdf\x82\xf5\x18\xfe\x9e\xdb\xfe\x23\x46\x28\xbc\xa9\x45\xc7\x2d\x69\x7b\xef\xd2\xed\x7e\x64\x70\xe3\x30\x77\xb3\xd7\x29\x37\x19\xde\xee\x4f\x27\x77\x55\x02\x22\x89\x95\x7d\x97\xf2\xe5\xdb\x33\x34\x85\x65\x4c\x15\xa2\x16\x94\x41\xb4\x5b\x76\xe5\x2b\xa7\x18\x59\x02\x2c\x67\x38\x50\x2b\x7f\xb9\xb5\x72\x14\x8e\x68\x5d\x3f\x46\x0e\x9f\x7a\x37\x6b\xb2\xed\xb6\xde\x2c\x60\x07\xe2\xd1\x89\xa5\xfb\x9e\x73\x20\xc7\x93\x38\x0c\x61\x5a\xd0\x35\xe7\xcb\xe5\xce\x68\x61\x94\xef\x59\xc4\x7f\xbe\x95\x90\x24\x25\x3c\x2e\x1f\x7b\xfe\x4a\xab\xc3\xf8\x39\x4d\x6c\x9f\x93\x3e\x59\x68\x44\x1c\xa6\x5a\x1d\x18\x2a\x1f\x44\xe4\xac\xf3\x02\x28\xf3\xae\x40\xeb\xb9\xc5\x76\x2c\xa8\x12\x60\x90\xf5\x92\x9f\x0c\x79\x3b\x92\xe7\x36\x4e\x87\x6b\xbe\xdd\xe9\xfe\x5a\x78\x93\x14\x2b\xc3\xf4\x88\x39\x6b\xbf\x7b\x99\x06\xb0\x5f\x74\x08\x28\xe7\xe3\x48\xc4\x23\xa9\xce\x92\xd7\x22\x61\xa1\x35\x86\x5e\xfe\xd8\xfc\xc6\x4b\x05\x28\xec\xe8\x33\xd5\xd6\x1d\xd4\xdb\x76\xf3\x68\xde\x34\x10\xa7\x16\xf7\xe7\xf6\xb9\x70\x14\xc9\x98\xc0\xa2\x10\x5d\xcd\xd5\xe9\xe3\x38\x08\x26\x8d\x4b\xa1\x78\x47\x28\x33\xaa\x88\x4f\x3b\x17\x77\x4d\x2e\xd4\x73\x25\x48\x47\x41\xaf\x86\x75\x7e\x87\xe1\x42\xb2\xca\xf6\x42\xf3\xd0\x30\x22\xc7\x32\x1f\xda\x78\xaf\x82\xe2\x7b\x75\xb2\xb9\xd
a\xe8\xb1\x3f\x8f\xb8\x3d\xe6\x4b\x1b\xec\x25\xd3\x2a\x6f\x40\x45\xb9\x11\xfc\x9d\x3a\x7b\x00\xc5\x21\xc6\x94\xc6\x8d\x19\x86\xec\xe1\xf9\xce\xb2\x89\x8c\x5c\x80\x31\x92\xd8\x6c\xc1\xd4\xf7\x8e\x1c\xcf\xcb\x40\x62\x49\xdc\x09\x10\x2b\xe9\x17\xf9\x4a\x95\x3a\x96\xd4\xa4\x54\x83\xce\xc7\x65\x13\x74\x27\x42\xbb\x2a\x4f\xc1\x70\x96\x6a\x5b\x72\x84\x52\x41\x96\x52\xa2\x00\xac\xef\x89\xfe\x0b\xe0\x73\x6f\x26\xe6\xfc\x9b\x96\x30\x76\x3c\xec\xca\xec\x2d\x2b\xae\xb9\xe2\x35\xd2\xca\x33\x5b\x4d\xc1\x94\x2f\x2a\x05\xf9\xf0\x9c\x7a\x9d\xda\x5a\x0b\x84\x1c\x7f\x25\xab\xaa\x6a\x29\x4c\xb6\x43\x5d\x30\x49\x43\x8a\x67\xf0\x3a\x78\x49\x4d\x5a\xf1\x47\x82\xd5\x6b\xc2\x8f\x9f\x30\x55\x18\x43\xc0\xd0\x60\x43\x17\x12\x82\xef\xcd\xcc\x41\xa3\xe7\x62\xc0\xdb\x7e\x9d\x2d\xf1\x74\x28\x77\x0a\xf6\x7d\x46\xf5\xb0\xf3\x79\xf7\xf7\x80\xa4\x5e\xd3\xbd\x3b\x8c\x20\xe4\x70\x56\x96\xaf\x93\xbb\xfb\x19\x46\x2f\x00\x0a\xdf\x0a\x28\xc8\xcd\xc8\xcc\xd4\x2d\xd4\x63\x27\x6d\xbd\x39\xea\xff\xcf\x3f\x38\x02\xc5\x65\x61\xf1\xca\x52\x22\x22\x03\xec\x89\x1f\x14\x78\x5f\xf4\xe7\x14\x58\x2a\x68\x3f\x56\xdb\x11\x03\x32\x98\x46\x10\x29\x84\xda\x3e\xcd\x67\x8d\xb5\xac\xff\x62\x45\x07\xd0\x93\x83\x7b\xe7\x7e\x19\x4c\x79\xfe\x3a\xbb\xcf\x39\xf0\xe4\xd1\x5b\x43\x59\xf5\xab\xfa\x39\x27\xb1\x80\x74\x15\x3c\xd7\x5b\xf9\x78\xa1\x9c\x51\xc0\x78\xe2\x53\x9c\x01\x69\x0e\x23\x4d\x12\x38\xfe\x85\x7f\x6e\x55\x72\x75\x41\x6e\x9f\xad\x9a\x48\x8a\xe6\x44\xf4\x84\xb9\x9f\xab\xfb\x44\xa3\x97\xaf\xb3\x8b\x8b\xaf\x3f\x17\x35\xb6\x02\x85\xd4\xd2\x73\xdd\x03\x97\x0a\x62\x38\x91\xac\x88\x91\x36\x22\xea\x84\xf0\x4b\xd2\x9c\x03\x50\xbd\xed\xeb\xbe\x0d\x2c\x47\x7e\x00\xa3\x7e\x4b\x3c\x11\x6a\x5e\x49\x71\x1f\x6a\x44\xee\xb1\x20\x01\x0b\x06\x99\xc1\x73\x5a\xed\x61\xde\xb7\x57\xa7\x57\x4f\xa0\x39\x77\xaa\x30\xbf\x41\xa4\xcb\x5f\xef\x49\x07\x72\xbd\x00\x73\x10\x5f\xac\x7c\xb3\xfd\x2b\xad\x5b\xef\x5c\x7c\x40\x5e\x11\x31\x58\xca\xf9\xd3\xba\x65\x7a\x9b\xa8\x74\xc4\xda\xb1\xce\x58\x1f\xc0\xea\x1d\x54\xd3\xb9\xc4\xbd\x1a\x51\x0c\xb0\xb9\xf4\x1d\x1a\x48\x7e\xac\x1f\xaf\x3d\xda\x4d\x58\xd5\x57\x71\x2a\xed\x9b\x3a\x5c\xd2\x9f\xef\x56\xec\x98\xae\xa2\x4c\xd8\x35\xce\x29\xb7\x69\xba\xa0\x54\x73\x2a\xad\x12\xf1\xe3\x43\x9b\x44\x33\x67\x16\x2f\x6b\xc7\x68\x95\xa1\x28\x05\x2f\x7f\xff\xbc\x3b\x42\x58\x9c\x4a\xa4\x41\xd0\xff\xc2\xd2\xdb\x7f\x9e\xee\x24\x7a\xb3\x6e\x20\xc1\x55\x80\x5e\xda\x4c\x39\x96\xe5\x44\x9b\x91\x5d\x1a\x33\xe4\xab\xda\xa8\xea\x05\x85\x50\xcf\x4f\x76\xe5\x0b\x27\x0e\x2b\x84\x14\xec\x2a\x6e\x8d\x2d\x1a\x8c\x7a\xd7\xc6\xcf\x00\x94\x80\xcd\xd8\xd7\xde\x23\x26\x61\x33\x48\x88\x78\xcc\xec\xec\x6c\x6b\x2f\xa0\xf5\xf6\x6c\xf6\xef\x29\x92\x84\x29\x6c\xc5\xa7\xe0\xdb\xf0\x9a\xd4\x6f\xf1\x53\x3e\x02\x7c\xe6\x79\x0b\x7e\x24\x28\xef\x24\x26\xf2\x86\x3e\xef\x59\x88\x4b\xca\xaa\x18\x30\xdf\xf5\x05\x90\x77\x16\x6c\x0e\xd5\x28\x25\x53\x08\xd5\xe4\xa3\x88\x7b\x4b\x58\x3b\x0c\x10\xe3\xea\x49\xcd\x99\x52\x79\xaa\xaa\x10\x0a\xa8\x76\x4d\x1d\x1e\x74\xb9\xf4\x82\x09\xa8\x20\x3f\x1a\xf7\x1a\xcb\x73\xa3\x82\xa2\x56\x84\x43\xc7\x7b\x8b\xf8\x28\xc0\xe6\xb9\x33\x87\x75\x35\xe9\xb5\x5a\xd0\xb3\x69\xaf\x7d\x6f\xfe\x96\x06\xca\xcc\x8c\x53\xd9\x5b\x67\xb6\xa7\x4e\xcc\x34\x8f\xc3\x58\xd4\xc1\xb9\xba\xec\xf6\xd2\x68\x87\xf7\x95\x1e\xb4\x4d\x51\x4c\x8a\xc4\x77\xdb\x09\x44\xbd\xcc\x54\x9c\x35\x4e\x28\x54\x9c\x97\xeb\x3e\x8f\xf9\xbb\x7b\x5f\x3b\xb8\x0c\xa9\x1b\x25\x54\xcc\xa7\x58\x15\x9b\xd6\x93\x87\xf0\x71\x6c\x1d\xcf\xef\x49\x36\x36\x34\x34\xec\xe2\xf8\x0a\x21\xfc\x43\xa1\xf1\x0a\xaf\x32\xc2\xab\x15\x06\xad\x04\x1a\xb1\xea\xd8\x92\x9e\xc3\x08\x61\xed\x8a\x63\x1b\xbb\xfe\x53\x8f\x59\x54\xf2\x0a\x
12\x46\x82\x12\xe9\xd3\xe3\x54\x94\x90\x47\x47\x1a\x3c\xce\x20\x96\x65\x19\xb6\x90\x48\xcd\x74\xc0\xe3\xce\x56\xee\xec\xc6\xba\x93\x33\x9b\xc8\x61\x11\x56\xe9\xb7\x3d\x94\x52\xa7\x26\xb2\x3c\xc2\xcc\x12\x4f\x38\xc2\x0a\xf0\xd1\x1e\xc4\x56\x0c\xe8\x21\xc5\xee\x2e\x4c\x76\x8c\x7d\xee\x74\xb5\x65\x05\x5c\xe7\x89\x63\x1a\xcf\xee\xae\xf7\x51\xfb\x22\x16\x3d\x16\xfc\xce\xc3\xe9\xfc\x93\xfb\x8f\x3e\x11\x3d\x6c\xbf\xb8\xbb\xde\x57\x2f\x8b\xe8\x7e\x96\xe5\x7b\xf4\x58\x96\xd8\xa8\xee\x64\xde\x67\x3f\x52\xb1\xe7\x72\x44\x8e\x6e\x84\x25\xb5\xae\x87\x49\xa1\x3d\xbc\x5e\x2a\x97\x6c\x74\x94\x98\x73\x6e\x0e\xf2\x8c\x1c\x55\x9d\xf3\x08\x05\xbd\xb9\xd8\x5f\x20\x4c\x0d\x95\xe4\x71\x76\x69\x44\xb6\x13\x43\x8c\x3c\xab\xac\xdf\xd5\x66\x61\x84\x84\xca\xc7\xaa\xb2\x1a\xc6\xc7\xf3\x87\xe7\x83\xfc\x0b\xec\xda\xa6\xcc\x79\x66\x12\x92\x6a\xd9\x2c\x4e\x24\x89\xc4\xb9\x52\x5f\x0c\xe7\x0c\x31\x1f\x1d\xe4\xbf\x71\x06\xb5\x13\x3b\x16\x5e\x30\x7e\x7d\xef\xbf\x12\xe2\xf8\x45\x58\xc8\xff\xa0\x0d\x43\x53\xfb\x44\xc5\x69\xc4\x39\x72\xe9\xc7\x02\x59\x0f\xc7\x7a\xc4\x23\x09\x16\xc5\x39\xee\xb3\x88\x0a\xbd\xb0\xea\xe5\x38\xfe\x93\xb5\x24\xc3\x96\xe2\x27\xb7\x95\x96\x32\xcc\x2c\x7f\x37\x27\xf0\x7e\xb2\x91\xda\x2c\x08\x2f\x53\x44\xf0\x7b\xbd\x66\x8d\x19\x8f\x6d\x6d\x81\x5e\x67\x4c\xaa\xf2\xea\xeb\x73\x12\xa8\xf1\x89\xd0\xa0\x7d\xe1\xc8\xf6\x95\x5e\x1a\x6a\x19\xdb\x4d\x80\x85\x05\x4f\x70\x23\x64\xda\x52\x9b\x17\xe7\x63\xa1\x66\x0b\x64\x33\x48\x47\xef\x7c\x2b\xa1\x20\x60\x14\x67\xd8\x07\x11\xad\xb2\x5f\x35\x8d\x80\x5e\xfa\x0d\x5a\x22\x15\x3f\x44\x09\x72\x7f\x8b\x6d\xf0\xa9\x12\x16\x99\xa3\xa3\xa1\xc5\x55\x19\x96\x05\xe2\xf1\xd3\x89\x23\xbc\xce\x9c\x6f\xce\x84\xe5\xf6\x5b\x7c\x8e\x8e\x3a\xdb\x46\x6c\xc7\x9d\x63\x9d\x85\xa0\x64\x2f\x0a\x36\xd1\x4a\x7e\xe2\x3d\xb4\x95\x27\x2e\x7d\x34\x60\x76\x64\xdd\x82\xf6\x01\xc0\x07\xee\x96\xed\x94\xd4\xd2\xc3\x01\x3d\xfa\x62\x92\x6a\x4e\x11\x85\xf9\x92\x5e\x38\x24\xa8\x28\xe3\x45\x18\x34\xb0\xca\x93\x0f\xed\xa0\x12\x5a\x74\xed\x2a\xb4\x26\xa7\x73\xe8\x32\xad\x98\x4f\x18\x9a\xfd\xf9\xde\xb3\x00\x2a\x2f\xdf\xf4\x54\x97\xaa\xd7\x4d\xff\xc6\x12\xd9\xc9\xfe\x2a\xed\xc9\xe2\xa2\x75\x66\x59\xda\x4e\x92\xa1\x7f\xe4\xed\x4e\x77\x84\xcb\x87\x05\x2f\x64\x86\x36\x73\x13\xdf\x2a\xdf\xf2\xe1\x72\x3a\xd3\xe7\x58\xc3\xab\xc3\x6e\x3a\x2f\x2f\xf9\x88\xc6\x81\x15\x27\x8b\x07\x96\x3f\x0e\xdf\x72\x05\x49\x1d\x5e\x06\x2b\x1f\xc8\x63\x62\x7f\x9c\xe4\x88\x1f\x78\x49\x98\xb6\x98\x53\x50\xd8\xf9\x58\xe2\xe3\xcf\xd0\xee\xc0\xd6\xf7\xec\x1f\xd9\xbd\x98\xb7\x74\x08\x81\x41\xd7\x3f\x6a\x58\x34\x3e\x6b\xb3\xb0\x5f\x73\x73\x50\xa3\xed\xac\x3c\x8a\xc6\xfe\xaa\xf9\x99\xc5\x98\x70\x85\xfb\x50\xe1\x60\x60\x60\xb7\x71\x20\x58\x8d\x53\x39\x7c\x2b\x1b\x8c\xec\xe2\x3f\x0a\x38\x7c\x05\x3d\xf3\x57\x7d\xfe\x77\x8b\x84\x52\x26\x4e\x9f\xed\x2e\x7d\xbd\xdb\x18\xb1\x15\xa1\x10\xac\x86\xec\x7b\xbb\xc3\xd9\x0a\x00\x6b\xfe\x2a\x5e\x5f\xbf\xeb\xc3\x9f\x04\x41\xb7\x6e\x0d\xca\x9a\x79\xe7\xd2\x6c\x8d\xc5\xe9\xe7\x00\x1d\x06\xd0\xd9\x93\xed\x02\x7d\x25\x32\x2c\x2b\x00\x10\xa7\xd1\xf4\xdb\xe4\x07\x27\xfc\x9d\x06\xdc\x8f\xc0\x85\x67\x54\xb9\x39\xf3\xfd\xc0\x2b\x5d\xb7\x0e\x57\x77\x5b\x87\xb1\xaa\xeb\x86\x4d\x6b\x96\x61\x18\xce\x55\xf3\xa6\xed\xf9\xd1\xd6\xe2\xc8\xd8\x87\xc2\xe1\xb3\x05\xef\xeb\x8b\x0f\xe7\x43\x86\x62\xe2\x53\x74\x78\xc1\xba\xd5\x5f\x34\x02\xa1\xec\x63\xf6\x6e\xed\xf4\x07\x6e\xca\x3e\xa1\x50\xee\xfa\xf4\x23\x4a\xe0\x1a\xcf\x22\xe4\x04\x55\x66\x10\x0c\x44\x01\x7e\x36\x8a\x8a\xae\x1b\x3e\xaa\xb3\x0e\xda\xd8\xb1\x0e\x83\xae\x27\x3c\x5d\x56\x56\xc7\x97\xdd\xc0\x8a\x12\xca\x33\x1e\x50\x2c\x43\x45\x43\x93\x78\x13\x0b\xa2\x17\x6b\xc5\x00\x96\x1b\
x1e\x81\x6e\x78\x18\x71\x33\x05\x2c\xe9\xf1\x51\x12\x46\xe0\xfb\x24\x57\x1a\x6c\x90\x83\xf3\xc7\x88\x24\x23\x80\x74\x41\x62\x29\x9d\x00\x87\x7b\x1e\x31\xd2\xd2\xd4\x43\x37\x1d\xa3\xc3\x89\xa7\xa3\x9a\x19\xc6\x88\x29\x3f\x15\x41\xc7\x51\x7d\x21\xff\x2d\x2b\xe0\x4e\x30\xea\xbe\x6e\xd1\x56\x72\xf0\x47\x65\x62\x37\xc0\xb7\xbe\xee\xbd\x69\xb4\x7b\x90\x8f\xcd\x21\x53\x94\xb5\x4c\x1e\x67\xdd\xe1\xc3\xef\x68\x82\xf1\xd1\x08\xb2\xa3\xe5\xb7\xff\x68\x25\x98\x30\x48\x9a\xf9\x86\xf0\x87\x4c\x1f\x2f\xc1\x4c\xe5\x49\xd1\x59\x4b\x81\x8f\xfa\x25\x24\xef\x64\xd6\xfc\xb5\xbc\x3e\x95\x6c\x9a\x8b\xdb\xba\x75\xc8\xb6\x6e\x3b\x38\xe0\x14\x54\x96\xf4\x9a\xba\xb8\xe4\xd6\x8a\xed\x60\xb7\x17\x58\xcc\x3c\xba\x28\x8b\x67\xb9\x31\x54\xb4\xd9\x36\x38\xa6\xdf\xd9\xc1\x18\x8c\x9c\x4c\x9e\x8a\x15\xed\x98\x85\x08\x35\xaa\xf8\x8d\x62\xfe\xac\xb0\x9b\xd9\x0e\xdb\x2e\x9e\x68\xd6\xab\xbf\x3e\x67\xef\x9f\x9f\x9a\x11\x14\xaf\xf7\x74\x3e\x5d\xdd\xba\xec\x8f\xf6\x98\x17\xec\x28\xdf\xae\x6c\x6e\x71\x48\x84\x9b\x36\x68\xf1\x81\xba\x6d\x3c\xda\xb5\xc6\x4a\x8b\xac\x8f\xfc\x35\xde\xae\x1d\x29\x48\x0c\x5a\x79\xe5\x27\x2b\xaf\x05\x42\xab\xda\x03\xa8\x80\x4e\xb8\x8f\xc8\xd1\xde\x48\xc9\x47\xec\xa8\xa8\x9d\x57\xbf\x57\x90\x7c\x92\x11\xff\x8a\x12\xb9\xcc\x46\x1b\x37\xec\xd0\x0e\xc0\xaf\x38\xe4\x5c\xba\x89\x1a\x65\x89\x38\xe6\x60\xa0\x7c\x52\x7c\x11\xa9\x5b\x60\x15\xec\x9e\x1d\x8a\x4e\x18\x1b\xec\x12\x69\x9d\xbb\x9b\x23\x83\xdd\xd6\x59\x8c\xa6\x86\x4b\x8e\x86\x42\xb1\x23\x38\xc7\x17\x0a\x55\x4f\x67\xe6\x4b\x7d\xe0\xb8\xf8\x1a\x03\x27\xc4\xa7\xe7\x7d\xa8\xa7\x67\x35\x4a\x9c\xa5\x74\x3b\x8e\x25\x5a\xe7\xc5\xd5\x76\xa9\xba\x35\x71\x84\x23\x28\xb4\xa5\x0e\x81\x19\xf6\x36\x3c\x8a\x39\x12\x27\xb0\x0e\xcb\x45\x19\x9b\xcd\x89\x95\xa0\xab\x73\x82\xf3\xdb\x9b\x81\xf3\x10\x8b\xf8\xc9\xb8\xf7\x3b\xef\xa4\x38\xa3\xf2\xdc\x8d\xa9\xe5\x9f\x08\x93\x63\xe5\x12\xa8\xa3\xaa\x61\xc6\x1f\x68\x4e\x8d\x84\x91\xc8\x92\xd1\x93\x58\x87\x7c\x88\x19\x0a\x8a\x82\xe1\xc9\xe3\xa0\xe7\x0b\x1f\x9d\x2f\xb2\x2d\x8b\xad\x6a\x4a\xe2\xd4\x79\xd4\xd5\x8f\x47\x08\x87\x7a\x19\x95\x70\x2a\x7f\x7b\x8c\xa0\xf1\x0d\xc5\x6f\x65\x86\xb5\xeb\x2e\xc9\x32\x09\x6a\xd1\x6f\x81\x3e\x60\x44\x11\xb1\xac\x76\xeb\xe3\x63\x27\x61\x12\x2b\xbf\xa3\x37\x49\x7d\x4a\x81\xfa\x19\xcb\x30\x37\xee\x99\x45\x52\x88\x5c\x7e\xde\xdf\x4d\x0b\x03\xe8\x25\x5b\x4d\x24\x70\x0b\x73\xf1\x2a\xe7\x4f\xb3\xaf\xab\xd1\xa9\x15\xd5\xab\x8d\xee\x95\x3c\x6b\x89\x57\x36\xa5\xe9\xa0\x75\xbb\xbe\x75\xaf\x24\x9b\xf0\xa8\x69\xbc\x1e\x3b\x3d\x34\xf5\x73\xf1\xa9\x6a\x99\xdf\x0f\xb7\x1b\x76\x06\x6b\xae\xed\xe9\xdf\x9e\xc5\x3a\x3f\xb7\xdb\x43\x5b\x92\x98\x12\xda\xab\xb1\xf0\xe7\x74\xf3\xc3\x87\x96\x8f\x25\xa3\xb7\xf9\x43\x79\x8f\x6f\xca\xcd\x76\x05\x78\x55\xa1\x88\x16\x9d\x66\xaf\xda\x0c\x75\x56\x99\xed\xba\x39\x01\x97\x06\x56\x9b\x95\xca\x63\xb9\xa4\x38\x5a\x3a\x65\xf7\x70\x98\x3a\x5b\x41\x7c\x8d\xc1\x6e\xd5\x6c\xb3\x7d\xed\x5e\x43\x7e\x70\x54\x25\xd4\xb9\x18\xc8\xf7\xea\xa5\x5e\x9f\x45\x14\x96\x23\x80\xfb\xdd\xf3\xcd\x7b\x2b\x90\xcc\x04\x5b\xcb\x93\xb8\x22\x7c\xc1\x98\x44\x71\x6b\x9a\x6f\xe1\x1a\x04\x29\x8c\x63\x44\x0e\x79\x1e\x8f\x8e\x14\x2c\x59\xe9\xb3\x59\x30\xe9\x30\x61\x83\x88\xc7\x11\x9a\x5c\x81\xa1\x78\xa6\x9d\x8c\x74\xa4\x83\xcb\xe7\x34\xd5\x2a\x28\xc1\x44\xf8\x17\x31\x39\x81\x1f\xfc\x8f\x9f\x68\x12\xcc\xaa\x3f\xda\xc0\xc7\xf9\xc8\x45\x99\xf3\x98\xfa\x8d\x5a\xd3\x4f\x87\xd6\x5b\xd9\x32\xf4\xbc\xb0\x15\xb6\xaf\x2c\xeb\xa9\x46\xe4\xc1\x05\x5d\x61\xf4\x1b\xa8\xf4\x84\x54\x64\x46\x21\xc8\x22\xff\x21\x31\xe3\xef\x32\x3f\x13\x3a\xe5\x4a\x88\x22\x96\xa2\x63\x7a\x9e\xdd\x4a\x28\x12\x57\x75\xd6\x7c\xc9\x29\x6b\xc5\x01\xaa\xa3\xdb\xbb
\x2e\xa7\x61\x79\x39\xbf\x12\xa3\x06\x77\x93\xa9\x39\xbe\x69\xe8\xe4\x61\xbc\x3a\x7a\xc4\x30\x4c\x2c\x2a\x5e\x2e\x17\x4a\xae\x65\x5b\xfc\x5c\xb0\x7e\x2b\xef\x9f\xa3\x8e\x05\x14\xf4\x66\xc1\x4e\x37\xb0\xcc\xca\xda\x1d\x7a\x4a\x55\x03\x78\x04\x83\x6c\xba\x8b\xe9\x0c\x88\xf4\x98\x25\x08\x0e\x3b\x9f\xf1\xb5\x15\xb4\xfb\xb3\x4e\xcd\xea\x67\xed\xec\xdb\xc6\x9e\x87\xe9\x55\x6a\x79\x4c\x72\xfb\x28\xcc\xb4\xf8\x35\x7a\xaf\x24\x7a\x6d\x87\x19\x28\x6d\xa4\x19\xb8\xac\xf3\xab\x08\x97\x24\x94\xf9\x91\x57\x7d\xbc\x99\x70\xaa\x74\x9b\xdf\x4f\x77\x95\x5c\x84\x44\xae\xd8\xfb\x9c\x6a\x34\x78\x1f\x27\x26\xf5\x28\x27\xcc\xe8\x54\xdc\x3c\x53\xf7\x4a\x67\x37\x5c\x87\xb0\x16\x78\x8c\xef\x65\xb8\x37\xac\x58\x77\x86\x03\xfb\xd9\xf5\x8c\x1a\xeb\xf9\xb6\x4e\x85\x69\xa4\x5e\x3b\x46\x69\x31\xe8\xb3\xb7\xb4\x03\xd6\x7c\x21\x9f\xde\xb2\x7d\x07\x00\x9a\xaf\x0c\xbb\x01\xa6\xec\xcb\x81\x61\x59\x84\xd5\xc4\x30\xe3\x02\x56\xdf\x35\x7d\x01\x58\xbb\xc3\xfa\x85\xff\x9b\x1a\xa2\x4f\x4c\x7c\x68\x3f\xa5\xd7\x25\xb9\x18\x14\xe4\x33\xb9\x87\x6a\x12\x7f\x00\xa8\x5f\x07\xad\xfc\x80\xb5\xbf\x0a\x71\x67\x48\x36\x7b\x1f\xb4\x34\x10\x41\xf4\x8c\x51\x20\xc4\x07\x93\xe5\xd3\x06\x61\x79\x76\x3d\x1a\x06\x11\x73\x54\xdb\x26\x3a\x06\x75\xf9\x58\xb5\x84\x81\x31\x36\x8b\xc2\xdc\xc6\x37\x57\x5c\xe3\x1f\xcf\x34\x99\xbb\x65\x5d\x23\xa3\x56\xdd\x8e\x28\x9c\x62\x3a\xa3\xa5\xcd\x8f\x92\x0e\x7b\x8d\x83\x03\xf8\x61\xc6\xbc\x2b\x6f\x3d\xaf\xab\x03\xcb\xd5\x76\xf7\x3b\x00\x21\x59\x9c\x4c\xfe\x91\x99\x14\xce\xf7\x4e\x8e\x5d\x76\xef\x4e\x50\x72\xdf\x47\x21\x55\xb8\x8c\x8d\x72\x84\xeb\xb7\x7a\xfa\x58\xff\xb1\xfa\x56\x0b\xd5\xf5\xc1\xbe\x2c\xc1\xb6\x6a\x6c\x2c\x58\x67\xbf\x03\x78\x73\x97\xe1\x20\xc5\x5a\xf0\x97\xd4\xf6\x69\x40\x64\xfa\x63\xf7\x9f\x10\x07\x3b\xec\xb0\x8e\xea\x8c\xe6\x95\xb6\xbd\xc8\x49\x71\x9e\xb4\x42\x96\xed\x0b\x09\xeb\xdd\xae\x97\xf7\x5d\x2f\xf7\xaf\x94\xc9\x0b\x62\xc6\x5f\x56\x97\xc0\xcb\x36\xf7\xf3\x28\xd2\xc3\x97\x74\xc8\x33\x6b\x67\x93\x32\xd6\x01\xad\x7e\xe3\x58\x05\xa7\xef\x47\x2d\xc3\xbd\x45\x9d\xba\xeb\x12\xc8\xcd\x4f\xa2\xbf\xdc\x1d\xb4\x5c\x71\xe6\xb7\xca\x5b\xaa\xd2\x15\x01\xdf\xee\x8a\x40\xb3\x90\xd0\x70\x1d\x37\xa0\xba\xb7\x2c\x6f\x48\xd7\xca\xa0\x51\x2d\xd4\x5c\x33\x08\x7a\x54\x9c\x03\xf9\x62\xb0\x04\x09\x79\x68\x6c\x46\x39\xc1\xd6\x4c\xa1\xd6\xe4\x1d\x67\x90\xd5\x3d\x3d\x18\x5f\x17\x35\xf3\x7c\xd7\x18\xd4\x19\xab\xc9\x29\xe5\x34\x2e\xb3\x88\x4a\x27\x40\x29\x45\x86\x3b\x69\xec\xc0\x97\x97\x84\x2a\x1b\x44\xd6\x72\x87\x0b\x92\x99\x0a\xbd\x07\x15\x32\x3a\x15\xf1\xb9\x0f\xc8\xbc\x94\xdc\x68\x5f\xa0\x93\x4b\x0f\xc5\xc4\x65\x41\xda\x86\xbe\x22\xe0\x5b\x2e\x32\xf3\x02\x55\x27\xb7\xff\x64\xb1\x66\x48\x0c\x6a\x61\x56\x4a\xf8\x1a\x16\x81\x3c\xbd\xc7\x05\xc9\xb4\x85\xf8\x73\xaa\xb9\x82\x5c\xe3\x72\x18\x5b\xb8\x6d\x65\x03\x30\x60\x8c\x0e\x34\x6e\x41\xc8\xe8\x64\xa6\xc9\xf8\xba\x2d\x0f\x4d\x42\x44\x53\xef\xbd\x4e\x1b\x1e\x34\xe7\xd6\xaf\xea\xf1\x17\xdc\xa9\x76\x47\xe2\x18\x33\xd6\xae\x06\xd3\xe9\x7b\x8b\x7d\xf6\x64\x2a\xb9\x44\x81\x3b\xb8\x49\xbc\xa9\xd5\x0f\xd3\x9a\x6f\x9c\x16\x0c\x65\xf8\x20\xe1\xf7\xda\x42\x77\xc4\xa5\x8b\x4e\x23\xd7\x16\x7e\x93\x6b\x33\xfe\x00\x58\x9f\x19\x47\x9a\xe6\x7d\xfb\x84\xe7\xf8\xe8\xc5\xf7\xd6\xad\x3d\x02\x5a\xba\x60\x7a\xcc\x5f\x57\x03\x24\x6d\xa2\x06\x78\xbc\x1b\xbb\x07\x5f\xc4\xec\x0c\xd4\x29\x4c\xf1\xf0\xce\x90\x5d\x9f\x2e\x7f\x87\x23\xf9\x7a\x83\x57\xd7\x6b\x62\xef\x65\xc9\x2b\xbb\xa6\x21\x7e\x58\x42\xa7\x4d\xad\x9f\xdd\xa4\x9c\xb9\xab\x9d\x27\x92\x65\x65\x5a\xd2\x2b\x5f\x6d\x28\x6a\x4d\x89\x83\x57\x0f\xe2\x36\xa5\xbf\x5b\x0a\x8b\xab\xe1\x73\xe0\x9b\x2c\xe5\x07\x76\x0b\x6f\x77\x64\x0c\x1e\xc3\x4f\x13\xe0\x2
5\xcd\x67\xde\xa6\xee\xd8\x26\x63\x2a\xe8\x06\xd7\xb9\xb3\x57\xda\xca\x61\x10\xbe\xf2\x6d\xa2\xd7\x34\x94\xbd\x60\xeb\x8f\x3b\x67\x3e\x85\x62\x67\xf1\x12\xa3\x9e\xd5\x2d\x6e\x8e\x8e\x75\xad\x0d\xc9\xe1\x6c\x07\x4a\xe2\x15\x0e\x6e\x80\x1d\xe6\x16\x12\x01\x40\x37\xc0\xbe\x74\xd6\x87\x5b\x18\x35\x7e\x8f\xe1\xce\x5b\xbf\x73\x8d\xd8\x8b\x85\x50\x1b\xa4\xa2\x6e\x4e\x3a\xbc\x9b\x93\xf1\xe0\x52\x25\x34\x40\x03\xf8\xf9\xc9\xe2\xd5\x87\x8e\x31\xbb\x8b\xc0\x69\xe5\xe9\xbd\xaf\x3e\x2d\xd5\xb0\x1c\x34\xae\x60\x97\xfd\x86\xa8\x73\x76\xe4\x4f\x9d\x9e\x92\x9f\x6f\xe9\x14\x80\x6d\xf2\xa3\xbc\x43\xc1\x70\xd6\xb5\x2b\xb2\x0d\xb6\x96\x16\xb4\x56\xa4\x2f\x52\x62\xce\xf7\xc6\x44\xe6\x73\x42\x56\x70\x2f\x63\xb9\x0c\x87\x95\x04\x3e\xda\xaf\x2b\xa4\xb0\x00\x3a\xe3\x54\x51\x1d\xef\x50\xea\x71\x4d\x86\x41\x56\x18\xde\xdf\x6c\x3f\x3f\x54\x61\x4e\x5a\xc4\xc9\x79\x53\x47\xc9\xbe\xe0\x2e\xb2\x20\x5f\x61\x21\xe1\xbe\xe4\x36\xef\xb3\x3b\xec\x7e\xdb\x4c\x0b\x9a\xae\x8e\x7c\x47\xb5\x39\x34\x1b\x5c\x03\x18\x9f\x20\x65\x7b\xd1\xe9\x0c\x63\x2b\x01\x5b\x5e\xba\x7a\x21\xcd\x6d\x3c\xaa\x2a\x85\x67\x66\x29\x5a\x3d\xfd\xab\xcf\x97\x77\xb7\xf6\xbc\x1b\xc8\x37\x4e\x15\x9e\xbf\x46\xf0\x1b\x1c\xe7\x22\x2e\x4c\x27\xc5\x52\x47\x8a\x28\x0d\x84\xab\x23\x67\x7d\x1d\x26\xba\xcc\x4d\x6c\x6b\xba\x65\x85\x34\x36\x7f\x94\x1a\xd6\x1c\xad\xec\x36\x4f\x46\x3c\x2b\x03\x0d\x9f\x48\x0a\x07\x97\x47\xed\x2c\xb8\x81\x0b\x9b\x3a\xe7\x97\xdf\xcd\x50\x1c\xb4\xb9\x91\x8c\x13\x36\x8d\xda\x8a\x2e\x02\x03\x8e\xc6\x1e\x88\x3c\x79\xe4\xd7\x24\xf2\x69\x17\xfb\xe2\x7d\x43\x54\x83\x58\x9b\xf1\x00\x90\x79\x21\x17\x96\x83\x56\x2e\xf8\xc6\x65\x2e\x17\x96\x93\x26\x42\x16\x41\xf2\x23\xa6\x52\xbc\x0e\x96\xd3\x2f\xfd\x91\xc4\xc3\xee\x1d\xe2\xc3\xdb\xbb\x87\xfa\xbc\x8d\xa6\x63\x67\x46\x2e\x86\x28\x95\x91\xfe\x1b\x0f\x54\x5f\x0e\xd0\x17\x9a\xdb\xd1\x2b\x91\xf2\xc2\xda\xb3\xef\xb1\xfe\xe3\x34\x22\xa2\xfa\x70\xec\x84\xc1\x7c\x5f\x9b\x12\x1f\xa7\xab\x8a\x80\x3b\xeb\xe7\xdb\xdf\xce\x12\x1b\x4f\xed\xdf\xa3\x67\xef\x75\x01\x1c\xca\x2b\x6b\xea\xee\xf8\x74\x77\x66\x33\xf0\xb4\x56\x30\x36\xd8\x5e\xb7\x70\x07\x7b\x18\xb8\x2c\xf3\x3c\xe5\x8b\x01\xfb\x68\x71\x9f\x91\xb1\x6d\x73\x6a\xe5\x1b\x0f\xc3\xaa\x5c\xc4\x03\xe6\x0d\x84\x07\xf8\x71\x49\xf9\x3d\x2a\xab\x3b\x05\x54\x07\x0b\x00\x31\x6c\x9d\xb2\xea\xb3\x55\x2b\x8a\xa5\x8f\x80\xbc\xea\x3f\x35\x7a\xd5\xce\xaf\x54\x84\xdc\xb7\xe8\x00\xbb\x09\x3a\x3e\x86\x25\x86\xe0\x0f\x1e\xb2\xdc\x92\xcb\xe0\x77\xad\x1b\xec\x74\xb5\xba\x2f\x32\x37\x2f\x7e\x02\xd8\xa2\x34\x63\x54\x37\xd8\x66\x2f\x72\xa0\x15\x37\xe5\xdb\x6c\x2d\x1e\xf3\xa8\xa4\xa8\xfe\x60\x95\x47\x4e\xdf\x67\xd5\x1f\x64\xaa\xc8\xa4\xdd\xaa\x07\xbc\xf0\x27\x55\xc2\x44\xba\x19\x76\x06\x56\x36\xfb\x7c\xf2\x6c\xc7\x38\x98\x50\x11\x21\x09\xf5\x10\xed\xbe\xbd\x14\x73\x23\x69\x4f\xb6\x2b\x0b\x45\x3a\x7f\xaf\xba\x38\xf4\x77\x25\x39\x05\x2e\x93\x44\x60\xc7\xe4\xa5\x14\x4b\x4c\xa1\x6e\xb1\x3d\xd4\xb1\x8a\x48\x9f\x49\xff\x5c\x2e\x59\xb5\xa0\x0d\x2c\x47\x37\xc5\x4e\x7e\xea\x3f\x5c\x1c\xef\xc4\x67\xab\x24\xa6\x27\x12\x41\x86\x62\x7f\x3a\xc0\xf0\xb7\xff\x14\x42\x6f\x3c\x5d\xb8\x16\x0c\xfa\x87\x3e\x66\x9d\x0f\x1b\x32\x35\x62\x35\xc3\x6d\x24\x10\x5c\xbe\xaa\xb5\x52\x63\xfd\x3e\x8d\x88\xea\xa0\xb4\xd8\x6f\xeb\x72\xaf\x6f\x17\x79\x64\x53\x5d\x8c\x65\x50\x0b\xfd\x67\xdd\xfb\xac\xee\x77\x6a\xc5\x17\xd6\xb5\xc1\xc0\x6c\xe8\x81\x0e\xbb\x99\x6e\xb9\x3d\x98\x10\x00\xd3\x59\x4b\x35\x50\xa8\x92\x94\x7a\x51\xf5\x33\xba\x59\x6d\x0b\x3c\xe0\x57\x7b\x38\x0f\xeb\x00\xe0\x00\xbc\x4d\x26\xc3\x52\x3d\x8c\x64\x27\xd9\x68\xf4\x36\x7d\xa8\x1a\xd8\x76\xa8\x24\x16\xe4\xc3\xbd\xed\x40\x25\x86\x3e\xb1\x09\xcc\xfa\x1e\x38\x1c\x65\x
c1\x3d\x2d\x70\x40\x39\xcd\x8a\xc7\x6c\x99\x7e\x7f\x2f\xad\xcf\x3e\x1b\xa1\x90\xed\xbe\xc8\x01\x7d\x83\xb5\x96\xba\xba\x31\xb1\x2d\x71\x5c\x3a\x31\x09\x0e\xaa\xe7\x55\xba\xfd\x21\xf1\xe8\xed\x29\x4c\x92\xf2\x09\x5d\xe0\x2a\x71\x5d\x5a\xbd\x99\x0e\xb3\x59\x8b\x8b\xbe\x4c\xdc\x00\xdc\xe5\x3c\x07\xba\xce\xb3\x65\xac\x96\xcb\xa2\xf1\x12\xaf\x08\x7a\x77\x86\x61\x48\x92\x6f\x2c\x39\x3f\x7b\x37\xbb\xc9\xe6\x1d\x72\x43\xe3\x30\x30\xbc\x2f\x78\x43\xa9\x8a\x71\xbd\x85\x8f\xc6\x75\x7b\x64\x28\x81\x9f\xc9\xfc\xdb\x75\x72\xa0\x5b\x06\x75\x9f\xcf\xbf\x42\x55\xe6\x77\x70\x86\x0c\x4b\x57\xa7\x35\x58\xe3\x10\xd2\x14\x99\xa3\xb6\xaa\x97\x88\xf9\x4a\xe6\x7d\xbd\x92\xcf\x3a\xe1\x8a\x0d\x13\x73\x95\x51\x42\xe5\x2b\x0c\xf1\xd9\x9a\xdb\x40\xf8\x65\xe7\xfe\xad\x65\x27\x92\x25\x02\x72\x34\x1a\x8c\x6e\xe2\x36\xeb\xd5\xac\xb9\x45\xaa\x88\x68\xdf\xb5\x68\xd4\xee\x3b\xd4\x79\x76\x8e\x35\xd3\x40\xa2\x5f\x77\x8a\xe7\x8e\xe3\x8c\x95\xa2\x93\x35\xb7\x8e\xb4\x76\x7d\xe7\xbe\x90\x72\x88\xcf\x1b\xa6\x22\xc4\x35\xd7\x42\x81\x7d\x72\x01\x49\x9f\xdf\xc0\xc1\x16\x3e\xbf\x51\xd6\xc3\x7b\xf8\x22\x46\x93\x38\xd5\xe8\x7d\x98\x98\x2b\xd8\xde\xa7\x26\xa9\x5a\x38\x4e\x1f\x58\xee\x77\x4e\xab\x04\x3c\x6c\xd1\xf4\x3d\x16\xfb\x4e\xa5\x3f\x68\x1e\x7e\x78\xac\x81\x3e\xd1\xbc\xcd\x56\x81\x37\x76\x7e\xec\x5b\x29\x0e\x49\x0f\x66\xff\x2a\x5e\xec\x19\x95\x42\x49\x88\xb3\x90\x0b\x4b\xb5\x63\x0a\x05\x95\xdd\xa7\xc9\x4a\x53\x38\x7a\x4c\x00\xbe\x04\x76\x39\x70\x6b\x12\xef\x6b\xc4\x05\xa0\x2d\xa2\xd5\x07\xb4\x3d\x3f\x00\xd5\x00\x57\xb2\x46\x01\x55\x1c\xb4\x86\x96\xf3\x8b\xe9\x89\x99\xdd\x6f\xe0\xd1\x0b\xba\xc0\xdb\x6d\xc3\xfb\x76\xce\x16\xd3\xc0\xe5\xbc\xd0\xc5\xa3\x2f\x6f\xec\xe9\xc5\x7f\x54\x7f\xfd\x33\xc0\x30\x55\xb1\x46\x00\x91\x44\xdf\x2c\xa6\xe7\xad\xf7\x57\x2d\x91\x88\xcd\xb6\x19\xe7\xc4\x5a\xfb\x17\x0f\x68\x3a\x6a\x78\xc5\x0f\x46\x65\xcf\x46\xad\x0c\xad\x4f\xa9\x44\x69\x75\x96\x94\x3b\x7d\x4d\x1e\xa1\x0a\xdf\xbe\x87\x8b\xeb\x95\xd0\x01\xae\xe5\xc4\xa5\x82\xe1\x72\x83\x33\x1c\x5f\x26\xb1\xc0\x9d\x7a\x2f\x1f\x09\x02\x82\xab\x74\xd9\xe7\xef\x32\x11\x0a\x50\xe7\x1a\xb5\x21\xc5\xab\x9a\xb5\xcc\xa2\x83\x51\x95\xf0\x9f\x00\x95\x15\xd6\xd6\xcb\x04\x05\x98\xc2\x84\x0d\x09\x5e\x73\x17\xc6\x6b\xda\x8d\xa1\x14\x0a\x83\xad\x25\x00\x4b\xcb\x88\x0d\x11\x13\xb5\xf6\x1e\xcc\x4a\x3a\x9b\x44\xa6\xb6\x8b\x0d\xfb\xdb\xf7\xd7\xa8\xef\xde\x65\xae\xc0\x4a\xd8\x45\xdd\xf5\x1f\x75\x4a\x4d\x03\xbe\x3d\xdb\x37\x72\x25\x51\x38\xdb\x42\x83\xa2\xa5\x2b\x03\xb3\x1f\xcc\xd5\x8d\xa0\x07\xbb\x46\xf0\x57\x35\x7c\x80\x2d\x37\x84\xb7\x3a\x84\x3f\x63\x70\x90\xd4\x7b\xa0\x52\xfd\x44\xd5\x2d\x48\x87\x1e\x07\xad\x30\x0c\xe9\x9f\xd5\x80\x75\x73\x36\xeb\x60\xf2\xe6\xe5\xd2\xb4\xe6\x17\x4b\x77\x0b\xfd\x4e\x22\xf7\x32\xaa\x45\x5b\xe8\x91\xce\xe3\x47\x2e\x3c\xd5\xac\x98\x31\x5a\xe9\xe8\xb9\x86\x53\xdf\x82\xdb\xaa\xce\x73\xa0\x1b\x20\x5f\xdd\xcc\xb1\x13\x00\x8c\xce\x1e\x77\xd2\x80\x7c\x58\x03\xb6\x27\x07\x7c\x60\x2d\xd6\x99\x41\x5c\xf2\x50\x9c\x03\x5c\xa0\x51\xd4\xf4\x84\x2a\x80\xe5\xc9\x9f\x39\xea\x60\x1c\x5f\xbf\x7a\x62\xf7\x0d\x30\x83\x76\x17\x1b\xc3\x61\x22\xb1\xa1\x1f\x61\xf0\xc2\x9a\xc5\x9e\x5a\xfb\xf4\xbb\x0d\x6b\x89\x14\x9c\x5c\x2b\x9e\xea\x65\x13\xd7\x6d\x7f\x94\xf7\xca\x0f\x13\xe1\xdb\x36\x6b\xdf\xaa\xd9\xac\x92\x75\x0b\xf8\x05\xb1\xab\xd8\xf3\x56\xc3\x6f\x4d\xcf\xbe\xf0\x63\xe5\xaf\x19\x5b\x15\x6b\x4b\x02\x16\xad\xcb\x23\xf6\x15\x3d\xf3\xd4\x0c\x38\x34\xf9\x5a\x2e\xbe\x9d\x06\xb7\xe8\x95\x2f\x14\x59\x92\x8f\xfb\x63\x6e\x67\x69\x7a\xaa\xf3\x9c\xed\x5e\xcc\xbc\x95\x4b\x0d\x30\x38\x7c\xf5\xc9\x21\x7f\x85\x28\xa0\x8b\x49\xb4\xae\xaa\x89\x2c\x4c\xfd\x49\x89\x00\xcf\x87\x05\x37\x30\x89\
x6e\xc3\xca\xf4\x90\x46\xa6\x68\xac\xf1\x1b\x6e\x2d\x47\xfe\x32\x99\x8b\xd0\x5a\x5d\x64\xa7\x77\x51\x59\xda\x59\x50\x83\xaa\xbe\x4c\x6a\xe4\xe4\x21\x97\xa3\xa5\x3b\xf3\x82\xfb\x2e\x14\x9a\x2c\xab\x18\xf6\x71\x9b\x8d\x8d\xf3\x47\x72\x1b\x59\x4f\xa6\x63\x11\x07\xef\x9a\x36\x03\xde\xa3\x92\x80\xaf\x84\x75\x4c\x5f\xd1\x5e\x10\x52\x90\xc7\x98\xbf\xb9\x19\xbc\xf7\x80\x42\x9f\xb5\x45\xc1\xc8\x2c\xda\x00\x03\x7c\xe6\xc1\x9b\x75\x52\x37\x79\xce\xf2\x8a\x4b\x92\x6a\xca\x82\x34\xe2\xc2\xe2\xcb\x41\xc1\x12\x61\x94\x2a\x3c\xd7\xf4\x37\x4f\x0c\x09\x6e\x70\x1b\xa5\xc3\xd6\x86\x1a\xa2\x6c\x99\x44\x3f\x5a\x11\xa0\x64\x25\xe2\x75\x75\x32\xbe\x07\x3d\xa7\x50\xea\x23\x5e\xd1\x51\xa9\xd3\x18\x88\xa9\xe8\xf6\x19\x01\x88\xd0\x61\x71\xbd\x6f\xc6\xc7\x9b\xf8\x9c\xa6\xd5\x42\x9a\xa7\x96\x22\x6a\xca\x25\xc2\x85\xf6\x9f\x0c\xa7\x4f\x32\x7b\xa7\x00\xad\x6e\x1c\xf5\x69\x6c\xe5\xaa\x17\xd3\x31\x94\xe5\x7a\x5f\x9a\x51\x65\xc2\x7f\x38\x10\xb6\xff\x32\xfc\xd4\x04\x40\x45\xb5\x49\x0e\x0e\x6c\xf0\xd6\xfb\xb6\x07\xd9\x50\xc8\xa7\x83\xf6\xfe\xb4\x0b\x70\x4c\x13\xa7\x58\x2a\x5a\x6e\x7a\xbe\x35\xfc\x04\x8c\x94\xd9\xf5\xca\x8f\xda\xdf\x7e\x62\x96\xa7\xb3\x26\x5f\xd9\xbe\x0f\x40\xf6\xe0\x0e\xe1\x9c\x42\xf2\xf6\x89\xab\x4a\x82\x5b\x2a\xbf\xee\x02\x62\x50\xd7\x32\x53\x86\x66\xe3\xcb\x81\x3f\x6b\x18\x4a\x38\xed\x3a\x05\xa1\x68\xf2\x25\x94\x94\x87\x3f\xe5\xed\xf3\xc1\x14\x86\xd0\x83\x1e\x4e\xe1\x6c\x05\xd4\xd7\x98\x25\x3f\xf9\xfd\x6a\xb7\x5e\x20\xc2\x60\x5b\x6e\xb3\x71\x40\xa0\x13\xff\x0f\x06\x8f\x0e\x9f\xf4\x61\x67\xc3\x86\x28\x1b\x26\x83\xee\x6e\x2a\xfd\xc3\x1d\xce\x60\x77\x6c\x53\x4e\x9a\x2e\xe7\xbe\xba\x81\x87\x20\x3a\xb5\x56\x66\xdc\x71\xd4\x49\x29\x6a\xb6\xfa\x65\x95\x70\x48\x17\x2d\x08\x38\xb2\x95\xb0\xb9\xf8\x3e\x9f\xf3\xc2\xfe\x70\xac\xc5\x5f\x24\x28\x3a\x3e\x59\x40\xe4\x16\xe3\x2c\x5c\x4e\xfe\xe0\x93\x00\x08\xf8\xd9\x2e\x5d\xf8\xa5\x43\x87\xaa\x23\x47\x6f\x3e\x6b\xc6\x39\x6e\x43\x54\x15\x99\x8c\xe4\x5a\xd4\x4d\x18\x8a\x81\x77\xef\x70\xbb\x19\x29\x16\x36\x17\x55\xbf\xa1\x8f\x33\xe6\x5f\x3b\x2d\x36\x0d\xff\x79\xcb\x37\x40\x9a\xbd\x12\x6c\x8e\xb8\x49\xa0\xb7\xd7\x0b\x1b\x34\x1c\x6d\x86\x59\x79\x1a\x0f\x44\xa9\x3e\x79\x8f\x1b\x96\x3d\xc4\xdf\xf0\xf5\xf9\x13\xb0\x19\x42\x8c\xfb\x83\xc4\xb4\x3f\x34\x3e\x28\x21\xa6\x7f\xcb\x54\x62\xe1\x85\x56\xd6\xe6\x50\x39\x96\xc3\x9c\xa9\xe9\x15\xab\x8c\x89\x4f\x76\x85\x16\xb7\xbb\xa6\xeb\xde\x24\xab\x96\xa7\x89\xc0\x3e\x64\x48\x78\x4d\x7f\x7a\x29\xd6\x9c\x8e\xac\x0a\x21\x69\xf6\x04\x35\xea\x6a\x5d\xd2\x6c\xec\x28\x4e\x39\x4f\xab\x98\x68\x57\x0b\xb8\xe4\x0a\x94\x9d\x9a\x6c\x72\xe9\x68\x2d\xeb\x98\x04\xbc\xc0\xd3\xdc\x02\xde\x72\x80\x03\x5b\x6c\x2e\x12\x0b\xbe\x42\x49\x2b\xbb\x11\x8d\x51\xa9\x6f\xd4\xdb\x47\xbe\xdd\x06\xbd\x5e\x8e\x54\x34\x2d\x68\x29\x86\x31\x0e\x86\x7c\x37\xa7\xfa\xee\xc2\x19\xe8\xc5\xa2\xa5\xdb\xf3\x7d\xfa\x50\xfa\xd5\x8a\xe3\xab\x4c\x1f\xde\xda\x48\x12\x96\xb7\x04\x65\x24\x53\xa2\xb1\x78\x61\xda\xd9\x20\x86\x87\x71\x4a\xaf\x23\x51\x80\xe4\x22\xc2\x91\xe6\x8b\x56\x02\xc4\x8f\x69\x96\x45\xb3\x3d\x0b\xb0\x73\x32\xa0\xd0\x88\x80\x9b\x15\x3a\x91\x72\x4c\x2c\x54\x03\xd4\x72\x84\xa0\x3f\x16\x98\xb5\xe0\xb0\x81\xef\xe1\xe7\x83\x87\x42\xa6\x41\xea\xfe\x44\xa1\xe1\x21\xec\x21\x3f\x4e\x49\xc3\xb7\xd9\xac\x87\x64\x52\x14\x2a\x25\xda\x5b\x0e\xad\x9c\x52\x3e\x81\x05\x38\x26\xa5\xc6\x83\x33\x80\xbc\x43\x70\x00\xf3\xa9\xa5\xfe\x05\x83\x7f\x10\x66\x58\x61\xb2\x11\xfa\xbc\x6c\x6c\x29\x01\xd0\xa7\xea\x4a\xbb\xf4\x5e\x0c\xf0\x0f\x8f\x64\xe3\x80\xc2\xbf\x42\xcf\x3c\x92\x4a\x31\x1c\xf8\xb4\x8e\x0d\x5e\x28\xce\x8d\x4f\xd0\x8c\x45\xd8\x7a\x74\x2b\x63\xc0\x2f\x64\xaf\x35\xd0\xb5\x8b\xf7\x88\xfb\xec\x78
\x8a\x46\xbf\x59\xac\x6d\x45\xe7\x04\x0d\xdf\xe6\x9c\x49\xbe\xc6\xdb\xcd\xdb\xe7\x9d\x9f\xdd\x94\x03\x2b\x69\x3a\x9b\x5c\x58\x98\x6d\x4f\x45\x86\xa9\x08\x4d\x7c\x88\xce\xf1\x83\xd4\x03\xed\x45\xa2\x28\x27\x8d\x8e\x29\x51\x07\x36\x2a\x28\xf5\x8b\x2e\xf4\x75\x68\xc0\x90\xeb\x3e\x71\xd8\x8c\x03\xcf\x31\x18\x57\x66\xed\x14\xe7\x3c\xe7\x9b\xc1\xf6\x5e\x39\x49\xfa\x8b\xfb\x22\xeb\x97\x97\x6c\x6e\x38\x78\x84\x38\x0d\x7d\x23\xa2\xd0\x9c\x87\xdd\x7e\x0f\x1b\xa2\x78\x70\x9a\xb0\x8f\x62\x16\x9d\x3e\x19\xa1\xe9\x85\xc8\x1d\x68\xf9\x1e\x60\x9c\xc0\xc0\xf3\xa3\xaa\x18\xa8\x17\xf4\x22\xed\x18\x44\x17\x09\x74\xc1\xc2\x5f\x31\xac\x98\x03\xb0\xc2\x06\xdd\xec\x6e\xf2\x44\x3e\x60\xa3\x3e\xd3\x15\x09\xc5\xe0\xf4\x79\xc1\x69\x2e\x44\x3e\x29\x00\x7e\xea\x12\x24\xfc\x66\xd8\x41\x9b\xf5\xbb\x1d\xa6\x03\xfe\xc7\x4f\xf8\x9c\x99\x0f\xfb\x0f\x16\xe4\x30\xf3\xe0\x27\xde\x87\x34\x64\xce\xad\x63\xed\x8e\x0f\xe2\x0d\xdf\xf3\xf5\x5b\x79\x1b\x88\x05\x7a\x34\x16\x62\x5e\xf6\x08\x6f\xb0\x61\xa1\xb8\x48\xea\x3f\x10\x7b\x89\xc1\x1f\xde\xfe\x75\x68\x1b\x6d\x1f\x10\xf7\xd9\x77\xea\x76\x5e\xcf\x37\xeb\x9a\x90\xe4\xef\x21\x5c\xf3\x48\x70\xf5\x11\xf4\x69\x01\x1b\x9e\x2f\xd8\x7e\x7b\x69\x36\x68\xbf\x05\xff\x62\x4e\x85\x8e\xdb\x52\x16\xdf\x76\x3f\xd5\x08\xad\xee\x8a\xb7\x0c\x5a\x29\x15\x63\x6e\x6c\xaa\xd9\xaf\x5b\xc6\xc7\xfb\x11\x5b\xb7\xd8\xe6\x56\x90\x8d\xc8\x30\xdf\xc1\xec\xde\x7d\x84\xdb\xec\xbf\xd0\x50\x7a\x7d\x0f\xac\x8e\xf2\x92\x1d\xa0\x5d\x0a\x4f\xff\xe2\xf2\xfc\xf8\x35\x5e\x25\x63\x6c\xfc\xcf\x27\xa0\x7e\x9f\x0f\x83\x6a\xb6\xef\xd1\x64\xcd\x3d\x55\xf1\x68\xe5\x17\xbe\xf8\x78\xd4\x5b\xe8\xdd\x22\x9d\xdc\xcb\xc4\x52\x3f\xf3\xda\xba\x03\x86\xc7\xe9\x10\x14\x7b\xf3\xeb\xdb\x72\x0d\x01\xb3\x98\x19\x1b\xd9\xa8\x4b\xd3\x5a\xe3\x62\x23\x0d\xed\x96\x46\x35\x2a\x9e\x9f\xad\xdc\x20\x35\xc4\xba\x1d\xd6\x19\x7a\x54\xed\x7b\xf8\x3a\xc4\xce\x9d\x5f\xfc\x33\x28\xd1\xf9\x5d\xcd\x8b\xa4\x91\x51\x65\x6e\x2e\x4e\x7c\x67\xcf\x9c\x49\x50\x16\x03\xe6\xd5\x7c\x7a\x00\x28\x81\x59\x44\x7f\x7e\x74\x33\x7b\xcc\xb4\xe3\x39\x16\x7d\x08\x62\x14\xdb\xd0\x4a\x80\x46\x57\x2e\xd5\xc9\x1d\x43\xa7\x16\xe7\x0a\x85\x00\x7b\x34\x0c\x50\xd5\x12\x2e\xad\xb4\x08\x32\xfc\xc1\x46\xc7\x79\xe5\xa3\xc4\xd2\x99\x8d\x4c\x8f\x6e\xd5\x99\xdc\x85\x02\x8b\xfd\xd7\xf9\x8a\x83\xe2\x15\x8c\x8d\xb2\x96\x90\xab\xf3\x7c\x0f\x3d\x37\x74\x08\x42\x26\xa7\x33\x26\x91\x24\xf9\x42\x72\xcc\xd1\xd7\xd3\xf3\xac\x69\x4e\xe4\x2c\xac\x56\xea\x09\x86\x8a\xab\x69\x58\x97\xf6\x36\x41\x89\xa2\x29\x3c\x74\x54\x93\x38\x85\x9d\x2d\x2f\x44\x26\x9a\x92\xc6\xfa\x9c\xb8\xe4\xed\x78\x94\xb9\x29\xf6\xa1\x43\x2d\xf4\x8e\x09\x28\xd8\xc0\x75\x7f\x1f\xe0\x41\x82\xb2\x78\xef\x8b\x71\x5e\xc6\x47\x8b\x72\x70\xfc\xf0\xcf\xcf\xfa\xb6\xaa\xde\x5c\x99\xfa\xb1\xa7\xc2\x60\x25\xe7\xcb\x25\xae\xaf\xeb\x93\x65\x83\x5e\x6a\xa7\x3c\x75\x0e\x44\x3a\x52\xb6\x31\x11\x23\x32\x39\x21\x9e\x58\x87\xe9\xed\xdc\xc5\xed\x13\x13\x23\xd0\xcb\xf1\xf1\xc8\x8f\xc8\x78\x9f\xcf\xa1\xbb\x8b\x37\x5f\xeb\x9c\xe0\x94\x0b\x5d\x44\x09\x3d\xcb\x96\xcd\xde\x00\x07\x43\x0e\xd8\x49\xf0\xa1\x75\x89\x5f\xbd\xc7\x85\x77\x9d\x97\x93\xfb\x69\x53\x0b\x97\x9d\xfb\x2b\x39\x1f\xfe\x84\x0f\x85\xbd\xed\x9a\xa7\x55\x91\x33\xce\x95\x6e\xa1\xeb\x29\xec\x2f\x98\xe3\xbc\xc0\x45\x34\xc6\x67\x91\xa2\x8e\xd0\x97\x46\xb5\x00\x97\xc1\x4f\x12\x19\x14\x05\x9c\x6e\xd6\x0a\xdf\x9c\x7e\xd8\x29\x67\xe1\x84\x33\x00\x1f\x1c\x60\x8a\x04\xdc\xf9\x3a\x38\xec\xb7\xda\xa7\x79\x6b\xef\xa2\xa1\x5c\xbc\xfd\xfd\x21\xd0\x13\xbb\x27\xf6\xd3\x5a\xde\xba\x49\x0a\xd8\xb2\x54\xbf\xef\x69\xd2\xba\xe4\x48\x20\xc1\xcf\xc8\x97\x66\x4a\xc7\x7c\xc5\xaf\x4d\x87\x48\x79\xf9\x8b\x05\xdf\x58\x77\x74\x56\xd
8\xc7\xcb\xb8\xe6\x4f\x67\xad\x43\x84\xdd\x5d\xbb\x31\xe9\x30\x0d\xbb\x71\xab\xaf\xf5\x86\x47\x6d\xae\x3b\x4a\x58\xe5\x92\xf8\x85\xe5\x5a\x9f\x56\xfa\x28\x42\x57\x8e\xa1\x16\x3e\xe1\x16\xe0\xae\x21\x53\xe4\xd5\xc1\x4e\x6f\x6f\xe7\xe0\xde\xc9\xd1\xce\x91\x8e\x87\xe7\x3c\x25\xc8\xa6\x85\x32\x1e\x27\x87\x0f\xc1\xac\xe8\x49\x5b\xf9\x70\xdd\x8c\x9d\x4a\xbe\x26\x99\xf7\xbb\x77\x78\x2a\x8d\x55\x6d\xbd\xc1\x4f\x33\x50\x61\x58\x27\x7b\xc2\x14\xa1\xe2\xc7\x88\x92\xd4\xe3\xcf\x17\x4c\x7f\xb8\x45\x5a\x5a\xda\xee\xfd\x01\x5d\xe3\x40\x80\xee\x7f\xbe\x39\xd7\xb3\x7d\x4d\xeb\xad\xb7\x34\x17\x9c\xb0\x23\x3d\xda\x04\x5f\x3a\x0e\x14\x7a\xbb\xda\x5a\xea\xce\x9d\x38\xbc\xaf\xec\xbf\x17\x05\x3e\xf6\x09\x34\x6d\x0b\x4d\xb4\x00\x8d\x38\xfb\x8d\x6e\xd0\x90\x07\x19\xf8\x9e\x7a\x37\xbb\x21\xc8\xa9\x33\xef\xae\xc4\xa8\x6d\x38\x58\xd1\xf9\x39\x0c\x17\x5a\xbc\x45\x6b\x7f\x72\x96\xdc\x1c\x6e\xee\x5e\xae\xf5\x8f\xac\x1f\x3a\x6e\x37\x09\x8b\x88\xd5\xd7\x21\xa6\xc5\x5b\xeb\xfd\x5a\xab\xb8\x7f\xee\x70\x6b\xd9\x68\xbb\x47\x58\xd6\x39\x29\xb4\xbd\xb0\x8f\x77\x1f\xc9\x61\xdc\x6a\xa7\xe5\xb4\x59\x4c\x7c\xb9\x79\x52\xae\x85\xf4\x7e\x4b\xe2\x94\x41\xf1\x94\x17\xc7\x16\xaa\xed\x1c\xd2\x5c\x50\x49\x9e\xe6\x83\x56\x8f\x85\x72\x70\xa0\xba\x7c\x7f\xd1\xba\x8f\x7c\xf4\x57\xfd\x47\xc8\x1a\x60\x10\xee\xc9\xdc\x4c\x6e\x07\x38\xbe\x3c\xb1\x16\x21\xff\x28\x90\x1d\xb8\x57\xb9\xeb\xf5\xe2\xe7\xfa\x3c\x01\x97\xdc\xe8\x34\x8b\xec\x7d\x96\x1c\xb9\x5d\x28\x33\x08\xa6\x2d\x90\x2e\x2e\x16\xd0\xf2\x25\xf5\x47\x12\x7f\x1c\x6c\x3e\x13\xd1\x24\x72\xbd\x3b\xe8\xed\x55\xab\x36\x63\x53\x5d\xd8\x71\x68\x15\xe0\x58\xd3\x34\xb7\x87\x37\x84\x24\xd3\x4d\xc8\x18\xb3\x28\x15\x2b\x70\xc9\xed\x58\x21\x6b\x6e\x1b\xb9\x6b\xdb\xdd\x3b\xe1\xd3\x9b\x4b\xd2\x02\xf7\x3c\x25\xe0\x7d\x54\xa8\xdf\x17\x25\x79\x3c\x6e\xab\xb8\xa0\x7d\xf5\x34\x6a\x82\xc5\x3c\x4b\x18\x55\xc6\xae\xc8\xfd\x43\xa5\x0e\x98\x12\x9d\xd7\xd5\x56\x81\x5d\x80\x91\xb6\x23\x25\xfa\x3b\xc4\xed\xaf\x1a\x85\xc0\xee\x44\xa8\x6d\x1c\x0f\x30\x67\xe4\xc4\xb8\x49\xbd\xaf\xc4\x8c\xf0\xf0\xbd\xa3\x18\x39\xce\xac\x41\x96\xc7\x08\xae\x7b\xcc\x41\x07\x7f\xc0\xdc\xbc\xf9\x04\xfe\x26\x2a\xe4\x3c\x99\xfd\x59\xdf\x43\xc9\xca\xc3\x69\xdd\x7c\x27\x90\x5d\x46\x9c\xbb\xbf\x1e\x94\x96\x3d\xbf\xb6\x2f\x76\x05\xbc\xfa\x83\xd3\xba\x40\x47\xf1\x69\xad\x16\x51\xc2\x9b\x32\x83\x92\xfd\x5a\xa1\xeb\xeb\xf0\xd6\xbd\x1d\xc4\x8f\x15\xc4\x9b\x32\x23\x9f\xa8\xe3\x7e\x54\x87\xbc\x58\xd3\x50\x2b\xbf\x76\x3d\x2c\x1d\x29\x57\x65\x72\xbb\x4d\xaa\xc7\xf7\x21\x53\xa8\x9d\x1b\x4e\xfb\xd2\x5c\x42\x03\x7b\xc5\x2a\x69\xd1\x2c\xbd\xec\x1f\x28\xd1\xb9\x58\x96\x1f\xbe\x6b\xdd\xcd\x88\x96\xa6\x03\xfe\xab\x7e\x38\x5f\x88\xd6\xe8\x55\x38\x53\xf1\xa5\x27\x4b\x96\xee\xf0\x53\x73\xa9\xa0\x4e\x4b\x9e\x24\x79\xec\x30\xb9\xa0\x7a\x65\xa6\x8a\x52\xf4\xb3\x90\xca\x97\xcd\x44\x79\x78\x9f\x26\xda\x2b\xbe\x2e\x3e\xaa\xe5\xee\xf7\xc4\xad\xfc\xc6\x79\xd4\xf0\x0e\x2e\xaa\x0f\xa7\x55\x2b\x89\xd1\xc6\x1d\x21\x2c\x90\x91\x76\x49\x26\x79\xa9\x6a\xe3\x7d\xe3\xed\x94\xff\xb9\xe7\xa4\xf1\x83\x0d\xa0\x94\x6b\xfe\x7d\x4e\xb0\xd9\xa3\x62\xc0\xb7\x9a\x10\x2e\xe2\x2d\xf4\x1c\xaf\x82\x44\xd7\xe1\x37\x57\x5d\x03\xaf\xb5\x62\xc4\x8e\xed\x35\x2c\x94\x32\xb2\x4f\x94\xeb\xe0\x4b\xd6\x0a\x8e\xa5\xb2\x6e\x34\x54\x84\x8f\x83\x74\xbe\x1f\x76\x7f\x28\xb3\xfe\xbb\xe8\x7e\xd1\x69\x8d\x4d\xce\x85\xda\x88\x3d\x94\x31\x25\xb4\x5e\xe3\xa9\xfa\x00\x8b\xd6\xba\x89\xf0\xca\x5d\xa1\x87\x83\x0a\x87\x81\xd0\x48\x39\xf6\xae\x55\x36\xea\xba\x2d\xd6\x7e\x88\xa0\xde\xd4\x86\xb1\x0e\xb0\xe7\x28\x46\x58\xc5\xeb\x41\xf9\xa9\xe1\x4f\x36\xe3\x47\x90\xea\x88\x09\xa9\x9c\x8f\x17\x0d\x69\xb0\xe7\x3c\xa5\x65\x22\xeb\x
bc\x30\x38\xb7\x8d\x1c\x62\x29\x41\x72\x6d\x9c\x6c\x47\x43\x9d\xc5\xf8\x43\x1f\xdf\xe7\x97\xcc\x10\x79\x4d\xfc\x17\x84\xa9\x4e\xb9\x97\x14\x3c\x71\xa6\x41\xc9\xfa\xb6\x67\xc5\xaf\x09\x5b\x5e\x2e\xc7\xd4\x9d\x2f\x81\xec\xcf\xf3\x5b\x86\xbc\xde\xe4\x37\x48\x28\x65\x63\x06\x49\x7f\x7f\x2c\xed\x1b\x24\x97\xf8\xaf\x92\xc6\xe9\x9a\x9f\xa4\x0f\xdb\x76\x96\xe7\x74\x8d\xcb\x67\xd9\x6f\xbd\x9d\x1c\xef\x77\x6f\xe9\x5a\x17\x62\xdd\x61\x27\x3f\x23\x1e\x9d\x99\x3a\x66\xf9\xd4\x16\xf3\x7e\x9a\xd7\x37\x72\xe0\xae\x8e\x95\x1f\x5d\xfb\x68\x94\xba\xac\xdb\x1b\x4a\xf4\x2a\x5d\xfa\xdb\x1b\xcf\x8c\x03\xb6\x9c\x47\xa1\x30\x51\x6c\xbd\xee\x1c\x59\xd3\x0f\x9b\x2f\x07\x67\x3e\x79\x27\x1c\x60\xad\x93\x30\xea\x49\xdb\xe3\xb7\x04\x69\x53\x8c\x50\x33\x3c\xef\x24\x71\x6b\xf4\xc5\x8a\x62\xcc\xcc\xf7\x4e\x13\xa7\xe6\x31\x1a\xea\xe0\x7d\xb1\x97\xe1\xb8\x9f\x3c\x8d\x6a\xa5\x55\x87\x69\x33\xc0\x53\xc4\x32\xa7\x61\xcc\xbb\x11\x72\x53\x83\x54\x5f\xfb\x47\x0d\x4d\x0f\x80\x16\x51\x5e\x27\x84\x3c\x36\x4b\x5a\x9f\x81\xb7\xcf\x26\xa9\xfa\xb7\x5e\x1f\x20\xf7\xb3\x5a\xef\x2f\x0c\x6c\xf7\xac\xb3\xb9\xf0\x54\xbc\xde\x1b\xe2\x29\xd7\x5b\xb0\x37\xcd\x9a\xbf\x57\xac\xd4\x25\xe7\x18\x1c\x40\x23\x36\x53\x29\x92\xe9\x9b\xd4\x57\x78\x38\xc6\x63\xf4\xa8\xcf\x26\xe2\x0b\xc1\x73\x46\xdd\x37\x55\x21\x32\x44\x50\x9a\x02\xbc\x3a\xbe\x3c\xe7\x0f\x11\x42\x9f\x01\xec\xd1\x11\x64\x04\x6f\x9f\x7b\x4e\x94\xa0\x7d\x16\x0e\x34\xa7\x36\xeb\xd2\x98\x53\x7f\x1a\x35\xb7\xf3\xd6\xe2\x9b\xf7\xd3\x51\xe1\x25\xbc\x7a\x6b\xa5\x5d\x86\x6a\xef\xf6\x22\x47\x02\x40\x71\xb1\x91\x54\x1d\x00\x6c\xc9\xc0\xba\xb3\x71\x96\xfb\xef\xb2\xaf\x9b\xd4\xc9\x25\xc6\xbd\xca\x2b\x2a\x30\x7f\x69\xd6\xc2\xed\xc5\xdd\xca\xdd\xec\x6d\xd6\x42\x56\xeb\x75\xd5\xa7\x2d\x9d\xa5\xcf\xa4\xbe\x53\xfa\xf9\xe8\x1e\xf9\x01\x69\x82\x4c\x90\x24\x84\x67\xef\x1a\xee\x3b\x42\x00\x48\x3d\x18\x98\xaf\xa5\x21\x1f\xf6\x15\x2d\x5b\x70\xa0\x3f\x02\x88\xff\x92\x50\xf7\x47\x95\xa4\x69\xda\x06\x93\x75\x4c\xd1\x3c\x4b\x45\x9b\x69\x6c\x14\xe1\x92\x60\x28\x33\x49\xd5\xd7\x67\xfe\xb5\xb3\x1a\xa0\x9a\x6b\x32\xc4\x6d\xf4\x2a\xfb\xab\x6a\xee\x51\xa1\x81\x2d\xc5\x46\xa8\xb0\xbe\x95\x83\x65\x66\xcd\xba\x95\x95\x96\xf2\xd8\xf1\x83\xfb\xa8\xfc\x19\xb6\x91\x14\x3d\x6b\xcd\x27\x3e\x0f\x3f\x56\xd1\xea\xfd\x2b\x82\xd6\x3e\x5b\xe7\x7e\xba\xbb\x38\xdb\xbc\x45\x20\x1b\xe9\x16\xf6\x02\x7c\x2f\x9d\xce\xd7\x96\x97\x37\xdb\x7d\x5f\x92\x6e\xcb\x1a\x9b\x04\x98\xbe\xbf\x16\x6e\xa8\x66\x18\x6f\x78\x6f\x15\xb6\xdc\x10\x71\x3d\xc0\x52\x20\xfd\xe9\xc5\x90\x8e\xee\x01\xb5\x64\x7f\x44\x01\x81\x61\x77\x53\xa0\x55\xe6\x27\x11\xa3\x47\x10\xbe\x48\xb1\xf7\xb2\x59\x10\xb7\x81\x27\x21\xab\x98\x2a\x44\x5f\xbe\xf7\x11\x3a\x75\x82\x29\xe8\xdc\xd3\xc2\xca\x70\xb2\xd5\xf9\x65\x5f\x25\x54\x02\x00\x89\xb6\xb9\xd4\x67\x60\x2d\x24\x55\x3e\xa8\x74\x36\x39\x66\x76\xc5\xde\x8c\x89\x54\x5b\xb9\xd4\x9c\x2a\x3f\xa7\x8d\xc5\xc5\xe5\x1d\x39\x43\xb7\x03\x17\xd1\x3f\x4b\x84\x73\xfb\x00\x04\x0e\x75\xe9\x83\x2d\x7e\xee\x7e\xf7\xbb\x83\xb7\xe7\x78\xad\x01\xf7\xae\xf7\x6b\x88\xb8\xb8\x52\x7b\x95\x38\x7e\xfe\x2b\x64\x23\x2b\x30\x1c\x58\x88\x81\x14\xce\xfd\xd0\x75\xf5\x8c\xa0\x58\xba\x5a\x9a\x2d\xbe\x75\x4a\x5c\x38\x8b\x96\x4f\x31\xf6\xd2\x71\xf4\xfd\xb1\x90\x7e\xc1\xb3\x8f\x3f\xa9\x19\x1e\xbf\x3a\x0a\x59\xe7\xd4\x07\x7a\x86\x6e\xd9\xd9\xe3\x26\x4f\x48\xf3\x40\x9f\x45\x47\xd9\x90\x0e\xfe\xe8\x51\xfa\x52\x97\x63\xf9\x54\x69\x2e\xa8\xfe\xa0\x7d\xb0\x73\xe4\x18\x7a\xdb\x6c\xf0\x9a\x42\xe7\xe0\x58\x8d\xf1\x0b\x35\xd0\x87\x3f\xbc\x30\xcb\x3e\x6b\xae\xd9\xff\xaa\x3b\x2d\xad\xd5\x20\x3d\xa0\x7e\x8e\x4a\x53\x41\xaf\xb9\xab\x11\x99\x94\x92\xef\xb5\xd7\x5e\xc3\x83\x5a\xb3\x30\xf2\xde\
xe3\x1f\xd8\x57\xfa\x00\x9d\xd5\xed\xbd\xae\xe7\xbb\xc7\x50\x07\x3a\x87\xfd\x16\x91\xd6\xeb\x9b\x9b\xbd\xdb\xc1\x38\x7e\xa0\xa1\x03\xde\xbc\xa1\x2d\x42\x54\xf4\x6e\x65\x70\x2d\x8b\xae\xf4\xfe\xe6\x62\xf5\x0e\x9e\xe0\xc1\x5a\x38\x66\x8d\x44\xc0\x4b\x03\x02\xfc\x4f\x27\x6b\xc9\x02\x7c\x7d\x4e\x0f\x4f\xa3\x57\x32\x3a\x34\x20\x22\x48\xc3\x13\x88\x7a\x46\x97\x66\x45\xbb\x3c\xfa\x18\x23\x9c\x2f\x79\x1a\x59\x11\xe6\xa4\xe0\xd4\x4d\x02\x44\x69\xf4\x9b\x3a\x1d\x5e\x01\x80\x1e\x84\x20\x79\x68\xe7\xf6\x78\xee\x23\x69\x7c\xbe\xc7\xeb\x3f\x41\x7d\x26\x72\x6a\x43\x65\x37\x5b\x83\x30\x6b\x64\x73\x67\x7b\xc5\xcc\xda\x6b\x25\x12\xb3\x94\x3d\x79\x5f\x22\x1a\xb5\x4b\xa5\xf9\x9e\x50\x9e\x25\xc5\x72\xd5\xd1\x33\xb8\x5e\xf4\xbc\x4e\xe3\xab\xa6\x5f\xc9\xc7\x13\x74\x8d\xad\x07\x40\x5b\xc0\xf2\xe5\x18\x0d\xb8\x87\x31\xba\x74\x6d\xb9\x55\x3c\xe0\xee\xf0\xd0\x66\xff\x3c\x73\xba\x58\x70\xea\x1f\x0e\xaf\x6a\x0d\x04\x28\xe4\x27\xdc\xee\x2a\x88\x31\x72\xea\xe8\x97\x08\xe8\x96\xb0\x24\xcd\xfa\xce\x3f\x5d\xf6\x3e\xa4\x54\x17\x64\x07\x94\xab\x84\xa0\x13\xf4\x01\x92\x06\x46\x6a\xc3\x5b\x34\x9b\x2b\xeb\xcb\x5e\x9a\x00\xbb\x33\xbd\x90\x9b\x3d\x04\x2f\xc1\x49\x96\x0d\xe9\x6c\x99\xbb\xf3\x43\x1f\xe9\x4e\x6e\x67\xee\x4e\x4d\xce\xb7\x91\xb1\x1e\xa5\x4f\x0d\xc5\xf7\x03\x4e\x6f\x90\xdb\x64\xe2\xf3\x4d\x3b\x5e\xc7\xd6\x1b\x6b\xd1\x4b\x93\xa5\xac\xe2\x7e\xda\xc9\x3b\x34\xf0\xfe\x91\x37\x6c\x0c\x1f\x8e\xec\xba\xcc\xfa\x53\xd6\x1e\x5d\x9e\x09\x7c\xe5\x0e\x4a\x96\xb3\xee\xb5\xd3\x02\x55\x07\x5f\x34\xbd\x8b\xcc\x73\x4f\x4a\xe8\x4e\xcb\xec\xd5\xc3\x6a\xae\x16\xb9\xbf\xee\xaf\xcd\xdc\x6f\x75\x9e\x9e\x4b\x98\xf9\x07\xf2\x32\x28\xed\xae\x3d\x6c\x21\x73\x9f\x08\x9e\xdc\x93\xaf\xcc\xe2\xf0\x44\x27\x5a\xc5\x1a\x2c\xff\xda\xe3\x43\xc4\x43\xd2\x1e\x35\x0b\xd4\x0f\x32\xce\xf0\x7c\x6e\x59\x96\x28\x5e\x42\x3d\xd0\x94\x56\x07\x2c\xf2\xb3\x5f\x38\x4f\xcc\x97\xef\x93\xed\xdf\xeb\x8e\xed\x5a\xcd\x80\x15\xf9\xcd\x6b\xb7\xa1\x4b\x9d\x9b\x99\xbd\xc5\x6d\x9b\xd9\x99\x4b\xfb\x74\x2d\x01\x9e\xa7\xa7\xbb\xd1\x99\xaa\xfb\x05\x62\xcd\x86\x37\x57\x4e\xa7\xdd\xb3\x87\x3d\x5d\xb2\x3d\xa6\x65\x9a\x12\xa4\xc8\x46\x49\x4b\x75\xe3\x1d\xa5\xa6\xa9\xe3\x3e\x75\xe9\xa7\x9f\x92\xa5\xac\x29\x7a\x59\xe2\x94\x85\xd7\xb6\x4f\xf6\x46\x47\x96\x35\xe5\x90\xb3\xa4\x86\xbd\x9d\xb2\xd7\xde\x49\x48\xd2\x7d\xba\xb6\xbf\x43\xf6\x3b\x11\x6a\x63\x9f\x4b\xf3\x06\xdc\x8b\xcb\x47\x8a\x80\x2b\x45\xd1\x5a\x49\xc3\xe6\x47\xe8\x0d\x38\xbe\xd9\x39\xd2\x96\x7e\xe2\x93\xc4\x35\xee\x36\xef\x36\x8d\x10\xf2\x08\x2e\xfc\xa1\x13\x37\xba\x91\xd0\x3d\x6d\xea\xf5\x1e\xad\x94\x6c\x04\x4f\xcc\x78\xe3\x1b\x93\x9e\xf6\xf5\x99\xe5\xc3\xb3\xa1\xde\x99\x2b\xf8\x7a\x41\xd7\xbb\x82\xf6\xd1\x6a\xc9\xa7\x84\x82\x0b\xc2\x08\x1d\x41\xdd\x34\x61\xed\xd4\xb9\x99\x54\x7d\xc2\x27\x87\x6f\xd8\x1f\x39\x23\xc6\xe4\x87\x9f\x4a\x45\xbb\x46\xc4\x10\x8b\x8e\xbf\x2e\xcc\x90\x25\x7e\x2c\x68\xf8\x8a\xa9\x7c\x55\xdd\xaa\xd1\x11\x3c\xad\x27\xb4\x00\xb2\xd5\x6b\xb2\xfb\x4c\x68\x18\xa3\x6f\xe8\x0d\x7b\xd2\xb8\xb5\xf9\x77\xad\xee\x0e\xa4\xd5\x4a\x39\xc5\x24\x9a\x1a\xfc\xa9\xb7\x4e\x33\x52\xa3\xf7\x09\x97\xdf\x74\x09\xab\x68\xf6\x4f\xf8\x76\x66\x47\x18\x1f\xd9\x33\x1c\x43\x3b\x34\xf4\x4d\x3d\xe9\x29\xe3\x0f\x35\x81\xd4\xd9\x26\x9c\x2f\x63\x4f\x5e\x8a\x55\x85\x96\xbd\x64\xcd\xb5\x60\x62\x92\x1a\x7b\x57\xa9\x98\x50\x59\x9f\x96\x56\x36\xdb\x24\x15\x57\x7c\x0e\xf3\x9e\x1c\xf1\xd5\x3b\x74\x2d\x87\x96\xeb\x67\x1c\xd8\x3d\x2d\xab\xfb\x69\x2c\xf9\x01\xb3\x4a\xb7\x88\xef\x61\xf1\xde\xf1\x5a\x20\x47\x98\x86\xa0\x1a\x24\x13\xe4\x72\xa9\x91\xa4\x94\x72\x19\x3d\xbe\x07\x3a\xb1\xd4\xb7\x21\x6c\x78\x2b\x54\xe3\x7d\xef\xe0\xf3\x7e\x9f\xd6\xd7\x4f\xb0\x5d
\x00\xf7\x3e\x34\x16\xfa\x15\xb3\xaf\x08\xc7\x7a\xb6\xc6\x8e\xa9\x08\x81\x0e\x50\x9b\xb5\xcf\x6d\x5c\x3b\x4b\xb3\xe5\xba\x6c\x3e\x9f\x72\xc9\x64\x7d\xf7\x6a\x68\x5e\x20\x6b\xa0\x49\xee\xd7\xe8\xaf\xb2\x61\xfd\x2a\x96\x9d\x02\xad\xb2\xad\xfe\x5a\x93\xea\xa3\xb2\xfe\xb0\xb5\x06\xa7\x5e\xe1\x17\xc7\x27\xa8\xb6\x37\x8c\xf2\x17\xe9\xc1\x5f\xa4\xdf\x07\x49\x56\xd9\x7e\xc9\xbe\x81\x49\xfe\x49\xfe\x53\x03\xd6\x9f\xe4\xf8\x3f\xb9\xfe\x24\xff\x57\xe1\x5f\xe4\xbf\x85\xff\x37\x3a\xa7\x34\xff\x8f\xda\xfe\x67\x28\x92\x7f\x5a\xfb\xff\x87\xf0\xea\x62\x4c\x55\xfc\xf2\xa4\xd2\x0a\xa7\x83\xf3\x95\x7e\xa6\xf7\x35\xe2\x9c\x36\xe0\x2a\x65\x76\xaf\x05\xb9\x45\x65\x3f\xd0\xda\x74\xb6\x9e\x66\x18\x70\xbd\x4b\x2e\x73\x17\xe0\xff\x93\x5b\xeb\x76\xe6\x5b\x67\x80\x91\xd7\x6c\x01\xda\xaf\x85\xdc\x19\xfe\xaa\x71\xf7\x72\x41\x01\x5b\xd7\xba\x48\x21\x3d\x35\xb3\x64\xbb\x49\xf1\x55\x09\x87\xc8\xf9\xb2\xa2\x8b\x15\x05\xd4\xdf\x6e\x62\x76\xbf\x45\xf8\x55\xbb\x8d\x43\x93\xce\xfc\x04\xd9\x9f\x06\x54\xa8\x6e\xfd\x64\xfa\x68\xc0\xf9\xda\xac\x1f\x3e\xb3\x06\x81\xc2\x5a\x9d\x40\x21\x2b\xaa\xc2\xb9\xc8\x5f\x47\xb6\x9c\x49\x9c\xe1\x9c\xd8\xd7\xb4\x9e\x07\x8c\x8a\xef\xf8\xf8\x4b\xd4\x61\x7d\x57\x97\x49\x86\xa4\x03\xd6\xc2\x5e\x7a\xaa\x7f\x9a\xe7\x7d\x53\x5c\x38\x8b\x51\xe9\xf7\x5c\xd9\x3a\x15\x08\x4e\x5a\xdd\x9a\xbf\xa6\x1b\x9f\xe7\xd4\x8f\xba\x51\xb4\x4f\xd6\xa9\x45\xb8\xa9\x5e\x43\xbc\x3b\x02\xe8\xb4\x3a\xde\xf9\x4f\x08\x94\x36\x35\xfd\xa5\x1b\xc0\xc7\x53\xad\x07\x1d\xfd\xb0\x15\x73\x66\x5a\x5c\xa3\x2d\x7a\xd0\x93\x1b\x1d\x63\x38\xed\xcf\x3b\x54\x12\x56\x48\x2d\x0f\xea\x7a\x55\x46\xf0\x0b\xa8\x37\x3d\x10\x7c\xab\x60\xa5\x51\x0d\x80\x63\x82\x8b\xaa\x64\xeb\x21\xb8\xa0\x52\xa9\x89\x5f\x5b\x99\x4f\x50\xef\xa7\x40\x16\xf5\x97\x9e\xa4\x0e\x33\x21\x80\x61\x54\x98\x0c\x0c\x82\x3a\xb9\xfd\xc9\xed\xd7\x40\xaf\x42\xec\x7b\x01\xd1\x59\xae\x77\xed\xfd\xa5\xcc\x32\xca\x36\xbb\xce\x80\xdb\x18\x21\x53\xfd\x5f\x93\x52\x0f\x91\x73\x06\x58\xbd\x59\xc1\x2a\xa1\x07\xae\x0d\xb9\x27\x4c\xde\x84\x93\xe2\x09\xde\xc8\xff\x29\x89\xdb\x0b\xa8\xb7\x48\x58\x85\xd8\x22\xd8\x2a\xcb\xfb\x3e\x9d\x16\x58\xf4\x02\x7e\xed\xeb\xdc\xe4\x61\x10\x2b\xab\x5c\x67\x40\x0b\xdf\xbb\xaa\xad\xef\xa7\x42\xa6\xa2\x7a\x50\x6b\x33\xd4\x5f\x52\x43\xbd\x00\x3b\x42\xc8\x0b\x20\x50\x14\xf8\x87\x41\x6e\x8b\xde\x34\x15\x99\xae\x2c\xc0\x12\xc1\x3f\xbd\xa3\xde\x7a\x3d\x09\x6f\x5d\x7c\x3b\xb0\x64\xe7\x3b\x65\x98\xc4\x9f\x48\x84\x9d\x01\x82\xa6\x49\x4b\x6a\x54\x69\xd0\xa3\x80\xad\xef\xa7\x29\x0c\xdb\x97\xe7\x57\x76\x4c\xbf\x7f\x70\xb8\xe8\x74\xc7\x4a\x41\xc4\x7a\x52\x68\x15\x12\xf3\xcd\x1a\x03\x36\xc9\xb0\x7d\xc9\xf6\x27\x4e\xb1\x5e\xc0\xae\x60\x23\x1f\xfa\xa0\x19\x8d\x33\xc0\x34\xde\x5b\xd8\xb1\x19\x71\x0f\x7e\xac\x65\xd2\xf3\x1b\xee\xe9\x5b\x18\x9b\xd7\x33\xb9\x5d\x3e\x6e\x20\xb8\xfb\xb5\xa3\x9f\x79\x4f\xc3\xef\x19\x00\x20\x57\x21\x67\xe7\x87\x6b\x53\x08\x4e\x6c\xaa\x34\xa8\x39\x3a\x6c\x18\x5b\x27\x09\xfa\x6b\x5e\xb7\x52\x41\x80\x21\x54\x03\xbf\x8e\xd8\x62\x49\xaa\x64\x28\x63\x1a\x64\x46\xd1\x0d\x99\xf4\xa7\x60\xe1\x2a\xa4\xa2\x8d\x6d\xfb\xc7\xd8\xf0\xcd\xea\xd8\x70\x97\xac\xbb\x08\x8b\x49\x89\x1e\xec\x0f\x60\xf5\xba\xbf\x36\x1a\x3d\x00\xbd\x78\x50\xf5\x22\xe5\x3d\x2f\xb6\x94\x70\x1b\x64\x57\x0b\xa9\xef\x6c\xa5\x82\x39\xe1\xe2\xf5\x58\xdb\x26\x09\x6a\x7c\x95\x79\x95\x79\xe7\xf2\x9b\xb3\x07\xf0\xae\x3c\x35\x2c\x75\x12\xa0\x07\xaf\x1a\xb1\xc2\x71\x06\x78\xbd\xa5\xa8\x98\xf7\xf9\x3d\x32\x58\xf5\x90\xe7\x5a\x82\x24\xc9\x94\x80\x7d\x5b\x0c\x79\x70\xaf\x96\x4d\x3c\xb4\xc2\x07\x8a\x63\xfa\x1f\xf3\xca\xd7\x9c\x01\x1a\x23\xcb\x93\x02\xcf\x47\xb6\x42\x93\x0c\x7b\xcb\x8e\xbf\x14\xb8\xe6\xd8\xd6\xf7\xb
5\xae\x8a\x57\xb6\x5a\x0d\xa1\x06\xe6\xb3\xdc\x9e\x33\x1d\x8d\x33\xb6\xd6\xb4\x88\x0c\xab\x24\x71\xf7\x59\xdb\x2a\x71\x41\x6f\x24\x7e\x27\x1d\xde\x81\x5f\xcb\x97\x33\xc9\x2f\xdc\x81\xa0\xd0\x5f\x36\xfe\x81\xea\x1f\x64\x7a\x20\xc3\xaf\x43\x13\x7f\x91\xc1\xbf\xb8\xfe\x26\xff\x4b\x98\x1e\xd8\xfb\x6f\x9d\x2c\xff\x16\xfe\x6f\x75\xfe\x2d\xfc\x7f\xa5\xf3\xbf\x02\xfa\x6f\x75\xfe\x17\x23\xcb\x58\x5c\x48\xc4\x6f\x4f\xc6\x86\xff\xd0\x30\xdb\x41\xda\x03\xd8\x99\x2a\x37\x39\x26\x7d\xf1\x7b\x64\x16\xfe\xf8\x23\x54\x1d\xff\x3f\x2b\xf9\xdb\xaa\x3f\x7a\xdb\xe3\x5f\xc4\xc0\xdf\x47\x3e\x7f\xd3\x7e\xf7\xbf\xb9\x7e\xf7\xf4\xbf\x14\xfd\xee\x03\xff\x67\x81\xff\x8e\xb5\x73\x61\x07\xf1\x21\x8e\xd9\x8a\xfc\xfb\x92\xf8\x1e\x75\xba\xdd\x6c\xcd\x1f\xd7\x42\xec\x64\x8d\x87\xed\xb0\xef\xb9\xb9\xd0\xb5\x99\xb3\xc9\x89\x65\x16\xab\x43\xbc\x46\x03\xa4\x36\xf7\x36\x1f\x82\xfe\x3b\xfb\xfe\xdd\xff\x4e\xbc\xbf\xfb\xdf\x99\xfc\x77\xff\xe6\xf7\xe1\xfd\xdf\x3b\x84\x3f\x59\x21\x0f\xff\xd7\xbc\xff\x1b\xe5\xff\xb7\x38\xfe\x4f\x02\xff\x61\xbd\xaa\xbd\x6a\xec\x4e\xd4\x77\x02\xf4\x32\x90\xdd\x35\xef\xa7\x67\xd4\x35\xd5\xf5\xd4\x8d\xf3\xcc\x92\x4e\xde\x7a\x0d\xe7\xb8\x2c\xc7\xc7\x70\x75\x91\x9b\xfc\xea\x48\x6f\x06\x5c\x61\xd6\xfb\x76\x91\xe8\x3c\xad\x77\xb1\xcb\xc1\x2b\x15\x63\x3b\xf5\xf5\xcb\x21\x39\xbe\xbc\x26\x7f\x71\xd5\x5b\xcf\x57\x43\x40\xdf\x46\x77\x9e\x83\xa4\x52\x0d\xfe\xf8\x29\x5a\xf4\x9b\xc2\xab\xd9\x4b\xaa\xb3\xe5\xe5\x5f\xd9\xf7\xbd\xc4\x67\x28\xad\xbe\xfb\xe5\xcd\x20\xc0\xe9\xf0\xd7\xb4\x95\x7d\xed\x01\x5c\x22\xb6\x8e\x9d\xfe\xc1\xb1\x7e\x1d\xdd\xf2\xe7\x42\x67\x8d\x1b\xf3\xa7\x33\xe9\x03\x9c\x37\x26\xf5\x0c\x0d\x27\x0e\x1c\xde\x79\xe8\x2b\xb1\xff\xb0\xb6\xfd\xf2\xcb\x1c\xf3\x54\xd0\xe9\x40\x14\x15\x65\xa3\xf5\x18\x7e\x39\x97\xfb\x6f\xb7\x7f\x2d\x07\x8b\xa2\x07\xa3\x5b\xed\xdb\x45\xbe\xfd\x40\xdc\xac\xfd\x67\x30\x00\xfc\xd1\x39\x17\x80\xd5\x90\xf2\xa3\x8f\x90\x31\xac\x59\x13\x06\xad\x63\x51\x2d\xa7\x64\x10\xc0\xf5\xf7\xe3\xa6\xaf\x6c\xb5\x90\x6c\x1f\x97\xf2\xa8\x8e\x55\x48\x0e\xea\xef\x23\x44\xe0\x2d\xab\xef\x34\x80\x42\x3c\x04\x15\xd5\x58\xa0\x80\x12\x8c\x4d\xe8\x17\x34\xb6\x3b\xcc\xf7\x4b\x7e\x00\x26\x1a\x35\xcf\x38\x00\xf5\x53\x20\x09\xcd\xef\xd3\x0b\xbc\xd6\x22\xf5\x90\x16\x97\xda\xff\x28\x85\x4f\x4c\x54\xaf\x78\x91\x97\x7d\x5b\x67\xad\x87\x6c\xfc\x76\x10\x8e\x9a\x26\xee\x4c\x60\x49\x6b\xed\x12\xd7\xaf\x59\xfa\xba\xb1\x0a\xb1\x51\x8c\x8b\x38\xd7\xd8\x7d\xae\x45\x52\xab\x95\x09\x9a\xa2\x7d\x52\x9e\x8a\xae\xe9\x61\xdb\x58\xb1\x8a\xff\x17\x4f\xf2\xec\x0b\x55\xbb\x1d\x88\xb2\xcf\x8f\xa3\x45\xf8\xc7\xf9\x9a\x9b\xf4\xaf\xbf\x86\x47\x0d\xe4\x0c\x68\x49\x82\xb4\xd8\x78\x50\x0e\xc5\xcd\x2e\x64\x8d\x28\xfc\x27\xdc\xb9\x0c\x1d\xfc\x44\x71\x0a\x73\x6b\x4f\xaf\x2a\x72\xd7\x20\xbf\xce\x23\xbc\x81\x61\xad\xf1\x8a\xf7\x03\x5c\x7e\x87\x07\x49\xa6\xb6\x3f\x18\xc4\x0b\xfc\x1d\x44\x8d\xc2\x35\x06\x57\xc3\x12\xbf\x26\xe7\xfb\x4e\x02\xf4\x88\x29\x88\xec\xb6\x39\x0b\xc4\xcb\xfa\xfb\xf9\x52\xd6\x96\xc3\x1a\xa4\xfe\xb7\x0b\x61\x05\xec\x5c\x81\x60\xa9\x2f\x7e\x5b\x75\x70\xfc\x31\x6e\xf5\xaf\x47\x50\xca\x17\x0e\x50\x11\xb3\x81\xe1\xa6\x9a\x93\x08\x8e\x9a\xa6\x8a\x6a\x37\x3b\x1f\x07\xc5\x77\x1e\x88\x8a\xb0\x80\x15\x76\x20\x98\x9a\xf1\x17\x46\x25\xd6\x7a\x36\xd7\x73\x37\xbf\xda\xdd\xdd\xb4\xbe\x38\xba\x4d\x54\x87\x52\x11\x5e\x42\x00\x65\x4e\x39\xf4\x2c\x85\x84\xff\x77\x58\x13\x3d\x05\xda\xd9\xea\x8c\x8c\x2d\x64\xc8\x4e\x64\x5d\x25\xf2\x7f\x69\x6a\x39\x6b\xd1\x63\x53\xa1\xbe\xb1\xaa\xd5\xca\xa4\xd6\x61\xa3\x23\xf5\x00\x76\xe6\x49\xe4\xfc\x39\xd3\x44\x4c\xc0\xce\xae\xee\x44\xfd\xcf\x8a\x63\x1b\x94\x8d\x36\x40\x9e\x2f\x60\xcd\x80\xfb\x8e\xb3\xf3\xad\x
46\x77\x64\x37\xfa\x7e\x91\x50\x89\x63\x94\x76\x04\xdc\x37\x0e\x0e\x60\x09\xef\xfb\x4b\x06\x26\x0c\x55\x29\xdd\x9e\x03\x3f\x7d\xb5\x3c\xfe\x93\xa8\xb3\xeb\x66\xe9\xf7\x40\x84\xf4\x02\xb8\x26\xc1\x34\xd5\x2d\x5e\x3a\xa9\x5f\x85\x21\x2a\x74\xde\x39\x53\xc9\xe8\x3f\xd2\x26\xd7\x20\x91\x33\xe3\x15\x47\xb1\xd5\xe7\x64\xeb\x0b\x5b\xfb\xa3\x81\xda\x3d\x54\x2b\x81\x3c\xbc\x32\x17\x03\x97\x4e\x14\x87\x4a\x36\x4b\xd2\x86\xf6\x5e\x2c\x83\x52\x3f\xf0\x90\x64\x68\x28\x28\x53\x7a\xce\x80\xcc\xda\x49\xf1\xb9\x89\xb1\xf4\x2e\x56\x5a\x06\xea\x4d\x50\x68\xe1\x93\x15\x99\x1d\x67\xf8\x27\x9e\xd7\xcb\x01\x35\xc7\x5c\x97\x0b\xac\x47\x0a\x7b\xec\x5e\x26\xf6\xda\xef\xd4\x42\x86\xd3\xf1\x92\x3e\xab\xba\x5a\xc0\x29\xac\x68\x19\xfd\x05\x5a\xe9\x51\x50\xbe\x1b\x24\x41\x8d\x61\x2f\x45\x97\xb3\xb6\x1c\xae\xdf\xb5\x82\x78\x77\xed\x61\x02\x0c\x01\xe7\x37\x47\x1c\xf5\x0a\x30\x4a\x82\x3f\x20\x07\x82\xc0\xc8\x3b\x16\xe4\x7d\xc5\xca\x8a\x3c\x1b\x2d\x7a\x7a\x57\xe1\x0d\x7a\xc0\x79\x5d\x9b\x02\x2c\x1e\xda\xf5\xe4\x0f\x87\x7a\x03\xeb\x3b\xfe\x1e\xff\xc4\x7f\xea\x6e\x0f\x76\x36\xe8\x57\x3c\x0c\x9d\x28\x5c\xbe\x04\xd4\x62\xdf\xed\x39\x15\x82\x41\x2c\x40\x06\xa0\x32\xf9\x6f\x45\x39\xad\xe7\xad\xe5\x37\x53\x18\x49\xa0\x93\xd1\x2d\x61\xb1\xf9\x13\xdb\x96\x43\x6b\x80\x49\x67\x09\x98\x95\xec\x63\x64\xa8\xb6\x32\x2a\x65\xa3\x75\x29\x9b\xa0\x26\xac\x6f\xf9\x79\xff\xbd\xdf\x6d\xfe\xe9\xfd\x1f\xeb\x51\xc2\xaf\xb5\xb2\x07\xe0\xe5\x9c\x12\x45\x0f\x3e\x8b\x58\x6b\xd2\x3d\x18\x88\xa2\x3a\x6a\x73\x86\x0f\x0b\x8b\x51\x5f\x9d\xae\xf8\x43\x73\xd6\x21\x37\x16\xe3\xc6\x0a\x90\x7b\x93\x7d\x76\x6d\x98\xc7\x47\x81\xab\xdb\xad\x85\xaf\x51\x13\x08\x84\x5e\xd5\x5e\x94\xd6\xa7\x86\x95\x71\x04\x22\x7a\x52\xfc\x0d\x1e\x3b\xb6\xd4\x90\x4f\x21\x3f\xf3\x3d\xf0\x55\x6b\xc7\xd1\x27\xd6\x94\x9d\x21\xfb\xe0\xb9\x0e\xf3\x73\x45\x33\xf2\x36\x37\x67\x70\x32\xc9\x4d\xe4\x42\x6b\x86\x2a\x19\x72\x4f\x33\xcf\x66\xcf\x31\x32\x99\xd6\xb7\x19\x26\x89\x24\x68\xc5\xea\x26\x3b\x23\x32\x45\x5c\xe1\x09\xea\x99\xe8\xcb\x64\xcc\x35\xb2\x5d\xce\x2c\x88\x6d\x5e\x64\x3e\xe6\x82\x47\x2b\x74\x79\x5d\x19\x38\x43\xa1\xbc\x6a\xb0\x44\xa8\x0b\x87\xf7\xc2\x63\x83\xc3\xdb\xe2\x91\x24\x65\xe3\xa1\x90\x55\x9f\x68\x97\xa4\xc7\xee\x49\x38\x82\x9c\xde\x80\x97\xdd\xc7\xd7\x6a\x26\x43\x6d\xf7\x83\xd9\xc1\x93\x1b\x32\x40\x09\x4c\x86\x10\x7c\x49\x4c\x06\x69\xa0\xf2\x1a\x60\xc1\x87\x3a\xba\xb8\x51\xa5\x09\xde\x05\x90\x01\x36\x5f\xca\xa9\xea\xc4\x8a\x1d\x53\xe1\xed\x77\x5b\x6c\xb2\x45\xb3\xa9\xae\x2f\x63\xa7\xf6\x66\x46\x1c\x4d\x64\x89\xc5\x99\xf2\x1d\x0a\x04\x0b\x53\x86\x08\x4f\xd8\x66\x3c\xb8\x10\x00\x5e\xcf\xec\x8b\x17\x63\x34\xda\xc0\x1f\x0d\x4b\xa3\xe2\x16\x65\x0f\x9d\xbf\x9a\x8a\xb7\xdd\x30\x34\x6e\x20\x54\x69\x20\xcb\xfb\x71\x59\x2f\xd8\xb0\x29\x72\xbb\xf1\xdc\xed\x09\x86\x4a\x60\xf9\x68\x94\xf9\x3b\x8e\xe4\xad\xba\xcc\x50\xa2\xca\x22\xc6\x64\xe8\x71\x65\xd1\x5c\x91\x29\xee\xde\xf2\xa8\xe0\x4f\x82\x56\x91\x37\xfb\x75\xab\xd6\x19\x9c\x4d\xa7\x85\xc3\xa7\xa8\x39\x25\x1b\x90\x3f\x20\x86\xe0\x4b\xe2\x8f\x00\x8e\x88\xb3\xee\xc6\x30\xee\x6f\x16\x0c\x2b\x49\xe8\xb7\xa2\xaa\xe1\xc5\x65\x2d\x0f\x62\x60\x90\x85\x7b\x3f\x59\x40\x96\x55\xe7\x61\x64\x43\x5f\xdb\x2b\xba\x7b\xc4\xec\x3a\x67\x29\x86\x43\xe8\xea\x72\x0b\x3d\x31\xb5\x49\xc9\x0a\x0c\x20\xa5\xf2\xa1\xb1\x61\xbf\xec\x22\x59\xee\xff\x63\xef\xbf\xa3\x9a\xda\xbe\x7e\x61\x1c\x14\x44\x3a\xd2\x14\x44\x50\xaa\x44\x9a\x74\x90\xa2\x48\xef\x02\x21\x08\x84\x2a\x20\xbd\x86\x5e\xa5\x09\xd2\x7b\xef\x35\x81\xd0\xa4\x85\xde\xa5\xd7\xd0\xab\xf4\xd0\xa4\x77\xf8\x8d\xa3\x9e\x73\xfc\x3e\x77\x8c\xe7\xf7\x3e\xf7\x8e\xfb\xfe\xf5\xe6\xaf\xec\xbd\xb3\xf6\x9a\xf3\xb3\xe7\x9a\
x73\xae\xcf\x5e\x6b\x66\x2d\xe9\xd4\x65\xa3\x64\x47\xc6\x68\x95\xc2\xde\xf3\xe8\x2c\xbe\xf6\x21\xb0\xad\x45\x27\x42\xe3\xe1\x6b\x7b\x7b\x6d\x31\xe3\x63\xbf\x5a\x8f\xfb\x8c\x7a\xd4\x34\x4d\x0d\xe3\x4a\x52\x76\x9f\xe1\x38\xbf\x40\xf2\xdb\x80\x92\xc8\x3a\x90\x05\x7c\xcc\x36\x95\xa5\xe1\x38\x5e\xab\x81\x1f\xec\x5c\x83\x84\x2f\x7c\x9b\xd4\x25\x80\x26\x84\x79\x19\x16\x45\xd6\x68\x99\xaf\xa7\xe7\x82\x9e\x3e\x3d\x9b\xf1\x7d\x80\xaf\x98\x50\xa7\x4e\x49\xe0\x38\x71\x0f\x84\x29\xcc\x67\x96\xad\x78\x4f\x99\x73\x97\x36\x4e\x15\xd6\x6b\x45\x20\x6d\x3d\xaa\xab\x92\xdb\x9b\xe2\x60\x8f\xe4\xc1\x65\xdf\xaa\xef\xe7\x57\xce\xa7\x19\x8a\x6d\x28\x58\x2a\x16\x2c\x0e\xae\x99\x4e\x36\x1c\x7a\xee\x41\x4f\xf1\x56\x2e\xb7\xa7\x18\x31\xeb\xdb\x30\xca\x50\x3c\xb4\x21\xdb\xf6\x24\x73\x2f\x59\x0f\x9a\x7b\x23\xa2\xda\xd7\x58\x29\x99\xe6\x6c\x57\xc3\x03\x26\xfb\x05\xce\xfd\x61\x20\x0b\x0f\x10\x90\xd9\x26\xc3\x26\x01\x12\xfe\xf0\xcd\x48\x1a\xe7\x47\xc5\x6d\x0d\xdc\x4e\xcb\x48\x67\x1b\xf7\xe3\x9c\x64\xe1\x0b\x62\x6d\xef\xd1\x36\xd5\x9a\xb5\x28\x5b\x61\x7d\x1d\xc0\xc4\xf6\xde\x5a\x41\x3b\x18\x56\xf8\xda\xea\x23\xd0\xbc\x6d\xc4\x3a\x91\xf5\x1e\xba\x07\x91\xd0\x1a\xa4\x51\x3e\x9a\xf5\xd1\xdd\xef\x6b\x16\xf4\xd7\x0f\x3e\x86\x3b\x10\xcc\x42\x56\xb9\x5f\x9a\x33\x33\x69\x8f\x66\x7c\x1b\xf7\x5d\x03\x75\x30\xf5\x32\xb9\x22\xa2\x17\x9e\x0b\x22\x47\x6d\x5c\x79\x0c\xa8\x9c\xdd\x2f\xbe\x28\x68\x09\x27\x44\xab\x98\xf2\x65\xc2\x7e\xcb\xf3\x0c\x11\x2c\x91\x77\x21\xb8\xe1\x5e\x23\xb5\x29\xa7\xf0\x80\x08\x0c\xe1\x1c\x21\x3a\x86\xcd\x33\x0d\x8e\x00\x6b\xb0\x61\x44\x30\x74\x79\x71\xa4\x80\xd7\x11\xa8\xff\x3d\x19\xe4\x22\x58\x0c\x52\x67\xce\x66\xaf\x6f\xae\xfe\xb6\x05\xe8\xd4\x24\xd9\xa9\xdb\x37\x55\xd2\x3f\xd8\x20\xa9\xb7\xdb\x8a\xc2\xec\x82\xb1\xb4\x7d\x5f\xfb\x36\x4e\xa7\x64\xd5\x16\xef\x7d\xf2\x66\xfc\xa5\x3d\xfb\x15\x4f\x96\x79\x41\xb7\xf6\x07\x9d\x86\x02\x93\xa5\xf8\xe0\xd2\x5a\xbb\x26\x9f\x6b\x3c\x4b\x7c\xb9\x69\x2d\x0b\x9f\xae\x50\x1c\xcb\x3a\x0b\x43\xeb\x37\x20\xff\xdf\xc6\xe3\x40\xd6\x14\x6c\xfe\xe6\x48\xf7\xea\xd0\xad\xe4\xe6\x66\x79\xc2\x85\xe0\xcc\x29\x95\xc9\xbb\x74\x03\x7a\xb8\x2f\x0e\x38\x7f\x40\x30\x30\x0c\xbc\x39\x2a\x8a\xf0\xb2\x4d\x1d\xc8\xd4\x45\x99\xcc\x5f\x7e\x01\x95\x2a\x68\xb1\xf7\xdd\x43\x79\x6c\x82\x4f\x52\x1a\x8f\x6d\x69\xfd\x7d\x65\xdb\x3a\x10\xa1\x50\x5e\xf3\x60\x37\x12\x44\xa2\xbe\x55\x58\x75\xca\xe1\xbc\x57\xe5\xf9\x93\xb9\xa3\x57\xb3\x96\x19\x34\x81\x1e\x44\xbe\x55\x14\x92\xf7\xc6\x4b\x7a\x66\x71\xd7\x6b\x5d\x5c\xea\xa5\xf4\x77\x4e\xa5\xee\x78\xa2\xff\x78\x23\xdc\xa6\x3f\xd1\xc1\x96\x39\xe4\xb8\x55\x6c\x93\xd0\xbe\x11\xc8\xdb\x40\x7d\x94\x9b\xdc\x18\x99\x8a\xfe\xf6\x1f\xc0\x72\x87\x7a\x81\xe7\x28\xf6\x9b\xf3\x39\x48\x6c\xe9\x06\x34\xf0\x8e\x17\xff\x5a\x78\x6b\x04\x66\x17\x04\x49\xca\x78\x82\x82\x52\x0b\xc5\xbb\xc0\xa3\x6d\xfb\xd5\x9d\x17\xad\xb1\xeb\x80\x6f\x5a\xbe\x69\xa5\x66\xc3\x36\x44\x8e\x07\xaf\x31\x7b\x0b\xe1\x3b\x1b\x61\x58\x8e\x6c\xe2\x3a\x48\xdc\x25\x19\x1d\x39\x35\xfd\xd3\xb5\x35\x96\xcc\x0b\xf9\x17\x16\x31\xab\x0f\xd6\x1b\x22\xc8\x61\xca\x4b\x39\x42\x55\xaa\x85\xbb\x1f\x83\xa7\x2c\x51\x7f\x0f\xab\xcf\x70\x83\x99\x46\xb2\x2b\xd4\x9e\xe8\x52\x98\xb9\x66\x31\xcc\x4f\x02\x94\xea\x4f\x1a\x10\x6c\x4c\x9c\x0a\x87\x39\xec\xb3\x6d\x02\xe4\xfb\xd9\xf2\x17\x93\x60\x7b\xf6\xf5\x12\xd8\xd9\x1e\xc1\xf0\xe9\xe3\xb3\xfb\x87\x23\xdb\x72\x64\xc7\x5e\xa7\x28\x80\xc5\xda\xbc\xce\x19\x74\xe2\x24\x09\x02\xa4\x78\xd1\x35\xd1\x8e\xe8\x77\xaf\x4e\x52\x35\x5e\x92\xae\xab\x73\x20\x0b\x55\xff\x11\xba\xf5\x6f\x4f\x9a\xbd\x6a\xfb\xa7\x2a\x68\xe6\x20\x81\xa7\x08\xd4\x53\x37\xaf\x82\xc3\xde\x5b\x6d\xb0\x3f\x4b\x0d\x13\x22\xd8\x99\x7d
\x87\x7b\x31\x29\x35\x56\xd4\xec\x90\xd1\xe1\x06\xe3\x20\x02\x1e\xa5\xd9\xda\x3b\xb4\x01\xb8\x79\x44\xc4\x3b\x5a\xf8\x4d\x9c\xa6\x72\x67\x08\xf8\xd5\x4e\x19\x14\xd9\x97\x7b\x8d\x0f\x2a\x64\x73\x2b\xa8\xc5\x05\x4e\xdc\x15\x32\xc5\x95\x0d\x2c\xb6\xbc\xb9\x52\x2f\x09\x3b\x75\x5d\x53\x59\x50\x9b\xaf\xf9\x77\xd0\x02\xc6\x73\x11\x92\xa0\x07\xe4\x60\x08\xed\xfc\xa6\x17\xdb\xa4\xcf\x32\xa2\x27\xa9\x5b\xee\x61\xb1\x9d\xde\x0e\xd4\x23\x9d\x42\x47\x13\x2b\xb8\xf5\x0d\x66\xbc\xfc\xfe\x89\xe3\xdb\xb7\xda\x1d\x38\x90\xc7\x9f\xb8\x05\x64\x40\xdd\xd7\xcc\x93\x4f\xe8\x13\xe7\xe3\x05\x6d\x08\xc8\x55\x8c\xbd\xd8\xeb\xed\x5c\x84\xf9\x07\x27\xa7\xf7\x55\x58\xf1\x4d\x2e\xd7\xc6\x5d\xdc\xe7\xb4\x6c\x97\x23\x65\xbc\x84\x16\x66\xc6\x80\xbf\xbb\x23\x02\x1f\x47\x7b\xb9\x2f\x27\x85\xa3\x63\x7f\x81\x0b\x3e\xd8\x73\xe9\x1a\xf4\x8c\x6f\x44\x1b\xb5\x05\xfb\x03\x37\xf3\xf4\x34\xce\x4c\xd3\xe1\x07\x69\xbe\x9f\x4a\xa3\x70\x1d\xaa\xcc\x6b\x5c\xec\xf9\xf2\xdf\x5c\x53\x93\x3f\x62\xb4\x74\x91\x1d\xb0\x05\x77\xf8\x90\x4d\x16\xc1\xbc\x17\x37\x2b\x44\xd8\xcb\xd8\x97\x55\xcd\x2c\xac\xd9\xeb\x67\x96\xbf\xda\xd5\x4a\xe9\x04\x58\xe9\x98\xf5\xbf\x92\x88\x7b\x3b\x5b\xa9\x4a\x3f\x96\xc2\x8f\xb5\xb0\x36\xe0\xfd\x6f\xb7\xb5\xa2\x5e\x2b\x49\xb9\xe3\x7f\x75\x1b\xd6\xe1\x77\x1c\x2c\x2c\x37\xe5\xe7\x8b\x80\xde\xa6\xe5\x0f\x1b\x51\xd5\x97\x8f\xec\xa6\xa2\xc3\x8c\x5d\xfc\x82\xf3\x0f\xcc\x22\xc3\xd4\xb4\x66\x91\xeb\xdf\x6a\x9c\x14\x7d\x9d\x77\x51\xfa\xe3\x96\xe2\xa8\x17\xf5\xd1\x90\x58\x9a\x50\x8f\xe0\x64\x18\xbd\x47\xc9\x5d\x8d\x1b\xf7\x65\xe8\x46\x4d\xe0\x44\x43\xa9\x9a\x66\xf5\x29\xc8\x0a\xbf\x0e\x99\xf9\xf0\x2b\x74\x5f\xfc\xb1\xcb\xe4\xdc\x16\x99\x3c\x34\xb9\x2a\x18\x0e\x43\x79\x14\xe8\x7d\x29\xbd\x49\x3b\x7a\x18\xeb\x2a\x74\xe0\xf9\x69\xb6\x9c\x97\x6a\xe5\xac\xe5\xe9\x6f\xa9\xb2\x60\x69\x97\xfd\x5d\x3f\x6a\x75\x29\x60\x49\x86\x1d\xe8\xb7\xf0\x43\x0c\x22\x72\x30\x84\xca\x8d\x82\xfd\x74\x79\xb0\x0b\xb0\x9d\x01\x87\xce\xc8\xe1\x97\xf2\xf2\xd6\xad\xed\xc7\x2f\xc3\x59\x4f\x36\x01\xa9\xda\x50\xff\x24\x66\xaa\x23\xf2\x54\x8e\x91\xa5\xec\x26\xcf\xd1\xe0\xaf\x5d\xb6\x94\x7a\x89\x76\xcc\xe8\x66\xc6\x2f\x04\xdf\x87\x70\x89\xde\x2b\x8a\xcc\x61\x65\x4f\xe0\x91\xb7\x39\xec\xcb\x1e\x79\xe9\x71\xf1\x08\x69\xa0\x85\xa2\x98\x7a\x5b\xfb\x50\xb8\x8c\x12\x2b\xec\xba\xc7\x65\xe8\xe2\xf3\x7b\x52\xc1\xea\xe3\xc5\x8b\x73\xb9\xe5\x8a\x91\x52\x33\x0c\xac\x51\xa3\x4f\xa6\xad\x86\xab\x8b\xc1\x63\x24\x63\x38\xa7\xa7\x4c\xff\x40\xb7\x7a\x30\x90\xde\x50\xb3\xd0\x21\xa8\x1e\xe3\x19\x80\xd6\x9a\x05\x0b\xcb\x3e\x17\x51\x21\xcc\x1a\x64\x10\xc8\x81\x71\x1b\x9a\x4f\xfa\xdb\xc4\xc3\x47\xfa\x9e\x92\x29\xa4\x14\xb2\xb7\xae\x55\x22\x02\xb8\xeb\x3f\x03\x43\x51\x29\xcd\xb6\x60\x88\xd1\xc3\x2b\x9b\x2e\xd3\x8f\x76\x31\x87\x8b\x7d\x0d\x36\xfb\x94\xeb\x87\x97\x19\xec\xe4\xc2\xdb\x6f\xad\x92\x30\xf2\xb3\x82\x20\x13\x58\x83\x74\x7b\x64\xfb\x95\x76\x53\x1e\x72\xff\x44\xdb\x6e\x0d\x24\x15\x8c\x80\x29\xbc\x0e\xba\x01\x18\xcc\xd8\x57\xfa\x68\x88\xe5\xf7\x8d\xbf\x00\x56\x3f\x12\x51\x0d\x99\x29\x86\x51\x7e\x98\x64\x9a\x3e\xb2\x72\x75\x12\x98\xc9\x65\x50\x95\x75\xe9\x33\x75\x7e\x00\xa3\xde\xc3\xa8\x67\x5d\x7c\x00\xc6\x2c\x78\x0e\x85\xdc\xb4\x6b\x1c\x39\xff\xe8\x2d\xa7\x30\xd3\xb9\xe8\x8c\xb5\x06\x70\x0b\x3b\x9f\x88\x1c\x3f\x75\x6d\xf5\xe0\x55\x15\x3d\xa1\x73\xb4\xc2\xbb\xc9\x5b\x83\xb6\x0b\x16\x8a\xa7\xa5\x57\xfa\xd6\xe9\x56\xe0\xc2\x30\x78\xd3\x29\xee\xf0\x41\x1f\xfe\xad\xf7\xae\x21\x78\xb5\xe4\x8e\xe8\x65\x3e\x61\x45\x8e\x60\x32\x9a\x0a\xbf\x5e\x03\xd4\xbe\xe9\xaf\x68\x9a\x3e\x74\xc5\xa9\xdb\x41\x60\x44\xb0\x7b\x7b\x42\xb8\x8f\xa2\x47\x07\xad\x79\x9f\x35\x9f\xfd\x68\x4b\xfb\x54\x80\xe2\x43\x28\xe
6\x8f\xb1\xd7\x1a\x03\xb4\x79\x96\xef\x23\x3c\xcb\xe3\x33\x89\x0d\xd4\x39\xf8\x1c\x13\x39\xf3\xdf\xd0\x33\xc1\x94\x81\x4a\x7a\x40\xe9\xcc\xf0\xd8\x66\x79\xe0\xaa\xf3\x80\x36\x42\xad\x4d\x9a\x5d\x3d\xca\x53\x92\x5a\x18\x64\x94\x5f\x4b\x99\x6b\x8c\x3f\xd0\xde\x79\x30\xf6\xe3\x40\xbc\x60\x88\x27\xc6\x0f\x2a\xa5\x6e\xea\xf6\xda\xd5\xea\x78\x8c\x7f\xa3\xf6\xfc\x01\xa8\xff\x05\x3e\x20\x7d\x21\xd0\x95\x55\xe0\xd1\xa7\x6e\x9f\xe9\x8b\x6f\x24\xdb\x7e\x9b\x94\xe8\xe2\x37\xe1\x0d\x97\xbc\x62\x4f\x03\x5a\x39\x34\x68\x1b\x3e\x3e\x17\x5f\x7d\x7d\x37\x41\x1b\xa8\xfd\x59\xee\x87\xfa\x33\x36\xd7\x91\xed\x30\x9c\xb3\xe5\xbd\x10\x54\xeb\xd9\xcf\x44\x7e\x50\x0a\xfe\xe9\x4c\xa4\x81\xee\x1e\x1a\xda\x6b\x34\x3b\xa4\xb3\x80\x59\x35\x10\xdc\xd5\x0b\x7c\x0a\x82\x60\xfe\x4c\x1c\x19\x2a\x93\x61\x76\x32\x76\x40\xd0\xcf\x19\x65\x04\x6c\xad\x2d\x67\x4e\x1b\xf2\x64\x41\x06\xfe\xab\x2a\x52\x2d\xb4\x87\xc1\xa5\x52\xf6\x17\x9b\xe9\xfd\xbc\xd6\xcc\x49\x83\x0f\x54\x64\x27\x33\x31\x65\x87\xa4\xd1\x7b\x29\x4f\x93\xaf\x13\x7b\x1f\x0d\x0d\xad\xcc\x0e\x78\x0d\xe0\x88\x27\xbd\x1a\xdc\x9a\x70\xa9\xcd\xb9\xc0\xff\x35\x9d\x21\x62\x04\x3d\x78\x0a\x02\x4f\xd6\x62\xcd\x37\x2f\xa1\x79\x67\x2f\x20\x2a\xaa\x03\x4c\x66\x78\x5b\x2b\xac\xad\xb8\xbb\xde\x72\x73\x27\x54\xdf\x1e\x1b\xcf\xb8\x95\x58\x29\xb7\x37\xd1\xa2\xfd\xdf\xe6\x5f\x90\xf3\x91\xff\xd0\xaf\x17\x17\x17\xd4\xc2\xae\x6a\xea\xea\xea\x9a\x9a\x94\x5d\xfb\xf1\xec\xba\xad\x8c\x95\xd5\xd5\xa2\xe9\x23\x48\x64\xa5\xaf\xc9\xf1\xc1\x6a\xef\x43\x76\x1d\x6e\x6e\x6e\xbf\x6b\x2e\x0e\x8e\x88\xd4\xce\xad\x89\x12\xaf\xeb\x0b\xb2\x9f\x30\xf1\xab\xd7\x02\x14\x46\x4f\xf7\x16\x74\x6b\x6c\x96\x8e\x62\xb6\x26\x4a\x26\x26\x26\x94\xe4\xe5\x31\x1c\x55\xa3\xe8\x87\x0d\xc2\x69\x44\x09\x92\x8f\x50\x48\xd8\x47\x59\x08\x04\x32\xf9\x8c\x95\x95\x75\x38\x66\xa6\xda\x3a\xf0\x9a\xcb\x89\x52\x42\x42\x62\x79\x79\x19\xfd\x27\xd3\xb2\x1c\x01\x63\x64\x62\x5a\xa8\x77\x21\x29\xce\xc9\xc8\xc8\xc8\xce\xa6\x2b\x7d\x15\x10\x10\x70\x70\x78\x98\xfd\xbe\xa3\xa3\x63\xca\x1d\xd7\x9a\xcb\x28\xf5\x65\x52\x14\x59\x67\xa9\xa5\xbd\xb5\x85\x84\xea\xee\x42\x13\xcd\xc9\xe0\xac\x69\xbb\x2d\x4b\xda\x9d\xbf\x54\xeb\xfa\xa4\x1d\x5b\x09\xdd\x9a\x28\x91\xc2\x8d\x89\x89\x59\x59\x59\xe9\xb3\x34\xde\x12\x16\x16\xf6\x01\x2b\x83\xc1\xc2\x4e\xd1\xd1\xf4\xd8\xb0\x68\x58\x4f\x6f\xef\xcf\x59\x79\x2d\x4a\x23\x02\x96\x94\x9c\x9c\xfd\x3e\x9e\x5d\x57\x71\xd4\x01\xa1\xef\x71\x7b\x23\x9c\x04\x39\xd9\x9e\xb2\x50\xd2\xcb\xef\x37\xca\x2b\x7a\x14\xa0\x10\x01\x0b\x7b\xfe\x8b\x83\x03\x7c\x51\x66\x52\x4a\x0b\xc9\x4b\x79\xe5\x64\x25\x13\x7d\x71\xb6\xbf\x3c\x6b\xed\xdb\xd9\xd9\x79\xa7\xee\xc4\x11\xb9\x3e\x98\xbe\x0a\xf8\x35\x03\xf9\x2e\x5a\x8b\xa2\x71\x33\x69\x31\xac\x0d\xf3\x1b\xf6\x4a\x5d\xd5\x37\xfe\xfb\xb3\x6d\xc4\x4f\x69\x2e\x72\x1c\x67\x7d\xd1\x31\xac\x2c\xb7\x40\x31\x00\x49\x29\x1f\x25\x5c\x2b\x80\x0a\xb9\x1e\x6d\xa0\x70\x9d\xf6\xe6\xfb\x5f\xbd\xb8\x5e\x22\xf4\x92\xce\x35\x2b\xd1\x6d\xb2\xe2\x9c\xf1\xc7\x7d\xa8\x5e\xa4\x51\xf6\x03\x37\x99\xcf\xda\xf3\xec\x87\xfe\xa3\xc3\xea\xa6\xeb\x9d\xab\xf3\xc3\x87\xec\x3a\x39\xcd\x1c\xb7\x1b\x23\xb9\x72\xee\x39\xd2\xad\xd8\xb6\x97\x27\x3b\xd9\xf8\x02\x5c\xf6\x61\xf3\x41\x24\x4c\xf4\xec\x07\x1c\x6c\xba\x8d\x7d\xb6\x76\x38\xcb\xdd\xd1\x4c\xc2\xed\xf2\x42\xcc\x6b\x02\xf6\x6b\xdb\xf3\x0d\x97\x87\xeb\x83\xc5\xea\x0f\xc3\x91\x6f\x4f\x5e\xca\x0f\xdf\x2c\xd3\xdc\x2e\x22\xf4\x42\x57\x2a\x15\x12\x39\x19\x2c\x45\x98\x16\x86\x55\x53\x2a\x9b\x9c\xf7\xc5\xe4\x16\xc3\xbf\x25\x09\x85\x6c\x4d\x94\x1c\x3e\x58\x9e\xf2\x19\x47\x04\x2f\x56\x5b\xcd\x05\x07\xd3\x8a\x85\x91\xd2\x27\x08\x2a\xb5\xa7\xe5\x7b\xc7\xb3\xeb\x0e\xe8\x09\x96\x54\x9c\xc4\xf8\x
4f\x44\xc2\xe1\x6c\x8a\xfb\x72\xbe\x89\xba\xd6\x50\xcb\x47\xef\x40\xa6\x34\x8f\xfa\x44\x9a\x1b\xdc\x4e\x83\x79\x17\x7e\xf3\x2d\x21\x10\x64\xc9\x17\x08\x99\xd7\x2d\x8d\x80\x1d\xbd\x22\xea\xb4\xe9\x56\x58\xa4\x00\xfe\x80\x3f\xdd\xf3\xea\x6c\x9f\xaf\x42\x6f\x64\xb1\x79\xb2\xcc\xb0\x96\x4f\x74\xc3\x72\x77\xb6\x16\x7f\xd0\x9b\x4e\xba\xfe\x2d\x59\xc1\xe6\xa9\x5f\xeb\xc6\xa3\xce\xbe\x44\xae\x43\x8c\x97\xf2\x52\x07\xb6\x3e\xd9\x52\xe1\x55\x1a\xa9\xdc\x38\x07\xb9\x86\x5f\x03\x09\x9e\x8c\x24\xb4\xf8\x62\x81\x28\xa6\xe3\x94\xd2\x45\xca\xd6\xcf\xd5\x37\xe8\x04\xda\xc9\x11\x38\xc9\x6d\x72\x25\x9c\xf7\xf0\x29\x8f\xb6\xa7\x2a\x66\x49\x1e\x8c\x7c\xc2\xc4\x85\x86\xf8\x62\x11\xae\x45\x5b\x2f\x34\x36\xf4\x9c\x1d\xa6\xdf\xb2\x54\x4e\x08\x86\x64\xc0\x86\xf4\x11\x98\x1e\xe3\x1f\x0f\xbd\x58\xff\xa6\x7d\x34\x40\xef\x41\xb1\x6d\x89\xde\xf8\x84\x46\x6e\x3f\x16\xdf\x00\xbf\x98\x61\x1b\x06\xbd\x8f\xe6\x5a\x24\x22\x52\x63\x56\x3f\x8e\x78\x15\x3d\x48\x34\xf4\x55\xe6\xfa\x1b\x10\x78\xac\x9c\x0a\xaa\x17\x3c\x2e\x2e\x09\x0c\xd3\x33\x9d\xfc\xb1\xd4\x06\x53\x7c\xba\x46\x8f\xf3\x6a\xb7\x5e\x44\xac\x45\xcf\x5d\x16\x42\x56\xf5\x65\x94\x1a\x46\x99\x10\x7d\x04\xaf\x49\x3c\xf6\xfb\xca\xc3\x39\xed\x9a\x90\x68\x69\xf1\x24\xc0\x01\x78\x93\x81\x84\x81\x1e\x5a\x78\xee\x75\xfc\xc6\xa8\x08\x16\x02\x89\x45\x61\x00\x09\xc6\x73\xd0\xc9\x5d\xb8\x63\xa0\xb6\xfe\x51\x82\x66\x75\x62\x37\x72\x77\xc1\x64\x1d\x18\x32\x2f\x93\x3e\xdc\x5c\x9d\x7f\x24\x60\x9b\xaf\xb5\x0f\xa4\x4a\x71\x88\x3c\x7a\x0e\x1d\xac\xf9\xcc\xd5\x4a\x65\xcb\x90\xeb\xfb\x0e\x24\xfd\xe8\xdb\x13\xd4\x36\xf7\xdd\x5a\xd1\x11\x86\x9e\x38\xda\xe1\xcd\xfd\xe5\xae\xd9\x9b\xab\x73\xfb\xd5\x4b\x8e\xbb\x72\xcf\xc1\x4f\xc8\xec\x11\xdf\xbc\xb9\xdb\x42\x1f\xf3\xf1\x5b\xbc\x1b\x25\xb6\xc4\xf1\x9e\x24\x67\xd7\x61\x89\x6e\xd5\x23\x9e\xbd\xff\x56\x51\x56\x6b\x1f\x1f\x12\xeb\xa7\xa8\x62\x27\x44\xe0\x1f\x78\x40\x57\x3a\xa4\x2e\xba\x01\x3d\xde\x9a\xf0\x3a\xfb\xe1\x93\x4f\x62\x76\xca\xb3\xd6\x6d\xfb\xba\xe5\x69\xd9\x4b\x81\x99\xdc\xab\xca\x07\x74\x92\xa5\x71\xe3\xfa\xa5\x64\xdd\xbc\x2a\x42\x8f\xb6\x75\xa6\xd6\x4e\x55\xfe\xc6\x10\xd1\xa3\x01\x2a\x1c\x14\xf4\xea\x96\x12\xe6\x7e\xa0\x75\x3f\x26\xc2\x2a\x9b\x96\x13\xa7\x47\x73\x03\xe0\xb9\x3b\x9a\xaf\x64\x25\x39\x22\x15\xa5\xda\x3d\x3c\xc4\x5e\xc8\xa6\x97\xa4\xe3\x9f\x38\x01\x07\x27\xc3\x23\x56\x49\x71\x08\x2f\x4f\xf7\xd2\x45\xa6\x6b\xed\x4d\xb6\x90\xfd\x09\x2f\xdf\xb6\x79\x43\x85\xad\x08\xc9\xdc\x21\x49\x9b\x97\x9b\x00\xf7\x14\xbf\x81\xa9\x0a\x93\xb7\x84\x85\x38\xdc\xc5\xe3\x51\xb9\xa5\x2d\x55\x96\x33\x23\x10\x71\xea\x25\x66\x5d\x79\xf5\x77\x74\x3e\x42\xcc\x6b\x6a\x5e\x5f\x1d\xf5\x3f\xbe\x38\xd9\x9d\x9b\xb9\x7e\xda\x9b\xf3\xb5\x87\xb7\x69\x42\xff\xb7\x5c\xe3\xc0\x4e\x04\xa0\x00\xa7\xdc\x34\x69\x5d\x3f\x55\xc8\x45\x65\xc2\x27\x60\xf6\x31\x9f\xd5\x6c\xb9\x11\x2e\x6b\xae\xe6\x06\x00\xb8\xc4\x3c\x21\x83\x06\x1e\x8c\x4d\x7b\x8b\xf7\x0d\x77\x15\x7d\x95\xcc\xa6\x20\x38\x26\x58\x82\x96\x30\xfa\xf0\x33\x77\x82\xc8\x96\x3e\x47\xa7\x54\xe2\xab\x5c\x98\x6a\xc0\x83\x5d\xc6\xaf\x3d\x59\xa3\x24\x74\xdd\x78\x59\xd9\x57\x83\x23\x40\xee\xa8\x41\x92\xd4\x23\xe2\xd8\xd2\xce\xaa\x2d\x08\xda\xf5\xe2\x16\x54\x3c\x76\x9f\x9e\x09\x59\x86\x9d\x14\xf5\x6d\x48\xa7\xb8\xed\x75\xe8\xb0\x6a\xd1\x45\xcf\xd9\xfe\x32\xc4\x7a\x6a\xed\x54\xf0\x4f\xac\x6c\x8d\xd3\x47\xf4\x37\x1e\xe7\x62\xc4\x51\x1a\xb8\x15\x16\xaa\xe6\xeb\xd4\xda\x31\x00\x77\x24\x70\x20\x64\x77\xc4\x42\x47\x0e\x0b\x89\x34\x9c\x1a\x5f\xfb\x83\xc0\x04\x31\xcd\x1e\x36\x9e\xb3\xd9\x52\xe1\x88\x74\xe7\xfb\x2b\x1f\xe6\x1a\x3d\x2e\xc7\x41\xd2\xb7\x32\xfd\xad\x48\x3e\xe8\x47\x88\x2c\xc1\x1d\x57\xae\x97\x64\xc9\xcc\x30\x5a\xcf\x30\
xb8\x8a\xe2\xfb\xd0\x4f\xe2\x02\x65\xee\x8e\x41\x4c\x9d\xd1\xe8\x5b\x0b\x31\xf7\xe4\x58\xe6\x2f\xbc\x26\x49\xfc\xe2\x75\x5c\xeb\x38\xfc\xc6\xc8\x92\x8a\xdc\x0f\x43\xff\xb1\xfc\x44\x9d\x02\xd8\xb7\x47\x2a\xaa\xf9\x9e\xd7\x17\xcc\x72\xb9\x26\x3f\x18\x11\x72\x36\xaf\x14\x52\x13\x4a\x2b\xe1\x51\xfe\xdd\xa1\x32\x69\xa2\xfe\x3e\x51\xed\xa6\xbb\xcb\xa8\x08\x38\x40\xf3\x49\xc4\x0c\xd0\x28\xe2\x1c\x72\x7e\x60\xd4\x37\xa1\x34\x06\xe4\x74\xb5\x59\xeb\x4b\x74\x7f\xed\x56\x56\x40\x6e\x78\xfc\x11\x3a\xae\xff\x39\xe1\x35\xa8\xf1\xbc\xf3\x1f\x0b\x97\x2f\x19\x06\x16\x10\xf1\xf8\x7d\xe7\xa3\x11\xf5\x64\x41\xc4\xa3\xea\x0b\x7c\xc6\x30\xa0\xb1\x18\xef\x28\x09\x7c\x9b\x32\xe1\x2a\x23\xea\x4f\xa6\xd1\x55\x9c\x93\x4a\x14\xb5\x6b\x6c\xf4\x27\x4b\xf5\xc9\x3e\x04\x7c\x11\xab\x4d\x2f\x80\x8d\xe9\x40\x03\xf5\x77\x16\xd4\xee\x7e\x3c\x40\x0a\x86\x78\xe3\x70\xf4\xf2\xfd\xeb\x5d\x62\x37\x19\x10\x38\x7b\x1c\x0e\x1b\x43\x93\xc5\xf5\x44\x57\x04\x26\x65\xc3\xf4\xd2\xf9\x9b\x85\xdb\x30\x7f\x0c\x01\xa7\xdd\x94\x72\x18\xe5\xbd\xf4\x53\xc5\x1e\x7d\x53\x3c\x39\x5f\x01\xc4\x6e\xdc\x37\xe8\xa4\x2c\xe8\xa1\xa0\x3b\x8d\xa8\x67\x8a\x51\x9e\x89\x8c\x4f\x64\xe9\x49\x45\x30\xfa\x4b\x2b\xd4\xe4\xb1\x25\x84\x6c\x10\xb8\xf0\x1c\xc5\x66\x0d\xf9\x54\xcc\xb8\x38\xae\x8f\x86\x1b\xff\xf2\x03\x28\x57\x55\xee\x3f\x7a\xc4\x44\x04\xa7\xc4\x63\x93\x30\xaa\x65\x9c\x28\x31\xb8\x5f\xdc\x61\x64\xe0\x7e\x46\x29\xe4\x6c\xb3\xe1\x03\xa4\x7d\xe3\x20\xc1\x34\xd4\x31\x0f\x3c\xca\x72\xbd\x3a\x2b\x73\x05\xf9\xd7\x27\xdb\x3c\xa7\xf6\xd5\x6d\x70\xcd\x9d\x07\xda\x69\x2d\xd8\x40\x1c\x0e\x14\x7d\x47\x69\x2a\xab\x3f\xe8\x01\x33\x46\xf7\x29\xb7\x00\x38\xc4\x4d\x99\xf0\xac\x97\xee\xcf\xf3\x70\x83\x99\x76\x98\x94\x2c\xae\xf3\x48\x7f\xf0\x11\xfa\xc6\x4b\x6c\xe6\xba\x22\xba\x4a\x82\x0f\xfa\xf9\xfe\x06\x12\x6c\xe4\xc9\x0a\x26\xd3\x46\xd6\xbb\x1c\x42\x4b\x9e\xde\x0f\xb6\xca\xaf\x52\x1c\xcf\xdb\x8d\x69\xb8\xb9\xbe\x8c\x4e\x1a\xab\x98\xa8\x71\xde\x37\x88\x78\xca\x9d\x88\x6a\xc9\x60\x7d\x5f\x4e\xef\x81\xf0\xba\xf5\x8c\xe3\x30\x1f\x2f\x3a\xea\xa2\xe0\x32\xf6\x8b\xe4\x2f\xdc\x65\xba\xfb\xc9\x8c\x1a\x54\xac\x89\x2c\xab\x45\xce\x54\x5b\xcb\x81\xf2\x39\xf9\x9a\xab\xfc\x9e\xec\x8e\xe6\x87\x2b\x76\x99\x67\xc1\xee\x03\xc3\x18\x8b\xfc\xbf\xec\x38\x90\x3d\x06\xc6\x9b\xb4\x07\x9c\xee\xcc\x80\x5a\xe2\x51\x80\x06\xe6\x8c\x77\x6a\x5d\xf4\xcc\x55\x05\xc0\x47\xfb\xb8\x01\x5f\x0b\xeb\x83\x80\xe7\x6b\x8f\x7f\x03\x53\x06\xe7\x6c\x2a\x85\x6b\x56\x18\x87\x16\x11\x75\x58\xe5\x56\x47\x97\xa9\x4c\xbc\xfb\x8c\x10\x40\x7e\x66\x98\x78\xfd\x2e\xe1\xde\xd6\x7b\xef\x98\x25\xee\xea\xa7\x6f\x1c\x00\x34\x11\x3d\x39\x57\x8c\xcd\x3a\xcf\x1c\xb8\x95\xee\x2f\x7f\x8b\xa4\x9b\xb3\xec\x38\x58\xed\x7d\x2b\x32\x00\x07\xd7\x71\x9f\x6d\x0c\x67\xbf\x29\x2a\x25\xf4\x2e\x00\x68\xa6\x66\x61\x7a\xb6\xc5\xee\x9e\xbf\x37\x1b\x57\xb4\xcf\x5d\x9b\x02\x96\x1a\x76\x85\xed\xf2\x4a\xe5\x6f\xc7\xa2\xa0\x1c\x3b\xad\xfe\xb8\xb1\xfe\x18\x4c\x4a\x69\x6a\xc4\x24\x53\x66\x12\x08\x59\x6b\x59\x2e\x3f\xc1\x9e\x06\xb7\x53\xa8\x9b\xda\x16\xc7\x11\x4b\x82\xa8\x57\xa3\x0e\x0e\x4f\x80\x36\xf3\x9c\x3b\x37\x85\x1b\x64\x7b\xb2\xec\x88\xd4\xf0\x1f\xaf\xb0\xf6\x70\x04\xf8\x2d\x09\xa7\x91\x92\xa8\x3c\x94\xdc\xd1\x60\x0e\x01\x51\x41\x43\x87\xbf\x70\xc3\x54\xc6\x96\x46\xaf\xf7\x3d\x42\xb7\x71\xeb\x6a\x81\x81\x4a\x24\xbb\x9e\x57\xbf\xf8\x8c\x3d\x45\xcb\x1c\xe2\x71\x79\x92\xf9\x48\x45\x27\x36\xca\x4a\x7a\x06\xbb\xf5\x8e\xf1\x40\x8a\x0c\xde\xe7\x28\x9f\xbb\x58\xa8\xac\xb0\x2c\xd5\x4f\x40\x03\xd4\x58\xa1\xbb\x5f\xd3\x22\x1b\x58\xbb\xd6\xce\x68\x18\xd5\x8a\xee\xf2\x01\xfb\x01\x2b\xb3\xe1\x63\x5e\x8b\xa7\x76\xac\x3e\x94\x9d\xd4\x0e\xb1\x09\xc2\x6e\x55\x79\x06\x5e
\x93\x0c\x27\xb2\x87\x5f\x47\x81\x2d\xa3\xc3\x51\x29\x16\x74\x63\x69\xe3\x59\xbc\xfa\xb9\x08\x1c\x65\x35\xed\x07\xdc\x05\x06\x3b\x16\xf7\xd9\xd8\x19\x75\x62\x03\x1f\xa4\x75\x58\x9f\x19\x4e\x8e\x64\xf5\x4f\xc4\x0f\x7d\x7e\x40\xc7\x5d\xfc\xf7\x9b\x8a\x2d\xc0\xa1\xdc\x06\x60\xc0\xe1\x85\x30\x23\x7b\x60\x98\x58\x0f\xa4\xa7\xe5\x79\x66\xa0\xdd\x9b\x38\xba\x97\x99\x26\xe8\x9e\xd0\xc0\x02\x15\xc7\x90\xf2\x40\xc9\x30\xaa\xf7\x12\xdf\x1f\x78\xa2\x22\x73\xdc\xe1\xde\x9c\x0e\x3d\xea\xf9\x8e\x6b\xc7\x8b\xb5\x49\x29\x78\xf7\xcd\x46\xd4\xb0\xd2\xcb\xfd\x54\xc0\xfe\x63\x6d\x18\x40\x1e\xf9\x64\xde\x50\x20\xdf\x38\x70\x5f\xe1\xd9\x01\xd4\x95\xd7\x7a\x7e\x56\x30\xe4\xea\xbd\x0b\x09\x2c\xab\x1f\xb4\x59\xd0\xa3\xf6\x87\xcf\xd0\x95\x00\x59\xce\x54\x15\xa3\x22\x03\x58\xbf\x76\x6b\xbd\x2f\xd5\x7b\xc3\xc1\x72\xf6\x74\x6d\xd0\x5a\xc8\xed\xa4\x08\x23\x02\xb5\x59\x80\xca\xfa\xf8\x82\xbe\xbe\x20\x91\x54\x96\xf6\x1b\xa5\xd9\xa3\x07\xf8\xb5\x61\x53\xe5\x46\x72\xf7\x7d\xb1\x13\x85\xc2\x29\xfa\xc5\x79\x29\xee\xfa\x11\xdb\x27\xe4\x1f\xa3\xa4\xc7\x68\x73\x5f\x9c\xea\x52\x25\x82\xaa\xcf\x65\x2f\x71\x60\xea\x7c\x4e\xab\x58\xb6\x9a\xdf\x6b\x6c\x97\xc7\xc6\x65\x0f\xb9\x45\xcd\x09\x13\xc4\x99\x0d\x35\x79\xa6\x64\x32\x3b\xb9\xe8\x05\xb1\xc9\x55\xe2\x93\xc4\x5b\x1f\x96\xcb\x8c\x9a\x25\x1c\x40\x35\x03\x9f\x31\x31\x28\xa6\x0a\x92\x1d\x54\x25\x59\x37\x8e\xd4\x7d\xa3\xba\xb2\xa6\xf8\xd7\xfb\x14\xc1\x5c\xc9\xce\xe8\x06\x16\xdd\x4e\xb6\x29\x76\xe0\x02\x01\x4a\x23\x4c\x81\xe1\x23\x09\x97\xa7\x7b\x87\xf6\xd1\xdd\x65\x5c\xa7\xbb\x73\x86\x1c\x87\x13\xb5\x5c\xfe\x2d\x2b\x78\x74\x09\xe1\x34\xa2\x76\xb9\xdd\x7a\x9e\x79\x6d\xae\x97\x48\xa3\x75\xec\x3c\xfd\xab\x22\xd2\x52\xf2\x1c\xa6\x11\x18\xae\x1b\xf0\xb0\xfd\x84\x4b\xb5\x45\xca\xef\x47\xf1\x5a\xcc\x9c\x5c\x49\x6c\x65\x05\xa1\x28\x6d\xc8\x41\xbf\x09\xde\xd9\x02\xd7\xad\x61\x57\x18\x36\x1a\xda\x53\x34\xef\x63\xf6\x17\xde\x72\xf7\x68\x2c\x5e\xde\x43\x43\xcb\x80\x4d\x8e\xfd\x4c\xc1\x11\x38\x60\xcc\x9f\xe5\xac\x7f\x96\xb5\xf6\x86\x8b\xfd\x4c\xd0\x37\x00\x3f\xcf\x66\xc0\x7e\x15\xb8\x46\xc8\xfe\x3e\xfb\xfb\x47\x23\x7f\x36\xcd\x80\xfd\xb7\x4d\xff\xbf\xfb\xff\x2f\xf7\x27\x23\xfc\xeb\xf2\x51\xdd\x3f\xd7\x81\x22\x7f\x9d\xf8\x91\xf3\xdf\x4b\xf2\xdf\x77\xf2\xf3\x47\xff\x36\xfd\xdf\xd0\x54\x16\xf4\x6e\x3b\x15\xd3\x7a\xe5\x1b\xb9\xff\xed\xbc\x96\x31\xb5\xb1\x31\x35\x3e\x1b\x17\x1e\xd7\x5b\x7e\x0a\x3c\x3c\xd5\x47\xbc\xc9\x3b\x1c\xa3\xca\x09\xac\xae\x52\x5a\x23\xef\xba\xe7\x0e\x04\x15\x53\x52\xc3\x06\xf2\x29\x37\xec\x6a\xb5\xef\x0d\xe7\xbb\x59\x63\xff\x3d\x81\x17\x43\xc8\x4a\x83\xfe\x3d\x92\x06\xc9\xfc\xc7\x11\xf6\x26\xe0\x9f\xa3\xfb\x9b\x80\x67\x60\x9e\xc4\x0f\x73\x88\x54\xc8\x25\x81\xa6\xbe\x6d\x77\x4d\xe7\x04\x0f\x37\x7f\x09\x71\x24\x14\x5a\x11\xcf\x96\x5e\x97\x32\x34\x39\xb6\xe2\x37\x38\x6a\x32\x68\x5b\x6c\x1f\x4f\x6c\x87\x8b\x93\xca\xac\xef\xfc\x81\x9e\x99\xf5\xda\xc0\xe0\x20\xb4\x28\xa5\xac\xec\xe5\xfa\xc2\x06\x0a\xd5\x8a\xf3\x52\x64\x7d\x00\x1c\xe6\xf4\x8c\x5a\xb8\xbb\x21\x22\xc2\xef\x74\xf2\xfc\xa8\x90\xee\x77\x4f\x23\x40\x31\x44\xcf\xbd\x13\x2c\x82\xe7\x08\xa0\x38\x60\xeb\x31\xa5\x6e\xbd\xfb\xa6\x7c\x82\x8f\x8f\xcf\x21\x76\x90\x94\x52\x6a\x95\xc2\xa8\x16\x8d\x97\xf1\xb4\x92\xd7\xb6\x93\x7b\xf3\xfe\x0f\xf5\x01\x58\xfb\x79\x6f\x90\xb1\x3e\x21\xb5\xa8\x6d\x6b\x7e\xfd\x77\x6b\xfb\xda\x05\x02\x6a\x47\xb1\xc5\xf1\x39\xb7\x99\x53\xcf\xe3\xde\xc6\xbb\xc2\xc2\xc2\x79\x58\xb7\x2e\xb7\x5f\xde\xd7\x7c\x0f\xc1\xfc\x22\x25\x29\xe9\x0e\x1f\x32\xac\xf7\xee\xd0\xaa\xb4\xb5\xdf\x1e\x94\xfc\x4d\xab\xe4\xc0\xfc\x1c\xc8\xb6\x09\x15\x94\xfb\xee\x8d\x72\xbf\x31\x85\xac\xac\x98\x72\x21\x67\x73\x7b\x33\xe1\x79\x89\x70\x59\x7
9\x05\x3e\xdc\xe4\x24\x9b\x7c\x69\x60\x65\xed\x6c\x26\x09\x13\x93\x68\x84\x99\xb3\xf3\x5a\x2b\x03\x7c\x66\x73\x6b\xef\xd6\x79\x5c\xfb\x88\x45\xa3\xcc\xd5\xc1\x61\xec\x16\x27\xbe\x78\xae\x79\x9f\x2d\x07\x56\xb5\x3d\x4d\xb2\x52\xf1\xe0\x0f\x08\x3b\xc6\xa7\xe7\x77\x31\x30\xc5\x7a\xd2\x98\x14\x21\x40\xd9\x4e\xa0\xf6\xf5\x8e\x92\x08\x3b\x1b\x6b\x32\x1c\x11\x6c\x8c\x53\xfc\xb5\xde\x5d\x0c\x11\xbc\x9c\xcf\x30\x02\xfc\xa3\x15\x52\x60\xc8\xae\x78\xb3\x6f\xe9\x5b\xd8\x8e\x44\xba\x05\xeb\xd5\x86\xde\x87\x19\x64\x9e\x6a\xb1\x07\x24\x1a\x06\x73\xf4\x38\x7c\xb2\x12\x69\x5a\xf1\x90\xdd\x50\x74\x7a\x9a\x75\x46\x41\x4f\x0d\x6d\x57\xd4\xb5\xef\x7c\x99\x8f\x62\x68\x5c\x65\x54\x09\x14\x29\x55\xeb\xe2\xd6\x78\xb9\xb4\x51\xc8\xd5\xe0\xe1\x31\x02\x1c\x01\xbe\xfc\xe3\x21\x43\x5f\x9a\x6c\x5b\x69\xa1\x10\xf5\x7c\xb6\x02\x20\xad\xe8\x4b\x4d\xf5\xde\xdc\x97\x26\x0e\xe5\x0e\xb1\xb0\x60\x01\x3b\xb4\x67\x09\xb3\x52\x77\x7d\x32\x00\x69\xd4\xf9\x08\xc0\x71\x0f\xb0\xc2\x3b\x2b\x6a\xc4\xb4\x08\x66\xd1\x3a\x0a\x1c\x01\x26\x3b\x6e\x47\xc1\x89\xff\xbe\x59\x90\x33\x12\x28\xf5\xc2\x69\x49\x8c\x7a\x27\x3c\x18\x3f\x7b\xe0\xe3\x23\x04\x4e\x46\xd7\x89\xf8\x98\x46\x41\xab\x55\xd2\x8b\x15\x1c\xd1\x9d\xb7\x0e\xc0\xed\xa3\x42\x98\x0d\x38\x26\x43\xb2\xc3\xe9\xca\x72\x20\xc9\x15\xdc\xf1\xaa\x75\xde\xff\x39\xcd\xcd\x75\x49\x53\x32\x26\x95\xa8\x3d\x81\xa8\xdc\x2d\x4b\xe0\x97\x4c\x38\x27\x25\x2e\x25\x6d\x49\xc6\x8f\xa1\xfd\x68\xac\xb0\x27\x52\x6d\xe3\xce\xd4\x95\x9d\x33\xaf\x04\xe1\xf3\x7d\xdc\xbe\xe1\xe9\x02\x5f\xa6\x4f\x96\x47\x80\xfc\x98\xa2\x91\x89\xce\x67\xed\xcc\x7d\xde\x3d\x5b\x13\x15\xe5\x58\x58\x58\xd9\x25\x2a\xda\xea\x01\x5a\x1b\xa8\xdd\xb0\xf9\x0f\x44\xd7\x1f\x1f\xb5\x0d\x4c\x23\x5c\xfb\x4c\x77\x07\xc2\xe1\x66\x56\x96\x12\x65\x2e\xf6\x4d\x93\xa8\xdd\xcd\x0a\x13\xfc\x50\x05\x11\x08\xd9\x3c\xe7\x78\x00\xaf\x20\xef\xc9\xf6\xf9\xa1\x5f\x0c\x37\xd7\x60\xd9\xa8\x09\xcc\xdc\x4c\x58\x9b\x30\x0d\xb9\xba\xa2\x76\xb8\x31\xa0\x5d\x7f\xe4\x64\x14\xdb\xa5\x37\x3b\xd0\x6d\xff\xb0\xaf\x6f\xf2\x7b\x69\x63\xd9\xb0\xe1\x84\x44\xfd\x03\x2e\xe2\x7d\x23\xc2\xf4\x15\x02\x02\x7c\x85\x95\x63\x5b\xfc\xb3\x90\xda\xe9\xab\x2c\x83\x23\x65\x0c\xb9\xbf\x07\x53\x0e\x4c\x3c\x65\x13\xea\xb6\xb3\xb9\x26\x36\xbf\x63\x7c\x78\xce\x3c\x06\xa4\xce\xb7\x11\x22\xac\x35\x07\xb9\x6c\x00\x50\x99\xc2\x0b\x9e\xd6\x04\x65\xfa\xa0\x96\x5b\x59\x90\x6d\xfa\x2b\x90\xdb\xfa\x28\xbb\x1e\x9f\x05\x38\x36\x26\xae\xcc\x73\x86\x60\xa2\xf4\x54\x24\x59\xff\xf2\x81\x3a\xf7\xed\x71\x73\xcb\x86\x40\xe4\xd3\x80\x99\xd3\x42\x18\xef\xde\xac\xc2\xaa\x7e\x09\x0e\xee\x49\xad\xed\x33\xc2\x6b\xc7\x45\xa2\x8e\x9e\x11\x9e\x2d\x85\x20\x36\x7b\xa7\x79\x04\xca\x40\xa5\xdc\x1c\x9d\x7f\xff\xcb\xf9\x97\xab\x0d\x80\x0e\xee\x4e\xf8\x5b\x29\xfe\xbb\x33\xe1\x4d\x7b\x15\x4a\xeb\x3b\x5e\xdc\xf6\x8a\x47\x7a\xf2\xc8\xf3\x23\xd3\x23\xbd\x0a\x3b\xd0\x4c\xd5\x65\xef\x6e\x1a\xf5\x84\x41\x46\xb4\x41\xe3\xea\xcc\xe9\x2e\x96\x9b\xc7\x57\xcd\xf4\x01\xf0\x22\x65\xb7\xeb\x89\x6d\x1e\xec\x8f\xc1\x89\xda\x7c\x6d\xde\x47\xd5\x9c\xf0\xde\x87\xd5\xa3\xf2\xd9\x06\x20\x2e\x15\x07\x0c\x7a\x49\x80\x5b\x87\xd5\x39\xf6\x56\x15\x0e\x2b\x10\x82\x5e\x6d\x00\xc2\xef\x79\xdc\x6d\x69\xc1\x7e\x20\xf3\xd5\x6c\xd9\x38\xbc\xe5\x5a\x8d\x9a\x1a\xd1\x53\xf1\x72\xfa\xdb\x2b\x50\x64\x60\x02\xe8\x5b\x92\x6c\xcd\x2b\xbf\x57\x2e\x5b\x00\x3f\x16\x17\xf6\x36\x2d\xd0\xbe\x26\xb5\xe5\xc4\xc4\xc2\x77\xdb\x7d\xd2\x76\x06\x17\xbc\x1b\xbd\x4e\x8b\x7b\x6f\xa7\xd2\x0d\xfa\x78\x0d\x5b\x0c\x1a\x49\x8b\x56\xf2\x17\x4e\x5d\xf0\x24\x63\xb4\x4b\x27\x02\xd9\xfd\xec\xfa\x91\xdf\x55\xfe\x85\xd7\xcf\x01\x68\xdc\x8b\xa9\xdd\x6a\x22\x67\x2e\x24\x68\x8a\x08\x96\xb1\x61\x60\xa8\x
12\x12\x23\xd0\x9b\x44\x04\xd3\xb9\xdc\xdf\x11\x1d\x03\xca\x7b\xfa\x95\x67\xa6\xd6\x26\xa6\xfa\x72\xae\x08\x0b\x0b\xc7\x88\x54\xb5\x7d\x5a\x6c\xd3\xa3\x92\x90\x7c\x7c\x59\xdb\x4b\x2d\x2d\x2d\x63\x74\x1b\xfb\x98\xef\xf2\xe2\xe2\x47\x41\x85\xf5\x82\x11\x93\x0a\xca\x1d\x05\xf0\xe3\x7c\x46\x83\xff\xde\x53\xb8\xa1\x41\x19\x50\x83\xd8\x09\x12\x43\x02\x4f\xbe\x74\x4a\xa9\x00\x55\x7d\xed\xf5\x4d\x4e\x3c\x4f\xdf\x72\x80\x31\xb9\xfc\x47\x23\x5f\x61\x4f\xf0\x5a\x4f\xc9\x9e\x2c\xd3\x71\x2f\xbf\x24\xf3\x8d\xe2\x68\x76\x07\xb7\xd1\xd1\x6d\x4e\xc0\xed\x39\x6e\x68\x84\xcf\xd7\x0f\xc9\xd5\xc5\x69\x37\x5a\xb4\xde\xb2\x51\xbb\x3e\xc2\x37\xb9\x37\x31\x86\x2c\x3b\xdc\xce\xed\xcd\xeb\x14\x8a\xba\xac\x99\x69\xbe\xd4\xed\x6b\x7d\xfb\xe3\xf6\xa2\xe6\xde\x2b\xc5\x1f\x2a\x2a\x38\x8c\x2d\xf8\xb4\x5d\x11\xd6\x80\x01\x87\xd8\x3f\x7c\xf5\x13\xd8\xec\x8e\x2e\xe8\xfa\x5d\x99\xed\x27\x44\x4f\x5d\x66\x6b\xe4\x5d\xb1\x51\xa0\x50\xbc\xdb\x7d\x31\xe2\x77\x29\x4b\xf9\x4a\x60\xcc\x09\x6f\x62\xe9\xee\x4d\x8a\x65\xb3\xe7\x7e\x18\xc2\x0f\x9c\x4d\x5e\xaf\x5d\x27\xd2\x2a\x7f\x25\x7b\xd5\x74\xd1\x49\x25\x36\x92\xb2\xe9\xd7\x69\x50\x83\xbf\xae\xff\x34\xf9\x09\xc3\x8a\x14\xe8\xfa\x1c\xe4\x6f\xe8\xf3\x49\x55\xa8\x7d\x5a\xe9\xd9\x26\xd4\x43\x2a\x6c\x84\x2a\x11\x69\x77\x8f\xfc\xfc\xa0\x04\xd6\x78\xbe\xf9\x90\x44\xa2\xc3\xa2\xee\xbc\x17\x74\x20\xb7\x98\xdd\x06\xed\xb3\xdf\x45\xa1\xec\x58\x3e\x12\x13\x52\xbf\x5a\x82\x1a\x85\xa9\x26\x69\x69\x52\x19\x14\xa7\x30\x47\xb8\xe7\x68\xd4\xc2\x62\x62\x6b\x69\x6f\x24\x15\xe0\xfb\x8d\x22\xee\x78\xfc\x91\xc1\xa3\xc7\xc4\x2e\xcf\xd3\xfe\x43\x83\x1d\x40\xda\x92\x2e\xc8\x12\xe0\xcc\x2e\x04\x3e\x3e\x14\x31\x8c\x69\x45\x02\x59\x72\x3f\xa4\xcb\x19\xae\x4a\x81\xca\xc6\xd4\xbc\x25\x5d\x1e\x68\x80\xe6\xbe\xf5\xd8\xa6\x46\x45\xd9\x22\x57\x7d\xa9\xd7\x57\x93\xd3\x56\xb7\x78\x55\x56\x39\x7a\x2d\x86\x7b\x78\xe4\xb9\x8e\x86\x80\x6a\x42\xd4\x77\xee\x77\x34\xb5\x8d\x02\xb7\xdf\xb8\x1d\xd1\x4f\x3c\xc3\x53\xa7\x0c\x84\x07\x7b\x34\x61\x4a\x74\x84\x58\x88\x9d\x25\x50\x84\x97\xdc\xa9\x5e\x18\x99\x5e\xa3\x0c\x41\xe9\xc1\x1f\x67\xc2\xbf\xe3\x60\xab\xc5\x3f\x48\x8a\xf1\xb8\x32\x1e\xb7\x98\x0f\xb6\x61\xed\xfa\x3e\x3e\xe7\xf6\x28\xe8\xf4\x33\xe5\xc9\xb2\x87\xc7\xac\x2c\x31\x2a\x94\xec\x74\xc7\x8d\x30\x0b\xf6\x67\x70\x2c\x4b\x55\x92\xc6\x53\x02\xd9\x3e\x2a\xf6\x21\xf1\x57\x02\x03\xdf\x83\x49\xa4\x41\xc3\x7b\x18\x13\x40\x79\xad\xc3\x1f\x6f\x88\x18\xf2\x4c\xa6\xa6\xf1\x25\x1b\x4b\x5b\x0e\x4a\xa5\x05\xd8\x97\x32\x61\x11\x16\xdf\x14\xa3\x1c\x8f\x29\x4b\x1e\x41\xe1\x9c\x6f\x24\xef\x3b\x18\x10\x51\x2d\xe1\x78\x6c\x01\x74\xbe\x70\x84\x31\x57\xf5\x99\x99\x59\xb5\x98\x78\x1f\x09\xd6\xe4\x84\x6c\xf1\x9d\xad\xfe\x90\xac\x39\xa6\x46\xea\xbc\x3f\xd7\xed\x5b\x70\x68\x29\xc6\x1b\x56\x26\x17\xbe\x46\x41\x2c\x9c\x3e\x66\x35\x48\x7b\x4f\x74\x75\x7d\xc1\x13\xc7\xc2\x18\xfa\x3b\x18\x3c\x03\x33\xc4\xe0\x7d\xf6\xcb\x43\x04\xdc\xe3\xa5\x32\x38\xa7\x5e\xbb\x1b\x90\x0f\xd3\x2b\x9c\x24\x79\x0d\x7a\x30\x86\x20\x18\xd2\x23\xfa\xb4\x05\x58\x10\xf9\xcc\xd0\xd0\x90\x4c\x03\xee\x10\x98\x93\x6a\x9b\x00\x3a\xc8\x94\xf2\x6e\x60\xbf\x05\xb5\x90\x17\xcf\x96\x0a\x5c\x3c\xdf\xfd\x44\x56\xa4\x1d\xca\xd7\xaa\x26\x12\xe0\x3d\x65\x54\x15\x35\x32\x92\x28\x27\xad\xd7\x98\x55\xcd\xef\xb7\x22\x36\xfc\xf2\x50\x1f\xc3\xd9\x83\xa3\xfe\x28\x21\x3a\xe3\x0f\x2c\x0a\xc6\x04\x61\x21\xa3\x1a\xc5\x73\xe5\xc2\xf4\xe0\x8e\x34\x34\x04\x00\xa3\x75\x04\x18\x41\xaa\x66\x37\xbf\xd3\xb2\xcc\x54\x52\x04\xff\xae\x45\x20\x03\xda\xdf\x18\xe1\xd9\xda\xc9\x96\x87\x7f\x68\x83\x8e\x5b\xce\x85\x75\x96\x32\xce\xdf\xb7\x97\x56\x4a\x6d\x6a\x06\xbc\x73\xac\xb3\xbb\xce\xdb\x0a\xee\xc8\x35\x82\xb3\xde\x6a\xa4\
xf4\x8a\x47\x84\x6e\x1d\xeb\x73\x58\xc4\x9f\x07\x89\x6d\xff\x11\xf0\x5e\x2a\x38\x20\x47\x7d\xc6\x21\x2e\xd6\x17\xb6\x40\x73\x3b\x5d\xf3\x4d\x18\xfe\x5b\x50\xed\x07\xd5\x02\x44\xb0\x8e\x05\x02\x05\xb0\x39\x6b\x67\x5e\xa3\x1e\x1f\x47\xb6\x6d\x8b\x57\x37\xdd\x84\x35\x13\x61\x0e\x01\x59\xce\x49\xa1\x70\x03\x42\x18\xa7\x7f\x26\x2c\x20\x20\x20\xc4\xef\xfb\x3b\xfa\x8d\xfd\xc7\x67\xfb\xa7\x1e\x45\x66\xd6\x50\x23\x17\xa1\xe8\x25\x2f\xc7\xc7\x7e\xa8\xe9\x7a\xfb\x6a\xd6\xd8\xd2\xf8\x2c\xc3\x96\x04\x96\xf6\x47\x06\xd4\x63\x7d\xdf\xbf\x51\xe0\x92\x67\x5f\x5c\x3a\xeb\xd2\xc0\xbf\x7d\x7f\xff\x87\x2c\x0f\x4e\x11\xc1\xb3\xe6\x4a\xa7\xd9\x30\x2b\xde\x74\xa7\xd7\xfc\x88\x1e\xb8\xb7\x55\xab\x7c\x2b\x1c\xec\xbf\x73\x9b\x9f\x0b\x37\x20\x94\x33\xc2\x44\x13\x6b\x37\xbc\x87\x41\x62\x1b\x5e\x4f\x14\x8d\xee\xe6\xf6\x5e\x03\x5a\xdc\x40\x87\x07\xc6\xb4\xfc\xe0\xf7\xc0\x1c\xf4\xec\x21\x52\xbe\x6e\x6b\xcb\xda\xd6\xaa\xef\x93\x51\x15\x1c\x75\x8e\x95\xe3\x22\x48\x1e\xa1\x7d\x7f\xfa\x7b\x9a\xaf\x31\x35\x86\x7e\x62\xcb\x1a\x53\x7c\xe0\x7d\x16\xc6\x7b\xad\x9f\x43\x4f\x8f\x5f\x52\xfc\xa8\xf6\x0d\xcc\xfd\xd7\xe9\x1a\x08\x6e\x00\xec\xbe\xda\x4d\x34\x01\x8b\xa7\x39\x50\x21\x01\xf9\x30\x8d\x72\xca\x61\x25\x82\x0c\xfe\xcf\x10\x59\x09\x7a\x62\x0d\x50\x6d\x83\x46\x46\x2a\x7a\x5b\x1b\x5b\x21\xb5\x06\xc8\x68\xae\xb1\x0d\x7f\x4f\xe3\x4d\x77\xd5\x23\xb2\x48\xd9\x74\x04\xce\x74\x47\x65\xe8\x93\x59\xa2\xd1\xef\x19\x9a\x42\x18\x14\x73\xea\x1c\xe0\x0e\xf0\x8f\x21\xe5\xe3\xee\x54\x20\x48\x7d\x4d\x17\x44\x41\xe1\x57\x7c\x9d\x65\xde\xc9\x55\x8e\x5d\x2d\x73\x49\x72\x28\xc9\x69\x6f\x75\x48\xc4\x77\x99\x3d\x63\x87\xef\x19\xa5\xab\x67\x3d\x69\x92\x7f\x96\xf9\xa9\xce\xdc\xbf\x28\xda\xff\x3e\xf3\x6d\x49\xf8\xc3\xef\x8b\xf3\x8d\x22\x13\xd2\x8c\xd4\xb2\x7f\x44\xb9\xab\xdf\xb9\x04\xa0\xe2\x0e\x58\xc0\xfb\x3d\x6b\x7c\x67\x08\x0c\xe7\x0d\xe8\x0d\xf1\xf0\x83\xa1\x17\xb5\x62\x05\xa5\x5a\x4d\xc2\xd4\xc7\xc5\x77\xb6\x00\x6e\xd3\xd9\xf9\x8a\x86\xbc\xaf\x5a\xb7\x90\xc3\xc0\xcd\x09\x38\xd6\x10\xb0\xcf\xb6\x6d\xde\x7e\x16\x6d\x78\x96\x8e\xac\xd3\x1a\x8b\x20\x51\x9e\x4f\x93\x9a\x10\xec\x7f\x72\x99\xd9\xc6\xff\x25\x7f\x99\x49\x10\xfc\xf8\xe4\x25\x03\x3e\xe3\x17\x81\x00\x52\xc1\x16\x07\xb2\x9a\xb8\xc4\x8e\x6f\x74\x6a\x1e\xdf\xbf\xb2\xd7\x21\x9c\x7d\x3d\x1d\xc8\x26\xc6\x91\x73\x96\x4c\xa7\xfa\xe8\xb3\x68\x91\x8a\xc1\x27\x81\xe6\x77\xcf\x6f\x57\x44\xcb\xeb\x29\xe1\x1f\x8d\x17\x0f\x7a\x5e\x00\xf8\x13\x08\xdb\x85\xbc\xf0\x78\xdd\xee\x8f\x58\xda\xb6\x30\x7e\x0e\xe9\x98\xee\x17\xbe\xf0\xf2\xa0\xea\x4b\xcc\xec\x2c\xe2\xd1\x7e\x93\xff\x87\xaf\x6a\xcf\x85\xd3\xa6\xde\x65\x9d\x48\x1a\xa0\x0a\xdb\x9e\xd4\x39\xea\x1e\x6f\x0d\x27\x90\xac\x02\x5b\x0a\x22\xa7\x41\xeb\x11\x41\x9b\x00\xe3\xa5\xeb\x7a\x6b\xd1\xc7\x55\xef\xb8\xae\xc7\x94\xc6\x54\x4e\xc2\x7c\x94\x35\x3f\x0b\xfa\xe8\x65\xc1\x70\xf2\x60\xfc\x51\x28\xf4\x51\xda\x9e\x7b\x30\x83\x75\xaf\x02\x18\xb1\xca\x88\x0d\x24\xdb\x2e\x43\x88\x58\x45\x65\x9f\xaa\x13\x06\x2b\x00\xf3\x93\x59\x11\x89\x60\x8e\x13\x6e\x42\x62\x14\x42\x66\xf7\x87\x80\x78\x2e\x6d\xea\x4e\x7c\x15\xf6\x09\xcb\x6b\x3d\x22\x6f\xcb\xa9\x2f\xf5\x95\x38\xf8\x97\x2f\x42\x53\x75\x3d\xda\x06\xf4\x10\xc1\xa9\x29\xe8\xa3\xb4\x35\x41\xc2\x37\x10\x32\x1a\xcc\x79\x96\xd9\xd1\xda\x5e\xec\xed\x0f\x8c\x4f\x9b\xbd\x7e\x44\x5f\x4b\x29\xa6\x89\xd8\xef\x13\x1b\xc6\x68\x83\xda\x17\x4e\xee\x7d\xf2\xa2\x12\x54\x51\x53\xb9\x78\xce\x17\x6e\x8c\xb3\x23\x26\x16\x95\x78\x3e\x70\x8d\x75\x72\xd8\xce\x49\xf4\x34\xe0\x14\x97\xdd\x38\x50\x0a\xd3\xd9\x63\xa3\x9a\x8f\xa1\x7d\x71\xe5\x08\x0f\x2e\x40\x05\x66\xf8\xc7\xdc\x46\x80\xfb\x84\x90\x58\xa7\x36\x8e\x05\x13\xcc\x09\x20\xbf\x3f\x6d\x9f\x1b\x83
\x2a\xa1\xac\xb9\x3d\x3e\x37\x33\x35\x22\x38\xe0\x8b\xc8\x67\x6d\x45\xc5\x50\x17\x73\x47\xee\x08\x25\xb0\x11\xdf\xee\xb8\x09\xea\x13\xc9\x37\xca\xf0\x7d\x36\x70\xa5\x69\x17\xdb\xfc\x93\x7e\x52\x6d\x3c\x23\xe5\xa2\x47\x05\x93\xf8\xcb\x14\x2a\x2b\x99\x30\x21\xca\x2f\x31\x15\xca\x0d\x5d\x7a\xe3\xe2\x26\x89\x00\x96\x99\x2d\xe6\xf7\xe6\xaf\x5c\x76\x3f\x9d\x72\x7e\x6a\x10\x99\x5d\x3e\x6d\xba\xa1\x16\x66\xc9\xf8\x3b\x91\xfc\x95\x29\xe7\x30\x05\x33\xbd\xda\x23\x5c\x62\x7d\x89\x85\x35\x0a\xcc\xa9\x6c\x48\xed\xec\x49\x8b\x4b\x1d\x03\xaa\x12\x7e\x4a\xc8\x82\x90\xc1\x58\x56\x25\x5c\x02\xcd\xf9\x2a\xfa\x9e\xb5\xa3\x4b\xbc\xcf\x07\x48\x4e\x95\x7a\x5c\x1e\x96\xd4\x78\x74\x91\xe4\xd5\xa6\xd6\xda\x6c\x2e\x45\x6b\xc7\x73\x4c\x3b\xad\x3c\xc7\x8f\x64\x55\x88\x1a\xe1\xe4\x0f\x40\x97\x0a\x3f\x3c\xe8\xbf\x7d\x30\xd4\x1e\xf9\x59\x94\xf3\x99\x98\xa3\x43\xac\x8c\x99\x56\xa3\x15\x1d\x84\xec\x6d\xf5\x23\x64\x47\x02\xff\x7d\x1e\x2e\xea\xdb\x53\x12\xb0\xbf\xf9\xb3\x24\xd3\xad\xdd\x86\x13\xa1\x04\x74\xc8\x50\xca\xf3\x5d\x11\x67\x69\xec\x0e\xf0\x9d\x77\xf6\x3d\x7e\xae\xfb\xb9\x8f\x88\x88\x2b\x6f\xbf\xbd\x9c\xa0\xbb\xf8\x70\x22\x54\x5f\xfe\x2e\x5a\xdc\xd4\x67\xa2\xfa\x23\x52\x93\xc0\x50\xe0\xe4\x22\x40\x12\x0b\xc3\xc3\xa3\x46\x21\xef\xe6\x9d\xe9\xd2\x5b\xb1\x84\xd8\x58\xe3\x8f\xcf\xfe\xc8\xa1\x70\x54\xb0\xef\x3c\x37\xf6\x65\xcf\xb0\x0b\xc6\xbe\xf7\x74\x0e\xbb\x11\x1b\x7f\xf9\xdd\xa7\x27\x0c\xd1\xce\x82\xbd\x4c\x99\xe9\xa9\x42\xf1\xc2\xfc\x94\x3b\xad\xbb\xa2\xae\x21\xae\xc0\x04\x72\xaf\xe5\xdb\x8d\x25\x7a\xed\x11\xd2\xb7\x77\x47\x46\xa6\xef\xbd\xd8\xbf\xea\xc0\x35\x69\x5d\x7c\x80\x1c\x9f\x70\x12\xf1\x08\x16\xa0\x20\xe7\x66\x23\xa2\xc5\x48\xff\x24\x4d\x2b\x14\xb7\x9b\x46\x1d\x1e\xe9\xeb\xeb\xfb\x28\xc8\xfc\x99\x7c\x08\x1d\xd3\xfd\x5c\xb2\x71\x42\x93\xb1\xe1\xa9\xcd\xb3\x4e\x45\x21\xee\x74\x1c\x79\xd5\xb7\x86\x24\x69\x96\x86\xaa\x24\x61\xae\x29\x33\xb3\x9f\xcc\x85\x3b\xc8\xae\x67\xef\x5f\xa5\x56\xb6\xe6\xcf\x73\x65\xeb\x00\x0d\xed\xb9\xd2\x88\x6f\xaa\xe4\x3c\x14\xb6\x8f\xdf\x13\x36\x35\xce\x87\xb8\x57\x9e\x89\xba\x9d\x36\x1b\xca\x90\xa3\xe9\x1d\xed\x15\x5b\xce\x84\x8d\xcf\x3f\xf9\xb5\x86\x78\x50\x75\x19\x19\x9a\xc7\x19\xfb\x75\x27\x49\x84\x89\xcf\xc2\xe9\xfc\x05\xfa\xaf\xb7\x8b\xca\x97\x02\xef\x7f\xbf\x42\xfc\xb9\x16\xf9\x29\xa8\x1b\xf8\x73\x39\xf2\x53\xd0\x33\xd0\x1b\x50\x2f\xb0\xdb\xf7\xfc\x45\x5a\x38\x01\x1a\x9a\xb7\xb6\x84\xc9\x1a\x40\x6f\x3e\x8b\xe1\x3a\x00\x2f\x54\x77\x07\xb4\x1c\xb4\xf9\x7f\x77\xe5\xf8\xff\xe1\x3d\x63\xb5\x63\x7f\x72\x0d\xf7\xfe\xd9\xca\xd2\xda\xef\xfd\xd7\x15\xab\x7b\x31\xff\xfd\x3e\x9a\xff\xe1\xce\x17\xc0\xbd\xff\x55\x95\xff\xbf\xeb\xd8\xff\x7b\x55\xfc\xb5\x3b\xd2\x3c\x2e\x41\xc5\x5a\xe9\x21\x11\x9a\x5d\x4a\x69\x29\x42\x86\x6b\x31\x89\x15\x6c\xda\xf5\x8f\x54\x3e\x95\x1f\x1c\x08\xe0\x92\x2b\xa6\x0b\x3b\x9d\x43\x79\xde\x91\x42\xc6\x8a\xec\x00\x89\xc0\x30\x6a\xd1\x07\xd4\xb4\xc1\x59\xfe\x72\xf1\x1d\x24\x06\x03\x9f\x24\xf6\xc5\xb1\x7f\x75\x08\x79\xfd\x59\xc0\xed\xc6\xbd\x00\xcb\x03\x8b\xd0\xe3\x22\x76\x22\xfa\x81\x8a\x39\xaa\x6f\xaa\x3b\xbd\xd4\x65\xe6\x64\x08\xc3\x9c\xcf\xe2\x6a\x75\xcf\xeb\x92\x90\x22\xae\xfb\xc3\x1d\x56\x1c\x2a\xdb\xa3\xa0\xdb\xc0\x8a\xb5\x7e\xf6\xaa\xaf\xdc\x46\x9c\x89\x0f\xe7\xdd\x99\xd2\xcc\xac\x3d\x8e\x4c\x5d\x5c\x5c\xb0\x06\x01\x77\x03\x43\xef\xab\xa5\x13\x0d\x36\x9e\xac\xf5\x6b\x55\xbf\x7d\x85\xfe\x6b\x37\x83\xf2\x73\x35\xb5\x84\x26\xd7\x23\x1c\xa1\x91\x5a\xb3\x93\x2b\xff\xee\xf4\x52\xc3\xd3\xb4\xa1\x84\xf2\xf2\xf2\x6a\xd1\x9b\xa2\x9e\x75\xbe\xc7\x26\x03\x69\x5f\x6e\xdd\x8a\xf1\xa8\x6a\xdd\x6f\x62\x9d\xe6\xc3\x9b\xba\xa5\x74\x3d\xb4\x58\xc8\x5f\x39\xab\x53\xbc\xb4\x72\xcf\xe8\xf
1\x93\xce\x5b\x2b\x9e\xf9\x0b\x03\x3b\x32\x3b\xa0\x9b\x8e\x26\xf0\x9d\xfe\x77\xdb\xf5\xdd\xb9\xd7\x17\xf6\xc8\x7a\x09\xb9\xef\x0b\x87\x89\xa0\x85\x49\xa7\x45\xbd\x2f\xf6\xc0\x8e\x16\x17\x21\x02\x2a\x11\xaa\x8f\x75\xda\x40\xc8\x9d\x5c\xba\x9f\xd0\xfe\xa5\xef\xdc\x73\x6d\x1d\x57\xdb\x91\x6f\xcb\x4d\xd7\xe5\xa7\xc8\xf4\x93\x7a\xa1\xab\xb3\x41\x39\x36\xc5\x9a\xa8\x57\x8f\xa7\xa7\x69\x6e\x4f\x8b\x92\x60\x6b\xec\x3a\xc4\xd2\x5e\x70\x68\x52\xae\x66\x0a\xcc\x40\xcb\x69\xfb\x0a\x66\xb2\x37\x4c\xc2\x68\x64\x5f\x3b\xe4\x2e\xfe\x8e\xdb\xa4\xa5\xf5\xcc\xab\x46\xaf\x7b\xc4\x80\x94\xc7\x4b\xec\xd7\xe3\xfa\x06\x74\xf0\xd1\x26\xf3\xff\xd8\x86\x3c\x17\xdd\x76\x07\x9c\x07\x01\xbb\x9c\x5e\x51\xeb\x42\x5c\x24\x49\x83\x33\x43\x67\x6b\xdd\xaa\x69\x5b\xe8\x97\xaf\x07\x9b\xf0\x72\x44\x41\x1d\x53\x2f\xcf\x81\xa7\xbb\xa2\xca\xd6\xf0\x2c\x73\xf9\x8c\xaf\x2b\x00\xce\xa9\x22\x6d\x4c\x34\x6f\x6d\xa4\x42\x37\x30\x9b\xa6\xd4\x89\x5c\xc4\x71\xa5\xe7\xe1\xcd\xa9\xe7\x59\xc2\x02\x27\x97\x6a\x42\x62\x62\x62\x22\x97\xbe\xb1\xb1\xb1\x4e\x02\x8b\x11\x39\xad\x6b\xfa\x87\x9d\xa9\xb8\x17\x85\x47\xfc\x53\x41\x8e\xf9\x61\x45\xc3\x4f\x9c\x20\x53\x96\x4e\x8b\x9e\x50\x40\xfa\x76\xfb\x6b\xc5\xc7\xae\xdb\xbe\x34\x54\x06\xc0\xa3\xd4\xda\xe9\xb0\x54\x7e\x7d\xd6\x12\x9d\x26\xcf\x9b\x56\x09\x4a\x9a\xb3\xd8\xac\xed\x31\x54\x9e\xe0\x2b\xd1\xcd\xaf\x27\x8f\xe8\xdd\x3e\x3f\x5a\x2d\x3b\xf3\x7e\x69\xb9\xe7\x73\xb7\xfb\x2d\xb0\xfb\xf5\x09\x77\x5f\x94\x7d\xef\xc4\x86\x52\xe8\xdd\xde\x33\xe4\xfe\x72\x9f\xac\x7d\x84\x56\x67\x94\x09\x1e\x6a\x19\xe0\x32\xbe\x23\x4f\x1d\x18\x0e\x23\xa6\x98\x4a\x95\x0a\x7b\xb2\xc7\xf5\x51\x3a\x09\x75\xc6\x3c\x7a\x76\xbc\xb8\x44\xa7\x47\xf0\xf2\xb3\xb8\xcc\xfd\xab\xa9\x4a\x93\xbd\x82\x7d\x12\xa9\x90\xc7\x4a\x5a\x75\xfa\xd9\xac\xcb\x0a\x6c\x8f\xa7\xcc\x10\x0d\x00\x95\x99\x38\x9e\x22\xd9\x53\x62\xcf\x4a\x48\xd7\xc0\x2b\x05\x85\xfe\xa4\x9d\xf3\x2f\x0f\xbb\x83\xe4\x48\x12\x38\xed\xc7\x23\x0d\xbb\xa8\x0c\x79\x8d\x4e\xae\xfc\x35\x18\x0d\x1d\x6f\xd3\xc2\x07\xd5\x4e\x69\x04\x1d\xce\x7b\xc8\x3c\x19\x7c\x22\xbb\xba\x3c\x57\x6a\xb0\xf1\xcd\xc7\x79\x79\x79\x01\xdf\x37\x7f\x0c\x1b\xa6\xb8\x74\xe5\xb6\xd0\x49\x91\x9c\xb6\x33\xd4\x65\x9e\x65\x3e\x20\x3e\x96\x30\xa4\xe3\x7d\x64\x9e\xe6\x7c\x76\xee\xaf\x5d\x2d\x22\xdc\x5a\xd3\xad\xfa\xfe\x05\x18\x1a\xe6\x4f\x43\x30\xbd\x5c\xd1\xf4\xc6\xa8\x06\xa7\x3a\xe4\x95\xe5\x78\x0a\xf7\xdb\x4f\xb5\x6c\xba\x0d\x66\xa9\x67\x27\x22\xf0\xf2\xbd\x47\x4a\xed\x4a\xf3\xe8\x26\xe1\xce\x36\xc5\x92\x4f\xf4\xeb\x6d\x7b\xc7\xbf\x26\x58\x50\x78\xd1\xbf\xbf\xda\xe2\xb3\xe2\x98\xb9\x1b\x48\xe0\xa5\xe0\xae\xdb\xad\xf5\xce\x4a\x49\x7c\xe9\xbb\xd1\x8b\xa8\x97\xc9\x6e\x57\x2b\x85\x79\x79\x79\xcd\xc0\xe2\x77\x99\xca\xcf\x49\x71\xfc\xc5\x7c\x60\x62\x3e\x30\x5e\xb0\x6e\xca\xbd\x03\xad\xb6\xd5\xcd\xf0\x0f\x35\xf9\xba\x9e\xac\x1e\x74\x4f\xb8\x6c\xed\x8e\x2b\xee\x9a\x3f\xef\x12\xd1\x9b\x1c\x5c\x94\x4c\xa5\xc8\x18\xce\x2d\x8b\x66\x6c\xba\x94\xa9\xeb\xf3\xbc\x48\xa9\x75\xdf\x2c\xce\xf2\x81\xc2\xa0\x46\xb1\xcf\xa7\x6c\x57\x7b\xe2\x42\xd7\x99\x59\x6c\x2b\x04\xc2\x6b\x8c\x02\xc6\x59\x0d\xf5\xf7\x96\x01\xb5\xc7\x4c\xe3\x51\x77\xdc\x27\x13\xd5\x70\x99\x82\xde\xd8\x10\xdf\x5e\x5a\xbe\xfa\x60\xec\x75\xb5\x4e\xf2\x34\x07\x20\xe2\x7c\x9c\x7a\xaa\x1a\x94\xeb\x50\x77\xa1\x8f\x73\x4a\x65\x74\x9d\xd7\x0f\x59\xeb\x06\x5d\x9f\x4e\x7f\xad\xa0\xd6\x71\x59\xbf\x86\x4d\xc0\x8d\x3a\x9b\x63\x40\xf3\x9e\x83\x94\x5c\xd5\xc9\xd5\x56\x46\xe1\xec\xa2\xef\xe3\x60\x06\x3d\x7a\x4a\xae\x48\x18\x7e\x9a\x11\x4d\x0a\xaf\x6b\x2b\x7d\xda\x0b\x2e\xcf\xf5\x65\xde\x0c\xe1\xe6\xc0\xe3\xc3\x13\x29\x4b\xab\x2d\x52\x09\xfb\x82\xf4\xa6\x37\x98\x35\x66\x13\xf3\x1e\xdd\x5e\x
e3\xce\x60\x4c\x9a\x64\xf3\xbd\x45\x80\xa4\xd7\xa1\x6f\x38\xe4\x21\x06\xd3\x71\xdb\xa5\xc8\xc7\x89\x16\xc4\xf5\x84\x3d\xbb\x98\xd0\xde\x38\x91\x06\x8a\x66\xb0\x31\x62\x6e\x40\x83\xa5\x72\xa0\x9c\xc5\xed\x45\xcf\xb6\xd3\xde\x8c\xf5\x29\xd8\x5a\x62\x11\xfa\x0b\xa7\x4c\xe5\x45\xa8\x7a\x4e\x33\xb0\xba\xb1\x01\x4a\xf5\xba\x29\x92\xcc\x79\x75\xab\x82\x50\x80\xea\x9d\x1a\xf2\x33\xd4\x2c\x27\x76\x95\xe5\x13\x4f\x15\xf3\x97\xd2\x1e\x27\x2a\x39\x7d\xe6\x50\x5a\x6d\xa3\xe4\x08\xa2\x9b\x3d\x93\xbb\x04\xa3\x43\xca\x80\x4b\x52\xde\x18\x96\x16\x9e\x05\xd6\x78\x8e\x43\xdb\xd8\x3d\xf4\x10\x48\xb5\x11\xe4\xfb\x58\xa1\x60\x4d\x8f\xe9\xad\xe7\x0d\x03\x01\x06\x69\xe5\x79\x8e\xea\x43\x0f\x01\x90\xc9\xed\xb3\x36\x2a\x42\x71\x53\x61\x8e\x6c\x28\x0e\xe3\xb6\xda\xb3\x87\x9c\x8d\xb6\x87\x8c\xce\x1d\x2f\x1b\x6d\xf6\xa3\x42\x82\xf4\xb2\x2a\x6a\x52\x45\xec\x79\xdd\x7e\x94\x8f\xe7\xd9\x83\x84\x3e\xe4\x6c\x56\x3a\x89\x7a\x1f\xf3\x3f\x56\xf3\x86\x55\xed\x4d\xef\xc8\x2f\x9f\x4f\x2c\x8c\xa9\x93\x4f\x6a\x69\x4b\x14\x8d\x4a\x3d\x11\xec\xa6\x3e\xc8\x25\xa1\x77\xb1\x4c\x11\x46\x38\x9c\xf7\xdc\x7d\x1f\x5b\xd1\x2b\x16\x6a\x64\x2d\xe1\x73\x9d\xe6\x71\x79\xdd\x8d\x01\xff\xa9\xde\x73\x52\x1c\x4c\xb8\x99\x04\x8e\x76\x25\xaa\x07\x2f\xa0\x60\x8a\x1c\x4c\xa5\x25\xa4\x2c\xe9\x78\x9f\xb7\xfb\x53\x38\xaf\xf5\xcc\xcd\x6b\x6d\x89\x70\x41\xd6\xd4\x77\xf9\x01\xcd\xfc\xb8\xce\xcf\xbe\x40\x19\xe1\x23\xa1\xcc\xa5\xfe\x89\x2f\xa8\xe4\xdf\x6a\xdd\x93\x2a\x27\x16\x8e\x6a\xe5\xfe\x61\x4c\x13\x0c\xfb\xc1\x92\xd4\x47\x9a\x65\x14\xca\x95\x6e\xd7\xbb\xa3\x09\xac\x88\x62\xa5\x27\x7a\xec\x60\xf8\xfd\x2b\x05\x0e\x25\x6d\x7e\xd0\xa3\x74\x80\xef\x10\xc8\x2a\x2f\xf0\x25\xa8\x1b\xa2\x22\x2a\xe0\x40\x00\x17\xb4\xdf\x4d\x61\xc9\x1f\x48\x4a\x95\x2d\x8d\x56\xcb\xdd\xf1\xc6\x5d\x1c\x4d\xcc\xfc\x08\x4a\x9a\x77\xda\xb8\xdc\xeb\x66\x20\x55\xde\x98\x6c\x60\x7a\xcb\x24\x72\x30\x1e\x48\x33\x1a\x4d\x5b\x3f\x6e\x39\x2b\x4e\xd8\x99\x13\xd2\x7d\xf7\x9d\xdc\x91\x86\x36\xe6\x83\xbb\x36\xee\x87\x3b\xd5\x42\x73\x7d\xb5\x87\x15\xa7\x66\x2d\x51\xb9\xa1\xd5\xd6\x5c\x4a\x9d\x40\x22\x7c\x6c\xd7\xd3\x76\x60\x9b\x64\xb8\x7f\x3e\x07\x8e\xd0\x56\xed\xc4\x7c\x5f\xab\xe5\xc6\x07\xd2\x9b\x33\x87\x9d\xb9\x89\x3b\x94\x72\x49\x57\xf8\xd0\x70\xd8\x8f\xb6\x60\xe6\xcf\x41\x7c\xa2\x98\x8f\xa2\x96\xf3\x36\x23\x51\xd2\x26\x41\x0d\x2d\xc0\x22\x99\x6a\x91\xeb\x0f\xdd\xe4\x1a\x44\x7d\xbe\xa3\xe9\xbc\x31\x84\x9a\x1a\x96\x05\xc0\x03\xed\xeb\xbb\x8a\x71\xb4\xb8\x62\x05\xf1\x55\x8c\x6d\xc0\xe3\x85\x60\x4e\x37\xce\x1b\x3f\xd5\x44\xb4\x90\x70\x2c\x83\x4c\xe3\xbe\xc4\x87\x35\x00\xd2\xdf\xa8\xd6\x40\x79\x00\x36\xc8\x9a\xe8\xd4\xad\x65\x68\x66\x97\x08\x0b\x55\xe7\x4a\xbf\x42\xba\x30\xe8\x87\xc4\xbe\x2c\x01\xfc\xbb\x3d\x4f\x7d\x41\x70\x28\x9a\x12\x39\x08\x5b\xee\x19\x91\x0a\x8f\x7b\x90\xa9\x86\xb6\xbf\x07\x4f\x11\x76\x8e\xca\x13\x44\xd2\x45\x57\xb5\xb6\x96\x5d\xaf\x31\x8f\xe6\xa3\x7c\xce\xc3\x4e\xce\x7c\x43\x02\x78\x79\x6d\xa0\xa3\xc2\x57\x75\x45\x86\xd4\xf3\x2f\xf1\xdc\xb0\xfe\xf7\xa5\x3f\xc6\xf3\xd3\xc9\xd3\xf1\x0a\xea\x45\x89\xe4\xc9\x28\x9a\x49\x47\x72\x13\x0f\xd7\x5e\xd5\xc8\x4a\x4b\xdb\x48\xe5\x16\x71\xc8\x75\x0e\x1c\xc1\xc1\x4d\x91\xfe\xeb\xd2\xe2\x35\x1f\x58\x5e\xbf\x61\x53\xbe\xb7\x9a\x1a\x94\x14\x64\x64\x31\x53\xc6\x95\x9c\x2c\xf6\x35\xa7\x9b\xcb\xd8\x6c\x56\x41\xab\x1a\xc8\x6e\x39\x59\x2f\xb1\x24\xa1\x1c\x85\x07\x8a\x3c\x49\x18\x6e\x6e\x12\x39\x97\x37\x2e\x11\x17\xc0\xa4\x30\x95\x8d\xe3\x16\x86\x46\xa3\x63\xd6\x3f\xe2\x5e\xf6\xed\x7d\xac\x3e\x93\x51\x7c\x71\x42\x0e\x2a\x7b\x6a\x51\xad\x90\x52\xa4\x39\xdd\x8c\x78\x52\x77\x3a\x54\x28\x2e\xcd\xf4\x55\x56\xab\xd6\x41\xe2\x2d\x6a\x2c\x00\x15\x45\x0e\xbd\xd1\
x7a\x67\x5c\xd2\x64\x53\x93\xd3\x34\x14\x15\x07\xb7\xb8\xaf\x4d\xf6\xb7\xc1\x69\x23\x51\xa4\x35\xd0\xd1\x1b\x26\xd4\x32\x34\xfc\xe4\xaa\xcc\xe6\x51\x9b\x77\xf3\x0b\xc5\x7c\x7f\x9e\x3b\x92\xb4\xca\x55\x8c\x31\xae\xa4\x3d\xf4\x6f\xba\x38\x3b\xe9\xb5\x8d\xba\x23\x17\x23\x9f\x68\x7e\x93\xd1\xfb\x76\xd1\x35\xdf\x8d\xb1\x12\x37\x56\xce\xcf\xe2\x9c\x08\xc6\xfa\x02\xe3\xb6\x14\xae\xf9\x10\x5a\xcc\x8d\xfc\x94\x27\x28\xd2\x51\x63\xb6\x8a\xaa\x9c\x62\x55\x13\x7e\x98\x66\x63\x13\x0b\x65\x47\xe7\x23\x8b\x88\x12\x9f\xc5\xe1\xc6\x17\x75\xda\x9d\x7d\x67\x92\xb7\x0a\x53\xc7\x0d\xba\xd1\x83\x46\xc1\x10\x44\xe6\x30\xfe\x22\x16\x54\x57\x00\x16\x67\xf7\x8a\x6c\x4d\xf0\xf4\x64\x62\x19\xdb\xf5\x79\x50\x3e\x90\x58\x27\x23\xb4\x94\x6e\x75\xcb\x88\x90\xcd\xdd\xf0\x6e\x09\x39\xbb\xae\x7e\x7e\x8e\xd1\xad\x84\x2d\xd9\xbe\x88\x52\xe3\x59\x70\xae\x75\x30\xd3\x88\xec\x40\x52\x75\x46\x73\xa3\xe0\xdb\xd6\x92\xfb\xd9\xa3\x75\x58\x7a\xfd\x56\xe9\x15\x7a\xc3\x4a\x46\xcb\xf9\x50\x2e\x7c\xd9\x42\x3b\x1d\x6c\x49\x87\x2c\xd8\x91\x89\xea\x33\x45\xc7\xca\x4a\x65\x4c\x6b\x89\x8a\x6d\xd8\xec\x78\xa3\x44\x93\x45\xe1\xf1\xdf\x8e\xe5\xb9\x0d\xf0\xb9\x4d\xec\x9d\xc2\x33\xbd\x8d\x70\x31\x38\xea\xe0\xe1\x63\xf9\x0f\xcc\x51\xd8\x0e\x2a\x8b\xb1\x35\xe1\xae\x29\xcf\x39\xc2\xcf\x34\x6e\xbf\x2b\xbb\xce\xe3\x05\x6e\xb6\x60\x3b\x8c\x2c\x79\x03\x52\xd9\xac\xbe\x4b\x81\x0a\xab\x04\x05\x6c\x8f\x7b\x61\x5f\x88\xc8\xb8\x09\x68\x58\x3a\x81\x89\x58\xf6\xdd\x6d\xc0\x60\x1f\x95\xd6\x27\x42\x9f\x68\xc7\x29\x9c\xed\xda\x01\x02\x0c\xf0\xfb\x5f\x33\x39\x4f\xa7\x0e\xd7\x6d\x3f\xc7\xb4\x75\x50\xd8\x8f\x6b\x98\x06\xbd\xa6\xfa\xe4\xd4\x9e\x73\x74\x3f\x8a\x2d\x48\xc6\x85\xe3\x9b\x48\x4d\x8e\xdc\x2e\x65\x15\x1a\x9c\xd6\x85\x03\xd3\x05\x7d\x5a\x3a\xd5\xfe\x11\x91\xdc\x3b\xbd\x12\x0a\x28\xb3\xea\xe0\x7c\x9d\xd9\xce\x85\x99\x23\xfd\xd8\x65\xc5\xa0\x68\x93\x96\x16\xe5\xe7\x51\xb4\x73\x64\x17\xb0\xdf\xe3\x05\xce\x36\x30\x5d\xee\xd2\x9c\x8f\xd6\x06\x99\x0d\xfa\x43\x9f\x28\x74\x56\x8e\xf2\xb2\x7b\x3d\x63\x87\xb6\xb1\xc2\x7c\x43\x2d\x40\x15\xf2\x98\x95\x45\xc0\x7a\x54\xa8\xdf\x88\x1f\x17\x3f\x03\x1e\x93\x37\x83\xad\x0c\x5a\x07\x2f\x0d\x68\x53\xae\xb1\xb8\x94\xe0\x1c\xda\x5d\xfc\xdc\x4d\x6d\xa8\xa6\x8c\x0a\x54\xd8\x74\x1f\xf4\xec\x8e\xcd\x51\xa9\x6c\x05\x6a\xca\xeb\xa9\x44\x49\xca\x53\x4a\x35\x23\xf4\xba\x3e\x8c\xed\x9e\x1c\xb1\x77\x2b\xac\xe5\x0a\x04\xed\xef\x54\xd2\xc2\x61\xe2\x0c\x3d\xb6\xb1\xe8\xcf\xc3\x71\x41\x9b\xb6\x3a\x5e\x7b\x6d\x63\x86\xe9\xa0\x67\x12\x98\x27\xe4\x20\x81\x76\xac\xaa\xc9\x46\xf3\xf0\xbe\x28\xec\xc3\x7c\xc2\x9a\xf7\x87\x60\xe0\x64\x7c\xc5\x34\xa6\xb5\x84\xfd\x8e\x7f\xf5\xd7\x63\x56\x3f\x98\xd8\xef\x10\x98\xd9\x0c\xcc\x7c\x5e\xb9\xbb\x9e\xf1\xbe\x5c\x95\x06\x34\xec\xae\x6d\x54\x32\xde\x05\xec\xeb\x6a\x03\x8a\xc9\x05\xe8\xe7\xd2\x4b\x5d\x9b\x0d\x1e\xbf\xe8\x04\x52\xf6\xf2\x0a\x99\xb7\xb2\x16\x92\x37\x03\x3f\xde\xb6\xbc\xc5\xd1\x2c\xe4\x1d\x42\x53\x6e\x6e\x01\xd2\x06\xbd\x32\x5f\x06\x84\xee\x96\xde\x55\x39\xd1\x24\x83\xab\xc8\x32\xbe\x54\x95\xcd\xa8\x18\x84\x65\x64\xae\x4d\x6b\x69\xe7\x0a\x5e\xc4\x14\x34\xad\x0f\x44\xe9\xb8\x0e\x1e\xa2\x5f\xe2\x53\xe4\x61\x9d\x1a\xe2\x31\xd3\xae\x2d\x43\x3f\x55\x67\x35\xa7\x25\xb4\x34\x03\xff\x16\x65\x68\xb0\x06\x40\x5a\x83\x1a\xaf\x06\x2d\xcc\x1c\x7b\x41\x34\x90\xaf\x5e\xdd\x5e\xf6\x4c\x98\x7c\xf9\x52\x74\x19\xd2\x49\x5d\x0d\xa2\x01\xa1\xd3\x25\xf9\xc3\xd0\x4c\x18\x86\x81\x22\x70\xce\x25\x35\xa8\x67\x87\xcd\xa9\x16\x96\x85\x4d\x2c\xa5\x3d\x3c\x08\xc6\x4d\xe5\x64\x4a\xca\x43\xb6\x7e\xb1\xb3\x04\x00\x7a\xdc\xc1\x34\x20\xec\x28\xa0\x2e\x97\x1c\xa6\x35\x2f\xfa\xb4\xc0\x73\x48\x11\x34\xe5\xb2\xd9\xef\x94\x32\x0c\x19\x4b\xbd
\x2f\x67\xde\x02\xac\x9a\x8f\x1b\xbe\x79\x1b\xf8\x95\x14\xf3\x84\x02\xae\x52\x65\x5d\xc9\x18\x43\xf6\x5d\x5a\xfc\x86\xa5\x80\xad\x0e\x7a\x60\xa2\xef\xa4\x98\xeb\x68\x52\x89\x3b\x1f\xbc\x36\x6d\x2e\x0e\xdc\xb2\xde\xf9\xc3\xb3\x01\x48\x6b\x70\x2c\x11\x8d\x35\x04\x70\xb3\x1a\xa1\x86\xbe\x26\x40\xf6\x84\x45\x70\xcc\xbe\x2d\x59\x0c\xd4\x73\x07\xe0\x7e\x51\x09\xab\xef\x7b\x43\x27\x68\xb9\x42\x4a\x2c\x44\x2b\x3a\xdc\x9a\x6d\xa1\x9c\xf0\xc5\xb7\xb9\x9e\x8c\xe3\x79\xc4\xb3\x0f\x85\x5c\xe4\x5e\x72\x35\xc1\x26\x39\x44\xc4\xc4\xde\xd2\x71\x29\x9b\x79\xed\xaf\x51\xe8\x9b\x51\xac\xbd\x60\x61\xaf\xa1\x83\x92\x2f\xf1\x6c\x3e\x42\x19\x78\x3d\x54\x7d\xb2\xbd\xc9\xbe\xeb\x9a\x74\xf0\xac\x40\xfe\x42\xee\xb7\x04\x5a\x0e\x80\xcf\x15\xa4\x98\x27\x48\x0a\xcf\xd9\x32\xae\x99\x4c\x22\x89\x3a\x27\xd2\xb6\x0b\x62\x8f\x63\xe0\x89\x89\xfe\xd0\xe7\xc8\xbc\xfa\x06\xd0\x00\x1e\xf4\xf8\xb5\x36\x72\xb6\x3a\x6b\xea\x04\x07\x4d\xcd\xce\xe0\xcf\x40\xae\xbc\x08\xe0\x3e\xf9\x90\x8f\x05\x2c\xbe\x58\x0c\xac\x13\x25\xbd\x1c\x14\x0b\xb4\x18\x3d\x55\x69\xa1\xc3\x1a\xff\x7c\xf9\xdc\xbd\xbb\x0d\x78\xef\x5e\xcb\x00\x95\x36\x03\x39\xe3\xd1\xb2\x78\xb0\xfe\x9a\xaa\x00\xad\xf8\x36\xb7\xf8\x2a\x8e\x4d\xac\x6b\x82\xef\xf5\x41\x3a\x45\x75\xa9\x85\xcc\x71\x60\x17\xf0\x3d\x2f\x8d\x32\x33\x1d\xae\xa0\x9b\xd5\x4c\x58\x2a\x6a\x50\x9c\xe1\xf2\xc3\xdb\xef\x09\xe5\x23\xa4\x8a\xc7\x47\xbd\x25\xea\xa9\x84\x34\x5c\x03\xd4\x74\x09\xfd\x0a\x24\x37\xb1\xdf\x01\xbc\x05\x68\xa0\x7d\x2e\x4c\xfc\x61\xfe\xaa\x12\xb3\xa2\x56\x54\x92\x8c\x8f\x07\x24\x40\x70\xcf\x7a\xa3\xe0\x65\xd5\x67\xb2\x18\x80\x04\x53\x4b\x0e\x29\x90\xa9\xfa\xc8\x04\x8b\x45\xd0\x07\x7c\xd6\xa2\x75\x22\x5b\x7f\xde\xd2\x35\xd5\x8e\x6a\x22\x33\x3a\xc9\x66\x92\x57\xa1\xa2\xb8\x37\x45\xf2\xb2\x7b\xfc\xf3\x3f\x16\xfc\x33\x89\xf3\xac\xbf\x5c\xee\x34\xc7\xad\x28\x7a\xbd\x8a\x06\x6f\xe4\xdc\xea\xe2\x54\x37\xa2\x4d\x76\x90\x4c\xb3\x1f\x49\xbf\x1f\x39\x14\x62\x10\x89\x11\xd2\xa9\x98\x64\xb1\x14\xe6\xfa\x31\xe6\x33\x54\x11\x2d\xfd\xbb\xd4\xec\xee\x12\xe0\x6e\x38\x39\xab\x96\xd5\x17\x9b\xa9\xc9\x17\x6e\x16\xe9\xca\x49\xf4\x00\x26\x33\x63\x5c\xcd\xf3\x9a\xe0\x07\xa1\x89\x89\x93\xcb\x9d\xc0\x2f\xf1\x25\xce\x5f\x6b\x64\xf5\xcd\x53\x5d\x8f\x88\xbd\x64\x26\x69\x9f\x91\x51\xc6\x16\xbb\x1f\x64\x45\x2f\x42\xf9\xf5\x66\x06\xc0\xad\xe4\x09\xc3\x6d\x89\xbe\xa7\xaf\xa2\xad\x4e\xae\xfc\x11\xe2\x1c\x13\x25\x24\xa0\x44\x7c\x4e\xbb\x14\xe5\xcd\x20\x6a\x64\x71\xb5\xff\x20\xd9\xb8\x9a\xb1\xf5\x02\xd6\x4b\xf6\x54\x8e\x04\xcb\x8d\x7a\x09\xb9\x15\x75\xcc\xaa\x28\x5b\x43\xc3\xec\x0b\x2e\x1e\x1e\x9e\x5c\xbb\x77\x5f\x79\x53\xeb\x88\xbf\xab\xc9\x3c\x49\x4d\xbb\xa8\xcc\xc1\xb4\x96\xf0\x70\xeb\x5d\x10\x00\x66\x20\x94\xe7\x2e\x4f\xf2\x2b\x90\x4b\xff\xf9\x00\x37\xd7\xaa\x41\x71\xba\x49\x9c\x17\xe2\x81\x8e\x97\x37\xf3\xc6\x7b\xba\x52\x69\x1f\xeb\xb6\x47\x72\x60\xba\xe9\x2f\x4e\x9f\x1f\x2d\xc7\x03\x39\x9f\xde\xe3\x6a\xa1\xe4\x21\x22\x4b\xe4\x11\xef\x51\x53\xdd\x41\xa3\x38\x88\x20\x7b\xc2\xb4\xc0\x34\x75\x38\x75\x87\x91\x06\x84\x9e\x46\x3a\x9c\x6d\x7f\x6a\xd3\x81\x05\x09\x76\x9f\x22\x62\x92\xeb\xdf\xce\xe0\x36\xfe\xac\x94\x59\x2d\x58\xd3\xe3\xaa\x67\x60\x67\xe0\xdb\xab\xa9\xb2\x32\xbb\xe9\xc6\x93\x5b\xb4\xce\xa2\xf1\xed\x42\x48\x08\x4e\x7c\x3c\x54\x5e\xa1\x2b\xde\xb5\x48\x43\x1f\xe9\x13\x72\x69\xfa\xbe\x0d\xe0\x92\xa1\x73\x39\xfe\xe4\x15\x9f\xd5\x14\x00\x47\xf2\x31\x9f\x0b\x34\x24\x30\x94\x02\xe6\x59\x3e\x52\x4a\xef\x80\x5b\x90\xa9\x98\xfa\xe1\xd4\xbc\x05\x78\x73\x0f\xd9\x05\x84\xf6\xc4\x50\x66\x2a\x15\xf5\x21\x42\x93\xdb\x93\x62\x07\xd4\xff\xc2\x44\x5c\x80\x89\x31\x85\xac\x0c\x25\xfa\x21\x41\x68\x28\x28\x7a\xee\xf2\x04\x74\x7b\xd4\x30\x2
1\xa9\xaf\xa0\x53\x09\x69\x50\xfb\x12\x5f\xac\x67\x60\xc1\xd7\x74\x73\xbd\xb9\xfe\xf5\x2f\x5c\x7c\x1a\x12\x2c\xcc\x9e\xd4\x9d\x6a\x97\x1b\x30\x9c\x87\xe3\xbc\x36\x07\xff\xe7\x98\x84\x62\x62\x9c\x96\x25\xda\xaa\x47\xa4\x9a\x29\x47\xb5\x00\xf3\x28\x6e\x56\x13\x4c\xef\x88\x6b\x9e\xd7\xc8\xb6\x72\x9d\xf0\x04\xc6\x6d\xc4\xf8\xa8\x60\xb2\x0e\x1e\x24\x89\x75\xe3\xc1\x55\x48\x63\x6f\x43\x0c\x3e\xb0\x16\x55\xac\x0f\x98\x53\xf6\x74\xda\xd9\xc4\xa2\x05\xff\xf0\x43\x61\x6c\x3a\x6a\xb1\xcd\xf1\x8a\x92\x28\x5b\x75\xb3\xd7\x2d\xec\x08\x5f\x12\x92\x4e\x4b\x77\x42\x67\x2c\x1f\xcf\x40\x43\x5a\x2f\x6a\xa0\x94\x4d\xd9\x08\x45\xc4\x8a\x50\x0d\x1b\xb2\x03\x68\xc7\x36\xb2\x62\x1d\x11\xb9\xb7\xf2\xa5\xb3\xd3\x61\x3b\xd5\x12\x49\xdf\x48\xeb\xe6\x0b\x93\xd7\xc8\x6a\x2d\x9c\x2c\x21\x9c\x71\xaa\x4f\x9f\x11\xbd\x39\xc4\xd6\xaa\xd0\x86\x56\xf4\x0a\xd5\xc5\x7d\x5d\x2c\x7e\x5e\xca\xa4\x36\x94\x66\xb7\xb1\xf7\x4f\xe4\xf8\x25\xbf\x74\x2b\xdf\x27\x65\x38\x74\x1f\xb3\xdd\x43\x4d\x3f\xf4\x5b\x64\x9b\x11\x95\x21\xe5\xc0\xfe\x26\xcd\xad\x09\x76\x52\x5e\x8c\x07\xd2\x21\x82\x28\xfb\x95\x55\xc5\xba\x0a\x49\xa7\x6c\xfa\x13\x6d\x20\xbe\x46\x05\x09\x89\xb8\x1b\x8b\x99\x46\xc5\xb8\x67\x1d\x63\x32\xa7\x1c\x56\x74\x1c\x5f\x91\x62\x7c\x5e\x5f\x4a\xac\x7f\xe2\x6b\xaa\x72\x0d\x62\x2b\xaf\xcd\x62\xa7\x9d\xaa\xb4\x26\x9b\xb2\xb5\xa3\xfb\xcd\xef\x8a\x61\x63\xcf\xb5\x25\x36\xe2\x2a\xc5\xfd\xda\xe8\x8d\x33\x25\x2f\x1e\xbd\x48\xef\xd8\x8e\x18\x3c\x26\x71\x49\x34\x3e\x4f\x3d\xd0\xda\x7b\x08\x3e\xbe\x70\xa3\xb1\xeb\xf6\xb2\x61\x43\x1f\x8b\x39\xfc\x98\xed\xa7\x16\x29\x3b\x90\x68\x86\xac\x6d\x7a\xa3\xde\x48\xb2\x96\xad\xb2\x6a\x56\xd2\x78\x78\xd3\x42\x27\xf5\x41\xd5\xb8\x04\xd4\x41\xde\x01\x82\x30\x8d\x33\x19\x9d\x2f\xc5\x15\x49\x3e\xa3\x26\x9b\x63\x2a\x0a\x72\x55\x72\x02\x9b\x1a\x34\x97\x6c\x9f\x60\xca\xc5\x35\x2e\x54\xf9\xde\xd7\x6b\xd1\x7a\x67\xa1\xf4\x84\xad\xc3\xba\x27\xe1\xb5\xd8\xa7\x62\x0b\x57\xc3\xff\x54\x5f\x5b\xf9\x59\x63\x72\xae\xf1\x83\x70\x6b\x16\x8d\xe8\x0d\xb2\x07\xe4\x68\xec\x82\xe7\xdd\x26\x1d\x32\x32\x9c\x9d\x01\x30\x7e\x9f\xcb\x1f\x89\x18\xaf\x71\x24\x8c\x5c\x94\xba\x9f\xf2\x74\x1e\x0f\xce\xb5\xdd\x13\x35\xfd\xa8\x3d\x07\xf1\x7f\x63\xd7\x4b\x44\xd0\x61\xa8\xde\x9d\xcf\x98\x71\xfe\xf1\x7e\x6d\xd6\x54\xb0\x1e\xcb\x9d\xea\xe4\x20\x61\xe3\xc8\x18\xa7\x55\x14\x4f\xd5\x5e\xc5\xa0\xa8\xd7\xd7\x34\x32\x3f\xbb\xfd\x83\xd5\xe4\x6a\xab\x7d\x92\x1c\x95\x45\x1f\x55\x14\x9b\x76\xbd\x1f\x56\x92\x21\xe3\x87\x44\xea\x2e\xca\x87\x1b\xd5\x5f\xdf\x60\x60\xbd\x4b\xb9\xe9\xec\xd7\xba\xb2\xa8\xf7\xa8\x9b\xbc\x59\xc2\x74\x58\x36\x5a\x0c\xfa\x51\xd4\xb3\x75\x0c\x59\x1f\x78\xf2\x99\xe9\xd2\xed\xe4\xca\xc5\x0a\xc4\x92\x7c\x5f\x6f\xf8\xaf\x39\xa6\x10\xe5\x1e\x53\xe7\xad\xfe\x1e\x75\x4c\x31\xce\x5d\x0e\x3c\x5d\x03\x43\x87\x87\x53\x55\xe6\x11\x5b\x82\x66\x93\xcf\xeb\x90\xfc\x33\xbc\x37\x8d\x0e\x16\xb7\xe6\xa2\x83\xee\x53\xfc\x93\x7b\x3a\x35\x90\x4b\xc7\xbd\x5e\xf8\xdd\x9f\xc4\x10\x4f\x71\x00\x7f\xd5\xc7\xfb\x74\xf5\xce\x24\xaf\x7b\xd4\xc2\x34\x11\xd3\xdc\xba\x6e\xbd\x4b\x76\xaa\xae\x07\x4f\x7e\x31\x49\x76\x64\xaf\xc3\x34\x51\x08\x0d\x37\xc5\x88\x6d\x41\x9e\xbd\x9e\x4a\xc6\xf5\xdd\x63\x41\x7f\x92\x5f\x14\xd7\x0a\x40\x0f\x67\x85\xd7\x5f\x98\xfd\x7d\xac\xdb\xc9\x71\x3d\x9e\x7a\x23\x63\xe0\x7b\x0f\xcc\x5f\x8c\x56\x2d\x4e\x42\xf9\xdd\xa2\x93\x27\x22\x82\x93\xc2\x67\x70\xde\x89\xa6\x3b\xbf\xaa\xa5\x90\xae\x03\xbc\xb5\x53\x01\xd5\x74\xda\xd5\x46\x33\xb3\x0c\xbd\x76\x84\xed\x3c\xc9\xd5\x6b\x8a\x91\x03\xe8\x18\x3f\xe9\x1f\xb3\x82\x19\xd3\xd5\x38\x17\x2f\xbe\xd6\x3a\xa2\x1b\xb0\xe7\x22\xe8\x27\x55\x66\xc0\xb8\xf6\x3f\x2a\x60\xf3\x5f\x88\xb7\x5f\xa7\xff\x0b\xf1\xf6\x3f\x
2d\x8a\xf3\x5f\x1b\xff\x2c\x7a\x30\x84\xfe\x7f\x58\x6a\xe7\xff\x0d\x49\xff\xf7\x0a\xee\xfc\x6a\xfc\x51\x01\x2e\x78\x7d\x74\x96\x66\x3c\xff\x89\xab\xe2\x20\xf9\x65\x05\x34\xfe\xe1\x4a\x3d\xff\xba\x3f\xe2\x27\xfb\x38\x96\x00\x13\xf1\xf4\x8a\x97\xaa\xa2\x5e\x4a\xae\xb4\x76\x19\x1e\xc0\x75\x46\xff\xab\x59\xc1\x0b\x90\x29\xd0\x78\x87\x4d\xfd\xe7\xf6\x99\xac\x2c\x6d\x08\x6a\xf3\x65\xf8\x7a\xf1\x5c\x45\x7d\xc2\xc3\x9f\x5b\x3a\x34\xed\x90\xbb\xc6\x56\x75\x34\x05\xc9\xef\x75\xfa\x70\x7f\xd2\xc2\xac\xb5\x3d\x95\xc8\x57\xfa\x6b\x3d\x5b\xbf\x5a\xc5\x69\x23\xb1\xc3\x91\xfd\x45\xbf\xe9\x4f\x63\x46\x10\x3a\x1a\x5a\x08\x3b\xe8\x19\x28\x31\xf1\x6f\x6a\xf9\xd7\xb7\xbf\x2e\xdb\xc5\xfe\x3f\x39\xa9\xfc\x06\x2e\x06\xe7\xe4\xfa\x4b\xe9\x5f\x46\xcc\xf5\x6b\x63\x86\x18\x5c\xd3\x56\xff\xa4\x63\x9d\x69\xde\xdd\x23\x72\x95\xeb\x3c\xcb\x86\x43\xf8\x9f\x4b\xb2\x29\x17\x67\xd7\x1e\x9e\xb1\x58\x34\x56\xc6\xe7\xf2\x8d\x05\x1a\x89\xdd\xc0\x9f\x75\x36\xd8\x4f\x06\x3e\x55\x3b\xd2\x04\x65\xeb\x5a\x27\x26\x26\x52\xdc\xf6\xfd\xf5\x09\x09\xef\xed\xeb\xeb\x93\x2f\xe9\xbf\x51\xdb\x0a\x2d\xdf\x1f\x87\x56\x3f\xc4\x59\xff\xa8\x36\xb7\xb5\xd5\xae\xa8\x32\x3c\xdc\x97\xc3\x9f\x01\x8b\x51\xcc\x65\xa0\xb6\xaa\x6b\xf5\x40\x62\x79\x20\x9f\xee\x2e\xea\x49\x8c\xca\x9f\x37\x1b\x48\xdd\x1a\x5c\xce\xf5\xaf\xd7\x46\x6e\xa1\x3c\x47\xeb\x86\xa0\x2d\x27\xb5\xd3\x4d\xf1\x9e\xfb\x7a\xda\xf7\x11\xbf\xff\x89\x14\x8c\xb9\x01\x20\x43\x03\x14\x02\x84\xb3\x4a\x28\xfb\x90\xe7\x0c\x0a\x49\xc6\x3d\x49\x26\x00\x52\xfc\x51\x86\xba\x7d\xc6\x4a\x55\xa6\x39\xf7\x8e\x62\x7c\x2f\x01\x0c\xca\x2c\xb3\x83\x04\x78\x47\xf8\x40\xed\x05\xa2\x50\x95\xfd\x74\xb6\xa2\xeb\x70\x0b\x0e\xb7\xc8\xff\x66\xc9\xa0\xc5\xee\xb0\xe0\x32\x20\xa4\x53\x1b\x6b\x61\xf6\x63\x7b\xbc\x13\xc8\x23\x8a\x4b\xe5\x69\x32\xda\xa4\x69\x02\xbb\xda\xa9\xec\x47\x6e\x44\x36\x4c\xdd\x7e\x5c\xda\x25\xfb\xa3\x5f\x68\x10\x52\x5f\x17\xa4\x50\x12\xba\x8a\xba\x62\x54\x48\x91\x77\xc9\x3d\x6d\xfc\xe1\x3e\x45\x5e\x67\x13\xcd\x8e\x25\xb8\x12\x35\xce\x0b\x8a\x7a\x2a\xf7\xf8\x70\x0f\x4f\xa8\x2c\x9e\xc1\xe1\x9e\x53\x57\xda\x16\x62\x0e\x7f\x7e\xc6\x7d\x69\xed\x24\x55\x77\x41\xbb\xa1\xfe\xa2\xcb\xa7\x4d\x29\xbc\x31\x15\x63\xf8\x46\xae\x73\xfe\x45\xfb\x8e\xd8\xef\xff\xda\xcd\x80\xa1\x81\x21\x76\x47\xf8\xac\xfb\xca\x33\xab\xbb\x30\x91\x0d\xf3\xcd\xb3\xcb\x5a\xc1\x26\x19\x92\x40\x28\xb2\xf5\xf0\x92\x7c\xc6\xa2\xbc\x3b\xfe\x2e\xd5\x5a\x94\xe3\x9b\x65\x07\xd3\x77\x0e\xc8\xfd\xc8\x89\x28\x30\x47\xcd\x18\x24\x67\x25\x72\x17\x75\x75\xd1\x77\x02\x9e\x73\xcf\x5a\x76\x75\x20\xfb\x7d\xc3\x8c\xd7\x19\x30\xb6\xa2\xab\x93\xc9\x1e\x6f\xf5\x77\xd1\x74\xfe\x22\x7d\x45\xa1\xe9\xa0\x9b\x50\xa1\x4d\x00\x59\xde\x9c\xda\x6d\x57\x2a\xe3\x16\x93\xc2\xd0\xd7\xfe\xe2\x8a\x40\x25\xd0\xa6\x9d\x62\x78\xc3\xb5\xe6\x9e\x6f\xc9\xa9\xf0\xca\xf7\xd1\xa5\x78\xa9\xf8\x4b\x92\x25\xa3\x74\x32\x09\xd0\xe6\xda\x8b\xee\xad\x9d\x9b\x2e\xb2\xe9\xf9\xc0\xb7\x83\x84\xec\xa7\xf4\xcc\x8a\x21\xf5\x9e\x39\x0e\xb1\xbb\x66\xcf\x2a\xae\x88\x7f\x01\xe4\x40\x96\x01\xd3\xd6\x56\xde\x2f\xa6\x97\xe8\xd2\x4e\xbd\x77\x98\x03\x13\x3f\x6f\x97\x7a\xa5\x00\x4f\x38\x9b\xcc\xdb\x2c\x68\xcb\x87\xa5\xa1\xea\x87\xd3\x7f\xec\x0f\x7d\xb9\x60\x9e\x32\x76\x37\x2a\xfd\x8c\x90\x95\xf0\xba\xe9\x03\xb9\xed\x55\x33\x3d\x02\x32\xb8\x7e\xa3\xd6\x5e\xfa\x2d\x3d\x1a\x02\x67\x08\x78\x93\x2b\x3c\xef\x69\x69\x05\x01\x4e\x6b\xb0\x5a\xf4\x49\x63\x60\x98\x97\x54\xb5\x22\x64\xef\x48\x28\xf4\x18\x39\x92\xd2\xf5\x7a\x30\x49\xce\xff\x70\x0b\x8c\x0f\x69\x02\xf1\xde\xf8\x4c\xfb\xc1\x26\xdc\x26\xdc\x97\x57\x50\x9f\x19\x4d\x2e\xde\xa6\x52\xb8\x81\x02\xcd\xb5\x9b\x1c\xfb\xf9\x66\x17\xec\x82\x08\x62\x38\x47\x7d\x7a\xec\
x82\x35\xf8\xc6\x19\x5c\x5e\x96\x4e\x26\x3e\x90\x9c\xf6\xae\xca\xec\x3f\x1b\xfc\x7e\xbe\x3b\xc3\x03\x1f\x4a\xa1\x42\x41\x9b\xfb\xaa\xa6\xcc\x92\x17\x9d\x2c\x6f\x3f\x9c\x19\xc5\xda\x20\x66\x0a\x30\x1a\x26\x5c\x69\x26\x4d\x33\xab\x2f\x68\x80\x12\x4b\xed\x3d\x11\x8c\x30\x89\x69\xad\xeb\x70\xd3\x75\x54\x70\xee\x8b\xeb\x8f\x1b\x98\xc9\xe4\x90\x9d\xb2\x57\x65\x87\x93\xd9\x54\xe0\x5d\xd5\x36\x37\x73\x94\x35\xec\x39\x99\x95\x87\x3d\xeb\x94\x64\xe8\xfa\x31\x4d\xee\x63\x20\x15\xf8\x57\x7d\x5a\x6f\xf8\x1d\x87\xd8\xaf\xc3\x4d\xbd\x4d\x71\xf9\xdb\x00\x66\xb2\x05\x3a\x7e\x99\x25\xf1\xd8\xa5\xcb\xd4\xc7\x28\x2f\x26\x92\x48\x67\xe9\xf1\xd9\x39\xf7\xb4\x85\x57\xda\x27\x52\x01\x1d\x79\x48\x85\xd4\xaa\x43\x16\x8b\xbe\xe9\x1a\x56\xbe\x4a\x41\x94\x88\x79\xec\x06\xe4\x34\x03\x16\x16\x5a\x9f\xf4\x40\xd2\x57\x2e\x35\x51\x58\x6b\x52\x69\x5a\x45\xdf\x21\xd6\x72\xd0\x22\x49\x98\xf3\x54\xb2\x15\x8f\xae\xb7\xc1\x98\xde\x32\xf0\x40\x35\xa8\x6b\xfb\xe2\x03\xed\x48\x09\x7b\x48\xfd\xc9\x64\x6a\xc1\x10\x30\x18\xb0\x3f\x54\x6c\x9e\xc4\xac\x54\xfc\x5d\x3f\x99\xa5\xf4\x77\x85\xbb\xd7\x20\x74\x44\x8f\x6b\xd6\x6d\xf6\x76\xaf\xc0\x18\xb0\x1c\xbb\x5c\xcf\xf1\xe0\x75\xf4\x71\x62\x65\xd0\xf9\xb1\xe9\xc2\x9e\xb6\xda\x79\x6a\x6c\x85\x5c\xd0\x52\xa1\x9f\x5b\xd2\x20\xe1\x11\x01\xdf\xf5\x03\x91\x70\xb2\x55\x89\x92\x3a\x2c\x70\xae\x6b\xb3\xfc\x85\xca\x9e\x18\xec\x71\x61\x9a\x50\x5c\xa2\x85\xa8\x69\xb4\xff\x30\x70\xc4\xcb\xd3\x31\x16\xb5\x2c\x2f\x7e\xb1\xf1\x2c\x4f\x38\x92\x82\x96\x46\xd7\x4f\x02\xec\x36\xa6\x78\xf4\xe6\x38\xa4\x7b\xe0\x6b\x9f\xba\x59\x92\xf9\xf7\xb5\xec\x0f\x2f\x2c\xcb\xf9\xff\x1c\xd4\xc3\x15\x43\x57\xd5\x23\xec\x2b\xb2\x20\xd6\x74\xb0\x9a\x7f\xfc\x82\x8d\xf0\xe8\x07\x70\x0a\x95\x87\xaa\x41\xef\xe9\x2b\x0f\x5c\xcb\x2a\x3e\xa0\x5d\xed\x74\xc3\xf8\x14\xe0\x71\x8c\xd1\x0f\x48\x6c\x82\xc6\x13\x78\xaa\xe0\x7b\x51\x8f\xb5\x96\xf1\xde\x23\x58\x98\x02\x88\x11\xc1\x36\x0d\xa5\x53\xf5\x72\xd2\x05\x49\x14\x3d\x2e\x81\x5d\xe5\x64\x22\xfb\x8d\xb0\x23\x69\x51\x80\x8a\x77\x9b\x0e\xf4\x4e\x76\xa7\x44\x0c\x7b\x9c\xbe\x9a\x6a\xb9\xf6\x3d\x9c\x6e\x35\x3b\x3d\x5f\xe3\x04\xb9\x4e\x14\x05\x05\x7a\x43\x06\xb1\x7c\x07\xcd\x94\x07\x5b\x89\xc7\x2d\xa2\xf7\x11\x00\x2b\xa6\x3c\x4c\xff\xf2\x06\xea\x8f\x0d\x6a\xbe\x00\xd4\xec\xa4\xa6\xa8\x53\x40\xff\xa5\x26\xdb\x88\x82\x33\xe2\x90\x31\xb5\xdd\x54\x36\xf5\x24\x7a\x29\x9e\xda\xaa\x4e\xcc\x8d\x4d\xf3\xe5\xd3\x7f\xad\x36\x2e\xf1\xc0\xfd\xf4\x89\x17\xf3\xb9\xc5\x9a\xd1\x27\x26\xa9\x85\x88\xbb\x49\x5a\xe9\x51\xca\x35\x35\x28\xb7\xba\x61\x20\xf1\xcb\x89\xb1\x1b\x15\x9d\x66\xeb\x1a\x2b\x85\xf1\xe2\xf9\x9e\x78\x76\x3b\xac\x5d\xa1\xc6\x8e\x82\x74\x11\x4b\xf8\x9b\x27\x9f\x39\x11\xb2\x96\x41\x23\xd1\xec\xc5\x17\x3e\x3b\xfa\x8e\x96\x9c\xa9\xe8\xf4\x34\x99\xc8\xfe\x00\xf1\x92\x3a\x5c\x70\x2e\xc3\x15\x36\xfe\x28\x83\x69\x60\x56\xa4\xc1\x26\x40\x98\xce\x24\x9b\x25\x6d\xbb\xe6\x8d\x53\x77\x49\x9e\x9a\x6e\x7e\x2a\x0e\xef\x71\xb8\x3e\x43\xee\xee\x89\x66\x50\xae\x36\x70\x4d\xb2\x50\xc3\x3a\x1c\x01\x30\x63\x72\xcc\x75\xbd\x36\x72\xcd\x8b\xc9\x25\x70\x62\x72\x29\xbb\x84\x37\x83\x25\x40\x64\xe8\xff\x3c\x7f\x59\xeb\x7b\x47\x39\xc8\x9a\xa4\x18\xd1\x31\xc7\x5a\xf1\xa8\x95\xb1\xc9\x1e\x6f\x92\x17\x38\xf1\xb6\x24\x1c\xea\xa6\xa6\xb4\x64\x06\x88\x16\x4a\xb8\x56\x34\x44\x6e\x53\xa5\x31\xc4\xa8\x32\x81\x51\xd9\x55\x8d\xf1\xf6\x9b\x58\x3e\xb2\x7b\xce\xdd\x05\xab\x82\x78\x8d\x99\xd3\xc8\xc9\x87\x5c\x73\xee\x50\xc1\xf4\x31\x71\x9d\x95\x06\x08\x9b\x87\x12\xe4\x19\x37\x8d\x78\x31\xbd\x66\xa6\x76\xbb\x31\xa0\x37\xbd\x15\x14\xb6\xf5\xa5\x00\xd6\xfe\xbd\x33\xc5\x9f\x4d\x60\xd4\xa4\xe7\x43\xdc\x74\x6e\xdd\x9b\xca\x94\xd3\xba\x84\x2d\xb1\x94
\x94\x11\xd1\x10\xcb\x36\x8b\xf8\x53\xfe\x93\x2c\x18\xad\xa3\x01\x30\x14\xce\x0d\x14\x3e\x1b\x8d\x46\xca\x83\x6f\xdd\x82\x5f\x51\xde\x96\x35\xfd\xe9\x1a\x7a\x72\xed\x6a\x15\xd1\x59\xe4\xa4\x69\xe4\x5f\x9c\x2d\xc9\x91\x70\xdd\x37\xfe\x52\xd4\x8d\x78\xac\x46\x43\xeb\xf4\xd9\xf4\x0e\xec\x5b\x05\xa3\xd4\x1e\xa1\x48\xf6\x05\x27\x2e\x33\x61\x6f\xf0\x88\x62\x5f\xd3\x19\x4e\x9b\x27\xa7\xff\x83\xaa\x67\x65\x5b\x01\x10\xe0\x23\x70\xd1\x87\xe0\xb5\x87\x20\xd0\xb9\x86\xb9\x7d\x6a\x7b\x13\x6d\x83\xfa\x5e\x5b\xe6\xfa\x48\x75\xae\xdd\xf7\x68\x07\x44\xf4\x4c\x80\xb0\xb6\xd7\x66\xc3\x06\xb4\x20\xf5\xe3\x8d\xdb\x5d\xa5\x29\xb2\xc2\x13\xd5\x92\x90\xb1\x3a\xba\xfe\x96\xf7\xdb\xec\x7f\x8e\xe8\xc7\x2e\x85\xeb\x56\x4d\x4d\xce\x08\xbd\x7e\x31\x5d\x20\x59\x8d\x1c\x98\x01\x53\xaf\xc3\x68\xdc\x06\x2c\x23\x19\xd7\xb4\x4c\xb0\x0f\x01\xd6\xbc\xad\xfa\x01\x89\x95\xc1\x5f\x08\x9f\x84\xad\xb6\xf2\xd2\x5b\xcd\x39\x83\x8d\xe6\xc5\x55\xfa\xa2\x5c\x83\x61\x30\x87\xcf\x27\xcb\xb4\x3b\x50\xce\xd0\x4e\xed\x37\x46\x68\x33\x21\x40\x55\x4d\xae\x63\x2a\x14\xb4\x20\xb5\x86\x6b\xa3\x89\x6d\x7f\xe8\xf6\x43\xf7\xa4\x1b\x22\xf4\x12\xf4\x47\xb0\xc8\xad\x29\xfd\x3e\xb3\xba\x4c\x03\xc0\x65\x0e\x44\xf4\xe0\x94\x17\x2d\x5d\x78\xf1\xbd\xf7\x5a\xf6\x42\x41\x77\x0e\x9a\x3b\xe8\x43\x62\x93\x31\x21\xb1\x2a\x59\x2c\xfb\x80\x22\x98\xc1\xc4\x9c\xc2\x1e\xc0\x52\xbe\x38\x52\x07\x55\xdb\xd5\xb1\xe5\x41\x28\x3c\x0a\xf9\xe2\x25\x3d\xbc\x80\xf7\x11\x7f\xb1\x58\x76\x7f\xff\xc4\xdc\x72\x16\x4f\x0d\x0d\x16\xa9\x26\xab\x49\x7b\x90\x0d\xcb\x72\x34\x60\x1a\xf6\xaa\x2c\x31\x00\xb7\xa6\x0a\x16\xed\xa9\xfe\xa9\xb0\xbf\x0e\x41\x41\xb0\x68\xc3\x98\x52\xac\xaa\x2c\x8f\x9d\x0c\x1f\xca\x70\xd5\x13\x19\x58\x60\xd0\x2c\x5f\x71\x18\xb5\x7f\x1a\x83\x8c\x82\x0f\xcc\x30\xd7\xc9\xe2\x57\xc8\x73\x79\x70\x1f\x7d\x92\x6d\xb8\xe2\xe7\xb9\x88\xd3\xd9\xaa\xea\xdd\x06\x18\xd9\xf9\x43\x80\x57\x08\x7d\xf0\x2a\xa8\xed\x3d\xa9\x2e\xe8\x5c\xb2\x7b\x40\x2f\xd7\x71\x33\x0f\x9c\xb4\xd9\xf9\x38\x5f\x4e\xf4\x89\x67\x47\x13\x13\x1b\xf7\xa8\xa2\xc1\x20\x5f\xe0\x5f\xe6\x9b\xa8\x2a\xa3\x57\xf8\xbe\x71\x60\x19\xaa\x3a\x8a\x43\xf6\x5b\x90\xd2\x70\x08\x59\x06\xac\xbe\x26\xe3\xbb\xd6\xc0\x22\x1e\x8b\xe3\xf3\x2c\xcd\xe2\xe4\x70\xe5\x3a\x5b\x7b\x49\xf5\xc7\x93\x22\x20\xf3\x40\x67\x44\x8f\x90\xbf\x91\x84\x3f\x6d\x06\x6c\xa4\x16\x6c\x56\xf5\xa4\xfc\xd9\xfe\xd6\x5a\x01\xa6\xca\x7e\x05\x8c\x65\xcc\x69\x61\x4f\xde\xba\x77\xea\x31\xf5\xdd\x67\x13\x85\x96\x93\x14\x76\x30\x58\x96\x63\x56\x4d\xc5\xa6\xae\xec\x37\x95\x8d\xe6\xa3\xbb\xa8\x14\x6b\xe9\x81\xcc\xab\x57\xbf\xad\xae\xf9\x66\x1c\xe8\x0d\x3f\xf8\xde\xda\x63\x0d\x33\xb3\x36\x80\x68\xf0\x78\x13\xe3\x0c\x00\x25\x36\x01\x0c\x14\xef\x65\xbf\x4c\x11\x35\x90\xb2\x67\xc0\x34\x4d\x60\x23\x25\x83\x87\xb1\x43\x41\x91\x7c\xad\x88\x1e\x5d\xcd\x20\x70\xa2\xae\x54\x9c\xdb\x4e\xf3\x67\xc8\x61\x21\xcc\x5d\x81\xb7\x7e\x56\x95\x03\x6c\x94\x9b\x34\xc0\x3a\x6d\x37\x3e\x22\x57\x3e\xa2\xa4\xed\x9a\x71\x3c\xe2\x84\x95\x36\x82\x15\x2f\x9f\x3f\xaa\x59\xcd\xb9\x40\xed\x2a\x27\xa5\xed\x36\xa6\xc8\x3d\x25\xfb\xea\x38\x4f\x2e\x79\x19\x2a\xc2\xba\x5c\x62\x85\x22\xfd\xed\x2e\x33\xbe\xfd\x35\x04\x64\x13\x64\x55\x36\xcc\xb7\x29\x0c\x20\xb1\x0b\xad\xda\xba\x3c\x8f\x86\x80\xd5\xdc\x57\x53\x49\x5e\x72\x82\xb3\x73\xee\x3a\xd3\x3c\x05\x0f\x6b\x5e\x05\xc5\xdd\xdf\x02\x6c\x1d\xef\xe2\x8d\xf5\x0a\x9f\x4e\x2a\x55\xaa\x32\x91\xe8\x63\x75\x96\x24\xbf\x05\x23\x2b\x86\x2c\xde\xeb\x6d\xca\xb6\x8d\x1a\x0e\xec\x91\x4a\x83\x98\xd5\x5e\x9a\xbc\x48\x7b\xe4\x32\x87\x0f\x65\x1b\xb9\xc7\x10\x3b\xd2\x2d\x1b\xe5\x0a\x5f\x48\x54\x89\x9b\x95\x3e\x75\x8f\x96\x91\xd9\x11\x4d\x75\xce\x4d\x9c\x1f\x79\x37\x41\x5d\x46\x7
2\xcd\xdd\x24\xb2\x6c\xcc\x66\xd1\xf7\x51\x40\x14\x53\xbe\xa0\x22\x36\x5d\xfc\xa4\x65\xb1\x02\x73\xe4\x77\xa9\xd7\x0d\xc0\x6b\x10\x33\xb0\x87\xec\xb4\xd0\x10\x21\x46\x73\x04\xf4\x75\x41\x02\xf3\xa4\x43\x94\x02\x74\x09\xb6\x00\x12\x33\x16\x10\xc1\xc7\x72\x0d\x5f\x57\xd8\x73\xc4\x66\x3f\xc8\x0d\xe3\x6c\x01\xfc\x05\x29\x72\x07\xb3\x03\xbe\xf7\x73\x91\xc9\x92\xdb\xef\x75\x54\xdc\x5d\x9e\xe8\xcf\x85\xab\xcc\x69\x5f\x20\x7d\x64\xf9\x51\x49\xd6\x11\x1f\x64\xc4\xd1\x21\xb1\xc7\x81\xe5\x78\x8c\x7c\x6f\x5e\x0c\x14\x29\x4c\x2a\x72\x67\x9a\xe6\xc9\xe3\x0f\xc4\xa4\xe5\x09\xd7\xb4\x20\xfb\x8d\x9e\x24\x47\xbe\x11\xb3\x87\x32\x8a\x5a\xa6\x16\x57\x71\xf3\x69\x09\xde\x4c\x2f\x3b\x66\xa5\x5b\xe4\x8d\x14\x7e\xc5\xea\x1f\x33\xa7\xb0\x64\xba\xc8\xf0\x63\xd1\x05\x6d\x7e\x05\xa8\x5b\x26\xc3\x86\xf5\x3c\x21\x7f\x78\x15\xc0\xd6\x28\x31\x31\xce\xe6\xf8\x26\x65\x5f\x35\x3c\x4a\x64\xe8\x73\x2d\x9a\x28\xb2\xc3\x2b\x33\x39\xcb\xba\x8c\x75\x8a\xa2\x77\x16\x7b\x13\x60\x9e\x46\xd6\x4c\x10\xa2\xcd\x57\x04\xbb\xea\xc5\x0e\xc4\x9b\x66\xe3\xdb\x64\x8f\x7c\x0b\xce\x8d\x18\x1d\x2e\x51\x7f\x61\xa2\x64\xe3\xe9\x21\xb8\x4f\x74\xd3\x8b\x11\x1b\x17\x8b\x51\x9a\xbb\x5c\x22\x6c\xc0\xa4\xbe\x57\xcb\xd5\x53\x8a\x7b\x95\xfe\x7c\x46\x6e\xee\xfb\x21\xec\x77\x15\xf0\x21\xa0\x37\x3c\xea\xa1\xb2\x0a\xd0\x55\x1c\x7f\x8c\x27\xc1\x1e\x9c\x8b\xf7\x70\x9d\xee\xaa\x28\x37\x6e\xaa\xe7\xe8\x4c\xb8\x7f\xfd\x45\x7e\xa7\x9d\xbf\x7b\x75\xb9\x9d\x23\xb3\xcc\x33\xb5\x85\x53\xcd\x3d\x5f\x5e\xa9\xb9\xf2\x86\xd3\x29\xa3\xb7\xd8\x22\xd3\x1a\x9d\xde\x21\x6c\xa7\x33\xeb\x93\x90\xbb\x08\xd9\xc4\x37\xef\xa6\x80\x8e\x7a\x7b\x62\x72\xc6\x12\xc2\x94\x2f\x34\xa9\xb4\x51\xe5\x89\x00\xfb\xc3\x0a\x98\xc3\xe3\xeb\x57\x7b\x5d\x97\xbe\x83\x1b\xcd\xcd\xf2\xb1\xc2\x9a\x76\xd1\xf5\x1a\x08\x09\xb5\x65\xf9\xcd\xbc\x89\x00\xf9\x27\x79\xbb\x1f\xaf\xa4\xd6\xe8\xad\x8f\x45\xcd\xde\x6b\xaa\x6b\x17\x5a\xa9\xa7\xc1\x83\xeb\x4e\xce\x52\x37\x86\x69\xb8\x80\x66\x1b\xf7\x34\x3b\xd2\x75\xf7\x4c\x11\xb2\xc9\x7d\x25\x7f\x64\x0d\xb2\x3b\x23\x1e\xe0\xdc\x37\x1e\x93\x83\xfb\x0e\xeb\x0a\x34\x04\xc2\xb7\x2a\x83\xc2\xd6\x47\xc2\x45\x8e\x8b\xb7\xa7\xaf\x2e\xc7\xec\xb4\x76\x60\x22\x54\xae\xbb\x65\x27\xe6\x55\x25\x0d\xa3\x38\xe3\xdf\x27\xfa\xe5\xbe\x55\x49\x67\x7f\x94\xe0\x31\xb5\x4b\x9e\x0d\xcc\x3c\x10\x86\x70\xc7\xd7\x8e\xf6\x8f\x81\xf1\x07\xc7\x8a\x67\x1a\x77\x4e\x35\xd7\x99\x47\x81\x4b\xe6\x83\x3b\x8a\x31\xed\xf9\x96\x1b\x2b\x17\xc9\xbb\x11\xab\x5d\x2f\x1a\x4e\x99\x1d\x14\x1b\x54\x22\xe6\x81\xbf\x7d\xd0\xa7\x7a\x5f\xf8\x08\xec\xfa\xca\xe0\x35\x3e\xea\xf0\x19\xc1\xd4\x73\x73\x0c\x2b\xfd\x4d\x69\xcc\xba\xbb\x7e\xba\x81\xce\x31\x74\x7f\xc7\x36\x9f\xd7\x20\xb0\xf6\xb1\xf0\xf1\xa0\x62\xb5\xe6\x04\x08\x9f\xda\x80\x59\xef\xba\x0a\xa8\xed\xe8\xd5\xcb\x10\x17\x9b\xad\xc2\xbf\xe5\x78\x24\x3f\xc1\x5a\x43\xef\x00\xf4\xa1\xfb\x37\x1e\xf6\x84\xc4\x46\x05\xa9\xa1\x15\x0c\x05\xb6\xed\x40\x49\x8f\x47\x4d\xa2\x48\x47\xec\x19\xa1\x54\xae\x58\x54\xb4\xd8\x60\xff\xdf\x18\x38\x90\x91\x12\x38\x90\x0d\x0b\xa9\x46\x6e\x7b\x93\x61\x13\x63\x7e\x1e\xcd\xb6\xbe\x64\x9f\xfb\xff\xc9\x19\xb9\x3d\x2f\xf2\xf8\xdc\xb2\xfa\x95\x5a\x05\x55\xb9\x67\x52\xc3\x75\xf3\xce\x7c\xd5\xdb\x62\x77\xb1\xb0\x56\xf1\xbb\xc3\x9e\x0b\xb5\xcb\xe5\x0e\x5e\x79\xb5\x42\xea\x4f\xbb\xf5\xe2\xff\xed\xb2\xd6\x0a\x4c\x12\x1c\x8c\x12\xcf\xe2\x4b\x9f\x27\x7d\x77\x28\x0a\x0d\xdc\xcd\xc5\xc0\x70\x60\xdd\xa1\x75\xbf\x4c\x3d\x19\x19\x18\x6e\x84\xde\x08\x9d\x74\x4f\x93\x81\x81\x61\x37\xd7\x6e\xaf\x4b\xd6\xac\x0c\x0c\x09\xe1\x89\xe1\x9e\x55\xcb\x19\x18\x18\xde\x68\xbd\xd1\xf2\x7c\x2f\xc2\xc0\xd0\x10\xdb\x1a\xab\xfa\x72\x2e\x13\x03\xc3\x86\xd5\x1b\x56\xab\xec\x0d\x66\x
60\x60\x28\x13\x2d\x9b\xba\xa4\x1c\x6a\xd8\xa4\x2f\x7e\x50\xc3\x66\x5d\xb8\x7e\x2e\x76\x91\xb9\x48\x4b\xa2\x44\x6c\x52\xad\x78\x40\xfc\x86\xab\x5b\x63\xfd\x6a\x67\x6a\xce\xe6\x98\x69\xe0\x5f\x27\x2e\x7b\xce\xe7\x5c\xe5\x3e\x59\x96\xf7\xd5\x07\x9f\x40\x0e\x04\x38\x70\x22\x34\x7c\xbd\x7a\xdf\x93\xb3\xa7\xe2\x6a\xce\x04\xeb\x41\xfa\xce\xaf\xb6\x15\x05\x7e\x6b\xdc\x35\xf3\xe8\x71\x1b\x46\x06\x86\x27\x93\x56\x4f\x58\x3d\x2f\xbb\x3c\x15\xd4\xab\xad\x39\x23\x0c\xee\x23\x6a\xed\x3c\xc5\x1a\x96\xf6\xec\x92\x16\xa4\x2f\xf9\xca\xf7\x56\xd7\xd4\xc8\x95\x6b\xb5\xec\x1e\x68\x6a\x43\x96\xe8\x6c\xdd\xa1\x6d\xd0\x97\x73\xba\xe2\xd3\xae\x99\xe9\xab\x8f\x1f\xaf\xdc\xfd\x9d\x81\x89\x98\xa5\x3d\xc3\xe2\x1c\xce\x96\xd7\xe7\x2f\x9f\x08\x3d\x6e\x60\x0c\x01\x9b\x79\x8a\x44\xf7\x9f\x6d\x9f\x3d\xdf\x38\x3f\x0c\x2a\x34\x79\xb5\x45\x80\x32\xc8\x28\xa7\x5c\x3b\x5c\x76\x1a\xca\xf5\xbb\x43\x6c\x65\x4f\x32\x9b\x9d\xbb\x3d\x82\x91\x9c\xb3\x34\x89\xd2\x40\xe6\x39\x9d\xb8\x34\xe0\x54\x3a\x21\x15\x9c\xd3\x1a\x2c\xa5\x79\x5e\x4b\x33\x70\xf7\xd5\x82\x04\x3d\x5d\xfd\x5c\xd6\x39\x25\x34\x01\x02\x00\x00\xff\xff\x03\x9e\x9e\x08\xc2\x93\x00\x00") + +func filesImagesReceived_approvalPngBytes() ([]byte, error) { + return bindataRead( + _filesImagesReceived_approvalPng, + "files/images/received_approval.png", + ) +} + +func filesImagesReceived_approvalPng() (*asset, error) { + bytes, err := filesImagesReceived_approvalPngBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "files/images/received_approval.png", size: 37826, mode: os.FileMode(420), modTime: time.Unix(1509717368, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _filesLogoSvg = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x7c\x55\xdf\x6f\xdb\x38\x12\x7e\x76\x81\xfe\x0f\x3c\xdd\xd3\x01\xe4\x98\x43\x52\x12\x99\xd6\x57\x5c\x83\xa2\x29\xd0\x02\x07\x2c\x90\xd7\x45\xca\xa8\x76\x50\xd5\x0e\x2c\xc7\x49\xba\xd8\xff\x7d\xf1\x0d\xa9\xc4\x08\x76\xfb\x20\x51\x1a\x0e\xe7\xc7\x37\xdf\x0c\xdf\xbe\x7b\xf8\x31\xaa\xe3\xb0\x9f\x6e\x76\xdb\x55\xc3\x64\x1b\x35\x6c\xf3\xee\xfa\x66\xbb\x5e\x35\x77\x87\x6f\x26\x36\xef\xfe\xfb\xfa\xd5\xdb\x7f\x19\xa3\x3e\x0e\xdb\x61\x7f\x75\xd8\xed\xcf\xd4\xff\xae\x77\x5f\x07\xf5\x69\x1c\xef\xa6\x83\x88\x14\x27\x62\xb2\x5a\xfd\x76\xf9\x51\x7d\x78\xb8\xdd\xed\x0f\xea\xff\xe3\xdd\xda\x7c\xda\x2a\x12\xe1\x65\x71\x72\xa6\x3a\xb2\x56\xbd\xbf\xbb\x19\xaf\x95\xfd\x8f\x52\xc6\xc0\xfe\x74\x5c\x9f\x86\xc1\x8d\xba\xb9\x5e\x35\x9f\xaf\x1e\x87\xfd\xef\xdc\xa8\x87\x1f\xe3\x76\x5a\x35\x9b\xc3\xe1\xf6\x6c\xb9\xbc\xbf\xbf\xa7\x7b\x4f\xbb\xfd\x7a\xe9\xac\xb5\xcb\xe9\xb8\xae\x2a\x67\x0f\xe3\xcd\xf6\xfb\xdf\x29\x72\x4a\x69\x29\xbb\x8d\x7a\x58\x35\xf6\xf6\xa1\x51\x8f\x65\x7d\xfd\x6a\xa1\x8e\x37\xc3\xfd\xfb\x1d\x36\x94\x55\xec\xbd\xf2\xae\x51\xd3\xe1\x71\x1c\x56\xcd\xb0\xbd\xfa\x3a\x0e\xe6\xeb\x55\xfe\xbe\xde\xef\xee\xb6\xd7\x67\xdb\xe1\x5e\x3d\x2b\xbe\x11\xe7\x67\xd3\xed\x55\x1e\x56\xcd\xed\x7e\x98\x86\xfd\x71\x68\x24\x2d\x58\x50\x87\xc7\xdb\x61\xd5\x1c\x86\x87\xc3\x32\x4f\x13\x36\x16\x34\x1d\xec\x1f\xdf\x6e\xc6\xf1\xec\xdf\x1f\x42\x64\xd7\xbe\xf9\xb3\x48\xb9\x4a\x03\x07\x1b\x1c\xa4\x6f\x97\x62\x05\xe6\x6e\xaf\x0e\x1b\x95\xc7\xab\x69\x5a\x35\xd3\xc1\x36\xea\x7a\xd5\x7c\x61\xef\x35\x07\x72\xd9\x1a\x47\xce\x30\x75\x26\x10\x9b\x80\xf7\x68\xa2\x66\x0a\xd9\x52\x6f\x2c\xf5\xda\x1b\x8f\xb7\xa8\xf8\x0c\x55\xa6\x5e\x33\xf5\xf8\xd7\x96\xd8\xb4\x14\xb3\xb1\x14\xe5\x61\x79\x9c\x71\x94\xb0\xbe\x7e\xb5\xc8\x86\x89\xb5\x78\xd2\x96\x82\xf1\x9a\xc9\x67\x2b\x92\xa8\x1d\xf5\xa6\xa5\xa0\x5b\x72\x50\xec\xb0\x69\x5a\xf2\xda\x53\x30\x1d\x05\xed\xa9\xdd\x1
8\x77\xe9\xe2\x86\x29\x65\x4b\x51\x5b\x5d\x4c\x45\xec\x69\xa6\x36\xe3\x9f\xc9\xe9\x40\xad\x76\xd4\xea\x9e\x1c\x56\x38\xb7\x08\x52\x77\x94\xe6\x77\x76\x88\x46\x07\xf2\x86\xc9\xcb\xea\x11\x8f\xb1\xe4\x8d\x25\x67\x2c\x25\xf9\x66\x72\x99\xe5\x3f\x6a\xe4\xc3\xda\x01\x0a\xd1\x0c\x06\x89\x17\x4d\xe0\xe7\xe1\x8a\xc5\x00\x52\xe2\xa2\x4e\xfc\x74\xa0\x9f\x15\x8d\xa5\xd6\x30\xa5\x73\xf6\x8e\x92\xe6\x0e\xa1\xa3\x1c\xc8\x79\xae\xcb\xe7\xf9\xe3\xa7\xfa\xc2\x9e\x6b\xad\x74\x89\xa6\xd7\xa5\x66\x09\xeb\x06\x1e\x8b\xf3\x50\x81\x61\x79\x3b\x94\x10\x85\xac\x21\x79\x53\xa4\x7e\x83\xa3\x4f\xea\x51\xd4\x81\x9a\xd4\xd0\x9d\x9c\x70\xf5\x84\xbb\x60\xd7\x66\xa9\xa1\xc6\xbe\xd7\x62\x47\xc0\xd5\x02\x82\xb0\x01\x5b\xfd\xc6\xf4\x19\x24\xd2\xd6\x44\x02\x5c\xc9\x30\x50\xf2\x94\x8e\x86\x43\xc5\xbe\x13\x80\x7a\x1d\x29\x9a\x40\x69\x0c\x04\xe6\x81\x72\x2d\xb0\xd5\x2c\xd8\x82\x64\x1d\xd6\x49\x7c\x4b\x6c\xad\xb6\xd4\x95\xb2\x46\x3d\x3f\x4e\x60\x61\xed\x89\x85\x56\x60\x4d\x24\x6f\x00\x6d\x24\x3f\x32\x53\x32\xee\x9c\x3d\xb8\xc0\x08\x41\x30\xf5\x7a\x86\xf6\xf3\xfc\xf1\xb3\x59\xa2\x67\xd6\x68\xb7\x17\x9d\xc3\xa5\x73\x5a\x60\xe5\x29\x6d\x22\xc5\x63\xa0\x70\x61\x2f\x03\x75\x9b\x96\xda\x4b\x88\x8b\x81\x7f\x38\xeb\x12\x6a\x86\x2e\x2a\x2c\x2e\x50\x83\x56\x16\xe5\x95\x64\xb9\x26\xef\x05\x53\x0b\xb3\x76\x63\x1c\xf9\x2c\x42\x50\xa9\xab\xdc\x43\xbf\xa1\x0a\x8b\x5c\x69\xeb\x2a\x85\x43\x5d\xfb\xa3\xf1\x9b\x48\xe1\xc8\x16\x2a\x9d\x74\x1f\x97\xd6\x11\xfe\x30\x71\x16\xa0\x0b\xdd\x03\xc0\x15\xe6\xda\xc2\x06\x6c\x80\x69\xad\x9c\x74\xa2\xd0\xce\x1e\xa3\xf4\x3e\x8b\x52\x29\xa7\x85\x39\x11\xa0\xa3\x24\x88\x20\x51\x26\x88\x5b\x53\xac\xf7\xf2\xed\x29\x1a\x47\xad\xcc\x86\xfa\x24\x78\x80\x10\x9b\x12\x70\x3d\x05\xfa\x94\x66\x0b\xc2\xf5\x45\xb6\x18\x33\x12\xb5\x97\xa0\xb0\x93\x72\x3d\x21\xb8\x3a\x69\x93\x62\xaa\xba\x40\x4b\x48\xda\x5e\x5c\x81\xd2\x12\x93\x2e\xc1\x26\x8c\x20\x09\x96\xa5\x36\x49\x4b\x6d\xc4\x0f\xc6\xd8\x62\x2a\xb9\x06\x5d\x80\x00\x2d\x05\x31\x34\x45\xae\xf0\xea\x42\x55\x4c\x50\x99\x64\xa3\x8c\x34\xf4\xa2\x34\x57\xad\x91\x2e\x35\x02\xac\xed\xbc\xc1\xc2\x7c\x5b\x3b\xbc\xba\x7c\x2e\x7a\x2f\x45\x17\x66\xe4\x93\x12\xc7\x3a\x4e\xe4\xd4\xbc\xe1\x66\x72\xd4\x8d\xf6\xdc\xa3\xa8\x09\x2d\x82\x70\x12\x16\xa7\x53\x7e\x31\xc8\xb8\xe2\xcd\xc5\xb5\xcc\x6c\x89\xa8\x80\xd0\xce\x55\x0f\x95\x12\x49\xb8\xc2\x65\x03\xe9\xf7\x46\x7e\x65\xae\x3a\x0a\x93\xe4\x28\x11\xd4\xde\x64\xc9\xdd\x55\x04\x21\xf5\x32\xa0\x67\xcc\xe4\xe6\xa9\x93\x7b\x51\x31\xed\xe5\x14\x97\x51\x04\xf3\xe7\x2e\xa2\x8b\x58\xbb\x58\xfb\x49\x3f\x75\xd6\x2f\xfb\xaf\xc3\xc0\x0d\xd4\x5d\xa6\x8d\xe9\xa8\x3b\x72\xc2\x34\x44\xeb\x16\x01\x3a\xf9\x02\x4a\xbf\xb4\x12\x25\xcc\x88\xb9\x5e\x2e\xb4\xfa\x24\xe1\x42\xb9\x39\x5d\xad\x6e\x30\x55\x6e\x9e\xe5\xe1\x85\xdc\x8f\x18\x87\x98\x51\x48\xfa\x3c\x26\xea\xc1\x44\x1d\x13\x45\xdd\xa2\x6e\x56\xb7\xb9\x54\x89\x2b\x41\x5c\xb5\xe1\x4f\xe4\xbe\xae\x52\xc1\x12\x5b\x89\x4c\xe2\xb2\x9b\x40\xee\xe8\x3c\x75\x1b\x74\xd1\x25\x07\x99\xa3\x8b\x72\x39\x69\x5b\xdb\x5f\x2e\xae\xd1\x74\x75\x50\xe6\x13\x5f\xad\x2e\x2c\x04\x03\xd2\x74\x32\x36\xc4\xf9\x45\x74\xb9\x24\x57\xc9\x57\xba\xba\xd2\x75\xbe\x58\x3b\x53\xe2\x93\xa1\x00\xf7\xe2\xca\xb0\x93\xeb\xad\x60\xc3\x75\xae\xcb\x24\xcc\xcf\x39\xf4\xe8\xca\x23\x3f\x65\x80\xb1\x2b\xd7\xe2\xcb\x4c\x9f\x24\x5e\xde\x88\x9b\x2b\x9f\xb8\xda\xf7\x75\x85\x3f\x7f\x22\x77\x75\x0f\x4f\x8b\xba\x68\x76\xe7\x11\xd0\x70\x4f\x51\x47\xe9\xea\x48\x5e\x3f\x91\xa0\xde\x18\xcb\xb5\xbc\xa7\x23\xd6\xbf\x02\x00\x00\xff\xff\x34\xdf\xd0\x3d\x22\x0b\x00\x00") + +func filesLogoSvgBytes() ([]byte, error) { + return bindataRead( + _filesLogoSvg, + "files/logo.svg", + ) 
+} + +func filesLogoSvg() (*asset, error) { + bytes, err := filesLogoSvgBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "files/logo.svg", size: 2850, mode: os.FileMode(420), modTime: time.Unix(1509717368, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _filesMain_stylesCss = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xa4\x56\xdb\x6e\xe3\x36\x10\x7d\xd7\x57\x0c\xbc\x28\xd0\x06\xa6\xa3\x8b\xb3\xdd\x2a\x2f\x4d\x81\xe6\x35\xc0\xa2\x3f\x30\x12\xc7\x16\x11\x8a\x14\x28\xfa\x92\x1a\xfe\xf7\x82\x12\x25\xeb\xe6\x4d\x8b\xbe\x24\x06\x35\x3c\x3c\x33\x73\xe6\x90\x85\x2d\xe5\x1a\x32\xcd\x3f\xd6\x50\x44\x6b\x28\xe2\x35\x14\xc9\x1a\x2a\xb8\xec\xb4\xb2\x6c\x87\xa5\x90\x1f\xe9\xea\xbb\xce\xb4\xd5\xab\x6b\xe0\x42\xe1\x92\x69\xc3\xc9\xa4\x4a\x2b\x7a\xbe\x06\xb8\xc9\xc8\x22\xcb\x0c\x2a\x0e\x97\xe0\x1a\x8c\x56\xd2\x14\x77\x96\x0c\x5c\x02\x00\x80\x5c\x2b\x4b\xca\xa6\xb0\x72\x11\xab\xe7\x66\xb1\xd2\xb5\xb0\x42\xab\x14\x30\xab\xb5\x3c\x58\x6a\xd7\x8d\xd8\x17\x36\x05\xb6\x0d\xab\x73\xbb\xd2\x90\xaa\xc5\xdf\x94\x42\x14\x77\x8b\x96\xce\x96\x59\x83\xaa\xde\x69\x53\xa6\x70\xa8\x2a\x32\x39\xd6\x1e\x25\xd7\x52\x9b\x14\xbe\xfc\xb9\xfd\x16\xc5\x4f\xed\x9a\x4f\x00\xa2\xea\x0c\xb5\x96\x82\x8f\x3f\x57\xc8\xb9\x50\xfb\x14\xc2\xea\x0c\xdf\xba\x73\xda\x4d\xcc\x20\x17\x87\x3a\x85\xc4\xad\x5f\x83\x8d\xc2\x63\x86\x86\x29\x3c\x76\x3f\x1b\xde\x20\x05\x60\x9f\x75\x4b\x21\x49\x92\x16\xaa\x44\xb3\x17\x8a\xf9\x04\xa3\x64\x29\xbf\x27\x8f\x5f\x0b\x4e\x0e\x1d\x2e\x03\x12\x99\xb6\x56\x97\x29\x34\x2d\x18\xa1\x45\x61\x75\x1e\xee\x12\xe5\x1e\x2e\x13\xee\xee\x3c\x38\x09\x6e\x8b\x34\x76\xe9\x15\xd4\x6c\x6d\x7e\x1f\xc9\x58\x91\xa3\x64\x28\xc5\x5e\xa5\x50\x0a\xce\xe5\xc2\x19\xcf\xd7\x60\x70\x8a\x14\x3e\xd5\x5b\x2f\x0d\x49\xb4\xe2\x48\x83\x26\x79\x48\x49\x3b\xeb\x52\x0b\x1e\x1f\x46\x08\x75\x85\x0a\x2e\x52\x28\x62\x9e\x51\x12\xdf\x67\xe4\xab\xfa\x25\x0c\xc3\xe7\xeb\xc3\xe3\x98\x0c\xc2\xa5\x67\xd2\x11\xb9\x4e\x42\x36\xf9\xc1\x18\x52\x96\x09\x4b\x65\x9a\xd1\x4e\x1b\x9a\x65\x31\x56\xa4\xd5\x55\x0a\x2c\xba\x29\xa2\xed\xc2\x6d\xa5\x2d\x2a\xf4\x0d\xcd\x30\x7f\xdf\x1b\x7d\x50\x7c\xa2\xc0\xdb\x20\x80\x9f\x02\x57\x95\x0e\x6a\x54\xdb\x8d\xd5\xfb\xbd\x24\x56\x9f\x84\xcd\x0b\xcf\x70\x27\x35\xda\xae\x94\x3d\xb5\x68\x46\x2d\xea\xf1\xf6\x46\x70\xb6\x93\x74\x66\x39\x49\xe9\x61\xba\x41\x68\x74\xe4\xd4\x26\x45\x6d\x99\xa1\x4a\xd7\x3e\x62\xde\xb8\xcf\xf6\x7d\xaa\x85\xc7\x87\x52\xa8\xae\xc5\x51\xe8\xc4\xf4\xf0\x78\x6f\x2e\xe9\x2b\x21\xf1\xd1\x04\x8e\xc4\x3f\x1e\xd8\xf8\xa9\x3a\x37\x7f\xe6\xa4\x52\x89\xb5\x65\x79\x21\x24\x1f\x25\xdf\xc3\x2d\x1c\x3a\xc6\xc8\x0e\xd6\x6a\xf5\x6f\x24\x12\xf7\x8e\xd5\x8d\x78\xbc\xe8\x61\xdb\x89\xb7\x78\xf9\xf4\x5d\xec\xf3\x4a\xaa\xb3\x77\x84\x99\xaa\x5e\x5f\x5f\x97\x89\x6e\xda\x7f\x4c\x1f\xac\x9b\x29\xce\xb0\xaa\x8c\x3e\xd2\x38\x7b\x7f\x64\xdc\xc9\xe4\x73\x98\x23\x4a\xc1\xd1\xfe\x6f\x1c\x45\x07\x6b\x50\x4e\x3c\x72\xa1\xdf\xd3\x4f\xe3\x64\x8b\xc4\x23\x0c\x8d\x03\xa2\xde\xb6\xef\xd8\xc7\xac\x17\xfd\x06\x6f\x75\x9d\x2c\xc6\x8d\x3b\x79\xfc\x6d\x18\x2e\x10\xc1\x1f\x53\xf1\xc8\xed\xa4\xf7\xdd\xe4\xa2\xae\x24\x7e\x2c\x0f\x53\x03\xda\x0d\xd4\x90\xee\xd7\x45\x8d\x17\xfa\x48\x66\xc8\xa4\x07\x17\xca\x91\x9a\x6e\x99\xde\x50\x2f\x2f\x2f\x83\x64\xfd\xf5\x0f\xab\xef\x6f\x7f\xbc\xfd\xf5\xb6\x5a\xa8\x43\xe2\xea\x30\xc8\xad\x91\xff\xaf\xa3\xab\x99\x53\xae\x0d\xb6\xb3\x72\x1b\xd9\x51\x89\xe2\xed\xac\x18\x99\xd4\xf9\xfb\x9c\xae\x58\x4f\x16\xda\x4b\xe3\x87\x9d\x9e\xc8\xf1\x73\x3f\xbd\x06\xc1\xef\x25\x71\x81\x50\xe7\x86\x48\x81\x7b\xd9\xfc\xec\x4c\xcb\xab\xfc\xb7\x30\xaa\xce\xbf\xc0\x25\xd8\x38\x1f\x47\xa
1\xfa\x07\x4e\x37\xc1\x61\xf8\x53\x57\x97\x73\x3f\xd6\xad\xd7\xb9\x9c\xc6\x66\xcc\x22\xbd\xeb\x54\x3c\x88\x4f\x9e\x7c\xb8\x4b\xa0\x7d\x5b\xfc\x17\xe7\x2a\x35\x47\xc9\xac\xb0\x92\xe6\xfa\x69\x2b\xde\x47\x15\x84\x7c\x11\xba\xd7\x64\x1b\xd7\xbe\xff\x16\x34\xfe\x75\x6e\x59\xb1\xb3\xe2\x70\x74\x4c\xb3\x5d\xa8\xea\x60\xef\xdc\x62\xe3\x87\x51\x1c\xcd\x77\xd7\x24\x29\xb7\x7d\xb1\x6e\xa2\xeb\x1f\x4c\x83\x60\x89\x19\xc9\x7b\xc3\x33\xd3\xb2\xd2\xa6\x44\xd9\x62\x50\x5d\xe3\x9e\x16\x8e\x09\xef\xd8\x44\xd2\x65\xda\xd9\x23\x33\x54\x1f\x64\xc7\xd4\xcd\xe5\x4e\xea\x13\x3b\x19\xac\x52\xc8\x0c\xe1\x3b\x3b\x69\xc3\xa7\x8e\xde\xb9\x5d\xf3\xa0\xad\xd0\x3d\x50\x82\xeb\x3f\x01\x00\x00\xff\xff\xc7\x56\x15\x91\xa8\x0b\x00\x00") + +func filesMain_stylesCssBytes() ([]byte, error) { + return bindataRead( + _filesMain_stylesCss, + "files/main_styles.css", + ) +} + +func filesMain_stylesCss() (*asset, error) { + bytes, err := filesMain_stylesCssBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "files/main_styles.css", size: 2984, mode: os.FileMode(420), modTime: time.Unix(1509733230, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _filesStylesCss = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xec\x7d\x6b\x73\xdb\x38\xb2\xe8\x77\xff\x0a\x1e\x4f\x6d\x25\x9e\x11\x75\x28\xea\x61\x49\xae\xd9\xba\x8e\xe4\x64\x94\x23\xca\xe3\x58\x79\x38\xb3\x39\x55\x20\x09\x49\x94\x29\x92\xcb\x87\x5e\xa7\xce\xfd\xed\xb7\x00\xf0\x81\x27\x25\x27\xbb\xf7\xd6\xde\x33\x9b\xad\xb1\x4d\x36\x1a\xdd\x8d\x46\xa3\xd1\xdd\x00\x57\xe9\xc6\xd7\xfe\xeb\x42\xd3\x34\x6d\x11\x06\xa9\xbe\x00\x1b\xcf\x3f\x0c\xb5\x04\x04\x89\x9e\xc0\xd8\x5b\xdc\xe0\x97\xfa\x26\xd1\x53\xb8\x4f\xf5\xc4\x3b\x42\x1d\xb8\xeb\x2c\x49\x87\x5a\xcb\x30\xfe\x92\xbf\xdf\x41\xfb\xd9\x4b\x15\x30\x17\xff\x7d\x71\x61\x87\xee\x21\xef\x69\x03\xe2\xa5\x17\x0c\x35\x03\x3d\x07\x71\xea\x39\x3e\x6c\x5c\x80\xc4\x73\x61\xe3\xc2\x85\x29\xf0\xfc\xa4\x71\xb1\xf0\x96\x0e\x88\x52\x2f\x0c\xf0\xef\x59\x0c\x1b\x17\x8b\x30\x4c\x61\xdc\xb8\x58\x41\xe0\xe2\x9f\xcb\x38\xcc\xa2\xc6\xc5\x06\x78\x41\xe3\x62\x03\x83\xac\x71\x11\x80\x6d\xe3\x22\x81\x0e\x69\x99\x64\x9b\x0d\x88\x8b\x9e\x5d\x2f\x89\x7c\x70\x18\x6a\xb6\x1f\x3a\xcf\xb8\xfb\xcc\xf5\xc2\xc6\x85\x03\x82\x2d\x48\x1a\x17\x51\x1c\x2e\x63\x98\x24\x8d\x8b\xad\xe7\xc2\x90\x6f\xe6\x05\xbe\x17\x40\x1d\xb7\x26\x8c\x6f\x21\xa2\x1f\xf8\x3a\xf0\xbd\x65\x30\xd4\x6c\x90\x40\x04\x83\x70\xff\xb1\xf2\x5c\x17\x06\xdf\x1a\x17\x29\xdc\x44\x3e\x48\x21\x8f\x2f\x08\x09\x24\xc8\x5f\xd8\xc0\x79\x46\x2c\x05\xae\xee\x84\x7e\x18\x0f\xb5\x34\x06\x41\x12\x81\x18\x06\x29\x06\x1c\x02\x27\xf5\xb6\x48\x5c\xc3\x55\xb8\x85\x71\xde\x30\xcc\x52\xd4\x6b\x21\x53\xdb\x8e\xff\x48\xbd\xd4\x87\xdf\x0a\xc4\x61\xec\xc2\x58\xb7\xc3\x34\x0d\x37\x43\xad\x15\xed\x35\x37\x4c\x53\xe8\xe2\xa1\x69\x5c\x24\x69\x1c\x06\x4b\x5a\x13\x76\xd0\x5b\xae\xd2\xa1\x66\x87\x3e\x06\x72\x17\x01\xfd\x3a\x49\x0f\x3e\x1c\x6a\x5e\x0a\x7c\xcf\x41\xef\x57\x2d\xe6\xb5\x77\x84\x43\xcd\x84\x9b\x1b\x66\xc4\x9b\xbd\x6b\xb8\x21\x34\x6e\x40\xfc\x2c\x70\x3d\xd4\x7e\x5a\x2c\x0c\xd2\x26\x17\xc0\x4f\x86\x81\xe1\x93\x0d\xf0\x7d\xb1\x8b\x3e\xd1\xae\x24\x43\x4c\x64\x91\x08\x70\xdd\xcd\x55\x14\x8f\xdc\x2a\xe7\x2a\xef\x23\x0a\x13\x0f\xe9\xc9\x50\x8b\xa1\x0f\x90\x60\x4f\x8f\x6a\xd5\x4b\x1a\x46\x43\x4d\x37\x9a\x5d\xb8\xc9\x69\x28\xa5\x4d\xc4\xac\x1b\x4d\x33\x7f\xe9\x6d\x96\xcc\x50\xe4\x23\x45\x14\x9b\x9b\x17\x2d\xb8\xd1\x3a\x46\xb4\xc7\x62\x2d\x46\x58\xdf\x84\x47\xdd\x0e\xf7\x88\x2d\x2f\x58\x0e\x35\x27\x0c\x52\x18\xa4\xe8\xd9\x4d\x8e\xb8\xe6\x65\xc9\x38\x42\x1a\x95\x5d\x22\x15\x5a\xf8\xe1\x6e\xa8\
x81\x2c\x0d\xd1\x3b\x27\x44\xb3\xf0\xd9\x76\xd1\x64\x80\x8d\x8b\x04\x6c\x22\x99\x85\xd8\x84\x41\x98\x44\xc0\x81\x8d\xea\xd7\x1b\x5e\xf8\x2d\xc2\xbc\x9d\xa5\x29\x9a\x8c\x5e\x10\x65\x69\xe3\x22\x8c\xd2\x7c\xde\x26\xd0\x87\x4e\x8a\xe6\xc7\x3e\x05\x31\x2c\xa6\x41\x3e\xf4\x5e\xb0\x82\xb1\x97\x56\x58\xb9\x47\x8c\x1d\x21\x7d\x08\x6c\x6d\xbd\xc4\xb3\x7d\x48\x53\x41\x3a\x2d\xc6\x10\x19\x2c\x3c\xc9\x16\x61\xbc\xa9\x66\x64\x01\x8c\x0d\x24\xa6\xfb\x8f\xf4\x10\xc1\x5f\x2f\xc9\x8b\xcb\x6f\x39\x37\xf9\xd3\x18\x26\x30\xe5\x1f\x26\x99\xbd\xf1\xd2\xcb\x62\x0e\x16\x36\x12\x44\x11\x04\x31\x08\x1c\x48\x70\x39\x59\x9c\x20\x76\xa3\xd0\x0b\x52\x18\x57\xbd\xff\xe1\x7a\x09\xb0\x7d\xe8\x7e\x63\xe8\x28\x9f\x16\xd2\xca\xdb\xbb\x70\x01\x32\x3f\xad\xda\x0f\x87\x58\x69\x16\xa1\x93\x25\xba\x17\x04\xc8\x62\x62\x14\xe2\x0b\x5e\x37\xf3\xe9\x01\x5c\x17\xab\x13\x96\x30\x6e\x9a\x03\x32\x93\x29\x08\xe3\x0d\xf0\x4b\x90\x9c\x7b\x67\x05\x9d\x67\x3b\xdc\x0b\xa2\x02\xae\x17\x5e\x56\x86\xa9\x52\xda\xd2\x48\xed\x95\xdd\xe7\x38\x82\x6c\x63\xc3\xf8\xf2\xdb\x70\x58\x08\x15\x73\xa1\x27\x91\x17\xe8\x8c\xb6\x29\x1b\x84\x59\xca\x36\xc8\x09\x2a\x98\x2a\xe6\x03\x33\xa0\x10\xc4\xce\xaa\x66\x40\x87\x58\x9f\x16\x1e\xf4\xdd\xda\x39\xa9\x42\x5c\x91\x47\x9e\xe8\x0e\xc2\xea\x4b\x59\x52\xb6\x71\xa1\x13\xc6\x00\x99\xb5\x1a\x3a\x0b\x3d\xc7\xb4\x26\x30\xe5\x14\x00\x2d\x10\x49\xe8\x7b\xae\xf6\x93\x63\xa0\x7f\xdc\x8c\xd3\xcc\x88\x1f\xa4\x66\xbb\x0b\x37\x5a\xb3\x67\xe2\x1f\xd7\xb9\xdd\xf3\xe1\x12\x06\xee\x39\xea\xc5\xd9\x00\xd1\x32\x15\x66\xa3\x6e\x95\x4a\xd1\xcc\x60\xd7\x3c\x27\xf4\x7d\x10\x25\x10\x0d\x00\xf9\xed\x86\x7e\x8d\x0c\x17\x45\x84\xdb\xb8\x48\x57\x39\x02\x96\xbe\x3f\x5d\xa5\x7f\xa2\xab\x84\x71\x0f\x83\x30\x7d\xfd\x07\x9a\x26\x71\xe8\x27\xdf\xae\x64\xde\x92\x64\x35\xfb\xd3\xcd\xfa\xd3\xcd\x3a\xe1\x66\x25\xdb\x25\x56\xae\x61\x1c\x86\xe9\x95\x60\x61\x88\x02\xfd\xe9\x8f\xfd\x4f\xf7\xc7\x86\x1a\xc1\x76\xc3\xf8\x55\xff\x23\xfd\x32\x41\xbd\x79\xe7\xec\xff\x73\xc7\xed\xbb\xa7\xf7\x9f\xae\xdd\xbf\x9e\x6b\xf7\x73\xe3\xe2\xe7\x21\x58\x60\x7f\xe9\xe7\xa1\x0d\x17\x61\x69\x90\xbf\x73\x1e\x70\x0e\x63\xa1\x9b\x32\x0f\xcf\x0b\x74\xfa\xf5\x8d\x68\xda\x5f\x45\x71\xb8\xf7\x36\x40\x0f\xc2\x2d\x78\xd5\xd0\x56\xd0\xdf\x42\xb4\x70\x6a\x01\xcc\x20\xf5\x77\x43\x03\xb1\x07\xfc\x86\xe0\x93\xd2\x4b\x40\xaf\x18\x5e\xc6\x56\xb4\x9a\x5d\xd6\x1f\xe8\xb8\xdd\x5e\x77\x70\x53\x19\xe5\x18\x06\x2e\x8c\x31\x9f\x61\x94\x7a\x1b\xef\x08\x1f\x23\x48\x5c\x9b\x88\x59\x31\x75\xbc\x62\x33\x7a\x55\xba\x44\xed\x7c\x1d\xcd\x4d\x1d\xb1\xfd\x95\x3e\x8b\x9e\xd9\x4f\xd7\x1d\xd7\x5d\x2c\x58\xe2\x16\x8b\x05\x41\xf2\x63\xed\xb9\xf5\xed\x27\xc3\xb0\x0d\x48\xf3\x5c\xcd\x37\xda\xfb\x94\x2c\x0c\x60\x88\x8d\x36\xef\x26\x96\x8e\x27\xe3\x45\x56\xdd\xf5\x5d\x1b\xa0\xf7\xcd\x28\xf3\x7d\xdd\x87\x8b\x62\xb2\x2e\xfc\x10\xa4\x43\x0d\x3d\xa9\xde\xc7\x68\xa8\x58\x00\xfc\x08\x43\xac\x3c\x57\xe9\xf7\x36\x9d\x45\xae\xd5\x0d\xfc\x3b\x56\xf5\x92\x18\x6c\xba\x86\xda\xa5\x76\x79\xc3\x36\xc7\x33\xae\x68\xcf\xb4\xf1\x21\x88\x91\xba\xa7\xab\xdc\xe9\x6a\x5c\x78\x8b\x18\x6c\x20\xbb\x19\xd8\x80\xbd\xbe\xf3\xdc\x74\x55\x69\xfe\xaa\xd5\xb8\x58\x99\x8d\x8b\x55\xbb\x71\xb1\xea\x34\x2e\x56\xdd\xc6\xc5\xaa\xf7\x22\xf5\x91\x6a\xaf\xd9\x95\x4d\x1c\xf0\x9c\x7a\x5b\x7d\x19\x87\x29\x4c\x9e\x5f\x38\x73\xd4\xd4\x72\x7e\x3c\xe7\x41\xd7\x72\xc8\x58\xb6\x76\xde\xa0\xa7\xf0\x7d\xb2\x28\x82\xb1\x03\x0a\xab\x56\xf4\xd3\x37\xfb\x4e\x7f\x51\x6c\x40\x38\x22\xc8\xcb\x9b\xfa\xdd\x88\xf6\x93\x7d\xed\x18\x4e\x9b\x55\xe7\x15\xf4\xa3\x72\x0b\x80\xdd\xfc\x06\xe5\x6c\xd3\x26\xc4\x24\x73\x98\xda\x5b\x94\xf6\xb4\x1d\xed\xb5\x6e\x31\x46\xdc\x8e\x63\xb1\xb0\xdb\xd8
\xfa\xa1\x3d\xe1\xdf\xb3\x30\xe5\x1d\x6d\x34\xba\xe2\x0a\xa2\xb5\x4a\x84\x32\x2e\x73\x47\x14\xef\x90\xb0\x75\xfb\xf7\x56\xb3\xdb\xd5\xde\xc1\x30\x5e\x7a\xa0\xa1\xcd\xbd\x0d\x4c\x1a\x5a\x39\xa6\x54\xef\xd1\x30\x0c\xfc\x83\xee\xac\x3c\xdf\x6d\xb0\x2f\x7c\x90\xa4\xe4\x85\x6c\x27\x4e\x81\x92\x7d\xb6\x44\x81\x2b\xaa\xa5\xd6\xb7\x60\x65\x60\x83\x36\xe8\x4a\x91\xb2\x2b\x51\x39\x51\x5f\xfd\xcd\x34\x5a\x1d\xed\x6f\x86\x71\x6b\xbc\xc2\xd3\xb3\x6a\xa9\x3b\x10\x99\x24\xe8\xd2\x2a\x95\xef\xae\xc8\x2b\xbe\xc1\x06\xba\x5e\xb6\x91\x6c\x2a\xf3\x41\xa6\x61\x7d\x10\x2f\xa1\x08\xda\x56\x2d\x29\x78\xb4\xff\x17\xea\x01\x68\x89\x13\x43\x18\x68\x20\x70\xb5\xd7\x95\x5d\xe8\x0f\x06\xd1\xbe\xd8\x90\x95\x7b\x5b\x1e\x7f\x3f\xda\xe3\xe7\xff\x4d\xc0\x4c\x05\x98\xc9\x82\xb5\xe5\x60\x66\x8f\x05\xeb\x28\xc0\x0c\x16\xac\x2b\x07\x6b\x71\xd8\x7a\x0a\xb0\x0e\x03\xa6\x12\xaa\x9a\x23\x85\x20\xbd\x20\x17\xe4\xc0\x30\x4e\x0b\xb2\x63\x9e\x27\xc8\xde\x59\x82\x6c\x1b\xe7\x09\xb2\x73\x9e\x20\xfb\xe7\x09\xb2\x47\x8b\x24\xf3\x1b\x17\xa1\xcf\xcf\x50\xcd\x90\x59\x12\xbc\xbc\x56\x0e\x48\xe6\x6b\xbe\xf7\xd7\x1c\x41\xfe\x1b\x79\x16\x96\xcf\x38\xd4\x64\x5e\x33\x08\x34\xfc\xdf\x1c\x9e\xfc\x51\xee\xc4\x92\x32\x7c\xe3\x78\xb1\x43\x56\x52\x02\xc1\xb7\xab\x69\x9a\xfc\x3d\x03\x31\xd5\x54\x84\x70\xbd\x04\x47\x86\x9a\xf8\x61\x16\xe0\xc7\x2e\x6b\x97\x73\xde\x0d\x09\x9c\x14\x67\xe9\x38\xe0\x87\x24\x94\x57\x41\x4a\x63\x7c\x38\x78\xe5\x6b\x6e\x2a\x5b\xe9\xba\x86\x3c\xda\x84\x1a\x48\x0d\x6c\x13\x59\x3b\xe0\x05\x9c\x65\x45\x83\x5b\xec\x19\x9a\xcb\xd8\x73\xf5\x85\x0f\xf7\x3a\x0f\x5c\xad\x1d\x37\x52\xd6\x38\x3f\xa7\xd8\x44\x21\x5c\xfc\xab\x4d\x82\x1f\x97\xee\x7e\xf9\xa6\x02\x2e\x60\x74\xb2\xa7\x89\xc3\x9d\xb6\x8b\x41\x94\xdb\x7e\xd5\x0b\xba\x53\x0e\x00\x07\x35\x1d\x1f\x24\xc9\x7f\xfe\x7a\x49\x71\x09\x7d\xbf\xdc\xc2\x2a\xdc\xea\x5a\x63\xdb\xa3\x6d\x84\x20\x61\xf4\xbf\xdc\x59\x1b\x18\x7f\xa1\xa6\xe2\x29\x52\x78\x6e\xc8\x0c\xac\xf6\x32\xb4\x8c\x64\xef\xd8\xe7\x67\xda\xbb\x9e\xd1\xaa\x78\x79\x39\x85\x2d\x19\x69\x2d\x9e\x26\xea\x41\xa9\x52\x68\x59\xd7\x8c\x73\xc4\x43\x22\x8e\x0b\x2f\x2e\x3c\x89\x2b\x8a\x9c\x7c\xfc\x28\x9b\x44\xad\x0e\x2c\x26\xbd\x15\x2e\xcc\x86\xea\x4d\x5b\xf9\xa6\xa3\xe4\xbe\x9a\x03\xac\x00\xd8\xe7\xd5\xb3\x13\xb4\x89\x0a\xd4\x65\x14\x48\x46\xb5\xd8\xa6\xdd\x39\xd1\xa6\x23\xb6\x31\xbb\xe7\xea\x4b\xa7\x8f\xf5\x85\x9b\x11\x83\x73\x67\x44\xff\xec\x8e\x06\xb4\x62\xd6\xa1\xbc\xa6\x75\xbd\x08\xbc\x36\x91\xe7\xaf\xe7\x71\x59\xf2\x47\x11\xaf\x2b\xfe\xc6\x71\x3a\xb9\x15\x40\xba\x59\x45\x66\x1a\x17\x3e\xb0\xa1\x2f\xb3\xc7\x7d\x62\x7e\x99\x08\x8e\xd4\xe1\xe6\x1c\x42\xa9\x11\x3f\x1d\x67\x82\x3d\x08\xa0\xcb\x6c\x4a\x10\x17\x59\x82\xf7\x0b\x9c\xbf\x5f\x6d\xf1\xc4\x05\x1d\x13\x4d\x31\xc5\x26\xa9\xa4\x3b\x46\xa9\x17\x5e\xb1\x22\x6e\xe5\x6b\x86\x02\x8b\x9e\x1f\x17\x69\x6a\xa8\x68\x8c\x17\x4e\x75\xba\x81\xea\xa3\x86\xa3\xca\xf6\x94\xa2\xa1\x76\xd5\xf5\x62\x3d\x31\x1a\x75\xf1\x43\x8a\x38\x26\xba\x81\x77\xa8\x79\x3e\x07\xf8\xbe\xd6\x34\x13\x0d\x82\x04\xea\x5e\xa0\xec\x93\x0e\xc2\x48\x62\x34\x8b\xfe\xc2\x25\x21\x19\xba\xd3\x22\xa0\xae\x8e\xed\x2c\xc0\xc2\x5e\xd8\xec\x50\x06\x21\xda\xf1\xf8\xe1\x8e\x84\xa6\x68\x84\x31\xfc\x7b\xe6\xc5\xd0\x65\xd8\x91\x50\xbb\x80\x4e\xf7\xda\x56\x53\xbb\x58\x0c\x16\x26\x87\xbc\x89\xff\xab\x7b\xc1\x16\x20\x1c\x6a\xdc\x0b\xe7\xda\xed\xf3\xe1\xa8\xeb\xce\x80\xa7\x96\x45\x78\x8a\x64\xd8\x37\x8c\x4e\xa7\x8e\xe4\xce\xe2\x5a\xde\xc3\x09\x82\x9d\x1e\xb4\xdd\x36\xb7\xff\x86\x6e\x0f\xf4\x6b\xd0\x9d\x22\xb7\xd3\xb6\xed\x1e\x54\x93\xdb\x59\xd8\x8b\x9e\x24\x74\x1c\x43\x32\x83\xab\x95\x8a\x8e\x9f\x0e\x72\x2f\x88\x8a\xa5\x2f\x3c\x1f\x7e\xcb\x2d\x61\x3
3\x9f\xbf\xe4\x8f\xca\xae\x32\x09\x28\x89\x45\xe0\xd1\xf1\x06\xb3\x9a\x96\x72\xd5\x1e\x2c\x40\x9d\x09\xca\x01\x49\x48\xa6\xb2\x40\xe7\x99\xd3\x82\xf5\x8e\x21\x18\xd2\x3a\x6b\x21\xd5\x90\xaa\xef\x53\xc3\x47\xa6\x33\x0f\x4f\x65\xaf\x62\x8f\x0f\x8f\x51\x19\x76\x2a\xc0\x9a\xac\x80\x8b\x4c\xa2\x81\xff\x15\x2b\x4a\xae\x49\x85\xb9\x25\x7f\x71\xf6\xb6\x58\x1c\xba\xd5\x46\x4f\x1a\x67\x61\xd6\xbd\x2a\x23\x46\x77\xd1\x2c\x0c\x8d\xd0\x57\x93\x33\x41\xec\x58\xd5\xdb\x1d\xaa\x03\x3a\x61\x59\xa0\xfe\x26\x32\x56\x0b\x25\x20\xc2\x0f\xea\xb1\x10\x90\xba\x5d\x1b\xb3\x64\xc6\x45\x16\x42\xbd\x97\x28\xdd\x9c\xeb\x5e\x9f\x72\x73\xa8\xd5\x8e\xfc\x1e\x84\x3a\x9e\x65\x89\xd6\x64\x92\x74\x1a\x1f\xe5\xa5\x5d\x3e\x7a\xcd\x24\x7f\x40\x1f\x6e\x60\x90\xd2\xa1\x80\x2a\x30\x7e\x73\xc2\xeb\x64\xd0\xd5\x52\x61\x9a\xcd\x2e\xeb\x81\x2b\x49\xa1\x9d\xfa\x70\xa1\x23\x31\xd3\x6e\x3d\xbb\xdf\x3e\x89\x34\xf7\x2e\xc4\xb7\xec\x98\xaa\x21\xf2\x29\x22\xbc\x66\x4c\x08\xe3\x4b\x55\x3e\x27\x2b\x93\x1a\xf5\x28\x0c\x40\x65\x74\x4f\x38\x71\x39\xfb\x95\x1b\x59\x3c\x2f\x34\xac\x7c\x21\xc6\x27\xa5\x7e\x99\xd2\x6a\x31\x19\x9b\xd2\x2f\xd1\x5d\x88\x59\x31\xc4\x17\x59\x91\xce\x69\xb6\x13\xe1\x6d\xea\x6d\x10\x95\x8b\x2c\x70\x08\x10\xe3\xd6\x50\x80\x51\x1c\x46\x30\x4e\x91\x9b\x56\xd2\xd4\xa0\xb2\x92\x61\xcc\xba\x57\x59\x02\xe3\x3c\xd7\x45\x8b\x11\xdb\x4b\xe5\xbb\x44\xf5\x4a\xf1\x58\xea\xc3\xe5\x15\x07\xa4\xb2\x0a\x6d\x54\x7f\xfe\xf5\x15\x79\xa6\xe7\x1e\xab\xfb\xaa\xac\xb4\x2a\x86\x89\xc4\xaf\xf9\x15\xa5\xda\x66\x18\x45\x68\x98\x20\xa7\x73\x5b\xd2\x31\x1a\x40\xd7\xa4\x1b\x50\xf6\x36\xd7\xc1\xfc\xa7\x7e\xda\xe5\x63\xd2\x79\x0a\xd3\x9b\x77\x53\x9b\x92\x6b\x16\x89\x77\x19\x17\xdc\xa2\x58\x50\x47\x17\x47\xc9\x33\x1e\x92\x2c\x49\xd1\x98\x8e\xf3\xb2\xd1\x84\x6a\xc7\x23\x8d\xbf\x93\xe6\x45\xc0\xae\x21\x3c\x39\x21\x7f\xae\xb4\x8d\x6f\xad\x08\x99\xb1\x13\x4c\xd6\xf0\x54\x4a\x33\x07\x0f\x60\x96\xc6\xc0\x57\x0f\x28\xf1\x66\x24\x4d\x4e\xe9\x95\x6d\xd8\x8e\xd3\x93\x35\x14\xf4\xab\xa4\xe2\x7c\x3d\xa3\x9d\x2c\xd1\x45\x53\xf5\x48\x68\x3e\xdd\xaf\x54\x78\x4a\xf4\x02\x3e\x69\x73\xf3\xda\xb4\x4d\x46\x90\x20\x8a\xe2\x70\x0b\xd5\x4c\x12\x37\x5c\xd2\xe4\x94\xec\xdb\xbd\x41\xaf\xdb\x97\x35\x14\x65\x5f\x50\x71\xbe\xec\xc9\x96\x83\xc6\xbe\x03\x71\x4d\x8a\x9f\x6c\x7e\x78\xf8\x93\xfa\x03\x0c\xa3\xdd\x13\x5a\x89\x0c\xa0\xc7\xe7\x13\x4f\x36\x78\x34\x5a\x07\x64\xb5\x25\x0a\x82\x6b\xcc\xf2\xa5\xa8\x47\xc8\x02\x17\xc6\x45\xb5\x24\xd7\xd5\xf9\x26\x81\x53\x70\x41\x24\x05\x42\x51\x2a\x05\x57\x0a\xc9\x88\x4a\xfd\xb3\x7a\x01\x62\xcb\x7a\x8a\xe0\x1e\x1b\xc3\x28\x33\x22\x68\x0b\x72\x23\xae\x57\x7d\xd5\x72\xd5\x27\x86\xb4\xa6\xfb\x46\xdd\xcb\x62\x4a\xd7\x81\x9c\xd4\x0a\x85\x11\x2e\x50\xb0\xfc\xcb\x9c\x1b\xb5\x3d\x2e\x70\xb0\x63\xce\x63\x42\x66\x99\xc7\xc4\x9b\xea\x12\x13\xcf\x0e\x8b\x4c\x56\x50\x53\x6d\x08\x79\x64\xfc\x02\xc0\xe2\x62\x36\x52\x42\x7d\x83\x0a\x57\x1d\xab\x74\x69\x00\x5b\xcb\x54\x8b\x51\xd4\x6f\x1e\xe4\x84\x9e\x3b\x5d\xc7\x75\xd8\xc2\x0b\xf5\x0a\xa7\xec\x9e\x5f\x3f\x4e\x11\x21\x5d\x08\x08\x29\xd2\xfe\xb8\x15\x81\xa5\x93\x0e\xca\xa8\x57\x08\x1e\x57\xdd\x60\x90\x65\x82\xc5\x28\x2e\x1d\x02\xc6\x7a\xfd\x93\x05\xa4\xc4\x15\xa3\x44\x8a\xe3\x51\xd5\xa1\x80\x1f\xe3\xb9\x40\xf6\x0f\x64\xba\x44\xf9\x0f\xe5\x9a\x5e\x30\x39\x6d\xa4\x16\x15\xf5\x02\xca\x20\xaa\xe3\x96\x2c\x19\xa7\x96\x11\x16\x5d\x3d\xa7\xf2\x08\x29\xbf\xa8\x92\x7a\x51\x76\x0f\xcb\x6d\x4b\xd9\x12\x32\xb2\x27\xd5\x5b\x35\x91\x71\xd1\x24\x32\xbd\xd0\xb9\xbc\xc6\x69\x10\xaa\xb8\x48\x9a\xe2\x7e\x79\x7b\x71\xdb\x7d\x16\x22\x65\x6b\x92\x59\xa9\xc3\x22\x94\x41\xf1\x22\xc6\x68\xda\x45\x6e\x46\x8a\x44\xf4\x62\xd9\xd7\xd2\x45\x90\x19\x
97\xca\x87\xa8\xcd\x6f\x77\xfa\x74\x36\xaf\x46\x4f\x24\x8c\xb0\xa9\xcf\x72\xa0\x84\x18\x13\x95\xa2\x56\xf6\xc2\x86\x87\xd4\x7d\x96\xa2\x63\xc3\x60\xea\x81\x38\x89\xd1\xa8\x06\xe4\x24\x46\x1c\xc8\xa2\xd1\x5e\x89\x78\x0b\x17\x8a\x4a\x02\xe7\x47\x52\xe8\xac\x62\x51\x6e\x69\xc1\xc0\x0f\x1b\x9a\x15\x06\xc0\x09\x1b\xda\x28\x0c\x92\xd0\x07\x49\xed\x81\x14\xb1\xd8\x3c\xdf\x4f\x5f\x30\xe7\x62\xea\xd3\x62\x5d\x36\x63\x28\xa9\x16\xe5\x2a\x51\xeb\x0a\xa0\x77\x61\xec\xea\x76\x0c\xc1\xf3\x50\xc3\x3f\x74\xe0\xfb\xd4\xab\x5d\x0c\xa2\xe2\x0d\x7a\x50\x10\x4a\x49\x85\xdf\x4c\xef\x56\x5e\x0a\x71\x39\x3a\x1c\x6a\x51\x0c\xf5\xa2\xca\x83\x1c\xee\xa9\xd8\x54\x4c\x70\x04\xa6\xfb\x54\xf5\x2f\x95\x81\x95\x55\x5a\x76\x17\xd7\x8b\x7e\xd5\xd2\xad\xea\x34\x8b\x86\x5d\x07\xb4\xec\xae\xa4\xad\x61\xb4\xbb\x1d\x62\xba\xdd\x38\x8c\xdc\x70\x17\xe8\x49\x0a\x02\x17\xf8\x61\x99\xca\x54\x9d\xe7\xaa\x89\x21\x4a\x03\x7a\xb2\x0c\x8a\x5c\x02\x12\x5a\x64\x8b\x92\x6c\x66\xd5\x37\x2f\x5f\xd5\x29\x5a\xc9\x2e\xb0\x93\xd0\xcf\xd2\x9c\xdd\x3c\x4c\x56\xaa\x13\xa9\xb1\x3a\x2f\x41\x4b\xcf\xd4\x73\x29\x04\xea\xc4\x93\x78\x5a\x53\x3a\x7c\x4a\x6e\xcb\xa8\x98\xb4\x95\xe7\xa0\x6d\x7c\x1c\x87\x3b\x9d\x6a\xac\xd2\x02\x72\x5e\xcf\xe4\xe6\x64\x15\x11\x67\xba\x29\x8c\x7f\x43\xf2\x4c\x20\x57\x99\x0b\x53\x91\x7e\x06\xce\x72\xa0\x5a\xcc\x30\x22\x3f\x41\x4e\xa9\x48\x95\x32\x99\x56\xdb\xd8\xf7\x28\x13\xcc\xd4\x07\x49\xea\xb6\x6b\xb8\x45\x7d\x66\x31\x2b\xc1\xe2\xd9\x39\xb4\x52\x3b\x4a\x49\x7b\x31\xfa\x58\xd7\x09\x7f\xa0\x42\x90\x43\xd5\x46\xae\x53\x39\xeb\x69\x18\xa9\x65\x29\xe9\xf7\x7b\x64\x49\x85\xa5\x6b\x59\x92\x6e\xb1\x60\x7b\xd1\xe7\xe9\xaa\xdc\x1c\xfe\xd1\x39\xc3\x20\x1b\xda\xa2\x39\x2f\x56\x6a\x3f\x2b\xc2\x96\x5b\x48\x91\x8a\x73\x22\xb8\x54\x14\x51\x44\x70\xf6\xa0\xd5\x51\xf8\x63\xc3\x46\x05\x49\x29\x6a\xd0\xa4\x6d\x30\x4f\x70\xaa\x89\x7d\x94\x45\xec\xdf\x32\x43\x28\xae\x5c\xb9\x81\x30\xd8\x7c\xbb\x21\x71\x21\xc4\x34\xb4\xc1\xd3\x59\x43\x42\xce\x72\x69\x27\x73\x86\x85\x58\x5d\xb1\x90\xe4\x95\xb5\x32\xc0\x33\xa5\x23\x0c\xdf\x39\xfd\x52\x75\x55\xe7\x75\xcc\xad\xd0\x3c\xd9\xb4\xfd\xa9\x27\x90\x97\x8c\xa2\xa5\x54\x31\xcf\x68\x57\x9e\x3e\x54\x32\x4a\x37\xc4\xfb\x10\x6a\xf5\xe2\x0f\x9e\xf3\x1e\x0c\x73\xb0\x3c\x57\x29\x90\xa5\xe1\x8d\xb8\x31\xcc\x41\x2b\x8c\xfc\x31\xf9\x8d\xe7\xba\x3e\xa4\x40\xab\xfb\x1f\xb8\xc9\xcc\x9f\x93\xe1\x53\x53\x2c\x17\xa7\xe7\x42\xe9\x79\x74\x4f\xd6\x97\xbd\x6c\xb7\xc6\x88\x47\x93\x1f\x5c\xa9\x36\x34\x95\x84\x7e\xa9\xa4\xa4\x71\x45\x21\x48\xba\x74\xba\xfb\x9c\xf2\xd0\xff\xe2\xd0\x0b\xe6\x89\x3c\xa8\x36\xe6\x92\x92\xde\x4a\xc3\xa5\x14\x9f\x81\xa1\xd0\x57\x39\xc7\xc2\x60\x4b\x36\x3e\x67\x34\xfe\x1e\x4e\x88\xca\x24\x09\x90\x27\x32\x79\x6f\x99\x75\x0b\x65\x6b\x1e\x5e\x45\x95\x01\xee\xa2\x2b\x40\x6f\x36\xf9\x63\xc1\x25\x54\x24\xdb\x23\xb3\x11\x97\x1c\x94\x39\xcb\x58\x28\xb0\xc2\xea\x55\xa7\xae\x5e\x49\x17\x00\x76\x75\x50\xed\x14\xf2\x5d\x8f\x0e\xb7\x30\x48\x13\x49\x25\x45\xce\x7d\xbc\xb4\xc1\x6b\xb3\xdb\x6d\x68\xd5\x7f\x8c\x2b\x06\xb2\xa8\x75\x29\x67\x30\x66\x49\x77\xbd\x64\xe3\x25\x09\x75\x46\x9a\xab\xb9\x28\x4e\x08\x94\x2d\x16\x99\xef\x13\x6c\x45\x5d\xa7\xfc\xac\x58\x01\x0f\xec\x70\x0b\x8b\x53\xd2\xe5\x53\x1b\xfa\xe1\x8e\x11\x68\x6e\xa6\x8b\x7d\x0f\x1b\x7d\xeb\x71\x44\x88\xcd\x19\xeb\x2b\xd5\x94\xaa\x0c\x81\x1c\x29\x65\x91\x95\x7f\xc1\x38\x0e\xe3\x33\x50\x2f\x16\xb0\x0d\xed\x1a\x44\x49\xe6\x38\x30\x49\xce\xa1\xb2\xbf\xb8\x86\x8b\x1a\x54\xc0\x87\x71\x7a\x1e\x4d\xd0\xbd\x56\x89\x5f\xed\x76\xd1\x62\x2a\x77\xa7\x22\x9a\xd3\x32\x62\x1d\x70\x41\x40\x2c\x96\x1a\x01\x31\xc4\x89\xd2\x61\xf1\x28\xa5\xc3\x51\xc3\x8b\x06\x73\x51\xb7\x19\x43\xf4\x2b\
x03\xde\x2c\x16\xde\xd5\xa6\x42\xd9\x1c\xa0\xd4\x7b\xee\xdb\x86\x61\x0e\x58\x1e\x11\x57\xb5\xd4\x41\xf7\x9a\x8b\x76\xb7\xfa\x03\xc3\x94\x60\xe1\xa9\x73\x5a\x3d\x28\x07\x94\x52\x37\x68\x75\x4d\xa3\xc5\x80\xe7\x63\x57\x97\x5b\x47\xa3\xa6\x4c\x90\xf0\x78\x78\x0a\xa9\xc4\x87\x00\x2a\xdf\x7f\xf4\xaf\x8d\x0e\x61\xc9\xf1\xc3\xa4\x36\x00\x58\x17\x97\x29\x0d\x90\x50\x73\x46\xaf\xb2\x3a\x75\xb6\x58\x16\xa1\xa2\x9c\x26\x5c\xbf\x23\xa9\x29\x2d\x16\xa2\x45\x18\x6f\x34\xe5\xf2\x58\x5a\xdf\xd0\x2d\x73\xa1\x15\xfd\x0b\x6f\x5f\xd4\x9a\xb0\xc4\x73\xc6\x54\x55\x77\x77\xd4\xbd\xc0\x85\xfb\xa1\x66\x1a\x6c\xe1\x19\x75\x9a\x1c\xff\xea\x83\x14\xbe\xd6\xbb\xc6\x5f\x1a\x1a\xfa\xef\x55\x55\x6b\x76\x1e\xe4\x79\x50\xf2\x88\x08\x4b\x59\x71\x1c\x42\x4f\x9c\x38\xf4\x7d\x2c\xa5\x34\xcc\x9c\x55\x25\x26\xf2\x5f\x3d\x08\xf5\xfc\x8e\xb2\x44\xa8\xbd\x32\xd4\x85\x84\x15\x1e\x1d\xdf\xd1\xc5\x7b\x04\xf9\x50\x72\x17\x2d\x90\x06\x2b\x08\xdc\x46\xf1\x07\x75\x59\x07\xe3\xee\x70\xf0\x2f\x09\xde\x90\x56\xcc\xd1\x71\xee\x80\x59\x7d\x79\x24\xbd\xb1\x39\xa3\x87\xb2\xd4\x8d\x79\xca\xe6\x44\x2a\x5f\x58\x6b\x99\x8c\xb6\xb2\x58\x7e\xa9\x45\xf6\x0b\x87\x54\x16\xfb\x23\xed\xe8\xc9\x4d\x5f\x66\x21\xab\xb8\xaa\x0b\xd7\x0b\x73\x51\x5d\x49\x47\x75\x2c\x37\xde\x55\xe1\xc1\x0a\x24\x3a\x06\x57\x9d\xc1\x27\x07\xef\x11\x12\x1f\x1c\x1a\xca\x06\xea\x29\x6e\x9c\x13\x34\x16\x2f\x86\x29\x67\x7a\xab\x88\xfc\x87\xfb\xb2\x8a\xde\x0b\x12\x98\x16\x27\x29\xa3\x3d\x71\x27\x8d\x86\x96\xff\xbf\x79\xad\x9c\x9f\x3c\x64\xe7\xea\x85\xd9\x36\xda\xae\xc9\x29\x97\x32\xc8\x8b\x43\x10\x89\xc4\xea\x51\x6f\x24\x66\x4e\x3c\xdb\xa8\x7c\xc9\x5f\x33\xa6\xfd\xc0\x49\x42\xb6\x1e\x9e\x97\x46\x71\x8a\xf0\xec\xe3\xad\xd7\xbd\xc1\x69\x74\x5d\x1a\x5d\x13\x39\x49\x01\xd8\xb2\x33\x8f\x3f\x1f\x5c\x42\x65\xc2\xa9\x79\xe6\x75\x79\xd6\x5b\xb4\x78\x05\x08\xa8\x0b\x17\xb0\xc8\x54\xc7\x10\xc9\x71\x0c\x31\x3c\xa1\xb8\x4f\x82\x41\xda\xdc\xc0\x20\xd3\xd3\x70\xb9\x2c\x4d\x3b\xbe\x5b\xce\xf3\xbd\xf4\x50\x9c\xb1\xab\xf7\x13\x8a\x48\x7f\xe1\x23\x97\xa8\xff\xf1\xf9\x8e\x1c\x35\x93\xc4\x3b\x6b\xa5\x60\x1b\xd2\x92\x94\x6c\x98\x19\xe0\x86\xd0\xd8\x3b\x9d\xb0\x28\x5a\xe0\x94\xa1\xac\xb7\x4e\xbb\xdf\x1a\xb4\x45\x58\x21\x44\xad\x82\x2b\xc3\xd3\xec\xe3\x66\x71\xe1\xe6\x69\x12\xfa\x5d\xbb\xef\x5c\x0b\xa8\x85\xa6\x35\xec\x52\x59\x4e\xa6\xcd\x0b\x02\xdc\x15\x7f\x76\x0c\x81\xeb\xc4\xd9\xc6\x16\xfc\x93\x96\x51\x55\x46\x50\x50\x25\x69\xb2\xca\x5f\x0a\x4e\x48\xf8\x19\x95\x23\x19\x81\xa5\x17\x90\xab\xe0\x4e\x86\xd1\x0b\x17\x58\xaa\x61\x14\x22\xb8\x29\x0e\xb3\xbf\x42\x82\x28\xeb\x32\x0b\x3f\x43\x15\xe0\xe1\xf1\xfc\x40\x20\x57\x81\xe5\x85\x81\xe4\x14\xd8\xc9\x8b\x66\x19\xa0\xa4\x2d\xc9\xa5\x26\x9e\x0b\x73\xbb\x28\x8a\x1a\xbc\xa4\xa7\x2c\x00\x5b\xe0\xf9\xc0\xf6\xa1\xee\xa5\x70\xa3\x56\x04\x1e\x92\x9c\xe1\x58\xc5\x70\xf1\xed\xaa\x51\xff\xba\xa6\xba\x9c\xdd\xeb\xe4\x57\x53\xaa\x83\x54\x64\x1f\x96\xc5\x31\x0c\x52\x19\xbd\xf4\x2d\x71\xb2\x13\xe6\x95\xe4\x40\x83\x19\x5c\xf4\x27\x25\xd5\x06\xa7\xd0\x67\x0c\x45\x6e\x49\x30\xdf\x82\x34\xae\xb8\xde\x4e\xc1\x96\x58\x19\x66\x59\x24\x32\x39\x28\xcf\x24\x73\xca\xdc\x90\x99\x09\xf5\xca\xc9\xc8\xa2\xa1\x30\x1e\xfc\xd6\x85\x02\xa2\x38\x62\x79\x50\xa5\x28\xce\x08\xe6\xab\x87\x52\x6a\xf5\x4a\x7f\x81\x31\x68\xf2\x96\x92\xd4\xc4\x0b\x5c\x4f\x66\xf2\xd6\x2d\xd4\x02\x91\x2f\xf3\xf8\x94\xdd\x89\x7c\x6b\x55\xed\x15\x06\x97\xa9\xce\x29\x4a\x59\xa3\x5b\x3b\x3c\x82\xab\xcc\x86\x13\x85\xa0\xb6\x5c\x07\x8a\x8a\x19\x0a\x46\xee\x28\xd4\x4a\x4d\xe1\x11\x0b\x7e\xa7\x26\x94\x9f\xd0\x42\x2b\xa1\xff\x5a\xaa\xbb\x6a\xf4\x98\x06\x8a\x05\xfc\xac\xce\xb0\xaf\xd1\xe0\xf0\x51\x5e\xe6\x10\x1f\xc7\x84\xee\xff\xce\xce
\x84\x62\x69\xe7\x6c\x99\x8a\x00\x29\xb7\xdd\x5c\xb1\x98\xe0\x80\x26\xba\xf9\xd4\x1b\x3a\x74\x40\x6d\xac\x35\xc9\x11\x77\x99\x1c\xe5\x1c\x09\x85\x0c\x22\x63\x55\x37\xd4\x56\xd5\x30\xbe\xaf\x13\xf9\xe0\x17\x82\xa8\xba\x92\x9e\x0a\xae\x6e\x00\xff\xae\xbe\xe9\x29\x2e\xad\x58\xd4\x14\xa7\xcb\xc5\xde\x72\x07\x5c\xad\x26\x2a\xff\x4d\x98\xd2\x32\xdb\x2c\xe9\x8f\x38\xb3\xff\xe0\xee\x5a\x8b\xce\x75\xb7\xfd\x5d\x1b\x58\x95\x86\x9f\x9c\x93\x38\xae\x22\xd1\x39\x89\x0f\x7a\xaa\xb5\xa2\xe0\xef\xbc\xa9\x59\xdb\x8a\x8d\x2a\xa8\x76\x9b\x9a\x58\x31\xa8\x55\xd1\x8e\xfa\x4e\x5f\x66\x48\x0a\x75\x93\xd0\xcc\x78\xe0\xa7\xd7\x11\xe5\xc6\xe1\x8c\x4e\x19\x9a\xbf\x5f\x89\x5f\x8a\x8f\xd7\xd2\x66\xf5\x9d\x8a\xe6\x06\x0a\xa9\x25\xc1\x07\x62\x2b\x7a\x18\x0c\xe5\x2f\x3a\x5e\x13\x71\x35\x1c\xff\x48\xab\xeb\xa4\x5b\x6e\xde\xf2\x56\x5c\x0e\x5a\x75\x65\x92\x64\x19\x76\xd1\x3f\x16\x17\xd3\x73\xdd\x71\xfe\xfa\x63\xf4\xfc\x5d\xd5\x15\x8b\xf4\xf1\xe7\x12\xaa\xcb\xb1\xc4\x9c\x73\x2e\xa0\x4c\x01\xaa\x48\x02\xb1\xe2\xaa\x3d\xa1\x5a\xb6\xdd\x81\x38\xf0\x82\xe5\xc9\xb6\x54\x1a\xad\x6c\x4b\x92\x77\xa7\x5a\x52\xe9\x41\x7c\x09\xb2\x1e\xc3\x24\x0a\x83\xc4\xdb\xf2\xb7\x39\xe9\xfb\xea\x16\x73\xfa\x86\x72\x21\xd2\x29\xad\x40\x57\x44\xe9\x98\xab\xca\xe9\x85\xbc\x5a\xe0\xd4\xa5\xd3\xb5\xfb\xcf\x14\x67\x2a\x4a\xdc\xfc\x96\xed\x46\xb2\xb9\xc3\xcd\xe2\x5c\x0e\xf8\x2e\xb6\x1f\xca\xb7\xca\x50\xa9\x93\xae\x52\xe8\x13\x99\x57\xb6\xcd\x0f\x26\x38\xe5\xc8\xd4\x59\x4e\x05\xfc\x89\x54\x27\xdb\xea\x87\x32\xc6\x32\x54\xea\xb4\xb1\x14\xfa\x54\xee\x98\xb4\x48\xd2\xd8\x8b\xa0\xab\xa5\xf1\x30\x48\x57\xc4\x91\x78\x0d\xb7\x30\xb8\x52\xa7\x55\x48\xcb\xd3\x07\x7e\xea\x2f\x8c\x93\xa1\x4a\x63\xb6\xfc\x27\xe5\xb1\x97\xd1\x0c\xd2\x70\xe7\xa5\x2b\x9d\xf0\x99\xc6\x55\x4c\xf2\x3c\xce\x6a\x77\xfd\x27\x36\x45\xd7\x95\x5b\x44\xec\x05\x9a\xec\xd4\x5f\x2b\xa5\xc7\xcb\x6c\x58\x9b\x60\x0b\x52\x50\x88\x45\xb9\xa6\x31\xa0\xf9\x0a\x25\x85\x2d\xad\x7d\x0e\x2c\xb3\xf5\x66\x97\xbb\x50\xaa\x34\xeb\x79\x23\xe6\x2e\xe9\x55\x79\xf7\x2b\xd7\xaa\x5c\x05\xf3\x56\xb2\x05\x03\xe7\xaf\xf8\xa4\x18\x13\x19\x88\x62\xaf\xbc\x18\xe8\x67\xda\x33\xa0\x4e\xb3\x50\x1b\x5f\xed\xdf\xbc\x4d\x14\xc6\x29\xa0\xf7\xc0\xd4\x31\x1a\xe9\x7b\x3a\xb7\x86\xf4\x47\x0a\xc4\xdc\x63\xc5\x41\x51\x6e\x0d\x20\xa3\x0c\x86\x5b\x2f\xf1\xd2\x72\x18\xb4\xfa\xa3\xfd\x54\x7b\x12\xdb\x63\xca\x63\x34\xf6\xfe\xfe\xd7\x97\x1a\x48\xd3\xf8\x35\x02\xbc\xd2\x2e\xaf\x2e\xe9\xe6\xd5\xb7\xaa\xce\x41\x81\x21\x05\x1c\x98\x84\xff\xfc\xf5\xf2\xa7\xcb\x6f\x45\x21\x18\xf3\x7c\x0d\xb6\x20\x71\x62\x2f\x4a\x87\x97\x35\xdd\xbc\x7a\x45\x21\xc5\x1f\x35\xc2\xc2\xe6\x6f\x85\x27\x33\x60\x09\xc9\x61\x2c\xdd\x0b\x12\xcf\x85\x43\x0d\x6c\x43\x8f\xf6\x17\x53\x2a\xf9\xae\x09\x9f\x2f\xd0\xc9\xf7\xcf\xc8\xd9\x37\xba\x55\x4e\x3b\x5b\x3f\x7a\x46\x77\x7c\xc1\x29\xb3\xaa\xcb\x87\x3e\x22\x5d\xad\xf2\x4b\x5c\x99\xfb\xb4\xc3\x38\x5a\x81\x00\x4d\x56\x26\x4b\x1a\xee\xd0\x23\x0a\x87\xb4\x31\x45\x2e\x16\x36\x4b\x2d\xfb\x9d\x11\xe5\x25\x0a\xfc\xe9\x5b\x73\xd1\x21\x57\xef\x45\x54\xda\xfb\xbc\xe3\x66\xd4\x11\xb8\x73\x9b\x90\xe1\x61\x23\x88\x92\xf3\x7b\x6a\xdf\xa6\xa2\x37\x47\x55\x5e\x7e\xce\xc7\x46\x28\x18\xc9\x07\xcc\x8a\x2a\x81\x1c\x42\x56\xc7\x30\xa4\x6e\x21\x25\x65\x0f\x7f\xc4\xa1\x0f\x7f\x7d\x95\x6b\xb6\x17\x2c\xc2\x57\xc2\xed\x80\xf4\x55\xe4\x8a\xba\x0d\xc2\xc3\x8d\x64\xd5\xac\x2b\x6e\xa9\x21\x81\xd9\xe2\x17\xc7\x3b\xe5\x9f\xfd\x78\xf5\x37\xd3\x6c\x0d\x5e\x71\x75\xe8\xe4\x5a\xbd\x32\xdb\x54\xd3\x55\x54\x23\xed\x9a\x66\x4d\x3f\x5c\x72\xb7\xd8\x96\xb7\xab\xdf\x9c\xda\xc1\x14\x33\xee\x3a\xe2\xbe\xa6\x66\x4a\x6e\x62\x1c\x6a\x59\xec\xbf\xbe\x74\x41\x0a\x86\xde\x06\x2c\xe1\xb
f\x27\xdb\xe5\x2f\xfb\x8d\x7f\x63\x83\x04\xf6\x3a\x8d\xdf\xc7\x83\x8e\xfd\x79\xb7\x74\x37\x9f\x0e\x8e\xe9\x6f\xed\xb5\xe1\x59\x8f\x9d\xdd\xc4\x7b\xe3\xdb\x9b\xd9\xf6\xeb\x3b\x3f\xfb\x7a\x34\x3c\xf7\xcb\x87\xcd\x74\xbe\xf4\x7e\x3f\x76\x66\xa3\xf5\x6e\x35\x7d\x34\x96\x1f\xcc\x4f\xd9\xd7\x2f\xef\x57\xee\xbb\xc1\xe1\xde\x7b\xf3\xe6\xeb\xbb\x81\xf7\xf5\xf1\xcd\x7b\xfb\xdd\xbe\xe5\xb4\x3f\x1c\x9e\xbe\x7c\xd8\x3a\xde\xed\x7e\x76\xe8\xec\xa7\xeb\xdb\x64\xf2\x76\xf6\xf9\xc3\xe1\xcd\x5b\xf8\xdb\x9b\xad\x13\x3c\x2c\x3f\xbe\xdb\xb7\xbe\x1e\x5a\xef\x6d\xef\x36\xcb\xdf\x7d\xfe\xfa\xe5\xfd\x11\x7c\x1e\x64\xf7\xde\xad\x39\x5d\xdf\xee\x26\x77\xef\x5b\xe0\xf3\xfe\x79\x32\xbe\x8d\x26\xa3\xdb\x74\x3a\x27\xfd\x7f\xb8\x1b\x8c\x3f\xbd\xf5\x1f\x3e\x3c\xbe\x39\xba\x1b\x67\xf9\xf1\xed\xa7\xd1\xfc\xce\x1f\x4f\x46\x93\x74\x7a\x18\x7c\xb1\x0c\x6b\x3b\x35\x3e\x7c\xfc\x30\x7a\x33\xff\xf4\xec\x2c\xad\xc7\xce\x7e\x7a\x18\xbc\x9d\x7b\x93\xe5\x64\xb3\x32\xdc\xdf\x6e\x7b\xd3\xc3\xa0\xed\xb6\x9d\xcc\x3d\x5a\x99\xdd\x7e\x1f\x4c\x0d\xf7\xf0\xf4\xe5\x4d\x08\x3e\xcf\x8e\xd3\x16\xa2\xa7\xbf\x9f\xae\xef\xb6\x1f\xde\x7e\xb8\x9b\xb6\x67\xe6\xd7\xe3\xdd\x7e\xba\xf9\x60\x7c\x1d\x4d\x7e\x19\x3f\x84\x7d\xa7\xfd\x35\x98\xfc\xf6\xd5\x77\x82\x59\x64\x9b\x9d\xc1\x64\x7d\x97\x59\x8f\x93\x25\xf8\xfc\x30\x98\x3c\xef\x57\xf0\xf3\xa7\xc3\x97\xe3\x9d\x37\xf9\x6d\x95\xda\xef\xba\xc7\xdf\x1f\xdf\x87\xee\x6f\x1f\x76\xf7\x5e\x7f\xeb\xb6\xdd\xf6\x34\x70\x8e\xd3\xcd\xe0\xf0\xf5\xd0\x3f\x58\xe3\xdb\x1d\xee\xe3\x30\x59\xc2\x77\xad\xc4\x0e\xac\x1e\x7c\xb7\x8f\xec\x4d\x32\x50\xd2\x7b\xbc\xeb\xde\xcf\x9f\xb7\x39\x9c\x37\xf9\x6d\x39\x98\xac\xdf\xec\xe0\x68\xb2\x84\x73\xc3\xb3\x7e\x7b\xd3\x99\x2c\x8d\xff\x18\x3d\xbe\x31\xc1\xe7\x4f\xed\x87\xcd\xa0\xf3\xfb\xe3\x64\x3f\x5b\x4f\x96\xd6\xfa\xc1\x9c\xae\xef\x96\xd6\x7a\xd9\x99\x8c\xad\xe3\xec\xb1\x63\x92\x31\x7f\xeb\xd9\xef\x3e\xa6\x4f\x9b\xb7\x6b\x60\xba\x07\xbb\xfd\x29\xfb\x3a\x36\x3c\x7b\xf3\xa9\x3d\x19\xdf\x99\x96\x77\x7b\x98\x8d\x9f\x32\xeb\xf1\xf6\x70\x3f\x5e\x2e\xad\xa3\xd5\x9a\xae\x9f\x08\x8f\xe3\xe8\xe8\xbc\x7b\xbb\xfe\x3a\x37\x3c\xe7\xb7\xf7\xbe\x63\x7e\x3a\xb8\x9b\x8f\xde\xef\x4b\xe3\x3f\x7e\x7f\xe7\x20\x99\xbd\x2f\x7e\x8e\xe6\xfb\xdd\xd3\x97\x0f\xe1\xe4\xdd\xd7\xc8\x7e\xb7\x1b\x4c\xbc\xd9\x68\x66\x58\xbb\x87\xa3\xe5\x4d\xde\x21\xf9\x19\x47\x6b\x3c\xc9\xac\xc7\xdd\xf1\x7e\xfe\x31\xb3\x36\xbb\xd6\xfd\xa8\xb3\x9f\x8e\x9d\xdd\x74\x3d\x4b\x2c\xcf\x38\x3c\x1d\x9f\x32\xeb\x60\x98\xd3\xb5\x93\x58\x73\x04\x6b\xec\xad\x43\xe7\x38\x1d\xdf\xb5\xa7\x6b\x27\xb5\xd6\x6f\xd6\xb3\xc7\x4e\x6b\x3a\x7f\xca\x66\x87\xdd\xc1\x1a\x19\x07\xcb\xdb\x1d\x66\x8f\x9d\xc3\x74\x3e\xe9\x4e\xd7\x6f\xad\xd9\x78\xd2\x99\x8e\x1f\x5a\x33\x24\xa7\x87\xe7\xf7\x4f\x07\xe3\x38\x5d\x3f\x27\xa4\xdd\x33\xea\x7f\x6f\x79\x9d\xfd\x74\x7e\xd7\x9a\xae\xad\xc4\x9a\x3f\x67\xb3\x8d\x95\xce\x3c\xd4\x8f\x93\xcd\x48\x9f\xed\xe9\xf8\x0e\xc9\x33\xb5\xd6\x93\xec\x7e\x84\xfb\x30\xed\xf1\xd3\x6e\xba\xbe\x4b\x66\xc7\x87\xcc\x32\x97\xe9\x6c\x7d\x9b\xdd\x7f\xde\xa5\xd6\x71\x82\xe8\x3e\xde\x3f\x76\xda\x4f\x07\xe3\x70\x8f\xfa\x1a\x5b\x3b\x44\x33\x82\x99\x3d\xee\x8c\x99\x67\x74\x2d\xdc\xc7\x83\x89\xe5\xf5\xe0\xaf\xa7\xf3\x49\x07\xf3\x3a\x32\x5a\x16\x9a\x53\x98\x3e\xa3\x4d\x7e\x9f\xb4\xa7\xeb\x4f\xeb\xe9\xfc\xae\x3b\x5d\x3f\xa5\xd6\x7c\x99\xcd\x46\x39\xee\xf9\xc3\x7e\xba\x9e\xa4\xd6\xfa\x19\xd1\x6b\xde\x8f\x3a\xc6\xd3\xf1\x36\xa7\x63\xb7\x9f\x3d\x76\x8e\xd3\xf9\xc7\x2e\xe2\x0f\x8d\xed\xfd\xc8\xe8\x58\xa3\x4e\xcb\x1e\x4f\x48\xbb\xf9\x43\x36\xfb\x8c\xe8\x7b\x48\xac\xd1\x6e\x37\x5d\x2f\x53\x6b\xd4\x31\xa6\xe3\xbb\x6c\xe6\x19\x08\x66\x36\x5a\x3e\xbf\x1f\x61\x18\xe1\x5d\x62\x
79\x9d\xce\x74\x3e\xc9\x66\xef\xac\xd4\x42\x73\xc4\x43\xe3\x80\x9e\x59\x87\xe9\xda\x4a\x67\xe3\x8f\xe8\xd9\x11\xf3\x3b\x7f\x6a\x4f\xd7\xef\x8f\xd8\x66\xcc\xad\x2e\xe6\x97\xb4\x41\x3c\xb7\x9e\x8e\x77\xc6\x74\xfd\x31\xb5\xe6\x16\x1a\xd3\x23\x1e\xef\xf9\x04\xd3\x36\x43\xe3\xfc\x68\x1c\x50\xff\x85\xcc\xd0\x78\xcc\x10\xcd\xe3\x87\xdd\x74\xfd\x31\x99\x79\x68\xac\x3e\xb6\xd0\xb8\x20\xfd\xc0\xbc\xa1\x31\xc6\x7a\x33\x39\x90\x71\xb0\x12\x6b\x3d\x49\x66\xf3\xdb\xcc\x32\xad\xdd\x74\x8c\xfa\xbc\x4b\xad\x43\xa7\x3b\x1d\x4f\x4c\xac\x5b\xf3\x3b\xdc\x3f\xd2\xc9\x87\xa3\x75\x24\x78\x2d\x73\xf6\x88\x7f\x22\xfe\xd0\x98\xb6\x8b\xb1\xc5\x3a\x34\xb6\xba\x48\xf7\x60\xae\x6b\x93\x3b\xe3\x30\x5b\x5b\x99\xe5\xed\x0c\x4c\xa7\xb9\x4b\xb1\x5c\xc6\xef\xd7\x98\xa7\xf1\xdd\x11\xf7\x75\xc4\xbc\xe6\x7a\x8b\xf8\x58\x26\xe8\x99\x65\x5a\x29\x96\xd3\x78\x49\x74\x7f\x84\xf5\xb0\x8d\x64\x6a\xcd\x6f\x49\x1b\xaf\xd3\x7e\x3a\xde\x26\x48\x5e\xd6\x61\xd7\x42\x32\xb0\xd6\x1f\xb3\xfb\xc7\xdd\x7e\x46\x68\x32\xa7\xeb\xe7\x62\xfc\xf6\xd8\x4e\x8e\xef\x76\x44\xee\x56\x36\xf3\x18\xb8\xc4\x9a\x3f\x65\xd6\x67\x6b\x7f\x3f\x42\x74\xde\x26\xb3\xf1\x2d\x99\x03\x18\xe6\xa9\x85\xf5\xe5\xf8\x94\xdd\x7f\xdc\xb1\xbc\xb5\xc3\xe5\x7c\x3e\x69\x13\x19\x5a\xa6\x35\xea\x74\xed\xb1\x95\xdd\x3f\xa2\xf9\xf6\x7e\x8d\xe5\x87\xe6\x39\x99\x6f\x78\xee\xe5\x34\x76\xa7\x73\x24\x07\xab\xa0\xd1\xc0\xfc\x61\x3c\x0e\x6a\xdf\x46\x63\x79\xff\x88\xe0\x9e\xb3\xfb\xfc\xfd\x0c\xf3\x81\xed\x03\xe2\xe9\x58\xfc\x7e\xff\xd8\x39\x20\xdd\xbf\x47\xb2\x9a\xa3\xb9\x6a\xe4\xfa\xbf\xcc\x66\x07\xac\x3b\x1d\xe7\x90\xdb\x91\x39\xc6\x7f\xb0\x0e\x48\xee\x0e\xc2\x4d\xda\x8d\x6f\x53\x2c\x87\xc7\xdd\x01\xeb\x3b\xd1\xb1\x0e\x45\x23\xea\xbf\x3b\x45\x7a\xfd\x68\x74\xb1\xae\x61\x7d\xc9\x7f\x47\x7a\x37\xda\x75\xf1\x58\x60\x3e\xee\x3a\x78\x4c\xe7\x77\x68\x8c\x0e\xf7\xa3\xce\xd1\x1e\xdf\x1d\x48\xbb\x87\x6c\xf6\xd1\x3a\xcc\x8e\x77\x48\x4f\x8f\xb3\xf9\x12\xc3\xcc\xc8\xdc\x3d\xce\x90\x0d\xc0\x7f\x23\x59\xee\x8e\xd8\xc6\x7c\x09\xbd\x29\x5a\xa7\x97\xcf\xfd\xa9\x89\xed\x6d\xf5\x13\xaf\x63\xe8\xdd\xaf\xbf\x5e\x5e\x69\x41\xa8\xc7\x30\x82\x20\x2d\x6e\x0b\x48\x98\x2f\xca\x71\x7e\x1c\x05\xf0\xf2\xb3\xab\x95\xaf\xda\x4c\xc0\x26\xe2\x3e\x67\xa8\x8c\x80\x48\xca\x9d\x15\x3b\xfd\x1c\xeb\xcf\xaa\xcf\x57\xb1\xee\x59\x01\xbe\x6a\x35\xaa\xdf\x4d\xea\xf7\x36\xf5\x7b\x87\xfa\xbd\x4b\xfd\xde\x93\x77\xd0\x62\x37\xf9\x7c\xb1\x21\x91\x66\xdb\xe0\x2e\x8e\x49\xb8\xbb\x4a\xea\x51\xcb\xdb\xc8\x3d\x77\x72\xc1\x74\x79\xaa\x9f\x6b\xca\x01\xf3\xa5\x8e\x65\xd2\x9a\x4a\x69\xca\xe2\xf8\x2a\x42\xe5\x21\x7a\xaa\xe2\xba\x5b\x9c\xd2\xc1\x15\xca\x8c\x6e\xbc\xe4\x96\x93\x7f\x66\x2d\x30\x4b\x9b\xe2\x1c\x83\xa4\x30\xd2\x90\x1e\x64\x60\x4f\x85\x99\x5d\xfe\xdb\xe8\xb2\x92\x5d\x65\x2d\xaf\xe4\x73\xc1\xc5\x39\x8c\x86\xec\x4b\x0c\xaa\x61\xc2\xfc\x9f\xce\x9f\x48\x46\x44\x38\xe5\xa0\xfc\xee\x5a\x06\x5b\x75\x65\x89\xad\x9e\x69\x94\x90\x66\x1d\xa4\xd9\x6d\xb7\x4a\xc8\xf6\x59\xa5\x8e\x08\xb2\xa3\x86\xcc\x33\x67\x05\x64\xb7\x2e\x29\x53\x15\x3f\xfa\x19\xec\xd5\xf5\x5e\x5d\x57\xe8\x67\xf0\x5a\x0d\x49\xdf\x20\xe8\x67\xb0\x5f\x13\xb0\xaf\x3f\x35\xba\x8c\xc1\xa1\x46\xc6\xd4\xfd\x06\x08\xb2\x46\xc6\xd4\x1d\x06\x08\xb2\x46\xc6\x54\x84\x1b\x41\xd6\xc8\x98\xaa\xef\x43\x90\x35\x32\xa6\x0a\xaf\x30\xce\x06\xdb\x42\xc2\x4e\xe4\x05\xcf\x35\x8c\x43\x68\x5c\xf7\xfb\x25\x64\x0d\xe3\x0b\xe8\xb4\xdc\x56\x09\x59\xc3\xf8\x62\x01\xdd\xfc\x4b\x18\x18\x67\x83\x6d\x21\xe9\x3a\x86\x6e\x1d\x8d\x55\xde\x2d\x86\x6e\x1d\x89\xd5\x7d\x70\x31\x74\xeb\x29\x2c\x0e\x2f\x22\x8c\x0d\x06\x5e\xd2\xed\x32\x86\x30\xa8\xa1\x90\xca\x47\x62\xd0\x1a\x1a\xa9\xdb\xf9\x30\x68\x0d\x95\xd4\xd1\x48\x82\xb5\xc1\
xb5\x91\x74\x1f\xc6\x20\x58\xd6\x59\x13\x2a\xfd\x49\x60\xeb\x87\xbc\x7b\x6d\x53\xb0\xf5\x22\x2d\x4e\x60\xe6\x78\x1b\x7c\x2b\x09\x09\xb8\x30\x8b\x59\xd9\x98\x60\xd3\xc5\x19\xdf\x3c\xfb\xce\x2f\x0b\xc9\x6d\xb1\xba\xb3\x88\xa7\xef\x65\xc5\x2e\x54\xcc\x55\x13\x8e\x6d\x9f\xae\x3a\x94\xd7\xcf\xd5\x22\xe5\x2a\x0e\xd5\x0b\xb5\x26\xbb\x7b\x42\x93\x1d\xb9\x61\xd6\xe7\x13\x97\x28\xd4\xde\xbf\x27\x0b\xcc\x8b\x4b\x2a\x47\x18\xcd\x0d\x15\x27\x14\x22\xb1\x2c\x13\xaa\x3a\xb4\xda\xab\x24\x98\x45\x5f\x93\xde\xf3\xf7\xb2\xf2\x50\x05\xbd\x55\x64\x52\x29\x8a\x33\xa8\x32\xbb\x0c\x1a\xe5\xad\x0f\xf9\xd7\xc7\x1b\x38\x01\xd0\x60\x3e\x49\xcc\x84\xbe\x8b\x9b\xfb\x2e\x3f\x84\x76\x98\x86\x97\xd2\x9c\x01\xe5\x55\x91\x53\x35\xbc\x8b\x54\xa5\x56\xeb\x8e\x20\xf5\xce\x2e\x54\x50\x39\xa6\xf8\x1a\x3d\xb9\x63\xfe\xaf\x76\x8a\x29\x00\x5b\x1b\xd4\xd4\x50\xbc\x7d\xfb\x56\x6a\xd4\x0c\xfe\xbc\xaf\x78\xa4\x59\xfc\xea\x06\xfd\x79\xb8\x93\x3b\x88\x1f\x1d\x9a\x9c\xb3\x2a\x4d\x56\x7c\xde\xaf\x3a\x20\x55\xed\x7b\xa8\x87\x55\x22\xad\x7a\x58\xc8\x8c\x6d\x2b\x79\x0a\xf6\x92\xa7\x42\xf1\xd5\x5f\x28\x02\xd1\x90\x16\xbf\xd2\x47\x54\xb8\xe3\xc1\x27\xbe\x1c\x2a\x5a\x32\xe1\xbb\xec\xaa\xee\xa8\xef\xae\x52\xda\x71\x8a\x27\xfe\x66\xc9\x1a\x4a\xd4\xb7\x06\xe5\x84\x14\xf4\xac\xc2\x0d\x8c\xaa\x33\xeb\xf2\x92\x62\xc1\x14\x28\x70\xb0\x66\x9f\x3a\xb8\xc4\x1d\xd5\x56\x7f\xe6\x9c\xfb\x6a\xb2\x64\x0a\x99\x7d\x7e\xdf\x75\xd7\xe9\xb7\x8a\x2f\xc0\x73\xf9\x4c\xf2\x0a\x93\xcc\x9c\xb7\x66\xf2\x91\xe5\x74\x93\x66\xed\x0a\x54\xb7\xb7\xb7\xa2\x47\x51\x8d\x74\x8e\x9e\x2f\xb6\xe9\xf5\x98\xeb\x23\x24\x39\xaf\xb2\xb4\xa1\x69\x83\xa0\xfa\x56\x64\xc1\xae\x03\x7c\xe7\x75\xcb\x30\xb6\x2b\x4d\xc7\x0e\xc4\x95\x38\x17\xba\x86\x34\xf3\x55\x31\x26\xf9\x6e\x2d\x66\x00\x9f\x48\x48\xd8\xd3\xe5\xeb\x2c\x49\xbd\xc5\x41\x2f\x73\x84\xf4\x0e\x16\x53\xa8\x23\x09\x48\xea\xd3\x14\x80\x8a\x74\xab\x30\xf3\xaf\x0d\xe1\xdb\x8f\x9d\x62\x9d\x97\x20\xad\xb9\x16\x8f\x06\x64\x55\x52\xad\x32\xf8\x8b\x03\x85\xb9\x63\x5e\x73\x12\x55\xf5\x20\x46\xdd\x8a\xce\x46\xa3\x91\xb2\xa7\xd1\x68\x44\x23\x04\xcc\x4d\x07\x94\x70\xba\xa2\x70\x0c\xca\x03\x62\x06\x58\x86\x10\xd0\xe3\x55\x33\xfd\xc8\x97\x6f\xb5\x56\x69\x30\x79\x3c\x92\xab\xa5\xe5\x1f\x10\x13\x29\x48\x22\x10\xd4\x37\x89\x3d\xdb\x96\x7c\x2a\x63\xa8\xfd\xd4\xed\xa1\x7f\x92\x11\xe9\xf5\x7a\xdc\xe4\xed\x51\x93\x97\x99\x68\x26\xd7\x4d\xf9\xcd\x72\x61\xdd\xe5\xbe\x4e\x87\x16\x42\x53\x12\x45\x33\xaf\x54\x7b\x8c\x13\x0b\x2d\xbf\x36\x8b\x77\x07\xb6\x4d\x96\xd6\x48\xe6\x7b\xb4\xf9\x9a\x4d\xde\x92\xf1\x1d\xa8\x09\x2d\x15\xad\x53\xaf\x68\x35\x77\x2c\x53\xdf\x75\x13\x6d\x79\x9f\x65\x07\xa9\x42\x33\x81\x11\x88\x41\x1a\x72\xc5\x86\xd2\x6b\xb0\x9a\xdd\x2b\x9e\x28\x7e\x38\x15\x37\x8a\xab\x9c\x2a\xf9\xa0\xab\xa7\x06\xe7\x50\x98\xe2\x27\x17\xbb\xd1\x5e\xf6\x25\x46\xf4\xd8\x3c\x15\xbb\x57\xf1\xd0\x0c\x9d\x14\x6d\x9e\xa5\xf3\xa6\xc7\x35\xe6\xbf\x37\xac\x30\xce\x04\x5c\xf7\xc3\xa5\x27\x9d\x6b\x65\x1c\x93\x5d\xa6\x78\xe5\xa5\x87\x5a\xa1\x6f\x9c\xb6\x98\x06\x3b\x64\x39\x09\xac\x89\x7e\xd9\x30\x08\x73\x6a\xa8\x5d\xab\x3e\xaa\x55\x2a\x61\x14\x7b\x4e\xf5\x89\x4a\xf9\x62\x5c\xc0\x70\x9f\xa2\xa6\xa2\xf7\xab\x6a\x16\xe6\x6d\x16\x10\xa4\xd5\x45\x86\x8c\x9c\x84\xfd\x0f\x5f\x05\x74\x77\x77\x47\xe3\x48\xb4\x9a\x2f\xdc\xcb\x17\x79\x4e\xd8\x03\x8e\x2a\x39\xc6\x93\xb9\xa2\x7a\x34\x95\x44\xc4\xcf\x04\xf2\xa5\xeb\x24\x10\x94\x24\x19\xc2\x21\x7e\xe1\xbf\xb4\x40\xf8\x43\xf5\xda\xbf\x4d\xac\xdf\xef\x3f\xcc\x6f\x67\x73\x89\x68\x4f\xd0\xf8\x57\xd7\xdb\x4a\xd6\x29\xc5\x74\x50\x35\x92\xf8\x2d\xad\xbe\xc4\x6f\xe9\xa9\x2c\x26\x71\xfc\x80\x97\x73\x2a\x59\xd5\x95\x4d\xc5\xf9\x57\x29\x50\x99\x26\x09\x33
\x67\xa5\x3b\xc0\xf7\xc3\x8c\xfd\x78\xe0\x89\xef\x0e\x3e\xaf\xd2\x8d\xff\x7f\xe7\xab\x84\x27\x83\x76\x55\xca\xaa\x38\x83\xca\x2f\xb2\x86\x79\x55\x6b\x39\x69\x01\x17\xa5\xc0\xdc\x09\x77\x01\xce\xf5\xb6\x35\x81\x10\xcd\x0a\x83\xf0\xb2\x66\x3f\x22\x3a\x76\x8c\xf9\x04\xe8\xdf\xc9\xca\x45\x03\xfd\x93\xcf\x14\x3e\xd3\x64\x48\x58\xa5\x8f\x9b\x48\xd3\xbb\x22\xb0\xb4\x2a\xfe\xe6\xc5\x62\x10\x3f\xda\xce\x1e\xc9\x2c\xcf\x0c\x0c\x06\x4a\x42\x4e\xcc\x4e\xea\xc8\x2a\x55\x36\xce\xaa\x81\x70\x38\x8d\x48\x54\xd5\x65\xcc\x74\x99\xba\x32\x23\x5d\x23\xe8\x94\xb1\x94\x42\x73\x21\x9b\xf8\xff\x76\xb7\x99\x5b\x59\x96\xc6\xfc\xd8\x9f\x60\x71\x7a\x6c\x30\xf5\x5f\xc3\xbc\xd0\x6c\xd6\xcd\xfa\x62\xbd\xc1\x3f\xf9\x68\x5f\xb5\x1b\x56\x5d\x98\xc4\x36\xaf\x09\x6b\x9d\x0c\x40\xb5\x39\x47\x39\xe6\x6b\x56\xc3\x08\x38\xf8\xf2\xa2\x66\x57\xd2\x75\x79\x71\x80\xe2\xc6\x33\x6a\xd2\xd1\x33\xb6\x88\x5d\x55\x1f\xbe\x66\xb0\xda\x92\xad\x79\x5f\xc6\xfa\x5f\x29\x8b\x59\x6e\x11\x2b\x9d\x39\xd3\x8e\x2b\xf0\xca\x57\xeb\x97\x1a\x59\xa5\x75\x55\x84\xd8\xb8\x6b\x75\x78\xe7\x8c\x0c\x50\x75\x42\x46\x02\x81\x26\x94\x08\xc0\xad\x18\x35\x3c\x2b\x3e\xaf\xa4\x64\x84\xf6\x1c\x85\x3b\x84\xf3\x4b\x1f\xea\xa9\xce\x81\xe4\x84\x8b\xeb\x4d\xe9\xac\xad\xa0\xf3\x2c\xdc\x5b\xa2\x9c\x3d\xa6\xbc\xbd\xc2\x2b\xfb\x27\x4c\x25\x46\x53\xa0\xe9\x38\xd7\xe0\x1c\x7a\xb8\xbd\x96\xb0\x41\xe4\x66\x49\xfd\x96\x5a\xa1\x75\xa7\xeb\xda\xcb\x9d\xbf\x84\xda\x73\x34\x86\xf0\x2b\x15\x5c\xe9\xc0\x14\xe7\x57\xd9\x9b\xd2\x7e\x60\x66\x9f\x00\x36\xcd\x5e\x43\x33\x8d\x4e\x43\x6b\x99\x26\x89\x9c\x9c\x66\x91\xb2\x39\x8a\xcb\x65\x38\xfd\x4c\xd2\x38\x2c\x77\x75\x52\xc7\xed\x84\xf1\x74\x20\xe8\xb5\x6c\x19\x6a\xd9\x5e\x40\x70\x07\xaf\x7b\xe8\x1f\xe7\xce\x4b\x67\x88\xb0\x99\x2e\x3f\x16\xc1\x76\x9b\x09\xdf\x9b\x36\xa4\xe8\x55\x37\x82\x42\x28\xc3\x4a\x25\x1f\x64\xf9\x0d\x66\x23\xdd\x92\x7e\xdc\xba\x23\x9d\x69\x2f\x71\x7c\xe5\x74\x29\xbf\x38\xc7\x7c\x8a\x4c\xce\x52\xd3\x0d\x53\x1d\x97\x4d\x48\x63\x25\x82\x7b\xda\x73\x9c\x5e\x5e\xf6\xa1\x40\x76\x80\xbe\x1f\xee\xf8\x6a\x11\x6a\x7a\x89\x37\xe5\xcb\xb1\xb1\x8b\x37\xb3\x28\x2b\xf4\x52\x82\xa8\x38\x4e\xac\x30\xa3\x6c\x50\x54\xc5\x1b\x83\xe4\x1c\x6b\x42\x30\x7d\xff\x2a\x5f\xdf\x2d\xf7\x01\x47\x91\x85\x1a\xd5\x53\xec\x3e\xf8\x2a\x4e\x39\x21\xac\xad\xa8\x51\x89\x4d\x75\x9b\xab\xca\xd2\x17\xad\xc7\xe3\xb1\xd8\x94\x5b\x53\xe8\x25\x04\x47\xef\xfe\x4f\x00\x00\x00\xff\xff\x05\x25\xc2\x5c\x83\xaf\x00\x00") + +func filesStylesCssBytes() ([]byte, error) { + return bindataRead( + _filesStylesCss, + "files/styles.css", + ) +} + +func filesStylesCss() (*asset, error) { + bytes, err := filesStylesCssBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "files/styles.css", size: 44931, mode: os.FileMode(420), modTime: time.Unix(1509733236, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _filesToggle_switchCss = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x94\x52\xd1\x6e\xe2\x30\x10\x7c\xe7\x2b\x56\x42\x7d\x3b\xa3\xd0\x36\xba\x93\x2b\x55\xe2\x53\x9c\x78\x93\xac\x30\xde\xc8\x36\x84\xe3\xd4\x7f\x3f\x25\x76\x4a\x68\x93\x48\x85\x07\xc4\x78\x3c\xde\x99\x9d\x5d\xe0\xba\x36\x28\x7c\x47\xa1\x6c\xe0\xdf\x06\x00\xa0\x65\x4f\x81\xd8\x4a\x50\x85\x67\x73\x0e\xf8\x36\xe0\x81\x5b\x09\xcf\x59\x7b\x8d\x7f\x1d\xd5\x4d\x98\x02\x05\x3b\x8d\x4e\xc2\xbe\xbd\x82\x67\x43\x3a\xc2\xe5\xd9\x79\x76\x12\x5a\x26\x1b\xd0\x45\x50\x93\x6f\x8d\xfa\x2b\x81\xac\x21\x8b\xa2\x30\x5c\x1e\xd3\x33\x78\x0d\x42\x19\xaa\xad\x04\x83\x55\x88\x28\x5f\xd0\x55\x86\x3b\x09\x0d\x69\x8d\x36\xa2\xc3\xdd\x06\xe3\x24\x7f\xc6\x41\x4e\x64\x45\x47\x3a\x34\x12\x7e\xe7\x3d\xf8\xb1\xd9\x3c\x3a\xdd\x69\xf2\xaa\x30\xa8\xe1\x1d\x34\x5d\xe0\x1d\x7c\xab\xec\xee\x68\xb9\x48\x21\x14\xaa\x3c\xd6\x8e\xcf\x56\x4b\xd8\x1e\x0e\x87\x19\x91\xe1\x4e\xa2\xff\xd4\x64\x65\x58\x85\xa9\xbf\xd1\xc4\x3e\xcb\x9e\x66\xbc\xdd\x53\x6e\x95\xd6\x64\x6b\x09\xaf\x23\x32\x0d\xac\xc4\xfb\xf3\x29\x82\x97\x97\x24\xd8\x35\x14\x50\xf8\x56\x95\x28\xc1\x72\xe7\x54\xfb\xb6\x49\x9b\xbb\x0a\x4f\xb7\x41\x36\x6e\x51\x14\x9c\xd4\x05\x8b\xd5\xe3\x13\xdf\xd6\x09\x1d\x16\x47\x0a\x8b\x9c\xef\xb1\xc6\x95\x7c\xed\xa2\x43\xa3\x02\x5d\xf0\xc1\xdb\x3e\xef\xd3\xfa\x2e\xb1\xb4\x49\x87\x7a\xda\x55\xd1\xe7\x3f\x29\x2c\x6c\xcb\xb2\x7c\x20\xa4\x92\xcf\x32\x3e\x75\x45\xc9\xa6\x5f\xfe\xb6\xca\xfb\xef\x63\xfa\xaf\x29\xfd\x9b\x20\xab\xf1\x3a\x6c\x78\x76\xe4\xf8\x2b\x78\xac\x54\x9c\x6d\xc1\xdf\x48\xae\xaa\x07\xb6\xc8\xb3\xa7\x15\x7a\x4f\xfa\x05\x0b\x87\x83\xd5\xa4\x76\x1f\x76\xed\xf5\x5e\x6e\xec\xff\x98\x40\x55\x7d\x4d\x47\xc2\x36\xcb\xf2\xaa\x8f\x6d\x59\x6a\xfa\xf8\xba\xd6\xf3\xf0\x99\xd1\x12\xca\xd2\x49\x05\x4c\x2a\xc1\x29\x3b\x76\x67\x18\x34\xdb\xe5\xfe\xb3\xd2\x6b\xa7\x7d\xa3\xd7\xce\x53\xa1\x17\x28\x1f\x9b\xff\x01\x00\x00\xff\xff\x9a\x5e\xc9\xe4\x55\x05\x00\x00") + +func filesToggle_switchCssBytes() ([]byte, error) { + return bindataRead( + _filesToggle_switchCss, + "files/toggle_switch.css", + ) +} + +func filesToggle_switchCss() (*asset, error) { + bytes, err := filesToggle_switchCssBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "files/toggle_switch.css", size: 1365, mode: os.FileMode(420), modTime: time.Unix(1509717508, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +// Asset loads and returns the asset for the given name. +// It returns an error if the asset could not be found or +// could not be loaded. +func Asset(name string) ([]byte, error) { + cannonicalName := strings.Replace(name, "\\", "/", -1) + if f, ok := _bindata[cannonicalName]; ok { + a, err := f() + if err != nil { + return nil, fmt.Errorf("Asset %s can't read by error: %v", name, err) + } + return a.bytes, nil + } + return nil, fmt.Errorf("Asset %s not found", name) +} + +// MustAsset is like Asset but panics when Asset would return an error. +// It simplifies safe initialization of global variables. +func MustAsset(name string) []byte { + a, err := Asset(name) + if err != nil { + panic("asset: Asset(" + name + "): " + err.Error()) + } + + return a +} + +// AssetInfo loads and returns the asset info for the given name. +// It returns an error if the asset could not be found or +// could not be loaded. 
+func AssetInfo(name string) (os.FileInfo, error) { + cannonicalName := strings.Replace(name, "\\", "/", -1) + if f, ok := _bindata[cannonicalName]; ok { + a, err := f() + if err != nil { + return nil, fmt.Errorf("AssetInfo %s can't read by error: %v", name, err) + } + return a.info, nil + } + return nil, fmt.Errorf("AssetInfo %s not found", name) +} + +// AssetNames returns the names of the assets. +func AssetNames() []string { + names := make([]string, 0, len(_bindata)) + for name := range _bindata { + names = append(names, name) + } + return names +} + +// _bindata is a table, holding each asset generator, mapped to its name. +var _bindata = map[string]func() (*asset, error){ + "files/angular-toggle-switch.min.js": filesAngularToggleSwitchMinJs, + "files/checksout.html": filesChecksoutHtml, + "files/checksout.js": filesChecksoutJs, + "files/favicon.ico": filesFaviconIco, + "files/images/maintainers.png": filesImagesMaintainersPng, + "files/images/meowser.png": filesImagesMeowserPng, + "files/images/pending_approval.png": filesImagesPending_approvalPng, + "files/images/received_approval.png": filesImagesReceived_approvalPng, + "files/logo.svg": filesLogoSvg, + "files/main_styles.css": filesMain_stylesCss, + "files/styles.css": filesStylesCss, + "files/toggle_switch.css": filesToggle_switchCss, +} + +// AssetDir returns the file names below a certain +// directory embedded in the file by go-bindata. +// For example if you run go-bindata on data/... and data contains the +// following hierarchy: +// data/ +// foo.txt +// img/ +// a.png +// b.png +// then AssetDir("data") would return []string{"foo.txt", "img"} +// AssetDir("data/img") would return []string{"a.png", "b.png"} +// AssetDir("foo.txt") and AssetDir("notexist") would return an error +// AssetDir("") will return []string{"data"}. 
+func AssetDir(name string) ([]string, error) { + node := _bintree + if len(name) != 0 { + cannonicalName := strings.Replace(name, "\\", "/", -1) + pathList := strings.Split(cannonicalName, "/") + for _, p := range pathList { + node = node.Children[p] + if node == nil { + return nil, fmt.Errorf("Asset %s not found", name) + } + } + } + if node.Func != nil { + return nil, fmt.Errorf("Asset %s not found", name) + } + rv := make([]string, 0, len(node.Children)) + for childName := range node.Children { + rv = append(rv, childName) + } + return rv, nil +} + +type bintree struct { + Func func() (*asset, error) + Children map[string]*bintree +} +var _bintree = &bintree{nil, map[string]*bintree{ + "files": &bintree{nil, map[string]*bintree{ + "angular-toggle-switch.min.js": &bintree{filesAngularToggleSwitchMinJs, map[string]*bintree{}}, + "checksout.html": &bintree{filesChecksoutHtml, map[string]*bintree{}}, + "checksout.js": &bintree{filesChecksoutJs, map[string]*bintree{}}, + "favicon.ico": &bintree{filesFaviconIco, map[string]*bintree{}}, + "images": &bintree{nil, map[string]*bintree{ + "maintainers.png": &bintree{filesImagesMaintainersPng, map[string]*bintree{}}, + "meowser.png": &bintree{filesImagesMeowserPng, map[string]*bintree{}}, + "pending_approval.png": &bintree{filesImagesPending_approvalPng, map[string]*bintree{}}, + "received_approval.png": &bintree{filesImagesReceived_approvalPng, map[string]*bintree{}}, + }}, + "logo.svg": &bintree{filesLogoSvg, map[string]*bintree{}}, + "main_styles.css": &bintree{filesMain_stylesCss, map[string]*bintree{}}, + "styles.css": &bintree{filesStylesCss, map[string]*bintree{}}, + "toggle_switch.css": &bintree{filesToggle_switchCss, map[string]*bintree{}}, + }}, +}} + +// RestoreAsset restores an asset under the given directory +func RestoreAsset(dir, name string) error { + data, err := Asset(name) + if err != nil { + return err + } + info, err := AssetInfo(name) + if err != nil { + return err + } + err = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755)) + if err != nil { + return err + } + err = ioutil.WriteFile(_filePath(dir, name), data, info.Mode()) + if err != nil { + return err + } + err = os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime()) + if err != nil { + return err + } + return nil +} + +// RestoreAssets restores an asset under the given directory recursively +func RestoreAssets(dir, name string) error { + children, err := AssetDir(name) + // File + if err != nil { + return RestoreAsset(dir, name) + } + // Dir + for _, child := range children { + err = RestoreAssets(dir, filepath.Join(name, child)) + if err != nil { + return err + } + } + return nil +} + +func _filePath(dir, name string) string { + cannonicalName := strings.Replace(name, "\\", "/", -1) + return filepath.Join(append([]string{dir}, strings.Split(cannonicalName, "/")...)...) +} + diff --git a/web/status_hook.go b/web/status_hook.go new file mode 100644 index 0000000..3d56f5e --- /dev/null +++ b/web/status_hook.go @@ -0,0 +1,240 @@ +/* + +SPDX-Copyright: Copyright (c) Capital One Services, LLC +SPDX-License-Identifier: Apache-2.0 +Copyright 2017 Capital One Services, LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and limitations under the License. + +*/ +package web + +import ( + "context" + "fmt" + + log "github.com/Sirupsen/logrus" + "github.com/capitalone/checks-out/model" + "github.com/capitalone/checks-out/notifier" + "github.com/capitalone/checks-out/remote" +) + +type StatusResponse struct { + SHA *string `json:"sha,omitempty"` + Tag *string `json:"tag,omitempty"` + Err string `json:"error,omitempty"` + Info string `json:"info,omitempty"` +} + +func generateError(msg string, err error, v model.PullRequest, slug string, + resp *StatusResponse, mw *notifier.MessageWrapper) { + log.Warnf("%s: %s", msg, err) + resp.Err = err.Error() + mw.Merge(notifier.BuildErrorMessage(v.Issue.Title, v.Issue.Number, slug, err.Error())) +} + +func (hook *StatusHook) Process(c context.Context) (interface{}, error) { + + if hook.Status.State != "success" { + return nil, nil + } + + params, err := GetHookParameters(c, hook.Repo.Slug, true) + if err != nil { + return nil, err + } + repo := params.Repo + user := params.User + config := params.Config + maintainer := params.Snapshot + + merged := map[string]StatusResponse{} + + log.Debug("calling getPullRequestsForCommit for sha", hook.SHA) + pullRequests, err := remote.GetPullRequestsForCommit(c, user, hook.Repo, &hook.SHA) + log.Debugf("sha for commit is %s, pull requests are: %v", hook.SHA, pullRequests) + + if err != nil { + notifier.SendErrorMessage(c, config, "Unknown", 0, hook.Repo.Slug, err.Error()) + return nil, err + } + + //check the statuses of all of the checks on the branches for this commit + for _, v := range pullRequests { + mw := ¬ifier.MessageWrapper{ + MessageHeader: notifier.MessageHeader{ + PrName: v.Issue.Title, + PrNumber: v.Issue.Number, + Slug: hook.Repo.Slug, + }, + } + + id := fmt.Sprintf("%d", v.Number) + //if all of the statuses are success, then merge and create a tag for the version + if v.Branch.Mergeable { + result := StatusResponse{} + + files, err := getPullRequestFiles(c, user, hook.Repo, v.Number) + if err != nil { + generateError("Unable to get pull request files", err, v, hook.Repo.Slug, &result, mw) + merged[id] = result + sendMessage(c, config, mw) + continue + } + + req := &model.ApprovalRequest{ + Config: config, + Maintainer: maintainer, + PullRequest: &v, + Repository: repo, + Files: files, + } + + policy := model.FindApprovalPolicy(req) + mergeConfig := req.Config.GetMergeConfig(policy) + + if !mergeConfig.Enable { + result.Info = "merge config not enabled" + merged[id] = result + sendMessage(c, config, mw) + continue + } + + success, err := remote.HasRequiredStatus(c, user, hook.Repo, v.Branch.BaseName, v.Branch.CompareSHA) + + if err != nil { + generateError("Unable to test commit statuses", err, v, hook.Repo.Slug, &result, mw) + merged[id] = result + sendMessage(c, config, mw) + continue + } + + if !success { + result.Info = "required status checks are not passed" + merged[id] = result + sendMessage(c, config, mw) + continue + } + + if mergeConfig.UpToDate { + behind, err2 := isBehind(c, user, repo, v.Branch) + if err2 != nil { + generateError("Unable to compare branches", err2, v, hook.Repo.Slug, &result, mw) + merged[id] = result + sendMessage(c, config, mw) + 
continue + } + if behind { + result.Info = "compare branch is behind base branch" + merged[id] = result + sendMessage(c, config, mw) + continue + } + } + + SHA, err := doMerge(c, user, hook, req, policy, mergeConfig.Method) + + if err != nil { + generateError("Unable to merge pull request", err, v, hook.Repo.Slug, &result, mw) + merged[id] = result + sendMessage(c, config, mw) + continue + } + + mw.Messages = append(mw.Messages, notifier.MessageInfo{ + Message: "merged", + Type: model.CommentMerge, + }) + + result.SHA = SHA + + tag, err := tagIfEnabled(c, user, hook, req, policy, SHA) + if err != nil { + generateError("Unable to tag", err, v, hook.Repo.Slug, &result, mw) + merged[id] = result + sendMessage(c, config, mw) + continue + } + + result.Tag = tag + + if tag != nil { + mw.Messages = append(mw.Messages, notifier.MessageInfo{ + Message: fmt.Sprintf("Tag %s has been added", *tag), + Type: model.CommentTag, + }) + } + + if mergeConfig.Delete && req.PullRequest.Branch.CompareOwner == hook.Repo.Owner { + err = doMergeDelete(c, user, hook, req) + if err != nil { + generateError("Unable to delete merged branch", err, v, hook.Repo.Slug, &result, mw) + merged[id] = result + sendMessage(c, config, mw) + continue + } + mw.Messages = append(mw.Messages, notifier.MessageInfo{ + Message: fmt.Sprintf("Branch %s has been deleted", req.PullRequest.Branch.CompareName), + Type: model.CommentDelete, + }) + } + + if config.Deployment.Enable { + doDeployment(c, user, config, hook, v.Branch.BaseName) + mw.Messages = append(mw.Messages, notifier.MessageInfo{ + Message: fmt.Sprintf("Deployment has been triggered from branch %s", v.Branch.BaseName), + Type: model.CommentDeployment, + }) + } + merged[id] = result + sendMessage(c, config, mw) + } + } + log.Debugf("processed status for %s. received %v ", repo.Slug, hook) + + return merged, nil +} + +func sendMessage(c context.Context, config *model.Config, mw *notifier.MessageWrapper) { + notifier.SendMessage(c, config, *mw) +} + +func doDeployment(c context.Context, user *model.User, config *model.Config, hook *StatusHook, baseName string) error { + var err error + if dc, ok := config.Deployment.DeploymentMap[baseName]; ok { + //if we are + env := "" + if dc.Environment != nil { + env = *dc.Environment + } + if len(dc.Tasks) == 0 && env != "" { + scheduleDeployment(c, user, hook.Repo, baseName, nil, env) + } else { + for _, task := range dc.Tasks { + scheduleDeployment(c, user, hook.Repo, baseName, &task, env) + } + } + } + return err +} + +func scheduleDeployment(c context.Context, user *model.User, repo *model.Repo, baseName string, task *string, env string) { + di := model.DeploymentInfo{ + Ref: baseName, + Task: *task, + Environment: env, + } + err := remote.ScheduleDeployment(c, user, repo, di) + if err != nil { + log.Warnf("Unable to schedule deployment %v: %s", di, err) + } +} diff --git a/web/status_hook_test.go b/web/status_hook_test.go new file mode 100644 index 0000000..e294c3c --- /dev/null +++ b/web/status_hook_test.go @@ -0,0 +1,700 @@ +/* + +SPDX-Copyright: Copyright (c) Capital One Services, LLC +SPDX-License-Identifier: Apache-2.0 +Copyright 2017 Capital One Services, LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and limitations under the License. + +*/ +package web + +import ( + "context" + "errors" + "math" + "net/http" + "regexp" + "strconv" + "testing" + "time" + + "github.com/capitalone/checks-out/model" + "github.com/capitalone/checks-out/remote" + "github.com/capitalone/checks-out/strings/lowercase" + "github.com/capitalone/checks-out/strings/rxserde" + + "github.com/gin-gonic/gin" + version "github.com/hashicorp/go-version" + "github.com/stretchr/testify/assert" +) + +func TestHandleTimestampMillis(t *testing.T) { + c := &model.TagConfig{Alg: "timestamp-millis"} + stamp, err := handleTimestamp(c) + if err != nil { + t.Errorf("didn't expect error: %v", err) + } + m := time.Now().UTC().Unix() + m1, err := strconv.ParseInt(stamp, 10, 64) + if err != nil { + t.Error(err) + } + if math.Abs(float64(m-m1)) > 100 { + t.Errorf("shouldn't be that different: %d, %d", m, m1) + } +} + +func TestHandleTimestamp3339(t *testing.T) { + c := &model.TagConfig{Alg: "timestamp-rfc3339"} + stamp, err := handleTimestamp(c) + if err != nil { + t.Errorf("didn't expect error: %v", err) + } + //should be able to parse with rfc3339 + t2, err := time.Parse(modifiedRFC3339, stamp) + if err != nil { + t.Error(err) + } + round := t2.Format(modifiedRFC3339) + if round != stamp { + t.Errorf("Expected to be same, but wasn't %s, %s", stamp, round) + } +} + +type myR struct { + remote.Remote +} + +func (m *myR) ListTags(c context.Context, u *model.User, r *model.Repo) ([]model.Tag, error) { + return []model.Tag{ + "a", + "0.1.0", + "0.0.1", + }, nil +} + +func (m *myR) GetCommentsSinceHead(c context.Context, u *model.User, r *model.Repo, num int, noUIMerge bool) ([]*model.Comment, error) { + return []*model.Comment{ + { + Author: lowercase.Create("test_guy"), + Body: "I approve version:0.1.0", + }, + { + Author: lowercase.Create("not_test_guy"), + Body: "this is not an I approve comment", + }, + { + Author: lowercase.Create("not_test_guy"), + Body: "I approve", + }, + { + Author: lowercase.Create("test_guy"), + Body: "I approve version:0.1.0", + }, + { + Author: lowercase.Create("test_guy2"), + Body: "I approve", + }, + { + Author: lowercase.Create("test_guy3"), + Body: "I approve version:0.0.1", + }, + }, nil +} + +func TestGetMaxVersionComment(t *testing.T) { + c := &gin.Context{} + + remote.ToContext(c, &myR{}) + config := model.NonEmptyConfig() + config.Tag.Enable = true + m := &model.MaintainerSnapshot{ + People: map[string]*model.Person{ + "test_guy": &model.Person{ + Name: "test_guy", + }, + "test_guy2": &model.Person{ + Name: "test_guy2", + }, + "test_guy3": &model.Person{ + Name: "test_guy3", + }, + }, + } + i := model.PullRequest{ + Issue: model.Issue{Author: lowercase.Create("test_guy")}, + } + comments := []model.Feedback{ + &model.Comment{ + Author: lowercase.Create("test_guy"), + Body: "I approve version:0.1.0", + }, + &model.Comment{ + Author: lowercase.Create("not_test_guy"), + Body: "this is not an I approve comment", + }, + &model.Comment{ + Author: lowercase.Create("not_test_guy"), + Body: "I approve", + }, + &model.Comment{ + Author: lowercase.Create("test_guy"), + Body: "I approve version:0.1.0", + }, + &model.Comment{ + Author: 
lowercase.Create("test_guy2"), + Body: "I approve", + }, + &model.Comment{ + Author: lowercase.Create("test_guy3"), + Body: "I approve version:0.0.1", + }, + } + request := &model.ApprovalRequest{ + Config: config, + Maintainer: m, + PullRequest: &i, + ApprovalComments: comments, + } + ver := getMaxVersionComment(request, model.DefaultApprovalPolicy()) + if ver == nil { + t.Fatalf("Got nil for version") + } + expected, _ := version.NewVersion("0.1.0") + if !expected.Equal(ver) { + t.Errorf("Expected %s, got %s", expected.String(), ver.String()) + } +} + +func TestGetMaxVersionCommentBadPattern(t *testing.T) { + c := &gin.Context{} + + remote.ToContext(c, &myR{}) + config := model.NonEmptyConfig() + config.Tag.Enable = true + config.Pattern = rxserde.RegexSerde{Regex: nil} + m := &model.MaintainerSnapshot{ + People: map[string]*model.Person{ + "test_guy": &model.Person{ + Name: "test_guy", + }, + "test_guy2": &model.Person{ + Name: "test_guy2", + }, + "test_guy3": &model.Person{ + Name: "test_guy3", + }, + }, + } + i := model.PullRequest{ + Issue: model.Issue{Author: lowercase.Create("test_guy")}, + } + comments := []model.Feedback{ + &model.Comment{ + Author: lowercase.Create("test_guy"), + Body: "I approve version:0.1.0", + }, + &model.Comment{ + Author: lowercase.Create("not_test_guy"), + Body: "this is not an I approve comment", + }, + &model.Comment{ + Author: lowercase.Create("not_test_guy"), + Body: "I approve", + }, + &model.Comment{ + Author: lowercase.Create("test_guy"), + Body: "not an approval comment", + }, + &model.Comment{ + Author: lowercase.Create("test_guy"), + Body: "I approve version:0.1.0", + }, + &model.Comment{ + Author: lowercase.Create("test_guy2"), + Body: "I approve", + }, + &model.Comment{ + Author: lowercase.Create("test_guy3"), + Body: "I approve version:0.0.1", + }, + } + request := &model.ApprovalRequest{ + Config: config, + Maintainer: m, + PullRequest: &i, + ApprovalComments: comments, + } + ver := getMaxVersionComment(request, model.DefaultApprovalPolicy()) + if ver != nil { + t.Fatal("Should get nil for version. 
Version is ", ver) + } +} + +func TestGetMaxVersionCommentNoSelfApproval(t *testing.T) { + c := &gin.Context{} + + remote.ToContext(c, &myR{}) + policy := model.DefaultApprovalPolicy() + policy.Match.Matcher.(*model.MaintainerMatch).Self = false + config := model.NonEmptyConfig() + config.Approvals[0] = policy + m := &model.MaintainerSnapshot{ + People: map[string]*model.Person{ + "test_guy": &model.Person{ + Name: "test_guy", + }, + "test_guy2": &model.Person{ + Name: "test_guy2", + }, + "test_guy3": &model.Person{ + Name: "test_guy3", + }, + }, + } + i := model.PullRequest{ + Issue: model.Issue{Author: lowercase.Create("test_guy")}, + } + comments := []model.Feedback{ + &model.Comment{ + Author: lowercase.Create("test_guy"), + Body: "I approve version:0.1.0", + }, + &model.Comment{ + Author: lowercase.Create("not_test_guy"), + Body: "this is not an I approve comment", + }, + &model.Comment{ + Author: lowercase.Create("not_test_guy"), + Body: "I approve", + }, + &model.Comment{ + Author: lowercase.Create("test_guy"), + Body: "I approve version:0.1.0", + }, + &model.Comment{ + Author: lowercase.Create("test_guy2"), + Body: "I approve", + }, + &model.Comment{ + Author: lowercase.Create("test_guy3"), + Body: "I approve version:0.0.1", + }, + } + request := &model.ApprovalRequest{ + Config: config, + Maintainer: m, + PullRequest: &i, + ApprovalComments: comments, + } + ver := getMaxVersionComment(request, policy) + if ver == nil { + t.Fatal("Got nil for version") + } + expected, _ := version.NewVersion("0.0.1") + if !expected.Equal(ver) { + t.Errorf("Expected %s, got %s", expected.String(), ver.String()) + } +} + +func TestGetMaxExistingTagFound(t *testing.T) { + ver := getMaxExistingTag([]model.Tag{ + "a", + "0.1.0", + "0.0.1", + }) + + expected, _ := version.NewVersion("0.1.0") + if !expected.Equal(ver) { + t.Errorf("Expected %s, got %s", expected.String(), ver.String()) + } +} + +func TestGetMaxExistingTagNotFound(t *testing.T) { + ver := getMaxExistingTag([]model.Tag{ + "a", + "b", + "c", + }) + + expected, _ := version.NewVersion("0.0.0") + if !expected.Equal(ver) { + t.Errorf("Expected %s, got %s", expected.String(), ver.String()) + } +} + +func TestHandleSemver(t *testing.T) { + c := &gin.Context{} + + remote.ToContext(c, &myR{}) + config := model.NonEmptyConfig() + config.Tag.Enable = true + m := &model.MaintainerSnapshot{ + People: map[string]*model.Person{ + "test_guy": &model.Person{ + Name: "test_guy", + }, + "test_guy2": &model.Person{ + Name: "test_guy2", + }, + "test_guy3": &model.Person{ + Name: "test_guy3", + }, + }, + } + user := &model.User{} + repo := &model.Repo{} + hook := &StatusHook{ + Repo: &model.Repo{ + Owner: "test_guy", + Name: "test_repo", + }, + } + pr := &model.PullRequest{ + Issue: model.Issue{ + Author: lowercase.Create("test_guy"), + }, + } + request := &model.ApprovalRequest{ + Config: config, + Maintainer: m, + PullRequest: pr, + Repository: repo, + } + ver, err := handleSemver(c, user, hook, request, model.DefaultApprovalPolicy()) + if err != nil { + t.Fatalf("unexpected error %s", err) + } + expected, _ := version.NewVersion("0.1.1") + if expected.String() != ver { + t.Errorf("Expected %s, got %s", expected.String(), ver) + } +} + +type myR2 struct { + remote.Remote +} + +func (m *myR2) ListTags(c context.Context, u *model.User, r *model.Repo) ([]model.Tag, error) { + return []model.Tag{ + "a", + "0.0.1", + "0.0.2", + }, nil +} + +var testComments = []model.Feedback{ + &model.Comment{ + Author: lowercase.Create("test_guy"), + Body: "I approve 
version:0.1.0", + }, + &model.Comment{ + Author: lowercase.Create("not_test_guy"), + Body: "this is not an I approve comment", + }, + &model.Comment{ + Author: lowercase.Create("not_test_guy"), + Body: "I approve", + }, + &model.Comment{ + Author: lowercase.Create("test_guy"), + Body: "I approve version:0.1.0", + }, + &model.Comment{ + Author: lowercase.Create("test_guy2"), + Body: "I approve", + }, + &model.Comment{ + Author: lowercase.Create("test_guy3"), + Body: "I approve version:0.0.1", + }, +} + +func TestHandleSemver2(t *testing.T) { + c := &gin.Context{} + + remote.ToContext(c, &myR2{}) + config := model.NonEmptyConfig() + config.Tag.Enable = true + m := &model.MaintainerSnapshot{ + People: map[string]*model.Person{ + "test_guy": { + Name: "test_guy", + }, + "test_guy2": { + Name: "test_guy2", + }, + "test_guy3": { + Name: "test_guy3", + }, + }, + } + user := &model.User{} + repo := &model.Repo{} + hook := &StatusHook{ + Repo: &model.Repo{ + Owner: "test_guy", + Name: "test_repo", + }, + } + pr := &model.PullRequest{ + Issue: model.Issue{ + Author: lowercase.Create("test_guy"), + }, + } + request := &model.ApprovalRequest{ + Config: config, + Maintainer: m, + PullRequest: pr, + Repository: repo, + ApprovalComments: testComments, + DisapprovalComments: testComments, + } + ver, err := handleSemver(c, user, hook, request, model.DefaultApprovalPolicy()) + if err != nil { + t.Fatalf("unexpected error %s", err) + } + expected, _ := version.NewVersion("0.1.0") + if expected.String() != ver { + t.Errorf("Expected %s, got %s", expected.String(), ver) + } +} + +type myR3 struct { + remote.Remote +} + +func (m *myR3) ListTags(c context.Context, u *model.User, r *model.Repo) ([]model.Tag, error) { + return nil, errors.New("This is an error") +} + +func (m *myR3) GetCommentsSinceHead(c context.Context, u *model.User, r *model.Repo, num int, noUIMerge bool) ([]*model.Comment, error) { + return []*model.Comment{ + { + Author: lowercase.Create("test_guy"), + Body: "I approve version:0.1.0", + }, + { + Author: lowercase.Create("not_test_guy"), + Body: "this is not an I approve comment", + }, + { + Author: lowercase.Create("not_test_guy"), + Body: "I approve", + }, + { + Author: lowercase.Create("test_guy"), + Body: "I approve version:0.1.0", + }, + { + Author: lowercase.Create("test_guy2"), + Body: "I approve", + }, + { + Author: lowercase.Create("test_guy3"), + Body: "I approve version:0.0.1", + }, + }, nil +} + +func TestHandleSemver3(t *testing.T) { + c := &gin.Context{} + + remote.ToContext(c, &myR3{}) + config := model.NonEmptyConfig() + config.Tag.Enable = true + m := &model.MaintainerSnapshot{ + People: map[string]*model.Person{ + "test_guy": { + Name: "test_guy", + }, + "test_guy2": { + Name: "test_guy2", + }, + "test_guy3": { + Name: "test_guy3", + }, + }, + } + user := &model.User{} + repo := &model.Repo{} + hook := &StatusHook{ + Repo: &model.Repo{ + Owner: "test_guy", + Name: "test_repo", + }, + } + pr := &model.PullRequest{ + Issue: model.Issue{ + Author: lowercase.Create("test_guy"), + }, + } + request := &model.ApprovalRequest{ + Config: config, + Maintainer: m, + PullRequest: pr, + Repository: repo, + ApprovalComments: testComments, + DisapprovalComments: testComments, + } + ver, err := handleSemver(c, user, hook, request, model.DefaultApprovalPolicy()) + if err != nil { + t.Fatalf("unexpected error %s", err) + } + expected, _ := version.NewVersion("0.1.0") + if expected.String() != ver { + t.Errorf("Expected %s, got %s", expected.String(), ver) + } +} + +type myRR struct { + 
gin.ResponseWriter +} + +func (mr *myRR) Header() http.Header { + return http.Header{} +} + +func (my *myRR) Write(p []byte) (n int, err error) { + return len(p), nil +} + +func TestGetMaxVersionCommentOldPattern(t *testing.T) { + c := &gin.Context{} + + remote.ToContext(c, &myR{}) + config := model.NonEmptyConfig() + + regex, err := regexp.Compile(`(?i)^I approve\s*(?P\S*)`) + assert.Nil(t, err) + + config.Pattern = rxserde.RegexSerde{Regex: regex} + config.Tag.Enable = true + m := &model.MaintainerSnapshot{ + People: map[string]*model.Person{ + "test_guy": &model.Person{ + Name: "test_guy", + }, + "test_guy2": &model.Person{ + Name: "test_guy2", + }, + "test_guy3": &model.Person{ + Name: "test_guy3", + }, + }, + } + i := model.PullRequest{ + Issue: model.Issue{Author: lowercase.Create("test_guy")}, + } + comments := []model.Feedback{ + &model.Comment{ + Author: lowercase.Create("test_guy"), + Body: "I approve 0.1.0", + }, + &model.Comment{ + Author: lowercase.Create("not_test_guy"), + Body: "this is not an I approve comment", + }, + &model.Comment{ + Author: lowercase.Create("not_test_guy"), + Body: "I approve", + }, + &model.Comment{ + Author: lowercase.Create("test_guy"), + Body: "I approve 0.1.0", + }, + &model.Comment{ + Author: lowercase.Create("test_guy2"), + Body: "I approve", + }, + &model.Comment{ + Author: lowercase.Create("test_guy3"), + Body: "I approve 0.0.1", + }, + } + request := &model.ApprovalRequest{ + Config: config, + Maintainer: m, + PullRequest: &i, + ApprovalComments: comments, + } + ver := getMaxVersionComment(request, model.DefaultApprovalPolicy()) + if ver == nil { + t.Fatalf("Got nil for version") + } + expected, _ := version.NewVersion("0.1.0") + if !expected.Equal(ver) { + t.Errorf("Expected %s, got %s", expected.String(), ver.String()) + } +} + +func TestGetCommitComment(t *testing.T) { + c := &gin.Context{} + + remote.ToContext(c, &myR{}) + config := model.NonEmptyConfig() + config.Tag.Enable = true + m := &model.MaintainerSnapshot{ + People: map[string]*model.Person{ + "test_guy": &model.Person{ + Name: "test_guy", + }, + "test_guy2": &model.Person{ + Name: "test_guy2", + }, + "test_guy3": &model.Person{ + Name: "test_guy3", + }, + }, + } + i := model.PullRequest{ + Issue: model.Issue{Author: lowercase.Create("test_guy")}, + } + comments := []model.Feedback{ + &model.Comment{ + Author: lowercase.Create("test_guy"), + Body: "I approve comment:Hello", + }, + &model.Comment{ + Author: lowercase.Create("not_test_guy"), + Body: "this is not an I approve comment", + }, + &model.Comment{ + Author: lowercase.Create("not_test_guy"), + Body: "I approve", + }, + &model.Comment{ + Author: lowercase.Create("test_guy"), + Body: "I approve version:0.1.0", + }, + &model.Comment{ + Author: lowercase.Create("test_guy2"), + Body: "I approve", + }, + &model.Comment{ + Author: lowercase.Create("test_guy3"), + Body: "I approve version:0.0.1 comment:Hi there", + }, + } + request := &model.ApprovalRequest{ + Config: config, + Maintainer: m, + PullRequest: &i, + ApprovalComments: comments, + DisapprovalComments: comments, + } + comment := getCommitComment(request, model.DefaultApprovalPolicy()) + if comment != "Hi there" { + t.Error("Expected Hi there, got", comment) + } +} diff --git a/web/store.go b/web/store.go new file mode 100644 index 0000000..8217676 --- /dev/null +++ b/web/store.go @@ -0,0 +1,77 @@ +/* + +SPDX-Copyright: Copyright (c) Capital One Services, LLC +SPDX-License-Identifier: Apache-2.0 +Copyright 2017 Capital One Services, LLC + +Licensed under the Apache 
License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and limitations under the License. + +*/ +package web + +import ( + "context" + "fmt" + + "github.com/capitalone/checks-out/exterror" + "github.com/capitalone/checks-out/model" + "github.com/capitalone/checks-out/remote" + "github.com/capitalone/checks-out/snapshot" + "github.com/capitalone/checks-out/store" +) + +func GetHookParameters(c context.Context, slug string, fixSlackTargets bool) (HookParams, error) { + repo, user, cap, err := GetRepoAndUser(c, slug) + if err != nil { + return HookParams{}, err + } + config, maintainer, err := snapshot.GetConfigAndMaintainers(c, user, cap, repo) + if err != nil { + return HookParams{}, err + } + if fixSlackTargets { + err = snapshot.FixSlackTargets(c, config, user.Login) + if err != nil { + return HookParams{}, err + } + } + result := HookParams{ + Repo: repo, + User: user, + Cap: cap, + Config: config, + Snapshot: maintainer, + } + return result, nil +} + +func GetRepoAndUser(c context.Context, slug string) (*model.Repo, *model.User, *model.Capabilities, error) { + repo, err := store.GetRepoSlug(c, slug) + if err != nil { + msg := fmt.Sprintf("Error getting repository %s", slug) + err = exterror.Append(err, msg) + return nil, nil, nil, err + } + user, err := store.GetUser(c, repo.UserID) + if err != nil { + msg := fmt.Sprintf("Error getting repository owner %s", repo.Slug) + err = exterror.Append(err, msg) + return nil, nil, nil, err + } + cap, err := remote.Capabilities(c, user) + if err != nil { + msg := fmt.Sprintf("Error getting repository %s", slug) + err = exterror.Append(err, msg) + return nil, nil, nil, err + } + return repo, user, cap, err +} diff --git a/web/tag.go b/web/tag.go new file mode 100644 index 0000000..9c1c800 --- /dev/null +++ b/web/tag.go @@ -0,0 +1,279 @@ +/* + +SPDX-Copyright: Copyright (c) Capital One Services, LLC +SPDX-License-Identifier: Apache-2.0 +Copyright 2017 Capital One Services, LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and limitations under the License. 
+ +*/ +package web + +import ( + "context" + "fmt" + "regexp" + "strings" + "time" + + log "github.com/Sirupsen/logrus" + version "github.com/hashicorp/go-version" + "github.com/capitalone/checks-out/model" + "github.com/capitalone/checks-out/remote" +) + +func tagIfEnabled(c context.Context, user *model.User, hook *StatusHook, + req *model.ApprovalRequest, policy *model.ApprovalPolicy, SHA *string) (*string, error) { + + var tagConfig *model.TagConfig + + if policy.Tag == nil { + tagConfig = &req.Config.Tag + } else { + tagConfig = policy.Tag + } + + if tagConfig.Enable { + return doTag(c, user, hook, req, policy, SHA) + } + return nil, nil +} + +func doTag(c context.Context, user *model.User, hook *StatusHook, + req *model.ApprovalRequest, policy *model.ApprovalPolicy, SHA *string) (*string, error) { + var tagConfig *model.TagConfig + var vers string + + if policy.Tag == nil { + tagConfig = &req.Config.Tag + } else { + tagConfig = policy.Tag + } + + var err error + if strings.HasPrefix(tagConfig.Alg, "timestamp") { + vers, err = handleTimestamp(tagConfig) + } else if tagConfig.Alg == "semver" { + vers, err = handleSemver(c, user, hook, req, policy) + } else { + log.Warnf("Repo %s should have had a valid tag algorithm configured -- using semver", + req.Repository.Name) + vers, err = handleSemver(c, user, hook, req, policy) + } + if err != nil { + return nil, err + } + tag, err := tagConfig.GenerateTag(model.TemplateTag{Version: vers}) + if err != nil { + return nil, err + } + log.Debugf("Tagging merge from PR with tag: %s", tag) + err = remote.Tag(c, user, req.Repository, &tag, SHA) + if err != nil { + return nil, err + } + return &tag, nil +} + +const modifiedRFC3339 = "2006-01-02T15.04.05Z" + +func handleTimestamp(tag *model.TagConfig) (string, error) { + /* + All times are in UTC + Valid format strings: + - timestamp-rfc3339: RFC 3339 format + - timestamp-millis: milliseconds since the epoch + */ + curTime := time.Now().UTC() + var format string + switch tag.Alg { + case "timestamp-millis": + //special case, return from here + out := fmt.Sprintf("%d", curTime.Unix()) + return out, nil + case "timestamp-rfc3339": + format = modifiedRFC3339 + default: + log.Warnf("invalid tag format %s. Using modified rfc3339", tag.Alg) + format = modifiedRFC3339 + } + out := curTime.Format(format) + return out, nil +} + +func increment(segments []int, tag *model.TagConfig) { + switch tag.Increment { + case model.Major: + segments[0]++ + segments[1] = 0 + segments[2] = 0 + case model.Minor: + segments[1]++ + segments[2] = 0 + case model.Patch: + segments[2]++ + case model.None: + // do nothing + default: + log.Errorf("Unknown semver increment %d", tag.Increment) + } +} + +func handleSemver(c context.Context, user *model.User, hook *StatusHook, req *model.ApprovalRequest, policy *model.ApprovalPolicy) (string, error) { + + var tagConfig *model.TagConfig + + if policy.Tag == nil { + tagConfig = &req.Config.Tag + } else { + tagConfig = policy.Tag + } + + // to create the version, need to scan the comments on the pull request to see if anyone specified a version # + // if so, use the largest specified version #. 
if not, increment the last version version # for the release + tags, err := remote.ListTags(c, user, hook.Repo) + if err != nil { + log.Warnf("Unable to list tags for %s/%s: %s", hook.Repo.Owner, hook.Repo.Name, err) + } + maxVer := getMaxExistingTag(tags) + + foundVersion := getMaxVersionComment(req, policy) + + if foundVersion != nil && foundVersion.GreaterThan(maxVer) { + maxVer = foundVersion + } else { + maxParts := maxVer.Segments() + increment(maxParts, tagConfig) + maxVer, _ = version.NewVersion(fmt.Sprintf("%d.%d.%d", maxParts[0], maxParts[1], maxParts[2])) + } + + verStr := maxVer.String() + return verStr, nil +} + +// getMaxExistingTag is a helper function that scans all passed-in tags for a +// comments with semantic versions. It returns the max version found. If no version +// is found, the function returns a version with the value 0.0.0 +func getMaxExistingTag(tags []model.Tag) *version.Version { + //find the previous largest semver value + maxVer, _ := version.NewVersion("v0.0.0") + + for _, v := range tags { + curVer, err := version.NewVersion(string(v)) + if err != nil { + continue + } + if curVer.GreaterThan(maxVer) { + maxVer = curVer + } + } + + log.Debugf("maxVer found is %s", maxVer.String()) + return maxVer +} + +func getGroupIndex(re *regexp.Regexp, name string) int { + for i, n := range re.SubexpNames() { + if n == name { + return i + } + } + return 0 +} + +// getMaxVersionComment is a helper function that analyzes the list of comments +// and returns the maximum version found in a comment. if no matching comment is found, +// the function returns version 0.0.0. If there's a bug in the version pattern, +// nil will be returned. +func getMaxVersionComment(request *model.ApprovalRequest, policy *model.ApprovalPolicy) *version.Version { + var matcher *regexp.Regexp + maxVersion, _ := version.NewVersion("0.0.0") + if policy.Pattern != nil { + matcher = policy.Pattern.Regex + } else { + matcher = request.Config.Pattern.Regex + } + if matcher == nil { + return nil + } + index := getGroupIndex(matcher, "version") + if index == 0 { + return nil + } + model.Approve(request, policy, + func(f model.Feedback, op model.ApprovalOp) { + if op != model.Approval { + return + } + body := f.GetBody() + if len(body) == 0 { + return + } + // verify the comment matches the approval pattern + match := matcher.FindStringSubmatch(body) + if len(match) > index { + //has a version + curVersion, err := version.NewVersion(match[index]) + if err != nil { + return + } + if maxVersion == nil || curVersion.GreaterThan(maxVersion) { + maxVersion = curVersion + } + } + }) + + return maxVersion +} + +func getCommitComment(request *model.ApprovalRequest, policy *model.ApprovalPolicy) string { + log.Debugf("request config: %+v", request.Config) + log.Debugf("policy pattern: %+v", policy.Pattern) + var matcher *regexp.Regexp + var commentText string + if policy.Pattern != nil { + matcher = policy.Pattern.Regex + } else { + matcher = request.Config.Pattern.Regex + } + log.Debugf("Matcher: %+v", matcher) + if matcher == nil { + return "" + } + index := getGroupIndex(matcher, "comment") + log.Debugf("index: %+v", index) + if index == 0 { + return "" + } + model.Approve(request, policy, + func(f model.Feedback, op model.ApprovalOp) { + if op != model.Approval { + return + } + body := f.GetBody() + if len(body) == 0 { + return + } + // verify the comment matches the approval pattern + log.Debugf("comment body: %+v", body) + match := matcher.FindStringSubmatch(body) + log.Debugf("match: %+v", match) + if 
len(match) > index && len(match[index]) > 0 { + //has a comment + commentText = match[index] + log.Debugf("commentText set to %s", commentText) + } + }) + + log.Debugf("returning %s", commentText) + return commentText +} diff --git a/web/template/files/brand.html b/web/template/files/brand.html index 4191625..477b24c 100644 --- a/web/template/files/brand.html +++ b/web/template/files/brand.html @@ -1,7 +1,7 @@ - LGTM + Checks-Out @@ -19,7 +19,6 @@ @@ -85,7 +84,7 @@

     Pending Approvals
     Approval Workflow
-    Approvers comment on the issue with LGTM, looks good to me, indicating their approval to merge the changeset.
+    Approvers comment on the issue with I approve, indicating their approval to merge the changeset.
@@ -93,14 +92,14 @@
     Approval Workflow
     bradrydzewski commented just now
-    LGTM
+    I approve
@@ -146,14 +145,9 @@
     Enterprise
    -